I created a Deployment and a Service from *.yaml files. Inside the container I can find the NS record via nslookup or dig, but I can't connect to the db via the service name or the service IP.
Can anybody tell me what I am doing wrong?
Environment:
Minikube version:
$ minikube version
v0.24.1
OS (e.g. from /etc/os-release):
NAME="Ubuntu"
VERSION="16.04.2 LTS (Xenial Xerus)"
ID=ubuntu
ID_LIKE=debian
PRETTY_NAME="Ubuntu 16.04.2 LTS"
VERSION_ID="16.04"
HOME_URL="http://www.ubuntu.com/"
SUPPORT_URL="http://help.ubuntu.com/"
BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"
VERSION_CODENAME=xenial
UBUNTU_CODENAME=xenial
VM Driver (e.g. cat ~/.minikube/machines/minikube/config.json | grep DriverName):
"DriverName": "kvm2",
and
"DriverName": "virtualbox",
ISO version (e.g. cat ~/.minikube/machines/minikube/config.json | grep -i ISO or minikube ssh cat /etc/VERSION):
v0.23.6
DNS logs
sidecar
kubectl logs --namespace=kube-system $(kubectl get pods --namespace=kube-system -l k8s-app=kube-dns -o name) -c sidecar
ERROR: logging before flag.Parse: I1221 13:49:25.085555 1 main.go:48] Version v1.14.4-2-g5584e04
ERROR: logging before flag.Parse: I1221 13:49:25.085647 1 server.go:45] Starting server (options {DnsMasqPort:53 DnsMasqAddr:127.0.0.1 DnsMasqPollIntervalMs:5000 Probes:[{Label:kubedns Server:127.0.0.1:10053 Name:kubernetes.default.svc.cluster.local. Interval:5s Type:1} {Label:dnsmasq Server:127.0.0.1:53 Name:kubernetes.default.svc.cluster.local. Interval:5s Type:1}] PrometheusAddr:0.0.0.0 PrometheusPort:10054 PrometheusPath:/metrics PrometheusNamespace:kubedns})
ERROR: logging before flag.Parse: I1221 13:49:25.085854 1 dnsprobe.go:75] Starting dnsProbe {Label:kubedns Server:127.0.0.1:10053 Name:kubernetes.default.svc.cluster.local. Interval:5s Type:1}
ERROR: logging before flag.Parse: I1221 13:49:25.086013 1 dnsprobe.go:75] Starting dnsProbe {Label:dnsmasq Server:127.0.0.1:53 Name:kubernetes.default.svc.cluster.local. Interval:5s Type:1}
dnsmasq
kubectl logs --namespace=kube-system $(kubectl get pods --namespace=kube-system -l k8s-app=kube-dns -o name) -c dnsmasq
I1221 13:49:24.134834 1 main.go:76] opts: {{/usr/sbin/dnsmasq [-k --cache-size=1000 --log-facility=- --server=/cluster.local/127.0.0.1#10053 --server=/in-addr.arpa/127.0.0.1#10053 --server=/ip6.arpa/127.0.0.1#10053] true} /etc/k8s/dns/dnsmasq-nanny 10000000000}
I1221 13:49:24.135086 1 nanny.go:86] Starting dnsmasq [-k --cache-size=1000 --log-facility=- --server=/cluster.local/127.0.0.1#10053 --server=/in-addr.arpa/127.0.0.1#10053 --server=/ip6.arpa/127.0.0.1#10053]
I1221 13:49:24.353157 1 nanny.go:111]
W1221 13:49:24.353184 1 nanny.go:112] Got EOF from stdout
I1221 13:49:24.353308 1 nanny.go:108] dnsmasq[10]: started, version 2.78-security-prerelease cachesize 1000
I1221 13:49:24.353340 1 nanny.go:108] dnsmasq[10]: compile time options: IPv6 GNU-getopt no-DBus no-i18n no-IDN DHCP DHCPv6 no-Lua TFTP no-conntrack ipset auth no-DNSSEC loop-detect inotify
I1221 13:49:24.353364 1 nanny.go:108] dnsmasq[10]: using nameserver 127.0.0.1#10053 for domain ip6.arpa
I1221 13:49:24.353385 1 nanny.go:108] dnsmasq[10]: using nameserver 127.0.0.1#10053 for domain in-addr.arpa
I1221 13:49:24.353419 1 nanny.go:108] dnsmasq[10]: using nameserver 127.0.0.1#10053 for domain cluster.local
I1221 13:49:24.353457 1 nanny.go:108] dnsmasq[10]: reading /etc/resolv.conf
I1221 13:49:24.353487 1 nanny.go:108] dnsmasq[10]: using nameserver 127.0.0.1#10053 for domain ip6.arpa
I1221 13:49:24.353514 1 nanny.go:108] dnsmasq[10]: using nameserver 127.0.0.1#10053 for domain in-addr.arpa
I1221 13:49:24.353534 1 nanny.go:108] dnsmasq[10]: using nameserver 127.0.0.1#10053 for domain cluster.local
I1221 13:49:24.353554 1 nanny.go:108] dnsmasq[10]: using nameserver 10.110.7.1#53
I1221 13:49:24.353617 1 nanny.go:108] dnsmasq[10]: read /etc/hosts - 7 addresses
kubedns
kubectl logs --namespace=kube-system $(kubectl get pods --namespace=kube-system -l k8s-app=kube-dns -o name) -c kubedns
I1221 13:49:23.122626 1 dns.go:48] version: 1.14.4-2-g5584e04
I1221 13:49:23.202663 1 server.go:66] Using configuration read from ConfigMap: kube-system:kube-dns
I1221 13:49:23.202797 1 server.go:113] FLAG: --alsologtostderr="false"
I1221 13:49:23.202924 1 server.go:113] FLAG: --config-dir=""
I1221 13:49:23.202932 1 server.go:113] FLAG: --config-map="kube-dns"
I1221 13:49:23.202936 1 server.go:113] FLAG: --config-map-namespace="kube-system"
I1221 13:49:23.202959 1 server.go:113] FLAG: --config-period="10s"
I1221 13:49:23.203028 1 server.go:113] FLAG: --dns-bind-address="0.0.0.0"
I1221 13:49:23.203042 1 server.go:113] FLAG: --dns-port="10053"
I1221 13:49:23.203082 1 server.go:113] FLAG: --domain="cluster.local."
I1221 13:49:23.203101 1 server.go:113] FLAG: --federations=""
I1221 13:49:23.203107 1 server.go:113] FLAG: --healthz-port="8081"
I1221 13:49:23.203111 1 server.go:113] FLAG: --initial-sync-timeout="1m0s"
I1221 13:49:23.203115 1 server.go:113] FLAG: --kube-master-url=""
I1221 13:49:23.203194 1 server.go:113] FLAG: --kubecfg-file=""
I1221 13:49:23.203198 1 server.go:113] FLAG: --log-backtrace-at=":0"
I1221 13:49:23.203249 1 server.go:113] FLAG: --log-dir=""
I1221 13:49:23.203254 1 server.go:113] FLAG: --log-flush-frequency="5s"
I1221 13:49:23.203277 1 server.go:113] FLAG: --logtostderr="true"
I1221 13:49:23.203281 1 server.go:113] FLAG: --nameservers=""
I1221 13:49:23.203348 1 server.go:113] FLAG: --stderrthreshold="2"
I1221 13:49:23.203369 1 server.go:113] FLAG: --v="2"
I1221 13:49:23.203416 1 server.go:113] FLAG: --version="false"
I1221 13:49:23.203447 1 server.go:113] FLAG: --vmodule=""
I1221 13:49:23.203554 1 server.go:176] Starting SkyDNS server (0.0.0.0:10053)
I1221 13:49:23.203842 1 server.go:198] Skydns metrics enabled (/metrics:10055)
I1221 13:49:23.203858 1 dns.go:147] Starting endpointsController
I1221 13:49:23.203863 1 dns.go:150] Starting serviceController
I1221 13:49:23.204165 1 logs.go:41] skydns: ready for queries on cluster.local. for tcp://0.0.0.0:10053 [rcache 0]
I1221 13:49:23.204175 1 logs.go:41] skydns: ready for queries on cluster.local. for udp://0.0.0.0:10053 [rcache 0]
I1221 13:49:23.555942 1 sync_configmap.go:107] ConfigMap kube-system:kube-dns was created
I1221 13:49:24.054105 1 dns.go:171] Initialized services and endpoints from apiserver
I1221 13:49:24.054128 1 server.go:129] Setting up Healthz Handler (/readiness)
I1221 13:49:24.054206 1 server.go:134] Setting up cache handler (/cache)
I1221 13:49:24.054257 1 server.go:120] Status HTTP port 8081
What happened:
Cannot ping or traceroute a service via its service name or IP.
What you expected to happen:
Being able to ping the service via its service name.
How to reproduce it (as minimally and precisely as possible):
Default namespace (kc is an alias for kubectl)
$ kc get all
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
deploy/memcached 1 1 1 1 1h
deploy/mongo 1 1 1 1 1h
NAME DESIRED CURRENT READY AGE
rs/memcached-64dcdbc9f6 1 1 1 1h
rs/mongo-67d67fddf9 1 1 1 39m
rs/mongo-6fc9bd6d6c 0 0 0 1h
NAME READY STATUS RESTARTS AGE
po/busybox 1/1 Running 0 29m
po/memcached-64dcdbc9f6-j2v97 1/1 Running 0 1h
po/mongo-67d67fddf9-55zgd 1/1 Running 0 39m
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
svc/kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 1h
svc/memcached ClusterIP 10.100.42.68 <none> 55555/TCP 1h
svc/mongo ClusterIP 10.99.92.189 <none> 27017/TCP 1h
kube-system namespace
$ kc get --namespace=kube-system all
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
deploy/kube-dns 1 1 1 1 1h
NAME DESIRED CURRENT READY AGE
rs/kube-dns-86f6f55dd5 1 1 1 1h
NAME READY STATUS RESTARTS AGE
po/kube-addon-manager-minikube 1/1 Running 1 1h
po/kube-dns-86f6f55dd5-mrtrm 3/3 Running 3 1h
po/kubernetes-dashboard-5sgcl 1/1 Running 1 1h
po/storage-provisioner 1/1 Running 1 1h
NAME DESIRED CURRENT READY AGE
rc/kubernetes-dashboard 1 1 1 1h
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
svc/kube-dns ClusterIP 10.96.0.10 <none> 53/UDP,53/TCP 1h
svc/kubernetes-dashboard NodePort 10.110.68.80 <none> 80:30000/TCP 1h
resolv.conf
$ kc exec -it mongo-67d67fddf9-55zgd -- cat /etc/resolv.conf
nameserver 10.96.0.10
search default.svc.cluster.local svc.cluster.local cluster.local
options ndots:5
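With ndots:5 and this search list, a short name like mongo is expanded through the search domains (default.svc.cluster.local first) before being tried as an absolute name. If dig is available in the image, both forms below should return the same record (note that dig needs +search to honor the search list):
$ kc exec -it mongo-67d67fddf9-55zgd -- dig +search +short mongo
$ kc exec -it mongo-67d67fddf9-55zgd -- dig +short mongo.default.svc.cluster.local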
nslookup test (kubernetes)
$ kc exec -it mongo-67d67fddf9-55zgd -- nslookup kubernetes
Server: 10.96.0.10
Address: 10.96.0.10#53
Non-authoritative answer:
Name: kubernetes.default.svc.cluster.local
Address: 10.96.0.1
ping test (kubernetes, ok)
$ kc exec -it mongo-67d67fddf9-55zgd -- ping kubernetes
PING kubernetes.default.svc.cluster.local (10.96.0.1): 56 data bytes
64 bytes from 10.96.0.1: icmp_seq=0 ttl=250 time=2.873 ms
64 bytes from 10.96.0.1: icmp_seq=1 ttl=250 time=1.845 ms
64 bytes from 10.96.0.1: icmp_seq=2 ttl=250 time=1.809 ms
64 bytes from 10.96.0.1: icmp_seq=3 ttl=250 time=2.035 ms
64 bytes from 10.96.0.1: icmp_seq=4 ttl=250 time=1.805 ms
--- kubernetes.default.svc.cluster.local ping statistics ---
5 packets transmitted, 5 packets received, 0% packet loss
round-trip min/avg/max/stddev = 1.805/2.073/2.873/0.409 ms
traceroute test (kubernetes, ok)
$ kc exec -it mongo-67d67fddf9-55zgd -- traceroute -n kubernetes
traceroute to kubernetes (10.96.0.1), 30 hops max, 60 byte packets
1 10.110.7.1 0.207 ms 0.195 ms 0.186 ms
2 192.168.1.1 0.317 ms 0.392 ms 0.456 ms
3 10.77.0.1 2.261 ms 2.977 ms 3.755 ms
4 10.128.132.1 1.568 ms 1.721 ms 1.934 ms
5 192.168.39.136 2.055 ms 2.329 ms 2.456 ms
6 10.128.145.2 8.603 ms 8.971 ms 9.391 ms
nslookup test (mongo)
$ kc exec -it mongo-67d67fddf9-55zgd -- nslookup mongo
Server: 10.96.0.10
Address: 10.96.0.10#53
Name: mongo.default.svc.cluster.local
Address: 10.99.92.189
ping test (mongo, bad)
$ kc exec -it mongo-67d67fddf9-55zgd -- ping mongo
PING mongo.default.svc.cluster.local (10.99.92.189): 56 data bytes
--- mongo.default.svc.cluster.local ping statistics ---
210 packets transmitted, 0 packets received, 100% packet loss
command terminated with exit code 1
traceroute test (mongo, bad)
$ kc exec -it mongo-67d67fddf9-55zgd -- traceroute -n mongo
traceroute to mongo (10.99.92.189), 30 hops max, 60 byte packets
1 10.110.7.1 0.228 ms 0.203 ms 0.194 ms
2 192.168.1.1 0.438 ms 0.519 ms 0.582 ms
3 10.77.0.1 2.290 ms 3.599 ms 4.396 ms
4 10.128.132.1 1.851 ms 1.949 ms 2.166 ms
5 192.168.39.136 2.258 ms 2.421 ms 2.618 ms
6 10.128.145.5 5.193 ms 6.084 ms 8.301 ms
7 * * *
8 * * *
9 * * *
10 * * *
11 * * *
12 * * *
13 * * *
14 * * *
15 * * *
16 * * *
17 * * *
18 * * *
19 * * *
20 * * *
21 * * *
22 * * *
23 * * *
24 * * *
25 * * *
26 * * *
27 * * *
28 * * *
29 * * *
30 * * *
traceroute test (mongo service IP, bad)
$ kc exec -it mongo-67d67fddf9-55zgd -- traceroute -n 10.99.92.189
traceroute to 10.99.92.189 (10.99.92.189), 30 hops max, 60 byte packets
1 10.110.7.1 0.190 ms 0.136 ms 0.124 ms
2 192.168.1.1 0.431 ms 0.485 ms 0.547 ms
3 10.77.0.1 2.402 ms 3.256 ms 4.040 ms
4 10.128.132.1 1.780 ms 1.790 ms 1.930 ms
5 192.168.39.136 2.214 ms 2.209 ms 2.562 ms
6 10.128.145.5 7.645 ms 8.028 ms 8.284 ms
7 * * *
8 * * *
9 * * *
10 * * *
11 * * *
12 * * *
13 * * *
14 * * *
15 * * *
16 * * *
17 * * *
18 * * *
19 * * *
20 * * *
21 * * *
22 * * *
23 * * *
24 * * *
25 * * *
26 * * *
27 * * *
28 * * *
29 * * *
30 * * *
iptables from the node
$ sudo iptables-save | grep mongo
-A KUBE-SEP-HYCP7OGZ3WQCZP76 -s 172.17.0.6/32 -m comment --comment "default/mongo:27017" -j KUBE-MARK-MASQ
-A KUBE-SEP-HYCP7OGZ3WQCZP76 -p tcp -m comment --comment "default/mongo:27017" -m tcp -j DNAT --to-destination 172.17.0.6:27017
-A KUBE-SERVICES -d 10.99.92.189/32 -p tcp -m comment --comment "default/mongo:27017 cluster IP" -m tcp --dport 27017 -j KUBE-SVC-VMEO5WN4YXST2YCP
-A KUBE-SVC-VMEO5WN4YXST2YCP -m comment --comment "default/mongo:27017" -j KUBE-SEP-HYCP7OGZ3WQCZP76
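These rules are what actually implement the service: TCP traffic to the ClusterIP 10.99.92.189:27017 is DNAT'ed to the pod at 172.17.0.6:27017. Note they only match -p tcp, so ICMP to the ClusterIP is never translated. A TCP probe from inside the minikube VM exercises the same path more directly (a sketch, assuming nc is available in the VM):
$ minikube ssh -- nc -zv -w 2 10.99.92.189 27017
$ minikube ssh -- nc -zv -w 2 172.17.0.6 27017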
mongo-deployment.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  annotations:
    kompose.cmd: kompose convert -f docker-compose.yml
    kompose.version: 1.6.0 (e4adfef)
  creationTimestamp: null
  labels:
    io.kompose.service: mongo
  name: mongo
spec:
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      creationTimestamp: null
      labels:
        io.kompose.service: mongo
    spec:
      containers:
      - image: docker.scnetservices.ru/mongo:dev
        name: mongo
        resources: {}
        volumeMounts:
        - mountPath: /data/db
          name: mongo-claim0
      restartPolicy: Always
      volumes:
      - name: mongo-claim0
        persistentVolumeClaim:
          claimName: mongo-claim0
status: {}
mongo-service.yaml
apiVersion: v1
kind: Service
metadata:
  annotations:
    kompose.cmd: kompose convert -f docker-compose.yml
    kompose.version: 1.6.0 (e4adfef)
  creationTimestamp: null
  labels:
    io.kompose.service: mongo
  name: mongo
spec:
  ports:
  - name: "27017"
    port: 27017
    targetPort: 27017
  selector:
    io.kompose.service: mongo
status:
  loadBalancer: {}
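With a kompose-generated pair like this, it is worth confirming that the service selector (io.kompose.service: mongo) really matches the running pod's labels; otherwise the service has no endpoints and the iptables chain has no pod to DNAT to. A quick sketch of the check:
$ kc get endpoints mongo
$ kc get pods -l io.kompose.service=mongo -o wide
The endpoints list should contain 172.17.0.6:27017, the same pod address that appears in the iptables rules above.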
mongo-volume0-persistentvolume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  creationTimestamp: null
  labels:
    type: local
  name: mongo-volume0
spec:
  storageClassName: manual
  capacity:
    storage: 5Gi
  accessModes:
  - ReadWriteOnce
  hostPath:
    path: "/home/docker/mongo"
    type: "DirectoryOrCreate"
  persistentVolumeReclaimPolicy: Recycle
  claimRef:
    namespace: default
    name: mongo-claim0
mongo-claim0-persistentvolumeclaim.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: mongo-claim0
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
But if I connect to the mongo container via SSH tunneling, it works:
$ ssh -fN -l docker -i "~/.minikube/machines/minikube/id_rsa" -L 27017:localhost:27017 $(minikube ip)
sah4ez@PC001:~$ mongo localhost:27017
MongoDB shell version v3.4.9
connecting to: localhost:27017
MongoDB server version: 3.4.2
Server has startup warnings:
2017-12-21T14:48:20.434+0000 I CONTROL [initandlisten]
2017-12-21T14:48:20.434+0000 I CONTROL [initandlisten] ** WARNING: Access control is not enabled for the database.
2017-12-21T14:48:20.434+0000 I CONTROL [initandlisten] ** Read and write access to data and configuration is unrestricted.
2017-12-21T14:48:20.434+0000 I CONTROL [initandlisten]
> db.hostInfo()
{
"system" : {
"currentTime" : ISODate("2017-12-21T16:23:35.940Z"),
"hostname" : "minikube",
"cpuAddrSize" : 64,
"memSizeMB" : 1906,
"numCores" : 2,
"cpuArch" : "x86_64",
"numaEnabled" : false
},
"os" : {
"type" : "Linux",
"name" : "PRETTY_NAME="Debian GNU/Linux 8 (jessie)"",
"version" : "Kernel 4.9.13"
},
"extra" : {
"versionString" : "Linux version 4.9.13 (jenkins@jenkins) (gcc version 5.4.0 (Buildroot 2017.02) ) #1 SMP Thu Oct 19 17:14:00 UTC 2017",
"libcVersion" : "2.19",
"kernelVersion" : "4.9.13",
"cpuFrequencyMHz" : "2993.200",
"cpuFeatures" : "fpu de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pse36 clflush mmx fxsr sse sse2 syscall nx lm rep_good nopl eagerfpu pni vmx cx16 x2apic hypervisor lahf_lm tpr_shadow vnmi flexpriority ept vpid",
"pageSize" : NumberLong(4096),
"numPages" : 487940,
"maxOpenFiles" : 65536
},
"ok" : 1
}
My OS info:
sah4ez@PC001:~$ uname -a
Linux PC001 4.8.0-58-generic #63~16.04.1-Ubuntu SMP Mon Jun 26 18:08:51 UTC 2017 x86_64 x86_64 x86_64 GNU/Linux
In the example above the tunnel reaches the mongo instance running on minikube (hostname "minikube", kernel 4.9.13)…
=====
UPD 2017/12/22
I have now created two pods running mongo (mongo and mongo2).
From the mongo2 instance I can connect via the DNS name mongo.default.svc.cluster.local:27017, but not via the service IP. From the mongo instance I can't connect via mongo2.default.svc.cluster.local at all.
$ minikube ssh -- sudo iptables-save | grep mongo
-A KUBE-SEP-HYCP7OGZ3WQCZP76 -s 172.17.0.6/32 -m comment --comment "default/mongo:27017" -j KUBE-MARK-MASQ
-A KUBE-SEP-HYCP7OGZ3WQCZP76 -p tcp -m comment --comment "default/mongo:27017" -m tcp -j DNAT --to-destination 172.17.0.6:27017
-A KUBE-SEP-KVDY7RMLLBYXOYB5 -s 172.17.0.8/32 -m comment --comment "default/mongo:27017" -j KUBE-MARK-MASQ
-A KUBE-SEP-KVDY7RMLLBYXOYB5 -p tcp -m comment --comment "default/mongo:27017" -m tcp -j DNAT --to-destination 172.17.0.8:27017
-A KUBE-SERVICES -d 10.110.87.97/32 -p tcp -m comment --comment "default/mongo2:27017 cluster IP" -m tcp --dport 27017 -j KUBE-SVC-SDHY4S2JVGEDTQ2U
-A KUBE-SERVICES -d 10.98.1.35/32 -p tcp -m comment --comment "default/mongo:27017 cluster IP" -m tcp --dport 27017 -j KUBE-SVC-VMEO5WN4YXST2YCP
-A KUBE-SVC-VMEO5WN4YXST2YCP -m comment --comment "default/mongo:27017" -m statistic --mode random --probability 0.50000000000 -j KUBE-SEP-HYCP7OGZ3WQCZP76
-A KUBE-SVC-VMEO5WN4YXST2YCP -m comment --comment "default/mongo:27017" -j KUBE-SEP-KVDY7RMLLBYXOYB5
-A KUBE-SERVICES -d 10.110.87.97/32 -p tcp -m comment --comment "default/mongo2:27017 has no endpoints" -m tcp --dport 27017 -j REJECT --reject-with icmp-port-unreachable
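Note the last rule: default/mongo2:27017 has no endpoints ... REJECT, which by itself would explain why mongo2 is unreachable: no pod matches the mongo2 service's selector, so kube-proxy rejects the traffic. Also, the mongo service now balances over two endpoints (172.17.0.6 and 172.17.0.8), which suggests both pods carry the io.kompose.service: mongo label. Comparing the two services should make the mismatch visible (sketch):
$ kc get endpoints mongo mongo2
$ kc describe svc mongo2 | grep -i selector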
Attached to the mongo2 pod and connected to the db in the mongo pod:
root@mongo2-848b44844f-dbpxx:/# mongo mongo:27017
MongoDB shell version v3.4.2
connecting to: mongo:27017
MongoDB server version: 3.4.2
Server has startup warnings:
2017-12-22T13:27:46.904+0000 I CONTROL [initandlisten]
2017-12-22T13:27:46.904+0000 I CONTROL [initandlisten] ** WARNING: Access control is not enabled for the database.
2017-12-22T13:27:46.904+0000 I CONTROL [initandlisten] ** Read and write access to data and configuration is unrestricted.
2017-12-22T13:27:46.904+0000 I CONTROL [initandlisten]
> db.hostInfo()
{
"system" : {
"currentTime" : ISODate("2017-12-22T14:37:32.222Z"),
"hostname" : "mongo-6fc9bd6d6c-cc8gh",
"cpuAddrSize" : 64,
"memSizeMB" : 1906,
"numCores" : 2,
"cpuArch" : "x86_64",
"numaEnabled" : false
},
"os" : {
"type" : "Linux",
"name" : "PRETTY_NAME="Debian GNU/Linux 8 (jessie)"",
"version" : "Kernel 4.9.13"
},
"extra" : {
"versionString" : "Linux version 4.9.13 (jenkins@jenkins) (gcc version 5.4.0 (Buildroot 2017.02) ) #1 SMP Thu Oct 19 17:14:00 UTC 2017",
"libcVersion" : "2.19",
"kernelVersion" : "4.9.13",
"cpuFrequencyMHz" : "2993.200",
"cpuFeatures" : "fpu de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pse36 clflush mmx fxsr sse sse2 syscall nx lm rep_good nopl eagerfpu pni vmx cx16 x2apic hypervisor lahf_lm tpr_shadow vnmi flexpriority ept vpid",
"pageSize" : NumberLong(4096),
"numPages" : 487940,
"maxOpenFiles" : 65536
},
"ok" : 1
}
> exit
2 Answers
Short answer: they technically don't exist.
Long answer: they're iptables rules: http://leebriggs.co.uk/blog/2017/02/15/kubernetes-networking-part1.html
You need a DNS service (kube-dns) to map the service name to the cluster IP.
EDIT:
Pinging a service is not possible. A service IP is virtual: traffic to it is routed by iptables and load-balanced over the matching pods, and nothing answers ICMP. What would you be pinging?
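For a reachability check from inside a pod, test the TCP port itself instead of ICMP, e.g. with the busybox pod from the question (a sketch; assumes the image's nc supports -z and -w, otherwise a plain nc HOST PORT attempt shows the same thing):
$ kc exec -it busybox -- nc -zv -w 2 mongo.default.svc.cluster.local 27017
$ kc exec -it busybox -- nc -zv -w 2 10.99.92.189 27017
If the DNS name connects but the ClusterIP does not (or vice versa), that separates a DNS problem from a kube-proxy/iptables problem.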