Topics: antrea ImagePullBackOff, antrea-gw0 tcpdump, antrea service, adding a Pod, trace-packet, ovs-appctl ofproto/trace
Related reading: Tanzu Kubernetes Grid and Antrea; A closer look at Antrea, the new CNI for vSphere with Tanzu guest clusters; First look at Antrea: a CNI plugin based on Open vSwitch
root@ubuntu:~# kubectl apply -f ./antrea/build/yamls/antrea.yml customresourcedefinition.apiextensions.k8s.io/antreaagentinfos.clusterinformation.antrea.tanzu.vmware.com created customresourcedefinition.apiextensions.k8s.io/antreaagentinfos.crd.antrea.io created customresourcedefinition.apiextensions.k8s.io/antreacontrollerinfos.clusterinformation.antrea.tanzu.vmware.com created customresourcedefinition.apiextensions.k8s.io/antreacontrollerinfos.crd.antrea.io created customresourcedefinition.apiextensions.k8s.io/clustergroups.core.antrea.tanzu.vmware.com created customresourcedefinition.apiextensions.k8s.io/clustergroups.crd.antrea.io created customresourcedefinition.apiextensions.k8s.io/clusternetworkpolicies.crd.antrea.io created customresourcedefinition.apiextensions.k8s.io/clusternetworkpolicies.security.antrea.tanzu.vmware.com created customresourcedefinition.apiextensions.k8s.io/egresses.crd.antrea.io created customresourcedefinition.apiextensions.k8s.io/externalentities.core.antrea.tanzu.vmware.com created customresourcedefinition.apiextensions.k8s.io/externalentities.crd.antrea.io created customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.antrea.io created customresourcedefinition.apiextensions.k8s.io/networkpolicies.security.antrea.tanzu.vmware.com created customresourcedefinition.apiextensions.k8s.io/tiers.crd.antrea.io created customresourcedefinition.apiextensions.k8s.io/tiers.security.antrea.tanzu.vmware.com created customresourcedefinition.apiextensions.k8s.io/traceflows.crd.antrea.io created customresourcedefinition.apiextensions.k8s.io/traceflows.ops.antrea.tanzu.vmware.com created serviceaccount/antctl created serviceaccount/antrea-agent created serviceaccount/antrea-controller created clusterrole.rbac.authorization.k8s.io/aggregate-antrea-clustergroups-edit created clusterrole.rbac.authorization.k8s.io/aggregate-antrea-clustergroups-view created clusterrole.rbac.authorization.k8s.io/aggregate-antrea-policies-edit created clusterrole.rbac.authorization.k8s.io/aggregate-antrea-policies-view created clusterrole.rbac.authorization.k8s.io/aggregate-traceflows-edit created clusterrole.rbac.authorization.k8s.io/aggregate-traceflows-view created clusterrole.rbac.authorization.k8s.io/antctl created clusterrole.rbac.authorization.k8s.io/antrea-agent created clusterrole.rbac.authorization.k8s.io/antrea-cluster-identity-reader created clusterrole.rbac.authorization.k8s.io/antrea-controller created clusterrolebinding.rbac.authorization.k8s.io/antctl created clusterrolebinding.rbac.authorization.k8s.io/antrea-agent created clusterrolebinding.rbac.authorization.k8s.io/antrea-controller created configmap/antrea-config-cbfh568k9m created service/antrea created deployment.apps/antrea-controller created apiservice.apiregistration.k8s.io/v1alpha1.stats.antrea.io created apiservice.apiregistration.k8s.io/v1alpha1.stats.antrea.tanzu.vmware.com created apiservice.apiregistration.k8s.io/v1beta1.controlplane.antrea.tanzu.vmware.com created apiservice.apiregistration.k8s.io/v1beta1.networking.antrea.tanzu.vmware.com created apiservice.apiregistration.k8s.io/v1beta1.system.antrea.io created apiservice.apiregistration.k8s.io/v1beta1.system.antrea.tanzu.vmware.com created apiservice.apiregistration.k8s.io/v1beta2.controlplane.antrea.io created apiservice.apiregistration.k8s.io/v1beta2.controlplane.antrea.tanzu.vmware.com created daemonset.apps/antrea-agent created mutatingwebhookconfiguration.admissionregistration.k8s.io/crdmutator.antrea.io created 
mutatingwebhookconfiguration.admissionregistration.k8s.io/crdmutator.antrea.tanzu.vmware.com created validatingwebhookconfiguration.admissionregistration.k8s.io/crdvalidator.antrea.io created validatingwebhookconfiguration.admissionregistration.k8s.io/crdvalidator.antrea.tanzu.vmware.com created root@ubuntu:~# kubectl get configmap -n kube-system NAME DATA AGE antrea-config-cbfh568k9m 3 14s coredns 1 223d extension-apiserver-authentication 6 223d kube-proxy 2 223d kubeadm-config 2 223d kubelet-config-1.18 1 223d root@ubuntu:~#
root@ubuntu:~# kubectl get pods NAME READY STATUS RESTARTS AGE kata-busybox 1/1 Running 0 223d kata-nginx 1/1 Running 0 221d my-deployment-68bdbbb5cc-bbszv 0/1 ImagePullBackOff 0 15d my-deployment-68bdbbb5cc-nrst9 0/1 ImagePullBackOff 0 15d my-deployment-68bdbbb5cc-rlgzt 0/1 ImagePullBackOff 0 15d my-nginx-5dc4865748-jqx54 1/1 Running 2 15d my-nginx-5dc4865748-pcrbg 1/1 Running 2 15d nginx 0/1 ImagePullBackOff 0 15d root@ubuntu:~# kubectl get pods --all-namespaces NAMESPACE NAME READY STATUS RESTARTS AGE default kata-busybox 1/1 Running 0 223d default kata-nginx 1/1 Running 0 221d default my-deployment-68bdbbb5cc-bbszv 0/1 ImagePullBackOff 0 15d default my-deployment-68bdbbb5cc-nrst9 0/1 ImagePullBackOff 0 15d default my-deployment-68bdbbb5cc-rlgzt 0/1 ImagePullBackOff 0 15d default my-nginx-5dc4865748-jqx54 1/1 Running 2 15d default my-nginx-5dc4865748-pcrbg 1/1 Running 2 15d default nginx 0/1 ImagePullBackOff 0 15d kube-system antrea-agent-fzt9g 0/2 Init:ImagePullBackOff 0 151m kube-system antrea-controller-685ff89775-bv49m 0/1 ImagePullBackOff 0 151m kube-system coredns-66bff467f8-54h8n 1/1 Running 0 223d kube-system coredns-66bff467f8-gkq4g 1/1 Running 0 223d kube-system etcd-ubuntu 1/1 Running 1 223d kube-system kube-apiserver-ubuntu 1/1 Running 1 223d kube-system kube-controller-manager-ubuntu 1/1 Running 3 223d kube-system kube-proxy-896mz 1/1 Running 0 223d kube-system kube-scheduler-ubuntu 1/1 Running 5 223d root@ubuntu:~#
root@ubuntu:~# kubectl get pods -n kube-system | grep antrea antrea-agent-fzt9g 0/2 Init:ImagePullBackOff 0 153m antrea-controller-685ff89775-bv49m 0/1 ImagePullBackOff 0 153m root@ubuntu:~#
root@ubuntu:~# kubectl get pods -n kube-system | grep antrea antrea-agent-fzt9g 0/2 Init:ImagePullBackOff 0 153m antrea-controller-685ff89775-bv49m 0/1 ImagePullBackOff 0 153m root@ubuntu:~# kubectl describe pod antrea-agent-fzt9g Error from server (NotFound): pods "antrea-agent-fzt9g" not found root@ubuntu:~# kubectl describe pod antrea-agent-fzt9g -n kube-system
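For an Init:ImagePullBackOff like this, it also helps to see exactly which images the DaemonSet is trying to pull. A minimal, hedged check (the DaemonSet name antrea-agent comes from the install output above; the jsonpath is standard kubectl):

# Show the images of the agent's init container and main containers
kubectl get daemonset antrea-agent -n kube-system -o jsonpath='{.spec.template.spec.initContainers[*].image}{"\n"}{.spec.template.spec.containers[*].image}{"\n"}'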
root@ubuntu:~# kubectl get pods -n kube-system | grep antrea antrea-agent-rgvt5 2/2 Running 0 9m36s antrea-controller-685ff89775-n7vzb 1/1 Running 0 9m36s root@ubuntu:~# kubectl exec -it antrea-agent-rgvt5 -c antrea-agent ovs-vsctl show kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl kubectl exec [POD] -- [COMMAND] instead. Error from server (NotFound): pods "antrea-agent-rgvt5" not found root@ubuntu:~# kubectl logs -f pods/antrea-agent-rgvt5 -n kube-system | tail error: a container name must be specified for pod antrea-agent-rgvt5, choose one of: [antrea-agent antrea-ovs] or one of the init containers: [install-cni] root@ubuntu:~# kubectl logs -f pods/antrea-agent-rgvt5 -c antrea-agent -n kube-system | tail
root@ubuntu:~# kubectl logs -f pods/antrea-agent-rgvt5 -c antrea-agent -n kube-system | more I0528 06:48:10.786331 1 log_file.go:99] Set log file max size to 104857600 I0528 06:48:10.789887 1 agent.go:65] Starting Antrea agent (version v1.1.0-dev-9772276) I0528 06:48:10.789947 1 client.go:37] No kubeconfig file was specified. Falling back to in-cluster config I0528 06:48:10.803521 1 client.go:86] No kubeconfig file was specified. Falling back to in-cluster config I0528 06:48:10.805347 1 prometheus.go:161] Initializing prometheus metrics I0528 06:48:10.805897 1 ovs_client.go:67] Connecting to OVSDB at address /var/run/openvswitch/db.sock I0528 06:48:11.806138 1 ovs_client.go:86] Not connected yet, will try again in 2s I0528 06:48:11.807781 1 agent.go:215] Setting up node network I0528 06:48:11.830397 1 agent.go:686] Setting Node MTU=1450 I0528 06:48:11.831832 1 ovs_client.go:118] Created bridge: cc512db6-0763-4a13-80d1-32ad79467ed1 I0528 06:48:12.852556 1 agent.go:817] No round number found in OVSDB, using 1 I0528 06:48:12.852598 1 agent.go:829] Using round number 1 I0528 06:48:12.852783 1 ofctrl.go:170] Initialize connection or re-connect to /var/run/openvswitch/br-int.mgmt. I0528 06:48:12.921175 1 route_linux.go:125] Initialized iptables I0528 06:48:13.853233 1 ofctrl.go:185] Connected to socket /var/run/openvswitch/br-int.mgmt I0528 06:48:13.853627 1 ofctrl.go:247] New connection.. I0528 06:48:13.853666 1 ofctrl.go:254] Send hello with OF version: 4 I0528 06:48:13.854928 1 ofctrl.go:268] Received Openflow 1.3 Hello message
Error from server (NotFound): pods "antrea-agent-rgvt5" not found
root@ubuntu:~# kubectl exec -it pods/antrea-agent-rgvt5 -c antrea-agent ovs-vsctl show -n kube-system kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl kubectl exec [POD] -- [COMMAND] instead. 67555ae4-7e45-4aa4-9298-c2a1c4803993 Bridge br-int datapath_type: system Port antrea-gw0 Interface antrea-gw0 type: internal Port antrea-tun0 Interface antrea-tun0 type: geneve options: {csum="true", key=flow, remote_ip=flow} ovs_version: "2.14.0" root@ubuntu:~#
root@ubuntu:~# kubectl exec -it pods/antrea-agent-rgvt5 -c antrea-ovs ovs-ofctl dump-flows br-int -n kube-system | grep table=90 kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl kubectl exec [POD] -- [COMMAND] instead. cookie=0x1000000000000, duration=945.430s, table=90, n_packets=0, n_bytes=0, priority=210,ct_state=-new+est,ip actions=resubmit(,101) cookie=0x1000000000000, duration=945.428s, table=90, n_packets=0, n_bytes=0, priority=210,pkt_mark=0x1/0x1 actions=resubmit(,105) cookie=0x1000000000000, duration=945.445s, table=90, n_packets=0, n_bytes=0, priority=0 actions=resubmit(,100) root@ubuntu:~# kubectl exec -it pods/antrea-agent-rgvt5 -c antrea-agent ovs-ofctl dump-flows br-int -n kube-system | grep table=90 kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl kubectl exec [POD] -- [COMMAND] instead. cookie=0x1000000000000, duration=960.393s, table=90, n_packets=0, n_bytes=0, priority=210,ct_state=-new+est,ip actions=resubmit(,101) cookie=0x1000000000000, duration=960.391s, table=90, n_packets=0, n_bytes=0, priority=210,pkt_mark=0x1/0x1 actions=resubmit(,105) cookie=0x1000000000000, duration=960.408s, table=90, n_packets=0, n_bytes=0, priority=0 actions=resubmit(,100) root@ubuntu:~#
Description of the ct_state flags:
0x01 new: the start of a new connection; generally seen on connections that are still uncommitted.
0x02 est: an existing, established connection; seen on committed connections.
0x04 rel: a connection related to an existing one, for example an ICMP destination-unreachable message or an FTP data connection; seen on committed connections.
0x08 rpl: the flow is in the reply direction, i.e. it did not initiate the connection; seen on committed connections.
0x10 inv: invalid, meaning the connection tracker could not identify the connection. This flag covers any problem conntrack may run into, for example: the L3/L4 protocol handler is not loaded or unavailable (on the Linux kernel datapath this means the nf_conntrack_ipv4 or nf_conntrack_ipv6 module is not loaded), the L3/L4 protocol handler considers the packet malformed, or the packet length does not match the protocol.
0x20 trk: the packet is tracked, meaning it has already gone through connection tracking. If this flag is not set, none of the other flags can be set; only when it is set can the others be set. This field was introduced in Open vSwitch 2.5.
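As a reading aid: in a flow match, '+' requires a bit to be set and '-' requires it to be clear, so '-new+est' means est=1 and new=0, i.e. later packets of an already established connection. A hedged way to re-list just the table 90 flows that match on ct_state (run inside the antrea-agent or antrea-ovs container, bridge name from the output above):

ovs-ofctl dump-flows br-int table=90 | grep ct_state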
antrea-agent vs. antrea-ovs containers
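As the earlier error message hinted, the antrea-agent pod runs two regular containers (antrea-agent and antrea-ovs) plus an install-cni init container. A quick, hedged way to confirm which containers a given agent pod has (pod name taken from the session above):

kubectl get pod antrea-agent-rgvt5 -n kube-system -o jsonpath='{.spec.initContainers[*].name}{"\n"}{.spec.containers[*].name}{"\n"}'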
root@ubuntu:~# kubectl logs antrea-agent-rgvt5 -c antrea-agent -n kube-system | grep error root@ubuntu:~# kubectl logs antrea-agent-rgvt5 -c antrea-agent -n kube-system | grep -i error root@ubuntu:~#
root@ubuntu:~# kubectl logs antrea-agent-rgvt5 -c antrea-ovs -n kube-system [2021-05-28T06:48:10Z INFO antrea-ovs]: Starting ovsdb-server * /var/run/openvswitch/conf.db does not exist * Creating empty database /var/run/openvswitch/conf.db * Starting ovsdb-server * Configuring Open vSwitch system IDs * Enabling remote OVSDB managers [2021-05-28T06:48:11Z INFO antrea-ovs]: Started ovsdb-server [2021-05-28T06:48:11Z INFO antrea-ovs]: Starting ovs-vswitchd [2021-05-28T06:48:11Z INFO antrea-ovs]: ovs-vswitchd set hw-offload to false * Starting ovs-vswitchd * Enabling remote OVSDB managers [2021-05-28T06:48:11Z INFO antrea-ovs]: Started ovs-vswitchd [2021-05-28T06:48:11Z INFO antrea-ovs]: Started the loop that checks OVS status every 30 seconds root@ubuntu:~#
antrea-gw0 tcpdump
root@ubuntu:~# tcpdump -i cni0 icmp and host 8.8.8.8 -nv tcpdump: listening on cni0, link-type EN10MB (Ethernet), capture size 262144 bytes 15:22:36.406718 IP (tos 0x0, ttl 64, id 28767, offset 0, flags [DF], proto ICMP (1), length 84) 10.244.0.5 > 8.8.8.8: ICMP echo request, id 28928, seq 74, length 64 15:22:36.418588 IP (tos 0x0, ttl 105, id 0, offset 0, flags [none], proto ICMP (1), length 84) 8.8.8.8 > 10.244.0.5: ICMP echo reply, id 28928, seq 74, length 64 ^C 2 packets captured 2 packets received by filter 0 packets dropped by kernel root@ubuntu:~# tcpdump -i flannel.1 icmp and host 8.8.8.8 -nv tcpdump: listening on flannel.1, link-type EN10MB (Ethernet), capture size 262144 bytes ^C 0 packets captured 0 packets received by filter 0 packets dropped by kernel root@ubuntu:~# tcpdump -i cni0 icmp and host 8.8.8.8 -nv tcpdump: listening on cni0, link-type EN10MB (Ethernet), capture size 262144 bytes 15:22:49.408249 IP (tos 0x0, ttl 64, id 30622, offset 0, flags [DF], proto ICMP (1), length 84) 10.244.0.5 > 8.8.8.8: ICMP echo request, id 28928, seq 87, length 64 15:22:49.420240 IP (tos 0x0, ttl 105, id 0, offset 0, flags [none], proto ICMP (1), length 84) 8.8.8.8 > 10.244.0.5: ICMP echo reply, id 28928, seq 87, length 64 15:22:50.408351 IP (tos 0x0, ttl 64, id 30856, offset 0, flags [DF], proto ICMP (1), length 84) 10.244.0.5 > 8.8.8.8: ICMP echo request, id 28928, seq 88, length 64 15:22:50.420163 IP (tos 0x0, ttl 105, id 0, offset 0, flags [none], proto ICMP (1), length 84) 8.8.8.8 > 10.244.0.5: ICMP echo reply, id 28928, seq 88, length 64 ^C 4 packets captured 4 packets received by filter 0 packets dropped by kernel root@ubuntu:~# brctl show antrea-gw0 bridge name bridge id STP enabled interfaces antrea-gw0 can't get info Operation not supported root@ubuntu:~# brctl show antrea-gw0 bridge name bridge id STP enabled interfaces antrea-gw0 can't get info Operation not supported root@ubuntu:~#
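brctl fails here because antrea-gw0 is an OVS internal port on br-int, not a Linux bridge, so the bridge-utils tooling cannot inspect it. To watch the same ICMP traffic on the Antrea side, a hedged sketch (assuming the same ping to 8.8.8.8 is still running) is to capture directly on the gateway interface:

tcpdump -i antrea-gw0 icmp and host 8.8.8.8 -nv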
antrea service
root@ubuntu:~# kubectl get svc -A NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE default kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 223d default my-nginx ClusterIP 10.110.79.116 <none> 8280/TCP 16d default my-nginx-np NodePort 10.99.1.231 <none> 8081:31199/TCP 15d kube-system antrea ClusterIP 10.108.137.187 <none> 443/TCP 41m kube-system kube-dns ClusterIP 10.96.0.10 <none> 53/UDP,53/TCP,9153/TCP 223d root@ubuntu:~#
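The antrea ClusterIP Service is what the APIService registrations created during the install (v1beta2.controlplane.antrea.io and friends) point at, i.e. it fronts the antrea-controller API. A hedged sanity check that it actually has an endpoint behind it:

kubectl get endpoints antrea -n kube-system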
root@ubuntu:~# kubectl exec -it pods/antrea-agent-rgvt5 -c antrea-agent -n kube-system -- bash root@ubuntu:/# ovs-vsctl show 67555ae4-7e45-4aa4-9298-c2a1c4803993 Bridge br-int datapath_type: system Port antrea-gw0 Interface antrea-gw0 type: internal Port antrea-tun0 Interface antrea-tun0 type: geneve options: {csum="true", key=flow, remote_ip=flow} ovs_version: "2.14.0" root@ubuntu:/# antctl get Get the status or resource of a topic Usage: antctl get [command] Available Commands: addressgroup Print address groups agentinfo Print agent's basic information appliedtogroup Print appliedto groups networkpolicy Print control plane NetworkPolicies ovsflows Dump OVS flows podinterface Print Pod's network interface information Flags: -h, --help help for get Global Flags: -k, --kubeconfig string absolute path to the kubeconfig file -s, --server string address and port of the API server, taking precedence over the default endpoint and the one set in kubeconfig -t, --timeout duration time limit of the execution of the command -v, --verbose enable verbose output Use "antctl get [command] --help" for more information about a command. root@ubuntu:/# antctl get podinterface Error: Get "https://127.0.0.1:10350/podinterfaces": dial tcp 127.0.0.1:10350: connect: connection refused root@ubuntu:/#
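The connection refused on 127.0.0.1:10350 suggests the antrea-agent's local API server, which antctl in agent mode talks to (port 10350 by default), is not listening yet. A hedged check, reusing the configmap name from the install output, is to see whether apiPort was overridden in the agent config:

kubectl get configmap antrea-config-cbfh568k9m -n kube-system -o yaml | grep -i apiport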
Adding a Pod
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web2
  namespace: default
spec:
  selector:
    matchLabels:
      run: web2
  template:
    metadata:
      labels:
        run: web2
    spec:
      containers:
      - image: nginx
        imagePullPolicy: IfNotPresent
        name: web2
        ports:
        - containerPort: 8087
          protocol: TCP
root@ubuntu:~# kubectl exec -it pods/antrea-agent-rgvt5 -c antrea-agent ovs-vsctl show -n kube-system kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl kubectl exec [POD] -- [COMMAND] instead. 67555ae4-7e45-4aa4-9298-c2a1c4803993 Bridge br-int datapath_type: system Port antrea-gw0 Interface antrea-gw0 type: internal Port web2-6d7-c9c7e9 Interface web2-6d7-c9c7e9 Port antrea-tun0 Interface antrea-tun0 type: geneve options: {csum="true", key=flow, remote_ip=flow} ovs_version: "2.14.0"
root@ubuntu:~# cat debian-deployment-v2.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: debian
  namespace: default
spec:
  selector:
    matchLabels:
      run: debian
  template:
    metadata:
      labels:
        run: debian
    spec:
      containers:
      - image: debian
        imagePullPolicy: IfNotPresent
        name: debian
        ports:
        - containerPort: 8087
          protocol: TCP
root@ubuntu:~# kubectl get pods NAME READY STATUS RESTARTS AGE debian-75ff5999c8-bbjsm 0/1 Completed 4 100s kata-busybox 1/1 Running 0 223d kata-nginx 1/1 Running 0 221d my-deployment-68bdbbb5cc-bbszv 0/1 ImagePullBackOff 0 16d my-deployment-68bdbbb5cc-nrst9 0/1 ImagePullBackOff 0 16d my-deployment-68bdbbb5cc-rlgzt 0/1 ImagePullBackOff 0 16d my-nginx-5dc4865748-jqx54 1/1 Running 2 16d my-nginx-5dc4865748-pcrbg 1/1 Running 2 16d nginx 0/1 ImagePullBackOff 0 16d web2-6d784f67bf-4gqq2 1/1 Running 0 56m
The YAML file is the problem: the stock debian image has no long-running command, so the container exits right after it starts and Kubernetes keeps restarting it (Completed, then CrashLoopBackOff). The workaround used below is a busybox Job (busybox.yaml) that simply sleeps; a hedged alternative is sketched first.
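One possible fix that is not used here: patch the existing debian Deployment so its container runs a command that never exits (the Deployment name debian comes from the YAML above; this is only a sketch):

# Hypothetical fix: override the container command so the debian container stays up
kubectl patch deployment debian --type=json -p '[{"op":"add","path":"/spec/template/spec/containers/0/command","value":["sleep","infinity"]}]'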
apiVersion: batch/v1
kind: Job
metadata:
  name: hello
spec:
  template:
    # This is the Pod template
    spec:
      containers:
      - name: hello
        image: busybox
        command: ['sh', '-c', 'echo "Hello, Kubernetes!" && sleep 3600']
      restartPolicy: OnFailure
root@ubuntu:~# kubectl apply -f busybox.yaml job.batch/hello created root@ubuntu:~# kubectl get pods NAME READY STATUS RESTARTS AGE debian-6c44fc6956-jqz4l 0/1 CrashLoopBackOff 6 10m hello-kqg48 0/1 ContainerCreating 0 4s my-deployment-68bdbbb5cc-bbszv 0/1 ImagePullBackOff 0 16d my-deployment-68bdbbb5cc-nrst9 0/1 ImagePullBackOff 0 16d my-deployment-68bdbbb5cc-rlgzt 0/1 ErrImagePull 0 16d my-nginx-5dc4865748-jqx54 1/1 Running 2 16d my-nginx-5dc4865748-pcrbg 1/1 Running 2 16d nginx 0/1 ImagePullBackOff 0 16d web2-6d784f67bf-4gqq2 1/1 Running 0 68m root@ubuntu:~# kubectl get pods
root@ubuntu:~# kubectl exec -it pods/antrea-agent-rgvt5 -c antrea-ovs ovs-ofctl dump-flows br-int -n kube-system | grep table=30 kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl kubectl exec [POD] -- [COMMAND] instead. cookie=0x1000000000000, duration=9802.456s, table=30, n_packets=0, n_bytes=0, priority=200,ip actions=ct(table=31,zone=65520,nat) cookie=0x1000000000000, duration=9802.456s, table=30, n_packets=0, n_bytes=0, priority=200,ipv6 actions=ct(table=31,zone=65510,nat)
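Table 30 sends all IP traffic through conntrack (zone 65520, with NAT) before resubmitting to table 31. To see what that conntrack zone currently holds, a hedged sketch is to dump the datapath's conntrack entries from inside the antrea-ovs or antrea-agent container:

ovs-appctl dpctl/dump-conntrack | grep zone=65520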
trace-packet
root@ubuntu:~# kubectl exec -it pods/antrea-agent-rgvt5 -c antrea-agent antctl trace-packet -S default/mc1 -D 10.244.0.1 -n kube-system Error: unknown shorthand flag: 'S' in -S See 'kubectl exec --help' for usage.
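The "unknown shorthand flag" error comes from kubectl itself: without a -- separator it tries to parse -S as one of its own flags instead of passing it to antctl. A hedged corrected invocation with the same pod and arguments:

kubectl exec -it antrea-agent-rgvt5 -n kube-system -c antrea-agent -- antctl trace-packet -S default/mc1 -D 10.244.0.1

Note that, as the next attempt inside the container shows, antctl trace-packet still needs the agent API on 127.0.0.1:10350 to be reachable.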
root@ubuntu:~# kubectl exec -it pods/antrea-agent-rgvt5 -c antrea-agent -n kube-system bash kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl kubectl exec [POD] -- [COMMAND] instead. root@ubuntu:/# antctl trace-packet -S default/mc1 -D 10.244.0.1 -n kube-system Error: unknown shorthand flag: 'n' in -n root@ubuntu:/# antctl trace-packet -S default/mc1 -D 10.244.0.1 Error: Get "https://127.0.0.1:10350/ovstracing?destination=10.244.0.1&source=default%2Fmc1": dial tcp 127.0.0.1:10350: connect: connection refused root@ubuntu:/#
ovs-appctl ofproto/trace
MAC address of 10.244.0.13/24: a6:d0:a7:be:73:fc
root@ubuntu:~# kubectl exec -it pods/mc1 -c 2nd ip a kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl kubectl exec [POD] -- [COMMAND] instead. 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever 3: eth0@if670: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UP group default link/ether a6:d0:a7:be:73:fc brd ff:ff:ff:ff:ff:ff link-netnsid 0 inet 10.244.0.13/24 brd 10.244.0.255 scope global eth0 valid_lft forever preferred_lft forever inet6 fe80::a4d0:a7ff:febe:73fc/64 scope link valid_lft forever preferred_lft forever root@ubuntu:~#
MAC address of 10.244.0.1: f2:c9:55:a9:35:ad
antrea-gw0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default qlen 1000 link/ether f2:c9:55:a9:35:ad brd ff:ff:ff:ff:ff:ff inet 10.244.0.1/24 brd 10.244.0.255 scope global antrea-gw0 valid_lft forever preferred_lft forever inet6 fe80::f0c9:55ff:fea9:35ad/64 scope link valid_lft forever preferred_lft forever
ofport
root@ubuntu:/# ovs-vsctl --columns=external_ids,name,ofport list interface mc1-36cb6d external_ids : {} name : mc1-36cb6d ofport : 14 root@ubuntu:/# ovs-vsctl --columns=external_ids,name,ofport list interface antrea-gw0 external_ids : {} name : antrea-gw0 ofport : 2 root@ubuntu:/#
root@ubuntu:~# kubectl exec -it pods/antrea-agent-rgvt5 -c antrea-ovs -n kube-system bash
root@ubuntu:/# ovs-appctl ofproto/trace br-int in_port=2,ct_state=new,dl_src=f2:c9:55:a9:35:ad,dl_dst=a6:d0:a7:be:73:fc,dl_type=0x0800,nw_proto=1,nw_src=10.244.0.1,nw_dst=10.244.0.13 Flow: ct_state=new,icmp,in_port=2,vlan_tci=0x0000,dl_src=f2:c9:55:a9:35:ad,dl_dst=a6:d0:a7:be:73:fc,nw_src=10.244.0.1,nw_dst=10.244.0.13,nw_tos=0,nw_ecn=0,nw_ttl=0,icmp_type=0,icmp_code=0 bridge("br-int") ---------------- 0. in_port=2, priority 200, cookie 0x1000000000000 load:0x1->NXM_NX_REG0[0..15] goto_table:10 10. ip,in_port=2, priority 200, cookie 0x1000000000000 goto_table:29 29. priority 0, cookie 0x1000000000000 goto_table:30 30. ip, priority 200, cookie 0x1000000000000 ct(table=31,zone=65520,nat) nat -> A clone of the packet is forked to recirculate. The forked pipeline will be resumed at table 31. -> Sets the packet to an untracked state, and clears all the conntrack fields. Final flow: icmp,reg0=0x1,in_port=2,vlan_tci=0x0000,dl_src=f2:c9:55:a9:35:ad,dl_dst=a6:d0:a7:be:73:fc,nw_src=10.244.0.1,nw_dst=10.244.0.13,nw_tos=0,nw_ecn=0,nw_ttl=0,icmp_type=0,icmp_code=0 Megaflow: recirc_id=0,eth,ip,in_port=2,nw_dst=0.0.0.0/1,nw_frag=no Datapath actions: ct(zone=65520,nat),recirc(0x1)
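The same technique works in the opposite direction; a sketch of tracing an ICMP packet from the mc1 pod towards the gateway, using the values gathered above (ofport 14 for mc1-36cb6d, the pod and gateway MACs and IPs):

ovs-appctl ofproto/trace br-int in_port=14,ct_state=new,dl_src=a6:d0:a7:be:73:fc,dl_dst=f2:c9:55:a9:35:ad,dl_type=0x0800,nw_proto=1,nw_src=10.244.0.13,nw_dst=10.244.0.1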
packet-out: injecting a packet
import binascii
from scapy.all import Ether, IP, ICMP

# ICMP echo request from the mc1 pod (10.244.0.13) towards the antrea-gw0 gateway (10.244.0.1)
a = Ether(dst="f2:c9:55:a9:35:ad", src="a6:d0:a7:be:73:fc") / IP(dst="10.244.0.1", src="10.244.0.13", ttl=10) / ICMP()
# Print the raw frame as a hex string for ovs-ofctl packet-out
print(binascii.hexlify(bytes(a)).decode())
f2c955a935ada6d0a7be73fc08004500001c000100000a019aeb0af4000d0af400010800f7ff00000000
root@ubuntu:/# ovs-ofctl packet-out br-int "normal" 14 f2c955a935ada6d0a7be73fc08004500001c000100000a019aeb0af4000d0af400010800f7ff00000000 2021-05-31T10:21:33Z|00001|ofp_packet|WARN|packet-out has bad input port 0xfffa OFPT_ERROR (xid=0x6): OFPBRC_BAD_PORT OFPT_PACKET_OUT (xid=0x6): ***decode error: OFPBRC_BAD_PORT*** 00000000 01 0d 00 42 00 00 00 06-ff ff ff ff ff fa 00 08 |...B............| 00000010 00 00 00 08 00 0e 00 00-f2 c9 55 a9 35 ad a6 d0 |..........U.5...| 00000020 a7 be 73 fc 08 00 45 00-00 1c 00 01 00 00 0a 01 |..s...E.........| 00000030 9a eb 0a f4 00 0d 0a f4-00 01 08 00 f7 ff 00 00 |................| 00000040 00 00 |.. | root@ubuntu:/#
root@ubuntu:/# ovs-vsctl show 67555ae4-7e45-4aa4-9298-c2a1c4803993 Bridge br-int datapath_type: system Port antrea-gw0 Interface antrea-gw0 type: internal Port web2-6d7-c9c7e9 Interface web2-6d7-c9c7e9 Port antrea-tun0 Interface antrea-tun0 type: geneve options: {csum="true", key=flow, remote_ip=flow} Port mc1-36cb6d Interface mc1-36cb6d Port debian-6-177c0e Interface debian-6-177c0e ovs_version: "2.14.0" root@ubuntu:/# ovs-vsctl --columns=external_ids,name,ofport list interface mc1-36cb6d external_ids : {} name : mc1-36cb6d ofport : 14 root@ubuntu:/# ovs-vsctl --columns=external_ids,name,ofport list interface mc1-36cb6d external_ids : {} name : mc1-36cb6d ofport : 14 root@ubuntu:/# ovs-vsctl --columns=external_ids,name,ofport list interface antrea-gw0 external_ids : {} name : antrea-gw0 ofport : 2
root@ubuntu:/# ovs-ofctl packet-out br-int "normal" 2 f2c955a935ada6d0a7be73fc08004500001c000100000a019aeb0af4000d0af400010800f7ff00000000 2021-05-31T10:21:18Z|00001|ofp_packet|WARN|packet-out has bad input port 0xfffa OFPT_ERROR (xid=0x6): OFPBRC_BAD_PORT OFPT_PACKET_OUT (xid=0x6): ***decode error: OFPBRC_BAD_PORT*** 00000000 01 0d 00 42 00 00 00 06-ff ff ff ff ff fa 00 08 |...B............| 00000010 00 00 00 08 00 02 00 00-f2 c9 55 a9 35 ad a6 d0 |..........U.5...| 00000020 a7 be 73 fc 08 00 45 00-00 1c 00 01 00 00 0a 01 |..s...E.........| 00000030 9a eb 0a f4 00 0d 0a f4-00 01 08 00 f7 ff 00 00 |................| 00000040 00 00 |.. | root@ubuntu:/# ovs-ofctl packet-out br-int "normal" 14 f2c955a935ada6d0a7be73fc08004500001c000100000a019aeb0af4000d0af400010800f7ff00000000 2021-05-31T10:21:33Z|00001|ofp_packet|WARN|packet-out has bad input port 0xfffa OFPT_ERROR (xid=0x6): OFPBRC_BAD_PORT OFPT_PACKET_OUT (xid=0x6): ***decode error: OFPBRC_BAD_PORT*** 00000000 01 0d 00 42 00 00 00 06-ff ff ff ff ff fa 00 08 |...B............| 00000010 00 00 00 08 00 0e 00 00-f2 c9 55 a9 35 ad a6 d0 |..........U.5...| 00000020 a7 be 73 fc 08 00 45 00-00 1c 00 01 00 00 0a 01 |..s...E.........| 00000030 9a eb 0a f4 00 0d 0a f4-00 01 08 00 f7 ff 00 00 |................| 00000040 00 00 |.. | root@ubuntu:/# ovs-ofctl packet-out br-int "normal" output:2 f2c955a935ada6d0a7be73fc08004500001c000100000a019aeb0af4000d0af400010800f7ff00000000 2021-05-31T10:24:52Z|00001|ofp_packet|WARN|packet-out has bad input port 0xfffa OFPT_ERROR (xid=0x6): OFPBRC_BAD_PORT OFPT_PACKET_OUT (xid=0x6): ***decode error: OFPBRC_BAD_PORT*** 00000000 01 0d 00 42 00 00 00 06-ff ff ff ff ff fa 00 08 |...B............| 00000010 00 00 00 08 00 02 00 00-f2 c9 55 a9 35 ad a6 d0 |..........U.5...| 00000020 a7 be 73 fc 08 00 45 00-00 1c 00 01 00 00 0a 01 |..s...E.........| 00000030 9a eb 0a f4 00 0d 0a f4-00 01 08 00 f7 ff 00 00 |................| 00000040 00 00
14 is the ofport of mc1-36cb6d. The earlier packet-out attempts failed because ovs-ofctl packet-out expects the in_port argument before the actions, so "normal" was being parsed as the input port (0xfffa, OFPP_NORMAL), which is not a valid in_port. With the arguments in the correct order the injection succeeds:
root@ubuntu:/# ovs-ofctl packet-out br-int 14 "normal" f2c955a935ada6d0a7be73fc08004500001c000100000a019aeb0af4000d0af400010800f7ff00000000 root@ubuntu:/#
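A hedged way to confirm the injected frame really comes out on the gateway side: on the node, capture on antrea-gw0 while re-running the packet-out; the forged echo request (10.244.0.13 -> 10.244.0.1) should show up there.

tcpdump -i antrea-gw0 icmp and host 10.244.0.1 -nv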