Cilium ships with a service-mesh implementation, so I will connect to its web UI (Hubble UI) to see and trace connectivity between containers. YC (Yandex Cloud) lets you pick Cilium as the networking backend for a cluster (alongside Calico). Having Cilium also makes it possible to spread the nodes (both worker and master) across arbitrary, different L3 segments; the key requirement is that VXLAN traffic can pass between them (VXLAN itself works through 1:1 NAT, and possibly through masquerading as well?).
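As a quick sanity check, the encapsulation mode can be read straight from the agent configuration. This is a sketch assuming the managed cluster exposes the usual cilium-config ConfigMap with the tunnel key, which is how Cilium of this generation stores it:

kubectl -n kube-system get configmap cilium-config -o yaml | findstr tunnel

If it reports tunnel: vxlan, the nodes only need to reach each other on UDP port 8472, Cilium's default VXLAN port.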
To connect we need to deploy the Hubble UI container; we install it with a manifest (kubectl apply -f hubble-ui.yaml):
# Source: cilium/templates/hubble-ui-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: "hubble-ui"
  namespace: kube-system
---
# Source: cilium/templates/hubble-ui-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: hubble-ui-envoy
  namespace: kube-system
data:
  envoy.yaml: |
    static_resources:
      listeners:
        - name: listener_hubble_ui
          address:
            socket_address:
              address: 0.0.0.0
              port_value: 8081
          filter_chains:
            - filters:
                - name: envoy.filters.network.http_connection_manager
                  typed_config:
                    "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
                    codec_type: auto
                    stat_prefix: ingress_http
                    route_config:
                      name: local_route
                      virtual_hosts:
                        - name: local_service
                          domains: ["*"]
                          routes:
                            - match:
                                prefix: "/api/"
                              route:
                                cluster: backend
                                prefix_rewrite: "/"
                                timeout: 0s
                                max_stream_duration:
                                  grpc_timeout_header_max: 0s
                            - match:
                                prefix: "/"
                              route:
                                cluster: frontend
                          cors:
                            allow_origin_string_match:
                              - prefix: "*"
                            allow_methods: GET, PUT, DELETE, POST, OPTIONS
                            allow_headers: keep-alive,user-agent,cache-control,content-type,content-transfer-encoding,x-accept-content-transfer-encoding,x-accept-response-streaming,x-user-agent,x-grpc-web,grpc-timeout
                            max_age: "1728000"
                            expose_headers: grpc-status,grpc-message
                    http_filters:
                      - name: envoy.filters.http.grpc_web
                      - name: envoy.filters.http.cors
                      - name: envoy.filters.http.router
      clusters:
        - name: frontend
          connect_timeout: 0.25s
          type: strict_dns
          lb_policy: round_robin
          load_assignment:
            cluster_name: frontend
            endpoints:
              - lb_endpoints:
                  - endpoint:
                      address:
                        socket_address:
                          address: 127.0.0.1
                          port_value: 8080
        - name: backend
          connect_timeout: 0.25s
          type: logical_dns
          lb_policy: round_robin
          http2_protocol_options: {}
          load_assignment:
            cluster_name: backend
            endpoints:
              - lb_endpoints:
                  - endpoint:
                      address:
                        socket_address:
                          address: 127.0.0.1
                          port_value: 8090
---
# Source: cilium/templates/hubble-ui-clusterrole.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: hubble-ui
rules:
  - apiGroups:
      - networking.k8s.io
    resources:
      - networkpolicies
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - componentstatuses
      - endpoints
      - namespaces
      - nodes
      - pods
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - apiextensions.k8s.io
    resources:
      - customresourcedefinitions
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - cilium.io
    resources:
      - "*"
    verbs:
      - get
      - list
      - watch
---
# Source: cilium/templates/hubble-ui-clusterrolebinding.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: hubble-ui
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: hubble-ui
subjects:
  - kind: ServiceAccount
    namespace: kube-system
    name: "hubble-ui"
---
# Source: cilium/templates/hubble-ui-service.yaml
kind: Service
apiVersion: v1
metadata:
  name: hubble-ui
  labels:
    k8s-app: hubble-ui
  namespace: kube-system
spec:
  selector:
    k8s-app: hubble-ui
  ports:
    - name: http
      port: 80
      targetPort: 8081
  type: ClusterIP
---
# Source: cilium/templates/hubble-ui-deployment.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  namespace: kube-system
  labels:
    k8s-app: hubble-ui
  name: hubble-ui
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: hubble-ui
  template:
    metadata:
      annotations:
      labels:
        k8s-app: hubble-ui
    spec:
      securityContext:
        runAsUser: 1001
      serviceAccount: "hubble-ui"
      serviceAccountName: "hubble-ui"
      containers:
        - name: frontend
          image: "quay.io/cilium/hubble-ui:v0.7.9@sha256:e0e461c680ccd083ac24fe4f9e19e675422485f04d8720635ec41f2ba9e5562c"
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 8080
              name: http
          resources: {}
        - name: backend
          image: "quay.io/cilium/hubble-ui-backend:v0.7.9@sha256:632c938ef6ff30e3a080c59b734afb1fb7493689275443faa1435f7141aabe76"
          imagePullPolicy: IfNotPresent
          env:
            - name: EVENTS_SERVER_PORT
              value: "8090"
            - name: FLOWS_API_ADDR
              value: "hubble-relay:80"
          ports:
            - containerPort: 8090
              name: grpc
          resources: {}
        - name: proxy
          image: "docker.io/envoyproxy/envoy:v1.18.2@sha256:e8b37c1d75787dd1e712ff389b0d37337dc8a174a63bed9c34ba73359dc67da7"
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 8081
              name: http
          resources: {}
          command: ["envoy"]
          args: ["-c", "/etc/envoy.yaml", "-l", "info"]
          volumeMounts:
            - name: hubble-ui-envoy-yaml
              mountPath: /etc/envoy.yaml
              subPath: envoy.yaml
      volumes:
        - name: hubble-ui-envoy-yaml
          configMap:
            name: hubble-ui-envoy
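After applying, it can be convenient to wait until the rollout actually finishes; this is plain kubectl, nothing Hubble-specific:

kubectl -n kube-system rollout status deployment/hubble-ui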
Check what got deployed:
C:\work\projects>kubectl get deployment -n kube-system
NAME                  READY   UP-TO-DATE   AVAILABLE   AGE
cilium-operator       1/1     1            1           21h
coredns               1/1     1            1           21h
hubble-relay          1/1     1            1           21h
hubble-ui             1/1     1            1           42m
kube-dns-autoscaler   1/1     1            1           21h
metrics-server        1/1     1            1           21h
C:\Users\adm>kubectl get svc -n kube-system
NAME              TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                  AGE
cilium-agent      ClusterIP   None            <none>        9090/TCP,9095/TCP        21h
cilium-operator   ClusterIP   None            <none>        6942/TCP                 21h
hubble-metrics    ClusterIP   None            <none>        9091/TCP                 21h
hubble-relay      ClusterIP   10.96.212.143   <none>        80/TCP                   21h
hubble-ui         ClusterIP   10.96.42.39     <none>        80/TCP                   45m
kube-dns          ClusterIP   10.96.0.2       <none>        53/UDP,53/TCP,9153/TCP   21h
metrics-server    ClusterIP   10.96.124.89    <none>        443/TCP                  21h
Then we forward the service port to the local machine:
kubectl port-forward -n kube-system svc/hubble-ui 12000:80
We open http://localhost:12000 in a browser without any authentication and get the service-map visualizations (deployments that existed before Hubble went live will need to be restarted, otherwise their connectivity is not shown; it appears after the restart).
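For those pre-existing deployments a plain rolling restart is enough; the namespace and deployment name below are placeholders:

kubectl -n <namespace> rollout restart deployment <deployment-name>

Once the pods are recreated, their flows start showing up on the Hubble service map.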