
EKS & Cilium: taking on a setup without kube-proxy

dekimasoon

For various reasons I'm working with Pulumi (Terraform) rather than eksctl.
I create the cluster the usual way, then install Cilium with Helm via Pulumi.
The code below installs Cilium and applies patches that disable aws-node and kube-proxy, which are no longer needed.

import * as k8s from "@pulumi/kubernetes"
import * as pulumi from "@pulumi/pulumi"

export type KuberneteCiliumArgs = {
  clusterEndpoint: pulumi.Input<string>
}

export class KuberneteCilium extends pulumi.ComponentResource {
  public opts: pulumi.ResourceOptions
  public release: k8s.helm.v3.Release

  constructor(
    name: string,
    args: KuberneteCiliumArgs,
    opts?: pulumi.ResourceOptions,
  ) {
    super("stack8:kubernetes:Cilium", name, undefined, opts)

    this.opts = { ...opts, parent: this }

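    // NOTE: Pin aws-node to a node label that never exists so its pods can no longer be scheduled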
    const disabledAWSNode = new k8s.apps.v1.DaemonSetPatch(
      "disabled-aws-node",
      {
        metadata: {
          namespace: "kube-system",
          name: "aws-node",
        },
        spec: {
          template: {
            spec: {
              nodeSelector: {
                node: "non-existing",
              },
            },
          },
        },
      },
      this.opts,
    )

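    // NOTE: Same trick for kube-proxy, which Cilium's kubeProxyReplacement makes redundant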
    const disabledKubeProxy = new k8s.apps.v1.DaemonSetPatch(
      "disabled-kube-proxy",
      {
        metadata: {
          namespace: "kube-system",
          name: "kube-proxy",
        },
        spec: {
          template: {
            spec: {
              nodeSelector: {
                node: "non-existing",
              },
            },
          },
        },
      },
      this.opts,
    )

    this.release = new k8s.helm.v3.Release(
      "release",
      {
        chart: "cilium",
        namespace: "kube-system",
        version: "v1.15.0-rc.0",
        repositoryOpts: {
          repo: "https://helm.cilium.io/",
        },
        values: {
          // NOTE: For running in EKS
          eni: {
            enabled: true,
            awsEnablePrefixDelegation: true,
          },
          ipam: {
            mode: "eni",
          },
          egressMasqueradeInterfaces: "eth0",
          routingMode: "native",
          // NOTE: For replacing kube-proxy with eBPF
          kubeProxyReplacement: true,
          k8sServiceHost: args.clusterEndpoint,
          k8sServicePort: "443",
          // NOTE: For getting better performance
          loadBalancer: {
            algorithm: "maglev",
          },
        },
      },
      { ...this.opts, dependsOn: [disabledAWSNode, disabledKubeProxy] },
    )
  }
}
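
For context, here is a minimal usage sketch of this component from the stack's entry point. eksCluster, kubeconfig, and the ./cilium import path are placeholders for things defined elsewhere and not shown in this scrap:

import * as aws from "@pulumi/aws"
import * as k8s from "@pulumi/kubernetes"
import * as pulumi from "@pulumi/pulumi"
import { KuberneteCilium } from "./cilium" // placeholder path to the component above

// Placeholders: the EKS cluster and a kubeconfig generated from it are created
// elsewhere in the stack and only declared here to keep the sketch compiling.
declare const eksCluster: aws.eks.Cluster
declare const kubeconfig: pulumi.Output<string>

// The provider has to point at the new cluster so the Helm release and the
// DaemonSet patches land there.
const k8sProvider = new k8s.Provider("k8s", { kubeconfig })

// Cilium's k8sServiceHost expects a bare host, while the EKS endpoint output
// includes the https:// scheme, so strip it before passing it in.
const clusterEndpoint = eksCluster.endpoint.apply((e) => e.replace("https://", ""))

new KuberneteCilium(
  "cilium",
  { clusterEndpoint },
  { provider: k8sProvider },
)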

dekimasoon

However, cloudwatch-agent is stuck in CrashLoopBackOff.

✗ kubectl get po --all-namespaces
NAMESPACE           NAME                                                              READY   STATUS             RESTARTS         AGE
amazon-cloudwatch   amazon-cloudwatch-observability-controller-manager-6765dcf9ltbs   1/1     Running            0                85m
amazon-cloudwatch   cloudwatch-agent-rhjwn                                            0/1     CrashLoopBackOff   20 (2m56s ago)   85m
amazon-cloudwatch   cloudwatch-agent-xbhts                                            0/1     CrashLoopBackOff   20 (2m47s ago)   85m
amazon-cloudwatch   fluent-bit-6vnfw                                                  1/1     Running            0                85m
amazon-cloudwatch   fluent-bit-dzz22                                                  1/1     Running            0                85m
kube-system         cilium-bn4dt                                                      1/1     Running            0                85m
kube-system         cilium-mzdq8                                                      1/1     Running            0                85m
kube-system         cilium-operator-55cdc495c4-67cgb                                  1/1     Running            0                85m
kube-system         cilium-operator-55cdc495c4-wllbp                                  1/1     Running            0                85m
kube-system         coredns-5488df4cc7-485pw                                          1/1     Running            0                85m
kube-system         coredns-5488df4cc7-v8gbp                                          1/1     Running            0                85m
kube-system         eks-pod-identity-agent-jttcz                                      1/1     Running            0                85m
kube-system         eks-pod-identity-agent-mcbtt                                      1/1     Running            0                85m

describe doesn't seem to turn up any useful information either.

✗ kubectl describe po -n amazon-cloudwatch cloudwatch-agent-rhjwn
Name:             cloudwatch-agent-rhjwn
Namespace:        amazon-cloudwatch
Priority:         0
Service Account:  cloudwatch-agent
Node:             ip-10-0-49-247.ap-northeast-1.compute.internal/10.0.49.247
Start Time:       Thu, 04 Jan 2024 02:54:18 +0000
Labels:           app.kubernetes.io/component=amazon-cloudwatch-agent
                  app.kubernetes.io/instance=amazon-cloudwatch.cloudwatch-agent
                  app.kubernetes.io/managed-by=amazon-cloudwatch-agent-operator
                  app.kubernetes.io/name=cloudwatch-agent
                  app.kubernetes.io/part-of=aws
                  app.kubernetes.io/version=1.300031.1b317
                  controller-revision-hash=6ddd78df4
                  pod-template-generation=1
Annotations:      amazon-cloudwatch-agent-operator-config/sha256: 0c59a72be17ab0055f8b14b9c59bdb88e4419b21b944625bb2aeadd87c8fcb1c
Status:           Running
IP:               10.0.59.11
IPs:
  IP:           10.0.59.11
Controlled By:  DaemonSet/cloudwatch-agent
Containers:
  cloudwatch-agent:
    Container ID:   containerd://921b7290faad5696650eda56a274ee7ba34456f4a9577ca56c51b1a415fa05da
    Image:          public.ecr.aws/cloudwatch-agent/cloudwatch-agent:1.300031.1b317
    Image ID:       public.ecr.aws/cloudwatch-agent/cloudwatch-agent@sha256:d660a92f0e97e529235e21d0ee440942f503c4cdd48dadaa80781fdc11427087
    Ports:          2000/TCP, 4315/TCP, 4316/TCP
    Host Ports:     0/TCP, 0/TCP, 0/TCP
    State:          Waiting
      Reason:       CrashLoopBackOff
    Last State:     Terminated
      Reason:       Error
      Exit Code:    1
      Started:      Thu, 04 Jan 2024 04:21:32 +0000
      Finished:     Thu, 04 Jan 2024 04:21:46 +0000
    Ready:          False
    Restart Count:  21
    Limits:
      cpu:     500m
      memory:  512Mi
    Requests:
      cpu:     250m
      memory:  128Mi
    Environment:
      K8S_NODE_NAME:   (v1:spec.nodeName)
      HOST_IP:         (v1:status.hostIP)
      HOST_NAME:       (v1:spec.nodeName)
      K8S_NAMESPACE:  amazon-cloudwatch (v1:metadata.namespace)
      POD_NAME:       cloudwatch-agent-rhjwn (v1:metadata.name)
    Mounts:
      /dev/disk from devdisk (ro)
      /etc/cwagentconfig from cwaagentconfig (rw)
      /rootfs from rootfs (ro)
      /run/containerd/containerd.sock from containerdsock (rw)
      /sys from sys (ro)
      /var/lib/docker from varlibdocker (ro)
      /var/run/docker.sock from dockersock (ro)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-p2d6p (ro)
Conditions:
  Type              Status
  Initialized       True
  Ready             False
  ContainersReady   False
  PodScheduled      True
Volumes:
  cwaagentconfig:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      cwaagentconfig
    Optional:  false
  rootfs:
    Type:          HostPath (bare host directory volume)
    Path:          /
    HostPathType:
  dockersock:
    Type:          HostPath (bare host directory volume)
    Path:          /var/run/docker.sock
    HostPathType:
  varlibdocker:
    Type:          HostPath (bare host directory volume)
    Path:          /var/lib/docker
    HostPathType:
  containerdsock:
    Type:          HostPath (bare host directory volume)
    Path:          /run/containerd/containerd.sock
    HostPathType:
  sys:
    Type:          HostPath (bare host directory volume)
    Path:          /sys
    HostPathType:
  devdisk:
    Type:          HostPath (bare host directory volume)
    Path:          /dev/disk/
    HostPathType:
  kube-api-access-p2d6p:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   Burstable
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/disk-pressure:NoSchedule op=Exists
                             node.kubernetes.io/memory-pressure:NoSchedule op=Exists
                             node.kubernetes.io/not-ready:NoExecute op=Exists
                             node.kubernetes.io/pid-pressure:NoSchedule op=Exists
                             node.kubernetes.io/unreachable:NoExecute op=Exists
                             node.kubernetes.io/unschedulable:NoSchedule op=Exists
Events:
  Type     Reason   Age                    From     Message
  ----     ------   ----                   ----     -------
  Normal   Pulled   38m (x14 over 88m)     kubelet  Container image "public.ecr.aws/cloudwatch-agent/cloudwatch-agent:1.300031.1b317" already present on machine
  Warning  BackOff  3m48s (x371 over 88m)  kubelet  Back-off restarting failed container cloudwatch-agent in pod cloudwatch-agent-rhjwn_amazon-cloudwatch(a9e23700-c490-484b-ba7b-c1966e2bed6a)
dekimasoon

I'm torn between deploying cloudwatch-agent without Cilium first to isolate the problem, or diving straight into root-cause analysis. I'll spend about an hour investigating, and if that goes nowhere I'll isolate.

Looking at the logs, there were errors.
An excerpt (a bit hard to read):

✗ kubectl logs -n amazon-cloudwatch cloudwatch-agent-kzvzn
E! [EC2] Fetch hostname from EC2 metadata fail: EC2MetadataError: failed to make EC2Metadata request

	status code: 401, request id:
D! should retry true for imds error : RequestError: send request failed
caused by: Put "http://169.254.169.254/latest/api/token": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
D! should retry true for imds error : RequestError: send request failed
caused by: Put "http://169.254.169.254/latest/api/token": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
D! could not get instance document without imds v1 fallback enable thus enable fallback
E! [EC2] Fetch identity document from EC2 metadata fail: EC2MetadataRequestError: failed to get EC2 instance identity document
caused by: EC2MetadataError: failed to make EC2Metadata request

	status code: 401, request id:

I had enabled IMDSv2-only without fully understanding it, so let's try allowing IMDSv1 again.

    const lanchTemplate = new aws.ec2.LaunchTemplate(
      "launch-template",
      {
        name: temlateTags.Name,
        metadataOptions: {
-         httpTokens: "required",
+         httpTokens: "optional",
        },
        tagSpecifications: [
          {
            resourceType: "instance",
            tags: temlateTags,
          },
        ],
        tags: temlateTags,
      },
      this.opts,
    )

Waiting for it to apply. The node group update is taking its time.

dekimasoon

Done in just under 10 minutes.
Let's check the result.

✗ kubectl get po --all-namespaces
NAMESPACE           NAME                                                              READY   STATUS    RESTARTS   AGE
amazon-cloudwatch   amazon-cloudwatch-observability-controller-manager-6765dcfjc4dk   1/1     Running   0          8m52s
amazon-cloudwatch   cloudwatch-agent-4ggc6                                            1/1     Running   0          9m50s
amazon-cloudwatch   cloudwatch-agent-jc6nv                                            1/1     Running   0          9m55s
amazon-cloudwatch   fluent-bit-9zdvt                                                  1/1     Running   0          9m50s
amazon-cloudwatch   fluent-bit-vhnbh                                                  1/1     Running   0          9m55s
kube-system         cilium-hj54m                                                      1/1     Running   0          10m
kube-system         cilium-operator-55cdc495c4-5vh5p                                  1/1     Running   0          4m48s
kube-system         cilium-operator-55cdc495c4-n5jlf                                  1/1     Running   0          8m52s
kube-system         cilium-tklrn                                                      1/1     Running   0          10m
kube-system         coredns-5488df4cc7-gkt7z                                          1/1     Running   0          7m50s
kube-system         coredns-5488df4cc7-sk5j4                                          1/1     Running   0          8m52s
kube-system         eks-pod-identity-agent-7lpls                                      1/1     Running   0          9m55s
kube-system         eks-pod-identity-agent-j7tk5                                      1/1     Running   0          9m50s

Oh, that fixed it.
Maybe the way I'm enforcing IMDSv2-only is wrong.
There are plenty of higher-priority things to work on, so I added the comment below and deferred it.

// FIXME: Disable IMDSv1 again (restore httpTokens: "required").
// With it set to "required", the cloudwatch-agent pods cannot reach
// EC2 metadata and fail with:
// `Fetch hostname from EC2 metadata fail: EC2MetadataError`
// It doesn't seem critical, so I'm deferring it for now.
// httpTokens: "required",
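
A likely cause I want to check later instead of keeping IMDSv1 on (an assumption on my part, not verified here): when metadataOptions is overridden in a launch template, the IMDSv2 hop limit defaults to 1, so the token response is dropped before it reaches pods sitting behind a veth. A sketch that keeps httpTokens required and raises the hop limit instead:

    const lanchTemplate = new aws.ec2.LaunchTemplate(
      "launch-template",
      {
        name: temlateTags.Name,
        metadataOptions: {
          httpTokens: "required",
          // NOTE: Assumption, not yet tested: allow one extra hop so pods
          // behind a veth can still complete the IMDSv2 token request.
          httpPutResponseHopLimit: 2,
        },
        tagSpecifications: [
          {
            resourceType: "instance",
            tags: temlateTags,
          },
        ],
        tags: temlateTags,
      },
      this.opts,
    )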
dekimasoon

Let's validate the setup by following the official documentation.

https://docs.cilium.io/en/stable/network/kubernetes/kubeproxy-free/#validate-the-setup

First comes a command-level check, apparently.
I don't really understand the output, but the installation seems to have gone well.

✗ kubectl -n kube-system exec ds/cilium -- cilium status | grep KubeProxyReplacement
Defaulted container "cilium-agent" out of: cilium-agent, config (init), mount-cgroup (init), apply-sysctl-overwrites (init), mount-bpf-fs (init), clean-cilium-state (init), install-cni-binaries (init)
KubeProxyReplacement:    True   [eth0    10.0.42.43 2406:da14:1d6a:2b02:9c61:cec0:e223:56a0 fe80::474:9cff:fe16:acd3 (Direct Routing), pod-id-link0    169.254.170.23 fd00:ec2::23 fe80::48ab:34ff:fe93:8b66]
✗ kubectl -n kube-system exec ds/cilium -- cilium status --verbose
Defaulted container "cilium-agent" out of: cilium-agent, config (init), mount-cgroup (init), apply-sysctl-overwrites (init), mount-bpf-fs (init), clean-cilium-state (init), install-cni-binaries (init)
KVStore:                Ok   Disabled
Kubernetes:             Ok   1.28+ (v1.28.4-eks-8cb36c9) [linux/amd64]
Kubernetes APIs:        ["EndpointSliceOrEndpoint", "cilium/v2::CiliumClusterwideNetworkPolicy", "cilium/v2::CiliumEndpoint", "cilium/v2::CiliumNetworkPolicy", "cilium/v2::CiliumNode", "cilium/v2alpha1::CiliumCIDRGroup", "core/v1::Namespace", "core/v1::Pods", "core/v1::Service", "networking.k8s.io/v1::NetworkPolicy"]
KubeProxyReplacement:   True   [eth0    10.0.42.43 2406:da14:1d6a:2b02:9c61:cec0:e223:56a0 fe80::474:9cff:fe16:acd3 (Direct Routing), pod-id-link0    169.254.170.23 fd00:ec2::23 fe80::48ab:34ff:fe93:8b66]
Host firewall:          Disabled
SRv6:                   Disabled
CNI Chaining:           none
Cilium:                 Ok   1.15.0-rc.0 (v1.15.0-rc.0-5d19e956)
NodeMonitor:            Listening for events on 2 CPUs with 64x4096 of shared memory
Cilium health daemon:   Ok
IPAM:                   IPv4: 5/16 allocated,
Allocated addresses:
  10.0.43.129 (health)
  10.0.43.130 (amazon-cloudwatch/cloudwatch-agent-jc6nv)
  10.0.43.135 (amazon-cloudwatch/amazon-cloudwatch-observability-controller-manager-6765dcfjc4dk)
  10.0.43.140 (router)
  10.0.43.141 (kube-system/coredns-5488df4cc7-gkt7z)
IPv4 BIG TCP:           Disabled
IPv6 BIG TCP:           Disabled
BandwidthManager:       Disabled
Host Routing:           Legacy
Masquerading:           IPTables [IPv4: Enabled, IPv6: Disabled]
Clock Source for BPF:   ktime
Controller Status:      34/34 healthy
  Name                                                                                               Last success   Last error   Count   Message
  cilium-health-ep                                                                                   31s ago        never        0       no error
  dns-garbage-collector-job                                                                          39s ago        never        0       no error
  endpoint-138-regeneration-recovery                                                                 never          never        0       no error
  endpoint-1769-regeneration-recovery                                                                never          never        0       no error
  endpoint-1897-regeneration-recovery                                                                never          never        0       no error
  endpoint-3177-regeneration-recovery                                                                never          never        0       no error
  endpoint-3322-regeneration-recovery                                                                never          never        0       no error
  endpoint-gc                                                                                        1m39s ago      never        0       no error
  ep-bpf-prog-watchdog                                                                               2s ago         never        0       no error
  ipcache-inject-labels                                                                              32s ago        41m34s ago   0       no error
  k8s-heartbeat                                                                                      9s ago         never        0       no error
  link-cache                                                                                         2s ago         never        0       no error
  neighbor-table-refresh                                                                             2s ago         never        0       no error
  resolve-identity-138                                                                               4m26s ago      never        0       no error
  resolve-identity-1769                                                                              1m30s ago      never        0       no error
  resolve-identity-1897                                                                              27s ago        never        0       no error
  resolve-identity-3177                                                                              1m32s ago      never        0       no error
  resolve-identity-3322                                                                              1m31s ago      never        0       no error
  resolve-labels-amazon-cloudwatch/amazon-cloudwatch-observability-controller-manager-6765dcfjc4dk   40m27s ago     never        0       no error
  resolve-labels-amazon-cloudwatch/cloudwatch-agent-jc6nv                                            41m30s ago     never        0       no error
  resolve-labels-kube-system/coredns-5488df4cc7-gkt7z                                                39m26s ago     never        0       no error
  sync-host-ips                                                                                      32s ago        never        0       no error
  sync-lb-maps-with-k8s-services                                                                     41m32s ago     never        0       no error
  sync-policymap-138                                                                                 9m26s ago      never        0       no error
  sync-policymap-1769                                                                                11m27s ago     never        0       no error
  sync-policymap-1897                                                                                10m27s ago     never        0       no error
  sync-policymap-3177                                                                                11m27s ago     never        0       no error
  sync-policymap-3322                                                                                11m27s ago     never        0       no error
  sync-to-k8s-ciliumendpoint (138)                                                                   5s ago         never        0       no error
  sync-to-k8s-ciliumendpoint (1769)                                                                  10s ago        never        0       no error
  sync-to-k8s-ciliumendpoint (1897)                                                                  7s ago         never        0       no error
  sync-utime                                                                                         32s ago        never        0       no error
  template-dir-watcher                                                                               never          never        0       no error
  write-cni-file                                                                                     41m39s ago     never        0       no error
Proxy Status:            OK, ip 10.0.43.140, 0 redirects active on ports 10000-20000, Envoy: embedded
Global Identity Range:   min 256, max 65535
Hubble:                  Ok   Current/Max Flows: 4095/4095 (100.00%), Flows/s: 7.58   Metrics: Disabled
KubeProxyReplacement Details:
  Status:                 True
  Socket LB:              Enabled
  Socket LB Tracing:      Enabled
  Socket LB Coverage:     Full
  Devices:                eth0    10.0.42.43 2406:da14:1d6a:2b02:9c61:cec0:e223:56a0 fe80::474:9cff:fe16:acd3 (Direct Routing), pod-id-link0    169.254.170.23 fd00:ec2::23 fe80::48ab:34ff:fe93:8b66
  Mode:                   SNAT
  Backend Selection:      Maglev (Table Size: 16381)
  Session Affinity:       Enabled
  Graceful Termination:   Enabled
  NAT46/64 Support:       Disabled
  XDP Acceleration:       Disabled
  Services:
  - ClusterIP:      Enabled
  - NodePort:       Enabled (Range: 30000-32767)
  - LoadBalancer:   Enabled
  - externalIPs:    Enabled
  - HostPort:       Enabled
BPF Maps:   dynamic sizing: on (ratio: 0.002500)
  Name                          Size
  Auth                          524288
  Non-TCP connection tracking   65536
  TCP connection tracking       131072
  Endpoint policy               65535
  IP cache                      512000
  IPv4 masquerading agent       16384
  IPv6 masquerading agent       16384
  IPv4 fragmentation            8192
  IPv4 service                  65536
  IPv6 service                  65536
  IPv4 service backend          65536
  IPv6 service backend          65536
  IPv4 service reverse NAT      65536
  IPv6 service reverse NAT      65536
  Metrics                       1024
  NAT                           131072
  Neighbor table                131072
  Global policy                 16384
  Session affinity              65536
  Sock reverse NAT              65536
  Tunnel                        65536
Encryption:                                                   Disabled
Cluster health:                                               2/2 reachable   (2024-01-04T05:36:57Z)
  Name                                                        IP              Node        Endpoints
  ip-10-0-42-43.ap-northeast-1.compute.internal (localhost)   10.0.42.43      reachable   reachable
  ip-10-0-63-82.ap-northeast-1.compute.internal               10.0.63.82      reachable   reachable
Modules Health:
agent
├── controlplane
│   ├── daemon
│   │   └── ep-bpf-prog-watchdog                            [OK] ep-bpf-prog-watchdog (41m, x84)
│   ├── endpoint-manager
│   │   ├── cilium-endpoint-1897 (amazon-cloudwatch/amazon-cloudwatch-observability-controller-manager-6765dcfjc4dk)
│   │   │   ├── cep-k8s-sync                                [OK] sync-to-k8s-ciliumendpoint (1897) (40m, x244)
│   │   │   ├── datapath-regenerate                         [OK] Endpoint regeneration successful (40m, x1)
│   │   │   └── policymap-sync                              [OK] sync-policymap-1897 (40m, x3)
│   │   ├── cilium-endpoint-138 (kube-system/coredns-5488df4cc7-gkt7z)
│   │   │   ├── cep-k8s-sync                                [OK] sync-to-k8s-ciliumendpoint (138) (39m, x238)
│   │   │   ├── datapath-regenerate                         [OK] Endpoint regeneration successful (39m, x1)
│   │   │   └── policymap-sync                              [OK] sync-policymap-138 (39m, x3)
│   │   ├── endpoint-gc                                     [OK] endpoint-gc (41m, x9)
│   │   ├── cilium-endpoint-3177
│   │   │   ├── policymap-sync                              [OK] sync-policymap-3177 (41m, x3)
│   │   │   └── datapath-regenerate                         [OK] Endpoint regeneration successful (41m, x1)
│   │   ├── cilium-endpoint-3322
│   │   │   ├── policymap-sync                              [OK] sync-policymap-3322 (41m, x3)
│   │   │   └── datapath-regenerate                         [OK] Endpoint regeneration successful (41m, x1)
│   │   └── cilium-endpoint-1769 (amazon-cloudwatch/cloudwatch-agent-jc6nv)
│   │       ├── cep-k8s-sync                                [OK] sync-to-k8s-ciliumendpoint (1769) (41m, x251)
│   │       ├── datapath-regenerate                         [OK] Endpoint regeneration successful (41m, x1)
│   │       └── policymap-sync                              [OK] sync-policymap-1769 (41m, x3)
│   ├── node-manager
│   │   ├── background-sync                                 [OK] Node validation successful (41m, x37)
│   │   ├── nodes-add                                       [OK] Node adds successful (41m, x4)
│   │   ├── nodes-update                                    [OK] Node updates successful (41m, x11)
│   │   └── nodes-delete                                    [OK] Node deletions successful (36m, x2)
│   ├── auth
│   │   ├── observer-job-auth request-authentication        [OK] Primed (41m, x1)
│   │   ├── observer-job-auth gc-identity-events            [OK] Primed (41m, x1)
│   │   └── timer-job-auth gc-cleanup                       [OK] OK (16.921µs) (41m, x1)
│   ├── l2-announcer
│   │   └── leader-election                                 [OK]  (41m, x1)
│   └── envoy-proxy
│       └── timer-job-version-check                         [OK] OK (35.240567ms) (41m, x1)
└── datapath
    ├── agent-liveness-updater
    │   └── timer-job-agent-liveness-updater                [OK] OK (35.848µs) (41m, x1)
    ├── node-address
    │   └── job-node-address-update                         [OK] 169.254.170.23 (pod-id-link0), fd00:ec2::23 (pod-id-link0) (41m, x1)
    └── l2-responder
        └── job-l2-responder-reconciler                     [OK] Running (41m, x1)
dekimasoon

Next, the docs say to deploy nginx and check connectivity.

apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: my-nginx
  name: my-nginx
spec:
  selector:
    matchLabels:
      run: my-nginx
  replicas: 2
  template:
    metadata:
      labels:
        run: my-nginx
    spec:
      containers:
      - name: my-nginx
        image: nginx
        ports:
        - containerPort: 80

Apply the Deployment.

✗ kubectl create ns my-nginx
namespace/my-nginx created

✗ kubectl apply -f my-nginx.yaml
deployment.apps/my-nginx created

✗ kubectl get pods -n my-nginx -l run=my-nginx -o wide
NAME                        READY   STATUS    RESTARTS   AGE   IP            NODE                                            NOMINATED NODE   READINESS GATES
my-nginx-684dd4dcd4-vvtnp   1/1     Running   0          80s   10.0.48.189   ip-10-0-63-82.ap-northeast-1.compute.internal   <none>           <none>
my-nginx-684dd4dcd4-z6wq4   1/1     Running   0          80s   10.0.43.139   ip-10-0-42-43.ap-northeast-1.compute.internal   <none>           <none>

The official docs use NodePort, but this is a private cluster, so I'll try exposing it with type=LoadBalancer instead. Port-forwarding would work just as well.
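
For reference, the port-forward route would look something like this (the local port 8080 is arbitrary):

kubectl port-forward -n my-nginx deployment/my-nginx 8080:80
# then, in another terminal
curl http://localhost:8080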

✗ kubectl expose deployment -n my-nginx my-nginx --type=LoadBalancer --port=80
service/my-nginx exposed

✗ kubectl get svc -n my-nginx my-nginx
NAME       TYPE           CLUSTER-IP      EXTERNAL-IP                                                                    PORT(S)        AGE
my-nginx   LoadBalancer   172.20.248.40   a21eb19d3a6e444aea927ccb724e7fe2-1196477804.ap-northeast-1.elb.amazonaws.com   80:32715/TCP   7s

The command below seems to confirm that Cilium's eBPF has created the service entries that replace kube-proxy.
The entries with port 32715 are the ones for this Service.

✗ kubectl -n kube-system exec ds/cilium -- cilium service list
Defaulted container "cilium-agent" out of: cilium-agent, config (init), mount-cgroup (init), apply-sysctl-overwrites (init), mount-bpf-fs (init), clean-cilium-state (init), install-cni-binaries (init)
ID   Frontend               Service Type   Backend
1    172.20.58.139:443      ClusterIP      1 => 10.0.43.135:9443 (active)
2    172.20.193.33:4315     ClusterIP      1 => 10.0.43.130:4315 (active)
3    172.20.193.33:4316     ClusterIP      1 => 10.0.43.130:4316 (active)
4    172.20.193.33:2000     ClusterIP      1 => 10.0.43.130:2000 (active)
5    172.20.217.229:8888    ClusterIP      1 => 10.0.43.130:8888 (active)
                                           2 => 10.0.48.190:8888 (active)
6    172.20.0.1:443         ClusterIP      1 => 10.0.28.36:443 (active)
                                           2 => 10.0.39.11:443 (active)
7    172.20.14.33:443       ClusterIP      1 => 10.0.42.43:4244 (active)
8    172.20.0.10:53         ClusterIP      1 => 10.0.48.184:53 (active)
                                           2 => 10.0.43.141:53 (active)
9    172.20.0.10:9153       ClusterIP      1 => 10.0.48.184:9153 (active)
                                           2 => 10.0.43.141:9153 (active)
18   172.20.248.40:80       ClusterIP      1 => 10.0.48.189:80 (active)
                                           2 => 10.0.43.139:80 (active)
19   10.0.42.43:32715       NodePort       1 => 10.0.48.189:80 (active)
                                           2 => 10.0.43.139:80 (active)
20   169.254.170.23:32715   NodePort       1 => 10.0.48.189:80 (active)
                                           2 => 10.0.43.139:80 (active)
21   0.0.0.0:32715          NodePort       1 => 10.0.48.189:80 (active)
                                           2 => 10.0.43.139:80 (active)

Check connectivity with curl. Looks good.
It took about a minute for DNS to propagate.

✗ curl a21eb19d3a6e444aea927ccb724e7fe2-1196477804.ap-northeast-1.elb.amazonaws.com
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>
dekimasoon

Clean up and we're done!

✗ kubectl delete namespace my-nginx
namespace "my-nginx" deleted
This scrap was closed on 2024/01/10.