Open31
CKAD
# Shortcut for generating manifests: kubectl run ... $do > file.yaml
export do="--dry-run=client -o yaml"
kubectl run nginx --image=nginx $do > nginx.yaml
# Exam-speed aliases.
alias k="kubectl"
alias kc="kubectl create"
alias ka="kubectl apply -f"
alias kr="kubectl replace --force -f"
# Switch the current context's default namespace: kn <namespace>
alias kn="kubectl config set-context --current --namespace"
# Throwaway curl pod for in-cluster connectivity checks.
alias debug="kubectl run -it --rm --image=curlimages/curl c -- sh"
pv (PersistentVolume)
pvc (PersistentVolumeClaim)
svc (Service)
マルチコンテナポッド
# Multi-container pod: httpd primary plus a busybox sidecar sharing the pod
# network namespace (so localhost reaches httpd from the sidecar).
k run multiple --image=httpd --dry-run=client -o yaml >multiple.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: multiple
  name: multiple
spec:
  containers:
  - image: httpd
    name: primary
    resources: {}
  - image: busybox
    name: secondary          # was misspelled "secoundary"
    command: ["sh", "-c", "sleep 3000"]
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
k apply -f multiple.yaml
k get pods
# -qO- (capital O) writes the fetched page to stdout; "-q0-" (zero) was a typo.
k exec multiple -c secondary -- sh -c "wget -qO- http://localhost"
Initコンテナ
# Init-container exercise: the init container writes index.html into a shared
# emptyDir volume that the nginx container then serves.
k run init-pod --image=nginx:alpine --dry-run=client -o yaml >init-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: init-pod
  name: init-pod
spec:
  volumes:
  - name: index
    emptyDir: {}
  containers:
  - image: nginx:alpine
    name: nginx
    ports:
    - containerPort: 80
    resources: {}
    volumeMounts:
    - name: index
      mountPath: /usr/share/nginx/html
  initContainers:
  - name: init
    image: busybox
    volumeMounts:
    - name: index
      mountPath: /etc/init
    command: ["sh", "-c", "echo 'hello' > /etc/init/index.html"]
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
k apply -f init-pod.yaml
k get pods
k get pods -o wide
# Ad-hoc curl pod; hit the pod IP reported by -o wide.
k run -it --rm --image=curlimages/curl c -- sh
curl 192.168.1.5
# Build and export a local image with podman.
podman build . -t hello:2.0
podman save hello:2.0 -o hello-2.0.tar
k create cj cron-pi --image=perl --schedule="*/1 * * * *" --dry-run=client -o yaml > cron-pi.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  creationTimestamp: null
  name: cron-pi
spec:
  schedule: "*/1 * * * *"      # required field; was missing from the manifest
  jobTemplate:
    metadata:
      creationTimestamp: null
      name: cron-pi
    spec:
      completions: 5
      parallelism: 2           # was misspelled "paralleism" (key would be rejected)
      template:
        metadata:
          creationTimestamp: null
        spec:
          restartPolicy: Never # Job pods must be Never or OnFailure
          containers:
          - name: cron-pi
            image: perl
            command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(100)"]
            resources: {}
k apply -f cron-pi.yaml
watch -n 5 kubectl get cj,pods
# hostPath PersistentVolume, manually bound via storageClassName "sc".
apiVersion: v1
kind: PersistentVolume
metadata:
  name: my-pv
spec:
  capacity:
    storage: 1Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  storageClassName: sc
  hostPath:
    path: /etc/data
k apply -f my-pv.yaml
# Claim against my-pv: matching access mode and storage class, smaller request.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: my-claim
spec:
  accessModes:
  - ReadWriteMany
  volumeMode: Filesystem
  resources:
    requests:
      storage: 200Mi
  storageClassName: sc
k apply -f my-claim.yaml
k run pv-pod --image=nginx --dry-run=client -o yaml > pv-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: pv-pod
  name: pv-pod
spec:
  containers:
  - image: nginx
    name: pv-pod
    resources: {}
    volumeMounts:
    - mountPath: /etc/my-volume   # was misspelled "montPath"
      name: my-volume
  volumes:
  - name: my-volume
    persistentVolumeClaim:
      claimName: my-claim
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
k apply -f pv-pod.yaml
k get pv,pvc,pods
# Schedule onto nodes labelled disktype=ssd (node name was garbled "contro;p;ane").
k label nodes controlplane disktype=ssd
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: assigned
  name: assigned
spec:
  nodeSelector:                 # was missing the colon
    disktype: ssd
  containers:
  - image: httpd
    name: assigned
    resources: {}
  # NOTE(review): dropped an orphan volumeMounts entry that referenced
  # "my-volume" with an empty volumes: list — copy-paste residue from pv-pod.
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
k get pods -o wide
# The node label must match the affinity key "size" (was typo "siza").
k label nodes node01 size=large
apiVersion: v1
kind: Pod
metadata:
  name: affinity
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: size
            operator: In
            values:
            - large
  containers:
  - name: affinity
    image: nginx:alpine
k get pods -o wide
k taint nodes node01 dedicated=admin:NoSchedule
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: admindb                # was "run:admindb" (missing space after colon)
  name: admindb
spec:
  tolerations:                  # tolerations is a list: entries need a leading dash
  - key: "dedicated"
    operator: "Equal"
    value: "admin"
    effect: "NoSchedule"
  # NOTE(review): removed a stray empty "affinity:" key left over from the
  # previous exercise.
  containers:
  - name: admindb
    image: redis:alpine
k apply -f admindb.yaml
k get pods -o wide
# Name was inconsistent: created "frontent" but later scaled "frontend".
k create deploy frontend --image=nginx --replicas=2
k get deploy
k scale deploy frontend --replicas=5
k get deploy
k create deploy nginx-app --image=nginx --replicas=3
k get deploy -o wide
# Rolling update: container name "nginx" gets the new image tag.
k set image deploy nginx-app nginx=nginx:mainline
k get deploy -o wide
# Blue/green switch: duplicate the manifest, relabel, then repoint the Service.
cp blue.yaml green.yaml
# In green.yaml, change the pod label to:
app-version: green
k apply -f green.yaml
k edit svc -n bgd blue-green-svc
# Point the Service selector at the green pods:
selector:
  app-version: green
# The repo URL was truncated; Bitnami charts are served from charts.bitnami.com.
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo list
helm search repo bitnami/redis
helm install my-redis bitnami/redis
helm list
# HTTP liveness probe (both "livenessProve" occurrences and
# "initialDelaySeeconds" were misspelled; httpGet was missing its colon).
livenessProbe:
  httpGet:
    path: /
    port: 80
  initialDelaySeconds: 3
  periodSeconds: 5
k get pods
# TCP liveness probe.
livenessProbe:
  tcpSocket:
    port: 8080
  initialDelaySeconds: 5
  periodSeconds: 3
# Exec readiness probe: ready once the marker file exists.
readinessProbe:
  exec:
    command: ["sh", "-c", "ls /etc/ready/probe"]
  initialDelaySeconds: 8
  periodSeconds: 5
k create ns logging
# "--sh" was missing the "--" separator, and the loop condition was "ture".
k run logger --image=busybox -n logging --command -- sh -c "while true; do echo 'hello'; sleep 4; done"
k logs -n logging logger --since=20s --timestamps > /etc/hello.log
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  # name must match the spec fields below, and be in the form: <plural>.<group>
  name: vehicles.stable.example.com
spec:
  # group name to use for REST API: /apis/<group>/<version>
  group: stable.example.com
  # list of versions supported by this CustomResourceDefinition
  versions:
  - name: v1
    # Each version can be enabled/disabled by Served flag.
    served: true
    # One and only one version must be marked as the storage version.
    storage: true
    schema:
      openAPIV3Schema:
        type: object
        properties:
          spec:
            type: object
            properties:
              vehicleType:
                type: string
              color:
                type: string
  # either Namespaced or Cluster
  scope: Namespaced
  names:
    # plural name to be used in the URL: /apis/<group>/<version>/<plural>
    plural: vehicles
    # singular name to be used as an alias on the CLI and for display
    singular: vehicle
    # kind is normally the CamelCased singular type. Your resource manifests use this.
    kind: Vehicle
    # shortNames allow shorter string to match your resource on the CLI
    shortNames:
    - vc
apiVersion: "stable.example.com/v1"   # was truncated to "piVersion"
kind: Vehicle
metadata:
  name: my-new-car
spec:
  vehicleType: car
  color: red
k describe vc my-new-car
# Create the ConfigMap the env block below references (the original line
# was an incomplete stub — NOTE(review): literal value assumed, confirm).
k create cm exam-config-map --from-literal=EXAM_CODE=CKAD
env:
- name: EXAM_CODE
  valueFrom:
    configMapKeyRef:
      name: exam-config-map
      key: EXAM_CODE
or
envFrom:
- configMapRef:
    name: exam-config-map
# "cm" was missing: "k create my-index-html" is not a valid resource kind.
k create cm my-index-html --from-file=index.html
volumeMounts:
- name: index
  mountPath: /usr/share/nginx/html
volumes:
- name: index
  configMap:
    name: my-index-html
# Create the Secret the env block below references (the original line was an
# incomplete stub — NOTE(review): literal value assumed, confirm).
k create secret generic db-secret --from-literal=DB_HOST=redis
env:
- name: REDIS_HOST
  valueFrom:
    secretKeyRef:
      name: db-secret
      key: DB_HOST
# Attach a ServiceAccount to a Deployment ("k create deploy api" was missing
# its required --image flag).
k create sa my-sa
k create deploy api --image=nginx
k set sa deploy api my-sa
k describe deploy api
# RBAC: role allowing deployment creation, bound to a ServiceAccount.
k create sa deployer-sa
k create role deployer-role --verb=create --resource=deployments
k create rolebinding deployer-binding --serviceaccount=default:deployer-sa --role=deployer-role
k auth can-i create deployments --as=system:serviceaccount:default:deployer-sa
# Pod-level securityContext; verify with `id` inside the container.
securityContext:
  runAsUser: 1000
  runAsGroup: 3000
k exec -it context -- id
# Requests/limits snippet for a container spec.
resources:
  limits:
    cpu: 1
    memory: 2Gi
  requests:
    cpu: 500m
    memory: 1Gi
k describe pod resource   # pod name was misspelled "resorce"
# "k deploy" is not a kubectl subcommand; create the deployment explicitly.
k create deploy http-deploy --image=httpd
# Service types are case-sensitive: ClusterIP, not clusterIP.
k expose deploy http-deploy --name=httpd-svc --type=ClusterIP --port=80
# --rm needs two dashes, the registry is "curlimages", and "--" separates the command.
k run -it --rm --image=curlimages/curl c -- sh
curl httpd-svc
k create
k expose --type=NodePort --port=80
k get svc
curl controlplane:31621
# "crete" typo fixed; --rule needs "=" before its value.
k create ingress hello-ingress --class=nginx --rule="hello-ckad.example/=hello-svc:80"
curl hello-ckad.example:30100
kubectl config set-context --current --namespace backend
k get pods -o wide
# Capture web2's pod IP; the curl below reads $web2 (it was exported as web1).
export web2=192.168.1.4
k exec web1 -- sh -c "curl -s -m 2 $web2"
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: web1
  namespace: backend
spec:
  podSelector:
    matchLabels:
      type: web1
  policyTypes:
  - Ingress
  - Egress
  ingress:
  - from:
    - podSelector:
        matchLabels:
          type: web3
    ports:
    - protocol: TCP
      port: 80
  egress:
  - to:                   # egress rules need "to:"; a bare podSelector is invalid here
    - podSelector:
        matchLabels:
          type: web3
    ports:
    - protocol: TCP
      port: 80
# Aggregate logs from several pods via a set-based label selector.
k logs -n filter -l 'app in (pod-3, pod-7, pod-8)' > /etc/pods.log
cat /etc/pods.log
kubectl config set-context --current --namespace credential
k exec -it deployments/login -- env
k get secret creds -o yaml
# -n is required: without it echo appends a newline that gets base64-encoded
# into the secret value.
echo -n 'my-new-password' | base64
k edit secret creds
# Restart so pods pick up the changed secret via their env vars.
k rollout restart deploy login