# kubectl describe <object type> <object name>
```
> kubectl describe pod client-pod

Name:               client-pod
Namespace:          default
Node:               minikube/10.0.2.15
Start Time:         Sat, 02 Feb 2019 12:05:16 +0900
Labels:             component=web
Annotations:        kubectl.kubernetes.io/last-applied-configuration={"apiVersion":"v1","kind":"Pod","metadata":{"annotations":{},"labels":{"component":"web"},"name":"client-pod","namespace":"default"},"spec":{"container...
Status:             Running
IP:                 172.17.0.16
Containers:
  client:
    Container ID:   docker://465ecbe522f537a36c26c021d88c1efb21782daf1e6fffd1e93be3469701a4d5
    Image:          bear2u/multi-worker
    Image ID:       docker-pullable://bear2u/multi-worker@sha256:6559ad68144e14b8f6f3054ab0f19056853ea07a7c4ead068d9140bd0a33b926
    Port:           3000/TCP
    Host Port:      0/TCP
    State:          Running
      Started:      Sat, 09 Feb 2019 10:24:04 +0900
    Last State:     Terminated
      Reason:       Completed
      Exit Code:    0
      Started:      Sat, 09 Feb 2019 10:06:12 +0900
      Finished:     Sat, 09 Feb 2019 10:24:01 +0900
    Ready:          True
    Restart Count:  3
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-28mbg (ro)
Conditions:
  Type           Status
  Initialized    True
  Ready          True
  PodScheduled   True
Volumes:
  default-token-28mbg:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-28mbg
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type    Reason                 Age               From               Message
  ----    ------                 ---               ----               -------
  Normal  Scheduled              6d                default-scheduler  Successfully assigned client-pod to minikube
  Normal  SuccessfulMountVolume  6d                kubelet, minikube  MountVolume.SetUp succeeded for volume "default-token-28mbg"
  Normal  Pulling                6d                kubelet, minikube  pulling image "bear2u/multi-client"
  Normal  Pulled                 6d                kubelet, minikube  Successfully pulled image "bear2u/multi-client"
  Normal  Created                6d                kubelet, minikube  Created container
  Normal  Started                6d                kubelet, minikube  Started container
  Normal  SuccessfulMountVolume  12h               kubelet, minikube  MountVolume.SetUp succeeded for volume "default-token-28mbg"
  Normal  SandboxChanged         12h               kubelet, minikube  Pod sandbox changed, it will be killed and re-created.
  Normal  Pulling                12h               kubelet, minikube  pulling image "bear2u/multi-client"
  Normal  Pulled                 12h               kubelet, minikube  Successfully pulled image "bear2u/multi-client"
  Normal  Created                12h               kubelet, minikube  Created container
  Normal  Started                12h               kubelet, minikube  Started container
  Normal  SuccessfulMountVolume  25m               kubelet, minikube  MountVolume.SetUp succeeded for volume "default-token-28mbg"
  Normal  SandboxChanged         25m               kubelet, minikube  Pod sandbox changed, it will be killed and re-created.
  Normal  Pulling                25m               kubelet, minikube  pulling image "bear2u/multi-client"
  Normal  Pulled                 25m               kubelet, minikube  Successfully pulled image "bear2u/multi-client"
  Normal  Killing                7m                kubelet, minikube  Killing container with id docker://client:Container spec hash changed (3635549375 vs 3145631940).. Container will be killed and recreated.
  Normal  Pulling                7m                kubelet, minikube  pulling image "bear2u/multi-worker"
  Normal  Created                7m (x2 over 25m)  kubelet, minikube  Created container
  Normal  Pulled                 7m                kubelet, minikube  Successfully pulled image "bear2u/multi-worker"
  Normal  Started                7m (x2 over 25m)  kubelet, minikube  Started container
```
## Update errors

Let's see what happens when you change `containerPort` in the pod config file.
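For reference, here is a minimal `client-pod.yaml` of the shape these notes imply — a sketch reconstructed from the `describe` output above and the diff below, not the author's exact file. The move from port 3000 to 9999 is the attempted change:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: client-pod
  labels:
    component: web
spec:
  containers:
    - name: client
      image: bear2u/multi-worker
      ports:
        - containerPort: 9999   # changed from 3000 -- this is what the API server rejects
```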
```
> kubectl apply -f client-pod.yaml

the Pod "client-pod" is invalid: spec: Forbidden: pod updates may not change fields other than `spec.containers[*].image`, `spec.initContainers[*].image`, `spec.activeDeadlineSeconds` or `spec.tolerations` (only additions to existing tolerations)
{"Volumes":[{"Name":"default-token-28mbg","HostPath":null,"EmptyDir":null,"GCEPersistentDisk":null,"AWSElasticBlockStore":null,"GitRepo":null,"Secret":{"SecretName":"default-token-28mbg","Items":null,"DefaultMode":420,"Optional":null},"NFS":null,"ISCSI":null,"Glusterfs":null,"PersistentVolumeClaim":null,"RBD":null,"Quobyte":null,"FlexVolume":null,"Cinder":null,"CephFS":null,"Flocker":null,"DownwardAPI":null,"FC":null,"AzureFile":null,"ConfigMap":null,"VsphereVolume":null,"AzureDisk":null,"PhotonPersistentDisk":null,"Projected":null,"PortworxVolume":null,"ScaleIO":null,"StorageOS":null}],"InitContainers":null,"Containers":[{"Name":"client","Image":"bear2u/multi-worker","Command":null,"Args":null,"WorkingDir":"","Ports":[{"Name":"","HostPort":0,"ContainerPort":
A: 9999,"Protocol":"TCP","HostIP":""}],"EnvFrom":null,"Env":null,"Resources":{"Limits":null,"Requests":null},"VolumeMounts":[{"Name":"default-token-28mbg","ReadOnly":true,"MountPath":"/var/run/secrets/kubernetes.io/serviceaccount","SubPath":"","MountPropagation":null}],"VolumeDevices":null,"LivenessProbe":null,"ReadinessProbe":null,"Lifecycle":null,"TerminationMessagePath":"/dev/termination-log","TerminationMessagePolicy":"File","ImagePullPolicy":"Always","SecurityContext":null,"Stdin":false,"StdinOnce":false,"TTY":false}],"RestartPolicy":"Always","TerminationGracePeriodSeconds":30,"ActiveDeadlineSeconds":null,"DNSPolicy":"ClusterFirst","NodeSelector":null,"ServiceAccountName":"default","AutomountServiceAccountToken":null,"NodeName":"minikube","SecurityContext":{"HostNetwork":false,"HostPID":false,"HostIPC":false,"ShareProcessNamespace":null,"SELinuxOptions":null,"RunAsUser":null,"RunAsGroup":null,"RunAsNonRoot":null,"SupplementalGroups":null,"FSGroup":null},"ImagePullSecrets":null,"Hostname":"","Subdomain":"","Affinity":null,"SchedulerName":"default-scheduler","Tolerations":[{"Key":"node.kubernetes.io/not-ready","Operator":"Exists","Value":"","Effect":"NoExecute","TolerationSeconds":300},{"Key":"node.kubernetes.io/unreachable","Operator":"Exists","Value":"","Effect":"NoExecute","TolerationSeconds":300}],"HostAliases":null,"PriorityClassName":"","Priority":null,"DNSConfig":null}
B: 3000,"Protocol":"TCP","HostIP":""}],"EnvFrom":null,"Env":null,"Resources":{"Limits":null,"Requests":null},"VolumeMounts":[{"Name":"default-token-28mbg","ReadOnly":true,"MountPath":"/var/run/secrets/kubernetes.io/serviceaccount","SubPath":"","MountPropagation":null}],"VolumeDevices":null,"LivenessProbe":null,"ReadinessProbe":null,"Lifecycle":null,"TerminationMessagePath":"/dev/termination-log","TerminationMessagePolicy":"File","ImagePullPolicy":"Always","SecurityContext":null,"Stdin":false,"StdinOnce":false,"TTY":false}],"RestartPolicy":"Always","TerminationGracePeriodSeconds":30,"ActiveDeadlineSeconds":null,"DNSPolicy":"ClusterFirst","NodeSelector":null,"ServiceAccountName":"default","AutomountServiceAccountToken":null,"NodeName":"minikube","SecurityContext":{"HostNetwork":false,"HostPID":false,"HostIPC":false,"ShareProcessNamespace":null,"SELinuxOptions":null,"RunAsUser":null,"RunAsGroup":null,"RunAsNonRoot":null,"SupplementalGroups":null,"FSGroup":null},"ImagePullSecrets":null,"Hostname":"","Subdomain":"","Affinity":null,"SchedulerName":"default-scheduler","Tolerations":[{"Key":"node.kubernetes.io/not-ready","Operator":"Exists","Value":"","Effect":"NoExecute","TolerationSeconds":300},{"Key":"node.kubernetes.io/unreachable","Operator":"Exists","Value":"","Effect":"NoExecute","TolerationSeconds":300}],"HostAliases":null,"PriorityClassName":"","Priority":null,"DNSConfig":null}
```
Keep in mind that the only thing you can change on a live Pod is its image (plus the few fields listed in the error above).
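By contrast, an image-only edit goes through. A sketch, assuming the same `client-pod.yaml`:

```sh
# with only the image: line changed (and containerPort back at 3000),
# re-applying is accepted instead of rejected
$ kubectl apply -f client-pod.yaml
```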
## Deployment
A bare Pod lets you change nothing but the image. To get around this, let's add one more concept: the Deployment.

A Deployment carries the Pod configuration as a template. When something in that template changes, say the port, the Deployment kills the old Pod and brings up a new one in its place.
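A minimal `client-deployment.yaml` consistent with the output below — a sketch; the selector labels and container name are assumptions carried over from the pod example:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: client-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      component: web
  template:
    metadata:
      labels:
        component: web
    spec:
      containers:
        - name: client
          image: bear2u/multi-client
          ports:
            - containerPort: 3000
```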
```
$ kubectl get deployments
NAME                DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
client-deployment   1         1         1            1           56d
```
Checking the pods, you can see that one was created automatically by the deployment.
```
$ kubectl get pods
NAME                                 READY   STATUS    RESTARTS   AGE
client-deployment-848b54d879-ch26z   1/1     Running   5          56d
```
Now change the image and watch the deployment swap out the pod, as the sketch below shows.
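One way to trigger the swap (a sketch; the container name `client` is an assumption carried over from the pod spec, and you could equally edit the `image:` line in the YAML and re-apply):

```sh
$ kubectl set image deployment/client-deployment client=bear2u/multi-worker
```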
```
$ kubectl get pods
NAME                                 READY   STATUS              RESTARTS   AGE
client-deployment-848b54d879-ch26z   1/1     Running             5          56d
client-deployment-89bb69575-54pnn    0/1     ContainerCreating   0          5s

$ kubectl get pods
NAME                                READY   STATUS    RESTARTS   AGE
client-deployment-89bb69575-54pnn   1/1     Running   0          43s
```
For more detail:
```
$ kubectl get pods -o wide
NAME                                READY   STATUS    RESTARTS   AGE   IP           NODE
client-deployment-89bb69575-54pnn   1/1     Running   0          7m    172.17.0.1   minikube

$ kubectl describe pods client-deployment
```
Now change `replicas` in deployment.yaml and watch the numbers:
```yaml
...
replicas: 5
...
```
```
$ kubectl apply -f client-deployment.yaml
$ kubectl get deployment
NAME                DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
client-deployment   5         3         3            1           56d
```

DESIRED jumps to 5 immediately, while CURRENT and AVAILABLE catch up as the new pods start.
```dockerfile
# Pull an alpine image with Node on it
FROM node:10.15-alpine

# Set the working directory
WORKDIR "/app"

# Copy package.json by itself first so npm install can be cached
COPY ./package.json ./
RUN npm install

# Copy the source
COPY . .

# Run the client
CMD ["npm", "run", "start"]
```
```
data <Buffer ed 85 8c ec 8a a4 ed 8a> 8
data <Buffer b8 31 ed 85 8c ec 8a a4> 8
data <Buffer ed 8a b8 32 ed 85 8c ec> 8
data <Buffer 8a a4 ed 8a b8 31 ed 85> 8
data <Buffer 8c ec 8a a4 ed 8a b8 32> 8
data <Buffer ed 85 8c ec 8a a4 ed 8a> 8
data <Buffer b8 31 ed 85 8c ec 8a a4> 8
data <Buffer ed 8a b8 32 ed 85 8c ec> 8
data <Buffer 8a a4 ed 8a b8 31 ed 85> 8
data <Buffer 8c ec 8a a4 ed 8a b8 32> 8
```
```js
const tf = require('@tensorflow/tfjs');

const jumpData = tf.tensor([
  [70, 70, 70],
  [80, 70, 90],
  [70, 70, 70],
]);

const playerData = tf.tensor([
  [1, 160],
  [2, 160],
  [3, 160],
  [4, 160],
]);

jumpData.concat(playerData); // throws
```

```
Error: Error in concat2D: Shape of tensors[1] (4,2) does not match the shape of the rest (3,3) along the non-concatenated axis 1.
```

`concat` defaults to axis 0, so the tensors must agree on every other axis; a (3, 3) tensor and a (4, 2) tensor disagree on axis 1, hence the error.
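For contrast, a version that does concatenate — a sketch where the extra column of dummy values exists only to make the axis-1 sizes match:

```js
// playerData padded to three columns so axis 1 matches jumpData's (3 columns);
// stacking (3, 3) on (4, 3) along axis 0 yields a (7, 3) tensor.
const paddedPlayerData = tf.tensor([
  [1, 160, 0],
  [2, 160, 0],
  [3, 160, 0],
  [4, 160, 0],
]);

jumpData.concat(paddedPlayerData).print(); // shape [7, 3]
```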
Suppose there are two threads and one of them blocks on I/O. The scheduler can set the blocked thread aside in a separate space, let thread 2 finish, then bring thread 1 back in to finish up.

We'll revisit threading later with another pass through the source.
## Event loop
Think of the event loop as a control structure that decides what a single thread should do next.

Let's look at the overall structure of the event loop in pseudocode. Broadly, the loop keeps running until three checks all come up empty:
- `pendingTimers` — any pending `setTimeout`, `setInterval`, `setImmediate`?
- `pendingOSTasks` — any pending OS tasks? (like a server listening on a port)
- `pendingOperations` — any pending long-running operations? (like the `fs` module)
```js
// node myFile.js
const pendingTimers = [];
const pendingOSTasks = [];
const pendingOperations = [];

// New timers, tasks, operations are recorded from myFile running
myFile.runContents();

function shouldContinue() {
  // Check one: Any pending setTimeout, setInterval, setImmediate?
  // Check two: Any pending OS tasks? (Like server listening to port)
  // Check three: Any pending long running operations? (Like fs module)
  return pendingTimers.length || pendingOSTasks.length || pendingOperations.length;
}

// Entire body executes in one 'tick'
while (shouldContinue()) {
  // 1) Node looks at pendingTimers and sees if any functions
  //    are ready to be called. setTimeout, setInterval
  // 2) Node looks at pendingOSTasks and pendingOperations
  //    and calls relevant callbacks
  // 3) Pause execution. Continue when ...
  //    - a new pendingOSTask is done
  //    - a new pendingOperation is done
  //    - a timer is about to complete
  // 4) Look at pendingTimers. Call any setImmediate
  // 5) Handle any 'close' events
}

// exit back to terminal
```
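A quick runnable check of the "exit when nothing is pending" rule, as a sketch:

```js
const fs = require('fs');

// One pending timer and one pending fs operation keep the loop alive;
// once both callbacks have run, shouldContinue() would return false
// and the process exits back to the terminal.
setTimeout(() => console.log('timer done'), 100);

fs.readFile(__filename, () => console.log('fs done'));

console.log('first tick: main script finished');
```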