kubectl describe pod -n pulsar pulsar-zookeeper-0
Name:           pulsar-zookeeper-0
Namespace:      pulsar
Priority:       0
Node:           <none>
Labels:         app=pulsar
                cluster=pulsar
                component=zookeeper
                controller-revision-hash=pulsar-zookeeper-696fff875c
                release=pulsar
                statefulset.kubernetes.io/pod-name=pulsar-zookeeper-0
Annotations:    kubernetes.io/psp: eks.privileged
                prometheus.io/port: 8000
                prometheus.io/scrape: true
Status:         Pending
IP:
IPs:            <none>
Controlled By:  StatefulSet/pulsar-zookeeper
Containers:
  pulsar-zookeeper:
    Image:       artifactory.cwantools.io:5000/apachepulsar/pulsar:2.9.2
    Ports:       8000/TCP, 2181/TCP, 2888/TCP, 3888/TCP
    Host Ports:  0/TCP, 0/TCP, 0/TCP, 0/TCP
    Command:
      sh
      -c
    Args:
      bin/apply-config-from-env.py conf/zookeeper.conf;
      bin/generate-zookeeper-config.sh conf/zookeeper.conf;
      OPTS="${OPTS} -Dlog4j2.formatMsgNoLookups=true" exec bin/pulsar zookeeper;
    Requests:
      cpu:      100m
      memory:   256Mi
    Liveness:   exec [timeout 30 bash -c echo ruok | nc -q 1 localhost 2181 | grep imok] delay=20s timeout=30s period=30s #success=1 #failure=10
    Readiness:  exec [timeout 30 bash -c echo ruok | nc -q 1 localhost 2181 | grep imok] delay=20s timeout=30s period=30s #success=1 #failure=10
    Environment Variables from:
      pulsar-zookeeper  ConfigMap  Optional: false
    Environment:
      ZOOKEEPER_SERVERS:  pulsar-zookeeper-0,pulsar-zookeeper-1,pulsar-zookeeper-2
    Mounts:
      /pulsar/data from pulsar-zookeeper-data (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-7tm4p (ro)
Conditions:
  Type           Status
  PodScheduled   False
Volumes:
  pulsar-zookeeper-data:
    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
    ClaimName:  pulsar-zookeeper-data-pulsar-zookeeper-0
    ReadOnly:   false
  kube-api-access-7tm4p:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   Burstable
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason             Age                 From                Message
  ----     ------             ----                ----                -------
  Warning  FailedScheduling   26s (x12 over 50m)  default-scheduler   0/69 nodes are available: 69 pod has unbound immediate PersistentVolumeClaims. preemption: 0/69 nodes are available: 69 Preemption is not helpful for scheduling.
  Normal   NotTriggerScaleUp  6s (x301 over 50m)  cluster-autoscaler  pod didn't trigger scale-up: 1 max node group size reached
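The FailedScheduling event says the pod cannot be placed because its PersistentVolumeClaim (pulsar-zookeeper-data-pulsar-zookeeper-0) is still unbound, and the cluster-autoscaler will not help because the node group is already at its maximum size. As a next diagnostic step (a suggestion, not part of the captured output), inspecting the claim and its StorageClass usually shows whether a provisioner is missing, the requested class does not exist, or the class uses WaitForFirstConsumer binding:

kubectl get pvc -n pulsar
kubectl describe pvc -n pulsar pulsar-zookeeper-data-pulsar-zookeeper-0
kubectl get storageclass

The describe output for the PVC should contain events explaining why no PersistentVolume was provisioned or bound.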