  1. sudo ps aux | grep kubelet
  2. root 5542 0.0 0.0 11824 2912 ? Ss Mar20 0:02 /bin/bash -c #!/bin/bash set -euo pipefail # set by the node image unset KUBECONFIG trap 'kill $(jobs -p); exit 0' TERM # track the current state of the config if [[ -f /etc/origin/node/node-config.yaml ]]; then md5sum /etc/origin/node/node-config.yaml > /tmp/.old else touch /tmp/.old fi if [[ -f /etc/origin/node/volume-config.yaml ]]; then md5sum /etc/origin/node/volume-config.yaml > /tmp/.old-volume.config else touch /tmp/.old-volume-config fi # loop until BOOTSTRAP_CONFIG_NAME is set while true; do file=/etc/sysconfig/origin-node if [[ -f /etc/sysconfig/atomic-openshift-node ]]; then file=/etc/sysconfig/atomic-openshift-node elif [[ -f /etc/sysconfig/origin-node ]]; then file=/etc/sysconfig/origin-node else echo "info: Waiting for the node sysconfig file to be created" 2>&1 sleep 15 & wait continue fi name="$(sed -nE 's|^BOOTSTRAP_CONFIG_NAME=([^#].+)|\1|p' "${file}" | head -1)" if [[ -z "${name}" ]]; then echo "info: Waiting for BOOTSTRAP_CONFIG_NAME to be set" 2>&1 sleep 15 & wait continue fi # in the background check to see if the value changes and exit if so pid=$BASHPID ( while true; do if ! updated="$(sed -nE 's|^BOOTSTRAP_CONFIG_NAME=([^#].+)|\1|p' "${file}" | head -1)"; then echo "error: Unable to check for bootstrap config, exiting" 2>&1 kill $pid exit 1 fi if [[ "${updated}" != "${name}" ]]; then echo "info: Bootstrap configuration profile name changed, exiting" 2>&1 kill $pid exit 0 fi sleep 15 done ) & break done mkdir -p /etc/origin/node/tmp # periodically refresh both node-config.yaml and relabel the node while true; do if ! oc extract "configmaps/${name}" -n openshift-node --to=/etc/origin/node/tmp --confirm --request-timeout=10s --config /etc/origin/node/node.kubeconfig "--token=$( cat /var/run/secrets/kubernetes.io/serviceaccount/token )" > /dev/null; then echo "error: Unable to retrieve latest config for node" 2>&1 sleep 15 & wait $! continue fi KUBELET_HOSTNAME_OVERRIDE=$(cat /etc/sysconfig/KUBELET_HOSTNAME_OVERRIDE 2>/dev/null) || : if ! [[ -z "$KUBELET_HOSTNAME_OVERRIDE" ]]; then #Patching node-config for hostname override echo "nodeName: $KUBELET_HOSTNAME_OVERRIDE" >> /etc/origin/node/tmp/node-config.yaml fi # detect whether the node-config.yaml has changed, and if so trigger a restart of the kubelet. if [[ ! -f /etc/origin/node/node-config.yaml ]]; then cat /dev/null > /tmp/.old fi if [[ ! -f /etc/origin/node/volume-config.yaml ]]; then cat /dev/null > /tmp/.old-volume-config fi md5sum /etc/origin/node/tmp/node-config.yaml > /tmp/.new if [[ ! 
-f /etc/origin/node/tmp/volume-config.yaml ]]; then cat /dev/null > /tmp/.new-volume-config else md5sum /etc/origin/node/tmp/volume-config.yaml > /tmp/.new-volume-config fi trigger_restart=false if [[ "$( cat /tmp/.old )" != "$( cat /tmp/.new )" ]]; then mv /etc/origin/node/tmp/node-config.yaml /etc/origin/node/node-config.yaml trigger_restart=true fi if [[ "$( cat /tmp/.old-volume-config )" != "$( cat /tmp/.new-volume-config )" ]]; then mv /etc/origin/node/tmp/volume-config.yaml /etc/origin/node/volume-config.yaml trigger_restart=true fi if [[ "$trigger_restart" = true ]]; then SYSTEMD_IGNORE_CHROOT=1 systemctl restart tuned || : echo "info: Configuration changed, restarting kubelet" 2>&1 # TODO: kubelet doesn't relabel nodes, best effort for now # https://github.com/kubernetes/kubernetes/issues/59314 if args="$(openshift-node-config --config /etc/origin/node/node-config.yaml)"; then labels=$(tr ' ' '\n' <<<$args | sed -ne '/^--node-labels=/ { s/^--node-labels=//; p; }' | tr ',\n' ' ') if [[ -n "${labels}" ]]; then echo "info: Applying node labels $labels" 2>&1 if ! oc label --config=/etc/origin/node/node.kubeconfig "node/${NODE_NAME}" ${labels} --overwrite; then echo "error: Unable to apply labels, will retry in 10" 2>&1 sleep 10 & wait $! continue fi fi else echo "error: The downloaded node configuration is invalid, retrying later" 2>&1 sleep 10 & wait $! continue fi if ! pkill -U 0 -f '(^|/)hyperkube kubelet '; then echo "error: Unable to restart Kubelet" 2>&1 sleep 10 & wait $! continue fi fi # annotate node with md5sum of the config oc annotate --config=/etc/origin/node/node.kubeconfig "node/${NODE_NAME}" \ node.openshift.io/md5sum="$( cat /tmp/.new | cut -d' ' -f1 )" --overwrite cp -f /tmp/.new /tmp/.old cp -f /tmp/.new-volume-config /tmp/.old-volume-config sleep 180 & wait $! done
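The PID 5542 entry above is the OpenShift 3.11 node-config sync loop (normally run on each node by the sync DaemonSet): it waits for BOOTSTRAP_CONFIG_NAME to appear in the node sysconfig file, then every 180 seconds pulls that ConfigMap from the openshift-node namespace, compares md5sums against the local /etc/origin/node/node-config.yaml and volume-config.yaml, and on a change moves the new files into place, reapplies the node labels, kills the running hyperkube kubelet so it comes back up with the new configuration, and records the checksum in the node.openshift.io/md5sum annotation on the Node object. (Note what looks like a typo in the capture: the initial volume-config checksum is written to /tmp/.old-volume.config but later compared against /tmp/.old-volume-config, so the first comparison likely reports a spurious change.) A minimal spot check of whether this loop has the node in sync, assuming oc access from the node and that the Node object is named after the host:

  # hypothetical spot check: compare the local config checksum with the
  # node.openshift.io/md5sum annotation that the sync loop writes
  local_sum="$(md5sum /etc/origin/node/node-config.yaml | cut -d' ' -f1)"
  annotated_sum="$(oc get node "$(hostname)" \
      -o jsonpath='{.metadata.annotations.node\.openshift\.io/md5sum}')"
  [[ "${local_sum}" == "${annotated_sum}" ]] && echo "in sync" || echo "changed or not yet synced"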
  3. root 5691 0.0 0.0 11820 2432 ? S Mar20 0:04 /bin/bash -c #!/bin/bash set -euo pipefail # set by the node image unset KUBECONFIG trap 'kill $(jobs -p); exit 0' TERM # track the current state of the config if [[ -f /etc/origin/node/node-config.yaml ]]; then md5sum /etc/origin/node/node-config.yaml > /tmp/.old else touch /tmp/.old fi if [[ -f /etc/origin/node/volume-config.yaml ]]; then md5sum /etc/origin/node/volume-config.yaml > /tmp/.old-volume.config else touch /tmp/.old-volume-config fi # loop until BOOTSTRAP_CONFIG_NAME is set while true; do file=/etc/sysconfig/origin-node if [[ -f /etc/sysconfig/atomic-openshift-node ]]; then file=/etc/sysconfig/atomic-openshift-node elif [[ -f /etc/sysconfig/origin-node ]]; then file=/etc/sysconfig/origin-node else echo "info: Waiting for the node sysconfig file to be created" 2>&1 sleep 15 & wait continue fi name="$(sed -nE 's|^BOOTSTRAP_CONFIG_NAME=([^#].+)|\1|p' "${file}" | head -1)" if [[ -z "${name}" ]]; then echo "info: Waiting for BOOTSTRAP_CONFIG_NAME to be set" 2>&1 sleep 15 & wait continue fi # in the background check to see if the value changes and exit if so pid=$BASHPID ( while true; do if ! updated="$(sed -nE 's|^BOOTSTRAP_CONFIG_NAME=([^#].+)|\1|p' "${file}" | head -1)"; then echo "error: Unable to check for bootstrap config, exiting" 2>&1 kill $pid exit 1 fi if [[ "${updated}" != "${name}" ]]; then echo "info: Bootstrap configuration profile name changed, exiting" 2>&1 kill $pid exit 0 fi sleep 15 done ) & break done mkdir -p /etc/origin/node/tmp # periodically refresh both node-config.yaml and relabel the node while true; do if ! oc extract "configmaps/${name}" -n openshift-node --to=/etc/origin/node/tmp --confirm --request-timeout=10s --config /etc/origin/node/node.kubeconfig "--token=$( cat /var/run/secrets/kubernetes.io/serviceaccount/token )" > /dev/null; then echo "error: Unable to retrieve latest config for node" 2>&1 sleep 15 & wait $! continue fi KUBELET_HOSTNAME_OVERRIDE=$(cat /etc/sysconfig/KUBELET_HOSTNAME_OVERRIDE 2>/dev/null) || : if ! [[ -z "$KUBELET_HOSTNAME_OVERRIDE" ]]; then #Patching node-config for hostname override echo "nodeName: $KUBELET_HOSTNAME_OVERRIDE" >> /etc/origin/node/tmp/node-config.yaml fi # detect whether the node-config.yaml has changed, and if so trigger a restart of the kubelet. if [[ ! -f /etc/origin/node/node-config.yaml ]]; then cat /dev/null > /tmp/.old fi if [[ ! -f /etc/origin/node/volume-config.yaml ]]; then cat /dev/null > /tmp/.old-volume-config fi md5sum /etc/origin/node/tmp/node-config.yaml > /tmp/.new if [[ ! 
-f /etc/origin/node/tmp/volume-config.yaml ]]; then cat /dev/null > /tmp/.new-volume-config else md5sum /etc/origin/node/tmp/volume-config.yaml > /tmp/.new-volume-config fi trigger_restart=false if [[ "$( cat /tmp/.old )" != "$( cat /tmp/.new )" ]]; then mv /etc/origin/node/tmp/node-config.yaml /etc/origin/node/node-config.yaml trigger_restart=true fi if [[ "$( cat /tmp/.old-volume-config )" != "$( cat /tmp/.new-volume-config )" ]]; then mv /etc/origin/node/tmp/volume-config.yaml /etc/origin/node/volume-config.yaml trigger_restart=true fi if [[ "$trigger_restart" = true ]]; then SYSTEMD_IGNORE_CHROOT=1 systemctl restart tuned || : echo "info: Configuration changed, restarting kubelet" 2>&1 # TODO: kubelet doesn't relabel nodes, best effort for now # https://github.com/kubernetes/kubernetes/issues/59314 if args="$(openshift-node-config --config /etc/origin/node/node-config.yaml)"; then labels=$(tr ' ' '\n' <<<$args | sed -ne '/^--node-labels=/ { s/^--node-labels=//; p; }' | tr ',\n' ' ') if [[ -n "${labels}" ]]; then echo "info: Applying node labels $labels" 2>&1 if ! oc label --config=/etc/origin/node/node.kubeconfig "node/${NODE_NAME}" ${labels} --overwrite; then echo "error: Unable to apply labels, will retry in 10" 2>&1 sleep 10 & wait $! continue fi fi else echo "error: The downloaded node configuration is invalid, retrying later" 2>&1 sleep 10 & wait $! continue fi if ! pkill -U 0 -f '(^|/)hyperkube kubelet '; then echo "error: Unable to restart Kubelet" 2>&1 sleep 10 & wait $! continue fi fi # annotate node with md5sum of the config oc annotate --config=/etc/origin/node/node.kubeconfig "node/${NODE_NAME}" \ node.openshift.io/md5sum="$( cat /tmp/.new | cut -d' ' -f1 )" --overwrite cp -f /tmp/.new /tmp/.old cp -f /tmp/.new-volume-config /tmp/.old-volume-config sleep 180 & wait $! done
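PID 5691 shows the exact same command line, most likely because the script forks a background subshell to watch for BOOTSTRAP_CONFIG_NAME changes, and a forked bash subshell inherits its parent's command line in ps output. The parent/child relationship can be confirmed on the node (PIDs taken from this capture):

  # show parent PID, state and runtime for the two bash processes above
  ps -o pid,ppid,stat,etime,args -p 5542,5691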
  4. root 7893 15.0 1.7 2293384 211444 ? Ssl Mar20 205:07 /usr/bin/hyperkube kubelet --v=2 --address=0.0.0.0 --allow-privileged=true --anonymous-auth=true --authentication-token-webhook=true --authentication-token-webhook-cache-ttl=5m --authorization-mode=Webhook --authorization-webhook-cache-authorized-ttl=5m --authorization-webhook-cache-unauthorized-ttl=5m --bootstrap-kubeconfig=/etc/origin/node/bootstrap.kubeconfig --cadvisor-port=0 --cert-dir=/etc/origin/node/certificates --cgroup-driver=systemd --client-ca-file=/etc/origin/node/client-ca.crt --cluster-dns=10.200.0.26 --cluster-domain=cluster.local --container-runtime-endpoint=/var/run/dockershim.sock --containerized=false --enable-controller-attach-detach=true --experimental-dockershim-root-directory=/var/lib/dockershim --fail-swap-on=false --feature-gates=RotateKubeletClientCertificate=true,RotateKubeletServerCertificate=true --healthz-bind-address= --healthz-port=0 --host-ipc-sources=api --host-ipc-sources=file --host-network-sources=api --host-network-sources=file --host-pid-sources=api --host-pid-sources=file --hostname-override= --http-check-frequency=0s --image-service-endpoint=/var/run/dockershim.sock --iptables-masquerade-bit=0 --kubeconfig=/etc/origin/node/node.kubeconfig --max-pods=250 --network-plugin=cni --node-ip= --node-labels=node-role.kubernetes.io/master=true,node-role.kubernetes.io/infra=true,node-role.kubernetes.io/compute=true --pod-infra-container-image=docker.io/openshift/origin-pod:v3.11.0 --pod-manifest-path=/etc/origin/node/pods --port=10250 --read-only-port=0 --register-node=true --root-dir=/var/lib/origin/openshift.local.volumes --rotate-certificates=true --rotate-server-certificates=true --tls-cert-file= --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 --tls-cipher-suites=TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA --tls-cipher-suites=TLS_RSA_WITH_AES_128_GCM_SHA256 --tls-cipher-suites=TLS_RSA_WITH_AES_256_GCM_SHA384 --tls-cipher-suites=TLS_RSA_WITH_AES_128_CBC_SHA --tls-cipher-suites=TLS_RSA_WITH_AES_256_CBC_SHA --tls-min-version=VersionTLS12 --tls-private-key-file=
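The hyperkube kubelet entry (PID 7893) is easier to audit with one flag per line; reading the process cmdline directly avoids the wrapping. The anchored pattern below is an assumption based on the /usr/bin/hyperkube path seen in this capture, and it avoids matching the two sync-script bash processes, whose command lines also contain the string "hyperkube kubelet":

  # print each kubelet flag on its own line, e.g. to check --node-labels,
  # --read-only-port, --tls-min-version or the cipher-suite list
  tr '\0' '\n' < "/proc/$(pgrep -f '^/usr/bin/hyperkube kubelet')/cmdline"

From that listing one can quickly confirm, for example, that the read-only port is disabled (--read-only-port=0) and which node labels the sync loop reapplies (--node-labels=node-role.kubernetes.io/master=true,...).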
  5. centos 8532 0.0 0.0 112720 2256 pts/0 S+ 09:51 0:00 grep --color=auto kubelet
  6. 1000150+ 10655 0.0 0.3 51192 37128 ? Ssl Mar20 0:47 /bin/operator --kubelet-service=kube-system/kubelet --logtostderr=true --config-reloader-image=quay.io/coreos/configmap-reload:v0.0.1 --prometheus-config-reloader=quay.io/coreos/prometheus-config-reloader:v0.23.2 --namespace=openshift-monitoring
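The --kubelet-service=kube-system/kubelet flag on the Prometheus Operator (PID 10655) tells it to maintain a Service/Endpoints object listing every kubelet so Prometheus can scrape them; a quick sanity check, assuming read access to kube-system:

  oc -n kube-system get endpoints kubelet -o wide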
  7. centos 11403 0.3 0.4 68540 54488 ? Ssl Mar20 4:29 heapster --source=kubernetes.summary_api:${MASTER_URL}?useServiceAccount=true&kubeletHttps=true&kubeletPort=10250 --tls_cert=/heapster-certs/tls.crt --tls_key=/heapster-certs/tls.key --tls_client_ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt --allowed_users=system:master-proxy --metric_resolution=30s --sink=hawkular:https://hawkular-metrics:443?tenant=_system&labelToTenant=pod_namespace&labelNodeId=nodename&caCert=/hawkular-metrics-certs/tls.crt&user=hawkular&pass=$HEAPSTER_PASSWORD&filter=label(container_name:^system.slice.*|^user.slice)&concurrencyLimit=5
  8. 1000220+ 11621 0.1 0.4 877860 50744 ? Ssl Mar20 2:13 /usr/bin/metrics-server --source=kubernetes.summary_api:?useServiceAccount=true&kubeletHttps=true&kubeletPort=10250 --tls-cert-file=/certs/tls.crt --tls-private-key-file=/certs/tls.key --metric_resolution=30s --secure-port=8443
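Both heapster (PID 11403) and metrics-server (PID 11621) read node and pod stats from the kubelet Summary API over HTTPS on port 10250, as their --source URLs (kubeletHttps=true&kubeletPort=10250) show; heapster additionally forwards the metrics to Hawkular. The same Summary API can be queried directly, a sketch assuming a token that the kubelet's webhook authentication/authorization accepts for nodes/stats and a placeholder node host:

  # hypothetical direct query of the kubelet Summary API used by both collectors
  TOKEN="$(oc whoami -t)"   # or a service-account token with nodes/stats access
  curl -sk -H "Authorization: Bearer ${TOKEN}" \
      "https://<node-host>:10250/stats/summary" | head -n 20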