apiVersion: v1
items:
- apiVersion: apps/v1
  kind: StatefulSet
  metadata:
    annotations:
      meta.helm.sh/release-name: spark
      meta.helm.sh/release-namespace: spark-operator
    labels:
      app.kubernetes.io/component: master
      app.kubernetes.io/instance: spark
      app.kubernetes.io/managed-by: Helm
      app.kubernetes.io/name: spark
      app.kubernetes.io/version: 3.5.0
      helm.sh/chart: spark-8.1.7
    name: spark-master
    namespace: spark-operator
  spec:
    persistentVolumeClaimRetentionPolicy:
      whenDeleted: Retain
      whenScaled: Retain
    podManagementPolicy: OrderedReady
    replicas: 1
    revisionHistoryLimit: 10
    selector:
      matchLabels:
        app.kubernetes.io/component: master
        app.kubernetes.io/instance: spark
        app.kubernetes.io/name: spark
    serviceName: spark-headless
    template:
      metadata:
        labels:
          app.kubernetes.io/component: master
          app.kubernetes.io/instance: spark
          app.kubernetes.io/managed-by: Helm
          app.kubernetes.io/name: spark
          app.kubernetes.io/version: 3.5.0
          helm.sh/chart: spark-8.1.7
      spec:
        affinity:
          podAntiAffinity:
            preferredDuringSchedulingIgnoredDuringExecution:
            - podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app.kubernetes.io/component: master
                    app.kubernetes.io/instance: spark
                    app.kubernetes.io/name: spark
                topologyKey: kubernetes.io/hostname
              weight: 1
        containers:
        - env:
          - name: BITNAMI_DEBUG
            value: "false"
          - name: SPARK_MODE
            value: master
          - name: SPARK_DAEMON_MEMORY
          - name: SPARK_MASTER_PORT
            value: "7077"
          - name: SPARK_MASTER_WEBUI_PORT
            value: "8080"
          - name: SPARK_MASTER_OPTS
            value: -Dspark.ui.reverseProxy=true -Dspark.ui.reverseProxyUrl=https://spark.******************
          image: docker.io/bitnami/spark:3.5.0-debian-11-r17
          imagePullPolicy: IfNotPresent
          livenessProbe:
            failureThreshold: 6
            httpGet:
              path: /
              port: 8080
              scheme: HTTP
            initialDelaySeconds: 180
            periodSeconds: 20
            successThreshold: 1
            timeoutSeconds: 5
          lifecycle:
            postStart:
              exec:
                command:
                - "/opt/bitnami/spark/sbin/start-connect-server.sh"
                - "--packages"
                - "org.apache.spark:spark-connect_2.12:3.5.0"
                - "--master"
                - "spark://spark-master-0.spark-headless.spark-operator.svc.cluster.local:7077"
            preStop:
              exec:
                command:
                - "/opt/bitnami/spark/sbin/stop-connect-server.sh"
          name: spark-master
          ports:
          - containerPort: 8080
            name: http
            protocol: TCP
          - containerPort: 7077
            name: cluster
            protocol: TCP
          - containerPort: 15002
            name: grpc
            protocol: TCP
          readinessProbe:
            failureThreshold: 6
            httpGet:
              path: /
              port: 8080
              scheme: HTTP
            initialDelaySeconds: 30
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          resources: {}
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
              - ALL
            privileged: false
            readOnlyRootFilesystem: false
            runAsNonRoot: true
            runAsUser: 1001
            seccompProfile:
              type: RuntimeDefault
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          volumeMounts:
          - mountPath: /experiments
            name: spark-experiments
          workingDir: /experiments
        dnsPolicy: ClusterFirst
        restartPolicy: Always
        schedulerName: default-scheduler
        securityContext:
          fsGroup: 1001
          runAsGroup: 0
          runAsUser: 1001
          seLinuxOptions: {}
        serviceAccount: spark
        serviceAccountName: spark
        terminationGracePeriodSeconds: 30
        volumes:
        - name: spark-experiments
          persistentVolumeClaim:
            claimName: spark-pvc
    updateStrategy:
      type: RollingUpdate
- apiVersion: apps/v1
  kind: StatefulSet
  metadata:
    annotations:
      meta.helm.sh/release-name: spark
      meta.helm.sh/release-namespace: spark-operator
    labels:
      app.kubernetes.io/component: worker
      app.kubernetes.io/instance: spark
      app.kubernetes.io/managed-by: Helm
      app.kubernetes.io/name: spark
      app.kubernetes.io/version: 3.5.0
      helm.sh/chart: spark-8.1.7
    name: spark-worker
    namespace: spark-operator
  spec:
    persistentVolumeClaimRetentionPolicy:
      whenDeleted: Retain
      whenScaled: Retain
    podManagementPolicy: OrderedReady
    replicas: 2
    revisionHistoryLimit: 10
    selector:
      matchLabels:
        app.kubernetes.io/component: worker
        app.kubernetes.io/instance: spark
        app.kubernetes.io/name: spark
    serviceName: spark-headless
    template:
      metadata:
        creationTimestamp: null
        labels:
          app.kubernetes.io/component: worker
          app.kubernetes.io/instance: spark
          app.kubernetes.io/managed-by: Helm
          app.kubernetes.io/name: spark
          app.kubernetes.io/version: 3.5.0
          helm.sh/chart: spark-8.1.7
      spec:
        affinity:
          podAntiAffinity:
            preferredDuringSchedulingIgnoredDuringExecution:
            - podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app.kubernetes.io/component: worker
                    app.kubernetes.io/instance: spark
                    app.kubernetes.io/name: spark
                topologyKey: kubernetes.io/hostname
              weight: 1
        containers:
        - env:
          - name: SPARK_MODE
            value: worker
          - name: BITNAMI_DEBUG
            value: "false"
          - name: SPARK_DAEMON_MEMORY
          - name: SPARK_WORKER_WEBUI_PORT
            value: "8080"
          - name: SPARK_DAEMON_JAVA_OPTS
          - name: SPARK_MASTER_URL
            value: spark://spark-master-svc:7077
          - name: SPARK_WORKER_OPTS
            value: -Dspark.ui.reverseProxy=true -Dspark.ui.reverseProxyUrl=https://spark.******************************
          image: docker.io/bitnami/spark:3.5.0-debian-11-r17
          imagePullPolicy: IfNotPresent
          livenessProbe:
            failureThreshold: 6
            httpGet:
              path: /
              port: 8080
              scheme: HTTP
            initialDelaySeconds: 180
            periodSeconds: 20
            successThreshold: 1
            timeoutSeconds: 5
          name: spark-worker
          ports:
          - containerPort: 8080
            name: http
            protocol: TCP
          readinessProbe:
            failureThreshold: 6
            httpGet:
              path: /
              port: 8080
              scheme: HTTP
            initialDelaySeconds: 30
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          resources: {}
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
              - ALL
            privileged: false
            readOnlyRootFilesystem: false
            runAsNonRoot: true
            runAsUser: 1001
            seccompProfile:
              type: RuntimeDefault
          startupProbe:
            failureThreshold: 6
            httpGet:
              path: /
              port: 8080
              scheme: HTTP
            initialDelaySeconds: 30
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          volumeMounts:
          - mountPath: /experiments
            name: spark-experiments
          workingDir: /experiments
        dnsPolicy: ClusterFirst
        restartPolicy: Always
        schedulerName: default-scheduler
        securityContext:
          fsGroup: 1001
          seLinuxOptions: {}
        serviceAccount: spark
        serviceAccountName: spark
        terminationGracePeriodSeconds: 30
        volumes:
        - name: spark-experiments
          persistentVolumeClaim:
            claimName: spark-pvc
    updateStrategy:
      type: RollingUpdate
kind: List
metadata:
  resourceVersion: ""
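The master container's postStart hook launches a Spark Connect server, which listens on the gRPC port 15002 declared in the master's container ports. A minimal client-side sketch follows, assuming the port has been made reachable from your machine (for example via `kubectl port-forward pod/spark-master-0 15002:15002 -n spark-operator`; the manifest itself does not show a Service exposing this port) and that `pyspark` 3.5 with the Connect extras is installed locally:

from pyspark.sql import SparkSession

# Connect to the Spark Connect endpoint started by the postStart hook.
# "localhost:15002" assumes a local port-forward to spark-master-0;
# substitute the in-cluster or ingress address you actually expose.
spark = (
    SparkSession.builder
    .remote("sc://localhost:15002")
    .getOrCreate()
)

# Quick smoke test: run a trivial job on the cluster.
df = spark.range(10)
print(df.count())

Because the workload runs behind Spark Connect, the client only needs the gRPC endpoint; it does not talk to the master's 7077 cluster port directly.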