# Rook CephCluster manifest (Pastebin page header removed).
---
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  creationTimestamp: "2021-06-07T10:12:50Z"
  finalizers:
    - cephcluster.ceph.rook.io
  generation: 1
  labels:
    app: ocs-storagecluster
  # managedFields is server-side-apply bookkeeping written by the API server;
  # it can be omitted when re-applying this manifest by hand.
  # NOTE(review): nesting below reconstructed from a flattened paste — the
  # fieldsV1 tree mirrors the spec/status structure of this same resource.
  managedFields:
    - apiVersion: ceph.rook.io/v1
      fieldsType: FieldsV1
      fieldsV1:
        f:metadata:
          f:labels:
            .: {}
            f:app: {}
          f:ownerReferences: {}
        f:spec:
          .: {}
          f:cephVersion:
            .: {}
            f:allowUnsupported: {}
            f:image: {}
          f:cleanupPolicy:
            .: {}
            f:sanitizeDisks: {}
          f:continueUpgradeAfterChecksEvenIfNotHealthy: {}
          f:crashCollector: {}
          f:dashboard: {}
          f:dataDirHostPath: {}
          f:disruptionManagement:
            .: {}
            f:machineDisruptionBudgetNamespace: {}
            f:managePodBudgets: {}
          f:external: {}
          f:healthCheck:
            .: {}
            f:daemonHealth:
              .: {}
              f:mon: {}
              f:osd: {}
              f:status: {}
          f:logCollector:
            .: {}
            f:enabled: {}
            f:periodicity: {}
          f:mgr:
            .: {}
            f:modules: {}
          f:mon:
            .: {}
            f:count: {}
          f:monitoring:
            .: {}
            f:enabled: {}
            f:rulesNamespace: {}
          f:network:
            .: {}
            f:dualStack: {}
            f:ipFamily: {}
            f:provider: {}
            f:selectors:
              .: {}
              f:cluster: {}
              f:public: {}
          f:placement:
            .: {}
            f:all:
              .: {}
              f:nodeAffinity:
                .: {}
                f:requiredDuringSchedulingIgnoredDuringExecution:
                  .: {}
                  f:nodeSelectorTerms: {}
              f:tolerations: {}
            f:arbiter:
              .: {}
              f:tolerations: {}
            f:mon:
              .: {}
              f:nodeAffinity:
                .: {}
                f:requiredDuringSchedulingIgnoredDuringExecution:
                  .: {}
                  f:nodeSelectorTerms: {}
              f:podAntiAffinity:
                .: {}
                f:requiredDuringSchedulingIgnoredDuringExecution: {}
          f:priorityClassNames:
            .: {}
            f:mgr: {}
            f:mon: {}
            f:osd: {}
          f:resources:
            .: {}
            f:mds:
              .: {}
              f:limits:
                .: {}
                f:cpu: {}
                f:memory: {}
              f:requests:
                .: {}
                f:cpu: {}
                f:memory: {}
            f:mgr:
              .: {}
              f:limits:
                .: {}
                f:cpu: {}
                f:memory: {}
              f:requests:
                .: {}
                f:cpu: {}
                f:memory: {}
            f:mon:
              .: {}
              f:limits:
                .: {}
                f:cpu: {}
                f:memory: {}
              f:requests:
                .: {}
                f:cpu: {}
                f:memory: {}
            f:rgw:
              .: {}
              f:limits:
                .: {}
                f:cpu: {}
                f:memory: {}
              f:requests:
                .: {}
                f:cpu: {}
                f:memory: {}
          f:security:
            .: {}
            f:kms: {}
          f:storage:
            .: {}
            f:storageClassDeviceSets: {}
      manager: ocs-operator
      operation: Update
      time: "2021-06-07T10:12:50Z"
    - apiVersion: ceph.rook.io/v1
      fieldsType: FieldsV1
      fieldsV1:
        f:metadata:
          f:finalizers: {}
        f:status:
          .: {}
          f:ceph:
            .: {}
            f:capacity: {}
            f:details:
              .: {}
              f:AUTH_INSECURE_GLOBAL_ID_RECLAIM_ALLOWED:
                .: {}
                f:message: {}
                f:severity: {}
              f:MDS_SLOW_METADATA_IO:
                .: {}
                f:message: {}
                f:severity: {}
              f:PG_AVAILABILITY:
                .: {}
                f:message: {}
                f:severity: {}
            f:health: {}
            f:lastChecked: {}
            f:versions:
              .: {}
              f:mds:
                .: {}
                f:ceph version 16.2.4 (3cbe25cde3cfa028984618ad32de9edc4c1eaed0) pacific (stable): {}
              f:mgr:
                .: {}
                f:ceph version 16.2.4 (3cbe25cde3cfa028984618ad32de9edc4c1eaed0) pacific (stable): {}
              f:mon:
                .: {}
                f:ceph version 16.2.4 (3cbe25cde3cfa028984618ad32de9edc4c1eaed0) pacific (stable): {}
              f:overall:
                .: {}
                f:ceph version 16.2.4 (3cbe25cde3cfa028984618ad32de9edc4c1eaed0) pacific (stable): {}
          f:conditions: {}
          f:message: {}
          f:phase: {}
          f:state: {}
          f:version:
            .: {}
            f:image: {}
            f:version: {}
      manager: rook
      operation: Update
      time: "2021-06-07T10:16:07Z"
  name: ocs-storagecluster-cephcluster
  namespace: openshift-storage
  # Owned by the StorageCluster CR; deleting that owner cascades to this cluster.
  ownerReferences:
    - apiVersion: ocs.openshift.io/v1
      blockOwnerDeletion: true
      controller: true
      kind: StorageCluster
      name: ocs-storagecluster
      uid: 012a9c23-846b-43fd-b1cd-41f79a741516
  resourceVersion: "11994181"
  selfLink: /apis/ceph.rook.io/v1/namespaces/openshift-storage/cephclusters/ocs-storagecluster-cephcluster
  uid: 0090b395-f582-4077-b36d-838db2b8b6f1
spec:
  cephVersion:
    # allowUnsupported permits a Ceph release Rook has not validated.
    allowUnsupported: true
    image: ceph/daemon-base:latest-pacific
  cleanupPolicy:
    sanitizeDisks: {}
  continueUpgradeAfterChecksEvenIfNotHealthy: true
  crashCollector: {}
  dashboard: {}
  dataDirHostPath: /var/lib/rook
  disruptionManagement:
    machineDisruptionBudgetNamespace: openshift-machine-api
    managePodBudgets: true
  external: {}
  healthCheck:
    daemonHealth:
      mon: {}
      osd: {}
      status: {}
  logCollector:
    enabled: true
    periodicity: 24h
  mgr:
    modules:
      - enabled: true
        name: pg_autoscaler
      - enabled: true
        name: balancer
  mon:
    count: 3
  monitoring:
    enabled: true
    rulesNamespace: openshift-storage
  # IPv6 dual-stack networking over Multus-managed networks.
  network:
    dualStack: true
    ipFamily: IPv6
    provider: multus
    selectors:
      cluster: ipv6
      public: ipv6
  placement:
    all:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: cluster.ocs.openshift.io/openshift-storage
                  operator: Exists
      tolerations:
        - effect: NoSchedule
          key: node.ocs.openshift.io/storage
          operator: Equal
          value: "true"
    arbiter:
      tolerations:
        - effect: NoSchedule
          key: node-role.kubernetes.io/master
          operator: Exists
    mon:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: cluster.ocs.openshift.io/openshift-storage
                  operator: Exists
      # Spread mons across racks: never co-locate two rook-ceph-mon pods
      # in the same topology.rook.io/rack domain.
      podAntiAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
                - key: app
                  operator: In
                  values:
                    - rook-ceph-mon
            topologyKey: topology.rook.io/rack
  priorityClassNames:
    mgr: system-node-critical
    mon: system-node-critical
    osd: system-node-critical
  # CPU counts are quoted strings on purpose: unquoted they would parse as
  # integers/floats and some tooling round-trips them incorrectly.
  resources:
    mds:
      limits:
        cpu: "3"
        memory: 8Gi
      requests:
        cpu: "3"
        memory: 8Gi
    mgr:
      limits:
        cpu: "1"
        memory: 3Gi
      requests:
        cpu: "1"
        memory: 3Gi
    mon:
      limits:
        cpu: "1"
        memory: 2Gi
      requests:
        cpu: "1"
        memory: 2Gi
    rgw:
      limits:
        cpu: "2"
        memory: 4Gi
      requests:
        cpu: "2"
        memory: 4Gi
  security:
    kms: {}
  storage:
    # Three single-OSD device sets, one PVC each, spread one-per-host.
    storageClassDeviceSets:
      - count: 1
        name: example-deviceset-0
        placement:
          topologySpreadConstraints:
            - labelSelector:
                matchExpressions:
                  - key: ceph.rook.io/pvc
                    operator: Exists
              maxSkew: 1
              topologyKey: kubernetes.io/hostname
              whenUnsatisfiable: DoNotSchedule
        preparePlacement:
          topologySpreadConstraints:
            - labelSelector:
                matchExpressions:
                  - key: ceph.rook.io/pvc
                    operator: Exists
              maxSkew: 1
              topologyKey: kubernetes.io/hostname
              whenUnsatisfiable: DoNotSchedule
        resources:
          limits:
            cpu: "2"
            memory: 5Gi
          requests:
            cpu: "2"
            memory: 5Gi
        volumeClaimTemplates:
          - metadata:
              annotations:
                crushDeviceClass: ""
            spec:
              accessModes:
                - ReadWriteOnce
              resources:
                requests:
                  storage: 100Gi
              storageClassName: block
              volumeMode: Block
            status: {}
      - count: 1
        name: example-deviceset-1
        placement:
          topologySpreadConstraints:
            - labelSelector:
                matchExpressions:
                  - key: ceph.rook.io/pvc
                    operator: Exists
              maxSkew: 1
              topologyKey: kubernetes.io/hostname
              whenUnsatisfiable: DoNotSchedule
        preparePlacement:
          topologySpreadConstraints:
            - labelSelector:
                matchExpressions:
                  - key: ceph.rook.io/pvc
                    operator: Exists
              maxSkew: 1
              topologyKey: kubernetes.io/hostname
              whenUnsatisfiable: DoNotSchedule
        resources:
          limits:
            cpu: "2"
            memory: 5Gi
          requests:
            cpu: "2"
            memory: 5Gi
        volumeClaimTemplates:
          - metadata:
              annotations:
                crushDeviceClass: ""
            spec:
              accessModes:
                - ReadWriteOnce
              resources:
                requests:
                  storage: 100Gi
              storageClassName: block
              volumeMode: Block
            status: {}
      - count: 1
        name: example-deviceset-2
        placement:
          topologySpreadConstraints:
            - labelSelector:
                matchExpressions:
                  - key: ceph.rook.io/pvc
                    operator: Exists
              maxSkew: 1
              topologyKey: kubernetes.io/hostname
              whenUnsatisfiable: DoNotSchedule
        preparePlacement:
          topologySpreadConstraints:
            - labelSelector:
                matchExpressions:
                  - key: ceph.rook.io/pvc
                    operator: Exists
              maxSkew: 1
              topologyKey: kubernetes.io/hostname
              whenUnsatisfiable: DoNotSchedule
        resources:
          limits:
            cpu: "2"
            memory: 5Gi
          requests:
            cpu: "2"
            memory: 5Gi
        volumeClaimTemplates:
          - metadata:
              annotations:
                crushDeviceClass: ""
            spec:
              accessModes:
                - ReadWriteOnce
              resources:
                requests:
                  storage: 100Gi
              storageClassName: block
              volumeMode: Block
            status: {}
# status is written by the Rook operator; shown here as observed state.
status:
  ceph:
    capacity: {}
    details:
      AUTH_INSECURE_GLOBAL_ID_RECLAIM_ALLOWED:
        message: mons are allowing insecure global_id reclaim
        severity: HEALTH_WARN
      MDS_SLOW_METADATA_IO:
        message: 1 MDSs report slow metadata IOs
        severity: HEALTH_WARN
      PG_AVAILABILITY:
        # Single quotes required: the value contains ': ' which would
        # otherwise start a nested mapping.
        message: 'Reduced data availability: 176 pgs inactive'
        severity: HEALTH_WARN
    health: HEALTH_WARN
    lastChecked: "2021-06-07T10:27:16Z"
    # Map keys are full ceph version banners; values are daemon counts.
    versions:
      mds:
        ceph version 16.2.4 (3cbe25cde3cfa028984618ad32de9edc4c1eaed0) pacific (stable): 2
      mgr:
        ceph version 16.2.4 (3cbe25cde3cfa028984618ad32de9edc4c1eaed0) pacific (stable): 1
      mon:
        ceph version 16.2.4 (3cbe25cde3cfa028984618ad32de9edc4c1eaed0) pacific (stable): 3
      overall:
        ceph version 16.2.4 (3cbe25cde3cfa028984618ad32de9edc4c1eaed0) pacific (stable): 6
  conditions:
    - lastHeartbeatTime: "2021-06-07T10:27:16Z"
      lastTransitionTime: "2021-06-07T10:14:05Z"
      message: Cluster created successfully
      reason: ClusterCreated
      # status is quoted because bare True would parse as a boolean;
      # Kubernetes condition status is the string "True"/"False"/"Unknown".
      status: "True"
      type: Ready
  message: Cluster created successfully
  phase: Ready
  state: Created
  version:
    image: ceph/daemon-base:latest-pacific
    version: 16.2.4-0
# (Pastebin page footer removed.)