+ [[ -f /env/_master ]]
+ [[ -f /usr/bin/ovn-appctl ]]
+ OVNCTL_PATH=/usr/share/ovn/scripts/ovn-ctl
+ MASTER_IP=10.19.138.33
+ [[ 10.19.138.38 == \1\0\.\1\9\.\1\3\8\.\3\3 ]]
+ echo 'joining cluster at 10.19.138.33'
joining cluster at 10.19.138.33
++ bracketify 10.19.138.38
++ case "$1" in
++ echo 10.19.138.38
++ bracketify 10.19.138.33
++ case "$1" in
++ echo 10.19.138.33
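
Note: bracketify is a helper in the ovnkube startup script. Judging by the trace (a bare case statement that echoes the IPv4 addresses unchanged), it wraps IPv6 literals in square brackets so a ":port" suffix can be appended safely. A minimal sketch, assuming that behavior:

    # Sketch of bracketify as suggested by the trace above (not the verbatim script):
    bracketify() {
      case "$1" in
        *:*) echo "[$1]" ;;   # contains ':' -> treat as IPv6 literal, add brackets
        *)   echo "$1"   ;;   # IPv4 address or hostname -> pass through unchanged
      esac
    }
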
+ exec /usr/share/ovn/scripts/ovn-ctl --db-sb-cluster-local-port=9644 --db-sb-cluster-remote-port=9644 --db-sb-cluster-local-addr=10.19.138.38 --db-sb-cluster-remote-addr=10.19.138.33 --no-monitor --db-sb-cluster-local-proto=ssl --db-sb-cluster-remote-proto=ssl --ovn-sb-db-ssl-key=/ovn-cert/tls.key --ovn-sb-db-ssl-cert=/ovn-cert/tls.crt --ovn-sb-db-ssl-ca-cert=/ovn-ca/ca-bundle.crt '--ovn-sb-log=-vconsole:info -vfile:off' run_sb_ovsdb
2020-05-18T15:20:00Z|00001|vlog|INFO|opened log file /var/log/ovn/ovsdb-server-sb.log
ovsdb-server: ovsdb error: server does not belong to cluster
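
Note: "server does not belong to cluster" from ovsdb-server usually means the local Southbound database file carries a cluster ID (CID) that does not match the cluster it is being asked to join, e.g. a leftover DB file from an earlier deployment on this host. A hedged way to confirm the mismatch; the paths below are the ovn-ctl defaults and may differ in this container image, and the cluster/status query only works while the local ovsdb-server is still running:

    # Compare the cluster ID recorded in the local SB database file
    # with what the running cluster members report.
    ovsdb-tool db-cid /etc/ovn/ovnsb_db.db        # CID stored in the local file
    ovsdb-tool db-sid /etc/ovn/ovnsb_db.db        # this server's own member ID
    ovn-appctl -t /var/run/ovn/ovnsb_db.ctl cluster/status OVN_Southbound
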
+ [[ -f /env/_master ]]
+ hybrid_overlay_flags=
+ [[ -n '' ]]
++ ovn-nbctl --pidfile=/var/run/ovn/ovn-nbctl.pid --detach -p /ovn-cert/tls.key -c /ovn-cert/tls.crt -C /ovn-ca/ca-bundle.crt --db ssl:10.19.138.33:9641,ssl:10.19.138.37:9641,ssl:10.19.138.38:9641
+ export OVN_NB_DAEMON=/var/run/openvswitch/ovn-nbctl.10.ctl
+ OVN_NB_DAEMON=/var/run/openvswitch/ovn-nbctl.10.ctl
+ ln -sf /var/run/openvswitch/ovn-nbctl.10.ctl /var/run/ovn/
+ exec /usr/bin/ovnkube --init-master kni1-vmaster-3 --config-file=/run/ovnkube-config/ovnkube.conf --ovn-empty-lb-events --loglevel 4 --metrics-bind-address 0.0.0.0:9102 --sb-address ssl://10.19.138.33:9642,ssl://10.19.138.37:9642,ssl://10.19.138.38:9642 --sb-client-privkey /ovn-cert/tls.key --sb-client-cert /ovn-cert/tls.crt --sb-client-cacert /ovn-ca/ca-bundle.crt --nb-address ssl://10.19.138.33:9641,ssl://10.19.138.37:9641,ssl://10.19.138.38:9641 --nb-client-privkey /ovn-cert/tls.key --nb-client-cert /ovn-cert/tls.crt --nb-client-cacert /ovn-ca/ca-bundle.crt --nbctl-daemon-mode true
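
Note: the ovn-nbctl invocation above starts nbctl in daemon mode (--detach --pidfile) with a persistent SSL connection to the NB cluster and prints the path of its control socket, which is exported as OVN_NB_DAEMON and symlinked into /var/run/ovn/ for ovnkube (--nbctl-daemon-mode true). Subsequent nbctl calls are then forwarded to the daemon instead of re-dialing the cluster. A small usage sketch, assuming the socket path exported above is still valid:

    # Later nbctl commands reuse the daemon's cached NB connection via the socket:
    export OVN_NB_DAEMON=/var/run/openvswitch/ovn-nbctl.10.ctl
    ovn-nbctl show    # answered through the daemon, no new SSL session needed
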
I0518 15:14:10.456655 1 config.go:1152] Parsed config file /run/ovnkube-config/ovnkube.conf
I0518 15:14:10.456722 1 config.go:1153] Parsed config: {Default:{MTU:1400 ConntrackZone:64000 EncapType:geneve EncapIP: EncapPort:6081 InactivityProbe:100000 OpenFlowProbe:180 RawClusterSubnets:10.128.0.0/14/23 ClusterSubnets:[]} Logging:{File: Level:4} CNI:{ConfDir:/etc/cni/net.d Plugin:ovn-k8s-cni-overlay WinHNSNetworkID:} Kubernetes:{Kubeconfig: CACert: APIServer:https://api-int.kni1.cloud.lab.eng.bos.redhat.com:6443 Token: CompatServiceCIDR:172.30.0.0/16 RawServiceCIDRs:172.16.1.0/24 ServiceCIDRs:[] OVNConfigNamespace:openshift-ovn-kubernetes MetricsBindAddress: MetricsEnablePprof:false OVNEmptyLbEvents:false PodIP: RawNoHostSubnetNodes: NoHostSubnetNodes:nil} OvnNorth:{Address: PrivKey: Cert: CACert: Scheme: northbound:false externalID: exec:<nil>} OvnSouth:{Address: PrivKey: Cert: CACert: Scheme: northbound:false externalID: exec:<nil>} Gateway:{Mode:local Interface: NextHop: VLANID:0 NodeportEnable:true} MasterHA:{ElectionLeaseDuration:60 ElectionRenewDeadline:30 ElectionRetryPeriod:20} HybridOverlay:{Enabled:false RawClusterSubnets:10.132.0.0/14/23 ClusterSubnets:[]}}
I0518 15:14:10.460690 1 reflector.go:150] Starting reflector *v1.Endpoints (12h0m0s) from k8s.io/client-go/informers/factory.go:135
I0518 15:14:10.460725 1 reflector.go:185] Listing and watching *v1.Endpoints from k8s.io/client-go/informers/factory.go:135
I0518 15:14:10.460819 1 reflector.go:150] Starting reflector *v1.Pod (12h0m0s) from k8s.io/client-go/informers/factory.go:135
I0518 15:14:10.460855 1 reflector.go:185] Listing and watching *v1.Pod from k8s.io/client-go/informers/factory.go:135
I0518 15:14:10.461287 1 reflector.go:150] Starting reflector *v1.NetworkPolicy (12h0m0s) from k8s.io/client-go/informers/factory.go:135
I0518 15:14:10.461314 1 reflector.go:185] Listing and watching *v1.NetworkPolicy from k8s.io/client-go/informers/factory.go:135
I0518 15:14:10.463082 1 reflector.go:150] Starting reflector *v1.Namespace (12h0m0s) from k8s.io/client-go/informers/factory.go:135
I0518 15:14:10.463107 1 reflector.go:185] Listing and watching *v1.Namespace from k8s.io/client-go/informers/factory.go:135
I0518 15:14:10.463323 1 reflector.go:150] Starting reflector *v1.Node (12h0m0s) from k8s.io/client-go/informers/factory.go:135
I0518 15:14:10.463345 1 reflector.go:185] Listing and watching *v1.Node from k8s.io/client-go/informers/factory.go:135
I0518 15:14:10.463431 1 reflector.go:150] Starting reflector *v1.Service (12h0m0s) from k8s.io/client-go/informers/factory.go:135
I0518 15:14:10.463443 1 reflector.go:185] Listing and watching *v1.Service from k8s.io/client-go/informers/factory.go:135
I0518 15:14:10.560410 1 shared_informer.go:227] caches populated
I0518 15:14:10.560454 1 shared_informer.go:227] caches populated
I0518 15:14:10.560463 1 shared_informer.go:227] caches populated
I0518 15:14:10.560471 1 shared_informer.go:227] caches populated
I0518 15:14:10.560496 1 shared_informer.go:227] caches populated
I0518 15:14:10.560501 1 shared_informer.go:227] caches populated
I0518 15:14:10.560622 1 ovnkube.go:301] Watching config file /run/ovnkube-config/ovnkube.conf for changes
I0518 15:14:10.560698 1 ovnkube.go:301] Watching config file /run/ovnkube-config/..2020_05_18_15_14_08.921305988/ovnkube.conf for changes
I0518 15:14:10.564790 1 leaderelection.go:242] attempting to acquire leader lease openshift-ovn-kubernetes/ovn-kubernetes-master...
I0518 15:14:10.596314 1 leaderelection.go:352] lock is held by kni1-vmaster-2 and has not yet expired
I0518 15:14:10.596352 1 leaderelection.go:247] failed to acquire lease openshift-ovn-kubernetes/ovn-kubernetes-master
I0518 15:14:10.596368 1 master.go:81] lost the election to kni1-vmaster-2; in standby mode
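
Note: ovnkube on this master lost the client-go leader election for openshift-ovn-kubernetes/ovn-kubernetes-master to kni1-vmaster-2 and stays in standby, retrying roughly every ElectionRetryPeriod (20s per the parsed config above), so the repeated "failed to acquire lease" lines that follow are expected on a non-leader master. A hedged way to see who currently holds the lock; the exact resource kind backing the lock depends on the ovn-kubernetes version (commonly a ConfigMap in this era, otherwise check Leases in the same namespace):

    # Inspect the leader-election record for the current holder and renew time.
    oc -n openshift-ovn-kubernetes get configmap ovn-kubernetes-master -o yaml | grep -A1 leader
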
I0518 15:14:15.990990 1 reflector.go:418] k8s.io/client-go/informers/factory.go:135: Watch close - *v1.NetworkPolicy total 0 items received
I0518 15:14:15.995238 1 reflector.go:418] k8s.io/client-go/informers/factory.go:135: Watch close - *v1.Namespace total 0 items received
I0518 15:14:16.054941 1 reflector.go:324] k8s.io/client-go/informers/factory.go:135: watch of *v1.Namespace ended with: too old resource version: 60488 (212395)
I0518 15:14:17.055110 1 reflector.go:185] Listing and watching *v1.Namespace from k8s.io/client-go/informers/factory.go:135
I0518 15:14:22.834774 1 reflector.go:324] k8s.io/client-go/informers/factory.go:135: watch of *v1.NetworkPolicy ended with: too old resource version: 60491 (212436)
I0518 15:14:23.836758 1 reflector.go:185] Listing and watching *v1.NetworkPolicy from k8s.io/client-go/informers/factory.go:135
I0518 15:14:32.946070 1 leaderelection.go:352] lock is held by kni1-vmaster-2 and has not yet expired
I0518 15:14:32.946102 1 leaderelection.go:247] failed to acquire lease openshift-ovn-kubernetes/ovn-kubernetes-master
I0518 15:15:02.090701 1 leaderelection.go:352] lock is held by kni1-vmaster-2 and has not yet expired
I0518 15:15:02.090753 1 leaderelection.go:247] failed to acquire lease openshift-ovn-kubernetes/ovn-kubernetes-master
I0518 15:15:29.747155 1 leaderelection.go:352] lock is held by kni1-vmaster-2 and has not yet expired
I0518 15:15:29.747191 1 leaderelection.go:247] failed to acquire lease openshift-ovn-kubernetes/ovn-kubernetes-master
I0518 15:16:01.004193 1 leaderelection.go:352] lock is held by kni1-vmaster-2 and has not yet expired
I0518 15:16:01.004218 1 leaderelection.go:247] failed to acquire lease openshift-ovn-kubernetes/ovn-kubernetes-master
I0518 15:16:27.800639 1 leaderelection.go:352] lock is held by kni1-vmaster-2 and has not yet expired
I0518 15:16:27.800665 1 leaderelection.go:247] failed to acquire lease openshift-ovn-kubernetes/ovn-kubernetes-master
I0518 15:16:54.837996 1 leaderelection.go:352] lock is held by kni1-vmaster-2 and has not yet expired
I0518 15:16:54.838019 1 leaderelection.go:247] failed to acquire lease openshift-ovn-kubernetes/ovn-kubernetes-master
I0518 15:17:31.139796 1 leaderelection.go:352] lock is held by kni1-vmaster-2 and has not yet expired
I0518 15:17:31.139829 1 leaderelection.go:247] failed to acquire lease openshift-ovn-kubernetes/ovn-kubernetes-master
I0518 15:17:56.389790 1 leaderelection.go:352] lock is held by kni1-vmaster-2 and has not yet expired
I0518 15:17:56.389840 1 leaderelection.go:247] failed to acquire lease openshift-ovn-kubernetes/ovn-kubernetes-master
I0518 15:18:21.270073 1 leaderelection.go:352] lock is held by kni1-vmaster-2 and has not yet expired
I0518 15:18:21.270098 1 leaderelection.go:247] failed to acquire lease openshift-ovn-kubernetes/ovn-kubernetes-master
I0518 15:18:49.938427 1 leaderelection.go:352] lock is held by kni1-vmaster-2 and has not yet expired
I0518 15:18:49.938610 1 leaderelection.go:247] failed to acquire lease openshift-ovn-kubernetes/ovn-kubernetes-master
I0518 15:19:23.638032 1 leaderelection.go:352] lock is held by kni1-vmaster-2 and has not yet expired
I0518 15:19:23.638053 1 leaderelection.go:247] failed to acquire lease openshift-ovn-kubernetes/ovn-kubernetes-master
I0518 15:19:29.488896 1 reflector.go:418] k8s.io/client-go/informers/factory.go:135: Watch close - *v1.Endpoints total 24 items received
I0518 15:19:56.518113 1 reflector.go:418] k8s.io/client-go/informers/factory.go:135: Watch close - *v1.Pod total 31 items received
I0518 15:20:04.342585 1 leaderelection.go:352] lock is held by kni1-vmaster-2 and has not yet expired
I0518 15:20:04.342610 1 leaderelection.go:247] failed to acquire lease openshift-ovn-kubernetes/ovn-kubernetes-master
+ [[ -f /env/_master ]]
+ exec ovn-northd --no-chdir -vconsole:info -vfile:off --ovnnb-db ssl:10.19.138.33:9641,ssl:10.19.138.37:9641,ssl:10.19.138.38:9641 --ovnsb-db ssl:10.19.138.33:9642,ssl:10.19.138.37:9642,ssl:10.19.138.38:9642 -p /ovn-cert/tls.key -c /ovn-cert/tls.crt -C /ovn-ca/ca-bundle.crt
2020-05-18T15:14:09Z|00001|reconnect|INFO|ssl:10.19.138.33:9641: connecting...
2020-05-18T15:14:09Z|00002|reconnect|INFO|ssl:10.19.138.33:9642: connecting...
2020-05-18T15:14:09Z|00003|reconnect|INFO|ssl:10.19.138.33:9641: connected
2020-05-18T15:14:09Z|00004|ovsdb_idl|INFO|ssl:10.19.138.33:9641: clustered database server is not cluster leader; trying another server
2020-05-18T15:14:09Z|00005|reconnect|INFO|ssl:10.19.138.33:9641: connection attempt timed out
2020-05-18T15:14:09Z|00006|reconnect|INFO|ssl:10.19.138.38:9641: connecting...
2020-05-18T15:14:09Z|00007|reconnect|INFO|ssl:10.19.138.38:9641: connection attempt failed (Connection refused)
2020-05-18T15:14:09Z|00008|reconnect|INFO|ssl:10.19.138.37:9641: connecting...
2020-05-18T15:14:09Z|00009|reconnect|INFO|ssl:10.19.138.33:9642: connected
2020-05-18T15:14:09Z|00010|ovn_northd|INFO|ovn-northd lock acquired. This ovn-northd instance is now active.
2020-05-18T15:14:09Z|00011|ovsdb_idl|INFO|ssl:10.19.138.33:9642: clustered database server is not cluster leader; trying another server
2020-05-18T15:14:09Z|00012|reconnect|INFO|ssl:10.19.138.33:9642: connection attempt timed out
2020-05-18T15:14:09Z|00013|reconnect|INFO|ssl:10.19.138.38:9642: connecting...
2020-05-18T15:14:09Z|00014|reconnect|INFO|ssl:10.19.138.38:9642: connection attempt failed (Connection refused)
2020-05-18T15:14:09Z|00015|reconnect|INFO|ssl:10.19.138.37:9642: connecting...
2020-05-18T15:14:09Z|00016|reconnect|INFO|ssl:10.19.138.37:9641: connected
2020-05-18T15:14:09Z|00017|reconnect|INFO|ssl:10.19.138.37:9642: connected
2020-05-18T15:14:09Z|00018|ovn_northd|INFO|ovn-northd lock lost. This ovn-northd instance is now on standby.
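
Note: ovn-northd instances coordinate through a lock in the Southbound database, so this instance briefly went active at 15:14:09 and then dropped to standby once it reconnected toward the actual cluster leader; that is normal for a non-leader master. A hedged way to check the local northd's current role; the target name assumes the default ovn-northd pidfile/control socket in this container:

    # Ask the local ovn-northd whether it is currently active or standby.
    ovn-appctl -t ovn-northd status
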
+ [[ -f /env/_master ]]
+ [[ -f /usr/bin/ovn-appctl ]]
+ OVNCTL_PATH=/usr/share/ovn/scripts/ovn-ctl
+ MASTER_IP=10.19.138.33
+ [[ 10.19.138.38 == \1\0\.\1\9\.\1\3\8\.\3\3 ]]
++ bracketify 10.19.138.38
++ case "$1" in
++ echo 10.19.138.38
++ bracketify 10.19.138.33
++ case "$1" in
++ echo 10.19.138.33
+ exec /usr/share/ovn/scripts/ovn-ctl --db-nb-cluster-local-port=9643 --db-nb-cluster-remote-port=9643 --db-nb-cluster-local-addr=10.19.138.38 --db-nb-cluster-remote-addr=10.19.138.33 --no-monitor --db-nb-cluster-local-proto=ssl --db-nb-cluster-remote-proto=ssl --ovn-nb-db-ssl-key=/ovn-cert/tls.key --ovn-nb-db-ssl-cert=/ovn-cert/tls.crt --ovn-nb-db-ssl-ca-cert=/ovn-ca/ca-bundle.crt '--ovn-nb-log=-vconsole:info -vfile:off' run_nb_ovsdb
2020-05-18T15:19:59Z|00001|vlog|INFO|opened log file /var/log/ovn/ovsdb-server-nb.log
ovsdb-server: ovsdb error: server does not belong to cluster
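
Note: the Northbound ovsdb-server on this member (10.19.138.38) fails with the same "does not belong to cluster" error as the Southbound one above, so both local database files appear to carry a stale cluster ID. A hedged sketch of the usual recovery on the affected member only; paths are the ovn-ctl defaults and may differ in this image, and in OpenShift this is normally done by clearing the member's DB data on the host rather than inside the pod:

    # On the broken member only: remove the stale clustered DB files so that
    # ovn-ctl re-creates them and re-joins the existing cluster on next start.
    rm -f /etc/ovn/ovnnb_db.db /etc/ovn/ovnsb_db.db
    # Then restart the nbdb/sbdb containers so run_nb_ovsdb / run_sb_ovsdb run
    # again with --db-*-cluster-remote-addr pointing at 10.19.138.33.
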