#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# A library of helper functions and constants for the local config.

# Use the config file specified in $KUBE_CONFIG_FILE, or default to
# config-default.sh.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/vsphere/config-common.sh"
source "${KUBE_ROOT}/cluster/vsphere/${KUBE_CONFIG_FILE-"config-default.sh"}"
source "${KUBE_ROOT}/cluster/common.sh"

# Detect the IP for the master
#
# Assumed vars:
#   MASTER_NAME
# Vars set:
#   KUBE_MASTER
#   KUBE_MASTER_IP
function detect-master {
  KUBE_MASTER=${MASTER_NAME}
  if [[ -z "${KUBE_MASTER_IP-}" ]]; then
    KUBE_MASTER_IP=$(govc vm.ip ${MASTER_NAME})
  fi
  if [[ -z "${KUBE_MASTER_IP-}" ]]; then
    echo "Could not detect Kubernetes master node. Make sure you've launched a cluster with 'kube-up.sh'" >&2
    exit 1
  fi
  echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)"
}

# Detect the information about the nodes
#
# Assumed vars:
#   NODE_NAMES
# Vars set:
#   KUBE_NODE_IP_ADDRESSES (array)
function detect-nodes {
  KUBE_NODE_IP_ADDRESSES=()
  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
    local nodeip=$(govc vm.ip ${NODE_NAMES[$i]})
    if [[ -z "${nodeip-}" ]] ; then
      echo "Did not find ${NODE_NAMES[$i]}" >&2
    else
      echo "Found ${NODE_NAMES[$i]} at ${nodeip}"
      KUBE_NODE_IP_ADDRESSES+=("${nodeip}")
    fi
  done
  if [[ -z "${KUBE_NODE_IP_ADDRESSES-}" ]]; then
    echo "Could not detect Kubernetes nodes. Make sure you've launched a cluster with 'kube-up.sh'" >&2
    exit 1
  fi
}

# Append a handler to the trap for the given signal (default EXIT),
# preserving any handler that is already installed.
function trap-add {
  local handler="$1"
  local signal="${2-EXIT}"
  local cur

  # 'trap -p SIGNAL' prints "trap -- 'handler' SIGNAL"; re-parse that output
  # with a throwaway shell so $3 expands to the currently installed handler.
  cur="$(eval "sh -c 'echo \$3' -- $(trap -p ${signal})")"
  if [[ -n "${cur}" ]]; then
    handler="${cur}; ${handler}"
  fi

  trap "${handler}" ${signal}
}
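
# Example (illustrative; both handlers appear elsewhere in this file): because
# trap-add preserves the existing handler, registering these in turn results
# in both running on EXIT:
#   trap-add 'rm -rf "${KUBE_TEMP}"' EXIT
#   trap-add "kill ${SSH_AGENT_PID}" EXIT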

function verify-prereqs {
  which "govc" >/dev/null || {
    echo "Can't find govc in PATH, please install and retry."
    echo ""
    echo "    go install github.com/vmware/govmomi/govc"
    echo ""
    exit 1
  }
}

function verify-ssh-prereqs {
  local rc

  rc=0
  ssh-add -L 1> /dev/null 2> /dev/null || rc="$?"
  # "Could not open a connection to your authentication agent."
  if [[ "${rc}" -eq 2 ]]; then
    eval "$(ssh-agent)" > /dev/null
    trap-add "kill ${SSH_AGENT_PID}" EXIT
  fi

  rc=0
  ssh-add -L 1> /dev/null 2> /dev/null || rc="$?"
  # "The agent has no identities."
  if [[ "${rc}" -eq 1 ]]; then
    # Try adding one of the default identities, with or without passphrase.
    ssh-add || true
  fi

  # Expect at least one identity to be available.
  if ! ssh-add -L 1> /dev/null 2> /dev/null; then
    echo "Could not find or add an SSH identity."
    echo "Please start ssh-agent, add your identity, and retry."
    exit 1
  fi
}

# Create a temp dir that'll be deleted at the end of this bash session.
#
# Vars set:
#   KUBE_TEMP
function ensure-temp-dir {
  if [[ -z ${KUBE_TEMP-} ]]; then
    KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
    trap-add 'rm -rf "${KUBE_TEMP}"' EXIT
  fi
}

# Take the local tar files and upload them to the master.
#
# Assumed vars:
#   MASTER_NAME
#   SERVER_BINARY_TAR
#   SALT_TAR
function upload-server-tars {
  local vm_ip

  vm_ip=$(govc vm.ip "${MASTER_NAME}")
  kube-ssh ${vm_ip} "mkdir -p /home/kube/cache/kubernetes-install"

  local tar
  for tar in "${SERVER_BINARY_TAR}" "${SALT_TAR}"; do
    kube-scp ${vm_ip} "${tar}" "/home/kube/cache/kubernetes-install/${tar##*/}"
  done
}

# Run command over ssh
function kube-ssh {
  local host="$1"
  shift
  ssh ${SSH_OPTS-} "kube@${host}" "$@" 2> /dev/null
}

# Copy file over ssh
function kube-scp {
  local host="$1"
  local src="$2"
  local dst="$3"
  scp ${SSH_OPTS-} "${src}" "kube@${host}:${dst}"
}
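
# Illustrative usage (the host IP and file name here are hypothetical):
#   kube-ssh 10.0.0.1 "uname -a"
#   kube-scp 10.0.0.1 ./some-file.tar.gz /home/kube/cache/kubernetes-install/some-file.tar.gz
# Both helpers log in as the 'kube' user and pass along SSH_OPTS when it is set.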

# Instantiate a generic kubernetes virtual machine (master or node)
#
# Usage:
#   kube-up-vm VM_NAME [options to pass to govc vm.create]
#
# Example:
#   kube-up-vm "vm-name" -c 2 -m 4096
#
# Assumed vars:
#   DISK
#   GUEST_ID
function kube-up-vm {
  local vm_name="$1"
  shift

  govc vm.create \
    -debug \
    -disk="${DISK}" \
    -g="${GUEST_ID}" \
    -link=true \
    "$@" \
    "${vm_name}"

  # Retrieve IP first, to confirm the guest operations agent is running.
  govc vm.ip "${vm_name}" > /dev/null

  govc guest.mkdir \
    -l "kube:kube" \
    -vm="${vm_name}" \
    -p \
    /home/kube/.ssh

  ssh-add -L > "${KUBE_TEMP}/${vm_name}-authorized_keys"

  govc guest.upload \
    -l "kube:kube" \
    -vm="${vm_name}" \
    -f \
    "${KUBE_TEMP}/${vm_name}-authorized_keys" \
    /home/kube/.ssh/authorized_keys
}

# Kick off a local script on a kubernetes virtual machine (master or node)
#
# Usage:
#   kube-run VM_NAME LOCAL_FILE
function kube-run {
  local vm_name="$1"
  local file="$2"
  local dst="/tmp/$(basename "${file}")"
  govc guest.upload -l "kube:kube" -vm="${vm_name}" -f -perm=0755 "${file}" "${dst}"

  echo "uploaded ${file} to ${dst}"

  local vm_ip
  vm_ip=$(govc vm.ip "${vm_name}")
  kube-ssh ${vm_ip} "nohup sudo ${dst} < /dev/null 1> ${dst}.out 2> ${dst}.err &"
}
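
# Note: the uploaded script is started detached under nohup, so kube-run
# returns immediately; its stdout/stderr land next to the script on the VM
# (e.g. /tmp/master-start.sh.out and /tmp/master-start.sh.err for the
# master-start.sh generated in kube-up below).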

#
# Run a command remotely and check whether the specific kube artifact is
# running or not. Keep checking until the command succeeds or the timeout is
# hit (default timeout: 300s).
#
# Usage:
#   kube-check 10.0.0.1 cmd timeout
function kube-check {
  nodeip=$1
  cmd=$2
  sleepstep=5
  if [[ $# -lt 3 || -z $3 ]]; then
    timeout=300
  else
    timeout=$3
  fi
  let effective_timeout=($timeout/$sleepstep)
  attempt=0
  echo
  printf "This may take several minutes. Bound to $effective_timeout attempts"
  while true; do
    local rc=0
    output=$(kube-ssh ${nodeip} "${cmd}") || rc=1
    if [[ $rc != 0 ]]; then
      if (( $attempt == $effective_timeout )); then
        echo
        echo "(Failed) rc: $rc Output: ${output}"
        echo
        echo -e "${cmd} failed to start on ${nodeip}. Your cluster is unlikely" >&2
        echo "to work correctly. You may have to debug it by logging in." >&2
        echo
        exit 1
      fi
    else
      echo
      echo -e "[${cmd}] passed"
      echo
      break
    fi
    printf "."
    attempt=$(($attempt+1))
    sleep $sleepstep
  done
}
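
# Illustrative usage (this exact call is made in kube-up below):
#   kube-check ${KUBE_MASTER_IP} 'sudo salt "kubernetes-master" state.highstate -t 30 | grep -E "Failed:[[:space:]]+0"'
# The check passes as soon as the remote command exits 0, i.e. once the
# highstate run reports zero failed states.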

#
# Verify that a process matching the given pattern is running on the remote
# node. Check up to 60 attempts, 10s apart, and exit 1 with the failing
# output if it never shows up.
#
# Usage:
#   remote-pgrep 10.0.0.1 salt-master
#
function remote-pgrep {
  nodeip=$1
  regex=$2
  max_attempt=60
  printf "This may take several minutes. Bound to $max_attempt attempts"
  attempt=0
  while true; do
    local rc=0
    output=$(kube-ssh ${nodeip} pgrep ${regex}) || rc=1
    if [[ $rc != 0 ]]; then
      if (( $attempt == $max_attempt )); then
        echo
        echo "(Failed) rc: $rc, output:${output}"
        echo
        echo -e "${regex} failed to start on ${nodeip} after checking for $attempt attempts. Your cluster is unlikely" >&2
        echo "to work correctly. You may have to debug it by logging in." >&2
        echo
        exit 1
      fi
    else
      echo
      echo -e "[${regex} running]"
      echo
      break
    fi
    printf "."
    attempt=$(($attempt+1))
    sleep 10
  done
}

# Identify the pod subnet on each node and install routes so pods can reach
# each other and the master.
#
# Assumptions:
#   All packages have been installed and kubelet has started running.
#
function setup-pod-routes {
  # wait till the kubelet sets up the bridge.
  echo "Setting up routes"
  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
    printf "check if cbr0 bridge is ready on ${NODE_NAMES[$i]}\n"
    kube-check ${KUBE_NODE_IP_ADDRESSES[$i]} 'sudo ifconfig cbr0 | grep -oP "inet addr:\K\S+"'
  done

  # identify the subnet assigned to the node by the kubernetes controller manager.
  KUBE_NODE_BRIDGE_NETWORK=()
  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
    printf " finding network of cbr0 bridge on node ${NODE_NAMES[$i]}\n"
    network=$(kube-ssh ${KUBE_NODE_IP_ADDRESSES[$i]} 'sudo ip route show | grep -E "dev cbr0" | cut -d " " -f1')
    KUBE_NODE_BRIDGE_NETWORK+=("${network}")
  done

  # Make the pods visible to each other and to the master.
  # The master needs to have routes to the pods for the UI to work.
  local j
  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
    printf "setting up routes for ${NODE_NAMES[$i]}"
    kube-ssh "${KUBE_MASTER_IP}" "sudo route add -net ${KUBE_NODE_BRIDGE_NETWORK[${i}]} gw ${KUBE_NODE_IP_ADDRESSES[${i}]}"
    for (( j=0; j<${#NODE_NAMES[@]}; j++)); do
      if [[ $i != $j ]]; then
        kube-ssh ${KUBE_NODE_IP_ADDRESSES[$i]} "sudo route add -net ${KUBE_NODE_BRIDGE_NETWORK[$j]} gw ${KUBE_NODE_IP_ADDRESSES[$j]}"
      fi
    done
  done
}
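
# Illustrative result (all addresses here are hypothetical): if the controller
# manager assigned 10.244.1.0/24 to node-1 (IP 10.10.0.11) and 10.244.2.0/24
# to node-2 (IP 10.10.0.12), then the master and node-2 each get
#   route add -net 10.244.1.0/24 gw 10.10.0.11
# and the master and node-1 each get
#   route add -net 10.244.2.0/24 gw 10.10.0.12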

# Instantiate a kubernetes cluster
#
# Assumed vars:
#   KUBE_ROOT
#   <Various vars set in config file>
function kube-up {
  verify-ssh-prereqs
  find-release-tars
  ensure-temp-dir

  load-or-gen-kube-basicauth
  python "${KUBE_ROOT}/third_party/htpasswd/htpasswd.py" \
    -b -c "${KUBE_TEMP}/htpasswd" "$KUBE_USER" "$KUBE_PASSWORD"
  local htpasswd
  htpasswd=$(cat "${KUBE_TEMP}/htpasswd")

  # This calculation of the service IP should work, but if you choose an
  # alternate subnet, there's a small chance you'd need to modify the
  # service_ip, below. We'll choose an IP like 10.244.240.1 by taking
  # the first three octets of the SERVICE_CLUSTER_IP_RANGE and tacking
  # on a .1
  local octets
  local service_ip
  octets=($(echo "${SERVICE_CLUSTER_IP_RANGE}" | sed -e 's|/.*||' -e 's/\./ /g'))
  ((octets[3]+=1))
  service_ip=$(echo "${octets[*]}" | sed 's/ /./g')
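  # Worked example (assuming SERVICE_CLUSTER_IP_RANGE=10.244.240.0/20): strip
  # the mask to get "10 244 240 0", add 1 to the last octet, and rejoin to get
  # service_ip=10.244.240.1.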
  MASTER_EXTRA_SANS="IP:${service_ip},DNS:${MASTER_NAME},${MASTER_EXTRA_SANS}"

  echo "Starting master VM (this can take a minute)..."

  (
    echo "#! /bin/bash"
    echo "readonly MY_NAME=${MASTER_NAME}"
    grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/hostname.sh"
    echo "cd /home/kube/cache/kubernetes-install"
    echo "readonly MASTER_NAME='${MASTER_NAME}'"
    echo "readonly MASTER_IP_RANGE='${MASTER_IP_RANGE}'"
    echo "readonly INSTANCE_PREFIX='${INSTANCE_PREFIX}'"
    echo "readonly NODE_INSTANCE_PREFIX='${INSTANCE_PREFIX}-node'"
    echo "readonly NODE_IP_RANGES='${NODE_IP_RANGES}'"
    echo "readonly SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'"
    echo "readonly ENABLE_NODE_LOGGING='${ENABLE_NODE_LOGGING:-false}'"
    echo "readonly LOGGING_DESTINATION='${LOGGING_DESTINATION:-}'"
    echo "readonly ENABLE_CLUSTER_DNS='${ENABLE_CLUSTER_DNS:-false}'"
    echo "readonly ENABLE_CLUSTER_UI='${ENABLE_CLUSTER_UI:-false}'"
    echo "readonly DNS_SERVER_IP='${DNS_SERVER_IP:-}'"
    echo "readonly DNS_DOMAIN='${DNS_DOMAIN:-}'"
    echo "readonly KUBE_USER='${KUBE_USER:-}'"
    echo "readonly KUBE_PASSWORD='${KUBE_PASSWORD:-}'"
    echo "readonly SERVER_BINARY_TAR='${SERVER_BINARY_TAR##*/}'"
    echo "readonly SALT_TAR='${SALT_TAR##*/}'"
    echo "readonly MASTER_HTPASSWD='${htpasswd}'"
    echo "readonly E2E_STORAGE_TEST_ENVIRONMENT='${E2E_STORAGE_TEST_ENVIRONMENT:-}'"
    echo "readonly MASTER_EXTRA_SANS='${MASTER_EXTRA_SANS:-}'"
    grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/create-dynamic-salt-files.sh"
    grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/install-release.sh"
    grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/salt-master.sh"
  ) > "${KUBE_TEMP}/master-start.sh"
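
  # The generated master-start.sh is a flat bash script: hostname setup, the
  # readonly configuration values emitted above, then the salt bootstrap
  # templates with their comment lines stripped. kube-run uploads and executes
  # it on the master VM below.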

  kube-up-vm ${MASTER_NAME} -c ${MASTER_CPU-1} -m ${MASTER_MEMORY_MB-1024}
  upload-server-tars
  kube-run ${MASTER_NAME} "${KUBE_TEMP}/master-start.sh"

  # Print master IP, so user can log in for debugging.
  detect-master
  echo

  echo "Starting node VMs (this can take a minute)..."

  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
    (
      echo "#! /bin/bash"
      echo "readonly MY_NAME=${NODE_NAMES[$i]}"
      grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/hostname.sh"
      echo "KUBE_MASTER=${KUBE_MASTER}"
      echo "KUBE_MASTER_IP=${KUBE_MASTER_IP}"
      echo "NODE_IP_RANGE=$NODE_IP_RANGES"
      grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/salt-minion.sh"
    ) > "${KUBE_TEMP}/node-start-${i}.sh"

    (
      kube-up-vm "${NODE_NAMES[$i]}" -c ${NODE_CPU-1} -m ${NODE_MEMORY_MB-1024}
      kube-run "${NODE_NAMES[$i]}" "${KUBE_TEMP}/node-start-${i}.sh"
    ) &
  done

  local fail=0
  local job
  for job in $(jobs -p); do
    wait "${job}" || fail=$((fail + 1))
  done
  if (( $fail != 0 )); then
    echo "${fail} commands failed. Exiting." >&2
    exit 2
  fi

  # Print node IPs, so user can log in for debugging.
  detect-nodes

  printf "Waiting for salt-master to be up on ${KUBE_MASTER} ...\n"
  remote-pgrep ${KUBE_MASTER_IP} "salt-master"

  printf "Waiting for all packages to be installed on ${KUBE_MASTER} ...\n"
  kube-check ${KUBE_MASTER_IP} 'sudo salt "kubernetes-master" state.highstate -t 30 | grep -E "Failed:[[:space:]]+0"'

  local i
  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
    printf "Waiting for salt-minion to be up on ${NODE_NAMES[$i]} ....\n"
    remote-pgrep ${KUBE_NODE_IP_ADDRESSES[$i]} "salt-minion"
    printf "Waiting for all salt packages to be installed on ${NODE_NAMES[$i]} .... \n"
    kube-check ${KUBE_MASTER_IP} 'sudo salt '"${NODE_NAMES[$i]}"' state.highstate -t 30 | grep -E "Failed:[[:space:]]+0"'
    printf " OK\n"
  done

  echo
  echo "Waiting for master and node initialization."
  echo
  echo "  This will continually check to see if the Kubernetes API is reachable."
  echo "  This might loop forever if there was some uncaught error during start up."
  echo

  until curl --insecure --user "${KUBE_USER}:${KUBE_PASSWORD}" --max-time 5 \
          --fail --output /dev/null --silent "https://${KUBE_MASTER_IP}/healthz"; do
    printf "."
    sleep 2
  done
  printf " OK\n"

  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
    printf "Waiting for ${NODE_NAMES[$i]} to become available..."
    until curl --max-time 5 \
            --fail --output /dev/null --silent "http://${KUBE_NODE_IP_ADDRESSES[$i]}:10250/healthz"; do
      printf "."
      sleep 2
    done
    printf " OK\n"
  done

  setup-pod-routes

  echo "Kubernetes cluster created."
  # TODO use token instead of basic auth
  export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt"
  export KUBE_KEY="/tmp/$RANDOM-kubecfg.key"
  export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt"
  export CONTEXT="vsphere_${INSTANCE_PREFIX}"

  (
    umask 077
    kube-ssh "${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/kubecfg.crt >"${KUBE_CERT}" 2>/dev/null
    kube-ssh "${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/kubecfg.key >"${KUBE_KEY}" 2>/dev/null
    kube-ssh "${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/ca.crt >"${CA_CERT}" 2>/dev/null

    create-kubeconfig
  )
  printf "\n"

  echo
  echo "Sanity checking cluster..."

  sleep 5

  # Basic sanity checking
  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
    # Make sure docker is installed
    kube-ssh "${KUBE_NODE_IP_ADDRESSES[$i]}" which docker > /dev/null || {
      echo "Docker failed to install on ${NODE_NAMES[$i]}. Your cluster is unlikely" >&2
      echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2
      echo "cluster. (sorry!)" >&2
      exit 1
    }
  done

  # ensures KUBECONFIG is set
  get-kubeconfig-basicauth
  echo
  echo "Kubernetes cluster is running. The master is running at:"
  echo
  echo "  https://${KUBE_MASTER_IP}"
  echo
  echo "The user name and password to use are located in ${KUBECONFIG}"
  echo
}

# Delete a kubernetes cluster
function kube-down {
  govc vm.destroy ${MASTER_NAME} &

  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
    govc vm.destroy ${NODE_NAMES[i]} &
  done

  wait
}

# Update a kubernetes cluster with the latest source
function kube-push {
  verify-ssh-prereqs
  find-release-tars

  detect-master
  upload-server-tars

  (
    echo "#! /bin/bash"
    echo "cd /home/kube/cache/kubernetes-install"
    echo "readonly SERVER_BINARY_TAR='${SERVER_BINARY_TAR##*/}'"
    echo "readonly SALT_TAR='${SALT_TAR##*/}'"
    grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/install-release.sh"
    echo "echo Executing configuration"
    echo "sudo salt '*' mine.update"
    echo "sudo salt --force-color '*' state.highstate"
  ) | kube-ssh "${KUBE_MASTER_IP}"

  get-kubeconfig-basicauth

  echo
  echo "Kubernetes cluster is running. The master is running at:"
  echo
  echo "  https://${KUBE_MASTER_IP}"
  echo
  echo "The user name and password to use are located in ${KUBECONFIG:-$DEFAULT_KUBECONFIG}."
  echo
}

# Execute prior to running tests to build a release if required for env
function test-build-release {
  echo "TODO"
}

# Execute prior to running tests to initialize required structure
function test-setup {
  echo "TODO"
}

# Execute after running tests to perform any required clean-up
function test-teardown {
  echo "TODO"
}