  1. #!/bin/bash
  2. # Copyright 2014 The Kubernetes Authors.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. # A library of helper functions and constant for the local config.
  16. # Use the config file specified in $KUBE_CONFIG_FILE, or default to
  17. # config-default.sh.
  18. set -e
  19. SOURCE="${BASH_SOURCE[0]}"
  20. while [ -h "$SOURCE" ]; do
  21. DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
  22. SOURCE="$(readlink "$SOURCE")"
  23. [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE"
  24. done
  25. DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
  26. KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
  27. source "${KUBE_ROOT}/cluster/azure-legacy/${KUBE_CONFIG_FILE-"config-default.sh"}"
  28. source "${KUBE_ROOT}/cluster/common.sh"
  29. function prepare-e2e() {
  30. # (e2e script runs detect-project, I don't think we need to anything)
  31. # Note: we can't print anything here, or else the test tools will break with the extra output
  32. return
  33. }
  34. function azure_call {
  35. local -a params=()
  36. local param
  37. # the '... in "$@"' is implicit on a for, so doesn't need to be stated.
  38. for param; do
  39. params+=("${param}")
  40. done
  41. local rc=0
  42. local stderr
  43. local count=0
  44. while [[ count -lt 10 ]]; do
  45. stderr=$(azure "${params[@]}" 2>&1 >&3) && break
  46. rc=$?
  47. if [[ "${stderr}" != *"getaddrinfo ENOTFOUND"* ]]; then
  48. break
  49. fi
  50. count=$(($count + 1))
  51. done 3>&1
  52. if [[ "${rc}" -ne 0 ]]; then
  53. echo "${stderr}" >&2
  54. return "${rc}"
  55. fi
  56. }
  57. function json_val () {
  58. python -c 'import json,sys;obj=json.load(sys.stdin);print obj'$1'';
  59. }
  60. # Verify prereqs
  61. function verify-prereqs {
  62. if [[ -z "$(which azure)" ]]; then
  63. echo "Couldn't find azure in PATH"
  64. echo " please install with 'npm install azure-cli'"
  65. exit 1
  66. fi
  67. if [[ -z "$(azure_call account list | grep true)" ]]; then
  68. echo "Default azure account not set"
  69. echo " please set with 'azure account set'"
  70. exit 1
  71. fi
  72. account=$(azure_call account list | grep true)
  73. if which md5 > /dev/null 2>&1; then
  74. AZ_HSH=$(md5 -q -s "$account")
  75. else
  76. AZ_HSH=$(echo -n "$account" | md5sum)
  77. fi
  78. AZ_HSH=${AZ_HSH:0:7}
  79. AZ_STG=kube$AZ_HSH
  80. echo "==> AZ_STG: $AZ_STG"
  81. AZ_CS="$AZ_CS_PREFIX-$AZ_HSH"
  82. echo "==> AZ_CS: $AZ_CS"
  83. CONTAINER=kube-$TAG
  84. echo "==> CONTAINER: $CONTAINER"
  85. }
  86. # Create a temp dir that'll be deleted at the end of this bash session.
  87. #
  88. # Vars set:
  89. # KUBE_TEMP
  90. function ensure-temp-dir {
  91. if [[ -z ${KUBE_TEMP-} ]]; then
  92. KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
  93. trap 'rm -rf "${KUBE_TEMP}"' EXIT
  94. fi
  95. }
  96. # Take the local tar files and upload them to Azure Storage. They will then be
  97. # downloaded by the master as part of the start up script for the master.
  98. #
  99. # Assumed vars:
  100. # SERVER_BINARY_TAR
  101. # SALT_TAR
  102. # Vars set:
  103. # SERVER_BINARY_TAR_URL
  104. # SALT_TAR_URL
  105. function upload-server-tars() {
  106. SERVER_BINARY_TAR_URL=
  107. SALT_TAR_URL=
  108. echo "==> SERVER_BINARY_TAR: $SERVER_BINARY_TAR"
  109. echo "==> SALT_TAR: $SALT_TAR"
  110. echo "+++ Staging server tars to Azure Storage: $AZ_STG"
  111. local server_binary_url="${SERVER_BINARY_TAR##*/}"
  112. local salt_url="${SALT_TAR##*/}"
  113. SERVER_BINARY_TAR_URL="https://${AZ_STG}.blob.core.windows.net/$CONTAINER/$server_binary_url"
  114. SALT_TAR_URL="https://${AZ_STG}.blob.core.windows.net/$CONTAINER/$salt_url"
  115. echo "==> SERVER_BINARY_TAR_URL: $SERVER_BINARY_TAR_URL"
  116. echo "==> SALT_TAR_URL: $SALT_TAR_URL"
  117. echo "--> Checking storage exists..."
  118. if [[ -z "$(azure_call storage account show $AZ_STG 2>/dev/null | \
  119. grep data)" ]]; then
  120. echo "--> Creating storage..."
  121. azure_call storage account create -l "$AZ_LOCATION" $AZ_STG --type LRS
  122. fi
  123. echo "--> Getting storage key..."
  124. stg_key=$(azure_call storage account keys list $AZ_STG --json | \
  125. json_val '["primaryKey"]')
  126. echo "--> Checking storage container exists..."
  127. if [[ -z "$(azure_call storage container show -a $AZ_STG -k "$stg_key" \
  128. $CONTAINER 2>/dev/null | grep data)" ]]; then
  129. echo "--> Creating storage container..."
  130. azure_call storage container create \
  131. -a $AZ_STG \
  132. -k "$stg_key" \
  133. -p Blob \
  134. $CONTAINER
  135. fi
  136. echo "--> Checking server binary exists in the container..."
  137. if [[ -n "$(azure_call storage blob show -a $AZ_STG -k "$stg_key" \
  138. $CONTAINER $server_binary_url 2>/dev/null | grep data)" ]]; then
  139. echo "--> Deleting server binary in the container..."
  140. azure_call storage blob delete \
  141. -a $AZ_STG \
  142. -k "$stg_key" \
  143. $CONTAINER \
  144. $server_binary_url
  145. fi
  146. echo "--> Uploading server binary to the container..."
  147. azure_call storage blob upload \
  148. -a $AZ_STG \
  149. -k "$stg_key" \
  150. $SERVER_BINARY_TAR \
  151. $CONTAINER \
  152. $server_binary_url
  153. echo "--> Checking salt data exists in the container..."
  154. if [[ -n "$(azure_call storage blob show -a $AZ_STG -k "$stg_key" \
  155. $CONTAINER $salt_url 2>/dev/null | grep data)" ]]; then
  156. echo "--> Deleting salt data in the container..."
  157. azure_call storage blob delete \
  158. -a $AZ_STG \
  159. -k "$stg_key" \
  160. $CONTAINER \
  161. $salt_url
  162. fi
  163. echo "--> Uploading salt data to the container..."
  164. azure_call storage blob upload \
  165. -a $AZ_STG \
  166. -k "$stg_key" \
  167. $SALT_TAR \
  168. $CONTAINER \
  169. $salt_url
  170. }
  171. # Detect the information about the minions
  172. #
  173. # Assumed vars:
  174. # MINION_NAMES
  175. # ZONE
  176. # Vars set:
  177. #
  178. function detect-minions () {
  179. if [[ -z "$AZ_CS" ]]; then
  180. verify-prereqs-local
  181. fi
  182. ssh_ports=($(eval echo "2200{1..$NUM_MINIONS}"))
  183. for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
  184. MINION_NAMES[$i]=$(ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} $AZ_CS.cloudapp.net hostname -f)
  185. done
  186. }
  187. # Detect the IP for the master
  188. #
  189. # Assumed vars:
  190. # MASTER_NAME
  191. # ZONE
  192. # Vars set:
  193. # KUBE_MASTER
  194. # KUBE_MASTER_IP
  195. function detect-master () {
  196. if [[ -z "$AZ_CS" ]]; then
  197. verify-prereqs-local
  198. fi
  199. KUBE_MASTER=${MASTER_NAME}
  200. KUBE_MASTER_IP="${AZ_CS}.cloudapp.net"
  201. echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)"
  202. }
  203. # Instantiate a kubernetes cluster
  204. #
  205. # Assumed vars
  206. # KUBE_ROOT
  207. # <Various vars set in config file>
  208. function kube-up {
  209. # Make sure we have the tar files staged on Azure Storage
  210. find-release-tars
  211. upload-server-tars
  212. ensure-temp-dir
  213. gen-kube-basicauth
  214. python "${KUBE_ROOT}/third_party/htpasswd/htpasswd.py" \
  215. -b -c "${KUBE_TEMP}/htpasswd" "$KUBE_USER" "$KUBE_PASSWORD"
  216. local htpasswd
  217. htpasswd=$(cat "${KUBE_TEMP}/htpasswd")
  218. # Generate openvpn certs
  219. echo "--> Generating openvpn certs"
  220. echo 01 > ${KUBE_TEMP}/ca.srl
  221. openssl genrsa -out ${KUBE_TEMP}/ca.key
  222. openssl req -new -x509 -days 1095 \
  223. -key ${KUBE_TEMP}/ca.key \
  224. -out ${KUBE_TEMP}/ca.crt \
  225. -subj "/CN=openvpn-ca"
  226. openssl genrsa -out ${KUBE_TEMP}/server.key
  227. openssl req -new \
  228. -key ${KUBE_TEMP}/server.key \
  229. -out ${KUBE_TEMP}/server.csr \
  230. -subj "/CN=server"
  231. openssl x509 -req -days 1095 \
  232. -in ${KUBE_TEMP}/server.csr \
  233. -CA ${KUBE_TEMP}/ca.crt \
  234. -CAkey ${KUBE_TEMP}/ca.key \
  235. -CAserial ${KUBE_TEMP}/ca.srl \
  236. -out ${KUBE_TEMP}/server.crt
  237. for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
  238. openssl genrsa -out ${KUBE_TEMP}/${MINION_NAMES[$i]}.key
  239. openssl req -new \
  240. -key ${KUBE_TEMP}/${MINION_NAMES[$i]}.key \
  241. -out ${KUBE_TEMP}/${MINION_NAMES[$i]}.csr \
  242. -subj "/CN=${MINION_NAMES[$i]}"
  243. openssl x509 -req -days 1095 \
  244. -in ${KUBE_TEMP}/${MINION_NAMES[$i]}.csr \
  245. -CA ${KUBE_TEMP}/ca.crt \
  246. -CAkey ${KUBE_TEMP}/ca.key \
  247. -CAserial ${KUBE_TEMP}/ca.srl \
  248. -out ${KUBE_TEMP}/${MINION_NAMES[$i]}.crt
  249. done
  250. KUBE_MASTER_IP="${AZ_CS}.cloudapp.net"
  251. # Build up start up script for master
  252. echo "--> Building up start up script for master"
  253. (
  254. echo "#!/bin/bash"
  255. echo "CA_CRT=\"$(cat ${KUBE_TEMP}/ca.crt)\""
  256. echo "SERVER_CRT=\"$(cat ${KUBE_TEMP}/server.crt)\""
  257. echo "SERVER_KEY=\"$(cat ${KUBE_TEMP}/server.key)\""
  258. echo "mkdir -p /var/cache/kubernetes-install"
  259. echo "cd /var/cache/kubernetes-install"
  260. echo "readonly MASTER_NAME='${MASTER_NAME}'"
  261. echo "readonly INSTANCE_PREFIX='${INSTANCE_PREFIX}'"
  262. echo "readonly NODE_INSTANCE_PREFIX='${INSTANCE_PREFIX}-minion'"
  263. echo "readonly SERVER_BINARY_TAR_URL='${SERVER_BINARY_TAR_URL}'"
  264. echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'"
  265. echo "readonly MASTER_HTPASSWD='${htpasswd}'"
  266. echo "readonly SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'"
  267. echo "readonly ADMISSION_CONTROL='${ADMISSION_CONTROL:-}'"
  268. echo "readonly KUBE_USER='${KUBE_USER}'"
  269. echo "readonly KUBE_PASSWORD='${KUBE_PASSWORD}'"
  270. echo "readonly KUBE_MASTER_IP='${KUBE_MASTER_IP}'"
  271. grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/common.sh"
  272. grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/create-dynamic-salt-files.sh"
  273. grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/create-kubeconfig.sh"
  274. grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/download-release.sh"
  275. grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/salt-master.sh"
  276. ) > "${KUBE_TEMP}/master-start.sh"
  277. if [[ ! -f $AZ_SSH_KEY ]]; then
  278. ssh-keygen -f $AZ_SSH_KEY -N ''
  279. fi
  280. if [[ ! -f $AZ_SSH_CERT ]]; then
  281. openssl req -new -x509 -days 1095 -key $AZ_SSH_KEY -out $AZ_SSH_CERT \
  282. -subj "/CN=azure-ssh-key"
  283. fi
  284. if [[ -z "$(azure_call network vnet show "$AZ_VNET" 2>/dev/null | grep data)" ]]; then
  285. echo error create vnet $AZ_VNET with subnet $AZ_SUBNET
  286. exit 1
  287. fi
  288. echo "--> Starting VM"
  289. azure_call vm create \
  290. -z "$MASTER_SIZE" \
  291. -w "$AZ_VNET" \
  292. -n $MASTER_NAME \
  293. -l "$AZ_LOCATION" \
  294. -t $AZ_SSH_CERT \
  295. -e 22000 -P \
  296. -d ${KUBE_TEMP}/master-start.sh \
  297. -b $AZ_SUBNET \
  298. $AZ_CS $AZ_IMAGE $USER
  299. ssh_ports=($(eval echo "2200{1..$NUM_MINIONS}"))
  300. #Build up start up script for minions
  301. echo "--> Building up start up script for minions"
  302. for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
  303. (
  304. echo "#!/bin/bash"
  305. echo "MASTER_NAME='${MASTER_NAME}'"
  306. echo "CA_CRT=\"$(cat ${KUBE_TEMP}/ca.crt)\""
  307. echo "CLIENT_CRT=\"$(cat ${KUBE_TEMP}/${MINION_NAMES[$i]}.crt)\""
  308. echo "CLIENT_KEY=\"$(cat ${KUBE_TEMP}/${MINION_NAMES[$i]}.key)\""
  309. echo "MINION_IP_RANGE='${MINION_IP_RANGES[$i]}'"
  310. echo "readonly KUBE_USER='${KUBE_USER}'"
  311. echo "readonly KUBE_PASSWORD='${KUBE_PASSWORD}'"
  312. echo "readonly KUBE_MASTER_IP='${KUBE_MASTER_IP}'"
  313. grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/common.sh"
  314. grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/create-kubeconfig.sh"
  315. grep -v "^#" "${KUBE_ROOT}/cluster/azure-legacy/templates/salt-minion.sh"
  316. ) > "${KUBE_TEMP}/minion-start-${i}.sh"
  317. echo "--> Starting VM"
  318. azure_call vm create \
  319. -z "$MINION_SIZE" \
  320. -c -w "$AZ_VNET" \
  321. -n ${MINION_NAMES[$i]} \
  322. -l "$AZ_LOCATION" \
  323. -t $AZ_SSH_CERT \
  324. -e ${ssh_ports[$i]} -P \
  325. -d ${KUBE_TEMP}/minion-start-${i}.sh \
  326. -b $AZ_SUBNET \
  327. $AZ_CS $AZ_IMAGE $USER
  328. done
  329. echo "--> Creating endpoint"
  330. azure_call vm endpoint create $MASTER_NAME 443
  331. detect-master > /dev/null
  332. echo "==> KUBE_MASTER_IP: ${KUBE_MASTER_IP}"
  333. echo "Waiting for cluster initialization."
  334. echo
  335. echo " This will continually check to see if the API for kubernetes is reachable."
  336. echo " This might loop forever if there was some uncaught error during start"
  337. echo " up."
  338. echo
  339. until curl --insecure --user "${KUBE_USER}:${KUBE_PASSWORD}" --max-time 5 \
  340. --fail --output /dev/null --silent "https://${KUBE_MASTER_IP}/healthz"; do
  341. printf "."
  342. sleep 2
  343. done
  344. printf "\n"
  345. echo "Kubernetes cluster created."
  346. export CONTEXT="azure_${INSTANCE_PREFIX}"
  347. create-kubeconfig
  348. export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt"
  349. export KUBE_KEY="/tmp/$RANDOM-kubecfg.key"
  350. export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt"
  351. # TODO: generate ADMIN (and KUBELET) tokens and put those in the master's
  352. # config file. Distribute the same way the htpasswd is done.
  353. (umask 077
  354. ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
  355. sudo cat /srv/kubernetes/kubecfg.crt >"${KUBE_CERT}" 2>/dev/null
  356. ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
  357. sudo cat /srv/kubernetes/kubecfg.key >"${KUBE_KEY}" 2>/dev/null
  358. ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
  359. sudo cat /srv/kubernetes/ca.crt >"${CA_CERT}" 2>/dev/null
  360. )
  361. echo "Sanity checking cluster..."
  362. echo
  363. echo " This will continually check the minions to ensure docker is"
  364. echo " installed. This is usually a good indicator that salt has"
  365. echo " successfully provisioned. This might loop forever if there was"
  366. echo " some uncaught error during start up."
  367. echo
  368. # Basic sanity checking
  369. for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
  370. # Make sure docker is installed
  371. echo "--> Making sure docker is installed on ${MINION_NAMES[$i]}."
  372. until ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} \
  373. $AZ_CS.cloudapp.net which docker > /dev/null 2>&1; do
  374. printf "."
  375. sleep 2
  376. done
  377. done
  378. sleep 60
  379. KUBECONFIG_NAME="kubeconfig"
  380. KUBECONFIG="${HOME}/.kube/config"
  381. echo "Distributing kubeconfig for kubelet to master kubelet"
  382. scp -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -P 22000 ${KUBECONFIG} \
  383. $AZ_CS.cloudapp.net:${KUBECONFIG_NAME}
  384. ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
  385. sudo cp ${KUBECONFIG_NAME} /var/lib/kubelet/${KUBECONFIG_NAME}
  386. ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
  387. sudo service kubelet restart
  388. echo "Distributing kubeconfig for kubelet to all minions"
  389. for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
  390. scp -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -P ${ssh_ports[$i]} ${KUBECONFIG} \
  391. $AZ_CS.cloudapp.net:${KUBECONFIG_NAME}
  392. ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} $AZ_CS.cloudapp.net \
  393. sudo cp ${KUBECONFIG_NAME} /var/lib/kubelet/${KUBECONFIG_NAME}
  394. ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} $AZ_CS.cloudapp.net \
  395. sudo cp ${KUBECONFIG_NAME} /var/lib/kube-proxy/${KUBECONFIG_NAME}
  396. ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} $AZ_CS.cloudapp.net \
  397. sudo service kubelet restart
  398. ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} $AZ_CS.cloudapp.net \
  399. sudo killall kube-proxy
  400. done
  401. # ensures KUBECONFIG is set
  402. get-kubeconfig-basicauth
  403. echo
  404. echo "Kubernetes cluster is running. The master is running at:"
  405. echo
  406. echo " https://${KUBE_MASTER_IP}"
  407. echo
  408. echo "The user name and password to use is located in ${KUBECONFIG}."
  409. echo
  410. }
  411. # Delete a kubernetes cluster
  412. function kube-down {
  413. echo "Bringing down cluster"
  414. set +e
  415. azure_call vm delete $MASTER_NAME -b -q
  416. for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
  417. azure_call vm delete ${MINION_NAMES[$i]} -b -q
  418. done
  419. wait
  420. }
# Update a kubernetes cluster with latest source
#function kube-push {
#  detect-project
#  detect-master
#  # Make sure we have the tar files staged on Azure Storage
#  find-release-tars
#  upload-server-tars
#  (
#    echo "#! /bin/bash"
#    echo "mkdir -p /var/cache/kubernetes-install"
#    echo "cd /var/cache/kubernetes-install"
#    echo "readonly SERVER_BINARY_TAR_URL='${SERVER_BINARY_TAR_URL}'"
#    echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'"
#    grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/common.sh"
#    grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/download-release.sh"
#    echo "echo Executing configuration"
#    echo "sudo salt '*' mine.update"
#    echo "sudo salt --force-color '*' state.highstate"
#  ) | gcutil ssh --project "$PROJECT" --zone "$ZONE" "$KUBE_MASTER" sudo bash
#  get-kubeconfig-basicauth
#  echo
#  echo "Kubernetes cluster is running. The master is running at:"
#  echo
#  echo " https://${KUBE_MASTER_IP}"
#  echo
#  echo "The user name and password to use is located in ${KUBECONFIG:-$DEFAULT_KUBECONFIG}."
#  echo
#}
  449. # -----------------------------------------------------------------------------
  450. # Cluster specific test helpers
  451. # Execute prior to running tests to build a release if required for env.
  452. #
  453. # Assumed Vars:
  454. # KUBE_ROOT
  455. function test-build-release {
  456. # Make a release
  457. "${KUBE_ROOT}/build/release.sh"
  458. }
  459. # SSH to a node by name ($1) and run a command ($2).
  460. function ssh-to-node {
  461. local node="$1"
  462. local cmd="$2"
  463. ssh --ssh_arg "-o LogLevel=quiet" "${node}" "${cmd}"
  464. }
  465. # Restart the kube-proxy on a node ($1)
  466. function restart-kube-proxy {
  467. ssh-to-node "$1" "sudo /etc/init.d/kube-proxy restart"
  468. }
  469. # Restart the kube-proxy on the master ($1)
  470. function restart-apiserver {
  471. ssh-to-node "$1" "sudo /etc/init.d/kube-apiserver restart"
  472. }
  473. function test-setup {
  474. "${KUBE_ROOT}/cluster/kube-up.sh"
  475. }
  476. function test-teardown {
  477. "${KUBE_ROOT}/cluster/kube-down.sh"
  478. }