#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# A library of helper functions for deploying on Rackspace.
# Uses the config file specified in $KUBE_CONFIG_FILE, or defaults to
# config-default.sh.

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "$(dirname "${BASH_SOURCE}")/${KUBE_CONFIG_FILE-config-default.sh}"
source "${KUBE_ROOT}/cluster/common.sh"
source "${KUBE_ROOT}/cluster/rackspace/authorization.sh"
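
# Typical usage, as a rough sketch: this library is normally sourced by the
# provider-neutral cluster scripts rather than run directly, so the entry
# point below is illustrative:
#
#   export KUBERNETES_PROVIDER=rackspace
#   cluster/kube-up.sh        # sources this file and calls kube-up()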
verify-prereqs() {
  # Make sure that prerequisites are installed.
  for x in nova swiftly; do
    if [ "$(which $x)" == "" ]; then
      echo "cluster/rackspace/util.sh: Can't find $x in PATH, please fix and retry."
      exit 1
    fi
  done

  if [[ -z "${OS_AUTH_URL-}" ]]; then
    echo "cluster/rackspace/util.sh: OS_AUTH_URL not set."
    echo -e "\texport OS_AUTH_URL=https://identity.api.rackspacecloud.com/v2.0/"
    return 1
  fi

  if [[ -z "${OS_USERNAME-}" ]]; then
    echo "cluster/rackspace/util.sh: OS_USERNAME not set."
    echo -e "\texport OS_USERNAME=myusername"
    return 1
  fi

  if [[ -z "${OS_PASSWORD-}" ]]; then
    echo "cluster/rackspace/util.sh: OS_PASSWORD not set."
    echo -e "\texport OS_PASSWORD=myapikey"
    return 1
  fi
}
# Generate an SSH key pair locally (if needed) and upload the public key to
# Rackspace so booted servers can be reached over SSH.
rax-ssh-key() {
  if [ ! -f "${HOME}/.ssh/${SSH_KEY_NAME}" ]; then
    echo "cluster/rackspace/util.sh: Generating SSH KEY ${HOME}/.ssh/${SSH_KEY_NAME}"
    ssh-keygen -f "${HOME}/.ssh/${SSH_KEY_NAME}" -N '' > /dev/null
  fi

  if ! nova keypair-list | grep ${SSH_KEY_NAME} > /dev/null 2>&1; then
    echo "cluster/rackspace/util.sh: Uploading key to Rackspace:"
    echo -e "\tnova keypair-add ${SSH_KEY_NAME} --pub-key ${HOME}/.ssh/${SSH_KEY_NAME}.pub"
    nova keypair-add ${SSH_KEY_NAME} --pub-key ${HOME}/.ssh/${SSH_KEY_NAME}.pub > /dev/null 2>&1
  else
    echo "cluster/rackspace/util.sh: SSH key ${SSH_KEY_NAME}.pub already uploaded"
  fi
}
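
# A quick manual check that the key is registered (illustrative; uses the
# same nova CLI the function relies on):
#
#   nova keypair-list | grep "${SSH_KEY_NAME}"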
rackspace-set-vars() {
  CLOUDFILES_CONTAINER="kubernetes-releases-${OS_USERNAME}"
  CONTAINER_PREFIX=${CONTAINER_PREFIX-devel/}
  find-release-tars
}
# Retrieve a temp URL from Cloud Files to make the release object publicly
# accessible temporarily.
find-object-url() {
  rackspace-set-vars
  KUBE_TAR=${CLOUDFILES_CONTAINER}/${CONTAINER_PREFIX}/kubernetes-server-linux-amd64.tar.gz

  # Create temp URL good for 24 hours.
  RELEASE_TMP_URL=$(swiftly -A ${OS_AUTH_URL} -U ${OS_USERNAME} -K ${OS_PASSWORD} tempurl GET ${KUBE_TAR} 86400)
  echo "cluster/rackspace/util.sh: Object temp URL:"
  echo -e "\t${RELEASE_TMP_URL}"
}
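
# The resulting temp URL can be sanity-checked before booting servers, e.g.
# (illustrative; any HTTP client works):
#
#   curl -I "${RELEASE_TMP_URL}"    # expect HTTP 200 while the URL is valid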
ensure_dev_container() {
  SWIFTLY_CMD="swiftly -A ${OS_AUTH_URL} -U ${OS_USERNAME} -K ${OS_PASSWORD}"

  if ! ${SWIFTLY_CMD} get ${CLOUDFILES_CONTAINER} > /dev/null 2>&1 ; then
    echo "cluster/rackspace/util.sh: Container doesn't exist. Creating container ${CLOUDFILES_CONTAINER}"
    ${SWIFTLY_CMD} put ${CLOUDFILES_CONTAINER} > /dev/null 2>&1
  fi
}
# Copy kubernetes-server-linux-amd64.tar.gz to the Cloud Files object store.
copy_dev_tarballs() {
  echo "cluster/rackspace/util.sh: Uploading to Cloud Files"
  ${SWIFTLY_CMD} put -i ${SERVER_BINARY_TAR} \
    ${CLOUDFILES_CONTAINER}/${CONTAINER_PREFIX}/kubernetes-server-linux-amd64.tar.gz > /dev/null 2>&1
  echo "Release pushed."
}
prep_known_tokens() {
  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
    generate_kubelet_tokens ${NODE_NAMES[i]}
    cat ${KUBE_TEMP}/${NODE_NAMES[i]}_tokens.csv >> ${KUBE_TEMP}/known_tokens.csv
  done

  # Generate tokens for other "service accounts". Append to known_tokens.
  #
  # NB: If this list ever changes, this script actually has to
  # change to detect the existence of this file, kill any deleted
  # old tokens and add any new tokens (to handle the upgrade case).
  local -r service_accounts=("system:scheduler" "system:controller_manager" "system:logging" "system:monitoring" "system:dns")
  for account in "${service_accounts[@]}"; do
    echo "$(create_token),${account},${account}" >> ${KUBE_TEMP}/known_tokens.csv
  done

  generate_admin_token
}
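
# The resulting known_tokens.csv holds one "token,user,group" line per
# account, e.g. (token values are illustrative):
#
#   abcdef0123456789,system:scheduler,system:scheduler
#   9876543210fedcba,system:dns,system:dns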
rax-boot-master() {
  DISCOVERY_URL=$(curl https://discovery.etcd.io/new)
  # The URL looks like https://discovery.etcd.io/<id>; keep just the ID.
  DISCOVERY_ID=$(echo "${DISCOVERY_URL}" | cut -f 4 -d /)
  echo "cluster/rackspace/util.sh: etcd discovery URL: ${DISCOVERY_URL}"

  # Copy cloud-config to KUBE_TEMP and work some sed magic.
  sed -e "s|DISCOVERY_ID|${DISCOVERY_ID}|" \
      -e "s|CLOUD_FILES_URL|${RELEASE_TMP_URL//&/\\&}|" \
      -e "s|KUBE_USER|${KUBE_USER}|" \
      -e "s|KUBE_PASSWORD|${KUBE_PASSWORD}|" \
      -e "s|SERVICE_CLUSTER_IP_RANGE|${SERVICE_CLUSTER_IP_RANGE}|" \
      -e "s|OS_AUTH_URL|${OS_AUTH_URL}|" \
      -e "s|OS_USERNAME|${OS_USERNAME}|" \
      -e "s|OS_PASSWORD|${OS_PASSWORD}|" \
      -e "s|OS_TENANT_NAME|${OS_TENANT_NAME}|" \
      -e "s|OS_REGION_NAME|${OS_REGION_NAME}|" \
      $(dirname $0)/rackspace/cloud-config/master-cloud-config.yaml > $KUBE_TEMP/master-cloud-config.yaml

  MASTER_BOOT_CMD="nova boot \
    --key-name ${SSH_KEY_NAME} \
    --flavor ${KUBE_MASTER_FLAVOR} \
    --image ${KUBE_IMAGE} \
    --meta ${MASTER_TAG} \
    --meta ETCD=${DISCOVERY_ID} \
    --user-data ${KUBE_TEMP}/master-cloud-config.yaml \
    --config-drive true \
    --nic net-id=${NETWORK_UUID} \
    ${MASTER_NAME}"

  echo "cluster/rackspace/util.sh: Booting ${MASTER_NAME} with following command:"
  echo -e "\t$MASTER_BOOT_CMD"
  $MASTER_BOOT_CMD
}
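
# Boot progress can be followed with the nova CLI while cloud-init runs,
# e.g. (illustrative):
#
#   nova list | grep "${MASTER_NAME}"    # watch for status ACTIVE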
rax-boot-nodes() {
  cp $(dirname $0)/rackspace/cloud-config/node-cloud-config.yaml \
    ${KUBE_TEMP}/node-cloud-config.yaml

  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
    get_tokens_from_csv ${NODE_NAMES[i]}

    sed -e "s|DISCOVERY_ID|${DISCOVERY_ID}|" \
        -e "s|CLOUD_FILES_URL|${RELEASE_TMP_URL//&/\\&}|" \
        -e "s|DNS_SERVER_IP|${DNS_SERVER_IP:-}|" \
        -e "s|DNS_DOMAIN|${DNS_DOMAIN:-}|" \
        -e "s|ENABLE_CLUSTER_DNS|${ENABLE_CLUSTER_DNS:-false}|" \
        -e "s|ENABLE_NODE_LOGGING|${ENABLE_NODE_LOGGING:-false}|" \
        -e "s|INDEX|$((i + 1))|g" \
        -e "s|KUBELET_TOKEN|${KUBELET_TOKEN}|" \
        -e "s|KUBE_NETWORK|${KUBE_NETWORK}|" \
        -e "s|KUBE_PROXY_TOKEN|${KUBE_PROXY_TOKEN}|" \
        -e "s|LOGGING_DESTINATION|${LOGGING_DESTINATION:-}|" \
        $(dirname $0)/rackspace/cloud-config/node-cloud-config.yaml > $KUBE_TEMP/node-cloud-config-$((i + 1)).yaml

    NODE_BOOT_CMD="nova boot \
      --key-name ${SSH_KEY_NAME} \
      --flavor ${KUBE_NODE_FLAVOR} \
      --image ${KUBE_IMAGE} \
      --meta ${NODE_TAG} \
      --user-data ${KUBE_TEMP}/node-cloud-config-$((i + 1)).yaml \
      --config-drive true \
      --nic net-id=${NETWORK_UUID} \
      ${NODE_NAMES[$i]}"

    echo "cluster/rackspace/util.sh: Booting ${NODE_NAMES[$i]} with following command:"
    echo -e "\t$NODE_BOOT_CMD"
    $NODE_BOOT_CMD
  done
}
rax-nova-network() {
  if ! nova network-list | grep $NOVA_NETWORK_LABEL > /dev/null 2>&1; then
    SAFE_CIDR=$(echo $NOVA_NETWORK_CIDR | tr -d '\\')
    NETWORK_CREATE_CMD="nova network-create $NOVA_NETWORK_LABEL $SAFE_CIDR"
    echo "cluster/rackspace/util.sh: Creating cloud network with following command:"
    echo -e "\t${NETWORK_CREATE_CMD}"
    $NETWORK_CREATE_CMD
  else
    echo "cluster/rackspace/util.sh: Using existing cloud network $NOVA_NETWORK_LABEL"
  fi
}
detect-nodes() {
  KUBE_NODE_IP_ADDRESSES=()
  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
    local node_ip=$(nova show --minimal ${NODE_NAMES[$i]} \
      | grep accessIPv4 | awk '{print $4}')
    echo "cluster/rackspace/util.sh: Found ${NODE_NAMES[$i]} at ${node_ip}"
    KUBE_NODE_IP_ADDRESSES+=("${node_ip}")
  done

  if [ -z "$KUBE_NODE_IP_ADDRESSES" ]; then
    echo "cluster/rackspace/util.sh: Could not detect Kubernetes nodes. Make sure you've launched a cluster with 'kube-up.sh'"
    exit 1
  fi
}
detect-master() {
  KUBE_MASTER=${MASTER_NAME}

  echo "Waiting for ${MASTER_NAME} IP Address."
  echo
  echo "  This will continually check to see if the master node has an IP address."
  echo

  KUBE_MASTER_IP=$(nova show $KUBE_MASTER --minimal | grep accessIPv4 | awk '{print $4}')
  while [ "${KUBE_MASTER_IP-|}" == "|" ]; do
    KUBE_MASTER_IP=$(nova show $KUBE_MASTER --minimal | grep accessIPv4 | awk '{print $4}')
    printf "."
    sleep 2
  done

  echo "${KUBE_MASTER} IP Address is ${KUBE_MASTER_IP}"
}
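
# Note on parsing: `nova show --minimal` prints a table row like
#
#   | accessIPv4 | 10.1.2.3 |
#
# so awk's $4 is the address. This is a sketch of the expected output; the
# exact table layout depends on the novaclient version.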
# $1 should be the network you would like to get an IP address for.
detect-master-nova-net() {
  KUBE_MASTER=${MASTER_NAME}
  MASTER_IP=$(nova show $KUBE_MASTER --minimal | grep $1 | awk '{print $5}')
}
kube-up() {
  SCRIPT_DIR=$(CDPATH="" cd $(dirname $0); pwd)

  rackspace-set-vars
  ensure_dev_container
  copy_dev_tarballs

  # Find the release to use. Generally it will be passed when doing a 'prod'
  # install and will default to the release/config.sh version when doing a
  # developer up.
  find-object-url

  # Create a temp directory to hold scripts that will be uploaded to master/nodes.
  KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
  trap "rm -rf ${KUBE_TEMP}" EXIT

  load-or-gen-kube-basicauth
  python2.7 $(dirname $0)/../third_party/htpasswd/htpasswd.py -b -c ${KUBE_TEMP}/htpasswd $KUBE_USER $KUBE_PASSWORD
  HTPASSWD=$(cat ${KUBE_TEMP}/htpasswd)

  rax-nova-network
  NETWORK_UUID=$(nova network-list | grep -i ${NOVA_NETWORK_LABEL} | awk '{print $2}')

  # Create and upload ssh key if necessary.
  rax-ssh-key

  echo "cluster/rackspace/util.sh: Starting Cloud Servers"
  prep_known_tokens

  rax-boot-master
  rax-boot-nodes

  detect-master

  # TODO: Look for a better way to get the known_tokens to the master. This is
  # needed over file injection since the files were too large on a 4-node cluster.
  scp -o StrictHostKeyChecking=no -i ~/.ssh/${SSH_KEY_NAME} ${KUBE_TEMP}/known_tokens.csv core@${KUBE_MASTER_IP}:/home/core/known_tokens.csv
  sleep 2
  ssh -o StrictHostKeyChecking=no -i ~/.ssh/${SSH_KEY_NAME} core@${KUBE_MASTER_IP} sudo /usr/bin/mkdir -p /var/lib/kube-apiserver
  ssh -o StrictHostKeyChecking=no -i ~/.ssh/${SSH_KEY_NAME} core@${KUBE_MASTER_IP} sudo mv /home/core/known_tokens.csv /var/lib/kube-apiserver/known_tokens.csv
  ssh -o StrictHostKeyChecking=no -i ~/.ssh/${SSH_KEY_NAME} core@${KUBE_MASTER_IP} sudo chown root.root /var/lib/kube-apiserver/known_tokens.csv
  ssh -o StrictHostKeyChecking=no -i ~/.ssh/${SSH_KEY_NAME} core@${KUBE_MASTER_IP} sudo systemctl restart kube-apiserver

  FAIL=0
  for job in $(jobs -p); do
    wait $job || let "FAIL+=1"
  done
  if (( $FAIL != 0 )); then
    echo "${FAIL} commands failed. Exiting."
    exit 2
  fi

  echo "Waiting for cluster initialization."
  echo
  echo "  This will continually check to see if the API for kubernetes is reachable."
  echo "  This might loop forever if there was some uncaught error during start"
  echo "  up."
  echo

  # This will fail until the apiserver salt is updated.
  until curl --insecure --user ${KUBE_USER}:${KUBE_PASSWORD} --max-time 5 \
      --fail --output /dev/null --silent https://${KUBE_MASTER_IP}/healthz; do
    printf "."
    sleep 2
  done
  echo "Kubernetes cluster created."

  export KUBE_CERT=""
  export KUBE_KEY=""
  export CA_CERT=""
  export CONTEXT="rackspace_${INSTANCE_PREFIX}"

  create-kubeconfig

  # Don't bail on errors, we want to be able to print some info.
  set +e

  detect-nodes

  # Ensures KUBECONFIG is set.
  get-kubeconfig-basicauth

  echo "All nodes may not be online yet, this is okay."
  echo
  echo "Kubernetes cluster is running. The master is running at:"
  echo
  echo "  https://${KUBE_MASTER_IP}"
  echo
  echo "The user name and password to use is located in ${KUBECONFIG:-$DEFAULT_KUBECONFIG}."
  echo
  echo "Security note: The server above uses a self-signed certificate. This is"
  echo "  subject to \"Man in the middle\" type attacks."
  echo
}
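
# Once kube-up() returns, a minimal smoke test is to list the registered
# nodes with kubectl (a sketch; assumes kubectl is built, on PATH, and
# pointed at the kubeconfig written by create-kubeconfig):
#
#   kubectl get nodes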
# Perform preparations required to run e2e tests.
function prepare-e2e() {
  echo "Rackspace doesn't need special preparations for e2e tests"
}