#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# A library of helper functions that each provider hosting Kubernetes
# must implement to use cluster/kube-*.sh scripts.

[ -n "${UTIL_SH_DEBUG+x}" ] && set -x

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
readonly ROOT=$(dirname "${BASH_SOURCE}")
source "$ROOT/${KUBE_CONFIG_FILE:-"config-default.sh"}"
source "$KUBE_ROOT/cluster/common.sh"

export LIBVIRT_DEFAULT_URI=qemu:///system
export SERVICE_ACCOUNT_LOOKUP=${SERVICE_ACCOUNT_LOOKUP:-false}
export ADMISSION_CONTROL=${ADMISSION_CONTROL:-NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota}

readonly POOL=kubernetes
readonly POOL_PATH=/var/lib/libvirt/images/kubernetes

# Use a group command, not a subshell: an 'exit' inside '( ... )' only
# terminates the subshell and would let the script keep running.
[ ! -d "${POOL_PATH}" ] && { echo "$POOL_PATH does not exist" >&2 ; exit 1 ; }

# join <delim> <list...>
#
# Concatenates the list elements with the delimiter passed as first parameter
#
# Ex: join , a b c
#  -> a,b,c
function join {
  local IFS="$1"
  shift
  echo "$*"
}

# Must ensure that the following ENV vars are set
function detect-master {
  KUBE_MASTER_IP=$MASTER_IP
  KUBE_MASTER=$MASTER_NAME
  export KUBERNETES_MASTER=http://$KUBE_MASTER_IP:8080
  echo "KUBE_MASTER_IP: $KUBE_MASTER_IP"
  echo "KUBE_MASTER: $KUBE_MASTER"
}

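# Example output (hypothetical values; assumes config-default.sh set
# MASTER_IP=192.168.10.1 and MASTER_NAME=kubernetes-master):
#
#   detect-master
#   # KUBE_MASTER_IP: 192.168.10.1
#   # KUBE_MASTER: kubernetes-master
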
# Get node IP addresses and store in KUBE_NODE_IP_ADDRESSES[]
function detect-nodes {
  KUBE_NODE_IP_ADDRESSES=("${NODE_IPS[@]}")
}

function generate_certs {
  node_names=("${@}")

  # Root CA
  tempdir=$(mktemp -d)
  CA_KEY=${CA_KEY:-"$tempdir/ca-key.pem"}
  CA_CERT=${CA_CERT:-"$tempdir/ca.pem"}
  openssl genrsa -out "${CA_KEY}" 2048 2>/dev/null
  openssl req -x509 -new -nodes -key "${CA_KEY}" -days 10000 -out "${CA_CERT}" -subj "/CN=kube-ca" 2>/dev/null

  # API server key pair
  KUBE_KEY=${KUBE_KEY:-"$tempdir/apiserver-key.pem"}
  API_SERVER_CERT_REQ=${API_SERVER_CERT_REQ:-"$tempdir/apiserver.csr"}
  openssl genrsa -out "${KUBE_KEY}" 2048 2>/dev/null
  KUBERNETES_SVC=${SERVICE_CLUSTER_IP_RANGE%.*}.1 openssl req -new -key "${KUBE_KEY}" -out "${API_SERVER_CERT_REQ}" -subj "/CN=kube-apiserver" -config cluster/libvirt-coreos/openssl.cnf 2>/dev/null
  KUBE_CERT=${KUBE_CERT:-"$tempdir/apiserver.pem"}
  KUBERNETES_SVC=${SERVICE_CLUSTER_IP_RANGE%.*}.1 openssl x509 -req -in "${API_SERVER_CERT_REQ}" -CA "${CA_CERT}" -CAkey "${CA_KEY}" -CAcreateserial -out "${KUBE_CERT}" -days 365 -extensions v3_req -extfile cluster/libvirt-coreos/openssl.cnf 2>/dev/null

  # Copy apiserver and controller TLS assets
  mkdir -p "$POOL_PATH/kubernetes/certs"
  cp "${KUBE_CERT}" "$POOL_PATH/kubernetes/certs"
  cp "${KUBE_KEY}" "$POOL_PATH/kubernetes/certs"
  cp "${CA_CERT}" "$POOL_PATH/kubernetes/certs"

  # Generate node certificates
  for (( i = 0 ; i < $NUM_NODES ; i++ )); do
    openssl genrsa -out "$tempdir/${node_names[$i]}-node-key.pem" 2048 2>/dev/null
    cp "$tempdir/${node_names[$i]}-node-key.pem" "$POOL_PATH/kubernetes/certs"
    WORKER_IP=${NODE_IPS[$i]} openssl req -new -key "$tempdir/${node_names[$i]}-node-key.pem" -out "$tempdir/${node_names[$i]}-node.csr" -subj "/CN=${node_names[$i]}" -config cluster/libvirt-coreos/node-openssl.cnf 2>/dev/null
    WORKER_IP=${NODE_IPS[$i]} openssl x509 -req -in "$tempdir/${node_names[$i]}-node.csr" -CA "${CA_CERT}" -CAkey "${CA_KEY}" -CAcreateserial -out "$tempdir/${node_names[$i]}-node.pem" -days 365 -extensions v3_req -extfile cluster/libvirt-coreos/node-openssl.cnf 2>/dev/null
    cp "$tempdir/${node_names[$i]}-node.pem" "$POOL_PATH/kubernetes/certs"
  done

  echo "TLS assets generated..."
}

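# A quick sanity check of the generated assets (a sketch; file names assume
# the defaults above, and the exact -subject output format depends on the
# openssl version):
#
#   openssl x509 -in "$POOL_PATH/kubernetes/certs/apiserver.pem" -noout -subject
#   # subject= /CN=kube-apiserver
#   openssl verify -CAfile "$POOL_PATH/kubernetes/certs/ca.pem" \
#     "$POOL_PATH/kubernetes/certs/apiserver.pem"
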
# Verify prereqs on host machine
function verify-prereqs {
  if ! which virsh >/dev/null; then
    echo "Can't find virsh in PATH, please fix and retry." >&2
    exit 1
  fi
  if ! virsh nodeinfo >/dev/null; then
    exit 1
  fi
  if [[ "$(</sys/kernel/mm/ksm/run)" -ne "1" ]]; then
    echo "KSM is not enabled" >&2
    echo "Enabling it would reduce the memory footprint of large clusters" >&2
    if [[ -t 0 ]]; then
      read -t 5 -n 1 -p "Do you want to enable KSM (requires root password) (y/n)? " answer
      echo ""
      if [[ "$answer" == 'y' ]]; then
        su -c 'echo 1 > /sys/kernel/mm/ksm/run'
      fi
    else
      echo "You can enable it with (as root):" >&2
      echo "" >&2
      echo "  echo 1 > /sys/kernel/mm/ksm/run" >&2
      echo "" >&2
    fi
  fi
}

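# KSM state can also be inspected by hand; the file read above holds 1 when
# kernel samepage merging is enabled:
#
#   cat /sys/kernel/mm/ksm/run
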
# Destroy the libvirt storage pool and all the images inside
#
# If 'keep_base_image' is passed as first parameter,
# the base image is kept, as well as the storage pool.
# All the other images are deleted.
function destroy-pool {
  virsh pool-info $POOL >/dev/null 2>&1 || return

  rm -rf "$POOL_PATH"/kubernetes/*
  rm -rf "$POOL_PATH"/kubernetes_config*/*
  local vol
  virsh vol-list $POOL | awk 'NR>2 && !/^$/ && $1 ~ /^kubernetes/ {print $1}' | \
      while read vol; do
        virsh vol-delete $vol --pool $POOL
      done

  [[ "$1" == 'keep_base_image' ]] && return

  set +e
  virsh vol-delete coreos_base.img --pool $POOL
  virsh pool-destroy $POOL
  rmdir "$POOL_PATH"
  set -e
}

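# Example: delete all cluster volumes but keep the pool and the (large)
# CoreOS base image, so the next kube-up does not have to re-download it:
#
#   destroy-pool keep_base_image
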
# Creates the libvirt storage pool and populate it with
# - the CoreOS base image
# - the kubernetes binaries
function initialize-pool {
  mkdir -p "$POOL_PATH"
  if ! virsh pool-info $POOL >/dev/null 2>&1; then
    virsh pool-create-as $POOL dir --target "$POOL_PATH"
  fi

  wget -N -P "$ROOT" http://${COREOS_CHANNEL:-alpha}.release.core-os.net/amd64-usr/current/coreos_production_qemu_image.img.bz2
  if [[ "$ROOT/coreos_production_qemu_image.img.bz2" -nt "$POOL_PATH/coreos_base.img" ]]; then
    bunzip2 -f -k "$ROOT/coreos_production_qemu_image.img.bz2"
    virsh vol-delete coreos_base.img --pool $POOL 2> /dev/null || true
  fi
  if ! virsh vol-list $POOL | grep -q coreos_base.img; then
    virsh vol-create-as $POOL coreos_base.img 10G --format qcow2
    virsh vol-upload coreos_base.img "$ROOT/coreos_production_qemu_image.img" --pool $POOL
  fi

  mkdir -p "$POOL_PATH/kubernetes"
  kube-push-internal

  mkdir -p "$POOL_PATH/kubernetes/manifests"
  if [[ "$ENABLE_NODE_LOGGING" == "true" ]]; then
    if [[ "$LOGGING_DESTINATION" == "elasticsearch" ]]; then
      cp "$KUBE_ROOT/cluster/saltbase/salt/fluentd-es/fluentd-es.manifest" "$POOL_PATH/kubernetes/manifests"
    elif [[ "$LOGGING_DESTINATION" == "gcp" ]]; then
      cp "$KUBE_ROOT/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.manifest" "$POOL_PATH/kubernetes/manifests"
    fi
  fi

  mkdir -p "$POOL_PATH/kubernetes/addons"
  if [[ "$ENABLE_CLUSTER_DNS" == "true" ]]; then
    render-template "$ROOT/namespace.yaml" > "$POOL_PATH/kubernetes/addons/namespace.yaml"
    render-template "$ROOT/skydns-svc.yaml" > "$POOL_PATH/kubernetes/addons/skydns-svc.yaml"
    render-template "$ROOT/skydns-rc.yaml" > "$POOL_PATH/kubernetes/addons/skydns-rc.yaml"
  fi

  virsh pool-refresh $POOL
}

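# Once initialized, the pool contents can be inspected with virsh, e.g.:
#
#   virsh pool-info kubernetes
#   virsh vol-list kubernetes
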
function destroy-network {
  set +e
  virsh net-destroy kubernetes_global
  virsh net-destroy kubernetes_pods
  set -e
}

function initialize-network {
  virsh net-create "$ROOT/network_kubernetes_global.xml"
  virsh net-create "$ROOT/network_kubernetes_pods.xml"
}

# Expand shell variable references in the template file passed as $1.
# The file content is eval'ed, so it must come from a trusted source.
function render-template {
  eval "echo \"$(cat "$1")\""
}

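# A minimal sketch of how the templating behaves (hypothetical template file;
# any variable visible at call time can be referenced):
#
#   echo 'master: ${MASTER_IP}' > /tmp/example.yml
#   MASTER_IP=192.168.10.1 render-template /tmp/example.yml
#   # -> master: 192.168.10.1
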
function wait-cluster-readiness {
  echo "Wait for cluster readiness"
  local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"

  local timeout=120
  while [[ $timeout -ne 0 ]]; do
    nb_ready_nodes=$("${kubectl}" get nodes -o go-template="{{range.items}}{{range.status.conditions}}{{.type}}{{end}}:{{end}}" --api-version=v1 2>/dev/null | tr ':' '\n' | grep -c Ready || true)
    echo "Ready nodes: $nb_ready_nodes / $NUM_NODES"
    if [[ "$nb_ready_nodes" -eq "$NUM_NODES" ]]; then
      return 0
    fi

    timeout=$(($timeout-1))
    sleep .5
  done

  return 1
}

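# The loop polls every 0.5s for up to 120 iterations (roughly a minute, plus
# kubectl latency per attempt). The same readiness check can be run by hand:
#
#   cluster/kubectl.sh get nodes
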
# Instantiate a kubernetes cluster
function kube-up {
  detect-master
  detect-nodes
  initialize-pool keep_base_image
  generate_certs "${NODE_NAMES[@]}"
  initialize-network

  readonly ssh_keys="$(cat ~/.ssh/*.pub | sed 's/^/ - /')"
  readonly kubernetes_dir="$POOL_PATH/kubernetes"

  local i
  for (( i = 0 ; i <= $NUM_NODES ; i++ )); do
    if [[ $i -eq $NUM_NODES ]]; then
      etcd2_initial_cluster[$i]="${MASTER_NAME}=http://${MASTER_IP}:2380"
    else
      etcd2_initial_cluster[$i]="${NODE_NAMES[$i]}=http://${NODE_IPS[$i]}:2380"
    fi
  done
  etcd2_initial_cluster=$(join , "${etcd2_initial_cluster[@]}")
  readonly machines=$(join , "${KUBE_NODE_IP_ADDRESSES[@]}")

  for (( i = 0 ; i <= $NUM_NODES ; i++ )); do
    if [[ $i -eq $NUM_NODES ]]; then
      type=master
      name=$MASTER_NAME
      public_ip=$MASTER_IP
    else
      type=node-$(printf "%02d" $i)
      name=${NODE_NAMES[$i]}
      public_ip=${NODE_IPS[$i]}
    fi
    image=$name.img
    config=kubernetes_config_$type

    virsh vol-create-as $POOL $image 10G --format qcow2 --backing-vol coreos_base.img --backing-vol-format qcow2

    mkdir -p "$POOL_PATH/$config/openstack/latest"
    render-template "$ROOT/user_data.yml" > "$POOL_PATH/$config/openstack/latest/user_data"
    virsh pool-refresh $POOL

    domain_xml=$(mktemp)
    render-template $ROOT/coreos.xml > $domain_xml
    virsh create $domain_xml
    rm $domain_xml
  done

  export KUBE_SERVER="http://192.168.10.1:8080"
  export CONTEXT="libvirt-coreos"
  create-kubeconfig

  wait-cluster-readiness

  echo "Kubernetes cluster is running. The master is running at:"
  echo
  echo "  http://${KUBE_MASTER_IP}:8080"
  echo
  echo "You can control the Kubernetes cluster with: 'cluster/kubectl.sh'"
  echo "You can connect on the master with: 'ssh core@${KUBE_MASTER_IP}'"
}

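# After kube-up returns, the kubeconfig context created above can be used
# directly, e.g.:
#
#   cluster/kubectl.sh get nodes
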
# Delete a kubernetes cluster
function kube-down {
  virsh list | awk 'NR>2 && !/^$/ && $2 ~ /^kubernetes/ {print $2}' | \
      while read dom; do
        virsh destroy $dom
      done
  destroy-pool keep_base_image
  destroy-network
}

# The kubernetes binaries are pushed to a host directory which is exposed to the VM
function upload-server-tars {
  tar -x -C "$POOL_PATH/kubernetes" -f "$SERVER_BINARY_TAR" kubernetes
  rm -rf "$POOL_PATH/kubernetes/bin"
  mv "$POOL_PATH/kubernetes/kubernetes/server/bin" "$POOL_PATH/kubernetes/bin"
  rm -fr "$POOL_PATH/kubernetes/kubernetes"
}

# Update a kubernetes cluster with latest source
function kube-push {
  kube-push-internal
  ssh-to-node "$MASTER_NAME" "sudo systemctl restart kube-apiserver kube-controller-manager kube-scheduler"
  for ((i=0; i < NUM_NODES; i++)); do
    ssh-to-node "${NODE_NAMES[$i]}" "sudo systemctl restart kubelet kube-proxy"
  done
  wait-cluster-readiness
}

function kube-push-internal {
  case "${KUBE_PUSH:-release}" in
    release)
      kube-push-release;;
    local)
      kube-push-local;;
    *)
      echo "The only known push methods are \"release\" to use the release tarball or \"local\" to use the binaries built by make. KUBE_PUSH is set to \"$KUBE_PUSH\"" >&2
      return 1;;
  esac
}

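# Example: push locally built binaries instead of the release tarball (a
# sketch; assumes 'make' has populated _output/local/go/bin and that the
# top-level cluster/kube-push.sh entry point is used):
#
#   KUBE_PUSH=local cluster/kube-push.sh
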
function kube-push-release {
  find-release-tars
  upload-server-tars
}

function kube-push-local {
  # The glob must stay outside the quotes, otherwise it is not expanded.
  rm -rf "$POOL_PATH/kubernetes/bin"/*
  mkdir -p "$POOL_PATH/kubernetes/bin"
  cp "${KUBE_ROOT}/_output/local/go/bin"/* "$POOL_PATH/kubernetes/bin"
}

# Execute prior to running tests to build a release if required for env
function test-build-release {
  echo "TODO"
}

# Execute prior to running tests to initialize required structure
function test-setup {
  "${KUBE_ROOT}/cluster/kube-up.sh"
}

# Execute after running tests to perform any required clean-up
function test-teardown {
  kube-down
}

# SSH to a node by name or IP ($1) and run a command ($2).
function ssh-to-node {
  local node="$1"
  local cmd="$2"
  local machine

  if [[ "$node" == "$MASTER_IP" ]] || [[ "$node" =~ ^"$NODE_IP_BASE" ]]; then
    machine="$node"
  elif [[ "$node" == "$MASTER_NAME" ]]; then
    machine="$MASTER_IP"
  else
    for ((i=0; i < NUM_NODES; i++)); do
      if [[ "$node" == "${NODE_NAMES[$i]}" ]]; then
        machine="${NODE_IPS[$i]}"
        break
      fi
    done
  fi
  if [[ -z "$machine" ]]; then
    echo "$node is an unknown machine to ssh to" >&2
    return 1
  fi

  ssh -o ConnectTimeout=30 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ControlMaster=no "core@$machine" "$cmd"
}

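# Example: the node argument may be a name or an IP, so these two calls are
# equivalent:
#
#   ssh-to-node "$MASTER_NAME" "sudo systemctl status kube-apiserver"
#   ssh-to-node "$MASTER_IP"   "sudo systemctl status kube-apiserver"
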
# Perform preparations required to run e2e tests
function prepare-e2e() {
  echo "libvirt-coreos doesn't need special preparations for e2e tests" 1>&2
}