#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# A library of helper functions that each provider hosting Kubernetes
# must implement to use cluster/kube-*.sh scripts.

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/vagrant/${KUBE_CONFIG_FILE-"config-default.sh"}"
source "${KUBE_ROOT}/cluster/common.sh"

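# Detect the master's IP and export it as KUBE_MASTER_IP (taken directly
# from the MASTER_IP value set by the sourced config).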
function detect-master () {
  KUBE_MASTER_IP=$MASTER_IP
  echo "KUBE_MASTER_IP: ${KUBE_MASTER_IP}" 1>&2
}

# Get node IP addresses and store in KUBE_NODE_IP_ADDRESSES[]
function detect-nodes {
  echo "Nodes already detected" 1>&2
  KUBE_NODE_IP_ADDRESSES=("${NODE_IPS[@]}")
}

# Verify prereqs on host machine. Also exports USING_KUBE_SCRIPTS=true so
# that our Vagrantfile doesn't error out.
function verify-prereqs {
  for x in vagrant; do
    if ! which "$x" >/dev/null; then
      echo "Can't find $x in PATH, please fix and retry."
      exit 1
    fi
  done

  local vagrant_plugins=$(vagrant plugin list | sed '-es% .*$%%' '-es%  *% %g' | tr ' ' $'\n')

  local providers=(
      # Format is:
      #   provider_ctl_executable vagrant_provider_name vagrant_provider_plugin_re
      # either provider_ctl_executable or vagrant_provider_plugin_re can
      # be blank (i.e., '') if none is needed by Vagrant (see, e.g.,
      # virtualbox entry)
      '' vmware_fusion vagrant-vmware-fusion
      '' vmware_workstation vagrant-vmware-workstation
      prlctl parallels vagrant-parallels
      VBoxManage virtualbox ''
      virsh libvirt vagrant-libvirt
      '' vsphere vagrant-vsphere
  )

  local provider_found=''
  local provider_bin
  local provider_name
  local provider_plugin_re

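  # Walk the flat providers array three entries at a time (executable,
  # provider name, plugin regex) and stop at the first provider whose
  # executable is on PATH and whose Vagrant plugin, if any, is installed.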
  while [ "${#providers[@]}" -gt 0 ]; do
    provider_bin=${providers[0]}
    provider_name=${providers[1]}
    provider_plugin_re=${providers[2]}
    providers=("${providers[@]:3}")

    # If the provider is explicitly set, look only for that provider
    if [ -n "${VAGRANT_DEFAULT_PROVIDER:-}" ] \
       && [ "${VAGRANT_DEFAULT_PROVIDER}" != "${provider_name}" ]; then
      continue
    fi

    if ([ -z "${provider_bin}" ] \
        || which "${provider_bin}" >/dev/null 2>&1) \
        && ([ -z "${provider_plugin_re}" ] \
            || [ -n "$(echo "${vagrant_plugins}" | grep -E "^${provider_plugin_re}$")" ]); then
      provider_found="${provider_name}"
      # Stop after finding the first viable provider
      break
    fi
  done

  if [ -z "${provider_found}" ]; then
    if [ -n "${VAGRANT_DEFAULT_PROVIDER:-}" ]; then
      echo "Can't find the necessary components for the ${VAGRANT_DEFAULT_PROVIDER} vagrant provider."
      echo "Possible reasons could be: "
      echo -e "\t- vmrun utility is not in your path"
      echo -e "\t- Vagrant plugin was not found."
      echo -e "\t- VAGRANT_DEFAULT_PROVIDER is set, but not found."
      echo "Please fix and retry."
    else
      echo "Can't find the necessary components for any viable vagrant providers (e.g., virtualbox), please fix and retry."
    fi
    exit 1
  fi

  # Set VAGRANT_CWD to KUBE_ROOT so that we find the right Vagrantfile no
  # matter what directory the tools are called from.
  export VAGRANT_CWD="${KUBE_ROOT}"

  export USING_KUBE_SCRIPTS=true
}

# Create a temp dir that'll be deleted at the end of this bash session.
#
# Vars set:
#   KUBE_TEMP
function ensure-temp-dir {
  if [[ -z ${KUBE_TEMP-} ]]; then
    export KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
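    # The trap removes the temp dir automatically when this shell exits.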
    trap 'rm -rf "${KUBE_TEMP}"' EXIT
  fi
}

# Create a set of provision scripts for the master and each of the nodes
function create-provision-scripts {
  ensure-temp-dir

  (
    echo "#! /bin/bash"
    echo-kube-env
    echo "NODE_IP='${MASTER_IP}'"
    echo "CONTAINER_ADDR='${MASTER_CONTAINER_ADDR}'"
    echo "CONTAINER_NETMASK='${MASTER_CONTAINER_NETMASK}'"
    awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-utils.sh"
    awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-network-master.sh"
    awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-master.sh"
  ) > "${KUBE_TEMP}/master-start.sh"

  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
    (
      echo "#! /bin/bash"
      echo-kube-env
      echo "NODE_NAME=(${NODE_NAMES[$i]})"
      echo "NODE_IP='${NODE_IPS[$i]}'"
      echo "NODE_ID='$i'"
      echo "CONTAINER_ADDR='${NODE_CONTAINER_ADDRS[$i]}'"
      echo "CONTAINER_NETMASK='${NODE_CONTAINER_NETMASKS[$i]}'"
      awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-utils.sh"
      awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-network-node.sh"
      awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-node.sh"
    ) > "${KUBE_TEMP}/node-start-${i}.sh"
  done
}

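# Emit the shared cluster configuration as shell variable assignments; the
# output is prepended to each generated provision script above.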
function echo-kube-env() {
  echo "KUBE_ROOT=/vagrant"
  echo "INSTANCE_PREFIX='${INSTANCE_PREFIX}'"
  echo "MASTER_NAME='${INSTANCE_PREFIX}-master'"
  echo "MASTER_IP='${MASTER_IP}'"
  echo "NODE_NAMES=(${NODE_NAMES[@]})"
  echo "NODE_IPS=(${NODE_IPS[@]})"
  echo "CONTAINER_SUBNET='${CONTAINER_SUBNET}'"
  echo "CLUSTER_IP_RANGE='${CLUSTER_IP_RANGE}'"
  echo "MASTER_CONTAINER_SUBNET='${MASTER_CONTAINER_SUBNET}'"
  echo "NODE_CONTAINER_NETMASKS='${NODE_CONTAINER_NETMASKS[@]}'"
  echo "NODE_CONTAINER_SUBNETS=(${NODE_CONTAINER_SUBNETS[@]})"
  echo "SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'"
  echo "MASTER_USER='${MASTER_USER}'"
  echo "MASTER_PASSWD='${MASTER_PASSWD}'"
  echo "KUBE_USER='${KUBE_USER}'"
  echo "KUBE_PASSWORD='${KUBE_PASSWORD}'"
  echo "KUBE_BEARER_TOKEN='${KUBE_BEARER_TOKEN}'"
  echo "ENABLE_CLUSTER_MONITORING='${ENABLE_CLUSTER_MONITORING}'"
  echo "ENABLE_CLUSTER_LOGGING='${ENABLE_CLUSTER_LOGGING:-false}'"
  echo "ELASTICSEARCH_LOGGING_REPLICAS='${ELASTICSEARCH_LOGGING_REPLICAS:-1}'"
  echo "ENABLE_NODE_LOGGING='${ENABLE_NODE_LOGGING:-false}'"
  echo "ENABLE_CLUSTER_UI='${ENABLE_CLUSTER_UI}'"
  echo "LOGGING_DESTINATION='${LOGGING_DESTINATION:-}'"
  echo "ENABLE_CLUSTER_DNS='${ENABLE_CLUSTER_DNS:-false}'"
  echo "DNS_SERVER_IP='${DNS_SERVER_IP:-}'"
  echo "DNS_DOMAIN='${DNS_DOMAIN:-}'"
  echo "DNS_REPLICAS='${DNS_REPLICAS:-}'"
  echo "RUNTIME_CONFIG='${RUNTIME_CONFIG:-}'"
  echo "ADMISSION_CONTROL='${ADMISSION_CONTROL:-}'"
  echo "DOCKER_OPTS='${EXTRA_DOCKER_OPTS:-}'"
  echo "VAGRANT_DEFAULT_PROVIDER='${VAGRANT_DEFAULT_PROVIDER:-}'"
  echo "KUBELET_TOKEN='${KUBELET_TOKEN:-}'"
  echo "KUBE_PROXY_TOKEN='${KUBE_PROXY_TOKEN:-}'"
  echo "MASTER_EXTRA_SANS='${MASTER_EXTRA_SANS:-}'"
  echo "ENABLE_CPU_CFS_QUOTA='${ENABLE_CPU_CFS_QUOTA}'"
  echo "NETWORK_PROVIDER='${NETWORK_PROVIDER:-}'"
  echo "OPENCONTRAIL_TAG='${OPENCONTRAIL_TAG:-}'"
  echo "OPENCONTRAIL_KUBERNETES_TAG='${OPENCONTRAIL_KUBERNETES_TAG:-}'"
  echo "OPENCONTRAIL_PUBLIC_SUBNET='${OPENCONTRAIL_PUBLIC_SUBNET:-}'"
  echo "E2E_STORAGE_TEST_ENVIRONMENT='${E2E_STORAGE_TEST_ENVIRONMENT:-}'"
  echo "CUSTOM_FEDORA_REPOSITORY_URL='${CUSTOM_FEDORA_REPOSITORY_URL:-}'"
}

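# Poll each VM until all of its required daemons are running, wait for every
# node to register with the apiserver, then confirm kubectl works against
# the master.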
function verify-cluster {
  # TODO: How does the user know the difference between "tak[ing] some
  # time" and "loop[ing] forever"? Can we give more specific feedback on
  # whether "an error" has occurred?
  echo "Each machine instance has been created/updated."
  echo "  Now waiting for the Salt provisioning process to complete on each machine."
  echo "  This can take some time based on your network, disk, and cpu speed."
  echo "  It is possible for an error to occur during Salt provisioning of the cluster, and this could loop forever."

  # verify master has all required daemons
  echo "Validating master"
  local machine="master"
  local -a required_processes=("kube-apiserver" "kube-scheduler" "kube-controller-manager" "kubelet" "docker")
  local validated="1"
  until [[ "$validated" == "0" ]]; do
    validated="0"
    for process in "${required_processes[@]}"; do
      vagrant ssh "${machine}" -c "pgrep -f ${process}" >/dev/null 2>&1 || {
        printf "."
        validated="1"
        sleep 2
      }
    done
  done

  # verify each node has all required daemons
  local i
  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
    echo "Validating ${VAGRANT_NODE_NAMES[$i]}"
    local machine=${VAGRANT_NODE_NAMES[$i]}
    local -a required_processes=("kube-proxy" "kubelet" "docker")
    local validated="1"
    until [[ "${validated}" == "0" ]]; do
      validated="0"
      for process in "${required_processes[@]}"; do
        vagrant ssh "${machine}" -c "pgrep -f ${process}" >/dev/null 2>&1 || {
          printf "."
          validated="1"
          sleep 2
        }
      done
    done
  done

  echo
  echo "Waiting for each node to be registered with cloud provider"
  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
    local validated="0"
    until [[ "$validated" == "1" ]]; do
      local nodes=$("${KUBE_ROOT}/cluster/kubectl.sh" get nodes -o name --api-version=v1)
      validated=$(echo $nodes | grep -c "${NODE_NAMES[i]}") || {
        printf "."
        sleep 2
        validated="0"
      }
    done
  done

  # By this time, all kube api calls should work, so no need to loop and retry.
  echo "Validating we can run kubectl commands."
  vagrant ssh master --command "kubectl get pods" || {
    echo "WARNING: kubectl to localhost failed. This could mean localhost is not bound to an IP"
  }

  (
    # ensures KUBECONFIG is set
    get-kubeconfig-basicauth
    get-kubeconfig-bearertoken
    echo
    echo "Kubernetes cluster is running."
    echo
    echo "The master is running at:"
    echo
    echo "  https://${MASTER_IP}"
    echo
    echo "Administer and visualize its resources using Cockpit:"
    echo
    echo "  https://${MASTER_IP}:9090"
    echo
    echo "For more information on Cockpit, visit http://cockpit-project.org"
    echo
    echo "The user name and password to use are located in ${KUBECONFIG}"
    echo
  )
}

# Instantiate a kubernetes cluster
function kube-up {
  load-or-gen-kube-basicauth
  load-or-gen-kube-bearertoken
  get-tokens
  create-provision-scripts

  vagrant up --no-parallel

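  # Copy the cluster certificates out of the master VM so that kubectl on
  # the host can authenticate to the apiserver.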
  export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt"
  export KUBE_KEY="/tmp/$RANDOM-kubecfg.key"
  export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt"
  export CONTEXT="vagrant"

  (
    umask 077
    vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.crt >"${KUBE_CERT}" 2>/dev/null
    vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.key >"${KUBE_KEY}" 2>/dev/null
    vagrant ssh master -- sudo cat /srv/kubernetes/ca.crt >"${CA_CERT}" 2>/dev/null

    # Update the user's kubeconfig to include credentials for this apiserver.
    create-kubeconfig
    create-kubeconfig-for-federation
  )

  verify-cluster
}

# Delete a kubernetes cluster
function kube-down {
  vagrant destroy -f
}

# Update a kubernetes cluster with latest source
function kube-push {
  get-kubeconfig-basicauth
  get-kubeconfig-bearertoken
  create-provision-scripts
  vagrant provision
}

# Execute prior to running tests to build a release if required for env
function test-build-release {
  # Make a release
  "${KUBE_ROOT}/build/release.sh"
}

# Execute prior to running tests to initialize required structure
function test-setup {
  "${KUBE_ROOT}/cluster/kube-up.sh"
  echo "Vagrant test setup complete" 1>&2
}

# Execute after running tests to perform any required clean-up
function test-teardown {
  kube-down
}

# Find the vagrant machine name based on the IP address
function find-vagrant-name-by-ip {
  local ip="$1"
  local ip_pattern="${NODE_IP_BASE}(.*)"

  # This is subtle. We map 10.245.2.2 -> node-1. We do this by matching a
  # regexp and using the capture to construct the name.
  [[ $ip =~ $ip_pattern ]] || {
    return 1
  }

  echo "node-$((${BASH_REMATCH[1]} - 1))"
}

# Find the vagrant machine name based on the host name of the node
function find-vagrant-name-by-node-name {
  local name="$1"
  if [[ "$name" == "${INSTANCE_PREFIX}-master" ]]; then
    echo "master"
    return $?
  fi
  local name_pattern="${INSTANCE_PREFIX}-node-(.*)"

  [[ $name =~ $name_pattern ]] || {
    return 1
  }

  echo "node-${BASH_REMATCH[1]}"
}

# SSH to a node by name or IP ($1) and run a command ($2).
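# Example: ssh-to-node "10.245.2.2" "uptime"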
function ssh-to-node {
  local node="$1"
  local cmd="$2"
  local machine
  machine=$(find-vagrant-name-by-ip $node) || true
  [[ -n ${machine-} ]] || machine=$(find-vagrant-name-by-node-name $node) || true
  [[ -n ${machine-} ]] || {
    echo "Cannot find machine to ssh to: $1"
    return 1
  }

  vagrant ssh "${machine}" -c "${cmd}"
}

# Perform preparations required to run e2e tests
function prepare-e2e() {
  echo "Vagrant doesn't need special preparations for e2e tests" 1>&2
}

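# Generate random bearer tokens for the kubelet and kube-proxy: read 128
# random bytes, base64-encode them, strip '=', '+', and '/', and keep the
# first 32 characters of the result.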
function get-tokens() {
  KUBELET_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
  KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
}