configure-vm.sh

#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

# Note that this script is also used by AWS; we include it and then override
# functions with AWS equivalents. Note `#+AWS_OVERRIDES_HERE` below.
# TODO(justinsb): Refactor into common script & GCE specific script?

# If we have any arguments at all, this is a push and not just setup.
is_push=$@

readonly KNOWN_TOKENS_FILE="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv"
readonly BASIC_AUTH_FILE="/srv/salt-overlay/salt/kube-apiserver/basic_auth.csv"

function ensure-basic-networking() {
  # Deal with GCE networking bring-up race. (We rely on DNS for a lot,
  # and it's just not worth doing a whole lot of startup work if this
  # isn't ready yet.)
  until getent hosts metadata.google.internal &>/dev/null; do
    echo 'Waiting for functional DNS (trying to resolve metadata.google.internal)...'
    sleep 3
  done
  until getent hosts $(hostname -f || echo _error_) &>/dev/null; do
    echo 'Waiting for functional DNS (trying to resolve my own FQDN)...'
    sleep 3
  done
  until getent hosts $(hostname -i || echo _error_) &>/dev/null; do
    echo 'Waiting for functional DNS (trying to resolve my own IP)...'
    sleep 3
  done

  echo "Networking functional on $(hostname) ($(hostname -i))"
}

# A hookpoint for installing any needed packages
ensure-packages() {
  :
}

# A hookpoint for setting up local devices
ensure-local-disks() {
  for ssd in /dev/disk/by-id/google-local-ssd-*; do
    if [ -e "$ssd" ]; then
      ssdnum=`echo $ssd | sed -e 's/\/dev\/disk\/by-id\/google-local-ssd-\([0-9]*\)/\1/'`
      echo "Formatting and mounting local SSD $ssd to /mnt/disks/ssd$ssdnum"
      mkdir -p /mnt/disks/ssd$ssdnum
      /usr/share/google/safe_format_and_mount -m "mkfs.ext4 -F" "${ssd}" /mnt/disks/ssd$ssdnum &>/var/log/local-ssd-$ssdnum-mount.log || \
        { echo "Local SSD $ssdnum mount failed, review /var/log/local-ssd-$ssdnum-mount.log"; return 1; }
    else
      echo "No local SSD disks found."
    fi
  done
}

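# Illustrative example for ensure-local-disks (not part of the original flow):
# a device named /dev/disk/by-id/google-local-ssd-0 yields ssdnum=0 and is
# mounted at /mnt/disks/ssd0, with the mount log in /var/log/local-ssd-0-mount.log.
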
function ensure-install-dir() {
  INSTALL_DIR="/var/cache/kubernetes-install"
  mkdir -p ${INSTALL_DIR}
  cd ${INSTALL_DIR}
}

function salt-apiserver-timeout-grain() {
  cat <<EOF >>/etc/salt/minion.d/grains.conf
  minRequestTimeout: '$1'
EOF
}

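# Example (hypothetical value): `salt-apiserver-timeout-grain 300` appends
# "  minRequestTimeout: '300'" under the existing `grains:` key in
# /etc/salt/minion.d/grains.conf.
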
function set-broken-motd() {
  echo -e '\nBroken (or in progress) Kubernetes node setup! Suggested first step:\n tail /var/log/startupscript.log\n' > /etc/motd
}

function reset-motd() {
  # kubelet is installed both on the master and nodes, and the version is easy to parse (unlike kubectl)
  local -r version="$(/usr/local/bin/kubelet --version=true | cut -f2 -d " ")"
  # This logic grabs either a release tag (v1.2.1 or v1.2.1-alpha.1),
  # or the git hash that's in the build info.
  local gitref="$(echo "${version}" | sed -r "s/(v[0-9]+\.[0-9]+\.[0-9]+)(-[a-z]+\.[0-9]+)?.*/\1\2/g")"
  local devel=""
  if [[ "${gitref}" != "${version}" ]]; then
    devel="
Note: This looks like a development version, which might not be present on GitHub.
If it isn't, the closest tag is at:
  https://github.com/kubernetes/kubernetes/tree/${gitref}
"
    gitref="${version//*+/}"
  fi
  cat > /etc/motd <<EOF
Welcome to Kubernetes ${version}!

You can find documentation for Kubernetes at:
  http://docs.kubernetes.io/

The source for this release can be found at:
  /usr/local/share/doc/kubernetes/kubernetes-src.tar.gz
Or you can download it at:
  https://storage.googleapis.com/kubernetes-release/release/${version}/kubernetes-src.tar.gz

It is based on the Kubernetes source at:
  https://github.com/kubernetes/kubernetes/tree/${gitref}
${devel}
For Kubernetes copyright and licensing information, see:
  /usr/local/share/doc/kubernetes/LICENSES
EOF
}

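# Illustrative examples: a release kubelet reporting "v1.2.1" leaves gitref as
# "v1.2.1" and adds no devel note; a development build such as
# "v1.4.0-alpha.2.677+ea69570f61af8e" (hypothetical) sets the devel note and
# keeps only the trailing git hash in gitref.
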
function curl-metadata() {
  curl --fail --retry 5 --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/attributes/${1}"
}

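# Example: `curl-metadata kube-env` fetches the instance's "kube-env" metadata
# attribute from the GCE metadata server (as used by set-kube-env below).
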
function set-kube-env() {
  local kube_env_yaml="${INSTALL_DIR}/kube_env.yaml"

  until curl-metadata kube-env > "${kube_env_yaml}"; do
    echo 'Waiting for kube-env...'
    sleep 3
  done

  # kube-env has all the environment variables we care about, in a flat yaml format
  eval "$(python -c '
import pipes,sys,yaml

for k,v in yaml.load(sys.stdin).iteritems():
  print("""readonly {var}={value}""".format(var = k, value = pipes.quote(str(v))))
  print("""export {var}""".format(var = k))
' < """${kube_env_yaml}""")"
}

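# A minimal sketch of the expansion above, assuming kube-env contains a line
# like "KUBERNETES_MASTER: 'true'": the embedded python prints
#   readonly KUBERNETES_MASTER=true
#   export KUBERNETES_MASTER
# which the surrounding eval then executes in this shell.
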
function remove-docker-artifacts() {
  echo "== Deleting docker0 =="
  apt-get-install bridge-utils

  # Remove docker artifacts on minion nodes, if present
  iptables -t nat -F || true
  ifconfig docker0 down || true
  brctl delbr docker0 || true
  echo "== Finished deleting docker0 =="
}

# Retry a download until we get it. Takes a hash and a set of URLs.
#
# $1 is the sha1 of the URL. Can be "" if the sha1 is unknown.
# $2+ are the URLs to download.
download-or-bust() {
  local -r hash="$1"
  shift 1

  urls=( $* )
  while true; do
    for url in "${urls[@]}"; do
      local file="${url##*/}"
      rm -f "${file}"
      if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --max-time 300 --retry 6 --retry-delay 10 "${url}"; then
        echo "== Failed to download ${url}. Retrying. =="
      elif [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
        echo "== Hash validation of ${url} failed. Retrying. =="
      else
        if [[ -n "${hash}" ]]; then
          echo "== Downloaded ${url} (SHA1 = ${hash}) =="
        else
          echo "== Downloaded ${url} =="
        fi
        return
      fi
    done
  done
}

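# Example usage (hypothetical URL and hash):
#   download-or-bust "<sha1>" "https://example.com/kubernetes-server-linux-amd64.tar.gz"
# loops over the given URLs until one downloads and passes the SHA1 check.
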
validate-hash() {
  local -r file="$1"
  local -r expected="$2"
  local actual

  actual=$(sha1sum ${file} | awk '{ print $1 }') || true
  if [[ "${actual}" != "${expected}" ]]; then
    echo "== ${file} corrupted, sha1 ${actual} doesn't match expected ${expected} =="
    return 1
  fi
}

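# Example: `validate-hash ./kubernetes-salt.tar.gz "<expected-sha1>"` returns
# non-zero (and logs a corruption message) when the file's sha1sum differs.
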
apt-get-install() {
  local -r packages=( $@ )
  installed=true
  for package in "${packages[@]}"; do
    if ! dpkg -s "${package}" &>/dev/null; then
      installed=false
      break
    fi
  done
  if [[ "${installed}" == "true" ]]; then
    echo "== ${packages[@]} already installed, skipped apt-get install ${packages[@]} =="
    return
  fi

  apt-get-update

  # Forcibly install packages (options borrowed from Salt logs).
  until apt-get -q -y -o DPkg::Options::=--force-confold -o DPkg::Options::=--force-confdef install $@; do
    echo "== install of packages $@ failed, retrying =="
    sleep 5
  done
}

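# Example: `apt-get-install unattended-upgrades needrestart` (as used by
# auto-upgrade below) is a no-op when both packages are already installed.
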
apt-get-update() {
  echo "== Refreshing package database =="
  until apt-get update; do
    echo "== apt-get update failed, retrying =="
    sleep 5
  done
}

# Restart any services that need restarting due to a library upgrade
# Uses needrestart
restart-updated-services() {
  # We default to restarting services, because this is only done as part of an update
  if [[ "${AUTO_RESTART_SERVICES:-true}" != "true" ]]; then
    echo "Auto restart of services prevented by AUTO_RESTART_SERVICES=${AUTO_RESTART_SERVICES}"
    return
  fi
  echo "Restarting services with updated libraries (needrestart -r a)"
  # The pipes make sure that needrestart doesn't think it is running with a TTY
  # Debian bug #803249; fixed but not necessarily in package repos yet
  echo "" | needrestart -r a 2>&1 | tee /dev/null
}

# Reboot the machine if /var/run/reboot-required exists
reboot-if-required() {
  if [[ ! -e "/var/run/reboot-required" ]]; then
    return
  fi

  echo "Reboot is required (/var/run/reboot-required detected)"
  if [[ -e "/var/run/reboot-required.pkgs" ]]; then
    echo "Packages that triggered reboot:"
    cat /var/run/reboot-required.pkgs
  fi

  # We default to rebooting the machine because this is only done as part of an update
  if [[ "${AUTO_REBOOT:-true}" != "true" ]]; then
    echo "Reboot prevented by AUTO_REBOOT=${AUTO_REBOOT}"
    return
  fi

  rm -f /var/run/reboot-required
  rm -f /var/run/reboot-required.pkgs
  echo "Triggering reboot"
  init 6
}

# Install upgrades using unattended-upgrades, then reboot or restart services
auto-upgrade() {
  # We default to not installing upgrades
  if [[ "${AUTO_UPGRADE:-false}" != "true" ]]; then
    echo "AUTO_UPGRADE not set to true; won't auto-upgrade"
    return
  fi
  apt-get-install unattended-upgrades needrestart
  unattended-upgrade --debug
  reboot-if-required # We may reboot the machine right here
  restart-updated-services
}

#
# Install salt from GCS. See README.md for instructions on how to update these
# debs.
install-salt() {
  if dpkg -s salt-minion &>/dev/null; then
    echo "== SaltStack already installed, skipping install step =="
    return
  fi

  echo "== Refreshing package database =="
  until apt-get update; do
    echo "== apt-get update failed, retrying =="
    sleep 5
  done

  mkdir -p /var/cache/salt-install
  cd /var/cache/salt-install

  DEBS=(
    libzmq3_3.2.3+dfsg-1~bpo70~dst+1_amd64.deb
    python-zmq_13.1.0-1~bpo70~dst+1_amd64.deb
    salt-common_2014.1.13+ds-1~bpo70+1_all.deb
    salt-minion_2014.1.13+ds-1~bpo70+1_all.deb
  )
  URL_BASE="https://storage.googleapis.com/kubernetes-release/salt"

  for deb in "${DEBS[@]}"; do
    if [ ! -e "${deb}" ]; then
      download-or-bust "" "${URL_BASE}/${deb}"
    fi
  done

  # Based on
  # https://major.io/2014/06/26/install-debian-packages-without-starting-daemons/
  # We do this to prevent Salt from starting the salt-minion
  # daemon. The other packages don't have relevant daemons. (If you
  # add a package that needs a daemon started, add it to a different
  # list.)
  cat > /usr/sbin/policy-rc.d <<EOF
#!/bin/sh
echo "Salt shall not start." >&2
exit 101
EOF
  chmod 0755 /usr/sbin/policy-rc.d

  for deb in "${DEBS[@]}"; do
    echo "== Installing ${deb}, ignore dependency complaints (will fix later) =="
    dpkg --skip-same-version --force-depends -i "${deb}"
  done

  # This will install any of the unmet dependencies from above.
  echo "== Installing unmet dependencies =="
  until apt-get install -f -y; do
    echo "== apt-get install failed, retrying =="
    sleep 5
  done

  rm /usr/sbin/policy-rc.d

  # Log a timestamp
  echo "== Finished installing Salt =="
}

# Ensure salt-minion isn't running and never runs
stop-salt-minion() {
  if [[ -e /etc/init/salt-minion.override ]]; then
    # Assume this has already run (upgrade, or baked into containervm)
    return
  fi

  # This ensures it on next reboot
  echo manual > /etc/init/salt-minion.override
  update-rc.d salt-minion disable

  while service salt-minion status >/dev/null; do
    echo "salt-minion found running, stopping"
    service salt-minion stop
    sleep 1
  done
}

# Finds the master PD device; returns it in MASTER_PD_DEVICE
find-master-pd() {
  MASTER_PD_DEVICE=""
  if [[ ! -e /dev/disk/by-id/google-master-pd ]]; then
    return
  fi
  device_info=$(ls -l /dev/disk/by-id/google-master-pd)
  relative_path=${device_info##* }
  MASTER_PD_DEVICE="/dev/disk/by-id/${relative_path}"
}

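# Illustrative example: if /dev/disk/by-id/google-master-pd is a symlink whose
# `ls -l` output ends in "../../sdb", then relative_path="../../sdb" and
# MASTER_PD_DEVICE="/dev/disk/by-id/../../sdb" (i.e. /dev/sdb).
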
# Mounts a persistent disk (formatting if needed) to store the persistent data
# on the master -- etcd's data, a few settings, and security certs/keys/tokens.
#
# This function can be reused to mount an existing PD because all of its
# operations modifying the disk are idempotent -- safe_format_and_mount only
# formats an unformatted disk, and mkdir -p will leave a directory be if it
# already exists.
mount-master-pd() {
  find-master-pd
  if [[ -z "${MASTER_PD_DEVICE:-}" ]]; then
    return
  fi

  # Format and mount the disk, create directories on it for all of the master's
  # persistent data, and link them to where they're used.
  echo "Mounting master-pd"
  mkdir -p /mnt/master-pd
  /usr/share/google/safe_format_and_mount -m "mkfs.ext4 -F" "${MASTER_PD_DEVICE}" /mnt/master-pd &>/var/log/master-pd-mount.log || \
    { echo "!!! master-pd mount failed, review /var/log/master-pd-mount.log !!!"; return 1; }
  # Contains all the data stored in etcd
  mkdir -m 700 -p /mnt/master-pd/var/etcd
  # Contains the dynamically generated apiserver auth certs and keys
  mkdir -p /mnt/master-pd/srv/kubernetes
  # Contains the cluster's initial config parameters and auth tokens
  mkdir -p /mnt/master-pd/srv/salt-overlay
  # Directory for kube-apiserver to store SSH key (if necessary)
  mkdir -p /mnt/master-pd/srv/sshproxy
  ln -s -f /mnt/master-pd/var/etcd /var/etcd
  ln -s -f /mnt/master-pd/srv/kubernetes /srv/kubernetes
  ln -s -f /mnt/master-pd/srv/sshproxy /srv/sshproxy
  ln -s -f /mnt/master-pd/srv/salt-overlay /srv/salt-overlay

  # This is a bit of a hack to get around the fact that salt has to run after the
  # PD and mounted directory are already set up. We can't give ownership of the
  # directory to etcd until the etcd user and group exist, but they don't exist
  # until salt runs if we don't create them here. We could alternatively make the
  # permissions on the directory more permissive, but this seems less bad.
  if ! id etcd &>/dev/null; then
    useradd -s /sbin/nologin -d /var/etcd etcd
  fi
  chown -R etcd /mnt/master-pd/var/etcd
  chgrp -R etcd /mnt/master-pd/var/etcd
}

# Create the overlay files for the salt tree. We create these in a separate
# place so that we can blow away the rest of the salt configs on a kube-push and
# re-apply these.
function create-salt-pillar() {
  # Always overwrite the cluster-params.sls (even on a push, we have
  # these variables)
  mkdir -p /srv/salt-overlay/pillar
  cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
node_tags: '$(echo "$NODE_TAGS" | sed -e "s/'/''/g")'
node_instance_prefix: '$(echo "$NODE_INSTANCE_PREFIX" | sed -e "s/'/''/g")'
cluster_cidr: '$(echo "$CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
allocate_node_cidrs: '$(echo "$ALLOCATE_NODE_CIDRS" | sed -e "s/'/''/g")'
non_masquerade_cidr: '$(echo "$NON_MASQUERADE_CIDR" | sed -e "s/'/''/g")'
service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")'
enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")'
enable_cluster_ui: '$(echo "$ENABLE_CLUSTER_UI" | sed -e "s/'/''/g")'
enable_node_problem_detector: '$(echo "$ENABLE_NODE_PROBLEM_DETECTOR" | sed -e "s/'/''/g")'
enable_l7_loadbalancing: '$(echo "$ENABLE_L7_LOADBALANCING" | sed -e "s/'/''/g")'
enable_node_logging: '$(echo "$ENABLE_NODE_LOGGING" | sed -e "s/'/''/g")'
enable_rescheduler: '$(echo "$ENABLE_RESCHEDULER" | sed -e "s/'/''/g")'
logging_destination: '$(echo "$LOGGING_DESTINATION" | sed -e "s/'/''/g")'
elasticsearch_replicas: '$(echo "$ELASTICSEARCH_LOGGING_REPLICAS" | sed -e "s/'/''/g")'
enable_cluster_dns: '$(echo "$ENABLE_CLUSTER_DNS" | sed -e "s/'/''/g")'
enable_cluster_registry: '$(echo "$ENABLE_CLUSTER_REGISTRY" | sed -e "s/'/''/g")'
dns_replicas: '$(echo "$DNS_REPLICAS" | sed -e "s/'/''/g")'
dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")'
dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")'
admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
storage_backend: '$(echo "$STORAGE_BACKEND" | sed -e "s/'/''/g")'
network_provider: '$(echo "$NETWORK_PROVIDER" | sed -e "s/'/''/g")'
prepull_e2e_images: '$(echo "$PREPULL_E2E_IMAGES" | sed -e "s/'/''/g")'
hairpin_mode: '$(echo "$HAIRPIN_MODE" | sed -e "s/'/''/g")'
opencontrail_tag: '$(echo "$OPENCONTRAIL_TAG" | sed -e "s/'/''/g")'
opencontrail_kubernetes_tag: '$(echo "$OPENCONTRAIL_KUBERNETES_TAG")'
opencontrail_public_subnet: '$(echo "$OPENCONTRAIL_PUBLIC_SUBNET")'
network_policy_provider: '$(echo "$NETWORK_POLICY_PROVIDER" | sed -e "s/'/''/g")'
enable_manifest_url: '$(echo "${ENABLE_MANIFEST_URL:-}" | sed -e "s/'/''/g")'
manifest_url: '$(echo "${MANIFEST_URL:-}" | sed -e "s/'/''/g")'
manifest_url_header: '$(echo "${MANIFEST_URL_HEADER:-}" | sed -e "s/'/''/g")'
num_nodes: $(echo "${NUM_NODES:-}" | sed -e "s/'/''/g")
e2e_storage_test_environment: '$(echo "$E2E_STORAGE_TEST_ENVIRONMENT" | sed -e "s/'/''/g")'
kube_uid: '$(echo "${KUBE_UID}" | sed -e "s/'/''/g")'
initial_etcd_cluster: '$(echo "${INITIAL_ETCD_CLUSTER:-}" | sed -e "s/'/''/g")'
hostname: $(hostname -s)
EOF
  if [[ -n "${ADMISSION_CONTROL:-}" ]] && [[ "${ADMISSION_CONTROL}" == *"ImagePolicyWebhook"* ]]; then
    cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
admission-control-config-file: /etc/admission_controller.config
EOF
  fi

  if [ -n "${KUBELET_PORT:-}" ]; then
    cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kubelet_port: '$(echo "$KUBELET_PORT" | sed -e "s/'/''/g")'
EOF
  fi

  # Configuration changes for test clusters
  if [ -n "${TEST_ETCD_VERSION:-}" ]; then
    cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
etcd_docker_tag: '$(echo "$TEST_ETCD_VERSION" | sed -e "s/'/''/g")'
EOF
  fi

  if [ -n "${APISERVER_TEST_ARGS:-}" ]; then
    cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
apiserver_test_args: '$(echo "$APISERVER_TEST_ARGS" | sed -e "s/'/''/g")'
EOF
  fi

  if [ -n "${API_SERVER_TEST_LOG_LEVEL:-}" ]; then
    cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
api_server_test_log_level: '$(echo "$API_SERVER_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
EOF
  fi

  if [ -n "${KUBELET_TEST_ARGS:-}" ]; then
    cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kubelet_test_args: '$(echo "$KUBELET_TEST_ARGS" | sed -e "s/'/''/g")'
EOF
  fi

  if [ -n "${KUBELET_TEST_LOG_LEVEL:-}" ]; then
    cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kubelet_test_log_level: '$(echo "$KUBELET_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
EOF
  fi

  if [ -n "${DOCKER_TEST_LOG_LEVEL:-}" ]; then
    cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
docker_test_log_level: '$(echo "$DOCKER_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
EOF
  fi

  if [ -n "${CONTROLLER_MANAGER_TEST_ARGS:-}" ]; then
    cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
controller_manager_test_args: '$(echo "$CONTROLLER_MANAGER_TEST_ARGS" | sed -e "s/'/''/g")'
EOF
  fi

  if [ -n "${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-}" ]; then
    cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
controller_manager_test_log_level: '$(echo "$CONTROLLER_MANAGER_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
EOF
  fi

  if [ -n "${SCHEDULER_TEST_ARGS:-}" ]; then
    cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
scheduler_test_args: '$(echo "$SCHEDULER_TEST_ARGS" | sed -e "s/'/''/g")'
EOF
  fi

  if [ -n "${SCHEDULER_TEST_LOG_LEVEL:-}" ]; then
    cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
scheduler_test_log_level: '$(echo "$SCHEDULER_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
EOF
  fi

  if [ -n "${KUBEPROXY_TEST_ARGS:-}" ]; then
    cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kubeproxy_test_args: '$(echo "$KUBEPROXY_TEST_ARGS" | sed -e "s/'/''/g")'
EOF
  fi

  if [ -n "${KUBEPROXY_TEST_LOG_LEVEL:-}" ]; then
    cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kubeproxy_test_log_level: '$(echo "$KUBEPROXY_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
EOF
  fi

  # TODO: Replace this with a persistent volume (and create it).
  if [[ "${ENABLE_CLUSTER_REGISTRY}" == true && -n "${CLUSTER_REGISTRY_DISK}" ]]; then
    cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
cluster_registry_disk_type: gce
cluster_registry_disk_size: $(echo $(convert-bytes-gce-kube ${CLUSTER_REGISTRY_DISK_SIZE}) | sed -e "s/'/''/g")
cluster_registry_disk_name: $(echo ${CLUSTER_REGISTRY_DISK} | sed -e "s/'/''/g")
EOF
  fi

  if [ -n "${TERMINATED_POD_GC_THRESHOLD:-}" ]; then
    cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
terminated_pod_gc_threshold: '$(echo "${TERMINATED_POD_GC_THRESHOLD}" | sed -e "s/'/''/g")'
EOF
  fi

  if [ -n "${ENABLE_CUSTOM_METRICS:-}" ]; then
    cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
enable_custom_metrics: '$(echo "${ENABLE_CUSTOM_METRICS}" | sed -e "s/'/''/g")'
EOF
  fi

  if [ -n "${NODE_LABELS:-}" ]; then
    cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
node_labels: '$(echo "${NODE_LABELS}" | sed -e "s/'/''/g")'
EOF
  fi

  if [ -n "${EVICTION_HARD:-}" ]; then
    cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
eviction_hard: '$(echo "${EVICTION_HARD}" | sed -e "s/'/''/g")'
EOF
  fi

  if [[ "${ENABLE_CLUSTER_AUTOSCALER:-false}" == "true" ]]; then
    cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
enable_cluster_autoscaler: '$(echo "${ENABLE_CLUSTER_AUTOSCALER}" | sed -e "s/'/''/g")'
autoscaler_mig_config: '$(echo "${AUTOSCALER_MIG_CONFIG}" | sed -e "s/'/''/g")'
EOF
  fi

  if [[ "${FEDERATION:-}" == "true" ]]; then
    local federations_domain_map="${FEDERATIONS_DOMAIN_MAP:-}"
    if [[ -z "${federations_domain_map}" && -n "${FEDERATION_NAME:-}" && -n "${DNS_ZONE_NAME:-}" ]]; then
      federations_domain_map="${FEDERATION_NAME}=${DNS_ZONE_NAME}"
    fi
    if [[ -n "${federations_domain_map}" ]]; then
      cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
federations_domain_map: '$(echo "- --federations=${federations_domain_map}" | sed -e "s/'/''/g")'
EOF
    else
      cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
federations_domain_map: ''
EOF
    fi
  else
    cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
federations_domain_map: ''
EOF
  fi

  if [ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]; then
    cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
scheduling_algorithm_provider: '$(echo "${SCHEDULING_ALGORITHM_PROVIDER}" | sed -e "s/'/''/g")'
EOF
  fi
}

# The job of this function is simple, but the basic regular expression syntax makes
# this difficult to read. What we want to do is convert from [0-9]+B, KB, KiB, MB, etc
# into [0-9]+, Ki, Mi, Gi, etc.
# This is done in two steps:
#   1. Convert from [0-9]+X?i?B into [0-9]+X? (X denotes the prefix; ? means the
#      field is optional).
#   2. Attach an 'i' to the end of the string if we find a letter.
# The two-step process is needed to handle the edge case in which we want to convert
# a raw byte count, as the result should be a simple number (e.g. 5B -> 5).
function convert-bytes-gce-kube() {
  local -r storage_space=$1
  echo "${storage_space}" | sed -e 's/^\([0-9]\+\)\([A-Z]\)\?i\?B$/\1\2/g' -e 's/\([A-Z]\)$/\1i/'
}

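# Worked examples: "100GB" -> "100G" -> "100Gi"; "20KiB" -> "20K" -> "20Ki";
# "5B" -> "5" (no letter remains, so no 'i' is appended).
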
# This should only happen on cluster initialization.
#
#  - Uses KUBE_PASSWORD and KUBE_USER to generate basic_auth.csv.
#  - Uses KUBE_BEARER_TOKEN, KUBELET_TOKEN, and KUBE_PROXY_TOKEN to generate
#    known_tokens.csv (KNOWN_TOKENS_FILE).
#  - Uses CA_CERT, MASTER_CERT, and MASTER_KEY to populate the SSL credentials
#    for the apiserver.
#  - Optionally uses KUBECFG_CERT and KUBECFG_KEY to store a copy of the client
#    cert credentials.
#
# After the first boot and on upgrade, these files exist on the master-pd
# and should never be touched again (except perhaps an additional service
# account, see NB below.)
function create-salt-master-auth() {
  if [[ ! -e /srv/kubernetes/ca.crt ]]; then
    if [[ ! -z "${CA_CERT:-}" ]] && [[ ! -z "${MASTER_CERT:-}" ]] && [[ ! -z "${MASTER_KEY:-}" ]]; then
      mkdir -p /srv/kubernetes
      (umask 077;
        echo "${CA_CERT}" | base64 --decode > /srv/kubernetes/ca.crt;
        echo "${MASTER_CERT}" | base64 --decode > /srv/kubernetes/server.cert;
        echo "${MASTER_KEY}" | base64 --decode > /srv/kubernetes/server.key;
        # Kubecfg cert/key are optional and included for backwards compatibility.
        # TODO(roberthbailey): Remove these two lines once GKE no longer requires
        # fetching clients certs from the master VM.
        echo "${KUBECFG_CERT:-}" | base64 --decode > /srv/kubernetes/kubecfg.crt;
        echo "${KUBECFG_KEY:-}" | base64 --decode > /srv/kubernetes/kubecfg.key)
    fi
  fi
  if [ ! -e "${BASIC_AUTH_FILE}" ]; then
    mkdir -p /srv/salt-overlay/salt/kube-apiserver
    (umask 077;
      echo "${KUBE_PASSWORD},${KUBE_USER},admin" > "${BASIC_AUTH_FILE}")
  fi
  if [ ! -e "${KNOWN_TOKENS_FILE}" ]; then
    mkdir -p /srv/salt-overlay/salt/kube-apiserver
    (umask 077;
      echo "${KUBE_BEARER_TOKEN},admin,admin" > "${KNOWN_TOKENS_FILE}";
      echo "${KUBELET_TOKEN},kubelet,kubelet" >> "${KNOWN_TOKENS_FILE}";
      echo "${KUBE_PROXY_TOKEN},kube_proxy,kube_proxy" >> "${KNOWN_TOKENS_FILE}")
  fi
}

# This should happen only on cluster initialization. After the first boot
# and on upgrade, the kubeconfig file exists on the master-pd and should
# never be touched again.
#
#  - Uses KUBELET_CA_CERT (falling back to CA_CERT), KUBELET_CERT, and
#    KUBELET_KEY to generate a kubeconfig file for the kubelet to securely
#    connect to the apiserver.
function create-salt-master-kubelet-auth() {
  # Only configure the kubelet on the master if the required variables are
  # set in the environment.
  if [[ ! -z "${KUBELET_APISERVER:-}" ]] && [[ ! -z "${KUBELET_CERT:-}" ]] && [[ ! -z "${KUBELET_KEY:-}" ]]; then
    create-salt-kubelet-auth
  fi
}

# This should happen both on cluster initialization and node upgrades.
#
#  - Uses KUBELET_CA_CERT (falling back to CA_CERT), KUBELET_CERT, and
#    KUBELET_KEY to generate a kubeconfig file for the kubelet to securely
#    connect to the apiserver.
function create-salt-kubelet-auth() {
  local -r kubelet_kubeconfig_file="/srv/salt-overlay/salt/kubelet/kubeconfig"
  if [ ! -e "${kubelet_kubeconfig_file}" ]; then
    # If there isn't a CA certificate set specifically for the kubelet, use
    # the cluster CA certificate.
    if [[ -z "${KUBELET_CA_CERT:-}" ]]; then
      KUBELET_CA_CERT="${CA_CERT}"
    fi
    mkdir -p /srv/salt-overlay/salt/kubelet
    (umask 077;
      cat > "${kubelet_kubeconfig_file}" <<EOF
apiVersion: v1
kind: Config
users:
- name: kubelet
  user:
    client-certificate-data: ${KUBELET_CERT}
    client-key-data: ${KUBELET_KEY}
clusters:
- name: local
  cluster:
    server: https://kubernetes-master
    certificate-authority-data: ${KUBELET_CA_CERT}
contexts:
- context:
    cluster: local
    user: kubelet
  name: service-account-context
current-context: service-account-context
EOF
    )
  fi
}

# This should happen both on cluster initialization and node upgrades.
#
#  - Uses the CA_CERT and KUBE_PROXY_TOKEN to generate a kubeconfig file for
#    the kube-proxy to securely connect to the apiserver.
function create-salt-kubeproxy-auth() {
  local -r kube_proxy_kubeconfig_file="/srv/salt-overlay/salt/kube-proxy/kubeconfig"
  if [ ! -e "${kube_proxy_kubeconfig_file}" ]; then
    mkdir -p /srv/salt-overlay/salt/kube-proxy
    (umask 077;
      cat > "${kube_proxy_kubeconfig_file}" <<EOF
apiVersion: v1
kind: Config
users:
- name: kube-proxy
  user:
    token: ${KUBE_PROXY_TOKEN}
clusters:
- name: local
  cluster:
    certificate-authority-data: ${CA_CERT}
contexts:
- context:
    cluster: local
    user: kube-proxy
  name: service-account-context
current-context: service-account-context
EOF
    )
  fi
}

function split-commas() {
  echo $1 | tr "," "\n"
}

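# Example (hypothetical mirror URLs):
# `split-commas "https://a.example/x.tar.gz,https://b.example/x.tar.gz"`
# prints each URL on its own line.
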
function try-download-release() {
  # TODO(zmerlynn): Now we REALLY have no excuse not to do the reboot
  # optimization.
  local -r server_binary_tar_urls=( $(split-commas "${SERVER_BINARY_TAR_URL}") )
  local -r server_binary_tar="${server_binary_tar_urls[0]##*/}"
  if [[ -n "${SERVER_BINARY_TAR_HASH:-}" ]]; then
    local -r server_binary_tar_hash="${SERVER_BINARY_TAR_HASH}"
  else
    echo "Downloading binary release sha1 (not found in env)"
    download-or-bust "" "${server_binary_tar_urls[@]/.tar.gz/.tar.gz.sha1}"
    local -r server_binary_tar_hash=$(cat "${server_binary_tar}.sha1")
  fi

  echo "Downloading binary release tar (${server_binary_tar_urls[@]})"
  download-or-bust "${server_binary_tar_hash}" "${server_binary_tar_urls[@]}"

  local -r salt_tar_urls=( $(split-commas "${SALT_TAR_URL}") )
  local -r salt_tar="${salt_tar_urls[0]##*/}"
  if [[ -n "${SALT_TAR_HASH:-}" ]]; then
    local -r salt_tar_hash="${SALT_TAR_HASH}"
  else
    echo "Downloading Salt tar sha1 (not found in env)"
    download-or-bust "" "${salt_tar_urls[@]/.tar.gz/.tar.gz.sha1}"
    local -r salt_tar_hash=$(cat "${salt_tar}.sha1")
  fi

  echo "Downloading Salt tar (${salt_tar_urls[@]})"
  download-or-bust "${salt_tar_hash}" "${salt_tar_urls[@]}"

  echo "Unpacking Salt tree and checking integrity of binary release tar"
  rm -rf kubernetes
  tar xzf "${salt_tar}" && tar tzf "${server_binary_tar}" > /dev/null
}

function download-release() {
  # In case of failure checking integrity of release, retry.
  until try-download-release; do
    sleep 15
    echo "Couldn't download release. Retrying..."
  done

  echo "Running release install script"
  kubernetes/saltbase/install.sh "${SERVER_BINARY_TAR_URL##*/}"
}

function fix-apt-sources() {
  sed -i -e "\|^deb.*http://http.debian.net/debian| s/^/#/" /etc/apt/sources.list
  sed -i -e "\|^deb.*http://ftp.debian.org/debian| s/^/#/" /etc/apt/sources.list.d/backports.list
}

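# Example: a sources.list line such as
#   deb http://http.debian.net/debian wheezy main
# would be commented out to
#   #deb http://http.debian.net/debian wheezy main
# ("wheezy main" here is illustrative; only the host pattern matters).
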
function salt-run-local() {
  cat <<EOF >/etc/salt/minion.d/local.conf
file_client: local
file_roots:
  base:
    - /srv/salt
EOF
}

function salt-debug-log() {
  cat <<EOF >/etc/salt/minion.d/log-level-debug.conf
log_level: debug
log_level_logfile: debug
EOF
}

function salt-master-role() {
  cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
  roles:
    - kubernetes-master
  cloud: gce
EOF
  cat <<EOF >/etc/gce.conf
[global]
EOF
  CLOUD_CONFIG='' # Set to non-empty path if we are using the gce.conf file

  if ! [[ -z "${PROJECT_ID:-}" ]] && ! [[ -z "${TOKEN_URL:-}" ]] && ! [[ -z "${TOKEN_BODY:-}" ]] && ! [[ -z "${NODE_NETWORK:-}" ]] ; then
    cat <<EOF >>/etc/gce.conf
token-url = ${TOKEN_URL}
token-body = ${TOKEN_BODY}
project-id = ${PROJECT_ID}
network-name = ${NODE_NETWORK}
EOF
    CLOUD_CONFIG=/etc/gce.conf
    EXTERNAL_IP=$(curl --fail --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip")
    cat <<EOF >>/etc/salt/minion.d/grains.conf
  advertise_address: '${EXTERNAL_IP}'
  proxy_ssh_user: '${PROXY_SSH_USER}'
EOF
  fi

  if [[ -n "${NODE_INSTANCE_PREFIX:-}" ]]; then
    if [[ -n "${NODE_TAGS:-}" ]]; then
      local -r node_tags="${NODE_TAGS}"
    else
      local -r node_tags="${NODE_INSTANCE_PREFIX}"
    fi
    cat <<EOF >>/etc/gce.conf
node-tags = ${node_tags}
node-instance-prefix = ${NODE_INSTANCE_PREFIX}
EOF
    CLOUD_CONFIG=/etc/gce.conf
  fi

  if [[ -n "${MULTIZONE:-}" ]]; then
    cat <<EOF >>/etc/gce.conf
multizone = ${MULTIZONE}
EOF
    CLOUD_CONFIG=/etc/gce.conf
  fi

  if [[ -n "${CLOUD_CONFIG:-}" ]]; then
    cat <<EOF >>/etc/salt/minion.d/grains.conf
  cloud_config: ${CLOUD_CONFIG}
EOF
  else
    rm -f /etc/gce.conf
  fi

  if [[ -n "${GCP_AUTHN_URL:-}" ]]; then
    cat <<EOF >>/etc/salt/minion.d/grains.conf
  webhook_authentication_config: /etc/gcp_authn.config
EOF
    cat <<EOF >/etc/gcp_authn.config
clusters:
- name: gcp-authentication-server
  cluster:
    server: ${GCP_AUTHN_URL}
users:
- name: kube-apiserver
  user:
    auth-provider:
      name: gcp
current-context: webhook
contexts:
- context:
    cluster: gcp-authentication-server
    user: kube-apiserver
  name: webhook
EOF
  fi

  if [[ -n "${GCP_AUTHZ_URL:-}" ]]; then
    cat <<EOF >>/etc/salt/minion.d/grains.conf
  webhook_authorization_config: /etc/gcp_authz.config
EOF
    cat <<EOF >/etc/gcp_authz.config
clusters:
- name: gcp-authorization-server
  cluster:
    server: ${GCP_AUTHZ_URL}
users:
- name: kube-apiserver
  user:
    auth-provider:
      name: gcp
current-context: webhook
contexts:
- context:
    cluster: gcp-authorization-server
    user: kube-apiserver
  name: webhook
EOF
  fi

  if [[ -n "${GCP_IMAGE_VERIFICATION_URL:-}" ]]; then
    # This is the config file for the image review webhook.
    cat <<EOF >>/etc/salt/minion.d/grains.conf
  image_review_config: /etc/gcp_image_review.config
EOF
    cat <<EOF >/etc/gcp_image_review.config
clusters:
- name: gcp-image-review-server
  cluster:
    server: ${GCP_IMAGE_VERIFICATION_URL}
users:
- name: kube-apiserver
  user:
    auth-provider:
      name: gcp
current-context: webhook
contexts:
- context:
    cluster: gcp-image-review-server
    user: kube-apiserver
  name: webhook
EOF
    # This is the config for the image review admission controller.
    cat <<EOF >>/etc/salt/minion.d/grains.conf
  image_review_webhook_config: /etc/admission_controller.config
EOF
    cat <<EOF >/etc/admission_controller.config
imagePolicy:
  kubeConfigFile: /etc/gcp_image_review.config
  allowTTL: 30
  denyTTL: 30
  retryBackoff: 500
  defaultAllow: true
EOF
  fi

  # If the kubelet on the master is enabled, give it the same CIDR range
  # as a generic node.
  if [[ ! -z "${KUBELET_APISERVER:-}" ]] && [[ ! -z "${KUBELET_CERT:-}" ]] && [[ ! -z "${KUBELET_KEY:-}" ]]; then
    cat <<EOF >>/etc/salt/minion.d/grains.conf
  kubelet_api_servers: '${KUBELET_APISERVER}'
  cbr-cidr: 10.123.45.0/30
EOF
  else
    # If the kubelet is running disconnected from a master, give it a fixed
    # CIDR range.
    cat <<EOF >>/etc/salt/minion.d/grains.conf
  cbr-cidr: ${MASTER_IP_RANGE}
EOF
  fi

  env-to-grains "runtime_config"
  env-to-grains "kube_user"
}

function salt-node-role() {
  cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
  roles:
    - kubernetes-pool
  cbr-cidr: 10.123.45.0/30
  cloud: gce
  api_servers: '${KUBERNETES_MASTER_NAME}'
EOF
}

function env-to-grains {
  local key=$1
  local env_key=`echo $key | tr '[:lower:]' '[:upper:]'`
  local value=${!env_key:-}
  if [[ -n "${value}" ]]; then
    # Note this is yaml, so indentation matters
    cat <<EOF >>/etc/salt/minion.d/grains.conf
  ${key}: '$(echo "${value}" | sed -e "s/'/''/g")'
EOF
  fi
}

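# Example (hypothetical value): with RUNTIME_CONFIG="extensions/v1beta1=true"
# in the environment, `env-to-grains "runtime_config"` appends
# "  runtime_config: 'extensions/v1beta1=true'" to /etc/salt/minion.d/grains.conf.
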
function node-docker-opts() {
  if [[ -n "${EXTRA_DOCKER_OPTS-}" ]]; then
    DOCKER_OPTS="${DOCKER_OPTS:-} ${EXTRA_DOCKER_OPTS}"
  fi

  # Decide whether to enable a docker registry mirror. This is taken from
  # the "kube-env" metadata value.
  if [[ -n "${DOCKER_REGISTRY_MIRROR_URL:-}" ]]; then
    echo "Enable docker registry mirror at: ${DOCKER_REGISTRY_MIRROR_URL}"
    DOCKER_OPTS="${DOCKER_OPTS:-} --registry-mirror=${DOCKER_REGISTRY_MIRROR_URL}"
  fi
}

function salt-grains() {
  env-to-grains "docker_opts"
  env-to-grains "docker_root"
  env-to-grains "kubelet_root"
  env-to-grains "feature_gates"
}

function configure-salt() {
  mkdir -p /etc/salt/minion.d
  salt-run-local
  if [[ "${KUBERNETES_MASTER}" == "true" ]]; then
    salt-master-role
    if [ -n "${KUBE_APISERVER_REQUEST_TIMEOUT:-}" ]; then
      salt-apiserver-timeout-grain $KUBE_APISERVER_REQUEST_TIMEOUT
    fi
  else
    salt-node-role
    node-docker-opts
  fi
  salt-grains
  install-salt
  stop-salt-minion
}

function run-salt() {
  echo "== Calling Salt =="
  salt-call --local state.highstate || true
}

function run-user-script() {
  if curl-metadata k8s-user-startup-script > "${INSTALL_DIR}/k8s-user-script.sh"; then
    user_script=$(cat "${INSTALL_DIR}/k8s-user-script.sh")
  fi
  if [[ ! -z ${user_script:-} ]]; then
    chmod u+x "${INSTALL_DIR}/k8s-user-script.sh"
    echo "== running user startup script =="
    "${INSTALL_DIR}/k8s-user-script.sh"
  fi
}

# This script is re-used on AWS. Some of the above functions will be replaced.
# The AWS kube-up script looks for this marker:
#+AWS_OVERRIDES_HERE

####################################################################################

if [[ -z "${is_push}" ]]; then
  echo "== kube-up node config starting =="
  set-broken-motd
  ensure-basic-networking
  fix-apt-sources
  ensure-install-dir
  ensure-packages
  set-kube-env
  auto-upgrade
  ensure-local-disks
  [[ "${KUBERNETES_MASTER}" == "true" ]] && mount-master-pd
  create-salt-pillar
  if [[ "${KUBERNETES_MASTER}" == "true" ]]; then
    create-salt-master-auth
    create-salt-master-kubelet-auth
  else
    create-salt-kubelet-auth
    create-salt-kubeproxy-auth
  fi
  download-release
  configure-salt
  remove-docker-artifacts
  run-salt
  reset-motd

  run-user-script
  echo "== kube-up node config done =="
else
  echo "== kube-push node config starting =="
  ensure-basic-networking
  ensure-install-dir
  set-kube-env
  create-salt-pillar
  download-release
  reset-motd
  run-salt
  echo "== kube-push node config done =="
fi