  1. #!/bin/bash
  2. # Copyright 2014 The Kubernetes Authors.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. # Common utilities, variables and checks for all build scripts.
  16. set -o errexit
  17. set -o nounset
  18. set -o pipefail
  19. DOCKER_OPTS=${DOCKER_OPTS:-""}
  20. DOCKER_NATIVE=${DOCKER_NATIVE:-""}
  21. DOCKER=(docker ${DOCKER_OPTS})
  22. DOCKER_HOST=${DOCKER_HOST:-""}
  23. DOCKER_MACHINE_NAME=${DOCKER_MACHINE_NAME:-"kube-dev"}
  24. readonly DOCKER_MACHINE_DRIVER=${DOCKER_MACHINE_DRIVER:-"virtualbox --virtualbox-memory 4096 --virtualbox-cpu-count -1"}
  25. # This will canonicalize the path
  26. KUBE_ROOT=$(cd $(dirname "${BASH_SOURCE}")/.. && pwd -P)
  27. source "${KUBE_ROOT}/hack/lib/init.sh"
  28. # Incoming options
  29. #
  30. readonly KUBE_SKIP_CONFIRMATIONS="${KUBE_SKIP_CONFIRMATIONS:-n}"
  31. readonly KUBE_GCS_UPLOAD_RELEASE="${KUBE_GCS_UPLOAD_RELEASE:-n}"
  32. readonly KUBE_GCS_NO_CACHING="${KUBE_GCS_NO_CACHING:-y}"
  33. readonly KUBE_GCS_MAKE_PUBLIC="${KUBE_GCS_MAKE_PUBLIC:-y}"
  34. # KUBE_GCS_RELEASE_BUCKET default: kubernetes-releases-${project_hash}
  35. readonly KUBE_GCS_RELEASE_PREFIX=${KUBE_GCS_RELEASE_PREFIX-devel}/
  36. readonly KUBE_GCS_DOCKER_REG_PREFIX=${KUBE_GCS_DOCKER_REG_PREFIX-docker-reg}/
  37. readonly KUBE_GCS_PUBLISH_VERSION=${KUBE_GCS_PUBLISH_VERSION:-}
  38. readonly KUBE_GCS_DELETE_EXISTING="${KUBE_GCS_DELETE_EXISTING:-n}"
  39. # Set KUBE_BUILD_PPC64LE to y to build for ppc64le in addition to other
  40. # platforms.
  41. # TODO(IBM): remove KUBE_BUILD_PPC64LE and reenable ppc64le compilation by
  42. # default when
  43. # https://github.com/kubernetes/kubernetes/issues/30384 and
  44. # https://github.com/kubernetes/kubernetes/issues/25886 are fixed.
  45. # The majority of the logic is in hack/lib/golang.sh.
  46. readonly KUBE_BUILD_PPC64LE="${KUBE_BUILD_PPC64LE:-n}"
  47. # Constants
  48. readonly KUBE_BUILD_IMAGE_REPO=kube-build
  49. readonly KUBE_BUILD_IMAGE_CROSS_TAG="$(cat ${KUBE_ROOT}/build/build-image/cross/VERSION)"
  50. # KUBE_BUILD_DATA_CONTAINER_NAME=kube-build-data-<hash>
  51. # Here we map the output directories across both the local and remote _output
  52. # directories:
  53. #
  54. # *_OUTPUT_ROOT - the base of all output in that environment.
  55. # *_OUTPUT_SUBPATH - location where golang stuff is built/cached. Also
  56. # persisted across docker runs with a volume mount.
  57. # *_OUTPUT_BINPATH - location where final binaries are placed. If the remote
  58. # is really remote, this is the stuff that has to be copied
  59. # back.
  60. # OUT_DIR can come in from the Makefile, so honor it.
  61. readonly LOCAL_OUTPUT_ROOT="${KUBE_ROOT}/${OUT_DIR:-_output}"
  62. readonly LOCAL_OUTPUT_SUBPATH="${LOCAL_OUTPUT_ROOT}/dockerized"
  63. readonly LOCAL_OUTPUT_BINPATH="${LOCAL_OUTPUT_SUBPATH}/bin"
  64. readonly LOCAL_OUTPUT_GOPATH="${LOCAL_OUTPUT_SUBPATH}/go"
  65. readonly LOCAL_OUTPUT_IMAGE_STAGING="${LOCAL_OUTPUT_ROOT}/images"
  66. # This is a symlink to binaries for "this platform" (e.g. build tools).
  67. readonly THIS_PLATFORM_BIN="${LOCAL_OUTPUT_ROOT}/bin"
  68. readonly REMOTE_OUTPUT_ROOT="/go/src/${KUBE_GO_PACKAGE}/_output"
  69. readonly REMOTE_OUTPUT_SUBPATH="${REMOTE_OUTPUT_ROOT}/dockerized"
  70. readonly REMOTE_OUTPUT_BINPATH="${REMOTE_OUTPUT_SUBPATH}/bin"
  71. readonly REMOTE_OUTPUT_GOPATH="${REMOTE_OUTPUT_SUBPATH}/go"
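# Illustrative example of the mapping above (assuming KUBE_ROOT=/home/user/kubernetes
# and the default OUT_DIR): final binaries land locally at
#   /home/user/kubernetes/_output/dockerized/bin
# and inside the build container at
#   ${REMOTE_OUTPUT_BINPATH} (i.e. /go/src/<KUBE_GO_PACKAGE>/_output/dockerized/bin,
#   where KUBE_GO_PACKAGE is typically k8s.io/kubernetes).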
  72. readonly DOCKER_MOUNT_ARGS_BASE=(
  73. # where the container build will drop output
  74. --volume "${LOCAL_OUTPUT_BINPATH}:${REMOTE_OUTPUT_BINPATH}"
  75. # timezone
  76. --volume /etc/localtime:/etc/localtime:ro
  77. )
  78. # This is where the final release artifacts are created locally
  79. readonly RELEASE_STAGE="${LOCAL_OUTPUT_ROOT}/release-stage"
  80. readonly RELEASE_DIR="${LOCAL_OUTPUT_ROOT}/release-tars"
  81. readonly GCS_STAGE="${LOCAL_OUTPUT_ROOT}/gcs-stage"
  82. # Get the set of master binaries that run in Docker (on Linux)
  83. # Entry format is "<name-of-binary>,<base-image>".
  84. # Binaries are placed in /usr/local/bin inside the image.
  85. #
  86. # $1 - server architecture
  87. kube::build::get_docker_wrapped_binaries() {
  88. case $1 in
  89. "amd64")
  90. local targets=(
  91. kube-apiserver,busybox
  92. kube-controller-manager,busybox
  93. kube-scheduler,busybox
  94. kube-proxy,gcr.io/google_containers/debian-iptables-amd64:v3
  95. );;
  96. "arm")
  97. local targets=(
  98. kube-apiserver,armel/busybox
  99. kube-controller-manager,armel/busybox
  100. kube-scheduler,armel/busybox
  101. kube-proxy,gcr.io/google_containers/debian-iptables-arm:v3
  102. );;
  103. "arm64")
  104. local targets=(
  105. kube-apiserver,aarch64/busybox
  106. kube-controller-manager,aarch64/busybox
  107. kube-scheduler,aarch64/busybox
  108. kube-proxy,gcr.io/google_containers/debian-iptables-arm64:v3
  109. );;
  110. "ppc64le")
  111. local targets=(
  112. kube-apiserver,ppc64le/busybox
  113. kube-controller-manager,ppc64le/busybox
  114. kube-scheduler,ppc64le/busybox
  115. kube-proxy,gcr.io/google_containers/debian-iptables-ppc64le:v3
  116. );;
  117. esac
  118. echo "${targets[@]}"
  119. }
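# Example usage (illustrative): callers split each entry on the comma, e.g.
#   local binaries=($(kube::build::get_docker_wrapped_binaries "amd64"))
# which yields entries such as "kube-apiserver,busybox" and
# "kube-proxy,gcr.io/google_containers/debian-iptables-amd64:v3".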
  120. # ---------------------------------------------------------------------------
  121. # Basic setup functions
  122. # Verify that the right utilities and such are installed for building Kube. Set
  123. # up some dynamic constants.
  124. #
  125. # Vars set:
  126. # KUBE_ROOT_HASH
  127. # KUBE_BUILD_IMAGE_TAG
  128. # KUBE_BUILD_IMAGE
  129. # KUBE_BUILD_CONTAINER_NAME
  130. # KUBE_BUILD_DATA_CONTAINER_NAME
  131. # DOCKER_MOUNT_ARGS
  132. # LOCAL_OUTPUT_BUILD_CONTEXT
  133. function kube::build::verify_prereqs() {
  134. kube::log::status "Verifying Prerequisites...."
  135. kube::build::ensure_tar || return 1
  136. kube::build::ensure_docker_in_path || return 1
  137. if kube::build::is_osx; then
  138. kube::build::docker_available_on_osx || return 1
  139. fi
  140. kube::build::ensure_docker_daemon_connectivity || return 1
  141. KUBE_ROOT_HASH=$(kube::build::short_hash "${HOSTNAME:-}:${KUBE_ROOT}")
  142. KUBE_BUILD_IMAGE_TAG="build-${KUBE_ROOT_HASH}"
  143. KUBE_BUILD_IMAGE="${KUBE_BUILD_IMAGE_REPO}:${KUBE_BUILD_IMAGE_TAG}"
  144. KUBE_BUILD_CONTAINER_NAME="kube-build-${KUBE_ROOT_HASH}"
  145. KUBE_BUILD_DATA_CONTAINER_NAME="kube-build-data-${KUBE_ROOT_HASH}"
  146. DOCKER_MOUNT_ARGS=("${DOCKER_MOUNT_ARGS_BASE[@]}" --volumes-from "${KUBE_BUILD_DATA_CONTAINER_NAME}")
  147. LOCAL_OUTPUT_BUILD_CONTEXT="${LOCAL_OUTPUT_IMAGE_STAGING}/${KUBE_BUILD_IMAGE}"
  148. }
  149. # ---------------------------------------------------------------------------
  150. # Utility functions
  151. function kube::build::docker_available_on_osx() {
  152. if [[ -z "${DOCKER_HOST}" ]]; then
  153. if [[ -S "/var/run/docker.sock" ]]; then
  154. kube::log::status "Using Docker for MacOS"
  155. return 0
  156. fi
  157. kube::log::status "No docker host is set. Checking options for setting one..."
  158. if [[ -z "$(which docker-machine)" && -z "$(which boot2docker)" ]]; then
  159. kube::log::status "It looks like you're running Mac OS X, yet none of Docker for Mac, docker-machine or boot2docker are on the path."
  160. kube::log::status "See: https://docs.docker.com/machine/ for installation instructions."
  161. return 1
  162. elif [[ -n "$(which docker-machine)" ]]; then
  163. kube::build::prepare_docker_machine
  164. elif [[ -n "$(which boot2docker)" ]]; then
  165. kube::build::prepare_boot2docker
  166. fi
  167. fi
  168. }
  169. function kube::build::prepare_docker_machine() {
  170. kube::log::status "docker-machine was found."
  171. docker-machine inspect "${DOCKER_MACHINE_NAME}" &> /dev/null || {
  172. kube::log::status "Creating a machine to build Kubernetes"
  173. docker-machine create --driver ${DOCKER_MACHINE_DRIVER} \
  174. --engine-env HTTP_PROXY="${KUBERNETES_HTTP_PROXY:-}" \
  175. --engine-env HTTPS_PROXY="${KUBERNETES_HTTPS_PROXY:-}" \
  176. --engine-env NO_PROXY="${KUBERNETES_NO_PROXY:-127.0.0.1}" \
  177. "${DOCKER_MACHINE_NAME}" > /dev/null || {
  178. kube::log::error "Something went wrong creating a machine."
  179. kube::log::error "Try the following: "
  180. kube::log::error "docker-machine create -d ${DOCKER_MACHINE_DRIVER} ${DOCKER_MACHINE_NAME}"
  181. return 1
  182. }
  183. }
  184. docker-machine start "${DOCKER_MACHINE_NAME}" &> /dev/null
  185. # it takes `docker-machine env` a few seconds to work if the machine was just started
  186. local docker_machine_out
  187. while ! docker_machine_out=$(docker-machine env "${DOCKER_MACHINE_NAME}" 2>&1); do
  188. if [[ ${docker_machine_out} =~ "Error checking TLS connection" ]]; then
  189. echo ${docker_machine_out}
  190. docker-machine regenerate-certs ${DOCKER_MACHINE_NAME}
  191. else
  192. sleep 1
  193. fi
  194. done
  195. eval $(docker-machine env "${DOCKER_MACHINE_NAME}")
  196. kube::log::status "A Docker host using docker-machine named '${DOCKER_MACHINE_NAME}' is ready to go!"
  197. return 0
  198. }
  199. function kube::build::prepare_boot2docker() {
  200. kube::log::status "boot2docker cli has been deprecated in favor of docker-machine."
  201. kube::log::status "See: https://github.com/boot2docker/boot2docker-cli for more details."
  202. if [[ $(boot2docker status) != "running" ]]; then
  203. kube::log::status "boot2docker isn't running. We'll try to start it."
  204. boot2docker up || {
  205. kube::log::error "Can't start boot2docker."
  206. kube::log::error "You may need to 'boot2docker init' to create your VM."
  207. return 1
  208. }
  209. fi
  210. # Reach over and set the clock. After sleep/resume the clock will skew.
  211. kube::log::status "Setting boot2docker clock"
  212. boot2docker ssh sudo date -u -D "%Y%m%d%H%M.%S" --set "$(date -u +%Y%m%d%H%M.%S)" >/dev/null
  213. kube::log::status "Setting boot2docker env variables"
  214. $(boot2docker shellinit)
  215. kube::log::status "boot2docker-vm has been successfully started."
  216. return 0
  217. }
  218. function kube::build::is_osx() {
  219. [[ "$(uname)" == "Darwin" ]]
  220. }
  221. function kube::build::is_gnu_sed() {
  222. [[ $(sed --version 2>&1) == *GNU* ]]
  223. }
  224. function kube::build::update_dockerfile() {
  225. if kube::build::is_gnu_sed; then
  226. sed_opts=(-i)
  227. else
  228. sed_opts=(-i '')
  229. fi
  230. sed "${sed_opts[@]}" "s/KUBE_BUILD_IMAGE_CROSS_TAG/${KUBE_BUILD_IMAGE_CROSS_TAG}/" "${LOCAL_OUTPUT_BUILD_CONTEXT}/Dockerfile"
  231. }
  232. function kube::build::ensure_docker_in_path() {
  233. if [[ -z "$(which docker)" ]]; then
  234. kube::log::error "Can't find 'docker' in PATH, please fix and retry."
  235. kube::log::error "See https://docs.docker.com/installation/#installation for installation instructions."
  236. return 1
  237. fi
  238. }
  239. function kube::build::ensure_docker_daemon_connectivity {
  240. if ! "${DOCKER[@]}" info > /dev/null 2>&1 ; then
  241. {
  242. echo "Can't connect to 'docker' daemon. please fix and retry."
  243. echo
  244. echo "Possible causes:"
  245. echo " - On Mac OS X, DOCKER_HOST hasn't been set. You may need to: "
  246. echo " - Create and start your VM using docker-machine or boot2docker: "
  247. echo " - docker-machine create -d ${DOCKER_MACHINE_DRIVER} ${DOCKER_MACHINE_NAME}"
  248. echo " - boot2docker init && boot2docker start"
  249. echo " - Set your environment variables using: "
  250. echo " - eval \$(docker-machine env ${DOCKER_MACHINE_NAME})"
  251. echo " - \$(boot2docker shellinit)"
  252. echo " - Update your Docker VM"
  253. echo " - Error Message: 'Error response from daemon: client is newer than server (...)' "
  254. echo " - docker-machine upgrade ${DOCKER_MACHINE_NAME}"
  255. echo " - On Linux, user isn't in 'docker' group. Add and relogin."
  256. echo " - Something like 'sudo usermod -a -G docker ${USER-user}'"
  257. echo " - RHEL7 bug and workaround: https://bugzilla.redhat.com/show_bug.cgi?id=1119282#c8"
  258. echo " - On Linux, Docker daemon hasn't been started or has crashed."
  259. } >&2
  260. return 1
  261. fi
  262. }
  263. function kube::build::ensure_tar() {
  264. if [[ -n "${TAR:-}" ]]; then
  265. return
  266. fi
  267. # Find gnu tar if it is available, bomb out if not.
  268. TAR=tar
  269. if which gtar &>/dev/null; then
  270. TAR=gtar
  271. else
  272. if which gnutar &>/dev/null; then
  273. TAR=gnutar
  274. fi
  275. fi
  276. if ! "${TAR}" --version | grep -q GNU; then
  277. echo " !!! Cannot find GNU tar. Build on Linux or install GNU tar"
  278. echo " on Mac OS X (brew install gnu-tar)."
  279. return 1
  280. fi
  281. }
  282. function kube::build::clean_output() {
  283. # Clean out the output directory if it exists.
  284. if kube::build::has_docker ; then
  285. if kube::build::build_image_built ; then
  286. kube::log::status "Cleaning out _output/dockerized/bin/ via docker build image"
  287. kube::build::run_build_command bash -c "rm -rf '${REMOTE_OUTPUT_BINPATH}'/*"
  288. else
  289. kube::log::error "Build image not built. Cannot clean via docker build image."
  290. fi
  291. kube::log::status "Removing data container ${KUBE_BUILD_DATA_CONTAINER_NAME}"
  292. "${DOCKER[@]}" rm -v "${KUBE_BUILD_DATA_CONTAINER_NAME}" >/dev/null 2>&1 || true
  293. fi
  294. kube::log::status "Removing _output directory"
  295. rm -rf "${LOCAL_OUTPUT_ROOT}"
  296. }
  297. # Make sure the _output directory is created and mountable by docker
  298. function kube::build::prepare_output() {
  299. # See auto-creation of host mounts: https://github.com/docker/docker/pull/21666
  300. # if selinux is enabled, docker run -v /foo:/foo:Z will not autocreate the host dir
  301. mkdir -p "${LOCAL_OUTPUT_SUBPATH}"
  302. mkdir -p "${LOCAL_OUTPUT_BINPATH}"
  303. # On RHEL/Fedora SELinux is enabled by default and currently breaks docker
  304. # volume mounts. We can work around this by explicitly adding a security
  305. # context to the _output directory.
  306. # Details: http://www.projectatomic.io/blog/2015/06/using-volumes-with-docker-can-cause-problems-with-selinux/
  307. if which selinuxenabled &>/dev/null && \
  308. selinuxenabled && \
  309. which chcon >/dev/null ; then
  310. if [[ ! $(ls -Zd "${LOCAL_OUTPUT_ROOT}") =~ svirt_sandbox_file_t ]] ; then
  311. kube::log::status "Applying SELinux policy to '_output' directory."
  312. if ! chcon -Rt svirt_sandbox_file_t "${LOCAL_OUTPUT_ROOT}"; then
  313. echo " ***Failed***. This may be because you have root owned files under _output."
  314. echo " Continuing, but this build may fail later if SELinux prevents access."
  315. fi
  316. fi
  317. number=${#DOCKER_MOUNT_ARGS[@]}
  318. for (( i=0; i<number; i++ )); do
  319. if [[ "${DOCKER_MOUNT_ARGS[i]}" =~ "${KUBE_ROOT}" ]]; then
  320. ## Ensure we don't label the argument multiple times
  321. if [[ ! "${DOCKER_MOUNT_ARGS[i]}" == *:Z ]]; then
  322. DOCKER_MOUNT_ARGS[i]="${DOCKER_MOUNT_ARGS[i]}:Z"
  323. fi
  324. fi
  325. done
  326. fi
  327. }
  328. function kube::build::has_docker() {
  329. which docker &> /dev/null
  330. }
  331. # Detect if a specific image exists
  332. #
  333. # $1 - image repo name
  334. # $2 - image tag
  335. function kube::build::docker_image_exists() {
  336. [[ -n $1 && -n $2 ]] || {
  337. kube::log::error "Internal error. Image not specified in docker_image_exists."
  338. exit 2
  339. }
  340. # We cannot just specify the IMAGE here as `docker images` doesn't behave as
  341. # expected. See: https://github.com/docker/docker/issues/8048
  342. "${DOCKER[@]}" images | grep -Eq "^(\S+/)?${1}\s+${2}\s+"
  343. }
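# Example (illustrative):
#   kube::build::docker_image_exists "${KUBE_BUILD_IMAGE_REPO}" "${KUBE_BUILD_IMAGE_TAG}" \
#     && echo "build image already present"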
  344. # Takes $1 and computes a short hash for it. Useful for unique tag generation.
  345. function kube::build::short_hash() {
  346. [[ $# -eq 1 ]] || {
  347. kube::log::error "Internal error. No data passed to short_hash."
  348. exit 2
  349. }
  350. local short_hash
  351. if which md5 >/dev/null 2>&1; then
  352. short_hash=$(md5 -q -s "$1")
  353. else
  354. short_hash=$(echo -n "$1" | md5sum)
  355. fi
  356. echo ${short_hash:0:10}
  357. }
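# Example (illustrative): kube::build::short_hash "${HOSTNAME}:${KUBE_ROOT}" prints
# the first 10 hex characters of the MD5 of that string, suitable for image tags.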
  358. # Pedantically kill, wait-on and remove a container. The -f -v options
  359. # to rm don't actually seem to get the job done, so force kill the
  360. # container, wait to ensure it's stopped, then try the remove. This is
  361. # a workaround for bug https://github.com/docker/docker/issues/3968.
  362. function kube::build::destroy_container() {
  363. "${DOCKER[@]}" kill "$1" >/dev/null 2>&1 || true
  364. "${DOCKER[@]}" wait "$1" >/dev/null 2>&1 || true
  365. "${DOCKER[@]}" rm -f -v "$1" >/dev/null 2>&1 || true
  366. }
  367. # Validate a release version
  368. #
  369. # Globals:
  370. # None
  371. # Arguments:
  372. # version
  373. # Returns:
  374. # If version is a valid release version
  375. # Sets: (e.g. for '1.2.3-alpha.4')
  376. # VERSION_MAJOR (e.g. '1')
  377. # VERSION_MINOR (e.g. '2')
  378. # VERSION_PATCH (e.g. '3')
  379. # VERSION_EXTRA (e.g. '-alpha.4')
  380. # VERSION_PRERELEASE (e.g. 'alpha')
  381. # VERSION_PRERELEASE_REV (e.g. '4')
  382. function kube::release::parse_and_validate_release_version() {
  383. local -r version_regex="^v(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)(-(beta|alpha)\\.(0|[1-9][0-9]*))?$"
  384. local -r version="${1-}"
  385. [[ "${version}" =~ ${version_regex} ]] || {
  386. kube::log::error "Invalid release version: '${version}', must match regex ${version_regex}"
  387. return 1
  388. }
  389. VERSION_MAJOR="${BASH_REMATCH[1]}"
  390. VERSION_MINOR="${BASH_REMATCH[2]}"
  391. VERSION_PATCH="${BASH_REMATCH[3]}"
  392. VERSION_EXTRA="${BASH_REMATCH[4]}"
  393. VERSION_PRERELEASE="${BASH_REMATCH[5]}"
  394. VERSION_PRERELEASE_REV="${BASH_REMATCH[6]}"
  395. }
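# Example (illustrative): parsing 'v1.2.3-alpha.4' sets
#   VERSION_MAJOR=1 VERSION_MINOR=2 VERSION_PATCH=3
#   VERSION_EXTRA=-alpha.4 VERSION_PRERELEASE=alpha VERSION_PRERELEASE_REV=4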
  396. # Validate a ci version
  397. #
  398. # Globals:
  399. # None
  400. # Arguments:
  401. # version
  402. # Returns:
  403. # If version is a valid ci version
  404. # Sets: (e.g. for '1.2.3-alpha.4.56+abcdef12345678')
  405. # VERSION_MAJOR (e.g. '1')
  406. # VERSION_MINOR (e.g. '2')
  407. # VERSION_PATCH (e.g. '3')
  408. # VERSION_PRERELEASE (e.g. 'alpha')
  409. # VERSION_PRERELEASE_REV (e.g. '4')
  410. # VERSION_BUILD_INFO (e.g. '.56+abcdef12345678')
  411. # VERSION_COMMITS (e.g. '56')
  412. function kube::release::parse_and_validate_ci_version() {
  413. # Accept things like "v1.2.3-alpha.4.56+abcdef12345678" or "v1.2.3-beta.4"
  414. local -r version_regex="^v(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)-(beta|alpha)\\.(0|[1-9][0-9]*)(\\.(0|[1-9][0-9]*)\\+[0-9a-f]{7,40})?$"
  415. local -r version="${1-}"
  416. [[ "${version}" =~ ${version_regex} ]] || {
  417. kube::log::error "Invalid ci version: '${version}', must match regex ${version_regex}"
  418. return 1
  419. }
  420. VERSION_MAJOR="${BASH_REMATCH[1]}"
  421. VERSION_MINOR="${BASH_REMATCH[2]}"
  422. VERSION_PATCH="${BASH_REMATCH[3]}"
  423. VERSION_PRERELEASE="${BASH_REMATCH[4]}"
  424. VERSION_PRERELEASE_REV="${BASH_REMATCH[5]}"
  425. VERSION_BUILD_INFO="${BASH_REMATCH[6]}"
  426. VERSION_COMMITS="${BASH_REMATCH[7]}"
  427. }
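# Example (illustrative): parsing 'v1.2.3-alpha.4.56+abcdef12345678' sets
#   VERSION_MAJOR=1 VERSION_MINOR=2 VERSION_PATCH=3 VERSION_PRERELEASE=alpha
#   VERSION_PRERELEASE_REV=4 VERSION_BUILD_INFO=.56+abcdef12345678 VERSION_COMMITS=56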
  428. # ---------------------------------------------------------------------------
  429. # Building
  430. function kube::build::build_image_built() {
  431. kube::build::docker_image_exists "${KUBE_BUILD_IMAGE_REPO}" "${KUBE_BUILD_IMAGE_TAG}"
  432. }
  433. # The set of source targets to include in the kube-build image
  434. function kube::build::source_targets() {
  435. local targets=(
  436. $(find . -mindepth 1 -maxdepth 1 -not \( \
  437. \( -path ./_\* -o -path ./.git\* \) -prune \
  438. \))
  439. )
  440. echo "${targets[@]}"
  441. }
  442. # Set up the context directory for the kube-build image and build it.
  443. function kube::build::build_image() {
  444. kube::build::ensure_tar
  445. mkdir -p "${LOCAL_OUTPUT_BUILD_CONTEXT}"
  446. "${TAR}" czf "${LOCAL_OUTPUT_BUILD_CONTEXT}/kube-source.tar.gz" $(kube::build::source_targets)
  447. kube::version::get_version_vars
  448. kube::version::save_version_vars "${LOCAL_OUTPUT_BUILD_CONTEXT}/kube-version-defs"
  449. cp build/build-image/Dockerfile "${LOCAL_OUTPUT_BUILD_CONTEXT}/Dockerfile"
  450. kube::build::update_dockerfile
  451. kube::build::docker_build "${KUBE_BUILD_IMAGE}" "${LOCAL_OUTPUT_BUILD_CONTEXT}" 'false'
  452. }
  453. # Build a docker image from a Dockerfile.
  454. # $1 is the name of the image to build
  455. # $2 is the location of the "context" directory, with the Dockerfile at the root.
  456. # $3 is the value to set the --pull flag for docker build; true by default
  457. function kube::build::docker_build() {
  458. local -r image=$1
  459. local -r context_dir=$2
  460. local -r pull="${3:-true}"
  461. local -ra build_cmd=("${DOCKER[@]}" build -t "${image}" "--pull=${pull}" "${context_dir}")
  462. kube::log::status "Building Docker image ${image}"
  463. local docker_output
  464. docker_output=$("${build_cmd[@]}" 2>&1) || {
  465. cat <<EOF >&2
  466. +++ Docker build command failed for ${image}
  467. ${docker_output}
  468. To retry manually, run:
  469. ${build_cmd[*]}
  470. EOF
  471. return 1
  472. }
  473. }
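# Example (illustrative), as used when building the kube-build image above:
#   kube::build::docker_build "${KUBE_BUILD_IMAGE}" "${LOCAL_OUTPUT_BUILD_CONTEXT}" 'false'
# builds from ${LOCAL_OUTPUT_BUILD_CONTEXT}/Dockerfile without pulling the base image.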
  474. function kube::build::clean_image() {
  475. local -r image=$1
  476. kube::log::status "Deleting docker image ${image}"
  477. "${DOCKER[@]}" rmi ${image} 2> /dev/null || true
  478. }
  479. function kube::build::clean_images() {
  480. kube::build::has_docker || return 0
  481. kube::build::clean_image "${KUBE_BUILD_IMAGE}"
  482. kube::log::status "Cleaning all other untagged docker images"
  483. "${DOCKER[@]}" rmi $("${DOCKER[@]}" images -q --filter 'dangling=true') 2> /dev/null || true
  484. }
  485. function kube::build::ensure_data_container() {
  486. # If the data container exists AND exited successfully, we can use it.
  487. # Otherwise nuke it and start over.
  488. local ret=0
  489. local code=$(docker inspect \
  490. -f '{{.State.ExitCode}}' \
  491. "${KUBE_BUILD_DATA_CONTAINER_NAME}" 2>/dev/null || ret=$?)
  492. if [[ "${ret}" == 0 && "${code}" != 0 ]]; then
  493. kube::build::destroy_container "${KUBE_BUILD_DATA_CONTAINER_NAME}"
  494. ret=1
  495. fi
  496. if [[ "${ret}" != 0 ]]; then
  497. kube::log::status "Creating data container ${KUBE_BUILD_DATA_CONTAINER_NAME}"
  498. # We have to ensure the directory exists, or else the docker run will
  499. # create it as root.
  500. mkdir -p "${LOCAL_OUTPUT_GOPATH}"
  501. # We want this to run as root to be able to chown, so non-root users can
  502. # later use the result as a data container. This run both creates the data
  503. # container and chowns the GOPATH.
  504. local -ra docker_cmd=(
  505. "${DOCKER[@]}" run
  506. --name "${KUBE_BUILD_DATA_CONTAINER_NAME}"
  507. --hostname "${HOSTNAME}"
  508. --volume "${REMOTE_OUTPUT_ROOT}" # white-out the whole output dir
  509. --volume "${REMOTE_OUTPUT_GOPATH}" # make a non-root owned mountpoint
  510. "${KUBE_BUILD_IMAGE}"
  511. chown -R $(id -u).$(id -g) "${REMOTE_OUTPUT_ROOT}"
  512. )
  513. "${docker_cmd[@]}"
  514. fi
  515. }
  516. # Run a command in the kube-build image. This assumes that the image has
  517. # already been built. This will sync out all output data from the build.
  518. function kube::build::run_build_command() {
  519. kube::log::status "Running build command...."
  520. [[ $# != 0 ]] || { echo "Invalid input - please specify a command to run." >&2; return 4; }
  521. kube::build::ensure_data_container
  522. kube::build::prepare_output
  523. local -a docker_run_opts=(
  524. "--name=${KUBE_BUILD_CONTAINER_NAME}"
  525. "--user=$(id -u):$(id -g)"
  526. "--hostname=${HOSTNAME}"
  527. "${DOCKER_MOUNT_ARGS[@]}"
  528. )
  529. if [ -n "${KUBERNETES_CONTRIB:-}" ]; then
  530. docker_run_opts+=(-e "KUBERNETES_CONTRIB=${KUBERNETES_CONTRIB}")
  531. fi
  532. docker_run_opts+=(
  533. --env "KUBE_FASTBUILD=${KUBE_FASTBUILD:-false}"
  534. --env "KUBE_BUILDER_OS=${OSTYPE:-notdetected}"
  535. --env "KUBE_BUILD_PPC64LE=${KUBE_BUILD_PPC64LE}" # TODO(IBM): remove
  536. )
  537. # If we have stdin we can run interactive. This allows things like 'shell.sh'
  538. # to work. However, if we run this way and don't have stdin, then it ends up
  539. # running in a daemon-ish mode. So if we don't have a stdin, we explicitly
  540. # attach stderr/stdout but don't bother asking for a tty.
  541. if [[ -t 0 ]]; then
  542. docker_run_opts+=(--interactive --tty)
  543. else
  544. docker_run_opts+=(--attach=stdout --attach=stderr)
  545. fi
  546. local -ra docker_cmd=(
  547. "${DOCKER[@]}" run "${docker_run_opts[@]}" "${KUBE_BUILD_IMAGE}")
  548. # Clean up container from any previous run
  549. kube::build::destroy_container "${KUBE_BUILD_CONTAINER_NAME}"
  550. "${docker_cmd[@]}" "$@"
  551. kube::build::destroy_container "${KUBE_BUILD_CONTAINER_NAME}"
  552. }
  553. # Test if the output directory is remote (and can only be accessed through
  554. # docker) or if it is "local" and we can access the output without going through
  555. # docker.
  556. function kube::build::is_output_remote() {
  557. rm -f "${LOCAL_OUTPUT_SUBPATH}/test_for_remote"
  558. kube::build::run_build_command touch "${REMOTE_OUTPUT_BINPATH}/test_for_remote"
  559. [[ ! -e "${LOCAL_OUTPUT_BINPATH}/test_for_remote" ]]
  560. }
  561. # If the Docker server is remote, copy the results back out.
  562. function kube::build::copy_output() {
  563. if kube::build::is_output_remote; then
  564. # At the time of this writing, docker cp does not work when copying from a volume.
  565. # As a workaround, the binaries are first copied to /tmp inside the
  566. # container, then docker cp'd out to the local binaries output directory.
  567. # The fix for the volume bug has been accepted and once it's widely
  568. # deployed the code below should be simplified to a simple docker cp
  569. # Bug: https://github.com/docker/docker/pull/8509
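# A rough sketch of that eventual simplification (assuming docker cp can read
# from the volume) would replace the run/poll dance below with a single copy:
#   "${DOCKER[@]}" cp "${KUBE_BUILD_CONTAINER_NAME}:${REMOTE_OUTPUT_BINPATH}/." "${LOCAL_OUTPUT_BINPATH}"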
  570. local -a docker_run_opts=(
  571. "--name=${KUBE_BUILD_CONTAINER_NAME}"
  572. "--user=$(id -u):$(id -g)"
  573. "${DOCKER_MOUNT_ARGS[@]}"
  574. -d
  575. )
  576. local -ra docker_cmd=(
  577. "${DOCKER[@]}" run "${docker_run_opts[@]}" "${KUBE_BUILD_IMAGE}"
  578. )
  579. kube::log::status "Syncing back _output/dockerized/bin directory from remote Docker"
  580. rm -rf "${LOCAL_OUTPUT_BINPATH}"
  581. mkdir -p "${LOCAL_OUTPUT_BINPATH}"
  582. rm -f "${THIS_PLATFORM_BIN}"
  583. ln -s "${LOCAL_OUTPUT_BINPATH}" "${THIS_PLATFORM_BIN}"
  584. kube::build::destroy_container "${KUBE_BUILD_CONTAINER_NAME}"
  585. "${docker_cmd[@]}" bash -c "cp -r ${REMOTE_OUTPUT_BINPATH} /tmp/bin;touch /tmp/finished;rm /tmp/bin/test_for_remote;/bin/sleep 600" > /dev/null 2>&1
  586. # Wait until binaries have finished copying
  587. count=0
  588. while true;do
  589. if "${DOCKER[@]}" cp "${KUBE_BUILD_CONTAINER_NAME}:/tmp/finished" "${LOCAL_OUTPUT_BINPATH}" > /dev/null 2>&1;then
  590. "${DOCKER[@]}" cp "${KUBE_BUILD_CONTAINER_NAME}:/tmp/bin" "${LOCAL_OUTPUT_SUBPATH}"
  591. break;
  592. fi
  593. let count=count+1
  594. if [[ $count -eq 60 ]]; then
  595. # break after 5m
  596. kube::log::error "Timed out waiting for binaries..."
  597. break
  598. fi
  599. sleep 5
  600. done
  601. "${DOCKER[@]}" rm -f -v "${KUBE_BUILD_CONTAINER_NAME}" >/dev/null 2>&1 || true
  602. else
  603. kube::log::status "Output directory is local. No need to copy results out."
  604. fi
  605. }
  606. # ---------------------------------------------------------------------------
  607. # Build final release artifacts
  608. function kube::release::clean_cruft() {
  609. # Clean out cruft
  610. find ${RELEASE_STAGE} -name '*~' -exec rm {} \;
  611. find ${RELEASE_STAGE} -name '#*#' -exec rm {} \;
  612. find ${RELEASE_STAGE} -name '.DS*' -exec rm {} \;
  613. }
  614. function kube::release::package_hyperkube() {
  615. # If we have these variables set then we want to build all docker images.
  616. if [[ -n "${KUBE_DOCKER_IMAGE_TAG-}" && -n "${KUBE_DOCKER_REGISTRY-}" ]]; then
  617. for arch in "${KUBE_SERVER_PLATFORMS[@]##*/}"; do
  618. kube::log::status "Building hyperkube image for arch: ${arch}"
  619. REGISTRY="${KUBE_DOCKER_REGISTRY}" VERSION="${KUBE_DOCKER_IMAGE_TAG}" ARCH="${arch}" make -C cluster/images/hyperkube/ build
  620. done
  621. fi
  622. }
  623. function kube::release::package_tarballs() {
  624. # Clean out any old releases
  625. rm -rf "${RELEASE_DIR}"
  626. mkdir -p "${RELEASE_DIR}"
  627. kube::release::package_build_image_tarball &
  628. kube::release::package_client_tarballs &
  629. kube::release::package_server_tarballs &
  630. kube::release::package_salt_tarball &
  631. kube::release::package_kube_manifests_tarball &
  632. kube::util::wait-for-jobs || { kube::log::error "previous tarball phase failed"; return 1; }
  633. kube::release::package_full_tarball & # _full depends on all the previous phases
  634. kube::release::package_test_tarball & # _test doesn't depend on anything
  635. kube::util::wait-for-jobs || { kube::log::error "previous tarball phase failed"; return 1; }
  636. }
  637. # Package the build image we used from the previous stage, for compliance/licensing/audit/yadda.
  638. function kube::release::package_build_image_tarball() {
  639. kube::log::status "Building tarball: src"
  640. "${TAR}" czf "${RELEASE_DIR}/kubernetes-src.tar.gz" -C "${LOCAL_OUTPUT_BUILD_CONTEXT}" .
  641. }
  642. # Package up all of the cross compiled clients. Over time this should grow into
  643. # a full SDK
  644. function kube::release::package_client_tarballs() {
  645. # Find all of the built client binaries
  646. local platform platforms
  647. platforms=($(cd "${LOCAL_OUTPUT_BINPATH}" ; echo */*))
  648. for platform in "${platforms[@]}"; do
  649. local platform_tag=${platform/\//-} # Replace "/" with "-"
  650. kube::log::status "Starting tarball: client $platform_tag"
  651. (
  652. local release_stage="${RELEASE_STAGE}/client/${platform_tag}/kubernetes"
  653. rm -rf "${release_stage}"
  654. mkdir -p "${release_stage}/client/bin"
  655. local client_bins=("${KUBE_CLIENT_BINARIES[@]}")
  656. if [[ "${platform%/*}" == "windows" ]]; then
  657. client_bins=("${KUBE_CLIENT_BINARIES_WIN[@]}")
  658. fi
  659. # This fancy expression will expand to prepend a path
  660. # (${LOCAL_OUTPUT_BINPATH}/${platform}/) to every item in the
  661. # KUBE_CLIENT_BINARIES array.
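# For example (illustrative), with platform="linux/amd64" and client_bins=(kubectl),
# "${client_bins[@]/#/${LOCAL_OUTPUT_BINPATH}/${platform}/}" expands to
# "${LOCAL_OUTPUT_BINPATH}/linux/amd64/kubectl".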
  662. cp "${client_bins[@]/#/${LOCAL_OUTPUT_BINPATH}/${platform}/}" \
  663. "${release_stage}/client/bin/"
  664. kube::release::clean_cruft
  665. local package_name="${RELEASE_DIR}/kubernetes-client-${platform_tag}.tar.gz"
  666. kube::release::create_tarball "${package_name}" "${release_stage}/.."
  667. ) &
  668. done
  669. kube::log::status "Waiting on tarballs"
  670. kube::util::wait-for-jobs || { kube::log::error "client tarball creation failed"; exit 1; }
  671. }
  672. # Package up all of the server binaries
  673. function kube::release::package_server_tarballs() {
  674. local platform
  675. for platform in "${KUBE_SERVER_PLATFORMS[@]}"; do
  676. local platform_tag=${platform/\//-} # Replace "/" with "-"
  677. local arch=$(basename ${platform})
  678. kube::log::status "Building tarball: server $platform_tag"
  679. local release_stage="${RELEASE_STAGE}/server/${platform_tag}/kubernetes"
  680. rm -rf "${release_stage}"
  681. mkdir -p "${release_stage}/server/bin"
  682. mkdir -p "${release_stage}/addons"
  683. # This fancy expression will expand to prepend a path
  684. # (${LOCAL_OUTPUT_BINPATH}/${platform}/) to every item in the
  685. # KUBE_SERVER_BINARIES array.
  686. cp "${KUBE_SERVER_BINARIES[@]/#/${LOCAL_OUTPUT_BINPATH}/${platform}/}" \
  687. "${release_stage}/server/bin/"
  688. kube::release::create_docker_images_for_server "${release_stage}/server/bin" "${arch}"
  689. # Include the client binaries here too as they are useful debugging tools.
  690. local client_bins=("${KUBE_CLIENT_BINARIES[@]}")
  691. if [[ "${platform%/*}" == "windows" ]]; then
  692. client_bins=("${KUBE_CLIENT_BINARIES_WIN[@]}")
  693. fi
  694. cp "${client_bins[@]/#/${LOCAL_OUTPUT_BINPATH}/${platform}/}" \
  695. "${release_stage}/server/bin/"
  696. cp "${KUBE_ROOT}/Godeps/LICENSES" "${release_stage}/"
  697. cp "${RELEASE_DIR}/kubernetes-src.tar.gz" "${release_stage}/"
  698. kube::release::clean_cruft
  699. local package_name="${RELEASE_DIR}/kubernetes-server-${platform_tag}.tar.gz"
  700. kube::release::create_tarball "${package_name}" "${release_stage}/.."
  701. done
  702. }
  703. function kube::release::md5() {
  704. if which md5 >/dev/null 2>&1; then
  705. md5 -q "$1"
  706. else
  707. md5sum "$1" | awk '{ print $1 }'
  708. fi
  709. }
  710. function kube::release::sha1() {
  711. if which shasum >/dev/null 2>&1; then
  712. shasum -a1 "$1" | awk '{ print $1 }'
  713. else
  714. sha1sum "$1" | awk '{ print $1 }'
  715. fi
  716. }
  717. # This takes binaries that run on the master and creates Docker images
  718. # that wrap each binary. (One Docker image per binary.)
  719. # Args:
  720. # $1 - binary_dir, the directory to save the tarred images to.
  721. # $2 - arch, architecture for which we are building docker images.
  722. function kube::release::create_docker_images_for_server() {
  723. # Create a sub-shell so that we don't pollute the outer environment
  724. (
  725. local binary_dir="$1"
  726. local arch="$2"
  727. local binary_name
  728. local binaries=($(kube::build::get_docker_wrapped_binaries ${arch}))
  729. for wrappable in "${binaries[@]}"; do
  730. local oldifs=$IFS
  731. IFS=","
  732. set $wrappable
  733. IFS=$oldifs
  734. local binary_name="$1"
  735. local base_image="$2"
  736. kube::log::status "Starting Docker build for image: ${binary_name}"
  737. (
  738. local md5_sum
  739. md5_sum=$(kube::release::md5 "${binary_dir}/${binary_name}")
  740. local docker_build_path="${binary_dir}/${binary_name}.dockerbuild"
  741. local docker_file_path="${docker_build_path}/Dockerfile"
  742. local binary_file_path="${binary_dir}/${binary_name}"
  743. rm -rf ${docker_build_path}
  744. mkdir -p ${docker_build_path}
  745. ln ${binary_dir}/${binary_name} ${docker_build_path}/${binary_name}
  746. printf " FROM ${base_image} \n ADD ${binary_name} /usr/local/bin/${binary_name}\n" > ${docker_file_path}
  747. if [[ ${arch} == "amd64" ]]; then
  748. # If we are building an amd64 docker image, preserve the original image name
  749. local docker_image_tag=gcr.io/google_containers/${binary_name}:${md5_sum}
  750. else
  751. # If we are building a docker image for another architecture, append the arch to the image tag
  752. local docker_image_tag=gcr.io/google_containers/${binary_name}-${arch}:${md5_sum}
  753. fi
  754. "${DOCKER[@]}" build -q -t "${docker_image_tag}" ${docker_build_path} >/dev/null
  755. "${DOCKER[@]}" save ${docker_image_tag} > ${binary_dir}/${binary_name}.tar
  756. echo $md5_sum > ${binary_dir}/${binary_name}.docker_tag
  757. rm -rf ${docker_build_path}
  758. # If we are building an official/alpha/beta release we want to keep docker images
  759. # and tag them appropriately.
  760. if [[ -n "${KUBE_DOCKER_IMAGE_TAG-}" && -n "${KUBE_DOCKER_REGISTRY-}" ]]; then
  761. local release_docker_image_tag="${KUBE_DOCKER_REGISTRY}/${binary_name}-${arch}:${KUBE_DOCKER_IMAGE_TAG}"
  762. kube::log::status "Tagging docker image ${docker_image_tag} as ${release_docker_image_tag}"
  763. "${DOCKER[@]}" tag -f "${docker_image_tag}" "${release_docker_image_tag}" 2>/dev/null
  764. fi
  765. kube::log::status "Deleting docker image ${docker_image_tag}"
  766. "${DOCKER[@]}" rmi ${docker_image_tag} 2>/dev/null || true
  767. ) &
  768. done
  769. kube::util::wait-for-jobs || { kube::log::error "previous Docker build failed"; return 1; }
  770. kube::log::status "Docker builds done"
  771. )
  772. }
  773. # Package up the salt configuration tree. This is an optional helper for getting
  774. # a cluster up and running.
  775. function kube::release::package_salt_tarball() {
  776. kube::log::status "Building tarball: salt"
  777. local release_stage="${RELEASE_STAGE}/salt/kubernetes"
  778. rm -rf "${release_stage}"
  779. mkdir -p "${release_stage}"
  780. cp -R "${KUBE_ROOT}/cluster/saltbase" "${release_stage}/"
  781. # TODO(#3579): This is a temporary hack. It gathers up the yaml,
  782. # yaml.in, json files in cluster/addons (minus any demos) and overlays
  783. # them into kube-addons, where we expect them. (This pipeline is a
  784. # fancy copy that strips out everything except the files we want.)
  785. local objects
  786. objects=$(cd "${KUBE_ROOT}/cluster/addons" && find . \( -name \*.yaml -or -name \*.yaml.in -or -name \*.json \) | grep -v demo)
  787. tar c -C "${KUBE_ROOT}/cluster/addons" ${objects} | tar x -C "${release_stage}/saltbase/salt/kube-addons"
  788. kube::release::clean_cruft
  789. local package_name="${RELEASE_DIR}/kubernetes-salt.tar.gz"
  790. kube::release::create_tarball "${package_name}" "${release_stage}/.."
  791. }
  792. # This will pack kube-system manifest files for distros that do not use salt,
  793. # such as GCI and Ubuntu Trusty. We directly copy manifests from
  794. # cluster/addons and cluster/saltbase/salt. The cluster initialization scripts
  795. # will remove the salt configuration and evaluate the variables in the manifests.
  796. function kube::release::package_kube_manifests_tarball() {
  797. kube::log::status "Building tarball: manifests"
  798. local release_stage="${RELEASE_STAGE}/manifests/kubernetes"
  799. rm -rf "${release_stage}"
  800. local dst_dir="${release_stage}/gci-trusty"
  801. mkdir -p "${dst_dir}"
  802. local salt_dir="${KUBE_ROOT}/cluster/saltbase/salt"
  803. cp "${salt_dir}/cluster-autoscaler/cluster-autoscaler.manifest" "${dst_dir}/"
  804. cp "${salt_dir}/fluentd-es/fluentd-es.yaml" "${release_stage}/"
  805. cp "${salt_dir}/fluentd-gcp/fluentd-gcp.yaml" "${release_stage}/"
  806. cp "${salt_dir}/kube-registry-proxy/kube-registry-proxy.yaml" "${release_stage}/"
  807. cp "${salt_dir}/kube-proxy/kube-proxy.manifest" "${release_stage}/"
  808. cp "${salt_dir}/etcd/etcd.manifest" "${dst_dir}"
  809. cp "${salt_dir}/kube-scheduler/kube-scheduler.manifest" "${dst_dir}"
  810. cp "${salt_dir}/kube-apiserver/kube-apiserver.manifest" "${dst_dir}"
  811. cp "${salt_dir}/kube-apiserver/abac-authz-policy.jsonl" "${dst_dir}"
  812. cp "${salt_dir}/kube-controller-manager/kube-controller-manager.manifest" "${dst_dir}"
  813. cp "${salt_dir}/kube-addons/kube-addon-manager.yaml" "${dst_dir}"
  814. cp "${salt_dir}/l7-gcp/glbc.manifest" "${dst_dir}"
  815. cp "${salt_dir}/rescheduler/rescheduler.manifest" "${dst_dir}/"
  816. cp "${KUBE_ROOT}/cluster/gce/trusty/configure-helper.sh" "${dst_dir}/trusty-configure-helper.sh"
  817. cp "${KUBE_ROOT}/cluster/gce/gci/configure-helper.sh" "${dst_dir}/gci-configure-helper.sh"
  818. cp "${KUBE_ROOT}/cluster/gce/gci/health-monitor.sh" "${dst_dir}/health-monitor.sh"
  819. cp -r "${salt_dir}/kube-admission-controls/limit-range" "${dst_dir}"
  820. local objects
  821. objects=$(cd "${KUBE_ROOT}/cluster/addons" && find . \( -name \*.yaml -or -name \*.yaml.in -or -name \*.json \) | grep -v demo)
  822. tar c -C "${KUBE_ROOT}/cluster/addons" ${objects} | tar x -C "${dst_dir}"
  823. # This is for CoreOS only. ContainerVM, GCI, and Trusty do not use it.
  824. cp -r "${KUBE_ROOT}/cluster/gce/coreos/kube-manifests"/* "${release_stage}/"
  825. kube::release::clean_cruft
  826. local package_name="${RELEASE_DIR}/kubernetes-manifests.tar.gz"
  827. kube::release::create_tarball "${package_name}" "${release_stage}/.."
  828. }
  829. # This is the stuff you need to run tests from the binary distribution.
  830. function kube::release::package_test_tarball() {
  831. kube::log::status "Building tarball: test"
  832. local release_stage="${RELEASE_STAGE}/test/kubernetes"
  833. rm -rf "${release_stage}"
  834. mkdir -p "${release_stage}"
  835. local platform
  836. for platform in "${KUBE_TEST_PLATFORMS[@]}"; do
  837. local test_bins=("${KUBE_TEST_BINARIES[@]}")
  838. if [[ "${platform%/*}" == "windows" ]]; then
  839. test_bins=("${KUBE_TEST_BINARIES_WIN[@]}")
  840. fi
  841. mkdir -p "${release_stage}/platforms/${platform}"
  842. cp "${test_bins[@]/#/${LOCAL_OUTPUT_BINPATH}/${platform}/}" \
  843. "${release_stage}/platforms/${platform}"
  844. done
  845. # Add the test image files
  846. mkdir -p "${release_stage}/test/images"
  847. cp -fR "${KUBE_ROOT}/test/images" "${release_stage}/test/"
  848. tar c ${KUBE_TEST_PORTABLE[@]} | tar x -C ${release_stage}
  849. kube::release::clean_cruft
  850. local package_name="${RELEASE_DIR}/kubernetes-test.tar.gz"
  851. kube::release::create_tarball "${package_name}" "${release_stage}/.."
  852. }
  853. # This is all the stuff you need to run/install kubernetes. This includes:
  854. # - precompiled binaries for client
  855. # - Cluster spin up/down scripts and configs for various cloud providers
  856. # - tarballs for server binary and salt configs that are ready to be uploaded
  857. # to master by whatever means appropriate.
  858. function kube::release::package_full_tarball() {
  859. kube::log::status "Building tarball: full"
  860. local release_stage="${RELEASE_STAGE}/full/kubernetes"
  861. rm -rf "${release_stage}"
  862. mkdir -p "${release_stage}"
  863. # Copy all of the client binaries in here, but not test or server binaries.
  864. # The server binaries are included with the server binary tarball.
  865. local platform
  866. for platform in "${KUBE_CLIENT_PLATFORMS[@]}"; do
  867. local client_bins=("${KUBE_CLIENT_BINARIES[@]}")
  868. if [[ "${platform%/*}" == "windows" ]]; then
  869. client_bins=("${KUBE_CLIENT_BINARIES_WIN[@]}")
  870. fi
  871. mkdir -p "${release_stage}/platforms/${platform}"
  872. cp "${client_bins[@]/#/${LOCAL_OUTPUT_BINPATH}/${platform}/}" \
  873. "${release_stage}/platforms/${platform}"
  874. done
  875. # We want everything in /cluster except saltbase. That is only needed on the
  876. # server.
  877. cp -R "${KUBE_ROOT}/cluster" "${release_stage}/"
  878. rm -rf "${release_stage}/cluster/saltbase"
  879. mkdir -p "${release_stage}/server"
  880. cp "${RELEASE_DIR}/kubernetes-salt.tar.gz" "${release_stage}/server/"
  881. cp "${RELEASE_DIR}"/kubernetes-server-*.tar.gz "${release_stage}/server/"
  882. cp "${RELEASE_DIR}/kubernetes-manifests.tar.gz" "${release_stage}/server/"
  883. mkdir -p "${release_stage}/third_party"
  884. cp -R "${KUBE_ROOT}/third_party/htpasswd" "${release_stage}/third_party/htpasswd"
  885. # Include only federation/cluster, federation/manifests and federation/deploy
  886. mkdir "${release_stage}/federation"
  887. cp -R "${KUBE_ROOT}/federation/cluster" "${release_stage}/federation/"
  888. cp -R "${KUBE_ROOT}/federation/manifests" "${release_stage}/federation/"
  889. cp -R "${KUBE_ROOT}/federation/deploy" "${release_stage}/federation/"
  890. cp -R "${KUBE_ROOT}/examples" "${release_stage}/"
  891. cp -R "${KUBE_ROOT}/docs" "${release_stage}/"
  892. cp "${KUBE_ROOT}/README.md" "${release_stage}/"
  893. cp "${KUBE_ROOT}/Godeps/LICENSES" "${release_stage}/"
  894. cp "${KUBE_ROOT}/Vagrantfile" "${release_stage}/"
  895. echo "${KUBE_GIT_VERSION}" > "${release_stage}/version"
  896. kube::release::clean_cruft
  897. local package_name="${RELEASE_DIR}/kubernetes.tar.gz"
  898. kube::release::create_tarball "${package_name}" "${release_stage}/.."
  899. }
  900. # Build a release tarball. $1 is the output tar name. $2 is the base directory
  901. # of the files to be packaged. This assumes that ${2}/kubernetes is what is
  902. # being packaged.
  903. function kube::release::create_tarball() {
  904. kube::build::ensure_tar
  905. local tarfile=$1
  906. local stagingdir=$2
  907. "${TAR}" czf "${tarfile}" -C "${stagingdir}" kubernetes --owner=0 --group=0
  908. }
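# Example (illustrative), as used by the packaging functions above:
#   kube::release::create_tarball "${RELEASE_DIR}/kubernetes.tar.gz" "${release_stage}/.."
# which tars up the "kubernetes" directory found under "${release_stage}/..".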
  909. # ---------------------------------------------------------------------------
  910. # GCS Release
  911. function kube::release::gcs::release() {
  912. [[ ${KUBE_GCS_UPLOAD_RELEASE} =~ ^[yY]$ ]] || return 0
  913. kube::release::gcs::verify_prereqs || return 1
  914. kube::release::gcs::ensure_release_bucket || return 1
  915. kube::release::gcs::copy_release_artifacts || return 1
  916. }
  917. # Verify things are set up for uploading to GCS
  918. function kube::release::gcs::verify_prereqs() {
  919. if [[ -z "$(which gsutil)" || -z "$(which gcloud)" ]]; then
  920. echo "Releasing Kubernetes requires gsutil and gcloud. Please download,"
  921. echo "install and authorize through the Google Cloud SDK: "
  922. echo
  923. echo " https://developers.google.com/cloud/sdk/"
  924. return 1
  925. fi
  926. if [[ -z "${GCLOUD_ACCOUNT-}" ]]; then
  927. GCLOUD_ACCOUNT=$(gcloud config list --format='value(core.account)' 2>/dev/null)
  928. fi
  929. if [[ -z "${GCLOUD_ACCOUNT-}" ]]; then
  930. echo "No account authorized through gcloud. Please fix with:"
  931. echo
  932. echo " gcloud auth login"
  933. return 1
  934. fi
  935. if [[ -z "${GCLOUD_PROJECT-}" ]]; then
  936. GCLOUD_PROJECT=$(gcloud config list --format='value(core.project)' 2>/dev/null)
  937. fi
  938. if [[ -z "${GCLOUD_PROJECT-}" ]]; then
  939. echo "No project set in gcloud. Please fix with:"
  940. echo
  941. echo " gcloud config set project <project id>"
  942. return 1
  943. fi
  944. }
  945. # Create a unique bucket name for releasing Kube and make sure it exists.
  946. function kube::release::gcs::ensure_release_bucket() {
  947. local project_hash
  948. project_hash=$(kube::build::short_hash "$GCLOUD_PROJECT")
  949. KUBE_GCS_RELEASE_BUCKET=${KUBE_GCS_RELEASE_BUCKET-kubernetes-releases-${project_hash}}
  950. if ! gsutil ls "gs://${KUBE_GCS_RELEASE_BUCKET}" >/dev/null 2>&1 ; then
  951. echo "Creating Google Cloud Storage bucket: $KUBE_GCS_RELEASE_BUCKET"
  952. gsutil mb -p "${GCLOUD_PROJECT}" "gs://${KUBE_GCS_RELEASE_BUCKET}" || return 1
  953. fi
  954. }
  955. function kube::release::gcs::stage_and_hash() {
  956. kube::build::ensure_tar || return 1
  957. # Split the args into srcs... and dst
  958. local -r args=( "$@" )
  959. local -r split=$((${#args[@]}-1)) # Split point for src/dst args
  960. local -r srcs=( "${args[@]::${split}}" )
  961. local -r dst="${args[${split}]}"
  962. for src in ${srcs[@]}; do
  963. srcdir=$(dirname ${src})
  964. srcthing=$(basename ${src})
  965. mkdir -p ${GCS_STAGE}/${dst} || return 1
  966. "${TAR}" c -C ${srcdir} ${srcthing} | "${TAR}" x -C ${GCS_STAGE}/${dst} || return 1
  967. done
  968. }
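# Example (from the caller below): kube::release::gcs::stage_and_hash "${RELEASE_DIR}"/* .
# treats the final argument (".") as the destination directory under ${GCS_STAGE}
# and every preceding argument as a source to copy there.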
  969. function kube::release::gcs::copy_release_artifacts() {
  970. # TODO: This isn't atomic. There will be points in time where there will be
  971. # no active release. Also, if something fails, the release could be half-
  972. # copied. The real way to do this would perhaps be to have some sort of release
  973. # version so that we are never overwriting a destination.
  974. local -r gcs_destination="gs://${KUBE_GCS_RELEASE_BUCKET}/${KUBE_GCS_RELEASE_PREFIX}"
  975. kube::log::status "Staging release artifacts to ${GCS_STAGE}"
  976. rm -rf ${GCS_STAGE} || return 1
  977. mkdir -p ${GCS_STAGE} || return 1
  978. # Stage everything in release directory
  979. kube::release::gcs::stage_and_hash "${RELEASE_DIR}"/* . || return 1
  980. # Having the configure-vm.sh script and GCI code from the GCE cluster
  981. # deploy hosted with the release is useful for GKE.
  982. kube::release::gcs::stage_and_hash "${RELEASE_STAGE}/full/kubernetes/cluster/gce/configure-vm.sh" extra/gce || return 1
  983. kube::release::gcs::stage_and_hash "${RELEASE_STAGE}/full/kubernetes/cluster/gce/gci/node.yaml" extra/gce || return 1
  984. kube::release::gcs::stage_and_hash "${RELEASE_STAGE}/full/kubernetes/cluster/gce/gci/master.yaml" extra/gce || return 1
  985. kube::release::gcs::stage_and_hash "${RELEASE_STAGE}/full/kubernetes/cluster/gce/gci/configure.sh" extra/gce || return 1
  986. # Upload the "naked" binaries to GCS. This is useful for install scripts that
  987. # download the binaries directly and don't need tars.
  988. local platform platforms
  989. platforms=($(cd "${RELEASE_STAGE}/client" ; echo *))
  990. for platform in "${platforms[@]}"; do
  991. local src="${RELEASE_STAGE}/client/${platform}/kubernetes/client/bin/*"
  992. local dst="bin/${platform/-//}/"
  993. # We assume here the "server package" is a superset of the "client package"
  994. if [[ -d "${RELEASE_STAGE}/server/${platform}" ]]; then
  995. src="${RELEASE_STAGE}/server/${platform}/kubernetes/server/bin/*"
  996. fi
  997. kube::release::gcs::stage_and_hash "$src" "$dst" || return 1
  998. done
  999. kube::log::status "Hashing files in ${GCS_STAGE}"
  1000. find ${GCS_STAGE} -type f | while read path; do
  1001. kube::release::md5 ${path} > "${path}.md5" || return 1
  1002. kube::release::sha1 ${path} > "${path}.sha1" || return 1
  1003. done
  1004. kube::log::status "Copying release artifacts to ${gcs_destination}"
  1005. # First delete all objects at the destination
  1006. if gsutil ls "${gcs_destination}" >/dev/null 2>&1; then
  1007. kube::log::error "${gcs_destination} not empty."
  1008. [[ ${KUBE_GCS_DELETE_EXISTING} =~ ^[yY]$ ]] || {
  1009. read -p "Delete everything under ${gcs_destination}? [y/n] " -r || {
  1010. kube::log::status "EOF on prompt. Skipping upload"
  1011. return
  1012. }
  1013. [[ $REPLY =~ ^[yY]$ ]] || {
  1014. kube::log::status "Skipping upload"
  1015. return
  1016. }
  1017. }
  1018. kube::log::status "Deleting everything under ${gcs_destination}"
  1019. gsutil -q -m rm -f -R "${gcs_destination}" || return 1
  1020. fi
  1021. local gcs_options=()
  1022. if [[ ${KUBE_GCS_NO_CACHING} =~ ^[yY]$ ]]; then
  1023. gcs_options=("-h" "Cache-Control:private, max-age=0")
  1024. fi
  1025. gsutil -q -m "${gcs_options[@]+${gcs_options[@]}}" cp -r "${GCS_STAGE}"/* ${gcs_destination} || return 1
  1026. # TODO(jbeda): Generate an HTML page with links for this release so it is easy
  1027. # to see it. For extra credit, generate a dynamic page that builds up the
  1028. # release list using the GCS JSON API. Use Angular and Bootstrap for extra
  1029. # extra credit.
  1030. if [[ ${KUBE_GCS_MAKE_PUBLIC} =~ ^[yY]$ ]]; then
  1031. kube::log::status "Marking all uploaded objects public"
  1032. gsutil -q -m acl ch -R -g all:R "${gcs_destination}" >/dev/null 2>&1 || return 1
  1033. fi
  1034. gsutil ls -lhr "${gcs_destination}" || return 1
  1035. if [[ -n "${KUBE_GCS_RELEASE_BUCKET_MIRROR:-}" ]] &&
  1036. [[ "${KUBE_GCS_RELEASE_BUCKET_MIRROR}" != "${KUBE_GCS_RELEASE_BUCKET}" ]]; then
  1037. local -r gcs_mirror="gs://${KUBE_GCS_RELEASE_BUCKET_MIRROR}/${KUBE_GCS_RELEASE_PREFIX}"
  1038. kube::log::status "Mirroring build to ${gcs_mirror}"
    gsutil -q -m "${gcs_options[@]+${gcs_options[@]}}" rsync -d -r "${gcs_destination}" "${gcs_mirror}" || return 1
    if [[ ${KUBE_GCS_MAKE_PUBLIC} =~ ^[yY]$ ]]; then
      kube::log::status "Marking all uploaded mirror objects public"
      gsutil -q -m acl ch -R -g all:R "${gcs_mirror}" >/dev/null 2>&1 || return 1
    fi
  fi
}

# Publish a new ci version, (latest,) but only if the release files actually
# exist on GCS.
#
# Globals:
#   See callees
# Arguments:
#   None
# Returns:
#   Success
function kube::release::gcs::publish_ci() {
  kube::release::gcs::verify_release_files || return 1

  kube::release::parse_and_validate_ci_version "${KUBE_GCS_PUBLISH_VERSION}" || return 1
  local -r version_major="${VERSION_MAJOR}"
  local -r version_minor="${VERSION_MINOR}"

  local -r publish_files=(ci/latest.txt ci/latest-${version_major}.txt ci/latest-${version_major}.${version_minor}.txt)
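  # For example (illustrative), a v1.3.x CI build updates ci/latest.txt,
  # ci/latest-1.txt, and ci/latest-1.3.txt.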

  for publish_file in ${publish_files[*]}; do
    # If there's a version that's above the one we're trying to release, don't
    # do anything, and just try the next one.
    kube::release::gcs::verify_ci_ge "${publish_file}" || continue
    kube::release::gcs::publish "${publish_file}" || return 1
  done
}

# Publish a new official version, (latest or stable,) but only if the release
# files actually exist on GCS and the release we're dealing with is newer than
# the contents in GCS.
#
# Globals:
#   KUBE_GCS_PUBLISH_VERSION
#   See callees
# Arguments:
#   release_kind: either 'latest' or 'stable'
# Returns:
#   Success
function kube::release::gcs::publish_official() {
  local -r release_kind="${1-}"

  kube::release::gcs::verify_release_files || return 1

  kube::release::parse_and_validate_release_version "${KUBE_GCS_PUBLISH_VERSION}" || return 1
  local -r version_major="${VERSION_MAJOR}"
  local -r version_minor="${VERSION_MINOR}"

  local publish_files
  if [[ "${release_kind}" == 'latest' ]]; then
    publish_files=(release/latest.txt release/latest-${version_major}.txt release/latest-${version_major}.${version_minor}.txt)
  elif [[ "${release_kind}" == 'stable' ]]; then
    publish_files=(release/stable.txt release/stable-${version_major}.txt release/stable-${version_major}.${version_minor}.txt)
  else
    kube::log::error "Wrong release_kind: must be 'latest' or 'stable'."
    return 1
  fi
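  # For example (illustrative), publishing v1.2.4 as 'stable' updates
  # release/stable.txt, release/stable-1.txt, and release/stable-1.2.txt.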

  for publish_file in ${publish_files[*]}; do
    # If there's a version that's above the one we're trying to release, don't
    # do anything, and just try the next one.
    kube::release::gcs::verify_release_gt "${publish_file}" || continue
    kube::release::gcs::publish "${publish_file}" || return 1
  done
}

# Verify that the release files we expect actually exist.
#
# Globals:
#   KUBE_GCS_RELEASE_BUCKET
#   KUBE_GCS_RELEASE_PREFIX
# Arguments:
#   None
# Returns:
#   If release files exist
function kube::release::gcs::verify_release_files() {
  local -r release_dir="gs://${KUBE_GCS_RELEASE_BUCKET}/${KUBE_GCS_RELEASE_PREFIX}"
  if ! gsutil ls "${release_dir}" >/dev/null 2>&1 ; then
    kube::log::error "Release files don't exist at '${release_dir}'"
    return 1
  fi
}

# Check if the new version is greater than the version currently published on
# GCS.
#
# Globals:
#   KUBE_GCS_PUBLISH_VERSION
#   KUBE_GCS_RELEASE_BUCKET
# Arguments:
#   publish_file: the GCS location to look in
# Returns:
#   If new version is greater than the GCS version
#
# TODO(16529): This should all be outside of build and in release, and should
# be refactored to reduce code duplication. Also consider using strictly nested
# ifs and explicit handling of the equals case.
function kube::release::gcs::verify_release_gt() {
  local -r publish_file="${1-}"
  local -r new_version=${KUBE_GCS_PUBLISH_VERSION}
  local -r publish_file_dst="gs://${KUBE_GCS_RELEASE_BUCKET}/${publish_file}"

  kube::release::parse_and_validate_release_version "${new_version}" || return 1

  local -r version_major="${VERSION_MAJOR}"
  local -r version_minor="${VERSION_MINOR}"
  local -r version_patch="${VERSION_PATCH}"
  local -r version_prerelease="${VERSION_PRERELEASE}"
  local -r version_prerelease_rev="${VERSION_PRERELEASE_REV}"

  local gcs_version
  if gcs_version="$(gsutil cat "${publish_file_dst}")"; then
    kube::release::parse_and_validate_release_version "${gcs_version}" || {
      kube::log::error "${publish_file_dst} contains an invalid release version, can't compare: '${gcs_version}'"
      return 1
    }

    local -r gcs_version_major="${VERSION_MAJOR}"
    local -r gcs_version_minor="${VERSION_MINOR}"
    local -r gcs_version_patch="${VERSION_PATCH}"
    local -r gcs_version_prerelease="${VERSION_PRERELEASE}"
    local -r gcs_version_prerelease_rev="${VERSION_PRERELEASE_REV}"

    local greater=true
    if [[ "${version_major}" -lt "${gcs_version_major}" ]]; then
      greater=false
    elif [[ "${version_major}" -gt "${gcs_version_major}" ]]; then
      : # fall out
    elif [[ "${version_minor}" -lt "${gcs_version_minor}" ]]; then
      greater=false
    elif [[ "${version_minor}" -gt "${gcs_version_minor}" ]]; then
      : # fall out
    elif [[ "${version_patch}" -lt "${gcs_version_patch}" ]]; then
      greater=false
    elif [[ "${version_patch}" -gt "${gcs_version_patch}" ]]; then
      : # fall out
    # Use lexicographic (instead of integer) comparison because
    # version_prerelease is a string, ("alpha" or "beta",) but first check if
    # either is an official release (i.e. empty prerelease string).
    #
    # We have to do this because lexicographically "beta" > "alpha" > "", but
    # we want official > beta > alpha.
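    # Illustrative ordering under these rules:
    #   v1.3.0 > v1.3.0-beta.2 > v1.3.0-beta.1 > v1.3.0-alpha.4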
    elif [[ -n "${version_prerelease}" && -z "${gcs_version_prerelease}" ]]; then
      greater=false
    elif [[ -z "${version_prerelease}" && -n "${gcs_version_prerelease}" ]]; then
      : # fall out
    elif [[ "${version_prerelease}" < "${gcs_version_prerelease}" ]]; then
      greater=false
    elif [[ "${version_prerelease}" > "${gcs_version_prerelease}" ]]; then
      : # fall out
    # Finally resort to -le here, since we want strictly-greater-than.
    elif [[ "${version_prerelease_rev}" -le "${gcs_version_prerelease_rev}" ]]; then
      greater=false
    fi

    if [[ "${greater}" != "true" ]]; then
      kube::log::status "${new_version} (just uploaded) <= ${gcs_version} (latest on GCS), not updating ${publish_file_dst}"
      return 1
    else
      kube::log::status "${new_version} (just uploaded) > ${gcs_version} (latest on GCS), updating ${publish_file_dst}"
    fi
  else  # gsutil cat failed; file does not exist
    kube::log::error "Release file '${publish_file_dst}' does not exist. Continuing."
    return 0
  fi
}

# Check if the new version is greater than or equal to the version currently
# published on GCS. (Ignore the build; if it's different, overwrite anyway.)
#
# Globals:
#   KUBE_GCS_PUBLISH_VERSION
#   KUBE_GCS_RELEASE_BUCKET
# Arguments:
#   publish_file: the GCS location to look in
# Returns:
#   If new version is greater than or equal to the GCS version
#
# TODO(16529): This should all be outside of build and in release, and should
# be refactored to reduce code duplication. Also consider using strictly nested
# ifs and explicit handling of the equals case.
function kube::release::gcs::verify_ci_ge() {
  local -r publish_file="${1-}"
  local -r new_version=${KUBE_GCS_PUBLISH_VERSION}
  local -r publish_file_dst="gs://${KUBE_GCS_RELEASE_BUCKET}/${publish_file}"

  kube::release::parse_and_validate_ci_version "${new_version}" || return 1

  local -r version_major="${VERSION_MAJOR}"
  local -r version_minor="${VERSION_MINOR}"
  local -r version_patch="${VERSION_PATCH}"
  local -r version_prerelease="${VERSION_PRERELEASE}"
  local -r version_prerelease_rev="${VERSION_PRERELEASE_REV}"
  local -r version_commits="${VERSION_COMMITS}"

  local gcs_version
  if gcs_version="$(gsutil cat "${publish_file_dst}")"; then
    kube::release::parse_and_validate_ci_version "${gcs_version}" || {
      kube::log::error "${publish_file_dst} contains an invalid ci version, can't compare: '${gcs_version}'"
      return 1
    }

    local -r gcs_version_major="${VERSION_MAJOR}"
    local -r gcs_version_minor="${VERSION_MINOR}"
    local -r gcs_version_patch="${VERSION_PATCH}"
    local -r gcs_version_prerelease="${VERSION_PRERELEASE}"
    local -r gcs_version_prerelease_rev="${VERSION_PRERELEASE_REV}"
    local -r gcs_version_commits="${VERSION_COMMITS}"

    local greater=true
    if [[ "${version_major}" -lt "${gcs_version_major}" ]]; then
      greater=false
    elif [[ "${version_major}" -gt "${gcs_version_major}" ]]; then
      : # fall out
    elif [[ "${version_minor}" -lt "${gcs_version_minor}" ]]; then
      greater=false
    elif [[ "${version_minor}" -gt "${gcs_version_minor}" ]]; then
      : # fall out
    elif [[ "${version_patch}" -lt "${gcs_version_patch}" ]]; then
      greater=false
    elif [[ "${version_patch}" -gt "${gcs_version_patch}" ]]; then
      : # fall out
    # Use lexicographic (instead of integer) comparison because
    # version_prerelease is a string ("alpha" or "beta").
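    # Illustrative CI ordering: v1.3.0-beta.0.12+0badf00ddeadbeef sorts above
    # v1.3.0-alpha.5.40+cafebabe12345678, because "beta" > "alpha".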
    elif [[ "${version_prerelease}" < "${gcs_version_prerelease}" ]]; then
      greater=false
    elif [[ "${version_prerelease}" > "${gcs_version_prerelease}" ]]; then
      : # fall out
    elif [[ "${version_prerelease_rev}" -lt "${gcs_version_prerelease_rev}" ]]; then
      greater=false
    elif [[ "${version_prerelease_rev}" -gt "${gcs_version_prerelease_rev}" ]]; then
      : # fall out
    # If either version_commits is empty, it will be considered less-than, as
    # expected, (e.g. 1.2.3-beta < 1.2.3-beta.1).
    elif [[ "${version_commits}" -lt "${gcs_version_commits}" ]]; then
      greater=false
    fi

    if [[ "${greater}" != "true" ]]; then
      kube::log::status "${new_version} (just uploaded) < ${gcs_version} (latest on GCS), not updating ${publish_file_dst}"
      return 1
    else
      kube::log::status "${new_version} (just uploaded) >= ${gcs_version} (latest on GCS), updating ${publish_file_dst}"
    fi
  else  # gsutil cat failed; file does not exist
    kube::log::error "File '${publish_file_dst}' does not exist. Continuing."
    return 0
  fi
}

# Publish a release to GCS: upload a version file, make it public if
# KUBE_GCS_MAKE_PUBLIC is set, and verify the result.
#
# Globals:
#   KUBE_GCS_RELEASE_BUCKET
#   RELEASE_STAGE
#   KUBE_GCS_PUBLISH_VERSION
#   KUBE_GCS_MAKE_PUBLIC
# Arguments:
#   publish_file: the GCS location to write to
# Returns:
#   Success
function kube::release::gcs::publish() {
  local -r publish_file="${1-}"

  kube::release::gcs::publish_to_bucket "${KUBE_GCS_RELEASE_BUCKET}" "${publish_file}" || return 1

  if [[ -n "${KUBE_GCS_RELEASE_BUCKET_MIRROR:-}" ]] &&
     [[ "${KUBE_GCS_RELEASE_BUCKET_MIRROR}" != "${KUBE_GCS_RELEASE_BUCKET}" ]]; then
    kube::release::gcs::publish_to_bucket "${KUBE_GCS_RELEASE_BUCKET_MIRROR}" "${publish_file}" || return 1
  fi
}

function kube::release::gcs::publish_to_bucket() {
  local -r publish_bucket="${1}"
  local -r publish_file="${2}"
  local -r publish_file_dst="gs://${publish_bucket}/${publish_file}"

  mkdir -p "${RELEASE_STAGE}/upload" || return 1
  echo "${KUBE_GCS_PUBLISH_VERSION}" > "${RELEASE_STAGE}/upload/latest" || return 1

  gsutil -m cp "${RELEASE_STAGE}/upload/latest" "${publish_file_dst}" || return 1

  local contents
  if [[ ${KUBE_GCS_MAKE_PUBLIC} =~ ^[yY]$ ]]; then
    kube::log::status "Making uploaded version file public and non-cacheable."
    gsutil acl ch -R -g all:R "${publish_file_dst}" >/dev/null 2>&1 || return 1
    gsutil setmeta -h "Cache-Control:private, max-age=0" "${publish_file_dst}" >/dev/null 2>&1 || return 1
    # If public, validate public link
    local -r public_link="https://storage.googleapis.com/${publish_bucket}/${publish_file}"
    kube::log::status "Validating uploaded version file at ${public_link}"
    contents="$(curl -s "${public_link}")"
  else
    # If not public, validate using gsutil
    kube::log::status "Validating uploaded version file at ${publish_file_dst}"
    contents="$(gsutil cat "${publish_file_dst}")"
  fi

  if [[ "${contents}" == "${KUBE_GCS_PUBLISH_VERSION}" ]]; then
    kube::log::status "Contents as expected: ${contents}"
  else
    kube::log::error "Expected contents of file to be ${KUBE_GCS_PUBLISH_VERSION}, but got ${contents}"
    return 1
  fi
}
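
# Example (illustrative): with KUBE_GCS_PUBLISH_VERSION=v1.2.4 already set and
# "my-test-bucket" standing in for a real bucket,
#   kube::release::gcs::publish_to_bucket "my-test-bucket" "release/stable.txt"
# writes "v1.2.4" to gs://my-test-bucket/release/stable.txt and verifies it.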

# ---------------------------------------------------------------------------
# Docker Release
#
# Releases all docker images to the docker registry specified by
# KUBE_DOCKER_REGISTRY, using the tag KUBE_DOCKER_IMAGE_TAG.
#
# Globals:
#   KUBE_DOCKER_REGISTRY
#   KUBE_DOCKER_IMAGE_TAG
#   KUBE_SERVER_PLATFORMS
# Returns:
#   If pushing the docker images was successful.
function kube::release::docker::release() {
  local binaries=(
    "kube-apiserver"
    "kube-controller-manager"
    "kube-scheduler"
    "kube-proxy"
    "hyperkube"
  )

  local docker_push_cmd=("${DOCKER[@]}")
  if [[ "${KUBE_DOCKER_REGISTRY}" == "gcr.io/"* ]]; then
    docker_push_cmd=("gcloud" "docker")
  fi

  if [[ "${KUBE_DOCKER_REGISTRY}" == "gcr.io/google_containers" ]]; then
    # Activate credentials for the k8s.production.user@gmail.com account
    gcloud config set account k8s.production.user@gmail.com
  fi
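
  # "${KUBE_SERVER_PLATFORMS[@]##*/}" strips the OS prefix from each platform,
  # e.g. "linux/amd64" becomes just "amd64".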
  for arch in "${KUBE_SERVER_PLATFORMS[@]##*/}"; do
    for binary in "${binaries[@]}"; do
      # TODO(IBM): Enable hyperkube builds for ppc64le again
      if [[ ${binary} != "hyperkube" || ${arch} != "ppc64le" ]]; then
        local docker_target="${KUBE_DOCKER_REGISTRY}/${binary}-${arch}:${KUBE_DOCKER_IMAGE_TAG}"
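        # e.g. (illustrative) gcr.io/google_containers/kube-apiserver-amd64:v1.2.4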
        kube::log::status "Pushing ${binary} to ${docker_target}"
        "${docker_push_cmd[@]}" push "${docker_target}"

        # If we have an amd64 docker image, also tag it without the -amd64
        # suffix and push it, for compatibility with earlier versions.
        if [[ ${arch} == "amd64" ]]; then
          local legacy_docker_target="${KUBE_DOCKER_REGISTRY}/${binary}:${KUBE_DOCKER_IMAGE_TAG}"
          "${DOCKER[@]}" tag -f "${docker_target}" "${legacy_docker_target}" 2>/dev/null
          kube::log::status "Pushing ${binary} to ${legacy_docker_target}"
          "${docker_push_cmd[@]}" push "${legacy_docker_target}"
        fi
      fi
    done
  done

  if [[ "${KUBE_DOCKER_REGISTRY}" == "gcr.io/google_containers" ]]; then
    # Activate default account
    gcloud config set account ${USER}@google.com
  fi
}

function kube::release::gcloud_account_is_active() {
  local -r account="${1-}"
  if [[ "$(gcloud config list --format='value(core.account)')" == "${account}" ]]; then
    return 0
  else
    return 1
  fi
}
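
# Example (illustrative):
#   if kube::release::gcloud_account_is_active "k8s.production.user@gmail.com"; then
#     kube::log::status "production account is active"
#   fi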