|
| 1 | +load('ext://restart_process', 'docker_build_with_restart') |
| 2 | +load('ext://cert_manager', 'deploy_cert_manager') |
| 3 | + |
| 4 | + |
def deploy_cert_manager_if_needed():
    """Install cert-manager into the cluster, at most once per `tilt up` session.

    The __CERT_MANAGER__ environment variable acts as a sentinel: the first
    caller deploys cert-manager (pinned to v1.15.3) and sets the sentinel, so
    subsequent calls (one per repo deployed) become no-ops.
    """
    sentinel = '__CERT_MANAGER__'
    if os.getenv(sentinel) == '1':
        return
    deploy_cert_manager(version="v1.15.3")
    os.putenv(sentinel, '1')
| 10 | + |
| 11 | + |
# Set up our build helper image that has delve in it. We use a helper so parallel image builds don't all simultaneously
# install delve. Instead, they all wait for this build to complete, and then proceed in parallel.
#
# NOTE(review): the single GO_VERSION build arg is reused both as the golang base
# image tag AND as delve's module version (dlv@v1.23). That resolves today only
# because delve's minor versioning happens to track Go's — confirm before bumping
# GO_VERSION, or the `go install` line may stop resolving.
docker_build(
    ref='helper',
    context='.',
    build_args={'GO_VERSION': '1.23'},
    dockerfile_contents='''
ARG GO_VERSION
FROM golang:${GO_VERSION}
ARG GO_VERSION
RUN CGO_ENABLED=0 go install github.com/go-delve/delve/cmd/dlv@v${GO_VERSION}
'''
)
| 25 | + |
| 26 | + |
def build_binary(repo, binary, deps, image, tags="", debug=True):
    """Compile one Go binary on the host and wire it into a live-updating image.

    repo:   repository name, used to namespace the Tilt resource name.
    binary: Go main-package path; its last path segment becomes the binary name.
    deps:   files/directories whose changes trigger a local rebuild.
    image:  image repository whose tag gets replaced with the binary name.
    tags:   extra flags spliced verbatim into the `go build` command line.
    debug:  when True, build without optimizations and run under delve.
    """
    # -N -l disables optimizations and inlining so delve shows accurate state.
    gcflags = "-gcflags 'all=-N -l'" if debug else ''

    bin_name = binary.split("/")[-1]

    # Treat the main binary as a local resource, so we can automatically rebuild it when any of the deps change. This
    # builds it locally, targeting linux, so it can run in a linux container.
    local_resource(
        '{}_{}_binary'.format(repo, bin_name),
        cmd='''
mkdir -p .tiltbuild/bin
CGO_ENABLED=0 GOOS=linux go build {tags} {gcflags} -o .tiltbuild/bin/{binary_name} {binary}
'''.format(binary_name=bin_name, binary=binary, gcflags=gcflags, tags=tags),
        deps=deps,
    )

    # In debug mode, wrap the binary in a headless delve server listening on
    # :30000 (port-forwarded per-deployment by deploy_repo); otherwise run it
    # directly.
    if debug:
        entrypoint = [
            '/dlv', '--accept-multiclient', '--api-version=2', '--headless=true',
            '--listen', ':30000', 'exec', '--continue', '--', '/{}'.format(bin_name),
        ]
    else:
        entrypoint = ['/{}'.format(bin_name)]

    # Configure our image build. If the file in live_update.sync (.tiltbuild/bin/$binary) changes, Tilt
    # copies it to the running container and restarts it.
    docker_build_with_restart(
        # This has to match an image in the k8s_yaml we call below, so Tilt knows to use this image for our Deployment,
        # instead of the actual image specified in the yaml.
        ref='{image}:{binary_name}'.format(image=image, binary_name=bin_name),
        # This is the `docker build` context, and because we're only copying in the binary we've already had Tilt build
        # locally, we set the context to the directory containing the binary.
        context='.tiltbuild/bin',
        # We use a slimmed-down Dockerfile that only has $binary in it.
        dockerfile_contents='''
FROM gcr.io/distroless/static:debug
WORKDIR /
COPY --from=helper /go/bin/dlv /
COPY {} /
    '''.format(bin_name),
        # The set of files Tilt should include in the build. In this case, it's just the binary we built above.
        only=bin_name,
        # If .tiltbuild/bin/$binary changes, Tilt will copy it into the running container and restart the process.
        live_update=[
            sync('.tiltbuild/bin/{}'.format(bin_name), '/{}'.format(bin_name)),
        ],
        # The command to run in the container.
        entrypoint=entrypoint,
    )
| 73 | + |
| 74 | + |
def process_yaml(yaml):
    """Load a YAML stream, patch it for Tilt-based development, and apply it.

    yaml: either a string (treated as a path to a YAML file) or a Tilt blob of
    YAML content (e.g. the output of kustomize()); anything else fails.

    Patches applied before the objects are handed to k8s_yaml:
      - strip pod-security.kubernetes.io labels from Namespaces
      - strip securityContexts from Deployments (live_update needs root)
      - retag each Deployment container image as :$binary so each image is unique
    """
    # In Tilt's Starlark, type() returns the type name as a string, so comparing
    # against 'string' / 'blob' is the idiomatic dispatch here.
    if type(yaml) == 'string':
        objects = read_yaml_stream(yaml)
    elif type(yaml) == 'blob':
        objects = decode_yaml_stream(yaml)
    else:
        fail('expected a string or blob, got: {}'.format(type(yaml)))

    for o in objects:
        # For Tilt's live_update functionality to work, we have to run the container as root. Remove any PSA labels
        # to allow this.
        if o['kind'] == 'Namespace' and 'labels' in o['metadata']:
            # Collect first, then delete, so we never mutate the dict while iterating it.
            labels_to_delete = [label for label in o['metadata']['labels'] if label.startswith('pod-security.kubernetes.io')]
            for label in labels_to_delete:
                o['metadata']['labels'].pop(label)

        if o['kind'] != 'Deployment':
            # We only need to modify Deployments, so we can skip this
            continue

        # For Tilt's live_update functionality to work, we have to run the container as root. Otherwise, Tilt won't
        # be able to untar the updated binary in the container's file system (this is how live update
        # works). If there are any securityContexts, remove them.
        if "securityContext" in o['spec']['template']['spec']:
            o['spec']['template']['spec'].pop('securityContext')
        for c in o['spec']['template']['spec']['containers']:
            if "securityContext" in c:
                c.pop('securityContext')

        # If multiple Deployment manifests all use the same image but use different entrypoints to change the binary,
        # we have to adjust each Deployment to use a different image. Tilt needs each Deployment's image to be
        # unique. We replace the tag with what is effectively :$binary, e.g. :helm.
        for c in o['spec']['template']['spec']['containers']:
            if c['name'] == 'kube-rbac-proxy':
                # Sidecar image we don't build ourselves; leave it untouched.
                continue

            # Derive the binary name from the container's first command element,
            # stripping a leading './' or '/' if present.
            # NOTE(review): assumes every (non-proxy) container sets `command` — verify against the manifests.
            command = c['command'][0]
            if command.startswith('./'):
                command = command.removeprefix('./')
            elif command.startswith('/'):
                command = command.removeprefix('/')

            # rsplit on the last ':' so registry host:port prefixes survive intact.
            image_without_tag = c['image'].rsplit(':', 1)[0]

            # Update the image so instead of :$tag it's :$binary
            c['image'] = '{}:{}'.format(image_without_tag, command)

    # Now apply all the yaml
    # We are using allow_duplicates=True here as both
    # operator-controller and catalogd will be installed in the same
    # namespace "olmv1-system" as of https://github.com/operator-framework/operator-controller/pull/888
    # and https://github.com/operator-framework/catalogd/pull/283
    k8s_yaml(encode_yaml_stream(objects), allow_duplicates=True)
| 128 | + |
| 129 | + |
# data format:
# {
#     'image': 'quay.io/operator-framework/rukpak',
#     'yaml': 'manifests/overlays/cert-manager',
#     'binaries': {
#         'core': 'core',
#         'crdvalidator': 'crd-validation-webhook',
#         'helm': 'helm-provisioner',
#         'webhooks': 'rukpak-webhooks',
#     },
#     'deps': ['api', 'cmd/binary_name', 'internal', 'pkg'],
#     'starting_debug_port': 30000,
# },
def deploy_repo(repo, data, tags="", debug=True):
    """Deploy a whole repo: cert-manager, one image per binary, then its YAML.

    repo:  repository name (used for logging and resource naming).
    data:  dict describing the repo — see the "data format" comment above;
           must include 'image', 'yaml', 'binaries', 'deps', and
           'starting_debug_port'.
    tags:  extra `go build` flags, forwarded to build_binary.
    debug: when True, binaries are built for and run under delve.
    """
    print('Deploying repo {}'.format(repo))
    deploy_cert_manager_if_needed()

    # Each deployment gets its own host port, counting up from the repo's
    # starting_debug_port, all forwarding to delve's in-container :30000.
    for offset, (binary, deployment) in enumerate(data['binaries'].items()):
        build_binary(repo, binary, data['deps'], data['image'], tags, debug)
        k8s_resource(deployment, port_forwards=['{}:30000'.format(data['starting_debug_port'] + offset)])
    process_yaml(kustomize(data['yaml']))
0 commit comments