initial logic

commit bee5226d95
Author: ryan.kuba
Date:   2022-06-28 17:25:27 -04:00

8 changed files with 551 additions and 0 deletions

.gitlab-ci.yml (new file)

@@ -0,0 +1,110 @@
image: docker
services:
  - docker:dind
stages:
  - build
  - manifest
before_script:
  - docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
  - export SANITIZED_BRANCH="$(echo $CI_COMMIT_REF_NAME | sed -r 's#^release/##' | sed 's/\//_/g')"
  - export SANITIZED_ROLLING_BRANCH=${SANITIZED_BRANCH}-rolling
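# SANITIZED_BRANCH strips a leading "release/" and swaps any remaining slashes
# for underscores, so e.g. "release/1.11.0" becomes "1.11.0" and
# "feature/my-change" becomes "feature_my-change"; scheduled builds append a
# -rolling suffix via SANITIZED_ROLLING_BRANCH.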
build:
  stage: build
  script:
    - >
      docker build
      -t ${ORG_NAME}/workspaces:$(arch)-$SANITIZED_BRANCH
      .
    - docker push ${ORG_NAME}/workspaces:$(arch)-$SANITIZED_BRANCH
  only:
    - develop
    - /^release\/.*$/
  except:
    - schedules
  tags:
    - ${TAG}
  parallel:
    matrix:
      - TAG: [ aws-autoscale, aws-autoscale-arm64 ]
build_dev:
  stage: build
  script:
    - >
      docker build
      -t ${ORG_NAME}/workspaces-private:$(arch)-$SANITIZED_BRANCH
      .
    - docker push ${ORG_NAME}/workspaces-private:$(arch)-$SANITIZED_BRANCH
  except:
    - develop
    - /^release\/.*$/
  tags:
    - ${TAG}
  parallel:
    matrix:
      - TAG: [ aws-autoscale, aws-autoscale-arm64 ]
build_scheduled:
  stage: build
  script:
    - >
      docker build
      -t ${ORG_NAME}/workspaces:$(arch)-$SANITIZED_ROLLING_BRANCH
      .
    - docker push ${ORG_NAME}/workspaces:$(arch)-$SANITIZED_ROLLING_BRANCH
  only:
    - schedules
  tags:
    - ${TAG}
  parallel:
    matrix:
      - TAG: [ aws-autoscale, aws-autoscale-arm64 ]
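# The manifest jobs below merge the per-arch images built above into a single
# multi-arch tag: pull both arch images, purge any stale manifest, create and
# annotate a new one (marking aarch64 as linux/arm64/v8), then push it.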
manifest:
  stage: manifest
  script:
    - docker pull ${ORG_NAME}/workspaces:x86_64-$SANITIZED_BRANCH
    - docker pull ${ORG_NAME}/workspaces:aarch64-$SANITIZED_BRANCH
    - "docker manifest push --purge ${ORG_NAME}/workspaces:$SANITIZED_BRANCH || :"
    - docker manifest create ${ORG_NAME}/workspaces:$SANITIZED_BRANCH ${ORG_NAME}/workspaces:x86_64-$SANITIZED_BRANCH ${ORG_NAME}/workspaces:aarch64-$SANITIZED_BRANCH
    - docker manifest annotate ${ORG_NAME}/workspaces:$SANITIZED_BRANCH ${ORG_NAME}/workspaces:aarch64-$SANITIZED_BRANCH --os linux --arch arm64 --variant v8
    - docker manifest push --purge ${ORG_NAME}/workspaces:$SANITIZED_BRANCH
  only:
    - develop
    - /^release\/.*$/
  except:
    - schedules
  tags:
    - aws-autoscale
manifest_dev:
  stage: manifest
  script:
    - docker pull ${ORG_NAME}/workspaces-private:x86_64-$SANITIZED_BRANCH
    - docker pull ${ORG_NAME}/workspaces-private:aarch64-$SANITIZED_BRANCH
    - "docker manifest push --purge ${ORG_NAME}/workspaces-private:$SANITIZED_BRANCH || :"
    - docker manifest create ${ORG_NAME}/workspaces-private:$SANITIZED_BRANCH ${ORG_NAME}/workspaces-private:x86_64-$SANITIZED_BRANCH ${ORG_NAME}/workspaces-private:aarch64-$SANITIZED_BRANCH
    - docker manifest annotate ${ORG_NAME}/workspaces-private:$SANITIZED_BRANCH ${ORG_NAME}/workspaces-private:aarch64-$SANITIZED_BRANCH --os linux --arch arm64 --variant v8
    - docker manifest push --purge ${ORG_NAME}/workspaces-private:$SANITIZED_BRANCH
  except:
    - develop
    - /^release\/.*$/
  tags:
    - aws-autoscale
manifest_scheduled:
  stage: manifest
  script:
    - docker pull ${ORG_NAME}/workspaces:x86_64-$SANITIZED_ROLLING_BRANCH
    - docker pull ${ORG_NAME}/workspaces:aarch64-$SANITIZED_ROLLING_BRANCH
    - "docker manifest push --purge ${ORG_NAME}/workspaces:$SANITIZED_ROLLING_BRANCH || :"
    - docker manifest create ${ORG_NAME}/workspaces:$SANITIZED_ROLLING_BRANCH ${ORG_NAME}/workspaces:x86_64-$SANITIZED_ROLLING_BRANCH ${ORG_NAME}/workspaces:aarch64-$SANITIZED_ROLLING_BRANCH
    - docker manifest annotate ${ORG_NAME}/workspaces:$SANITIZED_ROLLING_BRANCH ${ORG_NAME}/workspaces:aarch64-$SANITIZED_ROLLING_BRANCH --os linux --arch arm64 --variant v8
    - docker manifest push --purge ${ORG_NAME}/workspaces:$SANITIZED_ROLLING_BRANCH
  only:
    - schedules
  tags:
    - aws-autoscale
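After a manifest job runs, the combined tag can be sanity-checked with `docker manifest inspect`, which should list both the amd64 and arm64 entries; a hypothetical check against the develop tag:

```
docker manifest inspect ${ORG_NAME}/workspaces:develop
```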

Dockerfile (new file)

@@ -0,0 +1,97 @@
FROM alpine:3.16
# Settings
ARG WIZARD_URL="https://kasmweb-build-artifacts.s3.amazonaws.com/wizard/develop.tar.gz"
ARG S3_URL="https://kasmweb-build-artifacts.s3.amazonaws.com/kasm_backend/7588f65420252c56401b20470fe95b8ed690a9a4/kasm_workspaces_develop_1.11.0.7588f6.tar.gz"
ARG OVERLAY_VERSION="v2.2.0.3"
ENV DOCKER_TLS_CERTDIR=""
# Container setup
RUN \
  echo "**** install build packages ****" && \
  apk add --no-cache --virtual=build-dependencies \
    alpine-sdk \
    npm && \
  echo "**** install packages ****" && \
  apk add --no-cache \
    bash \
    btrfs-progs \
    ca-certificates \
    coreutils \
    curl \
    docker \
    docker-cli-compose \
    e2fsprogs \
    e2fsprogs-extra \
    findutils \
    fuse-overlayfs \
    ip6tables \
    iptables \
    nodejs \
    openssl \
    pigz \
    procps \
    python3 \
    shadow \
    shadow-uidmap \
    sudo \
    tzdata \
    xfsprogs \
    xz \
    zfs && \
  echo "**** dind setup ****" && \
  addgroup -S dockremap && \
  adduser -S -G dockremap dockremap && \
  echo 'dockremap:165536:65536' >> /etc/subuid && \
  echo 'dockremap:165536:65536' >> /etc/subgid && \
  curl -o \
    /usr/local/bin/dind -L \
    https://raw.githubusercontent.com/moby/moby/master/hack/dind && \
  chmod +x /usr/local/bin/dind && \
  echo 'hosts: files dns' > /etc/nsswitch.conf && \
  echo "**** setup init ****" && \
  curl -o \
    /tmp/s6-overlay-installer -L \
    https://github.com/just-containers/s6-overlay/releases/download/${OVERLAY_VERSION}/s6-overlay-$(uname -m | sed 's/x86_64/amd64/g')-installer && \
  chmod +x /tmp/s6-overlay-installer && \
  /tmp/s6-overlay-installer / && \
  echo "**** add installer ****" && \
  curl -o \
    /tmp/kasm.tar.gz -L \
    "${S3_URL}" && \
  tar xf \
    /tmp/kasm.tar.gz -C \
    / && \
  echo "**** add wizard ****" && \
  mkdir -p /wizard && \
  curl -o \
    /tmp/wizard.tar.gz -L \
    "${WIZARD_URL}" && \
  tar xf \
    /tmp/wizard.tar.gz -C \
    /wizard/ && \
  echo "**** setup wizard ****" && \
  cd /wizard && \
  npm install && \
  echo "**** copy assets ****" && \
  cp \
    /kasm_release/www/img/thumbnails/*.png \
    /wizard/public/img/thumbnails/ && \
  cp \
    /kasm_release/conf/database/seed_data/default_images_a* \
    /wizard/ && \
  echo "**** cleanup ****" && \
  apk del --purge \
    build-dependencies && \
  rm -rf \
    /root/.npm \
    /root/.cache \
    /tmp/*
# add init files
COPY root/ /
# Ports, volumes, and init
EXPOSE 3000 443
VOLUME /opt/
ENTRYPOINT ["/init"]
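The artifact locations are plain build arguments, so a local image can be pointed at a different wizard or backend bundle without editing the Dockerfile; a minimal sketch (the URL and tag are illustrative):

```
docker build \
  --build-arg WIZARD_URL="https://example.com/wizard.tar.gz" \
  -t workspaces:local .
```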

README.md (new file)

@@ -0,0 +1,71 @@
# Docker Kasm
The purpose of this container is to allow a Docker-enabled system to easily deploy a fully functional Kasm Workspaces application stack isolated in a single container. Its main focus is the consumer and hobbyist user, acting as a stepping stone to a dedicated VM or a full multi-server deployment. The idea is to make spinning up the stack as simple as possible, abstracting installation and management away from the user.
It has a few core principles:
* Have no external dependencies outside of Docker
* All user facing processes must be presented in a web interface
* The installation must be upgradeable using Docker
* The container must function in an ephemeral mode as well as with a bind mount for a persistent install
* The user should have the option to run development or stable builds
# Usage
### docker-compose
```yaml
---
version: "2.1"
services:
  kasm:
    image: kasmweb/kasm:latest
    privileged: true
    container_name: kasm
    environment:
      - KASM_PORT=443 #optional
      - DOCKER_HUB_USERNAME=USER #optional
      - DOCKER_HUB_PASSWORD=PASS #optional
    volumes:
      - /kasm/local/storage:/opt
    ports:
      - 443:443
      - 3000:3000
    restart: unless-stopped
```
### docker cli
```
docker run -d \
  --privileged \
  --name=kasm \
  -e KASM_PORT=443 `#optional` \
  -e DOCKER_HUB_USERNAME=USER `#optional` \
  -e DOCKER_HUB_PASSWORD=PASS `#optional` \
  -p 443:443 \
  -p 3000:3000 \
  -v /kasm/local/storage:/opt \
  kasmweb/kasm:latest
```
| Parameter | Function |
| :----: | --- |
| `-p 443` | Kasm Workspaces web UI |
| `-p 3000` | Kasm Installation and upgrade wizard |
| `-v /kasm/local/storage:/opt` | Docker and Kasm Storage |
| `-e KASM_PORT=443` | If not using port 443 this needs to be set to the port you are binding to (optional) |
| `-e DOCKER_HUB_USERNAME=USER` | Dockerhub username for logging in on init (optional) |
| `-e DOCKER_HUB_PASSWORD=PASS` | Dockerhub password for logging in on init (optional) |
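For example, when serving the web UI on a different port, `KASM_PORT` should be set to the port being bound; a hypothetical 8443 setup, assuming the stack listens on the port given in `KASM_PORT`:

```
docker run -d \
  --privileged \
  --name=kasm \
  -e KASM_PORT=8443 \
  -p 8443:8443 \
  -p 3000:3000 \
  -v /kasm/local/storage:/opt \
  kasmweb/kasm:latest
```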
# Versions
| Tag | Description |
| :----: | --- |
| latest | Latest stable release |
| develop | Development head |

| Architecture | Tag |
| :----: | ---- |
| x86-64 | x86_64-\<version tag\> |
| arm64 | aarch64-\<version tag\> |
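Combining the two tables, an arch-specific build can be pulled directly; for example (illustrative):

```
docker pull kasmweb/kasm:aarch64-develop
```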

(new file)

@@ -0,0 +1,51 @@
#!/usr/bin/with-contenv bash
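# Print the Kasm Workspaces banner at container init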
cat << "EOF"
_ __ __ __ _
| |/ / \ \ / / | |
| ' / __ _ ___ _ __ ___ \ \ /\ / /__ _ __| | _____ _ __ __ _ ___ ___ ___
| < / _` / __| '_ ` _ \ \ \/ \/ / _ \| '__| |/ / __| '_ \ / _` |/ __/ _ \/ __|
| . \ (_| \__ \ | | | | | \ /\ / (_) | | | <\__ \ |_) | (_| | (_| __/\__ \
|_|\_\__,_|___/_| |_| |_| \/ \/ \___/|_| |_|\_\___/ .__/ \__,_|\___\___||___/
| |
|_|
MKWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWNMMMMMMMMMMMWXkxKM
Mk. .cKMMMMMMMMMWXk:..kW
Mk. .;xKWMMMMMMMWXk:. .kW
Mk. .;xXWMMMMMMMWXk;. .kW
Mk. .;xXWMMMMMMMWXk;. .kW
Mk. .................... .;xXWMMMMMMMWXk;. .kW
Mk. .cO00000000000000000kl. .;xXWMMMMMMMWXk;. .kW
Mk. .xWMMMMMMMMMMMMMMMNOc. .;xXWMMMMMMMWXx;. .kW
Mk. .xWMMMMMMMMMMMMMNOc. .;xXWMMMMMMMWXx;. .kW
Mk. .xWMMMMMMMMMMMNOc. .;xXWMMMMMMMWXx;. .kW
Mk. .xWMMMMMMMMMNOc. ..;xXWMMMMMMMWXx;. .kW
Mk. .xWMMMMMMMNOc. .;x0XWMMMMMMMWXx;. .kW
Mk. .xWMMMMMMM0, .;xXWMMMMMMMMWXx;. .kW
Mk. .xWMMMMMMWO. .;xXWMMMMMMMMWXx;. .kW
Mk. .xWMMMMMMMO. .;xXWMMMMMMMMWXx;. .kW
Mk. .xWMMMMMMMO. .;xXWMMMMMMMMWXx;. .kW
Mk. .xWMMMMMMMO. .;xXWMMMMMMMWWXx;. .kW
Mk. .xWMMMMMMMO. .;xXWMMMMMMMWXxl;. .kW
Mk. .xWMMMMMMM0::xXWMMMMMMMWXx;. .kW
Mk. .xWMMMMMMMWXXWMMMMMMMWXx;. .kW
Mk. .xWMMMMMMMMMMMMMMMMWXx;. .kW
Mk. .xWMMMMMMMMMMMMMMWXx;. .kW
Mk. .xWMMMMMMMMMMMMWXx;. .;c, .kW
Mk. .xWMMMMMMMMMMWXx;. ..;xXNKd, .kW
Mk. .xWMMMMMMMMWXx;. .;kKXWMMMWKd,. .kW
Mk. .xWMMMMMMWXd;. .;xXWMMMMMMMMWKd, .kW
Mk. .xWMMMMWXx;. .;xXWMMMMMMMMMMMMWKd, .kW
Mk. .xWMMWKd,. .:kXWMMMMMMMNXNMMMMMMWKd,. .kW
Mk. .xWWKd,. .:kXWMMMMMMWNOc;o0WMMMMMMWKd,. .kW
Mk. .d0d,. .:kXWMMMMMMNOdc. .o0WMMMMMMWKd,. .kW
Mk. ... .:kXWMMMMMMNOc. .o0NNNNNNNXOl. .kW
Mk. .:kXMMMMMMWNOc. .,,,,,,,,,.. .kW
Mk. .:kXWMMMMMMNkc. .kW
Mk. .:kXWMMMMMMNk:. .kW
Mk. .:kXMMMMMMWNOc. .OW
Mk. .:kXMMMMMMWNkc. ;kNM
Mk. .:kXWMMMMMWNk:. ..;xNMM
M0l;;;;;;;;;ckXWMMMMMMMW0o:;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;ckXNWMMM
MMWWWWWWWWWWWWMMMMMMMMMMWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWMMMMMMM
EOF

(new file)

@@ -0,0 +1,11 @@
#!/usr/bin/with-contenv bash

# Create directories
if [ ! -d "/opt/docker" ]; then
  mkdir -p /opt/docker
fi

# Login to Dockerhub
if [ ! -z "${DOCKER_HUB_USERNAME}" ]; then
  docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
fi

(new file)

@@ -0,0 +1,3 @@
#!/usr/bin/with-contenv bash
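# s6 service: run dockerd via the entrypoint wrapper, keeping image data on the persistent /opt volume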
exec /usr/local/bin/dockerd-entrypoint.sh -l error --data-root /opt/docker

(new file)

@@ -0,0 +1,12 @@
#!/usr/bin/with-contenv bash
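# s6 service: launch the installation wizard once dockerd's socket is available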
# Wait for docker to be up
while true; do
  if [ -S "/var/run/docker.sock" ]; then
    break
  fi
  sleep 1
done

cd /wizard
/usr/bin/node index.js

(new file)

@@ -0,0 +1,196 @@
#!/bin/sh
set -eu

_tls_ensure_private() {
	local f="$1"; shift
	[ -s "$f" ] || openssl genrsa -out "$f" 4096
}
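# Collect the SANs for the server cert: every local IP plus the hostname,
# "docker", "localhost", and anything extra passed in via DOCKER_TLS_SAN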
_tls_san() {
	{
		ip -oneline address | awk '{ gsub(/\/.+$/, "", $4); print "IP:" $4 }'
		{
			cat /etc/hostname
			echo 'docker'
			echo 'localhost'
			hostname -f
			hostname -s
		} | sed 's/^/DNS:/'
		[ -z "${DOCKER_TLS_SAN:-}" ] || echo "$DOCKER_TLS_SAN"
	} | sort -u | xargs printf '%s,' | sed "s/,\$//"
}
_tls_generate_certs() {
	local dir="$1"; shift

	# if ca/key.pem || !ca/cert.pem, generate CA public if necessary
	# if ca/key.pem, generate server public
	# if ca/key.pem, generate client public
	# (regenerating public certs every startup to account for SAN/IP changes and/or expiration)
	# https://github.com/FiloSottile/mkcert/issues/174
	local certValidDays='825'

	if [ -s "$dir/ca/key.pem" ] || [ ! -s "$dir/ca/cert.pem" ]; then
		# if we either have a CA private key or do *not* have a CA public key, then we should create/manage the CA
		mkdir -p "$dir/ca"
		_tls_ensure_private "$dir/ca/key.pem"
		openssl req -new -key "$dir/ca/key.pem" \
			-out "$dir/ca/cert.pem" \
			-subj '/CN=docker:dind CA' -x509 -days "$certValidDays"
	fi

	if [ -s "$dir/ca/key.pem" ]; then
		# if we have a CA private key, we should create/manage a server key
		mkdir -p "$dir/server"
		_tls_ensure_private "$dir/server/key.pem"
		openssl req -new -key "$dir/server/key.pem" \
			-out "$dir/server/csr.pem" \
			-subj '/CN=docker:dind server'
		cat > "$dir/server/openssl.cnf" <<-EOF
			[ x509_exts ]
			subjectAltName = $(_tls_san)
		EOF
		openssl x509 -req \
			-in "$dir/server/csr.pem" \
			-CA "$dir/ca/cert.pem" \
			-CAkey "$dir/ca/key.pem" \
			-CAcreateserial \
			-out "$dir/server/cert.pem" \
			-days "$certValidDays" \
			-extfile "$dir/server/openssl.cnf" \
			-extensions x509_exts
		cp "$dir/ca/cert.pem" "$dir/server/ca.pem"
		openssl verify -CAfile "$dir/server/ca.pem" "$dir/server/cert.pem"
	fi

	if [ -s "$dir/ca/key.pem" ]; then
		# if we have a CA private key, we should create/manage a client key
		mkdir -p "$dir/client"
		_tls_ensure_private "$dir/client/key.pem"
		chmod 0644 "$dir/client/key.pem" # openssl defaults to 0600 for the private key, but this one needs to be shared with arbitrary client contexts
		openssl req -new \
			-key "$dir/client/key.pem" \
			-out "$dir/client/csr.pem" \
			-subj '/CN=docker:dind client'
		cat > "$dir/client/openssl.cnf" <<-'EOF'
			[ x509_exts ]
			extendedKeyUsage = clientAuth
		EOF
		openssl x509 -req \
			-in "$dir/client/csr.pem" \
			-CA "$dir/ca/cert.pem" \
			-CAkey "$dir/ca/key.pem" \
			-CAcreateserial \
			-out "$dir/client/cert.pem" \
			-days "$certValidDays" \
			-extfile "$dir/client/openssl.cnf" \
			-extensions x509_exts
		cp "$dir/ca/cert.pem" "$dir/client/ca.pem"
		openssl verify -CAfile "$dir/client/ca.pem" "$dir/client/cert.pem"
	fi
}
# no arguments passed
# or first arg is `-f` or `--some-option`
if [ "$#" -eq 0 ] || [ "${1#-}" != "$1" ]; then
	# set "dockerSocket" to the default "--host" *unix socket* value (for both standard or rootless)
	uid="$(id -u)"
	if [ "$uid" = '0' ]; then
		dockerSocket='unix:///var/run/docker.sock'
	else
		# if we're not root, we must be trying to run rootless
		: "${XDG_RUNTIME_DIR:=/run/user/$uid}"
		dockerSocket="unix://$XDG_RUNTIME_DIR/docker.sock"
	fi
	case "${DOCKER_HOST:-}" in
		unix://*)
			dockerSocket="$DOCKER_HOST"
			;;
	esac

	# add our default arguments
	if [ -n "${DOCKER_TLS_CERTDIR:-}" ] \
		&& _tls_generate_certs "$DOCKER_TLS_CERTDIR" \
		&& [ -s "$DOCKER_TLS_CERTDIR/server/ca.pem" ] \
		&& [ -s "$DOCKER_TLS_CERTDIR/server/cert.pem" ] \
		&& [ -s "$DOCKER_TLS_CERTDIR/server/key.pem" ] \
	; then
		# generate certs and use TLS if requested/possible (default in 19.03+)
		set -- dockerd \
			--host="$dockerSocket" \
			--host=tcp://0.0.0.0:2376 \
			--tlsverify \
			--tlscacert "$DOCKER_TLS_CERTDIR/server/ca.pem" \
			--tlscert "$DOCKER_TLS_CERTDIR/server/cert.pem" \
			--tlskey "$DOCKER_TLS_CERTDIR/server/key.pem" \
			"$@"
		DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS="${DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS:-} -p 0.0.0.0:2376:2376/tcp"
	else
		# TLS disabled (-e DOCKER_TLS_CERTDIR='') or missing certs
		set -- dockerd \
			--host="$dockerSocket" \
			"$@"
		DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS="${DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS:-} -p 0.0.0.0:2375:2375/tcp"
	fi
fi
if [ "$1" = 'dockerd' ]; then
# explicitly remove Docker's default PID file to ensure that it can start properly if it was stopped uncleanly (and thus didn't clean up the PID file)
find /run /var/run -iname 'docker*.pid' -delete || :
if dockerd --version | grep -qF ' 20.10.'; then
set -- docker-init -- "$@"
fi
if ! iptables -nL > /dev/null 2>&1; then
# if iptables fails to run, chances are high the necessary kernel modules aren't loaded (perhaps the host is using nftables with the translating "iptables" wrappers, for example)
# https://github.com/docker-library/docker/issues/350
# https://github.com/moby/moby/issues/26824
modprobe ip_tables || :
fi
uid="$(id -u)"
if [ "$uid" != '0' ]; then
# if we're not root, we must be trying to run rootless
if ! command -v rootlesskit > /dev/null; then
echo >&2 "error: attempting to run rootless dockerd but missing 'rootlesskit' (perhaps the 'docker:dind-rootless' image variant is intended?)"
exit 1
fi
user="$(id -un 2>/dev/null || :)"
if ! grep -qE "^($uid${user:+|$user}):" /etc/subuid || ! grep -qE "^($uid${user:+|$user}):" /etc/subgid; then
echo >&2 "error: attempting to run rootless dockerd but missing necessary entries in /etc/subuid and/or /etc/subgid for $uid"
exit 1
fi
: "${XDG_RUNTIME_DIR:=/run/user/$uid}"
export XDG_RUNTIME_DIR
if ! mkdir -p "$XDG_RUNTIME_DIR" || [ ! -w "$XDG_RUNTIME_DIR" ] || ! mkdir -p "$HOME/.local/share/docker" || [ ! -w "$HOME/.local/share/docker" ]; then
echo >&2 "error: attempting to run rootless dockerd but need writable HOME ($HOME) and XDG_RUNTIME_DIR ($XDG_RUNTIME_DIR) for user $uid"
exit 1
fi
if [ -f /proc/sys/kernel/unprivileged_userns_clone ] && unprivClone="$(cat /proc/sys/kernel/unprivileged_userns_clone)" && [ "$unprivClone" != '1' ]; then
echo >&2 "error: attempting to run rootless dockerd but need 'kernel.unprivileged_userns_clone' (/proc/sys/kernel/unprivileged_userns_clone) set to 1"
exit 1
fi
if [ -f /proc/sys/user/max_user_namespaces ] && maxUserns="$(cat /proc/sys/user/max_user_namespaces)" && [ "$maxUserns" = '0' ]; then
echo >&2 "error: attempting to run rootless dockerd but need 'user.max_user_namespaces' (/proc/sys/user/max_user_namespaces) set to a sufficiently large value"
exit 1
fi
# TODO overlay support detection?
exec rootlesskit \
--net="${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:-vpnkit}" \
--mtu="${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:-1500}" \
--disable-host-loopback \
--port-driver=builtin \
--copy-up=/etc \
--copy-up=/run \
${DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS:-} \
"$@"
elif [ -x '/usr/local/bin/dind' ]; then
# if we have the (mostly defunct now) Docker-in-Docker wrapper script, use it
set -- '/usr/local/bin/dind' "$@"
fi
else
# if it isn't `dockerd` we're trying to run, pass it through `docker-entrypoint.sh` so it gets `DOCKER_HOST` set appropriately too
set -- docker-entrypoint.sh "$@"
fi
exec "$@"