Dockerfile
Smarter Platform is a Docker-based Python/Django microservice application that runs in Kubernetes. Below is the basic Dockerfile used to build the Smarter Platform Docker images for the application, the Celery worker, and the Celery beat pods. One image serves all three roles; only the container command differs, and a sketch of a multi-architecture build follows the Dockerfile.
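As a rough illustration of the single-image idea (not taken from the project's Helm chart or docker-compose.yml), the three workloads might be started as shown below. The Celery application path passed to -A is an assumption and may differ in the real deployment, and Celery itself is assumed to be installed via requirements/docker.txt.

# Web/API pod: use the image's default CMD (Gunicorn listening on port 9357)
docker run --rm -p 9357:9357 mcdaniel0073/smarter:latest

# Celery worker pod: override the command (the "-A smarter" app path is an assumption)
docker run --rm mcdaniel0073/smarter:latest celery -A smarter worker --loglevel=info

# Celery beat pod: likewise, pointing the schedule file at the writable celery data directory
docker run --rm mcdaniel0073/smarter:latest celery -A smarter beat \
    --schedule /home/smarter_user/data/celery/celerybeat-schedule --loglevel=info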
#------------------------------------------------------------------------------
# This Dockerfile is used to build the following:
# - Smarter application.
# - Smarter Celery worker.
# - Smarter Celery beat.
#
# This image is used for all environments (local, alpha, beta, next and production).
# - It is published to DockerHub as mcdaniel0073/smarter:latest
#   https://hub.docker.com/repository/docker/mcdaniel0073/smarter/general
# - It is also the basis of the Helm chart used to deploy Smarter to production Kubernetes clusters.
#   https://artifacthub.io/packages/helm/project-smarter/smarter
#------------------------------------------------------------------------------

################################## base #######################################
# Use the official Python image as a parent image
# see https://hub.docker.com/_/python
#
# 3.12-slim-trixie is an official Docker image tag for Python 3.12 based on
# Debian "Trixie" (the codename for Debian 13).
# The "slim" variant is a minimal image that excludes unnecessary files and packages,
# making it smaller and faster to download and build.
# It is commonly used for production deployments where a lightweight Python environment is preferred.
FROM python:3.12-slim-trixie AS linux_base

LABEL maintainer="Lawrence McDaniel" \
    description="Docker image for the Smarter API and web console" \
    license="GNU AGPL v3" \
    vcs-url="https://github.com/smarter-sh/smarter" \
    org.opencontainers.image.title="Smarter API" \
    org.opencontainers.image.version="0.13.92" \
    org.opencontainers.image.authors="Lawrence McDaniel" \
    org.opencontainers.image.url="https://smarter.sh/" \
    org.opencontainers.image.source="https://github.com/smarter-sh/smarter" \
    org.opencontainers.image.documentation="https://docs.smarter.sh/"

# Environment: local, alpha, beta, next, or production
ARG TARGETPLATFORM
ARG TARGETARCH
ARG ENVIRONMENT=local
ARG COLLECT_STATIC_FILES=true
ENV COLLECT_STATIC_FILES=${COLLECT_STATIC_FILES}
ENV ENVIRONMENT=$ENVIRONMENT
RUN echo "ENVIRONMENT: $ENVIRONMENT"

############################## install system packages #################################
# build-essential        needed to build some python packages, but not included in 3.12-slim-trixie
# libssl-dev             ... ditto ...
# libffi-dev             ... ditto ...
# python3-dev            ... ditto ...
# pkg-config             ... ditto ...
# ------
# ca-certificates        needed for SSL/TLS support in http requests but not included in 3.12-slim-trixie
# python-dev-is-python3  helper package to ensure that the 'python' command points to python3
# default-mysql-client   needed for Django mysql backend support
# libmariadb-dev         needed for the mysqlclient python package
# git                    used in manage.py commands
# ------
# curl                   used below in this Dockerfile to download files
# unzip                  used below in this Dockerfile to install aws cli
# procps                 provides the 'ps' command, used for liveness/readiness probes of the beat pod in kubernetes
FROM linux_base AS system_packages

# Install system packages
# The Python slim trixie image is based on Debian and is a limited installation. Most of these packages
# would ordinarily be included in a full Debian installation, but are missing from the slim image and
# we therefore need to "add these back in" as part of our Dockerfile.
RUN apt-get update && apt-get upgrade -y && apt-get install -y \
    build-essential \
    libssl-dev \
    libffi-dev \
    python3-dev \
    pkg-config \
    ca-certificates \
    python-dev-is-python3 \
    wget \
    git \
    curl \
    unzip \
    procps \
    redis-tools \
    libmariadb-dev \
    libncurses6 && \
    rm -rf /var/lib/apt/lists/*

# Install Oracle MySQL 8.x client binary for correct architecture
RUN if [ "$TARGETARCH" = "arm64" ]; then \
        MYSQL_TARBALL="mysql-8.0.36-linux-glibc2.28-aarch64.tar.xz"; \
        MYSQL_DIR="mysql-8.0.36-linux-glibc2.28-aarch64"; \
    else \
        MYSQL_TARBALL="mysql-8.0.36-linux-glibc2.28-x86_64.tar.xz"; \
        MYSQL_DIR="mysql-8.0.36-linux-glibc2.28-x86_64"; \
    fi && \
    wget "https://dev.mysql.com/get/Downloads/MySQL-8.0/${MYSQL_TARBALL}" && \
    tar -xf "$MYSQL_TARBALL" && \
    cp "$MYSQL_DIR/bin/mysql" /usr/local/bin/ && \
    chmod +x /usr/local/bin/mysql && \
    rm -rf "$MYSQL_TARBALL" "$MYSQL_DIR"

# Install kubectl, required for smarter/common/helpers/k8s_helpers.py used for ChatBot/Agent
# deployments in which dedicated Kubernetes ingress and TLS certificates are created. There
# are kubectl builds for both amd64 and arm64 architectures (we build both for DockerHub
# multi-arch support).
RUN if [ "$TARGETARCH" = "arm64" ]; then \
        KUBECTL_ARCH="arm64"; \
    else \
        KUBECTL_ARCH="amd64"; \
    fi && \
    curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/${KUBECTL_ARCH}/kubectl" && \
    chmod +x ./kubectl && \
    mv ./kubectl /usr/local/bin/kubectl

# Install aws cli, required for smarter/common/helpers/aws/
# We rely extensively on AWS support, for Route53, S3, Simple Email Service,
# Elastic Kubernetes Service, etc. There are AWS CLI builds for both
# amd64 and arm64 architectures (we build both for DockerHub multi-arch support).
RUN if [ "$TARGETARCH" = "arm64" ]; then \
        curl "https://awscli.amazonaws.com/awscli-exe-linux-aarch64.zip" -o "awscliv2.zip"; \
    else \
        curl "https://d1vvhvl2y92vvt.cloudfront.net/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"; \
    fi && \
    unzip awscliv2.zip && \
    ./aws/install && \
    rm -rf awscliv2.zip aws

############################## create app user #################################
FROM system_packages AS user_setup

# Create a non-root user to run the application
RUN adduser --disabled-password --gecos '' smarter_user

# Create a data directory for the smarter_user that
# the application can use to store data.
# - add a .kube directory and an empty config file
# - add a celery directory for celerybeat to use to store its schedule.
RUN mkdir -p /home/smarter_user/data/.kube && touch /home/smarter_user/data/.kube/config && \
    mkdir -p /home/smarter_user/data/celery && \
    mkdir -p /home/smarter_user/data/media   # fallback Django storage when not using S3 or other external storage.

# Set the KUBECONFIG environment variable
ENV KUBECONFIG=/home/smarter_user/data/.kube/config

# Ensure that the smarter_user owns everything in the /home/smarter_user directory.
RUN chown -R smarter_user:smarter_user /home/smarter_user/

# so that the Docker file system matches up with the local file system.
WORKDIR /smarter

# Switch to non-root user
USER smarter_user

############################## python setup #################################
FROM user_setup AS venv

# Create and activate a virtual environment in the user's home directory
RUN python -m venv /home/smarter_user/venv
ENV PATH="/home/smarter_user/venv/bin:$PATH"

# Add all Python package dependencies.
# We do this before adding the application code so that we can take advantage
# of Docker's caching mechanism. If the requirements files do not change,
# Docker will use the cached layer and not reinstall the packages.
#
# mcdaniel jan-2026: adding local requirements.txt back in because of the
# https://github.com/smarter-sh/smarter-deploy repo that is used to deploy
# smarter locally for non-developers.
COPY ./smarter/requirements requirements
RUN pip install --upgrade pip && \
    pip install --no-cache-dir -r requirements/docker.txt

# Install Python dependencies for the local environment for cases where
# we're going to run python unit tests in the Docker container.
RUN if [ "$ENVIRONMENT" = "local" ] ; then pip install -r requirements/local.txt ; fi

############################## application ##################################
FROM venv AS application

# Do this last so that we can take advantage of Docker's caching mechanism.
WORKDIR /home/smarter_user/
COPY --chown=smarter_user:smarter_user ./smarter ./smarter
COPY --chown=smarter_user:smarter_user ./smarter/smarter/apps/chatbot/data/ ./data/manifests/
RUN mkdir -p /home/smarter_user/data/manifests/example_manifests

################################# permissions #######################################
FROM application AS permissions

# Ensure that smarter_user owns everything and has the minimum
# permissions needed to run the application and to manage files
# that the application needs to write to in /home/smarter_user.
# This is important because by default Debian adds
# read-only and execute permissions to the group and to public.
# We don't want either of these.
#
# files:                  r-------- so that smarter_user can read them
# directories:            r-x------ so that smarter_user can cd into them
# venv/bin/*:             r-x------ so that smarter_user can execute them
# smarter/**/migrations:  rwx------ so that smarter_user can write django migration files.
# data:                   rwx------ so that smarter_user can manage the data directory.
# .cache:                 rwx------ because some python packages want to write to .cache, like tldextract
USER root
RUN if [ "$ENVIRONMENT" != "local" ] ; then chown -R smarter_user:smarter_user /home/smarter_user/ && \
    find /home/smarter_user/ -type f -exec chmod 400 {} + && \
    find /home/smarter_user/ -type d -exec chmod 500 {} + && \
    find /home/smarter_user/venv/bin/ -type f -exec chmod 500 {} + && \
    find /home/smarter_user/smarter/smarter/ -type d -name migrations -exec chmod 700 {} + && \
    chmod -R 700 /home/smarter_user/data && \
    chmod -R 700 /home/smarter_user/.cache && \
    chmod 755 /home/smarter_user/smarter/manage.py ; fi

################################# data #################################
FROM permissions AS data

# Add our source code and make the 'smarter' directory the working directory.
# We want this to be the last step so that we can take advantage of Docker's
# caching mechanism.
WORKDIR /home/smarter_user/
COPY --chown=smarter_user:smarter_user ./docs ./data/docs
COPY --chown=smarter_user:smarter_user ./README.md ./data/docs/README.md
COPY --chown=smarter_user:smarter_user ./CHANGELOG.md ./data/docs/CHANGELOG.md
COPY --chown=smarter_user:smarter_user ./CODE_OF_CONDUCT.md ./data/docs/CODE_OF_CONDUCT.md
COPY --chown=smarter_user:smarter_user ./Dockerfile ./data/Dockerfile
COPY --chown=smarter_user:smarter_user ./Makefile ./data/Makefile
COPY --chown=smarter_user:smarter_user ./docker-compose.yml ./data/docker-compose.yml

############################## collect_assets ##################################
# This is a Django application, so we need to collect static assets.
# We do this in a separate stage so that if the application code changes
# but the static assets do not change, we can take advantage of Docker's
# caching mechanism.
FROM data AS collect_assets

WORKDIR /home/smarter_user/smarter
RUN if [ "$COLLECT_STATIC_FILES" = "true" ]; then python manage.py collectstatic --noinput; else echo "Skipping collectstatic"; fi

################################# final #######################################
# This is the final stage that will be used to run the application.
# Gunicorn is used as the application server.
# "smarter.wsgi:application" is the WSGI application callable and corresponds
# to the "application" variable in smarter/wsgi.py.
# The application will listen on all interfaces (0.0.0.0) on port 9357.
FROM collect_assets AS serve_application

WORKDIR /home/smarter_user/smarter
USER smarter_user

CMD ["gunicorn", "smarter.wsgi:application", "-b", "0.0.0.0:9357"]

EXPOSE 9357
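The ARG TARGETPLATFORM / TARGETARCH pair declared in the base stage is populated automatically by BuildKit when building with docker buildx, which is how the amd64 and arm64 variants mentioned in the comments come from this single file. Below is a minimal sketch of such a build; the image tag comes from the header comment, while the platform list and the ENVIRONMENT and COLLECT_STATIC_FILES values are chosen for illustration, and a configured buildx builder plus DockerHub credentials are assumed.

# Multi-arch build pushed to DockerHub
docker buildx build \
    --platform linux/amd64,linux/arm64 \
    --build-arg ENVIRONMENT=production \
    --build-arg COLLECT_STATIC_FILES=true \
    -t mcdaniel0073/smarter:latest \
    --push .

# Local single-arch build for development; ENVIRONMENT=local also installs requirements/local.txt
docker build --build-arg ENVIRONMENT=local -t smarter:local .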