From e35be88f8622c46ce2b8acad58a26ad5dbf4eff8 Mon Sep 17 00:00:00 2001
From: Morten Olsen
Date: Tue, 9 Sep 2025 20:15:09 +0200
Subject: [PATCH] init

---
 .devcontainer/devcontainer.json |  33 +++
 Dockerfile                      |  78 ++++++
 coder.tf                        | 451 ++++++++++++++++++++++++++++++++
 docker-compose.yaml             |   5 +
 env/.gitconfig                  |   8 +
 env/.zshrc                      |   1 +
 6 files changed, 576 insertions(+)
 create mode 100644 .devcontainer/devcontainer.json
 create mode 100644 Dockerfile
 create mode 100644 coder.tf
 create mode 100644 docker-compose.yaml
 create mode 100644 env/.gitconfig
 create mode 100644 env/.zshrc

diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
new file mode 100644
index 0000000..765dc8c
--- /dev/null
+++ b/.devcontainer/devcontainer.json
@@ -0,0 +1,33 @@
+{
+  "name": "My Coder Test Environment",
+  "build": {
+    "dockerfile": "../Dockerfile",
+    "context": ".."
+  },
+  "remoteUser": "coder", // This should match the user created in your Dockerfile
+  // "workspaceMount": "source=./workspace,target=/home/coder/workspace,type=bind,consistency=cached",
+  "workspaceFolder": "/workspaces",
+  "customizations": {
+    "vscode": {
+      "settings": {
+        "terminal.integrated.defaultProfile.linux": "zsh",
+        "terminal.integrated.profiles.linux": {
+          "zsh": {
+            "path": "/usr/bin/zsh"
+          }
+        }
+      },
+      "extensions": [
+        "ms-azuretools.vscode-docker",
+        "golang.go", // Example: Add extensions relevant to your work
+        "rust-lang.rust-analyzer",
+        "esbenp.prettier-vscode",
+        "editorconfig.editorconfig"
+      ]
+    }
+  },
+  // "postCreateCommand": ". /etc/profile.d/nvm.sh && nvm install --lts && nvm use --lts",
+  "containerEnv": {
+    "MY_CUSTOM_VAR": "some-value"
+  }
+}
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..3c7d127
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,78 @@
+FROM ubuntu:24.04
+
+# --- Setup for NVM and other tools ---
+
+# Update package lists and install basic dependencies
+RUN apt-get update && apt-get install -y \
+    curl \
+    git \
+    nodejs \
+    npm \
+    build-essential \
+    zsh \
+    tmux \
+    wget \
+    unzip \
+    starship \
+    iputils-ping \
+    sudo \
+    procps \
+    ripgrep \
+    fzf \
+    python3-venv \
+    software-properties-common \
+    && apt-get update \
+    && apt-get install -y neovim \
+    && rm -rf /var/lib/apt/lists/*
+
+RUN \
+    curl -fL https://github.com/neovim/neovim/releases/download/v0.11.4/nvim-linux-x86_64.appimage -o nvim-linux-x86_64.appimage
+
+# Install NVM
+# Using a specific NVM version is good practice. Check the latest stable version.
+ENV NVM_DIR="/usr/local/nvm"
+RUN mkdir -p ${NVM_DIR} && \
+    curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.7/install.sh | bash && \
+    # Ensure NVM paths are added to shell profiles for all users
+    echo 'export NVM_DIR="/usr/local/nvm"' >> /etc/profile.d/nvm.sh && \
+    echo '[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm' >> /etc/profile.d/nvm.sh && \
+    echo '[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion' >> /etc/profile.d/nvm.sh && \
+    chmod +x /etc/profile.d/nvm.sh
+
+# Install a default Node.js version and global packages with NVM.
+# This will be available to all users.
+# You might want to install `npm` itself, or specific global packages.
+# Ensure NVM is sourced before using it.
+RUN bashenv="$(mktemp)" && \
+    echo '#!/bin/bash' > "$bashenv" && \
+    echo '. /etc/profile.d/nvm.sh' >> "$bashenv" && \
+    echo 'nvm install 23' >> "$bashenv" && \
+    echo 'nvm use 23' >> "$bashenv" && \
+    echo 'corepack enable' >> "$bashenv" && \
+    chmod +x "$bashenv" && \
+    "$bashenv" && \
+    rm "$bashenv"
+
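+# Note: the headless Neovim runs below execute as root, before the coder user's config is
+# cloned further down, so they will not pick up that config; the `|| true` keeps the build
+# going if they fail, and plugin sync may need to be repeated as the coder user on first start.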
+RUN bashenv_nvim_init="$(mktemp)" && \
+    echo '#!/bin/bash' > "$bashenv_nvim_init" && \
+    echo '. /etc/profile.d/nvm.sh' >> "$bashenv_nvim_init" && \
+    echo 'npm install -g neovim' >> "$bashenv_nvim_init" && \
+    echo 'nvim --headless "+Lazy! sync" +qa || true' >> "$bashenv_nvim_init" && \
+    echo 'nvim --headless "+MasonToolsUpdate" +qa || true' >> "$bashenv_nvim_init" && \
+    chmod +x "$bashenv_nvim_init" && \
+    "$bashenv_nvim_init" && \
+    rm "$bashenv_nvim_init"
+
+RUN groupadd --gid 11005 coder \
+    && useradd -s /bin/bash --uid 11005 --gid 11005 -m coder \
+    && apt-get update && apt-get install -y sudo \
+    && echo coder ALL=\(ALL\) NOPASSWD:ALL > /etc/sudoers.d/coder \
+    && chmod 0440 /etc/sudoers.d/coder \
+    && chsh -s /usr/bin/zsh coder
+
+RUN mkdir -p /home/coder/.config && chown -R coder:coder /home/coder
+USER coder
+RUN git clone https://gitea.olsen.cloud/morten/nvim.git /home/coder/.config/nvim
+
+COPY --chown=coder:coder ./env/ /home/coder/
+
diff --git a/coder.tf b/coder.tf
new file mode 100644
index 0000000..81bdfe2
--- /dev/null
+++ b/coder.tf
@@ -0,0 +1,451 @@
+terraform {
+  required_providers {
+    coder = {
+      source  = "coder/coder"
+      version = "~> 2.0"
+    }
+    kubernetes = {
+      source = "hashicorp/kubernetes"
+    }
+    envbuilder = {
+      source = "coder/envbuilder"
+    }
+  }
+}
+
+provider "coder" {}
+provider "kubernetes" {
+  # Authenticate via ~/.kube/config or a Coder-specific ServiceAccount, depending on admin preferences
+  config_path = var.use_kubeconfig == true ? "~/.kube/config" : null
+}
+provider "envbuilder" {}
+
+data "coder_provisioner" "me" {}
+data "coder_workspace" "me" {}
+data "coder_workspace_owner" "me" {}
+
+variable "use_kubeconfig" {
+  type        = bool
+  description = <<-EOF
+  Use host kubeconfig? (true/false)
+
+  Set this to false if the Coder host is itself running as a Pod on the same
+  Kubernetes cluster as you are deploying workspaces to.
+
+  Set this to true if the Coder host is running outside the Kubernetes cluster
+  for workspaces. A valid "~/.kube/config" must be present on the Coder host.
+  EOF
+  default     = false
+}
+
+variable "namespace" {
+  type        = string
+  default     = "default"
+  description = "The Kubernetes namespace to create workspaces in (must exist prior to creating workspaces). If the Coder host is itself running as a Pod on the same Kubernetes cluster as you are deploying workspaces to, set this to the same namespace."
+}
+
+variable "cache_repo" {
+  default     = ""
+  description = "Use a container registry as a cache to speed up builds."
+  type        = string
+}
+
+variable "insecure_cache_repo" {
+  default     = false
+  description = "Enable this option if your cache registry does not serve HTTPS."
+  type        = bool
+}
+
+data "coder_parameter" "cpu" {
+  type         = "number"
+  name         = "cpu"
+  display_name = "CPU"
+  description  = "CPU limit (cores)."
+  default      = "2"
+  icon         = "/emojis/1f5a5.png"
+  mutable      = true
+  validation {
+    min = 1
+    max = 99999
+  }
+  order = 1
+}
+
+data "coder_parameter" "memory" {
+  type         = "number"
+  name         = "memory"
+  display_name = "Memory"
+  description  = "Memory limit (GiB)."
+  default      = "2"
+  icon         = "/icon/memory.svg"
+  mutable      = true
+  validation {
+    min = 1
+    max = 99999
+  }
+  order = 2
+}
+
+data "coder_parameter" "workspaces_volume_size" {
+  name         = "workspaces_volume_size"
+  display_name = "Workspaces volume size"
+  description  = "Size of the `/workspaces` volume (GiB)."
+  default      = "10"
+  type         = "number"
+  icon         = "/emojis/1f4be.png"
+  mutable      = false
+  validation {
+    min = 1
+    max = 99999
+  }
+  order = 3
+}
+
+data "coder_parameter" "repo" {
+  description  = "Select a repository to automatically clone and start working with a devcontainer."
+  display_name = "Repository (auto)"
+  mutable      = true
+  name         = "repo"
+  order        = 4
+  type         = "string"
+}
+
+data "coder_parameter" "fallback_image" {
+  default      = "codercom/enterprise-base:ubuntu"
+  description  = "This image runs if the devcontainer fails to build."
+  display_name = "Fallback Image"
+  mutable      = true
+  name         = "fallback_image"
+  order        = 6
+}
+
+data "coder_parameter" "devcontainer_builder" {
+  description  = <<-EOF
+Image that will build the devcontainer.
+We highly recommend using a specific release as the `:latest` tag will change.
+Find the latest version of Envbuilder here: https://github.com/coder/envbuilder/pkgs/container/envbuilder
+EOF
+  display_name = "Devcontainer Builder"
+  mutable      = true
+  name         = "devcontainer_builder"
+  default      = "ghcr.io/coder/envbuilder:latest"
+  order        = 7
+}
+
+variable "cache_repo_secret_name" {
+  default     = ""
+  description = "Path to a docker config.json containing credentials to the provided cache repo, if required."
+  sensitive   = true
+  type        = string
+}
+
+data "kubernetes_secret" "cache_repo_dockerconfig_secret" {
+  count = var.cache_repo_secret_name == "" ? 0 : 1
+  metadata {
+    name      = var.cache_repo_secret_name
+    namespace = var.namespace
+  }
+}
+
+locals {
+  deployment_name            = "coder-${lower(data.coder_workspace.me.id)}"
+  devcontainer_builder_image = data.coder_parameter.devcontainer_builder.value
+  git_author_name            = coalesce(data.coder_workspace_owner.me.full_name, data.coder_workspace_owner.me.name)
+  git_author_email           = data.coder_workspace_owner.me.email
+  repo_url                   = data.coder_parameter.repo.value
+  # The envbuilder provider requires a key-value map of environment variables.
+  envbuilder_env = {
+    "CODER_AGENT_TOKEN" : coder_agent.main.token,
+    # Use the docker gateway if the access URL is 127.0.0.1
+    "CODER_AGENT_URL" : replace(data.coder_workspace.me.access_url, "/localhost|127\\.0\\.0\\.1/", "host.docker.internal"),
+    # ENVBUILDER_GIT_URL and ENVBUILDER_CACHE_REPO will be overridden by the provider
+    # if the cache repo is enabled.
+    "ENVBUILDER_GIT_URL" : var.cache_repo == "" ? local.repo_url : "",
+    # Use the docker gateway if the access URL is 127.0.0.1
+    "ENVBUILDER_INIT_SCRIPT" : replace(coder_agent.main.init_script, "/localhost|127\\.0\\.0\\.1/", "host.docker.internal"),
+    "ENVBUILDER_FALLBACK_IMAGE" : data.coder_parameter.fallback_image.value,
+    "ENVBUILDER_DOCKER_CONFIG_BASE64" : base64encode(try(data.kubernetes_secret.cache_repo_dockerconfig_secret[0].data[".dockerconfigjson"], "")),
+    "ENVBUILDER_PUSH_IMAGE" : var.cache_repo == "" ? "" : "true"
+    # You may need to adjust this if you get an error regarding deleting files when building the workspace.
+    # For example, when testing in KinD, it was necessary to set `/product_name` and `/product_uuid` in
+    # addition to `/var/run`.
+    # "ENVBUILDER_IGNORE_PATHS": "/product_name,/product_uuid,/var/run",
+  }
+}
+
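+# When a cache repo is configured, ENVBUILDER_PUSH_IMAGE above tells Envbuilder to push the
+# built image back to that registry so later builds of the same workspace can reuse it.
+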
+# Check for the presence of a prebuilt image in the cache repo
+# that we can use instead.
+resource "envbuilder_cached_image" "cached" {
+  count         = var.cache_repo == "" ? 0 : data.coder_workspace.me.start_count
+  builder_image = local.devcontainer_builder_image
+  git_url       = local.repo_url
+  cache_repo    = var.cache_repo
+  extra_env     = local.envbuilder_env
+  insecure      = var.insecure_cache_repo
+}
+
+resource "kubernetes_persistent_volume_claim" "workspaces" {
+  metadata {
+    name      = "coder-${lower(data.coder_workspace.me.id)}-workspaces"
+    namespace = var.namespace
+    labels = {
+      "app.kubernetes.io/name"     = "coder-${lower(data.coder_workspace.me.id)}-workspaces"
+      "app.kubernetes.io/instance" = "coder-${lower(data.coder_workspace.me.id)}-workspaces"
+      "app.kubernetes.io/part-of"  = "coder"
+      //Coder-specific labels.
+      "com.coder.resource"       = "true"
+      "com.coder.workspace.id"   = data.coder_workspace.me.id
+      "com.coder.workspace.name" = data.coder_workspace.me.name
+      "com.coder.user.id"        = data.coder_workspace_owner.me.id
+      "com.coder.user.username"  = data.coder_workspace_owner.me.name
+    }
+    annotations = {
+      "com.coder.user.email" = data.coder_workspace_owner.me.email
+    }
+  }
+  wait_until_bound = false
+  spec {
+    access_modes = ["ReadWriteOnce"]
+    resources {
+      requests = {
+        storage = "${data.coder_parameter.workspaces_volume_size.value}Gi"
+      }
+    }
+    storage_class_name = "prod" # Configure the StorageClass to use here, if required.
+  }
+}
+
+resource "kubernetes_deployment" "main" {
+  count = data.coder_workspace.me.start_count
+  depends_on = [
+    kubernetes_persistent_volume_claim.workspaces
+  ]
+  wait_for_rollout = false
+  metadata {
+    name      = local.deployment_name
+    namespace = var.namespace
+    labels = {
+      "app.kubernetes.io/name"     = "coder-workspace"
+      "app.kubernetes.io/instance" = local.deployment_name
+      "app.kubernetes.io/part-of"  = "coder"
+      "com.coder.resource"         = "true"
+      "com.coder.workspace.id"     = data.coder_workspace.me.id
+      "com.coder.workspace.name"   = data.coder_workspace.me.name
+      "com.coder.user.id"          = data.coder_workspace_owner.me.id
+      "com.coder.user.username"    = data.coder_workspace_owner.me.name
+    }
+    annotations = {
+      "com.coder.user.email" = data.coder_workspace_owner.me.email
+    }
+  }
+
+  spec {
+    replicas = 1
+    selector {
+      match_labels = {
+        "app.kubernetes.io/name" = "coder-workspace"
+      }
+    }
+    strategy {
+      type = "Recreate"
+    }
+
+    template {
+      metadata {
+        labels = {
+          "app.kubernetes.io/name" = "coder-workspace"
+        }
+      }
+      spec {
+        security_context {}
+
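+        # The dev container runs Envbuilder: it clones the repository selected above, builds the
+        # devcontainer image, and then runs the Coder agent init script inside the built environment.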
+        container {
+          name              = "dev"
+          image             = var.cache_repo == "" ? local.devcontainer_builder_image : envbuilder_cached_image.cached.0.image
+          image_pull_policy = "Always"
+          security_context {}
+
+          # Set the environment using cached_image.cached.0.env if the cache repo is enabled.
+          # Otherwise, use the local.envbuilder_env.
+          # You could alternatively write the environment variables to a ConfigMap or Secret
+          # and use that as `env_from`.
+          dynamic "env" {
+            for_each = nonsensitive(var.cache_repo == "" ? local.envbuilder_env : envbuilder_cached_image.cached.0.env_map)
+            content {
+              name  = env.key
+              value = env.value
+            }
+          }
+
+          resources {
+            requests = {
+              "cpu"    = "250m"
+              "memory" = "512Mi"
+            }
+            limits = {
+              "cpu"    = "${data.coder_parameter.cpu.value}"
+              "memory" = "${data.coder_parameter.memory.value}Gi"
+            }
+          }
+          volume_mount {
+            mount_path = "/workspaces"
+            name       = "workspaces"
+            read_only  = false
+          }
+        }
+
+        volume {
+          name = "workspaces"
+          persistent_volume_claim {
+            claim_name = kubernetes_persistent_volume_claim.workspaces.metadata.0.name
+            read_only  = false
+          }
+        }
+
+        affinity {
+          // This affinity attempts to spread out all workspace pods evenly across
+          // nodes.
+          pod_anti_affinity {
+            preferred_during_scheduling_ignored_during_execution {
+              weight = 1
+              pod_affinity_term {
+                topology_key = "kubernetes.io/hostname"
+                label_selector {
+                  match_expressions {
+                    key      = "app.kubernetes.io/name"
+                    operator = "In"
+                    values   = ["coder-workspace"]
+                  }
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+}
+
+resource "coder_agent" "main" {
+  arch           = data.coder_provisioner.me.arch
+  os             = "linux"
+  startup_script = <<-EOT
+    set -e
+    sudo chown -R coder /workspaces
+  EOT
+  dir            = "/workspaces"
+
+  # These environment variables allow you to make Git commits right away after creating a
+  # workspace. Note that they take precedence over configuration defined in ~/.gitconfig!
+  # You can remove this block if you'd prefer to configure Git manually or using
+  # dotfiles. (see docs/dotfiles.md)
+  env = {
+    GIT_AUTHOR_NAME     = local.git_author_name
+    GIT_AUTHOR_EMAIL    = local.git_author_email
+    GIT_COMMITTER_NAME  = local.git_author_name
+    GIT_COMMITTER_EMAIL = local.git_author_email
+  }
+
+  # The following metadata blocks are optional. They are used to display
+  # information about your workspace in the dashboard. You can remove them
+  # if you don't want to display any information.
+  # For basic resources, you can use the `coder stat` command.
+  # If you need more control, you can write your own script.
+  metadata {
+    display_name = "CPU Usage"
+    key          = "0_cpu_usage"
+    script       = "coder stat cpu"
+    interval     = 10
+    timeout      = 1
+  }
+
+  metadata {
+    display_name = "RAM Usage"
+    key          = "1_ram_usage"
+    script       = "coder stat mem"
+    interval     = 10
+    timeout      = 1
+  }
+
+  metadata {
+    display_name = "Workspaces Disk"
+    key          = "3_workspaces_disk"
+    script       = "coder stat disk --path /workspaces"
+    interval     = 60
+    timeout      = 1
+  }
+
+  metadata {
+    display_name = "CPU Usage (Host)"
+    key          = "4_cpu_usage_host"
+    script       = "coder stat cpu --host"
+    interval     = 10
+    timeout      = 1
+  }
+
+  metadata {
+    display_name = "Memory Usage (Host)"
+    key          = "5_mem_usage_host"
+    script       = "coder stat mem --host"
+    interval     = 10
+    timeout      = 1
+  }
+
+  metadata {
+    display_name = "Load Average (Host)"
+    key          = "6_load_host"
+    # get load avg scaled by number of cores
+    script = <