Morten Olsen
2025-12-12 11:10:01 +01:00
commit 277fc459d5
64 changed files with 8625 additions and 0 deletions


@@ -0,0 +1,25 @@
{
"name": "Kubebuilder DevContainer",
"image": "golang:1.24",
"features": {
"ghcr.io/devcontainers/features/docker-in-docker:2": {},
"ghcr.io/devcontainers/features/git:1": {}
},
"runArgs": ["--network=host"],
"customizations": {
"vscode": {
"settings": {
"terminal.integrated.shell.linux": "/bin/bash"
},
"extensions": [
"ms-kubernetes-tools.vscode-kubernetes-tools",
"ms-azuretools.vscode-docker"
]
}
},
"onCreateCommand": "bash .devcontainer/post-install.sh"
}


@@ -0,0 +1,23 @@
#!/bin/bash
set -x
curl -Lo ./kind https://kind.sigs.k8s.io/dl/latest/kind-linux-$(go env GOARCH)
chmod +x ./kind
mv ./kind /usr/local/bin/kind
curl -L -o kubebuilder https://go.kubebuilder.io/dl/latest/linux/$(go env GOARCH)
chmod +x kubebuilder
mv kubebuilder /usr/local/bin/
KUBECTL_VERSION=$(curl -L -s https://dl.k8s.io/release/stable.txt)
curl -LO "https://dl.k8s.io/release/$KUBECTL_VERSION/bin/linux/$(go env GOARCH)/kubectl"
chmod +x kubectl
mv kubectl /usr/local/bin/kubectl
docker network create -d=bridge --subnet=172.19.0.0/24 kind
kind version
kubebuilder version
docker --version
go version
kubectl version --client

.dockerignore Normal file

@@ -0,0 +1,11 @@
# More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file
# Ignore everything by default and re-include only needed files
**
# Re-include Go source files (but not *_test.go)
!**/*.go
**/*_test.go
# Re-include Go module files
!go.mod
!go.sum

.github/workflows/lint.yml vendored Normal file

@@ -0,0 +1,23 @@
name: Lint
on:
push:
pull_request:
jobs:
lint:
name: Run on Ubuntu
runs-on: ubuntu-latest
steps:
- name: Clone the code
uses: actions/checkout@v4
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: Run linter
uses: golangci/golangci-lint-action@v8
with:
version: v2.5.0

.github/workflows/test-e2e.yml vendored Normal file

@@ -0,0 +1,32 @@
name: E2E Tests
on:
push:
pull_request:
jobs:
test-e2e:
name: Run on Ubuntu
runs-on: ubuntu-latest
steps:
- name: Clone the code
uses: actions/checkout@v4
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: Install the latest version of kind
run: |
curl -Lo ./kind https://kind.sigs.k8s.io/dl/latest/kind-linux-$(go env GOARCH)
chmod +x ./kind
sudo mv ./kind /usr/local/bin/kind
- name: Verify kind installation
run: kind version
- name: Running Test e2e
run: |
go mod tidy
make test-e2e

.github/workflows/test.yml vendored Normal file

@@ -0,0 +1,23 @@
name: Tests
on:
push:
pull_request:
jobs:
test:
name: Run on Ubuntu
runs-on: ubuntu-latest
steps:
- name: Clone the code
uses: actions/checkout@v4
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: Running Tests
run: |
go mod tidy
make test

.gitignore vendored Normal file

@@ -0,0 +1,30 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
bin/*
Dockerfile.cross
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Go workspace file
go.work
# Kubernetes Generated files - skip generated files, except for vendored files
!vendor/**/zz_generated.*
# editor and IDE paraphernalia
.idea
.vscode
*.swp
*.swo
*~
# Kubeconfig might contain secrets
*.kubeconfig

.golangci.yml Normal file

@@ -0,0 +1,52 @@
version: "2"
run:
allow-parallel-runners: true
linters:
default: none
enable:
- copyloopvar
- dupl
- errcheck
- ginkgolinter
- goconst
- gocyclo
- govet
- ineffassign
- lll
- misspell
- nakedret
- prealloc
- revive
- staticcheck
- unconvert
- unparam
- unused
settings:
revive:
rules:
- name: comment-spacings
- name: import-shadowing
exclusions:
generated: lax
rules:
- linters:
- lll
path: api/*
- linters:
- dupl
- lll
path: internal/*
paths:
- third_party$
- builtin$
- examples$
formatters:
enable:
- gofmt
- goimports
exclusions:
generated: lax
paths:
- third_party$
- builtin$
- examples$

ARCHITECTURE.md Normal file

File diff suppressed because it is too large

CONTRIBUTING.md Normal file

@@ -0,0 +1,373 @@
# Contributing to Nuclei Operator
Thank you for your interest in contributing to the Nuclei Operator! This document provides guidelines and instructions for contributing.
## Table of Contents
- [Code of Conduct](#code-of-conduct)
- [Getting Started](#getting-started)
- [Development Setup](#development-setup)
- [Making Changes](#making-changes)
- [Code Style Guidelines](#code-style-guidelines)
- [Testing](#testing)
- [Pull Request Process](#pull-request-process)
- [Reporting Issues](#reporting-issues)
## Code of Conduct
This project follows the [Kubernetes Code of Conduct](https://github.com/kubernetes/community/blob/master/code-of-conduct.md). By participating, you are expected to uphold this code.
## Getting Started
### Prerequisites
Before you begin, ensure you have the following installed:
- **Go 1.24+**: [Download Go](https://golang.org/dl/)
- **Docker**: [Install Docker](https://docs.docker.com/get-docker/)
- **kubectl**: [Install kubectl](https://kubernetes.io/docs/tasks/tools/)
- **Kind** (for local testing): [Install Kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation)
- **Make**: Usually pre-installed on Linux/macOS
### Fork and Clone
1. Fork the repository on GitHub
2. Clone your fork locally:
```bash
git clone https://github.com/<your-username>/nuclei-operator.git
cd nuclei-operator
```
3. Add the upstream remote:
```bash
git remote add upstream https://github.com/mortenolsen/nuclei-operator.git
```
## Development Setup
### Install Dependencies
```bash
# Download Go modules
go mod download
# Install development tools
make controller-gen
make kustomize
make envtest
```
### Set Up Local Cluster
```bash
# Create a Kind cluster for development
kind create cluster --name nuclei-dev
# Verify cluster is running
kubectl cluster-info
```
### Install CRDs
```bash
# Generate and install CRDs
make manifests
make install
```
### Run the Operator Locally
```bash
# Run outside the cluster (for development)
make run
```
## Making Changes
### Branch Naming
Use descriptive branch names:
- `feature/add-webhook-support` - New features
- `fix/scan-timeout-issue` - Bug fixes
- `docs/update-api-reference` - Documentation updates
- `refactor/scanner-interface` - Code refactoring
### Commit Messages
Follow the [Conventional Commits](https://www.conventionalcommits.org/) specification:
```
<type>(<scope>): <description>
[optional body]
[optional footer(s)]
```
**Types:**
- `feat`: New feature
- `fix`: Bug fix
- `docs`: Documentation changes
- `style`: Code style changes (formatting, etc.)
- `refactor`: Code refactoring
- `test`: Adding or updating tests
- `chore`: Maintenance tasks
**Examples:**
```
feat(scanner): add support for custom nuclei templates
fix(controller): handle nil pointer in ingress reconciler
docs(readme): update installation instructions
```
### Keeping Your Fork Updated
```bash
# Fetch upstream changes
git fetch upstream
# Rebase your branch on upstream/main
git checkout main
git rebase upstream/main
# Update your feature branch
git checkout feature/your-feature
git rebase main
```
## Code Style Guidelines
### Go Code Style
- Follow the [Effective Go](https://golang.org/doc/effective_go) guidelines
- Use `gofmt` for formatting (run `make fmt`)
- Follow [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments)
- Use meaningful variable and function names
- Add comments for exported functions and types
### Linting
```bash
# Run the linter
make lint
# Auto-fix linting issues where possible
make lint-fix
```
### Code Organization
- **api/**: CRD type definitions
- **cmd/**: Main entry points
- **internal/controller/**: Reconciliation logic
- **internal/scanner/**: Nuclei scanner implementation
- **config/**: Kubernetes manifests
### Error Handling
- Always handle errors explicitly
- Use `fmt.Errorf` with `%w` for error wrapping
- Log errors with appropriate context
```go
if err != nil {
return fmt.Errorf("failed to create NucleiScan: %w", err)
}
```
### Logging
Use structured logging with controller-runtime's logger:
```go
log := logf.FromContext(ctx)
log.Info("Processing resource", "name", resource.Name, "namespace", resource.Namespace)
log.Error(err, "Failed to reconcile", "resource", req.NamespacedName)
```
## Testing
### Running Tests
```bash
# Run unit tests
make test
# Run tests with coverage
make test
go tool cover -html=cover.out
# Run end-to-end tests
make test-e2e
```
### Writing Tests
- Write unit tests for all new functionality
- Use table-driven tests where appropriate (see the sketch after the example below)
- Mock external dependencies
- Test both success and error cases
**Example test structure:**
```go
var _ = Describe("IngressController", func() {
Context("When reconciling an Ingress", func() {
It("Should create a NucleiScan", func() {
// Test implementation
})
It("Should handle missing Ingress gracefully", func() {
// Test implementation
})
})
})
```
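For the table-driven style mentioned above, Ginkgo's `DescribeTable` works well. A minimal sketch, where the `extractTargets` helper is hypothetical and stands in for whatever pure function you are testing:
```go
var _ = DescribeTable("extracting scan targets from Ingress hosts",
	func(hosts []string, expected []string) {
		// extractTargets is a hypothetical helper; substitute the function under test.
		Expect(extractTargets(hosts)).To(Equal(expected))
	},
	Entry("single host",
		[]string{"myapp.example.com"},
		[]string{"https://myapp.example.com"}),
	Entry("multiple hosts",
		[]string{"a.example.com", "b.example.com"},
		[]string{"https://a.example.com", "https://b.example.com"}),
)
```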
### Test Coverage
- Aim for at least 70% code coverage
- Focus on testing business logic and edge cases
- Don't test generated code or simple getters/setters
## Pull Request Process
### Before Submitting
1. **Update your branch:**
```bash
git fetch upstream
git rebase upstream/main
```
2. **Run all checks:**
```bash
make manifests generate fmt vet lint test
```
3. **Update documentation** if needed
4. **Add/update tests** for your changes
### Submitting a PR
1. Push your branch to your fork:
```bash
git push origin feature/your-feature
```
2. Create a Pull Request on GitHub
3. Fill out the PR template with:
- Description of changes
- Related issues
- Testing performed
- Breaking changes (if any)
### PR Review Process
1. **Automated checks** must pass (CI/CD pipeline)
2. **Code review** by at least one maintainer
3. **Address feedback** and update your PR
4. **Squash commits** if requested
5. **Merge** once approved
### PR Checklist
- [ ] Code follows the project's style guidelines
- [ ] Tests added/updated for the changes
- [ ] Documentation updated if needed
- [ ] Commit messages follow conventional commits
- [ ] All CI checks pass
- [ ] PR description is complete
## Reporting Issues
### Bug Reports
When reporting bugs, include:
1. **Description**: Clear description of the issue
2. **Steps to reproduce**: Minimal steps to reproduce
3. **Expected behavior**: What you expected to happen
4. **Actual behavior**: What actually happened
5. **Environment**:
- Kubernetes version
- Operator version
- Cloud provider (if applicable)
6. **Logs**: Relevant operator logs
7. **Resources**: Related Kubernetes resources (sanitized)
### Feature Requests
When requesting features, include:
1. **Problem statement**: What problem does this solve?
2. **Proposed solution**: How should it work?
3. **Alternatives considered**: Other approaches you've thought of
4. **Additional context**: Any other relevant information
### Security Issues
For security vulnerabilities, please **do not** open a public issue. Instead, email the maintainers directly or use GitHub's private vulnerability reporting feature.
## Development Tips
### Useful Make Targets
```bash
make help # Show all available targets
make manifests # Generate CRD manifests
make generate # Generate code (DeepCopy, etc.)
make fmt # Format code
make vet # Run go vet
make lint # Run linter
make test # Run tests
make build # Build binary
make run # Run locally
make docker-build # Build container image
make install # Install CRDs
make deploy # Deploy to cluster
```
### Debugging
```bash
# Increase log verbosity
go run ./cmd/main.go --zap-log-level=debug
# View controller logs
kubectl logs -f -n nuclei-operator-system deployment/nuclei-operator-controller-manager
# Debug with delve
dlv debug ./cmd/main.go
```
### IDE Setup
**VS Code:**
- Install the Go extension
- Enable `gopls` for language server
- Configure `golangci-lint` as the linter
**GoLand:**
- Import the project as a Go module
- Configure the Go SDK
- Enable `golangci-lint` integration
## Getting Help
- **Documentation**: Check the [README](README.md) and [docs/](docs/) directory
- **Issues**: Search existing [GitHub Issues](https://github.com/mortenolsen/nuclei-operator/issues)
- **Discussions**: Use [GitHub Discussions](https://github.com/mortenolsen/nuclei-operator/discussions) for questions
## Recognition
Contributors will be recognized in:
- The project's README
- Release notes for significant contributions
- GitHub's contributor graph
Thank you for contributing to the Nuclei Operator!

Dockerfile Normal file

@@ -0,0 +1,62 @@
# Build the manager binary
FROM golang:1.24 AS builder
ARG TARGETOS
ARG TARGETARCH
WORKDIR /workspace
# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum
# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
RUN go mod download
# Copy the Go source (relies on .dockerignore to filter)
COPY . .
# Build
# the GOARCH has no default value to allow the binary to be built according to the host where the command
# was called. For example, if we call make docker-build in a local env with Apple Silicon (M1),
# the docker BUILDPLATFORM arg will be linux/arm64, while for Apple x86 it will be linux/amd64. Therefore,
# by leaving it empty we can ensure that the container and binary shipped on it will have the same platform.
RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager cmd/main.go
# Download and build nuclei binary
FROM golang:1.24 AS nuclei-builder
ARG TARGETOS
ARG TARGETARCH
# Install nuclei from source for the target architecture
RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} \
go install -v github.com/projectdiscovery/nuclei/v3/cmd/nuclei@latest
# Final image
FROM alpine:3.19 AS final
# Install ca-certificates for HTTPS requests and create non-root user
RUN apk --no-cache add ca-certificates tzdata && \
    addgroup -g 65532 nonroot && \
    adduser -D -u 65532 -G nonroot nonroot
# Create directories for nuclei
RUN mkdir -p /nuclei-templates /home/nonroot/.nuclei && \
chown -R 65532:65532 /nuclei-templates /home/nonroot
WORKDIR /
# Copy the manager binary
COPY --from=builder /workspace/manager .
# Copy nuclei binary
COPY --from=nuclei-builder /go/bin/nuclei /usr/local/bin/nuclei
# Set ownership
RUN chown 65532:65532 /manager /usr/local/bin/nuclei
# Use non-root user
USER 65532:65532
# Environment variables for nuclei
ENV NUCLEI_TEMPLATES_PATH=/nuclei-templates
ENV HOME=/home/nonroot
ENTRYPOINT ["/manager"]

Makefile Normal file

@@ -0,0 +1,250 @@
# Image URL to use for all building/pushing image targets
IMG ?= controller:latest
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
GOBIN=$(shell go env GOPATH)/bin
else
GOBIN=$(shell go env GOBIN)
endif
# CONTAINER_TOOL defines the container tool to be used for building images.
# Be aware that the target commands are only tested with Docker which is
# scaffolded by default. However, you might want to replace it to use other
# tools (e.g. podman).
CONTAINER_TOOL ?= docker
# Setting SHELL to bash allows bash commands to be executed by recipes.
# Options are set to exit when a recipe line exits non-zero or a piped command fails.
SHELL = /usr/bin/env bash -o pipefail
.SHELLFLAGS = -ec
.PHONY: all
all: build
##@ General
# The help target prints out all targets with their descriptions organized
# beneath their categories. The categories are represented by '##@' and the
# target descriptions by '##'. The awk command is responsible for reading the
# entire set of makefiles included in this invocation, looking for lines of the
# file as xyz: ## something, and then pretty-format the target and help. Then,
# if there's a line with ##@ something, that gets pretty-printed as a category.
# More info on the usage of ANSI control characters for terminal formatting:
# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters
# More info on the awk command:
# http://linuxcommand.org/lc3_adv_awk.php
.PHONY: help
help: ## Display this help.
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
##@ Development
.PHONY: manifests
manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
"$(CONTROLLER_GEN)" rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases
.PHONY: generate
generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
"$(CONTROLLER_GEN)" object:headerFile="hack/boilerplate.go.txt" paths="./..."
.PHONY: fmt
fmt: ## Run go fmt against code.
go fmt ./...
.PHONY: vet
vet: ## Run go vet against code.
go vet ./...
.PHONY: test
test: manifests generate fmt vet setup-envtest ## Run tests.
KUBEBUILDER_ASSETS="$(shell "$(ENVTEST)" use $(ENVTEST_K8S_VERSION) --bin-dir "$(LOCALBIN)" -p path)" go test $$(go list ./... | grep -v /e2e) -coverprofile cover.out
# TODO(user): To use a different vendor for e2e tests, modify the setup under 'tests/e2e'.
# The default setup assumes Kind is pre-installed and builds/loads the Manager Docker image locally.
# CertManager is installed by default; skip with:
# - CERT_MANAGER_INSTALL_SKIP=true
KIND_CLUSTER ?= nuclei-operator-test-e2e
.PHONY: setup-test-e2e
setup-test-e2e: ## Set up a Kind cluster for e2e tests if it does not exist
@command -v $(KIND) >/dev/null 2>&1 || { \
echo "Kind is not installed. Please install Kind manually."; \
exit 1; \
}
@case "$$($(KIND) get clusters)" in \
*"$(KIND_CLUSTER)"*) \
echo "Kind cluster '$(KIND_CLUSTER)' already exists. Skipping creation." ;; \
*) \
echo "Creating Kind cluster '$(KIND_CLUSTER)'..."; \
$(KIND) create cluster --name $(KIND_CLUSTER) ;; \
esac
.PHONY: test-e2e
test-e2e: setup-test-e2e manifests generate fmt vet ## Run the e2e tests. Expects an isolated environment using Kind.
KIND=$(KIND) KIND_CLUSTER=$(KIND_CLUSTER) go test -tags=e2e ./test/e2e/ -v -ginkgo.v
$(MAKE) cleanup-test-e2e
.PHONY: cleanup-test-e2e
cleanup-test-e2e: ## Tear down the Kind cluster used for e2e tests
@$(KIND) delete cluster --name $(KIND_CLUSTER)
.PHONY: lint
lint: golangci-lint ## Run golangci-lint linter
"$(GOLANGCI_LINT)" run
.PHONY: lint-fix
lint-fix: golangci-lint ## Run golangci-lint linter and perform fixes
"$(GOLANGCI_LINT)" run --fix
.PHONY: lint-config
lint-config: golangci-lint ## Verify golangci-lint linter configuration
"$(GOLANGCI_LINT)" config verify
##@ Build
.PHONY: build
build: manifests generate fmt vet ## Build manager binary.
go build -o bin/manager cmd/main.go
.PHONY: run
run: manifests generate fmt vet ## Run a controller from your host.
go run ./cmd/main.go
# If you wish to build the manager image targeting other platforms you can use the --platform flag.
# (i.e. docker build --platform linux/arm64). However, you must enable docker buildKit for it.
# More info: https://docs.docker.com/develop/develop-images/build_enhancements/
.PHONY: docker-build
docker-build: ## Build docker image with the manager.
$(CONTAINER_TOOL) build -t ${IMG} .
.PHONY: docker-push
docker-push: ## Push docker image with the manager.
$(CONTAINER_TOOL) push ${IMG}
# PLATFORMS defines the target platforms the manager image is built for, to provide support for multiple
# architectures (e.g. make docker-buildx IMG=myregistry/myoperator:0.0.1). To use this option you need to:
# - be able to use docker buildx. More info: https://docs.docker.com/build/buildx/
# - have enabled BuildKit. More info: https://docs.docker.com/develop/develop-images/build_enhancements/
# - be able to push the image to your registry (i.e. if you do not set a valid value via IMG=<myregistry/image:<tag>> then the export will fail)
# To adequately provide solutions that are compatible with multiple platforms, you should consider using this option.
PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le
.PHONY: docker-buildx
docker-buildx: ## Build and push docker image for the manager for cross-platform support
# copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile
sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross
- $(CONTAINER_TOOL) buildx create --name nuclei-operator-builder
$(CONTAINER_TOOL) buildx use nuclei-operator-builder
- $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross .
- $(CONTAINER_TOOL) buildx rm nuclei-operator-builder
rm Dockerfile.cross
.PHONY: build-installer
build-installer: manifests generate kustomize ## Generate a consolidated YAML with CRDs and deployment.
mkdir -p dist
cd config/manager && "$(KUSTOMIZE)" edit set image controller=${IMG}
"$(KUSTOMIZE)" build config/default > dist/install.yaml
##@ Deployment
ifndef ignore-not-found
ignore-not-found = false
endif
.PHONY: install
install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
@out="$$( "$(KUSTOMIZE)" build config/crd 2>/dev/null || true )"; \
if [ -n "$$out" ]; then echo "$$out" | "$(KUBECTL)" apply -f -; else echo "No CRDs to install; skipping."; fi
.PHONY: uninstall
uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
@out="$$( "$(KUSTOMIZE)" build config/crd 2>/dev/null || true )"; \
if [ -n "$$out" ]; then echo "$$out" | "$(KUBECTL)" delete --ignore-not-found=$(ignore-not-found) -f -; else echo "No CRDs to delete; skipping."; fi
.PHONY: deploy
deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
cd config/manager && "$(KUSTOMIZE)" edit set image controller=${IMG}
"$(KUSTOMIZE)" build config/default | "$(KUBECTL)" apply -f -
.PHONY: undeploy
undeploy: kustomize ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
"$(KUSTOMIZE)" build config/default | "$(KUBECTL)" delete --ignore-not-found=$(ignore-not-found) -f -
##@ Dependencies
## Location to install dependencies to
LOCALBIN ?= $(shell pwd)/bin
$(LOCALBIN):
mkdir -p "$(LOCALBIN)"
## Tool Binaries
KUBECTL ?= kubectl
KIND ?= kind
KUSTOMIZE ?= $(LOCALBIN)/kustomize
CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen
ENVTEST ?= $(LOCALBIN)/setup-envtest
GOLANGCI_LINT = $(LOCALBIN)/golangci-lint
## Tool Versions
KUSTOMIZE_VERSION ?= v5.7.1
CONTROLLER_TOOLS_VERSION ?= v0.19.0
#ENVTEST_VERSION is the version of controller-runtime release branch to fetch the envtest setup script (i.e. release-0.20)
ENVTEST_VERSION ?= $(shell v='$(call gomodver,sigs.k8s.io/controller-runtime)'; \
[ -n "$$v" ] || { echo "Set ENVTEST_VERSION manually (controller-runtime replace has no tag)" >&2; exit 1; }; \
printf '%s\n' "$$v" | sed -E 's/^v?([0-9]+)\.([0-9]+).*/release-\1.\2/')
#ENVTEST_K8S_VERSION is the version of Kubernetes to use for setting up ENVTEST binaries (i.e. 1.31)
ENVTEST_K8S_VERSION ?= $(shell v='$(call gomodver,k8s.io/api)'; \
[ -n "$$v" ] || { echo "Set ENVTEST_K8S_VERSION manually (k8s.io/api replace has no tag)" >&2; exit 1; }; \
printf '%s\n' "$$v" | sed -E 's/^v?[0-9]+\.([0-9]+).*/1.\1/')
GOLANGCI_LINT_VERSION ?= v2.5.0
.PHONY: kustomize
kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary.
$(KUSTOMIZE): $(LOCALBIN)
$(call go-install-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v5,$(KUSTOMIZE_VERSION))
.PHONY: controller-gen
controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary.
$(CONTROLLER_GEN): $(LOCALBIN)
$(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen,$(CONTROLLER_TOOLS_VERSION))
.PHONY: setup-envtest
setup-envtest: envtest ## Download the binaries required for ENVTEST in the local bin directory.
@echo "Setting up envtest binaries for Kubernetes version $(ENVTEST_K8S_VERSION)..."
@"$(ENVTEST)" use $(ENVTEST_K8S_VERSION) --bin-dir "$(LOCALBIN)" -p path || { \
echo "Error: Failed to set up envtest binaries for version $(ENVTEST_K8S_VERSION)."; \
exit 1; \
}
.PHONY: envtest
envtest: $(ENVTEST) ## Download setup-envtest locally if necessary.
$(ENVTEST): $(LOCALBIN)
$(call go-install-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest,$(ENVTEST_VERSION))
.PHONY: golangci-lint
golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary.
$(GOLANGCI_LINT): $(LOCALBIN)
$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/v2/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION))
# go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist
# $1 - target path with name of binary
# $2 - package url which can be installed
# $3 - specific version of package
define go-install-tool
@[ -f "$(1)-$(3)" ] && [ "$$(readlink -- "$(1)" 2>/dev/null)" = "$(1)-$(3)" ] || { \
set -e; \
package=$(2)@$(3) ;\
echo "Downloading $${package}" ;\
rm -f "$(1)" ;\
GOBIN="$(LOCALBIN)" go install $${package} ;\
mv "$(LOCALBIN)/$$(basename "$(1)")" "$(1)-$(3)" ;\
} ;\
ln -sf "$$(realpath "$(1)-$(3)")" "$(1)"
endef
define gomodver
$(shell go list -m -f '{{if .Replace}}{{.Replace.Version}}{{else}}{{.Version}}{{end}}' $(1) 2>/dev/null)
endef

PROJECT Normal file

@@ -0,0 +1,21 @@
# Code generated by tool. DO NOT EDIT.
# This file is used to track the info used to scaffold your project
# and allow the plugins properly work.
# More info: https://book.kubebuilder.io/reference/project-config.html
cliVersion: 4.10.1
domain: homelab.mortenolsen.pro
layout:
- go.kubebuilder.io/v4
projectName: nuclei-operator
repo: github.com/mortenolsen/nuclei-operator
resources:
- api:
crdVersion: v1
namespaced: true
controller: true
domain: homelab.mortenolsen.pro
group: nuclei
kind: NucleiScan
path: github.com/mortenolsen/nuclei-operator/api/v1alpha1
version: v1alpha1
version: "3"

README.md Normal file

@@ -0,0 +1,381 @@
# Nuclei Operator
[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
[![Go Report Card](https://goreportcard.com/badge/github.com/mortenolsen/nuclei-operator)](https://goreportcard.com/report/github.com/mortenolsen/nuclei-operator)
A Kubernetes operator that automates security scanning of web applications exposed through Kubernetes Ingress resources and Istio VirtualService CRDs using [Nuclei](https://github.com/projectdiscovery/nuclei), a fast and customizable vulnerability scanner.
## Overview
The Nuclei Operator watches for Ingress and VirtualService resources in your Kubernetes cluster and automatically creates security scans for the exposed endpoints. Scan results are stored in custom `NucleiScan` resources, making it easy to track and monitor vulnerabilities across your infrastructure.
### Key Features
- **Automatic Discovery**: Watches Kubernetes Ingress and Istio VirtualService resources for new endpoints
- **Automated Scanning**: Automatically creates and runs Nuclei scans when new endpoints are discovered
- **Scheduled Scans**: Support for cron-based scheduled rescanning
- **Flexible Configuration**: Configurable templates, severity filters, and scan options
- **Native Kubernetes Integration**: Results stored as Kubernetes custom resources
- **Owner References**: Automatic cleanup when source resources are deleted
### How It Works
```
┌─────────────────┐     ┌─────────────────┐     ┌─────────────────┐
│   Ingress /     │────▶│ Nuclei Operator │────▶│   NucleiScan    │
│ VirtualService  │     │   Controllers   │     │    Resource     │
└─────────────────┘     └─────────────────┘     └─────────────────┘
                                 │                       │
                                 ▼                       ▼
                        ┌─────────────────┐     ┌─────────────────┐
                        │  Nuclei Engine  │────▶│  Scan Results   │
                        │   (Scanner)     │     │   (Findings)    │
                        └─────────────────┘     └─────────────────┘
```
1. **Watch**: The operator watches for Ingress and VirtualService resources
2. **Extract**: URLs are extracted from the resource specifications
3. **Create**: A NucleiScan custom resource is created with the target URLs
4. **Scan**: The Nuclei scanner executes security scans against the targets
5. **Store**: Results are stored in the NucleiScan status for easy access
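To make steps 1–3 concrete, here is a heavily simplified, illustrative sketch of an Ingress reconciler along these lines. It is not the operator's actual implementation: the type name, the `-scan` naming convention, and the https-only target extraction are assumptions made for the example.
```go
package controller

import (
	"context"
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	nucleiv1alpha1 "github.com/mortenolsen/nuclei-operator/api/v1alpha1"
)

// sketchIngressReconciler is an illustrative, reduced reconciler showing the
// watch -> extract -> create flow; the real controllers live in internal/controller/.
type sketchIngressReconciler struct {
	client.Client
	Scheme *runtime.Scheme
}

func (r *sketchIngressReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	// 1. Watch: fetch the Ingress that triggered this reconcile.
	var ing networkingv1.Ingress
	if err := r.Get(ctx, req.NamespacedName, &ing); err != nil {
		// Deleted Ingresses are cleaned up via owner references; nothing to do here.
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	// 2. Extract: turn the Ingress rules into target URLs.
	var targets []string
	for _, rule := range ing.Spec.Rules {
		if rule.Host != "" {
			targets = append(targets, fmt.Sprintf("https://%s", rule.Host))
		}
	}
	if len(targets) == 0 {
		return ctrl.Result{}, nil
	}

	// 3. Create: a NucleiScan owned by the Ingress, so deleting the Ingress
	// also removes the scan (the owner-reference cleanup mentioned above).
	scan := &nucleiv1alpha1.NucleiScan{
		ObjectMeta: metav1.ObjectMeta{
			Name:      ing.Name + "-scan",
			Namespace: ing.Namespace,
		},
		Spec: nucleiv1alpha1.NucleiScanSpec{
			SourceRef: nucleiv1alpha1.SourceReference{
				APIVersion: "networking.k8s.io/v1",
				Kind:       "Ingress",
				Name:       ing.Name,
				Namespace:  ing.Namespace,
				UID:        string(ing.UID),
			},
			Targets: targets,
		},
	}
	if err := ctrl.SetControllerReference(&ing, scan, r.Scheme); err != nil {
		return ctrl.Result{}, err
	}
	if err := r.Create(ctx, scan); err != nil && !apierrors.IsAlreadyExists(err) {
		return ctrl.Result{}, err
	}

	// 4./5. Scan and Store are handled by the NucleiScan controller, which runs
	// Nuclei against spec.targets and writes findings into status.
	return ctrl.Result{}, nil
}
```
Making the source resource the controller owner is the design choice behind the automatic cleanup listed under Key Features.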
## Prerequisites
- Kubernetes cluster v1.26+
- kubectl configured to access your cluster
- [Istio](https://istio.io/) (optional, required for VirtualService support)
- Container runtime (Docker, containerd, etc.)
## Installation
### Using kubectl/kustomize
1. **Install the CRDs:**
```bash
make install
```
2. **Deploy the operator:**
```bash
# Using the default image
make deploy IMG=ghcr.io/mortenolsen/nuclei-operator:latest
# Or build and deploy your own image
make docker-build docker-push IMG=<your-registry>/nuclei-operator:tag
make deploy IMG=<your-registry>/nuclei-operator:tag
```
### Using a Single YAML File
Generate and apply a consolidated installation manifest:
```bash
# Generate the installer
make build-installer IMG=<your-registry>/nuclei-operator:tag
# Apply to your cluster
kubectl apply -f dist/install.yaml
```
### Building from Source
```bash
# Clone the repository
git clone https://github.com/mortenolsen/nuclei-operator.git
cd nuclei-operator
# Build the binary
make build
# Build the container image
make docker-build IMG=<your-registry>/nuclei-operator:tag
# Push to your registry
make docker-push IMG=<your-registry>/nuclei-operator:tag
```
## Quick Start
### 1. Deploy the Operator
```bash
make deploy IMG=ghcr.io/mortenolsen/nuclei-operator:latest
```
### 2. Create an Ingress Resource
```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: my-app-ingress
namespace: default
spec:
tls:
- hosts:
- myapp.example.com
secretName: myapp-tls
rules:
- host: myapp.example.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: my-app
port:
number: 80
```
```bash
kubectl apply -f my-ingress.yaml
```
### 3. View the NucleiScan Results
The operator automatically creates a NucleiScan resource:
```bash
# List all NucleiScans
kubectl get nucleiscans
# View detailed scan results
kubectl describe nucleiscan my-app-ingress-scan
# Get scan findings in JSON format
kubectl get nucleiscan my-app-ingress-scan -o jsonpath='{.status.findings}'
```
Example output:
```
NAME                  PHASE       FINDINGS   SOURCE    AGE
my-app-ingress-scan   Completed   3          Ingress   5m
```
## Configuration
### Environment Variables
The operator can be configured using the following environment variables:
| Variable | Description | Default |
|----------|-------------|---------|
| `NUCLEI_BINARY_PATH` | Path to the Nuclei binary | `nuclei` |
| `NUCLEI_TEMPLATES_PATH` | Path to Nuclei templates directory | (uses Nuclei default) |
| `NUCLEI_TIMEOUT` | Default scan timeout | `30m` |
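Inside the scanner these settings typically end up in a small config struct. The sketch below is hypothetical (the struct and helper names are not taken from the operator's code); it only illustrates reading the variables from the table with their documented defaults:
```go
package scanner

import (
	"os"
	"time"
)

// scannerConfig is an illustrative holder for the environment-driven settings
// listed above; the real operator may structure this differently.
type scannerConfig struct {
	BinaryPath    string
	TemplatesPath string
	Timeout       time.Duration
}

// loadScannerConfig reads the NUCLEI_* variables, falling back to the
// documented defaults when they are unset or invalid.
func loadScannerConfig() scannerConfig {
	cfg := scannerConfig{
		BinaryPath: "nuclei",
		Timeout:    30 * time.Minute,
	}
	if v := os.Getenv("NUCLEI_BINARY_PATH"); v != "" {
		cfg.BinaryPath = v
	}
	// Empty means "use Nuclei's own default templates directory".
	cfg.TemplatesPath = os.Getenv("NUCLEI_TEMPLATES_PATH")
	if v := os.Getenv("NUCLEI_TIMEOUT"); v != "" {
		if d, err := time.ParseDuration(v); err == nil {
			cfg.Timeout = d
		}
	}
	return cfg
}
```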
### NucleiScan Spec Options
| Field | Type | Description |
|-------|------|-------------|
| `sourceRef` | SourceReference | Reference to the source Ingress/VirtualService |
| `targets` | []string | List of URLs to scan |
| `templates` | []string | Nuclei templates to use (optional) |
| `severity` | []string | Severity filter: info, low, medium, high, critical |
| `schedule` | string | Cron schedule for periodic scans (optional) |
| `suspend` | bool | Suspend scheduled scans |
### Example NucleiScan
```yaml
apiVersion: nuclei.homelab.mortenolsen.pro/v1alpha1
kind: NucleiScan
metadata:
name: my-security-scan
namespace: default
spec:
sourceRef:
apiVersion: networking.k8s.io/v1
kind: Ingress
name: my-ingress
namespace: default
uid: "abc123"
targets:
- https://myapp.example.com
- https://api.example.com
severity:
- medium
- high
- critical
templates:
- cves/
- vulnerabilities/
schedule: "@every 24h"
suspend: false
```
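How the `schedule` string is evaluated is an implementation detail of the controller. As an illustration only, a descriptor like `@every 24h` can be parsed and turned into a `status.nextScheduledTime` value with a standard cron library such as `robfig/cron/v3` (whether the operator uses this exact library is an assumption):
```go
package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	// ParseStandard accepts 5-field cron specs ("0 3 * * *") as well as
	// descriptors such as "@daily" or "@every 24h".
	sched, err := cron.ParseStandard("@every 24h")
	if err != nil {
		panic(err)
	}
	lastScan := time.Now()
	// A controller could write this value into status.nextScheduledTime
	// after a scan completes, and requeue the NucleiScan until then.
	fmt.Println("next scheduled scan:", sched.Next(lastScan))
}
```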
## CRD Reference
### NucleiScan
The `NucleiScan` custom resource represents a security scan configuration and its results.
**Short names:** `ns`, `nscan`
**Print columns:**
- `Phase`: Current scan phase (Pending, Running, Completed, Failed)
- `Findings`: Total number of findings
- `Source`: Source resource kind (Ingress/VirtualService)
- `Age`: Resource age
For detailed API documentation, see [docs/api.md](docs/api.md).
## Development
### Prerequisites
- Go 1.24+
- Docker or Podman
- kubectl
- Access to a Kubernetes cluster (kind, minikube, or remote)
### Building the Project
```bash
# Generate manifests and code
make manifests generate
# Build the binary
make build
# Run tests
make test
# Run linter
make lint
```
### Running Locally
```bash
# Install CRDs
make install
# Run the operator locally (outside the cluster)
make run
```
### Running Tests
```bash
# Unit tests
make test
# End-to-end tests (requires Kind)
make test-e2e
```
### Project Structure
```
nuclei-operator/
├── api/v1alpha1/ # CRD type definitions
├── cmd/ # Main entry point
├── config/ # Kubernetes manifests
│ ├── crd/ # CRD definitions
│ ├── default/ # Default kustomization
│ ├── manager/ # Operator deployment
│ ├── rbac/ # RBAC configuration
│ └── samples/ # Example resources
├── internal/
│ ├── controller/ # Reconciliation logic
│ └── scanner/ # Nuclei scan execution
└── test/ # Test suites
```
## Troubleshooting
### Common Issues
#### Operator not creating NucleiScan resources
1. Check operator logs:
```bash
kubectl logs -n nuclei-operator-system deployment/nuclei-operator-controller-manager
```
2. Verify RBAC permissions:
```bash
kubectl auth can-i list ingresses --as=system:serviceaccount:nuclei-operator-system:nuclei-operator-controller-manager
```
3. Ensure the Ingress has valid hosts defined
#### Scans stuck in Pending/Running state
1. Check if Nuclei binary is available in the container
2. Verify network connectivity to scan targets
3. Check for timeout issues in operator logs
#### No findings in completed scans
1. Verify targets are accessible from the operator pod
2. Check if severity filters are too restrictive
3. Ensure Nuclei templates are properly configured
### Debugging Tips
```bash
# View operator logs
kubectl logs -f -n nuclei-operator-system deployment/nuclei-operator-controller-manager
# Check NucleiScan status
kubectl describe nucleiscan <scan-name>
# View events
kubectl get events --field-selector involvedObject.kind=NucleiScan
# Check operator metrics
kubectl port-forward -n nuclei-operator-system svc/nuclei-operator-controller-manager-metrics-service 8080:8080
curl localhost:8080/metrics
```
## Uninstallation
```bash
# Remove all NucleiScan resources
kubectl delete nucleiscans --all --all-namespaces
# Undeploy the operator
make undeploy
# Remove CRDs
make uninstall
```
## Documentation
- [Architecture](ARCHITECTURE.md) - Detailed architecture documentation
- [API Reference](docs/api.md) - Complete CRD API reference
- [User Guide](docs/user-guide.md) - Detailed usage instructions
- [Contributing](CONTRIBUTING.md) - Contribution guidelines
## Contributing
Contributions are welcome! Please see [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines.
## License
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
## Acknowledgments
- [Nuclei](https://github.com/projectdiscovery/nuclei) - The vulnerability scanner powering this operator
- [Kubebuilder](https://book.kubebuilder.io/) - Framework used to build this operator
- [controller-runtime](https://github.com/kubernetes-sigs/controller-runtime) - Kubernetes controller library


@@ -0,0 +1,36 @@
/*
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1alpha1 contains API Schema definitions for the nuclei v1alpha1 API group.
// +kubebuilder:object:generate=true
// +groupName=nuclei.homelab.mortenolsen.pro
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
// GroupVersion is group version used to register these objects.
GroupVersion = schema.GroupVersion{Group: "nuclei.homelab.mortenolsen.pro", Version: "v1alpha1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
)


@@ -0,0 +1,224 @@
/*
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// SourceReference identifies the Ingress or VirtualService that triggered this scan
type SourceReference struct {
// APIVersion of the source resource
// +kubebuilder:validation:Required
APIVersion string `json:"apiVersion"`
// Kind of the source resource - Ingress or VirtualService
// +kubebuilder:validation:Enum=Ingress;VirtualService
Kind string `json:"kind"`
// Name of the source resource
// +kubebuilder:validation:Required
Name string `json:"name"`
// Namespace of the source resource
// +kubebuilder:validation:Required
Namespace string `json:"namespace"`
// UID of the source resource for owner reference
// +kubebuilder:validation:Required
UID string `json:"uid"`
}
// NucleiScanSpec defines the desired state of NucleiScan
type NucleiScanSpec struct {
// SourceRef references the Ingress or VirtualService being scanned
// +kubebuilder:validation:Required
SourceRef SourceReference `json:"sourceRef"`
// Targets is the list of URLs to scan, extracted from the source resource
// +kubebuilder:validation:Required
// +kubebuilder:validation:MinItems=1
Targets []string `json:"targets"`
// Templates specifies which Nuclei templates to use
// If empty, uses default templates
// +optional
Templates []string `json:"templates,omitempty"`
// Severity filters scan results by severity level
// +kubebuilder:validation:Enum=info;low;medium;high;critical
// +optional
Severity []string `json:"severity,omitempty"`
// Schedule for periodic rescanning in cron format
// If empty, scan runs once
// +optional
Schedule string `json:"schedule,omitempty"`
// Suspend prevents scheduled scans from running
// +optional
Suspend bool `json:"suspend,omitempty"`
}
// ScanPhase represents the current phase of the scan
// +kubebuilder:validation:Enum=Pending;Running;Completed;Failed
type ScanPhase string
const (
ScanPhasePending ScanPhase = "Pending"
ScanPhaseRunning ScanPhase = "Running"
ScanPhaseCompleted ScanPhase = "Completed"
ScanPhaseFailed ScanPhase = "Failed"
)
// Finding represents a single Nuclei scan finding
type Finding struct {
// TemplateID is the Nuclei template identifier
TemplateID string `json:"templateId"`
// TemplateName is the human-readable template name
// +optional
TemplateName string `json:"templateName,omitempty"`
// Severity of the finding
Severity string `json:"severity"`
// Type of the finding - http, dns, ssl, etc.
// +optional
Type string `json:"type,omitempty"`
// Host that was scanned
Host string `json:"host"`
// MatchedAt is the specific URL or endpoint where the issue was found
// +optional
MatchedAt string `json:"matchedAt,omitempty"`
// ExtractedResults contains any data extracted by the template
// +optional
ExtractedResults []string `json:"extractedResults,omitempty"`
// Description provides details about the finding
// +optional
Description string `json:"description,omitempty"`
// Reference contains URLs to additional information about the finding
// +optional
Reference []string `json:"reference,omitempty"`
// Tags associated with the finding
// +optional
Tags []string `json:"tags,omitempty"`
// Timestamp when the finding was discovered
Timestamp metav1.Time `json:"timestamp"`
// Metadata contains additional template metadata
// +kubebuilder:pruning:PreserveUnknownFields
// +optional
Metadata *runtime.RawExtension `json:"metadata,omitempty"`
}
// ScanSummary provides aggregated statistics about the scan
type ScanSummary struct {
// TotalFindings is the total number of findings
TotalFindings int `json:"totalFindings"`
// FindingsBySeverity breaks down findings by severity level
// +optional
FindingsBySeverity map[string]int `json:"findingsBySeverity,omitempty"`
// TargetsScanned is the number of targets that were scanned
TargetsScanned int `json:"targetsScanned"`
// DurationSeconds is the duration of the scan in seconds
// +optional
DurationSeconds int64 `json:"durationSeconds,omitempty"`
}
// NucleiScanStatus defines the observed state of NucleiScan
type NucleiScanStatus struct {
// Phase represents the current scan phase
// +optional
Phase ScanPhase `json:"phase,omitempty"`
// Conditions represent the latest available observations
// +listType=map
// +listMapKey=type
// +optional
Conditions []metav1.Condition `json:"conditions,omitempty"`
// LastScanTime is when the last scan was initiated
// +optional
LastScanTime *metav1.Time `json:"lastScanTime,omitempty"`
// CompletionTime is when the last scan completed
// +optional
CompletionTime *metav1.Time `json:"completionTime,omitempty"`
// NextScheduledTime is when the next scheduled scan will run
// +optional
NextScheduledTime *metav1.Time `json:"nextScheduledTime,omitempty"`
// Summary provides aggregated scan statistics
// +optional
Summary *ScanSummary `json:"summary,omitempty"`
// Findings contains the array of scan results from Nuclei JSONL output
// Each element is a parsed JSON object from Nuclei output
// +optional
Findings []Finding `json:"findings,omitempty"`
// LastError contains the error message if the scan failed
// +optional
LastError string `json:"lastError,omitempty"`
// ObservedGeneration is the generation observed by the controller
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:shortName=ns;nscan
// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase`
// +kubebuilder:printcolumn:name="Findings",type=integer,JSONPath=`.status.summary.totalFindings`
// +kubebuilder:printcolumn:name="Source",type=string,JSONPath=`.spec.sourceRef.kind`
// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`
// NucleiScan is the Schema for the nucleiscans API
type NucleiScan struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec NucleiScanSpec `json:"spec,omitempty"`
Status NucleiScanStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// NucleiScanList contains a list of NucleiScan
type NucleiScanList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []NucleiScan `json:"items"`
}
func init() {
SchemeBuilder.Register(&NucleiScan{}, &NucleiScanList{})
}


@@ -0,0 +1,235 @@
//go:build !ignore_autogenerated
/*
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1alpha1
import (
"k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Finding) DeepCopyInto(out *Finding) {
*out = *in
if in.ExtractedResults != nil {
in, out := &in.ExtractedResults, &out.ExtractedResults
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Reference != nil {
in, out := &in.Reference, &out.Reference
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Tags != nil {
in, out := &in.Tags, &out.Tags
*out = make([]string, len(*in))
copy(*out, *in)
}
in.Timestamp.DeepCopyInto(&out.Timestamp)
if in.Metadata != nil {
in, out := &in.Metadata, &out.Metadata
*out = new(runtime.RawExtension)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Finding.
func (in *Finding) DeepCopy() *Finding {
if in == nil {
return nil
}
out := new(Finding)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NucleiScan) DeepCopyInto(out *NucleiScan) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NucleiScan.
func (in *NucleiScan) DeepCopy() *NucleiScan {
if in == nil {
return nil
}
out := new(NucleiScan)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NucleiScan) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NucleiScanList) DeepCopyInto(out *NucleiScanList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]NucleiScan, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NucleiScanList.
func (in *NucleiScanList) DeepCopy() *NucleiScanList {
if in == nil {
return nil
}
out := new(NucleiScanList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NucleiScanList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NucleiScanSpec) DeepCopyInto(out *NucleiScanSpec) {
*out = *in
out.SourceRef = in.SourceRef
if in.Targets != nil {
in, out := &in.Targets, &out.Targets
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Templates != nil {
in, out := &in.Templates, &out.Templates
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Severity != nil {
in, out := &in.Severity, &out.Severity
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NucleiScanSpec.
func (in *NucleiScanSpec) DeepCopy() *NucleiScanSpec {
if in == nil {
return nil
}
out := new(NucleiScanSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NucleiScanStatus) DeepCopyInto(out *NucleiScanStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]v1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.LastScanTime != nil {
in, out := &in.LastScanTime, &out.LastScanTime
*out = (*in).DeepCopy()
}
if in.CompletionTime != nil {
in, out := &in.CompletionTime, &out.CompletionTime
*out = (*in).DeepCopy()
}
if in.NextScheduledTime != nil {
in, out := &in.NextScheduledTime, &out.NextScheduledTime
*out = (*in).DeepCopy()
}
if in.Summary != nil {
in, out := &in.Summary, &out.Summary
*out = new(ScanSummary)
(*in).DeepCopyInto(*out)
}
if in.Findings != nil {
in, out := &in.Findings, &out.Findings
*out = make([]Finding, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NucleiScanStatus.
func (in *NucleiScanStatus) DeepCopy() *NucleiScanStatus {
if in == nil {
return nil
}
out := new(NucleiScanStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScanSummary) DeepCopyInto(out *ScanSummary) {
*out = *in
if in.FindingsBySeverity != nil {
in, out := &in.FindingsBySeverity, &out.FindingsBySeverity
*out = make(map[string]int, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScanSummary.
func (in *ScanSummary) DeepCopy() *ScanSummary {
if in == nil {
return nil
}
out := new(ScanSummary)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SourceReference) DeepCopyInto(out *SourceReference) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceReference.
func (in *SourceReference) DeepCopy() *SourceReference {
if in == nil {
return nil
}
out := new(SourceReference)
in.DeepCopyInto(out)
return out
}

cmd/main.go Normal file

@@ -0,0 +1,222 @@
/*
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"crypto/tls"
"flag"
"os"
// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
// to ensure that exec-entrypoint and run can make use of them.
_ "k8s.io/client-go/plugin/pkg/client/auth"
istionetworkingv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/controller-runtime/pkg/metrics/filters"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"sigs.k8s.io/controller-runtime/pkg/webhook"
nucleiv1alpha1 "github.com/mortenolsen/nuclei-operator/api/v1alpha1"
"github.com/mortenolsen/nuclei-operator/internal/controller"
// +kubebuilder:scaffold:imports
)
var (
scheme = runtime.NewScheme()
setupLog = ctrl.Log.WithName("setup")
)
func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(nucleiv1alpha1.AddToScheme(scheme))
utilruntime.Must(networkingv1.AddToScheme(scheme))
utilruntime.Must(istionetworkingv1beta1.AddToScheme(scheme))
// +kubebuilder:scaffold:scheme
}
// nolint:gocyclo
func main() {
var metricsAddr string
var metricsCertPath, metricsCertName, metricsCertKey string
var webhookCertPath, webhookCertName, webhookCertKey string
var enableLeaderElection bool
var probeAddr string
var secureMetrics bool
var enableHTTP2 bool
var tlsOpts []func(*tls.Config)
flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+
"Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.")
flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
flag.BoolVar(&enableLeaderElection, "leader-elect", false,
"Enable leader election for controller manager. "+
"Enabling this will ensure there is only one active controller manager.")
flag.BoolVar(&secureMetrics, "metrics-secure", true,
"If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead.")
flag.StringVar(&webhookCertPath, "webhook-cert-path", "", "The directory that contains the webhook certificate.")
flag.StringVar(&webhookCertName, "webhook-cert-name", "tls.crt", "The name of the webhook certificate file.")
flag.StringVar(&webhookCertKey, "webhook-cert-key", "tls.key", "The name of the webhook key file.")
flag.StringVar(&metricsCertPath, "metrics-cert-path", "",
"The directory that contains the metrics server certificate.")
flag.StringVar(&metricsCertName, "metrics-cert-name", "tls.crt", "The name of the metrics server certificate file.")
flag.StringVar(&metricsCertKey, "metrics-cert-key", "tls.key", "The name of the metrics server key file.")
flag.BoolVar(&enableHTTP2, "enable-http2", false,
"If set, HTTP/2 will be enabled for the metrics and webhook servers")
opts := zap.Options{
Development: true,
}
opts.BindFlags(flag.CommandLine)
flag.Parse()
ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
// if the enable-http2 flag is false (the default), http/2 should be disabled
// due to its vulnerabilities. More specifically, disabling http/2 will
// prevent the servers from being vulnerable to the HTTP/2 Stream Cancellation and
// Rapid Reset CVEs. For more information see:
// - https://github.com/advisories/GHSA-qppj-fm5r-hxr3
// - https://github.com/advisories/GHSA-4374-p667-p6c8
disableHTTP2 := func(c *tls.Config) {
setupLog.Info("disabling http/2")
c.NextProtos = []string{"http/1.1"}
}
if !enableHTTP2 {
tlsOpts = append(tlsOpts, disableHTTP2)
}
// Initial webhook TLS options
webhookTLSOpts := tlsOpts
webhookServerOptions := webhook.Options{
TLSOpts: webhookTLSOpts,
}
if len(webhookCertPath) > 0 {
setupLog.Info("Initializing webhook certificate watcher using provided certificates",
"webhook-cert-path", webhookCertPath, "webhook-cert-name", webhookCertName, "webhook-cert-key", webhookCertKey)
webhookServerOptions.CertDir = webhookCertPath
webhookServerOptions.CertName = webhookCertName
webhookServerOptions.KeyName = webhookCertKey
}
webhookServer := webhook.NewServer(webhookServerOptions)
// Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server.
// More info:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.22.4/pkg/metrics/server
// - https://book.kubebuilder.io/reference/metrics.html
metricsServerOptions := metricsserver.Options{
BindAddress: metricsAddr,
SecureServing: secureMetrics,
TLSOpts: tlsOpts,
}
if secureMetrics {
// FilterProvider is used to protect the metrics endpoint with authn/authz.
// These configurations ensure that only authorized users and service accounts
// can access the metrics endpoint. The RBAC are configured in 'config/rbac/kustomization.yaml'. More info:
// https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.22.4/pkg/metrics/filters#WithAuthenticationAndAuthorization
metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization
}
// If the certificate is not specified, controller-runtime will automatically
// generate self-signed certificates for the metrics server. While convenient for development and testing,
// this setup is not recommended for production.
//
// TODO(user): If you enable certManager, uncomment the following lines:
// - [METRICS-WITH-CERTS] at config/default/kustomization.yaml to generate and use certificates
// managed by cert-manager for the metrics server.
// - [PROMETHEUS-WITH-CERTS] at config/prometheus/kustomization.yaml for TLS certification.
if len(metricsCertPath) > 0 {
setupLog.Info("Initializing metrics certificate watcher using provided certificates",
"metrics-cert-path", metricsCertPath, "metrics-cert-name", metricsCertName, "metrics-cert-key", metricsCertKey)
metricsServerOptions.CertDir = metricsCertPath
metricsServerOptions.CertName = metricsCertName
metricsServerOptions.KeyName = metricsCertKey
}
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
Scheme: scheme,
Metrics: metricsServerOptions,
WebhookServer: webhookServer,
HealthProbeBindAddress: probeAddr,
LeaderElection: enableLeaderElection,
LeaderElectionID: "501467ce.homelab.mortenolsen.pro",
// LeaderElectionReleaseOnCancel defines whether the leader should step down voluntarily
// when the Manager ends. This requires the binary to exit immediately when the
// Manager is stopped; otherwise this setting is unsafe. Enabling it significantly
// speeds up voluntary leader transitions, as the new leader does not have to wait
// the LeaseDuration before taking over.
//
// In the default scaffold the program exits immediately after the manager stops,
// so it would be fine to enable this option. However, if you perform any work,
// such as cleanup, after the manager stops, enabling it may be unsafe.
// LeaderElectionReleaseOnCancel: true,
})
if err != nil {
setupLog.Error(err, "unable to start manager")
os.Exit(1)
}
if err := (&controller.NucleiScanReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "NucleiScan")
os.Exit(1)
}
if err := (&controller.IngressReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Ingress")
os.Exit(1)
}
if err := (&controller.VirtualServiceReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "VirtualService")
os.Exit(1)
}
// +kubebuilder:scaffold:builder
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
setupLog.Error(err, "unable to set up health check")
os.Exit(1)
}
if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
setupLog.Error(err, "unable to set up ready check")
os.Exit(1)
}
setupLog.Info("starting manager")
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
setupLog.Error(err, "problem running manager")
os.Exit(1)
}
}

View File

@@ -0,0 +1,306 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.19.0
name: nucleiscans.nuclei.homelab.mortenolsen.pro
spec:
group: nuclei.homelab.mortenolsen.pro
names:
kind: NucleiScan
listKind: NucleiScanList
plural: nucleiscans
shortNames:
- ns
- nscan
singular: nucleiscan
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .status.phase
name: Phase
type: string
- jsonPath: .status.summary.totalFindings
name: Findings
type: integer
- jsonPath: .spec.sourceRef.kind
name: Source
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1alpha1
schema:
openAPIV3Schema:
description: NucleiScan is the Schema for the nucleiscans API
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: NucleiScanSpec defines the desired state of NucleiScan
properties:
schedule:
description: |-
Schedule for periodic rescanning in cron format
If empty, scan runs once
type: string
severity:
description: Severity filters scan results by severity level
enum:
- info
- low
- medium
- high
- critical
items:
type: string
type: array
sourceRef:
description: SourceRef references the Ingress or VirtualService being
scanned
properties:
apiVersion:
description: APIVersion of the source resource
type: string
kind:
description: Kind of the source resource - Ingress or VirtualService
enum:
- Ingress
- VirtualService
type: string
name:
description: Name of the source resource
type: string
namespace:
description: Namespace of the source resource
type: string
uid:
description: UID of the source resource for owner reference
type: string
required:
- apiVersion
- kind
- name
- namespace
- uid
type: object
suspend:
description: Suspend prevents scheduled scans from running
type: boolean
targets:
description: Targets is the list of URLs to scan, extracted from the
source resource
items:
type: string
minItems: 1
type: array
templates:
description: |-
Templates specifies which Nuclei templates to use
If empty, uses default templates
items:
type: string
type: array
required:
- sourceRef
- targets
type: object
status:
description: NucleiScanStatus defines the observed state of NucleiScan
properties:
completionTime:
description: CompletionTime is when the last scan completed
format: date-time
type: string
conditions:
description: Conditions represent the latest available observations
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
properties:
lastTransitionTime:
description: |-
lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
message is a human readable message indicating details about the transition.
This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: |-
observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: |-
reason contains a programmatic identifier indicating the reason for the condition's last transition.
Producers of specific condition types may define expected values and meanings for this field,
and whether the values are considered a guaranteed API.
The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
x-kubernetes-list-map-keys:
- type
x-kubernetes-list-type: map
findings:
description: |-
Findings contains the array of scan results from Nuclei JSONL output
Each element is a parsed JSON object from Nuclei output
items:
description: Finding represents a single Nuclei scan finding
properties:
description:
description: Description provides details about the finding
type: string
extractedResults:
description: ExtractedResults contains any data extracted by
the template
items:
type: string
type: array
host:
description: Host that was scanned
type: string
matchedAt:
description: MatchedAt is the specific URL or endpoint where
the issue was found
type: string
metadata:
description: Metadata contains additional template metadata
type: object
x-kubernetes-preserve-unknown-fields: true
reference:
description: Reference contains URLs to additional information
about the finding
items:
type: string
type: array
severity:
description: Severity of the finding
type: string
tags:
description: Tags associated with the finding
items:
type: string
type: array
templateId:
description: TemplateID is the Nuclei template identifier
type: string
templateName:
description: TemplateName is the human-readable template name
type: string
timestamp:
description: Timestamp when the finding was discovered
format: date-time
type: string
type:
description: Type of the finding - http, dns, ssl, etc.
type: string
required:
- host
- severity
- templateId
- timestamp
type: object
type: array
lastError:
description: LastError contains the error message if the scan failed
type: string
lastScanTime:
description: LastScanTime is when the last scan was initiated
format: date-time
type: string
nextScheduledTime:
description: NextScheduledTime is when the next scheduled scan will
run
format: date-time
type: string
observedGeneration:
description: ObservedGeneration is the generation observed by the
controller
format: int64
type: integer
phase:
description: Phase represents the current scan phase
enum:
- Pending
- Running
- Completed
- Failed
type: string
summary:
description: Summary provides aggregated scan statistics
properties:
durationSeconds:
description: DurationSeconds is the duration of the scan in seconds
format: int64
type: integer
findingsBySeverity:
additionalProperties:
type: integer
description: FindingsBySeverity breaks down findings by severity
level
type: object
targetsScanned:
description: TargetsScanned is the number of targets that were
scanned
type: integer
totalFindings:
description: TotalFindings is the total number of findings
type: integer
required:
- targetsScanned
- totalFindings
type: object
type: object
type: object
served: true
storage: true
subresources:
status: {}

View File

@@ -0,0 +1,16 @@
# This kustomization.yaml is not intended to be run by itself,
# since it depends on service name and namespace that are out of this kustomize package.
# It should be run by config/default
resources:
- bases/nuclei.homelab.mortenolsen.pro_nucleiscans.yaml
# +kubebuilder:scaffold:crdkustomizeresource
patches:
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
# patches here are for enabling the conversion webhook for each CRD
# +kubebuilder:scaffold:crdkustomizewebhookpatch
# [WEBHOOK] To enable webhook, uncomment the following section
# the following config is for teaching kustomize how to do kustomization for CRDs.
#configurations:
#- kustomizeconfig.yaml

View File

@@ -0,0 +1,19 @@
# This file is for teaching kustomize how to substitute name and namespace reference in CRD
nameReference:
- kind: Service
version: v1
fieldSpecs:
- kind: CustomResourceDefinition
version: v1
group: apiextensions.k8s.io
path: spec/conversion/webhook/clientConfig/service/name
namespace:
- kind: CustomResourceDefinition
version: v1
group: apiextensions.k8s.io
path: spec/conversion/webhook/clientConfig/service/namespace
create: false
varReference:
- path: metadata/annotations

View File

@@ -0,0 +1,30 @@
# This patch adds the args, volumes, and ports to allow the manager to use the metrics-server certs.
# Add the volumeMount for the metrics-server certs
- op: add
path: /spec/template/spec/containers/0/volumeMounts/-
value:
mountPath: /tmp/k8s-metrics-server/metrics-certs
name: metrics-certs
readOnly: true
# Add the --metrics-cert-path argument for the metrics server
- op: add
path: /spec/template/spec/containers/0/args/-
value: --metrics-cert-path=/tmp/k8s-metrics-server/metrics-certs
# Add the metrics-server certs volume configuration
- op: add
path: /spec/template/spec/volumes/-
value:
name: metrics-certs
secret:
secretName: metrics-server-cert
optional: false
items:
- key: ca.crt
path: ca.crt
- key: tls.crt
path: tls.crt
- key: tls.key
path: tls.key

View File

@@ -0,0 +1,234 @@
# Adds namespace to all resources.
namespace: nuclei-operator-system
# The value of this field is prepended to the
# names of all resources, e.g. a deployment named
# "wordpress" becomes "alices-wordpress".
# Note that it should also match the prefix (the text before '-') of the namespace
# field above.
namePrefix: nuclei-operator-
# Labels to add to all resources and selectors.
#labels:
#- includeSelectors: true
# pairs:
# someName: someValue
resources:
- ../crd
- ../rbac
- ../manager
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- ../webhook
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required.
#- ../certmanager
# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
#- ../prometheus
# [METRICS] Expose the controller manager metrics service.
- metrics_service.yaml
# [NETWORK POLICY] Protect the /metrics endpoint and Webhook Server with NetworkPolicy.
# Only Pod(s) running in a namespace labeled with 'metrics: enabled' will be able to gather the metrics.
# Only CR(s) which require webhooks and are applied in namespaces labeled with 'webhooks: enabled' will
# be able to communicate with the Webhook Server.
#- ../network-policy
# Uncomment the patches line if you enable Metrics
patches:
# [METRICS] The following patch will enable the metrics endpoint using HTTPS and the port :8443.
# More info: https://book.kubebuilder.io/reference/metrics
- path: manager_metrics_patch.yaml
target:
kind: Deployment
# Uncomment the patches line if you enable Metrics and CertManager
# [METRICS-WITH-CERTS] To enable metrics protected with certManager, uncomment the following line.
# This patch will protect the metrics with certManager self-signed certs.
#- path: cert_metrics_manager_patch.yaml
# target:
# kind: Deployment
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- path: manager_webhook_patch.yaml
# target:
# kind: Deployment
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
# Uncomment the following replacements to add the cert-manager CA injection annotations
#replacements:
# - source: # Uncomment the following block to enable certificates for metrics
# kind: Service
# version: v1
# name: controller-manager-metrics-service
# fieldPath: metadata.name
# targets:
# - select:
# kind: Certificate
# group: cert-manager.io
# version: v1
# name: metrics-certs
# fieldPaths:
# - spec.dnsNames.0
# - spec.dnsNames.1
# options:
# delimiter: '.'
# index: 0
# create: true
# - select: # Uncomment the following to set the Service name for TLS config in Prometheus ServiceMonitor
# kind: ServiceMonitor
# group: monitoring.coreos.com
# version: v1
# name: controller-manager-metrics-monitor
# fieldPaths:
# - spec.endpoints.0.tlsConfig.serverName
# options:
# delimiter: '.'
# index: 0
# create: true
# - source:
# kind: Service
# version: v1
# name: controller-manager-metrics-service
# fieldPath: metadata.namespace
# targets:
# - select:
# kind: Certificate
# group: cert-manager.io
# version: v1
# name: metrics-certs
# fieldPaths:
# - spec.dnsNames.0
# - spec.dnsNames.1
# options:
# delimiter: '.'
# index: 1
# create: true
# - select: # Uncomment the following to set the Service namespace for TLS in Prometheus ServiceMonitor
# kind: ServiceMonitor
# group: monitoring.coreos.com
# version: v1
# name: controller-manager-metrics-monitor
# fieldPaths:
# - spec.endpoints.0.tlsConfig.serverName
# options:
# delimiter: '.'
# index: 1
# create: true
# - source: # Uncomment the following block if you have any webhook
# kind: Service
# version: v1
# name: webhook-service
# fieldPath: .metadata.name # Name of the service
# targets:
# - select:
# kind: Certificate
# group: cert-manager.io
# version: v1
# name: serving-cert
# fieldPaths:
# - .spec.dnsNames.0
# - .spec.dnsNames.1
# options:
# delimiter: '.'
# index: 0
# create: true
# - source:
# kind: Service
# version: v1
# name: webhook-service
# fieldPath: .metadata.namespace # Namespace of the service
# targets:
# - select:
# kind: Certificate
# group: cert-manager.io
# version: v1
# name: serving-cert
# fieldPaths:
# - .spec.dnsNames.0
# - .spec.dnsNames.1
# options:
# delimiter: '.'
# index: 1
# create: true
# - source: # Uncomment the following block if you have a ValidatingWebhook (--programmatic-validation)
# kind: Certificate
# group: cert-manager.io
# version: v1
# name: serving-cert # This name should match the one in certificate.yaml
# fieldPath: .metadata.namespace # Namespace of the certificate CR
# targets:
# - select:
# kind: ValidatingWebhookConfiguration
# fieldPaths:
# - .metadata.annotations.[cert-manager.io/inject-ca-from]
# options:
# delimiter: '/'
# index: 0
# create: true
# - source:
# kind: Certificate
# group: cert-manager.io
# version: v1
# name: serving-cert
# fieldPath: .metadata.name
# targets:
# - select:
# kind: ValidatingWebhookConfiguration
# fieldPaths:
# - .metadata.annotations.[cert-manager.io/inject-ca-from]
# options:
# delimiter: '/'
# index: 1
# create: true
# - source: # Uncomment the following block if you have a DefaultingWebhook (--defaulting )
# kind: Certificate
# group: cert-manager.io
# version: v1
# name: serving-cert
# fieldPath: .metadata.namespace # Namespace of the certificate CR
# targets:
# - select:
# kind: MutatingWebhookConfiguration
# fieldPaths:
# - .metadata.annotations.[cert-manager.io/inject-ca-from]
# options:
# delimiter: '/'
# index: 0
# create: true
# - source:
# kind: Certificate
# group: cert-manager.io
# version: v1
# name: serving-cert
# fieldPath: .metadata.name
# targets:
# - select:
# kind: MutatingWebhookConfiguration
# fieldPaths:
# - .metadata.annotations.[cert-manager.io/inject-ca-from]
# options:
# delimiter: '/'
# index: 1
# create: true
# - source: # Uncomment the following block if you have a ConversionWebhook (--conversion)
# kind: Certificate
# group: cert-manager.io
# version: v1
# name: serving-cert
# fieldPath: .metadata.namespace # Namespace of the certificate CR
# targets: # Do not remove or uncomment the following scaffold marker; required to generate code for target CRD.
# +kubebuilder:scaffold:crdkustomizecainjectionns
# - source:
# kind: Certificate
# group: cert-manager.io
# version: v1
# name: serving-cert
# fieldPath: .metadata.name
# targets: # Do not remove or uncomment the following scaffold marker; required to generate code for target CRD.
# +kubebuilder:scaffold:crdkustomizecainjectionname

View File

@@ -0,0 +1,4 @@
# This patch adds the args to allow exposing the metrics endpoint using HTTPS
- op: add
path: /spec/template/spec/containers/0/args/0
value: --metrics-bind-address=:8443

View File

@@ -0,0 +1,18 @@
apiVersion: v1
kind: Service
metadata:
labels:
control-plane: controller-manager
app.kubernetes.io/name: nuclei-operator
app.kubernetes.io/managed-by: kustomize
name: controller-manager-metrics-service
namespace: system
spec:
ports:
- name: https
port: 8443
protocol: TCP
targetPort: 8443
selector:
control-plane: controller-manager
app.kubernetes.io/name: nuclei-operator

View File

@@ -0,0 +1,2 @@
resources:
- manager.yaml

116
config/manager/manager.yaml Normal file
View File

@@ -0,0 +1,116 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
control-plane: controller-manager
app.kubernetes.io/name: nuclei-operator
app.kubernetes.io/managed-by: kustomize
name: system
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
labels:
control-plane: controller-manager
app.kubernetes.io/name: nuclei-operator
app.kubernetes.io/managed-by: kustomize
spec:
selector:
matchLabels:
control-plane: controller-manager
app.kubernetes.io/name: nuclei-operator
replicas: 1
template:
metadata:
annotations:
kubectl.kubernetes.io/default-container: manager
labels:
control-plane: controller-manager
app.kubernetes.io/name: nuclei-operator
spec:
# TODO(user): Uncomment the following code to configure the nodeAffinity expression
# according to the platforms which are supported by your solution.
# It is considered best practice to support multiple architectures. You can
# build your manager image using the makefile target docker-buildx.
# affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: kubernetes.io/arch
# operator: In
# values:
# - amd64
# - arm64
# - ppc64le
# - s390x
# - key: kubernetes.io/os
# operator: In
# values:
# - linux
securityContext:
# Projects are configured by default to adhere to the "restricted" Pod Security Standards.
# This ensures that deployments meet the highest security requirements for Kubernetes.
# For more details, see: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
containers:
- command:
- /manager
args:
- --leader-elect
- --health-probe-bind-address=:8081
image: controller:latest
name: manager
ports: []
env:
- name: NUCLEI_BINARY_PATH
value: "/usr/local/bin/nuclei"
- name: NUCLEI_TEMPLATES_PATH
value: "/nuclei-templates"
- name: NUCLEI_TIMEOUT
value: "30m"
securityContext:
readOnlyRootFilesystem: false # Nuclei needs to write temporary files
allowPrivilegeEscalation: false
runAsNonRoot: true
runAsUser: 65532
capabilities:
drop:
- "ALL"
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
# Resource limits appropriate for running Nuclei scans
resources:
limits:
cpu: "2"
memory: "2Gi"
requests:
cpu: "500m"
memory: "512Mi"
volumeMounts:
- name: nuclei-templates
mountPath: /nuclei-templates
readOnly: true
- name: nuclei-cache
mountPath: /home/nonroot/.nuclei
volumes:
- name: nuclei-templates
emptyDir: {}
- name: nuclei-cache
emptyDir: {}
serviceAccountName: controller-manager
terminationGracePeriodSeconds: 10

View File

@@ -0,0 +1,27 @@
# This NetworkPolicy allows ingress traffic
# from Pods running in namespaces labeled with 'metrics: enabled'. Only Pods in those
# namespaces are able to gather data from the metrics endpoint.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
labels:
app.kubernetes.io/name: nuclei-operator
app.kubernetes.io/managed-by: kustomize
name: allow-metrics-traffic
namespace: system
spec:
podSelector:
matchLabels:
control-plane: controller-manager
app.kubernetes.io/name: nuclei-operator
policyTypes:
- Ingress
ingress:
# This allows ingress traffic from any namespace with the label metrics: enabled
- from:
- namespaceSelector:
matchLabels:
metrics: enabled # Only from namespaces with this label
ports:
- port: 8443
protocol: TCP

View File

@@ -0,0 +1,2 @@
resources:
- allow-metrics-traffic.yaml

View File

@@ -0,0 +1,34 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# Reference the default configuration
resources:
- ../default
# Namespace for production deployment
namespace: nuclei-operator-system
# Common labels for all resources
commonLabels:
environment: production
# Production-specific patches
patches:
- path: manager_patch.yaml
target:
kind: Deployment
name: controller-manager
# Image configuration for production
images:
- name: controller
newName: ghcr.io/mortenolsen/nuclei-operator
newTag: latest
# ConfigMap generator for production settings
configMapGenerator:
- name: nuclei-config
literals:
- NUCLEI_TIMEOUT=60m
- NUCLEI_RATE_LIMIT=150
- NUCLEI_BULK_SIZE=25

View File

@@ -0,0 +1,67 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
spec:
# Production replica count for high availability
replicas: 2
template:
spec:
containers:
- name: manager
# Higher resource limits for production workloads
resources:
limits:
cpu: "4"
memory: "4Gi"
requests:
cpu: "1"
memory: "1Gi"
env:
# Production environment variables
- name: NUCLEI_BINARY_PATH
value: "/usr/local/bin/nuclei"
- name: NUCLEI_TEMPLATES_PATH
value: "/nuclei-templates"
- name: NUCLEI_TIMEOUT
value: "60m"
- name: NUCLEI_RATE_LIMIT
value: "150"
- name: NUCLEI_BULK_SIZE
value: "25"
- name: NUCLEI_CONCURRENCY
value: "25"
# Enable metrics
- name: ENABLE_METRICS
value: "true"
# Production volume mounts with persistent templates
volumeMounts:
- name: nuclei-templates
mountPath: /nuclei-templates
readOnly: true
- name: nuclei-cache
mountPath: /home/nonroot/.nuclei
# Production volumes - consider using PVC for templates in production
volumes:
- name: nuclei-templates
emptyDir: {}
- name: nuclei-cache
emptyDir: {}
# Pod anti-affinity for high availability
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchLabels:
control-plane: controller-manager
topologyKey: kubernetes.io/hostname
# Topology spread for better distribution
topologySpreadConstraints:
- maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
control-plane: controller-manager

View File

@@ -0,0 +1,11 @@
resources:
- monitor.yaml
# [PROMETHEUS-WITH-CERTS] The following patch configures the ServiceMonitor in ../prometheus
# to securely reference certificates created and managed by cert-manager.
# Additionally, ensure that you uncomment the [METRICS WITH CERTMANAGER] patch under config/default/kustomization.yaml
# to mount the "metrics-server-cert" secret in the Manager Deployment.
#patches:
# - path: monitor_tls_patch.yaml
# target:
# kind: ServiceMonitor

View File

@@ -0,0 +1,27 @@
# Prometheus Monitor Service (Metrics)
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
control-plane: controller-manager
app.kubernetes.io/name: nuclei-operator
app.kubernetes.io/managed-by: kustomize
name: controller-manager-metrics-monitor
namespace: system
spec:
endpoints:
- path: /metrics
port: https # Ensure this is the name of the port that exposes HTTPS metrics
scheme: https
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
tlsConfig:
# TODO(user): The option insecureSkipVerify: true is not recommended for production since it disables
# certificate verification, exposing the system to potential man-in-the-middle attacks.
# For production environments, it is recommended to use cert-manager for automatic TLS certificate management.
# To apply this configuration, enable cert-manager and use the patch located at config/prometheus/servicemonitor_tls_patch.yaml,
# which securely references the certificate from the 'metrics-server-cert' secret.
insecureSkipVerify: true
selector:
matchLabels:
control-plane: controller-manager
app.kubernetes.io/name: nuclei-operator

View File

@@ -0,0 +1,19 @@
# Patch for Prometheus ServiceMonitor to enable secure TLS configuration
# using certificates managed by cert-manager
- op: replace
path: /spec/endpoints/0/tlsConfig
value:
# SERVICE_NAME and SERVICE_NAMESPACE will be substituted by kustomize
serverName: SERVICE_NAME.SERVICE_NAMESPACE.svc
insecureSkipVerify: false
ca:
secret:
name: metrics-server-cert
key: ca.crt
cert:
secret:
name: metrics-server-cert
key: tls.crt
keySecret:
name: metrics-server-cert
key: tls.key

View File

@@ -0,0 +1,28 @@
resources:
# All RBAC will be applied under this service account in
# the deployment namespace. You may comment out this resource
# if your manager will use a service account that exists at
# runtime. Be sure to update RoleBinding and ClusterRoleBinding
# subjects if changing service account names.
- service_account.yaml
- role.yaml
- role_binding.yaml
- leader_election_role.yaml
- leader_election_role_binding.yaml
# The following RBAC configurations are used to protect
# the metrics endpoint with authn/authz. These configurations
# ensure that only authorized users and service accounts
# can access the metrics endpoint. Comment the following
# permissions if you want to disable this protection.
# More info: https://book.kubebuilder.io/reference/metrics.html
- metrics_auth_role.yaml
- metrics_auth_role_binding.yaml
- metrics_reader_role.yaml
# For each CRD, "Admin", "Editor" and "Viewer" roles are scaffolded by
# default, aiding admins in cluster management. Those roles are
# not used by the nuclei-operator itself. You can comment out the following lines
# if you do not want those helpers to be installed with your project.
- nucleiscan_admin_role.yaml
- nucleiscan_editor_role.yaml
- nucleiscan_viewer_role.yaml

View File

@@ -0,0 +1,40 @@
# permissions to do leader election.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app.kubernetes.io/name: nuclei-operator
app.kubernetes.io/managed-by: kustomize
name: leader-election-role
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch

View File

@@ -0,0 +1,15 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/name: nuclei-operator
app.kubernetes.io/managed-by: kustomize
name: leader-election-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: leader-election-role
subjects:
- kind: ServiceAccount
name: controller-manager
namespace: system

View File

@@ -0,0 +1,17 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: metrics-auth-role
rules:
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create

View File

@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: metrics-auth-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: metrics-auth-role
subjects:
- kind: ServiceAccount
name: controller-manager
namespace: system

View File

@@ -0,0 +1,9 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: metrics-reader
rules:
- nonResourceURLs:
- "/metrics"
verbs:
- get

View File

@@ -0,0 +1,27 @@
# This rule is not used by the project nuclei-operator itself.
# It is provided to allow the cluster admin to help manage permissions for users.
#
# Grants full permissions ('*') over the nuclei.homelab.mortenolsen.pro API group.
# This role is intended for users authorized to modify roles and bindings within the cluster,
# enabling them to delegate specific permissions to other users or groups as needed.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/name: nuclei-operator
app.kubernetes.io/managed-by: kustomize
name: nucleiscan-admin-role
rules:
- apiGroups:
- nuclei.homelab.mortenolsen.pro
resources:
- nucleiscans
verbs:
- '*'
- apiGroups:
- nuclei.homelab.mortenolsen.pro
resources:
- nucleiscans/status
verbs:
- get

View File

@@ -0,0 +1,33 @@
# This rule is not used by the project nuclei-operator itself.
# It is provided to allow the cluster admin to help manage permissions for users.
#
# Grants permissions to create, update, and delete resources within the nuclei.homelab.mortenolsen.pro API group.
# This role is intended for users who need to manage these resources
# but should not control RBAC or manage permissions for others.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/name: nuclei-operator
app.kubernetes.io/managed-by: kustomize
name: nucleiscan-editor-role
rules:
- apiGroups:
- nuclei.homelab.mortenolsen.pro
resources:
- nucleiscans
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- nuclei.homelab.mortenolsen.pro
resources:
- nucleiscans/status
verbs:
- get

View File

@@ -0,0 +1,29 @@
# This rule is not used by the project nuclei-operator itself.
# It is provided to allow the cluster admin to help manage permissions for users.
#
# Grants read-only access to nuclei.homelab.mortenolsen.pro resources.
# This role is intended for users who need visibility into these resources
# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/name: nuclei-operator
app.kubernetes.io/managed-by: kustomize
name: nucleiscan-viewer-role
rules:
- apiGroups:
- nuclei.homelab.mortenolsen.pro
resources:
- nucleiscans
verbs:
- get
- list
- watch
- apiGroups:
- nuclei.homelab.mortenolsen.pro
resources:
- nucleiscans/status
verbs:
- get

67
config/rbac/role.yaml Normal file
View File

@@ -0,0 +1,67 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: manager-role
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- networking.istio.io
resources:
- virtualservices
verbs:
- get
- list
- watch
- apiGroups:
- networking.istio.io
resources:
- virtualservices/status
verbs:
- get
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- get
- apiGroups:
- nuclei.homelab.mortenolsen.pro
resources:
- nucleiscans
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- nuclei.homelab.mortenolsen.pro
resources:
- nucleiscans/finalizers
verbs:
- update
- apiGroups:
- nuclei.homelab.mortenolsen.pro
resources:
- nucleiscans/status
verbs:
- get
- patch
- update

View File

@@ -0,0 +1,15 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/name: nuclei-operator
app.kubernetes.io/managed-by: kustomize
name: manager-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: manager-role
subjects:
- kind: ServiceAccount
name: controller-manager
namespace: system

View File

@@ -0,0 +1,8 @@
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/name: nuclei-operator
app.kubernetes.io/managed-by: kustomize
name: controller-manager
namespace: system

View File

@@ -0,0 +1,75 @@
# Example Ingress resource that would trigger NucleiScan creation
# When this Ingress is created, the nuclei-operator will automatically
# create a corresponding NucleiScan resource to scan the exposed endpoints.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: example-app-ingress
namespace: default
labels:
app.kubernetes.io/name: example-app
app.kubernetes.io/managed-by: kustomize
annotations:
# Optional: Add annotations to customize scan behavior
# nuclei.homelab.mortenolsen.pro/scan-enabled: "true"
# nuclei.homelab.mortenolsen.pro/severity: "high,critical"
kubernetes.io/ingress.class: nginx
spec:
# TLS configuration - endpoints will be scanned with HTTPS
tls:
- hosts:
- example.example.com
- api.example.com
secretName: example-tls-secret
rules:
# Main application endpoint
- host: example.example.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: example-app
port:
number: 80
- path: /api
pathType: Prefix
backend:
service:
name: example-api
port:
number: 8080
# API endpoint
- host: api.example.com
http:
paths:
- path: /v1
pathType: Prefix
backend:
service:
name: api-service
port:
number: 8080
---
# Example Ingress without TLS (HTTP only)
# This will be scanned with HTTP scheme
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: internal-app-ingress
namespace: default
labels:
app.kubernetes.io/name: internal-app
spec:
rules:
- host: internal.example.local
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: internal-app
port:
number: 80

View File

@@ -0,0 +1,5 @@
## Append samples of your project ##
resources:
- nuclei_v1alpha1_nucleiscan.yaml
- example-ingress.yaml
# +kubebuilder:scaffold:manifestskustomizesamples

View File

@@ -0,0 +1,94 @@
# Example NucleiScan resource
# This demonstrates a complete NucleiScan configuration
apiVersion: nuclei.homelab.mortenolsen.pro/v1alpha1
kind: NucleiScan
metadata:
labels:
app.kubernetes.io/name: nuclei-operator
app.kubernetes.io/managed-by: kustomize
name: nucleiscan-sample
namespace: default
spec:
# Reference to the source resource that triggered this scan
# This is typically set automatically by the Ingress/VirtualService controllers
sourceRef:
apiVersion: networking.k8s.io/v1
kind: Ingress
name: example-ingress
namespace: default
uid: "placeholder-uid"
# Target URLs to scan
# These are extracted from the source Ingress/VirtualService
targets:
- https://example.com
- https://example.com/api
- https://example.com/admin
# Severity levels to include in the scan
# Options: info, low, medium, high, critical
severity:
- medium
- high
- critical
# Optional: Specific Nuclei templates to use
# If not specified, all templates matching the severity will be used
templates:
- cves/
- vulnerabilities/
- exposures/
# Optional: Schedule for periodic rescanning (cron format)
# Examples:
# "0 2 * * *" - Daily at 2 AM
# "0 */6 * * *" - Every 6 hours
# "@every 24h" - Every 24 hours (simplified format)
schedule: "@every 24h"
# Optional: Suspend scheduled scans
# Set to true to pause scheduled scans without deleting the resource
suspend: false
---
# Example NucleiScan for a specific security audit
apiVersion: nuclei.homelab.mortenolsen.pro/v1alpha1
kind: NucleiScan
metadata:
labels:
app.kubernetes.io/name: nuclei-operator
app.kubernetes.io/managed-by: kustomize
security-audit: "true"
name: security-audit-scan
namespace: default
spec:
sourceRef:
apiVersion: networking.k8s.io/v1
kind: Ingress
name: production-ingress
namespace: production
uid: "audit-placeholder-uid"
targets:
- https://api.example.com
- https://www.example.com
# Full severity scan for security audit
severity:
- info
- low
- medium
- high
- critical
# Comprehensive template coverage
templates:
- cves/
- vulnerabilities/
- exposures/
- misconfiguration/
- default-logins/
# Weekly security audit
schedule: "0 3 * * 0"
suspend: false

514
docs/api.md Normal file
View File

@@ -0,0 +1,514 @@
# API Reference
This document provides a complete reference for the Nuclei Operator Custom Resource Definitions (CRDs).
## Table of Contents
- [NucleiScan](#nucleiscan)
- [Metadata](#metadata)
- [Spec](#spec)
- [Status](#status)
- [Type Definitions](#type-definitions)
- [SourceReference](#sourcereference)
- [Finding](#finding)
- [ScanSummary](#scansummary)
- [ScanPhase](#scanphase)
- [Examples](#examples)
---
## NucleiScan
`NucleiScan` is the primary custom resource for the Nuclei Operator. It represents a security scan configuration and stores the scan results.
**API Group:** `nuclei.homelab.mortenolsen.pro`
**API Version:** `v1alpha1`
**Kind:** `NucleiScan`
**Short Names:** `ns`, `nscan`
### Metadata
Standard Kubernetes metadata fields apply. The operator automatically sets owner references when creating NucleiScan resources from Ingress or VirtualService resources.
| Field | Type | Description |
|-------|------|-------------|
| `name` | string | Unique name within the namespace |
| `namespace` | string | Namespace where the resource resides |
| `labels` | map[string]string | Labels for organizing and selecting resources |
| `annotations` | map[string]string | Annotations for storing additional metadata |
| `ownerReferences` | []OwnerReference | References to owner resources (set automatically) |
### Spec
The `spec` field defines the desired state of the NucleiScan.
```yaml
spec:
sourceRef:
apiVersion: networking.k8s.io/v1
kind: Ingress
name: my-ingress
namespace: default
uid: "abc123-def456"
targets:
- https://example.com
- https://api.example.com
templates:
- cves/
- vulnerabilities/
severity:
- medium
- high
- critical
schedule: "@every 24h"
suspend: false
```
#### Spec Fields
| Field | Type | Required | Description |
|-------|------|----------|-------------|
| `sourceRef` | [SourceReference](#sourcereference) | Yes | Reference to the source Ingress or VirtualService |
| `targets` | []string | Yes | List of URLs to scan (minimum 1) |
| `templates` | []string | No | Nuclei templates to use. If empty, uses default templates |
| `severity` | []string | No | Severity filter. Valid values: `info`, `low`, `medium`, `high`, `critical` |
| `schedule` | string | No | Cron schedule for periodic rescanning |
| `suspend` | bool | No | When true, suspends scheduled scans |
#### Schedule Format
The `schedule` field supports two formats:
1. **Simplified interval format:**
- `@every <duration>` - e.g., `@every 24h`, `@every 6h`, `@every 30m`
2. **Standard cron format:**
- `* * * * *` - minute, hour, day of month, month, day of week
- Examples:
- `0 2 * * *` - Daily at 2:00 AM
- `0 */6 * * *` - Every 6 hours
- `0 3 * * 0` - Weekly on Sunday at 3:00 AM
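The scheduling library used by the operator is not shown in this document, so the following is a rough sketch only: both formats can be parsed with a cron library such as `github.com/robfig/cron/v3`, whose standard parser accepts the five-field syntax as well as the `@every <duration>` descriptor.
```go
package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

// nextRun computes the next execution time for a schedule string.
// cron.ParseStandard handles both "0 2 * * *" and "@every 24h".
func nextRun(schedule string, from time.Time) (time.Time, error) {
	sched, err := cron.ParseStandard(schedule)
	if err != nil {
		return time.Time{}, fmt.Errorf("invalid schedule %q: %w", schedule, err)
	}
	return sched.Next(from), nil
}

func main() {
	for _, s := range []string{"@every 24h", "0 2 * * *", "0 3 * * 0"} {
		next, err := nextRun(s, time.Now())
		if err != nil {
			fmt.Println("error:", err)
			continue
		}
		fmt.Printf("%-12s -> next run at %s\n", s, next.Format(time.RFC3339))
	}
}
```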
### Status
The `status` field contains the observed state of the NucleiScan, including scan results.
```yaml
status:
phase: Completed
conditions:
- type: Ready
status: "True"
reason: ScanCompleted
message: "Scan completed with 3 findings"
lastTransitionTime: "2024-01-15T10:35:00Z"
- type: ScanActive
status: "False"
reason: ScanCompleted
message: "Scan completed successfully"
lastTransitionTime: "2024-01-15T10:35:00Z"
lastScanTime: "2024-01-15T10:30:00Z"
completionTime: "2024-01-15T10:35:00Z"
nextScheduledTime: "2024-01-16T10:30:00Z"
summary:
totalFindings: 3
findingsBySeverity:
medium: 2
high: 1
targetsScanned: 2
durationSeconds: 300
findings:
- templateId: CVE-2021-44228
templateName: Apache Log4j RCE
severity: critical
type: http
host: https://example.com
matchedAt: https://example.com/api/login
timestamp: "2024-01-15T10:32:00Z"
lastError: ""
observedGeneration: 1
```
#### Status Fields
| Field | Type | Description |
|-------|------|-------------|
| `phase` | [ScanPhase](#scanphase) | Current phase of the scan |
| `conditions` | []Condition | Standard Kubernetes conditions |
| `lastScanTime` | *Time | When the last scan was initiated |
| `completionTime` | *Time | When the last scan completed |
| `nextScheduledTime` | *Time | When the next scheduled scan will run |
| `summary` | *[ScanSummary](#scansummary) | Aggregated scan statistics |
| `findings` | [][Finding](#finding) | Array of scan results |
| `lastError` | string | Error message if the scan failed |
| `observedGeneration` | int64 | Generation observed by the controller |
#### Conditions
The operator maintains the following condition types:
| Type | Description |
|------|-------------|
| `Ready` | Indicates whether the scan has completed successfully |
| `ScanActive` | Indicates whether a scan is currently running |
**Condition Reasons:**
| Reason | Description |
|--------|-------------|
| `ScanPending` | Scan is waiting to start |
| `ScanRunning` | Scan is currently in progress |
| `ScanCompleted` | Scan completed successfully |
| `ScanFailed` | Scan failed with an error |
| `ScanSuspended` | Scan is suspended |
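As an illustrative sketch (not the operator's actual reconciler code), conditions like these are typically written with `meta.SetStatusCondition` from `k8s.io/apimachinery`, using the reason values listed above; the conditions slice is assumed to be the NucleiScan's `.status.conditions`.
```go
package controller

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// markScanCompleted records the Ready and ScanActive conditions after a
// successful scan. SetStatusCondition fills in lastTransitionTime when the
// status of a condition changes.
func markScanCompleted(conditions *[]metav1.Condition, generation int64, findings int) {
	meta.SetStatusCondition(conditions, metav1.Condition{
		Type:               "Ready",
		Status:             metav1.ConditionTrue,
		Reason:             "ScanCompleted",
		Message:            fmt.Sprintf("Scan completed with %d findings", findings),
		ObservedGeneration: generation,
	})
	meta.SetStatusCondition(conditions, metav1.Condition{
		Type:               "ScanActive",
		Status:             metav1.ConditionFalse,
		Reason:             "ScanCompleted",
		Message:            "Scan completed successfully",
		ObservedGeneration: generation,
	})
}
```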
---
## Type Definitions
### SourceReference
`SourceReference` identifies the Ingress or VirtualService that triggered the scan.
```go
type SourceReference struct {
APIVersion string `json:"apiVersion"`
Kind string `json:"kind"`
Name string `json:"name"`
Namespace string `json:"namespace"`
UID string `json:"uid"`
}
```
| Field | Type | Required | Description |
|-------|------|----------|-------------|
| `apiVersion` | string | Yes | API version of the source resource (e.g., `networking.k8s.io/v1`) |
| `kind` | string | Yes | Kind of the source resource. Valid values: `Ingress`, `VirtualService` |
| `name` | string | Yes | Name of the source resource |
| `namespace` | string | Yes | Namespace of the source resource |
| `uid` | string | Yes | UID of the source resource |
### Finding
`Finding` represents a single vulnerability or issue discovered during a scan.
```go
type Finding struct {
TemplateID string `json:"templateId"`
TemplateName string `json:"templateName,omitempty"`
Severity string `json:"severity"`
Type string `json:"type,omitempty"`
Host string `json:"host"`
MatchedAt string `json:"matchedAt,omitempty"`
ExtractedResults []string `json:"extractedResults,omitempty"`
Description string `json:"description,omitempty"`
Reference []string `json:"reference,omitempty"`
Tags []string `json:"tags,omitempty"`
Timestamp metav1.Time `json:"timestamp"`
Metadata *runtime.RawExtension `json:"metadata,omitempty"`
}
```
| Field | Type | Required | Description |
|-------|------|----------|-------------|
| `templateId` | string | Yes | Nuclei template identifier (e.g., `CVE-2021-44228`) |
| `templateName` | string | No | Human-readable template name |
| `severity` | string | Yes | Severity level: `info`, `low`, `medium`, `high`, `critical` |
| `type` | string | No | Finding type: `http`, `dns`, `ssl`, `tcp`, etc. |
| `host` | string | Yes | Target host that was scanned |
| `matchedAt` | string | No | Specific URL or endpoint where the issue was found |
| `extractedResults` | []string | No | Data extracted by the template |
| `description` | string | No | Detailed description of the finding |
| `reference` | []string | No | URLs to additional information |
| `tags` | []string | No | Tags associated with the finding |
| `timestamp` | Time | Yes | When the finding was discovered |
| `metadata` | RawExtension | No | Additional template metadata (preserved as JSON) |
### ScanSummary
`ScanSummary` provides aggregated statistics about the scan.
```go
type ScanSummary struct {
TotalFindings int `json:"totalFindings"`
FindingsBySeverity map[string]int `json:"findingsBySeverity,omitempty"`
TargetsScanned int `json:"targetsScanned"`
DurationSeconds int64 `json:"durationSeconds,omitempty"`
}
```
| Field | Type | Description |
|-------|------|-------------|
| `totalFindings` | int | Total number of findings |
| `findingsBySeverity` | map[string]int | Breakdown of findings by severity level |
| `targetsScanned` | int | Number of targets that were scanned |
| `durationSeconds` | int64 | Duration of the scan in seconds |
### ScanPhase
`ScanPhase` represents the current phase of the scan lifecycle.
```go
type ScanPhase string
const (
ScanPhasePending ScanPhase = "Pending"
ScanPhaseRunning ScanPhase = "Running"
ScanPhaseCompleted ScanPhase = "Completed"
ScanPhaseFailed ScanPhase = "Failed"
)
```
| Phase | Description |
|-------|-------------|
| `Pending` | Scan is waiting to be executed |
| `Running` | Scan is currently in progress |
| `Completed` | Scan finished successfully |
| `Failed` | Scan failed with an error |
---
## Examples
### Basic NucleiScan
A minimal NucleiScan configuration:
```yaml
apiVersion: nuclei.homelab.mortenolsen.pro/v1alpha1
kind: NucleiScan
metadata:
name: basic-scan
namespace: default
spec:
sourceRef:
apiVersion: networking.k8s.io/v1
kind: Ingress
name: my-ingress
namespace: default
uid: "12345678-1234-1234-1234-123456789012"
targets:
- https://example.com
```
### NucleiScan with Severity Filter
Scan only for medium, high, and critical vulnerabilities:
```yaml
apiVersion: nuclei.homelab.mortenolsen.pro/v1alpha1
kind: NucleiScan
metadata:
name: severity-filtered-scan
namespace: default
spec:
sourceRef:
apiVersion: networking.k8s.io/v1
kind: Ingress
name: production-ingress
namespace: production
uid: "abcdef12-3456-7890-abcd-ef1234567890"
targets:
- https://api.example.com
- https://www.example.com
severity:
- medium
- high
- critical
```
### NucleiScan with Specific Templates
Use specific Nuclei template categories:
```yaml
apiVersion: nuclei.homelab.mortenolsen.pro/v1alpha1
kind: NucleiScan
metadata:
name: cve-scan
namespace: default
spec:
sourceRef:
apiVersion: networking.k8s.io/v1
kind: Ingress
name: app-ingress
namespace: default
uid: "fedcba98-7654-3210-fedc-ba9876543210"
targets:
- https://app.example.com
templates:
- cves/
- vulnerabilities/
- exposures/
severity:
- high
- critical
```
### Scheduled NucleiScan
Run a scan daily at 2:00 AM:
```yaml
apiVersion: nuclei.homelab.mortenolsen.pro/v1alpha1
kind: NucleiScan
metadata:
name: daily-security-scan
namespace: default
spec:
sourceRef:
apiVersion: networking.k8s.io/v1
kind: Ingress
name: main-ingress
namespace: default
uid: "11111111-2222-3333-4444-555555555555"
targets:
- https://example.com
- https://api.example.com
severity:
- medium
- high
- critical
schedule: "0 2 * * *"
suspend: false
```
### NucleiScan for VirtualService
Scan an Istio VirtualService:
```yaml
apiVersion: nuclei.homelab.mortenolsen.pro/v1alpha1
kind: NucleiScan
metadata:
name: istio-app-scan
namespace: istio-apps
spec:
sourceRef:
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
name: my-virtualservice
namespace: istio-apps
uid: "vs-uid-12345"
targets:
- https://istio-app.example.com
severity:
- low
- medium
- high
- critical
```
### Comprehensive Security Audit
Full security audit with all severity levels and template categories:
```yaml
apiVersion: nuclei.homelab.mortenolsen.pro/v1alpha1
kind: NucleiScan
metadata:
name: comprehensive-audit
namespace: security
labels:
audit-type: comprehensive
compliance: required
spec:
sourceRef:
apiVersion: networking.k8s.io/v1
kind: Ingress
name: production-ingress
namespace: production
uid: "prod-uid-67890"
targets:
- https://www.example.com
- https://api.example.com
- https://admin.example.com
templates:
- cves/
- vulnerabilities/
- exposures/
- misconfiguration/
- default-logins/
- takeovers/
severity:
- info
- low
- medium
- high
- critical
schedule: "0 3 * * 0" # Weekly on Sunday at 3 AM
suspend: false
```
---
## Print Columns
When listing NucleiScan resources with `kubectl get nucleiscans`, the following columns are displayed:
| Column | JSONPath | Description |
|--------|----------|-------------|
| NAME | `.metadata.name` | Resource name |
| PHASE | `.status.phase` | Current scan phase |
| FINDINGS | `.status.summary.totalFindings` | Total number of findings |
| SOURCE | `.spec.sourceRef.kind` | Source resource kind |
| AGE | `.metadata.creationTimestamp` | Resource age |
**Example output:**
```
NAME PHASE FINDINGS SOURCE AGE
my-app-scan Completed 5 Ingress 2d
api-scan Running 0 Ingress 1h
istio-app-scan Completed 2 VirtualService 5d
```
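These columns are normally declared as `+kubebuilder:printcolumn` markers on the Go API type. The project's source is not reproduced here; the following sketch shows what such markers look like for the columns above.
```go
// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=".status.phase"
// +kubebuilder:printcolumn:name="Findings",type=integer,JSONPath=".status.summary.totalFindings"
// +kubebuilder:printcolumn:name="Source",type=string,JSONPath=".spec.sourceRef.kind"
// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=".metadata.creationTimestamp"
type NucleiScan struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   NucleiScanSpec   `json:"spec,omitempty"`
	Status NucleiScanStatus `json:"status,omitempty"`
}
```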
---
## Validation
The CRD includes validation rules enforced by the Kubernetes API server:
### Spec Validation
- `sourceRef.kind` must be either `Ingress` or `VirtualService`
- `targets` must contain at least one item
- `severity` values must be one of: `info`, `low`, `medium`, `high`, `critical`
### Status Validation
- `phase` must be one of: `Pending`, `Running`, `Completed`, `Failed`
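As a sketch, these rules map to kubebuilder validation markers on the Go types. Field names follow the CRD shown earlier; this is illustrative rather than the project's verbatim source.
```go
type NucleiScanSpec struct {
	// SourceRef references the Ingress or VirtualService being scanned.
	SourceRef SourceReference `json:"sourceRef"`

	// Targets is the list of URLs to scan.
	// +kubebuilder:validation:MinItems=1
	Targets []string `json:"targets"`

	// Severity filters scan results by severity level.
	// +kubebuilder:validation:Enum=info;low;medium;high;critical
	Severity []string `json:"severity,omitempty"`
}

type SourceReference struct {
	// Kind of the source resource.
	// +kubebuilder:validation:Enum=Ingress;VirtualService
	Kind string `json:"kind"`
	// ... remaining fields omitted
}

// The status phase is constrained the same way:
// +kubebuilder:validation:Enum=Pending;Running;Completed;Failed
type ScanPhase string
```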
---
## RBAC Requirements
To interact with NucleiScan resources, the following RBAC permissions are needed:
### Read-only Access
```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: nucleiscan-viewer
rules:
- apiGroups: ["nuclei.homelab.mortenolsen.pro"]
resources: ["nucleiscans"]
verbs: ["get", "list", "watch"]
```
### Full Access
```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: nucleiscan-editor
rules:
- apiGroups: ["nuclei.homelab.mortenolsen.pro"]
resources: ["nucleiscans"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: ["nuclei.homelab.mortenolsen.pro"]
resources: ["nucleiscans/status"]
verbs: ["get"]

762
docs/user-guide.md Normal file
View File

@@ -0,0 +1,762 @@
# User Guide
This guide provides detailed instructions for using the Nuclei Operator to automate security scanning of your Kubernetes applications.
## Table of Contents
- [Introduction](#introduction)
- [Installation](#installation)
- [Basic Usage](#basic-usage)
- [Configuration Options](#configuration-options)
- [Working with Ingress Resources](#working-with-ingress-resources)
- [Working with VirtualService Resources](#working-with-virtualservice-resources)
- [Scheduled Scans](#scheduled-scans)
- [Viewing Scan Results](#viewing-scan-results)
- [Best Practices](#best-practices)
- [Security Considerations](#security-considerations)
- [Troubleshooting](#troubleshooting)
---
## Introduction
The Nuclei Operator automates security scanning by watching for Kubernetes Ingress and Istio VirtualService resources. When a new resource is created or updated, the operator automatically:
1. Extracts target URLs from the resource
2. Creates a NucleiScan custom resource
3. Executes a Nuclei security scan
4. Stores the results in the NucleiScan status
This enables continuous security monitoring of your web applications without manual intervention.
---
## Installation
### Prerequisites
Before installing the Nuclei Operator, ensure you have:
- A Kubernetes cluster (v1.26 or later)
- `kubectl` configured to access your cluster
- Cluster admin permissions (for CRD installation)
### Quick Installation
```bash
# Clone the repository
git clone https://github.com/mortenolsen/nuclei-operator.git
cd nuclei-operator
# Install CRDs
make install
# Deploy the operator
make deploy IMG=ghcr.io/mortenolsen/nuclei-operator:latest
```
### Verify Installation
```bash
# Check that the operator is running
kubectl get pods -n nuclei-operator-system
# Verify CRDs are installed
kubectl get crd nucleiscans.nuclei.homelab.mortenolsen.pro
```
Expected output:
```
NAME CREATED AT
nucleiscans.nuclei.homelab.mortenolsen.pro 2024-01-15T10:00:00Z
```
---
## Basic Usage
### Automatic Scanning via Ingress
The simplest way to use the operator is to create an Ingress resource. The operator will automatically create a NucleiScan.
**Step 1: Create an Ingress**
```yaml
# my-app-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: my-app
namespace: default
spec:
tls:
- hosts:
- myapp.example.com
secretName: myapp-tls
rules:
- host: myapp.example.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: my-app
port:
number: 80
```
```bash
kubectl apply -f my-app-ingress.yaml
```
**Step 2: View the Created NucleiScan**
```bash
# List NucleiScans
kubectl get nucleiscans
# View details
kubectl describe nucleiscan my-app-scan
```
### Manual NucleiScan Creation
You can also create NucleiScan resources manually for more control:
```yaml
# manual-scan.yaml
apiVersion: nuclei.homelab.mortenolsen.pro/v1alpha1
kind: NucleiScan
metadata:
name: manual-security-scan
namespace: default
spec:
sourceRef:
apiVersion: networking.k8s.io/v1
kind: Ingress
name: my-app
namespace: default
uid: "your-ingress-uid" # Get with: kubectl get ingress my-app -o jsonpath='{.metadata.uid}'
targets:
- https://myapp.example.com
severity:
- high
- critical
```
```bash
kubectl apply -f manual-scan.yaml
```
---
## Configuration Options
### Severity Filtering
Filter scan results by severity level:
```yaml
spec:
severity:
- info # Informational findings
- low # Low severity
- medium # Medium severity
- high # High severity
- critical # Critical severity
```
**Recommended configurations:**
| Use Case | Severity Levels |
|----------|-----------------|
| Production monitoring | `medium`, `high`, `critical` |
| Security audit | `info`, `low`, `medium`, `high`, `critical` |
| Quick check | `high`, `critical` |
### Template Selection
Specify which Nuclei templates to use:
```yaml
spec:
templates:
- cves/ # CVE checks
- vulnerabilities/ # General vulnerabilities
- exposures/ # Exposed services/files
- misconfiguration/ # Misconfigurations
- default-logins/ # Default credentials
- takeovers/ # Subdomain takeovers
```
**Template categories:**
| Category | Description |
|----------|-------------|
| `cves/` | Known CVE vulnerabilities |
| `vulnerabilities/` | General vulnerability checks |
| `exposures/` | Exposed sensitive files and services |
| `misconfiguration/` | Security misconfigurations |
| `default-logins/` | Default credential checks |
| `takeovers/` | Subdomain takeover vulnerabilities |
| `technologies/` | Technology detection |
| `ssl/` | SSL/TLS issues |
### Environment Variables
Configure the operator using environment variables in the deployment:
```yaml
# In config/manager/manager.yaml
env:
- name: NUCLEI_BINARY_PATH
value: "/usr/local/bin/nuclei"
- name: NUCLEI_TEMPLATES_PATH
value: "/nuclei-templates"
- name: NUCLEI_TIMEOUT
value: "30m"
```
| Variable | Description | Default |
|----------|-------------|---------|
| `NUCLEI_BINARY_PATH` | Path to Nuclei binary | `nuclei` |
| `NUCLEI_TEMPLATES_PATH` | Custom templates directory | (Nuclei default) |
| `NUCLEI_TIMEOUT` | Scan timeout duration | `30m` |
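Inside the manager these variables would typically be read with simple fallbacks. The operator's actual configuration code is not shown in this guide; the following is a minimal sketch using the documented names and defaults.
```go
package config

import (
	"os"
	"time"
)

// getEnv returns the value of key, or fallback when the variable is unset.
func getEnv(key, fallback string) string {
	if v, ok := os.LookupEnv(key); ok {
		return v
	}
	return fallback
}

// ScannerConfig mirrors the environment variables documented above.
type ScannerConfig struct {
	BinaryPath    string
	TemplatesPath string
	Timeout       time.Duration
}

// Load reads the scanner settings, falling back to the documented defaults.
func Load() ScannerConfig {
	timeout, err := time.ParseDuration(getEnv("NUCLEI_TIMEOUT", "30m"))
	if err != nil {
		timeout = 30 * time.Minute // documented default on parse errors
	}
	return ScannerConfig{
		BinaryPath:    getEnv("NUCLEI_BINARY_PATH", "nuclei"),
		TemplatesPath: getEnv("NUCLEI_TEMPLATES_PATH", ""),
		Timeout:       timeout,
	}
}
```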
---
## Working with Ingress Resources
### URL Extraction
The operator extracts URLs from Ingress resources based on:
1. **TLS configuration**: Hosts in `spec.tls[].hosts` are scanned with HTTPS
2. **Rules**: Hosts in `spec.rules[].host` are scanned
3. **Paths**: Individual paths from `spec.rules[].http.paths[]` are included
**Example Ingress:**
```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: multi-path-app
spec:
tls:
- hosts:
- secure.example.com
secretName: secure-tls
rules:
- host: secure.example.com
http:
paths:
- path: /api
pathType: Prefix
backend:
service:
name: api-service
port:
number: 8080
- path: /admin
pathType: Prefix
backend:
service:
name: admin-service
port:
number: 8081
- host: public.example.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: public-service
port:
number: 80
```
**Extracted URLs:**
- `https://secure.example.com/api`
- `https://secure.example.com/admin`
- `http://public.example.com/`
### Naming Convention
NucleiScan resources are named based on the Ingress:
```
<ingress-name>-scan
```
For example, an Ingress named `my-app` creates a NucleiScan named `my-app-scan`.
### Owner References
The operator sets owner references on NucleiScan resources, enabling:
- **Automatic cleanup**: When an Ingress is deleted, its NucleiScan is also deleted
- **Relationship tracking**: Easy identification of which Ingress created which scan
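You can confirm the relationship by inspecting the owner reference on a generated scan:
```bash
kubectl get nucleiscan my-app-scan -o jsonpath='{.metadata.ownerReferences[0].kind}/{.metadata.ownerReferences[0].name}{"\n"}'
```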
---
## Working with VirtualService Resources
### Prerequisites
VirtualService support requires Istio to be installed in your cluster.
### URL Extraction
The operator extracts URLs from VirtualService resources based on:
1. **Hosts**: All hosts in `spec.hosts[]`
2. **HTTP routes**: Paths from `spec.http[].match[].uri`
**Example VirtualService:**
```yaml
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
name: my-istio-app
namespace: default
spec:
hosts:
- myapp.example.com
gateways:
- my-gateway
http:
- match:
- uri:
prefix: /api
route:
- destination:
host: api-service
port:
number: 8080
- match:
- uri:
prefix: /web
route:
- destination:
host: web-service
port:
number: 80
```
**Extracted URLs:**
- `https://myapp.example.com/api`
- `https://myapp.example.com/web`
### Naming Convention
NucleiScan resources for VirtualServices follow the same pattern:
```
<virtualservice-name>-scan
```
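Once the VirtualService has been reconciled, the extracted targets can be checked on the generated scan:
```bash
kubectl get nucleiscan my-istio-app-scan -o jsonpath='{.spec.targets}{"\n"}'
```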
---
## Scheduled Scans
### Enabling Scheduled Scans
Add a `schedule` field to run scans periodically:
```yaml
spec:
schedule: "@every 24h"
```
### Schedule Formats
**Simplified interval format:**
| Format | Description |
|--------|-------------|
| `@every 1h` | Every hour |
| `@every 6h` | Every 6 hours |
| `@every 24h` | Every 24 hours |
| `@every 168h` | Every week |
**Standard cron format:**
```
┌───────────── minute (0 - 59)
│ ┌───────────── hour (0 - 23)
│ │ ┌───────────── day of month (1 - 31)
│ │ │ ┌───────────── month (1 - 12)
│ │ │ │ ┌───────────── day of week (0 - 6) (Sunday = 0)
│ │ │ │ │
* * * * *
```
**Examples:**
| Schedule | Description |
|----------|-------------|
| `0 2 * * *` | Daily at 2:00 AM |
| `0 */6 * * *` | Every 6 hours |
| `0 3 * * 0` | Weekly on Sunday at 3:00 AM |
| `0 0 1 * *` | Monthly on the 1st at midnight |
### Suspending Scheduled Scans
Temporarily pause scheduled scans without deleting the resource:
```yaml
spec:
schedule: "@every 24h"
suspend: true # Scans are paused
```
To resume:
```bash
kubectl patch nucleiscan my-scan -p '{"spec":{"suspend":false}}'
```
### Viewing Next Scheduled Time
```bash
kubectl get nucleiscan my-scan -o jsonpath='{.status.nextScheduledTime}'
```
---
## Viewing Scan Results
### List All Scans
```bash
# Basic listing
kubectl get nucleiscans
# With additional details
kubectl get nucleiscans -o wide
# In all namespaces
kubectl get nucleiscans -A
```
### View Scan Details
```bash
# Full details
kubectl describe nucleiscan my-app-scan
# JSON output
kubectl get nucleiscan my-app-scan -o json
# YAML output
kubectl get nucleiscan my-app-scan -o yaml
```
### Extract Specific Information
```bash
# Get scan phase
kubectl get nucleiscan my-app-scan -o jsonpath='{.status.phase}'
# Get total findings count
kubectl get nucleiscan my-app-scan -o jsonpath='{.status.summary.totalFindings}'
# Get findings by severity
kubectl get nucleiscan my-app-scan -o jsonpath='{.status.summary.findingsBySeverity}'
# Get all findings
kubectl get nucleiscan my-app-scan -o jsonpath='{.status.findings}' | jq .
# Get critical findings only
kubectl get nucleiscan my-app-scan -o json | jq '.status.findings[] | select(.severity == "critical")'
```
### Export Results
```bash
# Export to JSON file
kubectl get nucleiscan my-app-scan -o json > scan-results.json
# Export findings only
kubectl get nucleiscan my-app-scan -o jsonpath='{.status.findings}' > findings.json
# Export as CSV (using jq)
kubectl get nucleiscan my-app-scan -o json | jq -r '.status.findings[] | [.templateId, .severity, .host, .matchedAt] | @csv' > findings.csv
```
### Watch Scan Progress
```bash
# Watch scan status changes
kubectl get nucleiscans -w
# Watch specific scan
watch kubectl get nucleiscan my-app-scan
```
---
## Best Practices
### 1. Use Severity Filters in Production
Avoid scanning for `info` level findings in production to reduce noise:
```yaml
spec:
severity:
- medium
- high
- critical
```
### 2. Schedule Scans During Off-Peak Hours
Run scheduled scans during low-traffic periods:
```yaml
spec:
schedule: "0 3 * * *" # 3 AM daily
```
### 3. Use Namespaces for Organization
Organize scans by environment or team:
```bash
# Development scans
kubectl get nucleiscans -n development
# Production scans
kubectl get nucleiscans -n production
```
### 4. Label Your Resources
Add labels for better organization and filtering:
```yaml
metadata:
labels:
environment: production
team: security
compliance: pci-dss
```
```bash
# Filter by label
kubectl get nucleiscans -l environment=production
```
### 5. Monitor Scan Failures
Set up alerts for failed scans:
```bash
# Find failed scans (custom resources support only limited field selectors, so filter client-side)
kubectl get nucleiscans -A -o json | jq -r '.items[] | select(.status.phase == "Failed") | "\(.metadata.namespace)/\(.metadata.name)"'
```
### 6. Regular Template Updates
Keep Nuclei templates updated for the latest vulnerability checks. The operator uses the templates bundled in the container image.
### 7. Resource Limits
Ensure the operator has appropriate resource limits:
```yaml
resources:
limits:
cpu: 500m
memory: 512Mi
requests:
cpu: 100m
memory: 128Mi
```
---
## Security Considerations
### Network Access
The operator needs network access to scan targets. Consider:
1. **Network Policies**: Ensure the operator can reach scan targets
2. **Egress Rules**: Allow outbound traffic to target hosts (see the sketch after this list)
3. **Internal vs External**: Be aware of scanning internal vs external endpoints
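If your cluster enforces NetworkPolicies, the operator's pods need an egress path to the scan targets. A minimal sketch, assuming the default `nuclei-operator-system` namespace and the standard `control-plane: controller-manager` pod label; tighten the egress rule to specific CIDRs and ports as appropriate:
```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-operator-egress
  namespace: nuclei-operator-system
spec:
  podSelector:
    matchLabels:
      control-plane: controller-manager
  policyTypes:
    - Egress
  egress:
    - {} # allow all outbound traffic from the operator pods
```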
### RBAC Permissions
The operator requires specific permissions:
- **Read** Ingress and VirtualService resources
- **Full control** over NucleiScan resources
- **Create** events for logging
Review the RBAC configuration in `config/rbac/role.yaml`.
### Scan Impact
Consider the impact of security scans:
1. **Rate Limiting**: Nuclei rate-limits its own requests, but be mindful of the target's capacity
2. **WAF/IDS Alerts**: Scans may trigger security alerts on targets
3. **Logging**: Scan traffic will appear in target access logs
### Sensitive Data
Scan results may contain sensitive information:
1. **Access Control**: Restrict access to NucleiScan resources (an example Role follows this list)
2. **Data Retention**: Consider cleanup policies for old scan results
3. **Audit Logging**: Enable Kubernetes audit logging for compliance
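One way to restrict read access is a namespaced, read-only Role bound to the teams that need to review findings; the names here are illustrative:
```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: nucleiscan-viewer
  namespace: default
rules:
  - apiGroups: ["nuclei.homelab.mortenolsen.pro"]
    resources: ["nucleiscans"]
    verbs: ["get", "list", "watch"]
```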
### Container Security
The operator container includes the Nuclei binary:
1. **Image Updates**: Regularly update the operator image
2. **Vulnerability Scanning**: Scan the operator image itself
3. **Non-root User**: The operator runs as a non-root user
---
## Troubleshooting
### Scan Stuck in Pending
**Symptoms:** NucleiScan remains in `Pending` phase
**Solutions:**
1. Check operator logs:
```bash
kubectl logs -n nuclei-operator-system deployment/nuclei-operator-controller-manager
```
2. Verify the operator is running:
```bash
kubectl get pods -n nuclei-operator-system
```
3. Check for resource constraints:
```bash
kubectl describe pod -n nuclei-operator-system -l control-plane=controller-manager
```
### Scan Failed
**Symptoms:** NucleiScan shows `Failed` phase
**Solutions:**
1. Check the error message:
```bash
kubectl get nucleiscan my-scan -o jsonpath='{.status.lastError}'
```
2. Common errors:
- **Timeout**: Increase timeout or reduce targets
- **Network error**: Check connectivity to targets
- **Binary not found**: Verify Nuclei is installed in the container
3. Retry the scan:
```bash
# Trigger a new scan by updating the spec
kubectl patch nucleiscan my-scan -p '{"spec":{"targets":["https://example.com"]}}'
```
### No NucleiScan Created for Ingress
**Symptoms:** Ingress exists but no NucleiScan is created
**Solutions:**
1. Verify the Ingress has hosts defined:
```bash
kubectl get ingress my-ingress -o jsonpath='{.spec.rules[*].host}'
```
2. Check operator RBAC:
```bash
kubectl auth can-i list ingresses --as=system:serviceaccount:nuclei-operator-system:nuclei-operator-controller-manager
```
3. Check operator logs for errors:
```bash
kubectl logs -n nuclei-operator-system deployment/nuclei-operator-controller-manager | grep -i error
```
### Empty Scan Results
**Symptoms:** Scan completes but has no findings
**Possible causes:**
1. **Targets not accessible**: Verify targets are reachable from the operator pod
2. **Severity filter too strict**: Try including more severity levels
3. **Templates not matching**: Ensure templates are appropriate for the targets
**Verification:**
```bash
# Test connectivity from operator pod
kubectl exec -n nuclei-operator-system deployment/nuclei-operator-controller-manager -- curl -I https://your-target.com
```
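It also helps to confirm the scan's own filters, since a strict severity list or a narrow template set silently drops findings:
```bash
kubectl get nucleiscan my-app-scan -o jsonpath='{.spec.severity}{"\n"}{.spec.templates}{"\n"}'
```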
### High Resource Usage
**Symptoms:** Operator consuming excessive CPU/memory
**Solutions:**
1. Reduce concurrent scans by adjusting controller concurrency (see the sketch after this list)
2. Increase resource limits:
```yaml
resources:
limits:
cpu: 1000m
memory: 1Gi
```
3. Reduce scan scope (fewer targets or templates)
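Concurrency is controlled per controller through controller-runtime's `MaxConcurrentReconciles` (the default is one worker per controller). The operator does not currently expose this as a flag, so the snippet below is only a sketch of where it would be set in the NucleiScan controller's `SetupWithManager`:
```go
import (
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/controller"
)

// SetupWithManager wires the controller with a single concurrent worker,
// so at most one Nuclei scan runs at a time (the controller-runtime default).
func (r *NucleiScanReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&nucleiv1alpha1.NucleiScan{}).
		WithOptions(controller.Options{MaxConcurrentReconciles: 1}).
		Named("nucleiscan").
		Complete(r)
}
```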
### Scheduled Scans Not Running
**Symptoms:** Scheduled scan time passes but scan doesn't start
**Solutions:**
1. Verify scan is not suspended:
```bash
kubectl get nucleiscan my-scan -o jsonpath='{.spec.suspend}'
```
2. Check the schedule format:
```bash
kubectl get nucleiscan my-scan -o jsonpath='{.spec.schedule}'
```
3. Verify next scheduled time:
```bash
kubectl get nucleiscan my-scan -o jsonpath='{.status.nextScheduledTime}'
```
### Getting Help
If you're still experiencing issues:
1. Check the [GitHub Issues](https://github.com/mortenolsen/nuclei-operator/issues)
2. Review the [Architecture documentation](../ARCHITECTURE.md)
3. Enable debug logging and collect logs
4. Open a new issue with:
- Kubernetes version
- Operator version
- Relevant resource YAML (sanitized)
- Operator logs
- Steps to reproduce

103
go.mod Normal file
View File

@@ -0,0 +1,103 @@
module github.com/mortenolsen/nuclei-operator
go 1.24.6
require (
github.com/onsi/ginkgo/v2 v2.22.0
github.com/onsi/gomega v1.36.1
istio.io/client-go v1.28.1
k8s.io/api v0.34.1
k8s.io/apimachinery v0.34.1
k8s.io/client-go v0.34.1
sigs.k8s.io/controller-runtime v0.22.4
)
require (
cel.dev/expr v0.24.0 // indirect
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
github.com/evanphx/json-patch/v5 v5.9.11 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-logr/zapr v1.3.0 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/btree v1.1.3 // indirect
github.com/google/cel-go v0.26.0 // indirect
github.com/google/gnostic-models v0.7.0 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.22.0 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.62.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/spf13/cobra v1.9.1 // indirect
github.com/spf13/pflag v1.0.6 // indirect
github.com/stoewer/go-strcase v1.3.0 // indirect
github.com/x448/float16 v0.8.4 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
go.opentelemetry.io/otel v1.36.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 // indirect
go.opentelemetry.io/otel/metric v1.36.0 // indirect
go.opentelemetry.io/otel/sdk v1.36.0 // indirect
go.opentelemetry.io/otel/trace v1.36.0 // indirect
go.opentelemetry.io/proto/otlp v1.5.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
go.yaml.in/yaml/v2 v2.4.2 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/net v0.43.0 // indirect
golang.org/x/oauth2 v0.30.0 // indirect
golang.org/x/sync v0.16.0 // indirect
golang.org/x/sys v0.35.0 // indirect
golang.org/x/term v0.34.0 // indirect
golang.org/x/text v0.28.0 // indirect
golang.org/x/time v0.9.0 // indirect
golang.org/x/tools v0.35.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250811230008-5f3141c8851a // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250811230008-5f3141c8851a // indirect
google.golang.org/grpc v1.74.2 // indirect
google.golang.org/protobuf v1.36.7 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
istio.io/api v1.28.0-beta.1.0.20251027181303-a5aa715c1e32 // indirect
k8s.io/apiextensions-apiserver v0.34.1 // indirect
k8s.io/apiserver v0.34.1 // indirect
k8s.io/component-base v0.34.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
sigs.k8s.io/yaml v1.6.0 // indirect
)

264
go.sum Normal file
View File

@@ -0,0 +1,264 @@
cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=
cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k=
github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI=
github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM=
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg=
github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw=
github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE=
go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis=
go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4=
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
google.golang.org/genproto/googleapis/api v0.0.0-20250811230008-5f3141c8851a h1:DMCgtIAIQGZqJXMVzJF4MV8BlWoJh2ZuFiRdAleyr58=
google.golang.org/genproto/googleapis/api v0.0.0-20250811230008-5f3141c8851a/go.mod h1:y2yVLIE/CSMCPXaHnSKXxu1spLPnglFLegmgdY23uuE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250811230008-5f3141c8851a h1:tPE/Kp+x9dMSwUm/uM0JKK0IfdiJkwAbSMSeZBXXJXc=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250811230008-5f3141c8851a/go.mod h1:gw1tLEfykwDz2ET4a12jcXt4couGAm7IwsVaTy0Sflo=
google.golang.org/grpc v1.74.2 h1:WoosgB65DlWVC9FqI82dGsZhWFNBSLjQ84bjROOpMu4=
google.golang.org/grpc v1.74.2/go.mod h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM=
google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A=
google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
istio.io/api v1.28.0-beta.1.0.20251027181303-a5aa715c1e32 h1:ZeK4F1/DsYy3TqIkrL/85MHNn5xc+W7cixLXI6nPSYA=
istio.io/api v1.28.0-beta.1.0.20251027181303-a5aa715c1e32/go.mod h1:BD3qv/ekm16kvSgvSpuiDawgKhEwG97wx849CednJSg=
istio.io/client-go v1.28.1 h1:oB5bD3r64rEcrXuqYMNjaON2Shz15tn8mNOGv53wrN4=
istio.io/client-go v1.28.1/go.mod h1:mcFWH+wv9ltQqoDYyfLeVFyRZuD7n1Fj7TD5RGohqSU=
k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM=
k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk=
k8s.io/apiextensions-apiserver v0.34.1 h1:NNPBva8FNAPt1iSVwIE0FsdrVriRXMsaWFMqJbII2CI=
k8s.io/apiextensions-apiserver v0.34.1/go.mod h1:hP9Rld3zF5Ay2Of3BeEpLAToP+l4s5UlxiHfqRaRcMc=
k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4=
k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
k8s.io/apiserver v0.34.1 h1:U3JBGdgANK3dfFcyknWde1G6X1F4bg7PXuvlqt8lITA=
k8s.io/apiserver v0.34.1/go.mod h1:eOOc9nrVqlBI1AFCvVzsob0OxtPZUCPiUJL45JOTBG0=
k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY=
k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8=
k8s.io/component-base v0.34.1 h1:v7xFgG+ONhytZNFpIz5/kecwD+sUhVE6HU7qQUiRM4A=
k8s.io/component-base v0.34.1/go.mod h1:mknCpLlTSKHzAQJJnnHVKqjxR7gBeHRv0rPXA7gdtQ0=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A=
sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=

15
hack/boilerplate.go.txt Normal file
View File

@@ -0,0 +1,15 @@
/*
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

View File

@@ -0,0 +1,202 @@
/*
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"fmt"
"reflect"
networkingv1 "k8s.io/api/networking/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
logf "sigs.k8s.io/controller-runtime/pkg/log"
nucleiv1alpha1 "github.com/mortenolsen/nuclei-operator/api/v1alpha1"
)
// IngressReconciler reconciles Ingress objects and creates NucleiScan resources
type IngressReconciler struct {
client.Client
Scheme *runtime.Scheme
}
// +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch
// +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses/status,verbs=get
// +kubebuilder:rbac:groups=nuclei.homelab.mortenolsen.pro,resources=nucleiscans,verbs=get;list;watch;create;update;patch;delete
// Reconcile handles Ingress events and creates/updates corresponding NucleiScan resources
func (r *IngressReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := logf.FromContext(ctx)
// Fetch the Ingress resource
ingress := &networkingv1.Ingress{}
if err := r.Get(ctx, req.NamespacedName, ingress); err != nil {
if apierrors.IsNotFound(err) {
// Ingress was deleted - NucleiScan will be garbage collected via ownerReference
log.Info("Ingress not found, likely deleted")
return ctrl.Result{}, nil
}
log.Error(err, "Failed to get Ingress")
return ctrl.Result{}, err
}
// Extract target URLs from the Ingress
targets := extractURLsFromIngress(ingress)
if len(targets) == 0 {
log.Info("No targets extracted from Ingress, skipping NucleiScan creation")
return ctrl.Result{}, nil
}
// Define the NucleiScan name based on the Ingress name
nucleiScanName := fmt.Sprintf("%s-scan", ingress.Name)
// Check if a NucleiScan already exists for this Ingress
existingScan := &nucleiv1alpha1.NucleiScan{}
err := r.Get(ctx, client.ObjectKey{
Namespace: ingress.Namespace,
Name: nucleiScanName,
}, existingScan)
if err != nil && !apierrors.IsNotFound(err) {
log.Error(err, "Failed to get existing NucleiScan")
return ctrl.Result{}, err
}
if apierrors.IsNotFound(err) {
// Create a new NucleiScan
nucleiScan := &nucleiv1alpha1.NucleiScan{
ObjectMeta: metav1.ObjectMeta{
Name: nucleiScanName,
Namespace: ingress.Namespace,
},
Spec: nucleiv1alpha1.NucleiScanSpec{
SourceRef: nucleiv1alpha1.SourceReference{
APIVersion: "networking.k8s.io/v1",
Kind: "Ingress",
Name: ingress.Name,
Namespace: ingress.Namespace,
UID: string(ingress.UID),
},
Targets: targets,
},
}
// Set owner reference for garbage collection
if err := controllerutil.SetControllerReference(ingress, nucleiScan, r.Scheme); err != nil {
log.Error(err, "Failed to set owner reference on NucleiScan")
return ctrl.Result{}, err
}
if err := r.Create(ctx, nucleiScan); err != nil {
log.Error(err, "Failed to create NucleiScan")
return ctrl.Result{}, err
}
log.Info("Created NucleiScan for Ingress", "nucleiScan", nucleiScanName, "targets", targets)
return ctrl.Result{}, nil
}
// NucleiScan exists - check if targets need to be updated
if !reflect.DeepEqual(existingScan.Spec.Targets, targets) {
existingScan.Spec.Targets = targets
// Also update the SourceRef UID in case it changed (e.g., Ingress was recreated)
existingScan.Spec.SourceRef.UID = string(ingress.UID)
if err := r.Update(ctx, existingScan); err != nil {
log.Error(err, "Failed to update NucleiScan targets")
return ctrl.Result{}, err
}
log.Info("Updated NucleiScan targets for Ingress", "nucleiScan", nucleiScanName, "targets", targets)
}
return ctrl.Result{}, nil
}
// extractURLsFromIngress extracts target URLs from an Ingress resource
func extractURLsFromIngress(ingress *networkingv1.Ingress) []string {
var urls []string
tlsHosts := make(map[string]bool)
// Build a map of TLS hosts for quick lookup
for _, tls := range ingress.Spec.TLS {
for _, host := range tls.Hosts {
tlsHosts[host] = true
}
}
// Extract URLs from rules
for _, rule := range ingress.Spec.Rules {
if rule.Host == "" {
continue
}
// Determine the scheme based on TLS configuration
scheme := "http"
if tlsHosts[rule.Host] {
scheme = "https"
}
// If there are HTTP paths defined, create URLs for each path
if rule.HTTP != nil && len(rule.HTTP.Paths) > 0 {
for _, path := range rule.HTTP.Paths {
pathStr := path.Path
if pathStr == "" {
pathStr = "/"
}
url := fmt.Sprintf("%s://%s%s", scheme, rule.Host, pathStr)
urls = append(urls, url)
}
} else {
// No paths defined, just use the host
url := fmt.Sprintf("%s://%s", scheme, rule.Host)
urls = append(urls, url)
}
}
// Deduplicate URLs
return deduplicateStrings(urls)
}
// deduplicateStrings removes duplicate strings from a slice while preserving order
func deduplicateStrings(input []string) []string {
seen := make(map[string]bool)
result := make([]string, 0, len(input))
for _, s := range input {
if !seen[s] {
seen[s] = true
result = append(result, s)
}
}
return result
}
// SetupWithManager sets up the controller with the Manager
func (r *IngressReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&networkingv1.Ingress{}).
Owns(&nucleiv1alpha1.NucleiScan{}).
Named("ingress").
Complete(r)
}

View File

@@ -0,0 +1,413 @@
/*
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"fmt"
"time"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
logf "sigs.k8s.io/controller-runtime/pkg/log"
nucleiv1alpha1 "github.com/mortenolsen/nuclei-operator/api/v1alpha1"
"github.com/mortenolsen/nuclei-operator/internal/scanner"
)
const (
// finalizerName is the finalizer used by this controller
finalizerName = "nuclei.homelab.mortenolsen.pro/finalizer"
// Default requeue intervals
defaultRequeueAfter = 30 * time.Second
defaultScheduleRequeue = 1 * time.Minute
defaultErrorRequeueAfter = 1 * time.Minute
)
// Condition types for NucleiScan
const (
ConditionTypeReady = "Ready"
ConditionTypeScanActive = "ScanActive"
)
// Condition reasons
const (
ReasonScanPending = "ScanPending"
ReasonScanRunning = "ScanRunning"
ReasonScanCompleted = "ScanCompleted"
ReasonScanFailed = "ScanFailed"
ReasonScanSuspended = "ScanSuspended"
)
// NucleiScanReconciler reconciles a NucleiScan object
type NucleiScanReconciler struct {
client.Client
Scheme *runtime.Scheme
Scanner scanner.Scanner
}
// +kubebuilder:rbac:groups=nuclei.homelab.mortenolsen.pro,resources=nucleiscans,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=nuclei.homelab.mortenolsen.pro,resources=nucleiscans/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=nuclei.homelab.mortenolsen.pro,resources=nucleiscans/finalizers,verbs=update
// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
func (r *NucleiScanReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := logf.FromContext(ctx)
// Fetch the NucleiScan instance
nucleiScan := &nucleiv1alpha1.NucleiScan{}
if err := r.Get(ctx, req.NamespacedName, nucleiScan); err != nil {
// Resource not found, likely deleted
return ctrl.Result{}, client.IgnoreNotFound(err)
}
// Handle deletion
if !nucleiScan.DeletionTimestamp.IsZero() {
return r.handleDeletion(ctx, nucleiScan)
}
// Add finalizer if not present
if !controllerutil.ContainsFinalizer(nucleiScan, finalizerName) {
controllerutil.AddFinalizer(nucleiScan, finalizerName)
if err := r.Update(ctx, nucleiScan); err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{Requeue: true}, nil
}
// Check if scan is suspended
if nucleiScan.Spec.Suspend {
log.Info("Scan is suspended, skipping")
return r.updateCondition(ctx, nucleiScan, ConditionTypeReady, metav1.ConditionFalse,
ReasonScanSuspended, "Scan is suspended")
}
// Initialize status if empty
if nucleiScan.Status.Phase == "" {
nucleiScan.Status.Phase = nucleiv1alpha1.ScanPhasePending
nucleiScan.Status.ObservedGeneration = nucleiScan.Generation
if err := r.Status().Update(ctx, nucleiScan); err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{Requeue: true}, nil
}
// Handle based on current phase
switch nucleiScan.Status.Phase {
case nucleiv1alpha1.ScanPhasePending:
return r.handlePendingPhase(ctx, nucleiScan)
case nucleiv1alpha1.ScanPhaseRunning:
// This shouldn't happen in our synchronous implementation
// but handle it gracefully
return r.handlePendingPhase(ctx, nucleiScan)
case nucleiv1alpha1.ScanPhaseCompleted:
return r.handleCompletedPhase(ctx, nucleiScan)
case nucleiv1alpha1.ScanPhaseFailed:
return r.handleFailedPhase(ctx, nucleiScan)
default:
log.Info("Unknown phase, resetting to Pending", "phase", nucleiScan.Status.Phase)
nucleiScan.Status.Phase = nucleiv1alpha1.ScanPhasePending
if err := r.Status().Update(ctx, nucleiScan); err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{Requeue: true}, nil
}
}
// handleDeletion handles the deletion of a NucleiScan resource
func (r *NucleiScanReconciler) handleDeletion(ctx context.Context, nucleiScan *nucleiv1alpha1.NucleiScan) (ctrl.Result, error) {
log := logf.FromContext(ctx)
if controllerutil.ContainsFinalizer(nucleiScan, finalizerName) {
log.Info("Handling deletion, performing cleanup")
// Perform any cleanup here (e.g., cancel running scans)
// In our synchronous implementation, there's nothing to clean up
// Remove finalizer
controllerutil.RemoveFinalizer(nucleiScan, finalizerName)
if err := r.Update(ctx, nucleiScan); err != nil {
return ctrl.Result{}, err
}
}
return ctrl.Result{}, nil
}
// handlePendingPhase handles the Pending phase - starts a new scan
func (r *NucleiScanReconciler) handlePendingPhase(ctx context.Context, nucleiScan *nucleiv1alpha1.NucleiScan) (ctrl.Result, error) {
log := logf.FromContext(ctx)
log.Info("Starting scan", "targets", len(nucleiScan.Spec.Targets))
// Update status to Running
now := metav1.Now()
nucleiScan.Status.Phase = nucleiv1alpha1.ScanPhaseRunning
nucleiScan.Status.LastScanTime = &now
nucleiScan.Status.LastError = ""
nucleiScan.Status.ObservedGeneration = nucleiScan.Generation
// Set condition
meta.SetStatusCondition(&nucleiScan.Status.Conditions, metav1.Condition{
Type: ConditionTypeScanActive,
Status: metav1.ConditionTrue,
Reason: ReasonScanRunning,
Message: "Scan is in progress",
LastTransitionTime: now,
})
if err := r.Status().Update(ctx, nucleiScan); err != nil {
return ctrl.Result{}, err
}
// Build scan options
options := scanner.ScanOptions{
Templates: nucleiScan.Spec.Templates,
Severity: nucleiScan.Spec.Severity,
Timeout: 30 * time.Minute, // Default timeout
}
// Execute the scan
result, err := r.Scanner.Scan(ctx, nucleiScan.Spec.Targets, options)
if err != nil {
log.Error(err, "Scan failed")
return r.handleScanError(ctx, nucleiScan, err)
}
// Update status with results
return r.handleScanSuccess(ctx, nucleiScan, result)
}
// handleScanSuccess updates the status after a successful scan
func (r *NucleiScanReconciler) handleScanSuccess(ctx context.Context, nucleiScan *nucleiv1alpha1.NucleiScan, result *scanner.ScanResult) (ctrl.Result, error) {
log := logf.FromContext(ctx)
log.Info("Scan completed successfully", "findings", len(result.Findings), "duration", result.Duration)
now := metav1.Now()
nucleiScan.Status.Phase = nucleiv1alpha1.ScanPhaseCompleted
nucleiScan.Status.CompletionTime = &now
nucleiScan.Status.Findings = result.Findings
nucleiScan.Status.Summary = &result.Summary
nucleiScan.Status.LastError = ""
// Set conditions
meta.SetStatusCondition(&nucleiScan.Status.Conditions, metav1.Condition{
Type: ConditionTypeScanActive,
Status: metav1.ConditionFalse,
Reason: ReasonScanCompleted,
Message: "Scan completed successfully",
LastTransitionTime: now,
})
message := fmt.Sprintf("Scan completed with %d findings", len(result.Findings))
meta.SetStatusCondition(&nucleiScan.Status.Conditions, metav1.Condition{
Type: ConditionTypeReady,
Status: metav1.ConditionTrue,
Reason: ReasonScanCompleted,
Message: message,
LastTransitionTime: now,
})
if err := r.Status().Update(ctx, nucleiScan); err != nil {
return ctrl.Result{}, err
}
// If there's a schedule, calculate next scan time
if nucleiScan.Spec.Schedule != "" {
return r.scheduleNextScan(ctx, nucleiScan)
}
return ctrl.Result{}, nil
}
// handleScanError updates the status after a failed scan
func (r *NucleiScanReconciler) handleScanError(ctx context.Context, nucleiScan *nucleiv1alpha1.NucleiScan, scanErr error) (ctrl.Result, error) {
now := metav1.Now()
nucleiScan.Status.Phase = nucleiv1alpha1.ScanPhaseFailed
nucleiScan.Status.CompletionTime = &now
nucleiScan.Status.LastError = scanErr.Error()
// Set conditions
meta.SetStatusCondition(&nucleiScan.Status.Conditions, metav1.Condition{
Type: ConditionTypeScanActive,
Status: metav1.ConditionFalse,
Reason: ReasonScanFailed,
Message: scanErr.Error(),
LastTransitionTime: now,
})
meta.SetStatusCondition(&nucleiScan.Status.Conditions, metav1.Condition{
Type: ConditionTypeReady,
Status: metav1.ConditionFalse,
Reason: ReasonScanFailed,
Message: scanErr.Error(),
LastTransitionTime: now,
})
if err := r.Status().Update(ctx, nucleiScan); err != nil {
return ctrl.Result{}, err
}
// Requeue with backoff for retry
return ctrl.Result{RequeueAfter: defaultErrorRequeueAfter}, nil
}
// handleCompletedPhase handles the Completed phase - checks for scheduled rescans
func (r *NucleiScanReconciler) handleCompletedPhase(ctx context.Context, nucleiScan *nucleiv1alpha1.NucleiScan) (ctrl.Result, error) {
log := logf.FromContext(ctx)
// Check if spec has changed (new generation)
if nucleiScan.Generation != nucleiScan.Status.ObservedGeneration {
log.Info("Spec changed, triggering new scan")
nucleiScan.Status.Phase = nucleiv1alpha1.ScanPhasePending
if err := r.Status().Update(ctx, nucleiScan); err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{Requeue: true}, nil
}
// Check if there's a schedule
if nucleiScan.Spec.Schedule != "" {
return r.checkScheduledScan(ctx, nucleiScan)
}
return ctrl.Result{}, nil
}
// handleFailedPhase handles the Failed phase - implements retry logic
func (r *NucleiScanReconciler) handleFailedPhase(ctx context.Context, nucleiScan *nucleiv1alpha1.NucleiScan) (ctrl.Result, error) {
log := logf.FromContext(ctx)
// Check if spec has changed (new generation)
if nucleiScan.Generation != nucleiScan.Status.ObservedGeneration {
log.Info("Spec changed, triggering new scan")
nucleiScan.Status.Phase = nucleiv1alpha1.ScanPhasePending
if err := r.Status().Update(ctx, nucleiScan); err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{Requeue: true}, nil
}
// For now, don't auto-retry failed scans
// Users can trigger a retry by updating the spec
log.Info("Scan failed, waiting for manual intervention or spec change")
return ctrl.Result{}, nil
}
// scheduleNextScan calculates and sets the next scheduled scan time
func (r *NucleiScanReconciler) scheduleNextScan(ctx context.Context, nucleiScan *nucleiv1alpha1.NucleiScan) (ctrl.Result, error) {
log := logf.FromContext(ctx)
// Parse cron schedule
nextTime, err := getNextScheduleTime(nucleiScan.Spec.Schedule, time.Now())
if err != nil {
log.Error(err, "Failed to parse schedule", "schedule", nucleiScan.Spec.Schedule)
return ctrl.Result{}, nil
}
nucleiScan.Status.NextScheduledTime = &metav1.Time{Time: nextTime}
if err := r.Status().Update(ctx, nucleiScan); err != nil {
return ctrl.Result{}, err
}
// Calculate requeue duration
requeueAfter := time.Until(nextTime)
if requeueAfter < 0 {
requeueAfter = defaultScheduleRequeue
}
log.Info("Scheduled next scan", "nextTime", nextTime, "requeueAfter", requeueAfter)
return ctrl.Result{RequeueAfter: requeueAfter}, nil
}
// checkScheduledScan checks if it's time for a scheduled scan
func (r *NucleiScanReconciler) checkScheduledScan(ctx context.Context, nucleiScan *nucleiv1alpha1.NucleiScan) (ctrl.Result, error) {
log := logf.FromContext(ctx)
if nucleiScan.Status.NextScheduledTime == nil {
// No next scheduled time set, calculate it
return r.scheduleNextScan(ctx, nucleiScan)
}
now := time.Now()
nextTime := nucleiScan.Status.NextScheduledTime.Time
if now.After(nextTime) {
log.Info("Scheduled scan time reached, triggering scan")
nucleiScan.Status.Phase = nucleiv1alpha1.ScanPhasePending
nucleiScan.Status.NextScheduledTime = nil
if err := r.Status().Update(ctx, nucleiScan); err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{Requeue: true}, nil
}
// Not yet time, requeue until scheduled time
requeueAfter := time.Until(nextTime)
return ctrl.Result{RequeueAfter: requeueAfter}, nil
}
// updateCondition is a helper to update a condition and return a result
func (r *NucleiScanReconciler) updateCondition(ctx context.Context, nucleiScan *nucleiv1alpha1.NucleiScan,
condType string, status metav1.ConditionStatus, reason, message string) (ctrl.Result, error) {
meta.SetStatusCondition(&nucleiScan.Status.Conditions, metav1.Condition{
Type: condType,
Status: status,
Reason: reason,
Message: message,
LastTransitionTime: metav1.Now(),
})
if err := r.Status().Update(ctx, nucleiScan); err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
// getNextScheduleTime parses a cron expression and returns the next scheduled time
// This is a simplified implementation - for production, consider using a proper cron library
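// For example, getNextScheduleTime("@every 12h", now) returns now.Add(12 * time.Hour);
// any other expression currently falls back to now.Add(24 * time.Hour).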
func getNextScheduleTime(schedule string, from time.Time) (time.Time, error) {
// Simple implementation for common intervals
// Format: "@every <duration>" or standard cron
if len(schedule) > 7 && schedule[:7] == "@every " {
duration, err := time.ParseDuration(schedule[7:])
if err != nil {
return time.Time{}, fmt.Errorf("invalid duration in schedule: %w", err)
}
return from.Add(duration), nil
}
// For standard cron expressions, we'd need a cron parser library
// For now, default to 24 hours if we can't parse
return from.Add(24 * time.Hour), nil
}
// SetupWithManager sets up the controller with the Manager.
func (r *NucleiScanReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&nucleiv1alpha1.NucleiScan{}).
Named("nucleiscan").
Complete(r)
}

View File

@@ -0,0 +1,84 @@
/*
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
nucleiv1alpha1 "github.com/mortenolsen/nuclei-operator/api/v1alpha1"
)
var _ = Describe("NucleiScan Controller", func() {
Context("When reconciling a resource", func() {
const resourceName = "test-resource"
ctx := context.Background()
typeNamespacedName := types.NamespacedName{
Name: resourceName,
Namespace: "default", // TODO(user):Modify as needed
}
nucleiscan := &nucleiv1alpha1.NucleiScan{}
BeforeEach(func() {
By("creating the custom resource for the Kind NucleiScan")
err := k8sClient.Get(ctx, typeNamespacedName, nucleiscan)
if err != nil && errors.IsNotFound(err) {
resource := &nucleiv1alpha1.NucleiScan{
ObjectMeta: metav1.ObjectMeta{
Name: resourceName,
Namespace: "default",
},
// TODO(user): Specify other spec details if needed.
}
Expect(k8sClient.Create(ctx, resource)).To(Succeed())
}
})
AfterEach(func() {
// TODO(user): Cleanup logic after each test, like removing the resource instance.
resource := &nucleiv1alpha1.NucleiScan{}
err := k8sClient.Get(ctx, typeNamespacedName, resource)
Expect(err).NotTo(HaveOccurred())
By("Cleanup the specific resource instance NucleiScan")
Expect(k8sClient.Delete(ctx, resource)).To(Succeed())
})
It("should successfully reconcile the resource", func() {
By("Reconciling the created resource")
controllerReconciler := &NucleiScanReconciler{
Client: k8sClient,
Scheme: k8sClient.Scheme(),
}
_, err := controllerReconciler.Reconcile(ctx, reconcile.Request{
NamespacedName: typeNamespacedName,
})
Expect(err).NotTo(HaveOccurred())
// TODO(user): Add more specific assertions depending on your controller's reconciliation logic.
// Example: If you expect a certain status condition after reconciliation, verify it here.
})
})
})

View File

@@ -0,0 +1,116 @@
/*
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"os"
"path/filepath"
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
nucleiv1alpha1 "github.com/mortenolsen/nuclei-operator/api/v1alpha1"
// +kubebuilder:scaffold:imports
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
var (
ctx context.Context
cancel context.CancelFunc
testEnv *envtest.Environment
cfg *rest.Config
k8sClient client.Client
)
func TestControllers(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Controller Suite")
}
var _ = BeforeSuite(func() {
logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
ctx, cancel = context.WithCancel(context.TODO())
var err error
err = nucleiv1alpha1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
// +kubebuilder:scaffold:scheme
By("bootstrapping test environment")
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")},
ErrorIfCRDPathMissing: true,
}
// Retrieve the first found binary directory to allow running tests from IDEs
if getFirstFoundEnvTestBinaryDir() != "" {
testEnv.BinaryAssetsDirectory = getFirstFoundEnvTestBinaryDir()
}
// cfg is defined in this file globally.
cfg, err = testEnv.Start()
Expect(err).NotTo(HaveOccurred())
Expect(cfg).NotTo(BeNil())
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
Expect(err).NotTo(HaveOccurred())
Expect(k8sClient).NotTo(BeNil())
})
var _ = AfterSuite(func() {
By("tearing down the test environment")
cancel()
err := testEnv.Stop()
Expect(err).NotTo(HaveOccurred())
})
// getFirstFoundEnvTestBinaryDir locates the first binary in the specified path.
// ENVTEST-based tests depend on specific binaries, usually located in paths set by
// controller-runtime. When running tests directly (e.g., via an IDE) without using
// Makefile targets, the 'BinaryAssetsDirectory' must be explicitly configured.
//
// This function streamlines the process by finding the required binaries, similar to
// setting the 'KUBEBUILDER_ASSETS' environment variable. To ensure the binaries are
// properly set up, run 'make setup-envtest' beforehand.
func getFirstFoundEnvTestBinaryDir() string {
basePath := filepath.Join("..", "..", "bin", "k8s")
entries, err := os.ReadDir(basePath)
if err != nil {
logf.Log.Error(err, "Failed to read directory", "path", basePath)
return ""
}
for _, entry := range entries {
if entry.IsDir() {
return filepath.Join(basePath, entry.Name())
}
}
return ""
}

View File

@@ -0,0 +1,223 @@
/*
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"fmt"
"reflect"
"strings"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
logf "sigs.k8s.io/controller-runtime/pkg/log"
istionetworkingv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1"
nucleiv1alpha1 "github.com/mortenolsen/nuclei-operator/api/v1alpha1"
)
// VirtualServiceReconciler reconciles VirtualService objects and creates NucleiScan resources
type VirtualServiceReconciler struct {
client.Client
Scheme *runtime.Scheme
}
// +kubebuilder:rbac:groups=networking.istio.io,resources=virtualservices,verbs=get;list;watch
// +kubebuilder:rbac:groups=networking.istio.io,resources=virtualservices/status,verbs=get
// +kubebuilder:rbac:groups=nuclei.homelab.mortenolsen.pro,resources=nucleiscans,verbs=get;list;watch;create;update;patch;delete
// Reconcile handles VirtualService events and creates/updates corresponding NucleiScan resources
func (r *VirtualServiceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := logf.FromContext(ctx)
// Fetch the VirtualService resource
virtualService := &istionetworkingv1beta1.VirtualService{}
if err := r.Get(ctx, req.NamespacedName, virtualService); err != nil {
if apierrors.IsNotFound(err) {
// VirtualService was deleted - NucleiScan will be garbage collected via ownerReference
log.Info("VirtualService not found, likely deleted")
return ctrl.Result{}, nil
}
log.Error(err, "Failed to get VirtualService")
return ctrl.Result{}, err
}
// Extract target URLs from the VirtualService
targets := extractURLsFromVirtualService(virtualService)
if len(targets) == 0 {
log.Info("No targets extracted from VirtualService, skipping NucleiScan creation")
return ctrl.Result{}, nil
}
// Define the NucleiScan name based on the VirtualService name
nucleiScanName := fmt.Sprintf("%s-scan", virtualService.Name)
// Check if a NucleiScan already exists for this VirtualService
existingScan := &nucleiv1alpha1.NucleiScan{}
err := r.Get(ctx, client.ObjectKey{
Namespace: virtualService.Namespace,
Name: nucleiScanName,
}, existingScan)
if err != nil && !apierrors.IsNotFound(err) {
log.Error(err, "Failed to get existing NucleiScan")
return ctrl.Result{}, err
}
if apierrors.IsNotFound(err) {
// Create a new NucleiScan
nucleiScan := &nucleiv1alpha1.NucleiScan{
ObjectMeta: metav1.ObjectMeta{
Name: nucleiScanName,
Namespace: virtualService.Namespace,
},
Spec: nucleiv1alpha1.NucleiScanSpec{
SourceRef: nucleiv1alpha1.SourceReference{
APIVersion: "networking.istio.io/v1beta1",
Kind: "VirtualService",
Name: virtualService.Name,
Namespace: virtualService.Namespace,
UID: string(virtualService.UID),
},
Targets: targets,
},
}
// Set owner reference for garbage collection
if err := controllerutil.SetControllerReference(virtualService, nucleiScan, r.Scheme); err != nil {
log.Error(err, "Failed to set owner reference on NucleiScan")
return ctrl.Result{}, err
}
if err := r.Create(ctx, nucleiScan); err != nil {
log.Error(err, "Failed to create NucleiScan")
return ctrl.Result{}, err
}
log.Info("Created NucleiScan for VirtualService", "nucleiScan", nucleiScanName, "targets", targets)
return ctrl.Result{}, nil
}
// NucleiScan exists - check if targets need to be updated
if !reflect.DeepEqual(existingScan.Spec.Targets, targets) {
existingScan.Spec.Targets = targets
// Also update the SourceRef UID in case it changed (e.g., VirtualService was recreated)
existingScan.Spec.SourceRef.UID = string(virtualService.UID)
if err := r.Update(ctx, existingScan); err != nil {
log.Error(err, "Failed to update NucleiScan targets")
return ctrl.Result{}, err
}
log.Info("Updated NucleiScan targets for VirtualService", "nucleiScan", nucleiScanName, "targets", targets)
}
return ctrl.Result{}, nil
}
// extractURLsFromVirtualService extracts target URLs from a VirtualService resource
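// For example, a VirtualService bound to an external gateway with host "myapp.example.com"
// and an HTTP match prefix "/api" yields []string{"https://myapp.example.com/api"};
// wildcard hosts, hosts without a dot, and *.svc.* hosts are skipped.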
func extractURLsFromVirtualService(vs *istionetworkingv1beta1.VirtualService) []string {
var urls []string
// Check whether the VirtualService is bound to a gateway other than "mesh" (indicates external traffic).
// VirtualServices that declare only the "mesh" gateway are internal service-to-service and are skipped;
// those that declare no gateways at all are still considered.
hasExternalGateway := false
for _, gw := range vs.Spec.Gateways {
if gw != "mesh" {
hasExternalGateway = true
break
}
}
// Skip when gateways are declared but none of them is external
if !hasExternalGateway && len(vs.Spec.Gateways) > 0 {
return urls
}
// Extract URLs from hosts
for _, host := range vs.Spec.Hosts {
// Skip wildcard hosts and internal service names (no dots or starts with *)
if strings.HasPrefix(host, "*") {
continue
}
// Skip internal Kubernetes service names (typically don't contain dots or are short names)
// External hosts typically have FQDNs like "myapp.example.com"
if !strings.Contains(host, ".") {
continue
}
// Skip Kubernetes internal service FQDNs (*.svc.cluster.local)
if strings.Contains(host, ".svc.cluster.local") || strings.Contains(host, ".svc.") {
continue
}
// Default to HTTPS for external hosts (security scanning)
scheme := "https"
// Extract paths from HTTP routes if defined
pathsFound := false
if vs.Spec.Http != nil {
for _, httpRoute := range vs.Spec.Http {
if httpRoute.Match != nil {
for _, match := range httpRoute.Match {
if match.Uri != nil {
if match.Uri.GetPrefix() != "" {
url := fmt.Sprintf("%s://%s%s", scheme, host, match.Uri.GetPrefix())
urls = append(urls, url)
pathsFound = true
} else if match.Uri.GetExact() != "" {
url := fmt.Sprintf("%s://%s%s", scheme, host, match.Uri.GetExact())
urls = append(urls, url)
pathsFound = true
} else if match.Uri.GetRegex() != "" {
// For regex patterns, just use the base URL
// We can't enumerate all possible matches
url := fmt.Sprintf("%s://%s", scheme, host)
urls = append(urls, url)
pathsFound = true
}
}
}
}
}
}
// If no specific paths found, add base URL
if !pathsFound {
url := fmt.Sprintf("%s://%s", scheme, host)
urls = append(urls, url)
}
}
// Deduplicate URLs
return deduplicateStrings(urls)
}
// SetupWithManager sets up the controller with the Manager
func (r *VirtualServiceReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&istionetworkingv1beta1.VirtualService{}).
Owns(&nucleiv1alpha1.NucleiScan{}).
Named("virtualservice").
Complete(r)
}

196
internal/scanner/parser.go Normal file
View File

@@ -0,0 +1,196 @@
/*
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scanner
import (
"bufio"
"bytes"
"encoding/json"
"strings"
"time"
nucleiv1alpha1 "github.com/mortenolsen/nuclei-operator/api/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// NucleiOutput represents the structure of Nuclei's JSONL output
type NucleiOutput struct {
TemplateID string `json:"template-id"`
TemplatePath string `json:"template-path"`
Info NucleiInfo `json:"info"`
Type string `json:"type"`
Host string `json:"host"`
MatchedAt string `json:"matched-at"`
Timestamp string `json:"timestamp"`
// ExtractedResults can be a string array or other types
ExtractedResults interface{} `json:"extracted-results,omitempty"`
// MatcherName is the name of the matcher that triggered
MatcherName string `json:"matcher-name,omitempty"`
// IP is the resolved IP address
IP string `json:"ip,omitempty"`
// CurlCommand is the curl command to reproduce the request
CurlCommand string `json:"curl-command,omitempty"`
}
// NucleiInfo contains template metadata
type NucleiInfo struct {
Name string `json:"name"`
Author interface{} `json:"author"` // Can be string or []string
Tags interface{} `json:"tags"` // Can be string or []string
Description string `json:"description,omitempty"`
Severity string `json:"severity"`
Reference interface{} `json:"reference,omitempty"` // Can be string or []string
Metadata interface{} `json:"metadata,omitempty"`
}
// ParseJSONLOutput parses Nuclei's JSONL output and returns a slice of Findings
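// Each non-empty line is expected to be a JSON object such as (abbreviated):
//   {"template-id":"tech-detect","info":{"name":"Tech Detect","severity":"info"},"type":"http","host":"https://example.com","matched-at":"https://example.com/"}
// Lines that do not start with "{" (status messages) are skipped.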
func ParseJSONLOutput(output []byte) ([]nucleiv1alpha1.Finding, error) {
var findings []nucleiv1alpha1.Finding
scanner := bufio.NewScanner(bytes.NewReader(output))
// Increase buffer size for potentially large JSON lines
scanner.Buffer(make([]byte, 0, 64*1024), 1024*1024)
for scanner.Scan() {
line := scanner.Bytes()
if len(line) == 0 {
continue
}
// Skip non-JSON lines (nuclei sometimes outputs status messages)
if !bytes.HasPrefix(bytes.TrimSpace(line), []byte("{")) {
continue
}
finding, err := parseJSONLine(line)
if err != nil {
// Log warning but continue parsing other lines
// In production, you might want to use a proper logger
continue
}
findings = append(findings, finding)
}
if err := scanner.Err(); err != nil {
return findings, err
}
return findings, nil
}
// parseJSONLine parses a single JSONL line into a Finding
func parseJSONLine(line []byte) (nucleiv1alpha1.Finding, error) {
var output NucleiOutput
if err := json.Unmarshal(line, &output); err != nil {
return nucleiv1alpha1.Finding{}, err
}
finding := nucleiv1alpha1.Finding{
TemplateID: output.TemplateID,
TemplateName: output.Info.Name,
Severity: strings.ToLower(output.Info.Severity),
Type: output.Type,
Host: output.Host,
MatchedAt: output.MatchedAt,
Description: output.Info.Description,
Timestamp: parseTimestamp(output.Timestamp),
}
// Parse extracted results
finding.ExtractedResults = parseStringSlice(output.ExtractedResults)
// Parse references
finding.Reference = parseStringSlice(output.Info.Reference)
// Parse tags
finding.Tags = parseStringSlice(output.Info.Tags)
// Store additional metadata as RawExtension
if output.Info.Metadata != nil {
if metadataBytes, err := json.Marshal(output.Info.Metadata); err == nil {
finding.Metadata = &runtime.RawExtension{Raw: metadataBytes}
}
}
return finding, nil
}
// parseTimestamp parses a timestamp string into metav1.Time
func parseTimestamp(ts string) metav1.Time {
if ts == "" {
return metav1.Now()
}
// Try various timestamp formats that Nuclei might use
formats := []string{
time.RFC3339,
time.RFC3339Nano,
"2006-01-02T15:04:05.000Z",
"2006-01-02T15:04:05Z",
"2006-01-02 15:04:05",
}
for _, format := range formats {
if t, err := time.Parse(format, ts); err == nil {
return metav1.NewTime(t)
}
}
// If parsing fails, return current time
return metav1.Now()
}
// parseStringSlice converts various types to a string slice
// Nuclei output can have fields as either a single string or an array of strings
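// Examples:
//   "cve,exposure"                -> []string{"cve", "exposure"}
//   []interface{}{"oast", "fuzz"} -> []string{"oast", "fuzz"}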
func parseStringSlice(v interface{}) []string {
if v == nil {
return nil
}
switch val := v.(type) {
case string:
if val == "" {
return nil
}
// Check if it's a comma-separated list
if strings.Contains(val, ",") {
parts := strings.Split(val, ",")
result := make([]string, 0, len(parts))
for _, p := range parts {
if trimmed := strings.TrimSpace(p); trimmed != "" {
result = append(result, trimmed)
}
}
return result
}
return []string{val}
case []interface{}:
result := make([]string, 0, len(val))
for _, item := range val {
if s, ok := item.(string); ok && s != "" {
result = append(result, s)
}
}
return result
case []string:
return val
default:
return nil
}
}

239
internal/scanner/scanner.go Normal file
View File

@@ -0,0 +1,239 @@
/*
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scanner
import (
"bytes"
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
nucleiv1alpha1 "github.com/mortenolsen/nuclei-operator/api/v1alpha1"
)
// Scanner defines the interface for executing Nuclei scans
type Scanner interface {
// Scan executes a Nuclei scan against the given targets and returns the results
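// Example:
//   Scan(ctx, []string{"https://example.com"}, ScanOptions{Severity: []string{"high", "critical"}})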
Scan(ctx context.Context, targets []string, options ScanOptions) (*ScanResult, error)
}
// ScanOptions contains configuration options for a scan
type ScanOptions struct {
// Templates specifies which Nuclei templates to use (paths or tags)
Templates []string
// Severity filters results by minimum severity level
Severity []string
// Timeout is the maximum duration for the scan
Timeout time.Duration
}
// ScanResult contains the results of a completed scan
type ScanResult struct {
// Findings contains all vulnerabilities/issues discovered
Findings []nucleiv1alpha1.Finding
// Summary provides aggregated statistics
Summary nucleiv1alpha1.ScanSummary
// Duration is how long the scan took
Duration time.Duration
}
// NucleiScanner implements the Scanner interface using the Nuclei binary
type NucleiScanner struct {
nucleiBinaryPath string
templatesPath string
defaultTimeout time.Duration
}
// Config holds configuration for the NucleiScanner
type Config struct {
// NucleiBinaryPath is the path to the nuclei binary (default: "nuclei")
NucleiBinaryPath string
// TemplatesPath is the path to nuclei templates (default: use nuclei's default)
TemplatesPath string
// DefaultTimeout is the default scan timeout (default: 30m)
DefaultTimeout time.Duration
}
// DefaultConfig returns a Config with default values
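// Values can be overridden via the NUCLEI_BINARY_PATH, NUCLEI_TEMPLATES_PATH,
// and NUCLEI_TIMEOUT environment variables (e.g. NUCLEI_TIMEOUT=45m).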
func DefaultConfig() Config {
return Config{
NucleiBinaryPath: getEnvOrDefault("NUCLEI_BINARY_PATH", "nuclei"),
TemplatesPath: getEnvOrDefault("NUCLEI_TEMPLATES_PATH", ""),
DefaultTimeout: getEnvDurationOrDefault("NUCLEI_TIMEOUT", 30*time.Minute),
}
}
// NewNucleiScanner creates a new NucleiScanner with the given configuration
func NewNucleiScanner(config Config) *NucleiScanner {
if config.DefaultTimeout == 0 {
config.DefaultTimeout = 30 * time.Minute
}
return &NucleiScanner{
nucleiBinaryPath: config.NucleiBinaryPath,
templatesPath: config.TemplatesPath,
defaultTimeout: config.DefaultTimeout,
}
}
// NewNucleiScannerWithDefaults creates a new NucleiScanner with default configuration
func NewNucleiScannerWithDefaults() *NucleiScanner {
return NewNucleiScanner(DefaultConfig())
}
// Scan executes a Nuclei scan against the given targets
func (s *NucleiScanner) Scan(ctx context.Context, targets []string, options ScanOptions) (*ScanResult, error) {
if len(targets) == 0 {
return nil, fmt.Errorf("no targets provided for scan")
}
startTime := time.Now()
// Create a temporary directory for this scan
tmpDir, err := os.MkdirTemp("", "nuclei-scan-*")
if err != nil {
return nil, fmt.Errorf("failed to create temp directory: %w", err)
}
defer os.RemoveAll(tmpDir)
// Write targets to a file
targetsFile := filepath.Join(tmpDir, "targets.txt")
if err := os.WriteFile(targetsFile, []byte(strings.Join(targets, "\n")), 0600); err != nil {
return nil, fmt.Errorf("failed to write targets file: %w", err)
}
// Build the nuclei command arguments
args := s.buildArgs(targetsFile, options)
// Set timeout from options, then the scanner's configured default, then a 30m fallback
timeout := options.Timeout
if timeout == 0 {
timeout = s.defaultTimeout
}
if timeout == 0 {
timeout = 30 * time.Minute
}
// Create context with timeout
scanCtx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
// Execute nuclei
cmd := exec.CommandContext(scanCtx, s.nucleiBinaryPath, args...)
var stdout, stderr bytes.Buffer
cmd.Stdout = &stdout
cmd.Stderr = &stderr
err = cmd.Run()
duration := time.Since(startTime)
// Check for context cancellation
if scanCtx.Err() == context.DeadlineExceeded {
return nil, fmt.Errorf("scan timed out after %v", timeout)
}
if scanCtx.Err() == context.Canceled {
return nil, fmt.Errorf("scan was cancelled")
}
// Nuclei returns exit code 0 even when it finds vulnerabilities
// Non-zero exit codes indicate actual errors
if err != nil {
if exitErr, ok := err.(*exec.ExitError); ok {
// Exit code 1 can mean "no results found" which is not an error
if exitErr.ExitCode() != 1 {
return nil, fmt.Errorf("nuclei execution failed: %w, stderr: %s", err, stderr.String())
}
} else {
return nil, fmt.Errorf("failed to execute nuclei: %w", err)
}
}
// Parse the JSONL output
findings, err := ParseJSONLOutput(stdout.Bytes())
if err != nil {
return nil, fmt.Errorf("failed to parse nuclei output: %w", err)
}
// Calculate summary
summary := calculateSummary(findings, len(targets), duration)
return &ScanResult{
Findings: findings,
Summary: summary,
Duration: duration,
}, nil
}
// buildArgs constructs the command line arguments for nuclei
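// For example, with Severity ["critical", "high"] the invocation resembles:
//   nuclei -l <targetsFile> -jsonl -silent -no-color -severity critical,high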
func (s *NucleiScanner) buildArgs(targetsFile string, options ScanOptions) []string {
args := []string{
"-l", targetsFile,
"-jsonl",
"-silent",
"-no-color",
}
// Add templates path if configured
if s.templatesPath != "" {
args = append(args, "-t", s.templatesPath)
}
// Add specific templates if provided
if len(options.Templates) > 0 {
for _, t := range options.Templates {
args = append(args, "-t", t)
}
}
// Add severity filter if provided
if len(options.Severity) > 0 {
args = append(args, "-severity", strings.Join(options.Severity, ","))
}
return args
}
// calculateSummary generates a ScanSummary from the findings
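// For example, two "high" findings and one "info" finding across 3 targets produce
// TotalFindings: 3, FindingsBySeverity: {"high": 2, "info": 1}, TargetsScanned: 3.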
func calculateSummary(findings []nucleiv1alpha1.Finding, targetsCount int, duration time.Duration) nucleiv1alpha1.ScanSummary {
severityCounts := make(map[string]int)
for _, f := range findings {
severity := strings.ToLower(f.Severity)
severityCounts[severity]++
}
return nucleiv1alpha1.ScanSummary{
TotalFindings: len(findings),
FindingsBySeverity: severityCounts,
TargetsScanned: targetsCount,
DurationSeconds: int64(duration.Seconds()),
}
}
// getEnvOrDefault returns the environment variable value or a default
func getEnvOrDefault(key, defaultValue string) string {
if value := os.Getenv(key); value != "" {
return value
}
return defaultValue
}
// getEnvDurationOrDefault returns the environment variable as a duration or a default
func getEnvDurationOrDefault(key string, defaultValue time.Duration) time.Duration {
if value := os.Getenv(key); value != "" {
if d, err := time.ParseDuration(value); err == nil {
return d
}
}
return defaultValue
}

View File

@@ -0,0 +1,92 @@
//go:build e2e
// +build e2e
/*
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"os"
"os/exec"
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/mortenolsen/nuclei-operator/test/utils"
)
var (
// Optional Environment Variables:
// - CERT_MANAGER_INSTALL_SKIP=true: Skips CertManager installation during test setup.
// These variables are useful if CertManager is already installed, avoiding
// re-installation and conflicts.
skipCertManagerInstall = os.Getenv("CERT_MANAGER_INSTALL_SKIP") == "true"
// isCertManagerAlreadyInstalled will be set to true when CertManager CRDs are found on the cluster
isCertManagerAlreadyInstalled = false
// projectImage is the name of the image that will be built and loaded
// with the source code changes to be tested.
projectImage = "example.com/nuclei-operator:v0.0.1"
)
// TestE2E runs the end-to-end (e2e) test suite for the project. These tests execute in an isolated,
// temporary environment to validate project changes with the purpose of being used in CI jobs.
// The default setup requires Kind, builds/loads the Manager Docker image locally, and installs
// CertManager.
func TestE2E(t *testing.T) {
RegisterFailHandler(Fail)
_, _ = fmt.Fprintf(GinkgoWriter, "Starting nuclei-operator integration test suite\n")
RunSpecs(t, "e2e suite")
}
var _ = BeforeSuite(func() {
By("building the manager(Operator) image")
cmd := exec.Command("make", "docker-build", fmt.Sprintf("IMG=%s", projectImage))
_, err := utils.Run(cmd)
ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to build the manager(Operator) image")
// TODO(user): If you want to change the e2e test vendor from Kind, ensure the image is
// built and available before running the tests. Also, remove the following block.
By("loading the manager(Operator) image on Kind")
err = utils.LoadImageToKindClusterWithName(projectImage)
ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to load the manager(Operator) image into Kind")
// The e2e tests are intended to run on a temporary cluster that is created and destroyed for testing.
// To prevent errors when tests run in environments with CertManager already installed,
// we check for its presence before execution.
// Setup CertManager before the suite if not skipped and if not already installed
if !skipCertManagerInstall {
By("checking if cert manager is installed already")
isCertManagerAlreadyInstalled = utils.IsCertManagerCRDsInstalled()
if !isCertManagerAlreadyInstalled {
_, _ = fmt.Fprintf(GinkgoWriter, "Installing CertManager...\n")
Expect(utils.InstallCertManager()).To(Succeed(), "Failed to install CertManager")
} else {
_, _ = fmt.Fprintf(GinkgoWriter, "WARNING: CertManager is already installed. Skipping installation...\n")
}
}
})
var _ = AfterSuite(func() {
// Teardown CertManager after the suite if not skipped and if it was not already installed
if !skipCertManagerInstall && !isCertManagerAlreadyInstalled {
_, _ = fmt.Fprintf(GinkgoWriter, "Uninstalling CertManager...\n")
utils.UninstallCertManager()
}
})

337
test/e2e/e2e_test.go Normal file
View File

@@ -0,0 +1,337 @@
//go:build e2e
// +build e2e
/*
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"encoding/json"
"fmt"
"os"
"os/exec"
"path/filepath"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/mortenolsen/nuclei-operator/test/utils"
)
// namespace where the project is deployed in
const namespace = "nuclei-operator-system"
// serviceAccountName created for the project
const serviceAccountName = "nuclei-operator-controller-manager"
// metricsServiceName is the name of the metrics service of the project
const metricsServiceName = "nuclei-operator-controller-manager-metrics-service"
// metricsRoleBindingName is the name of the RBAC that will be created to allow get the metrics data
const metricsRoleBindingName = "nuclei-operator-metrics-binding"
var _ = Describe("Manager", Ordered, func() {
var controllerPodName string
// Before running the tests, set up the environment by creating the namespace,
// enforce the restricted security policy to the namespace, installing CRDs,
// and deploying the controller.
BeforeAll(func() {
By("creating manager namespace")
cmd := exec.Command("kubectl", "create", "ns", namespace)
_, err := utils.Run(cmd)
Expect(err).NotTo(HaveOccurred(), "Failed to create namespace")
By("labeling the namespace to enforce the restricted security policy")
cmd = exec.Command("kubectl", "label", "--overwrite", "ns", namespace,
"pod-security.kubernetes.io/enforce=restricted")
_, err = utils.Run(cmd)
Expect(err).NotTo(HaveOccurred(), "Failed to label namespace with restricted policy")
By("installing CRDs")
cmd = exec.Command("make", "install")
_, err = utils.Run(cmd)
Expect(err).NotTo(HaveOccurred(), "Failed to install CRDs")
By("deploying the controller-manager")
cmd = exec.Command("make", "deploy", fmt.Sprintf("IMG=%s", projectImage))
_, err = utils.Run(cmd)
Expect(err).NotTo(HaveOccurred(), "Failed to deploy the controller-manager")
})
// After all tests have been executed, clean up by undeploying the controller, uninstalling CRDs,
// and deleting the namespace.
AfterAll(func() {
By("cleaning up the curl pod for metrics")
cmd := exec.Command("kubectl", "delete", "pod", "curl-metrics", "-n", namespace)
_, _ = utils.Run(cmd)
By("undeploying the controller-manager")
cmd = exec.Command("make", "undeploy")
_, _ = utils.Run(cmd)
By("uninstalling CRDs")
cmd = exec.Command("make", "uninstall")
_, _ = utils.Run(cmd)
By("removing manager namespace")
cmd = exec.Command("kubectl", "delete", "ns", namespace)
_, _ = utils.Run(cmd)
})
// After each test, check for failures and collect logs, events,
// and pod descriptions for debugging.
AfterEach(func() {
specReport := CurrentSpecReport()
if specReport.Failed() {
By("Fetching controller manager pod logs")
cmd := exec.Command("kubectl", "logs", controllerPodName, "-n", namespace)
controllerLogs, err := utils.Run(cmd)
if err == nil {
_, _ = fmt.Fprintf(GinkgoWriter, "Controller logs:\n %s", controllerLogs)
} else {
_, _ = fmt.Fprintf(GinkgoWriter, "Failed to get Controller logs: %s", err)
}
By("Fetching Kubernetes events")
cmd = exec.Command("kubectl", "get", "events", "-n", namespace, "--sort-by=.lastTimestamp")
eventsOutput, err := utils.Run(cmd)
if err == nil {
_, _ = fmt.Fprintf(GinkgoWriter, "Kubernetes events:\n%s", eventsOutput)
} else {
_, _ = fmt.Fprintf(GinkgoWriter, "Failed to get Kubernetes events: %s", err)
}
By("Fetching curl-metrics logs")
cmd = exec.Command("kubectl", "logs", "curl-metrics", "-n", namespace)
metricsOutput, err := utils.Run(cmd)
if err == nil {
_, _ = fmt.Fprintf(GinkgoWriter, "Metrics logs:\n %s", metricsOutput)
} else {
_, _ = fmt.Fprintf(GinkgoWriter, "Failed to get curl-metrics logs: %s", err)
}
By("Fetching controller manager pod description")
cmd = exec.Command("kubectl", "describe", "pod", controllerPodName, "-n", namespace)
podDescription, err := utils.Run(cmd)
if err == nil {
fmt.Println("Pod description:\n", podDescription)
} else {
fmt.Println("Failed to describe controller pod")
}
}
})
SetDefaultEventuallyTimeout(2 * time.Minute)
SetDefaultEventuallyPollingInterval(time.Second)
Context("Manager", func() {
It("should run successfully", func() {
By("validating that the controller-manager pod is running as expected")
verifyControllerUp := func(g Gomega) {
// Get the name of the controller-manager pod
cmd := exec.Command("kubectl", "get",
"pods", "-l", "control-plane=controller-manager",
"-o", "go-template={{ range .items }}"+
"{{ if not .metadata.deletionTimestamp }}"+
"{{ .metadata.name }}"+
"{{ \"\\n\" }}{{ end }}{{ end }}",
"-n", namespace,
)
podOutput, err := utils.Run(cmd)
g.Expect(err).NotTo(HaveOccurred(), "Failed to retrieve controller-manager pod information")
podNames := utils.GetNonEmptyLines(podOutput)
g.Expect(podNames).To(HaveLen(1), "expected 1 controller pod running")
controllerPodName = podNames[0]
g.Expect(controllerPodName).To(ContainSubstring("controller-manager"))
// Validate the pod's status
cmd = exec.Command("kubectl", "get",
"pods", controllerPodName, "-o", "jsonpath={.status.phase}",
"-n", namespace,
)
output, err := utils.Run(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(output).To(Equal("Running"), "Incorrect controller-manager pod status")
}
Eventually(verifyControllerUp).Should(Succeed())
})
It("should ensure the metrics endpoint is serving metrics", func() {
By("creating a ClusterRoleBinding for the service account to allow access to metrics")
cmd := exec.Command("kubectl", "create", "clusterrolebinding", metricsRoleBindingName,
"--clusterrole=nuclei-operator-metrics-reader",
fmt.Sprintf("--serviceaccount=%s:%s", namespace, serviceAccountName),
)
_, err := utils.Run(cmd)
Expect(err).NotTo(HaveOccurred(), "Failed to create ClusterRoleBinding")
By("validating that the metrics service is available")
cmd = exec.Command("kubectl", "get", "service", metricsServiceName, "-n", namespace)
_, err = utils.Run(cmd)
Expect(err).NotTo(HaveOccurred(), "Metrics service should exist")
By("getting the service account token")
token, err := serviceAccountToken()
Expect(err).NotTo(HaveOccurred())
Expect(token).NotTo(BeEmpty())
By("ensuring the controller pod is ready")
verifyControllerPodReady := func(g Gomega) {
cmd := exec.Command("kubectl", "get", "pod", controllerPodName, "-n", namespace,
"-o", "jsonpath={.status.conditions[?(@.type=='Ready')].status}")
output, err := utils.Run(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(output).To(Equal("True"), "Controller pod not ready")
}
Eventually(verifyControllerPodReady, 3*time.Minute, time.Second).Should(Succeed())
By("verifying that the controller manager is serving the metrics server")
verifyMetricsServerStarted := func(g Gomega) {
cmd := exec.Command("kubectl", "logs", controllerPodName, "-n", namespace)
output, err := utils.Run(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(output).To(ContainSubstring("Serving metrics server"),
"Metrics server not yet started")
}
Eventually(verifyMetricsServerStarted, 3*time.Minute, time.Second).Should(Succeed())
// +kubebuilder:scaffold:e2e-metrics-webhooks-readiness
By("creating the curl-metrics pod to access the metrics endpoint")
cmd = exec.Command("kubectl", "run", "curl-metrics", "--restart=Never",
"--namespace", namespace,
"--image=curlimages/curl:latest",
"--overrides",
fmt.Sprintf(`{
"spec": {
"containers": [{
"name": "curl",
"image": "curlimages/curl:latest",
"command": ["/bin/sh", "-c"],
"args": ["curl -v -k -H 'Authorization: Bearer %s' https://%s.%s.svc.cluster.local:8443/metrics"],
"securityContext": {
"readOnlyRootFilesystem": true,
"allowPrivilegeEscalation": false,
"capabilities": {
"drop": ["ALL"]
},
"runAsNonRoot": true,
"runAsUser": 1000,
"seccompProfile": {
"type": "RuntimeDefault"
}
}
}],
"serviceAccountName": "%s"
}
}`, token, metricsServiceName, namespace, serviceAccountName))
_, err = utils.Run(cmd)
Expect(err).NotTo(HaveOccurred(), "Failed to create curl-metrics pod")
By("waiting for the curl-metrics pod to complete.")
verifyCurlUp := func(g Gomega) {
cmd := exec.Command("kubectl", "get", "pods", "curl-metrics",
"-o", "jsonpath={.status.phase}",
"-n", namespace)
output, err := utils.Run(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(output).To(Equal("Succeeded"), "curl pod in wrong status")
}
Eventually(verifyCurlUp, 5*time.Minute).Should(Succeed())
By("getting the metrics by checking curl-metrics logs")
verifyMetricsAvailable := func(g Gomega) {
metricsOutput, err := getMetricsOutput()
g.Expect(err).NotTo(HaveOccurred(), "Failed to retrieve logs from curl pod")
g.Expect(metricsOutput).NotTo(BeEmpty())
g.Expect(metricsOutput).To(ContainSubstring("< HTTP/1.1 200 OK"))
}
Eventually(verifyMetricsAvailable, 2*time.Minute).Should(Succeed())
})
// +kubebuilder:scaffold:e2e-webhooks-checks
// TODO: Customize the e2e test suite with scenarios specific to your project.
// Consider applying sample/CR(s) and check their status and/or verifying
// the reconciliation by using the metrics, i.e.:
// metricsOutput, err := getMetricsOutput()
// Expect(err).NotTo(HaveOccurred(), "Failed to retrieve logs from curl pod")
// Expect(metricsOutput).To(ContainSubstring(
// fmt.Sprintf(`controller_runtime_reconcile_total{controller="%s",result="success"} 1`,
// strings.ToLower(<Kind>),
// ))
})
})
// serviceAccountToken returns a token for the specified service account in the given namespace.
// It uses the Kubernetes TokenRequest API to generate a token by directly sending a request
// and parsing the resulting token from the API response.
func serviceAccountToken() (string, error) {
const tokenRequestRawString = `{
"apiVersion": "authentication.k8s.io/v1",
"kind": "TokenRequest"
}`
// Temporary file to store the token request
secretName := fmt.Sprintf("%s-token-request", serviceAccountName)
tokenRequestFile := filepath.Join("/tmp", secretName)
err := os.WriteFile(tokenRequestFile, []byte(tokenRequestRawString), os.FileMode(0o644))
if err != nil {
return "", err
}
var out string
verifyTokenCreation := func(g Gomega) {
// Execute kubectl command to create the token
cmd := exec.Command("kubectl", "create", "--raw", fmt.Sprintf(
"/api/v1/namespaces/%s/serviceaccounts/%s/token",
namespace,
serviceAccountName,
), "-f", tokenRequestFile)
output, err := cmd.CombinedOutput()
g.Expect(err).NotTo(HaveOccurred())
// Parse the JSON output to extract the token
var token tokenRequest
err = json.Unmarshal(output, &token)
g.Expect(err).NotTo(HaveOccurred())
out = token.Status.Token
}
Eventually(verifyTokenCreation).Should(Succeed())
return out, err
}
// getMetricsOutput retrieves and returns the logs from the curl pod used to access the metrics endpoint.
func getMetricsOutput() (string, error) {
By("getting the curl-metrics logs")
cmd := exec.Command("kubectl", "logs", "curl-metrics", "-n", namespace)
return utils.Run(cmd)
}
// tokenRequest is a simplified representation of the Kubernetes TokenRequest API response,
// containing only the token field that we need to extract.
type tokenRequest struct {
Status struct {
Token string `json:"token"`
} `json:"status"`
}

226
test/utils/utils.go Normal file
View File

@@ -0,0 +1,226 @@
/*
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"bufio"
"bytes"
"fmt"
"os"
"os/exec"
"strings"
. "github.com/onsi/ginkgo/v2" // nolint:revive,staticcheck
)
const (
certmanagerVersion = "v1.19.1"
certmanagerURLTmpl = "https://github.com/cert-manager/cert-manager/releases/download/%s/cert-manager.yaml"
defaultKindBinary = "kind"
defaultKindCluster = "kind"
)
func warnError(err error) {
_, _ = fmt.Fprintf(GinkgoWriter, "warning: %v\n", err)
}
// Run executes the provided command within this context
func Run(cmd *exec.Cmd) (string, error) {
dir, _ := GetProjectDir()
cmd.Dir = dir
if err := os.Chdir(cmd.Dir); err != nil {
_, _ = fmt.Fprintf(GinkgoWriter, "chdir dir: %q\n", err)
}
cmd.Env = append(os.Environ(), "GO111MODULE=on")
command := strings.Join(cmd.Args, " ")
_, _ = fmt.Fprintf(GinkgoWriter, "running: %q\n", command)
output, err := cmd.CombinedOutput()
if err != nil {
return string(output), fmt.Errorf("%q failed with error %q: %w", command, string(output), err)
}
return string(output), nil
}
// UninstallCertManager uninstalls the cert manager
func UninstallCertManager() {
url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion)
cmd := exec.Command("kubectl", "delete", "-f", url)
if _, err := Run(cmd); err != nil {
warnError(err)
}
// Delete leftover leases in kube-system (not cleaned by default)
kubeSystemLeases := []string{
"cert-manager-cainjector-leader-election",
"cert-manager-controller",
}
for _, lease := range kubeSystemLeases {
cmd = exec.Command("kubectl", "delete", "lease", lease,
"-n", "kube-system", "--ignore-not-found", "--force", "--grace-period=0")
if _, err := Run(cmd); err != nil {
warnError(err)
}
}
}
// InstallCertManager installs the cert manager bundle.
func InstallCertManager() error {
url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion)
cmd := exec.Command("kubectl", "apply", "-f", url)
if _, err := Run(cmd); err != nil {
return err
}
// Wait for cert-manager-webhook to be ready, which can take time if cert-manager
// was re-installed after uninstalling on a cluster.
cmd = exec.Command("kubectl", "wait", "deployment.apps/cert-manager-webhook",
"--for", "condition=Available",
"--namespace", "cert-manager",
"--timeout", "5m",
)
_, err := Run(cmd)
return err
}
// IsCertManagerCRDsInstalled checks if any Cert Manager CRDs are installed
// by verifying the existence of key CRDs related to Cert Manager.
func IsCertManagerCRDsInstalled() bool {
// List of common Cert Manager CRDs
certManagerCRDs := []string{
"certificates.cert-manager.io",
"issuers.cert-manager.io",
"clusterissuers.cert-manager.io",
"certificaterequests.cert-manager.io",
"orders.acme.cert-manager.io",
"challenges.acme.cert-manager.io",
}
// Execute the kubectl command to get all CRDs
cmd := exec.Command("kubectl", "get", "crds")
output, err := Run(cmd)
if err != nil {
return false
}
// Check if any of the Cert Manager CRDs are present
crdList := GetNonEmptyLines(output)
for _, crd := range certManagerCRDs {
for _, line := range crdList {
if strings.Contains(line, crd) {
return true
}
}
}
return false
}
// LoadImageToKindClusterWithName loads a local docker image to the kind cluster
func LoadImageToKindClusterWithName(name string) error {
cluster := defaultKindCluster
if v, ok := os.LookupEnv("KIND_CLUSTER"); ok {
cluster = v
}
kindOptions := []string{"load", "docker-image", name, "--name", cluster}
kindBinary := defaultKindBinary
if v, ok := os.LookupEnv("KIND"); ok {
kindBinary = v
}
cmd := exec.Command(kindBinary, kindOptions...)
_, err := Run(cmd)
return err
}
// GetNonEmptyLines converts given command output string into individual objects
// according to line breakers, and ignores the empty elements in it.
func GetNonEmptyLines(output string) []string {
var res []string
elements := strings.Split(output, "\n")
for _, element := range elements {
if element != "" {
res = append(res, element)
}
}
return res
}
// GetProjectDir will return the directory where the project is
func GetProjectDir() (string, error) {
wd, err := os.Getwd()
if err != nil {
return wd, fmt.Errorf("failed to get current working directory: %w", err)
}
wd = strings.ReplaceAll(wd, "/test/e2e", "")
return wd, nil
}
// UncommentCode searches for target in the file and removes the comment prefix
// of the target content. The target content may span multiple lines.
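// For example, UncommentCode(path, "#- patch1.yaml\n#- patch2.yaml", "#") rewrites the file
// with the leading "#" stripped from both lines of the matched block.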
func UncommentCode(filename, target, prefix string) error {
// false positive
// nolint:gosec
content, err := os.ReadFile(filename)
if err != nil {
return fmt.Errorf("failed to read file %q: %w", filename, err)
}
strContent := string(content)
idx := strings.Index(strContent, target)
if idx < 0 {
return fmt.Errorf("unable to find the code %q to be uncomment", target)
}
out := new(bytes.Buffer)
_, err = out.Write(content[:idx])
if err != nil {
return fmt.Errorf("failed to write to output: %w", err)
}
scanner := bufio.NewScanner(bytes.NewBufferString(target))
if !scanner.Scan() {
return nil
}
for {
if _, err = out.WriteString(strings.TrimPrefix(scanner.Text(), prefix)); err != nil {
return fmt.Errorf("failed to write to output: %w", err)
}
// Avoid writing a newline in case the previous line was the last in target.
if !scanner.Scan() {
break
}
if _, err = out.WriteString("\n"); err != nil {
return fmt.Errorf("failed to write to output: %w", err)
}
}
if _, err = out.Write(content[idx+len(target):]); err != nil {
return fmt.Errorf("failed to write to output: %w", err)
}
// false positive
// nolint:gosec
if err = os.WriteFile(filename, out.Bytes(), 0644); err != nil {
return fmt.Errorf("failed to write file %q: %w", filename, err)
}
return nil
}