2 Commits

Morten Olsen
797e75b703 fix: preserve historical chart versions in Helm repository
The Helm repository workflow was overwriting all previous chart versions
on each release, making it impossible for users to install older versions.

Changes:
- Download existing index.yaml from GitHub Pages before publishing
- Download all previously published chart packages
- Use 'helm repo index --merge' to preserve historical versions (see the sketch below)
- Users can now install any previously released version
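
A minimal sketch of the merge step this describes, as a workflow fragment; the PAGES_URL variable and the .deploy staging directory are illustrative, not the workflow's actual names:

```yaml
- name: Publish chart (preserving history)
  run: |
    mkdir -p .deploy
    # Pull the currently published index (and previously published .tgz packages)
    # so that regenerating the index does not drop old versions.
    # First-release bootstrap (no existing index yet) is omitted here.
    curl -fsSL "$PAGES_URL/index.yaml" -o .deploy/previous-index.yaml
    helm package charts/nuclei-operator -d .deploy
    # --merge folds the new package into the existing index instead of rebuilding it.
    helm repo index .deploy --url "$PAGES_URL" --merge .deploy/previous-index.yaml
```
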
2025-12-12 20:54:21 +01:00
Morten Olsen
335689da22 feat: implement pod-based scanning architecture
This major refactor moves from synchronous subprocess-based scanning to
asynchronous pod-based scanning using Kubernetes Jobs.

## Architecture Changes
- Scanner jobs are now Kubernetes Jobs with ttlSecondsAfterFinished set for automatic cleanup
- Jobs have owner references for garbage collection when the NucleiScan is deleted
- Configurable concurrency limits, timeouts, and resource requirements

## New Features
- Dual-mode binary: --mode=controller (default) or --mode=scanner (sketched after this list)
- Annotation-based configuration for Ingress/VirtualService resources
- Operator-level configuration via environment variables
- Startup recovery for orphaned scans after operator restart
- Periodic cleanup of stuck jobs
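
A minimal sketch of the dual-mode entrypoint, assuming hypothetical runController/runScanner helpers; the real wiring lives in cmd/main.go and internal/scanner/runner.go:

```go
package main

import (
	"flag"
	"fmt"
	"os"
)

// Stand-ins for the real entrypoints (controller manager and nuclei runner).
func runController() { /* start the controller-runtime manager */ }
func runScanner()    { /* clone templates, run nuclei, report results */ }

func main() {
	// --mode selects which half of the binary runs; controller is the default.
	mode := flag.String("mode", "controller", "run as 'controller' or 'scanner'")
	flag.Parse()

	switch *mode {
	case "controller":
		runController()
	case "scanner":
		runScanner()
	default:
		fmt.Fprintf(os.Stderr, "unknown mode %q\n", *mode)
		os.Exit(1)
	}
}
```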

## New Files
- DESIGN.md: Comprehensive architecture design document
- internal/jobmanager/: Job Manager for creating/monitoring scanner jobs
- internal/scanner/runner.go: Scanner mode implementation
- internal/annotations/: Annotation parsing utilities
- charts/nuclei-operator/templates/scanner-rbac.yaml: Scanner RBAC

## API Changes
- Added ScannerConfig struct for per-scan scanner configuration (both new structs are sketched after this list)
- Added JobReference struct for tracking scanner jobs
- Added ScannerConfig field to NucleiScanSpec
- Added JobRef and ScanStartTime fields to NucleiScanStatus
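
A sketch of the two structs, reconstructed from the CRD schema in the diffs below; field names match the CRD, while the json tags and optionality are assumptions:

```go
package v1alpha1

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// ScannerConfig allows overriding scanner settings for a single scan.
type ScannerConfig struct {
	Image        string                      `json:"image,omitempty"`
	NodeSelector map[string]string           `json:"nodeSelector,omitempty"`
	Resources    corev1.ResourceRequirements `json:"resources,omitempty"`
	TemplateURLs []string                    `json:"templateURLs,omitempty"`
	Timeout      string                      `json:"timeout,omitempty"`
	Tolerations  []corev1.Toleration         `json:"tolerations,omitempty"`
}

// JobReference tracks the scanner Job created for a NucleiScan.
type JobReference struct {
	Name      string       `json:"name"`
	Namespace string       `json:"namespace"`
	UID       string       `json:"uid"`
	PodName   string       `json:"podName,omitempty"`
	StartTime *metav1.Time `json:"startTime,omitempty"`
}
```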

## Supported Annotations
- nuclei.homelab.mortenolsen.pro/enabled
- nuclei.homelab.mortenolsen.pro/templates
- nuclei.homelab.mortenolsen.pro/severity
- nuclei.homelab.mortenolsen.pro/schedule
- nuclei.homelab.mortenolsen.pro/timeout
- nuclei.homelab.mortenolsen.pro/scanner-image
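
An annotated Ingress might look like this (the host, name, and annotation values are illustrative):

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: my-app
  annotations:
    nuclei.homelab.mortenolsen.pro/enabled: "true"
    nuclei.homelab.mortenolsen.pro/severity: "medium,high,critical"
    nuclei.homelab.mortenolsen.pro/schedule: "0 2 * * *"
    nuclei.homelab.mortenolsen.pro/timeout: "30m"
spec:
  rules:
    - host: app.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: my-app
                port:
                  number: 80
```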

## RBAC Updates
- Added Job and Pod permissions for operator
- Created separate scanner service account with minimal permissions
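
A sketch of what the scanner RBAC could look like; the names follow the 'nuclei-scanner' default from cmd/main.go, but the rules are illustrative rather than a copy of scanner-rbac.yaml:

```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nuclei-scanner
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: nuclei-scanner
rules:
  # Illustrative: let the scanner report results back on its NucleiScan.
  - apiGroups: ["nuclei.homelab.mortenolsen.pro"]
    resources: ["nucleiscans", "nucleiscans/status"]
    verbs: ["get", "update", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: nuclei-scanner
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: nuclei-scanner
subjects:
  - kind: ServiceAccount
    name: nuclei-scanner
```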

## Documentation
- Updated README, user-guide, api.md, and Helm chart README
- Added example annotated Ingress resources
2025-12-12 20:51:23 +01:00
10 changed files with 329 additions and 587 deletions

View File

@@ -87,7 +87,7 @@ jobs:
uses: docker/build-push-action@v6
with:
context: .
# platforms: linux/amd64,linux/arm64
platforms: linux/amd64,linux/arm64
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}

View File

@@ -21,28 +21,22 @@ COPY . .
# by leaving it empty we can ensure that the container and binary shipped on it will have the same platform.
RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager cmd/main.go
# Download and build nuclei binary
FROM golang:1.24 AS nuclei-builder
ARG TARGETOS
ARG TARGETARCH
# Install nuclei from source for the target architecture
RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} \
go install -v github.com/projectdiscovery/nuclei/v3/cmd/nuclei@latest
# Final image
FROM alpine:3.19 AS final
# Build arguments for nuclei version and architecture
ARG TARGETOS
ARG TARGETARCH
ARG NUCLEI_VERSION=3.6.0
# Install ca-certificates for HTTPS requests, curl for downloading, and create non-root user
RUN apk --no-cache add ca-certificates tzdata curl unzip && \
# Install ca-certificates for HTTPS requests and create non-root user
RUN apk --no-cache add ca-certificates tzdata && \
adduser -D -u 65532 -g 65532 nonroot
# Download prebuilt nuclei binary
# ProjectDiscovery uses different naming: linux_amd64, linux_arm64
RUN NUCLEI_ARCH=$(echo ${TARGETARCH} | sed 's/amd64/amd64/;s/arm64/arm64/') && \
curl -sSL "https://github.com/projectdiscovery/nuclei/releases/download/v${NUCLEI_VERSION}/nuclei_${NUCLEI_VERSION}_linux_${NUCLEI_ARCH}.zip" -o /tmp/nuclei.zip && \
unzip /tmp/nuclei.zip -d /tmp && \
mv /tmp/nuclei /usr/local/bin/nuclei && \
chmod +x /usr/local/bin/nuclei && \
rm -rf /tmp/nuclei.zip /tmp/nuclei && \
apk del curl unzip
# Create directories for nuclei
RUN mkdir -p /nuclei-templates /home/nonroot/.nuclei && \
chown -R 65532:65532 /nuclei-templates /home/nonroot
@@ -52,6 +46,9 @@ WORKDIR /
# Copy the manager binary
COPY --from=builder /workspace/manager .
# Copy nuclei binary
COPY --from=nuclei-builder /go/bin/nuclei /usr/local/bin/nuclei
# Set ownership
RUN chown 65532:65532 /manager /usr/local/bin/nuclei

View File

@@ -77,9 +77,6 @@ type JobReference struct {
// Name of the Job
Name string `json:"name"`
// Namespace of the Job (may differ from NucleiScan namespace)
Namespace string `json:"namespace"`
// UID of the Job
UID string `json:"uid"`

View File

@@ -5,6 +5,8 @@ metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.19.0
name: nucleiscans.nuclei.homelab.mortenolsen.pro
labels:
{{- include "nuclei-operator.labels" . | nindent 4 }}
spec:
group: nuclei.homelab.mortenolsen.pro
names:
@@ -55,127 +57,6 @@ spec:
spec:
description: NucleiScanSpec defines the desired state of NucleiScan
properties:
scannerConfig:
description: ScannerConfig allows overriding scanner settings for
this scan
properties:
image:
description: Image overrides the default scanner image
type: string
nodeSelector:
additionalProperties:
type: string
description: NodeSelector for scanner pod scheduling
type: object
resources:
description: Resources defines resource requirements for the scanner
pod
properties:
claims:
description: |-
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
This field depends on the
DynamicResourceAllocation feature gate.
This field is immutable. It can only be set for containers.
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
properties:
name:
description: |-
Name must match the name of one entry in pod.spec.resourceClaims of
the Pod where this field is used. It makes that resource available
inside a container.
type: string
request:
description: |-
Request is the name chosen for a request in the referenced claim.
If empty, everything from the claim is made available, otherwise
only the result of this request.
type: string
required:
- name
type: object
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: |-
Limits describes the maximum amount of compute resources allowed.
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
type: object
requests:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: |-
Requests describes the minimum amount of compute resources required.
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
otherwise to an implementation-defined value. Requests cannot exceed Limits.
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
type: object
type: object
templateURLs:
description: TemplateURLs specifies additional template repositories
to clone
items:
type: string
type: array
timeout:
description: Timeout overrides the default scan timeout
type: string
tolerations:
description: Tolerations for scanner pod scheduling
items:
description: |-
The pod this Toleration is attached to tolerates any taint that matches
the triple <key,value,effect> using the matching operator <operator>.
properties:
effect:
description: |-
Effect indicates the taint effect to match. Empty means match all taint effects.
When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
type: string
key:
description: |-
Key is the taint key that the toleration applies to. Empty means match all taint keys.
If the key is empty, operator must be Exists; this combination means to match all values and all keys.
type: string
operator:
description: |-
Operator represents a key's relationship to the value.
Valid operators are Exists and Equal. Defaults to Equal.
Exists is equivalent to wildcard for value, so that a pod can
tolerate all taints of a particular category.
type: string
tolerationSeconds:
description: |-
TolerationSeconds represents the period of time the toleration (which must be
of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
it is not set, which means tolerate the taint forever (do not evict). Zero and
negative values will be treated as 0 (evict immediately) by the system.
format: int64
type: integer
value:
description: |-
Value is the taint value the toleration matches to.
If the operator is Exists, the value should be empty, otherwise just a regular string.
type: string
type: object
type: array
type: object
schedule:
description: |-
Schedule for periodic rescanning in cron format
@@ -183,17 +64,12 @@ spec:
type: string
severity:
description: Severity filters scan results by severity level
enum:
- info
- low
- medium
- high
- critical
items:
type: string
type: array
sourceRef:
description: SourceRef references the Ingress or VirtualService being
description:
SourceRef references the Ingress or VirtualService being
scanned
properties:
apiVersion:
@@ -225,7 +101,8 @@ spec:
description: Suspend prevents scheduled scans from running
type: boolean
targets:
description: Targets is the list of URLs to scan, extracted from the
description:
Targets is the list of URLs to scan, extracted from the
source resource
items:
type: string
@@ -252,7 +129,8 @@ spec:
conditions:
description: Conditions represent the latest available observations
items:
description: Condition contains details for one aspect of the current
description:
Condition contains details for one aspect of the current
state of this API Resource.
properties:
lastTransitionTime:
@@ -320,7 +198,8 @@ spec:
description: Description provides details about the finding
type: string
extractedResults:
description: ExtractedResults contains any data extracted by
description:
ExtractedResults contains any data extracted by
the template
items:
type: string
@@ -329,7 +208,8 @@ spec:
description: Host that was scanned
type: string
matchedAt:
description: MatchedAt is the specific URL or endpoint where
description:
MatchedAt is the specific URL or endpoint where
the issue was found
type: string
metadata:
@@ -337,7 +217,8 @@ spec:
type: object
x-kubernetes-preserve-unknown-fields: true
reference:
description: Reference contains URLs to additional information
description:
Reference contains URLs to additional information
about the finding
items:
type: string
@@ -370,36 +251,12 @@ spec:
- timestamp
type: object
type: array
jobRef:
description: JobRef references the current or last scanner job
properties:
name:
description: Name of the Job
type: string
namespace:
description: Namespace of the Job (may differ from NucleiScan
namespace)
type: string
podName:
description: PodName is the name of the scanner pod (for log retrieval)
type: string
startTime:
description: StartTime when the job was created
format: date-time
type: string
uid:
description: UID of the Job
type: string
required:
- name
- namespace
- uid
type: object
lastError:
description: LastError contains the error message if the scan failed
type: string
lastRetryTime:
description: LastRetryTime is when the last availability check retry
description:
LastRetryTime is when the last availability check retry
occurred
format: date-time
type: string
@@ -408,12 +265,14 @@ spec:
format: date-time
type: string
nextScheduledTime:
description: NextScheduledTime is when the next scheduled scan will
description:
NextScheduledTime is when the next scheduled scan will
run
format: date-time
type: string
observedGeneration:
description: ObservedGeneration is the generation observed by the
description:
ObservedGeneration is the generation observed by the
controller
format: int64
type: integer
@@ -430,11 +289,6 @@ spec:
RetryCount tracks the number of consecutive availability check retries
Used for exponential backoff when waiting for targets
type: integer
scanStartTime:
description: ScanStartTime is when the scanner pod actually started
scanning
format: date-time
type: string
summary:
description: Summary provides aggregated scan statistics
properties:
@@ -445,11 +299,13 @@ spec:
findingsBySeverity:
additionalProperties:
type: integer
description: FindingsBySeverity breaks down findings by severity
description:
FindingsBySeverity breaks down findings by severity
level
type: object
targetsScanned:
description: TargetsScanned is the number of targets that were
description:
TargetsScanned is the number of targets that were
scanned
type: integer
totalFindings:

View File

@@ -70,8 +70,6 @@ spec:
value: {{ .Values.scanner.ttlAfterFinished | quote }}
- name: SCANNER_SERVICE_ACCOUNT
value: {{ include "nuclei-operator.fullname" . }}-scanner
- name: OPERATOR_NAMESPACE
value: {{ .Release.Namespace | quote }}
{{- if .Values.scanner.defaultTemplates }}
- name: DEFAULT_TEMPLATES
value: {{ join "," .Values.scanner.defaultTemplates | quote }}

View File

@@ -241,16 +241,6 @@ func main() {
scannerServiceAccount = "nuclei-scanner"
}
operatorNamespace := os.Getenv("OPERATOR_NAMESPACE")
if operatorNamespace == "" {
// Try to read from the downward API file
if data, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil {
operatorNamespace = string(data)
} else {
operatorNamespace = "nuclei-operator-system"
}
}
defaultTemplates := []string{}
if v := os.Getenv("DEFAULT_TEMPLATES"); v != "" {
defaultTemplates = strings.Split(v, ",")
@@ -269,7 +259,6 @@ func main() {
BackoffLimit: 2,
MaxConcurrent: maxConcurrentScans,
ServiceAccountName: scannerServiceAccount,
OperatorNamespace: operatorNamespace,
DefaultResources: jobmanager.DefaultConfig().DefaultResources,
DefaultTemplates: defaultTemplates,
DefaultSeverity: defaultSeverity,

View File

@@ -376,10 +376,6 @@ spec:
name:
description: Name of the Job
type: string
namespace:
description: Namespace of the Job (may differ from NucleiScan
namespace)
type: string
podName:
description: PodName is the name of the scanner pod (for log retrieval)
type: string
@@ -392,7 +388,6 @@ spec:
type: string
required:
- name
- namespace
- uid
type: object
lastError:

View File

@@ -257,13 +257,8 @@ func (r *NucleiScanReconciler) handleDeletion(ctx context.Context, nucleiScan *n
// Clean up any running scanner job
if nucleiScan.Status.JobRef != nil {
jobNamespace := nucleiScan.Status.JobRef.Namespace
if jobNamespace == "" {
// Fallback for backwards compatibility
jobNamespace = nucleiScan.Namespace
}
log.Info("Deleting scanner job", "job", nucleiScan.Status.JobRef.Name, "namespace", jobNamespace)
if err := r.JobManager.DeleteJob(ctx, nucleiScan.Status.JobRef.Name, jobNamespace); err != nil {
log.Info("Deleting scanner job", "job", nucleiScan.Status.JobRef.Name)
if err := r.JobManager.DeleteJob(ctx, nucleiScan.Status.JobRef.Name, nucleiScan.Namespace); err != nil {
if !apierrors.IsNotFound(err) {
log.Error(err, "Failed to delete scanner job", "job", nucleiScan.Status.JobRef.Name)
}
@@ -329,7 +324,6 @@ func (r *NucleiScanReconciler) handlePendingPhase(ctx context.Context, nucleiSca
nucleiScan.Status.Phase = nucleiv1alpha1.ScanPhaseRunning
nucleiScan.Status.JobRef = &nucleiv1alpha1.JobReference{
Name: job.Name,
Namespace: job.Namespace,
UID: string(job.UID),
StartTime: &now,
}
@@ -407,13 +401,8 @@ func (r *NucleiScanReconciler) handleRunningPhase(ctx context.Context, nucleiSca
return ctrl.Result{Requeue: true}, nil
}
// Get the job - use namespace from JobRef (may be different from scan namespace)
jobNamespace := nucleiScan.Status.JobRef.Namespace
if jobNamespace == "" {
// Fallback for backwards compatibility
jobNamespace = nucleiScan.Namespace
}
job, err := r.JobManager.GetJob(ctx, nucleiScan.Status.JobRef.Name, jobNamespace)
// Get the job
job, err := r.JobManager.GetJob(ctx, nucleiScan.Status.JobRef.Name, nucleiScan.Namespace)
if err != nil {
if apierrors.IsNotFound(err) {
logger.Info("Scanner job not found, resetting to Pending")

View File

@@ -82,9 +82,6 @@ type Config struct {
// ServiceAccountName is the service account to use for scanner pods
ServiceAccountName string
// OperatorNamespace is the namespace where the operator runs and where scanner jobs will be created
OperatorNamespace string
// DefaultResources are the default resource requirements for scanner pods
DefaultResources corev1.ResourceRequirements
@@ -104,7 +101,6 @@ func DefaultConfig() Config {
BackoffLimit: DefaultBackoffLimit,
MaxConcurrent: 5,
ServiceAccountName: "nuclei-scanner",
OperatorNamespace: "nuclei-operator-system",
DefaultResources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("100m"),
@@ -140,22 +136,14 @@ func (m *JobManager) CreateScanJob(ctx context.Context, scan *nucleiv1alpha1.Nuc
job := m.buildJob(scan)
// Only set owner reference if the job is in the same namespace as the scan
// Cross-namespace owner references are not allowed in Kubernetes
if job.Namespace == scan.Namespace {
// Set owner reference so the job is garbage collected when the scan is deleted
if err := controllerutil.SetControllerReference(scan, job, m.Scheme); err != nil {
return nil, fmt.Errorf("failed to set controller reference: %w", err)
}
}
// When job is in a different namespace (operator namespace), we rely on:
// 1. TTLSecondsAfterFinished for automatic cleanup of completed jobs
// 2. Labels (LabelScanName, LabelScanNamespace) to track which scan the job belongs to
// 3. CleanupOrphanedJobs to clean up jobs whose scans no longer exist
logger.Info("Creating scanner job",
"job", job.Name,
"jobNamespace", job.Namespace,
"scanNamespace", scan.Namespace,
"namespace", job.Namespace,
"image", job.Spec.Template.Spec.Containers[0].Image,
"targets", len(scan.Spec.Targets))
@@ -289,42 +277,15 @@ func (m *JobManager) CleanupOrphanedJobs(ctx context.Context) error {
}
for _, job := range jobList.Items {
// Check if the associated NucleiScan still exists using labels
scanName := job.Labels[LabelScanName]
scanNamespace := job.Labels[LabelScanNamespace]
if scanName != "" && scanNamespace != "" {
// Try to get the associated NucleiScan
scan := &nucleiv1alpha1.NucleiScan{}
err := m.Get(ctx, types.NamespacedName{Name: scanName, Namespace: scanNamespace}, scan)
if err != nil {
if apierrors.IsNotFound(err) {
// The scan no longer exists - delete the job
logger.Info("Deleting orphaned job (scan not found)",
"job", job.Name,
"namespace", job.Namespace,
"scanName", scanName,
"scanNamespace", scanNamespace)
if err := m.DeleteJob(ctx, job.Name, job.Namespace); err != nil && !apierrors.IsNotFound(err) {
logger.Error(err, "Failed to delete orphaned job", "job", job.Name)
}
continue
}
// Other error - log and continue
logger.Error(err, "Failed to check if scan exists", "scanName", scanName, "scanNamespace", scanNamespace)
continue
}
} else {
// Job doesn't have proper labels - check owner reference as fallback
// Check if owner reference exists and the owner still exists
ownerRef := metav1.GetControllerOf(&job)
if ownerRef == nil {
logger.Info("Deleting orphaned job without owner or labels", "job", job.Name, "namespace", job.Namespace)
logger.Info("Deleting orphaned job without owner", "job", job.Name, "namespace", job.Namespace)
if err := m.DeleteJob(ctx, job.Name, job.Namespace); err != nil && !apierrors.IsNotFound(err) {
logger.Error(err, "Failed to delete orphaned job", "job", job.Name)
}
continue
}
}
// Check if the job is stuck (running longer than 2x the timeout)
if job.Status.StartTime != nil {
@@ -344,18 +305,12 @@ func (m *JobManager) CleanupOrphanedJobs(ctx context.Context) error {
// buildJob creates a Job specification for the given NucleiScan
func (m *JobManager) buildJob(scan *nucleiv1alpha1.NucleiScan) *batchv1.Job {
// Generate a unique job name that includes the scan namespace to avoid collisions
jobName := fmt.Sprintf("nucleiscan-%s-%s-%d", scan.Namespace, scan.Name, time.Now().Unix())
// Generate a unique job name
jobName := fmt.Sprintf("nucleiscan-%s-%d", scan.Name, time.Now().Unix())
if len(jobName) > 63 {
jobName = jobName[:63]
}
// Determine the namespace for the job - use operator namespace if configured
jobNamespace := m.Config.OperatorNamespace
if jobNamespace == "" {
jobNamespace = scan.Namespace
}
// Determine the scanner image
image := m.Config.ScannerImage
if scan.Spec.ScannerConfig != nil && scan.Spec.ScannerConfig.Image != "" {
@@ -405,7 +360,7 @@ func (m *JobManager) buildJob(scan *nucleiv1alpha1.NucleiScan) *batchv1.Job {
job := &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: jobName,
Namespace: jobNamespace,
Namespace: scan.Namespace,
Labels: labels,
},
Spec: batchv1.JobSpec{

View File

@@ -43,22 +43,14 @@ func TestBuildJob(t *testing.T) {
job := manager.buildJob(scan)
// Verify job name prefix - should include scan namespace to avoid collisions
// Verify job name prefix
if len(job.Name) == 0 {
t.Error("Job name should not be empty")
}
// Verify namespace - job should be created in operator namespace
if job.Namespace != config.OperatorNamespace {
t.Errorf("Expected namespace '%s', got '%s'", config.OperatorNamespace, job.Namespace)
}
// Verify scan labels are set correctly for cross-namespace tracking
if job.Labels[LabelScanName] != scan.Name {
t.Errorf("Expected scan name label '%s', got '%s'", scan.Name, job.Labels[LabelScanName])
}
if job.Labels[LabelScanNamespace] != scan.Namespace {
t.Errorf("Expected scan namespace label '%s', got '%s'", scan.Namespace, job.Labels[LabelScanNamespace])
// Verify namespace
if job.Namespace != "default" {
t.Errorf("Expected namespace 'default', got '%s'", job.Namespace)
}
// Verify labels
@@ -123,29 +115,3 @@ func TestBuildJobWithCustomConfig(t *testing.T) {
t.Errorf("Expected deadline %d, got %d", expectedDeadline, *job.Spec.ActiveDeadlineSeconds)
}
}
func TestBuildJobInSameNamespace(t *testing.T) {
config := DefaultConfig()
// Clear operator namespace to test same-namespace behavior
config.OperatorNamespace = ""
manager := &JobManager{
Config: config,
}
scan := &nucleiv1alpha1.NucleiScan{
ObjectMeta: metav1.ObjectMeta{
Name: "test-scan",
Namespace: "my-namespace",
},
Spec: nucleiv1alpha1.NucleiScanSpec{
Targets: []string{"https://example.com"},
},
}
job := manager.buildJob(scan)
// Verify namespace - when operator namespace is empty, job should be in scan's namespace
if job.Namespace != scan.Namespace {
t.Errorf("Expected namespace '%s', got '%s'", scan.Namespace, job.Namespace)
}
}