Morten Olsen
2025-12-12 11:10:01 +01:00
commit 277fc459d5
64 changed files with 8625 additions and 0 deletions


@@ -0,0 +1,202 @@
/*
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"fmt"
"reflect"
networkingv1 "k8s.io/api/networking/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
logf "sigs.k8s.io/controller-runtime/pkg/log"
nucleiv1alpha1 "github.com/mortenolsen/nuclei-operator/api/v1alpha1"
)
// IngressReconciler reconciles Ingress objects and creates NucleiScan resources
type IngressReconciler struct {
client.Client
Scheme *runtime.Scheme
}
// +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch
// +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses/status,verbs=get
// +kubebuilder:rbac:groups=nuclei.homelab.mortenolsen.pro,resources=nucleiscans,verbs=get;list;watch;create;update;patch;delete
// Reconcile handles Ingress events and creates/updates corresponding NucleiScan resources
func (r *IngressReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := logf.FromContext(ctx)
// Fetch the Ingress resource
ingress := &networkingv1.Ingress{}
if err := r.Get(ctx, req.NamespacedName, ingress); err != nil {
if apierrors.IsNotFound(err) {
// Ingress was deleted - NucleiScan will be garbage collected via ownerReference
log.Info("Ingress not found, likely deleted")
return ctrl.Result{}, nil
}
log.Error(err, "Failed to get Ingress")
return ctrl.Result{}, err
}
// Extract target URLs from the Ingress
targets := extractURLsFromIngress(ingress)
if len(targets) == 0 {
log.Info("No targets extracted from Ingress, skipping NucleiScan creation")
return ctrl.Result{}, nil
}
// Define the NucleiScan name based on the Ingress name
nucleiScanName := fmt.Sprintf("%s-scan", ingress.Name)
// Check if a NucleiScan already exists for this Ingress
existingScan := &nucleiv1alpha1.NucleiScan{}
err := r.Get(ctx, client.ObjectKey{
Namespace: ingress.Namespace,
Name: nucleiScanName,
}, existingScan)
if err != nil && !apierrors.IsNotFound(err) {
log.Error(err, "Failed to get existing NucleiScan")
return ctrl.Result{}, err
}
if apierrors.IsNotFound(err) {
// Create a new NucleiScan
nucleiScan := &nucleiv1alpha1.NucleiScan{
ObjectMeta: metav1.ObjectMeta{
Name: nucleiScanName,
Namespace: ingress.Namespace,
},
Spec: nucleiv1alpha1.NucleiScanSpec{
SourceRef: nucleiv1alpha1.SourceReference{
APIVersion: "networking.k8s.io/v1",
Kind: "Ingress",
Name: ingress.Name,
Namespace: ingress.Namespace,
UID: string(ingress.UID),
},
Targets: targets,
},
}
// Set owner reference for garbage collection
if err := controllerutil.SetControllerReference(ingress, nucleiScan, r.Scheme); err != nil {
log.Error(err, "Failed to set owner reference on NucleiScan")
return ctrl.Result{}, err
}
if err := r.Create(ctx, nucleiScan); err != nil {
log.Error(err, "Failed to create NucleiScan")
return ctrl.Result{}, err
}
log.Info("Created NucleiScan for Ingress", "nucleiScan", nucleiScanName, "targets", targets)
return ctrl.Result{}, nil
}
// NucleiScan exists - check if targets need to be updated
if !reflect.DeepEqual(existingScan.Spec.Targets, targets) {
existingScan.Spec.Targets = targets
// Also update the SourceRef UID in case it changed (e.g., Ingress was recreated)
existingScan.Spec.SourceRef.UID = string(ingress.UID)
if err := r.Update(ctx, existingScan); err != nil {
log.Error(err, "Failed to update NucleiScan targets")
return ctrl.Result{}, err
}
log.Info("Updated NucleiScan targets for Ingress", "nucleiScan", nucleiScanName, "targets", targets)
}
return ctrl.Result{}, nil
}
// extractURLsFromIngress extracts target URLs from an Ingress resource
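// For illustration (hypothetical Ingress, not from this commit): with
// spec.tls listing "app.example.com" and rules for
//
//	app.example.com      (HTTP paths "/" and "/api")
//	internal.example.com (no HTTP section)
//
// the extracted targets are
//
//	https://app.example.com/
//	https://app.example.com/api
//	http://internal.example.com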
func extractURLsFromIngress(ingress *networkingv1.Ingress) []string {
var urls []string
tlsHosts := make(map[string]bool)
// Build a map of TLS hosts for quick lookup
for _, tls := range ingress.Spec.TLS {
for _, host := range tls.Hosts {
tlsHosts[host] = true
}
}
// Extract URLs from rules
for _, rule := range ingress.Spec.Rules {
if rule.Host == "" {
continue
}
// Determine the scheme based on TLS configuration
scheme := "http"
if tlsHosts[rule.Host] {
scheme = "https"
}
// If there are HTTP paths defined, create URLs for each path
if rule.HTTP != nil && len(rule.HTTP.Paths) > 0 {
for _, path := range rule.HTTP.Paths {
pathStr := path.Path
if pathStr == "" {
pathStr = "/"
}
url := fmt.Sprintf("%s://%s%s", scheme, rule.Host, pathStr)
urls = append(urls, url)
}
} else {
// No paths defined, just use the host
url := fmt.Sprintf("%s://%s", scheme, rule.Host)
urls = append(urls, url)
}
}
// Deduplicate URLs
return deduplicateStrings(urls)
}
// deduplicateStrings removes duplicate strings from a slice while preserving order
func deduplicateStrings(input []string) []string {
seen := make(map[string]bool)
result := make([]string, 0, len(input))
for _, s := range input {
if !seen[s] {
seen[s] = true
result = append(result, s)
}
}
return result
}
// SetupWithManager sets up the controller with the Manager
func (r *IngressReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&networkingv1.Ingress{}).
Owns(&nucleiv1alpha1.NucleiScan{}).
Named("ingress").
Complete(r)
}
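
For context, this reconciler is wired into a controller-runtime manager at startup. A minimal sketch, assuming the standard kubebuilder cmd/main.go layout (the commit's actual main.go is not shown in this excerpt, and the internal/controller import path is an assumption). The NucleiScan and VirtualService reconcilers below register the same way, with the NucleiScan reconciler additionally needing its Scanner field set:

package main

import (
	"os"

	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"

	nucleiv1alpha1 "github.com/mortenolsen/nuclei-operator/api/v1alpha1"
	"github.com/mortenolsen/nuclei-operator/internal/controller"
)

func main() {
	// Register both the built-in types and the NucleiScan CRD types.
	scheme := runtime.NewScheme()
	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
	utilruntime.Must(nucleiv1alpha1.AddToScheme(scheme))

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{Scheme: scheme})
	if err != nil {
		os.Exit(1)
	}
	if err := (&controller.IngressReconciler{
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
	}).SetupWithManager(mgr); err != nil {
		os.Exit(1)
	}
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		os.Exit(1)
	}
}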


@@ -0,0 +1,413 @@
/*
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"fmt"
"time"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
logf "sigs.k8s.io/controller-runtime/pkg/log"
nucleiv1alpha1 "github.com/mortenolsen/nuclei-operator/api/v1alpha1"
"github.com/mortenolsen/nuclei-operator/internal/scanner"
)
const (
// finalizerName is the finalizer used by this controller
finalizerName = "nuclei.homelab.mortenolsen.pro/finalizer"
// Default requeue intervals
defaultRequeueAfter = 30 * time.Second
defaultScheduleRequeue = 1 * time.Minute
defaultErrorRequeueAfter = 1 * time.Minute
)
// Condition types for NucleiScan
const (
ConditionTypeReady = "Ready"
ConditionTypeScanActive = "ScanActive"
)
// Condition reasons
const (
ReasonScanPending = "ScanPending"
ReasonScanRunning = "ScanRunning"
ReasonScanCompleted = "ScanCompleted"
ReasonScanFailed = "ScanFailed"
ReasonScanSuspended = "ScanSuspended"
)
// NucleiScanReconciler reconciles a NucleiScan object
type NucleiScanReconciler struct {
client.Client
Scheme *runtime.Scheme
Scanner scanner.Scanner
}
// +kubebuilder:rbac:groups=nuclei.homelab.mortenolsen.pro,resources=nucleiscans,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=nuclei.homelab.mortenolsen.pro,resources=nucleiscans/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=nuclei.homelab.mortenolsen.pro,resources=nucleiscans/finalizers,verbs=update
// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
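//
// The reconciler drives each NucleiScan through a small phase machine; scans
// run synchronously inside Reconcile, so Running is transient:
//
//	"" (new) -> Pending -> Running -> Completed --(schedule due / spec change)--> Pending
//	                               \-> Failed ---(spec change)-----------------> Pending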
func (r *NucleiScanReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := logf.FromContext(ctx)
// Fetch the NucleiScan instance
nucleiScan := &nucleiv1alpha1.NucleiScan{}
if err := r.Get(ctx, req.NamespacedName, nucleiScan); err != nil {
// Resource not found, likely deleted
return ctrl.Result{}, client.IgnoreNotFound(err)
}
// Handle deletion
if !nucleiScan.DeletionTimestamp.IsZero() {
return r.handleDeletion(ctx, nucleiScan)
}
// Add finalizer if not present
if !controllerutil.ContainsFinalizer(nucleiScan, finalizerName) {
controllerutil.AddFinalizer(nucleiScan, finalizerName)
if err := r.Update(ctx, nucleiScan); err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{Requeue: true}, nil
}
// Check if scan is suspended
if nucleiScan.Spec.Suspend {
log.Info("Scan is suspended, skipping")
return r.updateCondition(ctx, nucleiScan, ConditionTypeReady, metav1.ConditionFalse,
ReasonScanSuspended, "Scan is suspended")
}
// Initialize status if empty
if nucleiScan.Status.Phase == "" {
nucleiScan.Status.Phase = nucleiv1alpha1.ScanPhasePending
nucleiScan.Status.ObservedGeneration = nucleiScan.Generation
if err := r.Status().Update(ctx, nucleiScan); err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{Requeue: true}, nil
}
// Handle based on current phase
switch nucleiScan.Status.Phase {
case nucleiv1alpha1.ScanPhasePending:
return r.handlePendingPhase(ctx, nucleiScan)
	case nucleiv1alpha1.ScanPhaseRunning:
		// Running should not normally be observed: scans execute synchronously
		// within a single Reconcile. If we do see it (e.g. the controller
		// restarted mid-scan), restart the scan.
		return r.handlePendingPhase(ctx, nucleiScan)
case nucleiv1alpha1.ScanPhaseCompleted:
return r.handleCompletedPhase(ctx, nucleiScan)
case nucleiv1alpha1.ScanPhaseFailed:
return r.handleFailedPhase(ctx, nucleiScan)
default:
log.Info("Unknown phase, resetting to Pending", "phase", nucleiScan.Status.Phase)
nucleiScan.Status.Phase = nucleiv1alpha1.ScanPhasePending
if err := r.Status().Update(ctx, nucleiScan); err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{Requeue: true}, nil
}
}
// handleDeletion handles the deletion of a NucleiScan resource
func (r *NucleiScanReconciler) handleDeletion(ctx context.Context, nucleiScan *nucleiv1alpha1.NucleiScan) (ctrl.Result, error) {
log := logf.FromContext(ctx)
if controllerutil.ContainsFinalizer(nucleiScan, finalizerName) {
log.Info("Handling deletion, performing cleanup")
// Perform any cleanup here (e.g., cancel running scans)
// In our synchronous implementation, there's nothing to clean up
// Remove finalizer
controllerutil.RemoveFinalizer(nucleiScan, finalizerName)
if err := r.Update(ctx, nucleiScan); err != nil {
return ctrl.Result{}, err
}
}
return ctrl.Result{}, nil
}
// handlePendingPhase handles the Pending phase - starts a new scan
func (r *NucleiScanReconciler) handlePendingPhase(ctx context.Context, nucleiScan *nucleiv1alpha1.NucleiScan) (ctrl.Result, error) {
log := logf.FromContext(ctx)
log.Info("Starting scan", "targets", len(nucleiScan.Spec.Targets))
// Update status to Running
now := metav1.Now()
nucleiScan.Status.Phase = nucleiv1alpha1.ScanPhaseRunning
nucleiScan.Status.LastScanTime = &now
nucleiScan.Status.LastError = ""
nucleiScan.Status.ObservedGeneration = nucleiScan.Generation
// Set condition
meta.SetStatusCondition(&nucleiScan.Status.Conditions, metav1.Condition{
Type: ConditionTypeScanActive,
Status: metav1.ConditionTrue,
Reason: ReasonScanRunning,
Message: "Scan is in progress",
LastTransitionTime: now,
})
if err := r.Status().Update(ctx, nucleiScan); err != nil {
return ctrl.Result{}, err
}
// Build scan options
options := scanner.ScanOptions{
Templates: nucleiScan.Spec.Templates,
Severity: nucleiScan.Spec.Severity,
Timeout: 30 * time.Minute, // Default timeout
}
// Execute the scan
result, err := r.Scanner.Scan(ctx, nucleiScan.Spec.Targets, options)
if err != nil {
log.Error(err, "Scan failed")
return r.handleScanError(ctx, nucleiScan, err)
}
// Update status with results
return r.handleScanSuccess(ctx, nucleiScan, result)
}
// handleScanSuccess updates the status after a successful scan
func (r *NucleiScanReconciler) handleScanSuccess(ctx context.Context, nucleiScan *nucleiv1alpha1.NucleiScan, result *scanner.ScanResult) (ctrl.Result, error) {
log := logf.FromContext(ctx)
log.Info("Scan completed successfully", "findings", len(result.Findings), "duration", result.Duration)
now := metav1.Now()
nucleiScan.Status.Phase = nucleiv1alpha1.ScanPhaseCompleted
nucleiScan.Status.CompletionTime = &now
nucleiScan.Status.Findings = result.Findings
nucleiScan.Status.Summary = &result.Summary
nucleiScan.Status.LastError = ""
// Set conditions
meta.SetStatusCondition(&nucleiScan.Status.Conditions, metav1.Condition{
Type: ConditionTypeScanActive,
Status: metav1.ConditionFalse,
Reason: ReasonScanCompleted,
Message: "Scan completed successfully",
LastTransitionTime: now,
})
message := fmt.Sprintf("Scan completed with %d findings", len(result.Findings))
meta.SetStatusCondition(&nucleiScan.Status.Conditions, metav1.Condition{
Type: ConditionTypeReady,
Status: metav1.ConditionTrue,
Reason: ReasonScanCompleted,
Message: message,
LastTransitionTime: now,
})
if err := r.Status().Update(ctx, nucleiScan); err != nil {
return ctrl.Result{}, err
}
// If there's a schedule, calculate next scan time
if nucleiScan.Spec.Schedule != "" {
return r.scheduleNextScan(ctx, nucleiScan)
}
return ctrl.Result{}, nil
}
// handleScanError updates the status after a failed scan
func (r *NucleiScanReconciler) handleScanError(ctx context.Context, nucleiScan *nucleiv1alpha1.NucleiScan, scanErr error) (ctrl.Result, error) {
now := metav1.Now()
nucleiScan.Status.Phase = nucleiv1alpha1.ScanPhaseFailed
nucleiScan.Status.CompletionTime = &now
nucleiScan.Status.LastError = scanErr.Error()
// Set conditions
meta.SetStatusCondition(&nucleiScan.Status.Conditions, metav1.Condition{
Type: ConditionTypeScanActive,
Status: metav1.ConditionFalse,
Reason: ReasonScanFailed,
Message: scanErr.Error(),
LastTransitionTime: now,
})
meta.SetStatusCondition(&nucleiScan.Status.Conditions, metav1.Condition{
Type: ConditionTypeReady,
Status: metav1.ConditionFalse,
Reason: ReasonScanFailed,
Message: scanErr.Error(),
LastTransitionTime: now,
})
if err := r.Status().Update(ctx, nucleiScan); err != nil {
return ctrl.Result{}, err
}
// Requeue with backoff for retry
return ctrl.Result{RequeueAfter: defaultErrorRequeueAfter}, nil
}
// handleCompletedPhase handles the Completed phase - checks for scheduled rescans
func (r *NucleiScanReconciler) handleCompletedPhase(ctx context.Context, nucleiScan *nucleiv1alpha1.NucleiScan) (ctrl.Result, error) {
log := logf.FromContext(ctx)
// Check if spec has changed (new generation)
if nucleiScan.Generation != nucleiScan.Status.ObservedGeneration {
log.Info("Spec changed, triggering new scan")
nucleiScan.Status.Phase = nucleiv1alpha1.ScanPhasePending
if err := r.Status().Update(ctx, nucleiScan); err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{Requeue: true}, nil
}
// Check if there's a schedule
if nucleiScan.Spec.Schedule != "" {
return r.checkScheduledScan(ctx, nucleiScan)
}
return ctrl.Result{}, nil
}
// handleFailedPhase handles the Failed phase - implements retry logic
func (r *NucleiScanReconciler) handleFailedPhase(ctx context.Context, nucleiScan *nucleiv1alpha1.NucleiScan) (ctrl.Result, error) {
log := logf.FromContext(ctx)
// Check if spec has changed (new generation)
if nucleiScan.Generation != nucleiScan.Status.ObservedGeneration {
log.Info("Spec changed, triggering new scan")
nucleiScan.Status.Phase = nucleiv1alpha1.ScanPhasePending
if err := r.Status().Update(ctx, nucleiScan); err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{Requeue: true}, nil
}
// For now, don't auto-retry failed scans
// Users can trigger a retry by updating the spec
log.Info("Scan failed, waiting for manual intervention or spec change")
return ctrl.Result{}, nil
}
// scheduleNextScan calculates and sets the next scheduled scan time
func (r *NucleiScanReconciler) scheduleNextScan(ctx context.Context, nucleiScan *nucleiv1alpha1.NucleiScan) (ctrl.Result, error) {
log := logf.FromContext(ctx)
// Parse cron schedule
nextTime, err := getNextScheduleTime(nucleiScan.Spec.Schedule, time.Now())
	if err != nil {
		// An invalid schedule will not become valid on requeue; log it and
		// wait for a spec change instead of retrying
		log.Error(err, "Failed to parse schedule", "schedule", nucleiScan.Spec.Schedule)
		return ctrl.Result{}, nil
	}
nucleiScan.Status.NextScheduledTime = &metav1.Time{Time: nextTime}
if err := r.Status().Update(ctx, nucleiScan); err != nil {
return ctrl.Result{}, err
}
// Calculate requeue duration
requeueAfter := time.Until(nextTime)
if requeueAfter < 0 {
requeueAfter = defaultScheduleRequeue
}
log.Info("Scheduled next scan", "nextTime", nextTime, "requeueAfter", requeueAfter)
return ctrl.Result{RequeueAfter: requeueAfter}, nil
}
// checkScheduledScan checks if it's time for a scheduled scan
func (r *NucleiScanReconciler) checkScheduledScan(ctx context.Context, nucleiScan *nucleiv1alpha1.NucleiScan) (ctrl.Result, error) {
log := logf.FromContext(ctx)
if nucleiScan.Status.NextScheduledTime == nil {
// No next scheduled time set, calculate it
return r.scheduleNextScan(ctx, nucleiScan)
}
now := time.Now()
nextTime := nucleiScan.Status.NextScheduledTime.Time
if now.After(nextTime) {
log.Info("Scheduled scan time reached, triggering scan")
nucleiScan.Status.Phase = nucleiv1alpha1.ScanPhasePending
nucleiScan.Status.NextScheduledTime = nil
if err := r.Status().Update(ctx, nucleiScan); err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{Requeue: true}, nil
}
// Not yet time, requeue until scheduled time
requeueAfter := time.Until(nextTime)
return ctrl.Result{RequeueAfter: requeueAfter}, nil
}
// updateCondition is a helper to update a condition and return a result
func (r *NucleiScanReconciler) updateCondition(ctx context.Context, nucleiScan *nucleiv1alpha1.NucleiScan,
condType string, status metav1.ConditionStatus, reason, message string) (ctrl.Result, error) {
meta.SetStatusCondition(&nucleiScan.Status.Conditions, metav1.Condition{
Type: condType,
Status: status,
Reason: reason,
Message: message,
LastTransitionTime: metav1.Now(),
})
if err := r.Status().Update(ctx, nucleiScan); err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
// getNextScheduleTime parses a cron expression and returns the next scheduled time
// This is a simplified implementation - for production, consider using a proper cron library
func getNextScheduleTime(schedule string, from time.Time) (time.Time, error) {
// Simple implementation for common intervals
// Format: "@every <duration>" or standard cron
	if strings.HasPrefix(schedule, "@every ") {
		duration, err := time.ParseDuration(strings.TrimPrefix(schedule, "@every "))
if err != nil {
return time.Time{}, fmt.Errorf("invalid duration in schedule: %w", err)
}
return from.Add(duration), nil
}
	// Standard cron expressions would need a proper parser library (see the
	// sketch below); for now, fall back to a 24-hour interval for anything
	// that is not "@every <duration>"
return from.Add(24 * time.Hour), nil
}
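// For standard five-field cron expressions, a parser library is the right
// tool. A minimal sketch, assuming github.com/robfig/cron/v3 were added to
// go.mod (kept as a comment so this file compiles unchanged):
//
//	sched, err := cron.ParseStandard(schedule) // also handles "@every <duration>"
//	if err != nil {
//		return time.Time{}, fmt.Errorf("invalid schedule %q: %w", schedule, err)
//	}
//	return sched.Next(from), nil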
// SetupWithManager sets up the controller with the Manager.
func (r *NucleiScanReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&nucleiv1alpha1.NucleiScan{}).
Named("nucleiscan").
Complete(r)
}
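
The scanner package is referenced above but not shown in this hunk. From the call sites, its contract is roughly the following. This is an inferred sketch, not the actual file; the Finding and ScanSummary type names are assumptions based on how Status.Findings and Status.Summary are populated, and the []string field types are assumptions as well:

package scanner

import (
	"context"
	"time"

	nucleiv1alpha1 "github.com/mortenolsen/nuclei-operator/api/v1alpha1"
)

// ScanOptions mirrors the fields populated in handlePendingPhase.
type ScanOptions struct {
	Templates []string // template names/paths passed through from the spec (assumed []string)
	Severity  []string // severity filter passed through from the spec (assumed []string)
	Timeout   time.Duration
}

// ScanResult mirrors the fields read in handleScanSuccess.
type ScanResult struct {
	Findings []nucleiv1alpha1.Finding   // assumed type name
	Summary  nucleiv1alpha1.ScanSummary // assumed type name; stored as &result.Summary
	Duration time.Duration
}

// Scanner runs a scan against the given targets and blocks until it finishes.
type Scanner interface {
	Scan(ctx context.Context, targets []string, options ScanOptions) (*ScanResult, error)
}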


@@ -0,0 +1,84 @@
/*
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
nucleiv1alpha1 "github.com/mortenolsen/nuclei-operator/api/v1alpha1"
)
var _ = Describe("NucleiScan Controller", func() {
Context("When reconciling a resource", func() {
const resourceName = "test-resource"
ctx := context.Background()
typeNamespacedName := types.NamespacedName{
Name: resourceName,
Namespace: "default", // TODO(user):Modify as needed
}
nucleiscan := &nucleiv1alpha1.NucleiScan{}
BeforeEach(func() {
By("creating the custom resource for the Kind NucleiScan")
err := k8sClient.Get(ctx, typeNamespacedName, nucleiscan)
if err != nil && errors.IsNotFound(err) {
resource := &nucleiv1alpha1.NucleiScan{
ObjectMeta: metav1.ObjectMeta{
Name: resourceName,
Namespace: "default",
},
// TODO(user): Specify other spec details if needed.
}
Expect(k8sClient.Create(ctx, resource)).To(Succeed())
}
})
AfterEach(func() {
// TODO(user): Cleanup logic after each test, like removing the resource instance.
resource := &nucleiv1alpha1.NucleiScan{}
err := k8sClient.Get(ctx, typeNamespacedName, resource)
Expect(err).NotTo(HaveOccurred())
By("Cleanup the specific resource instance NucleiScan")
Expect(k8sClient.Delete(ctx, resource)).To(Succeed())
})
It("should successfully reconcile the resource", func() {
By("Reconciling the created resource")
controllerReconciler := &NucleiScanReconciler{
Client: k8sClient,
Scheme: k8sClient.Scheme(),
}
_, err := controllerReconciler.Reconcile(ctx, reconcile.Request{
NamespacedName: typeNamespacedName,
})
Expect(err).NotTo(HaveOccurred())
// TODO(user): Add more specific assertions depending on your controller's reconciliation logic.
// Example: If you expect a certain status condition after reconciliation, verify it here.
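			// For instance (illustrative): the first reconcile of a fresh resource adds
			// the controller's finalizer, which could be verified with:
			//
			//	updated := &nucleiv1alpha1.NucleiScan{}
			//	Expect(k8sClient.Get(ctx, typeNamespacedName, updated)).To(Succeed())
			//	Expect(updated.Finalizers).To(ContainElement(finalizerName))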
})
})
})


@@ -0,0 +1,116 @@
/*
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"os"
"path/filepath"
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
nucleiv1alpha1 "github.com/mortenolsen/nuclei-operator/api/v1alpha1"
// +kubebuilder:scaffold:imports
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
var (
ctx context.Context
cancel context.CancelFunc
testEnv *envtest.Environment
cfg *rest.Config
k8sClient client.Client
)
func TestControllers(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Controller Suite")
}
var _ = BeforeSuite(func() {
logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
ctx, cancel = context.WithCancel(context.TODO())
var err error
err = nucleiv1alpha1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
// +kubebuilder:scaffold:scheme
By("bootstrapping test environment")
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")},
ErrorIfCRDPathMissing: true,
}
// Retrieve the first found binary directory to allow running tests from IDEs
if getFirstFoundEnvTestBinaryDir() != "" {
testEnv.BinaryAssetsDirectory = getFirstFoundEnvTestBinaryDir()
}
// cfg is defined in this file globally.
cfg, err = testEnv.Start()
Expect(err).NotTo(HaveOccurred())
Expect(cfg).NotTo(BeNil())
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
Expect(err).NotTo(HaveOccurred())
Expect(k8sClient).NotTo(BeNil())
})
var _ = AfterSuite(func() {
By("tearing down the test environment")
cancel()
err := testEnv.Stop()
Expect(err).NotTo(HaveOccurred())
})
// getFirstFoundEnvTestBinaryDir locates the first envtest binary-assets directory under bin/k8s.
// ENVTEST-based tests depend on specific binaries, usually located in paths set by
// controller-runtime. When running tests directly (e.g., via an IDE) without using
// Makefile targets, the 'BinaryAssetsDirectory' must be explicitly configured.
//
// This function streamlines the process by finding the required binaries, similar to
// setting the 'KUBEBUILDER_ASSETS' environment variable. To ensure the binaries are
// properly set up, run 'make setup-envtest' beforehand.
func getFirstFoundEnvTestBinaryDir() string {
basePath := filepath.Join("..", "..", "bin", "k8s")
entries, err := os.ReadDir(basePath)
if err != nil {
logf.Log.Error(err, "Failed to read directory", "path", basePath)
return ""
}
for _, entry := range entries {
if entry.IsDir() {
return filepath.Join(basePath, entry.Name())
}
}
return ""
}


@@ -0,0 +1,223 @@
/*
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"fmt"
"reflect"
"strings"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
logf "sigs.k8s.io/controller-runtime/pkg/log"
istionetworkingv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1"
nucleiv1alpha1 "github.com/mortenolsen/nuclei-operator/api/v1alpha1"
)
// VirtualServiceReconciler reconciles VirtualService objects and creates NucleiScan resources
type VirtualServiceReconciler struct {
client.Client
Scheme *runtime.Scheme
}
// +kubebuilder:rbac:groups=networking.istio.io,resources=virtualservices,verbs=get;list;watch
// +kubebuilder:rbac:groups=networking.istio.io,resources=virtualservices/status,verbs=get
// +kubebuilder:rbac:groups=nuclei.homelab.mortenolsen.pro,resources=nucleiscans,verbs=get;list;watch;create;update;patch;delete
// Reconcile handles VirtualService events and creates/updates corresponding NucleiScan resources
func (r *VirtualServiceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := logf.FromContext(ctx)
// Fetch the VirtualService resource
virtualService := &istionetworkingv1beta1.VirtualService{}
if err := r.Get(ctx, req.NamespacedName, virtualService); err != nil {
if apierrors.IsNotFound(err) {
// VirtualService was deleted - NucleiScan will be garbage collected via ownerReference
log.Info("VirtualService not found, likely deleted")
return ctrl.Result{}, nil
}
log.Error(err, "Failed to get VirtualService")
return ctrl.Result{}, err
}
// Extract target URLs from the VirtualService
targets := extractURLsFromVirtualService(virtualService)
if len(targets) == 0 {
log.Info("No targets extracted from VirtualService, skipping NucleiScan creation")
return ctrl.Result{}, nil
}
// Define the NucleiScan name based on the VirtualService name
nucleiScanName := fmt.Sprintf("%s-scan", virtualService.Name)
// Check if a NucleiScan already exists for this VirtualService
existingScan := &nucleiv1alpha1.NucleiScan{}
err := r.Get(ctx, client.ObjectKey{
Namespace: virtualService.Namespace,
Name: nucleiScanName,
}, existingScan)
if err != nil && !apierrors.IsNotFound(err) {
log.Error(err, "Failed to get existing NucleiScan")
return ctrl.Result{}, err
}
if apierrors.IsNotFound(err) {
// Create a new NucleiScan
nucleiScan := &nucleiv1alpha1.NucleiScan{
ObjectMeta: metav1.ObjectMeta{
Name: nucleiScanName,
Namespace: virtualService.Namespace,
},
Spec: nucleiv1alpha1.NucleiScanSpec{
SourceRef: nucleiv1alpha1.SourceReference{
APIVersion: "networking.istio.io/v1beta1",
Kind: "VirtualService",
Name: virtualService.Name,
Namespace: virtualService.Namespace,
UID: string(virtualService.UID),
},
Targets: targets,
},
}
// Set owner reference for garbage collection
if err := controllerutil.SetControllerReference(virtualService, nucleiScan, r.Scheme); err != nil {
log.Error(err, "Failed to set owner reference on NucleiScan")
return ctrl.Result{}, err
}
if err := r.Create(ctx, nucleiScan); err != nil {
log.Error(err, "Failed to create NucleiScan")
return ctrl.Result{}, err
}
log.Info("Created NucleiScan for VirtualService", "nucleiScan", nucleiScanName, "targets", targets)
return ctrl.Result{}, nil
}
// NucleiScan exists - check if targets need to be updated
if !reflect.DeepEqual(existingScan.Spec.Targets, targets) {
existingScan.Spec.Targets = targets
// Also update the SourceRef UID in case it changed (e.g., VirtualService was recreated)
existingScan.Spec.SourceRef.UID = string(virtualService.UID)
if err := r.Update(ctx, existingScan); err != nil {
log.Error(err, "Failed to update NucleiScan targets")
return ctrl.Result{}, err
}
log.Info("Updated NucleiScan targets for VirtualService", "nucleiScan", nucleiScanName, "targets", targets)
}
return ctrl.Result{}, nil
}
// extractURLsFromVirtualService extracts target URLs from a VirtualService resource
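// For illustration (hypothetical VirtualService, not from this commit): with
//
//	gateways: ["istio-system/public-gateway"]
//	hosts:    ["shop.example.com", "reviews", "*.example.com"]
//	http:     matches with URI prefixes "/checkout" and "/cart"
//
// the extracted targets are
//
//	https://shop.example.com/checkout
//	https://shop.example.com/cart
//
// ("reviews" has no dot and "*.example.com" is a wildcard, so both are skipped).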
func extractURLsFromVirtualService(vs *istionetworkingv1beta1.VirtualService) []string {
var urls []string
// Check if VirtualService has gateways defined (indicates external traffic)
// If no gateways or only "mesh" gateway, it's internal service-to-service
hasExternalGateway := false
for _, gw := range vs.Spec.Gateways {
if gw != "mesh" {
hasExternalGateway = true
break
}
}
	// If there is no external gateway, skip this VirtualService. A
	// VirtualService with no gateways listed defaults to the "mesh" gateway,
	// so treat that as internal too.
	if !hasExternalGateway {
		return urls
	}
// Extract URLs from hosts
for _, host := range vs.Spec.Hosts {
		// Skip wildcard hosts
if strings.HasPrefix(host, "*") {
continue
}
		// Skip bare internal service names; external hosts are FQDNs like "myapp.example.com"
if !strings.Contains(host, ".") {
continue
}
		// Skip Kubernetes internal service FQDNs (e.g. *.svc.cluster.local)
		if strings.Contains(host, ".svc.") {
continue
}
// Default to HTTPS for external hosts (security scanning)
scheme := "https"
// Extract paths from HTTP routes if defined
pathsFound := false
if vs.Spec.Http != nil {
for _, httpRoute := range vs.Spec.Http {
if httpRoute.Match != nil {
for _, match := range httpRoute.Match {
if match.Uri != nil {
if match.Uri.GetPrefix() != "" {
url := fmt.Sprintf("%s://%s%s", scheme, host, match.Uri.GetPrefix())
urls = append(urls, url)
pathsFound = true
} else if match.Uri.GetExact() != "" {
url := fmt.Sprintf("%s://%s%s", scheme, host, match.Uri.GetExact())
urls = append(urls, url)
pathsFound = true
} else if match.Uri.GetRegex() != "" {
// For regex patterns, just use the base URL
// We can't enumerate all possible matches
url := fmt.Sprintf("%s://%s", scheme, host)
urls = append(urls, url)
pathsFound = true
}
}
}
}
}
}
// If no specific paths found, add base URL
if !pathsFound {
url := fmt.Sprintf("%s://%s", scheme, host)
urls = append(urls, url)
}
}
// Deduplicate URLs
return deduplicateStrings(urls)
}
// SetupWithManager sets up the controller with the Manager
func (r *VirtualServiceReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&istionetworkingv1beta1.VirtualService{}).
Owns(&nucleiv1alpha1.NucleiScan{}).
Named("virtualservice").
Complete(r)
}