# Mirror of https://github.com/morten-olsen/homelab-operator.git
# Synced 2026-02-08 01:36:28 +01:00 (53 lines, 1.4 KiB, YAML)
# Helm template: Deployment running a single Ollama inference server.
# The release name is reused as the resource name, the app label, and the
# PVC name suffix, so one release yields one self-contained Ollama stack.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: '{{ .Release.Name }}'
  labels:
    app: '{{ .Release.Name }}'
spec:
  # Single replica: the model store is a single RWO PVC (see volumes below),
  # so scaling out would require a shared or per-replica volume strategy.
  replicas: 1
  selector:
    matchLabels:
      app: '{{ .Release.Name }}'
  template:
    metadata:
      labels:
        app: '{{ .Release.Name }}'
    spec:
      containers:
        - name: ollama
          # NOTE(review): Ollama publishes its official image as
          # docker.io/ollama/ollama — verify that this ghcr.io path exists
          # before relying on it. Pinning a version tag instead of `latest`
          # is recommended for reproducible rollouts.
          image: ghcr.io/ollama/ollama:latest
          imagePullPolicy: IfNotPresent
          ports:
            # Ollama's default HTTP API port.
            - containerPort: 11434
              name: http
          volumeMounts:
            # Model blobs and manifests live under /root/.ollama; persisting
            # them avoids re-downloading models on every pod restart.
            - name: ollama-data
              mountPath: /root/.ollama
          # NOTE(review): the stock ollama image does not read an
          # OLLAMA_MODEL env var — pre-pulling a model needs a postStart
          # lifecycle hook or an initContainer running `ollama pull <model>`.
          # Kept here (commented) as a placeholder; use an Ollama model name
          # such as "llama3.2", not an OpenAI model name.
          # env:
          #   - name: OLLAMA_MODEL
          #     value: "llama3.2"
          readinessProbe:
            httpGet:
              scheme: HTTP
              # Fixed: /api/status is not an Ollama endpoint (the probe would
              # 404 and the pod would never become Ready). /api/version is a
              # lightweight endpoint that returns 200 once the server is up.
              path: /api/version
              port: 11434
            initialDelaySeconds: 5
            periodSeconds: 10
            successThreshold: 1
            failureThreshold: 3
          resources:
            requests:
              cpu: 500m
              memory: 1Gi
            limits:
              cpu: 2000m
              # 4Gi fits small models only; larger models need more memory.
              memory: 4Gi
      volumes:
        - name: ollama-data
          persistentVolumeClaim:
            # PVC is expected to be created elsewhere in this chart
            # (named "<release>-data").
            claimName: '{{ .Release.Name }}-data'