!113 Merge branch 'main' into devcontainer-k8s
Merge pull request !113 from panshuxiao/devcontainer-k8s
Makefile: 19 lines changed
@@ -723,6 +723,18 @@ install: $(wildcard *.go)

.PHONY: build
build: frontend backend ## build everything

# New target: build binaries that keep debug information
.PHONY: build-debug
build-debug: frontend backend-debug

.PHONY: backend-debug
backend-debug: go-check generate-backend $(EXECUTABLE)-debug

$(EXECUTABLE)-debug: $(GO_SOURCES) $(TAGS_PREREQ)
	CGO_CFLAGS="$(CGO_CFLAGS)" $(GO) build $(GOFLAGS) $(EXTRA_GOFLAGS) -tags '$(TAGS)' -ldflags '$(LDFLAGS)' -o $@

.PHONY: frontend
frontend: $(WEBPACK_DEST) ## build frontend files

@@ -946,6 +958,13 @@ docker:
# support also build args docker build --build-arg GITEA_VERSION=v1.2.3 --build-arg TAGS="bindata sqlite sqlite_unlock_notify" .

# This endif closes the if at the top of the file

# New target: build the controller-manager
.PHONY: controller-manager
controller-manager: go-check
	@echo "Building controller-manager..."
	CGO_CFLAGS="$(CGO_CFLAGS)" $(GO) build $(GOFLAGS) $(EXTRA_GOFLAGS) -tags '$(TAGS)' -ldflags '-s -w $(LDFLAGS)' -o controller-manager modules/k8s/cmd/controller-manager/controller-manager.go

endif

# Disable parallel execution because it would break some targets that don't
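For reference, the new targets are invoked like the existing ones; a minimal usage sketch (binary names follow the recipes above):

```bash
# Debug build: same inputs as `make build`, but the backend is built as $(EXECUTABLE)-debug
make build-debug

# Build only the Kubernetes controller; the binary is written to ./controller-manager
make controller-manager
```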
docs/devcontainer-local-k8s.md: 280 lines (new file)
@@ -0,0 +1,280 @@
## Running DevContainer locally (Kubernetes + Istio)

Only three things are covered here: installing Istio 1.27.1, configuring app.ini by hand, and installing the Devcontainer CRD (embedded YAML below).

### 1) Install Istio 1.27.1 (pinned version)

```bash
ISTIO_VER=1.27.1
curl -L https://istio.io/downloadIstio | ISTIO_VERSION=${ISTIO_VER} sh -
export PATH="$PWD/istio-${ISTIO_VER}/bin:$PATH"

istioctl x precheck
istioctl install -y --set profile=default

kubectl -n istio-system get svc istio-ingressgateway -o wide
```

Note: in this project the WebTerminal uses HTTP/80 and enters the cluster through `istio-ingressgateway`, so there is no need to configure HTTPS right away.
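Before filling in `HOST` in the next step, it helps to know which address the ingress gateway is reachable on. A minimal sketch, assuming the default service created by `istioctl install` (on clusters without a LoadBalancer the external IP stays `<pending>`, and a node IP plus the HTTP NodePort is typically used instead):

```bash
# LoadBalancer address of the ingress gateway, if one was provisioned
kubectl -n istio-system get svc istio-ingressgateway \
  -o jsonpath='{.status.loadBalancer.ingress[0].ip}{"\n"}'

# Otherwise: pick a node IP and the NodePort that maps to port 80
kubectl get nodes -o wide
kubectl -n istio-system get svc istio-ingressgateway \
  -o jsonpath='{.spec.ports[?(@.port==80)].nodePort}{"\n"}'
```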
### 2) Configure app.ini locally by hand (a devstar not installed via helm will not write this automatically)

File: `devstar/custom/conf/app.ini`

```ini
[devstar.devcontainer]
NAMESPACE = default        # namespace in which the created devcontainers live
HOST = 192.168.23.138      # must match [server].DOMAIN
```

Restart the backend after saving so that the configuration is picked up.
### 3) Install the Devcontainer CRD (embedded YAML, apply directly)

Copy the manifest below and install it through standard input:

```bash
kubectl apply -f - <<'YAML'
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.16.1
  name: devcontainerapps.devcontainer.devstar.cn
spec:
  group: devcontainer.devstar.cn
  names:
    kind: DevcontainerApp
    listKind: DevcontainerAppList
    plural: devcontainerapps
    singular: devcontainerapp
  scope: Namespaced
  versions:
  - name: v1
    schema:
      openAPIV3Schema:
        description: DevcontainerApp is the Schema for the devcontainerapps API
        properties:
          apiVersion:
            description: |-
              APIVersion defines the versioned schema of this representation of an object.
              Servers should convert recognized schemas to the latest internal value, and
              may reject unrecognized values.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
            type: string
          kind:
            description: |-
              Kind is a string value representing the REST resource this object represents.
              Servers may infer this from the endpoint the client submits requests to.
              Cannot be updated.
              In CamelCase.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
            type: string
          metadata:
            type: object
          spec:
            description: DevcontainerAppSpec defines the desired state of DevcontainerApp
            properties:
              failedJobsHistoryLimit:
                description: |-
                  The number of failed finished jobs to retain.
                  This is a pointer to distinguish between explicit zero and not specified.
                format: int32
                minimum: 0
                type: integer
              service:
                description: ServiceSpec specifies Service for DevContainer
                properties:
                  extraPorts:
                    description: ExtraPorts defines additional port configurations
                    items:
                      description: ExtraPortSpec defines one additional port configuration
                      properties:
                        containerPort:
                          description: ContainerPort is the port number inside the container
                          maximum: 65535
                          minimum: 1
                          type: integer
                        name:
                          description: Name is the name of the port
                          type: string
                        servicePort:
                          description: ServicePort is the port exposed by the Service
                          maximum: 65535
                          minimum: 1
                          type: integer
                      required:
                      - containerPort
                      - servicePort
                      type: object
                    type: array
                  nodePort:
                    maximum: 32767
                    minimum: 30000
                    type: integer
                  servicePort:
                    minimum: 1
                    type: integer
                type: object
              startingDeadlineSeconds:
                description: |-
                  Optional deadline in seconds for starting the job if it misses scheduled
                  time for any reason. Missed jobs executions will be counted as failed ones.
                format: int64
                minimum: 0
                type: integer
              statefulset:
                description: StatefulSetSpec specifies StatefulSet for DevContainer
                properties:
                  command:
                    items:
                      type: string
                    type: array
                  containerPort:
                    minimum: 1
                    type: integer
                  gitRepositoryURL:
                    type: string
                  image:
                    type: string
                  sshPublicKeyList:
                    description: At least one SSH public key is required to pass validation
                    items:
                      type: string
                    minItems: 1
                    type: array
                required:
                - command
                - gitRepositoryURL
                - image
                - sshPublicKeyList
                type: object
              successfulJobsHistoryLimit:
                description: |-
                  The number of successful finished jobs to retain.
                  This is a pointer to distinguish between explicit zero and not specified.
                format: int32
                minimum: 0
                type: integer
              suspend:
                description: |-
                  This flag tells the controller to suspend subsequent executions, it does
                  not apply to already started executions. Defaults to false.
                type: boolean
            required:
            - statefulset
            type: object
          status:
            description: DevcontainerAppStatus defines the observed state of DevcontainerApp
            properties:
              active:
                description: A list of pointers to currently running jobs.
                items:
                  description: ObjectReference contains enough information to let
                    you inspect or modify the referred object.
                  properties:
                    apiVersion:
                      description: API version of the referent.
                      type: string
                    fieldPath:
                      description: |-
                        If referring to a piece of an object instead of an entire object, this string
                        should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
                        For example, if the object reference is to a container within a pod, this would take on a value like:
                        "spec.containers{name}" (where "name" refers to the name of the container that triggered
                        the event) or if no container name is specified "spec.containers[2]" (container with
                        index 2 in this pod). This syntax is chosen only to have some well-defined way of
                        referencing a part of an object.
                      type: string
                    kind:
                      description: |-
                        Kind of the referent.
                        More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
                      type: string
                    name:
                      description: |-
                        Name of the referent.
                        More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
                      type: string
                    namespace:
                      description: |-
                        Namespace of the referent.
                        More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
                      type: string
                    resourceVersion:
                      description: |-
                        Specific resourceVersion to which this reference is made, if any.
                        More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
                      type: string
                    uid:
                      description: |-
                        UID of the referent.
                        More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
                      type: string
                  type: object
                  x-kubernetes-map-type: atomic
                type: array
              extraPortsAssigned:
                description: ExtraPortsAssigned stores the NodePorts assigned to the extra port mappings
                items:
                  description: ExtraPortAssigned describes an extra port that has been assigned
                  properties:
                    containerPort:
                      description: ContainerPort is the port number inside the container
                      type: integer
                    name:
                      description: Name is the name of the port
                      type: string
                    nodePort:
                      description: NodePort is the NodePort allocated by Kubernetes
                      type: integer
                    servicePort:
                      description: ServicePort is the port exposed by the Service
                      type: integer
                  required:
                  - containerPort
                  - nodePort
                  - servicePort
                  type: object
                type: array
              lastScheduleTime:
                description: Information when was the last time the job was successfully
                  scheduled.
                format: date-time
                type: string
              nodePortAssigned:
                description: NodePortAssigned stores the NodePort allocated by the cluster after the DevcontainerApp is scheduled
                type: integer
              ready:
                description: Ready indicates whether the readiness probe of the Pod managed by the DevcontainerApp has reached the ready state
                type: boolean
            type: object
        type: object
    served: true
    storage: true
    subresources:
      status: {}
YAML
```

Verify the CRD:

```bash
kubectl get crd devcontainerapps.devcontainer.devstar.cn -o wide
```
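Once the CRD is installed (and the controller-manager from the next section is running), a `DevcontainerApp` can be created against this schema. A minimal example; the image, repository URL, and SSH key are placeholders rather than values from this repository:

```bash
kubectl apply -f - <<'YAML'
apiVersion: devcontainer.devstar.cn/v1
kind: DevcontainerApp
metadata:
  name: example-dev
  namespace: default
spec:
  statefulset:
    image: example.registry/devcontainer-base:latest     # placeholder image
    command: ["/bin/sh", "-c", "sleep infinity"]          # placeholder command
    gitRepositoryURL: https://example.com/user/repo.git   # placeholder repository
    containerPort: 22
    sshPublicKeyList:
      - "ssh-ed25519 AAAA... user@example"                # placeholder public key
  service:
    extraPorts:
      - name: web
        containerPort: 8080
        servicePort: 8080
YAML
```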
### 4) Build and run controller-manager (required)

controller-manager watches `DevcontainerApp` CRs, creates/updates/deletes the underlying Kubernetes resources (StatefulSet, Service, and so on), and writes status such as `Status.Ready` back to the CR. Installing the CRD alone does not change any real resources; the controller-manager must be running for anything to take effect.

- Build and run from the main directory:

```bash
cd /home/psx/devstar-main
make controller-manager
./controller-manager
# Watch the logs: Reconcile entries should appear; once a DevcontainerApp is created, the sts/svc are created
```
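After the controller has reconciled a `DevcontainerApp`, the status fields defined in the CRD above (`ready`, `nodePortAssigned`, `extraPortsAssigned`) can be inspected directly; `example-dev` is the placeholder name from the sample manifest above:

```bash
kubectl get devcontainerapps.devcontainer.devstar.cn -A
kubectl get devcontainerapp example-dev -o jsonpath='{.status.ready}{"\n"}'
kubectl get devcontainerapp example-dev -o jsonpath='{.status.nodePortAssigned}{"\n"}'
kubectl get statefulset,svc -n default
```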
go.mod: 83 lines changed
@@ -94,6 +94,8 @@ require (
	github.com/nektos/act v0.2.63
	github.com/niklasfasching/go-org v1.8.0
	github.com/olivere/elastic/v7 v7.0.32
	github.com/onsi/ginkgo/v2 v2.22.0
	github.com/onsi/gomega v1.36.1
	github.com/opencontainers/go-digest v1.0.0
	github.com/opencontainers/image-spec v1.1.1
	github.com/pkg/errors v0.9.1
@@ -105,6 +107,8 @@ require (
	github.com/santhosh-tekuri/jsonschema/v5 v5.3.1
	github.com/sassoftware/go-rpmutils v0.4.0
	github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3
	github.com/spf13/cobra v1.9.1
	github.com/spf13/pflag v1.0.6
	github.com/stretchr/testify v1.10.0
	github.com/syndtr/goleveldb v1.0.0
	github.com/tstranex/u2f v1.0.0
@@ -125,41 +129,78 @@ require (
	golang.org/x/sync v0.15.0
	golang.org/x/sys v0.33.0
	golang.org/x/text v0.26.0
	google.golang.org/grpc v1.72.0
	google.golang.org/grpc v1.72.1
	google.golang.org/protobuf v1.36.6
	gopkg.in/ini.v1 v1.67.0
	gopkg.in/yaml.v3 v3.0.1
	k8s.io/apimachinery v0.33.3
	istio.io/api v1.27.2
	istio.io/client-go v1.27.2
	k8s.io/apimachinery v0.34.1
	k8s.io/component-base v0.34.1
	k8s.io/klog/v2 v2.130.1
	k8s.io/kubectl v0.34.1
	mvdan.cc/xurls/v2 v2.6.0
	sigs.k8s.io/controller-runtime v0.22.3
	strk.kbt.io/projects/go/libravatar v0.0.0-20191008002943-06d1c002b251
	xorm.io/builder v0.3.13
	xorm.io/xorm v1.3.9
)

require (
	cel.dev/expr v0.24.0 // indirect
	github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
	github.com/distribution/reference v0.5.0 // indirect
	github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
	github.com/blang/semver/v4 v4.0.0 // indirect
	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
	github.com/distribution/reference v0.6.0 // indirect
	github.com/docker/distribution v2.8.3+incompatible // indirect
	github.com/docker/go-units v0.5.0 // indirect
	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
	github.com/emicklei/go-restful/v3 v3.12.2 // indirect
	github.com/evanphx/json-patch/v5 v5.9.11 // indirect
	github.com/felixge/httpsnoop v1.0.4 // indirect
	github.com/go-logr/logr v1.4.2 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/go-logr/zapr v1.3.0 // indirect
	github.com/go-openapi/jsonpointer v0.21.0 // indirect
	github.com/go-openapi/jsonreference v0.20.2 // indirect
	github.com/go-openapi/swag v0.23.0 // indirect
	github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/google/gnostic-models v0.6.9 // indirect
	github.com/google/cel-go v0.26.0 // indirect
	github.com/google/gnostic-models v0.7.0 // indirect
	github.com/google/go-cmp v0.7.0 // indirect
	github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
	github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/moby/spdystream v0.5.0 // indirect
	github.com/moby/term v0.5.0 // indirect
	github.com/morikuni/aec v1.0.0 // indirect
	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
	github.com/stoewer/go-strcase v1.3.0 // indirect
	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 // indirect
	go.opentelemetry.io/otel/metric v1.35.0 // indirect
	go.opentelemetry.io/otel/sdk v1.34.0 // indirect
	go.opentelemetry.io/proto/otlp v1.5.0 // indirect
	go.yaml.in/yaml/v2 v2.4.2 // indirect
	go.yaml.in/yaml/v3 v3.0.4 // indirect
	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
	golang.org/x/term v0.32.0 // indirect
	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
	google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 // indirect
	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
	gopkg.in/inf.v0 v0.9.1 // indirect
	k8s.io/klog/v2 v2.130.1 // indirect
	k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
	sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
	k8s.io/apiextensions-apiserver v0.34.1 // indirect
	k8s.io/apiserver v0.34.1 // indirect
	k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect
	k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect
	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect
	sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
	sigs.k8s.io/randfill v1.0.0 // indirect
	sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
	sigs.k8s.io/yaml v1.4.0 // indirect
	sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
	sigs.k8s.io/yaml v1.6.0 // indirect
)

require (
@@ -212,7 +253,7 @@ require (
	github.com/couchbase/go-couchbase v0.1.1 // indirect
	github.com/couchbase/gomemcached v0.3.3 // indirect
	github.com/couchbase/goutils v0.1.2 // indirect
	github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect
	github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
	github.com/cyphar/filepath-securejoin v0.4.1 // indirect
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
	github.com/davidmz/go-pageant v1.0.2 // indirect
@@ -221,7 +262,7 @@ require (
	github.com/docker/docker v24.0.9+incompatible
	github.com/emersion/go-sasl v0.0.0-20241020182733-b788ff22d5a6 // indirect
	github.com/fatih/color v1.18.0 // indirect
	github.com/fxamacker/cbor/v2 v2.8.0 // indirect
	github.com/fxamacker/cbor/v2 v2.9.0 // indirect
	github.com/git-lfs/pktline v0.0.0-20230103162542-ca444d533ef1 // indirect
	github.com/go-ap/errors v0.0.0-20250409143711-5686c11ae650 // indirect
	github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 // indirect
@@ -264,7 +305,7 @@ require (
	github.com/minio/md5-simd v1.1.2 // indirect
	github.com/mitchellh/mapstructure v1.5.0 // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
	github.com/mrjones/oauth v0.0.0-20190623134757-126b35219450 // indirect
	github.com/mschoch/smat v0.2.0 // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
@@ -294,9 +335,9 @@ require (
	github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
	github.com/zeebo/assert v1.3.0 // indirect
	github.com/zeebo/blake3 v0.2.4 // indirect
	go.etcd.io/bbolt v1.4.0 // indirect
	go.opentelemetry.io/otel v1.34.0 // indirect
	go.opentelemetry.io/otel/trace v1.34.0 // indirect
	go.etcd.io/bbolt v1.4.2 // indirect
	go.opentelemetry.io/otel v1.35.0 // indirect
	go.opentelemetry.io/otel/trace v1.35.0 // indirect
	go.uber.org/atomic v1.11.0 // indirect
	go.uber.org/multierr v1.11.0 // indirect
	go.uber.org/zap v1.27.0 // indirect
@@ -307,8 +348,8 @@ require (
	google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f // indirect
	gopkg.in/warnings.v0 v0.1.2 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	k8s.io/api v0.33.3
	k8s.io/client-go v0.33.3
	k8s.io/api v0.34.1
	k8s.io/client-go v0.34.1
)

replace github.com/hashicorp/go-version => github.com/6543/go-version v1.3.1
@@ -330,3 +371,7 @@ exclude github.com/gofrs/uuid v4.0.0+incompatible
exclude github.com/goccy/go-json v0.4.11

exclude github.com/satori/go.uuid v1.2.0

replace github.com/docker/distribution => github.com/distribution/distribution v2.8.3+incompatible

replace github.com/distribution/reference => github.com/distribution/reference v0.5.0
go.sum: 161 lines changed
@@ -1,3 +1,5 @@
|
||||
cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=
|
||||
cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
|
||||
cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I=
|
||||
cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg=
|
||||
code.gitea.io/actions-proto-go v0.4.1 h1:l0EYhjsgpUe/1VABo2eK7zcoNX2W44WOnb0MSLrKfls=
|
||||
@@ -102,6 +104,8 @@ github.com/andybalholm/cascadia v1.3.3 h1:AG2YHrzJIm4BZ19iwJ/DAua6Btl3IwJX+VI4kk
|
||||
github.com/andybalholm/cascadia v1.3.3/go.mod h1:xNd9bqTn98Ln4DwST8/nG+H0yuB8Hmgu1YHNnWw0GeA=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
@@ -128,6 +132,8 @@ github.com/bits-and-blooms/bitset v1.22.0 h1:Tquv9S8+SGaS3EhyA+up3FXzmkhxPGjQQCk
|
||||
github.com/bits-and-blooms/bitset v1.22.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
|
||||
github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb h1:m935MPodAbYS46DG4pJSv7WO+VECIWUQ7OJYSoTrMh4=
|
||||
github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI=
|
||||
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
|
||||
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
|
||||
github.com/blevesearch/bleve/v2 v2.0.5/go.mod h1:ZjWibgnbRX33c+vBRgla9QhPb4QOjD6fdVJ+R1Bk8LM=
|
||||
github.com/blevesearch/bleve/v2 v2.5.0 h1:HzYqBy/5/M9Ul9ESEmXzN/3Jl7YpmWBdHM/+zzv/3k4=
|
||||
github.com/blevesearch/bleve/v2 v2.5.0/go.mod h1:PcJzTPnEynO15dCf9isxOga7YFRa/cMSsbnRwnszXUk=
|
||||
@@ -197,6 +203,8 @@ github.com/caddyserver/certmagic v0.23.0 h1:CfpZ/50jMfG4+1J/u2LV6piJq4HOfO6ppOnO
|
||||
github.com/caddyserver/certmagic v0.23.0/go.mod h1:9mEZIWqqWoI+Gf+4Trh04MOVPD0tGSxtqsxg87hAIH4=
|
||||
github.com/caddyserver/zerossl v0.1.3 h1:onS+pxp3M8HnHpN5MMbOMyNjmTheJyWRaZYwn+YTAyA=
|
||||
github.com/caddyserver/zerossl v0.1.3/go.mod h1:CxA0acn7oEGO6//4rtrRjYgEoa4MFw/XofZnrYwGqG4=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/cention-sany/utf7 v0.0.0-20170124080048-26cad61bd60a h1:MISbI8sU/PSK/ztvmWKFcI7UGb5/HQT7B+i3a2myKgI=
|
||||
github.com/cention-sany/utf7 v0.0.0-20170124080048-26cad61bd60a/go.mod h1:2GxOXOlEPAMFPfp014mK1SWq8G8BN8o7/dfYqJrVGn8=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
@@ -225,9 +233,11 @@ github.com/couchbase/goutils v0.1.2 h1:gWr8B6XNWPIhfalHNog3qQKfGiYyh4K4VhO3P2o9B
|
||||
github.com/couchbase/goutils v0.1.2/go.mod h1:h89Ek/tiOxxqjz30nPPlwZdQbdB8BwgnuBxeoUe/ViE=
|
||||
github.com/couchbase/moss v0.1.0/go.mod h1:9MaHIaRuy9pvLPUJxB8sh8OrLfyDczECVL37grCIubs=
|
||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0=
|
||||
github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
|
||||
github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s=
|
||||
github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
@@ -240,6 +250,8 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||
github.com/dimiro1/reply v0.0.0-20200315094148-d0136a4c9e21 h1:PdsjTl0Cg+ZJgOx/CFV5NNgO1ThTreqdgKYiDCMHJwA=
|
||||
github.com/dimiro1/reply v0.0.0-20200315094148-d0136a4c9e21/go.mod h1:xJvkyD6Y2rZapGvPJLYo9dyx1s5dxBEDPa8T3YTuOk0=
|
||||
github.com/distribution/distribution v2.8.3+incompatible h1:RlpEXBLq/WPXYvBYMDAmBX/SnhD67qwtvW/DzKc8pAo=
|
||||
github.com/distribution/distribution v2.8.3+incompatible/go.mod h1:EgLm2NgWtdKgzF9NpMzUKgzmR7AMmb0VQi2B+ZzDRjc=
|
||||
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
|
||||
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/djherbis/buffer v1.1.0/go.mod h1:VwN8VdFkMY0DCALdY8o00d3IZ6Amz/UNVMWcSaJT44o=
|
||||
@@ -252,8 +264,6 @@ github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55k
|
||||
github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
|
||||
github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZQ=
|
||||
github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
|
||||
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
|
||||
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v24.0.9+incompatible h1:HPGzNmwfLZWdxHqK9/II92pyi1EpYKsAqcl4G0Of9v0=
|
||||
github.com/docker/docker v24.0.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
@@ -278,24 +288,30 @@ github.com/emersion/go-sasl v0.0.0-20200509203442-7bfe0ed36a21/go.mod h1:iL2twTe
|
||||
github.com/emersion/go-sasl v0.0.0-20241020182733-b788ff22d5a6 h1:oP4q0fw+fOSWn3DfFi4EXdT+B+gTtzx8GC9xsc26Znk=
|
||||
github.com/emersion/go-sasl v0.0.0-20241020182733-b788ff22d5a6/go.mod h1:iL2twTeMvZnrg54ZoPDNfJaJaqy0xIQFuBdrLsmspwQ=
|
||||
github.com/emersion/go-textwrapper v0.0.0-20200911093747-65d896831594/go.mod h1:aqO8z8wPrjkscevZJFVE1wXJrLpC5LtJG7fqLOsPb2U=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
|
||||
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
|
||||
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
|
||||
github.com/ethantkoenig/rupture v1.0.1 h1:6aAXghmvtnngMgQzy7SMGdicMvkV86V4n9fT0meE5E4=
|
||||
github.com/ethantkoenig/rupture v1.0.1/go.mod h1:Sjqo/nbffZp1pVVXNGhpugIjsWmuS9KiIB4GtpEBur4=
|
||||
github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k=
|
||||
github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
|
||||
github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
|
||||
github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
|
||||
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
|
||||
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
|
||||
github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY=
|
||||
github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
|
||||
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
|
||||
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||
github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU=
|
||||
github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
|
||||
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
|
||||
github.com/git-lfs/pktline v0.0.0-20230103162542-ca444d533ef1 h1:mtDjlmloH7ytdblogrMz1/8Hqua1y8B4ID+bh3rvod0=
|
||||
github.com/git-lfs/pktline v0.0.0-20230103162542-ca444d533ef1/go.mod h1:fenKRzpXDjNpsIBhuhUzvjCKlDjKam0boRAenTE0Q6A=
|
||||
github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c=
|
||||
@@ -335,10 +351,13 @@ github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
|
||||
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
|
||||
github.com/go-ldap/ldap/v3 v3.4.11 h1:4k0Yxweg+a3OyBLjdYn5OKglv18JNvfDykSoI8bW0gU=
|
||||
github.com/go-ldap/ldap/v3 v3.4.11/go.mod h1:bY7t0FLK8OAVpp/vV6sSlpz3EQDGcQwc8pF0ujLgKvM=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
|
||||
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
|
||||
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
|
||||
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
|
||||
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
|
||||
@@ -359,7 +378,6 @@ github.com/go-redsync/redsync/v4 v4.13.0 h1:49X6GJfnbLGaIpBBREM/zA4uIMDXKAh1NDkv
|
||||
github.com/go-redsync/redsync/v4 v4.13.0/go.mod h1:HMW4Q224GZQz6x1Xc7040Yfgacukdzu7ifTDAKiyErQ=
|
||||
github.com/go-sql-driver/mysql v1.9.2 h1:4cNKDYQ1I84SXslGddlsrMhc8k4LeDVj6Ad6WRjiHuU=
|
||||
github.com/go-sql-driver/mysql v1.9.2/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||
@@ -412,17 +430,18 @@ github.com/gomodule/redigo v1.8.9 h1:Sl3u+2BI/kk+VEatbj0scLdrFhjPmbxOc1myhDP41ws
|
||||
github.com/gomodule/redigo v1.8.9/go.mod h1:7ArFNvsTjH8GMMzB4uy1snslv2BwmginuMs06a1uzZE=
|
||||
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
|
||||
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
|
||||
github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI=
|
||||
github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM=
|
||||
github.com/google/flatbuffers v24.3.25+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
|
||||
github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q=
|
||||
github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
|
||||
github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
|
||||
github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
|
||||
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
|
||||
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
@@ -462,6 +481,10 @@ github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pw
|
||||
github.com/gorilla/sessions v1.2.0/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
|
||||
github.com/gorilla/sessions v1.4.0 h1:kpIYOp/oi6MG/p5PgxApU8srsSw9tuFbt46Lt7auzqQ=
|
||||
github.com/gorilla/sessions v1.4.0/go.mod h1:FLWm50oby91+hl7p/wRxDth9bWSuk0qVL2emc7lT5ik=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
|
||||
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
@@ -485,6 +508,8 @@ github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI
|
||||
github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056 h1:iCHtR9CQyktQ5+f3dMVZfwD2KWJUgm7M0gdL9NGr8KA=
|
||||
github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056/go.mod h1:CVKlgaMiht+LXvHG173ujK6JUhZXKb2u/BQtjPDIvyk=
|
||||
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
|
||||
@@ -583,13 +608,16 @@ github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrk
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd h1:aY7OQNf2XqY/JQ6qREWamhI/81os/agb2BAGpcx5yWI=
|
||||
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
|
||||
github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
|
||||
github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
|
||||
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
|
||||
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
||||
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/mrjones/oauth v0.0.0-20190623134757-126b35219450 h1:j2kD3MT1z4PXCiUllUJF9mWUESr9TWKS7iEKsQ/IipM=
|
||||
@@ -601,6 +629,8 @@ github.com/msteinert/pam v1.2.0 h1:mYfjlvN2KYs2Pb9G6nb/1f/nPfAttT/Jee5Sq9r3bGE=
|
||||
github.com/msteinert/pam v1.2.0/go.mod h1:d2n0DCUK8rGecChV3JzvmsDjOY4R7AYbsNxAT+ftQl0=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
|
||||
github.com/niklasfasching/go-org v1.8.0 h1:WyGLaajLLp8JbQzkmapZ1y0MOzKuKV47HkZRloi+HGY=
|
||||
github.com/niklasfasching/go-org v1.8.0/go.mod h1:e2A9zJs7cdONrEGs3gvxCcaAEpwwPNPG7csDpXckMNg=
|
||||
github.com/nwaples/rardecode v1.1.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0=
|
||||
@@ -618,13 +648,13 @@ github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
|
||||
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
|
||||
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
|
||||
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
|
||||
github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg=
|
||||
github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
|
||||
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
|
||||
github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw=
|
||||
github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
|
||||
@@ -705,6 +735,8 @@ github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:s
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
||||
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
|
||||
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
|
||||
@@ -714,6 +746,8 @@ github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf h1:pvbZ0lM0XWPBqUKqFU8cma
|
||||
github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf/go.mod h1:RJID2RhlZKId02nZ62WenDCkgHFerpIOmW0iT7GKmXM=
|
||||
github.com/stephens2424/writerset v1.0.2/go.mod h1:aS2JhsMn6eA7e82oNmW4rfsgAOp9COBTTl8mzkwADnc=
|
||||
github.com/steveyen/gtreap v0.1.0/go.mod h1:kl/5J7XbrOmlIbYIXdRHDDE5QxHqpk0cmkT7Z4dM9/Y=
|
||||
github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
|
||||
github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
@@ -792,18 +826,28 @@ github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l
|
||||
gitlab.com/gitlab-org/api/client-go v0.127.0 h1:8xnxcNKGF2gDazEoMs+hOZfOspSSw8D0vAoWhQk9U+U=
|
||||
gitlab.com/gitlab-org/api/client-go v0.127.0/go.mod h1:bYC6fPORKSmtuPRyD9Z2rtbAjE7UeNatu2VWHRf4/LE=
|
||||
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
|
||||
go.etcd.io/bbolt v1.4.0 h1:TU77id3TnN/zKr7CO/uk+fBCwF2jGcMuw2B/FMAzYIk=
|
||||
go.etcd.io/bbolt v1.4.0/go.mod h1:AsD+OCi/qPN1giOX1aiLAha3o1U8rAz65bvN4j0sRuk=
|
||||
go.etcd.io/bbolt v1.4.2 h1:IrUHp260R8c+zYx/Tm8QZr04CX+qWS5PGfPdevhdm1I=
|
||||
go.etcd.io/bbolt v1.4.2/go.mod h1:Is8rSHO/b4f3XigBC0lL0+4FwAQv3HXEEIgFMuKHceM=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY=
|
||||
go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI=
|
||||
go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ=
|
||||
go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
|
||||
go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
|
||||
go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE=
|
||||
go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=
|
||||
go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
|
||||
go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A=
|
||||
go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU=
|
||||
go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k=
|
||||
go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w=
|
||||
go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs=
|
||||
go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
|
||||
go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
|
||||
go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
|
||||
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
|
||||
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
@@ -815,6 +859,10 @@ go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
||||
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||
go.uber.org/zap/exp v0.3.0 h1:6JYzdifzYkGmTdRR59oYH+Ng7k49H9qVpWwNSsGJj3U=
|
||||
go.uber.org/zap/exp v0.3.0/go.mod h1:5I384qq7XGxYyByIhHm6jg5CHkGY0nsTfbDLgDDlgJQ=
|
||||
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
|
||||
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
|
||||
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
@@ -830,6 +878,8 @@ golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ug
|
||||
golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M=
|
||||
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
|
||||
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
|
||||
golang.org/x/image v0.26.0 h1:4XjIFEZWQmCZi6Wv8BoxsDhRU3RVnLX04dToTDAEPlY=
|
||||
golang.org/x/image v0.26.0/go.mod h1:lcxbMFAovzpnJxzXS3nyL83K27tmqtKzIJpctK8YO5c=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
@@ -960,10 +1010,14 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 h1:hE3bRWtU6uceqlh4fhrSnUyjKHMKB9KrTLLG+bc0ddM=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463/go.mod h1:U90ffi8eUL9MwPcrJylN5+Mk2v3vuPDptd5yyNUiRR8=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f h1:N/PrbTw4kdkqNRzVfWPrBekzLuarFREcbFOiOLkXon4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
|
||||
google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM=
|
||||
google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
|
||||
google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA=
|
||||
google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
@@ -1002,18 +1056,30 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
|
||||
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
|
||||
k8s.io/api v0.33.3 h1:SRd5t//hhkI1buzxb288fy2xvjubstenEKL9K51KBI8=
|
||||
k8s.io/api v0.33.3/go.mod h1:01Y/iLUjNBM3TAvypct7DIj0M0NIZc+PzAHCIo0CYGE=
|
||||
k8s.io/apimachinery v0.33.3 h1:4ZSrmNa0c/ZpZJhAgRdcsFcZOw1PQU1bALVQ0B3I5LA=
|
||||
k8s.io/apimachinery v0.33.3/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
|
||||
k8s.io/client-go v0.33.3 h1:M5AfDnKfYmVJif92ngN532gFqakcGi6RvaOF16efrpA=
|
||||
k8s.io/client-go v0.33.3/go.mod h1:luqKBQggEf3shbxHY4uVENAxrDISLOarxpTKMiUuujg=
|
||||
istio.io/api v1.27.2 h1:t0m2EAT+LWGA/jSvsyxEhGQoIQYdXD5sJG7tQ9OtQk0=
|
||||
istio.io/api v1.27.2/go.mod h1:DTVGH6CLXj5W8FF9JUD3Tis78iRgT1WeuAnxfTz21Wg=
|
||||
istio.io/client-go v1.27.2 h1:4IsF7UAdV5Yg0iq6ONyWZpjFr3z2ahkIbLWyzOHCAwA=
|
||||
istio.io/client-go v1.27.2/go.mod h1:zgT5R1USl6rwYK1eb2kisPuiji05TQJE7CQHU253iAg=
|
||||
k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM=
|
||||
k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk=
|
||||
k8s.io/apiextensions-apiserver v0.34.1 h1:NNPBva8FNAPt1iSVwIE0FsdrVriRXMsaWFMqJbII2CI=
|
||||
k8s.io/apiextensions-apiserver v0.34.1/go.mod h1:hP9Rld3zF5Ay2Of3BeEpLAToP+l4s5UlxiHfqRaRcMc=
|
||||
k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4=
|
||||
k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
|
||||
k8s.io/apiserver v0.34.1 h1:U3JBGdgANK3dfFcyknWde1G6X1F4bg7PXuvlqt8lITA=
|
||||
k8s.io/apiserver v0.34.1/go.mod h1:eOOc9nrVqlBI1AFCvVzsob0OxtPZUCPiUJL45JOTBG0=
|
||||
k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY=
|
||||
k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8=
|
||||
k8s.io/component-base v0.34.1 h1:v7xFgG+ONhytZNFpIz5/kecwD+sUhVE6HU7qQUiRM4A=
|
||||
k8s.io/component-base v0.34.1/go.mod h1:mknCpLlTSKHzAQJJnnHVKqjxR7gBeHRv0rPXA7gdtQ0=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=
|
||||
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
|
||||
k8s.io/kubectl v0.34.1 h1:1qP1oqT5Xc93K+H8J7ecpBjaz511gan89KO9Vbsh/OI=
|
||||
k8s.io/kubectl v0.34.1/go.mod h1:JRYlhJpGPyk3dEmJ+BuBiOB9/dAvnrALJEiY/C5qa6A=
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI=
|
||||
lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
|
||||
modernc.org/cc/v3 v3.40.0 h1:P3g79IUS/93SYhtoeaHW+kRCIrYaxJ27MFPv+7kaTOw=
|
||||
@@ -1038,15 +1104,18 @@ mvdan.cc/xurls/v2 v2.6.0 h1:3NTZpeTxYVWNSokW3MKeyVkz/j7uYXYiMtXRUfmjbgI=
|
||||
mvdan.cc/xurls/v2 v2.6.0/go.mod h1:bCvEZ1XvdA6wDnxY7jPPjEmigDtvtvPXAD/Exa9IMSk=
|
||||
pgregory.net/rapid v0.4.2 h1:lsi9jhvZTYvzVpeG93WWgimPRmiJQfGFRNTEZh1dtY0=
|
||||
pgregory.net/rapid v0.4.2/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
|
||||
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
|
||||
sigs.k8s.io/controller-runtime v0.22.3 h1:I7mfqz/a/WdmDCEnXmSPm8/b/yRTy6JsKKENTijTq8Y=
|
||||
sigs.k8s.io/controller-runtime v0.22.3/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8=
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
|
||||
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
|
||||
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
|
||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
|
||||
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
|
||||
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
|
||||
strk.kbt.io/projects/go/libravatar v0.0.0-20191008002943-06d1c002b251 h1:mUcz5b3FJbP5Cvdq7Khzn6J9OCUQJaBwgBkCR+MOwSs=
|
||||
strk.kbt.io/projects/go/libravatar v0.0.0-20191008002943-06d1c002b251/go.mod h1:FJGmPh3vz9jSos1L/F91iAgnC/aejc0wIIrF2ZwJxdY=
|
||||
xorm.io/builder v0.3.13 h1:a3jmiVVL19psGeXx8GIurTp7p0IIgqeDmwhcR6BAOAo=
|
||||
|
||||
modules/k8s/Dockerfile.controller-manager: 45 lines (new file)
@@ -0,0 +1,45 @@
FROM golang:1.24 AS builder

WORKDIR /workspace

# Create the temporary directory structure
RUN mkdir -p modules/k8s

# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum

# Disable all proxies
ENV HTTP_PROXY=""
ENV HTTPS_PROXY=""
ENV http_proxy=""
ENV https_proxy=""
ENV GOPROXY=https://goproxy.io,direct
ENV GOSUMDB=sum.golang.org

# Download dependencies
RUN go mod download

# Copy the Go source code
COPY modules/k8s/ modules/k8s/

# Build the controller-manager binary
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o controller-manager modules/k8s/cmd/controller-manager/controller-manager.go

# Build a small image
FROM alpine:3.18

WORKDIR /

# Create a non-root user
RUN addgroup -g 65532 nonroot && \
    adduser -u 65532 -G nonroot -D nonroot

COPY --from=builder /workspace/modules/k8s/controller/ modules/k8s/controller/
COPY --from=builder /workspace/controller-manager .

USER 65532:65532

ENTRYPOINT ["/controller-manager"]

# $ docker build -t devstar-controller-manager:latest -f modules/k8s/Dockerfile.controller-manager .
18
modules/k8s/README.md
Normal file
18
modules/k8s/README.md
Normal file
@@ -0,0 +1,18 @@
# DevStar Controller Manager

This directory contains the source code for the DevStar Controller Manager and the Dockerfile used to build it. The Controller Manager is responsible for managing the DevContainer custom resources in Kubernetes.

## Building the Docker Image

### How to Build

Because of the project layout, the Docker image must be built from the project root:

```bash
# From the project root, run "make docker", or build only the devstar-controller-manager image with:
docker build -t devstar-controller-manager:latest -f modules/k8s/Dockerfile.controller-manager .

# When code is merged, the CI script builds and pushes the image devstar.cn/devstar/devstar-controller-manager:latest
```

This image is used by the devstar-controller-manager sub-chart of the devstar Helm chart. To switch to a new image, update values.yaml in that Helm chart (see the sketch below).
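As a rough illustration of the kind of override the README has in mind, the snippet below shows one possible values.yaml fragment; the exact key layout of the devstar-controller-manager sub-chart is an assumption here, not something shown in this change:

```yaml
# Hypothetical values.yaml override; the key names are assumed for illustration only.
devstar-controller-manager:
  image:
    repository: devstar.cn/devstar/devstar-controller-manager
    tag: latest
    pullPolicy: IfNotPresent
```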
727
modules/k8s/api/application/v1/application_types.go
Normal file
727
modules/k8s/api/application/v1/application_types.go
Normal file
@@ -0,0 +1,727 @@
|
||||
/*
|
||||
Copyright 2025.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// ApplicationSpec defines the desired state of Application
|
||||
type ApplicationSpec struct {
|
||||
// Template defines the application template
|
||||
// +required
|
||||
Template ApplicationTemplate `json:"template"`
|
||||
|
||||
// Replicas defines the number of desired replicas
|
||||
// +optional
|
||||
// +kubebuilder:default=1
|
||||
// +kubebuilder:validation:Minimum=0
|
||||
Replicas *int32 `json:"replicas,omitempty"`
|
||||
|
||||
// Environment defines environment variables for the application
|
||||
// +optional
|
||||
Environment map[string]string `json:"environment,omitempty"`
|
||||
|
||||
// Resources defines resource requirements for the application
|
||||
// +optional
|
||||
Resources ResourceRequirements `json:"resources,omitempty"`
|
||||
|
||||
// Expose defines whether to expose the application as a service (deprecated, use Service instead)
|
||||
// +optional
|
||||
// +kubebuilder:default=false
|
||||
Expose bool `json:"expose,omitempty"`
|
||||
|
||||
// Service defines service configuration for the application
|
||||
// +optional
|
||||
Service *ServiceConfig `json:"service,omitempty"`
|
||||
|
||||
// 网络策略配置,包含了南北向和东西向流量管理
|
||||
// +optional
|
||||
NetworkPolicy *NetworkPolicy `json:"networkPolicy,omitempty"`
|
||||
|
||||
// 保留现有的TrafficPolicy用于向后兼容,但增强其功能
|
||||
// +optional
|
||||
TrafficPolicy *TrafficPolicy `json:"trafficPolicy,omitempty"`
|
||||
}
|
||||
|
||||
// NetworkPolicy 定义应用的网络和流量策略
|
||||
// +kubebuilder:object:generate=true
|
||||
// +kubebuilder:validation:Optional
|
||||
type NetworkPolicy struct {
|
||||
// Gateway定义南北向流量入口配置
|
||||
// +optional
|
||||
Gateway *GatewayConfig `json:"gateway,omitempty"`
|
||||
|
||||
// Mesh定义服务网格相关配置
|
||||
// +optional
|
||||
Mesh *MeshConfig `json:"mesh,omitempty"`
|
||||
}
|
||||
|
||||
// GatewayConfig 定义南北向流量入口配置
|
||||
// +kubebuilder:object:generate=true
|
||||
// +kubebuilder:validation:Optional
|
||||
type GatewayConfig struct {
|
||||
// 是否启用Gateway
|
||||
// +optional
|
||||
// +kubebuilder:default=false
|
||||
Enabled bool `json:"enabled,omitempty"`
|
||||
|
||||
// 暴露的端口列表
|
||||
// +optional
|
||||
// +kubebuilder:validation:MinItems=0
|
||||
// +listType=atomic
|
||||
Ports []GatewayPort `json:"ports,omitempty"`
|
||||
|
||||
// 域名列表,用于HTTP/HTTPS协议
|
||||
// +optional
|
||||
// +listType=atomic
|
||||
// +kubebuilder:validation:MinItems=0
|
||||
Hosts []string `json:"hosts,omitempty"`
|
||||
|
||||
// TLS配置
|
||||
// +optional
|
||||
// +listType=atomic
|
||||
// +kubebuilder:validation:MinItems=0
|
||||
TLS []GatewayTLS `json:"tls,omitempty"`
|
||||
|
||||
// 额外的Gateway注解
|
||||
// +optional
|
||||
Annotations map[string]string `json:"annotations,omitempty"`
|
||||
}
|
||||
|
||||
// GatewayPort 定义Gateway暴露的端口
|
||||
// +kubebuilder:object:generate=true
|
||||
type GatewayPort struct {
|
||||
// 端口名称
|
||||
// +required
|
||||
// +kubebuilder:validation:MinLength=1
|
||||
// +kubebuilder:validation:MaxLength=63
|
||||
// +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]*[a-z0-9])?$`
|
||||
Name string `json:"name"`
|
||||
|
||||
// 端口号
|
||||
// +required
|
||||
// +kubebuilder:validation:Minimum=1
|
||||
// +kubebuilder:validation:Maximum=65535
|
||||
Number int32 `json:"number"`
|
||||
|
||||
// 协议类型
|
||||
// +optional
|
||||
// +kubebuilder:default="HTTP"
|
||||
// +kubebuilder:validation:Enum=HTTP;HTTPS;TCP;UDP;GRPC;TLS;MONGO
|
||||
Protocol string `json:"protocol,omitempty"`
|
||||
|
||||
// 目标端口(如与端口号不同)
|
||||
// +optional
|
||||
// +kubebuilder:validation:Minimum=1
|
||||
// +kubebuilder:validation:Maximum=65535
|
||||
TargetPort int32 `json:"targetPort,omitempty"`
|
||||
}
|
||||
|
||||
// GatewayTLS 定义Gateway的TLS配置
|
||||
// +kubebuilder:object:generate=true
|
||||
type GatewayTLS struct {
|
||||
// 主机列表
|
||||
// +optional
|
||||
// +listType=atomic
|
||||
Hosts []string `json:"hosts,omitempty"`
|
||||
|
||||
// 证书Secret名称
|
||||
// +optional
|
||||
// +kubebuilder:validation:MinLength=0
|
||||
// +kubebuilder:validation:MaxLength=253
|
||||
SecretName string `json:"secretName,omitempty"`
|
||||
|
||||
// TLS模式
|
||||
// +optional
|
||||
// +kubebuilder:default="SIMPLE"
|
||||
// +kubebuilder:validation:Enum=SIMPLE;MUTUAL;PASSTHROUGH
|
||||
Mode string `json:"mode,omitempty"`
|
||||
|
||||
// 最低TLS版本
|
||||
// +optional
|
||||
// +kubebuilder:default="TLSv1_2"
|
||||
// +kubebuilder:validation:Enum=TLSv1_0;TLSv1_1;TLSv1_2;TLSv1_3
|
||||
MinProtocolVersion string `json:"minProtocolVersion,omitempty"`
|
||||
}
|
||||
|
||||
// MeshConfig 定义服务网格相关配置
|
||||
// +kubebuilder:object:generate=true
|
||||
// +kubebuilder:validation:Optional
|
||||
type MeshConfig struct {
|
||||
// 是否启用服务网格
|
||||
// +optional
|
||||
// +kubebuilder:default=false
|
||||
Enabled bool `json:"enabled,omitempty"`
|
||||
|
||||
// Sidecar注入配置
|
||||
// +optional
|
||||
Sidecar *SidecarConfig `json:"sidecar,omitempty"`
|
||||
|
||||
// 路由规则
|
||||
// +optional
|
||||
// +listType=atomic
|
||||
Routes []RouteConfig `json:"routes,omitempty"`
|
||||
|
||||
// 熔断策略
|
||||
// +optional
|
||||
CircuitBreaker *CircuitBreaker `json:"circuitBreaker,omitempty"`
|
||||
|
||||
// 超时配置(毫秒)
|
||||
// +optional
|
||||
// +kubebuilder:validation:Minimum=0
|
||||
Timeout int32 `json:"timeout,omitempty"`
|
||||
|
||||
// 重试策略
|
||||
// +optional
|
||||
Retry *RetryPolicy `json:"retry,omitempty"`
|
||||
|
||||
// 故障注入(用于测试)
|
||||
// +optional
|
||||
FaultInjection *FaultInjection `json:"faultInjection,omitempty"`
|
||||
|
||||
// 负载均衡策略
|
||||
// +optional
|
||||
LoadBalancer *LoadBalancerSettings `json:"loadBalancer,omitempty"`
|
||||
}
|
||||
|
||||
// SidecarConfig 定义Sidecar代理配置
|
||||
// +kubebuilder:object:generate=true
|
||||
type SidecarConfig struct {
|
||||
// 是否手动注入Sidecar
|
||||
// +optional
|
||||
// +kubebuilder:default=true
|
||||
Inject bool `json:"inject,omitempty"`
|
||||
|
||||
// 资源限制
|
||||
// +optional
|
||||
Resources *ResourceRequirements `json:"resources,omitempty"`
|
||||
}
|
||||
|
||||
// RouteConfig 定义路由规则
|
||||
// +kubebuilder:object:generate=true
|
||||
type RouteConfig struct {
|
||||
// 路由名称
|
||||
// +optional
|
||||
// +kubebuilder:validation:MaxLength=253
|
||||
// +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`
|
||||
Name string `json:"name,omitempty"`
|
||||
|
||||
// 匹配条件
|
||||
// +optional
|
||||
Match *HTTPMatchRequest `json:"match,omitempty"`
|
||||
|
||||
// 目标服务
|
||||
// +required
|
||||
Destination RouteDestination `json:"destination"`
|
||||
|
||||
// 权重(0-100)
|
||||
// +optional
|
||||
// +kubebuilder:default=100
|
||||
// +kubebuilder:validation:Minimum=0
|
||||
// +kubebuilder:validation:Maximum=100
|
||||
Weight int32 `json:"weight,omitempty"`
|
||||
}
|
||||
|
||||
// HTTPMatchRequest 定义HTTP匹配条件
|
||||
// +kubebuilder:object:generate=true
|
||||
type HTTPMatchRequest struct {
|
||||
// URI匹配
|
||||
// +optional
|
||||
URI *StringMatch `json:"uri,omitempty"`
|
||||
|
||||
// 方法匹配(GET, POST等)
|
||||
// +optional
|
||||
Method *StringMatch `json:"method,omitempty"`
|
||||
|
||||
// 头部匹配
|
||||
// +optional
|
||||
Headers map[string]StringMatch `json:"headers,omitempty"`
|
||||
}
|
||||
|
||||
// StringMatch 定义字符串匹配方式
|
||||
// +kubebuilder:object:generate=true
|
||||
// +kubebuilder:validation:XValidation:rule="has(self.exact) || has(self.prefix) || has(self.regex)",message="必须指定exact、prefix或regex中的一种匹配方式"
|
||||
type StringMatch struct {
|
||||
// 精确匹配
|
||||
// +optional
|
||||
Exact string `json:"exact,omitempty"`
|
||||
|
||||
// 前缀匹配
|
||||
// +optional
|
||||
Prefix string `json:"prefix,omitempty"`
|
||||
|
||||
// 正则匹配
|
||||
// +optional
|
||||
Regex string `json:"regex,omitempty"`
|
||||
}
|
||||
|
||||
// RouteDestination 定义路由目标
|
||||
// +kubebuilder:object:generate=true
|
||||
type RouteDestination struct {
|
||||
// 目标服务
|
||||
// +required
|
||||
// +kubebuilder:validation:MinLength=1
|
||||
Host string `json:"host"`
|
||||
|
||||
// 目标子集
|
||||
// +optional
|
||||
Subset string `json:"subset,omitempty"`
|
||||
|
||||
// 目标端口
|
||||
// +optional
|
||||
// +kubebuilder:validation:Minimum=1
|
||||
// +kubebuilder:validation:Maximum=65535
|
||||
Port int32 `json:"port,omitempty"`
|
||||
}
|
||||
|
||||
// CanaryTraffic 金丝雀流量配置
|
||||
// +kubebuilder:object:generate=true
|
||||
type CanaryTraffic struct {
|
||||
// 是否启用金丝雀发布
|
||||
// +required
|
||||
Enabled bool `json:"enabled"`
|
||||
|
||||
// 主版本权重(如80),金丝雀版本自动为100-主版本
|
||||
// +optional
|
||||
// +kubebuilder:default=80
|
||||
// +kubebuilder:validation:Minimum=0
|
||||
// +kubebuilder:validation:Maximum=100
|
||||
MainWeight int32 `json:"mainWeight,omitempty"`
|
||||
|
||||
// 金丝雀版本标签(如"v2")
|
||||
// +optional
|
||||
// +kubebuilder:validation:MinLength=1
|
||||
CanaryVersion string `json:"canaryVersion,omitempty"`
|
||||
}
|
||||
|
||||
// TrafficPolicy 定义服务网格流量治理策略
|
||||
// +kubebuilder:object:generate=true
|
||||
type TrafficPolicy struct {
|
||||
// 金丝雀发布配置(已有)
|
||||
// +optional
|
||||
Canary *CanaryTraffic `json:"canary,omitempty"`
|
||||
|
||||
// 超时配置(毫秒)
|
||||
// +optional
|
||||
// +kubebuilder:validation:Minimum=0
|
||||
Timeout int32 `json:"timeout,omitempty"`
|
||||
|
||||
// 重试策略
|
||||
// +optional
|
||||
Retry *RetryPolicy `json:"retry,omitempty"`
|
||||
|
||||
// 熔断配置
|
||||
// +optional
|
||||
CircuitBreaker *CircuitBreaker `json:"circuitBreaker,omitempty"`
|
||||
|
||||
// 流量镜像配置
|
||||
// +optional
|
||||
Mirror *MirrorConfig `json:"mirror,omitempty"`
|
||||
|
||||
// 负载均衡策略
|
||||
// +optional
|
||||
LoadBalancer *LoadBalancerSettings `json:"loadBalancer,omitempty"`
|
||||
|
||||
// 故障注入(用于测试)
|
||||
// +optional
|
||||
FaultInjection *FaultInjection `json:"faultInjection,omitempty"`
|
||||
}
|
||||
|
||||
// RetryPolicy 重试策略配置
|
||||
// +kubebuilder:object:generate=true
|
||||
type RetryPolicy struct {
|
||||
// 重试次数
|
||||
// +required
|
||||
// +kubebuilder:validation:Minimum=1
|
||||
Attempts int32 `json:"attempts"`
|
||||
|
||||
// 每次重试超时时间(毫秒)
|
||||
// +optional
|
||||
// +kubebuilder:validation:Minimum=0
|
||||
PerTryTimeout int32 `json:"perTryTimeout,omitempty"`
|
||||
|
||||
// 重试条件
|
||||
// +optional
|
||||
// +listType=set
|
||||
RetryOn []string `json:"retryOn,omitempty"`
|
||||
}
|
||||
|
||||
// CircuitBreaker 熔断器配置
|
||||
// +kubebuilder:object:generate=true
|
||||
type CircuitBreaker struct {
|
||||
// 连续错误阈值
|
||||
// +required
|
||||
// +kubebuilder:validation:Minimum=1
|
||||
ConsecutiveErrors int32 `json:"consecutiveErrors"`
|
||||
|
||||
// 熔断恢复时间(秒)
|
||||
// +required
|
||||
// +kubebuilder:validation:Minimum=1
|
||||
BaseEjectionTime int32 `json:"baseEjectionTime"`
|
||||
|
||||
// 最大熔断实例百分比(1-100)
|
||||
// +optional
|
||||
// +kubebuilder:default=100
|
||||
// +kubebuilder:validation:Minimum=0
|
||||
// +kubebuilder:validation:Maximum=100
|
||||
MaxEjectionPercent int32 `json:"maxEjectionPercent,omitempty"`
|
||||
}
|
||||
|
||||
// MirrorConfig 流量镜像配置
|
||||
// +kubebuilder:object:generate=true
|
||||
type MirrorConfig struct {
|
||||
// 镜像目标服务
|
||||
// +required
|
||||
// +kubebuilder:validation:MinLength=1
|
||||
TargetService string `json:"targetService"`
|
||||
|
||||
// 目标服务端口
|
||||
// +optional
|
||||
// +kubebuilder:validation:Minimum=1
|
||||
// +kubebuilder:validation:Maximum=65535
|
||||
TargetPort int32 `json:"targetPort,omitempty"`
|
||||
|
||||
// 镜像流量百分比(1-100)
|
||||
// +optional
|
||||
// +kubebuilder:default=100
|
||||
// +kubebuilder:validation:Minimum=0
|
||||
// +kubebuilder:validation:Maximum=100
|
||||
Percentage int32 `json:"percentage,omitempty"`
|
||||
}
|
||||
|
||||
// LoadBalancerSettings 负载均衡设置
|
||||
// +kubebuilder:object:generate=true
|
||||
// +kubebuilder:validation:XValidation:rule="has(self.simple) != has(self.consistentHash)",message="必须指定simple或consistentHash其中之一,且只能指定一个"
|
||||
type LoadBalancerSettings struct {
|
||||
// 负载均衡模式
|
||||
// +optional
|
||||
// +kubebuilder:default="ROUND_ROBIN"
|
||||
// +kubebuilder:validation:Enum=ROUND_ROBIN;LEAST_CONN;RANDOM;PASSTHROUGH
|
||||
Simple string `json:"simple,omitempty"`
|
||||
|
||||
// 一致性哈希配置
|
||||
// +optional
|
||||
ConsistentHash *ConsistentHashLB `json:"consistentHash,omitempty"`
|
||||
}
|
||||
|
||||
// ConsistentHashLB 一致性哈希负载均衡
|
||||
// +kubebuilder:object:generate=true
|
||||
// +kubebuilder:validation:XValidation:rule="(has(self.httpHeaderName) ? 1 : 0) + (has(self.httpCookie) ? 1 : 0) + (self.useSourceIp ? 1 : 0) == 1",message="必须且只能指定httpHeaderName、httpCookie或useSourceIp其中之一"
|
||||
type ConsistentHashLB struct {
|
||||
// HTTP头部哈希
|
||||
// +optional
|
||||
// +kubebuilder:validation:MinLength=1
|
||||
HttpHeaderName string `json:"httpHeaderName,omitempty"`
|
||||
|
||||
// HTTP Cookie哈希
|
||||
// +optional
|
||||
HttpCookie *HTTPCookie `json:"httpCookie,omitempty"`
|
||||
|
||||
// 使用源IP
|
||||
// +optional
|
||||
// +kubebuilder:default=false
|
||||
UseSourceIp bool `json:"useSourceIp,omitempty"`
|
||||
}
|
||||
|
||||
// HTTPCookie HTTP Cookie配置
|
||||
// +kubebuilder:object:generate=true
|
||||
type HTTPCookie struct {
|
||||
// Cookie名称
|
||||
// +required
|
||||
// +kubebuilder:validation:MinLength=1
|
||||
Name string `json:"name"`
|
||||
|
||||
// Cookie路径
|
||||
// +optional
|
||||
Path string `json:"path,omitempty"`
|
||||
|
||||
// Cookie过期时间(秒)
|
||||
// +optional
|
||||
// +kubebuilder:validation:Minimum=0
|
||||
Ttl int32 `json:"ttl,omitempty"`
|
||||
}
|
||||
|
||||
// FaultInjection 故障注入配置
|
||||
// +kubebuilder:object:generate=true
|
||||
type FaultInjection struct {
|
||||
// 延迟注入
|
||||
// +optional
|
||||
Delay *DelayInjection `json:"delay,omitempty"`
|
||||
|
||||
// 中止注入
|
||||
// +optional
|
||||
Abort *AbortInjection `json:"abort,omitempty"`
|
||||
}
|
||||
|
||||
// DelayInjection 延迟注入配置
|
||||
// +kubebuilder:object:generate=true
|
||||
type DelayInjection struct {
|
||||
// 固定延迟时间(毫秒)
|
||||
// +required
|
||||
// +kubebuilder:validation:Minimum=0
|
||||
FixedDelay int32 `json:"fixedDelay"`
|
||||
|
||||
// 注入百分比(0-100)
|
||||
// +optional
|
||||
// +kubebuilder:default=100
|
||||
// +kubebuilder:validation:Minimum=0
|
||||
// +kubebuilder:validation:Maximum=100
|
||||
Percentage int32 `json:"percentage,omitempty"`
|
||||
}
|
||||
|
||||
// AbortInjection 中止注入配置
|
||||
// +kubebuilder:object:generate=true
|
||||
type AbortInjection struct {
|
||||
// HTTP状态码
|
||||
// +required
|
||||
// +kubebuilder:validation:Minimum=100
|
||||
// +kubebuilder:validation:Maximum=599
|
||||
HttpStatus int32 `json:"httpStatus"`
|
||||
|
||||
// 注入百分比(0-100)
|
||||
// +optional
|
||||
// +kubebuilder:default=100
|
||||
// +kubebuilder:validation:Minimum=0
|
||||
// +kubebuilder:validation:Maximum=100
|
||||
Percentage int32 `json:"percentage,omitempty"`
|
||||
}
|
||||
|
||||
// ApplicationTemplate defines the application template
|
||||
type ApplicationTemplate struct {
|
||||
// Image defines the container image
|
||||
// +required
|
||||
// +kubebuilder:validation:MinLength=1
|
||||
Image string `json:"image"`
|
||||
|
||||
// Type defines the application type (stateless/stateful)
|
||||
// +optional
|
||||
// +kubebuilder:default="stateless"
|
||||
// +kubebuilder:validation:Enum=stateless;stateful
|
||||
Type string `json:"type,omitempty"`
|
||||
|
||||
// Ports defines the container ports
|
||||
// +optional
|
||||
Ports []Port `json:"ports,omitempty"`
|
||||
|
||||
// HealthCheck defines health check configuration
|
||||
// +optional
|
||||
HealthCheck *HealthCheck `json:"healthCheck,omitempty"`
|
||||
|
||||
// Command defines the container command
|
||||
// +optional
|
||||
Command []string `json:"command,omitempty"`
|
||||
|
||||
// Args defines the container arguments
|
||||
// +optional
|
||||
Args []string `json:"args,omitempty"`
|
||||
}
|
||||
|
||||
// Port defines a port configuration
|
||||
type Port struct {
|
||||
// Name defines the port name
|
||||
// +required
|
||||
// +kubebuilder:validation:MinLength=1
|
||||
Name string `json:"name"`
|
||||
|
||||
// Port defines the port number
|
||||
// +required
|
||||
// +kubebuilder:validation:Minimum=1
|
||||
// +kubebuilder:validation:Maximum=65535
|
||||
Port int32 `json:"port"`
|
||||
|
||||
// Protocol defines the port protocol
|
||||
// +optional
|
||||
// +kubebuilder:default="TCP"
|
||||
// +kubebuilder:validation:Enum=TCP;UDP
|
||||
Protocol string `json:"protocol,omitempty"`
|
||||
}
|
||||
|
||||
// HealthCheck defines health check configuration
|
||||
type HealthCheck struct {
|
||||
// HTTPGet defines HTTP health check
|
||||
// +optional
|
||||
HTTPGet *HTTPGetAction `json:"httpGet,omitempty"`
|
||||
}
|
||||
|
||||
// HTTPGetAction defines HTTP health check action
|
||||
type HTTPGetAction struct {
|
||||
// Path defines the HTTP path
|
||||
// +required
|
||||
// +kubebuilder:validation:MinLength=1
|
||||
Path string `json:"path"`
|
||||
|
||||
// Port defines the HTTP port
|
||||
// +required
|
||||
// +kubebuilder:validation:Minimum=1
|
||||
// +kubebuilder:validation:Maximum=65535
|
||||
Port int32 `json:"port"`
|
||||
}
|
||||
|
||||
// ResourceRequirements defines resource requirements
|
||||
type ResourceRequirements struct {
|
||||
// CPU defines CPU resource requirement
|
||||
// +optional
|
||||
// +kubebuilder:validation:Pattern=`^(\d+m|\d+(\.\d+)?)$`
|
||||
CPU string `json:"cpu,omitempty"`
|
||||
|
||||
// Memory defines memory resource requirement
|
||||
// +optional
|
||||
// +kubebuilder:validation:Pattern=`^(\d+(Ki|Mi|Gi|Ti|Pi|Ei|k|M|G|T|P|E)?)$`
|
||||
Memory string `json:"memory,omitempty"`
|
||||
}
|
||||
|
||||
// ServiceConfig defines service configuration
|
||||
type ServiceConfig struct {
|
||||
// Enabled defines whether to create service
|
||||
// +optional
|
||||
// +kubebuilder:default=false
|
||||
Enabled bool `json:"enabled,omitempty"`
|
||||
|
||||
// Type defines the service type
|
||||
// +optional
|
||||
// +kubebuilder:default="ClusterIP"
|
||||
// +kubebuilder:validation:Enum=ClusterIP;NodePort;LoadBalancer;ExternalName
|
||||
Type string `json:"type,omitempty"`
|
||||
|
||||
// Ports defines custom service ports (if not specified, uses template ports)
|
||||
// +optional
|
||||
Ports []ServicePort `json:"ports,omitempty"`
|
||||
|
||||
// NodePorts defines specific node ports for NodePort type services
|
||||
// +optional
|
||||
NodePorts map[string]int32 `json:"nodePorts,omitempty"`
|
||||
|
||||
// LoadBalancerIP defines the IP for LoadBalancer type services
|
||||
// +optional
|
||||
LoadBalancerIP string `json:"loadBalancerIP,omitempty"`
|
||||
|
||||
// LoadBalancerSourceRanges defines allowed source ranges for LoadBalancer
|
||||
// +optional
|
||||
LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty"`
|
||||
|
||||
// ExternalName defines external service name for ExternalName type
|
||||
// +optional
|
||||
ExternalName string `json:"externalName,omitempty"`
|
||||
|
||||
// SessionAffinity defines session affinity
|
||||
// +optional
|
||||
// +kubebuilder:default="None"
|
||||
// +kubebuilder:validation:Enum=None;ClientIP
|
||||
SessionAffinity string `json:"sessionAffinity,omitempty"`
|
||||
|
||||
// Annotations defines additional annotations for service
|
||||
// +optional
|
||||
Annotations map[string]string `json:"annotations,omitempty"`
|
||||
|
||||
// Labels defines additional labels for service
|
||||
// +optional
|
||||
Labels map[string]string `json:"labels,omitempty"`
|
||||
}
|
||||
|
||||
// ServicePort defines a service port configuration
|
||||
type ServicePort struct {
|
||||
// Name defines the port name
|
||||
// +required
|
||||
// +kubebuilder:validation:MinLength=1
|
||||
Name string `json:"name"`
|
||||
|
||||
// Port defines the service port
|
||||
// +required
|
||||
// +kubebuilder:validation:Minimum=1
|
||||
// +kubebuilder:validation:Maximum=65535
|
||||
Port int32 `json:"port"`
|
||||
|
||||
// TargetPort defines the target port (can be port number or name)
|
||||
// +optional
|
||||
TargetPort string `json:"targetPort,omitempty"`
|
||||
|
||||
// Protocol defines the port protocol
|
||||
// +optional
|
||||
// +kubebuilder:default="TCP"
|
||||
// +kubebuilder:validation:Enum=TCP;UDP;SCTP
|
||||
Protocol string `json:"protocol,omitempty"`
|
||||
|
||||
// NodePort defines the node port for NodePort type (30000-32767)
|
||||
// +optional
|
||||
// +kubebuilder:validation:Minimum=30000
|
||||
// +kubebuilder:validation:Maximum=32767
|
||||
NodePort int32 `json:"nodePort,omitempty"`
|
||||
}
|
||||
|
||||
// ApplicationStatus defines the observed state of Application
|
||||
type ApplicationStatus struct {
|
||||
// Phase defines the current phase of the application
|
||||
// +optional
|
||||
// +kubebuilder:validation:Enum=Pending;Running;Scaling;Failed
|
||||
Phase string `json:"phase,omitempty"`
|
||||
|
||||
// ReadyReplicas defines the number of ready replicas
|
||||
// +optional
|
||||
ReadyReplicas int32 `json:"readyReplicas,omitempty"`
|
||||
|
||||
// Replicas defines the total number of replicas
|
||||
// +optional
|
||||
Replicas int32 `json:"replicas,omitempty"`
|
||||
|
||||
// Message defines a human-readable message
|
||||
// +optional
|
||||
Message string `json:"message,omitempty"`
|
||||
|
||||
// LastUpdated defines when the status was last updated
|
||||
// +optional
|
||||
LastUpdated metav1.Time `json:"lastUpdated,omitempty"`
|
||||
}
|
||||
|
||||
// +kubebuilder:object:root=true
|
||||
// +kubebuilder:subresource:status
|
||||
// +kubebuilder:resource:scope=Namespaced
|
||||
// +kubebuilder:printcolumn:name="Image",type=string,JSONPath=".spec.template.image"
|
||||
// +kubebuilder:printcolumn:name="Replicas",type=integer,JSONPath=".spec.replicas"
|
||||
// +kubebuilder:printcolumn:name="Ready",type=integer,JSONPath=".status.readyReplicas"
|
||||
// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=".status.phase"
|
||||
// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=".metadata.creationTimestamp"
|
||||
|
||||
// Application is the Schema for the applications API
|
||||
type Application struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
// metadata is a standard object metadata
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
// spec defines the desired state of Application
|
||||
// +required
|
||||
Spec ApplicationSpec `json:"spec"`
|
||||
|
||||
// status defines the observed state of Application
|
||||
// +optional
|
||||
Status ApplicationStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
// +kubebuilder:object:root=true
|
||||
|
||||
// ApplicationList contains a list of Application
|
||||
type ApplicationList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
Items []Application `json:"items"`
|
||||
}
|
||||
|
||||
func init() {
|
||||
SchemeBuilder.Register(&Application{}, &ApplicationList{})
|
||||
}
|
||||
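The Application types above combine an image template, a replica count, optional Service settings, and Istio-oriented network and traffic policy. A minimal sketch of a matching custom resource, assuming the Application CRD generated from these types is installed (name, namespace, image, and host are placeholder assumptions):

```yaml
# Hypothetical Application resource; all values are placeholders for illustration.
apiVersion: application.devstar.cn/v1
kind: Application
metadata:
  name: demo-app
  namespace: default
spec:
  replicas: 2
  template:
    image: nginx:1.27          # required container image
    type: stateless
    ports:
      - name: http
        port: 80
        protocol: TCP
  service:
    enabled: true
    type: ClusterIP
  networkPolicy:
    gateway:
      enabled: true
      hosts:
        - demo.example.com     # assumed host, for illustration
      ports:
        - name: http
          number: 80
          protocol: HTTP
  trafficPolicy:
    canary:
      enabled: true
      mainWeight: 80           # canary weight is implicitly 100 - mainWeight
      canaryVersion: v2
```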
36
modules/k8s/api/application/v1/groupversion_info.go
Normal file
36
modules/k8s/api/application/v1/groupversion_info.go
Normal file
@@ -0,0 +1,36 @@
/*
Copyright 2025.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package v1 contains API Schema definitions for the application v1 API group.
// +kubebuilder:object:generate=true
// +groupName=application.devstar.cn
package v1

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/scheme"
)

var (
	// GroupVersion is group version used to register these objects.
	GroupVersion = schema.GroupVersion{Group: "application.devstar.cn", Version: "v1"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)
774
modules/k8s/api/application/v1/zz_generated.deepcopy.go
Normal file
774
modules/k8s/api/application/v1/zz_generated.deepcopy.go
Normal file
@@ -0,0 +1,774 @@
|
||||
//go:build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2025.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by controller-gen. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AbortInjection) DeepCopyInto(out *AbortInjection) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AbortInjection.
|
||||
func (in *AbortInjection) DeepCopy() *AbortInjection {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AbortInjection)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Application) DeepCopyInto(out *Application) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
in.Status.DeepCopyInto(&out.Status)
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Application.
|
||||
func (in *Application) DeepCopy() *Application {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Application)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *Application) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ApplicationList) DeepCopyInto(out *ApplicationList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]Application, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationList.
|
||||
func (in *ApplicationList) DeepCopy() *ApplicationList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ApplicationList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *ApplicationList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ApplicationSpec) DeepCopyInto(out *ApplicationSpec) {
|
||||
*out = *in
|
||||
in.Template.DeepCopyInto(&out.Template)
|
||||
if in.Replicas != nil {
|
||||
in, out := &in.Replicas, &out.Replicas
|
||||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
if in.Environment != nil {
|
||||
in, out := &in.Environment, &out.Environment
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
out.Resources = in.Resources
|
||||
if in.Service != nil {
|
||||
in, out := &in.Service, &out.Service
|
||||
*out = new(ServiceConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.NetworkPolicy != nil {
|
||||
in, out := &in.NetworkPolicy, &out.NetworkPolicy
|
||||
*out = new(NetworkPolicy)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.TrafficPolicy != nil {
|
||||
in, out := &in.TrafficPolicy, &out.TrafficPolicy
|
||||
*out = new(TrafficPolicy)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationSpec.
|
||||
func (in *ApplicationSpec) DeepCopy() *ApplicationSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ApplicationSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ApplicationStatus) DeepCopyInto(out *ApplicationStatus) {
|
||||
*out = *in
|
||||
in.LastUpdated.DeepCopyInto(&out.LastUpdated)
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationStatus.
|
||||
func (in *ApplicationStatus) DeepCopy() *ApplicationStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ApplicationStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ApplicationTemplate) DeepCopyInto(out *ApplicationTemplate) {
|
||||
*out = *in
|
||||
if in.Ports != nil {
|
||||
in, out := &in.Ports, &out.Ports
|
||||
*out = make([]Port, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.HealthCheck != nil {
|
||||
in, out := &in.HealthCheck, &out.HealthCheck
|
||||
*out = new(HealthCheck)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.Command != nil {
|
||||
in, out := &in.Command, &out.Command
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Args != nil {
|
||||
in, out := &in.Args, &out.Args
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationTemplate.
|
||||
func (in *ApplicationTemplate) DeepCopy() *ApplicationTemplate {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ApplicationTemplate)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *CanaryTraffic) DeepCopyInto(out *CanaryTraffic) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanaryTraffic.
|
||||
func (in *CanaryTraffic) DeepCopy() *CanaryTraffic {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(CanaryTraffic)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *CircuitBreaker) DeepCopyInto(out *CircuitBreaker) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CircuitBreaker.
|
||||
func (in *CircuitBreaker) DeepCopy() *CircuitBreaker {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(CircuitBreaker)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ConsistentHashLB) DeepCopyInto(out *ConsistentHashLB) {
|
||||
*out = *in
|
||||
if in.HttpCookie != nil {
|
||||
in, out := &in.HttpCookie, &out.HttpCookie
|
||||
*out = new(HTTPCookie)
|
||||
**out = **in
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsistentHashLB.
|
||||
func (in *ConsistentHashLB) DeepCopy() *ConsistentHashLB {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ConsistentHashLB)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DelayInjection) DeepCopyInto(out *DelayInjection) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelayInjection.
|
||||
func (in *DelayInjection) DeepCopy() *DelayInjection {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DelayInjection)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *FaultInjection) DeepCopyInto(out *FaultInjection) {
|
||||
*out = *in
|
||||
if in.Delay != nil {
|
||||
in, out := &in.Delay, &out.Delay
|
||||
*out = new(DelayInjection)
|
||||
**out = **in
|
||||
}
|
||||
if in.Abort != nil {
|
||||
in, out := &in.Abort, &out.Abort
|
||||
*out = new(AbortInjection)
|
||||
**out = **in
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FaultInjection.
|
||||
func (in *FaultInjection) DeepCopy() *FaultInjection {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(FaultInjection)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *GatewayConfig) DeepCopyInto(out *GatewayConfig) {
|
||||
*out = *in
|
||||
if in.Ports != nil {
|
||||
in, out := &in.Ports, &out.Ports
|
||||
*out = make([]GatewayPort, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Hosts != nil {
|
||||
in, out := &in.Hosts, &out.Hosts
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.TLS != nil {
|
||||
in, out := &in.TLS, &out.TLS
|
||||
*out = make([]GatewayTLS, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.Annotations != nil {
|
||||
in, out := &in.Annotations, &out.Annotations
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayConfig.
|
||||
func (in *GatewayConfig) DeepCopy() *GatewayConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(GatewayConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *GatewayPort) DeepCopyInto(out *GatewayPort) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayPort.
|
||||
func (in *GatewayPort) DeepCopy() *GatewayPort {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(GatewayPort)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *GatewayTLS) DeepCopyInto(out *GatewayTLS) {
|
||||
*out = *in
|
||||
if in.Hosts != nil {
|
||||
in, out := &in.Hosts, &out.Hosts
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayTLS.
|
||||
func (in *GatewayTLS) DeepCopy() *GatewayTLS {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(GatewayTLS)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *HTTPCookie) DeepCopyInto(out *HTTPCookie) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPCookie.
|
||||
func (in *HTTPCookie) DeepCopy() *HTTPCookie {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(HTTPCookie)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *HTTPGetAction) DeepCopyInto(out *HTTPGetAction) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPGetAction.
|
||||
func (in *HTTPGetAction) DeepCopy() *HTTPGetAction {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(HTTPGetAction)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *HTTPMatchRequest) DeepCopyInto(out *HTTPMatchRequest) {
|
||||
*out = *in
|
||||
if in.URI != nil {
|
||||
in, out := &in.URI, &out.URI
|
||||
*out = new(StringMatch)
|
||||
**out = **in
|
||||
}
|
||||
if in.Method != nil {
|
||||
in, out := &in.Method, &out.Method
|
||||
*out = new(StringMatch)
|
||||
**out = **in
|
||||
}
|
||||
if in.Headers != nil {
|
||||
in, out := &in.Headers, &out.Headers
|
||||
*out = make(map[string]StringMatch, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPMatchRequest.
|
||||
func (in *HTTPMatchRequest) DeepCopy() *HTTPMatchRequest {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(HTTPMatchRequest)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *HealthCheck) DeepCopyInto(out *HealthCheck) {
|
||||
*out = *in
|
||||
if in.HTTPGet != nil {
|
||||
in, out := &in.HTTPGet, &out.HTTPGet
|
||||
*out = new(HTTPGetAction)
|
||||
**out = **in
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheck.
|
||||
func (in *HealthCheck) DeepCopy() *HealthCheck {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(HealthCheck)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *LoadBalancerSettings) DeepCopyInto(out *LoadBalancerSettings) {
|
||||
*out = *in
|
||||
if in.ConsistentHash != nil {
|
||||
in, out := &in.ConsistentHash, &out.ConsistentHash
|
||||
*out = new(ConsistentHashLB)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerSettings.
|
||||
func (in *LoadBalancerSettings) DeepCopy() *LoadBalancerSettings {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(LoadBalancerSettings)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *MeshConfig) DeepCopyInto(out *MeshConfig) {
|
||||
*out = *in
|
||||
if in.Sidecar != nil {
|
||||
in, out := &in.Sidecar, &out.Sidecar
|
||||
*out = new(SidecarConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.Routes != nil {
|
||||
in, out := &in.Routes, &out.Routes
|
||||
*out = make([]RouteConfig, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.CircuitBreaker != nil {
|
||||
in, out := &in.CircuitBreaker, &out.CircuitBreaker
|
||||
*out = new(CircuitBreaker)
|
||||
**out = **in
|
||||
}
|
||||
if in.Retry != nil {
|
||||
in, out := &in.Retry, &out.Retry
|
||||
*out = new(RetryPolicy)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.FaultInjection != nil {
|
||||
in, out := &in.FaultInjection, &out.FaultInjection
|
||||
*out = new(FaultInjection)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.LoadBalancer != nil {
|
||||
in, out := &in.LoadBalancer, &out.LoadBalancer
|
||||
*out = new(LoadBalancerSettings)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshConfig.
|
||||
func (in *MeshConfig) DeepCopy() *MeshConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(MeshConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *MirrorConfig) DeepCopyInto(out *MirrorConfig) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MirrorConfig.
|
||||
func (in *MirrorConfig) DeepCopy() *MirrorConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(MirrorConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *NetworkPolicy) DeepCopyInto(out *NetworkPolicy) {
|
||||
*out = *in
|
||||
if in.Gateway != nil {
|
||||
in, out := &in.Gateway, &out.Gateway
|
||||
*out = new(GatewayConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.Mesh != nil {
|
||||
in, out := &in.Mesh, &out.Mesh
|
||||
*out = new(MeshConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicy.
|
||||
func (in *NetworkPolicy) DeepCopy() *NetworkPolicy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(NetworkPolicy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Port) DeepCopyInto(out *Port) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Port.
|
||||
func (in *Port) DeepCopy() *Port {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Port)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequirements.
|
||||
func (in *ResourceRequirements) DeepCopy() *ResourceRequirements {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ResourceRequirements)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *RetryPolicy) DeepCopyInto(out *RetryPolicy) {
|
||||
*out = *in
|
||||
if in.RetryOn != nil {
|
||||
in, out := &in.RetryOn, &out.RetryOn
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetryPolicy.
|
||||
func (in *RetryPolicy) DeepCopy() *RetryPolicy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(RetryPolicy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *RouteConfig) DeepCopyInto(out *RouteConfig) {
|
||||
*out = *in
|
||||
if in.Match != nil {
|
||||
in, out := &in.Match, &out.Match
|
||||
*out = new(HTTPMatchRequest)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
out.Destination = in.Destination
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteConfig.
|
||||
func (in *RouteConfig) DeepCopy() *RouteConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(RouteConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *RouteDestination) DeepCopyInto(out *RouteDestination) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteDestination.
|
||||
func (in *RouteDestination) DeepCopy() *RouteDestination {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(RouteDestination)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ServiceConfig) DeepCopyInto(out *ServiceConfig) {
|
||||
*out = *in
|
||||
if in.Ports != nil {
|
||||
in, out := &in.Ports, &out.Ports
|
||||
*out = make([]ServicePort, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.NodePorts != nil {
|
||||
in, out := &in.NodePorts, &out.NodePorts
|
||||
*out = make(map[string]int32, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.LoadBalancerSourceRanges != nil {
|
||||
in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Annotations != nil {
|
||||
in, out := &in.Annotations, &out.Annotations
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.Labels != nil {
|
||||
in, out := &in.Labels, &out.Labels
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceConfig.
|
||||
func (in *ServiceConfig) DeepCopy() *ServiceConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ServiceConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ServicePort) DeepCopyInto(out *ServicePort) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePort.
|
||||
func (in *ServicePort) DeepCopy() *ServicePort {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ServicePort)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *SidecarConfig) DeepCopyInto(out *SidecarConfig) {
|
||||
*out = *in
|
||||
if in.Resources != nil {
|
||||
in, out := &in.Resources, &out.Resources
|
||||
*out = new(ResourceRequirements)
|
||||
**out = **in
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SidecarConfig.
|
||||
func (in *SidecarConfig) DeepCopy() *SidecarConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(SidecarConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *StringMatch) DeepCopyInto(out *StringMatch) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringMatch.
|
||||
func (in *StringMatch) DeepCopy() *StringMatch {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(StringMatch)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *TrafficPolicy) DeepCopyInto(out *TrafficPolicy) {
|
||||
*out = *in
|
||||
if in.Canary != nil {
|
||||
in, out := &in.Canary, &out.Canary
|
||||
*out = new(CanaryTraffic)
|
||||
**out = **in
|
||||
}
|
||||
if in.Retry != nil {
|
||||
in, out := &in.Retry, &out.Retry
|
||||
*out = new(RetryPolicy)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.CircuitBreaker != nil {
|
||||
in, out := &in.CircuitBreaker, &out.CircuitBreaker
|
||||
*out = new(CircuitBreaker)
|
||||
**out = **in
|
||||
}
|
||||
if in.Mirror != nil {
|
||||
in, out := &in.Mirror, &out.Mirror
|
||||
*out = new(MirrorConfig)
|
||||
**out = **in
|
||||
}
|
||||
if in.LoadBalancer != nil {
|
||||
in, out := &in.LoadBalancer, &out.LoadBalancer
|
||||
*out = new(LoadBalancerSettings)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.FaultInjection != nil {
|
||||
in, out := &in.FaultInjection, &out.FaultInjection
|
||||
*out = new(FaultInjection)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficPolicy.
|
||||
func (in *TrafficPolicy) DeepCopy() *TrafficPolicy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(TrafficPolicy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
177
modules/k8s/api/devcontainer/v1/devcontainerapp_types.go
Normal file
177
modules/k8s/api/devcontainer/v1/devcontainerapp_types.go
Normal file
@@ -0,0 +1,177 @@
/*
Copyright 2024.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.

// ExtraPortSpec defines an additional port mapping.
type ExtraPortSpec struct {
	// Name is the name of the port.
	// +optional
	Name string `json:"name,omitempty"`

	// ContainerPort is the port number inside the container.
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=65535
	ContainerPort uint16 `json:"containerPort"`

	// ServicePort is the port number exposed by the Service.
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=65535
	ServicePort uint16 `json:"servicePort"`
}

// ExtraPortAssigned records an additional port that has already been allocated.
type ExtraPortAssigned struct {
	// Name is the name of the port.
	// +optional
	Name string `json:"name,omitempty"`

	// ContainerPort is the port number inside the container.
	ContainerPort uint16 `json:"containerPort"`

	// ServicePort is the port number exposed by the Service.
	ServicePort uint16 `json:"servicePort"`

	// NodePort is the NodePort allocated by Kubernetes.
	NodePort uint16 `json:"nodePort"`
}

// DevcontainerAppSpec defines the desired state of DevcontainerApp
type DevcontainerAppSpec struct {
	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	StatefulSet StatefulSetSpec `json:"statefulset"`
	// +optional
	Service ServiceSpec `json:"service"`

	// +kubebuilder:validation:Minimum=0

	// Optional deadline in seconds for starting the job if it misses scheduled
	// time for any reason. Missed jobs executions will be counted as failed ones.
	// +optional
	StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty"`

	// This flag tells the controller to suspend subsequent executions, it does
	// not apply to already started executions. Defaults to false.
	// +optional
	Suspend *bool `json:"suspend,omitempty"`

	// +kubebuilder:validation:Minimum=0

	// The number of successful finished jobs to retain.
	// This is a pointer to distinguish between explicit zero and not specified.
	// +optional
	SuccessfulJobsHistoryLimit *int32 `json:"successfulJobsHistoryLimit,omitempty"`

	// +kubebuilder:validation:Minimum=0

	// The number of failed finished jobs to retain.
	// This is a pointer to distinguish between explicit zero and not specified.
	// +optional
	FailedJobsHistoryLimit *int32 `json:"failedJobsHistoryLimit,omitempty"`
}

// StatefulSetSpec specifies the StatefulSet for a DevContainer.
type StatefulSetSpec struct {
	Image   string   `json:"image"`
	Command []string `json:"command"`

	GitRepositoryURL string `json:"gitRepositoryURL"`

	// +kubebuilder:validation:MinItems=1
	// At least one SSH public key is required to pass validation.
	SSHPublicKeyList []string `json:"sshPublicKeyList"`

	// +kubebuilder:validation:Minimum=1
	// +optional
	ContainerPort uint16 `json:"containerPort,omitempty"`
}

// ServiceSpec specifies the Service for a DevContainer.
type ServiceSpec struct {
	// +kubebuilder:validation:Minimum=30000
	// +kubebuilder:validation:Maximum=32767
	// +optional
	NodePort uint16 `json:"nodePort,omitempty"`

	// +kubebuilder:validation:Minimum=1
	// +optional
	ServicePort uint16 `json:"servicePort,omitempty"`

	// ExtraPorts defines additional port mappings.
	// +optional
	ExtraPorts []ExtraPortSpec `json:"extraPorts,omitempty"`
}

// DevcontainerAppStatus defines the observed state of DevcontainerApp
type DevcontainerAppStatus struct {
	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// A list of pointers to currently running jobs.
	// +optional
	Active []corev1.ObjectReference `json:"active,omitempty"`

	// Information when was the last time the job was successfully scheduled.
	// +optional
	LastScheduleTime *metav1.Time `json:"lastScheduleTime,omitempty"`

	// NodePortAssigned stores the NodePort allocated by the cluster after the
	// DevcontainerApp CRD has been scheduled.
	// +optional
	NodePortAssigned uint16 `json:"nodePortAssigned"`

	// ExtraPortsAssigned stores the NodePorts of the additional port mappings.
	// +optional
	ExtraPortsAssigned []ExtraPortAssigned `json:"extraPortsAssigned,omitempty"`

	// Ready reports whether the readiness probe of the Pod managed by this
	// DevcontainerApp has succeeded.
	// +optional
	Ready bool `json:"ready"`
}

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status

// DevcontainerApp is the Schema for the devcontainerapps API
type DevcontainerApp struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   DevcontainerAppSpec   `json:"spec,omitempty"`
	Status DevcontainerAppStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// DevcontainerAppList contains a list of DevcontainerApp
type DevcontainerAppList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []DevcontainerApp `json:"items"`
}

func init() {
	SchemeBuilder.Register(&DevcontainerApp{}, &DevcontainerAppList{})
}
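For reference, a minimal sketch of how these spec fields fit together when a DevcontainerApp is constructed in Go. The object name, namespace, image, repository URL, and SSH key below are placeholders; only the field names come from the API above.

```go
package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	devcontainerv1 "code.gitea.io/gitea/modules/k8s/api/devcontainer/v1"
)

func main() {
	// Placeholder values throughout; the structure mirrors DevcontainerAppSpec.
	app := &devcontainerv1.DevcontainerApp{
		ObjectMeta: metav1.ObjectMeta{Name: "demo-dev", Namespace: "default"},
		Spec: devcontainerv1.DevcontainerAppSpec{
			StatefulSet: devcontainerv1.StatefulSetSpec{
				Image:            "example.com/devstar/devcontainer:latest",
				Command:          []string{"/usr/sbin/sshd", "-D"},
				GitRepositoryURL: "https://example.com/org/repo.git",
				SSHPublicKeyList: []string{"ssh-ed25519 AAAA... user@host"},
				ContainerPort:    22,
			},
			Service: devcontainerv1.ServiceSpec{
				ServicePort: 22,
				ExtraPorts: []devcontainerv1.ExtraPortSpec{
					{Name: "web", ContainerPort: 8080, ServicePort: 8080},
				},
			},
		},
	}
	_ = app // normally created in the cluster through a controller-runtime client
}
```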
36
modules/k8s/api/devcontainer/v1/groupversion_info.go
Normal file
@@ -0,0 +1,36 @@
/*
Copyright 2024.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package v1 contains API Schema definitions for the devcontainer v1 API group
// +kubebuilder:object:generate=true
// +groupName=devcontainer.devstar.cn
package v1

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/scheme"
)

var (
	// GroupVersion is group version used to register these objects
	GroupVersion = schema.GroupVersion{Group: "devcontainer.devstar.cn", Version: "v1"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)
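A small sketch of how this group-version registration is consumed by a caller: `AddToScheme` is applied to a runtime scheme so that clients and managers can recognize the new kinds. The controller wrappers later in this change make the same call against the manager's scheme.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"

	devcontainerv1 "code.gitea.io/gitea/modules/k8s/api/devcontainer/v1"
)

func main() {
	// Register the devcontainer.devstar.cn/v1 types into a fresh scheme.
	sch := runtime.NewScheme()
	if err := devcontainerv1.AddToScheme(sch); err != nil {
		panic(err)
	}
	// The scheme now recognizes the DevcontainerApp kind.
	fmt.Println(sch.Recognizes(devcontainerv1.GroupVersion.WithKind("DevcontainerApp"))) // true
}
```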
226
modules/k8s/api/devcontainer/v1/zz_generated.deepcopy.go
Normal file
@@ -0,0 +1,226 @@
|
||||
//go:build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2024.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by controller-gen. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DevcontainerApp) DeepCopyInto(out *DevcontainerApp) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
in.Status.DeepCopyInto(&out.Status)
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevcontainerApp.
|
||||
func (in *DevcontainerApp) DeepCopy() *DevcontainerApp {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DevcontainerApp)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *DevcontainerApp) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DevcontainerAppList) DeepCopyInto(out *DevcontainerAppList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]DevcontainerApp, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevcontainerAppList.
|
||||
func (in *DevcontainerAppList) DeepCopy() *DevcontainerAppList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DevcontainerAppList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *DevcontainerAppList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DevcontainerAppSpec) DeepCopyInto(out *DevcontainerAppSpec) {
|
||||
*out = *in
|
||||
in.StatefulSet.DeepCopyInto(&out.StatefulSet)
|
||||
in.Service.DeepCopyInto(&out.Service)
|
||||
if in.StartingDeadlineSeconds != nil {
|
||||
in, out := &in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds
|
||||
*out = new(int64)
|
||||
**out = **in
|
||||
}
|
||||
if in.Suspend != nil {
|
||||
in, out := &in.Suspend, &out.Suspend
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.SuccessfulJobsHistoryLimit != nil {
|
||||
in, out := &in.SuccessfulJobsHistoryLimit, &out.SuccessfulJobsHistoryLimit
|
||||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
if in.FailedJobsHistoryLimit != nil {
|
||||
in, out := &in.FailedJobsHistoryLimit, &out.FailedJobsHistoryLimit
|
||||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevcontainerAppSpec.
|
||||
func (in *DevcontainerAppSpec) DeepCopy() *DevcontainerAppSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DevcontainerAppSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DevcontainerAppStatus) DeepCopyInto(out *DevcontainerAppStatus) {
|
||||
*out = *in
|
||||
if in.Active != nil {
|
||||
in, out := &in.Active, &out.Active
|
||||
*out = make([]corev1.ObjectReference, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.LastScheduleTime != nil {
|
||||
in, out := &in.LastScheduleTime, &out.LastScheduleTime
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
if in.ExtraPortsAssigned != nil {
|
||||
in, out := &in.ExtraPortsAssigned, &out.ExtraPortsAssigned
|
||||
*out = make([]ExtraPortAssigned, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevcontainerAppStatus.
|
||||
func (in *DevcontainerAppStatus) DeepCopy() *DevcontainerAppStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DevcontainerAppStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ExtraPortAssigned) DeepCopyInto(out *ExtraPortAssigned) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraPortAssigned.
|
||||
func (in *ExtraPortAssigned) DeepCopy() *ExtraPortAssigned {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ExtraPortAssigned)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ExtraPortSpec) DeepCopyInto(out *ExtraPortSpec) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraPortSpec.
|
||||
func (in *ExtraPortSpec) DeepCopy() *ExtraPortSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ExtraPortSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) {
|
||||
*out = *in
|
||||
if in.ExtraPorts != nil {
|
||||
in, out := &in.ExtraPorts, &out.ExtraPorts
|
||||
*out = make([]ExtraPortSpec, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec.
|
||||
func (in *ServiceSpec) DeepCopy() *ServiceSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ServiceSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) {
|
||||
*out = *in
|
||||
if in.Command != nil {
|
||||
in, out := &in.Command, &out.Command
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.SSHPublicKeyList != nil {
|
||||
in, out := &in.SSHPublicKeyList, &out.SSHPublicKeyList
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetSpec.
|
||||
func (in *StatefulSetSpec) DeepCopy() *StatefulSetSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(StatefulSetSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
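The generated deepcopy functions above give the API types value semantics for the controller's caches: nested slices and pointers are cloned rather than shared. A minimal sketch of that behavior, using placeholder key values:

```go
package main

import (
	"fmt"

	devcontainerv1 "code.gitea.io/gitea/modules/k8s/api/devcontainer/v1"
)

func main() {
	orig := &devcontainerv1.DevcontainerApp{}
	orig.Spec.StatefulSet.SSHPublicKeyList = []string{"key-a"}

	// DeepCopy clones the nested slice, so mutating the copy leaves the original intact.
	cp := orig.DeepCopy()
	cp.Spec.StatefulSet.SSHPublicKeyList[0] = "key-b"

	fmt.Println(orig.Spec.StatefulSet.SSHPublicKeyList[0]) // "key-a"
}
```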
256
modules/k8s/cmd/controller-manager/app/options/options.go
Normal file
@@ -0,0 +1,256 @@
|
||||
package options
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"flag"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"k8s.io/client-go/tools/leaderelection"
|
||||
cliflag "k8s.io/component-base/cli/flag"
|
||||
"k8s.io/klog/v2"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/healthz"
|
||||
"sigs.k8s.io/controller-runtime/pkg/metrics/filters"
|
||||
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
|
||||
"sigs.k8s.io/controller-runtime/pkg/webhook"
|
||||
|
||||
"code.gitea.io/gitea/modules/k8s/controller"
|
||||
)
|
||||
|
||||
type ControllerManagerOptions struct {
|
||||
KubeConfig string
|
||||
Master string
|
||||
MetricsAddr string
|
||||
HealthProbeAddr string
|
||||
|
||||
LeaderElect bool
|
||||
LeaderElection *leaderelection.LeaderElectionConfig
|
||||
|
||||
WebhookCertDir string
|
||||
SecureMetrics bool
|
||||
EnableHTTP2 bool
|
||||
|
||||
// ControllerGates is the list of controller gates to enable or disable controller.
|
||||
// '*' means "all enabled by default controllers"
|
||||
// 'foo' means "enable 'foo'"
|
||||
// '-foo' means "disable 'foo'"
|
||||
// first item for a particular name wins.
|
||||
ControllerGates []string
|
||||
|
||||
DebugMode bool
|
||||
}
|
||||
|
||||
func NewControllerManagerOptions() *ControllerManagerOptions {
|
||||
return &ControllerManagerOptions{
|
||||
KubeConfig: "",
|
||||
Master: "",
|
||||
MetricsAddr: ":8080",
|
||||
HealthProbeAddr: ":8081",
|
||||
LeaderElect: false,
|
||||
LeaderElection: &leaderelection.LeaderElectionConfig{
|
||||
LeaseDuration: 30 * time.Second,
|
||||
RenewDeadline: 15 * time.Second,
|
||||
RetryPeriod: 5 * time.Second,
|
||||
},
|
||||
WebhookCertDir: "",
|
||||
SecureMetrics: true,
|
||||
EnableHTTP2: false,
|
||||
ControllerGates: []string{"*"},
|
||||
DebugMode: false,
|
||||
}
|
||||
}
|
||||
|
||||
// Flags 返回一组命名的命令行标志集合
|
||||
func (s *ControllerManagerOptions) Flags() cliflag.NamedFlagSets {
|
||||
fss := cliflag.NamedFlagSets{}
|
||||
|
||||
// Kubernetes 相关选项
|
||||
fs := fss.FlagSet("kubernetes")
|
||||
fs.StringVar(&s.KubeConfig, "kubeconfig", s.KubeConfig, "Path to kubeconfig file with authorization and master location information.")
|
||||
fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server.")
|
||||
|
||||
// 指标和健康检查
|
||||
fs = fss.FlagSet("metrics")
|
||||
fs.StringVar(&s.MetricsAddr, "metrics-bind-address", s.MetricsAddr, "The address the metric endpoint binds to. Use :8443 for HTTPS or :8080 for HTTP, or 0 to disable.")
|
||||
fs.StringVar(&s.HealthProbeAddr, "health-probe-bind-address", s.HealthProbeAddr, "The address the probe endpoint binds to.")
|
||||
fs.BoolVar(&s.SecureMetrics, "metrics-secure", s.SecureMetrics, "If set, metrics endpoint is served securely via HTTPS.")
|
||||
|
||||
// Leader 选举相关选项
|
||||
fs = fss.FlagSet("leaderelection")
|
||||
fs.BoolVar(&s.LeaderElect, "leader-elect", s.LeaderElect, "Whether to enable leader election. This field should be enabled when controller manager deployed with multiple replicas.")
|
||||
s.bindLeaderElectionFlags(s.LeaderElection, fs)
|
||||
|
||||
// Webhook 相关选项
|
||||
fs = fss.FlagSet("webhook")
|
||||
fs.StringVar(&s.WebhookCertDir, "webhook-cert-dir", s.WebhookCertDir, "Certificate directory used to setup webhooks, need tls.crt and tls.key placed inside. If not set, webhook server would look up the server key and certificate in {TempDir}/k8s-webhook-server/serving-certs")
|
||||
fs.BoolVar(&s.EnableHTTP2, "enable-http2", s.EnableHTTP2, "If set, HTTP/2 will be enabled for the metrics and webhook servers")
|
||||
|
||||
// 一般选项
|
||||
fs = fss.FlagSet("generic")
|
||||
fs.StringSliceVar(&s.ControllerGates, "controllers", s.ControllerGates, fmt.Sprintf("A list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller named 'foo', '-foo' disables the controller named 'foo'.\nAll controllers: %s",
|
||||
strings.Join(controller.GetAllControllers().List(), ", ")))
|
||||
fs.BoolVar(&s.DebugMode, "debug", s.DebugMode, "Don't enable this if you don't know what it means.")
|
||||
|
||||
// klog 选项
|
||||
kfs := fss.FlagSet("klog")
|
||||
local := flag.NewFlagSet("klog", flag.ExitOnError)
|
||||
klog.InitFlags(local)
|
||||
local.VisitAll(func(fl *flag.Flag) {
|
||||
fl.Name = strings.Replace(fl.Name, "_", "-", -1)
|
||||
kfs.AddGoFlag(fl)
|
||||
})
|
||||
|
||||
return fss
|
||||
}
|
||||
|
||||
// 绑定 Leader 选举相关标志
|
||||
func (s *ControllerManagerOptions) bindLeaderElectionFlags(l *leaderelection.LeaderElectionConfig, fs *pflag.FlagSet) {
|
||||
fs.DurationVar(&l.LeaseDuration, "leader-elect-lease-duration", l.LeaseDuration, ""+
|
||||
"The duration that non-leader candidates will wait after observing a leadership "+
|
||||
"renewal until attempting to acquire leadership of a led but unrenewed leader "+
|
||||
"slot. This is effectively the maximum duration that a leader can be stopped "+
|
||||
"before it is replaced by another candidate. This is only applicable if leader "+
|
||||
"election is enabled.")
|
||||
fs.DurationVar(&l.RenewDeadline, "leader-elect-renew-deadline", l.RenewDeadline, ""+
|
||||
"The interval between attempts by the acting master to renew a leadership slot "+
|
||||
"before it stops leading. This must be less than or equal to the lease duration. "+
|
||||
"This is only applicable if leader election is enabled.")
|
||||
fs.DurationVar(&l.RetryPeriod, "leader-elect-retry-period", l.RetryPeriod, ""+
|
||||
"The duration the clients should wait between attempting acquisition and renewal "+
|
||||
"of a leadership. This is only applicable if leader election is enabled.")
|
||||
}
|
||||
|
||||
// Validate 验证选项
|
||||
func (s *ControllerManagerOptions) Validate() []error {
|
||||
var errs []error
|
||||
|
||||
// 验证 ControllerGates
|
||||
allControllersNameSet := controller.GetAllControllers()
|
||||
for _, selector := range s.ControllerGates {
|
||||
if selector == "*" {
|
||||
continue
|
||||
}
|
||||
selector = strings.TrimPrefix(selector, "-")
|
||||
if !allControllersNameSet.Has(selector) {
|
||||
errs = append(errs, fmt.Errorf("%q is not in the list of known controllers", selector))
|
||||
}
|
||||
}
|
||||
|
||||
return errs
|
||||
}
|
||||
|
||||
// IsControllerEnabled 检查指定的控制器是否启用
|
||||
func (s *ControllerManagerOptions) IsControllerEnabled(name string) bool {
|
||||
allowedAll := false
|
||||
for _, controllerGate := range s.ControllerGates {
|
||||
if controllerGate == name {
|
||||
return true
|
||||
}
|
||||
if controllerGate == "-"+name {
|
||||
return false
|
||||
}
|
||||
if controllerGate == "*" {
|
||||
allowedAll = true
|
||||
}
|
||||
}
|
||||
return allowedAll
|
||||
}
|
||||
|
||||
// NewControllerManager 创建并返回一个新的控制器管理器
|
||||
func (s *ControllerManagerOptions) NewControllerManager() (*controller.Manager, error) {
|
||||
cm := &controller.Manager{}
|
||||
|
||||
// TLS 选项
|
||||
tlsOpts := []func(*tls.Config){}
|
||||
|
||||
// 如果未启用 HTTP/2,则禁用它以防止 HTTP/2 流取消和快速重置 CVE 的漏洞
|
||||
if !s.EnableHTTP2 {
|
||||
disableHTTP2 := func(c *tls.Config) {
|
||||
klog.V(4).Info("disabling http/2")
|
||||
c.NextProtos = []string{"http/1.1"}
|
||||
}
|
||||
tlsOpts = append(tlsOpts, disableHTTP2)
|
||||
}
|
||||
|
||||
// Webhook 服务器配置
|
||||
webhookServer := webhook.NewServer(webhook.Options{
|
||||
CertDir: s.WebhookCertDir,
|
||||
TLSOpts: tlsOpts,
|
||||
Port: 8443,
|
||||
})
|
||||
|
||||
// 度量服务器配置
|
||||
metricsServerOptions := metricsserver.Options{
|
||||
BindAddress: s.MetricsAddr,
|
||||
SecureServing: s.SecureMetrics,
|
||||
TLSOpts: tlsOpts,
|
||||
}
|
||||
|
||||
if s.SecureMetrics {
|
||||
// 使用身份验证和授权来保护度量端点
|
||||
metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization
|
||||
}
|
||||
|
||||
// 设置控制器管理器选项
|
||||
controllerOpts := ctrl.Options{
|
||||
Scheme: controller.Scheme,
|
||||
Metrics: metricsServerOptions,
|
||||
WebhookServer: webhookServer,
|
||||
HealthProbeBindAddress: s.HealthProbeAddr,
|
||||
}
|
||||
|
||||
// 配置 Leader 选举
|
||||
if s.LeaderElect {
|
||||
controllerOpts.LeaderElection = s.LeaderElect
|
||||
controllerOpts.LeaderElectionNamespace = "devstar-system"
|
||||
controllerOpts.LeaderElectionID = "devstar-controller-manager-leader-election"
|
||||
leaseDuration := s.LeaderElection.LeaseDuration
|
||||
renewDeadline := s.LeaderElection.RenewDeadline
|
||||
retryPeriod := s.LeaderElection.RetryPeriod
|
||||
controllerOpts.LeaseDuration = &leaseDuration
|
||||
controllerOpts.RenewDeadline = &renewDeadline
|
||||
controllerOpts.RetryPeriod = &retryPeriod
|
||||
}
|
||||
|
||||
// 创建 controller-runtime 管理器
|
||||
klog.V(0).Info("setting up manager")
|
||||
ctrl.SetLogger(klog.NewKlogr())
|
||||
|
||||
// 获取 Kubernetes 配置
|
||||
var config *rest.Config
|
||||
var err error
|
||||
|
||||
if s.KubeConfig != "" {
|
||||
config, err = clientcmd.BuildConfigFromFlags(s.Master, s.KubeConfig)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to get kubeconfig: %v", err)
|
||||
}
|
||||
} else {
|
||||
config = ctrl.GetConfigOrDie()
|
||||
}
|
||||
|
||||
// 创建管理器
|
||||
mgr, err := ctrl.NewManager(config, controllerOpts)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to set up overall controller manager: %v", err)
|
||||
}
|
||||
|
||||
// 添加健康检查
|
||||
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
|
||||
return nil, fmt.Errorf("unable to set up health check: %v", err)
|
||||
}
|
||||
if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
|
||||
return nil, fmt.Errorf("unable to set up ready check: %v", err)
|
||||
}
|
||||
|
||||
// 设置控制器管理器
|
||||
cm.Manager = mgr
|
||||
cm.IsControllerEnabled = s.IsControllerEnabled
|
||||
|
||||
return cm, nil
|
||||
}
|
||||
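The options file above defines the controller-gate semantics exposed through the `--controllers` flag: `*` enables all on-by-default controllers, `foo` enables `foo`, `-foo` disables it, and the first match for a given name wins. A short sketch of how `IsControllerEnabled` resolves a typical gate list:

```go
package main

import (
	"fmt"

	"code.gitea.io/gitea/modules/k8s/cmd/controller-manager/app/options"
)

func main() {
	// Equivalent to --controllers="*,-application": everything on by default,
	// except the application controller.
	s := options.NewControllerManagerOptions()
	s.ControllerGates = []string{"*", "-application"}

	fmt.Println(s.IsControllerEnabled("devcontainer")) // true, allowed via "*"
	fmt.Println(s.IsControllerEnabled("application"))  // false, explicitly disabled
}
```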
71
modules/k8s/cmd/controller-manager/app/server.go
Normal file
@@ -0,0 +1,71 @@
package app

import (
	"context"
	"fmt"

	"github.com/spf13/cobra"
	"k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/apimachinery/pkg/util/runtime"
	cliflag "k8s.io/component-base/cli/flag"
	"k8s.io/component-base/term"
	"k8s.io/klog/v2"
	"sigs.k8s.io/controller-runtime/pkg/manager/signals"

	"code.gitea.io/gitea/modules/k8s/cmd/controller-manager/app/options"
	"code.gitea.io/gitea/modules/k8s/controller"
	"code.gitea.io/gitea/modules/k8s/controller/application"
	"code.gitea.io/gitea/modules/k8s/controller/devcontainer"
)

func init() {
	// Register all controllers at initialization time.
	runtime.Must(controller.Register(&devcontainer.Controller{}))
	runtime.Must(controller.Register(&application.Controller{}))
}

// NewControllerManagerCommand creates the command that starts the controller manager.
func NewControllerManagerCommand() *cobra.Command {
	s := options.NewControllerManagerOptions()

	cmd := &cobra.Command{
		Use:  "controller-manager",
		Long: `DevStar controller manager is a daemon that embeds the control loops shipped with DevStar.`,
		RunE: func(cmd *cobra.Command, args []string) error {
			if errs := s.Validate(); len(errs) != 0 {
				return errors.NewAggregate(errs)
			}

			return Run(signals.SetupSignalHandler(), s)
		},
	}

	fs := cmd.Flags()
	namedFlagSets := s.Flags()
	for _, f := range namedFlagSets.FlagSets {
		fs.AddFlagSet(f)
	}

	usageFmt := "Usage:\n %s\n"
	cols, _, _ := term.TerminalSize(cmd.OutOrStdout())
	cmd.SetUsageFunc(func(cmd *cobra.Command) error {
		_, _ = fmt.Fprintf(cmd.OutOrStderr(), usageFmt, cmd.UseLine())
		cliflag.PrintSections(cmd.OutOrStderr(), namedFlagSets, cols)
		return nil
	})

	return cmd
}

// Run starts the controller manager.
func Run(ctx context.Context, s *options.ControllerManagerOptions) error {
	klog.InfoS("Starting DevStar controller manager")

	cm, err := s.NewControllerManager()
	if err != nil {
		return err
	}

	// Start the controller manager.
	return cm.Start(ctx)
}
17
modules/k8s/cmd/controller-manager/controller-manager.go
Normal file
@@ -0,0 +1,17 @@
package main

import (
	"os"

	"k8s.io/klog/v2"

	"code.gitea.io/gitea/modules/k8s/cmd/controller-manager/app"
)

func main() {
	cmd := app.NewControllerManagerCommand()
	if err := cmd.Execute(); err != nil {
		klog.Error(err)
		os.Exit(1)
	}
}
1806
modules/k8s/controller/application/application_controller.go
Normal file
File diff suppressed because it is too large
42
modules/k8s/controller/application/controller-wrapper.go
Normal file
@@ -0,0 +1,42 @@
package application

import (
	"fmt"

	"k8s.io/klog/v2"
	"sigs.k8s.io/controller-runtime/pkg/manager"

	applicationv1 "code.gitea.io/gitea/modules/k8s/api/application/v1"
)

// Controller implements the controller.Controller interface.
type Controller struct{}

// Name returns the controller name.
func (c *Controller) Name() string {
	return "application"
}

// Init initializes the controller.
func (c *Controller) Init(mgr manager.Manager) error {
	// Add the API types to the scheme.
	klog.InfoS("Adding Application API to scheme")
	if err := applicationv1.AddToScheme(mgr.GetScheme()); err != nil {
		return fmt.Errorf("unable to add Application API to scheme: %w", err)
	}

	// Create the Application reconciler.
	klog.InfoS("Creating Application reconciler")
	reconciler := &ApplicationReconciler{
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
	}

	// Wire the reconciler into the manager.
	klog.InfoS("Setting up Application with manager")
	if err := reconciler.SetupWithManager(mgr); err != nil {
		return fmt.Errorf("failed to setup Application controller: %w", err)
	}

	return nil
}
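The `controller.Controller` interface that this wrapper (and the devcontainer wrapper later in the diff) satisfies is not itself part of this change; based on the two implementations and the `controller.Register` / `GetAllControllers` calls, it presumably looks like the following sketch:

```go
// Assumed shape only; the real definition lives in modules/k8s/controller,
// which is outside this diff.
package controller

import "sigs.k8s.io/controller-runtime/pkg/manager"

type Controller interface {
	// Name returns the gate name matched against the --controllers flag.
	Name() string
	// Init registers the controller's API types and wires its reconciler into the manager.
	Init(mgr manager.Manager) error
}
```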
90
modules/k8s/controller/application/templates/deployment.yaml
Normal file
@@ -0,0 +1,90 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{.ObjectMeta.Name}}
|
||||
namespace: {{.ObjectMeta.Namespace}}
|
||||
labels:
|
||||
app: {{.ObjectMeta.Name}}
|
||||
devstar.io/managed-by: devstar-application-operator
|
||||
devstar-resource-type: devstar-application
|
||||
spec:
|
||||
replicas: {{.Spec.Replicas | default 1}}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{.ObjectMeta.Name}}
|
||||
devstar-resource-type: devstar-application
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: {{.ObjectMeta.Name}}
|
||||
devstar-resource-type: devstar-application
|
||||
spec:
|
||||
containers:
|
||||
- name: {{.ObjectMeta.Name}}
|
||||
image: {{.Spec.Template.Image}}
|
||||
{{- if .Spec.Template.Command}}
|
||||
command:
|
||||
{{- range .Spec.Template.Command}}
|
||||
- {{.}}
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
{{- if .Spec.Template.Args}}
|
||||
args:
|
||||
{{- range .Spec.Template.Args}}
|
||||
- {{.}}
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
{{- if .Spec.Template.Ports}}
|
||||
ports:
|
||||
{{- range .Spec.Template.Ports}}
|
||||
- name: {{.Name}}
|
||||
containerPort: {{.Port}}
|
||||
protocol: {{.Protocol | default "TCP"}}
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
{{- if .Spec.Environment}}
|
||||
env:
|
||||
{{- range $key, $value := .Spec.Environment}}
|
||||
- name: {{$key}}
|
||||
value: "{{$value}}"
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
{{- if or .Spec.Resources.CPU .Spec.Resources.Memory}}
|
||||
resources:
|
||||
limits:
|
||||
{{- if .Spec.Resources.CPU}}
|
||||
cpu: {{.Spec.Resources.CPU}}
|
||||
{{- end}}
|
||||
{{- if .Spec.Resources.Memory}}
|
||||
memory: {{.Spec.Resources.Memory}}
|
||||
{{- end}}
|
||||
requests:
|
||||
{{- if .Spec.Resources.CPU}}
|
||||
cpu: {{.Spec.Resources.CPU}}
|
||||
{{- end}}
|
||||
{{- if .Spec.Resources.Memory}}
|
||||
memory: {{.Spec.Resources.Memory}}
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
{{- if .Spec.Template.HealthCheck}}
|
||||
{{- if .Spec.Template.HealthCheck.HTTPGet}}
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: {{.Spec.Template.HealthCheck.HTTPGet.Path}}
|
||||
port: {{.Spec.Template.HealthCheck.HTTPGet.Port}}
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 10
|
||||
timeoutSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: {{.Spec.Template.HealthCheck.HTTPGet.Path}}
|
||||
port: {{.Spec.Template.HealthCheck.HTTPGet.Port}}
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 10
|
||||
timeoutSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
54
modules/k8s/controller/application/templates/ingress.yaml
Normal file
@@ -0,0 +1,54 @@
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: {{.ObjectMeta.Name}}-ingress
|
||||
namespace: {{.ObjectMeta.Namespace}}
|
||||
labels:
|
||||
app: {{.ObjectMeta.Name}}
|
||||
devstar-resource-type: devstar-application
|
||||
app.kubernetes.io/name: {{.ObjectMeta.Name}}
|
||||
app.kubernetes.io/managed-by: devstar-operator
|
||||
{{- if .Spec.Ingress.Annotations}}
|
||||
annotations:
|
||||
{{- range $key, $value := .Spec.Ingress.Annotations}}
|
||||
{{$key}}: "{{$value}}"
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
spec:
|
||||
{{- if .Spec.Ingress.IngressClassName}}
|
||||
ingressClassName: {{.Spec.Ingress.IngressClassName}}
|
||||
{{- end}}
|
||||
|
||||
{{- if .Spec.Ingress.TLS}}
|
||||
tls:
|
||||
{{- range .Spec.Ingress.TLS}}
|
||||
- {{- if .Hosts}}
|
||||
hosts:
|
||||
{{- range .Hosts}}
|
||||
- {{.}}
|
||||
{{- end}}
|
||||
{{- else}}
|
||||
hosts:
|
||||
- {{$.Spec.Ingress.Host}}
|
||||
{{- end}}
|
||||
{{- if .SecretName}}
|
||||
secretName: {{.SecretName}}
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
|
||||
rules:
|
||||
- host: {{.Spec.Ingress.Host}}
|
||||
http:
|
||||
paths:
|
||||
- path: {{.Spec.Ingress.Path | default "/"}}
|
||||
pathType: {{.Spec.Ingress.PathType | default "Prefix"}}
|
||||
backend:
|
||||
service:
|
||||
name: {{.ObjectMeta.Name}}-svc
|
||||
port:
|
||||
{{- if .Spec.Template.Ports}}
|
||||
number: {{(index .Spec.Template.Ports 0).Port}}
|
||||
{{- else}}
|
||||
number: 80
|
||||
{{- end}}
|
||||
88
modules/k8s/controller/application/templates/service.yaml
Normal file
@@ -0,0 +1,88 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{.ObjectMeta.Name}}-svc
|
||||
namespace: {{.ObjectMeta.Namespace}}
|
||||
labels:
|
||||
app: {{.ObjectMeta.Name}}
|
||||
devstar-resource-type: devstar-application
|
||||
app.kubernetes.io/name: {{.ObjectMeta.Name}}
|
||||
app.kubernetes.io/managed-by: devstar-operator
|
||||
{{- if .Spec.Service}}
|
||||
{{- if .Spec.Service.Labels}}
|
||||
{{- range $key, $value := .Spec.Service.Labels}}
|
||||
{{$key}}: "{{$value}}"
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
{{- if .Spec.Service}}
|
||||
{{- if .Spec.Service.Annotations}}
|
||||
annotations:
|
||||
{{- range $key, $value := .Spec.Service.Annotations}}
|
||||
{{$key}}: "{{$value}}"
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
spec:
|
||||
selector:
|
||||
app: {{.ObjectMeta.Name}}
|
||||
devstar-resource-type: devstar-application
|
||||
type: {{if .Spec.Service}}{{.Spec.Service.Type | default "ClusterIP"}}{{else}}ClusterIP{{end}}
|
||||
|
||||
{{- if and .Spec.Service .Spec.Service.LoadBalancerIP}}
|
||||
loadBalancerIP: {{.Spec.Service.LoadBalancerIP}}
|
||||
{{- end}}
|
||||
|
||||
{{- if and .Spec.Service .Spec.Service.LoadBalancerSourceRanges}}
|
||||
loadBalancerSourceRanges:
|
||||
{{- range .Spec.Service.LoadBalancerSourceRanges}}
|
||||
- {{.}}
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
|
||||
{{- if and .Spec.Service .Spec.Service.ExternalName}}
|
||||
externalName: {{.Spec.Service.ExternalName}}
|
||||
{{- end}}
|
||||
|
||||
{{- if and .Spec.Service .Spec.Service.SessionAffinity}}
|
||||
sessionAffinity: {{.Spec.Service.SessionAffinity}}
|
||||
{{- else}}
|
||||
sessionAffinity: None
|
||||
{{- end}}
|
||||
|
||||
{{- if and .Spec.Service (eq .Spec.Service.Type "ExternalName")}}
|
||||
# ExternalName 类型不需要 ports 和 selector
|
||||
{{- else}}
|
||||
ports:
|
||||
{{- if and .Spec.Service .Spec.Service.Ports}}
|
||||
# 使用自定义端口配置
|
||||
{{- range .Spec.Service.Ports}}
|
||||
- name: {{.Name}}
|
||||
protocol: {{.Protocol | default "TCP"}}
|
||||
port: {{.Port}}
|
||||
{{- if .TargetPort}}
|
||||
targetPort: {{.TargetPort}}
|
||||
{{- else}}
|
||||
targetPort: {{.Port}}
|
||||
{{- end}}
|
||||
{{- if and (or (eq $.Spec.Service.Type "NodePort") (eq $.Spec.Service.Type "LoadBalancer")) .NodePort}}
|
||||
nodePort: {{.NodePort}}
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
{{- else}}
|
||||
# 使用模板中的端口配置
|
||||
{{- range .Spec.Template.Ports}}
|
||||
- name: {{.Name | default (printf "port-%d" .Port)}}
|
||||
protocol: {{.Protocol | default "TCP"}}
|
||||
port: {{.Port}}
|
||||
targetPort: {{.Port}}
|
||||
{{- if and $.Spec.Service (or (eq $.Spec.Service.Type "NodePort") (eq $.Spec.Service.Type "LoadBalancer"))}}
|
||||
{{- if $.Spec.Service.NodePorts}}
|
||||
{{- if index $.Spec.Service.NodePorts .Name}}
|
||||
nodePort: {{index $.Spec.Service.NodePorts .Name}}
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
86
modules/k8s/controller/application/templates/statefulset.yaml
Normal file
@@ -0,0 +1,86 @@
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: {{.ObjectMeta.Name}}
|
||||
namespace: {{.ObjectMeta.Namespace}}
|
||||
labels:
|
||||
app: {{.ObjectMeta.Name}}
|
||||
devstar.io/managed-by: devstar-application-operator
|
||||
devstar-resource-type: devstar-application
|
||||
spec:
|
||||
serviceName: {{.ObjectMeta.Name}}-svc
|
||||
replicas: {{.Spec.Replicas | default 1}}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{.ObjectMeta.Name}}
|
||||
devstar-resource-type: devstar-application
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: {{.ObjectMeta.Name}}
|
||||
devstar-resource-type: devstar-application
|
||||
spec:
|
||||
containers:
|
||||
- name: {{.ObjectMeta.Name}}
|
||||
image: {{.Spec.Template.Image}}
|
||||
{{- if .Spec.Template.Command}}
|
||||
command:
|
||||
{{- range .Spec.Template.Command}}
|
||||
- {{.}}
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
{{- if .Spec.Template.Args}}
|
||||
args:
|
||||
{{- range .Spec.Template.Args}}
|
||||
- {{.}}
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
{{- if .Spec.Template.Ports}}
|
||||
ports:
|
||||
{{- range .Spec.Template.Ports}}
|
||||
- name: {{.Name}}
|
||||
containerPort: {{.Port}}
|
||||
protocol: {{.Protocol | default "TCP"}}
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
{{- if .Spec.Environment}}
|
||||
env:
|
||||
{{- range $key, $value := .Spec.Environment}}
|
||||
- name: {{$key}}
|
||||
value: "{{$value}}"
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
{{- if or .Spec.Resources.CPU .Spec.Resources.Memory}}
|
||||
resources:
|
||||
limits:
|
||||
{{- if .Spec.Resources.CPU}}
|
||||
cpu: {{.Spec.Resources.CPU}}
|
||||
{{- end}}
|
||||
{{- if .Spec.Resources.Memory}}
|
||||
memory: {{.Spec.Resources.Memory}}
|
||||
{{- end}}
|
||||
requests:
|
||||
{{- if .Spec.Resources.CPU}}
|
||||
cpu: {{.Spec.Resources.CPU}}
|
||||
{{- end}}
|
||||
{{- if .Spec.Resources.Memory}}
|
||||
memory: {{.Spec.Resources.Memory}}
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
{{- if .Spec.Template.HealthCheck}}
|
||||
{{- if .Spec.Template.HealthCheck.HTTPGet}}
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: {{.Spec.Template.HealthCheck.HTTPGet.Path}}
|
||||
port: {{.Spec.Template.HealthCheck.HTTPGet.Port}}
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 10
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: {{.Spec.Template.HealthCheck.HTTPGet.Path}}
|
||||
port: {{.Spec.Template.HealthCheck.HTTPGet.Port}}
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 10
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
# 如果需要持久化存储,可以在这里添加 volumeClaimTemplates
|
||||
262
modules/k8s/controller/application/utils/template_utils.go
Normal file
@@ -0,0 +1,262 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"text/template"
|
||||
|
||||
applicationv1 "code.gitea.io/gitea/modules/k8s/api/application/v1"
|
||||
apps_v1 "k8s.io/api/apps/v1"
|
||||
core_v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/serializer"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
)
|
||||
|
||||
const (
|
||||
TemplatePath = "modules/k8s/controller/application/templates/"
|
||||
)
|
||||
|
||||
// parseTemplate 解析 Go Template 模板文件
|
||||
func parseTemplate(templateName string, app *applicationv1.Application) ([]byte, error) {
|
||||
tmpl, err := template.
|
||||
New(templateName + ".yaml").
|
||||
Funcs(template.FuncMap{"default": DefaultFunc}).
|
||||
ParseFiles(TemplatePath + templateName + ".yaml")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse template %s: %w", templateName, err)
|
||||
}
|
||||
|
||||
b := new(bytes.Buffer)
|
||||
err = tmpl.Execute(b, app)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to execute template %s: %w", templateName, err)
|
||||
}
|
||||
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
// NewDeployment 使用模板创建 Deployment
|
||||
func NewDeployment(app *applicationv1.Application) (*apps_v1.Deployment, error) {
|
||||
yamlBytes, err := parseTemplate("deployment", app)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse deployment template: %w", err)
|
||||
}
|
||||
|
||||
deployment := &apps_v1.Deployment{}
|
||||
decoder := serializer.NewCodecFactory(scheme.Scheme).UniversalDecoder()
|
||||
err = runtime.DecodeInto(decoder, yamlBytes, deployment)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to decode deployment YAML: %w", err)
|
||||
}
|
||||
|
||||
// 设置 ObjectMeta
|
||||
deployment.Name = app.Name
|
||||
deployment.Namespace = app.Namespace
|
||||
|
||||
return deployment, nil
|
||||
}
|
||||
|
||||
// NewService 使用模板创建 Service
|
||||
func NewService(app *applicationv1.Application) (*core_v1.Service, error) {
|
||||
// 检查是否需要创建 Service
|
||||
if !shouldCreateService(app) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
yamlBytes, err := parseTemplate("service", app)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse service template: %w", err)
|
||||
}
|
||||
|
||||
service := &core_v1.Service{}
|
||||
decoder := serializer.NewCodecFactory(scheme.Scheme).UniversalDecoder()
|
||||
err = runtime.DecodeInto(decoder, yamlBytes, service)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to decode service YAML: %w", err)
|
||||
}
|
||||
|
||||
// 设置 ObjectMeta
|
||||
service.Name = app.Name + "-svc"
|
||||
service.Namespace = app.Namespace
|
||||
|
||||
// 后处理:根据新的 Service 配置更新
|
||||
updateServiceWithConfig(service, app)
|
||||
|
||||
return service, nil
|
||||
}
|
||||
|
||||
// NewStatefulSet 使用模板创建 StatefulSet
|
||||
func NewStatefulSet(app *applicationv1.Application) (*apps_v1.StatefulSet, error) {
|
||||
yamlBytes, err := parseTemplate("statefulset", app)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse statefulset template: %w", err)
|
||||
}
|
||||
|
||||
statefulSet := &apps_v1.StatefulSet{}
|
||||
decoder := serializer.NewCodecFactory(scheme.Scheme).UniversalDecoder()
|
||||
err = runtime.DecodeInto(decoder, yamlBytes, statefulSet)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to decode statefulset YAML: %w", err)
|
||||
}
|
||||
|
||||
// 设置 ObjectMeta
|
||||
statefulSet.Name = app.Name
|
||||
statefulSet.Namespace = app.Namespace
|
||||
|
||||
return statefulSet, nil
|
||||
}
|
||||
|
||||
// updateServiceWithConfig 根据新的 Service 配置更新 Service
|
||||
func updateServiceWithConfig(service *core_v1.Service, app *applicationv1.Application) {
|
||||
if app.Spec.Service == nil {
|
||||
return
|
||||
}
|
||||
|
||||
config := app.Spec.Service
|
||||
|
||||
// 更新服务类型
|
||||
if config.Type != "" {
|
||||
service.Spec.Type = core_v1.ServiceType(config.Type)
|
||||
}
|
||||
|
||||
// 添加自定义注解
|
||||
if config.Annotations != nil {
|
||||
if service.Annotations == nil {
|
||||
service.Annotations = make(map[string]string)
|
||||
}
|
||||
for k, v := range config.Annotations {
|
||||
service.Annotations[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
// 添加自定义标签
|
||||
if config.Labels != nil {
|
||||
if service.Labels == nil {
|
||||
service.Labels = make(map[string]string)
|
||||
}
|
||||
for k, v := range config.Labels {
|
||||
service.Labels[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
// 设置特定配置
|
||||
switch config.Type {
|
||||
case "LoadBalancer":
|
||||
if config.LoadBalancerIP != "" {
|
||||
service.Spec.LoadBalancerIP = config.LoadBalancerIP
|
||||
}
|
||||
if len(config.LoadBalancerSourceRanges) > 0 {
|
||||
service.Spec.LoadBalancerSourceRanges = config.LoadBalancerSourceRanges
|
||||
}
|
||||
case "ExternalName":
|
||||
if config.ExternalName != "" {
|
||||
service.Spec.ExternalName = config.ExternalName
|
||||
}
|
||||
// ExternalName 类型不需要 selector 和 ports
|
||||
service.Spec.Selector = nil
|
||||
service.Spec.Ports = nil
|
||||
}
|
||||
|
||||
// 设置会话亲和性
|
||||
if config.SessionAffinity != "" {
|
||||
service.Spec.SessionAffinity = core_v1.ServiceAffinity(config.SessionAffinity)
|
||||
}
|
||||
|
||||
// 更新端口配置(如果有自定义端口配置)
|
||||
if len(config.Ports) > 0 {
|
||||
service.Spec.Ports = getServicePorts(app, config)
|
||||
} else if config.NodePorts != nil {
|
||||
// 如果只配置了 NodePorts,更新现有端口的 NodePort
|
||||
updateServiceNodePorts(service, config)
|
||||
}
|
||||
}
|
||||
|
||||
// updateServiceNodePorts 更新服务的 NodePort 配置
|
||||
func updateServiceNodePorts(service *core_v1.Service, config *applicationv1.ServiceConfig) {
|
||||
for i, port := range service.Spec.Ports {
|
||||
if nodePort, exists := config.NodePorts[port.Name]; exists {
|
||||
service.Spec.Ports[i].NodePort = nodePort
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// getServicePorts 获取 Service 端口配置
|
||||
func getServicePorts(app *applicationv1.Application, config *applicationv1.ServiceConfig) []core_v1.ServicePort {
|
||||
var servicePorts []core_v1.ServicePort
|
||||
|
||||
// 如果配置了自定义端口,使用自定义端口
|
||||
if len(config.Ports) > 0 {
|
||||
for _, port := range config.Ports {
|
||||
servicePort := core_v1.ServicePort{
|
||||
Name: port.Name,
|
||||
Port: port.Port,
|
||||
Protocol: core_v1.Protocol(getPortProtocol(port.Protocol)),
|
||||
}
|
||||
|
||||
// 设置目标端口
|
||||
if port.TargetPort != "" {
|
||||
servicePort.TargetPort = intstr.FromString(port.TargetPort)
|
||||
} else {
|
||||
servicePort.TargetPort = intstr.FromInt(int(port.Port))
|
||||
}
|
||||
|
||||
// 设置 NodePort(仅适用于 NodePort 和 LoadBalancer 类型)
|
||||
if (config.Type == "NodePort" || config.Type == "LoadBalancer") && port.NodePort > 0 {
|
||||
servicePort.NodePort = port.NodePort
|
||||
}
|
||||
|
||||
servicePorts = append(servicePorts, servicePort)
|
||||
}
|
||||
} else {
|
||||
// 使用模板中的端口配置
|
||||
for _, port := range app.Spec.Template.Ports {
|
||||
servicePort := core_v1.ServicePort{
|
||||
Name: port.Name,
|
||||
Port: port.Port,
|
||||
TargetPort: intstr.FromInt(int(port.Port)),
|
||||
Protocol: core_v1.Protocol(getPortProtocol(port.Protocol)),
|
||||
}
|
||||
|
||||
// 如果是 NodePort 类型,检查是否有指定的 NodePort
|
||||
if (config.Type == "NodePort" || config.Type == "LoadBalancer") &&
|
||||
config.NodePorts != nil {
|
||||
if nodePort, exists := config.NodePorts[port.Name]; exists {
|
||||
servicePort.NodePort = nodePort
|
||||
}
|
||||
}
|
||||
|
||||
servicePorts = append(servicePorts, servicePort)
|
||||
}
|
||||
}
|
||||
|
||||
return servicePorts
|
||||
}
|
||||
|
||||
// shouldCreateService 判断是否需要创建 Service
|
||||
func shouldCreateService(app *applicationv1.Application) bool {
|
||||
// 优先使用新的 Service 配置
|
||||
if app.Spec.Service != nil {
|
||||
return app.Spec.Service.Enabled
|
||||
}
|
||||
|
||||
// 向后兼容:使用旧的 expose 配置
|
||||
return app.Spec.Expose && len(app.Spec.Template.Ports) > 0
|
||||
}
|
||||
|
||||
// getPortProtocol 获取端口协议,设置默认值
|
||||
func getPortProtocol(protocol string) string {
|
||||
if protocol == "" {
|
||||
return "TCP"
|
||||
}
|
||||
return protocol
|
||||
}
|
||||
|
||||
// DefaultFunc implements the "default" helper used in the templates. In a
// pipeline such as {{.Spec.Replicas | default 1}}, text/template passes the
// piped value as the LAST argument, so the fallback literal arrives first and
// the actual field value second; the field value must therefore be checked,
// with the literal returned only when it is nil or empty.
func DefaultFunc(defaultValue interface{}, value interface{}) interface{} {
	if value == nil || value == "" {
		return defaultValue
	}
	return value
}
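A small sketch of how the `default` helper behaves once wired into `text/template`, mirroring how `parseTemplate` registers it above; the template string and data maps here are made up for illustration:

```go
package main

import (
	"os"
	"text/template"

	"code.gitea.io/gitea/modules/k8s/controller/application/utils"
)

func main() {
	// {{.Protocol | default "TCP"}} calls default("TCP", .Protocol): the piped
	// field value is the last argument, the literal is the fallback.
	tmpl := template.Must(template.
		New("demo").
		Funcs(template.FuncMap{"default": utils.DefaultFunc}).
		Parse("protocol: {{.Protocol | default \"TCP\"}}\n"))

	_ = tmpl.Execute(os.Stdout, map[string]string{"Protocol": ""})    // protocol: TCP
	_ = tmpl.Execute(os.Stdout, map[string]string{"Protocol": "UDP"}) // protocol: UDP
}
```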
42
modules/k8s/controller/devcontainer/controller-wrapper.go
Normal file
@@ -0,0 +1,42 @@
package devcontainer

import (
	"fmt"

	"k8s.io/klog/v2"
	"sigs.k8s.io/controller-runtime/pkg/manager"

	devcontainerv1 "code.gitea.io/gitea/modules/k8s/api/devcontainer/v1"
)

// Controller implements the controller.Controller interface.
type Controller struct{}

// Name returns the controller name.
func (c *Controller) Name() string {
	return "devcontainer"
}

// Init initializes the controller.
func (c *Controller) Init(mgr manager.Manager) error {
	// Add the API types to the scheme.
	klog.InfoS("Adding DevContainer API to scheme")
	if err := devcontainerv1.AddToScheme(mgr.GetScheme()); err != nil {
		return fmt.Errorf("unable to add DevContainer API to scheme: %w", err)
	}

	// Create the DevContainer reconciler.
	klog.InfoS("Creating DevContainer reconciler")
	reconciler := &DevcontainerAppReconciler{
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
	}

	// Wire the reconciler into the manager.
	klog.InfoS("Setting up DevContainer with manager")
	if err := reconciler.SetupWithManager(mgr); err != nil {
		return fmt.Errorf("failed to setup DevContainer controller: %w", err)
	}

	return nil
}
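The reconciler in the file that follows reacts to two annotations on the DevcontainerApp: `devstar.io/desiredReplicas` (a value of `"0"` scales the StatefulSet down) and `devstar.io/restartedAt` (a new timestamp triggers a rolling restart). A hedged sketch of how a caller might request a stop; the object name and namespace are placeholders:

```go
package main

import (
	"context"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/config"

	devcontainerv1 "code.gitea.io/gitea/modules/k8s/api/devcontainer/v1"
)

func main() {
	ctx := context.Background()

	sch := runtime.NewScheme()
	if err := devcontainerv1.AddToScheme(sch); err != nil {
		panic(err)
	}

	c, err := client.New(config.GetConfigOrDie(), client.Options{Scheme: sch})
	if err != nil {
		panic(err)
	}

	app := &devcontainerv1.DevcontainerApp{}
	if err := c.Get(ctx, types.NamespacedName{Namespace: "default", Name: "demo-dev"}, app); err != nil {
		panic(err)
	}

	if app.Annotations == nil {
		app.Annotations = map[string]string{}
	}
	// "0" asks the reconciler to scale the StatefulSet to zero replicas; writing a
	// fresh timestamp to "devstar.io/restartedAt" instead triggers a rolling restart.
	app.Annotations["devstar.io/desiredReplicas"] = "0"

	if err := c.Update(ctx, app); err != nil {
		panic(err)
	}
}
```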
@@ -0,0 +1,449 @@
|
||||
/*
|
||||
Copyright 2024.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package devcontainer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
|
||||
devcontainer_v1 "code.gitea.io/gitea/modules/k8s/api/devcontainer/v1"
|
||||
devcontainer_controller_utils "code.gitea.io/gitea/modules/k8s/controller/devcontainer/utils"
|
||||
apps_v1 "k8s.io/api/apps/v1"
|
||||
core_v1 "k8s.io/api/core/v1"
|
||||
k8s_sigs_controller_runtime_utils "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
)
|
||||
|
||||
// DevcontainerAppReconciler reconciles a DevcontainerApp object
|
||||
type DevcontainerAppReconciler struct {
|
||||
client.Client
|
||||
Scheme *runtime.Scheme
|
||||
}
|
||||
|
||||
// +kubebuilder:rbac:groups=devcontainer.devstar.cn,resources=devcontainerapps,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=devcontainer.devstar.cn,resources=devcontainerapps/status,verbs=get;update;patch
|
||||
// +kubebuilder:rbac:groups=devcontainer.devstar.cn,resources=devcontainerapps/finalizers,verbs=update
|
||||
// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=create;delete;get;list;watch
|
||||
// +kubebuilder:rbac:groups="",resources=services,verbs=create;delete;get;list;watch
|
||||
// +kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch;delete
|
||||
|
||||
// Reconcile is part of the main kubernetes reconciliation loop which aims to
|
||||
// move the current state of the cluster closer to the desired state.
|
||||
// Modify the Reconcile function to compare the state specified by
|
||||
// the DevcontainerApp object against the actual cluster state, and then
|
||||
// perform operations to make the cluster state reflect the state specified by
|
||||
// the user.
|
||||
//
|
||||
// For more details, check Reconcile and its Result here:
|
||||
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.0/pkg/reconcile
|
||||
|
||||
func (r *DevcontainerAppReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
logger := log.FromContext(ctx)
|
||||
var err error
|
||||
|
||||
// 1. 读取缓存中的 DevcontainerApp
|
||||
app := &devcontainer_v1.DevcontainerApp{}
|
||||
err = r.Get(ctx, req.NamespacedName, app)
|
||||
if err != nil {
|
||||
// 当 CRD 资源 "DevcontainerApp" 被删除后,直接返回空结果,跳过剩下步骤
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
// 添加 finalizer 处理逻辑
|
||||
finalizerName := "devcontainer.devstar.cn/finalizer"
|
||||
|
||||
// 检查对象是否正在被删除
|
||||
if !app.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||
// 对象正在被删除 - 处理 finalizer
|
||||
if k8s_sigs_controller_runtime_utils.ContainsFinalizer(app, finalizerName) {
|
||||
// 执行清理操作
|
||||
logger.Info("Cleaning up resources before deletion", "name", app.Name)
|
||||
|
||||
// 查找并删除关联的 PVC
|
||||
if err := r.cleanupPersistentVolumeClaims(ctx, app); err != nil {
|
||||
logger.Error(err, "Failed to clean up PVCs")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
// 删除完成后移除 finalizer
|
||||
k8s_sigs_controller_runtime_utils.RemoveFinalizer(app, finalizerName)
|
||||
if err := r.Update(ctx, app); err != nil {
|
||||
logger.Error(err, "Failed to remove finalizer")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
// 已标记为删除且处理完成,允许继续删除流程
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
// 如果对象不包含 finalizer,就添加它
|
||||
if !k8s_sigs_controller_runtime_utils.ContainsFinalizer(app, finalizerName) {
|
||||
logger.Info("Adding finalizer", "name", app.Name)
|
||||
k8s_sigs_controller_runtime_utils.AddFinalizer(app, finalizerName)
|
||||
if err := r.Update(ctx, app); err != nil {
|
||||
logger.Error(err, "Failed to add finalizer")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
// 检查停止容器的注解
|
||||
if desiredReplicas, exists := app.Annotations["devstar.io/desiredReplicas"]; exists && desiredReplicas == "0" {
|
||||
logger.Info("DevContainer stop requested via annotation", "name", app.Name)
|
||||
|
||||
// 获取当前的 StatefulSet
|
||||
statefulSetInNamespace := &apps_v1.StatefulSet{}
|
||||
err = r.Get(ctx, req.NamespacedName, statefulSetInNamespace)
|
||||
if err == nil {
|
||||
// 设置副本数为0
|
||||
replicas := int32(0)
|
||||
statefulSetInNamespace.Spec.Replicas = &replicas
|
||||
if err := r.Update(ctx, statefulSetInNamespace); err != nil {
|
||||
logger.Error(err, "Failed to scale down StatefulSet replicas to 0")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
logger.Info("StatefulSet scaled down to 0 replicas due to stop request")
|
||||
|
||||
// 标记容器为未就绪
|
||||
app.Status.Ready = false
|
||||
if err := r.Status().Update(ctx, app); err != nil {
|
||||
logger.Error(err, "Failed to update DevcontainerApp status")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
// 继续处理其他逻辑(如更新 Service)
|
||||
}
|
||||
}
|
||||
|
||||
// 2. 根据 DevcontainerApp 配置信息进行处理
|
||||
// 2.1 StatefulSet 处理
|
||||
statefulSet := devcontainer_controller_utils.NewStatefulSet(app)
|
||||
err = k8s_sigs_controller_runtime_utils.SetControllerReference(app, statefulSet, r.Scheme)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
// 2.2 查找 集群中同名称的 StatefulSet
|
||||
statefulSetInNamespace := &apps_v1.StatefulSet{}
|
||||
err = r.Get(ctx, req.NamespacedName, statefulSetInNamespace)
|
||||
if err != nil {
|
||||
if !errors.IsNotFound(err) {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
err = r.Create(ctx, statefulSet)
|
||||
if err != nil && !errors.IsAlreadyExists(err) {
|
||||
logger.Error(err, "Failed to create StatefulSet")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
} else {
|
||||
// 处理重启注解
|
||||
if restartedAt, exists := app.Annotations["devstar.io/restartedAt"]; exists {
|
||||
// 检查注解是否已经应用到StatefulSet
|
||||
needsRestart := true
|
||||
|
||||
if statefulSetInNamespace.Spec.Template.Annotations != nil {
|
||||
if currentRestartTime, exists := statefulSetInNamespace.Spec.Template.Annotations["devstar.io/restartedAt"]; exists && currentRestartTime == restartedAt {
|
||||
needsRestart = false
|
||||
}
|
||||
} else {
|
||||
statefulSetInNamespace.Spec.Template.Annotations = make(map[string]string)
|
||||
}
|
||||
|
||||
if needsRestart {
|
||||
logger.Info("DevContainer restart requested", "name", app.Name, "time", restartedAt)
|
||||
|
||||
// 将重启注解传递到 Pod 模板以触发滚动更新
|
||||
statefulSetInNamespace.Spec.Template.Annotations["devstar.io/restartedAt"] = restartedAt
|
||||
|
||||
// 确保副本数至少为1(防止之前被停止)
|
||||
replicas := int32(1)
|
||||
if statefulSetInNamespace.Spec.Replicas != nil && *statefulSetInNamespace.Spec.Replicas > 0 {
|
||||
replicas = *statefulSetInNamespace.Spec.Replicas
|
||||
}
|
||||
statefulSetInNamespace.Spec.Replicas = &replicas
|
||||
|
||||
if err := r.Update(ctx, statefulSetInNamespace); err != nil {
|
||||
logger.Error(err, "Failed to update StatefulSet for restart")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
logger.Info("StatefulSet restarted successfully")
|
||||
}
|
||||
}
|
||||
|
||||
// 若 StatefulSet.Status.readyReplicas 变化,则更新 DevcontainerApp.Status.Ready 域
|
||||
if statefulSetInNamespace.Status.ReadyReplicas > 0 {
|
||||
app.Status.Ready = true
|
||||
if err := r.Status().Update(ctx, app); err != nil {
|
||||
logger.Error(err, "Failed to update DevcontainerApp.Status.Ready", "DevcontainerApp.Status.Ready", app.Status.Ready)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
logger.Info("DevContainer is READY", "ReadyReplicas", statefulSetInNamespace.Status.ReadyReplicas)
|
||||
} else if app.Status.Ready {
|
||||
// 只有当目前状态为Ready但实际不再Ready时才更新
|
||||
app.Status.Ready = false
|
||||
if err := r.Status().Update(ctx, app); err != nil {
|
||||
logger.Error(err, "Failed to un-mark DevcontainerApp.Status.Ready", "DevcontainerApp.Status.Ready", app.Status.Ready)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
logger.Info("DevContainer is NOT ready", "ReadyReplicas", statefulSetInNamespace.Status.ReadyReplicas)
|
||||
}
|
||||
|
||||
// 修复方法:加上判断条件,避免循环触发更新
|
||||
needsUpdate := false
|
||||
|
||||
// 检查镜像是否变更
|
||||
if app.Spec.StatefulSet.Image != statefulSetInNamespace.Spec.Template.Spec.Containers[0].Image {
|
||||
needsUpdate = true
|
||||
}
|
||||
|
||||
// Check the replica count - only when the desiredReplicas annotation is set and non-zero (stopping is handled earlier)
|
||||
if desiredReplicas, exists := app.Annotations["devstar.io/desiredReplicas"]; exists && desiredReplicas != "0" {
|
||||
replicas, err := strconv.ParseInt(desiredReplicas, 10, 32)
|
||||
if err == nil {
|
||||
currentReplicas := int32(1) // 默认值
|
||||
if statefulSetInNamespace.Spec.Replicas != nil {
|
||||
currentReplicas = *statefulSetInNamespace.Spec.Replicas
|
||||
}
|
||||
|
||||
if currentReplicas != int32(replicas) {
|
||||
r32 := int32(replicas)
|
||||
statefulSet.Spec.Replicas = &r32
|
||||
needsUpdate = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if needsUpdate {
|
||||
if err := r.Update(ctx, statefulSet); err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
logger.Info("StatefulSet updated", "name", statefulSet.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// 2.3 Service handling
|
||||
service := devcontainer_controller_utils.NewService(app)
|
||||
if err := k8s_sigs_controller_runtime_utils.SetControllerReference(app, service, r.Scheme); err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
serviceInCluster := &core_v1.Service{}
|
||||
err = r.Get(ctx, types.NamespacedName{Name: app.Name, Namespace: app.Namespace}, serviceInCluster)
|
||||
if err != nil {
|
||||
if !errors.IsNotFound(err) {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
err = r.Create(ctx, service)
|
||||
if err == nil {
|
||||
// Runs only once, right after the NodePort Service has been created ==> record the assigned NodePort(s) in app.Status
|
||||
logger.Info("[DevStar][DevContainer] NodePort Assigned", "nodePortAssigned", service.Spec.Ports[0].NodePort)
|
||||
|
||||
// 设置主 SSH 端口的 NodePort
|
||||
app.Status.NodePortAssigned = uint16(service.Spec.Ports[0].NodePort)
|
||||
|
||||
// 处理额外端口
|
||||
extraPortsAssigned := []devcontainer_v1.ExtraPortAssigned{}
|
||||
|
||||
// 处理额外端口,从第二个端口开始(索引为1)
|
||||
// 因为第一个端口(索引为0)是 SSH 端口
|
||||
for i := 1; i < len(service.Spec.Ports); i++ {
|
||||
port := service.Spec.Ports[i]
|
||||
|
||||
// 查找对应的端口规格
|
||||
var containerPort uint16 = 0
|
||||
|
||||
// 如果存在额外端口配置,尝试匹配
|
||||
if app.Spec.Service.ExtraPorts != nil {
|
||||
for _, ep := range app.Spec.Service.ExtraPorts {
|
||||
if (ep.Name != "" && ep.Name == port.Name) ||
|
||||
(uint16(port.Port) == ep.ServicePort) {
|
||||
containerPort = ep.ContainerPort
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 如果没有找到匹配项,使用目标端口
|
||||
if containerPort == 0 && port.TargetPort.IntVal > 0 {
|
||||
containerPort = uint16(port.TargetPort.IntVal)
|
||||
}
|
||||
|
||||
// 添加到额外端口列表
|
||||
extraPortsAssigned = append(extraPortsAssigned, devcontainer_v1.ExtraPortAssigned{
|
||||
Name: port.Name,
|
||||
ServicePort: uint16(port.Port),
|
||||
ContainerPort: containerPort,
|
||||
NodePort: uint16(port.NodePort),
|
||||
})
|
||||
|
||||
logger.Info("[DevStar][DevContainer] Extra Port NodePort Assigned",
|
||||
"name", port.Name,
|
||||
"servicePort", port.Port,
|
||||
"nodePort", port.NodePort)
|
||||
}
|
||||
|
||||
// 更新 CRD 状态,包括额外端口
|
||||
app.Status.ExtraPortsAssigned = extraPortsAssigned
|
||||
|
||||
if err := r.Status().Update(ctx, app); err != nil {
|
||||
logger.Error(err, "Failed to update NodePorts of DevcontainerApp",
|
||||
"nodePortAssigned", service.Spec.Ports[0].NodePort,
|
||||
"extraPortsCount", len(extraPortsAssigned))
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
} else if !errors.IsAlreadyExists(err) {
|
||||
logger.Error(err, "Failed to create DevcontainerApp NodePort Service", "nodePortServiceName", service.Name)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
} else {
|
||||
// Service 已存在,检查它的端口信息
|
||||
// 检查是否需要更新状态
|
||||
needStatusUpdate := false
|
||||
|
||||
// 如果主端口未记录,记录之
|
||||
if app.Status.NodePortAssigned == 0 && len(serviceInCluster.Spec.Ports) > 0 {
|
||||
app.Status.NodePortAssigned = uint16(serviceInCluster.Spec.Ports[0].NodePort)
|
||||
needStatusUpdate = true
|
||||
logger.Info("[DevStar][DevContainer] Found existing main NodePort",
|
||||
"nodePort", serviceInCluster.Spec.Ports[0].NodePort)
|
||||
}
|
||||
|
||||
// 处理额外端口
|
||||
if len(serviceInCluster.Spec.Ports) > 1 {
|
||||
// 如果额外端口状态为空,或者数量不匹配
|
||||
if app.Status.ExtraPortsAssigned == nil ||
|
||||
len(app.Status.ExtraPortsAssigned) != len(serviceInCluster.Spec.Ports)-1 {
|
||||
|
||||
extraPortsAssigned := []devcontainer_v1.ExtraPortAssigned{}
|
||||
|
||||
// 从索引 1 开始,跳过主端口
|
||||
for i := 1; i < len(serviceInCluster.Spec.Ports); i++ {
|
||||
port := serviceInCluster.Spec.Ports[i]
|
||||
|
||||
// 查找对应的端口规格
|
||||
var containerPort uint16 = 0
|
||||
|
||||
// 如果存在额外端口配置,尝试匹配
|
||||
if app.Spec.Service.ExtraPorts != nil {
|
||||
for _, ep := range app.Spec.Service.ExtraPorts {
|
||||
if (ep.Name != "" && ep.Name == port.Name) ||
|
||||
(uint16(port.Port) == ep.ServicePort) {
|
||||
containerPort = ep.ContainerPort
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 如果没有找到匹配项,使用目标端口
|
||||
if containerPort == 0 && port.TargetPort.IntVal > 0 {
|
||||
containerPort = uint16(port.TargetPort.IntVal)
|
||||
}
|
||||
|
||||
// 添加到额外端口列表
|
||||
extraPortsAssigned = append(extraPortsAssigned, devcontainer_v1.ExtraPortAssigned{
|
||||
Name: port.Name,
|
||||
ServicePort: uint16(port.Port),
|
||||
ContainerPort: containerPort,
|
||||
NodePort: uint16(port.NodePort),
|
||||
})
|
||||
|
||||
logger.Info("[DevStar][DevContainer] Found existing extra NodePort",
|
||||
"name", port.Name,
|
||||
"nodePort", port.NodePort)
|
||||
}
|
||||
|
||||
// 更新额外端口状态
|
||||
app.Status.ExtraPortsAssigned = extraPortsAssigned
|
||||
needStatusUpdate = true
|
||||
}
|
||||
}
|
||||
|
||||
// 如果需要更新状态
|
||||
if needStatusUpdate {
|
||||
if err := r.Status().Update(ctx, app); err != nil {
|
||||
logger.Error(err, "Failed to update NodePorts status for existing service")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
logger.Info("[DevStar][DevContainer] Updated NodePorts status for existing service",
|
||||
"mainNodePort", app.Status.NodePortAssigned,
|
||||
"extraPortsCount", len(app.Status.ExtraPortsAssigned))
|
||||
}
|
||||
}
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
// cleanupPersistentVolumeClaims finds and deletes all PVCs associated with the given DevcontainerApp
|
||||
func (r *DevcontainerAppReconciler) cleanupPersistentVolumeClaims(ctx context.Context, app *devcontainer_v1.DevcontainerApp) error {
|
||||
logger := log.FromContext(ctx)
|
||||
|
||||
// 查找关联的 PVC
|
||||
pvcList := &core_v1.PersistentVolumeClaimList{}
|
||||
|
||||
// 按标签筛选
|
||||
labelSelector := client.MatchingLabels{
|
||||
"app": app.Name,
|
||||
}
|
||||
if err := r.List(ctx, pvcList, client.InNamespace(app.Namespace), labelSelector); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// 如果按标签没找到,尝试按名称模式查找
|
||||
if len(pvcList.Items) == 0 {
|
||||
if err := r.List(ctx, pvcList, client.InNamespace(app.Namespace)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// 筛选出名称包含 DevcontainerApp 名称的 PVC
|
||||
var filteredItems []core_v1.PersistentVolumeClaim
|
||||
for _, pvc := range pvcList.Items {
|
||||
// StatefulSet PVC 命名格式通常为: <volumeClaimTemplate名称>-<StatefulSet名称>-<序号>
|
||||
// 检查是否包含 app 名称作为名称的一部分
|
||||
if strings.Contains(pvc.Name, app.Name+"-") {
|
||||
filteredItems = append(filteredItems, pvc)
|
||||
logger.Info("Found PVC to delete", "name", pvc.Name)
|
||||
}
|
||||
}
|
||||
pvcList.Items = filteredItems
|
||||
}
|
||||
|
||||
// 删除找到的 PVC
|
||||
for i := range pvcList.Items {
|
||||
logger.Info("Deleting PVC", "name", pvcList.Items[i].Name)
|
||||
if err := r.Delete(ctx, &pvcList.Items[i]); err != nil && !errors.IsNotFound(err) {
|
||||
logger.Error(err, "Failed to delete PVC", "name", pvcList.Items[i].Name)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetupWithManager sets up the controller with the Manager.
|
||||
func (r *DevcontainerAppReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&devcontainer_v1.DevcontainerApp{}).
|
||||
Owns(&apps_v1.StatefulSet{}).
|
||||
Owns(&core_v1.Service{}).
|
||||
Complete(r)
|
||||
}
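The reconciler above drives restarts purely through the `devstar.io/restartedAt` annotation on the DevcontainerApp. Below is a minimal sketch of how a caller could trigger such a restart; the import path is taken from this patch, while the helper name and the controller-runtime client usage are assumptions for illustration only.

```go
// Sketch only: stamping devstar.io/restartedAt on a DevcontainerApp so the
// reconciler above copies it into the Pod template and rolls the StatefulSet.
package example

import (
	"context"
	"time"

	devcontainerv1 "code.gitea.io/gitea/modules/k8s/api/devcontainer/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// RequestDevcontainerRestart is a hypothetical helper, not part of this patch.
func RequestDevcontainerRestart(ctx context.Context, c client.Client, name, namespace string) error {
	app := &devcontainerv1.DevcontainerApp{}
	if err := c.Get(ctx, types.NamespacedName{Name: name, Namespace: namespace}, app); err != nil {
		return err
	}
	if app.Annotations == nil {
		app.Annotations = map[string]string{}
	}
	// An RFC3339 timestamp, mirroring what `kubectl rollout restart` writes.
	app.Annotations["devstar.io/restartedAt"] = time.Now().Format(time.RFC3339)
	return c.Update(ctx, app)
}
```

Because the controller copies the annotation into the Pod template, this has the same effect as a `kubectl rollout restart` on the underlying StatefulSet.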
|
||||
@@ -0,0 +1,84 @@
|
||||
/*
|
||||
Copyright 2024.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package devcontainer
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
devcontainerv1 "code.gitea.io/gitea/modules/k8s/api/devcontainer/v1"
|
||||
)
|
||||
|
||||
var _ = Describe("DevcontainerApp Controller", func() {
|
||||
Context("When reconciling a resource", func() {
|
||||
const resourceName = "test-resource"
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
typeNamespacedName := types.NamespacedName{
|
||||
Name: resourceName,
|
||||
Namespace: "default", // TODO(user):Modify as needed
|
||||
}
|
||||
devcontainerapp := &devcontainerv1.DevcontainerApp{}
|
||||
|
||||
BeforeEach(func() {
|
||||
By("creating the custom resource for the Kind DevcontainerApp")
|
||||
err := k8sClient.Get(ctx, typeNamespacedName, devcontainerapp)
|
||||
if err != nil && errors.IsNotFound(err) {
|
||||
resource := &devcontainerv1.DevcontainerApp{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: resourceName,
|
||||
Namespace: "default",
|
||||
},
|
||||
// TODO(user): Specify other spec details if needed.
|
||||
}
|
||||
Expect(k8sClient.Create(ctx, resource)).To(Succeed())
|
||||
}
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
// TODO(user): Cleanup logic after each test, like removing the resource instance.
|
||||
resource := &devcontainerv1.DevcontainerApp{}
|
||||
err := k8sClient.Get(ctx, typeNamespacedName, resource)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Cleanup the specific resource instance DevcontainerApp")
|
||||
Expect(k8sClient.Delete(ctx, resource)).To(Succeed())
|
||||
})
|
||||
It("should successfully reconcile the resource", func() {
|
||||
By("Reconciling the created resource")
|
||||
controllerReconciler := &DevcontainerAppReconciler{
|
||||
Client: k8sClient,
|
||||
Scheme: k8sClient.Scheme(),
|
||||
}
|
||||
|
||||
_, err := controllerReconciler.Reconcile(ctx, reconcile.Request{
|
||||
NamespacedName: typeNamespacedName,
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
// TODO(user): Add more specific assertions depending on your controller's reconciliation logic.
|
||||
// Example: If you expect a certain status condition after reconciliation, verify it here.
|
||||
})
|
||||
})
|
||||
})
|
||||
96
modules/k8s/controller/devcontainer/suite_test.go
Normal file
@@ -0,0 +1,96 @@
|
||||
/*
|
||||
Copyright 2024.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package devcontainer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/rest"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/envtest"
|
||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log/zap"
|
||||
|
||||
devcontainerv1 "code.gitea.io/gitea/modules/k8s/api/devcontainer/v1"
|
||||
// +kubebuilder:scaffold:imports
|
||||
)
|
||||
|
||||
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
|
||||
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
|
||||
|
||||
var cfg *rest.Config
|
||||
var k8sClient client.Client
|
||||
var testEnv *envtest.Environment
|
||||
var ctx context.Context
|
||||
var cancel context.CancelFunc
|
||||
|
||||
func TestControllers(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
|
||||
RunSpecs(t, "Controller Suite")
|
||||
}
|
||||
|
||||
var _ = BeforeSuite(func() {
|
||||
logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
|
||||
|
||||
ctx, cancel = context.WithCancel(context.TODO())
|
||||
|
||||
By("bootstrapping test environment")
|
||||
testEnv = &envtest.Environment{
|
||||
CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")},
|
||||
ErrorIfCRDPathMissing: true,
|
||||
|
||||
// The BinaryAssetsDirectory is only required if you want to run the tests directly
|
||||
// without calling the makefile target test. If not set, it will look for the
|
||||
// default path defined in controller-runtime which is /usr/local/kubebuilder/.
|
||||
// Note that you must have the required binaries setup under the bin directory to perform
|
||||
// the tests directly. When we run make test it will be setup and used automatically.
|
||||
BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s",
|
||||
fmt.Sprintf("1.31.0-%s-%s", runtime.GOOS, runtime.GOARCH)),
|
||||
}
|
||||
|
||||
var err error
|
||||
// cfg is defined in this file globally.
|
||||
cfg, err = testEnv.Start()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(cfg).NotTo(BeNil())
|
||||
|
||||
err = devcontainerv1.AddToScheme(scheme.Scheme)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// +kubebuilder:scaffold:scheme
|
||||
|
||||
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(k8sClient).NotTo(BeNil())
|
||||
|
||||
})
|
||||
|
||||
var _ = AfterSuite(func() {
|
||||
By("tearing down the test environment")
|
||||
cancel()
|
||||
err := testEnv.Stop()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
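The suite above only boots envtest and a bare client. A sketch of a spec that exercises the reconciler against that environment follows; it assumes extra imports (`appsv1 "k8s.io/api/apps/v1"`, `"k8s.io/apimachinery/pkg/types"`, `"sigs.k8s.io/controller-runtime/pkg/reconcile"`), that the `test-resource` DevcontainerApp from the controller test already exists, and that the templates under `modules/k8s/controller/devcontainer/templates/` are reachable from the test working directory, otherwise `parseTemplate` panics.

```go
// Illustrative only: one reconcile pass should leave an owned StatefulSet behind.
var _ = Describe("DevcontainerApp end-to-end (sketch)", func() {
	It("creates a StatefulSet for the custom resource", func() {
		r := &DevcontainerAppReconciler{Client: k8sClient, Scheme: k8sClient.Scheme()}
		_, err := r.Reconcile(ctx, reconcile.Request{
			NamespacedName: types.NamespacedName{Name: "test-resource", Namespace: "default"},
		})
		Expect(err).NotTo(HaveOccurred())

		// The reconciler names the StatefulSet after the DevcontainerApp.
		sts := &appsv1.StatefulSet{}
		Expect(k8sClient.Get(ctx, types.NamespacedName{Name: "test-resource", Namespace: "default"}, sts)).To(Succeed())
	})
})
```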
|
||||
24
modules/k8s/controller/devcontainer/templates/service.yaml
Normal file
@@ -0,0 +1,24 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{.ObjectMeta.Name}}-svc
|
||||
namespace: {{.ObjectMeta.Namespace}}
|
||||
spec:
|
||||
selector:
|
||||
app: {{.ObjectMeta.Name}}
|
||||
devstar-resource-type: devstar-devcontainer
|
||||
type: NodePort
|
||||
ports:
|
||||
- name: ssh-port
|
||||
protocol: TCP
|
||||
port: 22
|
||||
targetPort: {{.Spec.StatefulSet.ContainerPort}}
|
||||
{{ if .Spec.Service.NodePort}}
|
||||
nodePort: {{.Spec.Service.NodePort}}
|
||||
{{ end }}
|
||||
{{- range .Spec.Service.ExtraPorts }}
|
||||
- name: {{ .Name | default (printf "port-%d" .ServicePort) }}
|
||||
protocol: TCP
|
||||
port: {{ .ServicePort }}
|
||||
targetPort: {{ .ContainerPort }}
|
||||
{{- end }}
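The `range` block above renders one extra Service port per entry of `Spec.Service.ExtraPorts`. A hedged sketch of such entries follows; the field names are taken from this patch's `ExtraPortSpec`, the values are illustrative.

```go
package example

import devcontainerv1 "code.gitea.io/gitea/modules/k8s/api/devcontainer/v1"

// Sketch: these ExtraPorts would render two additional ports in the Service
// template above, next to the fixed ssh-port entry.
var exampleExtraPorts = []devcontainerv1.ExtraPortSpec{
	{Name: "web", ServicePort: 8080, ContainerPort: 8080},
	// Name omitted: the template's `default` helper falls back to "port-9090".
	{ServicePort: 9090, ContainerPort: 9090},
}
```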
|
||||
248
modules/k8s/controller/devcontainer/templates/statefulset.yaml
Normal file
@@ -0,0 +1,248 @@
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: {{.ObjectMeta.Name}}
|
||||
namespace: {{.ObjectMeta.Namespace}}
|
||||
labels:
|
||||
app: {{.ObjectMeta.Name}}
|
||||
devstar-resource-type: devstar-devcontainer
|
||||
spec:
|
||||
podManagementPolicy: OrderedReady
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{.ObjectMeta.Name}}
|
||||
devstar-resource-type: devstar-devcontainer
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: {{.ObjectMeta.Name}}
|
||||
devstar-resource-type: devstar-devcontainer
|
||||
spec:
|
||||
# 安全策略,禁止挂载 ServiceAccount Token
|
||||
automountServiceAccountToken: false
|
||||
volumes:
|
||||
# 添加 ttyd 共享卷
|
||||
- name: ttyd-shared
|
||||
emptyDir: {}
|
||||
initContainers:
|
||||
# 用户配置初始化
|
||||
- name: init-user-config
|
||||
image: {{.Spec.StatefulSet.Image}}
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- |
|
||||
echo "=== Checking /target-root directory ==="
|
||||
ls -la /target-root/ 2>/dev/null || echo "Directory not found"
|
||||
|
||||
# 检查是否为空目录或首次初始化
|
||||
file_count=$(find /target-root -maxdepth 1 \( -type f -o -type d \) ! -name '.' ! -name '..' 2>/dev/null | wc -l)
|
||||
echo "Found $file_count items in /target-root"
|
||||
|
||||
if [ "$file_count" -lt 2 ]; then
|
||||
echo "Empty or minimal directory detected - initializing user home..."
|
||||
cp -a /root/. /target-root/
|
||||
echo "User config initialized from image defaults"
|
||||
else
|
||||
echo "User config already exists - skipping initialization to preserve user data"
|
||||
echo "Current contents:"
|
||||
ls -la /target-root/
|
||||
fi
|
||||
volumeMounts:
|
||||
- name: pvc-devcontainer
|
||||
mountPath: /target-root
|
||||
subPath: user-home
|
||||
|
||||
# SSH 配置和公钥初始化
|
||||
- name: init-root-ssh-dir
|
||||
image: devstar.cn/public/busybox:27a71e19c956
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- |
|
||||
# 确保目录存在
|
||||
mkdir -p /root/.ssh
|
||||
mkdir -p /etc/ssh
|
||||
|
||||
# 创建标准的 sshd_config 文件(如果不存在)
|
||||
if [ ! -f /etc/ssh/sshd_config ]; then
|
||||
cat > /etc/ssh/sshd_config << 'EOF'
|
||||
# OpenSSH Server Configuration
|
||||
Port 22
|
||||
AddressFamily any
|
||||
ListenAddress 0.0.0.0
|
||||
|
||||
# Host Keys
|
||||
HostKey /etc/ssh/ssh_host_rsa_key
|
||||
HostKey /etc/ssh/ssh_host_ecdsa_key
|
||||
HostKey /etc/ssh/ssh_host_ed25519_key
|
||||
|
||||
# Logging
|
||||
SyslogFacility AUTH
|
||||
LogLevel INFO
|
||||
|
||||
# Authentication
|
||||
LoginGraceTime 2m
|
||||
PermitRootLogin yes
|
||||
StrictModes yes
|
||||
MaxAuthTries 6
|
||||
MaxSessions 10
|
||||
|
||||
PubkeyAuthentication yes
|
||||
AuthorizedKeysFile .ssh/authorized_keys
|
||||
|
||||
PasswordAuthentication no
|
||||
PermitEmptyPasswords no
|
||||
ChallengeResponseAuthentication no
|
||||
|
||||
# Forwarding
|
||||
X11Forwarding yes
|
||||
X11DisplayOffset 10
|
||||
PrintMotd no
|
||||
PrintLastLog yes
|
||||
TCPKeepAlive yes
|
||||
|
||||
# Environment
|
||||
AcceptEnv LANG LC_*
|
||||
|
||||
# Subsystem
|
||||
Subsystem sftp /usr/lib/openssh/sftp-server
|
||||
|
||||
# PAM
|
||||
UsePAM yes
|
||||
EOF
|
||||
echo "Created sshd_config"
|
||||
fi
|
||||
|
||||
# 导入 SSH 公钥(如果不存在)
|
||||
{{range .Spec.StatefulSet.SSHPublicKeyList}}
|
||||
if ! grep -q "{{.}}" /root/.ssh/authorized_keys 2>/dev/null; then
|
||||
echo "{{.}}" >> /root/.ssh/authorized_keys
|
||||
fi
|
||||
{{end}}
|
||||
|
||||
# 设置正确的权限
|
||||
chmod 755 /root
|
||||
chmod 700 /root/.ssh/
|
||||
chmod 600 /root/.ssh/authorized_keys 2>/dev/null || true
|
||||
chmod 644 /etc/ssh/sshd_config 2>/dev/null || true
|
||||
|
||||
# 确保文件所有者正确
|
||||
chown -R root:root /root/.ssh/
|
||||
|
||||
echo 'SSH configuration and keys initialized.'
|
||||
volumeMounts:
|
||||
- name: pvc-devcontainer
|
||||
mountPath: /root
|
||||
subPath: user-home
|
||||
- name: pvc-devcontainer
|
||||
mountPath: /etc/ssh
|
||||
subPath: ssh-host-keys
|
||||
|
||||
- name: init-git-repo-dir
|
||||
image: {{.Spec.StatefulSet.Image}}
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- if [ ! -d '/data/workspace' ]; then git clone {{.Spec.StatefulSet.GitRepositoryURL}} /data/workspace && echo "Git Repository cloned."; else echo "Folder already exists."; fi
|
||||
volumeMounts:
|
||||
- name: pvc-devcontainer
|
||||
mountPath: /data
|
||||
subPath: user-data
|
||||
|
||||
# ttyd 二进制文件复制
|
||||
- name: init-ttyd
|
||||
image: tsl0922/ttyd:latest
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- |
|
||||
echo "Copying ttyd binary to shared volume..."
|
||||
cp /usr/bin/ttyd /ttyd-shared/ttyd
|
||||
chmod +x /ttyd-shared/ttyd
|
||||
echo "ttyd binary copied successfully"
|
||||
ls -la /ttyd-shared/ttyd
|
||||
volumeMounts:
|
||||
- name: ttyd-shared
|
||||
mountPath: /ttyd-shared
|
||||
|
||||
containers:
|
||||
- name: {{.ObjectMeta.Name}}
|
||||
image: {{.Spec.StatefulSet.Image}}
|
||||
command:
|
||||
{{range .Spec.StatefulSet.Command}}
|
||||
- {{.}}
|
||||
{{end}}
|
||||
imagePullPolicy: IfNotPresent
|
||||
# securityContext: TODO: 设置 DevContainer 安全策略
|
||||
ports:
|
||||
- name: ssh-port
|
||||
protocol: TCP
|
||||
containerPort: {{.Spec.StatefulSet.ContainerPort}}
|
||||
{{- range .Spec.Service.ExtraPorts }}
|
||||
- name: {{ .Name | default (printf "port-%d" .ContainerPort) }}
|
||||
protocol: TCP
|
||||
containerPort: {{ .ContainerPort }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- name: pvc-devcontainer
|
||||
mountPath: /data
|
||||
subPath: user-data
|
||||
- name: pvc-devcontainer
|
||||
mountPath: /root
|
||||
subPath: user-home
|
||||
- name: pvc-devcontainer
|
||||
mountPath: /etc/ssh
|
||||
subPath: ssh-host-keys
|
||||
# 挂载 ttyd 共享卷
|
||||
- name: ttyd-shared
|
||||
mountPath: /ttyd-shared
|
||||
# 其他配置保持不变...
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- exec ls ~
|
||||
failureThreshold: 6
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 5
|
||||
readinessProbe:
|
||||
exec:
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- exec cat /etc/ssh/ssh_host*.pub
|
||||
failureThreshold: 6
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 5
|
||||
resources:
|
||||
limits:
|
||||
cpu: 300m
|
||||
ephemeral-storage: 8Gi
|
||||
memory: 512Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
ephemeral-storage: 50Mi
|
||||
memory: 128Mi
|
||||
volumeClaimTemplates:
|
||||
- apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: pvc-devcontainer
|
||||
spec:
|
||||
storageClassName: openebs-hostpath
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Gi
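For reference, a hedged sketch of the spec fields the template above consumes ({{.Spec.StatefulSet.Image}}, {{.Spec.StatefulSet.ContainerPort}}, the SSH key list, the git URL and the extra ports). Type and field names are taken from this patch; the concrete values are illustrative only.

```go
package example

import devcontainerv1 "code.gitea.io/gitea/modules/k8s/api/devcontainer/v1"

// Sketch: a spec whose fields correspond to the placeholders used in
// statefulset.yaml and service.yaml above. All values are made up.
var exampleSpec = devcontainerv1.DevcontainerAppSpec{
	StatefulSet: devcontainerv1.StatefulSetSpec{
		Image:            "devstar.cn/public/ubuntu-dev:latest", // assumed image name
		Command:          []string{"/usr/sbin/sshd", "-D"},      // assumed command
		ContainerPort:    22,
		SSHPublicKeyList: []string{"ssh-ed25519 AAAA... user@example"},
		GitRepositoryURL: "http://devstar.example/user/repo.git", // assumed URL
	},
	Service: devcontainerv1.ServiceSpec{
		ServicePort: 22,
		ExtraPorts:  []devcontainerv1.ExtraPortSpec{{Name: "ttyd", ServicePort: 7681, ContainerPort: 7681}},
	},
}
```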
|
||||
60
modules/k8s/controller/devcontainer/utils/template_utils.go
Normal file
@@ -0,0 +1,60 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"text/template"
|
||||
|
||||
devcontainer_apps_v1 "code.gitea.io/gitea/modules/k8s/api/devcontainer/v1"
|
||||
app_v1 "k8s.io/api/apps/v1"
|
||||
core_v1 "k8s.io/api/core/v1"
|
||||
yaml_util "k8s.io/apimachinery/pkg/util/yaml"
|
||||
)
|
||||
|
||||
const (
|
||||
TemplatePath = "modules/k8s/controller/devcontainer/templates/"
|
||||
)
|
||||
|
||||
// parseTemplate 解析 Go Template 模板文件
|
||||
func parseTemplate(templateName string, app *devcontainer_apps_v1.DevcontainerApp) []byte {
|
||||
tmpl, err := template.
|
||||
New(templateName + ".yaml").
|
||||
Funcs(template.FuncMap{"default": DefaultFunc}).
|
||||
ParseFiles(TemplatePath + templateName + ".yaml")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
b := new(bytes.Buffer)
|
||||
err = tmpl.Execute(b, app)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return b.Bytes()
|
||||
}
|
||||
|
||||
// NewStatefulSet 创建 StatefulSet
|
||||
func NewStatefulSet(app *devcontainer_apps_v1.DevcontainerApp) *app_v1.StatefulSet {
|
||||
statefulSet := &app_v1.StatefulSet{}
|
||||
err := yaml_util.Unmarshal(parseTemplate("statefulset", app), statefulSet)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return statefulSet
|
||||
}
|
||||
|
||||
// NewService 创建 Service
|
||||
func NewService(app *devcontainer_apps_v1.DevcontainerApp) *core_v1.Service {
|
||||
service := &core_v1.Service{}
|
||||
err := yaml_util.Unmarshal(parseTemplate("service", app), service)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return service
|
||||
}
|
||||
|
||||
// DefaultFunc implements the template "default" helper.
// Note the argument order: in a pipeline such as {{ .Name | default (printf "port-%d" .ServicePort) }},
// text/template passes the piped .Name as the final argument, so the fallback must be the first parameter.
|
||||
func DefaultFunc(defaultValue interface{}, value interface{}) interface{} {
|
||||
if value == nil || value == "" {
|
||||
return defaultValue
|
||||
}
|
||||
return value
|
||||
}
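One thing worth calling out: in a Go template pipeline such as `{{ .Name | default (printf "port-%d" .ServicePort) }}`, text/template passes the piped `.Name` as the last argument, so the fallback has to be the first parameter of the helper (Sprig's `default` uses the same order). A self-contained sketch of the helper with that ordering:

```go
package main

import (
	"os"
	"text/template"
)

// Standalone illustration of the "default" helper as used in the templates above.
// With the fallback as the first parameter, a pipeline keeps a non-empty .Name
// and only falls back to "port-<n>" when .Name is empty.
func main() {
	defaultFunc := func(defaultValue, value interface{}) interface{} {
		if value == nil || value == "" {
			return defaultValue
		}
		return value
	}
	tmpl := template.Must(template.New("port").
		Funcs(template.FuncMap{"default": defaultFunc}).
		Parse("name: {{ .Name | default (printf \"port-%d\" .ServicePort) }}\n"))
	// Prints "name: port-8080" because Name is empty in the data below.
	_ = tmpl.Execute(os.Stdout, struct {
		Name        string
		ServicePort int
	}{ServicePort: 8080})
}
```

With `Name: "web"` in the data, the same template prints `name: web` instead of the fallback.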
|
||||
113
modules/k8s/controller/manager.go
Normal file
@@ -0,0 +1,113 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
applicationv1 "code.gitea.io/gitea/modules/k8s/api/application/v1" // 新增
|
||||
devcontainerv1 "code.gitea.io/gitea/modules/k8s/api/devcontainer/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/controller-runtime/pkg/healthz"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
|
||||
// 添加Istio网络资源类型导入
|
||||
istionetworkingv1 "istio.io/client-go/pkg/apis/networking/v1"
|
||||
)
|
||||
|
||||
var (
|
||||
// Scheme 是所有 API 类型的 scheme
|
||||
Scheme = runtime.NewScheme()
|
||||
|
||||
// 控制器注册表
|
||||
Controllers = map[string]Controller{}
|
||||
controllerLock sync.Mutex
|
||||
controllerSet = sets.NewString()
|
||||
)
|
||||
|
||||
func init() {
|
||||
utilruntime.Must(clientgoscheme.AddToScheme(Scheme))
|
||||
// 注册 devcontainer API
|
||||
utilruntime.Must(devcontainerv1.AddToScheme(Scheme))
|
||||
// 注册 application API
|
||||
utilruntime.Must(applicationv1.AddToScheme(Scheme))
|
||||
// 注册 Istio 网络资源类型
|
||||
utilruntime.Must(istionetworkingv1.AddToScheme(Scheme))
|
||||
}
|
||||
|
||||
// Controller 是控制器接口
|
||||
type Controller interface {
|
||||
// Name 返回控制器名称
|
||||
Name() string
|
||||
// Init 初始化控制器
|
||||
Init(mgr manager.Manager) error
|
||||
}
|
||||
|
||||
// Manager 是控制器管理器
|
||||
type Manager struct {
|
||||
Manager manager.Manager
|
||||
IsControllerEnabled func(name string) bool
|
||||
}
|
||||
|
||||
// Start 启动控制器管理器
|
||||
func (m *Manager) Start(ctx context.Context) error {
|
||||
klog.InfoS("Starting DevStar controller manager")
|
||||
|
||||
// 添加健康检查
|
||||
if err := m.Manager.AddHealthzCheck("health", healthz.Ping); err != nil {
|
||||
return fmt.Errorf("unable to set up health check: %w", err)
|
||||
}
|
||||
|
||||
if err := m.Manager.AddReadyzCheck("ready", healthz.Ping); err != nil {
|
||||
return fmt.Errorf("unable to set up ready check: %w", err)
|
||||
}
|
||||
|
||||
// 初始化所有启用的控制器
|
||||
controllerLock.Lock()
|
||||
defer controllerLock.Unlock()
|
||||
|
||||
for name, c := range Controllers {
|
||||
if !m.IsControllerEnabled(name) {
|
||||
klog.InfoS("Controller disabled", "name", name)
|
||||
continue
|
||||
}
|
||||
|
||||
klog.InfoS("Initializing controller", "name", name)
|
||||
if err := c.Init(m.Manager); err != nil {
|
||||
return fmt.Errorf("error initializing controller %q: %w", name, err)
|
||||
}
|
||||
}
|
||||
|
||||
// 启动管理器
|
||||
klog.InfoS("Starting controllers")
|
||||
return m.Manager.Start(ctx)
|
||||
}
|
||||
|
||||
// Register 注册一个控制器到控制器管理器
|
||||
func Register(c Controller) error {
|
||||
controllerLock.Lock()
|
||||
defer controllerLock.Unlock()
|
||||
|
||||
name := c.Name()
|
||||
if _, found := Controllers[name]; found {
|
||||
return fmt.Errorf("controller %q was registered twice", name)
|
||||
}
|
||||
|
||||
Controllers[name] = c
|
||||
controllerSet.Insert(name)
|
||||
klog.InfoS("Registered controller", "name", name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetAllControllers 返回所有已注册控制器的名称
|
||||
func GetAllControllers() sets.String {
|
||||
controllerLock.Lock()
|
||||
defer controllerLock.Unlock()
|
||||
|
||||
return controllerSet.Union(nil)
|
||||
}
|
||||
10
modules/k8s/controller/options/options.go
Normal file
10
modules/k8s/controller/options/options.go
Normal file
@@ -0,0 +1,10 @@
|
||||
/*
|
||||
* Please refer to the LICENSE file in the root directory of the project.
|
||||
*/
|
||||
|
||||
package options
|
||||
|
||||
// Options 包含所有控制器可能需要的选项
|
||||
type Options struct {
|
||||
// 可以根据实际需求扩展更多选项
|
||||
}
|
||||
47
modules/k8s/errors/errors.go
Normal file
@@ -0,0 +1,47 @@
|
||||
package errors
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type ErrIllegalDevcontainerParameters struct {
|
||||
FieldList []string
|
||||
Message string
|
||||
}
|
||||
|
||||
func (err ErrIllegalDevcontainerParameters) Error() string {
|
||||
return fmt.Sprintf("Illegal DevContainer parameters detected: %v (%s)", err.FieldList, err.Message)
|
||||
}
|
||||
|
||||
type ErrOperateDevcontainer struct {
|
||||
Action string
|
||||
Message string
|
||||
}
|
||||
|
||||
func (err ErrOperateDevcontainer) Error() string {
|
||||
return fmt.Sprintf("Failed to %v in DevStar DevContainer DB: %v", err.Action, err.Message)
|
||||
}
|
||||
|
||||
// ErrOpenDevcontainerTimeout 阻塞式等待 DevContainer 超时
|
||||
type ErrOpenDevcontainerTimeout struct {
|
||||
Name string
|
||||
Namespace string
|
||||
TimeoutSeconds int64
|
||||
}
|
||||
|
||||
func (err ErrOpenDevcontainerTimeout) Error() string {
|
||||
return fmt.Sprintf("Failed to open DevContainer '%s' in namespace '%s': waiting timeout limit of %d seconds has been exceeded.",
|
||||
err.Name, err.Namespace, err.TimeoutSeconds,
|
||||
)
|
||||
}
|
||||
|
||||
type ErrK8sDevcontainerNotReady struct {
|
||||
Name string
|
||||
Namespace string
|
||||
Wait bool
|
||||
}
|
||||
|
||||
func (err ErrK8sDevcontainerNotReady) Error() string {
|
||||
return fmt.Sprintf("Failed to open k8s Devcontainer '%s' in namespace '%s': DevContainer Not Ready (Wait = %v)",
|
||||
err.Name, err.Namespace, err.Wait)
|
||||
}
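Since these error types use value receivers, callers can branch on them with `errors.As`. A small sketch; the helper name and the returned messages are illustrative.

```go
package example

import (
	"errors"
	"fmt"

	k8sErrors "code.gitea.io/gitea/modules/k8s/errors"
)

// classifyOpenError shows how a caller might distinguish a wait timeout from a
// plain "not ready" result when opening a DevContainer.
func classifyOpenError(err error) string {
	var timeoutErr k8sErrors.ErrOpenDevcontainerTimeout
	var notReadyErr k8sErrors.ErrK8sDevcontainerNotReady
	switch {
	case errors.As(err, &timeoutErr):
		return fmt.Sprintf("timed out after %d seconds, try again later", timeoutErr.TimeoutSeconds)
	case errors.As(err, &notReadyErr):
		return "devcontainer is still starting"
	case err != nil:
		return err.Error()
	default:
		return "ok"
	}
}
```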
|
||||
449
modules/k8s/k8s.go
Normal file
@@ -0,0 +1,449 @@
|
||||
package k8s_agent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"code.gitea.io/gitea/modules/log"
|
||||
"code.gitea.io/gitea/modules/setting"
|
||||
|
||||
k8s_api_v1 "code.gitea.io/gitea/modules/k8s/api/devcontainer/v1"
|
||||
k8sErrors "code.gitea.io/gitea/modules/k8s/errors"
|
||||
apimachinery_api_metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
apimachinery_apis_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
apimachinery_apis_v1_unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
apimachinery_runtime_utils "k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
apimachinery_watch "k8s.io/apimachinery/pkg/watch"
|
||||
dynamic_client "k8s.io/client-go/dynamic"
|
||||
dynamicclient "k8s.io/client-go/dynamic"
|
||||
clientgorest "k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
)
|
||||
|
||||
// IsK8sDevcontainerStatusReady 工具类方法,判断给定的 DevcontainerApp.Status 是否达到就绪状态
|
||||
// 1. DevcontainerApp.Status.Ready == true
|
||||
// 2. DevcontainerApp.Status.NodePortAssigned 介于闭区间 [30000, 32767]
|
||||
func IsK8sDevcontainerStatusReady(devcontainerAppStatus *k8s_api_v1.DevcontainerAppStatus) bool {
|
||||
return devcontainerAppStatus != nil &&
|
||||
devcontainerAppStatus.Ready &&
|
||||
devcontainerAppStatus.NodePortAssigned >= 30000 &&
|
||||
devcontainerAppStatus.NodePortAssigned <= 32767
|
||||
}
|
||||
|
||||
// groupVersionResource 用于描述 CRD,供 dynamic Client 交互使用
|
||||
var groupVersionResource = schema.GroupVersionResource{
|
||||
Group: k8s_api_v1.GroupVersion.Group,
|
||||
Version: k8s_api_v1.GroupVersion.Version,
|
||||
Resource: "devcontainerapps",
|
||||
}
|
||||
|
||||
// GetKubernetesClient 通过用户提供的 kubeconfig 原始内容与可选的 contextName 获取动态客户端
|
||||
func GetKubernetesClient(ctx context.Context, kubeconfig []byte, contextName string) (dynamicclient.Interface, error) {
|
||||
var config *clientgorest.Config
|
||||
var err error
|
||||
|
||||
if len(kubeconfig) == 0 {
|
||||
// 未提供 kubeconfig 内容:优先使用本机默认 kubeconfig,其次回退到 InCluster
|
||||
config, err = clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
|
||||
if err != nil {
|
||||
log.Warn("Failed to obtain Kubernetes config outside of cluster: " + clientcmd.RecommendedHomeFile)
|
||||
config, err = clientgorest.InClusterConfig()
|
||||
if err != nil {
|
||||
log.Error("Failed to obtain Kubernetes config both inside/outside of cluster, the DevContainer is Disabled")
|
||||
setting.DevContainerConfig.Enable = false
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// 提供了 kubeconfig 内容:按用户提供的内容与可选 context 获取配置
|
||||
config, err = restConfigFromKubeconfigBytes(kubeconfig, contextName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
applyClientDefaults(config)
|
||||
|
||||
// 强制跳过 TLS 证书校验(无论 kubeconfig 是否声明 insecure-skip-tls-verify)
|
||||
// 同时清空 CA 配置
|
||||
config.TLSClientConfig.Insecure = true
|
||||
config.TLSClientConfig.CAData = nil
|
||||
config.TLSClientConfig.CAFile = ""
|
||||
|
||||
// 尝试创建客户端,如果TLS验证失败则自动跳过验证
|
||||
client, err := dynamicclient.NewForConfig(config)
|
||||
if err != nil {
|
||||
// 再次兜底:若识别为 TLS 错误,已 Insecure,无需再次设置;否则将错误上抛
|
||||
return nil, fmt.Errorf("failed to create k8s client: %v", err)
|
||||
}
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// restConfigFromKubeconfigBytes 基于 kubeconfig 内容构造 *rest.Config,支持指定 context(为空则使用 current-context)
|
||||
func restConfigFromKubeconfigBytes(kubeconfig []byte, contextName string) (*clientgorest.Config, error) {
|
||||
|
||||
if contextName == "" {
|
||||
cfg, err := clientcmd.RESTConfigFromKubeConfig(kubeconfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
applyClientDefaults(cfg)
|
||||
return cfg, nil
|
||||
}
|
||||
// 指定 context 的解析路径
|
||||
apiConfig, err := clientcmd.Load(kubeconfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
overrides := &clientcmd.ConfigOverrides{CurrentContext: contextName}
|
||||
clientConfig := clientcmd.NewDefaultClientConfig(*apiConfig, overrides)
|
||||
cfg, err := clientConfig.ClientConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
applyClientDefaults(cfg)
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
// applyClientDefaults 统一设置 QPS/Burst(可按需设置超时等)
|
||||
func applyClientDefaults(cfg *clientgorest.Config) {
|
||||
if cfg == nil {
|
||||
return
|
||||
}
|
||||
if cfg.QPS == 0 {
|
||||
cfg.QPS = 50
|
||||
}
|
||||
if cfg.Burst == 0 {
|
||||
cfg.Burst = 100
|
||||
}
|
||||
}
|
||||
|
||||
func GetDevcontainer(ctx context.Context, client dynamic_client.Interface, opts *GetDevcontainerOptions) (*k8s_api_v1.DevcontainerApp, error) {
|
||||
|
||||
// 0. 检查参数
|
||||
if ctx == nil || opts == nil || len(opts.Namespace) == 0 || len(opts.Name) == 0 {
|
||||
return nil, k8sErrors.ErrIllegalDevcontainerParameters{
|
||||
FieldList: []string{"ctx", "opts", "opts.Name", "opts.Namespace"},
|
||||
Message: "cannot be nil",
|
||||
}
|
||||
}
|
||||
|
||||
// 1. 获取 k8s CRD 资源 DevcontainerApp
|
||||
devcontainerUnstructured, err := client.Resource(groupVersionResource).Namespace(opts.Namespace).Get(ctx, opts.Name, opts.GetOptions)
|
||||
if err != nil {
|
||||
return nil, k8sErrors.ErrOperateDevcontainer{
|
||||
Action: "Get DevcontainerApp thru k8s API Server",
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
|
||||
|
||||
// 2. 解析 DevcontainerApp Status 域,装填 VO
|
||||
devcontainerApp := &k8s_api_v1.DevcontainerApp{}
|
||||
err = apimachinery_runtime_utils.DefaultUnstructuredConverter.FromUnstructured(devcontainerUnstructured.Object, &devcontainerApp)
|
||||
if err != nil {
|
||||
return nil, k8sErrors.ErrOperateDevcontainer{
|
||||
Action: "Convert k8s API Server unstructured response into DevcontainerApp",
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
|
||||
|
||||
// 3. 检查 Devcontainer 是否就绪
|
||||
if !IsK8sDevcontainerStatusReady(&devcontainerApp.Status) {
|
||||
// 3.1 检查 Wait 参数,若用户不需要阻塞式等待,直接返回 “DevContainer 未就绪” 错误
|
||||
if opts.Wait == false {
|
||||
return nil, k8sErrors.ErrK8sDevcontainerNotReady{
|
||||
Name: opts.Name,
|
||||
Namespace: opts.Namespace,
|
||||
Wait: opts.Wait,
|
||||
}
|
||||
}
|
||||
|
||||
// 3.2 执行阻塞式等待
|
||||
devcontainerStatusVO, err := waitUntilDevcontainerReadyWithTimeout(ctx, client, opts)
|
||||
if err != nil {
|
||||
return nil, k8sErrors.ErrOperateDevcontainer{
|
||||
Action: "wait for k8s DevContainer to be ready",
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
|
||||
devcontainerApp.Status.Ready = devcontainerStatusVO.Ready
|
||||
devcontainerApp.Status.NodePortAssigned = devcontainerStatusVO.NodePortAssigned
|
||||
}
|
||||
|
||||
// 4. 将就绪的 DevContainer Status VO 返回
|
||||
return devcontainerApp, nil
|
||||
}
|
||||
|
||||
// waitUntilDevcontainerReadyWithTimeout 辅助方法:在超时时间内阻塞等待 DevContainer 就绪
|
||||
func waitUntilDevcontainerReadyWithTimeout(ctx context.Context, client dynamic_client.Interface, opts *GetDevcontainerOptions) (*DevcontainerStatusK8sAgentVO, error) {
|
||||
|
||||
// 0. 检查参数
|
||||
if ctx == nil || client == nil || opts == nil || len(opts.Name) == 0 || len(opts.Namespace) == 0 {
|
||||
return nil, k8sErrors.ErrIllegalDevcontainerParameters{
|
||||
FieldList: []string{"ctx", "client", "opts", "opts.Name", "opts.Namespace"},
|
||||
Message: "could not be nil",
|
||||
}
|
||||
}
|
||||
|
||||
// 1. 注册 watcher 监听 DevContainer Status 变化
|
||||
watcherTimeoutSeconds := int64(120)
|
||||
watcher, err := client.Resource(groupVersionResource).Namespace(opts.Namespace).Watch(ctx, apimachinery_apis_v1.ListOptions{
|
||||
FieldSelector: fmt.Sprintf("metadata.name=%s", opts.Name),
|
||||
Watch: true,
|
||||
TimeoutSeconds: &watcherTimeoutSeconds,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, k8sErrors.ErrOperateDevcontainer{
|
||||
Action: "register watcher of DevContainer Readiness",
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
|
||||
defer watcher.Stop()
|
||||
|
||||
// 2. 当 DevContainer Watcher 事件处理
|
||||
devcontainerStatusVO := &DevcontainerStatusK8sAgentVO{}
|
||||
for event := range watcher.ResultChan() {
|
||||
switch event.Type {
|
||||
case apimachinery_watch.Added:
|
||||
// 2.1 监听 DevcontainerApp ADDED 事件,直接 fallthrough 到 MODIFIED 事件合并处理
|
||||
fallthrough
|
||||
case apimachinery_watch.Modified:
|
||||
// 2.2 监听 DevcontainerApp MODIFIED 事件
|
||||
if devcontainerUnstructured, ok := event.Object.(*apimachinery_apis_v1_unstructured.Unstructured); ok {
|
||||
// 2.2.1 解析 status 域
|
||||
statusDevcontainer, ok, err := apimachinery_apis_v1_unstructured.NestedMap(devcontainerUnstructured.Object, "status")
|
||||
if err == nil && ok {
|
||||
devcontainerCurrentStatus := &k8s_api_v1.DevcontainerAppStatus{
|
||||
Ready: statusDevcontainer["ready"].(bool),
|
||||
NodePortAssigned: uint16(statusDevcontainer["nodePortAssigned"].(int64)),
|
||||
}
|
||||
// 2.2.2 当 Status 达到就绪状态后,返回
|
||||
if IsK8sDevcontainerStatusReady(devcontainerCurrentStatus) {
|
||||
devcontainerStatusVO.Ready = devcontainerCurrentStatus.Ready
|
||||
devcontainerStatusVO.NodePortAssigned = devcontainerCurrentStatus.NodePortAssigned
|
||||
return devcontainerStatusVO, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
case apimachinery_watch.Error:
|
||||
// 2.3 监听 DevcontainerApp ERROR 事件,返回报错信息
|
||||
apimachineryApiMetav1Status, ok := event.Object.(*apimachinery_api_metav1.Status)
|
||||
if !ok {
|
||||
return nil, k8sErrors.ErrOperateDevcontainer{
|
||||
Action: fmt.Sprintf("wait for Devcontainer '%s' in namespace '%s' to be ready", opts.Name, opts.Namespace),
|
||||
Message: fmt.Sprintf("An error occurred in k8s CRD DevcontainerApp Watcher: \n"+
|
||||
" Code: %v (status = %v)\n"+
|
||||
"Message: %v\n"+
|
||||
" Reason: %v\n"+
|
||||
"Details: %v",
|
||||
apimachineryApiMetav1Status.Code, apimachineryApiMetav1Status.Status,
|
||||
apimachineryApiMetav1Status.Message,
|
||||
apimachineryApiMetav1Status.Reason,
|
||||
apimachineryApiMetav1Status.Details),
|
||||
}
|
||||
}
|
||||
case apimachinery_watch.Deleted:
|
||||
// 2.4 监听 DevcontainerApp DELETED 事件,返回报错信息
|
||||
return nil, k8sErrors.ErrOperateDevcontainer{
|
||||
Action: fmt.Sprintf("Open DevContainer '%s' in namespace '%s'", opts.Name, opts.Namespace),
|
||||
Message: fmt.Sprintf("'%s' of Kind DevcontainerApp has been Deleted", opts.Name),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 3. k8s CRD DevcontainerApp Watcher 超时关闭处理:直接返回超时错误
|
||||
return nil, k8sErrors.ErrOpenDevcontainerTimeout{
|
||||
Name: opts.Name,
|
||||
Namespace: opts.Namespace,
|
||||
TimeoutSeconds: setting.DevContainerConfig.TimeoutSeconds,
|
||||
}
|
||||
}
|
||||
|
||||
// 修改 CreateDevcontainer 函数
|
||||
func CreateDevcontainer(ctx context.Context, client dynamic_client.Interface, opts *CreateDevcontainerOptions) (*k8s_api_v1.DevcontainerApp, error) {
|
||||
// 记录日志
|
||||
log.Info("Creating DevContainer with options: name=%s, namespace=%s, image=%s",
|
||||
opts.Name, opts.Namespace, opts.Image)
|
||||
|
||||
// 创建资源定义
|
||||
devcontainerApp := &k8s_api_v1.DevcontainerApp{
|
||||
TypeMeta: apimachinery_apis_v1.TypeMeta{
|
||||
Kind: "DevcontainerApp",
|
||||
APIVersion: "devcontainer.devstar.cn/v1",
|
||||
},
|
||||
ObjectMeta: apimachinery_apis_v1.ObjectMeta{
|
||||
Name: opts.Name,
|
||||
Namespace: opts.Namespace,
|
||||
Labels: map[string]string{
|
||||
"app.kubernetes.io/name": "devcontainer-operator",
|
||||
"app.kubernetes.io/managed-by": "kustomize",
|
||||
},
|
||||
},
|
||||
Spec: k8s_api_v1.DevcontainerAppSpec{
|
||||
StatefulSet: k8s_api_v1.StatefulSetSpec{
|
||||
Image: opts.Image,
|
||||
Command: opts.CommandList,
|
||||
ContainerPort: opts.ContainerPort,
|
||||
SSHPublicKeyList: opts.SSHPublicKeyList,
|
||||
GitRepositoryURL: opts.GitRepositoryURL,
|
||||
},
|
||||
Service: k8s_api_v1.ServiceSpec{
|
||||
ServicePort: opts.ServicePort,
|
||||
ExtraPorts: opts.ExtraPorts, // 添加 ExtraPorts 配置
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// 转换为 JSON
|
||||
jsonData, err := json.Marshal(devcontainerApp)
|
||||
if err != nil {
|
||||
log.Error("Failed to marshal DevcontainerApp to JSON: %v", err)
|
||||
return nil, k8sErrors.ErrOperateDevcontainer{
|
||||
Action: "Marshal JSON",
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
|
||||
|
||||
// 输出 JSON 以便调试
|
||||
log.Debug("Generated JSON for DevcontainerApp:\n%s", string(jsonData))
|
||||
|
||||
// 转换为 Unstructured 对象
|
||||
unstructuredObj := &apimachinery_apis_v1_unstructured.Unstructured{}
|
||||
err = unstructuredObj.UnmarshalJSON(jsonData)
|
||||
if err != nil {
|
||||
log.Error("Failed to unmarshal JSON to Unstructured: %v", err)
|
||||
return nil, k8sErrors.ErrOperateDevcontainer{
|
||||
Action: "Unmarshal JSON to Unstructured",
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
|
||||
|
||||
// 确认 GroupVersionResource 定义
|
||||
log.Debug("Using GroupVersionResource: Group=%s, Version=%s, Resource=%s",
|
||||
groupVersionResource.Group, groupVersionResource.Version, groupVersionResource.Resource)
|
||||
|
||||
// 创建资源
|
||||
log.Info("Creating DevcontainerApp resource in namespace %s", opts.Namespace)
|
||||
result, err := client.Resource(groupVersionResource).Namespace(opts.Namespace).Create(ctx, unstructuredObj, opts.CreateOptions)
|
||||
if err != nil {
|
||||
log.Error("Failed to create DevcontainerApp: %v", err)
|
||||
return nil, k8sErrors.ErrOperateDevcontainer{
|
||||
Action: "create DevContainer via Dynamic Client",
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
|
||||
|
||||
log.Info("DevcontainerApp resource created successfully")
|
||||
|
||||
// 将结果转换回 DevcontainerApp 结构体
|
||||
resultJSON, err := result.MarshalJSON()
|
||||
if err != nil {
|
||||
log.Error("Failed to marshal result to JSON: %v", err)
|
||||
return nil, k8sErrors.ErrOperateDevcontainer{
|
||||
Action: "Marshal result JSON",
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
|
||||
|
||||
createdDevcontainer := &k8s_api_v1.DevcontainerApp{}
|
||||
if err := json.Unmarshal(resultJSON, createdDevcontainer); err != nil {
|
||||
log.Error("Failed to unmarshal result JSON: %v", err)
|
||||
return nil, k8sErrors.ErrOperateDevcontainer{
|
||||
Action: "Unmarshal result JSON",
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
|
||||
|
||||
return createdDevcontainer, nil
|
||||
}
|
||||
|
||||
func DeleteDevcontainer(ctx context.Context, client dynamic_client.Interface, opts *DeleteDevcontainerOptions) error {
|
||||
if ctx == nil || opts == nil || len(opts.Namespace) == 0 || len(opts.Name) == 0 {
|
||||
return k8sErrors.ErrIllegalDevcontainerParameters{
|
||||
FieldList: []string{"ctx", "opts", "opts.Name", "opts.Namespace"},
|
||||
Message: "cannot be nil",
|
||||
}
|
||||
}
|
||||
|
||||
err := client.Resource(groupVersionResource).Namespace(opts.Namespace).Delete(ctx, opts.Name, opts.DeleteOptions)
|
||||
if err != nil {
|
||||
log.Warn("Failed to delete DevcontainerApp '%s' in namespace '%s': %s", opts.Name, opts.Namespace, err.Error())
|
||||
return k8sErrors.ErrOperateDevcontainer{
|
||||
Action: fmt.Sprintf("delete devcontainer '%s' in namespace '%s'", opts.Name, opts.Namespace),
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListDevcontainers 根据条件列举 DevContainer
|
||||
func ListDevcontainers(ctx context.Context, client dynamic_client.Interface, opts *ListDevcontainersOptions) (*k8s_api_v1.DevcontainerAppList, error) {
|
||||
|
||||
if ctx == nil || opts == nil || len(opts.Namespace) == 0 {
|
||||
return nil, k8sErrors.ErrIllegalDevcontainerParameters{
|
||||
FieldList: []string{"ctx", "namespace"},
|
||||
Message: "cannot be empty",
|
||||
}
|
||||
}
|
||||
|
||||
list, err := client.Resource(groupVersionResource).Namespace(opts.Namespace).List(ctx, opts.ListOptions)
|
||||
if err != nil {
|
||||
return nil, k8sErrors.ErrOperateDevcontainer{
|
||||
Action: fmt.Sprintf("List Devcontainer in namespace '%s'", opts.Namespace),
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
|
||||
// JSON 反序列化为 DevcontainerAppList
|
||||
jsonData, err := list.MarshalJSON()
|
||||
if err != nil {
|
||||
return nil, k8sErrors.ErrOperateDevcontainer{
|
||||
Action: "verify JSON data of Devcontainer List",
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
|
||||
devcontainerList := &k8s_api_v1.DevcontainerAppList{}
|
||||
if err := json.Unmarshal(jsonData, devcontainerList); err != nil {
|
||||
return nil, k8sErrors.ErrOperateDevcontainer{
|
||||
Action: "deserialize Devcontainer List data",
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
|
||||
return devcontainerList, nil
|
||||
}
|
||||
|
||||
// isTLSCertificateError 检查错误是否是TLS证书验证错误
|
||||
func isTLSCertificateError(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
errStr := err.Error()
|
||||
|
||||
// 检查常见的TLS证书验证错误(尽量宽松,覆盖更多 x509 报错文案)
|
||||
tlsErrorPatterns := []string{
|
||||
"tls: failed to verify certificate",
|
||||
"x509:",
|
||||
"x509: certificate",
|
||||
"cannot validate certificate",
|
||||
"doesn't contain any IP SANs",
|
||||
"certificate is valid for",
|
||||
"certificate signed by unknown authority",
|
||||
"unknown authority",
|
||||
"self-signed certificate",
|
||||
"certificate has expired",
|
||||
"certificate is not valid",
|
||||
"invalid certificate",
|
||||
}
|
||||
|
||||
for _, pattern := range tlsErrorPatterns {
|
||||
if strings.Contains(errStr, pattern) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
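Putting the helpers above together, the intended call sequence is roughly: build a dynamic client, create the CR, then fetch it with `Wait: true` so the watcher blocks until the controller reports Ready. The sketch below is illustrative only; the name, image and repository URL are made-up values, and the option structs are defined in `modules/k8s/k8s_types.go` (shown next).

```go
package k8s_agent

import (
	"context"
	"fmt"
)

// openExampleDevcontainer is a sketch of the intended call sequence, not part of the patch.
func openExampleDevcontainer(ctx context.Context) error {
	client, err := GetKubernetesClient(ctx, nil, "") // default kubeconfig or in-cluster fallback
	if err != nil {
		return err
	}

	if _, err := CreateDevcontainer(ctx, client, &CreateDevcontainerOptions{
		Name:             "alice-demo", // illustrative name
		Namespace:        "default",
		Image:            "devstar.cn/public/ubuntu-dev:latest", // illustrative image
		ContainerPort:    22,
		ServicePort:      22,
		SSHPublicKeyList: []string{"ssh-ed25519 AAAA... alice@example"},
		GitRepositoryURL: "http://devstar.example/alice/demo.git", // illustrative URL
	}); err != nil {
		return err
	}

	app, err := GetDevcontainer(ctx, client, &GetDevcontainerOptions{
		Name:      "alice-demo",
		Namespace: "default",
		Wait:      true, // block until Ready, subject to the watcher timeout
	})
	if err != nil {
		return err
	}
	fmt.Printf("SSH NodePort assigned: %d\n", app.Status.NodePortAssigned)
	return nil
}
```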
|
||||
53
modules/k8s/k8s_types.go
Normal file
@@ -0,0 +1,53 @@
|
||||
package k8s_agent
|
||||
|
||||
import (
|
||||
k8s_api_v1 "code.gitea.io/gitea/modules/k8s/api/devcontainer/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// CreateDevcontainerOptions 定义创建开发容器选项
|
||||
type CreateDevcontainerOptions struct {
|
||||
metav1.CreateOptions
|
||||
|
||||
Name string `json:"name"`
|
||||
Namespace string `json:"namespace"`
|
||||
Image string `json:"image"`
|
||||
CommandList []string `json:"command"`
|
||||
ContainerPort uint16 `json:"containerPort"`
|
||||
ServicePort uint16 `json:"servicePort"`
|
||||
SSHPublicKeyList []string `json:"sshPublicKeyList"`
|
||||
GitRepositoryURL string `json:"gitRepositoryURL"`
|
||||
ExtraPorts []k8s_api_v1.ExtraPortSpec `json:"extraPorts,omitempty"` // 添加额外端口配置
|
||||
}
|
||||
|
||||
type GetDevcontainerOptions struct {
|
||||
metav1.GetOptions
|
||||
|
||||
Name string `json:"name"`
|
||||
Namespace string `json:"namespace"`
|
||||
Wait bool `json:"wait"`
|
||||
}
|
||||
|
||||
type DeleteDevcontainerOptions struct {
|
||||
metav1.DeleteOptions
|
||||
|
||||
Name string `json:"name"`
|
||||
Namespace string `json:"namespace"`
|
||||
}
|
||||
|
||||
type ListDevcontainersOptions struct {
|
||||
metav1.ListOptions
|
||||
|
||||
Namespace string `json:"namespace"`
|
||||
}
|
||||
|
||||
type DevcontainerStatusK8sAgentVO struct {
|
||||
// CRD Controller 向 DevcontainerApp.Status.NodePortAssigned 写入了最新的 NodePort 端口值,当且仅当 Service 被调度且分配了最新的 NodePort
|
||||
NodePortAssigned uint16 `json:"nodePortAssigned"`
|
||||
|
||||
// CRD Controller 向 DevcontainerApp.Status.Ready 写入了 true,当且仅当 StatefulSet 控制下的 Pod 中的 Readiness Probe 返回 true
|
||||
Ready bool `json:"ready"`
|
||||
|
||||
// 额外端口的 NodePort 分配情况
|
||||
ExtraPortsAssigned []k8s_api_v1.ExtraPortAssigned `json:"extraPortsAssigned,omitempty"`
|
||||
}
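The status VO above is what the web layer ultimately turns into a connection target: the node address comes from the `[devstar.devcontainer] HOST` setting (loaded below) and the port from `NodePortAssigned`. A hypothetical helper as a sketch:

```go
package k8s_agent

import (
	"fmt"

	"code.gitea.io/gitea/modules/setting"
)

// SSHEndpoint is a hypothetical helper, not part of the patch: it combines the
// configured HOST with the NodePort recorded by the controller.
func SSHEndpoint(status *DevcontainerStatusK8sAgentVO) (string, error) {
	if status == nil || !status.Ready || status.NodePortAssigned == 0 {
		return "", fmt.Errorf("devcontainer is not ready yet")
	}
	// The statefulset template above configures key-based root SSH login.
	return fmt.Sprintf("ssh -p %d root@%s", status.NodePortAssigned, setting.DevContainerConfig.Host), nil
}
```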
|
||||
@@ -4,6 +4,9 @@ var DevContainerConfig = struct {
|
||||
Enable bool
|
||||
Web_Terminal_Image string
|
||||
Web_Terminal_Container string
|
||||
Namespace string
|
||||
Host string
|
||||
TimeoutSeconds int64
|
||||
}{}
|
||||
|
||||
func loadDevContainerSettingsFrom(rootCfg ConfigProvider) {
|
||||
@@ -11,4 +14,7 @@ func loadDevContainerSettingsFrom(rootCfg ConfigProvider) {
|
||||
DevContainerConfig.Enable = sec.Key("ENABLE").MustBool(true)
|
||||
DevContainerConfig.Web_Terminal_Image = sec.Key("WEB_TERMINAL_IMAGE").MustString("devstar.cn/devstar/webterminal:latest")
|
||||
DevContainerConfig.Web_Terminal_Container = sec.Key("WEB_TERMINAL_CONTAINER").MustString("")
|
||||
DevContainerConfig.Namespace = rootCfg.Section("devstar.devcontainer").Key("NAMESPACE").MustString("default")
|
||||
DevContainerConfig.Host = rootCfg.Section("devstar.devcontainer").Key("HOST").MustString("")
|
||||
DevContainerConfig.TimeoutSeconds = rootCfg.Section("devstar.devcontainer").Key("TIMEOUT_SECONDS").MustInt64(120)
|
||||
}
|
||||
|
||||
@@ -632,6 +632,7 @@ func SubmitInstall(ctx *context.Context) {
|
||||
|
||||
if form.K8sEnable {
|
||||
//K8s环境检测
|
||||
cfg.Section("devstar.devcontainer").Key("HOST").SetValue(form.Domain)
|
||||
} else {
|
||||
if !checkDocker(ctx, &form) {
|
||||
ctx.RenderWithErr("There is no docker environment", tplInstall, &form)
|
||||
|
||||
@@ -161,7 +161,29 @@ func GetWebTerminalURL(ctx context.Context, userID, repoID int64) (string, error
|
||||
return "", err
|
||||
}
|
||||
if cfg.Section("k8s").Key("ENABLE").Value() == "true" {
|
||||
//k8s的逻辑
|
||||
// K8s 模式:使用 Istio Gateway + VirtualService
|
||||
log.Info("GetWebTerminalURL: 使用 Istio 模式获取 WebTerminal URL for DevContainer: %s", devcontainerName)
|
||||
|
||||
// 从配置中读取域名
|
||||
domain := cfg.Section("server").Key("DOMAIN").Value()
|
||||
|
||||
// 从容器名称中提取用户名和仓库名
|
||||
parts := strings.Split(devcontainerName, "-")
|
||||
var username, repoName string
|
||||
if len(parts) >= 2 {
|
||||
username = parts[0]
|
||||
repoName = parts[1]
|
||||
} else {
|
||||
username = "unknown"
|
||||
repoName = "unknown"
|
||||
}
|
||||
|
||||
// 构建基于 Istio Gateway 的 URL
|
||||
path := fmt.Sprintf("/%s/%s/dev-container-webterminal", username, repoName)
|
||||
webTerminalURL := fmt.Sprintf("http://%s%s", domain, path)
|
||||
|
||||
log.Info("GetWebTerminalURL: 生成 Istio WebTerminal URL: %s", webTerminalURL)
|
||||
return webTerminalURL, nil
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
@@ -181,6 +203,7 @@ func GetWebTerminalURL(ctx context.Context, userID, repoID int64) (string, error
|
||||
10已删除
|
||||
*/
|
||||
func GetDevContainerStatus(ctx context.Context, userID, repoID string) (string, error) {
|
||||
log.Info("GetDevContainerStatus: Starting - userID: %s, repoID: %s", userID, repoID)
|
||||
var id int
|
||||
var containerName string
|
||||
|
||||
@@ -188,6 +211,7 @@ func GetDevContainerStatus(ctx context.Context, userID, repoID string) (string,
|
||||
var realTimeStatus uint16
|
||||
cfg, err := setting.NewConfigProviderFromFile(setting.CustomConf)
|
||||
if err != nil {
|
||||
log.Error("GetDevContainerStatus: Failed to load config: %v", err)
|
||||
return "", err
|
||||
}
|
||||
dbEngine := db.GetEngine(ctx)
|
||||
@@ -197,19 +221,42 @@ func GetDevContainerStatus(ctx context.Context, userID, repoID string) (string,
|
||||
Where("user_id = ? AND repo_id = ?", userID, repoID).
|
||||
Get(&status, &id, &containerName)
|
||||
if err != nil {
|
||||
log.Error("GetDevContainerStatus: Failed to query database: %v", err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
log.Info("GetDevContainerStatus: Database query result - id: %d, containerName: %s, status: %d", id, containerName, status)
|
||||
|
||||
if id == 0 {
|
||||
log.Info("GetDevContainerStatus: No devcontainer found, returning -1")
|
||||
return fmt.Sprintf("%d", -1), nil
|
||||
}
|
||||
|
||||
realTimeStatus = status
|
||||
log.Info("GetDevContainerStatus: Initial realTimeStatus: %d", realTimeStatus)
|
||||
switch status {
|
||||
//正在重启
|
||||
case 6:
|
||||
if cfg.Section("k8s").Key("ENABLE").Value() == "true" {
|
||||
//k8s的逻辑
|
||||
// k8s 逻辑:检查 Pod 是否已恢复运行
|
||||
log.Info("GetDevContainerStatus: K8s branch for case 6 (restarting), container: %s", containerName)
|
||||
opts := &OpenDevcontainerAppDispatcherOptions{
|
||||
Name: containerName,
|
||||
Wait: false,
|
||||
}
|
||||
log.Info("GetDevContainerStatus: Calling AssignDevcontainerGetting2K8sOperator with opts: %+v", opts)
|
||||
devcontainerApp, err := AssignDevcontainerGetting2K8sOperator(&ctx, opts)
|
||||
if err != nil {
|
||||
log.Error("GetDevContainerStatus: AssignDevcontainerGetting2K8sOperator failed: %v", err)
|
||||
} else if devcontainerApp != nil {
|
||||
log.Info("GetDevContainerStatus: DevcontainerApp retrieved - Name: %s, Ready: %v", devcontainerApp.Name, devcontainerApp.Status.Ready)
|
||||
if devcontainerApp.Status.Ready {
|
||||
realTimeStatus = 4 // 已恢复运行
|
||||
log.Info("GetDevContainerStatus: Container %s is ready, updating status to 4", containerName)
|
||||
}
|
||||
} else {
|
||||
log.Warn("GetDevContainerStatus: DevcontainerApp is nil for container: %s", containerName)
|
||||
}
|
||||
} else {
|
||||
containerRealTimeStatus, err := GetDevContainerStatusFromDocker(ctx, containerName)
|
||||
if err != nil {
|
||||
@@ -222,7 +269,24 @@ func GetDevContainerStatus(ctx context.Context, userID, repoID string) (string,
|
||||
//正在关闭
|
||||
case 7:
|
||||
if cfg.Section("k8s").Key("ENABLE").Value() == "true" {
|
||||
//k8s的逻辑
|
||||
// k8s 逻辑:检查 Pod 是否已停止
|
||||
log.Info("GetDevContainerStatus: K8s branch for case 7 (stopping), container: %s", containerName)
|
||||
opts := &OpenDevcontainerAppDispatcherOptions{
|
||||
Name: containerName,
|
||||
Wait: false,
|
||||
}
|
||||
log.Info("GetDevContainerStatus: Calling AssignDevcontainerGetting2K8sOperator for stop check with opts: %+v", opts)
|
||||
devcontainerApp, err := AssignDevcontainerGetting2K8sOperator(&ctx, opts)
|
||||
if err != nil {
|
||||
log.Info("GetDevContainerStatus: DevcontainerApp not found or error, considering stopped: %v", err)
|
||||
realTimeStatus = 8 // 已停止
|
||||
} else if devcontainerApp == nil || !devcontainerApp.Status.Ready {
|
||||
log.Info("GetDevContainerStatus: DevcontainerApp is nil or not ready, considering stopped")
|
||||
realTimeStatus = 8 // 已停止
|
||||
} else {
|
||||
log.Info("GetDevContainerStatus: DevcontainerApp still running - Name: %s, Ready: %v", devcontainerApp.Name, devcontainerApp.Status.Ready)
|
||||
}
|
||||
// 已在外部通过 StopDevContainer 触发,此处仅检查状态
|
||||
} else {
|
||||
containerRealTimeStatus, err := GetDevContainerStatusFromDocker(ctx, containerName)
|
||||
if err != nil {
|
||||
@@ -240,7 +304,21 @@ func GetDevContainerStatus(ctx context.Context, userID, repoID string) (string,
|
||||
break
|
||||
case 9:
|
||||
if cfg.Section("k8s").Key("ENABLE").Value() == "true" {
|
||||
//k8s的逻辑
|
||||
// k8s 逻辑:检查 Pod 是否已删除
|
||||
log.Info("GetDevContainerStatus: K8s branch for case 9 (deleting), container: %s", containerName)
|
||||
opts := &OpenDevcontainerAppDispatcherOptions{
|
||||
Name: containerName,
|
||||
Wait: false,
|
||||
}
|
||||
log.Info("GetDevContainerStatus: Calling AssignDevcontainerGetting2K8sOperator for delete check with opts: %+v", opts)
|
||||
_, err := AssignDevcontainerGetting2K8sOperator(&ctx, opts)
|
||||
if err != nil {
|
||||
log.Info("GetDevContainerStatus: DevcontainerApp not found, considering deleted: %v", err)
|
||||
realTimeStatus = 10 // deleted
|
||||
} else {
|
||||
log.Info("GetDevContainerStatus: DevcontainerApp still exists, not deleted yet")
|
||||
}
|
||||
// Deletion was already triggered externally via DeleteDevContainer; only check the status here
|
||||
} else {
|
||||
isContainerNotFound, err := IsContainerNotFound(ctx, containerName)
|
||||
if err != nil {
|
||||
@@ -259,6 +337,22 @@ func GetDevContainerStatus(ctx context.Context, userID, repoID string) (string,
|
||||
default:
|
||||
log.Info("other status")
|
||||
}
|
||||
// K8s: only return 4 once Ready; otherwise keep or downgrade to 3
|
||||
if cfg.Section("k8s").Key("ENABLE").Value() == "true" && (status == 3 || status == 4) {
|
||||
opts := &OpenDevcontainerAppDispatcherOptions{
|
||||
Name: containerName,
|
||||
Wait: false,
|
||||
}
|
||||
app, err := AssignDevcontainerGetting2K8sOperator(&ctx, opts)
|
||||
if err != nil || app == nil {
|
||||
// If the CR cannot be fetched or an error occurs, conservatively treat it as not ready
|
||||
realTimeStatus = 3
|
||||
} else if app.Status.Ready {
|
||||
realTimeStatus = 4
|
||||
} else {
|
||||
realTimeStatus = 3
|
||||
}
|
||||
}
|
||||
// status update
|
||||
if realTimeStatus != status {
|
||||
if realTimeStatus == 10 {
|
||||
@@ -289,6 +383,7 @@ func GetDevContainerStatus(ctx context.Context, userID, repoID string) (string,
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
log.Info("GetDevContainerStatus: Final realTimeStatus: %d, returning status string", realTimeStatus)
|
||||
return fmt.Sprintf("%d", realTimeStatus), nil
|
||||
}
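// For reference, the integer status codes handled above (as used throughout this file)
// map roughly to: -1 no devcontainer, 1 pulling image, 2 creating/starting,
// 3 installing tools / not ready, 4 running, 6 restarting, 7 stopping, 8 stopped,
// 9 deleting, 10 deleted. A hedged sketch of how they could read as named constants
// (hypothetical identifiers, not part of the actual code):
//
//    const (
//        StatusNotReady = 3
//        StatusRunning  = 4
//        StatusStopping = 7
//        StatusStopped  = 8
//        StatusDeleting = 9
//        StatusDeleted  = 10
//    )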
|
||||
func CreateDevContainer(ctx context.Context, repo *repo.Repository, doer *user.User, publicKeyList []string, isWebTerminal bool) error {
|
||||
@@ -330,7 +425,28 @@ func CreateDevContainer(ctx context.Context, repo *repo.Repository, doer *user.U
|
||||
go func() {
|
||||
otherCtx := context.Background()
|
||||
if cfg.Section("k8s").Key("ENABLE").Value() == "true" {
|
||||
// K8s mode: call the K8s Operator directly to create the DevContainer
|
||||
configurationString, err := GetDevcontainerConfigurationString(otherCtx, repo)
|
||||
if err != nil {
|
||||
log.Info("CreateDevContainer: 读取 devcontainer 配置失败: %v", err)
|
||||
return
|
||||
}
|
||||
configurationModel, err := UnmarshalDevcontainerConfigContent(configurationString)
|
||||
if err != nil {
|
||||
log.Info("CreateDevContainer: 解析 devcontainer 配置失败: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
newDTO := &CreateDevcontainerDTO{
|
||||
Devcontainer: newDevcontainer,
|
||||
SSHPublicKeyList: publicKeyList,
|
||||
GitRepositoryURL: strings.TrimSuffix(setting.AppURL, "/") + repo.Link(),
|
||||
Image: configurationModel.Image,
|
||||
}
|
||||
if err := AssignDevcontainerCreation2K8sOperator(&otherCtx, newDTO); err != nil {
|
||||
log.Error("CreateDevContainer: K8s 创建失败: %v", err)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
imageName, err := CreateDevContainerByDockerCommand(otherCtx, &newDevcontainer, repo, publicKeyList)
|
||||
if err != nil {
|
||||
@@ -367,7 +483,9 @@ func DeleteDevContainer(ctx context.Context, userID, repoID int64) error {
|
||||
go func() {
|
||||
otherCtx := context.Background()
|
||||
if cfg.Section("k8s").Key("ENABLE").Value() == "true" {
|
||||
// K8s mode: call the K8s Operator to delete the DevContainer resources
|
||||
devList := []devcontainer_models.Devcontainer{devContainerInfo}
|
||||
_ = AssignDevcontainerDeletion2K8sOperator(&otherCtx, &devList)
|
||||
} else {
|
||||
|
||||
err = DeleteDevContainerByDocker(otherCtx, devContainerInfo.Name)
|
||||
@@ -402,7 +520,15 @@ func RestartDevContainer(ctx context.Context, userID, repoID int64) error {
|
||||
go func() {
|
||||
otherCtx := context.Background()
|
||||
if cfg.Section("k8s").Key("ENABLE").Value() == "true" {
|
||||
// K8s mode: call the K8s Operator to restart the DevContainer
|
||||
vo := &DevcontainerVO{
|
||||
DevContainerName: devContainerInfo.Name,
|
||||
UserId: userID,
|
||||
RepoId: repoID,
|
||||
}
|
||||
if err := AssignDevcontainerRestart2K8sOperator(&otherCtx, vo); err != nil {
|
||||
log.Error("RestartDevContainer: K8s 重启失败: %v", err)
|
||||
}
|
||||
} else {
|
||||
err = RestartDevContainerByDocker(otherCtx, devContainerInfo.Name)
|
||||
if err != nil {
|
||||
@@ -436,7 +562,15 @@ func StopDevContainer(ctx context.Context, userID, repoID int64) error {
|
||||
go func() {
|
||||
otherCtx := context.Background()
|
||||
if cfg.Section("k8s").Key("ENABLE").Value() == "true" {
|
||||
// K8s mode: call the K8s Operator to stop the DevContainer
|
||||
vo := &DevcontainerVO{
|
||||
DevContainerName: devContainerInfo.Name,
|
||||
UserId: userID,
|
||||
RepoId: repoID,
|
||||
}
|
||||
if err := AssignDevcontainerStop2K8sOperator(&otherCtx, vo); err != nil {
|
||||
log.Error("StopDevContainer: K8s 停止失败: %v", err)
|
||||
}
|
||||
} else {
|
||||
err = StopDevContainerByDocker(otherCtx, devContainerInfo.Name)
|
||||
if err != nil {
|
||||
@@ -891,7 +1025,20 @@ func Get_IDE_TerminalURL(ctx *gitea_context.Context, doer *user.User, repo *gite
|
||||
var port string
|
||||
|
||||
if cfg.Section("k8s").Key("ENABLE").Value() == "true" {
|
||||
|
||||
// K8s environment: use the DevcontainerApp's NodePort as the SSH port
|
||||
apiRequestCtx := ctx.Req.Context()
|
||||
opts := &OpenDevcontainerAppDispatcherOptions{
|
||||
Name: devContainerInfo.Name,
|
||||
Wait: false,
|
||||
}
|
||||
devcontainerApp, err := AssignDevcontainerGetting2K8sOperator(&apiRequestCtx, opts)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if devcontainerApp == nil || devcontainerApp.Status.NodePortAssigned == 0 {
|
||||
return "", fmt.Errorf("k8s DevcontainerApp 未就绪或未分配 NodePort: %s", devContainerInfo.Name)
|
||||
}
|
||||
port = fmt.Sprintf("%d", devcontainerApp.Status.NodePortAssigned)
|
||||
} else {
|
||||
mappedPort, err := docker_module.GetMappedPort(ctx, devContainerInfo.Name, "22")
|
||||
if err != nil {
|
||||
services/devcontainer/k8s_agent.go (new file, 891 lines)
@@ -0,0 +1,891 @@
package devcontainer
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"code.gitea.io/gitea/models/db"
|
||||
devcontainer_model "code.gitea.io/gitea/models/devcontainer"
|
||||
devcontainer_dto "code.gitea.io/gitea/modules/k8s"
|
||||
devcontainer_k8s_agent_module "code.gitea.io/gitea/modules/k8s"
|
||||
k8s_api_v1 "code.gitea.io/gitea/modules/k8s/api/devcontainer/v1"
|
||||
"code.gitea.io/gitea/modules/k8s/errors"
|
||||
|
||||
"code.gitea.io/gitea/modules/log"
|
||||
"code.gitea.io/gitea/modules/setting"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
k8serrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"k8s.io/client-go/tools/remotecommand"
|
||||
"k8s.io/kubectl/pkg/scheme"
|
||||
// Istio resources are handled via dynamic/unstructured clients; the typed API is no longer used directly
|
||||
)
|
||||
|
||||
// DTO types exposed for the K8s agent, used by the K8s-branch create/query entry points
|
||||
type CreateDevcontainerDTO struct {
|
||||
devcontainer_model.Devcontainer
|
||||
SSHPublicKeyList []string
|
||||
GitRepositoryURL string
|
||||
Image string
|
||||
DockerfileContent string
|
||||
DevcontainerPort uint16
|
||||
}
|
||||
|
||||
type OpenDevcontainerAppDispatcherOptions struct {
|
||||
Name string `json:"name"`
|
||||
Wait bool `json:"wait"`
|
||||
Status uint16
|
||||
Port uint16
|
||||
UserPublicKey string
|
||||
RepoID int64
|
||||
UserID int64
|
||||
}
|
||||
|
||||
var k8sGroupVersionResource = schema.GroupVersionResource{
|
||||
Group: "devcontainer.devstar.cn",
|
||||
Version: "v1",
|
||||
Resource: "devcontainerapps",
|
||||
}
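// A minimal sketch of how this GroupVersionResource is used with the dynamic client
// elsewhere in this file (assuming client is the dynamic client returned by
// GetKubernetesClient, ns is the configured namespace, and the CR name is hypothetical):
//
//    u, err := client.Resource(k8sGroupVersionResource).
//        Namespace(ns).
//        Get(ctx, "my-devcontainer", metav1.GetOptions{})
//    if err == nil {
//        log.Info("fetched DevcontainerApp: %s", u.GetName())
//    }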
|
||||
|
||||
type ErrIllegalK8sAgentParams struct {
|
||||
FieldNameList []string
|
||||
}
|
||||
|
||||
func (err ErrIllegalK8sAgentParams) Error() string {
|
||||
return fmt.Sprintf("Illegal Params: %v", err.FieldNameList)
|
||||
}
|
||||
|
||||
// AssignDevcontainerGetting2K8sOperator fetches the latest state of the DevcontainerApp CRD resource (the caller-supplied wait parameter decides whether to block until the DevContainer is ready)
|
||||
func AssignDevcontainerGetting2K8sOperator(ctx *context.Context, opts *OpenDevcontainerAppDispatcherOptions) (*k8s_api_v1.DevcontainerApp, error) {
|
||||
log.Info("AssignDevcontainerGetting2K8sOperator: Starting lookup for container: %s, wait=%v",
|
||||
opts.Name, opts.Wait)
|
||||
|
||||
// 0. Validate the parameters
|
||||
if ctx == nil || opts == nil || len(opts.Name) == 0 {
|
||||
return nil, ErrIllegalK8sAgentParams{
|
||||
FieldNameList: []string{"ctx", "opts", "opts.Name"},
|
||||
}
|
||||
}
|
||||
|
||||
// 1. Obtain the dynamic client
|
||||
ctxVal := *ctx
|
||||
client, err := devcontainer_k8s_agent_module.GetKubernetesClient(ctxVal, nil, "")
|
||||
if err != nil {
|
||||
// Propagate the error upward so the database transaction is ended
|
||||
return nil, errors.ErrOperateDevcontainer{
|
||||
Action: "Connect to k8s API Server",
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
|
||||
log.Info("AssignDevcontainerGetting2K8sOperator: K8s client created successfully")
|
||||
|
||||
// 2. Call the modules-layer k8s agent to fetch the DevcontainerApp CRD resource
|
||||
optsGetDevcontainer := &devcontainer_dto.GetDevcontainerOptions{
|
||||
GetOptions: metav1.GetOptions{},
|
||||
Name: opts.Name,
|
||||
Namespace: setting.DevContainerConfig.Namespace,
|
||||
Wait: opts.Wait,
|
||||
}
|
||||
log.Info("AssignDevcontainerGetting2K8sOperator: Retrieving DevcontainerApp %s in namespace %s (wait=%v)",
|
||||
opts.Name, setting.DevContainerConfig.Namespace, opts.Wait)
|
||||
devcontainerApp, err := devcontainer_k8s_agent_module.GetDevcontainer(ctxVal, client, optsGetDevcontainer)
|
||||
if err != nil {
|
||||
log.Error("AssignDevcontainerGetting2K8sOperator: Failed to get DevcontainerApp: %v", err)
|
||||
return nil, errors.ErrOperateDevcontainer{
|
||||
Action: fmt.Sprintf("Open Devcontainer '%s' (wait=%v)", opts.Name, opts.Wait),
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
|
||||
log.Info("AssignDevcontainerGetting2K8sOperator: DevcontainerApp retrieved successfully - Name: %s, NodePort: %d, Ready: %v",
|
||||
devcontainerApp.Name, devcontainerApp.Status.NodePortAssigned, devcontainerApp.Status.Ready)
|
||||
|
||||
// Log any extra assigned ports
|
||||
if len(devcontainerApp.Status.ExtraPortsAssigned) > 0 {
|
||||
for i, port := range devcontainerApp.Status.ExtraPortsAssigned {
|
||||
log.Info("AssignDevcontainerGetting2K8sOperator: Extra port %d - Name: %s, NodePort: %d, ContainerPort: %d",
|
||||
i, port.Name, port.NodePort, port.ContainerPort)
|
||||
}
|
||||
} else {
|
||||
log.Info("AssignDevcontainerGetting2K8sOperator: No extra ports found for DevcontainerApp %s", devcontainerApp.Name)
|
||||
}
|
||||
|
||||
// 3. Latest DevcontainerApp fetched successfully; return it
|
||||
return devcontainerApp, nil
|
||||
}
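// A minimal usage sketch (assuming a background context and an existing CR;
// the container name below is hypothetical):
//
//    ctx := context.Background()
//    app, err := AssignDevcontainerGetting2K8sOperator(&ctx, &OpenDevcontainerAppDispatcherOptions{
//        Name: "alice-demo",
//        Wait: false,
//    })
//    if err == nil && app.Status.Ready {
//        log.Info("SSH NodePort: %d", app.Status.NodePortAssigned)
//    }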
|
||||
|
||||
// Side note: what is the difference between the k8s agents in modules/ and services/?
// - modules/ talks directly to the k8s API Server
// - services/ wraps that layer to simplify use from the user-facing side
|
||||
|
||||
func AssignDevcontainerDeletion2K8sOperator(ctx *context.Context, devcontainersList *[]devcontainer_model.Devcontainer) error {
|
||||
log.Info("AssignDevcontainerDeletion2K8sOperator: Starting Deletion for containers")
|
||||
// 1. Obtain the dynamic client
|
||||
ctxVal := *ctx
|
||||
client, err := devcontainer_k8s_agent_module.GetKubernetesClient(ctxVal, nil, "")
|
||||
if err != nil {
|
||||
// Propagate the error upward so the database transaction is ended
|
||||
return err
|
||||
}
|
||||
|
||||
// Obtain a standard Kubernetes client for deleting the Ingress
|
||||
stdClient, err := getStandardKubernetesClient()
|
||||
if err != nil {
|
||||
log.Warn("AssignDevcontainerDeletion2K8sOperator: 获取标准 K8s 客户端失败: %v", err)
|
||||
// Keep going; do not block the main flow
|
||||
} else {
|
||||
// First delete the Ingress resources associated with the DevContainer
|
||||
for _, devcontainer := range *devcontainersList {
|
||||
ingressName := fmt.Sprintf("%s-ttyd-ingress", devcontainer.Name)
|
||||
log.Info("AssignDevcontainerDeletion2K8sOperator: 删除 Ingress %s", ingressName)
|
||||
|
||||
err := stdClient.NetworkingV1().Ingresses(setting.DevContainerConfig.Namespace).Delete(*ctx, ingressName, metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
if k8serrors.IsNotFound(err) {
|
||||
// The Ingress no longer exists; treat this as normal
|
||||
log.Info("AssignDevcontainerDeletion2K8sOperator: Ingress %s 不存在,跳过删除", ingressName)
|
||||
} else {
|
||||
log.Warn("AssignDevcontainerDeletion2K8sOperator: 删除 Ingress %s 失败: %v", ingressName, err)
|
||||
// Keep going; do not block the main flow
|
||||
}
|
||||
} else {
|
||||
log.Info("AssignDevcontainerDeletion2K8sOperator: 成功删除 Ingress %s", ingressName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 2. Call the modules-layer k8s agent to delete the resources
|
||||
opts := &devcontainer_dto.DeleteDevcontainerOptions{
|
||||
DeleteOptions: metav1.DeleteOptions{},
|
||||
Namespace: setting.DevContainerConfig.Namespace,
|
||||
}
|
||||
if devcontainersList == nil || len(*devcontainersList) == 0 {
|
||||
return fmt.Errorf("delete devcontainer in namespace '%s': %s", opts.Namespace, "the DevContainer list is empty")
|
||||
}
|
||||
// 3. Iterate over the list and delete each DevContainer; if a deletion fails, the module layer logs it and an administrator handles it manually
|
||||
for _, devcontainer := range *devcontainersList {
|
||||
opts.Name = devcontainer.Name
|
||||
_ = devcontainer_k8s_agent_module.DeleteDevcontainer(ctxVal, client, opts)
|
||||
|
||||
// Delete the corresponding VirtualService
|
||||
if err := deleteDevContainerWebTerminalVirtualService(ctx, devcontainer.Name); err != nil {
|
||||
log.Warn("AssignDevcontainerDeletion2K8sOperator: 删除 VirtualService 失败 for DevContainer %s: %v", devcontainer.Name, err)
|
||||
// Do not block the main flow; only log a warning
|
||||
} else {
|
||||
log.Info("AssignDevcontainerDeletion2K8sOperator: 成功删除 VirtualService for DevContainer: %s", devcontainer.Name)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Side note: what is the difference between the k8s agents in modules/ and services/?
// - modules/ talks directly to the k8s API Server
// - services/ wraps that layer to simplify use from the user-facing side
|
||||
|
||||
// AssignDevcontainerCreation2K8sOperator dispatches the DevContainer creation task to the k8s Operator and updates the NodePort from the result
//
// Note: this method still runs inside a database transaction, so it is not suitable for long-running work; whether the DevContainer is ready is therefore checked asynchronously later
|
||||
func AssignDevcontainerCreation2K8sOperator(ctx *context.Context, newDevContainer *CreateDevcontainerDTO) error {
|
||||
log.Info("AssignDevcontainerCreation2K8sOperator: Starting creation for container: %s", newDevContainer.Name)
|
||||
log.Info("AssignDevcontainerCreation2K8sOperator: Container details - Image: %s, RepoURL: %s, SSHKeys: %d",
|
||||
newDevContainer.Image, newDevContainer.GitRepositoryURL, len(newDevContainer.SSHPublicKeyList))
|
||||
|
||||
// 1. Obtain the dynamic client
|
||||
ctxVal := *ctx
|
||||
client, err := devcontainer_k8s_agent_module.GetKubernetesClient(ctxVal, nil, "")
|
||||
if err != nil {
|
||||
// Propagate the error upward so the database transaction is ended
|
||||
return err
|
||||
}
|
||||
|
||||
// 1.1: insert devcontainer_output records
|
||||
dbEngine := db.GetEngine(*ctx)
|
||||
|
||||
// Update the status to 1: pulling the image
|
||||
_, err = dbEngine.Table("devcontainer").
|
||||
Where("user_id = ? AND repo_id = ? ", newDevContainer.UserId, newDevContainer.RepoId).
|
||||
Update(&devcontainer_model.Devcontainer{DevcontainerStatus: 1})
|
||||
if err != nil {
|
||||
log.Info("Failed to update status to 1: %v", err)
|
||||
}
|
||||
|
||||
// Insert the pull-image record
|
||||
if _, err := dbEngine.Table("devcontainer_output").Insert(&devcontainer_model.DevcontainerOutput{
|
||||
Output: "Pulling image for K8s container: " + newDevContainer.Image,
|
||||
ListId: 0,
|
||||
Status: "success", // set to success so the created flag's condition is met
|
||||
UserId: newDevContainer.UserId,
|
||||
RepoId: newDevContainer.RepoId,
|
||||
Command: "Pull Image",
|
||||
}); err != nil {
|
||||
log.Info("Failed to insert Pull Image record: %v", err)
|
||||
// Do not return an error; keep going
|
||||
}
|
||||
|
||||
// Update the status to 2: creating and starting the container
|
||||
_, err = dbEngine.Table("devcontainer").
|
||||
Where("user_id = ? AND repo_id = ? ", newDevContainer.UserId, newDevContainer.RepoId).
|
||||
Update(&devcontainer_model.Devcontainer{DevcontainerStatus: 2})
|
||||
if err != nil {
|
||||
log.Info("Failed to update status to 2: %v", err)
|
||||
}
|
||||
|
||||
// Insert the initialize-workspace record (the key condition for created = true)
|
||||
if _, err := dbEngine.Table("devcontainer_output").Insert(&devcontainer_model.DevcontainerOutput{
|
||||
Output: "Initializing workspace in Kubernetes...",
|
||||
Status: "success", // must be success
|
||||
UserId: newDevContainer.UserId,
|
||||
RepoId: newDevContainer.RepoId,
|
||||
Command: "Initialize Workspace",
|
||||
ListId: 1, // ListId > 0 together with Status = success is the condition for created = true
|
||||
}); err != nil {
|
||||
log.Info("Failed to insert Initialize Workspace record: %v", err)
|
||||
// Do not return an error; keep going
|
||||
}
|
||||
|
||||
// Update the status to 3: installing required tools in the container
|
||||
_, err = dbEngine.Table("devcontainer").
|
||||
Where("user_id = ? AND repo_id = ? ", newDevContainer.UserId, newDevContainer.RepoId).
|
||||
Update(&devcontainer_model.Devcontainer{DevcontainerStatus: 3})
|
||||
if err != nil {
|
||||
log.Info("Failed to update status to 3: %v", err)
|
||||
}
|
||||
|
||||
// Insert the initialize-DevStar record
|
||||
if _, err := dbEngine.Table("devcontainer_output").Insert(&devcontainer_model.DevcontainerOutput{
|
||||
Output: "Initializing DevStar in Kubernetes...",
|
||||
Status: "success",
|
||||
UserId: newDevContainer.UserId,
|
||||
RepoId: newDevContainer.RepoId,
|
||||
Command: "Initialize DevStar",
|
||||
ListId: 2,
|
||||
}); err != nil {
|
||||
log.Info("Failed to insert Initialize DevStar record: %v", err)
|
||||
// Do not return an error; keep going
|
||||
}
|
||||
|
||||
// Insert the postCreateCommand record
|
||||
if _, err := dbEngine.Table("devcontainer_output").Insert(&devcontainer_model.DevcontainerOutput{
|
||||
Output: "Running post-create commands in Kubernetes...",
|
||||
Status: "success",
|
||||
UserId: newDevContainer.UserId,
|
||||
RepoId: newDevContainer.RepoId,
|
||||
Command: "Run postCreateCommand",
|
||||
ListId: 3,
|
||||
}); err != nil {
|
||||
log.Info("Failed to insert Run postCreateCommand record: %v", err)
|
||||
// Do not return an error; keep going
|
||||
}
|
||||
|
||||
// Add the ttyd port configuration - WebTerminal feature
|
||||
log.Info("AssignDevcontainerCreation2K8sOperator: Adding ttyd port configuration (7681)")
|
||||
extraPorts := []k8s_api_v1.ExtraPortSpec{
|
||||
{
|
||||
Name: "ttyd",
|
||||
ContainerPort: 7681, // default ttyd port
|
||||
ServicePort: 7681,
|
||||
},
|
||||
}
|
||||
|
||||
command := []string{
|
||||
"/bin/bash",
|
||||
"-c",
|
||||
"export DEBIAN_FRONTEND=noninteractive && " +
|
||||
"apt-get update -y && " +
|
||||
"apt-get install -y ssh && " +
|
||||
// Generate conditionally: only create host keys when they do not already exist
|
||||
"if [ ! -f /etc/ssh/ssh_host_rsa_key ]; then " +
|
||||
" echo 'Generating SSH host keys...' && " +
|
||||
" ssh-keygen -A && " +
|
||||
" echo 'SSH host keys generated' ; " +
|
||||
"else " +
|
||||
" echo 'SSH host keys already exist' ; " +
|
||||
"fi && " +
|
||||
"mkdir -p /var/run/sshd && " +
|
||||
"/usr/sbin/sshd && " +
|
||||
"if [ -f /ttyd-shared/ttyd ]; then " +
|
||||
"mkdir -p /data/workspace && " +
|
||||
"cd /data/workspace && " +
|
||||
"/ttyd-shared/ttyd -p 7681 -i 0.0.0.0 --writable bash > /tmp/ttyd.log 2>&1 & " +
|
||||
"fi && " +
|
||||
"while true; do sleep 60; done",
|
||||
}
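// In short, the startup script above installs the ssh package, generates SSH host
// keys only when they are missing, starts sshd, launches ttyd on port 7681 from
// /data/workspace when the shared /ttyd-shared/ttyd binary is present, and then
// loops "sleep 60" to keep the main container process alive.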
|
||||
log.Info("AssignDevcontainerCreation2K8sOperator: Command includes ttyd installation and startup")
|
||||
|
||||
// 2. Call the modules-layer k8s agent to create the resource
|
||||
opts := &devcontainer_dto.CreateDevcontainerOptions{
|
||||
CreateOptions: metav1.CreateOptions{},
|
||||
Name: newDevContainer.Name,
|
||||
Namespace: setting.DevContainerConfig.Namespace,
|
||||
Image: newDevContainer.Image,
|
||||
/**
 * Notes on configuring the startup command of the main Kubernetes container:
 * 1. Make sure the image already has OpenSSH Server installed
 * 2. The container must start the OpenSSH daemon in the background after boot
 * 3. Do not use indefinite-wait commands such as sleep infinity or tail -f /dev/null;
 *    an endless loop of sleep 60s works instead, which keeps the container in k8s from
 *    first turning Completed and then CrashLoopBackOff,
 *    and also avoids piling up zombie (<defunct>) processes:
 *    $ ps aux | grep "<defunct>"   # list zombie processes
 *    USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
 *    pollina+ 2336 0.0 0.0 0 0 ? Z 17:22 0:00 [sshd] <defunct>
 *    pollina+ 10986 0.0 0.0 0 0 ? Z 16:12 0:00 [sshd] <defunct>
 *    pollina+ 24722 0.0 0.0 0 0 ? Z 18:36 0:00 [sshd] <defunct>
 *    pollina+ 26773 0.0 0.0 0 0 ? Z 18:37 0:00 [sshd] <defunct>
 *    $ ps o ppid 2336 10986 24722 26773   # find the zombies' parent PID
 *    PPID
 *    21826
 *    $ ps aux | grep   # inspect the parent process
 *    USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
 *    root 21826 0.0 0.0 2520 408 ? Ss 18:36 0:00 sleep infinity
 */
|
||||
CommandList: command,
|
||||
ContainerPort: 22,
|
||||
ServicePort: 22,
|
||||
SSHPublicKeyList: newDevContainer.SSHPublicKeyList,
|
||||
GitRepositoryURL: newDevContainer.GitRepositoryURL,
|
||||
ExtraPorts: extraPorts, // 添加额外端口配置
|
||||
}
|
||||
|
||||
// 2. Creation succeeded; fetch the DevContainer back from the cluster
|
||||
log.Info("AssignDevcontainerCreation2K8sOperator: Creating DevcontainerApp %s in namespace %s",
|
||||
opts.Name, opts.Namespace)
|
||||
devcontainerInCluster, err := devcontainer_k8s_agent_module.CreateDevcontainer(ctxVal, client, opts)
|
||||
if err != nil {
|
||||
log.Error("AssignDevcontainerCreation2K8sOperator: Failed to create DevcontainerApp: %v", err)
|
||||
return err
|
||||
}
|
||||
log.Info("AssignDevcontainerCreation2K8sOperator: DevcontainerApp created successfully - Name: %s",
|
||||
devcontainerInCluster.Name)
|
||||
|
||||
// Do not set the status to 4 right after creation any more; keep it at 3 and let GetDevContainerStatus promote it to 4 once the Pod is Ready
|
||||
|
||||
// 3. Handle the NodePort - check whether it is 0 (not yet assigned)
|
||||
nodePort := devcontainerInCluster.Status.NodePortAssigned
|
||||
|
||||
if nodePort == 0 {
|
||||
log.Info("AssignDevcontainerCreation2K8sOperator: NodePort not yet assigned, starting async updater for %s",
|
||||
devcontainerInCluster.Name)
|
||||
|
||||
// Set the port to 0 as a special marker recorded in the database
|
||||
newDevContainer.DevcontainerPort = 0
|
||||
|
||||
// Record that the container has been created but the port is still pending
|
||||
log.Info("DevContainer created in cluster - Name: %s, NodePort: pending assignment",
|
||||
devcontainerInCluster.Name)
|
||||
|
||||
// Start an async task to update the port
|
||||
go updateNodePortAsync(devcontainerInCluster.Name,
|
||||
setting.DevContainerConfig.Namespace,
|
||||
newDevContainer.UserId,
|
||||
newDevContainer.RepoId)
|
||||
} else {
|
||||
log.Info("AssignDevcontainerCreation2K8sOperator: NodePort %d assigned immediately to %s",
|
||||
nodePort, devcontainerInCluster.Name)
|
||||
|
||||
// Port already assigned; use it directly
|
||||
newDevContainer.DevcontainerPort = nodePort
|
||||
log.Info("DevContainer created in cluster - Name: %s, NodePort: %d",
|
||||
devcontainerInCluster.Name, nodePort)
|
||||
}
|
||||
|
||||
log.Info("DevContainer created in cluster - Name: %s, NodePort: %d",
|
||||
devcontainerInCluster.Name,
|
||||
devcontainerInCluster.Status.NodePortAssigned)
|
||||
|
||||
// Create the Istio Gateway and VirtualService for the ttyd service
|
||||
log.Info("AssignDevcontainerCreation2K8sOperator: 开始创建 Istio 资源 for DevContainer: %s", devcontainerInCluster.Name)
|
||||
|
||||
// 1. Make sure the Gateway exists
|
||||
if err := createDevContainerWebTerminalGateway(ctx); err != nil {
|
||||
log.Warn("AssignDevcontainerCreation2K8sOperator: 创建 Gateway 失败: %v", err)
|
||||
// Do not block the main flow; only log a warning
|
||||
} else {
|
||||
log.Info("AssignDevcontainerCreation2K8sOperator: Gateway 创建成功")
|
||||
}
|
||||
|
||||
// 2. Create the VirtualService
|
||||
if err := createDevContainerWebTerminalVirtualService(ctx, devcontainerInCluster.Name); err != nil {
|
||||
log.Warn("AssignDevcontainerCreation2K8sOperator: 创建 VirtualService 失败: %v", err)
|
||||
// Do not block the main flow; only log a warning
|
||||
} else {
|
||||
log.Info("AssignDevcontainerCreation2K8sOperator: VirtualService 创建成功")
|
||||
}
|
||||
|
||||
// 4. Return nil upward so the database transaction commits automatically, completing the DevContainer creation
|
||||
return nil
|
||||
}
|
||||
|
||||
// AssignDevcontainerRestart2K8sOperator dispatches the DevContainer restart task to the K8s controller
|
||||
func AssignDevcontainerRestart2K8sOperator(ctx *context.Context, opts *DevcontainerVO) error {
|
||||
log.Info("AssignDevcontainerRestart2K8sOperator: Starting restart for container: %s", opts.DevContainerName)
|
||||
|
||||
// 1. Obtain the dynamic client
|
||||
ctxVal := *ctx
|
||||
client, err := devcontainer_k8s_agent_module.GetKubernetesClient(ctxVal, nil, "")
|
||||
if err != nil {
|
||||
log.Error("Failed to get Kubernetes client: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// 2. Implement the restart via a patch - update annotations so the controller redeploys the Pod
// Build the patch: add or update the restartedAt annotation and make sure desiredReplicas is 1
|
||||
patchData := fmt.Sprintf(`{
|
||||
"metadata": {
|
||||
"annotations": {
|
||||
"devstar.io/restartedAt": "%s",
|
||||
"devstar.io/desiredReplicas": "1"
|
||||
}
|
||||
}
|
||||
}`, time.Now().Format(time.RFC3339))
|
||||
log.Info("AssignDevcontainerRestart2K8sOperator: Applying patch to restart container %s",
|
||||
opts.DevContainerName)
|
||||
log.Debug("AssignDevcontainerRestart2K8sOperator: Patch data: %s", patchData)
|
||||
|
||||
// Apply the patch to the DevcontainerApp CRD
|
||||
_, err = client.Resource(k8sGroupVersionResource).
|
||||
Namespace(setting.DevContainerConfig.Namespace).
|
||||
Patch(ctxVal, opts.DevContainerName, types.MergePatchType, []byte(patchData), metav1.PatchOptions{})
|
||||
|
||||
if err != nil {
|
||||
log.Error("Failed to patch DevcontainerApp for restart: %v", err)
|
||||
return fmt.Errorf("restart k8s devcontainer '%s' failed: %v", opts.DevContainerName, err)
|
||||
}
|
||||
|
||||
// Log the restart operation
|
||||
log.Info("DevContainer restarted: %s", opts.DevContainerName)
|
||||
log.Info("AssignDevcontainerRestart2K8sOperator: Restart patch applied successfully for %s",
|
||||
opts.DevContainerName)
|
||||
|
||||
// Record the restart operation in the database
|
||||
dbEngine := db.GetEngine(*ctx)
|
||||
_, err = dbEngine.Table("devcontainer_output").Insert(&devcontainer_model.DevcontainerOutput{
|
||||
Output: fmt.Sprintf("Restarting K8s DevContainer %s", opts.DevContainerName),
|
||||
Status: "success",
|
||||
UserId: opts.UserId,
|
||||
RepoId: opts.RepoId,
|
||||
Command: "Restart DevContainer",
|
||||
ListId: 0,
|
||||
})
|
||||
if err != nil {
|
||||
log.Warn("Failed to insert restart record: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
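// The same restart could be triggered by hand with kubectl (a hedged sketch; the
// namespace, CR name, and timestamp are placeholders for the configured values):
//
//    kubectl -n <namespace> patch devcontainerapp <name> --type merge \
//      -p '{"metadata":{"annotations":{"devstar.io/restartedAt":"<RFC3339 timestamp>","devstar.io/desiredReplicas":"1"}}}'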
|
||||
|
||||
// AssignDevcontainerStop2K8sOperator dispatches the DevContainer stop task to the K8s controller
|
||||
func AssignDevcontainerStop2K8sOperator(ctx *context.Context, opts *DevcontainerVO) error {
|
||||
// 1. Obtain the dynamic client
|
||||
ctxVal := *ctx
|
||||
client, err := devcontainer_k8s_agent_module.GetKubernetesClient(ctxVal, nil, "")
|
||||
if err != nil {
|
||||
log.Error("Failed to get Kubernetes client: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// 2. Implement the stop via a patch - add stop annotations
// Build the patch: add or update the stoppedAt and desiredReplicas annotations
|
||||
patchData := fmt.Sprintf(`{
|
||||
"metadata": {
|
||||
"annotations": {
|
||||
"devstar.io/stoppedAt": "%s",
|
||||
"devstar.io/desiredReplicas": "0"
|
||||
}
|
||||
}
|
||||
}`, time.Now().Format(time.RFC3339))
|
||||
|
||||
// Apply the patch to the DevcontainerApp CRD
|
||||
_, err = client.Resource(k8sGroupVersionResource).
|
||||
Namespace(setting.DevContainerConfig.Namespace).
|
||||
Patch(ctxVal, opts.DevContainerName, types.MergePatchType, []byte(patchData), metav1.PatchOptions{})
|
||||
|
||||
if err != nil {
|
||||
log.Error("Failed to patch DevcontainerApp for stop: %v", err)
|
||||
return fmt.Errorf("stop k8s devcontainer '%s' failed: %v", opts.DevContainerName, err)
|
||||
}
|
||||
|
||||
// Log the stop operation
|
||||
log.Info("DevContainer stopped: %s", opts.DevContainerName)
|
||||
|
||||
// Record the stop operation in the database
|
||||
dbEngine := db.GetEngine(*ctx)
|
||||
_, err = dbEngine.Table("devcontainer_output").Insert(&devcontainer_model.DevcontainerOutput{
|
||||
Output: fmt.Sprintf("Stopping K8s DevContainer %s", opts.DevContainerName),
|
||||
Status: "success",
|
||||
UserId: opts.UserId,
|
||||
RepoId: opts.RepoId,
|
||||
Command: "Stop DevContainer",
|
||||
ListId: 0,
|
||||
})
|
||||
if err != nil {
|
||||
// Only log the error; do not affect the return value of the main flow
|
||||
log.Warn("Failed to insert stop record: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Helper that updates the NodePort asynchronously
|
||||
func updateNodePortAsync(containerName string, namespace string, userId, repoId int64) {
|
||||
log.Info("updateNodePortAsync: Starting for container: %s in namespace: %s", containerName, namespace)
|
||||
log.Info("updateNodePortAsync: Waiting 20 seconds for K8s controller to assign port")
|
||||
|
||||
// Wait for the K8s controller to finish assigning the port
|
||||
time.Sleep(20 * time.Second)
|
||||
|
||||
// Create a fresh context and client
|
||||
ctx := context.Background()
|
||||
client, err := devcontainer_k8s_agent_module.GetKubernetesClient(ctx, nil, "")
|
||||
if err != nil {
|
||||
log.Error("Failed to get K8s client in async updater: %v", err)
|
||||
return
|
||||
}
|
||||
log.Info("updateNodePortAsync: K8s client created successfully")
|
||||
|
||||
// Try at most 10 times to fetch the port
|
||||
for i := 0; i < 10; i++ {
|
||||
log.Info("updateNodePortAsync: Attempt %d/10 to retrieve NodePort for %s", i+1, containerName)
|
||||
getOpts := &devcontainer_k8s_agent_module.GetDevcontainerOptions{
|
||||
GetOptions: metav1.GetOptions{},
|
||||
Name: containerName,
|
||||
Namespace: namespace,
|
||||
Wait: false,
|
||||
}
|
||||
|
||||
devcontainer, err := devcontainer_k8s_agent_module.GetDevcontainer(ctx, client, getOpts)
|
||||
if err == nil && devcontainer != nil && devcontainer.Status.NodePortAssigned > 0 {
|
||||
log.Info("updateNodePortAsync: Success! Found NodePort %d for %s",
|
||||
devcontainer.Status.NodePortAssigned, containerName)
|
||||
// Got the real port; update the database
|
||||
realNodePort := devcontainer.Status.NodePortAssigned
|
||||
|
||||
// Log the ttyd port information
|
||||
if len(devcontainer.Status.ExtraPortsAssigned) > 0 {
|
||||
for _, portInfo := range devcontainer.Status.ExtraPortsAssigned {
|
||||
log.Info("Found extra port for %s: name=%s, nodePort=%d, containerPort=%d",
|
||||
containerName, portInfo.Name, portInfo.NodePort, portInfo.ContainerPort)
|
||||
}
|
||||
}
|
||||
|
||||
log.Info("Found real NodePort %d for container %s, updating database record",
|
||||
realNodePort, containerName)
|
||||
|
||||
engine := db.GetEngine(ctx)
|
||||
_, err := engine.Table("devcontainer").
|
||||
Where("user_id = ? AND repo_id = ?", userId, repoId).
|
||||
Update(map[string]interface{}{
|
||||
"devcontainer_port": realNodePort,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
log.Error("Failed to update NodePort in database: %v", err)
|
||||
} else {
|
||||
log.Info("Successfully updated NodePort in database to %d", realNodePort)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
log.Info("updateNodePortAsync: Port not yet assigned, waiting 5 seconds before next attempt")
|
||||
time.Sleep(5 * time.Second)
|
||||
}
|
||||
|
||||
log.Warn("updateNodePortAsync: Failed to retrieve real NodePort after multiple attempts")
|
||||
}
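// Worst-case timing of the poll loop above: an initial 20 s wait plus up to
// 10 attempts spaced 5 s apart, i.e. roughly 70 s before giving up and logging
// the warning; in that case the port recorded at creation time is left unchanged.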
|
||||
|
||||
// Obtain a standard Kubernetes client
|
||||
func getStandardKubernetesClient() (*kubernetes.Clientset, error) {
|
||||
// Obtain the config using the same logic as GetKubernetesClient
|
||||
config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
|
||||
if err != nil {
|
||||
// If the out-of-cluster config fails, try the in-cluster config
|
||||
log.Warn("Failed to obtain Kubernetes config outside of cluster: %v", err)
|
||||
config, err = rest.InClusterConfig()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("获取 K8s 配置失败 (集群内外均失败): %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Create the standard clientset
|
||||
stdClient, err := kubernetes.NewForConfig(config)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("创建标准 K8s 客户端失败: %v", err)
|
||||
}
|
||||
|
||||
return stdClient, nil
|
||||
}
|
||||
|
||||
// Create the DevContainer WebTerminal Gateway
|
||||
func createDevContainerWebTerminalGateway(ctx *context.Context) error {
|
||||
log.Info("createDevContainerWebTerminalGateway: 开始创建 DevContainer WebTerminal Gateway")
|
||||
|
||||
// Obtain the dynamic client
|
||||
ctxVal := *ctx
|
||||
client, err := devcontainer_k8s_agent_module.GetKubernetesClient(ctxVal, nil, "")
|
||||
if err != nil {
|
||||
return fmt.Errorf("获取 K8s 客户端失败: %v", err)
|
||||
}
|
||||
|
||||
gatewayName := "devcontainer-webterminal-gateway"
|
||||
namespace := setting.DevContainerConfig.Namespace
|
||||
|
||||
// Check whether the Gateway already exists
|
||||
gwGVR := schema.GroupVersionResource{Group: "networking.istio.io", Version: "v1", Resource: "gateways"}
|
||||
if _, err := client.Resource(gwGVR).Namespace(namespace).Get(ctxVal, gatewayName, metav1.GetOptions{}); err == nil {
|
||||
log.Info("createDevContainerWebTerminalGateway: Gateway 已存在: %s", gatewayName)
|
||||
return nil
|
||||
} else if !k8serrors.IsNotFound(err) {
|
||||
return fmt.Errorf("检查 Gateway 失败: %v", err)
|
||||
}
|
||||
|
||||
// Read the domain from the configuration
|
||||
cfg, err := setting.NewConfigProviderFromFile(setting.CustomConf)
|
||||
if err != nil {
|
||||
return fmt.Errorf("加载配置文件失败: %v", err)
|
||||
}
|
||||
|
||||
domain := cfg.Section("server").Key("DOMAIN").Value()
|
||||
|
||||
// Define the Gateway as an unstructured object (HTTP-only)
|
||||
gw := &unstructured.Unstructured{Object: map[string]interface{}{
|
||||
"apiVersion": "networking.istio.io/v1",
|
||||
"kind": "Gateway",
|
||||
"metadata": map[string]interface{}{
|
||||
"name": gatewayName,
|
||||
"namespace": namespace,
|
||||
"labels": map[string]interface{}{
|
||||
"app.kubernetes.io/name": "devcontainer-webterminal",
|
||||
"app.kubernetes.io/component": "gateway",
|
||||
"app.kubernetes.io/managed-by": "devstar",
|
||||
},
|
||||
},
|
||||
"spec": map[string]interface{}{
|
||||
"selector": map[string]interface{}{"istio": "ingressgateway"},
|
||||
"servers": []interface{}{
|
||||
map[string]interface{}{
|
||||
"port": map[string]interface{}{"number": 80, "name": "http", "protocol": "HTTP"},
|
||||
"hosts": []interface{}{domain, "*"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}}
|
||||
if _, err := client.Resource(gwGVR).Namespace(namespace).Create(ctxVal, gw, metav1.CreateOptions{}); err != nil {
|
||||
return fmt.Errorf("创建 Gateway 失败: %v", err)
|
||||
}
|
||||
|
||||
log.Info("createDevContainerWebTerminalGateway: 成功创建 Gateway: %s", gatewayName)
|
||||
return nil
|
||||
}
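// The unstructured object above corresponds roughly to this manifest (a sketch;
// <DOMAIN> is whatever cfg.Section("server").Key("DOMAIN") returns):
//
//    apiVersion: networking.istio.io/v1
//    kind: Gateway
//    metadata:
//      name: devcontainer-webterminal-gateway
//    spec:
//      selector:
//        istio: ingressgateway
//      servers:
//      - port: {number: 80, name: http, protocol: HTTP}
//        hosts: ["<DOMAIN>", "*"]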
|
||||
|
||||
// Create the DevContainer WebTerminal VirtualService
|
||||
func createDevContainerWebTerminalVirtualService(ctx *context.Context, devcontainerName string) error {
|
||||
log.Info("createDevContainerWebTerminalVirtualService: 开始创建 VirtualService for DevContainer: %s", devcontainerName)
|
||||
|
||||
// Obtain the dynamic client
|
||||
ctxVal := *ctx
|
||||
client, err := devcontainer_k8s_agent_module.GetKubernetesClient(ctxVal, nil, "")
|
||||
if err != nil {
|
||||
return fmt.Errorf("获取 K8s 客户端失败: %v", err)
|
||||
}
|
||||
|
||||
vsName := devcontainerName + "-webterminal-vs"
|
||||
namespace := setting.DevContainerConfig.Namespace
|
||||
|
||||
// Read the domain from the configuration
|
||||
cfg, err := setting.NewConfigProviderFromFile(setting.CustomConf)
|
||||
if err != nil {
|
||||
return fmt.Errorf("加载配置文件失败: %v", err)
|
||||
}
|
||||
|
||||
domain := cfg.Section("server").Key("DOMAIN").Value()
|
||||
// The VirtualService hosts must be one or the other: [DOMAIN] when DOMAIN is set, otherwise ["*"]
|
||||
var vsHosts []interface{}
|
||||
if domain != "" {
|
||||
vsHosts = []interface{}{domain}
|
||||
} else {
|
||||
vsHosts = []interface{}{"*"}
|
||||
}
|
||||
// Extract the username and repository name from the container name
|
||||
parts := strings.Split(devcontainerName, "-")
|
||||
var username, repoName string
|
||||
if len(parts) >= 2 {
|
||||
username = parts[0]
|
||||
repoName = parts[1]
|
||||
} else {
|
||||
username = "unknown"
|
||||
repoName = "unknown"
|
||||
}
|
||||
|
||||
// Build the access path
|
||||
path := fmt.Sprintf("/%s/%s/dev-container-webterminal", username, repoName)
|
||||
|
||||
// Define the VS as an unstructured object, adding a rewrite of the path to the root
|
||||
vsGVR := schema.GroupVersionResource{Group: "networking.istio.io", Version: "v1", Resource: "virtualservices"}
|
||||
vs := &unstructured.Unstructured{Object: map[string]interface{}{
|
||||
"apiVersion": "networking.istio.io/v1",
|
||||
"kind": "VirtualService",
|
||||
"metadata": map[string]interface{}{
|
||||
"name": vsName,
|
||||
"namespace": namespace,
|
||||
"labels": map[string]interface{}{
|
||||
"app.kubernetes.io/name": "devcontainer-webterminal",
|
||||
"app.kubernetes.io/component": "virtualservice",
|
||||
"app.kubernetes.io/managed-by": "devstar",
|
||||
"devcontainer-name": devcontainerName,
|
||||
},
|
||||
},
|
||||
"spec": map[string]interface{}{
|
||||
"hosts": vsHosts,
|
||||
"gateways": []interface{}{"devcontainer-webterminal-gateway"},
|
||||
"http": []interface{}{
|
||||
map[string]interface{}{
|
||||
"match": []interface{}{map[string]interface{}{"uri": map[string]interface{}{"prefix": path}}},
|
||||
"rewrite": map[string]interface{}{"uri": "/"},
|
||||
"route": []interface{}{
|
||||
map[string]interface{}{
|
||||
"destination": map[string]interface{}{
|
||||
"host": devcontainerName + "-svc",
|
||||
"port": map[string]interface{}{"number": 7681},
|
||||
},
|
||||
},
|
||||
},
|
||||
"timeout": "3600s",
|
||||
"retries": map[string]interface{}{
|
||||
"attempts": 3,
|
||||
"perTryTimeout": "30s",
|
||||
"retryOn": "5xx,gateway-error,connect-failure",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}}
|
||||
if _, err := client.Resource(vsGVR).Namespace(namespace).Create(ctxVal, vs, metav1.CreateOptions{}); err != nil {
|
||||
return fmt.Errorf("创建 VirtualService 失败: %v", err)
|
||||
}
|
||||
|
||||
log.Info("createDevContainerWebTerminalVirtualService: 成功创建 VirtualService: %s, 路径: %s", vsName, path)
|
||||
return nil
|
||||
}
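// Roughly the manifest produced above (a sketch; the username/repo segments come
// from the DevContainer name, so the path shown here is illustrative):
//
//    apiVersion: networking.istio.io/v1
//    kind: VirtualService
//    spec:
//      hosts: ["<DOMAIN>"]        # or ["*"] when DOMAIN is empty
//      gateways: ["devcontainer-webterminal-gateway"]
//      http:
//      - match: [{uri: {prefix: "/<user>/<repo>/dev-container-webterminal"}}]
//        rewrite: {uri: "/"}
//        route:
//        - destination: {host: "<devcontainer-name>-svc", port: {number: 7681}}
//        timeout: 3600s
//        retries: {attempts: 3, perTryTimeout: 30s, retryOn: "5xx,gateway-error,connect-failure"}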
|
||||
|
||||
// Delete the DevContainer WebTerminal VirtualService
|
||||
func deleteDevContainerWebTerminalVirtualService(ctx *context.Context, devcontainerName string) error {
|
||||
log.Info("deleteDevContainerWebTerminalVirtualService: 开始删除 VirtualService for DevContainer: %s", devcontainerName)
|
||||
|
||||
// Obtain the dynamic client
|
||||
ctxVal := *ctx
|
||||
client, err := devcontainer_k8s_agent_module.GetKubernetesClient(ctxVal, nil, "")
|
||||
if err != nil {
|
||||
return fmt.Errorf("获取 K8s 客户端失败: %v", err)
|
||||
}
|
||||
|
||||
vsName := devcontainerName + "-webterminal-vs"
|
||||
namespace := setting.DevContainerConfig.Namespace
|
||||
|
||||
vsGVR := schema.GroupVersionResource{Group: "networking.istio.io", Version: "v1", Resource: "virtualservices"}
|
||||
if err := client.Resource(vsGVR).Namespace(namespace).Delete(ctxVal, vsName, metav1.DeleteOptions{}); err != nil {
|
||||
if k8serrors.IsNotFound(err) {
|
||||
log.Info("deleteDevContainerWebTerminalVirtualService: VirtualService 不存在,无需删除: %s", vsName)
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("删除 VirtualService 失败: %v", err)
|
||||
}
|
||||
|
||||
log.Info("deleteDevContainerWebTerminalVirtualService: 成功删除 VirtualService: %s", vsName)
|
||||
return nil
|
||||
}
|
||||
|
||||
// executeCommandInK8sPod is a helper that executes a command inside a K8s Pod
|
||||
func executeCommandInK8sPod(ctx *context.Context, client *kubernetes.Clientset, namespace, devcontainerName, containerName string, command []string) error {
|
||||
log.Info("executeCommandInK8sPod: 开始为 DevContainer %s 查找对应的 Pod", devcontainerName)
|
||||
|
||||
// 1. First locate the matching Pod via a label selector
|
||||
labelSelector := fmt.Sprintf("app=%s", devcontainerName)
|
||||
pods, err := client.CoreV1().Pods(namespace).List(*ctx, metav1.ListOptions{
|
||||
LabelSelector: labelSelector,
|
||||
})
|
||||
if err != nil {
|
||||
log.Error("executeCommandInK8sPod: 查找 Pod 失败: %v", err)
|
||||
return fmt.Errorf("查找 Pod 失败: %v", err)
|
||||
}
|
||||
|
||||
if len(pods.Items) == 0 {
|
||||
log.Error("executeCommandInK8sPod: 未找到 DevContainer %s 对应的 Pod", devcontainerName)
|
||||
return fmt.Errorf("未找到 DevContainer %s 对应的 Pod", devcontainerName)
|
||||
}
|
||||
|
||||
// 2. Pick the first running Pod
|
||||
var targetPod *v1.Pod
|
||||
for i := range pods.Items {
|
||||
pod := &pods.Items[i]
|
||||
if pod.Status.Phase == v1.PodRunning {
|
||||
targetPod = pod
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if targetPod == nil {
|
||||
log.Error("executeCommandInK8sPod: DevContainer %s 没有运行中的 Pod", devcontainerName)
|
||||
return fmt.Errorf("DevContainer %s 没有运行中的 Pod", devcontainerName)
|
||||
}
|
||||
|
||||
podName := targetPod.Name
|
||||
log.Info("executeCommandInK8sPod: 找到运行中的 Pod: %s, 在容器 %s 中执行命令",
|
||||
podName, containerName)
|
||||
|
||||
// 3. Execute the command
|
||||
req := client.CoreV1().RESTClient().Post().
|
||||
Resource("pods").
|
||||
Name(podName).
|
||||
Namespace(namespace).
|
||||
SubResource("exec").
|
||||
Param("container", containerName)
|
||||
|
||||
req.VersionedParams(&v1.PodExecOptions{
|
||||
Container: containerName,
|
||||
Command: command,
|
||||
Stdin: false,
|
||||
Stdout: true,
|
||||
Stderr: true,
|
||||
TTY: false,
|
||||
}, scheme.ParameterCodec)
|
||||
|
||||
// Obtain the executor
|
||||
config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
|
||||
if err != nil {
|
||||
// If the out-of-cluster config fails, try the in-cluster config
|
||||
config, err = rest.InClusterConfig()
|
||||
if err != nil {
|
||||
return fmt.Errorf("获取 K8s 配置失败: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
executor, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
|
||||
if err != nil {
|
||||
return fmt.Errorf("创建命令执行器失败: %v", err)
|
||||
}
|
||||
|
||||
// Run the command
|
||||
var stdout, stderr bytes.Buffer
|
||||
err = executor.StreamWithContext(*ctx, remotecommand.StreamOptions{
|
||||
Stdout: &stdout,
|
||||
Stderr: &stderr,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
log.Error("executeCommandInK8sPod: 命令执行失败: %v, stderr: %s",
|
||||
err, stderr.String())
|
||||
return fmt.Errorf("命令执行失败: %v, stderr: %s", err, stderr.String())
|
||||
}
|
||||
|
||||
log.Info("executeCommandInK8sPod: 命令执行成功, stdout: %s", stdout.String())
|
||||
return nil
|
||||
}