Compare commits

12 Commits

v0.260.2 ... nektos/v0.

| Author | SHA1 | Date |
|---|---|---|
|  | 361b7e9f1a |  |
|  | f825e42ce2 |  |
|  | d9a19c8b02 |  |
|  | 3949d74af5 |  |
|  | b9382a2c4e |  |
|  | f56dd65ff6 |  |
|  | 069720abff |  |
|  | 8c83d57212 |  |
|  | 119ceb81d9 |  |
|  | 352ad41ad2 |  |
|  | 75e4ad93f4 |  |
|  | 934b13a7a1 |  |
@@ -1,21 +0,0 @@
name: checks
on:
  - push
  - pull_request

jobs:
  lint:
    name: check and test
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'
      - name: vet checks
        run: go vet -v ./...
      - name: build
        run: go build -v ./...
      - name: test
        run: | # Test only the new packages in this fork. Add more packages as needed.
          go test -v ./pkg/jobparser
1 .gitignore vendored

@@ -31,4 +31,3 @@ coverage.txt

# megalinter
report/
act

@@ -26,7 +26,7 @@

## Images based on [`actions/virtual-environments`][gh/actions/virtual-environments]

**Note: `nektos/act-environments-ubuntu` have been last updated in February, 2020. It's recommended to update the image manually after `docker pull` if you decide to to use it.**
**Note: `nektos/act-environments-ubuntu` have been last updated in February, 2020. It's recommended to update the image manually after `docker pull` if you decide to use it.**

| Image | Size | GitHub Repository |
| --- | --- | --- |

1 LICENSE

@@ -1,6 +1,5 @@
MIT License

Copyright (c) 2022 The Gitea Authors
Copyright (c) 2019

Permission is hereby granted, free of charge, to any person obtaining a copy

25 README.md

@@ -1,28 +1,3 @@
## Forking rules

This is a custom fork of [nektos/act](https://github.com/nektos/act/), for the purpose of serving [act_runner](https://gitea.com/gitea/act_runner).

It can no longer be used as a command-line tool, only as a library.

It's a soft fork, which means it will track the latest release of nektos/act.

Branches:

- `main`: default branch, contains custom changes, based on the latest release (not the latest of the master branch) of nektos/act.
- `nektos/master`: mirror for the master branch of nektos/act.

Tags:

- `nektos/vX.Y.Z`: mirror for `vX.Y.Z` of [nektos/act](https://github.com/nektos/act/).
- `vX.YZ.*`: based on `nektos/vX.Y.Z`, contains custom changes.
  - Examples:
    - `nektos/v0.2.23` -> `v0.223.*`
    - `nektos/v0.3.1` -> `v0.301.*`, not ~~`v0.31.*`~~
    - `nektos/v0.10.1` -> `v0.1001.*`, not ~~`v0.101.*`~~
    - `nektos/v0.3.100` -> not ~~`v0.3100.*`~~; I don't think this will really happen, but if it does, we can find a way to handle it.
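The tag mapping above boils down to appending the upstream patch number, zero-padded to two digits, to the upstream minor version. A minimal sketch of that rule (the helper name and the padding assumption are ours, not part of the repository):

```go
package main

import "fmt"

// forkTagPrefix derives the fork's version prefix from an upstream nektos/act
// version, following the mapping described in the README above.
func forkTagPrefix(upstreamMajor, upstreamMinor, upstreamPatch int) string {
	return fmt.Sprintf("v%d.%d%02d", upstreamMajor, upstreamMinor, upstreamPatch)
}

func main() {
	fmt.Println(forkTagPrefix(0, 2, 23)) // v0.223  (from nektos/v0.2.23)
	fmt.Println(forkTagPrefix(0, 3, 1))  // v0.301  (from nektos/v0.3.1)
	fmt.Println(forkTagPrefix(0, 10, 1)) // v0.1001 (from nektos/v0.10.1)
}
```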

---



# Overview [](https://github.com/nektos/act/actions) [](https://gitter.im/nektos/act?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [](https://goreportcard.com/report/github.com/nektos/act) [](https://github.com/jonico/awesome-runners)

11 go.mod

@@ -10,11 +10,10 @@ require (
github.com/creack/pty v1.1.21
github.com/docker/cli v24.0.7+incompatible
github.com/docker/distribution v2.8.3+incompatible
github.com/docker/docker v24.0.7+incompatible // 24.0 branch
github.com/docker/docker v24.0.9+incompatible // 24.0 branch
github.com/docker/go-connections v0.4.0
github.com/go-git/go-billy/v5 v5.5.0
github.com/go-git/go-git/v5 v5.11.0
github.com/gobwas/glob v0.2.3
github.com/imdario/mergo v0.3.16
github.com/joho/godotenv v1.5.1
github.com/julienschmidt/httprouter v1.3.0
@@ -30,10 +29,10 @@ require (
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cobra v1.8.0
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.8.4
github.com/stretchr/testify v1.9.0
github.com/timshannon/bolthold v0.0.0-20210913165410-232392fc8a6a
go.etcd.io/bbolt v1.3.9
golang.org/x/term v0.17.0
golang.org/x/term v0.18.0
gopkg.in/yaml.v3 v3.0.1
gotest.tools/v3 v3.5.1
)
@@ -74,7 +73,7 @@ require (
github.com/robfig/cron/v3 v3.0.1 // indirect
github.com/sergi/go-diff v1.2.0 // indirect
github.com/skeema/knownhosts v1.2.1 // indirect
github.com/stretchr/objx v0.5.0 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
@@ -83,7 +82,7 @@ require (
golang.org/x/mod v0.12.0 // indirect
golang.org/x/net v0.19.0 // indirect
golang.org/x/sync v0.6.0 // indirect
golang.org/x/sys v0.17.0 // indirect
golang.org/x/sys v0.18.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/tools v0.13.0 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect

25
go.sum
25
go.sum
@@ -42,8 +42,8 @@ github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1x
|
||||
github.com/docker/cli v24.0.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
|
||||
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM=
|
||||
github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v24.0.9+incompatible h1:HPGzNmwfLZWdxHqK9/II92pyi1EpYKsAqcl4G0Of9v0=
|
||||
github.com/docker/docker v24.0.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A=
|
||||
github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
@@ -63,8 +63,6 @@ github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgF
|
||||
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
|
||||
github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4=
|
||||
github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY=
|
||||
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
|
||||
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
@@ -165,18 +163,15 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/timshannon/bolthold v0.0.0-20210913165410-232392fc8a6a h1:oIi7H/bwFUYKYhzKbHc+3MvHRWqhQwXVB4LweLMiVy0=
|
||||
github.com/timshannon/bolthold v0.0.0-20210913165410-232392fc8a6a/go.mod h1:iSvujNDmpZ6eQX+bg/0X3lF7LEmZ8N77g2a/J/+Zt2U=
|
||||
github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
|
||||
@@ -250,15 +245,15 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
|
||||
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
|
||||
golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
|
||||
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
|
||||
pkg/artifactcache/handler.go

@@ -35,7 +35,7 @@ type Handler struct {
server *http.Server
logger logrus.FieldLogger

gcing int32 // TODO: use atomic.Bool when we can use Go 1.19
gcing atomic.Bool
gcAt time.Time

outboundIP string
@@ -170,7 +170,7 @@ func (h *Handler) find(w http.ResponseWriter, r *http.Request, _ httprouter.Para
}
defer db.Close()

cache, err := h.findCache(db, keys, version)
cache, err := findCache(db, keys, version)
if err != nil {
h.responseJSON(w, r, 500, err)
return
@@ -206,32 +206,17 @@ func (h *Handler) reserve(w http.ResponseWriter, r *http.Request, _ httprouter.P
api.Key = strings.ToLower(api.Key)

cache := api.ToCache()
cache.FillKeyVersionHash()
db, err := h.openDB()
if err != nil {
h.responseJSON(w, r, 500, err)
return
}
defer db.Close()
if err := db.FindOne(cache, bolthold.Where("KeyVersionHash").Eq(cache.KeyVersionHash)); err != nil {
if !errors.Is(err, bolthold.ErrNotFound) {
h.responseJSON(w, r, 500, err)
return
}
} else {
h.responseJSON(w, r, 400, fmt.Errorf("already exist"))
return
}

now := time.Now().Unix()
cache.CreatedAt = now
cache.UsedAt = now
if err := db.Insert(bolthold.NextSequence(), cache); err != nil {
h.responseJSON(w, r, 500, err)
return
}
// write back id to db
if err := db.Update(cache.ID, cache); err != nil {
if err := insertCache(db, cache); err != nil {
h.responseJSON(w, r, 500, err)
return
}
@@ -364,56 +349,51 @@ func (h *Handler) middleware(handler httprouter.Handle) httprouter.Handle {
}

// if not found, return (nil, nil) instead of an error.
func (h *Handler) findCache(db *bolthold.Store, keys []string, version string) (*Cache, error) {
if len(keys) == 0 {
return nil, nil
}
key := keys[0] // the first key is for exact match.

cache := &Cache{
Key: key,
Version: version,
}
cache.FillKeyVersionHash()

if err := db.FindOne(cache, bolthold.Where("KeyVersionHash").Eq(cache.KeyVersionHash)); err != nil {
if !errors.Is(err, bolthold.ErrNotFound) {
return nil, err
func findCache(db *bolthold.Store, keys []string, version string) (*Cache, error) {
cache := &Cache{}
for _, prefix := range keys {
// if a key in the list matches exactly, don't return partial matches
if err := db.FindOne(cache,
bolthold.Where("Key").Eq(prefix).
And("Version").Eq(version).
And("Complete").Eq(true).
SortBy("CreatedAt").Reverse()); err == nil || !errors.Is(err, bolthold.ErrNotFound) {
if err != nil {
return nil, fmt.Errorf("find cache: %w", err)
}
return cache, nil
}
} else if cache.Complete {
return cache, nil
}
stop := fmt.Errorf("stop")

for _, prefix := range keys[1:] {
found := false
prefixPattern := fmt.Sprintf("^%s", regexp.QuoteMeta(prefix))
re, err := regexp.Compile(prefixPattern)
if err != nil {
continue
}
if err := db.ForEach(bolthold.Where("Key").RegExp(re).And("Version").Eq(version).SortBy("CreatedAt").Reverse(), func(v *Cache) error {
if !strings.HasPrefix(v.Key, prefix) {
return stop
}
if v.Complete {
cache = v
found = true
return stop
}
return nil
}); err != nil {
if !errors.Is(err, stop) {
return nil, err
if err := db.FindOne(cache,
bolthold.Where("Key").RegExp(re).
And("Version").Eq(version).
And("Complete").Eq(true).
SortBy("CreatedAt").Reverse()); err != nil {
if errors.Is(err, bolthold.ErrNotFound) {
continue
}
return nil, fmt.Errorf("find cache: %w", err)
}
if found {
return cache, nil
}
return cache, nil
}
return nil, nil
}

func insertCache(db *bolthold.Store, cache *Cache) error {
if err := db.Insert(bolthold.NextSequence(), cache); err != nil {
return fmt.Errorf("insert cache: %w", err)
}
// write back id to db
if err := db.Update(cache.ID, cache); err != nil {
return fmt.Errorf("write back id to db: %w", err)
}
return nil
}

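The rewritten findCache above walks the requested keys in order and, for each one, prefers the newest complete exact match before falling back to the newest complete prefix match, returning (nil, nil) when nothing matches. A standalone sketch of that preference rule over an in-memory slice (illustration only; the handler itself queries bolthold as shown above):

```go
package main

import (
	"fmt"
	"strings"
)

type entry struct {
	Key       string
	Complete  bool
	CreatedAt int64
}

// pick walks the requested keys in order; for each key it prefers the newest
// complete exact match, then the newest complete prefix match.
func pick(entries []entry, requested []string) *entry {
	for _, want := range requested {
		var exact, prefix *entry
		for i := range entries {
			e := &entries[i]
			if !e.Complete || !strings.HasPrefix(e.Key, want) {
				continue
			}
			if e.Key == want {
				if exact == nil || e.CreatedAt > exact.CreatedAt {
					exact = e
				}
			} else if prefix == nil || e.CreatedAt > prefix.CreatedAt {
				prefix = e
			}
		}
		if exact != nil {
			return exact
		}
		if prefix != nil {
			return prefix
		}
	}
	return nil // mirrors findCache returning (nil, nil) on no match
}

func main() {
	entries := []entry{
		{Key: "key_a", Complete: true, CreatedAt: 1},
		{Key: "key_a_b", Complete: true, CreatedAt: 3},
		{Key: "key_a_b_c", Complete: true, CreatedAt: 2},
	}
	fmt.Println(pick(entries, []string{"key_a_b_x", "key_a_b"}).Key) // key_a_b (exact match after key_a_b_x misses)
	fmt.Println(pick(entries, []string{"key_a", "key_a_b"}).Key)     // key_a (exact match for the first requested key)
}
```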
func (h *Handler) useCache(id int64) {
db, err := h.openDB()
if err != nil {
@@ -428,14 +408,21 @@ func (h *Handler) useCache(id int64) {
_ = db.Update(cache.ID, cache)
}

const (
keepUsed = 30 * 24 * time.Hour
keepUnused = 7 * 24 * time.Hour
keepTemp = 5 * time.Minute
keepOld = 5 * time.Minute
)

func (h *Handler) gcCache() {
if atomic.LoadInt32(&h.gcing) != 0 {
if h.gcing.Load() {
return
}
if !atomic.CompareAndSwapInt32(&h.gcing, 0, 1) {
if !h.gcing.CompareAndSwap(false, true) {
return
}
defer atomic.StoreInt32(&h.gcing, 0)
defer h.gcing.Store(false)

if time.Since(h.gcAt) < time.Hour {
h.logger.Debugf("skip gc: %v", h.gcAt.String())
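The gcing field changes from a manually managed int32 to sync/atomic's atomic.Bool (available since Go 1.19), which the gcCache hunk above uses as a run-once-at-a-time guard. A minimal sketch of that guard pattern:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// gcGuard shows the same guard shape: atomic.Bool replaces the old
// int32 + atomic.CompareAndSwapInt32 pattern.
type gcGuard struct {
	gcing atomic.Bool
}

// run executes job unless another collection is already in progress.
func (g *gcGuard) run(job func()) bool {
	if g.gcing.Load() {
		return false // someone else is already collecting
	}
	if !g.gcing.CompareAndSwap(false, true) {
		return false // lost the race
	}
	defer g.gcing.Store(false)
	job()
	return true
}

func main() {
	var g gcGuard
	fmt.Println(g.run(func() { fmt.Println("gc") })) // prints "gc" then "true"
}
```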
@@ -444,37 +431,18 @@ func (h *Handler) gcCache() {
h.gcAt = time.Now()
h.logger.Debugf("gc: %v", h.gcAt.String())

const (
keepUsed = 30 * 24 * time.Hour
keepUnused = 7 * 24 * time.Hour
keepTemp = 5 * time.Minute
)

db, err := h.openDB()
if err != nil {
return
}
defer db.Close()

// Remove the caches which are not completed for a while, they are most likely to be broken.
var caches []*Cache
if err := db.Find(&caches, bolthold.Where("UsedAt").Lt(time.Now().Add(-keepTemp).Unix())); err != nil {
h.logger.Warnf("find caches: %v", err)
} else {
for _, cache := range caches {
if cache.Complete {
continue
}
h.storage.Remove(cache.ID)
if err := db.Delete(cache.ID, cache); err != nil {
h.logger.Warnf("delete cache: %v", err)
continue
}
h.logger.Infof("deleted cache: %+v", cache)
}
}

caches = caches[:0]
if err := db.Find(&caches, bolthold.Where("UsedAt").Lt(time.Now().Add(-keepUnused).Unix())); err != nil {
if err := db.Find(&caches, bolthold.
Where("UsedAt").Lt(time.Now().Add(-keepTemp).Unix()).
And("Complete").Eq(false),
); err != nil {
h.logger.Warnf("find caches: %v", err)
} else {
for _, cache := range caches {
@@ -487,8 +455,11 @@ func (h *Handler) gcCache() {
}
}

// Remove the old caches which have not been used recently.
caches = caches[:0]
if err := db.Find(&caches, bolthold.Where("CreatedAt").Lt(time.Now().Add(-keepUsed).Unix())); err != nil {
if err := db.Find(&caches, bolthold.
Where("UsedAt").Lt(time.Now().Add(-keepUnused).Unix()),
); err != nil {
h.logger.Warnf("find caches: %v", err)
} else {
for _, cache := range caches {
@@ -500,6 +471,55 @@ func (h *Handler) gcCache() {
h.logger.Infof("deleted cache: %+v", cache)
}
}

// Remove the old caches which are too old.
caches = caches[:0]
if err := db.Find(&caches, bolthold.
Where("CreatedAt").Lt(time.Now().Add(-keepUsed).Unix()),
); err != nil {
h.logger.Warnf("find caches: %v", err)
} else {
for _, cache := range caches {
h.storage.Remove(cache.ID)
if err := db.Delete(cache.ID, cache); err != nil {
h.logger.Warnf("delete cache: %v", err)
continue
}
h.logger.Infof("deleted cache: %+v", cache)
}
}

// Remove the old caches with the same key and version, keep the latest one.
// Also keep the olds which have been used recently for a while in case of the cache is still in use.
if results, err := db.FindAggregate(
&Cache{},
bolthold.Where("Complete").Eq(true),
"Key", "Version",
); err != nil {
h.logger.Warnf("find aggregate caches: %v", err)
} else {
for _, result := range results {
if result.Count() <= 1 {
continue
}
result.Sort("CreatedAt")
caches = caches[:0]
result.Reduction(&caches)
for _, cache := range caches[:len(caches)-1] {
if time.Since(time.Unix(cache.UsedAt, 0)) < keepOld {
// Keep it since it has been used recently, even if it's old.
// Or it could break downloading in process.
continue
}
h.storage.Remove(cache.ID)
if err := db.Delete(cache.ID, cache); err != nil {
h.logger.Warnf("delete cache: %v", err)
continue
}
h.logger.Infof("deleted cache: %+v", cache)
}
}
}
}

func (h *Handler) responseJSON(w http.ResponseWriter, r *http.Request, code int, v ...any) {

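Taken together, the gcCache hunks enforce three age-based rules plus a duplicate-pruning pass: incomplete caches are dropped once unused for keepTemp, complete caches are dropped after keepUnused without use or keepUsed since creation, and older duplicates of the same key and version are dropped unless they were used within keepOld. A hypothetical helper (ours, not part of the diff) restating the three age-based rules; the duplicate-pruning pass is omitted:

```go
package main

import (
	"fmt"
	"time"
)

// Same values as the constants in the hunks above.
const (
	keepUsed   = 30 * 24 * time.Hour // complete caches older than this are dropped
	keepUnused = 7 * 24 * time.Hour  // complete caches unused for this long are dropped
	keepTemp   = 5 * time.Minute     // incomplete caches are dropped after this
)

type cacheInfo struct {
	Complete  bool
	UsedAt    time.Time
	CreatedAt time.Time
}

// shouldRemove restates the three age-based GC rules for a single entry.
func shouldRemove(c cacheInfo, now time.Time) bool {
	if !c.Complete {
		return now.Sub(c.UsedAt) > keepTemp
	}
	return now.Sub(c.UsedAt) > keepUnused || now.Sub(c.CreatedAt) > keepUsed
}

func main() {
	now := time.Now()
	stale := cacheInfo{Complete: true, UsedAt: now.Add(-8 * 24 * time.Hour), CreatedAt: now.Add(-9 * 24 * time.Hour)}
	fmt.Println(shouldRemove(stale, now)) // true: unused for longer than keepUnused
}
```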
@@ -10,9 +10,11 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/timshannon/bolthold"
|
||||
"go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
@@ -78,6 +80,9 @@ func TestHandler(t *testing.T) {
|
||||
t.Run("duplicate reserve", func(t *testing.T) {
|
||||
key := strings.ToLower(t.Name())
|
||||
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
|
||||
var first, second struct {
|
||||
CacheID uint64 `json:"cacheId"`
|
||||
}
|
||||
{
|
||||
body, err := json.Marshal(&Request{
|
||||
Key: key,
|
||||
@@ -89,10 +94,8 @@ func TestHandler(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 200, resp.StatusCode)
|
||||
|
||||
got := struct {
|
||||
CacheID uint64 `json:"cacheId"`
|
||||
}{}
|
||||
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
|
||||
require.NoError(t, json.NewDecoder(resp.Body).Decode(&first))
|
||||
assert.NotZero(t, first.CacheID)
|
||||
}
|
||||
{
|
||||
body, err := json.Marshal(&Request{
|
||||
@@ -103,8 +106,13 @@ func TestHandler(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 400, resp.StatusCode)
|
||||
assert.Equal(t, 200, resp.StatusCode)
|
||||
|
||||
require.NoError(t, json.NewDecoder(resp.Body).Decode(&second))
|
||||
assert.NotZero(t, second.CacheID)
|
||||
}
|
||||
|
||||
assert.NotEqual(t, first.CacheID, second.CacheID)
|
||||
})
|
||||
|
||||
t.Run("upload with bad id", func(t *testing.T) {
|
||||
@@ -341,9 +349,9 @@ func TestHandler(t *testing.T) {
|
||||
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
|
||||
key := strings.ToLower(t.Name())
|
||||
keys := [3]string{
|
||||
key + "_a",
|
||||
key + "_a_b",
|
||||
key + "_a_b_c",
|
||||
key + "_a_b",
|
||||
key + "_a",
|
||||
}
|
||||
contents := [3][]byte{
|
||||
make([]byte, 100),
|
||||
@@ -354,6 +362,7 @@ func TestHandler(t *testing.T) {
|
||||
_, err := rand.Read(contents[i])
|
||||
require.NoError(t, err)
|
||||
uploadCacheNormally(t, base, keys[i], version, contents[i])
|
||||
time.Sleep(time.Second) // ensure CreatedAt of caches are different
|
||||
}
|
||||
|
||||
reqKeys := strings.Join([]string{
|
||||
@@ -361,29 +370,33 @@ func TestHandler(t *testing.T) {
|
||||
key + "_a_b",
|
||||
key + "_a",
|
||||
}, ",")
|
||||
var archiveLocation string
|
||||
{
|
||||
resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKeys, version))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 200, resp.StatusCode)
|
||||
got := struct {
|
||||
Result string `json:"result"`
|
||||
ArchiveLocation string `json:"archiveLocation"`
|
||||
CacheKey string `json:"cacheKey"`
|
||||
}{}
|
||||
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
|
||||
assert.Equal(t, "hit", got.Result)
|
||||
assert.Equal(t, keys[1], got.CacheKey)
|
||||
archiveLocation = got.ArchiveLocation
|
||||
}
|
||||
{
|
||||
resp, err := http.Get(archiveLocation) //nolint:gosec
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 200, resp.StatusCode)
|
||||
got, err := io.ReadAll(resp.Body)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, contents[1], got)
|
||||
}
|
||||
|
||||
resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKeys, version))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 200, resp.StatusCode)
|
||||
|
||||
/*
|
||||
Expect `key_a_b` because:
|
||||
- `key_a_b_x` doesn't match any caches.
|
||||
- `key_a_b` matches `key_a_b` and `key_a_b_c`, but `key_a_b` is newer.
|
||||
*/
|
||||
except := 1
|
||||
|
||||
got := struct {
|
||||
Result string `json:"result"`
|
||||
ArchiveLocation string `json:"archiveLocation"`
|
||||
CacheKey string `json:"cacheKey"`
|
||||
}{}
|
||||
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
|
||||
assert.Equal(t, "hit", got.Result)
|
||||
assert.Equal(t, keys[except], got.CacheKey)
|
||||
|
||||
contentResp, err := http.Get(got.ArchiveLocation)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 200, contentResp.StatusCode)
|
||||
content, err := io.ReadAll(contentResp.Body)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, contents[except], content)
|
||||
})
|
||||
|
||||
t.Run("case insensitive", func(t *testing.T) {
|
||||
@@ -409,6 +422,110 @@ func TestHandler(t *testing.T) {
|
||||
assert.Equal(t, key+"_abc", got.CacheKey)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("exact keys are preferred (key 0)", func(t *testing.T) {
|
||||
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
|
||||
key := strings.ToLower(t.Name())
|
||||
keys := [3]string{
|
||||
key + "_a",
|
||||
key + "_a_b_c",
|
||||
key + "_a_b",
|
||||
}
|
||||
contents := [3][]byte{
|
||||
make([]byte, 100),
|
||||
make([]byte, 200),
|
||||
make([]byte, 300),
|
||||
}
|
||||
for i := range contents {
|
||||
_, err := rand.Read(contents[i])
|
||||
require.NoError(t, err)
|
||||
uploadCacheNormally(t, base, keys[i], version, contents[i])
|
||||
time.Sleep(time.Second) // ensure CreatedAt of caches are different
|
||||
}
|
||||
|
||||
reqKeys := strings.Join([]string{
|
||||
key + "_a",
|
||||
key + "_a_b",
|
||||
}, ",")
|
||||
|
||||
resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKeys, version))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 200, resp.StatusCode)
|
||||
|
||||
/*
|
||||
Expect `key_a` because:
|
||||
- `key_a` matches `key_a`, `key_a_b` and `key_a_b_c`, but `key_a` is an exact match.
|
||||
- `key_a_b` matches `key_a_b` and `key_a_b_c`, but previous key had a match
|
||||
*/
|
||||
expect := 0
|
||||
|
||||
got := struct {
|
||||
ArchiveLocation string `json:"archiveLocation"`
|
||||
CacheKey string `json:"cacheKey"`
|
||||
}{}
|
||||
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
|
||||
assert.Equal(t, keys[expect], got.CacheKey)
|
||||
|
||||
contentResp, err := http.Get(got.ArchiveLocation)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 200, contentResp.StatusCode)
|
||||
content, err := io.ReadAll(contentResp.Body)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, contents[expect], content)
|
||||
})
|
||||
|
||||
t.Run("exact keys are preferred (key 1)", func(t *testing.T) {
|
||||
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
|
||||
key := strings.ToLower(t.Name())
|
||||
keys := [3]string{
|
||||
key + "_a",
|
||||
key + "_a_b_c",
|
||||
key + "_a_b",
|
||||
}
|
||||
contents := [3][]byte{
|
||||
make([]byte, 100),
|
||||
make([]byte, 200),
|
||||
make([]byte, 300),
|
||||
}
|
||||
for i := range contents {
|
||||
_, err := rand.Read(contents[i])
|
||||
require.NoError(t, err)
|
||||
uploadCacheNormally(t, base, keys[i], version, contents[i])
|
||||
time.Sleep(time.Second) // ensure CreatedAt of caches are different
|
||||
}
|
||||
|
||||
reqKeys := strings.Join([]string{
|
||||
"------------------------------------------------------",
|
||||
key + "_a",
|
||||
key + "_a_b",
|
||||
}, ",")
|
||||
|
||||
resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKeys, version))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 200, resp.StatusCode)
|
||||
|
||||
/*
|
||||
Expect `key_a` because:
|
||||
- `------------------------------------------------------` doesn't match any caches.
|
||||
- `key_a` matches `key_a`, `key_a_b` and `key_a_b_c`, but `key_a` is an exact match.
|
||||
- `key_a_b` matches `key_a_b` and `key_a_b_c`, but previous key had a match
|
||||
*/
|
||||
expect := 0
|
||||
|
||||
got := struct {
|
||||
ArchiveLocation string `json:"archiveLocation"`
|
||||
CacheKey string `json:"cacheKey"`
|
||||
}{}
|
||||
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
|
||||
assert.Equal(t, keys[expect], got.CacheKey)
|
||||
|
||||
contentResp, err := http.Get(got.ArchiveLocation)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 200, contentResp.StatusCode)
|
||||
content, err := io.ReadAll(contentResp.Body)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, contents[expect], content)
|
||||
})
|
||||
}
|
||||
|
||||
func uploadCacheNormally(t *testing.T, base, key, version string, content []byte) {
|
||||
@@ -469,3 +586,112 @@ func uploadCacheNormally(t *testing.T, base, key, version string, content []byte
|
||||
assert.Equal(t, content, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandler_gcCache(t *testing.T) {
|
||||
dir := filepath.Join(t.TempDir(), "artifactcache")
|
||||
handler, err := StartHandler(dir, "", 0, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
defer func() {
|
||||
require.NoError(t, handler.Close())
|
||||
}()
|
||||
|
||||
now := time.Now()
|
||||
|
||||
cases := []struct {
|
||||
Cache *Cache
|
||||
Kept bool
|
||||
}{
|
||||
{
|
||||
// should be kept, since it's used recently and not too old.
|
||||
Cache: &Cache{
|
||||
Key: "test_key_1",
|
||||
Version: "test_version",
|
||||
Complete: true,
|
||||
UsedAt: now.Unix(),
|
||||
CreatedAt: now.Add(-time.Hour).Unix(),
|
||||
},
|
||||
Kept: true,
|
||||
},
|
||||
{
|
||||
// should be removed, since it's not complete and not used for a while.
|
||||
Cache: &Cache{
|
||||
Key: "test_key_2",
|
||||
Version: "test_version",
|
||||
Complete: false,
|
||||
UsedAt: now.Add(-(keepTemp + time.Second)).Unix(),
|
||||
CreatedAt: now.Add(-(keepTemp + time.Hour)).Unix(),
|
||||
},
|
||||
Kept: false,
|
||||
},
|
||||
{
|
||||
// should be removed, since it's not used for a while.
|
||||
Cache: &Cache{
|
||||
Key: "test_key_3",
|
||||
Version: "test_version",
|
||||
Complete: true,
|
||||
UsedAt: now.Add(-(keepUnused + time.Second)).Unix(),
|
||||
CreatedAt: now.Add(-(keepUnused + time.Hour)).Unix(),
|
||||
},
|
||||
Kept: false,
|
||||
},
|
||||
{
|
||||
// should be removed, since it's used but too old.
|
||||
Cache: &Cache{
|
||||
Key: "test_key_3",
|
||||
Version: "test_version",
|
||||
Complete: true,
|
||||
UsedAt: now.Unix(),
|
||||
CreatedAt: now.Add(-(keepUsed + time.Second)).Unix(),
|
||||
},
|
||||
Kept: false,
|
||||
},
|
||||
{
|
||||
// should be kept, since it has a newer edition but be used recently.
|
||||
Cache: &Cache{
|
||||
Key: "test_key_1",
|
||||
Version: "test_version",
|
||||
Complete: true,
|
||||
UsedAt: now.Add(-(keepOld - time.Minute)).Unix(),
|
||||
CreatedAt: now.Add(-(time.Hour + time.Second)).Unix(),
|
||||
},
|
||||
Kept: true,
|
||||
},
|
||||
{
|
||||
// should be removed, since it has a newer edition and not be used recently.
|
||||
Cache: &Cache{
|
||||
Key: "test_key_1",
|
||||
Version: "test_version",
|
||||
Complete: true,
|
||||
UsedAt: now.Add(-(keepOld + time.Second)).Unix(),
|
||||
CreatedAt: now.Add(-(time.Hour + time.Second)).Unix(),
|
||||
},
|
||||
Kept: false,
|
||||
},
|
||||
}
|
||||
|
||||
db, err := handler.openDB()
|
||||
require.NoError(t, err)
|
||||
for _, c := range cases {
|
||||
require.NoError(t, insertCache(db, c.Cache))
|
||||
}
|
||||
require.NoError(t, db.Close())
|
||||
|
||||
handler.gcAt = time.Time{} // ensure gcCache will not skip
|
||||
handler.gcCache()
|
||||
|
||||
db, err = handler.openDB()
|
||||
require.NoError(t, err)
|
||||
for i, v := range cases {
|
||||
t.Run(fmt.Sprintf("%d_%s", i, v.Cache.Key), func(t *testing.T) {
|
||||
cache := &Cache{}
|
||||
err = db.Get(v.Cache.ID, cache)
|
||||
if v.Kept {
|
||||
assert.NoError(t, err)
|
||||
} else {
|
||||
assert.ErrorIs(t, err, bolthold.ErrNotFound)
|
||||
}
|
||||
})
|
||||
}
|
||||
require.NoError(t, db.Close())
|
||||
}
|
||||
|
||||
pkg/artifactcache/model.go

@@ -1,10 +1,5 @@
package artifactcache

import (
"crypto/sha256"
"fmt"
)

type Request struct {
Key string `json:"key" `
Version string `json:"version"`
@@ -29,16 +24,11 @@ func (c *Request) ToCache() *Cache {
}

type Cache struct {
ID uint64 `json:"id" boltholdKey:"ID"`
Key string `json:"key" boltholdIndex:"Key"`
Version string `json:"version" boltholdIndex:"Version"`
KeyVersionHash string `json:"keyVersionHash" boltholdUnique:"KeyVersionHash"`
Size int64 `json:"cacheSize"`
Complete bool `json:"complete"`
UsedAt int64 `json:"usedAt" boltholdIndex:"UsedAt"`
CreatedAt int64 `json:"createdAt" boltholdIndex:"CreatedAt"`
}

func (c *Cache) FillKeyVersionHash() {
c.KeyVersionHash = fmt.Sprintf("%x", sha256.Sum256([]byte(fmt.Sprintf("%s:%s", c.Key, c.Version))))
ID uint64 `json:"id" boltholdKey:"ID"`
Key string `json:"key" boltholdIndex:"Key"`
Version string `json:"version" boltholdIndex:"Version"`
Size int64 `json:"cacheSize"`
Complete bool `json:"complete" boltholdIndex:"Complete"`
UsedAt int64 `json:"usedAt" boltholdIndex:"UsedAt"`
CreatedAt int64 `json:"createdAt" boltholdIndex:"CreatedAt"`
}

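The Cache model drops the KeyVersionHash field and its unique index, so reserving the same key and version twice now creates two rows with distinct IDs (which is what the updated "duplicate reserve" test expects), and Complete gains an index for the rewritten findCache to filter on. The helper below is ours, added only to illustrate the query shape against the new indexes; it assumes it sits next to the Cache type shown above and uses only bolthold calls that appear in the hunks:

```go
package artifactcache

import (
	"errors"

	"github.com/timshannon/bolthold"
)

// latestComplete returns the newest complete cache for an exact key/version,
// or nil when none exists. Illustrative only; not part of the diff.
func latestComplete(db *bolthold.Store, key, version string) (*Cache, error) {
	cache := &Cache{}
	err := db.FindOne(cache,
		bolthold.Where("Key").Eq(key).
			And("Version").Eq(version).
			And("Complete").Eq(true).
			SortBy("CreatedAt").Reverse())
	if errors.Is(err, bolthold.ErrNotFound) {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return cache, nil
}
```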
@@ -97,7 +97,7 @@ func NewParallelExecutor(parallel int, executors ...Executor) Executor {
|
||||
errs := make(chan error, len(executors))
|
||||
|
||||
if 1 > parallel {
|
||||
log.Infof("Parallel tasks (%d) below minimum, setting to 1", parallel)
|
||||
log.Debugf("Parallel tasks (%d) below minimum, setting to 1", parallel)
|
||||
parallel = 1
|
||||
}
|
||||
|
||||
|
||||
@@ -226,9 +226,6 @@ type NewGitCloneExecutorInput struct {
|
||||
Dir string
|
||||
Token string
|
||||
OfflineMode bool
|
||||
|
||||
// For Gitea
|
||||
InsecureSkipTLS bool
|
||||
}
|
||||
|
||||
// CloneIfRequired ...
|
||||
@@ -250,8 +247,6 @@ func CloneIfRequired(ctx context.Context, refName plumbing.ReferenceName, input
|
||||
cloneOptions := git.CloneOptions{
|
||||
URL: input.URL,
|
||||
Progress: progressWriter,
|
||||
|
||||
InsecureSkipTLS: input.InsecureSkipTLS, // For Gitea
|
||||
}
|
||||
if input.Token != "" {
|
||||
cloneOptions.Auth = &http.BasicAuth{
|
||||
@@ -313,11 +308,6 @@ func NewGitCloneExecutor(input NewGitCloneExecutorInput) common.Executor {
|
||||
// fetch latest changes
|
||||
fetchOptions, pullOptions := gitOptions(input.Token)
|
||||
|
||||
if input.InsecureSkipTLS { // For Gitea
|
||||
fetchOptions.InsecureSkipTLS = true
|
||||
pullOptions.InsecureSkipTLS = true
|
||||
}
|
||||
|
||||
if !isOfflineMode {
|
||||
err = r.Fetch(&fetchOptions)
|
||||
if err != nil && !errors.Is(err, git.NoErrAlreadyUpToDate) {
|
||||
|
||||
@@ -25,24 +25,3 @@ func Logger(ctx context.Context) logrus.FieldLogger {
|
||||
func WithLogger(ctx context.Context, logger logrus.FieldLogger) context.Context {
|
||||
return context.WithValue(ctx, loggerContextKeyVal, logger)
|
||||
}
|
||||
|
||||
type loggerHookKey string
|
||||
|
||||
const loggerHookKeyVal = loggerHookKey("logrus.Hook")
|
||||
|
||||
// LoggerHook returns the appropriate logger hook for current context
|
||||
// the hook affects job logger, not global logger
|
||||
func LoggerHook(ctx context.Context) logrus.Hook {
|
||||
val := ctx.Value(loggerHookKeyVal)
|
||||
if val != nil {
|
||||
if hook, ok := val.(logrus.Hook); ok {
|
||||
return hook
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WithLoggerHook adds a value to the context for the logger hook
|
||||
func WithLoggerHook(ctx context.Context, hook logrus.Hook) context.Context {
|
||||
return context.WithValue(ctx, loggerHookKeyVal, hook)
|
||||
}
|
||||
|
||||
@@ -30,10 +30,6 @@ type NewContainerInput struct {
|
||||
NetworkAliases []string
|
||||
ExposedPorts nat.PortSet
|
||||
PortBindings nat.PortMap
|
||||
|
||||
// Gitea specific
|
||||
AutoRemove bool
|
||||
ValidVolumes []string
|
||||
}
|
||||
|
||||
// FileEntry is a file to copy to a container
|
||||
@@ -46,7 +42,6 @@ type FileEntry struct {
|
||||
// Container for managing docker run containers
|
||||
type Container interface {
|
||||
Create(capAdd []string, capDrop []string) common.Executor
|
||||
ConnectToNetwork(name string) common.Executor
|
||||
Copy(destPath string, files ...*FileEntry) common.Executor
|
||||
CopyTarStream(ctx context.Context, destPath string, tarStream io.Reader) error
|
||||
CopyDir(destPath string, srcPath string, useGitIgnore bool) common.Executor
|
||||
|
||||
@@ -17,19 +17,16 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/Masterminds/semver"
|
||||
"github.com/docker/cli/cli/compose/loader"
|
||||
"github.com/docker/cli/cli/connhelper"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/mount"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
networktypes "github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/docker/docker/pkg/stdcopy"
|
||||
"github.com/go-git/go-billy/v5/helper/polyfill"
|
||||
"github.com/go-git/go-billy/v5/osfs"
|
||||
"github.com/go-git/go-git/v5/plumbing/format/gitignore"
|
||||
"github.com/gobwas/glob"
|
||||
"github.com/imdario/mergo"
|
||||
"github.com/joho/godotenv"
|
||||
"github.com/kballard/go-shellquote"
|
||||
@@ -48,25 +45,6 @@ func NewContainer(input *NewContainerInput) ExecutionsEnvironment {
|
||||
return cr
|
||||
}
|
||||
|
||||
func (cr *containerReference) ConnectToNetwork(name string) common.Executor {
|
||||
return common.
|
||||
NewDebugExecutor("%sdocker network connect %s %s", logPrefix, name, cr.input.Name).
|
||||
Then(
|
||||
common.NewPipelineExecutor(
|
||||
cr.connect(),
|
||||
cr.connectToNetwork(name, cr.input.NetworkAliases),
|
||||
).IfNot(common.Dryrun),
|
||||
)
|
||||
}
|
||||
|
||||
func (cr *containerReference) connectToNetwork(name string, aliases []string) common.Executor {
|
||||
return func(ctx context.Context) error {
|
||||
return cr.cli.NetworkConnect(ctx, name, cr.input.Name, &networktypes.EndpointSettings{
|
||||
Aliases: aliases,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// supportsContainerImagePlatform returns true if the underlying Docker server
|
||||
// API version is 1.41 and beyond
|
||||
func supportsContainerImagePlatform(ctx context.Context, cli client.APIClient) bool {
|
||||
@@ -368,32 +346,12 @@ func (cr *containerReference) mergeContainerConfigs(ctx context.Context, config
|
||||
return nil, nil, fmt.Errorf("Cannot parse container options: '%s': '%w'", input.Options, err)
|
||||
}
|
||||
|
||||
// FIXME: If everything is fine after gitea/act v0.260.0, remove the following comment.
|
||||
// In the old fork version, the code is
|
||||
// if len(copts.netMode.Value()) == 0 {
|
||||
// if err = copts.netMode.Set("host"); err != nil {
|
||||
// return nil, nil, fmt.Errorf("Cannot parse networkmode=host. This is an internal error and should not happen: '%w'", err)
|
||||
// }
|
||||
// }
|
||||
// And it has been commented with:
|
||||
// If a service container's network is set to `host`, the container will not be able to
|
||||
// connect to the specified network created for the job container and the service containers.
|
||||
// So comment out the following code.
|
||||
// Not sure if it's necessary to comment it out in the new version,
|
||||
// since it's cr.input.NetworkMode now.
|
||||
|
||||
if len(copts.netMode.Value()) == 0 {
|
||||
if err = copts.netMode.Set(cr.input.NetworkMode); err != nil {
|
||||
return nil, nil, fmt.Errorf("Cannot parse networkmode=%s. This is an internal error and should not happen: '%w'", cr.input.NetworkMode, err)
|
||||
}
|
||||
}
|
||||
|
||||
// If the `privileged` config has been disabled, `copts.privileged` need to be forced to false,
|
||||
// even if the user specifies `--privileged` in the options string.
|
||||
if !hostConfig.Privileged {
|
||||
copts.privileged = false
|
||||
}
|
||||
|
||||
containerConfig, err := parse(flags, copts, runtime.GOOS)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("Cannot process container options: '%s': '%w'", input.Options, err)
|
||||
@@ -401,7 +359,7 @@ func (cr *containerReference) mergeContainerConfigs(ctx context.Context, config
|
||||
|
||||
logger.Debugf("Custom container.Config from options ==> %+v", containerConfig.Config)
|
||||
|
||||
err = mergo.Merge(config, containerConfig.Config, mergo.WithOverride, mergo.WithAppendSlice)
|
||||
err = mergo.Merge(config, containerConfig.Config, mergo.WithOverride)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("Cannot merge container.Config options: '%s': '%w'", input.Options, err)
|
||||
}
|
||||
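One side of this hunk passes mergo.WithAppendSlice when merging the user-supplied container options into the base container.Config, the other does not; the difference is whether slice fields (such as environment entries) are concatenated or replaced. A small illustration of the two behaviours (struct and values are ours):

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type cfg struct {
	Env []string
}

func main() {
	dst := cfg{Env: []string{"A=1"}}
	src := cfg{Env: []string{"B=2"}}

	// With WithAppendSlice, slice fields are concatenated.
	appended := dst
	_ = mergo.Merge(&appended, src, mergo.WithOverride, mergo.WithAppendSlice)
	fmt.Println(appended.Env) // expected: [A=1 B=2]

	// With WithOverride alone, a non-empty source slice replaces the destination.
	replaced := dst
	_ = mergo.Merge(&replaced, src, mergo.WithOverride)
	fmt.Println(replaced.Env) // expected: [B=2]
}
```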
@@ -413,17 +371,12 @@ func (cr *containerReference) mergeContainerConfigs(ctx context.Context, config
|
||||
hostConfig.Mounts = append(hostConfig.Mounts, containerConfig.HostConfig.Mounts...)
|
||||
binds := hostConfig.Binds
|
||||
mounts := hostConfig.Mounts
|
||||
networkMode := hostConfig.NetworkMode
|
||||
err = mergo.Merge(hostConfig, containerConfig.HostConfig, mergo.WithOverride)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("Cannot merge container.HostConfig options: '%s': '%w'", input.Options, err)
|
||||
}
|
||||
hostConfig.Binds = binds
|
||||
hostConfig.Mounts = mounts
|
||||
if len(copts.netMode.Value()) > 0 {
|
||||
logger.Warn("--network and --net in the options will be ignored.")
|
||||
}
|
||||
hostConfig.NetworkMode = networkMode
|
||||
logger.Debugf("Merged container.HostConfig ==> %+v", hostConfig)
|
||||
|
||||
return config, hostConfig, nil
|
||||
@@ -487,7 +440,6 @@ func (cr *containerReference) create(capAdd []string, capDrop []string) common.E
|
||||
Privileged: input.Privileged,
|
||||
UsernsMode: container.UsernsMode(input.UsernsMode),
|
||||
PortBindings: input.PortBindings,
|
||||
AutoRemove: input.AutoRemove,
|
||||
}
|
||||
logger.Debugf("Common container.HostConfig ==> %+v", hostConfig)
|
||||
|
||||
@@ -496,9 +448,6 @@ func (cr *containerReference) create(capAdd []string, capDrop []string) common.E
|
||||
return err
|
||||
}
|
||||
|
||||
// For Gitea
|
||||
config, hostConfig = cr.sanitizeConfig(ctx, config, hostConfig)
|
||||
|
||||
var networkingConfig *network.NetworkingConfig
|
||||
logger.Debugf("input.NetworkAliases ==> %v", input.NetworkAliases)
|
||||
n := hostConfig.NetworkMode
|
||||
@@ -730,7 +679,7 @@ func (cr *containerReference) CopyTarStream(ctx context.Context, destPath string
|
||||
tw := tar.NewWriter(buf)
|
||||
_ = tw.WriteHeader(&tar.Header{
|
||||
Name: destPath,
|
||||
Mode: 777,
|
||||
Mode: 0o777,
|
||||
Typeflag: tar.TypeDir,
|
||||
})
|
||||
tw.Close()
|
||||
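The tar header hunk replaces the decimal literal 777 with the octal literal 0o777: the decimal value encodes 0o1411, not the intended rwxrwxrwx permission bits. A two-line check:

```go
package main

import "fmt"

func main() {
	fmt.Printf("%o\n", 777)   // 1411 — not the intended permission bits
	fmt.Printf("%o\n", 0o777) // 777  — rwxrwxrwx
}
```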
@@ -927,63 +876,3 @@ func (cr *containerReference) wait() common.Executor {
|
||||
return fmt.Errorf("exit with `FAILURE`: %v", statusCode)
|
||||
}
|
||||
}
|
||||
|
||||
// For Gitea
|
||||
// sanitizeConfig remove the invalid configurations from `config` and `hostConfig`
|
||||
func (cr *containerReference) sanitizeConfig(ctx context.Context, config *container.Config, hostConfig *container.HostConfig) (*container.Config, *container.HostConfig) {
|
||||
logger := common.Logger(ctx)
|
||||
|
||||
if len(cr.input.ValidVolumes) > 0 {
|
||||
globs := make([]glob.Glob, 0, len(cr.input.ValidVolumes))
|
||||
for _, v := range cr.input.ValidVolumes {
|
||||
if g, err := glob.Compile(v); err != nil {
|
||||
logger.Errorf("create glob from %s error: %v", v, err)
|
||||
} else {
|
||||
globs = append(globs, g)
|
||||
}
|
||||
}
|
||||
isValid := func(v string) bool {
|
||||
for _, g := range globs {
|
||||
if g.Match(v) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
// sanitize binds
|
||||
sanitizedBinds := make([]string, 0, len(hostConfig.Binds))
|
||||
for _, bind := range hostConfig.Binds {
|
||||
parsed, err := loader.ParseVolume(bind)
|
||||
if err != nil {
|
||||
logger.Warnf("parse volume [%s] error: %v", bind, err)
|
||||
continue
|
||||
}
|
||||
if parsed.Source == "" {
|
||||
// anonymous volume
|
||||
sanitizedBinds = append(sanitizedBinds, bind)
|
||||
continue
|
||||
}
|
||||
if isValid(parsed.Source) {
|
||||
sanitizedBinds = append(sanitizedBinds, bind)
|
||||
} else {
|
||||
logger.Warnf("[%s] is not a valid volume, will be ignored", parsed.Source)
|
||||
}
|
||||
}
|
||||
hostConfig.Binds = sanitizedBinds
|
||||
// sanitize mounts
|
||||
sanitizedMounts := make([]mount.Mount, 0, len(hostConfig.Mounts))
|
||||
for _, mt := range hostConfig.Mounts {
|
||||
if isValid(mt.Source) {
|
||||
sanitizedMounts = append(sanitizedMounts, mt)
|
||||
} else {
|
||||
logger.Warnf("[%s] is not a valid volume, will be ignored", mt.Source)
|
||||
}
|
||||
}
|
||||
hostConfig.Mounts = sanitizedMounts
|
||||
} else {
|
||||
hostConfig.Binds = []string{}
|
||||
hostConfig.Mounts = []mount.Mount{}
|
||||
}
|
||||
|
||||
return config, hostConfig
|
||||
}
|
||||
|
||||
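sanitizeConfig keeps a bind or mount only when its source matches one of the configured ValidVolumes patterns, compiled with gobwas/glob; with no patterns configured, all binds and mounts are dropped. A compact sketch of that filter (helper name and sample values are ours):

```go
package main

import (
	"fmt"

	"github.com/gobwas/glob"
)

// allowed reports whether a volume source matches any ValidVolumes pattern.
func allowed(validVolumes []string, source string) bool {
	for _, v := range validVolumes {
		g, err := glob.Compile(v)
		if err != nil {
			continue // invalid patterns are skipped, as in sanitizeConfig
		}
		if g.Match(source) {
			return true
		}
	}
	return false
}

func main() {
	valid := []string{"shared_volume", "/etc/conf.d/*.json"}
	fmt.Println(allowed(valid, "/etc/conf.d/base.json")) // true
	fmt.Println(allowed(valid, "/secrets/keys"))         // false
}
```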
@@ -2,19 +2,17 @@ package container
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/nektos/act/pkg/common"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/sirupsen/logrus/hooks/test"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
)
|
||||
@@ -79,6 +77,11 @@ func (m *mockDockerClient) ContainerExecInspect(ctx context.Context, execID stri
|
||||
return args.Get(0).(types.ContainerExecInspect), args.Error(1)
|
||||
}
|
||||
|
||||
func (m *mockDockerClient) CopyToContainer(ctx context.Context, id string, path string, content io.Reader, options types.CopyToContainerOptions) error {
|
||||
args := m.Called(ctx, id, path, content, options)
|
||||
return args.Error(0)
|
||||
}
|
||||
|
||||
type endlessReader struct {
|
||||
io.Reader
|
||||
}
|
||||
@@ -169,78 +172,77 @@ func TestDockerExecFailure(t *testing.T) {
|
||||
client.AssertExpectations(t)
|
||||
}
|
||||
|
||||
func TestDockerCopyTarStream(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
conn := &mockConn{}
|
||||
|
||||
client := &mockDockerClient{}
|
||||
client.On("CopyToContainer", ctx, "123", "/", mock.Anything, mock.AnythingOfType("types.CopyToContainerOptions")).Return(nil)
|
||||
client.On("CopyToContainer", ctx, "123", "/var/run/act", mock.Anything, mock.AnythingOfType("types.CopyToContainerOptions")).Return(nil)
|
||||
cr := &containerReference{
|
||||
id: "123",
|
||||
cli: client,
|
||||
input: &NewContainerInput{
|
||||
Image: "image",
|
||||
},
|
||||
}
|
||||
|
||||
_ = cr.CopyTarStream(ctx, "/var/run/act", &bytes.Buffer{})
|
||||
|
||||
conn.AssertExpectations(t)
|
||||
client.AssertExpectations(t)
|
||||
}
|
||||
|
||||
func TestDockerCopyTarStreamErrorInCopyFiles(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
conn := &mockConn{}
|
||||
|
||||
merr := fmt.Errorf("Failure")
|
||||
|
||||
client := &mockDockerClient{}
|
||||
client.On("CopyToContainer", ctx, "123", "/", mock.Anything, mock.AnythingOfType("types.CopyToContainerOptions")).Return(merr)
|
||||
client.On("CopyToContainer", ctx, "123", "/", mock.Anything, mock.AnythingOfType("types.CopyToContainerOptions")).Return(merr)
|
||||
cr := &containerReference{
|
||||
id: "123",
|
||||
cli: client,
|
||||
input: &NewContainerInput{
|
||||
Image: "image",
|
||||
},
|
||||
}
|
||||
|
||||
err := cr.CopyTarStream(ctx, "/var/run/act", &bytes.Buffer{})
|
||||
assert.ErrorIs(t, err, merr)
|
||||
|
||||
conn.AssertExpectations(t)
|
||||
client.AssertExpectations(t)
|
||||
}
|
||||
|
||||
func TestDockerCopyTarStreamErrorInMkdir(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
conn := &mockConn{}
|
||||
|
||||
merr := fmt.Errorf("Failure")
|
||||
|
||||
client := &mockDockerClient{}
|
||||
client.On("CopyToContainer", ctx, "123", "/", mock.Anything, mock.AnythingOfType("types.CopyToContainerOptions")).Return(nil)
|
||||
client.On("CopyToContainer", ctx, "123", "/var/run/act", mock.Anything, mock.AnythingOfType("types.CopyToContainerOptions")).Return(merr)
|
||||
cr := &containerReference{
|
||||
id: "123",
|
||||
cli: client,
|
||||
input: &NewContainerInput{
|
||||
Image: "image",
|
||||
},
|
||||
}
|
||||
|
||||
err := cr.CopyTarStream(ctx, "/var/run/act", &bytes.Buffer{})
|
||||
assert.ErrorIs(t, err, merr)
|
||||
|
||||
conn.AssertExpectations(t)
|
||||
client.AssertExpectations(t)
|
||||
}
|
||||
|
||||
// Type assert containerReference implements ExecutionsEnvironment
|
||||
var _ ExecutionsEnvironment = &containerReference{}
|
||||
|
||||
func TestCheckVolumes(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
validVolumes []string
|
||||
binds []string
|
||||
expectedBinds []string
|
||||
}{
|
||||
{
|
||||
desc: "match all volumes",
|
||||
validVolumes: []string{"**"},
|
||||
binds: []string{
|
||||
"shared_volume:/shared_volume",
|
||||
"/home/test/data:/test_data",
|
||||
"/etc/conf.d/base.json:/config/base.json",
|
||||
"sql_data:/sql_data",
|
||||
"/secrets/keys:/keys",
|
||||
},
|
||||
expectedBinds: []string{
|
||||
"shared_volume:/shared_volume",
|
||||
"/home/test/data:/test_data",
|
||||
"/etc/conf.d/base.json:/config/base.json",
|
||||
"sql_data:/sql_data",
|
||||
"/secrets/keys:/keys",
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "no volumes can be matched",
|
||||
validVolumes: []string{},
|
||||
binds: []string{
|
||||
"shared_volume:/shared_volume",
|
||||
"/home/test/data:/test_data",
|
||||
"/etc/conf.d/base.json:/config/base.json",
|
||||
"sql_data:/sql_data",
|
||||
"/secrets/keys:/keys",
|
||||
},
|
||||
expectedBinds: []string{},
|
||||
},
|
||||
{
|
||||
desc: "only allowed volumes can be matched",
|
||||
validVolumes: []string{
|
||||
"shared_volume",
|
||||
"/home/test/data",
|
||||
"/etc/conf.d/*.json",
|
||||
},
|
||||
binds: []string{
|
||||
"shared_volume:/shared_volume",
|
||||
"/home/test/data:/test_data",
|
||||
"/etc/conf.d/base.json:/config/base.json",
|
||||
"sql_data:/sql_data",
|
||||
"/secrets/keys:/keys",
|
||||
},
|
||||
expectedBinds: []string{
|
||||
"shared_volume:/shared_volume",
|
||||
"/home/test/data:/test_data",
|
||||
"/etc/conf.d/base.json:/config/base.json",
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
logger, _ := test.NewNullLogger()
|
||||
ctx := common.WithLogger(context.Background(), logger)
|
||||
cr := &containerReference{
|
||||
input: &NewContainerInput{
|
||||
ValidVolumes: tc.validVolumes,
|
||||
},
|
||||
}
|
||||
_, hostConf := cr.sanitizeConfig(ctx, &container.Config{}, &container.HostConfig{Binds: tc.binds})
|
||||
assert.Equal(t, tc.expectedBinds, hostConf.Binds)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -41,12 +41,6 @@ func (e *HostEnvironment) Create(_ []string, _ []string) common.Executor {
|
||||
}
|
||||
}
|
||||
|
||||
func (e *HostEnvironment) ConnectToNetwork(name string) common.Executor {
|
||||
return func(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (e *HostEnvironment) Close() common.Executor {
|
||||
return func(ctx context.Context) error {
|
||||
return nil
|
||||
|
||||
@@ -155,8 +155,6 @@ func (impl *interperterImpl) evaluateVariable(variableNode *actionlint.VariableN
|
||||
switch strings.ToLower(variableNode.Name) {
|
||||
case "github":
|
||||
return impl.env.Github, nil
|
||||
case "gitea": // compatible with Gitea
|
||||
return impl.env.Github, nil
|
||||
case "env":
|
||||
return impl.env.Env, nil
|
||||
case "job":
|
||||
|
||||
@@ -1,185 +0,0 @@
|
||||
package jobparser
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/nektos/act/pkg/exprparser"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// ExpressionEvaluator is copied from runner.expressionEvaluator,
|
||||
// to avoid unnecessary dependencies
|
||||
type ExpressionEvaluator struct {
|
||||
interpreter exprparser.Interpreter
|
||||
}
|
||||
|
||||
func NewExpressionEvaluator(interpreter exprparser.Interpreter) *ExpressionEvaluator {
|
||||
return &ExpressionEvaluator{interpreter: interpreter}
|
||||
}
|
||||
|
||||
func (ee ExpressionEvaluator) evaluate(in string, defaultStatusCheck exprparser.DefaultStatusCheck) (interface{}, error) {
|
||||
evaluated, err := ee.interpreter.Evaluate(in, defaultStatusCheck)
|
||||
|
||||
return evaluated, err
|
||||
}
|
||||
|
||||
func (ee ExpressionEvaluator) evaluateScalarYamlNode(node *yaml.Node) error {
|
||||
var in string
|
||||
if err := node.Decode(&in); err != nil {
|
||||
return err
|
||||
}
|
||||
if !strings.Contains(in, "${{") || !strings.Contains(in, "}}") {
|
||||
return nil
|
||||
}
|
||||
expr, _ := rewriteSubExpression(in, false)
|
||||
res, err := ee.evaluate(expr, exprparser.DefaultStatusCheckNone)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return node.Encode(res)
|
||||
}
|
||||
|
||||
func (ee ExpressionEvaluator) evaluateMappingYamlNode(node *yaml.Node) error {
|
||||
// GitHub has this undocumented feature to merge maps, called insert directive
|
||||
insertDirective := regexp.MustCompile(`\${{\s*insert\s*}}`)
|
||||
for i := 0; i < len(node.Content)/2; {
|
||||
k := node.Content[i*2]
|
||||
v := node.Content[i*2+1]
|
||||
if err := ee.EvaluateYamlNode(v); err != nil {
|
||||
return err
|
||||
}
|
||||
var sk string
|
||||
// Merge the nested map of the insert directive
|
||||
if k.Decode(&sk) == nil && insertDirective.MatchString(sk) {
|
||||
node.Content = append(append(node.Content[:i*2], v.Content...), node.Content[(i+1)*2:]...)
|
||||
i += len(v.Content) / 2
|
||||
} else {
|
||||
if err := ee.EvaluateYamlNode(k); err != nil {
|
||||
return err
|
||||
}
|
||||
i++
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ee ExpressionEvaluator) evaluateSequenceYamlNode(node *yaml.Node) error {
|
||||
for i := 0; i < len(node.Content); {
|
||||
v := node.Content[i]
|
||||
// Preserve nested sequences
|
||||
wasseq := v.Kind == yaml.SequenceNode
|
||||
if err := ee.EvaluateYamlNode(v); err != nil {
|
||||
return err
|
||||
}
|
||||
// GitHub has this undocumented feature to merge sequences / arrays
|
||||
// We have a nested sequence via evaluation, merge the arrays
|
||||
if v.Kind == yaml.SequenceNode && !wasseq {
|
||||
node.Content = append(append(node.Content[:i], v.Content...), node.Content[i+1:]...)
|
||||
i += len(v.Content)
|
||||
} else {
|
||||
i++
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ee ExpressionEvaluator) EvaluateYamlNode(node *yaml.Node) error {
|
||||
switch node.Kind {
|
||||
case yaml.ScalarNode:
|
||||
return ee.evaluateScalarYamlNode(node)
|
||||
case yaml.MappingNode:
|
||||
return ee.evaluateMappingYamlNode(node)
|
||||
case yaml.SequenceNode:
|
||||
return ee.evaluateSequenceYamlNode(node)
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (ee ExpressionEvaluator) Interpolate(in string) string {
|
||||
if !strings.Contains(in, "${{") || !strings.Contains(in, "}}") {
|
||||
return in
|
||||
}
|
||||
|
||||
expr, _ := rewriteSubExpression(in, true)
|
||||
evaluated, err := ee.evaluate(expr, exprparser.DefaultStatusCheckNone)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
value, ok := evaluated.(string)
|
||||
if !ok {
|
||||
panic(fmt.Sprintf("Expression %s did not evaluate to a string", expr))
|
||||
}
|
||||
|
||||
return value
|
||||
}
|
||||
|
||||
func escapeFormatString(in string) string {
|
||||
return strings.ReplaceAll(strings.ReplaceAll(in, "{", "{{"), "}", "}}")
|
||||
}
|
||||
|
||||
func rewriteSubExpression(in string, forceFormat bool) (string, error) {
	if !strings.Contains(in, "${{") || !strings.Contains(in, "}}") {
		return in, nil
	}

	strPattern := regexp.MustCompile("(?:''|[^'])*'")
	pos := 0
	exprStart := -1
	strStart := -1
	var results []string
	formatOut := ""
	for pos < len(in) {
		if strStart > -1 {
			matches := strPattern.FindStringIndex(in[pos:])
			if matches == nil {
				panic("unclosed string.")
			}

			strStart = -1
			pos += matches[1]
		} else if exprStart > -1 {
			exprEnd := strings.Index(in[pos:], "}}")
			strStart = strings.Index(in[pos:], "'")

			if exprEnd > -1 && strStart > -1 {
				if exprEnd < strStart {
					strStart = -1
				} else {
					exprEnd = -1
				}
			}

			if exprEnd > -1 {
				formatOut += fmt.Sprintf("{%d}", len(results))
				results = append(results, strings.TrimSpace(in[exprStart:pos+exprEnd]))
				pos += exprEnd + 2
				exprStart = -1
			} else if strStart > -1 {
				pos += strStart + 1
			} else {
				panic("unclosed expression.")
			}
		} else {
			exprStart = strings.Index(in[pos:], "${{")
			if exprStart != -1 {
				formatOut += escapeFormatString(in[pos : pos+exprStart])
				exprStart = pos + exprStart + 3
				pos = exprStart
			} else {
				formatOut += escapeFormatString(in[pos:])
				pos = len(in)
			}
		}
	}

	if len(results) == 1 && formatOut == "{0}" && !forceFormat {
		return in, nil
	}

	out := fmt.Sprintf("format('%s', %s)", strings.ReplaceAll(formatOut, "'", "''"), strings.Join(results, ", "))
	return out, nil
}
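As the code above shows, a string such as `release ${{ github.ref }}` is rewritten into `format('release {0}', github.ref)`, while a string that is nothing but a single expression is returned unchanged when forceFormat is false. The literal text outside expressions goes through escapeFormatString so its braces are not mistaken for placeholders. The snippet below is a local copy of that one-line helper, purely to show the brace doubling outside the package scope.

```go
package main

import (
	"fmt"
	"strings"
)

// Local copy of the escapeFormatString helper shown above.
func escapeFormatString(in string) string {
	return strings.ReplaceAll(strings.ReplaceAll(in, "{", "{{"), "}", "}}")
}

func main() {
	// Literal braces in the text surrounding an expression are doubled so the
	// generated format('...') call does not treat them as placeholders.
	fmt.Println(escapeFormatString("tag v{1} for build")) // tag v{{1}} for build
}
```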
@@ -1,83 +0,0 @@
package jobparser

import (
	"github.com/nektos/act/pkg/exprparser"
	"github.com/nektos/act/pkg/model"
	"gopkg.in/yaml.v3"
)

// NewInterpeter returns an interpreter used on the server side;
// it needs only the github, needs, strategy, matrix and inputs contexts,
// see https://docs.github.com/en/actions/learn-github-actions/contexts#context-availability
func NewInterpeter(
	jobID string,
	job *model.Job,
	matrix map[string]interface{},
	gitCtx *model.GithubContext,
	results map[string]*JobResult,
	vars map[string]string,
) exprparser.Interpreter {
	strategy := make(map[string]interface{})
	if job.Strategy != nil {
		strategy["fail-fast"] = job.Strategy.FailFast
		strategy["max-parallel"] = job.Strategy.MaxParallel
	}

	run := &model.Run{
		Workflow: &model.Workflow{
			Jobs: map[string]*model.Job{},
		},
		JobID: jobID,
	}
	for id, result := range results {
		need := yaml.Node{}
		_ = need.Encode(result.Needs)
		run.Workflow.Jobs[id] = &model.Job{
			RawNeeds: need,
			Result:   result.Result,
			Outputs:  result.Outputs,
		}
	}

	jobs := run.Workflow.Jobs
	jobNeeds := run.Job().Needs()

	using := map[string]exprparser.Needs{}
	for _, need := range jobNeeds {
		if v, ok := jobs[need]; ok {
			using[need] = exprparser.Needs{
				Outputs: v.Outputs,
				Result:  v.Result,
			}
		}
	}

	ee := &exprparser.EvaluationEnvironment{
		Github:   gitCtx,
		Env:      nil, // no need
		Job:      nil, // no need
		Steps:    nil, // no need
		Runner:   nil, // no need
		Secrets:  nil, // no need
		Strategy: strategy,
		Matrix:   matrix,
		Needs:    using,
		Inputs:   nil, // not supported yet
		Vars:     vars,
	}

	config := exprparser.Config{
		Run:        run,
		WorkingDir: "", // WorkingDir is used for the function hashFiles, but it's not needed in the server
		Context:    "job",
	}

	return exprparser.NewInterpeter(ee, config)
}

// JobResult is the minimum requirement of job results for Interpeter
type JobResult struct {
	Needs   []string
	Result  string
	Outputs map[string]string
}
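For illustration, a server-side caller might combine NewInterpeter with the evaluator defined earlier roughly as in the sketch below. It assumes NewExpressionEvaluator wraps an exprparser.Interpreter the same way Parse does further down; the workflow, job IDs, outputs and matrix values are invented.

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/nektos/act/pkg/jobparser"
	"github.com/nektos/act/pkg/model"
)

func main() {
	wf, err := model.ReadWorkflow(strings.NewReader(`
name: demo
jobs:
  build:
    runs-on: linux
    steps:
      - run: make
  deploy:
    needs: build
    runs-on: linux
    steps:
      - run: make deploy
`))
	if err != nil {
		log.Fatal(err)
	}

	// Results for every job, including the one being evaluated, mirroring how
	// Parse builds this map from origin.Jobs.
	results := map[string]*jobparser.JobResult{
		"build":  {Result: "success", Outputs: map[string]string{"artifact": "dist.zip"}},
		"deploy": {Needs: []string{"build"}},
	}
	matrix := map[string]interface{}{"os": "ubuntu-22.04"}

	interp := jobparser.NewInterpeter("deploy", wf.GetJob("deploy"), matrix, &model.GithubContext{}, results, nil)
	ev := jobparser.NewExpressionEvaluator(interp)
	fmt.Println(ev.Interpolate("deploy ${{ needs.build.outputs.artifact }} on ${{ matrix.os }}"))
	// expected: "deploy dist.zip on ubuntu-22.04"
}
```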
@@ -1,157 +0,0 @@
package jobparser

import (
	"bytes"
	"fmt"
	"sort"
	"strings"

	"gopkg.in/yaml.v3"

	"github.com/nektos/act/pkg/model"
)

func Parse(content []byte, options ...ParseOption) ([]*SingleWorkflow, error) {
	origin, err := model.ReadWorkflow(bytes.NewReader(content))
	if err != nil {
		return nil, fmt.Errorf("model.ReadWorkflow: %w", err)
	}

	workflow := &SingleWorkflow{}
	if err := yaml.Unmarshal(content, workflow); err != nil {
		return nil, fmt.Errorf("yaml.Unmarshal: %w", err)
	}

	pc := &parseContext{}
	for _, o := range options {
		o(pc)
	}
	results := map[string]*JobResult{}
	for id, job := range origin.Jobs {
		results[id] = &JobResult{
			Needs:   job.Needs(),
			Result:  pc.jobResults[id],
			Outputs: nil, // not supported yet
		}
	}

	var ret []*SingleWorkflow
	ids, jobs, err := workflow.jobs()
	if err != nil {
		return nil, fmt.Errorf("invalid jobs: %w", err)
	}
	for i, id := range ids {
		job := jobs[i]
		matricxes, err := getMatrixes(origin.GetJob(id))
		if err != nil {
			return nil, fmt.Errorf("getMatrixes: %w", err)
		}
		for _, matrix := range matricxes {
			job := job.Clone()
			if job.Name == "" {
				job.Name = id
			}
			job.Name = nameWithMatrix(job.Name, matrix)
			job.Strategy.RawMatrix = encodeMatrix(matrix)
			evaluator := NewExpressionEvaluator(NewInterpeter(id, origin.GetJob(id), matrix, pc.gitContext, results, pc.vars))
			runsOn := origin.GetJob(id).RunsOn()
			for i, v := range runsOn {
				runsOn[i] = evaluator.Interpolate(v)
			}
			job.RawRunsOn = encodeRunsOn(runsOn)
			swf := &SingleWorkflow{
				Name:     workflow.Name,
				RawOn:    workflow.RawOn,
				Env:      workflow.Env,
				Defaults: workflow.Defaults,
			}
			if err := swf.SetJob(id, job); err != nil {
				return nil, fmt.Errorf("SetJob: %w", err)
			}
			ret = append(ret, swf)
		}
	}
	return ret, nil
}

func WithJobResults(results map[string]string) ParseOption {
	return func(c *parseContext) {
		c.jobResults = results
	}
}

func WithGitContext(context *model.GithubContext) ParseOption {
	return func(c *parseContext) {
		c.gitContext = context
	}
}

func WithVars(vars map[string]string) ParseOption {
	return func(c *parseContext) {
		c.vars = vars
	}
}

type parseContext struct {
	jobResults map[string]string
	gitContext *model.GithubContext
	vars       map[string]string
}

type ParseOption func(c *parseContext)

func getMatrixes(job *model.Job) ([]map[string]interface{}, error) {
	ret, err := job.GetMatrixes()
	if err != nil {
		return nil, fmt.Errorf("GetMatrixes: %w", err)
	}
	sort.Slice(ret, func(i, j int) bool {
		return matrixName(ret[i]) < matrixName(ret[j])
	})
	return ret, nil
}

func encodeMatrix(matrix map[string]interface{}) yaml.Node {
	if len(matrix) == 0 {
		return yaml.Node{}
	}
	value := map[string][]interface{}{}
	for k, v := range matrix {
		value[k] = []interface{}{v}
	}
	node := yaml.Node{}
	_ = node.Encode(value)
	return node
}

func encodeRunsOn(runsOn []string) yaml.Node {
	node := yaml.Node{}
	if len(runsOn) == 1 {
		_ = node.Encode(runsOn[0])
	} else {
		_ = node.Encode(runsOn)
	}
	return node
}

func nameWithMatrix(name string, m map[string]interface{}) string {
	if len(m) == 0 {
		return name
	}

	return name + " " + matrixName(m)
}

func matrixName(m map[string]interface{}) string {
	ks := make([]string, 0, len(m))
	for k := range m {
		ks = append(ks, k)
	}
	sort.Strings(ks)
	vs := make([]string, 0, len(m))
	for _, v := range ks {
		vs = append(vs, fmt.Sprint(m[v]))
	}

	return fmt.Sprintf("(%s)", strings.Join(vs, ", "))
}
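A minimal sketch of calling Parse from outside the package is shown below; the workflow content and the variable fed through WithVars are invented. Each job in the input becomes its own SingleWorkflow in the returned slice.

```go
package main

import (
	"fmt"
	"log"

	"github.com/nektos/act/pkg/jobparser"
)

func main() {
	workflow := []byte(`
name: test
jobs:
  lint:
    runs-on: linux
    steps:
      - run: make lint
  build:
    needs: lint
    runs-on: linux
    steps:
      - run: make build
`)

	// Options are optional; WithVars feeds the vars context used while
	// interpolating runs-on.
	flows, err := jobparser.Parse(workflow, jobparser.WithVars(map[string]string{"MY_VAR": "x"}))
	if err != nil {
		log.Fatal(err)
	}
	for _, swf := range flows {
		id, job := swf.Job()
		fmt.Println(id, job.Name, job.RunsOn()) // lint / build, each as its own single-job workflow
	}
}
```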
@@ -1,76 +0,0 @@
|
||||
package jobparser
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
func TestParse(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
options []ParseOption
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "multiple_jobs",
|
||||
options: nil,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "multiple_matrix",
|
||||
options: nil,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "has_needs",
|
||||
options: nil,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "has_with",
|
||||
options: nil,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "has_secrets",
|
||||
options: nil,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "empty_step",
|
||||
options: nil,
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
content := ReadTestdata(t, tt.name+".in.yaml")
|
||||
want := ReadTestdata(t, tt.name+".out.yaml")
|
||||
got, err := Parse(content, tt.options...)
|
||||
if tt.wantErr {
|
||||
require.Error(t, err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
builder := &strings.Builder{}
|
||||
for _, v := range got {
|
||||
if builder.Len() > 0 {
|
||||
builder.WriteString("---\n")
|
||||
}
|
||||
encoder := yaml.NewEncoder(builder)
|
||||
encoder.SetIndent(2)
|
||||
require.NoError(t, encoder.Encode(v))
|
||||
id, job := v.Job()
|
||||
assert.NotEmpty(t, id)
|
||||
assert.NotNil(t, job)
|
||||
}
|
||||
assert.Equal(t, string(want), builder.String())
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,333 +0,0 @@
|
||||
package jobparser
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/nektos/act/pkg/model"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// SingleWorkflow is a workflow with single job and single matrix
|
||||
type SingleWorkflow struct {
|
||||
Name string `yaml:"name,omitempty"`
|
||||
RawOn yaml.Node `yaml:"on,omitempty"`
|
||||
Env map[string]string `yaml:"env,omitempty"`
|
||||
RawJobs yaml.Node `yaml:"jobs,omitempty"`
|
||||
Defaults Defaults `yaml:"defaults,omitempty"`
|
||||
}
|
||||
|
||||
func (w *SingleWorkflow) Job() (string, *Job) {
|
||||
ids, jobs, _ := w.jobs()
|
||||
if len(ids) >= 1 {
|
||||
return ids[0], jobs[0]
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (w *SingleWorkflow) jobs() ([]string, []*Job, error) {
|
||||
ids, jobs, err := parseMappingNode[*Job](&w.RawJobs)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
for _, job := range jobs {
|
||||
steps := make([]*Step, 0, len(job.Steps))
|
||||
for _, s := range job.Steps {
|
||||
if s != nil {
|
||||
steps = append(steps, s)
|
||||
}
|
||||
}
|
||||
job.Steps = steps
|
||||
}
|
||||
|
||||
return ids, jobs, nil
|
||||
}
|
||||
|
||||
func (w *SingleWorkflow) SetJob(id string, job *Job) error {
|
||||
m := map[string]*Job{
|
||||
id: job,
|
||||
}
|
||||
out, err := yaml.Marshal(m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
node := yaml.Node{}
|
||||
if err := yaml.Unmarshal(out, &node); err != nil {
|
||||
return err
|
||||
}
|
||||
if len(node.Content) != 1 || node.Content[0].Kind != yaml.MappingNode {
|
||||
return fmt.Errorf("can not set job: %q", out)
|
||||
}
|
||||
w.RawJobs = *node.Content[0]
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *SingleWorkflow) Marshal() ([]byte, error) {
|
||||
return yaml.Marshal(w)
|
||||
}
|
||||
|
||||
type Job struct {
|
||||
Name string `yaml:"name,omitempty"`
|
||||
RawNeeds yaml.Node `yaml:"needs,omitempty"`
|
||||
RawRunsOn yaml.Node `yaml:"runs-on,omitempty"`
|
||||
Env yaml.Node `yaml:"env,omitempty"`
|
||||
If yaml.Node `yaml:"if,omitempty"`
|
||||
Steps []*Step `yaml:"steps,omitempty"`
|
||||
TimeoutMinutes string `yaml:"timeout-minutes,omitempty"`
|
||||
Services map[string]*ContainerSpec `yaml:"services,omitempty"`
|
||||
Strategy Strategy `yaml:"strategy,omitempty"`
|
||||
RawContainer yaml.Node `yaml:"container,omitempty"`
|
||||
Defaults Defaults `yaml:"defaults,omitempty"`
|
||||
Outputs map[string]string `yaml:"outputs,omitempty"`
|
||||
Uses string `yaml:"uses,omitempty"`
|
||||
With map[string]interface{} `yaml:"with,omitempty"`
|
||||
RawSecrets yaml.Node `yaml:"secrets,omitempty"`
|
||||
}
|
||||
|
||||
func (j *Job) Clone() *Job {
|
||||
if j == nil {
|
||||
return nil
|
||||
}
|
||||
return &Job{
|
||||
Name: j.Name,
|
||||
RawNeeds: j.RawNeeds,
|
||||
RawRunsOn: j.RawRunsOn,
|
||||
Env: j.Env,
|
||||
If: j.If,
|
||||
Steps: j.Steps,
|
||||
TimeoutMinutes: j.TimeoutMinutes,
|
||||
Services: j.Services,
|
||||
Strategy: j.Strategy,
|
||||
RawContainer: j.RawContainer,
|
||||
Defaults: j.Defaults,
|
||||
Outputs: j.Outputs,
|
||||
Uses: j.Uses,
|
||||
With: j.With,
|
||||
RawSecrets: j.RawSecrets,
|
||||
}
|
||||
}
|
||||
|
||||
func (j *Job) Needs() []string {
|
||||
return (&model.Job{RawNeeds: j.RawNeeds}).Needs()
|
||||
}
|
||||
|
||||
func (j *Job) EraseNeeds() *Job {
|
||||
j.RawNeeds = yaml.Node{}
|
||||
return j
|
||||
}
|
||||
|
||||
func (j *Job) RunsOn() []string {
|
||||
return (&model.Job{RawRunsOn: j.RawRunsOn}).RunsOn()
|
||||
}
|
||||
|
||||
type Step struct {
|
||||
ID string `yaml:"id,omitempty"`
|
||||
If yaml.Node `yaml:"if,omitempty"`
|
||||
Name string `yaml:"name,omitempty"`
|
||||
Uses string `yaml:"uses,omitempty"`
|
||||
Run string `yaml:"run,omitempty"`
|
||||
WorkingDirectory string `yaml:"working-directory,omitempty"`
|
||||
Shell string `yaml:"shell,omitempty"`
|
||||
Env yaml.Node `yaml:"env,omitempty"`
|
||||
With map[string]string `yaml:"with,omitempty"`
|
||||
ContinueOnError bool `yaml:"continue-on-error,omitempty"`
|
||||
TimeoutMinutes string `yaml:"timeout-minutes,omitempty"`
|
||||
}
|
||||
|
||||
// String gets the name of step
|
||||
func (s *Step) String() string {
|
||||
if s == nil {
|
||||
return ""
|
||||
}
|
||||
return (&model.Step{
|
||||
ID: s.ID,
|
||||
Name: s.Name,
|
||||
Uses: s.Uses,
|
||||
Run: s.Run,
|
||||
}).String()
|
||||
}
|
||||
|
||||
type ContainerSpec struct {
|
||||
Image string `yaml:"image,omitempty"`
|
||||
Env map[string]string `yaml:"env,omitempty"`
|
||||
Ports []string `yaml:"ports,omitempty"`
|
||||
Volumes []string `yaml:"volumes,omitempty"`
|
||||
Options string `yaml:"options,omitempty"`
|
||||
Credentials map[string]string `yaml:"credentials,omitempty"`
|
||||
Cmd []string `yaml:"cmd,omitempty"`
|
||||
}
|
||||
|
||||
type Strategy struct {
|
||||
FailFastString string `yaml:"fail-fast,omitempty"`
|
||||
MaxParallelString string `yaml:"max-parallel,omitempty"`
|
||||
RawMatrix yaml.Node `yaml:"matrix,omitempty"`
|
||||
}
|
||||
|
||||
type Defaults struct {
|
||||
Run RunDefaults `yaml:"run,omitempty"`
|
||||
}
|
||||
|
||||
type RunDefaults struct {
|
||||
Shell string `yaml:"shell,omitempty"`
|
||||
WorkingDirectory string `yaml:"working-directory,omitempty"`
|
||||
}
|
||||
|
||||
type Event struct {
|
||||
Name string
|
||||
acts map[string][]string
|
||||
schedules []map[string]string
|
||||
}
|
||||
|
||||
func (evt *Event) IsSchedule() bool {
|
||||
return evt.schedules != nil
|
||||
}
|
||||
|
||||
func (evt *Event) Acts() map[string][]string {
|
||||
return evt.acts
|
||||
}
|
||||
|
||||
func (evt *Event) Schedules() []map[string]string {
|
||||
return evt.schedules
|
||||
}
|
||||
|
||||
func ParseRawOn(rawOn *yaml.Node) ([]*Event, error) {
|
||||
switch rawOn.Kind {
|
||||
case yaml.ScalarNode:
|
||||
var val string
|
||||
err := rawOn.Decode(&val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return []*Event{
|
||||
{Name: val},
|
||||
}, nil
|
||||
case yaml.SequenceNode:
|
||||
var val []interface{}
|
||||
err := rawOn.Decode(&val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res := make([]*Event, 0, len(val))
|
||||
for _, v := range val {
|
||||
switch t := v.(type) {
|
||||
case string:
|
||||
res = append(res, &Event{Name: t})
|
||||
default:
|
||||
return nil, fmt.Errorf("invalid type %T", t)
|
||||
}
|
||||
}
|
||||
return res, nil
|
||||
case yaml.MappingNode:
|
||||
events, triggers, err := parseMappingNode[interface{}](rawOn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res := make([]*Event, 0, len(events))
|
||||
for i, k := range events {
|
||||
v := triggers[i]
|
||||
if v == nil {
|
||||
res = append(res, &Event{
|
||||
Name: k,
|
||||
acts: map[string][]string{},
|
||||
})
|
||||
continue
|
||||
}
|
||||
switch t := v.(type) {
|
||||
case string:
|
||||
res = append(res, &Event{
|
||||
Name: k,
|
||||
acts: map[string][]string{},
|
||||
})
|
||||
case []string:
|
||||
res = append(res, &Event{
|
||||
Name: k,
|
||||
acts: map[string][]string{},
|
||||
})
|
||||
case map[string]interface{}:
|
||||
acts := make(map[string][]string, len(t))
|
||||
for act, branches := range t {
|
||||
switch b := branches.(type) {
|
||||
case string:
|
||||
acts[act] = []string{b}
|
||||
case []string:
|
||||
acts[act] = b
|
||||
case []interface{}:
|
||||
acts[act] = make([]string, len(b))
|
||||
for i, v := range b {
|
||||
var ok bool
|
||||
if acts[act][i], ok = v.(string); !ok {
|
||||
return nil, fmt.Errorf("unknown on type: %#v", branches)
|
||||
}
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown on type: %#v", branches)
|
||||
}
|
||||
}
|
||||
res = append(res, &Event{
|
||||
Name: k,
|
||||
acts: acts,
|
||||
})
|
||||
case []interface{}:
|
||||
if k != "schedule" {
|
||||
return nil, fmt.Errorf("unknown on type: %#v", v)
|
||||
}
|
||||
schedules := make([]map[string]string, len(t))
|
||||
for i, tt := range t {
|
||||
vv, ok := tt.(map[string]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unknown on type: %#v", v)
|
||||
}
|
||||
schedules[i] = make(map[string]string, len(vv))
|
||||
for k, vvv := range vv {
|
||||
var ok bool
|
||||
if schedules[i][k], ok = vvv.(string); !ok {
|
||||
return nil, fmt.Errorf("unknown on type: %#v", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
res = append(res, &Event{
|
||||
Name: k,
|
||||
schedules: schedules,
|
||||
})
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown on type: %#v", v)
|
||||
}
|
||||
}
|
||||
return res, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown on type: %v", rawOn.Kind)
|
||||
}
|
||||
}
|
||||
|
||||
// parseMappingNode parse a mapping node and preserve order.
|
||||
func parseMappingNode[T any](node *yaml.Node) ([]string, []T, error) {
|
||||
if node.Kind != yaml.MappingNode {
|
||||
return nil, nil, fmt.Errorf("input node is not a mapping node")
|
||||
}
|
||||
|
||||
var scalars []string
|
||||
var datas []T
|
||||
expectKey := true
|
||||
for _, item := range node.Content {
|
||||
if expectKey {
|
||||
if item.Kind != yaml.ScalarNode {
|
||||
return nil, nil, fmt.Errorf("not a valid scalar node: %v", item.Value)
|
||||
}
|
||||
scalars = append(scalars, item.Value)
|
||||
expectKey = false
|
||||
} else {
|
||||
var val T
|
||||
if err := item.Decode(&val); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
datas = append(datas, val)
|
||||
expectKey = true
|
||||
}
|
||||
}
|
||||
|
||||
if len(scalars) != len(datas) {
|
||||
return nil, nil, fmt.Errorf("invalid definition of on: %v", node.Value)
|
||||
}
|
||||
|
||||
return scalars, datas, nil
|
||||
}
|
||||
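The sketch below shows ParseRawOn applied to a workflow's `on` node, assuming the event names and triggers in the inline workflow are representative; it prints each event together with its activity filters and any cron schedules.

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/nektos/act/pkg/jobparser"
	"github.com/nektos/act/pkg/model"
)

func main() {
	wf, err := model.ReadWorkflow(strings.NewReader(
		"on:\n  push:\n    branches: [main]\n  schedule:\n    - cron: '20 6 * * *'\njobs:\n  job1:\n    runs-on: linux\n    steps:\n      - run: echo hi\n"))
	if err != nil {
		log.Fatal(err)
	}
	events, err := jobparser.ParseRawOn(&wf.RawOn)
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range events {
		// Roughly: "push map[branches:[main]] false []" and
		// "schedule map[] true [map[cron:20 6 * * *]]"
		fmt.Println(e.Name, e.Acts(), e.IsSchedule(), e.Schedules())
	}
}
```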
@@ -1,306 +0,0 @@
|
||||
package jobparser
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/nektos/act/pkg/model"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
func TestParseRawOn(t *testing.T) {
|
||||
kases := []struct {
|
||||
input string
|
||||
result []*Event
|
||||
}{
|
||||
{
|
||||
input: "on: issue_comment",
|
||||
result: []*Event{
|
||||
{
|
||||
Name: "issue_comment",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: "on:\n push",
|
||||
result: []*Event{
|
||||
{
|
||||
Name: "push",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
input: "on:\n - push\n - pull_request",
|
||||
result: []*Event{
|
||||
{
|
||||
Name: "push",
|
||||
},
|
||||
{
|
||||
Name: "pull_request",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: "on:\n push:\n branches:\n - master",
|
||||
result: []*Event{
|
||||
{
|
||||
Name: "push",
|
||||
acts: map[string][]string{
|
||||
"branches": {
|
||||
"master",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: "on:\n branch_protection_rule:\n types: [created, deleted]",
|
||||
result: []*Event{
|
||||
{
|
||||
Name: "branch_protection_rule",
|
||||
acts: map[string][]string{
|
||||
"types": {
|
||||
"created",
|
||||
"deleted",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: "on:\n project:\n types: [created, deleted]\n milestone:\n types: [opened, deleted]",
|
||||
result: []*Event{
|
||||
{
|
||||
Name: "project",
|
||||
acts: map[string][]string{
|
||||
"types": {
|
||||
"created",
|
||||
"deleted",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "milestone",
|
||||
acts: map[string][]string{
|
||||
"types": {
|
||||
"opened",
|
||||
"deleted",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: "on:\n pull_request:\n types:\n - opened\n branches:\n - 'releases/**'",
|
||||
result: []*Event{
|
||||
{
|
||||
Name: "pull_request",
|
||||
acts: map[string][]string{
|
||||
"types": {
|
||||
"opened",
|
||||
},
|
||||
"branches": {
|
||||
"releases/**",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: "on:\n push:\n branches:\n - main\n pull_request:\n types:\n - opened\n branches:\n - '**'",
|
||||
result: []*Event{
|
||||
{
|
||||
Name: "push",
|
||||
acts: map[string][]string{
|
||||
"branches": {
|
||||
"main",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "pull_request",
|
||||
acts: map[string][]string{
|
||||
"types": {
|
||||
"opened",
|
||||
},
|
||||
"branches": {
|
||||
"**",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: "on:\n push:\n branches:\n - 'main'\n - 'releases/**'",
|
||||
result: []*Event{
|
||||
{
|
||||
Name: "push",
|
||||
acts: map[string][]string{
|
||||
"branches": {
|
||||
"main",
|
||||
"releases/**",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: "on:\n push:\n tags:\n - v1.**",
|
||||
result: []*Event{
|
||||
{
|
||||
Name: "push",
|
||||
acts: map[string][]string{
|
||||
"tags": {
|
||||
"v1.**",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: "on: [pull_request, workflow_dispatch]",
|
||||
result: []*Event{
|
||||
{
|
||||
Name: "pull_request",
|
||||
},
|
||||
{
|
||||
Name: "workflow_dispatch",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: "on:\n schedule:\n - cron: '20 6 * * *'",
|
||||
result: []*Event{
|
||||
{
|
||||
Name: "schedule",
|
||||
schedules: []map[string]string{
|
||||
{
|
||||
"cron": "20 6 * * *",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, kase := range kases {
|
||||
t.Run(kase.input, func(t *testing.T) {
|
||||
origin, err := model.ReadWorkflow(strings.NewReader(kase.input))
|
||||
assert.NoError(t, err)
|
||||
|
||||
events, err := ParseRawOn(&origin.RawOn)
|
||||
assert.NoError(t, err)
|
||||
assert.EqualValues(t, kase.result, events, fmt.Sprintf("%#v", events))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSingleWorkflow_SetJob(t *testing.T) {
|
||||
t.Run("erase needs", func(t *testing.T) {
|
||||
content := ReadTestdata(t, "erase_needs.in.yaml")
|
||||
want := ReadTestdata(t, "erase_needs.out.yaml")
|
||||
swf, err := Parse(content)
|
||||
require.NoError(t, err)
|
||||
builder := &strings.Builder{}
|
||||
for _, v := range swf {
|
||||
id, job := v.Job()
|
||||
require.NoError(t, v.SetJob(id, job.EraseNeeds()))
|
||||
|
||||
if builder.Len() > 0 {
|
||||
builder.WriteString("---\n")
|
||||
}
|
||||
encoder := yaml.NewEncoder(builder)
|
||||
encoder.SetIndent(2)
|
||||
require.NoError(t, encoder.Encode(v))
|
||||
}
|
||||
assert.Equal(t, string(want), builder.String())
|
||||
})
|
||||
}
|
||||
|
||||
func TestParseMappingNode(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
scalars []string
|
||||
datas []interface{}
|
||||
}{
|
||||
{
|
||||
input: "on:\n push:\n branches:\n - master",
|
||||
scalars: []string{"push"},
|
||||
datas: []interface {
|
||||
}{
|
||||
map[string]interface{}{
|
||||
"branches": []interface{}{"master"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: "on:\n branch_protection_rule:\n types: [created, deleted]",
|
||||
scalars: []string{"branch_protection_rule"},
|
||||
datas: []interface{}{
|
||||
map[string]interface{}{
|
||||
"types": []interface{}{"created", "deleted"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: "on:\n project:\n types: [created, deleted]\n milestone:\n types: [opened, deleted]",
|
||||
scalars: []string{"project", "milestone"},
|
||||
datas: []interface{}{
|
||||
map[string]interface{}{
|
||||
"types": []interface{}{"created", "deleted"},
|
||||
},
|
||||
map[string]interface{}{
|
||||
"types": []interface{}{"opened", "deleted"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: "on:\n pull_request:\n types:\n - opened\n branches:\n - 'releases/**'",
|
||||
scalars: []string{"pull_request"},
|
||||
datas: []interface{}{
|
||||
map[string]interface{}{
|
||||
"types": []interface{}{"opened"},
|
||||
"branches": []interface{}{"releases/**"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: "on:\n push:\n branches:\n - main\n pull_request:\n types:\n - opened\n branches:\n - '**'",
|
||||
scalars: []string{"push", "pull_request"},
|
||||
datas: []interface{}{
|
||||
map[string]interface{}{
|
||||
"branches": []interface{}{"main"},
|
||||
},
|
||||
map[string]interface{}{
|
||||
"types": []interface{}{"opened"},
|
||||
"branches": []interface{}{"**"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: "on:\n schedule:\n - cron: '20 6 * * *'",
|
||||
scalars: []string{"schedule"},
|
||||
datas: []interface{}{
|
||||
[]interface{}{map[string]interface{}{
|
||||
"cron": "20 6 * * *",
|
||||
}},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.input, func(t *testing.T) {
|
||||
workflow, err := model.ReadWorkflow(strings.NewReader(test.input))
|
||||
assert.NoError(t, err)
|
||||
|
||||
scalars, datas, err := parseMappingNode[interface{}](&workflow.RawOn)
|
||||
assert.NoError(t, err)
|
||||
assert.EqualValues(t, test.scalars, scalars, fmt.Sprintf("%#v", scalars))
|
||||
assert.EqualValues(t, test.datas, datas, fmt.Sprintf("%#v", datas))
|
||||
})
|
||||
}
|
||||
}
|
||||
8 pkg/jobparser/testdata/empty_step.in.yaml vendored
@@ -1,8 +0,0 @@
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
name: job1
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: echo job-1
|
||||
-
|
||||
7 pkg/jobparser/testdata/empty_step.out.yaml vendored
@@ -1,7 +0,0 @@
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
name: job1
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: echo job-1
|
||||
16 pkg/jobparser/testdata/erase_needs.in.yaml vendored
@@ -1,16 +0,0 @@
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: uname -a
|
||||
job2:
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: uname -a
|
||||
needs: job1
|
||||
job3:
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: uname -a
|
||||
needs: [job1, job2]
|
||||
23 pkg/jobparser/testdata/erase_needs.out.yaml vendored
@@ -1,23 +0,0 @@
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
name: job1
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: uname -a
|
||||
---
|
||||
name: test
|
||||
jobs:
|
||||
job2:
|
||||
name: job2
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: uname -a
|
||||
---
|
||||
name: test
|
||||
jobs:
|
||||
job3:
|
||||
name: job3
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: uname -a
|
||||
16 pkg/jobparser/testdata/has_needs.in.yaml vendored
@@ -1,16 +0,0 @@
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: uname -a
|
||||
job2:
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: uname -a
|
||||
needs: job1
|
||||
job3:
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: uname -a
|
||||
needs: [job1, job2]
|
||||
25 pkg/jobparser/testdata/has_needs.out.yaml vendored
@@ -1,25 +0,0 @@
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
name: job1
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: uname -a
|
||||
---
|
||||
name: test
|
||||
jobs:
|
||||
job2:
|
||||
name: job2
|
||||
needs: job1
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: uname -a
|
||||
---
|
||||
name: test
|
||||
jobs:
|
||||
job3:
|
||||
name: job3
|
||||
needs: [job1, job2]
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: uname -a
|
||||
14 pkg/jobparser/testdata/has_secrets.in.yaml vendored
@@ -1,14 +0,0 @@
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
name: job1
|
||||
runs-on: linux
|
||||
uses: .gitea/workflows/build.yml
|
||||
secrets:
|
||||
secret: hideme
|
||||
|
||||
job2:
|
||||
name: job2
|
||||
runs-on: linux
|
||||
uses: .gitea/workflows/build.yml
|
||||
secrets: inherit
|
||||
16 pkg/jobparser/testdata/has_secrets.out.yaml vendored
@@ -1,16 +0,0 @@
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
name: job1
|
||||
runs-on: linux
|
||||
uses: .gitea/workflows/build.yml
|
||||
secrets:
|
||||
secret: hideme
|
||||
---
|
||||
name: test
|
||||
jobs:
|
||||
job2:
|
||||
name: job2
|
||||
runs-on: linux
|
||||
uses: .gitea/workflows/build.yml
|
||||
secrets: inherit
|
||||
15 pkg/jobparser/testdata/has_with.in.yaml vendored
@@ -1,15 +0,0 @@
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
name: job1
|
||||
runs-on: linux
|
||||
uses: .gitea/workflows/build.yml
|
||||
with:
|
||||
package: service
|
||||
|
||||
job2:
|
||||
name: job2
|
||||
runs-on: linux
|
||||
uses: .gitea/workflows/build.yml
|
||||
with:
|
||||
package: module
|
||||
17 pkg/jobparser/testdata/has_with.out.yaml vendored
@@ -1,17 +0,0 @@
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
name: job1
|
||||
runs-on: linux
|
||||
uses: .gitea/workflows/build.yml
|
||||
with:
|
||||
package: service
|
||||
---
|
||||
name: test
|
||||
jobs:
|
||||
job2:
|
||||
name: job2
|
||||
runs-on: linux
|
||||
uses: .gitea/workflows/build.yml
|
||||
with:
|
||||
package: module
|
||||
22 pkg/jobparser/testdata/multiple_jobs.in.yaml vendored
@@ -1,22 +0,0 @@
|
||||
name: test
|
||||
jobs:
|
||||
zzz:
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: echo zzz
|
||||
job1:
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: uname -a && go version
|
||||
job2:
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: uname -a && go version
|
||||
job3:
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: uname -a && go version
|
||||
aaa:
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: uname -a && go version
|
||||
39 pkg/jobparser/testdata/multiple_jobs.out.yaml vendored
@@ -1,39 +0,0 @@
|
||||
name: test
|
||||
jobs:
|
||||
zzz:
|
||||
name: zzz
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: echo zzz
|
||||
---
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
name: job1
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: uname -a && go version
|
||||
---
|
||||
name: test
|
||||
jobs:
|
||||
job2:
|
||||
name: job2
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: uname -a && go version
|
||||
---
|
||||
name: test
|
||||
jobs:
|
||||
job3:
|
||||
name: job3
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: uname -a && go version
|
||||
---
|
||||
name: test
|
||||
jobs:
|
||||
aaa:
|
||||
name: aaa
|
||||
runs-on: linux
|
||||
steps:
|
||||
- run: uname -a && go version
|
||||
13 pkg/jobparser/testdata/multiple_matrix.in.yaml vendored
@@ -1,13 +0,0 @@
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ubuntu-22.04, ubuntu-20.04]
|
||||
version: [1.17, 1.18, 1.19]
|
||||
runs-on: ${{ matrix.os }}
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ matrix.version }}
|
||||
- run: uname -a && go version
|
||||
101 pkg/jobparser/testdata/multiple_matrix.out.yaml vendored
@@ -1,101 +0,0 @@
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
name: job1 (ubuntu-20.04, 1.17)
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ matrix.version }}
|
||||
- run: uname -a && go version
|
||||
strategy:
|
||||
matrix:
|
||||
os:
|
||||
- ubuntu-20.04
|
||||
version:
|
||||
- 1.17
|
||||
---
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
name: job1 (ubuntu-20.04, 1.18)
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ matrix.version }}
|
||||
- run: uname -a && go version
|
||||
strategy:
|
||||
matrix:
|
||||
os:
|
||||
- ubuntu-20.04
|
||||
version:
|
||||
- 1.18
|
||||
---
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
name: job1 (ubuntu-20.04, 1.19)
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ matrix.version }}
|
||||
- run: uname -a && go version
|
||||
strategy:
|
||||
matrix:
|
||||
os:
|
||||
- ubuntu-20.04
|
||||
version:
|
||||
- 1.19
|
||||
---
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
name: job1 (ubuntu-22.04, 1.17)
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ matrix.version }}
|
||||
- run: uname -a && go version
|
||||
strategy:
|
||||
matrix:
|
||||
os:
|
||||
- ubuntu-22.04
|
||||
version:
|
||||
- 1.17
|
||||
---
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
name: job1 (ubuntu-22.04, 1.18)
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ matrix.version }}
|
||||
- run: uname -a && go version
|
||||
strategy:
|
||||
matrix:
|
||||
os:
|
||||
- ubuntu-22.04
|
||||
version:
|
||||
- 1.18
|
||||
---
|
||||
name: test
|
||||
jobs:
|
||||
job1:
|
||||
name: job1 (ubuntu-22.04, 1.19)
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ matrix.version }}
|
||||
- run: uname -a && go version
|
||||
strategy:
|
||||
matrix:
|
||||
os:
|
||||
- ubuntu-22.04
|
||||
version:
|
||||
- 1.19
|
||||
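The fixture above is the expansion of the 2 OS x 3 Go versions matrix in multiple_matrix.in.yaml into six single-job workflows. A small sketch of reproducing that count with Parse is shown below, assuming the fixture path is resolved relative to the repository root.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/nektos/act/pkg/jobparser"
)

func main() {
	// Path assumed relative to the repository root.
	content, err := os.ReadFile("pkg/jobparser/testdata/multiple_matrix.in.yaml")
	if err != nil {
		log.Fatal(err)
	}
	flows, err := jobparser.Parse(content)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(flows)) // 6: one SingleWorkflow per matrix combination
	for _, swf := range flows {
		_, job := swf.Job()
		fmt.Println(job.Name, job.RunsOn()) // e.g. "job1 (ubuntu-20.04, 1.17) [ubuntu-20.04]"
	}
}
```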
@@ -1,18 +0,0 @@
|
||||
package jobparser
|
||||
|
||||
import (
|
||||
"embed"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
//go:embed testdata
|
||||
var testdata embed.FS
|
||||
|
||||
func ReadTestdata(t *testing.T, name string) []byte {
|
||||
content, err := testdata.ReadFile(filepath.Join("testdata", name))
|
||||
require.NoError(t, err)
|
||||
return content
|
||||
}
|
||||
@@ -20,7 +20,7 @@ func (a *ActionRunsUsing) UnmarshalYAML(unmarshal func(interface{}) error) error
|
||||
// Force input to lowercase for case insensitive comparison
|
||||
format := ActionRunsUsing(strings.ToLower(using))
|
||||
switch format {
|
||||
case ActionRunsUsingNode20, ActionRunsUsingNode16, ActionRunsUsingNode12, ActionRunsUsingDocker, ActionRunsUsingComposite, ActionRunsUsingGo:
|
||||
case ActionRunsUsingNode20, ActionRunsUsingNode16, ActionRunsUsingNode12, ActionRunsUsingDocker, ActionRunsUsingComposite:
|
||||
*a = format
|
||||
default:
|
||||
return fmt.Errorf(fmt.Sprintf("The runs.using key in action.yml must be one of: %v, got %s", []string{
|
||||
@@ -29,7 +29,6 @@ func (a *ActionRunsUsing) UnmarshalYAML(unmarshal func(interface{}) error) error
|
||||
ActionRunsUsingNode12,
|
||||
ActionRunsUsingNode16,
|
||||
ActionRunsUsingNode20,
|
||||
ActionRunsUsingGo,
|
||||
}, format))
|
||||
}
|
||||
return nil
|
||||
@@ -46,8 +45,6 @@ const (
|
||||
ActionRunsUsingDocker = "docker"
|
||||
// ActionRunsUsingComposite for running composite
|
||||
ActionRunsUsingComposite = "composite"
|
||||
// ActionRunsUsingGo for running with go
|
||||
ActionRunsUsingGo = "go"
|
||||
)
|
||||
|
||||
// ActionRuns are a field in Action
|
||||
|
||||
@@ -162,13 +162,6 @@ func NewWorkflowPlanner(path string, noWorkflowRecurse bool) (WorkflowPlanner, e
|
||||
return wp, nil
|
||||
}
|
||||
|
||||
// CombineWorkflowPlanner combines workflows to a WorkflowPlanner
|
||||
func CombineWorkflowPlanner(workflows ...*Workflow) WorkflowPlanner {
|
||||
return &workflowPlanner{
|
||||
workflows: workflows,
|
||||
}
|
||||
}
|
||||
|
||||
func NewSingleWorkflowPlanner(name string, f io.Reader) (WorkflowPlanner, error) {
|
||||
wp := new(workflowPlanner)
|
||||
|
||||
|
||||
@@ -66,30 +66,6 @@ func (w *Workflow) OnEvent(event string) interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *Workflow) OnSchedule() []string {
|
||||
schedules := w.OnEvent("schedule")
|
||||
if schedules == nil {
|
||||
return []string{}
|
||||
}
|
||||
|
||||
switch val := schedules.(type) {
|
||||
case []interface{}:
|
||||
allSchedules := []string{}
|
||||
for _, v := range val {
|
||||
for k, cron := range v.(map[string]interface{}) {
|
||||
if k != "cron" {
|
||||
continue
|
||||
}
|
||||
allSchedules = append(allSchedules, cron.(string))
|
||||
}
|
||||
}
|
||||
return allSchedules
|
||||
default:
|
||||
}
|
||||
|
||||
return []string{}
|
||||
}
|
||||
|
||||
type WorkflowDispatchInput struct {
|
||||
Description string `yaml:"description"`
|
||||
Required bool `yaml:"required"`
|
||||
@@ -367,7 +343,7 @@ func environment(yml yaml.Node) map[string]string {
|
||||
return env
|
||||
}
|
||||
|
||||
// Environments returns string-based key=value map for a job
|
||||
// Environment returns string-based key=value map for a job
|
||||
func (j *Job) Environment() map[string]string {
|
||||
return environment(j.Env)
|
||||
}
|
||||
@@ -573,14 +549,10 @@ type ContainerSpec struct {
|
||||
Args string
|
||||
Name string
|
||||
Reuse bool
|
||||
|
||||
// Gitea specific
|
||||
Cmd []string `yaml:"cmd"`
|
||||
}
|
||||
|
||||
// Step is the structure of one step in a job
|
||||
type Step struct {
|
||||
Number int `yaml:"-"`
|
||||
ID string `yaml:"id"`
|
||||
If yaml.Node `yaml:"if"`
|
||||
Name string `yaml:"name"`
|
||||
@@ -606,7 +578,7 @@ func (s *Step) String() string {
|
||||
return s.ID
|
||||
}
|
||||
|
||||
// Environments returns string-based key=value map for a step
|
||||
// Environment returns string-based key=value map for a step
|
||||
func (s *Step) Environment() map[string]string {
|
||||
return environment(s.Env)
|
||||
}
|
||||
@@ -627,7 +599,7 @@ func (s *Step) GetEnv() map[string]string {
|
||||
func (s *Step) ShellCommand() string {
|
||||
shellCommand := ""
|
||||
|
||||
// Reference: https://github.com/actions/runner/blob/8109c962f09d9acc473d92c595ff43afceddb347/src/Runner.Worker/Handlers/ScriptHandlerHelpers.cs#L9-L17
|
||||
//Reference: https://github.com/actions/runner/blob/8109c962f09d9acc473d92c595ff43afceddb347/src/Runner.Worker/Handlers/ScriptHandlerHelpers.cs#L9-L17
|
||||
switch s.Shell {
|
||||
case "", "bash":
|
||||
shellCommand = "bash --noprofile --norc -e -o pipefail {0}"
|
||||
|
||||
@@ -7,88 +7,6 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestReadWorkflow_ScheduleEvent(t *testing.T) {
|
||||
yaml := `
|
||||
name: local-action-docker-url
|
||||
on:
|
||||
schedule:
|
||||
- cron: '30 5 * * 1,3'
|
||||
- cron: '30 5 * * 2,4'
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: ./actions/docker-url
|
||||
`
|
||||
|
||||
workflow, err := ReadWorkflow(strings.NewReader(yaml))
|
||||
assert.NoError(t, err, "read workflow should succeed")
|
||||
|
||||
schedules := workflow.OnEvent("schedule")
|
||||
assert.Len(t, schedules, 2)
|
||||
|
||||
newSchedules := workflow.OnSchedule()
|
||||
assert.Len(t, newSchedules, 2)
|
||||
|
||||
assert.Equal(t, "30 5 * * 1,3", newSchedules[0])
|
||||
assert.Equal(t, "30 5 * * 2,4", newSchedules[1])
|
||||
|
||||
yaml = `
|
||||
name: local-action-docker-url
|
||||
on:
|
||||
schedule:
|
||||
test: '30 5 * * 1,3'
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: ./actions/docker-url
|
||||
`
|
||||
|
||||
workflow, err = ReadWorkflow(strings.NewReader(yaml))
|
||||
assert.NoError(t, err, "read workflow should succeed")
|
||||
|
||||
newSchedules = workflow.OnSchedule()
|
||||
assert.Len(t, newSchedules, 0)
|
||||
|
||||
yaml = `
|
||||
name: local-action-docker-url
|
||||
on:
|
||||
schedule:
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: ./actions/docker-url
|
||||
`
|
||||
|
||||
workflow, err = ReadWorkflow(strings.NewReader(yaml))
|
||||
assert.NoError(t, err, "read workflow should succeed")
|
||||
|
||||
newSchedules = workflow.OnSchedule()
|
||||
assert.Len(t, newSchedules, 0)
|
||||
|
||||
yaml = `
|
||||
name: local-action-docker-url
|
||||
on: [push, tag]
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: ./actions/docker-url
|
||||
`
|
||||
|
||||
workflow, err = ReadWorkflow(strings.NewReader(yaml))
|
||||
assert.NoError(t, err, "read workflow should succeed")
|
||||
|
||||
newSchedules = workflow.OnSchedule()
|
||||
assert.Len(t, newSchedules, 0)
|
||||
}
|
||||
|
||||
func TestReadWorkflow_StringEvent(t *testing.T) {
|
||||
yaml := `
|
||||
name: local-action-docker-url
|
||||
|
||||
@@ -197,21 +197,6 @@ func runActionImpl(step actionStep, actionDir string, remoteAction *remoteAction
|
||||
}
|
||||
|
||||
return execAsComposite(step)(ctx)
|
||||
case model.ActionRunsUsingGo:
|
||||
if err := maybeCopyToActionDir(ctx, step, actionDir, actionPath, containerActionDir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rc.ApplyExtraPath(ctx, step.getEnv())
|
||||
|
||||
execFileName := fmt.Sprintf("%s.out", action.Runs.Main)
|
||||
buildArgs := []string{"go", "build", "-o", execFileName, action.Runs.Main}
|
||||
execArgs := []string{filepath.Join(containerActionDir, execFileName)}
|
||||
|
||||
return common.NewPipelineExecutor(
|
||||
rc.execJobContainer(buildArgs, *step.getEnv(), "", containerActionDir),
|
||||
rc.execJobContainer(execArgs, *step.getEnv(), "", ""),
|
||||
)(ctx)
|
||||
default:
|
||||
return fmt.Errorf(fmt.Sprintf("The runs.using key must be one of: %v, got %s", []string{
|
||||
model.ActionRunsUsingDocker,
|
||||
@@ -219,7 +204,6 @@ func runActionImpl(step actionStep, actionDir string, remoteAction *remoteAction
|
||||
model.ActionRunsUsingNode16,
|
||||
model.ActionRunsUsingNode20,
|
||||
model.ActionRunsUsingComposite,
|
||||
model.ActionRunsUsingGo,
|
||||
}, action.Runs.Using))
|
||||
}
|
||||
}
|
||||
@@ -415,25 +399,23 @@ func newStepContainer(ctx context.Context, step step, image string, cmd []string
|
||||
networkMode = "default"
|
||||
}
|
||||
stepContainer := container.NewContainer(&container.NewContainerInput{
|
||||
Cmd: cmd,
|
||||
Entrypoint: entrypoint,
|
||||
WorkingDir: rc.JobContainer.ToContainerPath(rc.Config.Workdir),
|
||||
Image: image,
|
||||
Username: rc.Config.Secrets["DOCKER_USERNAME"],
|
||||
Password: rc.Config.Secrets["DOCKER_PASSWORD"],
|
||||
Name: createSimpleContainerName(rc.jobContainerName(), "STEP-"+stepModel.ID),
|
||||
Env: envList,
|
||||
Mounts: mounts,
|
||||
NetworkMode: networkMode,
|
||||
Binds: binds,
|
||||
Stdout: logWriter,
|
||||
Stderr: logWriter,
|
||||
Privileged: rc.Config.Privileged,
|
||||
UsernsMode: rc.Config.UsernsMode,
|
||||
Platform: rc.Config.ContainerArchitecture,
|
||||
Options: rc.Config.ContainerOptions,
|
||||
AutoRemove: rc.Config.AutoRemove,
|
||||
ValidVolumes: rc.Config.ValidVolumes,
|
||||
Cmd: cmd,
|
||||
Entrypoint: entrypoint,
|
||||
WorkingDir: rc.JobContainer.ToContainerPath(rc.Config.Workdir),
|
||||
Image: image,
|
||||
Username: rc.Config.Secrets["DOCKER_USERNAME"],
|
||||
Password: rc.Config.Secrets["DOCKER_PASSWORD"],
|
||||
Name: createContainerName(rc.jobContainerName(), stepModel.ID),
|
||||
Env: envList,
|
||||
Mounts: mounts,
|
||||
NetworkMode: networkMode,
|
||||
Binds: binds,
|
||||
Stdout: logWriter,
|
||||
Stderr: logWriter,
|
||||
Privileged: rc.Config.Privileged,
|
||||
UsernsMode: rc.Config.UsernsMode,
|
||||
Platform: rc.Config.ContainerArchitecture,
|
||||
Options: rc.Config.ContainerOptions,
|
||||
})
|
||||
return stepContainer
|
||||
}
|
||||
@@ -509,8 +491,7 @@ func hasPreStep(step actionStep) common.Conditional {
|
||||
return action.Runs.Using == model.ActionRunsUsingComposite ||
|
||||
((action.Runs.Using == model.ActionRunsUsingNode12 ||
|
||||
action.Runs.Using == model.ActionRunsUsingNode16 ||
|
||||
action.Runs.Using == model.ActionRunsUsingNode20 ||
|
||||
action.Runs.Using == model.ActionRunsUsingGo) &&
|
||||
action.Runs.Using == model.ActionRunsUsingNode20) &&
|
||||
action.Runs.Pre != "")
|
||||
}
|
||||
}
|
||||
@@ -569,43 +550,6 @@ func runPreStep(step actionStep) common.Executor {
|
||||
}
|
||||
return fmt.Errorf("missing steps in composite action")
|
||||
|
||||
case model.ActionRunsUsingGo:
|
||||
// defaults in pre steps were missing, however provided inputs are available
|
||||
populateEnvsFromInput(ctx, step.getEnv(), action, rc)
|
||||
// todo: refactor into step
|
||||
var actionDir string
|
||||
var actionPath string
|
||||
if _, ok := step.(*stepActionRemote); ok {
|
||||
actionPath = newRemoteAction(stepModel.Uses).Path
|
||||
actionDir = fmt.Sprintf("%s/%s", rc.ActionCacheDir(), safeFilename(stepModel.Uses))
|
||||
} else {
|
||||
actionDir = filepath.Join(rc.Config.Workdir, stepModel.Uses)
|
||||
actionPath = ""
|
||||
}
|
||||
|
||||
actionLocation := ""
|
||||
if actionPath != "" {
|
||||
actionLocation = path.Join(actionDir, actionPath)
|
||||
} else {
|
||||
actionLocation = actionDir
|
||||
}
|
||||
|
||||
_, containerActionDir := getContainerActionPaths(stepModel, actionLocation, rc)
|
||||
|
||||
if err := maybeCopyToActionDir(ctx, step, actionDir, actionPath, containerActionDir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rc.ApplyExtraPath(ctx, step.getEnv())
|
||||
|
||||
execFileName := fmt.Sprintf("%s.out", action.Runs.Pre)
|
||||
buildArgs := []string{"go", "build", "-o", execFileName, action.Runs.Pre}
|
||||
execArgs := []string{filepath.Join(containerActionDir, execFileName)}
|
||||
|
||||
return common.NewPipelineExecutor(
|
||||
rc.execJobContainer(buildArgs, *step.getEnv(), "", containerActionDir),
|
||||
rc.execJobContainer(execArgs, *step.getEnv(), "", ""),
|
||||
)(ctx)
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
@@ -643,8 +587,7 @@ func hasPostStep(step actionStep) common.Conditional {
|
||||
return action.Runs.Using == model.ActionRunsUsingComposite ||
|
||||
((action.Runs.Using == model.ActionRunsUsingNode12 ||
|
||||
action.Runs.Using == model.ActionRunsUsingNode16 ||
|
||||
action.Runs.Using == model.ActionRunsUsingNode20 ||
|
||||
action.Runs.Using == model.ActionRunsUsingGo) &&
|
||||
action.Runs.Using == model.ActionRunsUsingNode20) &&
|
||||
action.Runs.Post != "")
|
||||
}
|
||||
}
|
||||
@@ -700,19 +643,6 @@ func runPostStep(step actionStep) common.Executor {
|
||||
}
|
||||
return fmt.Errorf("missing steps in composite action")
|
||||
|
||||
case model.ActionRunsUsingGo:
|
||||
populateEnvsFromSavedState(step.getEnv(), step, rc)
|
||||
rc.ApplyExtraPath(ctx, step.getEnv())
|
||||
|
||||
execFileName := fmt.Sprintf("%s.out", action.Runs.Post)
|
||||
buildArgs := []string{"go", "build", "-o", execFileName, action.Runs.Post}
|
||||
execArgs := []string{filepath.Join(containerActionDir, execFileName)}
|
||||
|
||||
return common.NewPipelineExecutor(
|
||||
rc.execJobContainer(buildArgs, *step.getEnv(), "", containerActionDir),
|
||||
rc.execJobContainer(execArgs, *step.getEnv(), "", ""),
|
||||
)(ctx)
|
||||
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -27,7 +27,7 @@ func evaluateCompositeInputAndEnv(ctx context.Context, parent *RunContext, step
|
||||
envKey := regexp.MustCompile("[^A-Z0-9-]").ReplaceAllString(strings.ToUpper(inputID), "_")
|
||||
envKey = fmt.Sprintf("INPUT_%s", strings.ToUpper(envKey))
|
||||
|
||||
// lookup if key is defined in the step but the the already
|
||||
// lookup if key is defined in the step but the already
|
||||
// evaluated value from the environment
|
||||
_, defined := step.getStepModel().With[inputID]
|
||||
if value, ok := stepEnv[envKey]; defined && ok {
|
||||
@@ -140,7 +140,6 @@ func (rc *RunContext) compositeExecutor(action *model.Action) *compositeSteps {
|
||||
if step.ID == "" {
|
||||
step.ID = fmt.Sprintf("%d", i)
|
||||
}
|
||||
step.Number = i
|
||||
|
||||
// create a copy of the step, since this composite action could
|
||||
// run multiple times and we might modify the instance
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
)
|
||||
|
||||
var commandPatternGA *regexp.Regexp
|
||||
|
||||
var commandPatternADO *regexp.Regexp
|
||||
|
||||
func init() {
|
||||
@@ -42,9 +41,7 @@ func (rc *RunContext) commandHandler(ctx context.Context) common.LineHandler {
|
||||
}
|
||||
|
||||
if resumeCommand != "" && command != resumeCommand {
|
||||
// There should not be any emojis in the log output for Gitea.
|
||||
// The code in the switch statement is the same.
|
||||
logger.Infof("%s", line)
|
||||
logger.Infof(" \U00002699 %s", line)
|
||||
return false
|
||||
}
|
||||
arg = unescapeCommandData(arg)
|
||||
@@ -57,37 +54,36 @@ func (rc *RunContext) commandHandler(ctx context.Context) common.LineHandler {
|
||||
case "add-path":
|
||||
rc.addPath(ctx, arg)
|
||||
case "debug":
|
||||
logger.Infof("%s", line)
|
||||
logger.Infof(" \U0001F4AC %s", line)
|
||||
case "warning":
|
||||
logger.Infof("%s", line)
|
||||
logger.Infof(" \U0001F6A7 %s", line)
|
||||
case "error":
|
||||
logger.Infof("%s", line)
|
||||
logger.Infof(" \U00002757 %s", line)
|
||||
case "add-mask":
|
||||
rc.AddMask(arg)
|
||||
logger.Infof("%s", "***")
|
||||
logger.Infof(" \U00002699 %s", "***")
|
||||
case "stop-commands":
|
||||
resumeCommand = arg
|
||||
logger.Infof("%s", line)
|
||||
logger.Infof(" \U00002699 %s", line)
|
||||
case resumeCommand:
|
||||
resumeCommand = ""
|
||||
logger.Infof("%s", line)
|
||||
logger.Infof(" \U00002699 %s", line)
|
||||
case "save-state":
|
||||
logger.Infof("%s", line)
|
||||
logger.Infof(" \U0001f4be %s", line)
|
||||
rc.saveState(ctx, kvPairs, arg)
|
||||
case "add-matcher":
|
||||
logger.Infof("%s", line)
|
||||
logger.Infof(" \U00002753 add-matcher %s", arg)
|
||||
default:
|
||||
logger.Infof("%s", line)
|
||||
logger.Infof(" \U00002753 %s", line)
|
||||
}
|
||||
|
||||
// return true to let gitea's logger handle these special outputs also
|
||||
return true
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (rc *RunContext) setEnv(ctx context.Context, kvPairs map[string]string, arg string) {
|
||||
name := kvPairs["name"]
|
||||
common.Logger(ctx).Infof("::set-env:: %s=%s", name, arg)
|
||||
common.Logger(ctx).Infof(" \U00002699 ::set-env:: %s=%s", name, arg)
|
||||
if rc.Env == nil {
|
||||
rc.Env = make(map[string]string)
|
||||
}
|
||||
@@ -104,7 +100,6 @@ func (rc *RunContext) setEnv(ctx context.Context, kvPairs map[string]string, arg
|
||||
mergeIntoMap(rc.Env, newenv)
|
||||
mergeIntoMap(rc.GlobalEnv, newenv)
|
||||
}
|
||||
|
||||
func (rc *RunContext) setOutput(ctx context.Context, kvPairs map[string]string, arg string) {
|
||||
logger := common.Logger(ctx)
|
||||
stepID := rc.CurrentStep
|
||||
@@ -120,12 +115,11 @@ func (rc *RunContext) setOutput(ctx context.Context, kvPairs map[string]string,
|
||||
return
|
||||
}
|
||||
|
||||
logger.Infof("::set-output:: %s=%s", outputName, arg)
|
||||
logger.Infof(" \U00002699 ::set-output:: %s=%s", outputName, arg)
|
||||
result.Outputs[outputName] = arg
|
||||
}
|
||||
|
||||
func (rc *RunContext) addPath(ctx context.Context, arg string) {
|
||||
common.Logger(ctx).Infof("::add-path:: %s", arg)
|
||||
common.Logger(ctx).Infof(" \U00002699 ::add-path:: %s", arg)
|
||||
extraPath := []string{arg}
|
||||
for _, v := range rc.ExtraPath {
|
||||
if v != arg {
|
||||
@@ -146,7 +140,6 @@ func parseKeyValuePairs(kvPairs string, separator string) map[string]string {
|
||||
}
|
||||
return rtn
|
||||
}
|
||||
|
||||
func unescapeCommandData(arg string) string {
|
||||
escapeMap := map[string]string{
|
||||
"%25": "%",
|
||||
@@ -158,7 +151,6 @@ func unescapeCommandData(arg string) string {
|
||||
}
|
||||
return arg
|
||||
}
|
||||
|
||||
func unescapeCommandProperty(arg string) string {
|
||||
escapeMap := map[string]string{
|
||||
"%25": "%",
|
||||
@@ -172,7 +164,6 @@ func unescapeCommandProperty(arg string) string {
|
||||
}
|
||||
return arg
|
||||
}
|
||||
|
||||
func unescapeKvPairs(kvPairs map[string]string) map[string]string {
|
||||
for k, v := range kvPairs {
|
||||
kvPairs[k] = unescapeCommandProperty(v)
|
||||
|
||||
@@ -106,7 +106,7 @@ func (rc *RunContext) NewExpressionEvaluatorWithEnv(ctx context.Context, env map
|
||||
//go:embed hashfiles/index.js
|
||||
var hashfiles string
|
||||
|
||||
// NewExpressionEvaluator creates a new evaluator
|
||||
// NewStepExpressionEvaluator creates a new evaluator
|
||||
func (rc *RunContext) NewStepExpressionEvaluator(ctx context.Context, step step) ExpressionEvaluator {
|
||||
// todo: cleanup EvaluationEnvironment creation
|
||||
job := rc.Run.Job()
|
||||
|
||||
@@ -63,7 +63,6 @@ func newJobExecutor(info jobInfo, sf stepFactory, rc *RunContext) common.Executo
|
||||
if stepModel.ID == "" {
|
||||
stepModel.ID = fmt.Sprintf("%d", i)
|
||||
}
|
||||
stepModel.Number = i
|
||||
|
||||
step, err := sf.newStep(stepModel, rc)
|
||||
|
||||
@@ -71,19 +70,7 @@ func newJobExecutor(info jobInfo, sf stepFactory, rc *RunContext) common.Executo
|
||||
return common.NewErrorExecutor(err)
|
||||
}
|
||||
|
||||
preExec := step.pre()
|
||||
preSteps = append(preSteps, useStepLogger(rc, stepModel, stepStagePre, func(ctx context.Context) error {
|
||||
logger := common.Logger(ctx)
|
||||
preErr := preExec(ctx)
|
||||
if preErr != nil {
|
||||
logger.Errorf("%v", preErr)
|
||||
common.SetJobError(ctx, preErr)
|
||||
} else if ctx.Err() != nil {
|
||||
logger.Errorf("%v", ctx.Err())
|
||||
common.SetJobError(ctx, ctx.Err())
|
||||
}
|
||||
return preErr
|
||||
}))
|
||||
preSteps = append(preSteps, useStepLogger(rc, stepModel, stepStagePre, step.pre()))
|
||||
|
||||
stepExec := step.main()
|
||||
steps = append(steps, useStepLogger(rc, stepModel, stepStageMain, func(ctx context.Context) error {
|
||||
@@ -117,31 +104,10 @@ func newJobExecutor(info jobInfo, sf stepFactory, rc *RunContext) common.Executo
|
||||
defer cancel()
|
||||
|
||||
logger := common.Logger(ctx)
|
||||
// For Gitea
|
||||
// We don't need to call `stopServiceContainers` here since it will be called by following `info.stopContainer`
|
||||
// logger.Infof("Cleaning up services for job %s", rc.JobName)
|
||||
// if err := rc.stopServiceContainers()(ctx); err != nil {
|
||||
// logger.Errorf("Error while cleaning services: %v", err)
|
||||
// }
|
||||
|
||||
logger.Infof("Cleaning up container for job %s", rc.JobName)
|
||||
if err = info.stopContainer()(ctx); err != nil {
|
||||
logger.Errorf("Error while stop job container: %v", err)
|
||||
}
|
||||
|
||||
// For Gitea
|
||||
// We don't need to call `NewDockerNetworkRemoveExecutor` here since it is called by above `info.stopContainer`
|
||||
// if !rc.IsHostEnv(ctx) && rc.Config.ContainerNetworkMode == "" {
|
||||
// // clean network in docker mode only
|
||||
// // if the value of `ContainerNetworkMode` is empty string,
|
||||
// // it means that the network to which containers are connecting is created by `act_runner`,
|
||||
// // so, we should remove the network at last.
|
||||
// networkName, _ := rc.networkName()
|
||||
// logger.Infof("Cleaning up network for job %s, and network name is: %s", rc.JobName, networkName)
|
||||
// if err := container.NewDockerNetworkRemoveExecutor(networkName)(ctx); err != nil {
|
||||
// logger.Errorf("Error while cleaning network: %v", err)
|
||||
// }
|
||||
// }
|
||||
}
|
||||
setJobResult(ctx, info, rc, jobError == nil)
|
||||
setJobOutputs(ctx, rc)
|
||||
@@ -213,7 +179,7 @@ func setJobOutputs(ctx context.Context, rc *RunContext) {
|
||||
|
||||
func useStepLogger(rc *RunContext, stepModel *model.Step, stage stepStage, executor common.Executor) common.Executor {
|
||||
return func(ctx context.Context) error {
|
||||
ctx = withStepLogger(ctx, stepModel.Number, stepModel.ID, rc.ExprEval.Interpolate(ctx, stepModel.String()), stage.String())
|
||||
ctx = withStepLogger(ctx, stepModel.ID, rc.ExprEval.Interpolate(ctx, stepModel.String()), stage.String())
|
||||
|
||||
rawLogger := common.Logger(ctx).WithField("raw_output", true)
|
||||
logWriter := common.NewLineWriter(rc.commandHandler(ctx), func(s string) bool {
|
||||
|
||||
@@ -52,7 +52,7 @@ func Masks(ctx context.Context) *[]string {
|
||||
return &[]string{}
|
||||
}
|
||||
|
||||
// WithLogger adds a value to the context for the logger
|
||||
// WithMasks adds a value to the context for the logger
|
||||
func WithMasks(ctx context.Context, masks *[]string) context.Context {
|
||||
return context.WithValue(ctx, masksContextKeyVal, masks)
|
||||
}
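`WithMasks` stores a pointer to the mask slice in the context, and the accessor above stays nil-safe by returning an empty slice pointer when nothing was stored. A rough sketch of that pattern with a local context key (the key type and helper names are illustrative, not the package's actual identifiers):

```go
package main

import (
	"context"
	"fmt"
)

type masksKey struct{}

// withMasks stores a pointer so later code can append new secrets without
// replacing the context value.
func withMasks(ctx context.Context, masks *[]string) context.Context {
	return context.WithValue(ctx, masksKey{}, masks)
}

// masksFrom mirrors the nil-safe accessor: callers always get a usable
// slice pointer, even if nothing was stored.
func masksFrom(ctx context.Context) *[]string {
	if m, ok := ctx.Value(masksKey{}).(*[]string); ok {
		return m
	}
	return &[]string{}
}

func main() {
	masks := []string{"initial-secret"}
	ctx := withMasks(context.Background(), &masks)

	// A command handler (e.g. ::add-mask::) can append through the pointer.
	*masksFrom(ctx) = append(*masksFrom(ctx), "runtime-secret")

	fmt.Println(*masksFrom(ctx)) // [initial-secret runtime-secret]
}
```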
@@ -96,17 +96,6 @@ func WithJobLogger(ctx context.Context, jobID string, jobName string, config *Co
|
||||
logger.SetFormatter(formatter)
|
||||
}
|
||||
|
||||
{ // Adapt to Gitea
|
||||
if hook := common.LoggerHook(ctx); hook != nil {
|
||||
logger.AddHook(hook)
|
||||
}
|
||||
if config.JobLoggerLevel != nil {
|
||||
logger.SetLevel(*config.JobLoggerLevel)
|
||||
} else {
|
||||
logger.SetLevel(logrus.TraceLevel)
|
||||
}
|
||||
}
|
||||
|
||||
logger.SetFormatter(&maskedFormatter{
|
||||
Formatter: logger.Formatter,
|
||||
masker: valueMasker(config.InsecureSecrets, config.Secrets),
|
||||
@@ -143,12 +132,11 @@ func WithCompositeStepLogger(ctx context.Context, stepID string) context.Context
|
||||
}).WithContext(ctx))
|
||||
}
|
||||
|
||||
func withStepLogger(ctx context.Context, stepNumber int, stepID, stepName, stageName string) context.Context {
|
||||
func withStepLogger(ctx context.Context, stepID string, stepName string, stageName string) context.Context {
|
||||
rtn := common.Logger(ctx).WithFields(logrus.Fields{
|
||||
"stepNumber": stepNumber,
|
||||
"step": stepName,
|
||||
"stepID": []string{stepID},
|
||||
"stage": stageName,
|
||||
"step": stepName,
|
||||
"stepID": []string{stepID},
|
||||
"stage": stageName,
|
||||
})
|
||||
return common.WithLogger(ctx, rtn)
|
||||
}
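The fork's version of `withStepLogger` carries a `stepNumber` field alongside `stepID`, `step`, and `stage`. A hedged sketch of attaching such a field-scoped logrus entry to a context (the key type and helper name are illustrative):

```go
package main

import (
	"context"

	"github.com/sirupsen/logrus"
)

type loggerKey struct{}

// withStepFields tags a logrus entry with per-step metadata so every log
// line emitted while this step runs carries the same fields.
func withStepFields(ctx context.Context, number int, id, name, stage string) context.Context {
	entry := logrus.WithFields(logrus.Fields{
		"stepNumber": number,
		"stepID":     []string{id},
		"step":       name,
		"stage":      stage,
	})
	return context.WithValue(ctx, loggerKey{}, entry)
}

func main() {
	ctx := withStepFields(context.Background(), 1, "checkout", "Check out code", "Main")
	if entry, ok := ctx.Value(loggerKey{}).(*logrus.Entry); ok {
		entry.Info("step started")
	}
}
```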
@@ -9,7 +9,6 @@ import (
|
||||
"os"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/nektos/act/pkg/common"
|
||||
@@ -18,45 +17,15 @@ import (
|
||||
)
|
||||
|
||||
func newLocalReusableWorkflowExecutor(rc *RunContext) common.Executor {
|
||||
if !rc.Config.NoSkipCheckout {
|
||||
fullPath := rc.Run.Job().Uses
|
||||
|
||||
fileName := path.Base(fullPath)
|
||||
workflowDir := strings.TrimSuffix(fullPath, path.Join("/", fileName))
|
||||
workflowDir = strings.TrimPrefix(workflowDir, "./")
|
||||
|
||||
return common.NewPipelineExecutor(
|
||||
newReusableWorkflowExecutor(rc, workflowDir, fileName),
|
||||
)
|
||||
}
|
||||
|
||||
// ./.gitea/workflows/wf.yml -> .gitea/workflows/wf.yml
|
||||
trimmedUses := strings.TrimPrefix(rc.Run.Job().Uses, "./")
|
||||
// uses string format is {owner}/{repo}/.{git_platform}/workflows/{filename}@{ref}
|
||||
uses := fmt.Sprintf("%s/%s@%s", rc.Config.PresetGitHubContext.Repository, trimmedUses, rc.Config.PresetGitHubContext.Sha)
|
||||
|
||||
remoteReusableWorkflow := newRemoteReusableWorkflowWithPlat(rc.Config.GitHubInstance, uses)
|
||||
if remoteReusableWorkflow == nil {
|
||||
return common.NewErrorExecutor(fmt.Errorf("expected format {owner}/{repo}/.{git_platform}/workflows/{filename}@{ref}. Actual '%s' Input string was not in a correct format", uses))
|
||||
}
|
||||
|
||||
workflowDir := fmt.Sprintf("%s/%s", rc.ActionCacheDir(), safeFilename(uses))
|
||||
|
||||
// If the repository is private, we need a token to clone it
|
||||
token := rc.Config.GetToken()
|
||||
|
||||
return common.NewPipelineExecutor(
|
||||
newMutexExecutor(cloneIfRequired(rc, *remoteReusableWorkflow, workflowDir, token)),
|
||||
newReusableWorkflowExecutor(rc, workflowDir, remoteReusableWorkflow.FilePath()),
|
||||
)
|
||||
return newReusableWorkflowExecutor(rc, rc.Config.Workdir, rc.Run.Job().Uses)
|
||||
}
|
||||
|
||||
func newRemoteReusableWorkflowExecutor(rc *RunContext) common.Executor {
|
||||
uses := rc.Run.Job().Uses
|
||||
|
||||
remoteReusableWorkflow := newRemoteReusableWorkflowWithPlat(rc.Config.GitHubInstance, uses)
|
||||
remoteReusableWorkflow := newRemoteReusableWorkflow(uses)
|
||||
if remoteReusableWorkflow == nil {
|
||||
return common.NewErrorExecutor(fmt.Errorf("expected format {owner}/{repo}/.{git_platform}/workflows/{filename}@{ref}. Actual '%s' Input string was not in a correct format", uses))
|
||||
return common.NewErrorExecutor(fmt.Errorf("expected format {owner}/{repo}/.github/workflows/{filename}@{ref}. Actual '%s' Input string was not in a correct format", uses))
|
||||
}
|
||||
|
||||
// uses with safe filename makes the target directory look something like this {owner}-{repo}-.github-workflows-{filename}@{ref}
|
||||
@@ -69,12 +38,9 @@ func newRemoteReusableWorkflowExecutor(rc *RunContext) common.Executor {
|
||||
return newActionCacheReusableWorkflowExecutor(rc, filename, remoteReusableWorkflow)
|
||||
}
|
||||
|
||||
// FIXME: if the reusable workflow is from a private repository, we need to provide a token to access the repository.
|
||||
token := ""
|
||||
|
||||
return common.NewPipelineExecutor(
|
||||
newMutexExecutor(cloneIfRequired(rc, *remoteReusableWorkflow, workflowDir, token)),
|
||||
newReusableWorkflowExecutor(rc, workflowDir, remoteReusableWorkflow.FilePath()),
|
||||
newMutexExecutor(cloneIfRequired(rc, *remoteReusableWorkflow, workflowDir)),
|
||||
newReusableWorkflowExecutor(rc, workflowDir, fmt.Sprintf("./.github/workflows/%s", remoteReusableWorkflow.Filename)),
|
||||
)
|
||||
}
|
||||
|
||||
@@ -126,7 +92,7 @@ func newMutexExecutor(executor common.Executor) common.Executor {
|
||||
}
|
||||
}
|
||||
|
||||
func cloneIfRequired(rc *RunContext, remoteReusableWorkflow remoteReusableWorkflow, targetDirectory, token string) common.Executor {
|
||||
func cloneIfRequired(rc *RunContext, remoteReusableWorkflow remoteReusableWorkflow, targetDirectory string) common.Executor {
|
||||
return common.NewConditionalExecutor(
|
||||
func(ctx context.Context) bool {
|
||||
_, err := os.Stat(targetDirectory)
|
||||
@@ -134,15 +100,12 @@ func cloneIfRequired(rc *RunContext, remoteReusableWorkflow remoteReusableWorkfl
|
||||
return notExists
|
||||
},
|
||||
func(ctx context.Context) error {
|
||||
// Do not change the remoteReusableWorkflow.URL, because:
|
||||
// 1. Gitea doesn't support specifying GithubContext.ServerURL by the GITHUB_SERVER_URL env
|
||||
// 2. Gitea has already full URL with rc.Config.GitHubInstance when calling newRemoteReusableWorkflowWithPlat
|
||||
// remoteReusableWorkflow.URL = rc.getGithubContext(ctx).ServerURL
|
||||
remoteReusableWorkflow.URL = rc.getGithubContext(ctx).ServerURL
|
||||
return git.NewGitCloneExecutor(git.NewGitCloneExecutorInput{
|
||||
URL: remoteReusableWorkflow.CloneURL(),
|
||||
Ref: remoteReusableWorkflow.Ref,
|
||||
Dir: targetDirectory,
|
||||
Token: token,
|
||||
Token: rc.Config.Token,
|
||||
OfflineMode: rc.Config.ActionOfflineMode,
|
||||
})(ctx)
|
||||
},
|
||||
@@ -189,44 +152,12 @@ type remoteReusableWorkflow struct {
|
||||
Repo string
|
||||
Filename string
|
||||
Ref string
|
||||
|
||||
GitPlatform string
|
||||
}
|
||||
|
||||
func (r *remoteReusableWorkflow) CloneURL() string {
|
||||
// In Gitea, r.URL always has the protocol prefix, we don't need to add extra prefix in this case.
|
||||
if strings.HasPrefix(r.URL, "http://") || strings.HasPrefix(r.URL, "https://") {
|
||||
return fmt.Sprintf("%s/%s/%s", r.URL, r.Org, r.Repo)
|
||||
}
|
||||
return fmt.Sprintf("https://%s/%s/%s", r.URL, r.Org, r.Repo)
|
||||
return fmt.Sprintf("%s/%s/%s", r.URL, r.Org, r.Repo)
|
||||
}
|
||||
|
||||
func (r *remoteReusableWorkflow) FilePath() string {
|
||||
return fmt.Sprintf("./.%s/workflows/%s", r.GitPlatform, r.Filename)
|
||||
}
|
||||
|
||||
// For Gitea
|
||||
// newRemoteReusableWorkflowWithPlat create a `remoteReusableWorkflow`
|
||||
// workflows from `.gitea/workflows` and `.github/workflows` are supported
|
||||
func newRemoteReusableWorkflowWithPlat(url, uses string) *remoteReusableWorkflow {
|
||||
// GitHub docs:
|
||||
// https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_iduses
|
||||
r := regexp.MustCompile(`^([^/]+)/([^/]+)/\.([^/]+)/workflows/([^@]+)@(.*)$`)
|
||||
matches := r.FindStringSubmatch(uses)
|
||||
if len(matches) != 6 {
|
||||
return nil
|
||||
}
|
||||
return &remoteReusableWorkflow{
|
||||
Org: matches[1],
|
||||
Repo: matches[2],
|
||||
GitPlatform: matches[3],
|
||||
Filename: matches[4],
|
||||
Ref: matches[5],
|
||||
URL: url,
|
||||
}
|
||||
}
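Since the fork's `newRemoteReusableWorkflowWithPlat` accepts the `uses` string via a single regular expression, a standalone check of that pattern (copied from the hunk above; the sample inputs are made up) makes the accepted format easier to see:

```go
package main

import (
	"fmt"
	"regexp"
)

// Pattern from the hunk above: {owner}/{repo}/.{git_platform}/workflows/{filename}@{ref}
var usesRe = regexp.MustCompile(`^([^/]+)/([^/]+)/\.([^/]+)/workflows/([^@]+)@(.*)$`)

func main() {
	for _, uses := range []string{
		"org/repo/.gitea/workflows/ci.yml@main", // Gitea-style platform directory
		"org/repo/.github/workflows/ci.yml@v1",  // GitHub-style platform directory
		"org/repo/workflows/ci.yml@main",        // missing ".{platform}" segment: rejected
	} {
		m := usesRe.FindStringSubmatch(uses)
		if len(m) != 6 {
			fmt.Printf("%-42s -> not a reusable workflow reference\n", uses)
			continue
		}
		fmt.Printf("%-42s -> org=%s repo=%s platform=%s file=%s ref=%s\n",
			uses, m[1], m[2], m[3], m[4], m[5])
	}
}
```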
// deprecated: use newRemoteReusableWorkflowWithPlat
|
||||
func newRemoteReusableWorkflow(uses string) *remoteReusableWorkflow {
|
||||
// GitHub docs:
|
||||
// https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_iduses
|
||||
|
||||
@@ -16,15 +16,13 @@ import (
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/go-connections/nat"
|
||||
"github.com/opencontainers/selinux/go-selinux"
|
||||
|
||||
"github.com/nektos/act/pkg/common"
|
||||
"github.com/nektos/act/pkg/container"
|
||||
"github.com/nektos/act/pkg/exprparser"
|
||||
"github.com/nektos/act/pkg/model"
|
||||
"github.com/opencontainers/selinux/go-selinux"
|
||||
)
|
||||
|
||||
// RunContext contains info about current job
|
||||
@@ -83,19 +81,13 @@ func (rc *RunContext) GetEnv() map[string]string {
|
||||
}
|
||||
}
|
||||
rc.Env["ACT"] = "true"
|
||||
|
||||
if !rc.Config.NoSkipCheckout {
|
||||
rc.Env["ACT_SKIP_CHECKOUT"] = "true"
|
||||
}
|
||||
|
||||
return rc.Env
|
||||
}
|
||||
|
||||
func (rc *RunContext) jobContainerName() string {
|
||||
return createSimpleContainerName(rc.Config.ContainerNamePrefix, "WORKFLOW-"+rc.Run.Workflow.Name, "JOB-"+rc.Name)
|
||||
return createContainerName("act", rc.String())
|
||||
}
|
||||
|
||||
// Deprecated: use `networkNameForGitea`
|
||||
// networkName return the name of the network which will be created by `act` automatically for job,
|
||||
// only create network if using a service container
|
||||
func (rc *RunContext) networkName() (string, bool) {
|
||||
@@ -108,14 +100,6 @@ func (rc *RunContext) networkName() (string, bool) {
|
||||
return string(rc.Config.ContainerNetworkMode), false
|
||||
}
|
||||
|
||||
// networkNameForGitea return the name of the network
|
||||
func (rc *RunContext) networkNameForGitea() (string, bool) {
|
||||
if rc.Config.ContainerNetworkMode != "" {
|
||||
return string(rc.Config.ContainerNetworkMode), false
|
||||
}
|
||||
return fmt.Sprintf("%s-%s-network", rc.jobContainerName(), rc.Run.JobID), true
|
||||
}
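The rule in `networkNameForGitea` is: a network mode supplied by the user is reused as-is and never removed, otherwise a per-job network is derived, created, and torn down by the runner. A small sketch of that decision (function and argument names are illustrative):

```go
package main

import "fmt"

// pickNetwork mirrors the decision above: reuse a user-supplied network
// mode, or derive a unique per-job network that the runner owns and removes.
func pickNetwork(configuredMode, jobContainerName, jobID string) (name string, createAndDelete bool) {
	if configuredMode != "" {
		return configuredMode, false
	}
	return fmt.Sprintf("%s-%s-network", jobContainerName, jobID), true
}

func main() {
	fmt.Println(pickNetwork("host", "WORKFLOW-demo_JOB-build", "build")) // host false
	fmt.Println(pickNetwork("", "WORKFLOW-demo_JOB-build", "build"))     // WORKFLOW-demo_JOB-build-build-network true
}
```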
func getDockerDaemonSocketMountPath(daemonPath string) string {
|
||||
if protoIndex := strings.Index(daemonPath, "://"); protoIndex != -1 {
|
||||
scheme := daemonPath[:protoIndex]
|
||||
@@ -183,14 +167,6 @@ func (rc *RunContext) GetBindsAndMounts() ([]string, map[string]string) {
|
||||
mounts[name] = ext.ToContainerPath(rc.Config.Workdir)
|
||||
}
|
||||
|
||||
// For Gitea
|
||||
// add some default binds and mounts to ValidVolumes
|
||||
rc.Config.ValidVolumes = append(rc.Config.ValidVolumes, "act-toolcache")
|
||||
rc.Config.ValidVolumes = append(rc.Config.ValidVolumes, name)
|
||||
rc.Config.ValidVolumes = append(rc.Config.ValidVolumes, name+"-env")
|
||||
// TODO: add a new configuration to control whether the docker daemon can be mounted
|
||||
rc.Config.ValidVolumes = append(rc.Config.ValidVolumes, getDockerDaemonSocketMountPath(rc.Config.ContainerDaemonSocket))
|
||||
|
||||
return binds, mounts
|
||||
}
|
||||
|
||||
@@ -285,9 +261,6 @@ func (rc *RunContext) startJobContainer() common.Executor {
|
||||
|
||||
logger.Infof("\U0001f680 Start image=%s", image)
|
||||
name := rc.jobContainerName()
|
||||
// For gitea, to support --volumes-from <container_name_or_id> in options.
|
||||
// We need to set the container name to the environment variable.
|
||||
rc.Env["JOB_CONTAINER_NAME"] = name
|
||||
|
||||
envList := make([]string, 0)
|
||||
|
||||
@@ -303,7 +276,7 @@ func (rc *RunContext) startJobContainer() common.Executor {
|
||||
// specify the network to which the container will connect when `docker create` stage. (like execute command line: docker create --network <networkName> <image>)
|
||||
// if using service containers, will create a new network for the containers.
|
||||
// and it will be removed after at last.
|
||||
networkName, createAndDeleteNetwork := rc.networkNameForGitea()
|
||||
networkName, createAndDeleteNetwork := rc.networkName()
|
||||
|
||||
// add service containers
|
||||
for serviceID, spec := range rc.Run.Job().Services {
|
||||
@@ -316,11 +289,6 @@ func (rc *RunContext) startJobContainer() common.Executor {
|
||||
for k, v := range interpolatedEnvs {
|
||||
envs = append(envs, fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
// interpolate cmd
|
||||
interpolatedCmd := make([]string, 0, len(spec.Cmd))
|
||||
for _, v := range spec.Cmd {
|
||||
interpolatedCmd = append(interpolatedCmd, rc.ExprEval.Interpolate(ctx, v))
|
||||
}
|
||||
username, password, err = rc.handleServiceCredentials(ctx, spec.Credentials)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to handle service %s credentials: %w", serviceID, err)
|
||||
@@ -348,7 +316,6 @@ func (rc *RunContext) startJobContainer() common.Executor {
|
||||
Image: rc.ExprEval.Interpolate(ctx, spec.Image),
|
||||
Username: username,
|
||||
Password: password,
|
||||
Cmd: interpolatedCmd,
|
||||
Env: envs,
|
||||
Mounts: serviceMounts,
|
||||
Binds: serviceBinds,
|
||||
@@ -357,7 +324,6 @@ func (rc *RunContext) startJobContainer() common.Executor {
|
||||
Privileged: rc.Config.Privileged,
|
||||
UsernsMode: rc.Config.UsernsMode,
|
||||
Platform: rc.Config.ContainerArchitecture,
|
||||
AutoRemove: rc.Config.AutoRemove,
|
||||
Options: rc.ExprEval.Interpolate(ctx, spec.Options),
|
||||
NetworkMode: networkName,
|
||||
NetworkAliases: []string{serviceID},
|
||||
@@ -382,15 +348,15 @@ func (rc *RunContext) startJobContainer() common.Executor {
|
||||
if err := rc.stopServiceContainers()(ctx); err != nil {
|
||||
logger.Errorf("Error while cleaning services: %v", err)
|
||||
}
|
||||
}
|
||||
if createAndDeleteNetwork {
|
||||
// clean network if it has been created by act
|
||||
// if using service containers
|
||||
// it means that the network to which containers are connecting is created by `act_runner`,
|
||||
// so, we should remove the network at last.
|
||||
logger.Infof("Cleaning up network for job %s, and network name is: %s", rc.JobName, networkName)
|
||||
if err := container.NewDockerNetworkRemoveExecutor(networkName)(ctx); err != nil {
|
||||
logger.Errorf("Error while cleaning network: %v", err)
|
||||
if createAndDeleteNetwork {
|
||||
// clean network if it has been created by act
|
||||
// if using service containers
|
||||
// it means that the network to which containers are connecting is created by `act_runner`,
|
||||
// so, we should remove the network at last.
|
||||
logger.Infof("Cleaning up network for job %s, and network name is: %s", rc.JobName, networkName)
|
||||
if err := container.NewDockerNetworkRemoveExecutor(networkName)(ctx); err != nil {
|
||||
logger.Errorf("Error while cleaning network: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@@ -406,12 +372,9 @@ func (rc *RunContext) startJobContainer() common.Executor {
|
||||
jobContainerNetwork = "host"
|
||||
}
|
||||
|
||||
// For Gitea, `jobContainerNetwork` should be the same as `networkName`
|
||||
jobContainerNetwork = networkName
|
||||
|
||||
rc.JobContainer = container.NewContainer(&container.NewContainerInput{
|
||||
Cmd: []string{"/bin/sleep", fmt.Sprint(rc.Config.ContainerMaxLifetime.Round(time.Second).Seconds())},
|
||||
Entrypoint: nil,
|
||||
Cmd: nil,
|
||||
Entrypoint: []string{"tail", "-f", "/dev/null"},
|
||||
WorkingDir: ext.ToContainerPath(rc.Config.Workdir),
|
||||
Image: image,
|
||||
Username: username,
|
||||
@@ -428,8 +391,6 @@ func (rc *RunContext) startJobContainer() common.Executor {
|
||||
UsernsMode: rc.Config.UsernsMode,
|
||||
Platform: rc.Config.ContainerArchitecture,
|
||||
Options: rc.options(ctx),
|
||||
AutoRemove: rc.Config.AutoRemove,
|
||||
ValidVolumes: rc.Config.ValidVolumes,
|
||||
})
|
||||
if rc.JobContainer == nil {
|
||||
return errors.New("Failed to create job container")
|
||||
@@ -678,18 +639,6 @@ func (rc *RunContext) runsOnImage(ctx context.Context) string {
|
||||
common.Logger(ctx).Errorf("'runs-on' key not defined in %s", rc.String())
|
||||
}
|
||||
|
||||
job := rc.Run.Job()
|
||||
runsOn := job.RunsOn()
|
||||
for i, v := range runsOn {
|
||||
runsOn[i] = rc.ExprEval.Interpolate(ctx, v)
|
||||
}
|
||||
|
||||
if pick := rc.Config.PlatformPicker; pick != nil {
|
||||
if image := pick(runsOn); image != "" {
|
||||
return image
|
||||
}
|
||||
}
|
||||
|
||||
for _, platformName := range rc.runsOnPlatformNames(ctx) {
|
||||
image := rc.Config.Platforms[strings.ToLower(platformName)]
|
||||
if image != "" {
|
||||
@@ -727,7 +676,7 @@ func (rc *RunContext) options(ctx context.Context) string {
|
||||
job := rc.Run.Job()
|
||||
c := job.Container()
|
||||
if c != nil {
|
||||
return rc.Config.ContainerOptions + " " + rc.ExprEval.Interpolate(ctx, c.Options)
|
||||
return rc.ExprEval.Interpolate(ctx, c.Options)
|
||||
}
|
||||
|
||||
return rc.Config.ContainerOptions
|
||||
@@ -776,7 +725,6 @@ func mergeMaps(maps ...map[string]string) map[string]string {
|
||||
return rtnMap
|
||||
}
|
||||
|
||||
// deprecated: use createSimpleContainerName
|
||||
func createContainerName(parts ...string) string {
|
||||
name := strings.Join(parts, "-")
|
||||
pattern := regexp.MustCompile("[^a-zA-Z0-9]")
|
||||
@@ -790,22 +738,6 @@ func createContainerName(parts ...string) string {
|
||||
return fmt.Sprintf("%s-%x", trimmedName, hash)
|
||||
}
|
||||
|
||||
func createSimpleContainerName(parts ...string) string {
|
||||
pattern := regexp.MustCompile("[^a-zA-Z0-9-]")
|
||||
name := make([]string, 0, len(parts))
|
||||
for _, v := range parts {
|
||||
v = pattern.ReplaceAllString(v, "-")
|
||||
v = strings.Trim(v, "-")
|
||||
for strings.Contains(v, "--") {
|
||||
v = strings.ReplaceAll(v, "--", "-")
|
||||
}
|
||||
if v != "" {
|
||||
name = append(name, v)
|
||||
}
|
||||
}
|
||||
return strings.Join(name, "_")
|
||||
}
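Given the sanitisation rules above (runs of characters outside `[a-zA-Z0-9-]` collapse to a single `-`, empty parts are dropped, parts join with `_`), an illustrative call looks like this. The body below is a copy of the function so the example is runnable on its own; the expected outputs follow from those rules and from the test further down:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// simpleContainerName duplicates the sanitiser above for a self-contained demo.
func simpleContainerName(parts ...string) string {
	pattern := regexp.MustCompile("[^a-zA-Z0-9-]")
	name := make([]string, 0, len(parts))
	for _, v := range parts {
		v = pattern.ReplaceAllString(v, "-")
		v = strings.Trim(v, "-")
		for strings.Contains(v, "--") {
			v = strings.ReplaceAll(v, "--", "-")
		}
		if v != "" {
			name = append(name, v)
		}
	}
	return strings.Join(name, "_")
}

func main() {
	fmt.Println(simpleContainerName("GITEA-ACTIONS-TASK-1", "WORKFLOW-ci.yml", "JOB-build"))
	// GITEA-ACTIONS-TASK-1_WORKFLOW-ci-yml_JOB-build
	fmt.Println(simpleContainerName("a--a", "BB正", "c-C")) // a-a_BB_c-C (matches the test below)
}
```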
func trimToLen(s string, l int) string {
|
||||
if l < 0 {
|
||||
l = 0
|
||||
@@ -888,36 +820,6 @@ func (rc *RunContext) getGithubContext(ctx context.Context) *model.GithubContext
|
||||
ghc.Actor = "nektos/act"
|
||||
}
|
||||
|
||||
{ // Adapt to Gitea
|
||||
if preset := rc.Config.PresetGitHubContext; preset != nil {
|
||||
ghc.Event = preset.Event
|
||||
ghc.RunID = preset.RunID
|
||||
ghc.RunNumber = preset.RunNumber
|
||||
ghc.Actor = preset.Actor
|
||||
ghc.Repository = preset.Repository
|
||||
ghc.EventName = preset.EventName
|
||||
ghc.Sha = preset.Sha
|
||||
ghc.Ref = preset.Ref
|
||||
ghc.RefName = preset.RefName
|
||||
ghc.RefType = preset.RefType
|
||||
ghc.HeadRef = preset.HeadRef
|
||||
ghc.BaseRef = preset.BaseRef
|
||||
ghc.Token = preset.Token
|
||||
ghc.RepositoryOwner = preset.RepositoryOwner
|
||||
ghc.RetentionDays = preset.RetentionDays
|
||||
|
||||
instance := rc.Config.GitHubInstance
|
||||
if !strings.HasPrefix(instance, "http://") &&
|
||||
!strings.HasPrefix(instance, "https://") {
|
||||
instance = "https://" + instance
|
||||
}
|
||||
ghc.ServerURL = instance
|
||||
ghc.APIURL = instance + "/api/v1" // the version of Gitea is v1
|
||||
ghc.GraphQLURL = "" // Gitea doesn't support graphql
|
||||
return ghc
|
||||
}
|
||||
}
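The "Adapt to Gitea" branch above normalises the configured instance into the `github` context URLs: prefix `https://` when no scheme is given, append `/api/v1` for the API, and leave the GraphQL URL empty. A tiny sketch of that normalisation (the function name is illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

// giteaURLs derives server and API URLs the same way as the hunk above: an
// instance without a scheme is assumed to be reachable over HTTPS, and the
// Gitea API is always versioned as /api/v1.
func giteaURLs(instance string) (serverURL, apiURL, graphqlURL string) {
	if !strings.HasPrefix(instance, "http://") && !strings.HasPrefix(instance, "https://") {
		instance = "https://" + instance
	}
	return instance, instance + "/api/v1", "" // Gitea has no GraphQL API
}

func main() {
	fmt.Println(giteaURLs("gitea.example.com"))
	fmt.Println(giteaURLs("http://localhost:3000"))
}
```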
if rc.EventJSON != "" {
|
||||
err := json.Unmarshal([]byte(rc.EventJSON), &ghc.Event)
|
||||
if err != nil {
|
||||
@@ -947,18 +849,6 @@ func (rc *RunContext) getGithubContext(ctx context.Context) *model.GithubContext
|
||||
ghc.APIURL = fmt.Sprintf("https://%s/api/v3", rc.Config.GitHubInstance)
|
||||
ghc.GraphQLURL = fmt.Sprintf("https://%s/api/graphql", rc.Config.GitHubInstance)
|
||||
}
|
||||
|
||||
{ // Adapt to Gitea
|
||||
instance := rc.Config.GitHubInstance
|
||||
if !strings.HasPrefix(instance, "http://") &&
|
||||
!strings.HasPrefix(instance, "https://") {
|
||||
instance = "https://" + instance
|
||||
}
|
||||
ghc.ServerURL = instance
|
||||
ghc.APIURL = instance + "/api/v1" // the version of Gitea is v1
|
||||
ghc.GraphQLURL = "" // Gitea doesn't support graphql
|
||||
}
|
||||
|
||||
// allow to be overridden by user
|
||||
if rc.Config.Env["GITHUB_SERVER_URL"] != "" {
|
||||
ghc.ServerURL = rc.Config.Env["GITHUB_SERVER_URL"]
|
||||
@@ -1046,17 +936,6 @@ func (rc *RunContext) withGithubEnv(ctx context.Context, github *model.GithubCon
|
||||
env["GITHUB_API_URL"] = github.APIURL
|
||||
env["GITHUB_GRAPHQL_URL"] = github.GraphQLURL
|
||||
|
||||
{ // Adapt to Gitea
|
||||
instance := rc.Config.GitHubInstance
|
||||
if !strings.HasPrefix(instance, "http://") &&
|
||||
!strings.HasPrefix(instance, "https://") {
|
||||
instance = "https://" + instance
|
||||
}
|
||||
env["GITHUB_SERVER_URL"] = instance
|
||||
env["GITHUB_API_URL"] = instance + "/api/v1" // the version of Gitea is v1
|
||||
env["GITHUB_GRAPHQL_URL"] = "" // Gitea doesn't support graphql
|
||||
}
|
||||
|
||||
if rc.Config.ArtifactServerPath != "" {
|
||||
setActionRuntimeVars(rc, env)
|
||||
}
|
||||
|
||||
@@ -682,24 +682,3 @@ func TestRunContextGetEnv(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_createSimpleContainerName(t *testing.T) {
|
||||
tests := []struct {
|
||||
parts []string
|
||||
want string
|
||||
}{
|
||||
{
|
||||
parts: []string{"a--a", "BB正", "c-C"},
|
||||
want: "a-a_BB_c-C",
|
||||
},
|
||||
{
|
||||
parts: []string{"a-a", "", "-"},
|
||||
want: "a-a",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(strings.Join(tt.parts, " "), func(t *testing.T) {
|
||||
assert.Equalf(t, tt.want, createSimpleContainerName(tt.parts...), "createSimpleContainerName(%v)", tt.parts)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,13 +6,11 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
docker_container "github.com/docker/docker/api/types/container"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/nektos/act/pkg/common"
|
||||
"github.com/nektos/act/pkg/model"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Runner provides capabilities to run GitHub actions
|
||||
@@ -63,25 +61,6 @@ type Config struct {
|
||||
Matrix map[string]map[string]bool // Matrix config to run
|
||||
ContainerNetworkMode docker_container.NetworkMode // the network mode of job containers (the value of --network)
|
||||
ActionCache ActionCache // Use a custom ActionCache Implementation
|
||||
|
||||
PresetGitHubContext *model.GithubContext // the preset github context, overrides some fields like DefaultBranch, Env, Secrets etc.
|
||||
EventJSON string // the content of JSON file to use for event.json in containers, overrides EventPath
|
||||
ContainerNamePrefix string // the prefix of container name
|
||||
ContainerMaxLifetime time.Duration // the max lifetime of job containers
|
||||
DefaultActionInstance string // the default actions web site
|
||||
PlatformPicker func(labels []string) string // platform picker, it will take precedence over Platforms if isn't nil
|
||||
JobLoggerLevel *log.Level // the level of job logger
|
||||
ValidVolumes []string // only volumes (and bind mounts) in this slice can be mounted on the job container or service containers
|
||||
InsecureSkipTLS bool // whether to skip verifying TLS certificate of the Gitea instance
|
||||
}
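The Gitea-specific fields in the `Config` hunk above (preset context, job logger level, valid volumes, and so on) are what `act_runner` sets when it drives this package as a library. A hedged sketch of assembling such a configuration; the field names are taken from the struct above, all values are placeholders:

```go
package main

import (
	"time"

	log "github.com/sirupsen/logrus"

	"github.com/nektos/act/pkg/model"
	"github.com/nektos/act/pkg/runner"
)

func main() {
	level := log.InfoLevel

	cfg := &runner.Config{
		Workdir:               "/workspace/owner/repo",
		EventName:             "push",
		Platforms:             map[string]string{"ubuntu-latest": "node:16-bullseye"},
		Secrets:               map[string]string{"GITEA_TOKEN": "example-token"},
		PresetGitHubContext:   &model.GithubContext{Repository: "owner/repo", Ref: "refs/heads/main"},
		ContainerNamePrefix:   "GITEA-ACTIONS-TASK-1",
		ContainerMaxLifetime:  time.Hour,
		DefaultActionInstance: "github.com",
		JobLoggerLevel:        &level,
		ValidVolumes:          []string{"/opt/hostedtoolcache"},
		InsecureSkipTLS:       false,
	}
	_ = cfg // in real use, pass this to runner.New(cfg)
}
```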
// GetToken: Adapt to Gitea
|
||||
func (c Config) GetToken() string {
|
||||
token := c.Secrets["GITHUB_TOKEN"]
|
||||
if c.Secrets["GITEA_TOKEN"] != "" {
|
||||
token = c.Secrets["GITEA_TOKEN"]
|
||||
}
|
||||
return token
|
||||
}
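`GetToken` prefers `GITEA_TOKEN` and falls back to `GITHUB_TOKEN`. A quick check of that precedence as a standalone helper (the secret values are made up):

```go
package main

import "fmt"

// tokenFrom mirrors GetToken above: GITEA_TOKEN wins when present,
// otherwise GITHUB_TOKEN (possibly empty) is used.
func tokenFrom(secrets map[string]string) string {
	if t := secrets["GITEA_TOKEN"]; t != "" {
		return t
	}
	return secrets["GITHUB_TOKEN"]
}

func main() {
	fmt.Println(tokenFrom(map[string]string{"GITHUB_TOKEN": "gh", "GITEA_TOKEN": "gt"})) // gt
	fmt.Println(tokenFrom(map[string]string{"GITHUB_TOKEN": "gh"}))                      // gh
}
```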
type caller struct {
|
||||
@@ -105,9 +84,7 @@ func New(runnerConfig *Config) (Runner, error) {
|
||||
|
||||
func (runner *runnerImpl) configure() (Runner, error) {
|
||||
runner.eventJSON = "{}"
|
||||
if runner.config.EventJSON != "" {
|
||||
runner.eventJSON = runner.config.EventJSON
|
||||
} else if runner.config.EventPath != "" {
|
||||
if runner.config.EventPath != "" {
|
||||
log.Debugf("Reading event.json from %s", runner.config.EventPath)
|
||||
eventJSONBytes, err := os.ReadFile(runner.config.EventPath)
|
||||
if err != nil {
|
||||
|
||||
@@ -580,43 +580,6 @@ func TestRunEventSecrets(t *testing.T) {
|
||||
tjfi.runTest(context.Background(), t, &Config{Secrets: secrets, Env: env})
|
||||
}
|
||||
|
||||
func TestRunWithService(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping integration test")
|
||||
}
|
||||
|
||||
log.SetLevel(log.DebugLevel)
|
||||
ctx := context.Background()
|
||||
|
||||
platforms := map[string]string{
|
||||
"ubuntu-latest": "node:12.20.1-buster-slim",
|
||||
}
|
||||
|
||||
workflowPath := "services"
|
||||
eventName := "push"
|
||||
|
||||
workdir, err := filepath.Abs("testdata")
|
||||
assert.NoError(t, err, workflowPath)
|
||||
|
||||
runnerConfig := &Config{
|
||||
Workdir: workdir,
|
||||
EventName: eventName,
|
||||
Platforms: platforms,
|
||||
ReuseContainers: false,
|
||||
}
|
||||
runner, err := New(runnerConfig)
|
||||
assert.NoError(t, err, workflowPath)
|
||||
|
||||
planner, err := model.NewWorkflowPlanner(fmt.Sprintf("testdata/%s", workflowPath), true)
|
||||
assert.NoError(t, err, workflowPath)
|
||||
|
||||
plan, err := planner.PlanEvent(eventName)
|
||||
assert.NoError(t, err, workflowPath)
|
||||
|
||||
err = runner.NewPlanExecutor(plan)(ctx)
|
||||
assert.NoError(t, err, workflowPath)
|
||||
}
|
||||
|
||||
func TestRunActionInputs(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping integration test")
|
||||
|
||||
@@ -33,7 +33,9 @@ type stepActionRemote struct {
|
||||
resolvedSha string
|
||||
}
|
||||
|
||||
var stepActionRemoteNewCloneExecutor = git.NewGitCloneExecutor
|
||||
var (
|
||||
stepActionRemoteNewCloneExecutor = git.NewGitCloneExecutor
|
||||
)
|
||||
|
||||
func (sar *stepActionRemote) prepareActionExecutor() common.Executor {
|
||||
return func(ctx context.Context) error {
|
||||
@@ -42,18 +44,14 @@ func (sar *stepActionRemote) prepareActionExecutor() common.Executor {
|
||||
return nil
|
||||
}
|
||||
|
||||
// For gitea:
|
||||
// Since actions can specify the download source via a url prefix.
|
||||
// The prefix may contain some sensitive information that needs to be stored in secrets,
|
||||
// so we need to interpolate the expression value for uses first.
|
||||
sar.Step.Uses = sar.RunContext.NewExpressionEvaluator(ctx).Interpolate(ctx, sar.Step.Uses)
|
||||
|
||||
sar.remoteAction = newRemoteAction(sar.Step.Uses)
|
||||
if sar.remoteAction == nil {
|
||||
return fmt.Errorf("Expected format {org}/{repo}[/path]@ref. Actual '%s' Input string was not in a correct format", sar.Step.Uses)
|
||||
}
|
||||
|
||||
github := sar.getGithubContext(ctx)
|
||||
sar.remoteAction.URL = github.ServerURL
|
||||
|
||||
if sar.remoteAction.IsCheckout() && isLocalCheckout(github, sar.Step) && !sar.RunContext.Config.NoSkipCheckout {
|
||||
common.Logger(ctx).Debugf("Skipping local actions/checkout because workdir was already copied")
|
||||
return nil
|
||||
@@ -110,19 +108,11 @@ func (sar *stepActionRemote) prepareActionExecutor() common.Executor {
|
||||
|
||||
actionDir := fmt.Sprintf("%s/%s", sar.RunContext.ActionCacheDir(), safeFilename(sar.Step.Uses))
|
||||
gitClone := stepActionRemoteNewCloneExecutor(git.NewGitCloneExecutorInput{
|
||||
URL: sar.remoteAction.CloneURL(sar.RunContext.Config.DefaultActionInstance),
|
||||
Ref: sar.remoteAction.Ref,
|
||||
Dir: actionDir,
|
||||
Token: "", /*
|
||||
Shouldn't provide token when cloning actions,
|
||||
the token comes from the instance which triggered the task,
|
||||
however, it might be not the same instance which provides actions.
|
||||
For GitHub, they are the same, always github.com.
|
||||
But for Gitea, tasks triggered by a.com can clone actions from b.com.
|
||||
*/
|
||||
URL: sar.remoteAction.CloneURL(),
|
||||
Ref: sar.remoteAction.Ref,
|
||||
Dir: actionDir,
|
||||
Token: github.Token,
|
||||
OfflineMode: sar.RunContext.Config.ActionOfflineMode,
|
||||
|
||||
InsecureSkipTLS: sar.cloneSkipTLS(), // For Gitea
|
||||
})
|
||||
var ntErr common.Executor
|
||||
if err := gitClone(ctx); err != nil {
|
||||
@@ -260,22 +250,6 @@ func (sar *stepActionRemote) getCompositeSteps() *compositeSteps {
|
||||
return sar.compositeSteps
|
||||
}
|
||||
|
||||
// For Gitea
|
||||
// cloneSkipTLS returns true if the runner can clone an action from the Gitea instance
|
||||
func (sar *stepActionRemote) cloneSkipTLS() bool {
|
||||
if !sar.RunContext.Config.InsecureSkipTLS {
|
||||
// Return false if the Gitea instance is not an insecure instance
|
||||
return false
|
||||
}
|
||||
if sar.remoteAction.URL == "" {
|
||||
// Empty URL means the default action instance should be used
|
||||
// Return true if the URL of the Gitea instance is the same as the URL of the default action instance
|
||||
return sar.RunContext.Config.DefaultActionInstance == sar.RunContext.Config.GitHubInstance
|
||||
}
|
||||
// Return true if the URL of the remote action is the same as the URL of the Gitea instance
|
||||
return sar.remoteAction.URL == sar.RunContext.Config.GitHubInstance
|
||||
}
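The three branches of `cloneSkipTLS` reduce to: never skip TLS verification unless the runner is configured as insecure, and even then only when the action is cloned from the configured Gitea instance (either directly via a URL prefix, or indirectly through the default action instance). A sketch of the same decision as a standalone function (names are illustrative):

```go
package main

import "fmt"

// skipTLS mirrors cloneSkipTLS above. actionURL is empty when the action
// reference had no scheme/host prefix and the default instance applies.
func skipTLS(insecure bool, actionURL, defaultInstance, giteaInstance string) bool {
	if !insecure {
		return false
	}
	if actionURL == "" {
		return defaultInstance == giteaInstance
	}
	return actionURL == giteaInstance
}

func main() {
	fmt.Println(skipTLS(true, "", "gitea.example.com", "gitea.example.com"))          // true
	fmt.Println(skipTLS(true, "https://other.example.com", "x", "gitea.example.com")) // false
	fmt.Println(skipTLS(false, "", "gitea.example.com", "gitea.example.com"))         // false
}
```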
type remoteAction struct {
|
||||
URL string
|
||||
Org string
|
||||
@@ -284,16 +258,8 @@ type remoteAction struct {
|
||||
Ref string
|
||||
}
|
||||
|
||||
func (ra *remoteAction) CloneURL(u string) string {
|
||||
if ra.URL == "" {
|
||||
if !strings.HasPrefix(u, "http://") && !strings.HasPrefix(u, "https://") {
|
||||
u = "https://" + u
|
||||
}
|
||||
} else {
|
||||
u = ra.URL
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s/%s/%s", u, ra.Org, ra.Repo)
|
||||
func (ra *remoteAction) CloneURL() string {
|
||||
return fmt.Sprintf("%s/%s/%s", ra.URL, ra.Org, ra.Repo)
|
||||
}
|
||||
|
||||
func (ra *remoteAction) IsCheckout() bool {
|
||||
@@ -304,26 +270,6 @@ func (ra *remoteAction) IsCheckout() bool {
|
||||
}
|
||||
|
||||
func newRemoteAction(action string) *remoteAction {
|
||||
// support http(s)://host/owner/repo@v3
|
||||
for _, schema := range []string{"https://", "http://"} {
|
||||
if strings.HasPrefix(action, schema) {
|
||||
splits := strings.SplitN(strings.TrimPrefix(action, schema), "/", 2)
|
||||
if len(splits) != 2 {
|
||||
return nil
|
||||
}
|
||||
ret := parseAction(splits[1])
|
||||
if ret == nil {
|
||||
return nil
|
||||
}
|
||||
ret.URL = schema + splits[0]
|
||||
return ret
|
||||
}
|
||||
}
|
||||
|
||||
return parseAction(action)
|
||||
}
|
||||
|
||||
func parseAction(action string) *remoteAction {
|
||||
// GitHub's document[^] describes:
|
||||
// > We strongly recommend that you include the version of
|
||||
// > the action you are using by specifying a Git ref, SHA, or Docker tag number.
|
||||
@@ -339,7 +285,7 @@ func parseAction(action string) *remoteAction {
|
||||
Repo: matches[2],
|
||||
Path: matches[4],
|
||||
Ref: matches[6],
|
||||
URL: "",
|
||||
URL: "https://github.com",
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -616,100 +616,6 @@ func TestStepActionRemotePost(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func Test_newRemoteAction(t *testing.T) {
|
||||
tests := []struct {
|
||||
action string
|
||||
want *remoteAction
|
||||
wantCloneURL string
|
||||
}{
|
||||
{
|
||||
action: "actions/heroku@main",
|
||||
want: &remoteAction{
|
||||
URL: "",
|
||||
Org: "actions",
|
||||
Repo: "heroku",
|
||||
Path: "",
|
||||
Ref: "main",
|
||||
},
|
||||
wantCloneURL: "https://github.com/actions/heroku",
|
||||
},
|
||||
{
|
||||
action: "actions/aws/ec2@main",
|
||||
want: &remoteAction{
|
||||
URL: "",
|
||||
Org: "actions",
|
||||
Repo: "aws",
|
||||
Path: "ec2",
|
||||
Ref: "main",
|
||||
},
|
||||
wantCloneURL: "https://github.com/actions/aws",
|
||||
},
|
||||
{
|
||||
action: "./.github/actions/my-action", // it's valid for GitHub, but act don't support it
|
||||
want: nil,
|
||||
},
|
||||
{
|
||||
action: "docker://alpine:3.8", // it's valid for GitHub, but act don't support it
|
||||
want: nil,
|
||||
},
|
||||
{
|
||||
action: "https://gitea.com/actions/heroku@main", // it's invalid for GitHub, but gitea supports it
|
||||
want: &remoteAction{
|
||||
URL: "https://gitea.com",
|
||||
Org: "actions",
|
||||
Repo: "heroku",
|
||||
Path: "",
|
||||
Ref: "main",
|
||||
},
|
||||
wantCloneURL: "https://gitea.com/actions/heroku",
|
||||
},
|
||||
{
|
||||
action: "https://gitea.com/actions/aws/ec2@main", // it's invalid for GitHub, but gitea supports it
|
||||
want: &remoteAction{
|
||||
URL: "https://gitea.com",
|
||||
Org: "actions",
|
||||
Repo: "aws",
|
||||
Path: "ec2",
|
||||
Ref: "main",
|
||||
},
|
||||
wantCloneURL: "https://gitea.com/actions/aws",
|
||||
},
|
||||
{
|
||||
action: "http://gitea.com/actions/heroku@main", // it's invalid for GitHub, but gitea supports it
|
||||
want: &remoteAction{
|
||||
URL: "http://gitea.com",
|
||||
Org: "actions",
|
||||
Repo: "heroku",
|
||||
Path: "",
|
||||
Ref: "main",
|
||||
},
|
||||
wantCloneURL: "http://gitea.com/actions/heroku",
|
||||
},
|
||||
{
|
||||
action: "http://gitea.com/actions/aws/ec2@main", // it's invalid for GitHub, but gitea supports it
|
||||
want: &remoteAction{
|
||||
URL: "http://gitea.com",
|
||||
Org: "actions",
|
||||
Repo: "aws",
|
||||
Path: "ec2",
|
||||
Ref: "main",
|
||||
},
|
||||
wantCloneURL: "http://gitea.com/actions/aws",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.action, func(t *testing.T) {
|
||||
got := newRemoteAction(tt.action)
|
||||
assert.Equalf(t, tt.want, got, "newRemoteAction(%v)", tt.action)
|
||||
cloneURL := ""
|
||||
if got != nil {
|
||||
cloneURL = got.CloneURL("github.com")
|
||||
}
|
||||
assert.Equalf(t, tt.wantCloneURL, cloneURL, "newRemoteAction(%v).CloneURL()", tt.action)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_safeFilename(t *testing.T) {
|
||||
tests := []struct {
|
||||
s string
|
||||
|
||||
@@ -114,24 +114,22 @@ func (sd *stepDocker) newStepContainer(ctx context.Context, image string, cmd []
|
||||
|
||||
binds, mounts := rc.GetBindsAndMounts()
|
||||
stepContainer := ContainerNewContainer(&container.NewContainerInput{
|
||||
Cmd: cmd,
|
||||
Entrypoint: entrypoint,
|
||||
WorkingDir: rc.JobContainer.ToContainerPath(rc.Config.Workdir),
|
||||
Image: image,
|
||||
Username: rc.Config.Secrets["DOCKER_USERNAME"],
|
||||
Password: rc.Config.Secrets["DOCKER_PASSWORD"],
|
||||
Name: createSimpleContainerName(rc.jobContainerName(), "STEP-"+step.ID),
|
||||
Env: envList,
|
||||
Mounts: mounts,
|
||||
NetworkMode: fmt.Sprintf("container:%s", rc.jobContainerName()),
|
||||
Binds: binds,
|
||||
Stdout: logWriter,
|
||||
Stderr: logWriter,
|
||||
Privileged: rc.Config.Privileged,
|
||||
UsernsMode: rc.Config.UsernsMode,
|
||||
Platform: rc.Config.ContainerArchitecture,
|
||||
AutoRemove: rc.Config.AutoRemove,
|
||||
ValidVolumes: rc.Config.ValidVolumes,
|
||||
Cmd: cmd,
|
||||
Entrypoint: entrypoint,
|
||||
WorkingDir: rc.JobContainer.ToContainerPath(rc.Config.Workdir),
|
||||
Image: image,
|
||||
Username: rc.Config.Secrets["DOCKER_USERNAME"],
|
||||
Password: rc.Config.Secrets["DOCKER_PASSWORD"],
|
||||
Name: createContainerName(rc.jobContainerName(), step.ID),
|
||||
Env: envList,
|
||||
Mounts: mounts,
|
||||
NetworkMode: fmt.Sprintf("container:%s", rc.jobContainerName()),
|
||||
Binds: binds,
|
||||
Stdout: logWriter,
|
||||
Stderr: logWriter,
|
||||
Privileged: rc.Config.Privileged,
|
||||
UsernsMode: rc.Config.UsernsMode,
|
||||
Platform: rc.Config.ContainerArchitecture,
|
||||
})
|
||||
return stepContainer
|
||||
}
|
||||
|
||||
4
pkg/runner/testdata/networking/push.yml
vendored
@@ -7,8 +7,8 @@ jobs:
|
||||
- name: Install tools
|
||||
run: |
|
||||
apt update
|
||||
apt install -y bind9-host
|
||||
apt install -y iputils-ping
|
||||
- name: Run hostname test
|
||||
run: |
|
||||
hostname -f
|
||||
host $(hostname -f)
|
||||
ping -c 4 $(hostname -f)
|
||||
|
||||