Merge pull request 'cherry-pick some changes from the gitea repository' (#6) from earl-warren/runner:wip-sync into main
Reviewed-on: https://codeberg.org/forgejo/runner/pulls/6

Commit 1bb87d0ebb

6 changed files with 466 additions and 7 deletions

.gitignore (vendored): 9 changed lines

@@ -1,4 +1,9 @@
-act_runner
+forgejo-runner
 .env
 .runner
 coverage.txt
+/gitea-vet
+
+# MS VSCode
+.vscode
+__debug_bin

@@ -51,9 +51,11 @@ func Execute(ctx context.Context) {
 		Args:  cobra.MaximumNArgs(1),
 		RunE:  runDaemon(ctx, gArgs.EnvFile),
 	}
 	// add all command
 	rootCmd.AddCommand(daemonCmd)
 
+	// ./act_runner exec
+	rootCmd.AddCommand(loadExecCmd(ctx))
 	// hide completion command
 	rootCmd.CompletionOptions.HiddenDefaultCmd = true
 
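
The two added lines register the new exec subcommand on the root command, next to daemon; the subcommand itself is defined in the new cmd/exec.go file below. As a quick illustration (not part of this diff), listing the workflows the subcommand would consider could look roughly like this, assuming the binary is built under the forgejo-runner name added to .gitignore above:

	./forgejo-runner exec --list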

cmd/exec.go (new file): 448 lines added

@@ -0,0 +1,448 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// Copyright 2019 nektos
// SPDX-License-Identifier: MIT

package cmd

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"codeberg.org/forgejo/runner/artifactcache"
	"github.com/joho/godotenv"
	"github.com/nektos/act/pkg/artifacts"
	"github.com/nektos/act/pkg/common"
	"github.com/nektos/act/pkg/model"
	"github.com/nektos/act/pkg/runner"
	log "github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
	"golang.org/x/term"
)

type executeArgs struct {
	runList               bool
	job                   string
	event                 string
	workdir               string
	workflowsPath         string
	noWorkflowRecurse     bool
	autodetectEvent       bool
	forcePull             bool
	forceRebuild          bool
	jsonLogger            bool
	envs                  []string
	envfile               string
	secrets               []string
	defaultActionsUrl     string
	insecureSecrets       bool
	privileged            bool
	usernsMode            string
	containerArchitecture string
	containerDaemonSocket string
	useGitIgnore          bool
	containerCapAdd       []string
	containerCapDrop      []string
	artifactServerPath    string
	artifactServerPort    string
	noSkipCheckout        bool
	debug                 bool
	dryrun                bool
	cacheHandler          *artifactcache.Handler
}

// WorkflowsPath returns path to workflow file(s)
func (i *executeArgs) WorkflowsPath() string {
	return i.resolve(i.workflowsPath)
}

// Envfile returns path to .env
func (i *executeArgs) Envfile() string {
	return i.resolve(i.envfile)
}

func (i *executeArgs) LoadSecrets() map[string]string {
	s := make(map[string]string)
	for _, secretPair := range i.secrets {
		secretPairParts := strings.SplitN(secretPair, "=", 2)
		secretPairParts[0] = strings.ToUpper(secretPairParts[0])
		if strings.ToUpper(s[secretPairParts[0]]) == secretPairParts[0] {
			log.Errorf("Secret %s is already defined (secrets are case insensitive)", secretPairParts[0])
		}
		if len(secretPairParts) == 2 {
			s[secretPairParts[0]] = secretPairParts[1]
		} else if env, ok := os.LookupEnv(secretPairParts[0]); ok && env != "" {
			s[secretPairParts[0]] = env
		} else {
			fmt.Printf("Provide value for '%s': ", secretPairParts[0])
			val, err := term.ReadPassword(int(os.Stdin.Fd()))
			fmt.Println()
			if err != nil {
				log.Errorf("failed to read input: %v", err)
				os.Exit(1)
			}
			s[secretPairParts[0]] = string(val)
		}
	}
	return s
}

func readEnvs(path string, envs map[string]string) bool {
	if _, err := os.Stat(path); err == nil {
		env, err := godotenv.Read(path)
		if err != nil {
			log.Fatalf("Error loading from %s: %v", path, err)
		}
		for k, v := range env {
			envs[k] = v
		}
		return true
	}
	return false
}

func (i *executeArgs) LoadEnvs() map[string]string {
	envs := make(map[string]string)
	if i.envs != nil {
		for _, envVar := range i.envs {
			e := strings.SplitN(envVar, `=`, 2)
			if len(e) == 2 {
				envs[e[0]] = e[1]
			} else {
				envs[e[0]] = ""
			}
		}
	}
	_ = readEnvs(i.Envfile(), envs)

	envs["ACTIONS_CACHE_URL"] = i.cacheHandler.ExternalURL() + "/"

	return envs
}

// Workdir returns path to workdir
func (i *executeArgs) Workdir() string {
	return i.resolve(".")
}

func (i *executeArgs) resolve(path string) string {
	basedir, err := filepath.Abs(i.workdir)
	if err != nil {
		log.Fatal(err)
	}
	if path == "" {
		return path
	}
	if !filepath.IsAbs(path) {
		path = filepath.Join(basedir, path)
	}
	return path
}

func printList(plan *model.Plan) error {
	type lineInfoDef struct {
		jobID   string
		jobName string
		stage   string
		wfName  string
		wfFile  string
		events  string
	}
	lineInfos := []lineInfoDef{}

	header := lineInfoDef{
		jobID:   "Job ID",
		jobName: "Job name",
		stage:   "Stage",
		wfName:  "Workflow name",
		wfFile:  "Workflow file",
		events:  "Events",
	}

	jobs := map[string]bool{}
	duplicateJobIDs := false

	jobIDMaxWidth := len(header.jobID)
	jobNameMaxWidth := len(header.jobName)
	stageMaxWidth := len(header.stage)
	wfNameMaxWidth := len(header.wfName)
	wfFileMaxWidth := len(header.wfFile)
	eventsMaxWidth := len(header.events)

	for i, stage := range plan.Stages {
		for _, r := range stage.Runs {
			jobID := r.JobID
			line := lineInfoDef{
				jobID:   jobID,
				jobName: r.String(),
				stage:   strconv.Itoa(i),
				wfName:  r.Workflow.Name,
				wfFile:  r.Workflow.File,
				events:  strings.Join(r.Workflow.On(), `,`),
			}
			if _, ok := jobs[jobID]; ok {
				duplicateJobIDs = true
			} else {
				jobs[jobID] = true
			}
			lineInfos = append(lineInfos, line)
			if jobIDMaxWidth < len(line.jobID) {
				jobIDMaxWidth = len(line.jobID)
			}
			if jobNameMaxWidth < len(line.jobName) {
				jobNameMaxWidth = len(line.jobName)
			}
			if stageMaxWidth < len(line.stage) {
				stageMaxWidth = len(line.stage)
			}
			if wfNameMaxWidth < len(line.wfName) {
				wfNameMaxWidth = len(line.wfName)
			}
			if wfFileMaxWidth < len(line.wfFile) {
				wfFileMaxWidth = len(line.wfFile)
			}
			if eventsMaxWidth < len(line.events) {
				eventsMaxWidth = len(line.events)
			}
		}
	}

	jobIDMaxWidth += 2
	jobNameMaxWidth += 2
	stageMaxWidth += 2
	wfNameMaxWidth += 2
	wfFileMaxWidth += 2

	fmt.Printf("%*s%*s%*s%*s%*s%*s\n",
		-stageMaxWidth, header.stage,
		-jobIDMaxWidth, header.jobID,
		-jobNameMaxWidth, header.jobName,
		-wfNameMaxWidth, header.wfName,
		-wfFileMaxWidth, header.wfFile,
		-eventsMaxWidth, header.events,
	)
	for _, line := range lineInfos {
		fmt.Printf("%*s%*s%*s%*s%*s%*s\n",
			-stageMaxWidth, line.stage,
			-jobIDMaxWidth, line.jobID,
			-jobNameMaxWidth, line.jobName,
			-wfNameMaxWidth, line.wfName,
			-wfFileMaxWidth, line.wfFile,
			-eventsMaxWidth, line.events,
		)
	}
	if duplicateJobIDs {
		fmt.Print("\nDetected multiple jobs with the same job name, use `-W` to specify the path to the specific workflow.\n")
	}
	return nil
}

func runExecList(ctx context.Context, planner model.WorkflowPlanner, execArgs *executeArgs) error {
	// plan with filtered jobs - to be used for filtering only
	var filterPlan *model.Plan

	// Determine the event name to be filtered
	var filterEventName string = ""

	if len(execArgs.event) > 0 {
		log.Infof("Using chosed event for filtering: %s", execArgs.event)
		filterEventName = execArgs.event
	} else if execArgs.autodetectEvent {
		// collect all events from loaded workflows
		events := planner.GetEvents()

		// set default event type to first event from many available
		// this way user dont have to specify the event.
		log.Infof("Using first detected workflow event for filtering: %s", events[0])

		filterEventName = events[0]
	}

	if execArgs.job != "" {
		log.Infof("Preparing plan with a job: %s", execArgs.job)
		filterPlan = planner.PlanJob(execArgs.job)
	} else if filterEventName != "" {
		log.Infof("Preparing plan for a event: %s", filterEventName)
		filterPlan = planner.PlanEvent(filterEventName)
	} else {
		log.Infof("Preparing plan with all jobs")
		filterPlan = planner.PlanAll()
	}

	printList(filterPlan)

	return nil
}

func runExec(ctx context.Context, execArgs *executeArgs) func(cmd *cobra.Command, args []string) error {
	return func(cmd *cobra.Command, args []string) error {
		planner, err := model.NewWorkflowPlanner(execArgs.WorkflowsPath(), execArgs.noWorkflowRecurse)
		if err != nil {
			return err
		}

		if execArgs.runList {
			return runExecList(ctx, planner, execArgs)
		}

		// plan with triggered jobs
		var plan *model.Plan

		// Determine the event name to be triggered
		var eventName string

		// collect all events from loaded workflows
		events := planner.GetEvents()

		if len(execArgs.event) > 0 {
			log.Infof("Using chosed event for filtering: %s", execArgs.event)
			eventName = args[0]
		} else if len(events) == 1 && len(events[0]) > 0 {
			log.Infof("Using the only detected workflow event: %s", events[0])
			eventName = events[0]
		} else if execArgs.autodetectEvent && len(events) > 0 && len(events[0]) > 0 {
			// set default event type to first event from many available
			// this way user dont have to specify the event.
			log.Infof("Using first detected workflow event: %s", events[0])
			eventName = events[0]
		} else {
			log.Infof("Using default workflow event: push")
			eventName = "push"
		}

		// build the plan for this run
		if execArgs.job != "" {
			log.Infof("Planning job: %s", execArgs.job)
			plan = planner.PlanJob(execArgs.job)
		} else {
			log.Infof("Planning jobs for event: %s", eventName)
			plan = planner.PlanEvent(eventName)
		}

		maxLifetime := 3 * time.Hour
		if deadline, ok := ctx.Deadline(); ok {
			maxLifetime = time.Until(deadline)
		}

		// init a cache server
		handler, err := artifactcache.NewHandler()
		if err != nil {
			return err
		}
		log.Infof("cache handler listens on: %v", handler.ExternalURL())
		execArgs.cacheHandler = handler

		// run the plan
		config := &runner.Config{
			Workdir:               execArgs.Workdir(),
			BindWorkdir:           false,
			ReuseContainers:       false,
			ForcePull:             execArgs.forcePull,
			ForceRebuild:          execArgs.forceRebuild,
			LogOutput:             true,
			JSONLogger:            execArgs.jsonLogger,
			Env:                   execArgs.LoadEnvs(),
			Secrets:               execArgs.LoadSecrets(),
			InsecureSecrets:       execArgs.insecureSecrets,
			Privileged:            execArgs.privileged,
			UsernsMode:            execArgs.usernsMode,
			ContainerArchitecture: execArgs.containerArchitecture,
			ContainerDaemonSocket: execArgs.containerDaemonSocket,
			UseGitIgnore:          execArgs.useGitIgnore,
			// GitHubInstance:        t.client.Address(),
			ContainerCapAdd:       execArgs.containerCapAdd,
			ContainerCapDrop:      execArgs.containerCapDrop,
			AutoRemove:            true,
			ArtifactServerPath:    execArgs.artifactServerPath,
			ArtifactServerPort:    execArgs.artifactServerPort,
			NoSkipCheckout:        execArgs.noSkipCheckout,
			// PresetGitHubContext:   preset,
			// EventJSON:             string(eventJSON),
			ContainerNamePrefix:   fmt.Sprintf("GITEA-ACTIONS-TASK-%s", eventName),
			ContainerMaxLifetime:  maxLifetime,
			ContainerNetworkMode:  "bridge",
			DefaultActionInstance: execArgs.defaultActionsUrl,
			PlatformPicker: func(_ []string) string {
				return "node:16-bullseye"
			},
		}

		// TODO: handle log level config
		// waiting https://gitea.com/gitea/act/pulls/19
		// if !execArgs.debug {
		// 	logLevel := log.Level(log.InfoLevel)
		// 	config.JobLoggerLevel = &logLevel
		// }

		r, err := runner.New(config)
		if err != nil {
			return err
		}

		if len(execArgs.artifactServerPath) == 0 {
			tempDir, err := os.MkdirTemp("", "gitea-act-")
			if err != nil {
				fmt.Println(err)
			}
			defer os.RemoveAll(tempDir)

			execArgs.artifactServerPath = tempDir
		}

		artifactCancel := artifacts.Serve(ctx, execArgs.artifactServerPath, execArgs.artifactServerPort)
		log.Debugf("artifacts server started at %s:%s", execArgs.artifactServerPath, execArgs.artifactServerPort)

		ctx = common.WithDryrun(ctx, execArgs.dryrun)
		executor := r.NewPlanExecutor(plan).Finally(func(ctx context.Context) error {
			artifactCancel()
			return nil
		})

		return executor(ctx)
	}
}

func loadExecCmd(ctx context.Context) *cobra.Command {
	execArg := executeArgs{}

	execCmd := &cobra.Command{
		Use:   "exec",
		Short: "Run workflow locally.",
		Args:  cobra.MaximumNArgs(20),
		RunE:  runExec(ctx, &execArg),
	}

	execCmd.Flags().BoolVarP(&execArg.runList, "list", "l", false, "list workflows")
	execCmd.Flags().StringVarP(&execArg.job, "job", "j", "", "run a specific job ID")
	execCmd.Flags().StringVarP(&execArg.event, "event", "E", "", "run a event name")
	execCmd.PersistentFlags().StringVarP(&execArg.workflowsPath, "workflows", "W", "./.forgejo/workflows/", "path to workflow file(s)")
	execCmd.PersistentFlags().StringVarP(&execArg.workdir, "directory", "C", ".", "working directory")
	execCmd.PersistentFlags().BoolVarP(&execArg.noWorkflowRecurse, "no-recurse", "", false, "Flag to disable running workflows from subdirectories of specified path in '--workflows'/'-W' flag")
	execCmd.Flags().BoolVarP(&execArg.autodetectEvent, "detect-event", "", false, "Use first event type from workflow as event that triggered the workflow")
	execCmd.Flags().BoolVarP(&execArg.forcePull, "pull", "p", false, "pull docker image(s) even if already present")
	execCmd.Flags().BoolVarP(&execArg.forceRebuild, "rebuild", "", false, "rebuild local action docker image(s) even if already present")
	execCmd.PersistentFlags().BoolVar(&execArg.jsonLogger, "json", false, "Output logs in json format")
	execCmd.Flags().StringArrayVarP(&execArg.envs, "env", "", []string{}, "env to make available to actions with optional value (e.g. --env myenv=foo or --env myenv)")
	execCmd.PersistentFlags().StringVarP(&execArg.envfile, "env-file", "", ".env", "environment file to read and use as env in the containers")
	execCmd.Flags().StringArrayVarP(&execArg.secrets, "secret", "s", []string{}, "secret to make available to actions with optional value (e.g. -s mysecret=foo or -s mysecret)")
	execCmd.PersistentFlags().BoolVarP(&execArg.insecureSecrets, "insecure-secrets", "", false, "NOT RECOMMENDED! Doesn't hide secrets while printing logs.")
	execCmd.Flags().BoolVar(&execArg.privileged, "privileged", false, "use privileged mode")
	execCmd.Flags().StringVar(&execArg.usernsMode, "userns", "", "user namespace to use")
	execCmd.PersistentFlags().StringVarP(&execArg.containerArchitecture, "container-architecture", "", "", "Architecture which should be used to run containers, e.g.: linux/amd64. If not specified, will use host default architecture. Requires Docker server API Version 1.41+. Ignored on earlier Docker server platforms.")
	execCmd.PersistentFlags().StringVarP(&execArg.containerDaemonSocket, "container-daemon-socket", "", "/var/run/docker.sock", "Path to Docker daemon socket which will be mounted to containers")
	execCmd.Flags().BoolVar(&execArg.useGitIgnore, "use-gitignore", true, "Controls whether paths specified in .gitignore should be copied into container")
	execCmd.Flags().StringArrayVarP(&execArg.containerCapAdd, "container-cap-add", "", []string{}, "kernel capabilities to add to the workflow containers (e.g. --container-cap-add SYS_PTRACE)")
	execCmd.Flags().StringArrayVarP(&execArg.containerCapDrop, "container-cap-drop", "", []string{}, "kernel capabilities to remove from the workflow containers (e.g. --container-cap-drop SYS_PTRACE)")
	execCmd.PersistentFlags().StringVarP(&execArg.artifactServerPath, "artifact-server-path", "", ".", "Defines the path where the artifact server stores uploads and retrieves downloads from. If not specified the artifact server will not start.")
	execCmd.PersistentFlags().StringVarP(&execArg.artifactServerPort, "artifact-server-port", "", "34567", "Defines the port where the artifact server listens (will only bind to localhost).")
	execCmd.PersistentFlags().StringVarP(&execArg.defaultActionsUrl, "default-actions-url", "", "https://codeberg.org", "Defines the default url of action instance.")
	execCmd.PersistentFlags().BoolVarP(&execArg.noSkipCheckout, "no-skip-checkout", "", false, "Do not skip actions/checkout")
	execCmd.PersistentFlags().BoolVarP(&execArg.debug, "debug", "d", false, "enable debug log")
	execCmd.PersistentFlags().BoolVarP(&execArg.dryrun, "dryrun", "n", false, "dryrun mode")

	return execCmd
}
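
For orientation only (not part of the diff): with the flags registered in loadExecCmd above, running a single job locally could look roughly like the following, assuming workflows live under the default ./.forgejo/workflows/ path and "build" stands in for a real job id:

	./forgejo-runner exec -j build -E push -n

Here -j selects the job, -E the triggering event, and -n enables dryrun mode; dropping -n would execute the job in a container using the node:16-bullseye platform set in runExec.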

@@ -65,6 +65,9 @@ func FromEnviron() (Config, error) {
 	if runner.UUID != "" {
 		cfg.Runner.UUID = runner.UUID
 	}
+	if runner.Name != "" {
+		cfg.Runner.Name = runner.Name
+	}
 	if runner.Token != "" {
 		cfg.Runner.Token = runner.Token
 	}

@@ -26,7 +26,7 @@ func (s *Runner) Run(ctx context.Context, task *runnerv1.Task) error {
 		env[k] = v
 	}
 	env["ACTIONS_CACHE_URL"] = s.CacheHandler.ExternalURL() + "/"
-	return NewTask(s.ForgeInstance, task.Id, s.Client, env, s.platformPicker).Run(ctx, task)
+	return NewTask(s.ForgeInstance, task.Id, s.Client, env, s.platformPicker).Run(ctx, task, s.Machine)
 }
 
 func (s *Runner) platformPicker(labels []string) string {

@@ -112,7 +112,7 @@ func getToken(task *runnerv1.Task) string {
 	return token
 }
 
-func (t *Task) Run(ctx context.Context, task *runnerv1.Task) (lastErr error) {
+func (t *Task) Run(ctx context.Context, task *runnerv1.Task, runnerName string) (lastErr error) {
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 	_, exist := globalTaskMap.Load(task.Id)

@@ -134,13 +134,14 @@ func (t *Task) Run(ctx context.Context, task *runnerv1.Task) (lastErr error) {
 				Data: log.Fields{
 					"jobResult": "failure",
 				},
+				Time: time.Now(),
 			})
 		}
 		_ = reporter.Close(lastWords)
 	}()
 	reporter.RunDaemon()
 
-	reporter.Logf("received task %v of job %v", task.Id, task.Context.Fields["job"].GetStringValue())
+	reporter.Logf("%s received task %v of job %v", runnerName, task.Id, task.Context.Fields["job"].GetStringValue())
 
 	workflowsPath, err := getWorkflowsPath(t.Input.repoDirectory)
 	if err != nil {

@@ -206,7 +207,7 @@ func (t *Task) Run(ctx context.Context, task *runnerv1.Task) (lastErr error) {
 
 	input := t.Input
 	config := &runner.Config{
-		Workdir:         "/" + preset.Repository,
+		Workdir:         "." + string(filepath.Separator),
 		BindWorkdir:     false,
 		ReuseContainers: false,
 		ForcePull:       input.forcePull,