// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package run
import (
"context"
"encoding/json"
"fmt"
"path/filepath"
"strings"
"sync"
"time"
runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
"github.com/bufbuild/connect-go"
Add configuration item of `container.network` (#184)
Close https://gitea.com/gitea/act_runner/issues/177
Related https://gitea.com/gitea/act/pulls/56
### ⚠️ Breaking
`container.network_mode` is now a deprecated configuration item. It may be removed after Gitea 1.20 is released.
Previously, if the value of `container.network_mode` was `bridge`, it meant that `act_runner` would create a new network for the job. But `bridge` is easily confused with the default bridge network created by Docker.
We recommend using `container.network` to specify the network to which containers created by `act_runner` connect.
### 🆕 container.network
The configuration file of `act_runner` gains a new item, `container.network`.
In `config.example.yaml`:
```yaml
container:
  # Specifies the network to which the container will connect.
  # Could be host, bridge or the name of a custom network.
  # If it's empty, act_runner will create a network automatically.
  network: ""
```
As the comment in the example above says, the purpose of `container.network` is to specify the network to which containers created by `act_runner` will connect.
`container.network` accepts the following valid values:
- `host`: All containers (including job containers and service containers) created by `act_runner` will be connected to the network named `host`, which is created automatically by Docker. Containers will share the host's network stack, and all interfaces from the host will be available to these containers.
- `bridge`: Similar to `host`. All containers created by `act_runner` will be connected to the network named `bridge`, which is created automatically by Docker. All containers connected to `bridge` (possibly including containers not created by `act_runner`) are allowed to communicate with each other, while being isolated from containers that are not connected to that `bridge` network.
- `<custom_network>`: Please make sure the `<custom_network>` network already exists (`act_runner` does not currently check whether the specified network exists; if it does not exist yet, an error is returned at the `docker create` stage). All containers created by `act_runner` will be connected to `<custom_network>`. After the job is executed, the containers are removed and automatically disconnected from `<custom_network>`. See the configuration sketch after this list.
- empty: `act_runner` will create a new network for each job container and its service containers (if defined in the workflow). Each job container and its service containers thus share a network environment, but are isolated from other containers and from the Docker host. These networks created by `act_runner` are removed once the job finishes.
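For instance, a minimal sketch of using a custom network (the name `gitea-ci` is purely illustrative, and the network must be created beforehand, e.g. with `docker network create gitea-ci`):
```yaml
container:
  # act_runner will not create this network for you; if it does not exist,
  # the job fails at the `docker create` stage.
  network: "gitea-ci"
```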
### Others
- If you do not have special needs, we highly recommend setting `container.network` to an empty string (and no longer using `container.network_mode`), because the containers created by `act_runner` will then connect to networks created by `act_runner` itself, which provides better isolation.
- If you set `container.network` to an empty string or `<custom_network>`, service containers can be accessed as `<service-id>:<port>` from the steps of the job, because an alias is added to the service container when it is connected to the network. See the workflow sketch below.
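To illustrate the last point, a hypothetical workflow job could reach a Redis service container through its service id and port (this assumes a tool such as `nc` is available in the job image):
```yaml
jobs:
  test:
    runs-on: ubuntu-latest
    services:
      redis:
        image: redis:7
    steps:
      # The service is reachable as <service-id>:<port>, i.e. redis:6379 here,
      # because act_runner adds the service id as a network alias.
      - run: nc -z redis 6379
```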
Co-authored-by: Jason Song <i@wolfogre.com>
Reviewed-on: https://gitea.com/gitea/act_runner/pulls/184
Reviewed-by: Jason Song <i@wolfogre.com>
Co-authored-by: sillyguodong <gedong_1994@163.com>
Co-committed-by: sillyguodong <gedong_1994@163.com>
"github.com/docker/docker/api/types/container"
"github.com/nektos/act/pkg/artifactcache"
"github.com/nektos/act/pkg/common"
"github.com/nektos/act/pkg/model"
"github.com/nektos/act/pkg/runner"
log "github.com/sirupsen/logrus"
"gitea.com/gitea/act_runner/internal/pkg/client"
"gitea.com/gitea/act_runner/internal/pkg/config"
"gitea.com/gitea/act_runner/internal/pkg/labels"
"gitea.com/gitea/act_runner/internal/pkg/report"
"gitea.com/gitea/act_runner/internal/pkg/ver"
)
// Runner runs the pipeline.
type Runner struct {
	name         string
	cfg          *config.Config
	client       client.Client
	labels       labels.Labels
	envs         map[string]string
	runningTasks sync.Map
}
func NewRunner(cfg *config.Config, reg *config.Registration, cli client.Client) *Runner {
	ls := labels.Labels{}
	for _, v := range reg.Labels {
		if l, err := labels.Parse(v); err == nil {
			ls = append(ls, l)
		}
	}

	if cfg.Runner.Envs == nil {
		cfg.Runner.Envs = make(map[string]string, 10)
	}
	cfg.Runner.Envs["GITHUB_SERVER_URL"] = reg.Address

	envs := make(map[string]string, len(cfg.Runner.Envs))
	for k, v := range cfg.Runner.Envs {
		envs[k] = v
	}
	if cfg.Cache.Enabled == nil || *cfg.Cache.Enabled {
		cacheHandler, err := artifactcache.StartHandler(
			cfg.Cache.Dir,
			cfg.Cache.Host,
			cfg.Cache.Port,
			log.StandardLogger().WithField("module", "cache_request"),
		)
		if err != nil {
			log.Errorf("cannot init cache server, it will be disabled: %v", err)
			// go on
		} else {
			envs["ACTIONS_CACHE_URL"] = cacheHandler.ExternalURL() + "/"
		}
	}

	// set artifact gitea api
	artifactGiteaAPI := strings.TrimSuffix(cli.Address(), "/") + "/api/actions_pipeline/"
	envs["ACTIONS_RUNTIME_URL"] = artifactGiteaAPI

	// Set specific environments to distinguish between Gitea and GitHub
	envs["GITEA_ACTIONS"] = "true"
	envs["GITEA_ACTIONS_RUNNER_VERSION"] = ver.Version()

	return &Runner{
		name:   reg.Name,
		cfg:    cfg,
		client: cli,
		labels: ls,
		envs:   envs,
	}
}
func (r *Runner) Run(ctx context.Context, task *runnerv1.Task) error {
	if _, ok := r.runningTasks.Load(task.Id); ok {
		return fmt.Errorf("task %d is already running", task.Id)
	} else {
		r.runningTasks.Store(task.Id, struct{}{})
		defer r.runningTasks.Delete(task.Id)
	}

	ctx, cancel := context.WithTimeout(ctx, r.cfg.Runner.Timeout)
	defer cancel()
	reporter := report.NewReporter(ctx, cancel, r.client, task)
	var runErr error
	defer func() {
		lastWords := ""
		if runErr != nil {
			lastWords = runErr.Error()
		}
		_ = reporter.Close(lastWords)
	}()
	reporter.RunDaemon()
	runErr = r.run(ctx, task, reporter)
	return nil
}
func (r *Runner) run(ctx context.Context, task *runnerv1.Task, reporter *report.Reporter) (err error) {
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("panic: %v", r)
		}
	}()

	reporter.Logf("%s(version:%s) received task %v of job %v, be triggered by event: %s", r.name, ver.Version(), task.Id, task.Context.Fields["job"].GetStringValue(), task.Context.Fields["event_name"].GetStringValue())

	workflow, jobID, err := generateWorkflow(task)
	if err != nil {
		return err
	}
	plan, err := model.CombineWorkflowPlanner(workflow).PlanJob(jobID)
	if err != nil {
		return err
	}
	job := workflow.GetJob(jobID)
	reporter.ResetSteps(len(job.Steps))

	taskContext := task.Context.Fields

	log.Infof("task %v repo is %v %v %v", task.Id, taskContext["repository"].GetStringValue(),
		taskContext["gitea_default_actions_url"].GetStringValue(),
		r.client.Address())
	preset := &model.GithubContext{
		Event:           taskContext["event"].GetStructValue().AsMap(),
		RunID:           taskContext["run_id"].GetStringValue(),
		RunNumber:       taskContext["run_number"].GetStringValue(),
		Actor:           taskContext["actor"].GetStringValue(),
		Repository:      taskContext["repository"].GetStringValue(),
		EventName:       taskContext["event_name"].GetStringValue(),
		Sha:             taskContext["sha"].GetStringValue(),
		Ref:             taskContext["ref"].GetStringValue(),
		RefName:         taskContext["ref_name"].GetStringValue(),
		RefType:         taskContext["ref_type"].GetStringValue(),
		HeadRef:         taskContext["head_ref"].GetStringValue(),
		BaseRef:         taskContext["base_ref"].GetStringValue(),
		Token:           taskContext["token"].GetStringValue(),
		RepositoryOwner: taskContext["repository_owner"].GetStringValue(),
		RetentionDays:   taskContext["retention_days"].GetStringValue(),
	}
	if t := task.Secrets["GITEA_TOKEN"]; t != "" {
		preset.Token = t
	} else if t := task.Secrets["GITHUB_TOKEN"]; t != "" {
		preset.Token = t
	}
	// use the task token as the Actions API token
	r.envs["ACTIONS_RUNTIME_TOKEN"] = preset.Token

	eventJSON, err := json.Marshal(preset.Event)
	if err != nil {
		return err
	}

	maxLifetime := 3 * time.Hour
	if deadline, ok := ctx.Deadline(); ok {
		maxLifetime = time.Until(deadline)
	}
	runnerConfig := &runner.Config{
		// On Linux, Workdir will be like "/<parent_directory>/<owner>/<repo>"
		// On Windows, Workdir will be like "\<parent_directory>\<owner>\<repo>"
		Workdir:        filepath.FromSlash(filepath.Clean(fmt.Sprintf("/%s/%s", r.cfg.Container.WorkdirParent, preset.Repository))),
		BindWorkdir:    false,
		ActionCacheDir: filepath.FromSlash(r.cfg.Host.WorkdirParent),

		ReuseContainers:       false,
		ForcePull:             false,
		ForceRebuild:          false,
		LogOutput:             true,
		JSONLogger:            false,
		Env:                   r.envs,
		Secrets:               task.Secrets,
		GitHubInstance:        strings.TrimSuffix(r.client.Address(), "/"),
		AutoRemove:            true,
		NoSkipCheckout:        true,
		PresetGitHubContext:   preset,
		EventJSON:             string(eventJSON),
		ContainerNamePrefix:   fmt.Sprintf("GITEA-ACTIONS-TASK-%d", task.Id),
		ContainerMaxLifetime:  maxLifetime,
		ContainerNetworkMode:  container.NetworkMode(r.cfg.Container.Network),
		ContainerOptions:      r.cfg.Container.Options,
		ContainerDaemonSocket: r.cfg.Container.DockerHost,
		Privileged:            r.cfg.Container.Privileged,
		DefaultActionInstance: taskContext["gitea_default_actions_url"].GetStringValue(),
		PlatformPicker:        r.labels.PickPlatform,
		Vars:                  task.Vars,
		ValidVolumes:          r.cfg.Container.ValidVolumes,
	}
	rr, err := runner.New(runnerConfig)
	if err != nil {
		return err
	}
	executor := rr.NewPlanExecutor(plan)

	reporter.Logf("workflow prepared")

	// add logger recorders
	ctx = common.WithLoggerHook(ctx, reporter)

	execErr := executor(ctx)
	reporter.SetOutputs(job.Outputs)
	return execErr
}
func (r *Runner) Declare(ctx context.Context, labels []string) (*connect.Response[runnerv1.DeclareResponse], error) {
	return r.client.Declare(ctx, connect.NewRequest(&runnerv1.DeclareRequest{
		Version: ver.Version(),
		Labels:  labels,
	}))
}