12 Commits

Author SHA1 Message Date
restitux e1382e50ea Cleanup docker build output before saving (#23) 2023-04-07 20:08:59 -06:00
restitux 7f44e5ed41 Add nil check to runner chan (#22) 2023-04-07 19:52:44 -06:00
restitux bcc53dfbe0 Refactored runner map wrap and add to function 2023-04-07 19:52:37 -06:00
restitux bbf96498aa Add updatePipeline endpoint 2023-04-07 18:44:04 -06:00
restitux 954966db58 Start/restart poll job when created or updated
This currently contains the logic for restarting updated jobs,
but nothing exercises this logic. The logic for starting polling
for a newly created pipeline is implemented.
2023-04-07 18:31:59 -06:00
restitux ed7df18f83 Add timeout and retry interval to GetRunner api 2023-04-05 18:38:06 -06:00
restitux a8e9a68f0e Don't sleep on first scan to ease testing 2023-04-05 18:01:10 -06:00
restitux 20c664f0ed Reorganize docker configuration 2023-03-11 18:56:13 -07:00
restitux 0000ea2a13 Rewrite run-dev.sh to use golang program 2023-03-11 13:05:50 -07:00
restitux fe53a17160 Runners are removed from manager when alloacted
This removes an existing unlocked shared access to runner.running.
This also sets us up for better management of the runners.
2023-03-08 00:13:40 -07:00
restitux f190274bce Utilify runner tag printing function 2023-03-07 23:42:01 -07:00
restitux d9ba14550e Removed outdated comments 2023-03-07 19:47:57 -07:00
24 changed files with 517 additions and 284 deletions
+71 -3
View File
@@ -13,7 +13,7 @@ import (
var log = logging.MustGetLogger("cursorius-server") var log = logging.MustGetLogger("cursorius-server")
func createSchema(db database.Database) (graphql.Schema, error) { func createSchema(db database.Database, pollChan chan uuid.UUID) (graphql.Schema, error) {
runnerType := graphql.NewObject(graphql.ObjectConfig{ runnerType := graphql.NewObject(graphql.ObjectConfig{
Name: "Runner", Name: "Runner",
Description: "A runner available for use inside of a pipeline.", Description: "A runner available for use inside of a pipeline.",
@@ -454,6 +454,8 @@ func createSchema(db database.Database) (graphql.Schema, error) {
return nil, err return nil, err
} }
pollChan <- pipeline.Id
return pipeline, nil return pipeline, nil
}, },
}, },
@@ -558,6 +560,72 @@ func createSchema(db database.Database) (graphql.Schema, error) {
return runner, nil return runner, nil
}, },
}, },
"updatePipeline": &graphql.Field{
Type: pipelineType,
Description: "Create a new pipeline",
Args: graphql.FieldConfigArgument{
"pipelineId": &graphql.ArgumentConfig{
Type: graphql.NewNonNull(graphql.String),
},
"name": &graphql.ArgumentConfig{
Type: graphql.String,
},
"url": &graphql.ArgumentConfig{
Type: graphql.String,
},
"pollInterval": &graphql.ArgumentConfig{
Type: graphql.Int,
},
"cloneCredentialId": &graphql.ArgumentConfig{
Type: graphql.String,
},
},
Resolve: func(params graphql.ResolveParams) (interface{}, error) {
pipelineId, err := uuid.Parse(params.Args["pipelineId"].(string))
if err != nil {
return nil, err
}
var name *string
var url *string
var interval *int
if nameVal, ok := params.Args["name"]; ok {
nameVal := nameVal.(string)
name = &nameVal
} else {
name = nil
}
if urlVal, ok := params.Args["url"]; ok {
urlVal := urlVal.(string)
url = &urlVal
} else {
url = nil
}
if intervalVal, ok := params.Args["pollInterval"]; ok {
intervalVal := intervalVal.(int)
interval = &intervalVal
} else {
interval = nil
}
pipeline, err := db.UpdatePipeline(
pipelineId,
name,
url,
interval,
)
if err != nil {
return nil, err
}
pollChan <- pipeline.Id
return pipeline, nil
},
},
"setPipelineCloneCredential": &graphql.Field{ "setPipelineCloneCredential": &graphql.Field{
Type: pipelineType, Type: pipelineType,
Description: "Set the CloneCredential used by a pipeline to clone the source repo", Description: "Set the CloneCredential used by a pipeline to clone the source repo",
@@ -681,9 +749,9 @@ func createSchema(db database.Database) (graphql.Schema, error) {
return schema, nil return schema, nil
} }
func CreateHandler(db database.Database, mux *http.ServeMux) error { func CreateHandler(db database.Database, pollChan chan uuid.UUID, mux *http.ServeMux) error {
schema, err := createSchema(db) schema, err := createSchema(db, pollChan)
if err != nil { if err != nil {
return err return err
} }
+43
View File
@@ -77,6 +77,49 @@ RETURNING id, name, url, poll_interval;`
return pipeline, nil return pipeline, nil
} }
func (db *Database) UpdatePipeline(pipelineId uuid.UUID, name *string, url *string, pollInterval *int) (Pipeline, error) {
query := `
UPDATE pipelines
SET name=$1, url=$2, poll_interval=$3
WHERE id=$4
RETURNING name, url, poll_interval, clone_credential;`
pipeline, err := db.GetPipelineById(pipelineId)
if err != nil {
return pipeline, err
}
var nameNew string
var urlNew string
var pollIntervalNew int
if name != nil {
nameNew = *name
} else {
nameNew = pipeline.Name
}
if url != nil {
urlNew = *url
} else {
urlNew = pipeline.Url
}
if pollInterval != nil {
pollIntervalNew = *pollInterval
} else {
pollIntervalNew = pipeline.PollInterval
}
err = db.Conn.QueryRow(context.Background(),
query, nameNew, urlNew, pollIntervalNew, pipelineId).Scan(
&pipeline.Name, &pipeline.Url, &pipeline.PollInterval, &pipeline.CloneCredential,
)
if err != nil {
return pipeline, fmt.Errorf("Could not add credential to pipeline: %w", err)
}
return pipeline, err
}
func (db *Database) SetPipelineCloneCredential(pipelineId uuid.UUID, credentialId *uuid.UUID) (Pipeline, error) { func (db *Database) SetPipelineCloneCredential(pipelineId uuid.UUID, credentialId *uuid.UUID) (Pipeline, error) {
query := ` query := `
UPDATE pipelines UPDATE pipelines
@@ -4,4 +4,4 @@ MAINTAINER restitux <restitux@ohea.xyz>
RUN apt-get update && apt-get install -y \ RUN apt-get update && apt-get install -y \
ca-certificates \ ca-certificates \
&& rm -rf /var/lib/apt/lists/* && rm -rf /var/lib/apt/lists/*
ENTRYPOINT ["/build/server/docker/build-and-run.sh"] ENTRYPOINT ["/build/server/docker/cursorius/build-and-run.sh"]
+19 -4
View File
@@ -2,15 +2,15 @@ version: "3.3"
services: services:
cursorius-server: cursorius-server:
build: build:
context: .. context: ".."
dockerfile: docker/Dockerfile.dev dockerfile: "docker/cursorius/Dockerfile.dev"
ports: ports:
- "0.0.0.0:45420:45420" - "0.0.0.0:45420:45420"
networks: networks:
- cursorius - cursorius
volumes: volumes:
- "..:/build/server" - "..:/build/server"
- "../server.toml:/root/.config/cursorius/server.toml" - "./server.toml:/root/.config/cursorius/server.toml"
- "/var/run/docker.sock:/var/run/docker.sock" - "/var/run/docker.sock:/var/run/docker.sock"
- "../_working/go:/go" - "../_working/go:/go"
- "../_working/jobs:/cursorius/jobs" - "../_working/jobs:/cursorius/jobs"
@@ -20,15 +20,30 @@ services:
- POSTGRES_USER=cursorius - POSTGRES_USER=cursorius
- POSTGRES_PASSWORD=cursorius - POSTGRES_PASSWORD=cursorius
- POSTGRES_DB=cursorius - POSTGRES_DB=cursorius
volumes:
- "../_working/postgres:/var/lib/postgresql/data"
networks: networks:
- cursorius - cursorius
graphiql: graphiql:
build: build:
dockerfile: Dockerfile.graphiql context: "graphiql"
dockerfile: "Dockerfile.graphiql"
ports: ports:
- "0.0.0.0:45421:80" - "0.0.0.0:45421:80"
networks: networks:
- cursorius - cursorius
gitea:
image: gitea/gitea:latest
profiles: ["gitea"]
environment:
- GITEA__webhook__ALLOWED_HOST_LIST=cursorius-server, external
ports:
- "127.0.0.1:2222:22"
- "127.0.0.1:3000:3000"
networks:
- cursorius
volumes:
- "../_working/gitea:/data"
networks: networks:
cursorius: cursorius:
-24
View File
@@ -1,24 +0,0 @@
version: "3.3"
services:
cursorius-server:
networks:
- gitea
gitea:
image: gitea/gitea:latest
environment:
- GITEA__webhook__ALLOWED_HOST_LIST=cursorius-server, external
ports:
- "127.0.0.1:2222:22"
- "127.0.0.1:3000:3000"
networks:
- gitea
volumes:
- gitea-data:/data
volumes:
gitea-data:
networks:
gitea:
external: false
+1 -101
View File
@@ -2,104 +2,4 @@
set -e set -e
mkdir -p _working/go go run docker/run.go "$@"
mkdir -p _working/jobs
base_default_compose_files="docker/docker-compose.yml"
default_compose_files="$base_default_compose_files"
override_compose="docker/docker-compose.override.yml"
gitea_compose="docker/gitea-override.yml"
if [ -f "$override_compose" ]
then
default_compose_files+=" $override_compose"
else
default_compose_files="docker/docker-compose.yml"
fi
function stop_containers {
current_containers="$(cat _working/current_containers)"
if [ "$current_containers" == "default" ]
then
compose_files="$default_compose_files"
elif [ "$current_containers" == "gitea" ]
then
compose_files="$default_compose_files $gitea_compose"
fi
compose_file_flags=$(echo "$compose_files" | tr ' ' '\n' | xargs -I'{}' echo "-f {} " | tr -d '\n')
docker compose $compose_file_flags down
}
function show_logs {
current_containers="$(cat _working/current_containers)"
if [ "$current_containers" == "default" ]
then
compose_files="$default_compose_files"
elif [ "$current_containers" == "gitea" ]
then
compose_files="$default_compose_files $gitea_compose"
fi
compose_file_flags=$(echo "$compose_files" | tr ' ' '\n' | xargs -I'{}' echo "-f {} " | tr -d '\n')
docker compose $compose_file_flags logs -f
}
function show_ps {
current_containers="$(cat _working/current_containers)"
if [ "$current_containers" == "default" ]
then
compose_files="$default_compose_files"
elif [ "$current_containers" == "gitea" ]
then
compose_files="$default_compose_files $gitea_compose"
fi
compose_file_flags=$(echo "$compose_files" | tr ' ' '\n' | xargs -I'{}' echo "-f {} " | tr -d '\n')
docker compose $compose_file_flags ps
}
function runprev {
current_containers="$(cat _working/current_containers)"
if [ "$current_containers" == "default" ]
then
compose_files="$default_compose_files"
elif [ "$current_containers" == "gitea" ]
then
compose_files="$default_compose_files $gitea_compose"
fi
compose_file_flags=$(echo "$compose_files" | tr ' ' '\n' | xargs -I'{}' echo "-f {} " | tr -d '\n')
docker compose $compose_file_flags up --build -d
docker compose $compose_file_flags logs -f
}
case $1 in
"default")
echo "default" > _working/current_containers
compose_files="$default_compose_files"
compose_file_flags=$(echo "$compose_files" | tr ' ' '\n' | xargs -I'{}' echo "-f {} " | tr -d '\n')
docker compose $compose_file_flags up --build -d
docker compose $compose_file_flags logs -f;;
"gitea")
echo "gitea" > _working/current_containers
stop_containers
compose_files="$default_compose_files $gitea_compose"
compose_file_flags=$(echo "$compose_files" | tr ' ' '\n' | xargs -I'{}' echo "-f {} " | tr -d '\n')
docker compose $compose_file_flags up --build -d
docker compose $compose_file_flags logs -f;;
"dbshell")
compose_files="$default_compose_files $gitea_compose"
compose_file_flags=$(echo "$compose_files" | tr ' ' '\n' | xargs -I'{}' echo "-f {} " | tr -d '\n')
docker compose $compose_file_flags exec cursorius-db psql --user=cursorius;;
"stop")
stop_containers;;
"logs")
show_logs;;
"ps")
show_ps;;
"runprev")
runprev;;
*) echo "ERROR: Unknown param \"$1\"" 2>&1
exit 255;;
esac
+93
View File
@@ -0,0 +1,93 @@
package main
import (
"fmt"
"os"
"os/exec"
)
func panicError(errorString string, params ...any) {
fmt.Fprintf(os.Stderr, fmt.Sprintf("ERROR: %v\n", errorString), params...)
os.Exit(1)
}
func run(name string, arg ...string) {
cmd := exec.Command(name, arg...)
if err := cmd.Run(); err != nil {
panicError("could not run command %v: %v", name, err)
}
}
func runAttach(name string, arg ...string) {
cmd := exec.Command(name, arg...)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
panicError("could not run command %v: %v", name, err)
}
}
func runCompose(args []string) {
runAttach("docker", append([]string{"compose"}, args...)...)
}
func createDirs() {
run("mkdir", "-p", "_working/go")
run("mkdir", "-p", "_working/jobs")
}
func currentContainers() string {
bytes, err := os.ReadFile("_working/current_containers")
if err != nil {
panicError("could not read current containers: %v", err)
}
return string(bytes)
}
func composeFlags() []string {
containers := currentContainers()
flags := []string{"-f", "docker/docker-compose.yml"}
switch containers {
case "gitea":
flags = append(flags, "--profile", "gitea")
}
return flags
}
func runContainers(containers string) {
err := os.WriteFile("_working/current_containers", []byte(containers), 0633)
if err != nil {
panicError("could not write current_containers file: %v", err)
}
runCompose(append(composeFlags(), "up", "--build", "-d"))
runCompose(append(composeFlags(), "logs", "-f"))
}
func main() {
if len(os.Args) < 2 {
panicError("not enough arguments passed")
}
createDirs()
switch os.Args[1] {
case "default", "gitea":
runContainers(os.Args[1])
case "runprev":
runContainers(currentContainers())
case "stop":
runCompose(append(composeFlags(), "down"))
case "dbshell":
runCompose(append(composeFlags(), "exec", "cursorius-db", "psql", "--user=cursorius"))
case "logs":
runCompose(append(composeFlags(), "logs", "-f"))
case "ps":
runCompose(append(composeFlags(), "ps"))
case "help":
fmt.Println("commands: default, gitea, runprev, stop, dbshell, logs, ps, help")
default:
panicError("Unknown subcommand: %v", os.Args[1])
}
}
+2 -2
View File
@@ -3,7 +3,7 @@ module git.ohea.xyz/cursorius/server
go 1.19 go 1.19
require ( require (
git.ohea.xyz/cursorius/pipeline-api/go/api/v2 v2.0.0-20230109075652-ead0aeff2eb9 git.ohea.xyz/cursorius/pipeline-api/go/api/v2 v2.0.0-20230405234139-34d8875b72f4
git.ohea.xyz/cursorius/runner-api/go/api/v2 v2.0.0-20230109074922-e20285fe6cf2 git.ohea.xyz/cursorius/runner-api/go/api/v2 v2.0.0-20230109074922-e20285fe6cf2
git.ohea.xyz/golang/config v0.0.0-20220915224621-b9debd233173 git.ohea.xyz/golang/config v0.0.0-20220915224621-b9debd233173
github.com/bufbuild/connect-go v1.4.1 github.com/bufbuild/connect-go v1.4.1
@@ -17,7 +17,7 @@ require (
github.com/jhoonb/archivex v0.0.0-20201016144719-6a343cdae81d github.com/jhoonb/archivex v0.0.0-20201016144719-6a343cdae81d
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7 github.com/op/go-logging v0.0.0-20160315200505-970db520ece7
golang.org/x/net v0.2.0 golang.org/x/net v0.2.0
google.golang.org/protobuf v1.28.1 google.golang.org/protobuf v1.30.0
nhooyr.io/websocket v1.8.7 nhooyr.io/websocket v1.8.7
) )
+4 -3
View File
@@ -78,8 +78,8 @@ contrib.go.opencensus.io/exporter/stackdriver v0.13.5/go.mod h1:aXENhDJ1Y4lIg4EU
contrib.go.opencensus.io/integrations/ocsql v0.1.4/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE= contrib.go.opencensus.io/integrations/ocsql v0.1.4/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE=
contrib.go.opencensus.io/resource v0.1.1/go.mod h1:F361eGI91LCmW1I/Saf+rX0+OFcigGlFvXwEGEnkRLA= contrib.go.opencensus.io/resource v0.1.1/go.mod h1:F361eGI91LCmW1I/Saf+rX0+OFcigGlFvXwEGEnkRLA=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
git.ohea.xyz/cursorius/pipeline-api/go/api/v2 v2.0.0-20230109075652-ead0aeff2eb9 h1:8p7Kw3B7dbi2zdgG+Me9ETRWrJzoNVjcase4YqXfGbs= git.ohea.xyz/cursorius/pipeline-api/go/api/v2 v2.0.0-20230405234139-34d8875b72f4 h1:kKQQEg1nmWnqiNOqtUHteEuacyfy0NdxyDj6HPjbA2c=
git.ohea.xyz/cursorius/pipeline-api/go/api/v2 v2.0.0-20230109075652-ead0aeff2eb9/go.mod h1:D7GGcFIH421mo6KuRaXXXmlXPwWeEsemTZG/BOZA/4o= git.ohea.xyz/cursorius/pipeline-api/go/api/v2 v2.0.0-20230405234139-34d8875b72f4/go.mod h1:D7GGcFIH421mo6KuRaXXXmlXPwWeEsemTZG/BOZA/4o=
git.ohea.xyz/cursorius/runner-api/go/api/v2 v2.0.0-20230109074922-e20285fe6cf2 h1:G1XQEqhj1LZPQbH7avzvT7QL9Wfbb4CXMm0nLL39eDc= git.ohea.xyz/cursorius/runner-api/go/api/v2 v2.0.0-20230109074922-e20285fe6cf2 h1:G1XQEqhj1LZPQbH7avzvT7QL9Wfbb4CXMm0nLL39eDc=
git.ohea.xyz/cursorius/runner-api/go/api/v2 v2.0.0-20230109074922-e20285fe6cf2/go.mod h1:F9y5Ck4Wchsaj5amSX2eDRUlQ/iYP1VNLFduvjNwmLc= git.ohea.xyz/cursorius/runner-api/go/api/v2 v2.0.0-20230109074922-e20285fe6cf2/go.mod h1:F9y5Ck4Wchsaj5amSX2eDRUlQ/iYP1VNLFduvjNwmLc=
git.ohea.xyz/cursorius/webhooks/v6 v6.0.2-0.20221224221147-a2bdbf1756ed h1:gsK15m4Npow74+R6OfZKwwAg1sl7QWQCRXOeE2QLUco= git.ohea.xyz/cursorius/webhooks/v6 v6.0.2-0.20221224221147-a2bdbf1756ed h1:gsK15m4Npow74+R6OfZKwwAg1sl7QWQCRXOeE2QLUco=
@@ -2129,8 +2129,9 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+11 -9
View File
@@ -10,6 +10,8 @@ import (
"git.ohea.xyz/cursorius/server/pipeline_api" "git.ohea.xyz/cursorius/server/pipeline_api"
"git.ohea.xyz/cursorius/server/runnermanager" "git.ohea.xyz/cursorius/server/runnermanager"
"git.ohea.xyz/cursorius/server/webhook" "git.ohea.xyz/cursorius/server/webhook"
"github.com/google/uuid"
"github.com/op/go-logging" "github.com/op/go-logging"
"golang.org/x/net/http2" "golang.org/x/net/http2"
"golang.org/x/net/http2/h2c" "golang.org/x/net/http2/h2c"
@@ -22,15 +24,15 @@ func setupHTTPServer(
mux *http.ServeMux, mux *http.ServeMux,
conf config.PipelineConf, conf config.PipelineConf,
db database.Database, db database.Database,
registerCh chan runnermanager.RunnerRegistration, runnerManagerChans runnermanager.RunnerManagerChans,
getRunnerCh chan runnermanager.GetRunnerRequest, pollChan chan uuid.UUID,
) error { ) error {
webhook.CreateWebhookHandler(db, conf, mux) webhook.CreateWebhookHandler(db, conf, mux)
pipeline_api.CreateHandler(getRunnerCh, mux) pipeline_api.CreateHandler(runnerManagerChans.Allocation, runnerManagerChans.Release, mux)
err := admin_api.CreateHandler(db, mux) err := admin_api.CreateHandler(db, pollChan, mux)
if err != nil { if err != nil {
return fmt.Errorf("Could not create admin api handler: %w", err) return fmt.Errorf("Could not create admin api handler: %w", err)
} }
@@ -41,7 +43,7 @@ func setupHTTPServer(
log.Errorf("Could not upgrade runner connection to websocket: %v", err) log.Errorf("Could not upgrade runner connection to websocket: %v", err)
return return
} }
go runnermanager.RegisterRunner(conn, registerCh) go runnermanager.RegisterRunner(conn, runnerManagerChans.Registration)
}) })
return nil return nil
} }
@@ -52,16 +54,16 @@ func Listen(
port int, port int,
conf config.PipelineConf, conf config.PipelineConf,
db database.Database, db database.Database,
registerCh chan runnermanager.RunnerRegistration, runnerManagerChans runnermanager.RunnerManagerChans,
getRunnerCh chan runnermanager.GetRunnerRequest, pollChan chan uuid.UUID,
) error { ) error {
err := setupHTTPServer( err := setupHTTPServer(
mux, mux,
conf, conf,
db, db,
registerCh, runnerManagerChans,
getRunnerCh, pollChan,
) )
if err != nil { if err != nil {
return fmt.Errorf("Could not setup http endpoints: %w", err) return fmt.Errorf("Could not setup http endpoints: %w", err)
+4 -4
View File
@@ -40,13 +40,13 @@ func main() {
return return
} }
getRunnerCh, registerCh, err := runnermanager.StartRunnerManager(configData.Config.Runners, db) runnerManagerChans, err := runnermanager.StartRunnerManager(configData.Config.Runners, db)
if err != nil { if err != nil {
log.Errorf("Could not start runner: %v", err) log.Errorf("Could not start runner: %v", err)
return return
} }
_ = poll.StartPolling(configData.Config.PipelineConf, db) pollChan := poll.StartPolling(configData.Config.PipelineConf, db)
mux := http.NewServeMux() mux := http.NewServeMux()
@@ -56,7 +56,7 @@ func main() {
configData.Config.Port, configData.Config.Port,
configData.Config.PipelineConf, configData.Config.PipelineConf,
db, db,
registerCh, runnerManagerChans,
getRunnerCh, pollChan,
)) ))
} }
+64 -27
View File
@@ -4,12 +4,13 @@ import (
"context" "context"
"fmt" "fmt"
"net/http" "net/http"
"strings"
"sync" "sync"
"time"
apiv2 "git.ohea.xyz/cursorius/pipeline-api/go/api/v2" apiv2 "git.ohea.xyz/cursorius/pipeline-api/go/api/v2"
"git.ohea.xyz/cursorius/pipeline-api/go/api/v2/apiv2connect" "git.ohea.xyz/cursorius/pipeline-api/go/api/v2/apiv2connect"
"git.ohea.xyz/cursorius/server/runnermanager" "git.ohea.xyz/cursorius/server/runnermanager"
"git.ohea.xyz/cursorius/server/util"
"github.com/bufbuild/connect-go" "github.com/bufbuild/connect-go"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/op/go-logging" "github.com/op/go-logging"
@@ -18,7 +19,8 @@ import (
var log = logging.MustGetLogger("cursorius-server") var log = logging.MustGetLogger("cursorius-server")
type ApiServer struct { type ApiServer struct {
getRunnerCh chan runnermanager.GetRunnerRequest allocationCh chan runnermanager.RunnerAllocationRequest
releaseCh chan runnermanager.RunnerReleaseRequest
allocatedRunners map[uuid.UUID]*RunnerWrapper allocatedRunners map[uuid.UUID]*RunnerWrapper
allocatedRunnersMutex sync.RWMutex allocatedRunnersMutex sync.RWMutex
} }
@@ -34,15 +36,17 @@ func (r *RunnerWrapper) RunCommand(cmd string, args []string) (int64, string, st
return_code, stdout, stderr, err := r.runner.RunCommand(cmd, args) return_code, stdout, stderr, err := r.runner.RunCommand(cmd, args)
// TODO: run command by sending websocket packet
// TODO: get stdout and stderr response
return return_code, stdout, stderr, err return return_code, stdout, stderr, err
} }
func (r *RunnerWrapper) Release() { func (r *RunnerWrapper) Release(releaseCh chan runnermanager.RunnerReleaseRequest) {
r.mutex.Lock() r.mutex.Lock()
defer r.mutex.Unlock() defer r.mutex.Unlock()
r.runner.Release()
releaseCh <- runnermanager.RunnerReleaseRequest{
Runner: r.runner,
}
r.runner = nil
} }
func (s *ApiServer) GetRunnerFromMap(u uuid.UUID) (*RunnerWrapper, bool) { func (s *ApiServer) GetRunnerFromMap(u uuid.UUID) (*RunnerWrapper, bool) {
@@ -52,39 +56,71 @@ func (s *ApiServer) GetRunnerFromMap(u uuid.UUID) (*RunnerWrapper, bool) {
return runner, ok return runner, ok
} }
func (s *ApiServer) AddRunnerToMap(u uuid.UUID, runner *runnermanager.Runner) {
s.allocatedRunnersMutex.Lock()
defer s.allocatedRunnersMutex.Unlock()
s.allocatedRunners[u] = &RunnerWrapper{runner: runner}
}
func (s *ApiServer) GetRunner( func (s *ApiServer) GetRunner(
ctx context.Context, ctx context.Context,
req *connect.Request[apiv2.GetRunnerRequest], req *connect.Request[apiv2.GetRunnerRequest],
) (*connect.Response[apiv2.GetRunnerResponse], error) { ) (*connect.Response[apiv2.GetRunnerResponse], error) {
respChan := make(chan runnermanager.GetRunnerResponse) var response runnermanager.RunnerAllocationResponse
s.getRunnerCh <- runnermanager.GetRunnerRequest{ var timeoutCtx *context.Context
Tags: req.Msg.Tags, var retryInterval int64 = 0
RespChan: respChan,
}
var runnerTagsStr strings.Builder respChan := make(chan runnermanager.RunnerAllocationResponse)
if len(req.Msg.Tags) > 0 {
fmt.Fprintf(&runnerTagsStr, "[%v", req.Msg.Tags[0]) tagsStr := util.FormatTags(req.Msg.Tags)
for _, tag := range req.Msg.Tags[1:] {
fmt.Fprintf(&runnerTagsStr, ", %v", tag) if req.Msg.Options != nil {
if req.Msg.Options.Timeout != 0 {
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(req.Msg.Options.Timeout)*time.Second)
timeoutCtx = &ctx
defer cancel()
} }
fmt.Fprintf(&runnerTagsStr, "]")
retryInterval = req.Msg.Options.RetryInterval
} }
response := <-respChan for {
if response.Err != nil { s.allocationCh <- runnermanager.RunnerAllocationRequest{
log.Errorf("Could not get runner with tags \"%v\": %v", runnerTagsStr.String(), response.Err) Tags: req.Msg.Tags,
RespChan: respChan,
}
response = <-respChan
if response.Err == nil {
break
}
log.Infof("Could not get runner with tags \"%v\": %v", tagsStr, response.Err)
// If no timeout is specified, skip after one attempt
if timeoutCtx == nil {
break
}
// If timeout is expired, stop trying to allocate runner
if (*timeoutCtx).Err() != nil {
break
}
log.Infof("Sleeping for %v seconds before retry...", retryInterval)
time.Sleep(time.Duration(retryInterval) * time.Second)
}
if response.Runner == nil {
return nil, connect.NewError(connect.CodeNotFound, fmt.Errorf("Could not get runner")) return nil, connect.NewError(connect.CodeNotFound, fmt.Errorf("Could not get runner"))
} }
log.Infof("Got runner with tags: %v", runnerTagsStr.String()) log.Infof("Got runner with tags: %v", tagsStr)
runnerUuid := uuid.New() runnerUuid := uuid.New()
s.allocatedRunnersMutex.Lock() s.AddRunnerToMap(runnerUuid, response.Runner)
s.allocatedRunners[runnerUuid] = &RunnerWrapper{runner: response.Runner}
s.allocatedRunnersMutex.Unlock()
res := connect.NewResponse(&apiv2.GetRunnerResponse{ res := connect.NewResponse(&apiv2.GetRunnerResponse{
Id: runnerUuid.String(), Id: runnerUuid.String(),
@@ -107,7 +143,7 @@ func (s *ApiServer) ReleaseRunner(
s.allocatedRunnersMutex.Lock() s.allocatedRunnersMutex.Lock()
runner := s.allocatedRunners[uuid] runner := s.allocatedRunners[uuid]
delete(s.allocatedRunners, uuid) delete(s.allocatedRunners, uuid)
runner.Release() runner.Release(s.releaseCh)
s.allocatedRunnersMutex.Unlock() s.allocatedRunnersMutex.Unlock()
res := connect.NewResponse(&apiv2.ReleaseRunnerResponse{}) res := connect.NewResponse(&apiv2.ReleaseRunnerResponse{})
@@ -146,9 +182,10 @@ func (s *ApiServer) RunCommand(
return res, nil return res, nil
} }
func CreateHandler(getRunnerCh chan runnermanager.GetRunnerRequest, mux *http.ServeMux) { func CreateHandler(allocationCh chan runnermanager.RunnerAllocationRequest, releaseCh chan runnermanager.RunnerReleaseRequest, mux *http.ServeMux) {
api_server := &ApiServer{ api_server := &ApiServer{
getRunnerCh: getRunnerCh, allocationCh: allocationCh,
releaseCh: releaseCh,
allocatedRunners: make(map[uuid.UUID]*RunnerWrapper), allocatedRunners: make(map[uuid.UUID]*RunnerWrapper),
} }
path, handler := apiv2connect.NewGetRunnerServiceHandler(api_server) path, handler := apiv2connect.NewGetRunnerServiceHandler(api_server)
+1 -8
View File
@@ -4,7 +4,6 @@ import (
"bytes" "bytes"
"context" "context"
"fmt" "fmt"
"io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
@@ -141,13 +140,7 @@ func ExecutePipeline(pe PipelineExecution, db database.Database, pipelineConf co
return return
} }
response, err := ioutil.ReadAll(buildResponse.Body) err = db.UpdateRunBuildOutput(pe.Run.Id, cleanupBuildOutput(buildResponse.Body))
if err != nil {
log.Errorf("could no read build response: %w", err)
return
}
err = db.UpdateRunBuildOutput(pe.Run.Id, string(response))
if err != nil { if err != nil {
log.Errorf("could not update build output for run: %w", err) log.Errorf("could not update build output for run: %w", err)
return return
+25
View File
@@ -0,0 +1,25 @@
package pipeline_executor
import (
"bufio"
"encoding/json"
"io"
)
func cleanupBuildOutput(input io.ReadCloser) string {
output := ""
scanner := bufio.NewScanner(input)
for scanner.Scan() {
var log map[string]any
json.Unmarshal(scanner.Bytes(), &log)
if logVar, ok := log["stream"]; ok {
if log, ok := logVar.(string); ok {
output += log
}
}
}
return output
}
+46 -9
View File
@@ -1,17 +1,18 @@
package poll package poll
import ( import (
"context"
"time" "time"
"github.com/google/uuid"
"github.com/op/go-logging"
"git.ohea.xyz/cursorius/server/config" "git.ohea.xyz/cursorius/server/config"
"git.ohea.xyz/cursorius/server/database" "git.ohea.xyz/cursorius/server/database"
"git.ohea.xyz/cursorius/server/pipeline_executor" "git.ohea.xyz/cursorius/server/pipeline_executor"
"github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/storage/memory" "github.com/go-git/go-git/v5/storage/memory"
"github.com/google/uuid"
"github.com/op/go-logging"
) )
var log = logging.MustGetLogger("cursorius-server") var log = logging.MustGetLogger("cursorius-server")
@@ -26,10 +27,31 @@ type tag struct {
commitHash string commitHash string
} }
func pollJob(pipeline database.Pipeline, pipelineConf config.PipelineConf, db database.Database) { func pollJob(ctx context.Context, pipeline database.Pipeline, pipelineConf config.PipelineConf, db database.Database) {
firstScan := true
for { for {
time.Sleep(time.Duration(pipeline.PollInterval) * time.Second) // Don't sleep on first scan to ease testing
log.Infof("Polling repo %v", pipeline.Name) // TODO: this should be replaced with a script that mocks a webhook
if !firstScan {
ctx, cancel := context.WithTimeout(ctx, time.Duration(pipeline.PollInterval)*time.Second)
select {
case <-ctx.Done():
switch ctx.Err() {
case context.Canceled:
log.Infof("Polling for pipeline %v canceled, stopping", pipeline.Name)
cancel()
return
}
}
cancel()
log.Infof("Polling repo %v", pipeline.Name)
} else {
firstScan = false
}
prevRefs, err := db.GetPipelineRefs(pipeline.Id) prevRefs, err := db.GetPipelineRefs(pipeline.Id)
if err != nil { if err != nil {
@@ -129,11 +151,17 @@ func launchPollJobs(conf config.PipelineConf, db database.Database, pollChan cha
return return
} }
pipelineCancelations := make(map[uuid.UUID]context.CancelFunc)
for _, pipeline := range pipelines { for _, pipeline := range pipelines {
if pipeline.PollInterval == 0 { if pipeline.PollInterval == 0 {
continue continue
} else { } else {
go pollJob(pipeline, conf, db) ctx, cancel := context.WithCancel(context.Background())
pipelineCancelations[pipeline.Id] = cancel
log.Infof("Starting polling for pipeline %v with id %v", pipeline.Name, pipeline.Id)
go pollJob(ctx, pipeline, conf, db)
} }
} }
@@ -144,8 +172,17 @@ func launchPollJobs(conf config.PipelineConf, db database.Database, pollChan cha
log.Errorf("Could not get pipeline with id \"%v\" from database: %v", err) log.Errorf("Could not get pipeline with id \"%v\" from database: %v", err)
continue continue
} }
// TODO: stop existing polling process for given uuid
go pollJob(pipeline, conf, db) // Cancel existing polling job if it exists
if cancelFunc, ok := pipelineCancelations[pipeline.Id]; ok {
cancelFunc()
}
// Start new polling job
log.Infof("Starting polling for pipeline %v with id %v", pipeline.Name, pipeline.Id)
ctx, cancel := context.WithCancel(context.Background())
pipelineCancelations[pipeline.Id] = cancel
go pollJob(ctx, pipeline, conf, db)
} }
} }
+21 -5
View File
@@ -22,19 +22,33 @@ type Runner struct {
tags []string tags []string
conn *websocket.Conn conn *websocket.Conn
receiveChan chan []byte receiveChan chan []byte
running bool }
func (r *Runner) HasTags(requestedTags []string) bool {
tagIter:
for _, requestedTag := range requestedTags {
for _, posessedTag := range r.tags {
// if we find the tag, move on to search for the next one
if posessedTag == requestedTag {
continue tagIter
}
}
// if we don't find the tag
return false
}
return true
} }
func (r *Runner) Id() uuid.UUID { func (r *Runner) Id() uuid.UUID {
return r.id return r.id
} }
func (r *Runner) Release() {
r.running = false
}
func (r *Runner) RunCommand(cmd string, args []string) (returnCode int64, stdout string, stderr string, err error) { func (r *Runner) RunCommand(cmd string, args []string) (returnCode int64, stdout string, stderr string, err error) {
if r.conn == nil {
return 0, "", "", fmt.Errorf("runner with id %v has nil conn, THIS IS A BUG", r.id)
}
// Write RunCommand message to client // Write RunCommand message to client
serverToRunnerMsg := &runner_api.ServerToRunnerMsg{ serverToRunnerMsg := &runner_api.ServerToRunnerMsg{
Msg: &runner_api.ServerToRunnerMsg_RunCommandMsg{ Msg: &runner_api.ServerToRunnerMsg_RunCommandMsg{
@@ -87,6 +101,8 @@ func (r *Runner) sendProtoStruct(p protoreflect.ProtoMessage) error {
ctx := context.Background() ctx := context.Background()
log.Debugf("r.conn: %p", r.conn)
if err := r.conn.Write(ctx, websocket.MessageBinary, protoOut); err != nil { if err := r.conn.Write(ctx, websocket.MessageBinary, protoOut); err != nil {
return fmt.Errorf("Could not send proto to websocket: %w", err) return fmt.Errorf("Could not send proto to websocket: %w", err)
} }
+44 -84
View File
@@ -3,7 +3,6 @@ package runnermanager
import ( import (
"context" "context"
"fmt" "fmt"
"strings"
"time" "time"
"github.com/google/uuid" "github.com/google/uuid"
@@ -13,66 +12,24 @@ import (
"git.ohea.xyz/cursorius/server/config" "git.ohea.xyz/cursorius/server/config"
"git.ohea.xyz/cursorius/server/database" "git.ohea.xyz/cursorius/server/database"
"git.ohea.xyz/cursorius/server/util"
runner_api "git.ohea.xyz/cursorius/runner-api/go/api/v2" runner_api "git.ohea.xyz/cursorius/runner-api/go/api/v2"
) )
var log = logging.MustGetLogger("cursorius-server") var log = logging.MustGetLogger("cursorius-server")
type RunnerRegistration struct { func (r *runnerManager) processRunnerAllocation(req RunnerAllocationRequest) {
Secret string tagsStr := util.FormatTags(req.Tags)
Id string log.Infof("Got request for runner with tags \"%v\"", tagsStr)
Tags []string
conn *websocket.Conn
}
type runnerManager struct { log.Debugf("Finding runner with tags %v", tagsStr)
getRunnerCh chan GetRunnerRequest
registerCh chan RunnerRegistration
connectedRunners []Runner
numConnectedRunners uint64
configuredRunners map[string]config.Runner
db database.Database
}
type GetRunnerRequest struct {
Tags []string
RespChan chan GetRunnerResponse
}
type GetRunnerResponse struct {
Runner *Runner
Err error
}
type runnerJob struct {
Id string
URL string
}
func (r *runnerManager) processRequest(req GetRunnerRequest) {
var runnerTagsStr strings.Builder
if len(req.Tags) > 0 {
fmt.Fprintf(&runnerTagsStr, "[%v", req.Tags[0])
for _, tag := range req.Tags[1:] {
fmt.Fprintf(&runnerTagsStr, ", %v", tag)
}
fmt.Fprintf(&runnerTagsStr, "]")
}
log.Infof("Got request for runner with tags \"%v\"", runnerTagsStr.String())
log.Debugf("Finding runner with tags %v", runnerTagsStr.String())
foundRunner := false foundRunner := false
runnersToRemove := []int{} runnersToRemove := []int{}
runnerIter: runnerIter:
for i, runner := range r.connectedRunners { for i, runner := range r.connectedRunners {
// don't allocate runner that is already occupied
if runner.running {
log.Debugf("Skipping runner %v, as runner is activly running another job", runner.id)
continue
}
// don't allocate runner with closed receiveChan (is defunct) // don't allocate runner with closed receiveChan (is defunct)
// there should never be messages to read on an inactive runner, // there should never be messages to read on an inactive runner,
// so we aren't losing any data here // so we aren't losing any data here
@@ -89,29 +46,25 @@ runnerIter:
default: default:
log.Debugf("Checking runner %v for requested tags", runner.id) log.Debugf("Checking runner %v for requested tags", runner.id)
tagIter: if !runner.HasTags(req.Tags) {
for _, requestedTag := range req.Tags {
for _, posessedTag := range runner.tags {
if requestedTag == posessedTag {
continue tagIter
}
}
continue runnerIter continue runnerIter
} }
r.connectedRunners[i].running = true runnersToRemove = append(runnersToRemove, i)
foundRunner = true foundRunner = true
req.RespChan <- GetRunnerResponse{ log.Debugf("Runner %v has requested tags, allocating", runner.id)
req.RespChan <- RunnerAllocationResponse{
Runner: &r.connectedRunners[i], Runner: &r.connectedRunners[i],
Err: nil, Err: nil,
} }
} }
} }
// remove allocated runner plus defunct runners
// since we iterate, all the indexes will be in accending order // since we iterate, all the indexes will be in accending order
for i, runnerInd := range runnersToRemove { for i, runnerInd := range runnersToRemove {
r.connectedRunners[runnerInd-i] = r.connectedRunners[len(r.connectedRunners)-1] r.connectedRunners[runnerInd-i] = r.connectedRunners[len(r.connectedRunners)-1]
r.connectedRunners = r.connectedRunners[0 : len(r.connectedRunners)-2] r.connectedRunners = r.connectedRunners[0 : len(r.connectedRunners)-1]
} }
if foundRunner { if foundRunner {
@@ -122,43 +75,42 @@ runnerIter:
if len(r.connectedRunners) == 0 { if len(r.connectedRunners) == 0 {
errorMsg = "no connected runners" errorMsg = "no connected runners"
} }
req.RespChan <- GetRunnerResponse{ req.RespChan <- RunnerAllocationResponse{
Runner: &Runner{}, Runner: nil,
Err: fmt.Errorf("Could not allocate runner: %v", errorMsg), Err: fmt.Errorf("Could not allocate runner: %v", errorMsg),
} }
} }
func (r *runnerManager) processRegistration(reg RunnerRegistration) { func (r *runnerManager) processRunnerRegistration(req RunnerRegistrationRequest) {
log.Debugf("New runner appeared with id: %v and secret: %v", reg.Id, reg.Secret) log.Debugf("New runner appeared with id: %v and secret: %v", req.Id, req.Secret)
// Get runner with give id from database // Get runner with give id from database
runnerId, err := uuid.Parse(reg.Id) runnerId, err := uuid.Parse(req.Id)
if err != nil { if err != nil {
log.Errorf("Disconnecting runner with id: %v, could not parse as UUID: %v", reg.Id, err) log.Errorf("Disconnecting runner with id: %v, could not parse as UUID: %v", req.Id, err)
reg.conn.Close(websocket.StatusNormalClosure, "registration invalid") req.conn.Close(websocket.StatusNormalClosure, "registration invalid")
return return
} }
dbRunner, err := r.db.GetRunnerById(runnerId) dbRunner, err := r.db.GetRunnerById(runnerId)
if err != nil { if err != nil {
log.Errorf("Disconnecting runner with id: %v, could not find runner in DB: %v", runnerId, err) log.Errorf("Disconnecting runner with id: %v, could not find runner in DB: %v", runnerId, err)
reg.conn.Close(websocket.StatusNormalClosure, "registration invalid") req.conn.Close(websocket.StatusNormalClosure, "registration invalid")
return return
} }
if reg.Secret != dbRunner.Token { if req.Secret != dbRunner.Token {
log.Errorf("Disconnecting runner with id: %v, invalid secret", runnerId) log.Errorf("Disconnecting runner with id: %v, invalid secret", runnerId)
reg.conn.Close(websocket.StatusNormalClosure, "registration invalid") req.conn.Close(websocket.StatusNormalClosure, "registration invalid")
return return
} }
log.Infof("Registering runner \"%v\" with tags %v", reg.Id, reg.Tags) log.Infof("Registering runner \"%v\" with tags %v", req.Id, req.Tags)
runner := Runner{ runner := Runner{
id: runnerId, id: runnerId,
tags: reg.Tags, tags: req.Tags,
conn: reg.conn, conn: req.conn,
receiveChan: make(chan []byte), receiveChan: make(chan []byte),
running: false,
} }
r.connectedRunners = append(r.connectedRunners, runner) r.connectedRunners = append(r.connectedRunners, runner)
// start goroutine to call Read function on websocket connection // start goroutine to call Read function on websocket connection
@@ -167,7 +119,7 @@ func (r *runnerManager) processRegistration(reg RunnerRegistration) {
defer log.Noticef("Deregistered runner with id: %v", runner.id) defer log.Noticef("Deregistered runner with id: %v", runner.id)
defer close(runner.receiveChan) defer close(runner.receiveChan)
for { for {
msgType, data, err := reg.conn.Read(context.Background()) msgType, data, err := req.conn.Read(context.Background())
if err != nil { if err != nil {
// TODO: this is still racy, since a runner could be allocated between the // TODO: this is still racy, since a runner could be allocated between the
// connection returning an err and the channel closing // connection returning an err and the channel closing
@@ -187,22 +139,30 @@ func (r *runnerManager) processRegistration(reg RunnerRegistration) {
}() }()
} }
// processRunnerRelease returns a previously allocated runner to the pool
// of connected runners so it can be handed out again.
func (r *runnerManager) processRunnerRelease(req RunnerReleaseRequest) {
	// Guard against a nil Runner pointer: dereferencing it below would
	// panic and take down the whole runner-manager goroutine. A nil here
	// indicates a caller bug; drop the request rather than crash.
	if req.Runner == nil {
		return
	}
	r.connectedRunners = append(r.connectedRunners, *req.Runner)
}
func runRunnerManager(r runnerManager) { func runRunnerManager(r runnerManager) {
for { for {
select { select {
case request := <-r.getRunnerCh: case request := <-r.chans.Allocation:
r.processRequest(request) r.processRunnerAllocation(request)
case release := <-r.chans.Release:
case registration := <-r.registerCh: r.processRunnerRelease(release)
r.processRegistration(registration) case registration := <-r.chans.Registration:
r.processRunnerRegistration(registration)
} }
} }
} }
func StartRunnerManager(configuredRunners map[string]config.Runner, db database.Database) (chan GetRunnerRequest, chan RunnerRegistration, error) { func StartRunnerManager(configuredRunners map[string]config.Runner, db database.Database) (RunnerManagerChans, error) {
scheduler := runnerManager{ scheduler := runnerManager{
getRunnerCh: make(chan GetRunnerRequest), chans: RunnerManagerChans{
registerCh: make(chan RunnerRegistration), Allocation: make(chan RunnerAllocationRequest),
Release: make(chan RunnerReleaseRequest),
Registration: make(chan RunnerRegistrationRequest),
},
connectedRunners: make([]Runner, 0), connectedRunners: make([]Runner, 0),
configuredRunners: configuredRunners, configuredRunners: configuredRunners,
db: db, db: db,
@@ -210,14 +170,14 @@ func StartRunnerManager(configuredRunners map[string]config.Runner, db database.
go runRunnerManager(scheduler) go runRunnerManager(scheduler)
return scheduler.getRunnerCh, scheduler.registerCh, nil return scheduler.chans, nil
} }
func RegisterRunner(conn *websocket.Conn, registerCh chan RunnerRegistration) { func RegisterRunner(conn *websocket.Conn, registerCh chan RunnerRegistrationRequest) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel() defer cancel()
var registration RunnerRegistration var registration RunnerRegistrationRequest
registration.conn = conn registration.conn = conn
typ, r, err := conn.Read(ctx) typ, r, err := conn.Read(ctx)
+49
View File
@@ -0,0 +1,49 @@
package runnermanager
import (
"nhooyr.io/websocket"
"git.ohea.xyz/cursorius/server/config"
"git.ohea.xyz/cursorius/server/database"
)
// RunnerManagerChans bundles the request channels used to communicate
// with the runner manager goroutine; one channel per request kind.
type RunnerManagerChans struct {
Allocation chan RunnerAllocationRequest
Release chan RunnerReleaseRequest
Registration chan RunnerRegistrationRequest
}
// runnerManager holds the runner manager's state: its request channels,
// the currently connected (not-yet-allocated) runners, the statically
// configured runners, and a database handle.
type runnerManager struct {
chans RunnerManagerChans
connectedRunners []Runner
// NOTE(review): numConnectedRunners is never read or written in the
// code visible here — confirm whether it is still needed.
numConnectedRunners uint64
configuredRunners map[string]config.Runner
db database.Database
}
// RunnerAllocationRequest asks the manager for a connected runner whose
// tags include every entry in Tags; the result arrives on RespChan.
type RunnerAllocationRequest struct {
Tags []string
RespChan chan RunnerAllocationResponse
// NOTE(review): CancelChan is not used anywhere in the visible code —
// confirm its intended semantics before relying on it.
CancelChan chan string
}
// RunnerAllocationResponse carries either an allocated runner or an
// error; Runner is nil when Err is non-nil.
type RunnerAllocationResponse struct {
Runner *Runner
Err error
}
// RunnerReleaseRequest hands a previously allocated runner back to the
// manager so it rejoins the pool of connected runners.
type RunnerReleaseRequest struct {
Runner *Runner
}
// RunnerRegistrationRequest is sent when a runner connects over a
// websocket. Id must parse as a UUID and Secret is checked against the
// runner's token stored in the database before the runner is accepted.
type RunnerRegistrationRequest struct {
Secret string
Id string
Tags []string
conn *websocket.Conn
}
// runnerJob pairs a job id with a URL. NOTE(review): not referenced in
// the code visible here — confirm it is still in use.
type runnerJob struct {
Id string
URL string
}
+18
View File
@@ -0,0 +1,18 @@
package util
import (
"fmt"
"strings"
)
// FormatTags renders a tag list for log output, e.g. ["a", "b"] becomes
// "[a, b]". An empty or nil slice yields the empty string (no brackets),
// matching the previous hand-rolled behavior.
func FormatTags(tags []string) string {
	if len(tags) == 0 {
		return ""
	}
	// strings.Join replaces the manual Fprintf loop; Sprintf adds the
	// surrounding brackets.
	return fmt.Sprintf("[%v]", strings.Join(tags, ", "))
}