Compare commits
24 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 66eba61bbe | |||
| dc09c0f4f5 | |||
| dcaeaeb6d6 | |||
| 32a86ace9f | |||
| 89be2c4816 | |||
| 63c0f83c16 | |||
| 8188bd391b | |||
| 8ac90700bf | |||
| 1882d14bee | |||
| 4f7b315f54 | |||
| fe9e1cac15 | |||
| a9481fa9bc | |||
| e1382e50ea | |||
| 7f44e5ed41 | |||
| bcc53dfbe0 | |||
| bbf96498aa | |||
| 954966db58 | |||
| ed7df18f83 | |||
| a8e9a68f0e | |||
| 20c664f0ed | |||
| 0000ea2a13 | |||
| fe53a17160 | |||
| f190274bce | |||
| d9ba14550e |
+195
-3
@@ -13,7 +13,7 @@ import (
|
||||
|
||||
var log = logging.MustGetLogger("cursorius-server")
|
||||
|
||||
func createSchema(db database.Database) (graphql.Schema, error) {
|
||||
func createSchema(db database.Database, pollChan chan uuid.UUID, cronChan chan uuid.UUID) (graphql.Schema, error) {
|
||||
runnerType := graphql.NewObject(graphql.ObjectConfig{
|
||||
Name: "Runner",
|
||||
Description: "A runner available for use inside of a pipeline.",
|
||||
@@ -88,6 +88,43 @@ func createSchema(db database.Database) (graphql.Schema, error) {
|
||||
},
|
||||
})
|
||||
|
||||
cronType := graphql.NewObject(graphql.ObjectConfig{
|
||||
Name: "Cron",
|
||||
Description: "A cron available for trigger pipeline runs.",
|
||||
Fields: graphql.Fields{
|
||||
"id": &graphql.Field{
|
||||
Type: graphql.NewNonNull(graphql.String),
|
||||
Description: "The id of the cron.",
|
||||
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
|
||||
if cron, ok := p.Source.(database.Cron); ok {
|
||||
return cron.Id, nil
|
||||
}
|
||||
return nil, nil
|
||||
},
|
||||
},
|
||||
"cron": &graphql.Field{
|
||||
Type: graphql.NewNonNull(graphql.String),
|
||||
Description: "The cron.",
|
||||
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
|
||||
if cron, ok := p.Source.(database.Cron); ok {
|
||||
return cron.Cron, nil
|
||||
}
|
||||
return nil, nil
|
||||
},
|
||||
},
|
||||
"pattern": &graphql.Field{
|
||||
Type: graphql.NewNonNull(graphql.String),
|
||||
Description: "A pattern for determining what refs to run the cron on.",
|
||||
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
|
||||
if cron, ok := p.Source.(database.Cron); ok {
|
||||
return cron.Pattern, nil
|
||||
}
|
||||
return nil, nil
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
cloneCredentialType := graphql.NewObject(graphql.ObjectConfig{
|
||||
Name: "CloneCredential",
|
||||
Description: "A credential for authenticating with the pipeline source host.",
|
||||
@@ -316,6 +353,16 @@ func createSchema(db database.Database) (graphql.Schema, error) {
|
||||
return []database.Secret{}, nil
|
||||
},
|
||||
},
|
||||
"crons": &graphql.Field{
|
||||
Type: graphql.NewNonNull(graphql.NewList(graphql.NewNonNull(cronType))),
|
||||
Description: "The list of crons for the pipeline.",
|
||||
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
|
||||
if pipeline, ok := p.Source.(database.Pipeline); ok {
|
||||
return db.GetCronsForPipeline(pipeline.Id)
|
||||
}
|
||||
return []database.Cron{}, nil
|
||||
},
|
||||
},
|
||||
"webhooks": &graphql.Field{
|
||||
Type: graphql.NewNonNull(graphql.NewList(graphql.NewNonNull(webhookType))),
|
||||
Description: "The list of webhooks for the pipeline.",
|
||||
@@ -454,6 +501,8 @@ func createSchema(db database.Database) (graphql.Schema, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pollChan <- pipeline.Id
|
||||
|
||||
return pipeline, nil
|
||||
},
|
||||
},
|
||||
@@ -558,6 +607,72 @@ func createSchema(db database.Database) (graphql.Schema, error) {
|
||||
return runner, nil
|
||||
},
|
||||
},
|
||||
"updatePipeline": &graphql.Field{
|
||||
Type: pipelineType,
|
||||
Description: "Create a new pipeline",
|
||||
Args: graphql.FieldConfigArgument{
|
||||
"pipelineId": &graphql.ArgumentConfig{
|
||||
Type: graphql.NewNonNull(graphql.String),
|
||||
},
|
||||
"name": &graphql.ArgumentConfig{
|
||||
Type: graphql.String,
|
||||
},
|
||||
"url": &graphql.ArgumentConfig{
|
||||
Type: graphql.String,
|
||||
},
|
||||
"pollInterval": &graphql.ArgumentConfig{
|
||||
Type: graphql.Int,
|
||||
},
|
||||
"cloneCredentialId": &graphql.ArgumentConfig{
|
||||
Type: graphql.String,
|
||||
},
|
||||
},
|
||||
Resolve: func(params graphql.ResolveParams) (interface{}, error) {
|
||||
pipelineId, err := uuid.Parse(params.Args["pipelineId"].(string))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var name *string
|
||||
var url *string
|
||||
var interval *int
|
||||
|
||||
if nameVal, ok := params.Args["name"]; ok {
|
||||
nameVal := nameVal.(string)
|
||||
name = &nameVal
|
||||
} else {
|
||||
name = nil
|
||||
}
|
||||
|
||||
if urlVal, ok := params.Args["url"]; ok {
|
||||
urlVal := urlVal.(string)
|
||||
url = &urlVal
|
||||
} else {
|
||||
url = nil
|
||||
}
|
||||
|
||||
if intervalVal, ok := params.Args["pollInterval"]; ok {
|
||||
intervalVal := intervalVal.(int)
|
||||
interval = &intervalVal
|
||||
} else {
|
||||
interval = nil
|
||||
}
|
||||
|
||||
pipeline, err := db.UpdatePipeline(
|
||||
pipelineId,
|
||||
name,
|
||||
url,
|
||||
interval,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pollChan <- pipeline.Id
|
||||
|
||||
return pipeline, nil
|
||||
},
|
||||
},
|
||||
"setPipelineCloneCredential": &graphql.Field{
|
||||
Type: pipelineType,
|
||||
Description: "Set the CloneCredential used by a pipeline to clone the source repo",
|
||||
@@ -667,6 +782,83 @@ func createSchema(db database.Database) (graphql.Schema, error) {
|
||||
return pipeline, nil
|
||||
},
|
||||
},
|
||||
"addCronToPipeline": &graphql.Field{
|
||||
Type: pipelineType,
|
||||
Description: "Add a cron string to trigger the pipeline",
|
||||
Args: graphql.FieldConfigArgument{
|
||||
"cron": &graphql.ArgumentConfig{
|
||||
Type: graphql.NewNonNull(graphql.String),
|
||||
},
|
||||
"pattern": &graphql.ArgumentConfig{
|
||||
Type: graphql.NewNonNull(graphql.String),
|
||||
},
|
||||
"pipelineId": &graphql.ArgumentConfig{
|
||||
Type: graphql.NewNonNull(graphql.String),
|
||||
},
|
||||
},
|
||||
Resolve: func(params graphql.ResolveParams) (interface{}, error) {
|
||||
|
||||
cron := params.Args["cron"].(string)
|
||||
pattern := params.Args["pattern"].(string)
|
||||
|
||||
pipelineId, err := uuid.Parse(params.Args["pipelineId"].(string))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cronObj, err := db.AddCronForPipeline(pipelineId, cron, pattern)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pipeline, err := db.GetPipelineById(pipelineId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cronChan <- cronObj.Id
|
||||
|
||||
return pipeline, nil
|
||||
},
|
||||
},
|
||||
"removeCronFromPipeline": &graphql.Field{
|
||||
Type: pipelineType,
|
||||
Description: "Remove a cron trigger from a pipeline.",
|
||||
Args: graphql.FieldConfigArgument{
|
||||
"cronId": &graphql.ArgumentConfig{
|
||||
Type: graphql.NewNonNull(graphql.String),
|
||||
},
|
||||
"pipelineId": &graphql.ArgumentConfig{
|
||||
Type: graphql.NewNonNull(graphql.String),
|
||||
},
|
||||
},
|
||||
Resolve: func(params graphql.ResolveParams) (interface{}, error) {
|
||||
|
||||
cronId, err := uuid.Parse(params.Args["cronId"].(string))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pipelineId, err := uuid.Parse(params.Args["pipelineId"].(string))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = db.RemoveCronForPipeline(cronId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pipeline, err := db.GetPipelineById(pipelineId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cronChan <- cronId
|
||||
|
||||
return pipeline, nil
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
@@ -681,9 +873,9 @@ func createSchema(db database.Database) (graphql.Schema, error) {
|
||||
return schema, nil
|
||||
}
|
||||
|
||||
func CreateHandler(db database.Database, mux *http.ServeMux) error {
|
||||
func CreateHandler(db database.Database, pollChan chan uuid.UUID, cronChan chan uuid.UUID, mux *http.ServeMux) error {
|
||||
|
||||
schema, err := createSchema(db)
|
||||
schema, err := createSchema(db, pollChan, cronChan)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
+121
@@ -0,0 +1,121 @@
|
||||
package cron
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
|
||||
"git.ohea.xyz/cursorius/server/config"
|
||||
"git.ohea.xyz/cursorius/server/database"
|
||||
"git.ohea.xyz/cursorius/server/pipeline_executor"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/op/go-logging"
|
||||
"github.com/robfig/cron/v3"
|
||||
)
|
||||
|
||||
var log = logging.MustGetLogger("cursorius-server")
|
||||
|
||||
func runPipeline(db database.Database, conf config.PipelineConf, pipelineId uuid.UUID, cron database.Cron) error {
|
||||
run, err := db.CreateRun(pipelineId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Could not create run for pipeline with id %v: %w", pipelineId, err)
|
||||
}
|
||||
|
||||
refs, err := db.GetPipelineRefs(pipelineId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Could not get refs for pipeline with id %v: %w", pipelineId, err)
|
||||
}
|
||||
|
||||
useRef, err := regexp.Compile(cron.Pattern)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Could not compile regex for cron %v for pipeline %v: %w", cron.Id, pipelineId, err)
|
||||
}
|
||||
|
||||
for ref := range refs {
|
||||
if !useRef.MatchString(ref) {
|
||||
log.Debugf("Skipping ref %v for pipeline %v as regex %v in cron %v doesn't match", ref, pipelineId, cron.Pattern, cron.Id)
|
||||
continue
|
||||
}
|
||||
|
||||
pipeline, err := db.GetPipelineById(pipelineId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not get pipeline with id %v from db: %w", pipelineId, err)
|
||||
}
|
||||
|
||||
pe := pipeline_executor.PipelineExecution{
|
||||
Pipeline: pipeline,
|
||||
Ref: ref,
|
||||
Run: run,
|
||||
}
|
||||
|
||||
go pipeline_executor.ExecutePipeline(pe, db, conf)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func launchCrons(db database.Database, conf config.PipelineConf, updateChan chan uuid.UUID) {
|
||||
pipelines, err := db.GetPipelines()
|
||||
if err != nil {
|
||||
log.Errorf("Could not get pipelines from database: %w", err)
|
||||
return
|
||||
}
|
||||
|
||||
cronManager := cron.New()
|
||||
|
||||
cronEntries := make(map[uuid.UUID]cron.EntryID)
|
||||
|
||||
for _, pipeline := range pipelines {
|
||||
crons, err := db.GetCronsForPipeline(pipeline.Id)
|
||||
if err != nil {
|
||||
log.Errorf("Could not get crons for pipeline with id \"%v\": %w", pipeline.Id, err)
|
||||
return
|
||||
}
|
||||
|
||||
log.Infof("Starting crons for pipeline %v with id %v", pipeline.Name, pipeline.Id)
|
||||
for _, cron := range crons {
|
||||
cronEntries[cron.Id], err = cronManager.AddFunc(cron.Cron, func() {
|
||||
log.Infof("Triggering cron with value \"%v\" for pipeline with id \"%v\"", cron.Cron, cron.PipelineId)
|
||||
err := runPipeline(db, conf, pipeline.Id, cron)
|
||||
if err != nil {
|
||||
log.Errorf("Could not run pipeline with id \"%v\": %w", pipeline.Id, err)
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
log.Errorf("Could not configure cron for pipeline with id \"%v\": %w", pipeline.Id, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
cronManager.Start()
|
||||
|
||||
for {
|
||||
cronUUID := <-updateChan
|
||||
|
||||
if entryId, ok := cronEntries[cronUUID]; ok {
|
||||
log.Infof("Canceling cron %v", cronUUID)
|
||||
cronManager.Remove(entryId)
|
||||
}
|
||||
|
||||
cron, err := db.GetCronById(cronUUID)
|
||||
// if cron no longer exists, don't try to restart it
|
||||
// TODO: this squashes other DB errors
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
log.Infof("Starting cron %v with value %v for pipeline %v", cron.Id, cron.Cron, cron.PipelineId)
|
||||
|
||||
cronEntries[cron.Id], err = cronManager.AddFunc(cron.Cron, func() {
|
||||
err := runPipeline(db, conf, cron.PipelineId, cron)
|
||||
if err != nil {
|
||||
log.Errorf("Could not setup run pipeline with id \"%v\": %w", cron.PipelineId, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func StartCrons(conf config.PipelineConf, db database.Database) chan uuid.UUID {
|
||||
cronChan := make(chan uuid.UUID)
|
||||
go launchCrons(db, conf, cronChan)
|
||||
return cronChan
|
||||
}
|
||||
+14
-1
@@ -181,14 +181,27 @@ CREATE TABLE runners (
|
||||
);
|
||||
|
||||
CREATE TABLE pipeline_refs (
|
||||
name TEXT PRIMARY KEY NOT NULL,
|
||||
name TEXT NOT NULL,
|
||||
pipeline_id UUID NOT NULL,
|
||||
hash TEXT NOT NULL,
|
||||
|
||||
PRIMARY KEY(name, pipeline_id),
|
||||
|
||||
CONSTRAINT fk_pipeline_id
|
||||
FOREIGN KEY(pipeline_id)
|
||||
REFERENCES pipelines(id)
|
||||
);
|
||||
|
||||
CREATE TABLE crons (
|
||||
id UUID PRIMARY KEY,
|
||||
pipeline_id UUID NOT NULL,
|
||||
cron TEXT NOT NULL,
|
||||
pattern TEXT NOT NULL,
|
||||
|
||||
CONSTRAINT fk_pipeline_id
|
||||
FOREIGN KEY(pipeline_id)
|
||||
REFERENCES pipelines(id)
|
||||
);
|
||||
`
|
||||
|
||||
_, err := conn.Exec(context.Background(), createTablesQuery)
|
||||
|
||||
+132
-1
@@ -77,6 +77,49 @@ RETURNING id, name, url, poll_interval;`
|
||||
return pipeline, nil
|
||||
}
|
||||
|
||||
func (db *Database) UpdatePipeline(pipelineId uuid.UUID, name *string, url *string, pollInterval *int) (Pipeline, error) {
|
||||
query := `
|
||||
UPDATE pipelines
|
||||
SET name=$1, url=$2, poll_interval=$3
|
||||
WHERE id=$4
|
||||
RETURNING name, url, poll_interval, clone_credential;`
|
||||
|
||||
pipeline, err := db.GetPipelineById(pipelineId)
|
||||
if err != nil {
|
||||
return pipeline, err
|
||||
}
|
||||
|
||||
var nameNew string
|
||||
var urlNew string
|
||||
var pollIntervalNew int
|
||||
|
||||
if name != nil {
|
||||
nameNew = *name
|
||||
} else {
|
||||
nameNew = pipeline.Name
|
||||
}
|
||||
if url != nil {
|
||||
urlNew = *url
|
||||
} else {
|
||||
urlNew = pipeline.Url
|
||||
}
|
||||
if pollInterval != nil {
|
||||
pollIntervalNew = *pollInterval
|
||||
} else {
|
||||
pollIntervalNew = pipeline.PollInterval
|
||||
}
|
||||
|
||||
err = db.Conn.QueryRow(context.Background(),
|
||||
query, nameNew, urlNew, pollIntervalNew, pipelineId).Scan(
|
||||
&pipeline.Name, &pipeline.Url, &pipeline.PollInterval, &pipeline.CloneCredential,
|
||||
)
|
||||
if err != nil {
|
||||
return pipeline, fmt.Errorf("Could not add credential to pipeline: %w", err)
|
||||
}
|
||||
|
||||
return pipeline, err
|
||||
}
|
||||
|
||||
func (db *Database) SetPipelineCloneCredential(pipelineId uuid.UUID, credentialId *uuid.UUID) (Pipeline, error) {
|
||||
query := `
|
||||
UPDATE pipelines
|
||||
@@ -394,7 +437,7 @@ func (db *Database) UpdatePipelineRefs(pipelineId uuid.UUID, refsMap map[string]
|
||||
query := `
|
||||
INSERT INTO pipeline_refs(name, pipeline_id, hash)
|
||||
VALUES($1, $2, $3)
|
||||
ON CONFLICT (name)
|
||||
ON CONFLICT (name, pipeline_id)
|
||||
DO
|
||||
UPDATE SET hash=$3;`
|
||||
|
||||
@@ -627,3 +670,91 @@ RETURNING id, name, token;`
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (db *Database) GetCronsForPipeline(pipelineId uuid.UUID) ([]Cron, error) {
|
||||
query := `
|
||||
SELECT id, cron, pattern
|
||||
FROM crons
|
||||
WHERE pipeline_id=$1;`
|
||||
|
||||
var crons []Cron
|
||||
|
||||
cronEntrys, err := db.Conn.Query(context.Background(), query, pipelineId)
|
||||
if err != nil {
|
||||
return crons, fmt.Errorf("Could not get crons for pipeline with id \"%v\": %w", pipelineId, err)
|
||||
}
|
||||
defer cronEntrys.Close()
|
||||
|
||||
for cronEntrys.Next() {
|
||||
var cron Cron
|
||||
var idStr string
|
||||
if err := cronEntrys.Scan(
|
||||
&idStr, &cron.Cron, &cron.Pattern,
|
||||
); err != nil {
|
||||
return crons, err
|
||||
}
|
||||
|
||||
cron.Id, err = uuid.Parse(idStr)
|
||||
if err != nil {
|
||||
return crons, err
|
||||
}
|
||||
|
||||
crons = append(crons, cron)
|
||||
}
|
||||
|
||||
return crons, nil
|
||||
}
|
||||
|
||||
func (db *Database) GetCronById(id uuid.UUID) (Cron, error) {
|
||||
|
||||
query := `
|
||||
SELECT cron, pipeline_id, pattern
|
||||
FROM crons
|
||||
WHERE id=$1;`
|
||||
|
||||
cron := Cron{
|
||||
Id: id,
|
||||
}
|
||||
|
||||
var pipelineIdStr string
|
||||
|
||||
err := db.Conn.QueryRow(context.Background(), query, id).Scan(&cron.Cron, &pipelineIdStr, &cron.Pattern)
|
||||
if err != nil {
|
||||
return cron, fmt.Errorf("Could not query database for cron with id %v: %w", id.String(), err)
|
||||
}
|
||||
|
||||
cron.PipelineId, err = uuid.Parse(pipelineIdStr)
|
||||
return cron, err
|
||||
}
|
||||
|
||||
func (db *Database) AddCronForPipeline(pipelineId uuid.UUID, cronStr string, pattern string) (Cron, error) {
|
||||
query := `
|
||||
INSERT INTO crons (id, pipeline_id, cron, pattern)
|
||||
VALUES (uuid_generate_v4(), $1, $2, $3)
|
||||
RETURNING id;`
|
||||
|
||||
cron := Cron{
|
||||
PipelineId: pipelineId,
|
||||
Cron: cronStr,
|
||||
Pattern: pattern,
|
||||
}
|
||||
|
||||
var idStr string
|
||||
|
||||
err := db.Conn.QueryRow(context.Background(), query, pipelineId, cronStr, pattern).Scan(&idStr)
|
||||
if err != nil {
|
||||
return cron, fmt.Errorf("Could not create cron: %w", err)
|
||||
}
|
||||
|
||||
cron.Id, err = uuid.Parse(idStr)
|
||||
return cron, err
|
||||
}
|
||||
|
||||
func (db *Database) RemoveCronForPipeline(cronId uuid.UUID) error {
|
||||
query := `
|
||||
DELETE FROM crons
|
||||
WHERE id=$1;`
|
||||
|
||||
_, err := db.Conn.Exec(context.Background(), query, cronId)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -79,3 +79,10 @@ type Runner struct {
|
||||
Name string
|
||||
Token string
|
||||
}
|
||||
|
||||
type Cron struct {
|
||||
Id uuid.UUID
|
||||
PipelineId uuid.UUID
|
||||
Cron string
|
||||
Pattern string
|
||||
}
|
||||
|
||||
@@ -4,4 +4,4 @@ MAINTAINER restitux <restitux@ohea.xyz>
|
||||
RUN apt-get update && apt-get install -y \
|
||||
ca-certificates \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
ENTRYPOINT ["/build/server/docker/build-and-run.sh"]
|
||||
ENTRYPOINT ["/build/server/docker/cursorius/build-and-run.sh"]
|
||||
@@ -2,15 +2,15 @@ version: "3.3"
|
||||
services:
|
||||
cursorius-server:
|
||||
build:
|
||||
context: ..
|
||||
dockerfile: docker/Dockerfile.dev
|
||||
context: ".."
|
||||
dockerfile: "docker/cursorius/Dockerfile.dev"
|
||||
ports:
|
||||
- "0.0.0.0:45420:45420"
|
||||
networks:
|
||||
- cursorius
|
||||
volumes:
|
||||
- "..:/build/server"
|
||||
- "../server.toml:/root/.config/cursorius/server.toml"
|
||||
- "./server.toml:/root/.config/cursorius/server.toml"
|
||||
- "/var/run/docker.sock:/var/run/docker.sock"
|
||||
- "../_working/go:/go"
|
||||
- "../_working/jobs:/cursorius/jobs"
|
||||
@@ -20,15 +20,30 @@ services:
|
||||
- POSTGRES_USER=cursorius
|
||||
- POSTGRES_PASSWORD=cursorius
|
||||
- POSTGRES_DB=cursorius
|
||||
volumes:
|
||||
- "../_working/postgres:/var/lib/postgresql/data"
|
||||
networks:
|
||||
- cursorius
|
||||
graphiql:
|
||||
build:
|
||||
dockerfile: Dockerfile.graphiql
|
||||
context: "graphiql"
|
||||
dockerfile: "Dockerfile.graphiql"
|
||||
ports:
|
||||
- "0.0.0.0:45421:80"
|
||||
networks:
|
||||
- cursorius
|
||||
gitea:
|
||||
image: gitea/gitea:latest
|
||||
profiles: ["gitea"]
|
||||
environment:
|
||||
- GITEA__webhook__ALLOWED_HOST_LIST=cursorius-server, external
|
||||
ports:
|
||||
- "127.0.0.1:2222:22"
|
||||
- "127.0.0.1:3000:3000"
|
||||
networks:
|
||||
- cursorius
|
||||
volumes:
|
||||
- "../_working/gitea:/data"
|
||||
|
||||
networks:
|
||||
cursorius:
|
||||
|
||||
@@ -1,24 +0,0 @@
|
||||
version: "3.3"
|
||||
services:
|
||||
cursorius-server:
|
||||
networks:
|
||||
- gitea
|
||||
gitea:
|
||||
image: gitea/gitea:latest
|
||||
environment:
|
||||
- GITEA__webhook__ALLOWED_HOST_LIST=cursorius-server, external
|
||||
ports:
|
||||
- "127.0.0.1:2222:22"
|
||||
- "127.0.0.1:3000:3000"
|
||||
networks:
|
||||
- gitea
|
||||
volumes:
|
||||
- gitea-data:/data
|
||||
|
||||
|
||||
volumes:
|
||||
gitea-data:
|
||||
|
||||
networks:
|
||||
gitea:
|
||||
external: false
|
||||
+8
-2
@@ -1,4 +1,10 @@
|
||||
#!/bin/bash
|
||||
|
||||
docker build . -f docker/Dockerfile -t git.ohea.xyz/cursorius/server:latest
|
||||
docker push git.ohea.xyz/cursorius/server:latest
|
||||
if [[ -z "${1}" ]]; then
|
||||
echo "You must provide a docker tag to push to."
|
||||
else
|
||||
echo "Building container git.ohea.xyz/cursorius/server:$1"
|
||||
docker build . -f docker/cursorius/Dockerfile -t "git.ohea.xyz/cursorius/server:$1"
|
||||
echo "Pushing container git.ohea.xyz/cursorius/server:$1"
|
||||
docker push "git.ohea.xyz/cursorius/server:$1"
|
||||
fi
|
||||
|
||||
+1
-101
@@ -2,104 +2,4 @@
|
||||
|
||||
set -e
|
||||
|
||||
mkdir -p _working/go
|
||||
mkdir -p _working/jobs
|
||||
|
||||
base_default_compose_files="docker/docker-compose.yml"
|
||||
default_compose_files="$base_default_compose_files"
|
||||
|
||||
override_compose="docker/docker-compose.override.yml"
|
||||
gitea_compose="docker/gitea-override.yml"
|
||||
|
||||
|
||||
if [ -f "$override_compose" ]
|
||||
then
|
||||
default_compose_files+=" $override_compose"
|
||||
else
|
||||
default_compose_files="docker/docker-compose.yml"
|
||||
fi
|
||||
|
||||
function stop_containers {
|
||||
current_containers="$(cat _working/current_containers)"
|
||||
if [ "$current_containers" == "default" ]
|
||||
then
|
||||
compose_files="$default_compose_files"
|
||||
elif [ "$current_containers" == "gitea" ]
|
||||
then
|
||||
compose_files="$default_compose_files $gitea_compose"
|
||||
fi
|
||||
compose_file_flags=$(echo "$compose_files" | tr ' ' '\n' | xargs -I'{}' echo "-f {} " | tr -d '\n')
|
||||
docker compose $compose_file_flags down
|
||||
}
|
||||
|
||||
function show_logs {
|
||||
current_containers="$(cat _working/current_containers)"
|
||||
if [ "$current_containers" == "default" ]
|
||||
then
|
||||
compose_files="$default_compose_files"
|
||||
elif [ "$current_containers" == "gitea" ]
|
||||
then
|
||||
compose_files="$default_compose_files $gitea_compose"
|
||||
fi
|
||||
compose_file_flags=$(echo "$compose_files" | tr ' ' '\n' | xargs -I'{}' echo "-f {} " | tr -d '\n')
|
||||
docker compose $compose_file_flags logs -f
|
||||
}
|
||||
|
||||
function show_ps {
|
||||
current_containers="$(cat _working/current_containers)"
|
||||
if [ "$current_containers" == "default" ]
|
||||
then
|
||||
compose_files="$default_compose_files"
|
||||
elif [ "$current_containers" == "gitea" ]
|
||||
then
|
||||
compose_files="$default_compose_files $gitea_compose"
|
||||
fi
|
||||
compose_file_flags=$(echo "$compose_files" | tr ' ' '\n' | xargs -I'{}' echo "-f {} " | tr -d '\n')
|
||||
docker compose $compose_file_flags ps
|
||||
}
|
||||
|
||||
function runprev {
|
||||
current_containers="$(cat _working/current_containers)"
|
||||
if [ "$current_containers" == "default" ]
|
||||
then
|
||||
compose_files="$default_compose_files"
|
||||
elif [ "$current_containers" == "gitea" ]
|
||||
then
|
||||
compose_files="$default_compose_files $gitea_compose"
|
||||
fi
|
||||
compose_file_flags=$(echo "$compose_files" | tr ' ' '\n' | xargs -I'{}' echo "-f {} " | tr -d '\n')
|
||||
docker compose $compose_file_flags up --build -d
|
||||
docker compose $compose_file_flags logs -f
|
||||
}
|
||||
|
||||
|
||||
case $1 in
|
||||
"default")
|
||||
echo "default" > _working/current_containers
|
||||
compose_files="$default_compose_files"
|
||||
compose_file_flags=$(echo "$compose_files" | tr ' ' '\n' | xargs -I'{}' echo "-f {} " | tr -d '\n')
|
||||
docker compose $compose_file_flags up --build -d
|
||||
docker compose $compose_file_flags logs -f;;
|
||||
"gitea")
|
||||
echo "gitea" > _working/current_containers
|
||||
stop_containers
|
||||
compose_files="$default_compose_files $gitea_compose"
|
||||
compose_file_flags=$(echo "$compose_files" | tr ' ' '\n' | xargs -I'{}' echo "-f {} " | tr -d '\n')
|
||||
docker compose $compose_file_flags up --build -d
|
||||
docker compose $compose_file_flags logs -f;;
|
||||
"dbshell")
|
||||
compose_files="$default_compose_files $gitea_compose"
|
||||
compose_file_flags=$(echo "$compose_files" | tr ' ' '\n' | xargs -I'{}' echo "-f {} " | tr -d '\n')
|
||||
docker compose $compose_file_flags exec cursorius-db psql --user=cursorius;;
|
||||
"stop")
|
||||
stop_containers;;
|
||||
"logs")
|
||||
show_logs;;
|
||||
"ps")
|
||||
show_ps;;
|
||||
"runprev")
|
||||
runprev;;
|
||||
*) echo "ERROR: Unknown param \"$1\"" 2>&1
|
||||
exit 255;;
|
||||
esac
|
||||
|
||||
go run docker/run.go "$@"
|
||||
|
||||
@@ -0,0 +1,93 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
func panicError(errorString string, params ...any) {
|
||||
fmt.Fprintf(os.Stderr, fmt.Sprintf("ERROR: %v\n", errorString), params...)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func run(name string, arg ...string) {
|
||||
cmd := exec.Command(name, arg...)
|
||||
if err := cmd.Run(); err != nil {
|
||||
panicError("could not run command %v: %v", name, err)
|
||||
}
|
||||
}
|
||||
|
||||
func runAttach(name string, arg ...string) {
|
||||
cmd := exec.Command(name, arg...)
|
||||
cmd.Stdin = os.Stdin
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
panicError("could not run command %v: %v", name, err)
|
||||
}
|
||||
}
|
||||
|
||||
func runCompose(args []string) {
|
||||
runAttach("docker", append([]string{"compose"}, args...)...)
|
||||
}
|
||||
|
||||
func createDirs() {
|
||||
run("mkdir", "-p", "_working/go")
|
||||
run("mkdir", "-p", "_working/jobs")
|
||||
}
|
||||
|
||||
func currentContainers() string {
|
||||
bytes, err := os.ReadFile("_working/current_containers")
|
||||
if err != nil {
|
||||
panicError("could not read current containers: %v", err)
|
||||
}
|
||||
return string(bytes)
|
||||
}
|
||||
|
||||
func composeFlags() []string {
|
||||
containers := currentContainers()
|
||||
flags := []string{"-f", "docker/docker-compose.yml"}
|
||||
switch containers {
|
||||
case "gitea":
|
||||
flags = append(flags, "--profile", "gitea")
|
||||
}
|
||||
return flags
|
||||
}
|
||||
|
||||
func runContainers(containers string) {
|
||||
err := os.WriteFile("_working/current_containers", []byte(containers), 0633)
|
||||
if err != nil {
|
||||
panicError("could not write current_containers file: %v", err)
|
||||
}
|
||||
runCompose(append(composeFlags(), "up", "--build", "-d"))
|
||||
runCompose(append(composeFlags(), "logs", "-f"))
|
||||
}
|
||||
|
||||
func main() {
|
||||
if len(os.Args) < 2 {
|
||||
panicError("not enough arguments passed")
|
||||
}
|
||||
|
||||
createDirs()
|
||||
|
||||
switch os.Args[1] {
|
||||
case "default", "gitea":
|
||||
runContainers(os.Args[1])
|
||||
case "runprev":
|
||||
runContainers(currentContainers())
|
||||
case "stop":
|
||||
runCompose(append(composeFlags(), "down"))
|
||||
case "dbshell":
|
||||
runCompose(append(composeFlags(), "exec", "cursorius-db", "psql", "--user=cursorius"))
|
||||
case "logs":
|
||||
runCompose(append(composeFlags(), "logs", "-f"))
|
||||
case "ps":
|
||||
runCompose(append(composeFlags(), "ps"))
|
||||
case "help":
|
||||
fmt.Println("commands: default, gitea, runprev, stop, dbshell, logs, ps, help")
|
||||
default:
|
||||
panicError("Unknown subcommand: %v", os.Args[1])
|
||||
|
||||
}
|
||||
}
|
||||
@@ -3,7 +3,7 @@ module git.ohea.xyz/cursorius/server
|
||||
go 1.19
|
||||
|
||||
require (
|
||||
git.ohea.xyz/cursorius/pipeline-api/go/api/v2 v2.0.0-20230109075652-ead0aeff2eb9
|
||||
git.ohea.xyz/cursorius/pipeline-api/go/api/v2 v2.0.0-20230405234139-34d8875b72f4
|
||||
git.ohea.xyz/cursorius/runner-api/go/api/v2 v2.0.0-20230109074922-e20285fe6cf2
|
||||
git.ohea.xyz/golang/config v0.0.0-20220915224621-b9debd233173
|
||||
github.com/bufbuild/connect-go v1.4.1
|
||||
@@ -16,8 +16,9 @@ require (
|
||||
github.com/jackc/pgx/v5 v5.2.0
|
||||
github.com/jhoonb/archivex v0.0.0-20201016144719-6a343cdae81d
|
||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7
|
||||
github.com/robfig/cron/v3 v3.0.1
|
||||
golang.org/x/net v0.2.0
|
||||
google.golang.org/protobuf v1.28.1
|
||||
google.golang.org/protobuf v1.30.0
|
||||
nhooyr.io/websocket v1.8.7
|
||||
)
|
||||
|
||||
|
||||
@@ -78,8 +78,8 @@ contrib.go.opencensus.io/exporter/stackdriver v0.13.5/go.mod h1:aXENhDJ1Y4lIg4EU
|
||||
contrib.go.opencensus.io/integrations/ocsql v0.1.4/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE=
|
||||
contrib.go.opencensus.io/resource v0.1.1/go.mod h1:F361eGI91LCmW1I/Saf+rX0+OFcigGlFvXwEGEnkRLA=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
git.ohea.xyz/cursorius/pipeline-api/go/api/v2 v2.0.0-20230109075652-ead0aeff2eb9 h1:8p7Kw3B7dbi2zdgG+Me9ETRWrJzoNVjcase4YqXfGbs=
|
||||
git.ohea.xyz/cursorius/pipeline-api/go/api/v2 v2.0.0-20230109075652-ead0aeff2eb9/go.mod h1:D7GGcFIH421mo6KuRaXXXmlXPwWeEsemTZG/BOZA/4o=
|
||||
git.ohea.xyz/cursorius/pipeline-api/go/api/v2 v2.0.0-20230405234139-34d8875b72f4 h1:kKQQEg1nmWnqiNOqtUHteEuacyfy0NdxyDj6HPjbA2c=
|
||||
git.ohea.xyz/cursorius/pipeline-api/go/api/v2 v2.0.0-20230405234139-34d8875b72f4/go.mod h1:D7GGcFIH421mo6KuRaXXXmlXPwWeEsemTZG/BOZA/4o=
|
||||
git.ohea.xyz/cursorius/runner-api/go/api/v2 v2.0.0-20230109074922-e20285fe6cf2 h1:G1XQEqhj1LZPQbH7avzvT7QL9Wfbb4CXMm0nLL39eDc=
|
||||
git.ohea.xyz/cursorius/runner-api/go/api/v2 v2.0.0-20230109074922-e20285fe6cf2/go.mod h1:F9y5Ck4Wchsaj5amSX2eDRUlQ/iYP1VNLFduvjNwmLc=
|
||||
git.ohea.xyz/cursorius/webhooks/v6 v6.0.2-0.20221224221147-a2bdbf1756ed h1:gsK15m4Npow74+R6OfZKwwAg1sl7QWQCRXOeE2QLUco=
|
||||
@@ -1210,6 +1210,8 @@ github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.2/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/robertkrimen/godocdown v0.0.0-20130622164427-0bfa04905481/go.mod h1:C9WhFzY47SzYBIvzFqSvHIR6ROgDo4TtdTuRaOMjF/s=
|
||||
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
||||
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
@@ -2129,8 +2131,9 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
|
||||
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
|
||||
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
|
||||
+14
-9
@@ -10,6 +10,8 @@ import (
|
||||
"git.ohea.xyz/cursorius/server/pipeline_api"
|
||||
"git.ohea.xyz/cursorius/server/runnermanager"
|
||||
"git.ohea.xyz/cursorius/server/webhook"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/op/go-logging"
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/http2/h2c"
|
||||
@@ -22,15 +24,16 @@ func setupHTTPServer(
|
||||
mux *http.ServeMux,
|
||||
conf config.PipelineConf,
|
||||
db database.Database,
|
||||
registerCh chan runnermanager.RunnerRegistration,
|
||||
getRunnerCh chan runnermanager.GetRunnerRequest,
|
||||
runnerManagerChans runnermanager.RunnerManagerChans,
|
||||
pollChan chan uuid.UUID,
|
||||
cronChan chan uuid.UUID,
|
||||
) error {
|
||||
|
||||
webhook.CreateWebhookHandler(db, conf, mux)
|
||||
|
||||
pipeline_api.CreateHandler(getRunnerCh, mux)
|
||||
pipeline_api.CreateHandler(runnerManagerChans.Allocation, runnerManagerChans.Release, mux)
|
||||
|
||||
err := admin_api.CreateHandler(db, mux)
|
||||
err := admin_api.CreateHandler(db, pollChan, cronChan, mux)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Could not create admin api handler: %w", err)
|
||||
}
|
||||
@@ -41,7 +44,7 @@ func setupHTTPServer(
|
||||
log.Errorf("Could not upgrade runner connection to websocket: %v", err)
|
||||
return
|
||||
}
|
||||
go runnermanager.RegisterRunner(conn, registerCh)
|
||||
go runnermanager.RegisterRunner(conn, runnerManagerChans.Registration)
|
||||
})
|
||||
return nil
|
||||
}
|
||||
@@ -52,16 +55,18 @@ func Listen(
|
||||
port int,
|
||||
conf config.PipelineConf,
|
||||
db database.Database,
|
||||
registerCh chan runnermanager.RunnerRegistration,
|
||||
getRunnerCh chan runnermanager.GetRunnerRequest,
|
||||
runnerManagerChans runnermanager.RunnerManagerChans,
|
||||
pollChan chan uuid.UUID,
|
||||
cronChan chan uuid.UUID,
|
||||
) error {
|
||||
|
||||
err := setupHTTPServer(
|
||||
mux,
|
||||
conf,
|
||||
db,
|
||||
registerCh,
|
||||
getRunnerCh,
|
||||
runnerManagerChans,
|
||||
pollChan,
|
||||
cronChan,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Could not setup http endpoints: %w", err)
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"os"
|
||||
|
||||
"git.ohea.xyz/cursorius/server/config"
|
||||
"git.ohea.xyz/cursorius/server/cron"
|
||||
"git.ohea.xyz/cursorius/server/database"
|
||||
"git.ohea.xyz/cursorius/server/listen"
|
||||
"git.ohea.xyz/cursorius/server/poll"
|
||||
@@ -26,7 +27,7 @@ func main() {
|
||||
|
||||
logging.SetBackend(backendLeveled)
|
||||
|
||||
log.Info("Starting cursorius-server v0.1.0")
|
||||
log.Info("Starting cursorius-server v0.3.0")
|
||||
|
||||
configData, err := config.GetConfig()
|
||||
if err != nil {
|
||||
@@ -40,13 +41,15 @@ func main() {
|
||||
return
|
||||
}
|
||||
|
||||
getRunnerCh, registerCh, err := runnermanager.StartRunnerManager(configData.Config.Runners, db)
|
||||
runnerManagerChans, err := runnermanager.StartRunnerManager(configData.Config.Runners, db)
|
||||
if err != nil {
|
||||
log.Errorf("Could not start runner: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
_ = poll.StartPolling(configData.Config.PipelineConf, db)
|
||||
pollChan := poll.StartPolling(configData.Config.PipelineConf, db)
|
||||
|
||||
cronChan := cron.StartCrons(configData.Config.PipelineConf, db)
|
||||
|
||||
mux := http.NewServeMux()
|
||||
|
||||
@@ -56,7 +59,8 @@ func main() {
|
||||
configData.Config.Port,
|
||||
configData.Config.PipelineConf,
|
||||
db,
|
||||
registerCh,
|
||||
getRunnerCh,
|
||||
runnerManagerChans,
|
||||
pollChan,
|
||||
cronChan,
|
||||
))
|
||||
}
|
||||
|
||||
@@ -4,12 +4,13 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
apiv2 "git.ohea.xyz/cursorius/pipeline-api/go/api/v2"
|
||||
"git.ohea.xyz/cursorius/pipeline-api/go/api/v2/apiv2connect"
|
||||
"git.ohea.xyz/cursorius/server/runnermanager"
|
||||
"git.ohea.xyz/cursorius/server/util"
|
||||
"github.com/bufbuild/connect-go"
|
||||
"github.com/google/uuid"
|
||||
"github.com/op/go-logging"
|
||||
@@ -18,7 +19,8 @@ import (
|
||||
var log = logging.MustGetLogger("cursorius-server")
|
||||
|
||||
type ApiServer struct {
|
||||
getRunnerCh chan runnermanager.GetRunnerRequest
|
||||
allocationCh chan runnermanager.RunnerAllocationRequest
|
||||
releaseCh chan runnermanager.RunnerReleaseRequest
|
||||
allocatedRunners map[uuid.UUID]*RunnerWrapper
|
||||
allocatedRunnersMutex sync.RWMutex
|
||||
}
|
||||
@@ -34,15 +36,17 @@ func (r *RunnerWrapper) RunCommand(cmd string, args []string) (int64, string, st
|
||||
|
||||
return_code, stdout, stderr, err := r.runner.RunCommand(cmd, args)
|
||||
|
||||
// TODO: run command by sending websocket packet
|
||||
// TODO: get stdout and stderr response
|
||||
return return_code, stdout, stderr, err
|
||||
}
|
||||
|
||||
func (r *RunnerWrapper) Release() {
|
||||
func (r *RunnerWrapper) Release(releaseCh chan runnermanager.RunnerReleaseRequest) {
|
||||
r.mutex.Lock()
|
||||
defer r.mutex.Unlock()
|
||||
r.runner.Release()
|
||||
|
||||
releaseCh <- runnermanager.RunnerReleaseRequest{
|
||||
Runner: r.runner,
|
||||
}
|
||||
r.runner = nil
|
||||
}
|
||||
|
||||
func (s *ApiServer) GetRunnerFromMap(u uuid.UUID) (*RunnerWrapper, bool) {
|
||||
@@ -52,39 +56,71 @@ func (s *ApiServer) GetRunnerFromMap(u uuid.UUID) (*RunnerWrapper, bool) {
|
||||
return runner, ok
|
||||
}
|
||||
|
||||
func (s *ApiServer) AddRunnerToMap(u uuid.UUID, runner *runnermanager.Runner) {
|
||||
s.allocatedRunnersMutex.Lock()
|
||||
defer s.allocatedRunnersMutex.Unlock()
|
||||
s.allocatedRunners[u] = &RunnerWrapper{runner: runner}
|
||||
}
|
||||
|
||||
func (s *ApiServer) GetRunner(
|
||||
ctx context.Context,
|
||||
req *connect.Request[apiv2.GetRunnerRequest],
|
||||
) (*connect.Response[apiv2.GetRunnerResponse], error) {
|
||||
|
||||
respChan := make(chan runnermanager.GetRunnerResponse)
|
||||
s.getRunnerCh <- runnermanager.GetRunnerRequest{
|
||||
Tags: req.Msg.Tags,
|
||||
RespChan: respChan,
|
||||
}
|
||||
var response runnermanager.RunnerAllocationResponse
|
||||
var timeoutCtx *context.Context
|
||||
var retryInterval int64 = 0
|
||||
|
||||
var runnerTagsStr strings.Builder
|
||||
if len(req.Msg.Tags) > 0 {
|
||||
fmt.Fprintf(&runnerTagsStr, "[%v", req.Msg.Tags[0])
|
||||
for _, tag := range req.Msg.Tags[1:] {
|
||||
fmt.Fprintf(&runnerTagsStr, ", %v", tag)
|
||||
respChan := make(chan runnermanager.RunnerAllocationResponse)
|
||||
|
||||
tagsStr := util.FormatTags(req.Msg.Tags)
|
||||
|
||||
if req.Msg.Options != nil {
|
||||
if req.Msg.Options.Timeout != 0 {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(req.Msg.Options.Timeout)*time.Second)
|
||||
timeoutCtx = &ctx
|
||||
defer cancel()
|
||||
}
|
||||
fmt.Fprintf(&runnerTagsStr, "]")
|
||||
|
||||
retryInterval = req.Msg.Options.RetryInterval
|
||||
}
|
||||
|
||||
response := <-respChan
|
||||
if response.Err != nil {
|
||||
log.Errorf("Could not get runner with tags \"%v\": %v", runnerTagsStr.String(), response.Err)
|
||||
for {
|
||||
s.allocationCh <- runnermanager.RunnerAllocationRequest{
|
||||
Tags: req.Msg.Tags,
|
||||
RespChan: respChan,
|
||||
}
|
||||
|
||||
response = <-respChan
|
||||
if response.Err == nil {
|
||||
break
|
||||
}
|
||||
|
||||
log.Infof("Could not get runner with tags \"%v\": %v", tagsStr, response.Err)
|
||||
|
||||
// If no timeout is specified, skip after one attempt
|
||||
if timeoutCtx == nil {
|
||||
break
|
||||
}
|
||||
|
||||
// If timeout is expired, stop trying to allocate runner
|
||||
if (*timeoutCtx).Err() != nil {
|
||||
break
|
||||
}
|
||||
|
||||
log.Infof("Sleeping for %v seconds before retry...", retryInterval)
|
||||
time.Sleep(time.Duration(retryInterval) * time.Second)
|
||||
}
|
||||
|
||||
if response.Runner == nil {
|
||||
return nil, connect.NewError(connect.CodeNotFound, fmt.Errorf("Could not get runner"))
|
||||
}
|
||||
|
||||
log.Infof("Got runner with tags: %v", runnerTagsStr.String())
|
||||
log.Infof("Got runner with tags: %v", tagsStr)
|
||||
|
||||
runnerUuid := uuid.New()
|
||||
|
||||
s.allocatedRunnersMutex.Lock()
|
||||
s.allocatedRunners[runnerUuid] = &RunnerWrapper{runner: response.Runner}
|
||||
s.allocatedRunnersMutex.Unlock()
|
||||
s.AddRunnerToMap(runnerUuid, response.Runner)
|
||||
|
||||
res := connect.NewResponse(&apiv2.GetRunnerResponse{
|
||||
Id: runnerUuid.String(),
|
||||
@@ -107,7 +143,7 @@ func (s *ApiServer) ReleaseRunner(
|
||||
s.allocatedRunnersMutex.Lock()
|
||||
runner := s.allocatedRunners[uuid]
|
||||
delete(s.allocatedRunners, uuid)
|
||||
runner.Release()
|
||||
runner.Release(s.releaseCh)
|
||||
s.allocatedRunnersMutex.Unlock()
|
||||
|
||||
res := connect.NewResponse(&apiv2.ReleaseRunnerResponse{})
|
||||
@@ -146,9 +182,10 @@ func (s *ApiServer) RunCommand(
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func CreateHandler(getRunnerCh chan runnermanager.GetRunnerRequest, mux *http.ServeMux) {
|
||||
func CreateHandler(allocationCh chan runnermanager.RunnerAllocationRequest, releaseCh chan runnermanager.RunnerReleaseRequest, mux *http.ServeMux) {
|
||||
api_server := &ApiServer{
|
||||
getRunnerCh: getRunnerCh,
|
||||
allocationCh: allocationCh,
|
||||
releaseCh: releaseCh,
|
||||
allocatedRunners: make(map[uuid.UUID]*RunnerWrapper),
|
||||
}
|
||||
path, handler := apiv2connect.NewGetRunnerServiceHandler(api_server)
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@@ -35,39 +34,40 @@ type PipelineExecution struct {
|
||||
}
|
||||
|
||||
func ExecutePipeline(pe PipelineExecution, db database.Database, pipelineConf config.PipelineConf) {
|
||||
idStr := pe.Pipeline.Id.String()
|
||||
|
||||
jobFolder := filepath.Join(pipelineConf.WorkingDir, pe.Pipeline.Id.String(), pe.Run.Id.String())
|
||||
cloneFolder := filepath.Join(jobFolder, "repo")
|
||||
|
||||
log.Debugf("Job %v configured with URL \"%v\"", pe.Pipeline.Name, pe.Pipeline.Url)
|
||||
|
||||
log.Debugf("Job %v configured with folder \"%v\"", pe.Pipeline.Name, jobFolder)
|
||||
log.Debugf("%v: URL: %v", idStr, pe.Pipeline.Url)
|
||||
log.Debugf("%v: Folder: %v", idStr, jobFolder)
|
||||
|
||||
err := os.RemoveAll(jobFolder)
|
||||
if err != nil {
|
||||
log.Errorf("could not delete existing folder %v", jobFolder)
|
||||
log.Errorf("%v: could not delete existing folder %v", idStr, jobFolder)
|
||||
return
|
||||
}
|
||||
|
||||
err = os.MkdirAll(cloneFolder, 0755)
|
||||
if err != nil {
|
||||
log.Errorf("could not create working directory for job %v: %w", pe.Pipeline.Name, err)
|
||||
log.Errorf("%v: could not create working directory: %w", idStr, pe.Pipeline.Name, err)
|
||||
return
|
||||
}
|
||||
|
||||
log.Infof("Cloning source from URL %v", pe.Pipeline.Url)
|
||||
log.Infof("%v: cloning source from URL %v", idStr, pe.Pipeline.Url)
|
||||
|
||||
var auth transport.AuthMethod
|
||||
|
||||
if pe.Pipeline.CloneCredential != nil {
|
||||
credential, err := db.GetCloneCredentialById(*pe.Pipeline.CloneCredential)
|
||||
if err != nil {
|
||||
log.Errorf("could not get credenital from db: %v", err)
|
||||
log.Errorf("%v: could not get credenital from db: %v", idStr, err)
|
||||
return
|
||||
}
|
||||
|
||||
switch credential.Type {
|
||||
case "USER_PASS":
|
||||
log.Debugf("job %v configured to use credential %v", pe.Pipeline.Name, credential.Name)
|
||||
log.Debugf("%v: credential %v configured", idStr, credential.Name)
|
||||
auth = transport.AuthMethod(&http.BasicAuth{
|
||||
Username: credential.Username,
|
||||
Password: credential.Secret,
|
||||
@@ -75,12 +75,12 @@ func ExecutePipeline(pe PipelineExecution, db database.Database, pipelineConf co
|
||||
case "SSH_KEY":
|
||||
publicKeys, err := ssh.NewPublicKeys(credential.Username, []byte(credential.Secret), "")
|
||||
if err != nil {
|
||||
log.Errorf("could not parse credential %v", credential.Name)
|
||||
log.Errorf("%v: could not parse credential %v: %v", idStr, credential.Name, err)
|
||||
return
|
||||
}
|
||||
auth = transport.AuthMethod(publicKeys)
|
||||
default:
|
||||
log.Errorf("unsupported credential type %v", credential.Type)
|
||||
log.Errorf("%v: unsupported credential type %v", idStr, credential.Type)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
@@ -92,40 +92,46 @@ func ExecutePipeline(pe PipelineExecution, db database.Database, pipelineConf co
|
||||
Auth: auth,
|
||||
})
|
||||
if err != nil {
|
||||
log.Errorf("could not clone repo: %v", err)
|
||||
log.Errorf("%v: could not clone repo: %v", idStr, err)
|
||||
return
|
||||
}
|
||||
|
||||
cli, err := client.NewClientWithOpts(client.FromEnv)
|
||||
if err != nil {
|
||||
log.Errorf("Could not create docker client: %w", err)
|
||||
log.Errorf("%v: could not create docker client: %w", idStr, err)
|
||||
return
|
||||
}
|
||||
log.Info("Source cloned successfully")
|
||||
log.Infof("%v: source cloned successfully", idStr)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
log.Info("Building container")
|
||||
log.Debugf("%v: tarring up job source", idStr)
|
||||
|
||||
log.Debugf("%v: creating tarfile", idStr)
|
||||
tarFile := filepath.Join(jobFolder, "archive.tar")
|
||||
tar := new(archivex.TarFile)
|
||||
err = tar.Create(tarFile)
|
||||
if err != nil {
|
||||
log.Errorf("could not create tarfile: %w", err)
|
||||
log.Errorf("%v: could not create tarfile: %w", idStr, err)
|
||||
return
|
||||
}
|
||||
|
||||
log.Debugf("%v: adding files to tarfile", idStr)
|
||||
err = tar.AddAll(cloneFolder, false)
|
||||
if err != nil {
|
||||
log.Errorf("could not add repo to tarfile: %w", err)
|
||||
log.Errorf("%v: could not add repo to tarfile: %w", idStr, err)
|
||||
return
|
||||
}
|
||||
|
||||
log.Debugf("%v: saving tarfile tarfile", idStr)
|
||||
err = tar.Close()
|
||||
if err != nil {
|
||||
log.Errorf("could not close tarfile: %w", err)
|
||||
log.Errorf("%v: could not close tarfile: %w", idStr, err)
|
||||
return
|
||||
}
|
||||
log.Debugf("%v: job source tarred", idStr)
|
||||
|
||||
log.Infof("%v: building container", idStr)
|
||||
|
||||
dockerBuildContext, err := os.Open(tarFile)
|
||||
defer dockerBuildContext.Close()
|
||||
@@ -137,29 +143,29 @@ func ExecutePipeline(pe PipelineExecution, db database.Database, pipelineConf co
|
||||
Dockerfile: ".cursorius/Dockerfile",
|
||||
})
|
||||
if err != nil {
|
||||
log.Errorf("could not build container: %w", err)
|
||||
log.Errorf("%v: could not build container: %w", idStr, err)
|
||||
return
|
||||
}
|
||||
|
||||
response, err := ioutil.ReadAll(buildResponse.Body)
|
||||
log.Debugf("%v: reading build output from docker daemon", idStr)
|
||||
|
||||
err = db.UpdateRunBuildOutput(pe.Run.Id, cleanupBuildOutput(buildResponse.Body))
|
||||
if err != nil {
|
||||
log.Errorf("could no read build response: %w", err)
|
||||
log.Errorf("%v: could not update build output for run: %w", idStr, err)
|
||||
return
|
||||
}
|
||||
|
||||
err = db.UpdateRunBuildOutput(pe.Run.Id, string(response))
|
||||
if err != nil {
|
||||
log.Errorf("could not update build output for run: %w", err)
|
||||
return
|
||||
}
|
||||
log.Debugf("%v: build output read from docker daemon", idStr)
|
||||
|
||||
err = buildResponse.Body.Close()
|
||||
if err != nil {
|
||||
log.Errorf("Could not close build response body: %w", err)
|
||||
log.Errorf("%v: could not close build response body: %w", idStr, err)
|
||||
return
|
||||
}
|
||||
|
||||
log.Info("Image built sucessfully")
|
||||
log.Debugf("%v: build response closed", idStr)
|
||||
|
||||
log.Infof("%v: image built sucessfully", idStr)
|
||||
|
||||
hostConfig := container.HostConfig{}
|
||||
|
||||
@@ -203,7 +209,7 @@ func ExecutePipeline(pe PipelineExecution, db database.Database, pipelineConf co
|
||||
// load secrets into environment
|
||||
secrets, err := db.GetSecretsForPipeline(pe.Pipeline.Id)
|
||||
if err != nil {
|
||||
log.Errorf("Could not get secrets for pipeline", err)
|
||||
log.Errorf("%v: could not get secrets for pipeline", idStr, err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -212,6 +218,8 @@ func ExecutePipeline(pe PipelineExecution, db database.Database, pipelineConf co
|
||||
env = append(env, fmt.Sprintf("%v=%v", strings.ToUpper(secret.Name), secret.Secret))
|
||||
}
|
||||
|
||||
log.Debugf("%v: creating container", idStr)
|
||||
|
||||
resp, err := cli.ContainerCreate(ctx,
|
||||
&container.Config{
|
||||
Image: imageName,
|
||||
@@ -223,41 +231,49 @@ func ExecutePipeline(pe PipelineExecution, db database.Database, pipelineConf co
|
||||
nil, nil, "",
|
||||
)
|
||||
if err != nil {
|
||||
log.Errorf("could not create container: %w", err)
|
||||
log.Errorf("%v: could not create container: %w", idStr, err)
|
||||
return
|
||||
}
|
||||
|
||||
log.Info("Launching container")
|
||||
log.Info("%v: starting container", idStr)
|
||||
|
||||
if err := cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
|
||||
log.Errorf("could not start container: %v", err)
|
||||
log.Errorf("%v: could not start container: %v", idStr, err)
|
||||
return
|
||||
}
|
||||
|
||||
log.Debugf("%v: container started", idStr)
|
||||
log.Debugf("%v: waiting on container", idStr)
|
||||
|
||||
statusCh, errCh := cli.ContainerWait(ctx, resp.ID, container.WaitConditionNotRunning)
|
||||
select {
|
||||
case err := <-errCh:
|
||||
if err != nil {
|
||||
log.Errorf("container returned error: %v", err)
|
||||
log.Errorf("%v: container returned error: %v", idStr, err)
|
||||
return
|
||||
}
|
||||
case okBody := <-statusCh:
|
||||
if okBody.Error != nil {
|
||||
log.Errorf("Could not wait on container: %v", err)
|
||||
log.Errorf("%v: could not wait on container: %v", idStr, err)
|
||||
return
|
||||
} else {
|
||||
log.Debugf("Container finished running with return code: %v", okBody.StatusCode)
|
||||
log.Debugf("%v: container finished running with return code: %v", idStr, okBody.StatusCode)
|
||||
pe.Run.Result = &okBody.StatusCode
|
||||
}
|
||||
}
|
||||
|
||||
pe.Run.InProgress = false
|
||||
|
||||
log.Debugf("%v: getting container logs", idStr)
|
||||
|
||||
out, err := cli.ContainerLogs(ctx, resp.ID, types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true})
|
||||
if err != nil {
|
||||
log.Errorf("could not get container logs: %w", err)
|
||||
log.Errorf("%v: could not get container logs: %w", idStr, err)
|
||||
return
|
||||
}
|
||||
|
||||
log.Debugf("%v: gotcontainer logs", idStr)
|
||||
|
||||
var stdOut bytes.Buffer
|
||||
var stdErr bytes.Buffer
|
||||
|
||||
|
||||
@@ -0,0 +1,25 @@
|
||||
package pipeline_executor
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"io"
|
||||
)
|
||||
|
||||
func cleanupBuildOutput(input io.ReadCloser) string {
|
||||
output := ""
|
||||
|
||||
scanner := bufio.NewScanner(input)
|
||||
for scanner.Scan() {
|
||||
var log map[string]any
|
||||
json.Unmarshal(scanner.Bytes(), &log)
|
||||
|
||||
if logVar, ok := log["stream"]; ok {
|
||||
if log, ok := logVar.(string); ok {
|
||||
output += log
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return output
|
||||
}
|
||||
+51
-9
@@ -1,17 +1,18 @@
|
||||
package poll
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/op/go-logging"
|
||||
|
||||
"git.ohea.xyz/cursorius/server/config"
|
||||
"git.ohea.xyz/cursorius/server/database"
|
||||
"git.ohea.xyz/cursorius/server/pipeline_executor"
|
||||
|
||||
"github.com/go-git/go-git/v5"
|
||||
"github.com/go-git/go-git/v5/plumbing"
|
||||
"github.com/go-git/go-git/v5/storage/memory"
|
||||
"github.com/google/uuid"
|
||||
"github.com/op/go-logging"
|
||||
)
|
||||
|
||||
var log = logging.MustGetLogger("cursorius-server")
|
||||
@@ -26,16 +27,41 @@ type tag struct {
|
||||
commitHash string
|
||||
}
|
||||
|
||||
func pollJob(pipeline database.Pipeline, pipelineConf config.PipelineConf, db database.Database) {
|
||||
func pollJob(ctx context.Context, pipeline database.Pipeline, pipelineConf config.PipelineConf, db database.Database) {
|
||||
firstScan := true
|
||||
for {
|
||||
time.Sleep(time.Duration(pipeline.PollInterval) * time.Second)
|
||||
log.Infof("Polling repo %v", pipeline.Name)
|
||||
// Don't sleep on first scan to ease testing
|
||||
// TODO: this should be replaced with a script that mocks a webhook
|
||||
if !firstScan {
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, time.Duration(pipeline.PollInterval)*time.Second)
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
switch ctx.Err() {
|
||||
case context.Canceled:
|
||||
log.Infof("Polling for pipeline %v canceled, stopping", pipeline.Name)
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
cancel()
|
||||
|
||||
log.Infof("Polling repo %v", pipeline.Name)
|
||||
} else {
|
||||
firstScan = false
|
||||
}
|
||||
|
||||
prevRefs, err := db.GetPipelineRefs(pipeline.Id)
|
||||
if err != nil {
|
||||
log.Errorf("Could not get pipeline refs from db: %v", err)
|
||||
return
|
||||
}
|
||||
log.Debugf("Got pipeline hashs for repo %v (id: %v)", pipeline.Name, pipeline.Id)
|
||||
for refName, hash := range prevRefs {
|
||||
log.Debugf("%v: %v: %v", pipeline.Id, refName, hash)
|
||||
}
|
||||
|
||||
repo, err := git.Clone(memory.NewStorage(), nil, &git.CloneOptions{
|
||||
URL: pipeline.Url,
|
||||
@@ -58,6 +84,7 @@ func pollJob(pipeline database.Pipeline, pipelineConf config.PipelineConf, db da
|
||||
prevRef, ok := prevRefs[branch.Name().String()]
|
||||
if ok {
|
||||
if branch.Hash().String() != prevRef {
|
||||
log.Debugf("Branch %v in repo %v (id: %v) has hash %v, which does not match the previously seen hash of %v", branch.Name().String(), pipeline.Name, pipeline.Id, branch.Hash().String(), prevRef)
|
||||
log.Debugf("Queuing job for branch %v in repo %v (id: %v) with hash %v", branch.Name().String(), pipeline.Name, pipeline.Id, branch.Hash().String())
|
||||
prevRefs[branch.Name().String()] = branch.Hash().String()
|
||||
refsToRunFor = append(refsToRunFor, branch.Name().String())
|
||||
@@ -129,11 +156,17 @@ func launchPollJobs(conf config.PipelineConf, db database.Database, pollChan cha
|
||||
return
|
||||
}
|
||||
|
||||
pipelineCancelations := make(map[uuid.UUID]context.CancelFunc)
|
||||
|
||||
for _, pipeline := range pipelines {
|
||||
if pipeline.PollInterval == 0 {
|
||||
continue
|
||||
} else {
|
||||
go pollJob(pipeline, conf, db)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
pipelineCancelations[pipeline.Id] = cancel
|
||||
|
||||
log.Infof("Starting polling for pipeline %v with id %v", pipeline.Name, pipeline.Id)
|
||||
go pollJob(ctx, pipeline, conf, db)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -144,8 +177,17 @@ func launchPollJobs(conf config.PipelineConf, db database.Database, pollChan cha
|
||||
log.Errorf("Could not get pipeline with id \"%v\" from database: %v", err)
|
||||
continue
|
||||
}
|
||||
// TODO: stop existing polling process for given uuid
|
||||
go pollJob(pipeline, conf, db)
|
||||
|
||||
// Cancel existing polling job if it exists
|
||||
if cancelFunc, ok := pipelineCancelations[pipeline.Id]; ok {
|
||||
cancelFunc()
|
||||
}
|
||||
|
||||
// Start new polling job
|
||||
log.Infof("Starting polling for pipeline %v with id %v", pipeline.Name, pipeline.Id)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
pipelineCancelations[pipeline.Id] = cancel
|
||||
go pollJob(ctx, pipeline, conf, db)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
+19
-5
@@ -22,19 +22,33 @@ type Runner struct {
|
||||
tags []string
|
||||
conn *websocket.Conn
|
||||
receiveChan chan []byte
|
||||
running bool
|
||||
}
|
||||
|
||||
func (r *Runner) HasTags(requestedTags []string) bool {
|
||||
tagIter:
|
||||
for _, requestedTag := range requestedTags {
|
||||
for _, posessedTag := range r.tags {
|
||||
// if we find the tag, move on to search for the next one
|
||||
if posessedTag == requestedTag {
|
||||
continue tagIter
|
||||
}
|
||||
}
|
||||
// if we don't find the tag
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (r *Runner) Id() uuid.UUID {
|
||||
return r.id
|
||||
}
|
||||
|
||||
func (r *Runner) Release() {
|
||||
r.running = false
|
||||
}
|
||||
|
||||
func (r *Runner) RunCommand(cmd string, args []string) (returnCode int64, stdout string, stderr string, err error) {
|
||||
|
||||
if r.conn == nil {
|
||||
return 0, "", "", fmt.Errorf("runner with id %v has nil conn, THIS IS A BUG", r.id)
|
||||
}
|
||||
|
||||
// Write RunCommand message to client
|
||||
serverToRunnerMsg := &runner_api.ServerToRunnerMsg{
|
||||
Msg: &runner_api.ServerToRunnerMsg_RunCommandMsg{
|
||||
|
||||
@@ -3,7 +3,6 @@ package runnermanager
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
@@ -13,66 +12,24 @@ import (
|
||||
|
||||
"git.ohea.xyz/cursorius/server/config"
|
||||
"git.ohea.xyz/cursorius/server/database"
|
||||
"git.ohea.xyz/cursorius/server/util"
|
||||
|
||||
runner_api "git.ohea.xyz/cursorius/runner-api/go/api/v2"
|
||||
)
|
||||
|
||||
var log = logging.MustGetLogger("cursorius-server")
|
||||
|
||||
type RunnerRegistration struct {
|
||||
Secret string
|
||||
Id string
|
||||
Tags []string
|
||||
conn *websocket.Conn
|
||||
}
|
||||
func (r *runnerManager) processRunnerAllocation(req RunnerAllocationRequest) {
|
||||
tagsStr := util.FormatTags(req.Tags)
|
||||
log.Infof("Got request for runner with tags \"%v\"", tagsStr)
|
||||
|
||||
type runnerManager struct {
|
||||
getRunnerCh chan GetRunnerRequest
|
||||
registerCh chan RunnerRegistration
|
||||
connectedRunners []Runner
|
||||
numConnectedRunners uint64
|
||||
configuredRunners map[string]config.Runner
|
||||
db database.Database
|
||||
}
|
||||
|
||||
type GetRunnerRequest struct {
|
||||
Tags []string
|
||||
RespChan chan GetRunnerResponse
|
||||
}
|
||||
|
||||
type GetRunnerResponse struct {
|
||||
Runner *Runner
|
||||
Err error
|
||||
}
|
||||
|
||||
type runnerJob struct {
|
||||
Id string
|
||||
URL string
|
||||
}
|
||||
|
||||
func (r *runnerManager) processRequest(req GetRunnerRequest) {
|
||||
var runnerTagsStr strings.Builder
|
||||
if len(req.Tags) > 0 {
|
||||
fmt.Fprintf(&runnerTagsStr, "[%v", req.Tags[0])
|
||||
for _, tag := range req.Tags[1:] {
|
||||
fmt.Fprintf(&runnerTagsStr, ", %v", tag)
|
||||
}
|
||||
fmt.Fprintf(&runnerTagsStr, "]")
|
||||
}
|
||||
log.Infof("Got request for runner with tags \"%v\"", runnerTagsStr.String())
|
||||
|
||||
log.Debugf("Finding runner with tags %v", runnerTagsStr.String())
|
||||
log.Debugf("Finding runner with tags %v", tagsStr)
|
||||
|
||||
foundRunner := false
|
||||
|
||||
runnersToRemove := []int{}
|
||||
runnerIter:
|
||||
for i, runner := range r.connectedRunners {
|
||||
// don't allocate runner that is already occupied
|
||||
if runner.running {
|
||||
log.Debugf("Skipping runner %v, as runner is activly running another job", runner.id)
|
||||
continue
|
||||
}
|
||||
// don't allocate runner with closed receiveChan (is defunct)
|
||||
// there should never be messages to read on an inactive runner,
|
||||
// so we aren't losing any data here
|
||||
@@ -89,29 +46,25 @@ runnerIter:
|
||||
default:
|
||||
log.Debugf("Checking runner %v for requested tags", runner.id)
|
||||
|
||||
tagIter:
|
||||
for _, requestedTag := range req.Tags {
|
||||
for _, posessedTag := range runner.tags {
|
||||
if requestedTag == posessedTag {
|
||||
continue tagIter
|
||||
}
|
||||
}
|
||||
if !runner.HasTags(req.Tags) {
|
||||
continue runnerIter
|
||||
}
|
||||
|
||||
r.connectedRunners[i].running = true
|
||||
runnersToRemove = append(runnersToRemove, i)
|
||||
foundRunner = true
|
||||
req.RespChan <- GetRunnerResponse{
|
||||
log.Debugf("Runner %v has requested tags, allocating", runner.id)
|
||||
req.RespChan <- RunnerAllocationResponse{
|
||||
Runner: &r.connectedRunners[i],
|
||||
Err: nil,
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
// remove allocated runner plus defunct runners
|
||||
// since we iterate, all the indexes will be in accending order
|
||||
for i, runnerInd := range runnersToRemove {
|
||||
r.connectedRunners[runnerInd-i] = r.connectedRunners[len(r.connectedRunners)-1]
|
||||
r.connectedRunners = r.connectedRunners[0 : len(r.connectedRunners)-2]
|
||||
r.connectedRunners = r.connectedRunners[0 : len(r.connectedRunners)-1]
|
||||
}
|
||||
|
||||
if foundRunner {
|
||||
@@ -122,43 +75,42 @@ runnerIter:
|
||||
if len(r.connectedRunners) == 0 {
|
||||
errorMsg = "no connected runners"
|
||||
}
|
||||
req.RespChan <- GetRunnerResponse{
|
||||
Runner: &Runner{},
|
||||
req.RespChan <- RunnerAllocationResponse{
|
||||
Runner: nil,
|
||||
Err: fmt.Errorf("Could not allocate runner: %v", errorMsg),
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (r *runnerManager) processRegistration(reg RunnerRegistration) {
|
||||
log.Debugf("New runner appeared with id: %v and secret: %v", reg.Id, reg.Secret)
|
||||
func (r *runnerManager) processRunnerRegistration(req RunnerRegistrationRequest) {
|
||||
log.Debugf("New runner appeared with id: %v and secret: %v", req.Id, req.Secret)
|
||||
|
||||
// Get runner with give id from database
|
||||
runnerId, err := uuid.Parse(reg.Id)
|
||||
runnerId, err := uuid.Parse(req.Id)
|
||||
if err != nil {
|
||||
log.Errorf("Disconnecting runner with id: %v, could not parse as UUID: %v", reg.Id, err)
|
||||
reg.conn.Close(websocket.StatusNormalClosure, "registration invalid")
|
||||
log.Errorf("Disconnecting runner with id: %v, could not parse as UUID: %v", req.Id, err)
|
||||
req.conn.Close(websocket.StatusNormalClosure, "registration invalid")
|
||||
return
|
||||
}
|
||||
dbRunner, err := r.db.GetRunnerById(runnerId)
|
||||
if err != nil {
|
||||
log.Errorf("Disconnecting runner with id: %v, could not find runner in DB: %v", runnerId, err)
|
||||
reg.conn.Close(websocket.StatusNormalClosure, "registration invalid")
|
||||
req.conn.Close(websocket.StatusNormalClosure, "registration invalid")
|
||||
return
|
||||
}
|
||||
|
||||
if reg.Secret != dbRunner.Token {
|
||||
if req.Secret != dbRunner.Token {
|
||||
log.Errorf("Disconnecting runner with id: %v, invalid secret", runnerId)
|
||||
reg.conn.Close(websocket.StatusNormalClosure, "registration invalid")
|
||||
req.conn.Close(websocket.StatusNormalClosure, "registration invalid")
|
||||
return
|
||||
}
|
||||
|
||||
log.Infof("Registering runner \"%v\" with tags %v", reg.Id, reg.Tags)
|
||||
log.Infof("Registering runner \"%v\" with tags %v", req.Id, req.Tags)
|
||||
runner := Runner{
|
||||
id: runnerId,
|
||||
tags: reg.Tags,
|
||||
conn: reg.conn,
|
||||
tags: req.Tags,
|
||||
conn: req.conn,
|
||||
receiveChan: make(chan []byte),
|
||||
running: false,
|
||||
}
|
||||
r.connectedRunners = append(r.connectedRunners, runner)
|
||||
// start goroutine to call Read function on websocket connection
|
||||
@@ -167,7 +119,7 @@ func (r *runnerManager) processRegistration(reg RunnerRegistration) {
|
||||
defer log.Noticef("Deregistered runner with id: %v", runner.id)
|
||||
defer close(runner.receiveChan)
|
||||
for {
|
||||
msgType, data, err := reg.conn.Read(context.Background())
|
||||
msgType, data, err := req.conn.Read(context.Background())
|
||||
if err != nil {
|
||||
// TODO: this is still racy, since a runner could be allocated between the
|
||||
// connection returning an err and the channel closing
|
||||
@@ -187,22 +139,30 @@ func (r *runnerManager) processRegistration(reg RunnerRegistration) {
|
||||
}()
|
||||
}
|
||||
|
||||
func (r *runnerManager) processRunnerRelease(req RunnerReleaseRequest) {
|
||||
r.connectedRunners = append(r.connectedRunners, *req.Runner)
|
||||
}
|
||||
|
||||
func runRunnerManager(r runnerManager) {
|
||||
for {
|
||||
select {
|
||||
case request := <-r.getRunnerCh:
|
||||
r.processRequest(request)
|
||||
|
||||
case registration := <-r.registerCh:
|
||||
r.processRegistration(registration)
|
||||
case request := <-r.chans.Allocation:
|
||||
r.processRunnerAllocation(request)
|
||||
case release := <-r.chans.Release:
|
||||
r.processRunnerRelease(release)
|
||||
case registration := <-r.chans.Registration:
|
||||
r.processRunnerRegistration(registration)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func StartRunnerManager(configuredRunners map[string]config.Runner, db database.Database) (chan GetRunnerRequest, chan RunnerRegistration, error) {
|
||||
func StartRunnerManager(configuredRunners map[string]config.Runner, db database.Database) (RunnerManagerChans, error) {
|
||||
scheduler := runnerManager{
|
||||
getRunnerCh: make(chan GetRunnerRequest),
|
||||
registerCh: make(chan RunnerRegistration),
|
||||
chans: RunnerManagerChans{
|
||||
Allocation: make(chan RunnerAllocationRequest),
|
||||
Release: make(chan RunnerReleaseRequest),
|
||||
Registration: make(chan RunnerRegistrationRequest),
|
||||
},
|
||||
connectedRunners: make([]Runner, 0),
|
||||
configuredRunners: configuredRunners,
|
||||
db: db,
|
||||
@@ -210,14 +170,14 @@ func StartRunnerManager(configuredRunners map[string]config.Runner, db database.
|
||||
|
||||
go runRunnerManager(scheduler)
|
||||
|
||||
return scheduler.getRunnerCh, scheduler.registerCh, nil
|
||||
return scheduler.chans, nil
|
||||
}
|
||||
|
||||
func RegisterRunner(conn *websocket.Conn, registerCh chan RunnerRegistration) {
|
||||
func RegisterRunner(conn *websocket.Conn, registerCh chan RunnerRegistrationRequest) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
|
||||
defer cancel()
|
||||
|
||||
var registration RunnerRegistration
|
||||
var registration RunnerRegistrationRequest
|
||||
registration.conn = conn
|
||||
|
||||
typ, r, err := conn.Read(ctx)
|
||||
|
||||
@@ -0,0 +1,49 @@
|
||||
package runnermanager
|
||||
|
||||
import (
|
||||
"nhooyr.io/websocket"
|
||||
|
||||
"git.ohea.xyz/cursorius/server/config"
|
||||
"git.ohea.xyz/cursorius/server/database"
|
||||
)
|
||||
|
||||
// RunnerManagerChans bundles the request channels used to communicate with
// the runner manager goroutine; each channel is serviced by the manager's
// select loop, which handles one message at a time.
type RunnerManagerChans struct {
	// Allocation carries requests to allocate a runner.
	Allocation chan RunnerAllocationRequest
	// Release carries requests returning a runner to the pool.
	Release chan RunnerReleaseRequest
	// Registration carries registration data from newly connected runners.
	Registration chan RunnerRegistrationRequest
}
|
||||
|
||||
// runnerManager holds the state owned by the runner manager goroutine.
// Requests arrive via the chans field and are processed serially, so the
// remaining fields need no additional locking.
type runnerManager struct {
	// chans is the public channel bundle handed out by StartRunnerManager.
	chans RunnerManagerChans
	// connectedRunners is the pool of registered runners; registration and
	// release append to it. Presumably allocation removes entries — TODO confirm.
	connectedRunners []Runner
	// numConnectedRunners is not read or written in the code visible here;
	// NOTE(review): verify it is actually maintained, or remove it.
	numConnectedRunners uint64
	// configuredRunners maps a runner name/id to its static configuration.
	configuredRunners map[string]config.Runner
	// db is the persistent storage handle.
	db database.Database
}
|
||||
|
||||
// RunnerAllocationRequest asks the manager for a runner; the reply is
// delivered on RespChan.
type RunnerAllocationRequest struct {
	// Tags presumably constrain which runner may be allocated — TODO confirm
	// the matching semantics in the allocation handler.
	Tags []string
	// RespChan receives the allocated runner or an error.
	RespChan chan RunnerAllocationResponse
	// CancelChan carries a string; its exact cancellation protocol is not
	// visible here — NOTE(review): confirm against the allocation handler.
	CancelChan chan string
}
|
||||
|
||||
// RunnerAllocationResponse is the manager's reply to a
// RunnerAllocationRequest: either an allocated runner or an error.
type RunnerAllocationResponse struct {
	// Runner is the allocated runner; expected nil when Err is non-nil.
	Runner *Runner
	// Err reports why allocation failed, if it did.
	Err error
}
|
||||
|
||||
// RunnerReleaseRequest returns a previously allocated runner to the
// manager's pool of connected runners.
type RunnerReleaseRequest struct {
	// Runner is the runner being released; it is dereferenced by the
	// release handler, so it must be non-nil.
	Runner *Runner
}
|
||||
|
||||
// RunnerRegistrationRequest carries the data a connecting runner presents
// when registering; RegisterRunner fills in conn server-side after reading
// the registration off the websocket.
type RunnerRegistrationRequest struct {
	// Secret is the credential presented by the runner — TODO confirm how
	// and where it is validated.
	Secret string
	// Id identifies the runner. NOTE(review): Go style prefers ID, but
	// renaming this exported field could break decoding/callers — verify
	// before changing.
	Id string
	// Tags are the capability tags advertised by the runner.
	Tags []string
	// conn is the runner's websocket connection; set by the server, not by
	// the remote peer.
	conn *websocket.Conn
}
|
||||
|
||||
// runnerJob describes a unit of work handed to a runner. The exported
// fields on an unexported type suggest it is serialized for transmission —
// TODO confirm the encoding and wire format.
type runnerJob struct {
	// Id identifies the job.
	Id string
	// URL presumably points the runner at the job's payload — confirm.
	URL string
}
|
||||
@@ -0,0 +1,18 @@
|
||||
package util
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// FormatTags renders a list of tags as a bracketed, comma-separated
// string, e.g. ["a", "b"] -> "[a, b]". An empty or nil slice yields the
// empty string (not "[]"), matching the original hand-rolled loop.
//
// The manual strings.Builder/Fprintf loop is replaced with the idiomatic
// and allocation-friendlier strings.Join.
func FormatTags(tags []string) string {
	if len(tags) == 0 {
		return ""
	}
	return "[" + strings.Join(tags, ", ") + "]"
}
|
||||
Reference in New Issue
Block a user