Compare commits

17 Commits

Author SHA1 Message Date
restitux 66eba61bbe Remove leftover debug print (#26) 2023-05-04 20:40:01 -06:00
restitux dc09c0f4f5 Add shell script to publish container 2023-05-04 20:35:39 -06:00
restitux dcaeaeb6d6 Fix pipeline executor logging to use Infof 2023-05-04 20:27:33 -06:00
restitux 32a86ace9f Update ON CONFLICT with pipeline_refs primary key 2023-05-04 20:26:43 -06:00
restitux 89be2c4816 Correct primary key on pipeline_refs table 2023-05-04 20:15:37 -06:00
restitux 63c0f83c16 Add debug print of prevRefs to polling logic 2023-05-04 19:21:22 -06:00
restitux 8188bd391b Add debug print to new ref value codepath in polling logic 2023-05-04 19:16:17 -06:00
restitux 8ac90700bf Add debug logging to pipeline_executor 2023-04-23 04:59:46 -06:00
restitux 1882d14bee Change version 2023-04-09 21:41:22 -06:00
restitux 4f7b315f54 Complete support for cron pipeline triggering 2023-04-08 16:28:00 -06:00
restitux fe9e1cac15 Update cron type with ref pattern 2023-04-08 15:18:29 -06:00
restitux a9481fa9bc Add scaffolding for cron trigger support 2023-04-08 14:42:23 -06:00
restitux e1382e50ea Cleanup docker build output before saving (#23) 2023-04-07 20:08:59 -06:00
restitux 7f44e5ed41 Add nil check to runner chan (#22) 2023-04-07 19:52:44 -06:00
restitux bcc53dfbe0 Refactored runner map wrap and add to function 2023-04-07 19:52:37 -06:00
restitux bbf96498aa Add updatePipeline endpoint 2023-04-07 18:44:04 -06:00
restitux 954966db58 Start/restart poll job when created or updated
This currently contains the logic for restarting updated jobs,
but nothing exercises this logic. The logic for starting polling
for a newly created pipeline is implemented.
2023-04-07 18:31:59 -06:00
15 changed files with 627 additions and 59 deletions
+195 -3
View File
@@ -13,7 +13,7 @@ import (
var log = logging.MustGetLogger("cursorius-server")
func createSchema(db database.Database) (graphql.Schema, error) {
func createSchema(db database.Database, pollChan chan uuid.UUID, cronChan chan uuid.UUID) (graphql.Schema, error) {
runnerType := graphql.NewObject(graphql.ObjectConfig{
Name: "Runner",
Description: "A runner available for use inside of a pipeline.",
@@ -88,6 +88,43 @@ func createSchema(db database.Database) (graphql.Schema, error) {
},
})
cronType := graphql.NewObject(graphql.ObjectConfig{
Name: "Cron",
Description: "A cron available for trigger pipeline runs.",
Fields: graphql.Fields{
"id": &graphql.Field{
Type: graphql.NewNonNull(graphql.String),
Description: "The id of the cron.",
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
if cron, ok := p.Source.(database.Cron); ok {
return cron.Id, nil
}
return nil, nil
},
},
"cron": &graphql.Field{
Type: graphql.NewNonNull(graphql.String),
Description: "The cron.",
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
if cron, ok := p.Source.(database.Cron); ok {
return cron.Cron, nil
}
return nil, nil
},
},
"pattern": &graphql.Field{
Type: graphql.NewNonNull(graphql.String),
Description: "A pattern for determining what refs to run the cron on.",
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
if cron, ok := p.Source.(database.Cron); ok {
return cron.Pattern, nil
}
return nil, nil
},
},
},
})
cloneCredentialType := graphql.NewObject(graphql.ObjectConfig{
Name: "CloneCredential",
Description: "A credential for authenticating with the pipeline source host.",
@@ -316,6 +353,16 @@ func createSchema(db database.Database) (graphql.Schema, error) {
return []database.Secret{}, nil
},
},
"crons": &graphql.Field{
Type: graphql.NewNonNull(graphql.NewList(graphql.NewNonNull(cronType))),
Description: "The list of crons for the pipeline.",
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
if pipeline, ok := p.Source.(database.Pipeline); ok {
return db.GetCronsForPipeline(pipeline.Id)
}
return []database.Cron{}, nil
},
},
"webhooks": &graphql.Field{
Type: graphql.NewNonNull(graphql.NewList(graphql.NewNonNull(webhookType))),
Description: "The list of webhooks for the pipeline.",
@@ -454,6 +501,8 @@ func createSchema(db database.Database) (graphql.Schema, error) {
return nil, err
}
pollChan <- pipeline.Id
return pipeline, nil
},
},
@@ -558,6 +607,72 @@ func createSchema(db database.Database) (graphql.Schema, error) {
return runner, nil
},
},
"updatePipeline": &graphql.Field{
Type: pipelineType,
Description: "Create a new pipeline",
Args: graphql.FieldConfigArgument{
"pipelineId": &graphql.ArgumentConfig{
Type: graphql.NewNonNull(graphql.String),
},
"name": &graphql.ArgumentConfig{
Type: graphql.String,
},
"url": &graphql.ArgumentConfig{
Type: graphql.String,
},
"pollInterval": &graphql.ArgumentConfig{
Type: graphql.Int,
},
"cloneCredentialId": &graphql.ArgumentConfig{
Type: graphql.String,
},
},
Resolve: func(params graphql.ResolveParams) (interface{}, error) {
pipelineId, err := uuid.Parse(params.Args["pipelineId"].(string))
if err != nil {
return nil, err
}
var name *string
var url *string
var interval *int
if nameVal, ok := params.Args["name"]; ok {
nameVal := nameVal.(string)
name = &nameVal
} else {
name = nil
}
if urlVal, ok := params.Args["url"]; ok {
urlVal := urlVal.(string)
url = &urlVal
} else {
url = nil
}
if intervalVal, ok := params.Args["pollInterval"]; ok {
intervalVal := intervalVal.(int)
interval = &intervalVal
} else {
interval = nil
}
pipeline, err := db.UpdatePipeline(
pipelineId,
name,
url,
interval,
)
if err != nil {
return nil, err
}
pollChan <- pipeline.Id
return pipeline, nil
},
},
"setPipelineCloneCredential": &graphql.Field{
Type: pipelineType,
Description: "Set the CloneCredential used by a pipeline to clone the source repo",
@@ -667,6 +782,83 @@ func createSchema(db database.Database) (graphql.Schema, error) {
return pipeline, nil
},
},
"addCronToPipeline": &graphql.Field{
Type: pipelineType,
Description: "Add a cron string to trigger the pipeline",
Args: graphql.FieldConfigArgument{
"cron": &graphql.ArgumentConfig{
Type: graphql.NewNonNull(graphql.String),
},
"pattern": &graphql.ArgumentConfig{
Type: graphql.NewNonNull(graphql.String),
},
"pipelineId": &graphql.ArgumentConfig{
Type: graphql.NewNonNull(graphql.String),
},
},
Resolve: func(params graphql.ResolveParams) (interface{}, error) {
cron := params.Args["cron"].(string)
pattern := params.Args["pattern"].(string)
pipelineId, err := uuid.Parse(params.Args["pipelineId"].(string))
if err != nil {
return nil, err
}
cronObj, err := db.AddCronForPipeline(pipelineId, cron, pattern)
if err != nil {
return nil, err
}
pipeline, err := db.GetPipelineById(pipelineId)
if err != nil {
return nil, err
}
cronChan <- cronObj.Id
return pipeline, nil
},
},
"removeCronFromPipeline": &graphql.Field{
Type: pipelineType,
Description: "Remove a cron trigger from a pipeline.",
Args: graphql.FieldConfigArgument{
"cronId": &graphql.ArgumentConfig{
Type: graphql.NewNonNull(graphql.String),
},
"pipelineId": &graphql.ArgumentConfig{
Type: graphql.NewNonNull(graphql.String),
},
},
Resolve: func(params graphql.ResolveParams) (interface{}, error) {
cronId, err := uuid.Parse(params.Args["cronId"].(string))
if err != nil {
return nil, err
}
pipelineId, err := uuid.Parse(params.Args["pipelineId"].(string))
if err != nil {
return nil, err
}
err = db.RemoveCronForPipeline(cronId)
if err != nil {
return nil, err
}
pipeline, err := db.GetPipelineById(pipelineId)
if err != nil {
return nil, err
}
cronChan <- cronId
return pipeline, nil
},
},
},
})
@@ -681,9 +873,9 @@ func createSchema(db database.Database) (graphql.Schema, error) {
return schema, nil
}
func CreateHandler(db database.Database, mux *http.ServeMux) error {
func CreateHandler(db database.Database, pollChan chan uuid.UUID, cronChan chan uuid.UUID, mux *http.ServeMux) error {
schema, err := createSchema(db)
schema, err := createSchema(db, pollChan, cronChan)
if err != nil {
return err
}
+121
View File
@@ -0,0 +1,121 @@
package cron
import (
"fmt"
"regexp"
"git.ohea.xyz/cursorius/server/config"
"git.ohea.xyz/cursorius/server/database"
"git.ohea.xyz/cursorius/server/pipeline_executor"
"github.com/google/uuid"
"github.com/op/go-logging"
"github.com/robfig/cron/v3"
)
var log = logging.MustGetLogger("cursorius-server")
// runPipeline creates a run record for the given pipeline and launches an
// asynchronous pipeline execution for every known ref whose name matches the
// cron's ref pattern. Only setup errors (DB access, bad regex) are returned;
// execution errors are handled inside pipeline_executor.
func runPipeline(db database.Database, conf config.PipelineConf, pipelineId uuid.UUID, cron database.Cron) error {
	run, err := db.CreateRun(pipelineId)
	if err != nil {
		return fmt.Errorf("Could not create run for pipeline with id %v: %w", pipelineId, err)
	}

	refs, err := db.GetPipelineRefs(pipelineId)
	if err != nil {
		return fmt.Errorf("Could not get refs for pipeline with id %v: %w", pipelineId, err)
	}

	useRef, err := regexp.Compile(cron.Pattern)
	if err != nil {
		return fmt.Errorf("Could not compile regex for cron %v for pipeline %v: %w", cron.Id, pipelineId, err)
	}

	// The pipeline record is loop-invariant: fetch it once instead of once
	// per matching ref as the original did.
	pipeline, err := db.GetPipelineById(pipelineId)
	if err != nil {
		return fmt.Errorf("could not get pipeline with id %v from db: %w", pipelineId, err)
	}

	for ref := range refs {
		if !useRef.MatchString(ref) {
			log.Debugf("Skipping ref %v for pipeline %v as regex %v in cron %v doesn't match", ref, pipelineId, cron.Pattern, cron.Id)
			continue
		}
		pe := pipeline_executor.PipelineExecution{
			Pipeline: pipeline,
			Ref:      ref,
			Run:      run,
		}
		go pipeline_executor.ExecutePipeline(pe, db, conf)
	}

	return nil
}
// launchCrons registers a scheduler entry for every cron configured on every
// pipeline, starts the scheduler, then blocks forever servicing updateChan:
// when a cron id arrives, the existing schedule entry (if any) is removed
// and, if the cron still exists in the database, a new entry is registered.
func launchCrons(db database.Database, conf config.PipelineConf, updateChan chan uuid.UUID) {
	pipelines, err := db.GetPipelines()
	if err != nil {
		// go-logging's Errorf is Printf-style: %w is only meaningful in
		// fmt.Errorf, so log errors with %v.
		log.Errorf("Could not get pipelines from database: %v", err)
		return
	}

	cronManager := cron.New()
	cronEntries := make(map[uuid.UUID]cron.EntryID)

	for _, pipeline := range pipelines {
		crons, err := db.GetCronsForPipeline(pipeline.Id)
		if err != nil {
			log.Errorf("Could not get crons for pipeline with id \"%v\": %v", pipeline.Id, err)
			return
		}
		log.Infof("Starting crons for pipeline %v with id %v", pipeline.Name, pipeline.Id)
		for _, cron := range crons {
			// Shadow the loop variables: the AddFunc closures run later, and
			// without per-iteration copies every closure would observe the
			// final loop values (pre-Go 1.22 loop-variable semantics).
			cron := cron
			pipeline := pipeline
			cronEntries[cron.Id], err = cronManager.AddFunc(cron.Cron, func() {
				log.Infof("Triggering cron with value \"%v\" for pipeline with id \"%v\"", cron.Cron, cron.PipelineId)
				err := runPipeline(db, conf, pipeline.Id, cron)
				if err != nil {
					log.Errorf("Could not run pipeline with id \"%v\": %v", pipeline.Id, err)
				}
			})
			if err != nil {
				log.Errorf("Could not configure cron for pipeline with id \"%v\": %v", pipeline.Id, err)
			}
		}
	}
	cronManager.Start()

	for {
		cronUUID := <-updateChan
		if entryId, ok := cronEntries[cronUUID]; ok {
			log.Infof("Canceling cron %v", cronUUID)
			cronManager.Remove(entryId)
			// Drop the stale entry id so a deleted cron does not linger in
			// the map forever.
			delete(cronEntries, cronUUID)
		}
		cron, err := db.GetCronById(cronUUID)
		// if cron no longer exists, don't try to restart it
		// TODO: this squashes other DB errors
		if err != nil {
			continue
		}
		log.Infof("Starting cron %v with value %v for pipeline %v", cron.Id, cron.Cron, cron.PipelineId)
		cronEntries[cron.Id], err = cronManager.AddFunc(cron.Cron, func() {
			err := runPipeline(db, conf, cron.PipelineId, cron)
			if err != nil {
				log.Errorf("Could not setup run pipeline with id \"%v\": %v", cron.PipelineId, err)
			}
		})
		// The original silently discarded this error, leaving an invalid
		// cron expression unreported.
		if err != nil {
			log.Errorf("Could not configure cron %v: %v", cron.Id, err)
		}
	}
}
// StartCrons launches the background cron scheduler goroutine and returns
// the channel callers use to notify it that a cron was created, updated,
// or removed.
func StartCrons(conf config.PipelineConf, db database.Database) chan uuid.UUID {
	updateChan := make(chan uuid.UUID)
	go launchCrons(db, conf, updateChan)
	return updateChan
}
+14 -1
View File
@@ -181,10 +181,23 @@ CREATE TABLE runners (
);
CREATE TABLE pipeline_refs (
name TEXT PRIMARY KEY NOT NULL,
name TEXT NOT NULL,
pipeline_id UUID NOT NULL,
hash TEXT NOT NULL,
PRIMARY KEY(name, pipeline_id),
CONSTRAINT fk_pipeline_id
FOREIGN KEY(pipeline_id)
REFERENCES pipelines(id)
);
CREATE TABLE crons (
id UUID PRIMARY KEY,
pipeline_id UUID NOT NULL,
cron TEXT NOT NULL,
pattern TEXT NOT NULL,
CONSTRAINT fk_pipeline_id
FOREIGN KEY(pipeline_id)
REFERENCES pipelines(id)
+132 -1
View File
@@ -77,6 +77,49 @@ RETURNING id, name, url, poll_interval;`
return pipeline, nil
}
// UpdatePipeline applies a partial update to a pipeline row. Any of name,
// url, and pollInterval may be nil, in which case the current value is kept.
// Returns the updated pipeline record.
//
// NOTE(review): read-then-write is not atomic; concurrent updates can race.
func (db *Database) UpdatePipeline(pipelineId uuid.UUID, name *string, url *string, pollInterval *int) (Pipeline, error) {
	query := `
UPDATE pipelines
SET name=$1, url=$2, poll_interval=$3
WHERE id=$4
RETURNING name, url, poll_interval, clone_credential;`

	// Load the current row so unset (nil) fields fall back to their
	// existing values.
	pipeline, err := db.GetPipelineById(pipelineId)
	if err != nil {
		return pipeline, err
	}

	nameNew := pipeline.Name
	if name != nil {
		nameNew = *name
	}
	urlNew := pipeline.Url
	if url != nil {
		urlNew = *url
	}
	pollIntervalNew := pipeline.PollInterval
	if pollInterval != nil {
		pollIntervalNew = *pollInterval
	}

	err = db.Conn.QueryRow(context.Background(),
		query, nameNew, urlNew, pollIntervalNew, pipelineId).Scan(
		&pipeline.Name, &pipeline.Url, &pipeline.PollInterval, &pipeline.CloneCredential,
	)
	if err != nil {
		// The original message ("Could not add credential to pipeline") was
		// copy-pasted from SetPipelineCloneCredential; report the actual
		// operation instead.
		return pipeline, fmt.Errorf("Could not update pipeline %v: %w", pipelineId, err)
	}
	return pipeline, nil
}
func (db *Database) SetPipelineCloneCredential(pipelineId uuid.UUID, credentialId *uuid.UUID) (Pipeline, error) {
query := `
UPDATE pipelines
@@ -394,7 +437,7 @@ func (db *Database) UpdatePipelineRefs(pipelineId uuid.UUID, refsMap map[string]
query := `
INSERT INTO pipeline_refs(name, pipeline_id, hash)
VALUES($1, $2, $3)
ON CONFLICT (name)
ON CONFLICT (name, pipeline_id)
DO
UPDATE SET hash=$3;`
@@ -627,3 +670,91 @@ RETURNING id, name, token;`
return s, nil
}
// GetCronsForPipeline returns every cron configured for the given pipeline.
// Note: the PipelineId field of the returned crons is left at its zero
// value; the query only selects id, cron, and pattern.
func (db *Database) GetCronsForPipeline(pipelineId uuid.UUID) ([]Cron, error) {
	query := `
SELECT id, cron, pattern
FROM crons
WHERE pipeline_id=$1;`

	var crons []Cron
	rows, err := db.Conn.Query(context.Background(), query, pipelineId)
	if err != nil {
		return crons, fmt.Errorf("Could not get crons for pipeline with id \"%v\": %w", pipelineId, err)
	}
	defer rows.Close()

	for rows.Next() {
		var (
			cron  Cron
			idStr string
		)
		if err := rows.Scan(&idStr, &cron.Cron, &cron.Pattern); err != nil {
			return crons, err
		}
		if cron.Id, err = uuid.Parse(idStr); err != nil {
			return crons, err
		}
		crons = append(crons, cron)
	}
	return crons, nil
}
// GetCronById fetches a single cron row by its id.
func (db *Database) GetCronById(id uuid.UUID) (Cron, error) {
	query := `
SELECT cron, pipeline_id, pattern
FROM crons
WHERE id=$1;`

	result := Cron{Id: id}
	var rawPipelineId string
	if err := db.Conn.QueryRow(context.Background(), query, id).Scan(
		&result.Cron, &rawPipelineId, &result.Pattern,
	); err != nil {
		return result, fmt.Errorf("Could not query database for cron with id %v: %w", id.String(), err)
	}

	var err error
	result.PipelineId, err = uuid.Parse(rawPipelineId)
	return result, err
}
// AddCronForPipeline inserts a new cron trigger for a pipeline and returns
// the stored cron, including its database-generated id.
func (db *Database) AddCronForPipeline(pipelineId uuid.UUID, cronStr string, pattern string) (Cron, error) {
	query := `
INSERT INTO crons (id, pipeline_id, cron, pattern)
VALUES (uuid_generate_v4(), $1, $2, $3)
RETURNING id;`

	result := Cron{
		PipelineId: pipelineId,
		Cron:       cronStr,
		Pattern:    pattern,
	}

	var rawId string
	if err := db.Conn.QueryRow(context.Background(), query, pipelineId, cronStr, pattern).Scan(&rawId); err != nil {
		return result, fmt.Errorf("Could not create cron: %w", err)
	}

	var err error
	result.Id, err = uuid.Parse(rawId)
	return result, err
}
// RemoveCronForPipeline deletes the cron with the given id. Deleting an id
// that does not exist is not treated as an error.
func (db *Database) RemoveCronForPipeline(cronId uuid.UUID) error {
	const query = `
DELETE FROM crons
WHERE id=$1;`
	if _, err := db.Conn.Exec(context.Background(), query, cronId); err != nil {
		return err
	}
	return nil
}
+7
View File
@@ -79,3 +79,10 @@ type Runner struct {
Name string
Token string
}
// Cron represents a scheduled trigger for a pipeline: the pipeline is run on
// the schedule given by Cron for every ref whose name matches Pattern.
type Cron struct {
	Id         uuid.UUID // primary key of the crons table row
	PipelineId uuid.UUID // pipeline this cron triggers
	Cron       string    // cron schedule expression, fed to the scheduler's AddFunc
	Pattern    string    // regex selecting which refs the cron runs for
}
+8 -2
View File
@@ -1,4 +1,10 @@
#!/bin/bash
docker build . -f docker/Dockerfile -t git.ohea.xyz/cursorius/server:latest
docker push git.ohea.xyz/cursorius/server:latest
if [[ -z "${1}" ]]; then
echo "You must provide a docker tag to push to."
else
echo "Building container git.ohea.xyz/cursorius/server:$1"
docker build . -f docker/cursorius/Dockerfile -t "git.ohea.xyz/cursorius/server:$1"
echo "Pushing container git.ohea.xyz/cursorius/server:$1"
docker push "git.ohea.xyz/cursorius/server:$1"
fi
+1
View File
@@ -16,6 +16,7 @@ require (
github.com/jackc/pgx/v5 v5.2.0
github.com/jhoonb/archivex v0.0.0-20201016144719-6a343cdae81d
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7
github.com/robfig/cron/v3 v3.0.1
golang.org/x/net v0.2.0
google.golang.org/protobuf v1.30.0
nhooyr.io/websocket v1.8.7
+2
View File
@@ -1210,6 +1210,8 @@ github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.2/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/robertkrimen/godocdown v0.0.0-20130622164427-0bfa04905481/go.mod h1:C9WhFzY47SzYBIvzFqSvHIR6ROgDo4TtdTuRaOMjF/s=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+9 -1
View File
@@ -10,6 +10,8 @@ import (
"git.ohea.xyz/cursorius/server/pipeline_api"
"git.ohea.xyz/cursorius/server/runnermanager"
"git.ohea.xyz/cursorius/server/webhook"
"github.com/google/uuid"
"github.com/op/go-logging"
"golang.org/x/net/http2"
"golang.org/x/net/http2/h2c"
@@ -23,13 +25,15 @@ func setupHTTPServer(
conf config.PipelineConf,
db database.Database,
runnerManagerChans runnermanager.RunnerManagerChans,
pollChan chan uuid.UUID,
cronChan chan uuid.UUID,
) error {
webhook.CreateWebhookHandler(db, conf, mux)
pipeline_api.CreateHandler(runnerManagerChans.Allocation, runnerManagerChans.Release, mux)
err := admin_api.CreateHandler(db, mux)
err := admin_api.CreateHandler(db, pollChan, cronChan, mux)
if err != nil {
return fmt.Errorf("Could not create admin api handler: %w", err)
}
@@ -52,6 +56,8 @@ func Listen(
conf config.PipelineConf,
db database.Database,
runnerManagerChans runnermanager.RunnerManagerChans,
pollChan chan uuid.UUID,
cronChan chan uuid.UUID,
) error {
err := setupHTTPServer(
@@ -59,6 +65,8 @@ func Listen(
conf,
db,
runnerManagerChans,
pollChan,
cronChan,
)
if err != nil {
return fmt.Errorf("Could not setup http endpoints: %w", err)
+7 -2
View File
@@ -5,6 +5,7 @@ import (
"os"
"git.ohea.xyz/cursorius/server/config"
"git.ohea.xyz/cursorius/server/cron"
"git.ohea.xyz/cursorius/server/database"
"git.ohea.xyz/cursorius/server/listen"
"git.ohea.xyz/cursorius/server/poll"
@@ -26,7 +27,7 @@ func main() {
logging.SetBackend(backendLeveled)
log.Info("Starting cursorius-server v0.1.0")
log.Info("Starting cursorius-server v0.3.0")
configData, err := config.GetConfig()
if err != nil {
@@ -46,7 +47,9 @@ func main() {
return
}
_ = poll.StartPolling(configData.Config.PipelineConf, db)
pollChan := poll.StartPolling(configData.Config.PipelineConf, db)
cronChan := cron.StartCrons(configData.Config.PipelineConf, db)
mux := http.NewServeMux()
@@ -57,5 +60,7 @@ func main() {
configData.Config.PipelineConf,
db,
runnerManagerChans,
pollChan,
cronChan,
))
}
+7 -3
View File
@@ -56,6 +56,12 @@ func (s *ApiServer) GetRunnerFromMap(u uuid.UUID) (*RunnerWrapper, bool) {
return runner, ok
}
// AddRunnerToMap wraps the given runner and stores it in the allocated-runner
// map under the supplied id, holding allocatedRunnersMutex for the duration
// of the map write.
func (s *ApiServer) AddRunnerToMap(id uuid.UUID, r *runnermanager.Runner) {
	s.allocatedRunnersMutex.Lock()
	defer s.allocatedRunnersMutex.Unlock()
	s.allocatedRunners[id] = &RunnerWrapper{runner: r}
}
func (s *ApiServer) GetRunner(
ctx context.Context,
req *connect.Request[apiv2.GetRunnerRequest],
@@ -114,9 +120,7 @@ func (s *ApiServer) GetRunner(
runnerUuid := uuid.New()
s.allocatedRunnersMutex.Lock()
s.allocatedRunners[runnerUuid] = &RunnerWrapper{runner: response.Runner}
s.allocatedRunnersMutex.Unlock()
s.AddRunnerToMap(runnerUuid, response.Runner)
res := connect.NewResponse(&apiv2.GetRunnerResponse{
Id: runnerUuid.String(),
+52 -36
View File
@@ -4,7 +4,6 @@ import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
@@ -35,39 +34,40 @@ type PipelineExecution struct {
}
func ExecutePipeline(pe PipelineExecution, db database.Database, pipelineConf config.PipelineConf) {
idStr := pe.Pipeline.Id.String()
jobFolder := filepath.Join(pipelineConf.WorkingDir, pe.Pipeline.Id.String(), pe.Run.Id.String())
cloneFolder := filepath.Join(jobFolder, "repo")
log.Debugf("Job %v configured with URL \"%v\"", pe.Pipeline.Name, pe.Pipeline.Url)
log.Debugf("Job %v configured with folder \"%v\"", pe.Pipeline.Name, jobFolder)
log.Debugf("%v: URL: %v", idStr, pe.Pipeline.Url)
log.Debugf("%v: Folder: %v", idStr, jobFolder)
err := os.RemoveAll(jobFolder)
if err != nil {
log.Errorf("could not delete existing folder %v", jobFolder)
log.Errorf("%v: could not delete existing folder %v", idStr, jobFolder)
return
}
err = os.MkdirAll(cloneFolder, 0755)
if err != nil {
log.Errorf("could not create working directory for job %v: %w", pe.Pipeline.Name, err)
log.Errorf("%v: could not create working directory: %w", idStr, pe.Pipeline.Name, err)
return
}
log.Infof("Cloning source from URL %v", pe.Pipeline.Url)
log.Infof("%v: cloning source from URL %v", idStr, pe.Pipeline.Url)
var auth transport.AuthMethod
if pe.Pipeline.CloneCredential != nil {
credential, err := db.GetCloneCredentialById(*pe.Pipeline.CloneCredential)
if err != nil {
log.Errorf("could not get credenital from db: %v", err)
log.Errorf("%v: could not get credenital from db: %v", idStr, err)
return
}
switch credential.Type {
case "USER_PASS":
log.Debugf("job %v configured to use credential %v", pe.Pipeline.Name, credential.Name)
log.Debugf("%v: credential %v configured", idStr, credential.Name)
auth = transport.AuthMethod(&http.BasicAuth{
Username: credential.Username,
Password: credential.Secret,
@@ -75,12 +75,12 @@ func ExecutePipeline(pe PipelineExecution, db database.Database, pipelineConf co
case "SSH_KEY":
publicKeys, err := ssh.NewPublicKeys(credential.Username, []byte(credential.Secret), "")
if err != nil {
log.Errorf("could not parse credential %v", credential.Name)
log.Errorf("%v: could not parse credential %v: %v", idStr, credential.Name, err)
return
}
auth = transport.AuthMethod(publicKeys)
default:
log.Errorf("unsupported credential type %v", credential.Type)
log.Errorf("%v: unsupported credential type %v", idStr, credential.Type)
return
}
} else {
@@ -92,40 +92,46 @@ func ExecutePipeline(pe PipelineExecution, db database.Database, pipelineConf co
Auth: auth,
})
if err != nil {
log.Errorf("could not clone repo: %v", err)
log.Errorf("%v: could not clone repo: %v", idStr, err)
return
}
cli, err := client.NewClientWithOpts(client.FromEnv)
if err != nil {
log.Errorf("Could not create docker client: %w", err)
log.Errorf("%v: could not create docker client: %w", idStr, err)
return
}
log.Info("Source cloned successfully")
log.Infof("%v: source cloned successfully", idStr)
ctx := context.Background()
log.Info("Building container")
log.Debugf("%v: tarring up job source", idStr)
log.Debugf("%v: creating tarfile", idStr)
tarFile := filepath.Join(jobFolder, "archive.tar")
tar := new(archivex.TarFile)
err = tar.Create(tarFile)
if err != nil {
log.Errorf("could not create tarfile: %w", err)
log.Errorf("%v: could not create tarfile: %w", idStr, err)
return
}
log.Debugf("%v: adding files to tarfile", idStr)
err = tar.AddAll(cloneFolder, false)
if err != nil {
log.Errorf("could not add repo to tarfile: %w", err)
log.Errorf("%v: could not add repo to tarfile: %w", idStr, err)
return
}
log.Debugf("%v: saving tarfile tarfile", idStr)
err = tar.Close()
if err != nil {
log.Errorf("could not close tarfile: %w", err)
log.Errorf("%v: could not close tarfile: %w", idStr, err)
return
}
log.Debugf("%v: job source tarred", idStr)
log.Infof("%v: building container", idStr)
dockerBuildContext, err := os.Open(tarFile)
defer dockerBuildContext.Close()
@@ -137,29 +143,29 @@ func ExecutePipeline(pe PipelineExecution, db database.Database, pipelineConf co
Dockerfile: ".cursorius/Dockerfile",
})
if err != nil {
log.Errorf("could not build container: %w", err)
log.Errorf("%v: could not build container: %w", idStr, err)
return
}
response, err := ioutil.ReadAll(buildResponse.Body)
log.Debugf("%v: reading build output from docker daemon", idStr)
err = db.UpdateRunBuildOutput(pe.Run.Id, cleanupBuildOutput(buildResponse.Body))
if err != nil {
log.Errorf("could no read build response: %w", err)
log.Errorf("%v: could not update build output for run: %w", idStr, err)
return
}
err = db.UpdateRunBuildOutput(pe.Run.Id, string(response))
if err != nil {
log.Errorf("could not update build output for run: %w", err)
return
}
log.Debugf("%v: build output read from docker daemon", idStr)
err = buildResponse.Body.Close()
if err != nil {
log.Errorf("Could not close build response body: %w", err)
log.Errorf("%v: could not close build response body: %w", idStr, err)
return
}
log.Info("Image built sucessfully")
log.Debugf("%v: build response closed", idStr)
log.Infof("%v: image built sucessfully", idStr)
hostConfig := container.HostConfig{}
@@ -203,7 +209,7 @@ func ExecutePipeline(pe PipelineExecution, db database.Database, pipelineConf co
// load secrets into environment
secrets, err := db.GetSecretsForPipeline(pe.Pipeline.Id)
if err != nil {
log.Errorf("Could not get secrets for pipeline", err)
log.Errorf("%v: could not get secrets for pipeline", idStr, err)
return
}
@@ -212,6 +218,8 @@ func ExecutePipeline(pe PipelineExecution, db database.Database, pipelineConf co
env = append(env, fmt.Sprintf("%v=%v", strings.ToUpper(secret.Name), secret.Secret))
}
log.Debugf("%v: creating container", idStr)
resp, err := cli.ContainerCreate(ctx,
&container.Config{
Image: imageName,
@@ -223,41 +231,49 @@ func ExecutePipeline(pe PipelineExecution, db database.Database, pipelineConf co
nil, nil, "",
)
if err != nil {
log.Errorf("could not create container: %w", err)
log.Errorf("%v: could not create container: %w", idStr, err)
return
}
log.Info("Launching container")
log.Info("%v: starting container", idStr)
if err := cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
log.Errorf("could not start container: %v", err)
log.Errorf("%v: could not start container: %v", idStr, err)
return
}
log.Debugf("%v: container started", idStr)
log.Debugf("%v: waiting on container", idStr)
statusCh, errCh := cli.ContainerWait(ctx, resp.ID, container.WaitConditionNotRunning)
select {
case err := <-errCh:
if err != nil {
log.Errorf("container returned error: %v", err)
log.Errorf("%v: container returned error: %v", idStr, err)
return
}
case okBody := <-statusCh:
if okBody.Error != nil {
log.Errorf("Could not wait on container: %v", err)
log.Errorf("%v: could not wait on container: %v", idStr, err)
return
} else {
log.Debugf("Container finished running with return code: %v", okBody.StatusCode)
log.Debugf("%v: container finished running with return code: %v", idStr, okBody.StatusCode)
pe.Run.Result = &okBody.StatusCode
}
}
pe.Run.InProgress = false
log.Debugf("%v: getting container logs", idStr)
out, err := cli.ContainerLogs(ctx, resp.ID, types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true})
if err != nil {
log.Errorf("could not get container logs: %w", err)
log.Errorf("%v: could not get container logs: %w", idStr, err)
return
}
log.Debugf("%v: gotcontainer logs", idStr)
var stdOut bytes.Buffer
var stdErr bytes.Buffer
+25
View File
@@ -0,0 +1,25 @@
package pipeline_executor
import (
	"bufio"
	"encoding/json"
	"io"
	"strings"
)
func cleanupBuildOutput(input io.ReadCloser) string {
output := ""
scanner := bufio.NewScanner(input)
for scanner.Scan() {
var log map[string]any
json.Unmarshal(scanner.Bytes(), &log)
if logVar, ok := log["stream"]; ok {
if log, ok := logVar.(string); ok {
output += log
}
}
}
return output
}
+43 -8
View File
@@ -1,17 +1,18 @@
package poll
import (
"context"
"time"
"github.com/google/uuid"
"github.com/op/go-logging"
"git.ohea.xyz/cursorius/server/config"
"git.ohea.xyz/cursorius/server/database"
"git.ohea.xyz/cursorius/server/pipeline_executor"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/storage/memory"
"github.com/google/uuid"
"github.com/op/go-logging"
)
var log = logging.MustGetLogger("cursorius-server")
@@ -26,13 +27,27 @@ type tag struct {
commitHash string
}
func pollJob(pipeline database.Pipeline, pipelineConf config.PipelineConf, db database.Database) {
func pollJob(ctx context.Context, pipeline database.Pipeline, pipelineConf config.PipelineConf, db database.Database) {
firstScan := true
for {
// Don't sleep on first scan to ease testing
// TODO: this should be replaced with a script that mocks a webhook
if !firstScan {
time.Sleep(time.Duration(pipeline.PollInterval) * time.Second)
ctx, cancel := context.WithTimeout(ctx, time.Duration(pipeline.PollInterval)*time.Second)
select {
case <-ctx.Done():
switch ctx.Err() {
case context.Canceled:
log.Infof("Polling for pipeline %v canceled, stopping", pipeline.Name)
cancel()
return
}
}
cancel()
log.Infof("Polling repo %v", pipeline.Name)
} else {
firstScan = false
@@ -43,6 +58,10 @@ func pollJob(pipeline database.Pipeline, pipelineConf config.PipelineConf, db da
log.Errorf("Could not get pipeline refs from db: %v", err)
return
}
log.Debugf("Got pipeline hashs for repo %v (id: %v)", pipeline.Name, pipeline.Id)
for refName, hash := range prevRefs {
log.Debugf("%v: %v: %v", pipeline.Id, refName, hash)
}
repo, err := git.Clone(memory.NewStorage(), nil, &git.CloneOptions{
URL: pipeline.Url,
@@ -65,6 +84,7 @@ func pollJob(pipeline database.Pipeline, pipelineConf config.PipelineConf, db da
prevRef, ok := prevRefs[branch.Name().String()]
if ok {
if branch.Hash().String() != prevRef {
log.Debugf("Branch %v in repo %v (id: %v) has hash %v, which does not match the previously seen hash of %v", branch.Name().String(), pipeline.Name, pipeline.Id, branch.Hash().String(), prevRef)
log.Debugf("Queuing job for branch %v in repo %v (id: %v) with hash %v", branch.Name().String(), pipeline.Name, pipeline.Id, branch.Hash().String())
prevRefs[branch.Name().String()] = branch.Hash().String()
refsToRunFor = append(refsToRunFor, branch.Name().String())
@@ -136,11 +156,17 @@ func launchPollJobs(conf config.PipelineConf, db database.Database, pollChan cha
return
}
pipelineCancelations := make(map[uuid.UUID]context.CancelFunc)
for _, pipeline := range pipelines {
if pipeline.PollInterval == 0 {
continue
} else {
go pollJob(pipeline, conf, db)
ctx, cancel := context.WithCancel(context.Background())
pipelineCancelations[pipeline.Id] = cancel
log.Infof("Starting polling for pipeline %v with id %v", pipeline.Name, pipeline.Id)
go pollJob(ctx, pipeline, conf, db)
}
}
@@ -151,8 +177,17 @@ func launchPollJobs(conf config.PipelineConf, db database.Database, pollChan cha
log.Errorf("Could not get pipeline with id \"%v\" from database: %v", err)
continue
}
// TODO: stop existing polling process for given uuid
go pollJob(pipeline, conf, db)
// Cancel existing polling job if it exists
if cancelFunc, ok := pipelineCancelations[pipeline.Id]; ok {
cancelFunc()
}
// Start new polling job
log.Infof("Starting polling for pipeline %v with id %v", pipeline.Name, pipeline.Id)
ctx, cancel := context.WithCancel(context.Background())
pipelineCancelations[pipeline.Id] = cancel
go pollJob(ctx, pipeline, conf, db)
}
}
+4 -2
View File
@@ -45,6 +45,10 @@ func (r *Runner) Id() uuid.UUID {
func (r *Runner) RunCommand(cmd string, args []string) (returnCode int64, stdout string, stderr string, err error) {
if r.conn == nil {
return 0, "", "", fmt.Errorf("runner with id %v has nil conn, THIS IS A BUG", r.id)
}
// Write RunCommand message to client
serverToRunnerMsg := &runner_api.ServerToRunnerMsg{
Msg: &runner_api.ServerToRunnerMsg_RunCommandMsg{
@@ -97,8 +101,6 @@ func (r *Runner) sendProtoStruct(p protoreflect.ProtoMessage) error {
ctx := context.Background()
log.Debugf("r.conn: %p", r.conn)
if err := r.conn.Write(ctx, websocket.MessageBinary, protoOut); err != nil {
return fmt.Errorf("Could not send proto to websocket: %w", err)
}