From 8ac90700bf31eb57b7252404a5d67162622bd305 Mon Sep 17 00:00:00 2001 From: restitux Date: Sun, 23 Apr 2023 04:59:46 -0600 Subject: [PATCH] Add debug logging to pipeline_executor --- pipeline_executor/pipeline_executor.go | 81 +++++++++++++++++--------- 1 file changed, 52 insertions(+), 29 deletions(-) diff --git a/pipeline_executor/pipeline_executor.go b/pipeline_executor/pipeline_executor.go index d852871..84690bc 100644 --- a/pipeline_executor/pipeline_executor.go +++ b/pipeline_executor/pipeline_executor.go @@ -34,39 +34,40 @@ type PipelineExecution struct { } func ExecutePipeline(pe PipelineExecution, db database.Database, pipelineConf config.PipelineConf) { + idStr := pe.Pipeline.Id.String() + jobFolder := filepath.Join(pipelineConf.WorkingDir, pe.Pipeline.Id.String(), pe.Run.Id.String()) cloneFolder := filepath.Join(jobFolder, "repo") - log.Debugf("Job %v configured with URL \"%v\"", pe.Pipeline.Name, pe.Pipeline.Url) - - log.Debugf("Job %v configured with folder \"%v\"", pe.Pipeline.Name, jobFolder) + log.Debugf("%v: URL: %v", idStr, pe.Pipeline.Url) + log.Debugf("%v: Folder: %v", idStr, jobFolder) err := os.RemoveAll(jobFolder) if err != nil { - log.Errorf("could not delete existing folder %v", jobFolder) + log.Errorf("%v: could not delete existing folder %v", idStr, jobFolder) return } err = os.MkdirAll(cloneFolder, 0755) if err != nil { - log.Errorf("could not create working directory for job %v: %w", pe.Pipeline.Name, err) + log.Errorf("%v: could not create working directory: %v", idStr, err) return } - log.Infof("Cloning source from URL %v", pe.Pipeline.Url) + log.Infof("%v: cloning source from URL %v", idStr, pe.Pipeline.Url) var auth transport.AuthMethod if pe.Pipeline.CloneCredential != nil { credential, err := db.GetCloneCredentialById(*pe.Pipeline.CloneCredential) if err != nil { - log.Errorf("could not get credenital from db: %v", err) + log.Errorf("%v: could not get credential from db: %v", idStr, err) return } switch 
credential.Type { case "USER_PASS": - log.Debugf("job %v configured to use credential %v", pe.Pipeline.Name, credential.Name) + log.Debugf("%v: credential %v configured", idStr, credential.Name) auth = transport.AuthMethod(&http.BasicAuth{ Username: credential.Username, Password: credential.Secret, @@ -74,12 +75,12 @@ func ExecutePipeline(pe PipelineExecution, db database.Database, pipelineConf co case "SSH_KEY": publicKeys, err := ssh.NewPublicKeys(credential.Username, []byte(credential.Secret), "") if err != nil { - log.Errorf("could not parse credential %v", credential.Name) + log.Errorf("%v: could not parse credential %v: %v", idStr, credential.Name, err) return } auth = transport.AuthMethod(publicKeys) default: - log.Errorf("unsupported credential type %v", credential.Type) + log.Errorf("%v: unsupported credential type %v", idStr, credential.Type) return } } else { @@ -91,40 +92,46 @@ func ExecutePipeline(pe PipelineExecution, db database.Database, pipelineConf co Auth: auth, }) if err != nil { - log.Errorf("could not clone repo: %v", err) + log.Errorf("%v: could not clone repo: %v", idStr, err) return } cli, err := client.NewClientWithOpts(client.FromEnv) if err != nil { - log.Errorf("Could not create docker client: %w", err) + log.Errorf("%v: could not create docker client: %v", idStr, err) return } - log.Info("Source cloned successfully") + log.Infof("%v: source cloned successfully", idStr) ctx := context.Background() - log.Info("Building container") + log.Debugf("%v: tarring up job source", idStr) + log.Debugf("%v: creating tarfile", idStr) tarFile := filepath.Join(jobFolder, "archive.tar") tar := new(archivex.TarFile) err = tar.Create(tarFile) if err != nil { - log.Errorf("could not create tarfile: %w", err) + log.Errorf("%v: could not create tarfile: %v", idStr, err) return } + log.Debugf("%v: adding files to tarfile", idStr) err = tar.AddAll(cloneFolder, false) if err != nil { - log.Errorf("could not add repo to tarfile: %w", err) + log.Errorf("%v: could 
not add repo to tarfile: %v", idStr, err) return } + log.Debugf("%v: saving tarfile", idStr) err = tar.Close() if err != nil { - log.Errorf("could not close tarfile: %w", err) + log.Errorf("%v: could not close tarfile: %v", idStr, err) return } + log.Debugf("%v: job source tarred", idStr) + + log.Infof("%v: building container", idStr) dockerBuildContext, err := os.Open(tarFile) defer dockerBuildContext.Close() @@ -136,23 +143,29 @@ func ExecutePipeline(pe PipelineExecution, db database.Database, pipelineConf co Dockerfile: ".cursorius/Dockerfile", }) if err != nil { - log.Errorf("could not build container: %w", err) + log.Errorf("%v: could not build container: %v", idStr, err) return } + log.Debugf("%v: reading build output from docker daemon", idStr) + err = db.UpdateRunBuildOutput(pe.Run.Id, cleanupBuildOutput(buildResponse.Body)) if err != nil { - log.Errorf("could not update build output for run: %w", err) + log.Errorf("%v: could not update build output for run: %v", idStr, err) return } + log.Debugf("%v: build output read from docker daemon", idStr) + err = buildResponse.Body.Close() if err != nil { - log.Errorf("Could not close build response body: %w", err) + log.Errorf("%v: could not close build response body: %v", idStr, err) return } - log.Info("Image built sucessfully") + log.Debugf("%v: build response closed", idStr) + + log.Infof("%v: image built successfully", idStr) hostConfig := container.HostConfig{} @@ -196,7 +209,7 @@ func ExecutePipeline(pe PipelineExecution, db database.Database, pipelineConf co // load secrets into environment secrets, err := db.GetSecretsForPipeline(pe.Pipeline.Id) if err != nil { - log.Errorf("Could not get secrets for pipeline", err) + log.Errorf("%v: could not get secrets for pipeline: %v", idStr, err) return } @@ -205,6 +218,8 @@ func ExecutePipeline(pe PipelineExecution, db database.Database, pipelineConf co env = append(env, fmt.Sprintf("%v=%v", strings.ToUpper(secret.Name), secret.Secret)) } + log.Debugf("%v: 
creating container", idStr) + resp, err := cli.ContainerCreate(ctx, &container.Config{ Image: imageName, @@ -216,41 +231,49 @@ func ExecutePipeline(pe PipelineExecution, db database.Database, pipelineConf co nil, nil, "", ) if err != nil { - log.Errorf("could not create container: %w", err) + log.Errorf("%v: could not create container: %v", idStr, err) return } - log.Info("Launching container") + log.Infof("%v: starting container", idStr) if err := cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil { - log.Errorf("could not start container: %v", err) + log.Errorf("%v: could not start container: %v", idStr, err) return } + + log.Debugf("%v: container started", idStr) + log.Debugf("%v: waiting on container", idStr) + statusCh, errCh := cli.ContainerWait(ctx, resp.ID, container.WaitConditionNotRunning) select { case err := <-errCh: if err != nil { - log.Errorf("container returned error: %v", err) + log.Errorf("%v: container returned error: %v", idStr, err) return } case okBody := <-statusCh: if okBody.Error != nil { - log.Errorf("Could not wait on container: %v", err) + log.Errorf("%v: could not wait on container: %v", idStr, okBody.Error) return } else { - log.Debugf("Container finished running with return code: %v", okBody.StatusCode) + log.Debugf("%v: container finished running with return code: %v", idStr, okBody.StatusCode) pe.Run.Result = &okBody.StatusCode } } pe.Run.InProgress = false + log.Debugf("%v: getting container logs", idStr) + out, err := cli.ContainerLogs(ctx, resp.ID, types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true}) if err != nil { - log.Errorf("could not get container logs: %w", err) + log.Errorf("%v: could not get container logs: %v", idStr, err) return } + log.Debugf("%v: got container logs", idStr) + var stdOut bytes.Buffer var stdErr bytes.Buffer