diff --git a/cli/cmd/digger/main.go b/cli/cmd/digger/main.go
index 5116ae86d..0e20441ba 100644
--- a/cli/cmd/digger/main.go
+++ b/cli/cmd/digger/main.go
@@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"fmt"
+ "github.com/diggerhq/digger/cli/pkg/comment_updater"
core_drift "github.com/diggerhq/digger/cli/pkg/core/drift"
core_reporting "github.com/diggerhq/digger/cli/pkg/core/reporting"
"github.com/diggerhq/digger/cli/pkg/drift"
@@ -140,7 +141,9 @@ func gitHubCI(lock core_locking.Lock, policyChecker core_policy.Checker, backend
reportErrorAndExit(githubActor, fmt.Sprintf("Failed to report job status to backend. Exiting. %s", err), 4)
}
- digger.UpdateStatusComment(serializedBatch.Jobs, serializedBatch.PrNumber, &githubPrService, commentId64)
+ commentUpdater := comment_updater.BasicCommentUpdater{}
+
+ commentUpdater.UpdateComment(serializedBatch.Jobs, serializedBatch.PrNumber, &githubPrService, commentId64)
digger.UpdateAggregateStatus(serializedBatch, &githubPrService)
planStorage := newPlanStorage(ghToken, repoOwner, repositoryName, githubActor, job.PullRequestNumber)
@@ -158,7 +161,7 @@ func gitHubCI(lock core_locking.Lock, policyChecker core_policy.Checker, backend
log.Printf("Failed to report job status to backend. %v", reportingError)
reportErrorAndExit(githubActor, fmt.Sprintf("Failed run commands. %s", err), 5)
}
- digger.UpdateStatusComment(serializedBatch.Jobs, serializedBatch.PrNumber, &githubPrService, commentId64)
+ commentUpdater.UpdateComment(serializedBatch.Jobs, serializedBatch.PrNumber, &githubPrService, commentId64)
digger.UpdateAggregateStatus(serializedBatch, &githubPrService)
reportErrorAndExit(githubActor, fmt.Sprintf("Failed to run commands. %s", err), 5)
@@ -166,13 +169,13 @@ func gitHubCI(lock core_locking.Lock, policyChecker core_policy.Checker, backend
jobs := []orchestrator.Job{orchestrator.JsonToJob(job)}
- _, _, err = digger.RunJobs(jobs, &githubPrService, &githubPrService, lock, reporter, planStorage, policyChecker, backendApi, inputs.Id, true, commentId64, currentDir)
+ _, _, err = digger.RunJobs(jobs, &githubPrService, &githubPrService, lock, reporter, planStorage, policyChecker, commentUpdater, backendApi, inputs.Id, true, commentId64, currentDir)
if err != nil {
serializedBatch, reportingError := backendApi.ReportProjectJobStatus(repoName, job.ProjectName, inputs.Id, "failed", time.Now(), nil)
if reportingError != nil {
reportErrorAndExit(githubActor, fmt.Sprintf("Failed run commands. %s", err), 5)
}
- digger.UpdateStatusComment(serializedBatch.Jobs, serializedBatch.PrNumber, &githubPrService, commentId64)
+ commentUpdater.UpdateComment(serializedBatch.Jobs, serializedBatch.PrNumber, &githubPrService, commentId64)
digger.UpdateAggregateStatus(serializedBatch, &githubPrService)
reportErrorAndExit(githubActor, fmt.Sprintf("Failed to run commands. %s", err), 5)
}
@@ -349,7 +352,7 @@ func gitHubCI(lock core_locking.Lock, policyChecker core_policy.Checker, backend
jobs = digger.SortedCommandsByDependency(jobs, &dependencyGraph)
- allAppliesSuccessful, atLeastOneApply, err := digger.RunJobs(jobs, &githubPrService, &githubPrService, lock, reporter, planStorage, policyChecker, backendApi, "", false, 0, currentDir)
+ allAppliesSuccessful, atLeastOneApply, err := digger.RunJobs(jobs, &githubPrService, &githubPrService, lock, reporter, planStorage, policyChecker, comment_updater.NoopCommentUpdater{}, backendApi, "", false, 0, currentDir)
if err != nil {
reportErrorAndExit(githubActor, fmt.Sprintf("Failed to run commands. %s", err), 8)
// aggregate status checks: failure
@@ -466,7 +469,7 @@ func gitLabCI(lock core_locking.Lock, policyChecker core_policy.Checker, backend
ReportStrategy: reportingStrategy,
}
jobs = digger.SortedCommandsByDependency(jobs, &dependencyGraph)
- allAppliesSuccess, atLeastOneApply, err := digger.RunJobs(jobs, gitlabService, gitlabService, lock, reporter, planStorage, policyChecker, backendApi, "", false, 0, currentDir)
+ allAppliesSuccess, atLeastOneApply, err := digger.RunJobs(jobs, gitlabService, gitlabService, lock, reporter, planStorage, policyChecker, comment_updater.NoopCommentUpdater{}, backendApi, "", false, 0, currentDir)
if err != nil {
log.Printf("failed to execute command, %v", err)
@@ -554,7 +557,7 @@ func azureCI(lock core_locking.Lock, policyChecker core_policy.Checker, backendA
ReportStrategy: reportingStrategy,
}
jobs = digger.SortedCommandsByDependency(jobs, &dependencyGraph)
- allAppliesSuccess, atLeastOneApply, err := digger.RunJobs(jobs, azureService, azureService, lock, reporter, planStorage, policyChecker, backendApi, "", false, 0, currentDir)
+ allAppliesSuccess, atLeastOneApply, err := digger.RunJobs(jobs, azureService, azureService, lock, reporter, planStorage, policyChecker, comment_updater.NoopCommentUpdater{}, backendApi, "", false, 0, currentDir)
if err != nil {
reportErrorAndExit(parsedAzureContext.BaseUrl, fmt.Sprintf("Failed to run commands. %s", err), 8)
}
@@ -800,7 +803,7 @@ func bitbucketCI(lock core_locking.Lock, policyChecker core_policy.Checker, back
jobs = digger.SortedCommandsByDependency(jobs, &dependencyGraph)
- _, _, err = digger.RunJobs(jobs, &bitbucketService, &bitbucketService, lock, &reporter, planStorage, policyChecker, backendApi, "", false, 0, currentDir)
+ _, _, err = digger.RunJobs(jobs, &bitbucketService, &bitbucketService, lock, &reporter, planStorage, policyChecker, comment_updater.NoopCommentUpdater{}, backendApi, "", false, 0, currentDir)
if err != nil {
reportErrorAndExit(actor, fmt.Sprintf("Failed to run commands. %s", err), 8)
}
@@ -837,7 +840,7 @@ func exec(actor string, projectName string, repoNamespace string, command string
}
jobs = digger.SortedCommandsByDependency(jobs, &dependencyGraph)
- _, _, err = digger.RunJobs(jobs, prService, orgService, lock, reporter, planStorage, policyChecker, backendApi, "", false, 123, currentDir)
+ _, _, err = digger.RunJobs(jobs, prService, orgService, lock, reporter, planStorage, policyChecker, comment_updater.NoopCommentUpdater{}, backendApi, "", false, 123, currentDir)
}
/*
diff --git a/cli/pkg/comment_updater/updater.go b/cli/pkg/comment_updater/updater.go
new file mode 100644
index 000000000..a4a7a1598
--- /dev/null
+++ b/cli/pkg/comment_updater/updater.go
@@ -0,0 +1,54 @@
+package comment_updater
+
+import (
+	"encoding/json"
+	"fmt"
+	"github.com/diggerhq/digger/libs/orchestrator"
+	"github.com/diggerhq/digger/libs/orchestrator/scheduler"
+	"log"
+)
+
+// CommentUpdater posts per-job status updates to an existing pull request comment.
+type CommentUpdater interface {
+	UpdateComment(jobs []scheduler.SerializedJob, prNumber int, prService orchestrator.PullRequestService, prCommentId int64) error
+}
+
+// BasicCommentUpdater renders a simple per-job status list (status emoji,
+// project name, workflow run link, resources summary) into the PR comment.
+type BasicCommentUpdater struct {
+}
+
+// UpdateComment rewrites the comment identified by prCommentId with one status
+// line per serialized job. Returns an error if a job cannot be deserialized
+// or the comment edit fails.
+func (b BasicCommentUpdater) UpdateComment(jobs []scheduler.SerializedJob, prNumber int, prService orchestrator.PullRequestService, prCommentId int64) error {
+	message := ":construction_worker: Jobs status:\n\n"
+	for _, job := range jobs {
+		var jobSpec orchestrator.JobJson
+		err := json.Unmarshal(job.JobString, &jobSpec)
+		if err != nil {
+			log.Printf("Failed to unmarshal serialized job, %v", err)
+			return fmt.Errorf("failed to unmarshal serialized job: %v", err)
+		}
+		isPlan := jobSpec.IsPlan()
+
+		// NOTE(review): previous format strings were malformed ("\n" with an
+		// extra arg; 5 args for 4 verbs) and emitted %!(EXTRA ...) noise.
+		message = message + fmt.Sprintf("%v **%v** <a href='%v'>%v</a>%v\n", job.Status.ToEmoji(), jobSpec.ProjectName, *job.WorkflowRunUrl, job.Status.ToString(), job.ResourcesSummaryString(isPlan))
+	}
+
+	err := prService.EditComment(prNumber, prCommentId, message)
+	if err != nil {
+		log.Printf("Failed to update PR comment: %v", err)
+		return fmt.Errorf("failed to update PR comment: %v", err)
+	}
+	return nil
+}
+
+// NoopCommentUpdater satisfies CommentUpdater without touching the PR; used
+// by CI entrypoints that do not report per-job status via comments.
+type NoopCommentUpdater struct {
+}
+
+func (b NoopCommentUpdater) UpdateComment(jobs []scheduler.SerializedJob, prNumber int, prService orchestrator.PullRequestService, prCommentId int64) error {
+	return nil
+}
diff --git a/cli/pkg/digger/digger.go b/cli/pkg/digger/digger.go
index b85280eb8..d9a434522 100644
--- a/cli/pkg/digger/digger.go
+++ b/cli/pkg/digger/digger.go
@@ -3,6 +3,7 @@ package digger
import (
"errors"
"fmt"
+ "github.com/diggerhq/digger/cli/pkg/comment_updater"
"log"
"os"
"path"
@@ -73,6 +74,7 @@ func RunJobs(
reporter core_reporting.Reporter,
planStorage storage.PlanStorage,
policyChecker policy.Checker,
+ commentUpdater comment_updater.CommentUpdater,
backendApi backend.Api,
batchId string,
reportFinalStatusToBackend bool,
@@ -145,7 +147,7 @@ func RunJobs(
return false, false, fmt.Errorf("error while running command: %v", err)
}
- err = UpdateStatusComment(batchResult.Jobs, prNumber, prService, prCommentId)
+ err = commentUpdater.UpdateComment(batchResult.Jobs, prNumber, prService, prCommentId)
if err != nil {
log.Printf("error Updating status comment: %v.\n", err)
return false, false, err
diff --git a/cli/pkg/digger/io.go b/cli/pkg/digger/io.go
index 67dc0bc42..ef9f5ceb0 100644
--- a/cli/pkg/digger/io.go
+++ b/cli/pkg/digger/io.go
@@ -4,31 +4,9 @@ import (
"fmt"
"github.com/diggerhq/digger/libs/orchestrator"
"github.com/diggerhq/digger/libs/orchestrator/scheduler"
- "github.com/goccy/go-json"
"log"
)
-func UpdateStatusComment(jobs []scheduler.SerializedJob, prNumber int, prService orchestrator.PullRequestService, prCommentId int64) error {
-
- message := ":construction_worker: Jobs status:\n\n"
- for _, job := range jobs {
- var jobSpec orchestrator.JobJson
- err := json.Unmarshal(job.JobString, &jobSpec)
- if err != nil {
- log.Printf("Failed to convert unmarshall Serialized job, %v", err)
- return fmt.Errorf("Failed to unmarshall serialized job: %v", err)
- }
- isPlan := jobSpec.IsPlan()
-
- message = message + fmt.Sprintf("\n", job.ProjectName)
- message = message + fmt.Sprintf("%v **%v** %v%v\n", job.Status.ToEmoji(), jobSpec.ProjectName, *job.WorkflowRunUrl, job.Status.ToString(), job.ResourcesSummaryString(isPlan))
- message = message + fmt.Sprintf("\n", job.ProjectName)
- }
-
- prService.EditComment(prNumber, prCommentId, message)
- return nil
-}
-
func UpdateAggregateStatus(batch *scheduler.SerializedBatch, prService orchestrator.PullRequestService) error {
// TODO: Introduce batch-level
isPlan, err := batch.IsPlan()
diff --git a/ee/cli/cmd/digger/main.go b/ee/cli/cmd/digger/main.go
index 2c09e0b52..d2aa1a999 100644
--- a/ee/cli/cmd/digger/main.go
+++ b/ee/cli/cmd/digger/main.go
@@ -4,9 +4,11 @@ import (
"context"
"encoding/json"
"fmt"
+ "github.com/diggerhq/digger/cli/pkg/comment_updater"
core_drift "github.com/diggerhq/digger/cli/pkg/core/drift"
core_reporting "github.com/diggerhq/digger/cli/pkg/core/reporting"
"github.com/diggerhq/digger/cli/pkg/drift"
+ comment_updater2 "github.com/diggerhq/digger/ee/cli/pkg/comment_updater"
ee_drift "github.com/diggerhq/digger/ee/cli/pkg/drift"
"log"
"net/http"
@@ -141,7 +143,8 @@ func gitHubCI(lock core_locking.Lock, policyChecker core_policy.Checker, backend
reportErrorAndExit(githubActor, fmt.Sprintf("Failed to report job status to backend. Exiting. %s", err), 4)
}
- digger.UpdateStatusComment(serializedBatch.Jobs, serializedBatch.PrNumber, &githubPrService, commentId64)
+ commentUpdater := comment_updater2.AdvancedCommentUpdater{}
+ commentUpdater.UpdateComment(serializedBatch.Jobs, serializedBatch.PrNumber, &githubPrService, commentId64)
digger.UpdateAggregateStatus(serializedBatch, &githubPrService)
planStorage := newPlanStorage(ghToken, repoOwner, repositoryName, githubActor, job.PullRequestNumber)
@@ -159,7 +162,7 @@ func gitHubCI(lock core_locking.Lock, policyChecker core_policy.Checker, backend
log.Printf("Failed to report job status to backend. %v", reportingError)
reportErrorAndExit(githubActor, fmt.Sprintf("Failed run commands. %s", err), 5)
}
- digger.UpdateStatusComment(serializedBatch.Jobs, serializedBatch.PrNumber, &githubPrService, commentId64)
+ commentUpdater.UpdateComment(serializedBatch.Jobs, serializedBatch.PrNumber, &githubPrService, commentId64)
digger.UpdateAggregateStatus(serializedBatch, &githubPrService)
reportErrorAndExit(githubActor, fmt.Sprintf("Failed to run commands. %s", err), 5)
@@ -167,13 +170,13 @@ func gitHubCI(lock core_locking.Lock, policyChecker core_policy.Checker, backend
jobs := []orchestrator.Job{orchestrator.JsonToJob(job)}
- _, _, err = digger.RunJobs(jobs, &githubPrService, &githubPrService, lock, reporter, planStorage, policyChecker, backendApi, inputs.Id, true, commentId64, currentDir)
+ _, _, err = digger.RunJobs(jobs, &githubPrService, &githubPrService, lock, reporter, planStorage, policyChecker, commentUpdater, backendApi, inputs.Id, true, commentId64, currentDir)
if err != nil {
serializedBatch, reportingError := backendApi.ReportProjectJobStatus(repoName, job.ProjectName, inputs.Id, "failed", time.Now(), nil)
if reportingError != nil {
reportErrorAndExit(githubActor, fmt.Sprintf("Failed run commands. %s", err), 5)
}
- digger.UpdateStatusComment(serializedBatch.Jobs, serializedBatch.PrNumber, &githubPrService, commentId64)
+ commentUpdater.UpdateComment(serializedBatch.Jobs, serializedBatch.PrNumber, &githubPrService, commentId64)
digger.UpdateAggregateStatus(serializedBatch, &githubPrService)
reportErrorAndExit(githubActor, fmt.Sprintf("Failed to run commands. %s", err), 5)
}
@@ -354,7 +357,7 @@ func gitHubCI(lock core_locking.Lock, policyChecker core_policy.Checker, backend
jobs = digger.SortedCommandsByDependency(jobs, &dependencyGraph)
- allAppliesSuccessful, atLeastOneApply, err := digger.RunJobs(jobs, &githubPrService, &githubPrService, lock, reporter, planStorage, policyChecker, backendApi, "", false, 0, currentDir)
+ allAppliesSuccessful, atLeastOneApply, err := digger.RunJobs(jobs, &githubPrService, &githubPrService, lock, reporter, planStorage, policyChecker, comment_updater.NoopCommentUpdater{}, backendApi, "", false, 0, currentDir)
if err != nil {
reportErrorAndExit(githubActor, fmt.Sprintf("Failed to run commands. %s", err), 8)
// aggregate status checks: failure
@@ -471,7 +474,7 @@ func gitLabCI(lock core_locking.Lock, policyChecker core_policy.Checker, backend
ReportStrategy: reportingStrategy,
}
jobs = digger.SortedCommandsByDependency(jobs, &dependencyGraph)
- allAppliesSuccess, atLeastOneApply, err := digger.RunJobs(jobs, gitlabService, gitlabService, lock, reporter, planStorage, policyChecker, backendApi, "", false, 0, currentDir)
+ allAppliesSuccess, atLeastOneApply, err := digger.RunJobs(jobs, gitlabService, gitlabService, lock, reporter, planStorage, policyChecker, comment_updater.NoopCommentUpdater{}, backendApi, "", false, 0, currentDir)
if err != nil {
log.Printf("failed to execute command, %v", err)
@@ -559,7 +562,7 @@ func azureCI(lock core_locking.Lock, policyChecker core_policy.Checker, backendA
ReportStrategy: reportingStrategy,
}
jobs = digger.SortedCommandsByDependency(jobs, &dependencyGraph)
- allAppliesSuccess, atLeastOneApply, err := digger.RunJobs(jobs, azureService, azureService, lock, reporter, planStorage, policyChecker, backendApi, "", false, 0, currentDir)
+ allAppliesSuccess, atLeastOneApply, err := digger.RunJobs(jobs, azureService, azureService, lock, reporter, planStorage, policyChecker, comment_updater.NoopCommentUpdater{}, backendApi, "", false, 0, currentDir)
if err != nil {
reportErrorAndExit(parsedAzureContext.BaseUrl, fmt.Sprintf("Failed to run commands. %s", err), 8)
}
@@ -805,7 +808,7 @@ func bitbucketCI(lock core_locking.Lock, policyChecker core_policy.Checker, back
jobs = digger.SortedCommandsByDependency(jobs, &dependencyGraph)
- _, _, err = digger.RunJobs(jobs, &bitbucketService, &bitbucketService, lock, &reporter, planStorage, policyChecker, backendApi, "", false, 0, currentDir)
+ _, _, err = digger.RunJobs(jobs, &bitbucketService, &bitbucketService, lock, &reporter, planStorage, policyChecker, comment_updater.NoopCommentUpdater{}, backendApi, "", false, 0, currentDir)
if err != nil {
reportErrorAndExit(actor, fmt.Sprintf("Failed to run commands. %s", err), 8)
}
@@ -842,7 +845,7 @@ func exec(actor string, projectName string, repoNamespace string, command string
}
jobs = digger.SortedCommandsByDependency(jobs, &dependencyGraph)
- _, _, err = digger.RunJobs(jobs, prService, orgService, lock, reporter, planStorage, policyChecker, backendApi, "", false, 123, currentDir)
+ _, _, err = digger.RunJobs(jobs, prService, orgService, lock, reporter, planStorage, policyChecker, comment_updater.NoopCommentUpdater{}, backendApi, "", false, 123, currentDir)
}
/*
diff --git a/ee/cli/pkg/comment_updater/updater.go b/ee/cli/pkg/comment_updater/updater.go
new file mode 100644
index 000000000..3fcbe0aa0
--- /dev/null
+++ b/ee/cli/pkg/comment_updater/updater.go
@@ -0,0 +1,42 @@
+package comment_updater
+
+import (
+	"encoding/json"
+	"fmt"
+	"github.com/diggerhq/digger/libs/orchestrator"
+	"github.com/diggerhq/digger/libs/orchestrator/scheduler"
+	"log"
+)
+
+// AdvancedCommentUpdater is the EE comment updater. It currently renders the
+// same per-job status list as the CLI BasicCommentUpdater; kept as a separate
+// type so EE-specific rendering can evolve independently.
+// TODO(review): consider embedding cli/pkg/comment_updater.BasicCommentUpdater
+// to remove the duplicated body.
+type AdvancedCommentUpdater struct {
+}
+
+// UpdateComment rewrites the comment identified by prCommentId with one status
+// line per serialized job.
+func (a AdvancedCommentUpdater) UpdateComment(jobs []scheduler.SerializedJob, prNumber int, prService orchestrator.PullRequestService, prCommentId int64) error {
+	message := ":construction_worker: Jobs status:\n\n"
+	for _, job := range jobs {
+		var jobSpec orchestrator.JobJson
+		err := json.Unmarshal(job.JobString, &jobSpec)
+		if err != nil {
+			log.Printf("Failed to unmarshal serialized job, %v", err)
+			return fmt.Errorf("failed to unmarshal serialized job: %v", err)
+		}
+		isPlan := jobSpec.IsPlan()
+
+		// Fixed format string: previous version passed 5 args for 4 verbs.
+		message = message + fmt.Sprintf("%v **%v** <a href='%v'>%v</a>%v\n", job.Status.ToEmoji(), jobSpec.ProjectName, *job.WorkflowRunUrl, job.Status.ToString(), job.ResourcesSummaryString(isPlan))
+	}
+
+	err := prService.EditComment(prNumber, prCommentId, message)
+	if err != nil {
+		log.Printf("Failed to update PR comment: %v", err)
+		return fmt.Errorf("failed to update PR comment: %v", err)
+	}
+	return nil
+}