diff --git a/.gitignore b/.gitignore index 9c7731ce55..8fdac0813d 100644 --- a/.gitignore +++ b/.gitignore @@ -4,7 +4,6 @@ ui/dist ui/package-lock.json .gradle .project -bin build client/python/conductor.egg-info *.pyc diff --git a/build.gradle b/build.gradle index b1c170cf60..60f9adf280 100644 --- a/build.gradle +++ b/build.gradle @@ -1,16 +1,15 @@ buildscript { - repositories { jcenter() } dependencies { - classpath 'com.netflix.nebula:gradle-extra-configurations-plugin:3.0.3' + classpath 'com.netflix.nebula:gradle-extra-configurations-plugin:4.0.1' classpath 'org.apache.ant:ant:1.9.7' } } plugins { - id 'nebula.netflixoss' version '5.1.1' + id 'nebula.netflixoss' version '6.0.3' } // Establish version and status @@ -23,7 +22,6 @@ apply from: "$rootDir/versionsOfDependencies.gradle" subprojects { apply plugin: 'nebula.netflixoss' - apply plugin: 'nebula.provided-base' apply plugin: 'java' apply plugin: 'idea' apply plugin: 'eclipse' @@ -37,8 +35,10 @@ subprojects { } dependencies { - testCompile "junit:junit-dep:${revJUnit}" - testCompile "org.mockito:mockito-all:${revMockito}" + testCompile "junit:junit:${revJUnit}" + testCompile("org.mockito:mockito-core:${revMockito}") { + exclude group: 'org.hamcrest', module: 'hamcrest-core' + } } group = "com.netflix.${githubProjectName}" diff --git a/client/gogrpc/.gitignore b/client/gogrpc/.gitignore new file mode 100644 index 0000000000..49ce3c193f --- /dev/null +++ b/client/gogrpc/.gitignore @@ -0,0 +1 @@ +/vendor \ No newline at end of file diff --git a/client/gogrpc/Gopkg.lock b/client/gogrpc/Gopkg.lock new file mode 100644 index 0000000000..3f80dd76e9 --- /dev/null +++ b/client/gogrpc/Gopkg.lock @@ -0,0 +1,113 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + name = "github.com/davecgh/go-spew" + packages = ["spew"] + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" + +[[projects]] + name = "github.com/golang/protobuf" + packages = [ + "proto", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/empty", + "ptypes/struct", + "ptypes/timestamp" + ] + revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" + version = "v1.1.0" + +[[projects]] + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + name = "github.com/stretchr/testify" + packages = ["assert"] + revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71" + version = "v1.2.1" + +[[projects]] + branch = "master" + name = "golang.org/x/net" + packages = [ + "context", + "http/httpguts", + "http2", + "http2/hpack", + "idna", + "internal/timeseries", + "trace" + ] + revision = "dfa909b99c79129e1100513e5cd36307665e5723" + +[[projects]] + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/tag", + "internal/triegen", + "internal/ucd", + "language", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable" + ] + revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" + version = "v0.3.0" + +[[projects]] + branch = "master" + name = "google.golang.org/genproto" + packages = ["googleapis/rpc/status"] + revision = "694d95ba50e67b2e363f3483057db5d4910c18f9" + +[[projects]] + name = "google.golang.org/grpc" + packages = [ + ".", + "balancer", + "balancer/base", + "balancer/roundrobin", + "channelz", + "codes", + "connectivity", + "credentials", + "encoding", + "encoding/proto", + "grpclb/grpc_lb_v1/messages", + "grpclog", + "internal", + "keepalive", + "metadata", + "naming", + "peer", + "resolver", + "resolver/dns", + "resolver/passthrough", + "stats", + "status", + "tap", + "transport" + ] + revision = "41344da2231b913fa3d983840a57a6b1b7b631a1" + version = "v1.12.0" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "ca4602bc2319dbe8e34a88c36f06c428a40582ae6be51bb01af6953432d754c5" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/client/gogrpc/Gopkg.toml b/client/gogrpc/Gopkg.toml new file mode 100644 index 0000000000..e40cb23029 --- /dev/null +++ b/client/gogrpc/Gopkg.toml @@ -0,0 +1,47 @@ +# Gopkg.toml example +# +# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html +# for detailed Gopkg.toml documentation. 
+# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" +# +# [prune] +# non-go = false +# go-tests = true +# unused-packages = true + + +[[constraint]] + name = "github.com/golang/protobuf" + version = "1.1.0" + +[[constraint]] + branch = "master" + name = "golang.org/x/net" + +[[constraint]] + name = "google.golang.org/grpc" + version = "1.12.0" + +[prune] + go-tests = true + unused-packages = true + non-go = true + +[[constraint]] + name = "github.com/stretchr/testify" + version = "1.2.1" diff --git a/client/gogrpc/Makefile b/client/gogrpc/Makefile new file mode 100644 index 0000000000..805a841fa7 --- /dev/null +++ b/client/gogrpc/Makefile @@ -0,0 +1,16 @@ +PROTO_SRC = ../../grpc/src/main/proto + +SERVICES = \ + $(PROTO_SRC)/grpc/event_service.pb.go \ + $(PROTO_SRC)/grpc/metadata_service.pb.go \ + $(PROTO_SRC)/grpc/search.pb.go \ + $(PROTO_SRC)/grpc/task_service.pb.go \ + $(PROTO_SRC)/grpc/workflow_service.pb.go + +$(SERVICES): %.pb.go: %.proto + protoc -I $(PROTO_SRC) $< --go_out=plugins=grpc:$(GOPATH)/src + +models: + protoc -I $(PROTO_SRC) $(PROTO_SRC)/model/*.proto --go_out=$(GOPATH)/src + +proto: models $(SERVICES) \ No newline at end of file diff --git a/client/gogrpc/conductor/client.go b/client/gogrpc/conductor/client.go new file mode 100644 index 0000000000..264de7a861 --- /dev/null +++ b/client/gogrpc/conductor/client.go @@ -0,0 +1,77 @@ +package conductor + +import ( + "github.com/netflix/conductor/client/gogrpc/conductor/grpc/tasks" + "github.com/netflix/conductor/client/gogrpc/conductor/grpc/metadata" + "github.com/netflix/conductor/client/gogrpc/conductor/grpc/workflows" + grpc "google.golang.org/grpc" +) + +// TasksClient is a Conductor client that exposes the Conductor +// Tasks API. +type TasksClient interface { + Tasks() tasks.TaskServiceClient + Shutdown() +} + +// MetadataClient is a Conductor client that exposes the Conductor +// Metadata API. +type MetadataClient interface { + Metadata() metadata.MetadataServiceClient + Shutdown() +} + +// WorkflowsClient is a Conductor client that exposes the Conductor +// Workflows API. +type WorkflowsClient interface { + Workflows() workflows.WorkflowServiceClient + Shutdown() +} + +// Client encapsulates a GRPC connection to a Conductor server and +// the different services it exposes. +type Client struct { + conn *grpc.ClientConn + tasks tasks.TaskServiceClient + metadata metadata.MetadataServiceClient + workflows workflows.WorkflowServiceClient +} + +// NewClient returns a new Client with a GRPC connection to the given address, +// and any optional grpc.Dialoption settings. +func NewClient(address string, options ...grpc.DialOption) (*Client, error) { + conn, err := grpc.Dial(address, options...) + if err != nil { + return nil, err + } + return &Client{conn: conn}, nil +} + +// Shutdown closes the underlying GRPC connection for this client. 
+func (client *Client) Shutdown() { + client.conn.Close() +} + +// Tasks returns the Tasks service for this client +func (client *Client) Tasks() tasks.TaskServiceClient { + if client.tasks == nil { + client.tasks = tasks.NewTaskServiceClient(client.conn) + } + return client.tasks +} + +// Metadata returns the Metadata service for this client +func (client *Client) Metadata() metadata.MetadataServiceClient { + if client.metadata == nil { + client.metadata = metadata.NewMetadataServiceClient(client.conn) + } + return client.metadata +} + +// Workflows returns the workflows service for this client +func (client *Client) Workflows() workflows.WorkflowServiceClient { + if client.workflows == nil { + client.workflows = workflows.NewWorkflowServiceClient(client.conn) + } + return client.workflows +} diff --git a/client/gogrpc/conductor/grpc/events/event_service.pb.go b/client/gogrpc/conductor/grpc/events/event_service.pb.go new file mode 100644 index 0000000000..46b19b0231 --- /dev/null +++ b/client/gogrpc/conductor/grpc/events/event_service.pb.go @@ -0,0 +1,991 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc/event_service.proto + +package events // import "github.com/netflix/conductor/client/gogrpc/conductor/grpc/events" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import model "github.com/netflix/conductor/client/gogrpc/conductor/model" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type AddEventHandlerRequest struct { + Handler *model.EventHandler `protobuf:"bytes,1,opt,name=handler,proto3" json:"handler,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AddEventHandlerRequest) Reset() { *m = AddEventHandlerRequest{} } +func (m *AddEventHandlerRequest) String() string { return proto.CompactTextString(m) } +func (*AddEventHandlerRequest) ProtoMessage() {} +func (*AddEventHandlerRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_event_service_30d3c8d74d6840aa, []int{0} +} +func (m *AddEventHandlerRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AddEventHandlerRequest.Unmarshal(m, b) +} +func (m *AddEventHandlerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AddEventHandlerRequest.Marshal(b, m, deterministic) +} +func (dst *AddEventHandlerRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddEventHandlerRequest.Merge(dst, src) +} +func (m *AddEventHandlerRequest) XXX_Size() int { + return xxx_messageInfo_AddEventHandlerRequest.Size(m) +} +func (m *AddEventHandlerRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AddEventHandlerRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AddEventHandlerRequest proto.InternalMessageInfo + +func (m *AddEventHandlerRequest) GetHandler() *model.EventHandler { + if m != nil { + return m.Handler + } + return nil +} + +type AddEventHandlerResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AddEventHandlerResponse) Reset() { *m = AddEventHandlerResponse{} } +func (m *AddEventHandlerResponse) String() string { return proto.CompactTextString(m) } +func (*AddEventHandlerResponse) ProtoMessage() {} +func (*AddEventHandlerResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_event_service_30d3c8d74d6840aa, []int{1} +} +func (m *AddEventHandlerResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AddEventHandlerResponse.Unmarshal(m, b) +} +func (m *AddEventHandlerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AddEventHandlerResponse.Marshal(b, m, deterministic) +} +func (dst *AddEventHandlerResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddEventHandlerResponse.Merge(dst, src) +} +func (m *AddEventHandlerResponse) XXX_Size() int { + return xxx_messageInfo_AddEventHandlerResponse.Size(m) +} +func (m *AddEventHandlerResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AddEventHandlerResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AddEventHandlerResponse proto.InternalMessageInfo + +type UpdateEventHandlerRequest struct { + Handler *model.EventHandler `protobuf:"bytes,1,opt,name=handler,proto3" json:"handler,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateEventHandlerRequest) Reset() { *m = UpdateEventHandlerRequest{} } +func (m *UpdateEventHandlerRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateEventHandlerRequest) ProtoMessage() {} +func (*UpdateEventHandlerRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_event_service_30d3c8d74d6840aa, []int{2} +} +func (m *UpdateEventHandlerRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateEventHandlerRequest.Unmarshal(m, b) +} 
+func (m *UpdateEventHandlerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateEventHandlerRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateEventHandlerRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateEventHandlerRequest.Merge(dst, src) +} +func (m *UpdateEventHandlerRequest) XXX_Size() int { + return xxx_messageInfo_UpdateEventHandlerRequest.Size(m) +} +func (m *UpdateEventHandlerRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateEventHandlerRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateEventHandlerRequest proto.InternalMessageInfo + +func (m *UpdateEventHandlerRequest) GetHandler() *model.EventHandler { + if m != nil { + return m.Handler + } + return nil +} + +type UpdateEventHandlerResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateEventHandlerResponse) Reset() { *m = UpdateEventHandlerResponse{} } +func (m *UpdateEventHandlerResponse) String() string { return proto.CompactTextString(m) } +func (*UpdateEventHandlerResponse) ProtoMessage() {} +func (*UpdateEventHandlerResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_event_service_30d3c8d74d6840aa, []int{3} +} +func (m *UpdateEventHandlerResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateEventHandlerResponse.Unmarshal(m, b) +} +func (m *UpdateEventHandlerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateEventHandlerResponse.Marshal(b, m, deterministic) +} +func (dst *UpdateEventHandlerResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateEventHandlerResponse.Merge(dst, src) +} +func (m *UpdateEventHandlerResponse) XXX_Size() int { + return xxx_messageInfo_UpdateEventHandlerResponse.Size(m) +} +func (m *UpdateEventHandlerResponse) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateEventHandlerResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateEventHandlerResponse proto.InternalMessageInfo + +type RemoveEventHandlerRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RemoveEventHandlerRequest) Reset() { *m = RemoveEventHandlerRequest{} } +func (m *RemoveEventHandlerRequest) String() string { return proto.CompactTextString(m) } +func (*RemoveEventHandlerRequest) ProtoMessage() {} +func (*RemoveEventHandlerRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_event_service_30d3c8d74d6840aa, []int{4} +} +func (m *RemoveEventHandlerRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RemoveEventHandlerRequest.Unmarshal(m, b) +} +func (m *RemoveEventHandlerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RemoveEventHandlerRequest.Marshal(b, m, deterministic) +} +func (dst *RemoveEventHandlerRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoveEventHandlerRequest.Merge(dst, src) +} +func (m *RemoveEventHandlerRequest) XXX_Size() int { + return xxx_messageInfo_RemoveEventHandlerRequest.Size(m) +} +func (m *RemoveEventHandlerRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RemoveEventHandlerRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoveEventHandlerRequest proto.InternalMessageInfo + +func (m *RemoveEventHandlerRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type 
RemoveEventHandlerResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RemoveEventHandlerResponse) Reset() { *m = RemoveEventHandlerResponse{} } +func (m *RemoveEventHandlerResponse) String() string { return proto.CompactTextString(m) } +func (*RemoveEventHandlerResponse) ProtoMessage() {} +func (*RemoveEventHandlerResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_event_service_30d3c8d74d6840aa, []int{5} +} +func (m *RemoveEventHandlerResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RemoveEventHandlerResponse.Unmarshal(m, b) +} +func (m *RemoveEventHandlerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RemoveEventHandlerResponse.Marshal(b, m, deterministic) +} +func (dst *RemoveEventHandlerResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoveEventHandlerResponse.Merge(dst, src) +} +func (m *RemoveEventHandlerResponse) XXX_Size() int { + return xxx_messageInfo_RemoveEventHandlerResponse.Size(m) +} +func (m *RemoveEventHandlerResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RemoveEventHandlerResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoveEventHandlerResponse proto.InternalMessageInfo + +type GetEventHandlersRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetEventHandlersRequest) Reset() { *m = GetEventHandlersRequest{} } +func (m *GetEventHandlersRequest) String() string { return proto.CompactTextString(m) } +func (*GetEventHandlersRequest) ProtoMessage() {} +func (*GetEventHandlersRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_event_service_30d3c8d74d6840aa, []int{6} +} +func (m *GetEventHandlersRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetEventHandlersRequest.Unmarshal(m, b) +} +func (m *GetEventHandlersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetEventHandlersRequest.Marshal(b, m, deterministic) +} +func (dst *GetEventHandlersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetEventHandlersRequest.Merge(dst, src) +} +func (m *GetEventHandlersRequest) XXX_Size() int { + return xxx_messageInfo_GetEventHandlersRequest.Size(m) +} +func (m *GetEventHandlersRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetEventHandlersRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetEventHandlersRequest proto.InternalMessageInfo + +type GetEventHandlersForEventRequest struct { + Event string `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"` + ActiveOnly bool `protobuf:"varint,2,opt,name=active_only,json=activeOnly,proto3" json:"active_only,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetEventHandlersForEventRequest) Reset() { *m = GetEventHandlersForEventRequest{} } +func (m *GetEventHandlersForEventRequest) String() string { return proto.CompactTextString(m) } +func (*GetEventHandlersForEventRequest) ProtoMessage() {} +func (*GetEventHandlersForEventRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_event_service_30d3c8d74d6840aa, []int{7} +} +func (m *GetEventHandlersForEventRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetEventHandlersForEventRequest.Unmarshal(m, b) +} +func (m *GetEventHandlersForEventRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, 
error) { + return xxx_messageInfo_GetEventHandlersForEventRequest.Marshal(b, m, deterministic) +} +func (dst *GetEventHandlersForEventRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetEventHandlersForEventRequest.Merge(dst, src) +} +func (m *GetEventHandlersForEventRequest) XXX_Size() int { + return xxx_messageInfo_GetEventHandlersForEventRequest.Size(m) +} +func (m *GetEventHandlersForEventRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetEventHandlersForEventRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetEventHandlersForEventRequest proto.InternalMessageInfo + +func (m *GetEventHandlersForEventRequest) GetEvent() string { + if m != nil { + return m.Event + } + return "" +} + +func (m *GetEventHandlersForEventRequest) GetActiveOnly() bool { + if m != nil { + return m.ActiveOnly + } + return false +} + +type GetQueuesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetQueuesRequest) Reset() { *m = GetQueuesRequest{} } +func (m *GetQueuesRequest) String() string { return proto.CompactTextString(m) } +func (*GetQueuesRequest) ProtoMessage() {} +func (*GetQueuesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_event_service_30d3c8d74d6840aa, []int{8} +} +func (m *GetQueuesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetQueuesRequest.Unmarshal(m, b) +} +func (m *GetQueuesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetQueuesRequest.Marshal(b, m, deterministic) +} +func (dst *GetQueuesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetQueuesRequest.Merge(dst, src) +} +func (m *GetQueuesRequest) XXX_Size() int { + return xxx_messageInfo_GetQueuesRequest.Size(m) +} +func (m *GetQueuesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetQueuesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetQueuesRequest proto.InternalMessageInfo + +type GetQueuesResponse struct { + EventToQueueUri map[string]string `protobuf:"bytes,1,rep,name=event_to_queue_uri,json=eventToQueueUri,proto3" json:"event_to_queue_uri,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetQueuesResponse) Reset() { *m = GetQueuesResponse{} } +func (m *GetQueuesResponse) String() string { return proto.CompactTextString(m) } +func (*GetQueuesResponse) ProtoMessage() {} +func (*GetQueuesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_event_service_30d3c8d74d6840aa, []int{9} +} +func (m *GetQueuesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetQueuesResponse.Unmarshal(m, b) +} +func (m *GetQueuesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetQueuesResponse.Marshal(b, m, deterministic) +} +func (dst *GetQueuesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetQueuesResponse.Merge(dst, src) +} +func (m *GetQueuesResponse) XXX_Size() int { + return xxx_messageInfo_GetQueuesResponse.Size(m) +} +func (m *GetQueuesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetQueuesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetQueuesResponse proto.InternalMessageInfo + +func (m *GetQueuesResponse) GetEventToQueueUri() map[string]string { + if m != nil { + return m.EventToQueueUri + } + return nil +} + +type GetQueueSizesRequest struct { + 
XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetQueueSizesRequest) Reset() { *m = GetQueueSizesRequest{} } +func (m *GetQueueSizesRequest) String() string { return proto.CompactTextString(m) } +func (*GetQueueSizesRequest) ProtoMessage() {} +func (*GetQueueSizesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_event_service_30d3c8d74d6840aa, []int{10} +} +func (m *GetQueueSizesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetQueueSizesRequest.Unmarshal(m, b) +} +func (m *GetQueueSizesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetQueueSizesRequest.Marshal(b, m, deterministic) +} +func (dst *GetQueueSizesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetQueueSizesRequest.Merge(dst, src) +} +func (m *GetQueueSizesRequest) XXX_Size() int { + return xxx_messageInfo_GetQueueSizesRequest.Size(m) +} +func (m *GetQueueSizesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetQueueSizesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetQueueSizesRequest proto.InternalMessageInfo + +type GetQueueSizesResponse struct { + EventToQueueInfo map[string]*GetQueueSizesResponse_QueueInfo `protobuf:"bytes,2,rep,name=event_to_queue_info,json=eventToQueueInfo,proto3" json:"event_to_queue_info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetQueueSizesResponse) Reset() { *m = GetQueueSizesResponse{} } +func (m *GetQueueSizesResponse) String() string { return proto.CompactTextString(m) } +func (*GetQueueSizesResponse) ProtoMessage() {} +func (*GetQueueSizesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_event_service_30d3c8d74d6840aa, []int{11} +} +func (m *GetQueueSizesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetQueueSizesResponse.Unmarshal(m, b) +} +func (m *GetQueueSizesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetQueueSizesResponse.Marshal(b, m, deterministic) +} +func (dst *GetQueueSizesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetQueueSizesResponse.Merge(dst, src) +} +func (m *GetQueueSizesResponse) XXX_Size() int { + return xxx_messageInfo_GetQueueSizesResponse.Size(m) +} +func (m *GetQueueSizesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetQueueSizesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetQueueSizesResponse proto.InternalMessageInfo + +func (m *GetQueueSizesResponse) GetEventToQueueInfo() map[string]*GetQueueSizesResponse_QueueInfo { + if m != nil { + return m.EventToQueueInfo + } + return nil +} + +type GetQueueSizesResponse_QueueInfo struct { + QueueSizes map[string]int64 `protobuf:"bytes,1,rep,name=queue_sizes,json=queueSizes,proto3" json:"queue_sizes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetQueueSizesResponse_QueueInfo) Reset() { *m = GetQueueSizesResponse_QueueInfo{} } +func (m *GetQueueSizesResponse_QueueInfo) String() string { return proto.CompactTextString(m) } +func (*GetQueueSizesResponse_QueueInfo) ProtoMessage() {} +func (*GetQueueSizesResponse_QueueInfo) Descriptor() ([]byte, []int) { + return 
fileDescriptor_event_service_30d3c8d74d6840aa, []int{11, 0} +} +func (m *GetQueueSizesResponse_QueueInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetQueueSizesResponse_QueueInfo.Unmarshal(m, b) +} +func (m *GetQueueSizesResponse_QueueInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetQueueSizesResponse_QueueInfo.Marshal(b, m, deterministic) +} +func (dst *GetQueueSizesResponse_QueueInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetQueueSizesResponse_QueueInfo.Merge(dst, src) +} +func (m *GetQueueSizesResponse_QueueInfo) XXX_Size() int { + return xxx_messageInfo_GetQueueSizesResponse_QueueInfo.Size(m) +} +func (m *GetQueueSizesResponse_QueueInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GetQueueSizesResponse_QueueInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GetQueueSizesResponse_QueueInfo proto.InternalMessageInfo + +func (m *GetQueueSizesResponse_QueueInfo) GetQueueSizes() map[string]int64 { + if m != nil { + return m.QueueSizes + } + return nil +} + +type GetQueueProvidersRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetQueueProvidersRequest) Reset() { *m = GetQueueProvidersRequest{} } +func (m *GetQueueProvidersRequest) String() string { return proto.CompactTextString(m) } +func (*GetQueueProvidersRequest) ProtoMessage() {} +func (*GetQueueProvidersRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_event_service_30d3c8d74d6840aa, []int{12} +} +func (m *GetQueueProvidersRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetQueueProvidersRequest.Unmarshal(m, b) +} +func (m *GetQueueProvidersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetQueueProvidersRequest.Marshal(b, m, deterministic) +} +func (dst *GetQueueProvidersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetQueueProvidersRequest.Merge(dst, src) +} +func (m *GetQueueProvidersRequest) XXX_Size() int { + return xxx_messageInfo_GetQueueProvidersRequest.Size(m) +} +func (m *GetQueueProvidersRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetQueueProvidersRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetQueueProvidersRequest proto.InternalMessageInfo + +type GetQueueProvidersResponse struct { + Providers []string `protobuf:"bytes,1,rep,name=providers,proto3" json:"providers,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetQueueProvidersResponse) Reset() { *m = GetQueueProvidersResponse{} } +func (m *GetQueueProvidersResponse) String() string { return proto.CompactTextString(m) } +func (*GetQueueProvidersResponse) ProtoMessage() {} +func (*GetQueueProvidersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_event_service_30d3c8d74d6840aa, []int{13} +} +func (m *GetQueueProvidersResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetQueueProvidersResponse.Unmarshal(m, b) +} +func (m *GetQueueProvidersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetQueueProvidersResponse.Marshal(b, m, deterministic) +} +func (dst *GetQueueProvidersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetQueueProvidersResponse.Merge(dst, src) +} +func (m *GetQueueProvidersResponse) XXX_Size() int { + return xxx_messageInfo_GetQueueProvidersResponse.Size(m) +} +func (m *GetQueueProvidersResponse) XXX_DiscardUnknown() { + 
xxx_messageInfo_GetQueueProvidersResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetQueueProvidersResponse proto.InternalMessageInfo + +func (m *GetQueueProvidersResponse) GetProviders() []string { + if m != nil { + return m.Providers + } + return nil +} + +func init() { + proto.RegisterType((*AddEventHandlerRequest)(nil), "conductor.grpc.events.AddEventHandlerRequest") + proto.RegisterType((*AddEventHandlerResponse)(nil), "conductor.grpc.events.AddEventHandlerResponse") + proto.RegisterType((*UpdateEventHandlerRequest)(nil), "conductor.grpc.events.UpdateEventHandlerRequest") + proto.RegisterType((*UpdateEventHandlerResponse)(nil), "conductor.grpc.events.UpdateEventHandlerResponse") + proto.RegisterType((*RemoveEventHandlerRequest)(nil), "conductor.grpc.events.RemoveEventHandlerRequest") + proto.RegisterType((*RemoveEventHandlerResponse)(nil), "conductor.grpc.events.RemoveEventHandlerResponse") + proto.RegisterType((*GetEventHandlersRequest)(nil), "conductor.grpc.events.GetEventHandlersRequest") + proto.RegisterType((*GetEventHandlersForEventRequest)(nil), "conductor.grpc.events.GetEventHandlersForEventRequest") + proto.RegisterType((*GetQueuesRequest)(nil), "conductor.grpc.events.GetQueuesRequest") + proto.RegisterType((*GetQueuesResponse)(nil), "conductor.grpc.events.GetQueuesResponse") + proto.RegisterMapType((map[string]string)(nil), "conductor.grpc.events.GetQueuesResponse.EventToQueueUriEntry") + proto.RegisterType((*GetQueueSizesRequest)(nil), "conductor.grpc.events.GetQueueSizesRequest") + proto.RegisterType((*GetQueueSizesResponse)(nil), "conductor.grpc.events.GetQueueSizesResponse") + proto.RegisterMapType((map[string]*GetQueueSizesResponse_QueueInfo)(nil), "conductor.grpc.events.GetQueueSizesResponse.EventToQueueInfoEntry") + proto.RegisterType((*GetQueueSizesResponse_QueueInfo)(nil), "conductor.grpc.events.GetQueueSizesResponse.QueueInfo") + proto.RegisterMapType((map[string]int64)(nil), "conductor.grpc.events.GetQueueSizesResponse.QueueInfo.QueueSizesEntry") + proto.RegisterType((*GetQueueProvidersRequest)(nil), "conductor.grpc.events.GetQueueProvidersRequest") + proto.RegisterType((*GetQueueProvidersResponse)(nil), "conductor.grpc.events.GetQueueProvidersResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// EventServiceClient is the client API for EventService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type EventServiceClient interface { + // POST / + AddEventHandler(ctx context.Context, in *AddEventHandlerRequest, opts ...grpc.CallOption) (*AddEventHandlerResponse, error) + // PUT / + UpdateEventHandler(ctx context.Context, in *UpdateEventHandlerRequest, opts ...grpc.CallOption) (*UpdateEventHandlerResponse, error) + // DELETE /{name} + RemoveEventHandler(ctx context.Context, in *RemoveEventHandlerRequest, opts ...grpc.CallOption) (*RemoveEventHandlerResponse, error) + // GET / + GetEventHandlers(ctx context.Context, in *GetEventHandlersRequest, opts ...grpc.CallOption) (EventService_GetEventHandlersClient, error) + // GET /{name} + GetEventHandlersForEvent(ctx context.Context, in *GetEventHandlersForEventRequest, opts ...grpc.CallOption) (EventService_GetEventHandlersForEventClient, error) + // GET /queues + GetQueues(ctx context.Context, in *GetQueuesRequest, opts ...grpc.CallOption) (*GetQueuesResponse, error) + GetQueueSizes(ctx context.Context, in *GetQueueSizesRequest, opts ...grpc.CallOption) (*GetQueueSizesResponse, error) + // GET /queues/providers + GetQueueProviders(ctx context.Context, in *GetQueueProvidersRequest, opts ...grpc.CallOption) (*GetQueueProvidersResponse, error) +} + +type eventServiceClient struct { + cc *grpc.ClientConn +} + +func NewEventServiceClient(cc *grpc.ClientConn) EventServiceClient { + return &eventServiceClient{cc} +} + +func (c *eventServiceClient) AddEventHandler(ctx context.Context, in *AddEventHandlerRequest, opts ...grpc.CallOption) (*AddEventHandlerResponse, error) { + out := new(AddEventHandlerResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.events.EventService/AddEventHandler", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *eventServiceClient) UpdateEventHandler(ctx context.Context, in *UpdateEventHandlerRequest, opts ...grpc.CallOption) (*UpdateEventHandlerResponse, error) { + out := new(UpdateEventHandlerResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.events.EventService/UpdateEventHandler", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *eventServiceClient) RemoveEventHandler(ctx context.Context, in *RemoveEventHandlerRequest, opts ...grpc.CallOption) (*RemoveEventHandlerResponse, error) { + out := new(RemoveEventHandlerResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.events.EventService/RemoveEventHandler", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *eventServiceClient) GetEventHandlers(ctx context.Context, in *GetEventHandlersRequest, opts ...grpc.CallOption) (EventService_GetEventHandlersClient, error) { + stream, err := c.cc.NewStream(ctx, &_EventService_serviceDesc.Streams[0], "/conductor.grpc.events.EventService/GetEventHandlers", opts...) 
+ if err != nil { + return nil, err + } + x := &eventServiceGetEventHandlersClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type EventService_GetEventHandlersClient interface { + Recv() (*model.EventHandler, error) + grpc.ClientStream +} + +type eventServiceGetEventHandlersClient struct { + grpc.ClientStream +} + +func (x *eventServiceGetEventHandlersClient) Recv() (*model.EventHandler, error) { + m := new(model.EventHandler) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *eventServiceClient) GetEventHandlersForEvent(ctx context.Context, in *GetEventHandlersForEventRequest, opts ...grpc.CallOption) (EventService_GetEventHandlersForEventClient, error) { + stream, err := c.cc.NewStream(ctx, &_EventService_serviceDesc.Streams[1], "/conductor.grpc.events.EventService/GetEventHandlersForEvent", opts...) + if err != nil { + return nil, err + } + x := &eventServiceGetEventHandlersForEventClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type EventService_GetEventHandlersForEventClient interface { + Recv() (*model.EventHandler, error) + grpc.ClientStream +} + +type eventServiceGetEventHandlersForEventClient struct { + grpc.ClientStream +} + +func (x *eventServiceGetEventHandlersForEventClient) Recv() (*model.EventHandler, error) { + m := new(model.EventHandler) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *eventServiceClient) GetQueues(ctx context.Context, in *GetQueuesRequest, opts ...grpc.CallOption) (*GetQueuesResponse, error) { + out := new(GetQueuesResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.events.EventService/GetQueues", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *eventServiceClient) GetQueueSizes(ctx context.Context, in *GetQueueSizesRequest, opts ...grpc.CallOption) (*GetQueueSizesResponse, error) { + out := new(GetQueueSizesResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.events.EventService/GetQueueSizes", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *eventServiceClient) GetQueueProviders(ctx context.Context, in *GetQueueProvidersRequest, opts ...grpc.CallOption) (*GetQueueProvidersResponse, error) { + out := new(GetQueueProvidersResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.events.EventService/GetQueueProviders", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// EventServiceServer is the server API for EventService service. 
+type EventServiceServer interface { + // POST / + AddEventHandler(context.Context, *AddEventHandlerRequest) (*AddEventHandlerResponse, error) + // PUT / + UpdateEventHandler(context.Context, *UpdateEventHandlerRequest) (*UpdateEventHandlerResponse, error) + // DELETE /{name} + RemoveEventHandler(context.Context, *RemoveEventHandlerRequest) (*RemoveEventHandlerResponse, error) + // GET / + GetEventHandlers(*GetEventHandlersRequest, EventService_GetEventHandlersServer) error + // GET /{name} + GetEventHandlersForEvent(*GetEventHandlersForEventRequest, EventService_GetEventHandlersForEventServer) error + // GET /queues + GetQueues(context.Context, *GetQueuesRequest) (*GetQueuesResponse, error) + GetQueueSizes(context.Context, *GetQueueSizesRequest) (*GetQueueSizesResponse, error) + // GET /queues/providers + GetQueueProviders(context.Context, *GetQueueProvidersRequest) (*GetQueueProvidersResponse, error) +} + +func RegisterEventServiceServer(s *grpc.Server, srv EventServiceServer) { + s.RegisterService(&_EventService_serviceDesc, srv) +} + +func _EventService_AddEventHandler_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AddEventHandlerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EventServiceServer).AddEventHandler(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.events.EventService/AddEventHandler", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EventServiceServer).AddEventHandler(ctx, req.(*AddEventHandlerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _EventService_UpdateEventHandler_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateEventHandlerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EventServiceServer).UpdateEventHandler(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.events.EventService/UpdateEventHandler", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EventServiceServer).UpdateEventHandler(ctx, req.(*UpdateEventHandlerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _EventService_RemoveEventHandler_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveEventHandlerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EventServiceServer).RemoveEventHandler(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.events.EventService/RemoveEventHandler", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EventServiceServer).RemoveEventHandler(ctx, req.(*RemoveEventHandlerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _EventService_GetEventHandlers_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(GetEventHandlersRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(EventServiceServer).GetEventHandlers(m, &eventServiceGetEventHandlersServer{stream}) +} + +type EventService_GetEventHandlersServer interface { + 
Send(*model.EventHandler) error + grpc.ServerStream +} + +type eventServiceGetEventHandlersServer struct { + grpc.ServerStream +} + +func (x *eventServiceGetEventHandlersServer) Send(m *model.EventHandler) error { + return x.ServerStream.SendMsg(m) +} + +func _EventService_GetEventHandlersForEvent_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(GetEventHandlersForEventRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(EventServiceServer).GetEventHandlersForEvent(m, &eventServiceGetEventHandlersForEventServer{stream}) +} + +type EventService_GetEventHandlersForEventServer interface { + Send(*model.EventHandler) error + grpc.ServerStream +} + +type eventServiceGetEventHandlersForEventServer struct { + grpc.ServerStream +} + +func (x *eventServiceGetEventHandlersForEventServer) Send(m *model.EventHandler) error { + return x.ServerStream.SendMsg(m) +} + +func _EventService_GetQueues_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetQueuesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EventServiceServer).GetQueues(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.events.EventService/GetQueues", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EventServiceServer).GetQueues(ctx, req.(*GetQueuesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _EventService_GetQueueSizes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetQueueSizesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EventServiceServer).GetQueueSizes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.events.EventService/GetQueueSizes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EventServiceServer).GetQueueSizes(ctx, req.(*GetQueueSizesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _EventService_GetQueueProviders_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetQueueProvidersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EventServiceServer).GetQueueProviders(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.events.EventService/GetQueueProviders", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EventServiceServer).GetQueueProviders(ctx, req.(*GetQueueProvidersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _EventService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "conductor.grpc.events.EventService", + HandlerType: (*EventServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AddEventHandler", + Handler: _EventService_AddEventHandler_Handler, + }, + { + MethodName: "UpdateEventHandler", + Handler: _EventService_UpdateEventHandler_Handler, + }, + { + MethodName: "RemoveEventHandler", + Handler: _EventService_RemoveEventHandler_Handler, + }, + { + MethodName: "GetQueues", + Handler: _EventService_GetQueues_Handler, + }, + { + MethodName: "GetQueueSizes", + 
Handler: _EventService_GetQueueSizes_Handler, + }, + { + MethodName: "GetQueueProviders", + Handler: _EventService_GetQueueProviders_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "GetEventHandlers", + Handler: _EventService_GetEventHandlers_Handler, + ServerStreams: true, + }, + { + StreamName: "GetEventHandlersForEvent", + Handler: _EventService_GetEventHandlersForEvent_Handler, + ServerStreams: true, + }, + }, + Metadata: "grpc/event_service.proto", +} + +func init() { + proto.RegisterFile("grpc/event_service.proto", fileDescriptor_event_service_30d3c8d74d6840aa) +} + +var fileDescriptor_event_service_30d3c8d74d6840aa = []byte{ + // 687 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x5d, 0x6f, 0xd3, 0x3c, + 0x18, 0x55, 0xd6, 0xf7, 0x65, 0xf4, 0x29, 0xb0, 0x61, 0xf6, 0x91, 0x5a, 0x43, 0x9b, 0x7a, 0x43, + 0x25, 0xc0, 0x19, 0x45, 0x1a, 0x1f, 0xd2, 0x24, 0xa8, 0xb4, 0x0d, 0x24, 0x24, 0xb6, 0x6c, 0x93, + 0x10, 0x17, 0x54, 0x5d, 0xe2, 0x75, 0x19, 0xa9, 0xdd, 0x26, 0x4e, 0x44, 0xe1, 0x6f, 0xf0, 0x4f, + 0xb8, 0xe6, 0x8a, 0x3f, 0x86, 0x62, 0xa7, 0x49, 0x9a, 0x26, 0xb4, 0x45, 0xdc, 0xd5, 0xcf, 0xc7, + 0x39, 0x3e, 0xb6, 0x9f, 0xd3, 0x80, 0xde, 0xf3, 0x06, 0x96, 0x41, 0x43, 0xca, 0x44, 0xc7, 0xa7, + 0x5e, 0xe8, 0x58, 0x94, 0x0c, 0x3c, 0x2e, 0x38, 0x5a, 0xb7, 0x38, 0xb3, 0x03, 0x4b, 0x70, 0x8f, + 0x44, 0x35, 0x44, 0xd6, 0xf8, 0x58, 0xef, 0x73, 0x9b, 0xba, 0xaa, 0xe3, 0xaa, 0xcb, 0x6c, 0x97, + 0x7a, 0xaa, 0xa1, 0x71, 0x02, 0x1b, 0xaf, 0x6d, 0xfb, 0x20, 0x4a, 0xbc, 0x51, 0x09, 0x93, 0x0e, + 0x03, 0xea, 0x0b, 0xf4, 0x0c, 0x96, 0xe3, 0x52, 0x5d, 0xdb, 0xd1, 0x9a, 0xb5, 0xd6, 0x7d, 0x92, + 0x82, 0xcb, 0x66, 0x32, 0xd1, 0x36, 0xae, 0x6e, 0xd4, 0x61, 0x73, 0x0a, 0xd2, 0x1f, 0x70, 0xe6, + 0xd3, 0xc6, 0x19, 0xd4, 0xcf, 0x07, 0x76, 0x57, 0xd0, 0x7f, 0x4a, 0xb8, 0x05, 0xb8, 0x08, 0x35, + 0xe6, 0x34, 0xa0, 0x6e, 0xd2, 0x3e, 0x0f, 0x0b, 0x39, 0x11, 0xfc, 0xc7, 0xba, 0x7d, 0x2a, 0x09, + 0xab, 0xa6, 0xfc, 0x1d, 0xc1, 0x15, 0x35, 0xc4, 0x70, 0x75, 0xd8, 0x3c, 0xa2, 0x22, 0x9b, 0xf2, + 0x63, 0xb0, 0xc6, 0x07, 0xd8, 0xce, 0xa7, 0x0e, 0xb9, 0x27, 0xd7, 0x63, 0xbe, 0x35, 0xf8, 0x5f, + 0x5e, 0x42, 0x4c, 0xa8, 0x16, 0x68, 0x1b, 0x6a, 0x5d, 0x4b, 0x38, 0x21, 0xed, 0x70, 0xe6, 0x8e, + 0xf4, 0xa5, 0x1d, 0xad, 0x79, 0xd3, 0x04, 0x15, 0x7a, 0xcf, 0xdc, 0x51, 0x03, 0xc1, 0xea, 0x11, + 0x15, 0x27, 0x01, 0x0d, 0x68, 0xc2, 0xf6, 0x4b, 0x83, 0xbb, 0x99, 0xa0, 0xda, 0x1e, 0xba, 0x06, + 0xa4, 0xde, 0x85, 0xe0, 0x9d, 0x61, 0x94, 0xea, 0x04, 0x9e, 0xa3, 0x6b, 0x3b, 0x95, 0x66, 0xad, + 0xb5, 0x4f, 0x0a, 0x5f, 0x07, 0x99, 0x42, 0x51, 0xe7, 0x7c, 0xc6, 0x65, 0xf4, 0xdc, 0x73, 0x0e, + 0x98, 0xf0, 0x46, 0xe6, 0x0a, 0x9d, 0x8c, 0xe2, 0x36, 0xac, 0x15, 0x15, 0xa2, 0x55, 0xa8, 0x7c, + 0xa6, 0xa3, 0x58, 0x62, 0xf4, 0x33, 0x92, 0x1d, 0x76, 0xdd, 0x80, 0x4a, 0x69, 0x55, 0x53, 0x2d, + 0x5e, 0x2e, 0x3d, 0xd7, 0x1a, 0x1b, 0xb0, 0x36, 0xa6, 0x3f, 0x75, 0xbe, 0xa6, 0xea, 0x7e, 0x56, + 0x60, 0x3d, 0x97, 0x88, 0x15, 0x0e, 0xe1, 0x5e, 0x4e, 0xa1, 0xc3, 0x2e, 0xb9, 0xbe, 0x24, 0x25, + 0xb6, 0x67, 0x48, 0x9c, 0x80, 0x9a, 0x90, 0xf9, 0x96, 0x5d, 0x72, 0xa5, 0x73, 0x95, 0xe6, 0xc2, + 0xf8, 0x87, 0x06, 0xd5, 0x64, 0x85, 0x7a, 0x50, 0x53, 0xbc, 0x7e, 0x04, 0x16, 0x9f, 0xed, 0xe1, + 0x42, 0xc4, 0x09, 0x18, 0x49, 0x93, 0x8a, 0x1c, 0x86, 0x49, 0x00, 0xef, 0xc3, 0x4a, 0x2e, 0x3d, + 0xeb, 0x68, 0x2b, 0x99, 0xa3, 0xc5, 0xdf, 0x60, 0xbd, 0x50, 0x60, 0x01, 0xc8, 0xbb, 0x2c, 0x48, + 0xad, 0xb5, 0xf7, 0x77, 0x62, 0xb2, 0xf7, 0x8a, 0x41, 0x1f, 0x57, 0x1f, 0x7b, 0x3c, 0x74, 0xec, + 
0xcc, 0x9c, 0xbc, 0x80, 0x7a, 0x41, 0x2e, 0xbe, 0xde, 0x2d, 0xa8, 0x0e, 0xc6, 0x41, 0x79, 0xb6, + 0x55, 0x33, 0x0d, 0xb4, 0xbe, 0x2f, 0xc3, 0x2d, 0x29, 0xea, 0x54, 0xd9, 0x1e, 0x1a, 0xc0, 0x4a, + 0xce, 0x6c, 0xd0, 0xe3, 0x92, 0xdd, 0x17, 0xfb, 0x1c, 0x26, 0xf3, 0x96, 0xc7, 0x1b, 0x1c, 0x01, + 0x9a, 0x76, 0x1b, 0xb4, 0x5b, 0x82, 0x52, 0x6a, 0x77, 0xf8, 0xc9, 0x02, 0x1d, 0x29, 0xf5, 0xb4, + 0x33, 0x95, 0x52, 0x97, 0xba, 0x5e, 0x29, 0x75, 0xb9, 0xed, 0x21, 0x4b, 0x3a, 0xd0, 0x84, 0xb7, + 0x21, 0x52, 0xfe, 0x4c, 0x8a, 0xfc, 0x11, 0xff, 0xd9, 0xcf, 0x77, 0x35, 0xe4, 0xcb, 0x47, 0x53, + 0x68, 0xa0, 0x68, 0x6f, 0x4e, 0xb2, 0x9c, 0xe3, 0xce, 0x26, 0xfd, 0x04, 0xd5, 0xc4, 0x00, 0xd1, + 0x83, 0xd9, 0x16, 0xa9, 0x60, 0x9b, 0xf3, 0x7a, 0x29, 0xba, 0x86, 0xdb, 0x13, 0x73, 0x83, 0x1e, + 0xce, 0x37, 0x5d, 0x8a, 0xe7, 0xd1, 0x22, 0xa3, 0x88, 0xc2, 0xf4, 0x2f, 0x21, 0x99, 0x2c, 0x64, + 0xcc, 0x80, 0xc8, 0xcf, 0x27, 0xde, 0x9d, 0xbf, 0x41, 0xf1, 0xb6, 0x19, 0x60, 0x8b, 0xf7, 0x09, + 0xa3, 0xe2, 0xd2, 0x75, 0xbe, 0xe4, 0xda, 0xdb, 0x77, 0xb2, 0x13, 0x7b, 0x7c, 0xf1, 0xf1, 0x55, + 0xcf, 0x11, 0x57, 0xc1, 0x05, 0xb1, 0x78, 0xdf, 0x88, 0x5b, 0x8c, 0xa4, 0xc5, 0xb0, 0x5c, 0x87, + 0x32, 0x61, 0xf4, 0xb8, 0xfc, 0xc8, 0x49, 0xe3, 0xe9, 0x37, 0x8f, 0x7f, 0x71, 0x43, 0xde, 0xe4, + 0xd3, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x65, 0xa5, 0x44, 0x1b, 0x09, 0x09, 0x00, 0x00, +} diff --git a/client/gogrpc/conductor/grpc/metadata/metadata_service.pb.go b/client/gogrpc/conductor/grpc/metadata/metadata_service.pb.go new file mode 100644 index 0000000000..0d431bc56b --- /dev/null +++ b/client/gogrpc/conductor/grpc/metadata/metadata_service.pb.go @@ -0,0 +1,867 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc/metadata_service.proto + +package metadata // import "github.com/netflix/conductor/client/gogrpc/conductor/grpc/metadata" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import model "github.com/netflix/conductor/client/gogrpc/conductor/model" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type CreateWorkflowRequest struct { + Workflow *model.WorkflowDef `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateWorkflowRequest) Reset() { *m = CreateWorkflowRequest{} } +func (m *CreateWorkflowRequest) String() string { return proto.CompactTextString(m) } +func (*CreateWorkflowRequest) ProtoMessage() {} +func (*CreateWorkflowRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_service_aad2a84548370e06, []int{0} +} +func (m *CreateWorkflowRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateWorkflowRequest.Unmarshal(m, b) +} +func (m *CreateWorkflowRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateWorkflowRequest.Marshal(b, m, deterministic) +} +func (dst *CreateWorkflowRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateWorkflowRequest.Merge(dst, src) +} +func (m *CreateWorkflowRequest) XXX_Size() int { + return xxx_messageInfo_CreateWorkflowRequest.Size(m) +} +func (m *CreateWorkflowRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateWorkflowRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateWorkflowRequest proto.InternalMessageInfo + +func (m *CreateWorkflowRequest) GetWorkflow() *model.WorkflowDef { + if m != nil { + return m.Workflow + } + return nil +} + +type CreateWorkflowResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateWorkflowResponse) Reset() { *m = CreateWorkflowResponse{} } +func (m *CreateWorkflowResponse) String() string { return proto.CompactTextString(m) } +func (*CreateWorkflowResponse) ProtoMessage() {} +func (*CreateWorkflowResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_service_aad2a84548370e06, []int{1} +} +func (m *CreateWorkflowResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateWorkflowResponse.Unmarshal(m, b) +} +func (m *CreateWorkflowResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateWorkflowResponse.Marshal(b, m, deterministic) +} +func (dst *CreateWorkflowResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateWorkflowResponse.Merge(dst, src) +} +func (m *CreateWorkflowResponse) XXX_Size() int { + return xxx_messageInfo_CreateWorkflowResponse.Size(m) +} +func (m *CreateWorkflowResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CreateWorkflowResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateWorkflowResponse proto.InternalMessageInfo + +type UpdateWorkflowsRequest struct { + Defs []*model.WorkflowDef `protobuf:"bytes,1,rep,name=defs,proto3" json:"defs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateWorkflowsRequest) Reset() { *m = UpdateWorkflowsRequest{} } +func (m *UpdateWorkflowsRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateWorkflowsRequest) ProtoMessage() {} +func (*UpdateWorkflowsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_service_aad2a84548370e06, []int{2} +} +func (m *UpdateWorkflowsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateWorkflowsRequest.Unmarshal(m, b) +} +func (m *UpdateWorkflowsRequest) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateWorkflowsRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateWorkflowsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateWorkflowsRequest.Merge(dst, src) +} +func (m *UpdateWorkflowsRequest) XXX_Size() int { + return xxx_messageInfo_UpdateWorkflowsRequest.Size(m) +} +func (m *UpdateWorkflowsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateWorkflowsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateWorkflowsRequest proto.InternalMessageInfo + +func (m *UpdateWorkflowsRequest) GetDefs() []*model.WorkflowDef { + if m != nil { + return m.Defs + } + return nil +} + +type UpdateWorkflowsResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateWorkflowsResponse) Reset() { *m = UpdateWorkflowsResponse{} } +func (m *UpdateWorkflowsResponse) String() string { return proto.CompactTextString(m) } +func (*UpdateWorkflowsResponse) ProtoMessage() {} +func (*UpdateWorkflowsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_service_aad2a84548370e06, []int{3} +} +func (m *UpdateWorkflowsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateWorkflowsResponse.Unmarshal(m, b) +} +func (m *UpdateWorkflowsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateWorkflowsResponse.Marshal(b, m, deterministic) +} +func (dst *UpdateWorkflowsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateWorkflowsResponse.Merge(dst, src) +} +func (m *UpdateWorkflowsResponse) XXX_Size() int { + return xxx_messageInfo_UpdateWorkflowsResponse.Size(m) +} +func (m *UpdateWorkflowsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateWorkflowsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateWorkflowsResponse proto.InternalMessageInfo + +type GetWorkflowRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Version int32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetWorkflowRequest) Reset() { *m = GetWorkflowRequest{} } +func (m *GetWorkflowRequest) String() string { return proto.CompactTextString(m) } +func (*GetWorkflowRequest) ProtoMessage() {} +func (*GetWorkflowRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_service_aad2a84548370e06, []int{4} +} +func (m *GetWorkflowRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetWorkflowRequest.Unmarshal(m, b) +} +func (m *GetWorkflowRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetWorkflowRequest.Marshal(b, m, deterministic) +} +func (dst *GetWorkflowRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetWorkflowRequest.Merge(dst, src) +} +func (m *GetWorkflowRequest) XXX_Size() int { + return xxx_messageInfo_GetWorkflowRequest.Size(m) +} +func (m *GetWorkflowRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetWorkflowRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetWorkflowRequest proto.InternalMessageInfo + +func (m *GetWorkflowRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GetWorkflowRequest) GetVersion() int32 { + if m != nil { + return m.Version + } + return 0 +} + +type GetWorkflowResponse struct { + Workflow *model.WorkflowDef 
`protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetWorkflowResponse) Reset() { *m = GetWorkflowResponse{} } +func (m *GetWorkflowResponse) String() string { return proto.CompactTextString(m) } +func (*GetWorkflowResponse) ProtoMessage() {} +func (*GetWorkflowResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_service_aad2a84548370e06, []int{5} +} +func (m *GetWorkflowResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetWorkflowResponse.Unmarshal(m, b) +} +func (m *GetWorkflowResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetWorkflowResponse.Marshal(b, m, deterministic) +} +func (dst *GetWorkflowResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetWorkflowResponse.Merge(dst, src) +} +func (m *GetWorkflowResponse) XXX_Size() int { + return xxx_messageInfo_GetWorkflowResponse.Size(m) +} +func (m *GetWorkflowResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetWorkflowResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetWorkflowResponse proto.InternalMessageInfo + +func (m *GetWorkflowResponse) GetWorkflow() *model.WorkflowDef { + if m != nil { + return m.Workflow + } + return nil +} + +type CreateTasksRequest struct { + Defs []*model.TaskDef `protobuf:"bytes,1,rep,name=defs,proto3" json:"defs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateTasksRequest) Reset() { *m = CreateTasksRequest{} } +func (m *CreateTasksRequest) String() string { return proto.CompactTextString(m) } +func (*CreateTasksRequest) ProtoMessage() {} +func (*CreateTasksRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_service_aad2a84548370e06, []int{6} +} +func (m *CreateTasksRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateTasksRequest.Unmarshal(m, b) +} +func (m *CreateTasksRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateTasksRequest.Marshal(b, m, deterministic) +} +func (dst *CreateTasksRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateTasksRequest.Merge(dst, src) +} +func (m *CreateTasksRequest) XXX_Size() int { + return xxx_messageInfo_CreateTasksRequest.Size(m) +} +func (m *CreateTasksRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateTasksRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateTasksRequest proto.InternalMessageInfo + +func (m *CreateTasksRequest) GetDefs() []*model.TaskDef { + if m != nil { + return m.Defs + } + return nil +} + +type CreateTasksResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateTasksResponse) Reset() { *m = CreateTasksResponse{} } +func (m *CreateTasksResponse) String() string { return proto.CompactTextString(m) } +func (*CreateTasksResponse) ProtoMessage() {} +func (*CreateTasksResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_service_aad2a84548370e06, []int{7} +} +func (m *CreateTasksResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateTasksResponse.Unmarshal(m, b) +} +func (m *CreateTasksResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateTasksResponse.Marshal(b, m, deterministic) +} +func (dst *CreateTasksResponse) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_CreateTasksResponse.Merge(dst, src) +} +func (m *CreateTasksResponse) XXX_Size() int { + return xxx_messageInfo_CreateTasksResponse.Size(m) +} +func (m *CreateTasksResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CreateTasksResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateTasksResponse proto.InternalMessageInfo + +type UpdateTaskRequest struct { + Task *model.TaskDef `protobuf:"bytes,1,opt,name=task,proto3" json:"task,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateTaskRequest) Reset() { *m = UpdateTaskRequest{} } +func (m *UpdateTaskRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateTaskRequest) ProtoMessage() {} +func (*UpdateTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_service_aad2a84548370e06, []int{8} +} +func (m *UpdateTaskRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateTaskRequest.Unmarshal(m, b) +} +func (m *UpdateTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateTaskRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateTaskRequest.Merge(dst, src) +} +func (m *UpdateTaskRequest) XXX_Size() int { + return xxx_messageInfo_UpdateTaskRequest.Size(m) +} +func (m *UpdateTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateTaskRequest proto.InternalMessageInfo + +func (m *UpdateTaskRequest) GetTask() *model.TaskDef { + if m != nil { + return m.Task + } + return nil +} + +type UpdateTaskResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateTaskResponse) Reset() { *m = UpdateTaskResponse{} } +func (m *UpdateTaskResponse) String() string { return proto.CompactTextString(m) } +func (*UpdateTaskResponse) ProtoMessage() {} +func (*UpdateTaskResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_service_aad2a84548370e06, []int{9} +} +func (m *UpdateTaskResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateTaskResponse.Unmarshal(m, b) +} +func (m *UpdateTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateTaskResponse.Marshal(b, m, deterministic) +} +func (dst *UpdateTaskResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateTaskResponse.Merge(dst, src) +} +func (m *UpdateTaskResponse) XXX_Size() int { + return xxx_messageInfo_UpdateTaskResponse.Size(m) +} +func (m *UpdateTaskResponse) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateTaskResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateTaskResponse proto.InternalMessageInfo + +type GetTaskRequest struct { + TaskType string `protobuf:"bytes,1,opt,name=task_type,json=taskType,proto3" json:"task_type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetTaskRequest) Reset() { *m = GetTaskRequest{} } +func (m *GetTaskRequest) String() string { return proto.CompactTextString(m) } +func (*GetTaskRequest) ProtoMessage() {} +func (*GetTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_service_aad2a84548370e06, []int{10} +} +func (m *GetTaskRequest) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_GetTaskRequest.Unmarshal(m, b) +} +func (m *GetTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetTaskRequest.Marshal(b, m, deterministic) +} +func (dst *GetTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetTaskRequest.Merge(dst, src) +} +func (m *GetTaskRequest) XXX_Size() int { + return xxx_messageInfo_GetTaskRequest.Size(m) +} +func (m *GetTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetTaskRequest proto.InternalMessageInfo + +func (m *GetTaskRequest) GetTaskType() string { + if m != nil { + return m.TaskType + } + return "" +} + +type GetTaskResponse struct { + Task *model.TaskDef `protobuf:"bytes,1,opt,name=task,proto3" json:"task,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetTaskResponse) Reset() { *m = GetTaskResponse{} } +func (m *GetTaskResponse) String() string { return proto.CompactTextString(m) } +func (*GetTaskResponse) ProtoMessage() {} +func (*GetTaskResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_service_aad2a84548370e06, []int{11} +} +func (m *GetTaskResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetTaskResponse.Unmarshal(m, b) +} +func (m *GetTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetTaskResponse.Marshal(b, m, deterministic) +} +func (dst *GetTaskResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetTaskResponse.Merge(dst, src) +} +func (m *GetTaskResponse) XXX_Size() int { + return xxx_messageInfo_GetTaskResponse.Size(m) +} +func (m *GetTaskResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetTaskResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetTaskResponse proto.InternalMessageInfo + +func (m *GetTaskResponse) GetTask() *model.TaskDef { + if m != nil { + return m.Task + } + return nil +} + +type DeleteTaskRequest struct { + TaskType string `protobuf:"bytes,1,opt,name=task_type,json=taskType,proto3" json:"task_type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteTaskRequest) Reset() { *m = DeleteTaskRequest{} } +func (m *DeleteTaskRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteTaskRequest) ProtoMessage() {} +func (*DeleteTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_service_aad2a84548370e06, []int{12} +} +func (m *DeleteTaskRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteTaskRequest.Unmarshal(m, b) +} +func (m *DeleteTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteTaskRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteTaskRequest.Merge(dst, src) +} +func (m *DeleteTaskRequest) XXX_Size() int { + return xxx_messageInfo_DeleteTaskRequest.Size(m) +} +func (m *DeleteTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteTaskRequest proto.InternalMessageInfo + +func (m *DeleteTaskRequest) GetTaskType() string { + if m != nil { + return m.TaskType + } + return "" +} + +type DeleteTaskResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m 
*DeleteTaskResponse) Reset() { *m = DeleteTaskResponse{} } +func (m *DeleteTaskResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteTaskResponse) ProtoMessage() {} +func (*DeleteTaskResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_metadata_service_aad2a84548370e06, []int{13} +} +func (m *DeleteTaskResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteTaskResponse.Unmarshal(m, b) +} +func (m *DeleteTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteTaskResponse.Marshal(b, m, deterministic) +} +func (dst *DeleteTaskResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteTaskResponse.Merge(dst, src) +} +func (m *DeleteTaskResponse) XXX_Size() int { + return xxx_messageInfo_DeleteTaskResponse.Size(m) +} +func (m *DeleteTaskResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteTaskResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteTaskResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*CreateWorkflowRequest)(nil), "conductor.grpc.metadata.CreateWorkflowRequest") + proto.RegisterType((*CreateWorkflowResponse)(nil), "conductor.grpc.metadata.CreateWorkflowResponse") + proto.RegisterType((*UpdateWorkflowsRequest)(nil), "conductor.grpc.metadata.UpdateWorkflowsRequest") + proto.RegisterType((*UpdateWorkflowsResponse)(nil), "conductor.grpc.metadata.UpdateWorkflowsResponse") + proto.RegisterType((*GetWorkflowRequest)(nil), "conductor.grpc.metadata.GetWorkflowRequest") + proto.RegisterType((*GetWorkflowResponse)(nil), "conductor.grpc.metadata.GetWorkflowResponse") + proto.RegisterType((*CreateTasksRequest)(nil), "conductor.grpc.metadata.CreateTasksRequest") + proto.RegisterType((*CreateTasksResponse)(nil), "conductor.grpc.metadata.CreateTasksResponse") + proto.RegisterType((*UpdateTaskRequest)(nil), "conductor.grpc.metadata.UpdateTaskRequest") + proto.RegisterType((*UpdateTaskResponse)(nil), "conductor.grpc.metadata.UpdateTaskResponse") + proto.RegisterType((*GetTaskRequest)(nil), "conductor.grpc.metadata.GetTaskRequest") + proto.RegisterType((*GetTaskResponse)(nil), "conductor.grpc.metadata.GetTaskResponse") + proto.RegisterType((*DeleteTaskRequest)(nil), "conductor.grpc.metadata.DeleteTaskRequest") + proto.RegisterType((*DeleteTaskResponse)(nil), "conductor.grpc.metadata.DeleteTaskResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MetadataServiceClient is the client API for MetadataService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type MetadataServiceClient interface { + // POST /workflow + CreateWorkflow(ctx context.Context, in *CreateWorkflowRequest, opts ...grpc.CallOption) (*CreateWorkflowResponse, error) + // PUT /workflow + UpdateWorkflows(ctx context.Context, in *UpdateWorkflowsRequest, opts ...grpc.CallOption) (*UpdateWorkflowsResponse, error) + // GET /workflow/{name} + GetWorkflow(ctx context.Context, in *GetWorkflowRequest, opts ...grpc.CallOption) (*GetWorkflowResponse, error) + // POST /taskdefs + CreateTasks(ctx context.Context, in *CreateTasksRequest, opts ...grpc.CallOption) (*CreateTasksResponse, error) + // PUT /taskdefs + UpdateTask(ctx context.Context, in *UpdateTaskRequest, opts ...grpc.CallOption) (*UpdateTaskResponse, error) + // GET /taskdefs/{tasktype} + GetTask(ctx context.Context, in *GetTaskRequest, opts ...grpc.CallOption) (*GetTaskResponse, error) + // DELETE /taskdefs/{tasktype} + DeleteTask(ctx context.Context, in *DeleteTaskRequest, opts ...grpc.CallOption) (*DeleteTaskResponse, error) +} + +type metadataServiceClient struct { + cc *grpc.ClientConn +} + +func NewMetadataServiceClient(cc *grpc.ClientConn) MetadataServiceClient { + return &metadataServiceClient{cc} +} + +func (c *metadataServiceClient) CreateWorkflow(ctx context.Context, in *CreateWorkflowRequest, opts ...grpc.CallOption) (*CreateWorkflowResponse, error) { + out := new(CreateWorkflowResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.metadata.MetadataService/CreateWorkflow", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metadataServiceClient) UpdateWorkflows(ctx context.Context, in *UpdateWorkflowsRequest, opts ...grpc.CallOption) (*UpdateWorkflowsResponse, error) { + out := new(UpdateWorkflowsResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.metadata.MetadataService/UpdateWorkflows", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metadataServiceClient) GetWorkflow(ctx context.Context, in *GetWorkflowRequest, opts ...grpc.CallOption) (*GetWorkflowResponse, error) { + out := new(GetWorkflowResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.metadata.MetadataService/GetWorkflow", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metadataServiceClient) CreateTasks(ctx context.Context, in *CreateTasksRequest, opts ...grpc.CallOption) (*CreateTasksResponse, error) { + out := new(CreateTasksResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.metadata.MetadataService/CreateTasks", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metadataServiceClient) UpdateTask(ctx context.Context, in *UpdateTaskRequest, opts ...grpc.CallOption) (*UpdateTaskResponse, error) { + out := new(UpdateTaskResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.metadata.MetadataService/UpdateTask", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metadataServiceClient) GetTask(ctx context.Context, in *GetTaskRequest, opts ...grpc.CallOption) (*GetTaskResponse, error) { + out := new(GetTaskResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.metadata.MetadataService/GetTask", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metadataServiceClient) DeleteTask(ctx context.Context, in *DeleteTaskRequest, opts ...grpc.CallOption) (*DeleteTaskResponse, error) { + out := new(DeleteTaskResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.metadata.MetadataService/DeleteTask", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// MetadataServiceServer is the server API for MetadataService service. +type MetadataServiceServer interface { + // POST /workflow + CreateWorkflow(context.Context, *CreateWorkflowRequest) (*CreateWorkflowResponse, error) + // PUT /workflow + UpdateWorkflows(context.Context, *UpdateWorkflowsRequest) (*UpdateWorkflowsResponse, error) + // GET /workflow/{name} + GetWorkflow(context.Context, *GetWorkflowRequest) (*GetWorkflowResponse, error) + // POST /taskdefs + CreateTasks(context.Context, *CreateTasksRequest) (*CreateTasksResponse, error) + // PUT /taskdefs + UpdateTask(context.Context, *UpdateTaskRequest) (*UpdateTaskResponse, error) + // GET /taskdefs/{tasktype} + GetTask(context.Context, *GetTaskRequest) (*GetTaskResponse, error) + // DELETE /taskdefs/{tasktype} + DeleteTask(context.Context, *DeleteTaskRequest) (*DeleteTaskResponse, error) +} + +func RegisterMetadataServiceServer(s *grpc.Server, srv MetadataServiceServer) { + s.RegisterService(&_MetadataService_serviceDesc, srv) +} + +func _MetadataService_CreateWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateWorkflowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetadataServiceServer).CreateWorkflow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.metadata.MetadataService/CreateWorkflow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetadataServiceServer).CreateWorkflow(ctx, req.(*CreateWorkflowRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetadataService_UpdateWorkflows_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateWorkflowsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetadataServiceServer).UpdateWorkflows(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.metadata.MetadataService/UpdateWorkflows", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetadataServiceServer).UpdateWorkflows(ctx, req.(*UpdateWorkflowsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetadataService_GetWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetWorkflowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetadataServiceServer).GetWorkflow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.metadata.MetadataService/GetWorkflow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetadataServiceServer).GetWorkflow(ctx, req.(*GetWorkflowRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetadataService_CreateTasks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateTasksRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetadataServiceServer).CreateTasks(ctx, in) + } + info := &grpc.UnaryServerInfo{ + 
Server: srv, + FullMethod: "/conductor.grpc.metadata.MetadataService/CreateTasks", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetadataServiceServer).CreateTasks(ctx, req.(*CreateTasksRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetadataService_UpdateTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetadataServiceServer).UpdateTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.metadata.MetadataService/UpdateTask", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetadataServiceServer).UpdateTask(ctx, req.(*UpdateTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetadataService_GetTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetadataServiceServer).GetTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.metadata.MetadataService/GetTask", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetadataServiceServer).GetTask(ctx, req.(*GetTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetadataService_DeleteTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetadataServiceServer).DeleteTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.metadata.MetadataService/DeleteTask", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetadataServiceServer).DeleteTask(ctx, req.(*DeleteTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _MetadataService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "conductor.grpc.metadata.MetadataService", + HandlerType: (*MetadataServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateWorkflow", + Handler: _MetadataService_CreateWorkflow_Handler, + }, + { + MethodName: "UpdateWorkflows", + Handler: _MetadataService_UpdateWorkflows_Handler, + }, + { + MethodName: "GetWorkflow", + Handler: _MetadataService_GetWorkflow_Handler, + }, + { + MethodName: "CreateTasks", + Handler: _MetadataService_CreateTasks_Handler, + }, + { + MethodName: "UpdateTask", + Handler: _MetadataService_UpdateTask_Handler, + }, + { + MethodName: "GetTask", + Handler: _MetadataService_GetTask_Handler, + }, + { + MethodName: "DeleteTask", + Handler: _MetadataService_DeleteTask_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "grpc/metadata_service.proto", +} + +func init() { + proto.RegisterFile("grpc/metadata_service.proto", fileDescriptor_metadata_service_aad2a84548370e06) +} + +var fileDescriptor_metadata_service_aad2a84548370e06 = []byte{ + // 526 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xdf, 0x6b, 0xd3, 0x50, + 0x18, 0xa5, 
0xba, 0xb9, 0xed, 0x1b, 0xac, 0xf4, 0x76, 0x5b, 0x63, 0xe6, 0x43, 0xc9, 0x8b, 0xc5, + 0xcd, 0x9b, 0x32, 0x5f, 0x7c, 0x13, 0xe3, 0x60, 0x20, 0x88, 0x5a, 0x27, 0x82, 0x08, 0x23, 0x4d, + 0xbe, 0x74, 0xa1, 0x49, 0x6e, 0x96, 0x7b, 0xbb, 0xd9, 0x7f, 0xdd, 0x27, 0x49, 0x72, 0xf3, 0xbb, + 0x4d, 0x2b, 0xbe, 0xb5, 0xf7, 0x9e, 0xef, 0x9c, 0x9c, 0x8f, 0x73, 0xb8, 0x70, 0x36, 0x8b, 0x42, + 0x4b, 0xf7, 0x51, 0x98, 0xb6, 0x29, 0xcc, 0x5b, 0x8e, 0xd1, 0x83, 0x6b, 0x21, 0x0d, 0x23, 0x26, + 0x18, 0x19, 0x58, 0x2c, 0xb0, 0x17, 0x96, 0x60, 0x11, 0x8d, 0x61, 0x34, 0x83, 0xa9, 0x7d, 0x9f, + 0xd9, 0xe8, 0xe9, 0xc2, 0xe4, 0x73, 0x1b, 0x9d, 0x14, 0xad, 0x0e, 0xd2, 0xc3, 0x47, 0x16, 0xcd, + 0x1d, 0x8f, 0x3d, 0xe6, 0x17, 0xda, 0x57, 0x38, 0xf9, 0x10, 0xa1, 0x29, 0xf0, 0x87, 0xbc, 0x9a, + 0xe0, 0xfd, 0x02, 0xb9, 0x20, 0x6f, 0x61, 0x3f, 0x43, 0x2b, 0x9d, 0x61, 0x67, 0x74, 0x78, 0xf9, + 0x82, 0x16, 0x92, 0xc9, 0x30, 0xcd, 0x66, 0xae, 0xd0, 0x99, 0xe4, 0x68, 0x4d, 0x81, 0xd3, 0x3a, + 0x25, 0x0f, 0x59, 0xc0, 0x51, 0xfb, 0x08, 0xa7, 0xdf, 0x43, 0xbb, 0x74, 0xc3, 0x33, 0xb5, 0x31, + 0xec, 0xd8, 0xe8, 0x70, 0xa5, 0x33, 0x7c, 0xba, 0x51, 0x29, 0x41, 0x6a, 0xcf, 0x61, 0xd0, 0xe0, + 0x92, 0x32, 0x06, 0x90, 0x6b, 0x14, 0x75, 0x43, 0x04, 0x76, 0x02, 0xd3, 0xc7, 0xc4, 0xcc, 0xc1, + 0x24, 0xf9, 0x4d, 0x14, 0xd8, 0x7b, 0xc0, 0x88, 0xbb, 0x2c, 0x50, 0x9e, 0x0c, 0x3b, 0xa3, 0xdd, + 0x49, 0xf6, 0x57, 0xfb, 0x0c, 0xfd, 0x0a, 0x47, 0x4a, 0xfd, 0x1f, 0x5b, 0x31, 0x80, 0xa4, 0x5b, + 0xb9, 0x31, 0xf9, 0x3c, 0xf7, 0x7d, 0x51, 0xf1, 0xad, 0x34, 0xb8, 0x62, 0x70, 0xe1, 0xf9, 0x04, + 0xfa, 0x15, 0x0e, 0xe9, 0xf7, 0x3d, 0xf4, 0xd2, 0x55, 0xc4, 0xc7, 0x25, 0xe6, 0x38, 0x02, 0xf2, + 0x2b, 0x5b, 0x98, 0x63, 0x94, 0x76, 0x0c, 0xa4, 0x4c, 0x21, 0x89, 0x5f, 0xc3, 0xd1, 0x35, 0x8a, + 0x32, 0xeb, 0x19, 0x1c, 0xc4, 0xf8, 0x5b, 0xb1, 0x0c, 0xb3, 0x4d, 0xee, 0xc7, 0x07, 0x37, 0xcb, + 0x10, 0xb5, 0x77, 0xd0, 0xcd, 0xe1, 0x72, 0x5f, 0xff, 0xf6, 0x15, 0x63, 0xe8, 0x5d, 0xa1, 0x87, + 0x55, 0x23, 0xad, 0x92, 0xc7, 0x40, 0xca, 0x13, 0xa9, 0xea, 0xe5, 0x9f, 0x5d, 0xe8, 0x7e, 0x92, + 0x7d, 0xf8, 0x96, 0xb6, 0x86, 0xdc, 0xc3, 0x51, 0x35, 0x95, 0x84, 0xd2, 0x35, 0x15, 0xa2, 0x2b, + 0x1b, 0xa1, 0xea, 0x5b, 0xe3, 0xa5, 0x79, 0x01, 0xdd, 0x5a, 0x44, 0xc9, 0x7a, 0x8e, 0xd5, 0xc5, + 0x50, 0xc7, 0xdb, 0x0f, 0x48, 0xd5, 0x3b, 0x38, 0x2c, 0x25, 0x97, 0x9c, 0xaf, 0x25, 0x68, 0x76, + 0x44, 0xbd, 0xd8, 0x0e, 0x5c, 0x28, 0x95, 0xe2, 0xd8, 0xa2, 0xd4, 0x0c, 0x7e, 0x8b, 0xd2, 0x8a, + 0x84, 0x13, 0x04, 0x28, 0xe2, 0x49, 0x5e, 0x6d, 0xd8, 0x49, 0x29, 0x3d, 0xea, 0xf9, 0x56, 0x58, + 0x29, 0xf3, 0x0b, 0xf6, 0x64, 0x80, 0xc9, 0xcb, 0xb6, 0x4d, 0x94, 0x05, 0x46, 0x9b, 0x81, 0x85, + 0x89, 0x22, 0xab, 0x2d, 0x26, 0x1a, 0x15, 0x68, 0x31, 0xd1, 0x0c, 0xbf, 0xc1, 0x41, 0xb5, 0x98, + 0x4f, 0x03, 0x14, 0x8e, 0xe7, 0xfe, 0xae, 0x4d, 0x1a, 0xbd, 0x5a, 0x2f, 0xbe, 0x4c, 0x7f, 0x1a, + 0x33, 0x57, 0xdc, 0x2d, 0xa6, 0xd4, 0x62, 0xbe, 0x2e, 0xa7, 0xf4, 0x7c, 0x4a, 0xb7, 0x3c, 0x17, + 0x03, 0xa1, 0xcf, 0x58, 0xf2, 0x18, 0x15, 0xe7, 0x95, 0xb7, 0x69, 0xfa, 0x2c, 0xa9, 0xf3, 0x9b, + 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x5f, 0x81, 0xa1, 0x07, 0xb3, 0x06, 0x00, 0x00, +} diff --git a/client/gogrpc/conductor/grpc/search/search.pb.go b/client/gogrpc/conductor/grpc/search/search.pb.go new file mode 100644 index 0000000000..cdf1557c30 --- /dev/null +++ b/client/gogrpc/conductor/grpc/search/search.pb.go @@ -0,0 +1,113 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: grpc/search.proto + +package search // import "github.com/netflix/conductor/client/gogrpc/conductor/grpc/search" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Request struct { + Start int32 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` + Size int32 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"` + Sort string `protobuf:"bytes,3,opt,name=sort,proto3" json:"sort,omitempty"` + FreeText string `protobuf:"bytes,4,opt,name=free_text,json=freeText,proto3" json:"free_text,omitempty"` + Query string `protobuf:"bytes,5,opt,name=query,proto3" json:"query,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { + return fileDescriptor_search_a50e745244da0fb1, []int{0} +} +func (m *Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Request.Unmarshal(m, b) +} +func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Request.Marshal(b, m, deterministic) +} +func (dst *Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Request.Merge(dst, src) +} +func (m *Request) XXX_Size() int { + return xxx_messageInfo_Request.Size(m) +} +func (m *Request) XXX_DiscardUnknown() { + xxx_messageInfo_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Request proto.InternalMessageInfo + +func (m *Request) GetStart() int32 { + if m != nil { + return m.Start + } + return 0 +} + +func (m *Request) GetSize() int32 { + if m != nil { + return m.Size + } + return 0 +} + +func (m *Request) GetSort() string { + if m != nil { + return m.Sort + } + return "" +} + +func (m *Request) GetFreeText() string { + if m != nil { + return m.FreeText + } + return "" +} + +func (m *Request) GetQuery() string { + if m != nil { + return m.Query + } + return "" +} + +func init() { + proto.RegisterType((*Request)(nil), "conductor.grpc.search.Request") +} + +func init() { proto.RegisterFile("grpc/search.proto", fileDescriptor_search_a50e745244da0fb1) } + +var fileDescriptor_search_a50e745244da0fb1 = []byte{ + // 212 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x90, 0xc1, 0x4a, 0xc4, 0x30, + 0x10, 0x86, 0xa9, 0x6e, 0x75, 0x37, 0x37, 0x83, 0x42, 0xd0, 0xcb, 0xe2, 0x69, 0x4f, 0xc9, 0xc1, + 0x17, 0x90, 0x7d, 0x02, 0xa9, 0x9e, 0xbc, 0xc8, 0x36, 0x4e, 0xdb, 0x40, 0xdb, 0x69, 0x27, 0x13, + 0xa8, 0x7d, 0x7a, 0xe9, 0x54, 0xd4, 0xbd, 0xcd, 0x7c, 0xdf, 0x10, 0xfe, 0xfc, 0xea, 0xa6, 0xa6, + 0xc1, 0xbb, 0x08, 0x27, 0xf2, 0x8d, 0x1d, 0x08, 0x19, 0xf5, 0x9d, 0xc7, 0xfe, 0x33, 0x79, 0x46, + 0xb2, 0x8b, 0xb4, 0xab, 0x7c, 0x9c, 0xd5, 0x75, 0x01, 0x63, 0x82, 0xc8, 0xfa, 0x56, 0xe5, 0x91, + 0x4f, 0xc4, 0x26, 0xdb, 0x67, 0x87, 0xbc, 0x58, 0x17, 0xad, 0xd5, 0x26, 0x86, 0x19, 0xcc, 0x85, + 0x40, 0x99, 0x85, 0x21, 0xb1, 
0xb9, 0xdc, 0x67, 0x87, 0x5d, 0x21, 0xb3, 0x7e, 0x50, 0xbb, 0x8a, + 0x00, 0x3e, 0x18, 0x26, 0x36, 0x1b, 0x11, 0xdb, 0x05, 0xbc, 0xc1, 0x24, 0x4f, 0x8f, 0x09, 0xe8, + 0xcb, 0xe4, 0x22, 0xd6, 0xe5, 0xd8, 0xa8, 0x7b, 0x8f, 0x9d, 0xed, 0x81, 0xab, 0x36, 0x4c, 0xf6, + 0x3c, 0xe0, 0x71, 0xfb, 0x2a, 0x09, 0x5f, 0xca, 0xf7, 0xe7, 0x3a, 0x70, 0x93, 0x4a, 0xeb, 0xb1, + 0x73, 0x3f, 0xc7, 0xee, 0xf7, 0xd8, 0xf9, 0x36, 0x40, 0xcf, 0xae, 0x46, 0xf9, 0xf3, 0x1f, 0xff, + 0x57, 0x41, 0x79, 0x25, 0x1d, 0x3c, 0x7d, 0x07, 0x00, 0x00, 0xff, 0xff, 0x8d, 0x4d, 0x39, 0xe7, + 0x18, 0x01, 0x00, 0x00, +} diff --git a/client/gogrpc/conductor/grpc/tasks/task_service.pb.go b/client/gogrpc/conductor/grpc/tasks/task_service.pb.go new file mode 100644 index 0000000000..f6a6dbc0dd --- /dev/null +++ b/client/gogrpc/conductor/grpc/tasks/task_service.pb.go @@ -0,0 +1,1757 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc/task_service.proto + +package tasks // import "github.com/netflix/conductor/client/gogrpc/conductor/grpc/tasks" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import model "github.com/netflix/conductor/client/gogrpc/conductor/model" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type PollRequest struct { + TaskType string `protobuf:"bytes,1,opt,name=task_type,json=taskType,proto3" json:"task_type,omitempty"` + WorkerId string `protobuf:"bytes,2,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"` + Domain string `protobuf:"bytes,3,opt,name=domain,proto3" json:"domain,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PollRequest) Reset() { *m = PollRequest{} } +func (m *PollRequest) String() string { return proto.CompactTextString(m) } +func (*PollRequest) ProtoMessage() {} +func (*PollRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_task_service_1133a2fd800ff6c6, []int{0} +} +func (m *PollRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PollRequest.Unmarshal(m, b) +} +func (m *PollRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PollRequest.Marshal(b, m, deterministic) +} +func (dst *PollRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PollRequest.Merge(dst, src) +} +func (m *PollRequest) XXX_Size() int { + return xxx_messageInfo_PollRequest.Size(m) +} +func (m *PollRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PollRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PollRequest proto.InternalMessageInfo + +func (m *PollRequest) GetTaskType() string { + if m != nil { + return m.TaskType + } + return "" +} + +func (m *PollRequest) GetWorkerId() string { + if m != nil { + return m.WorkerId + } + return "" +} + +func (m *PollRequest) GetDomain() string { + if m != nil { + return m.Domain + } + return "" +} + +type PollResponse struct { + Task *model.Task `protobuf:"bytes,1,opt,name=task,proto3" json:"task,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + 
XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PollResponse) Reset() { *m = PollResponse{} } +func (m *PollResponse) String() string { return proto.CompactTextString(m) } +func (*PollResponse) ProtoMessage() {} +func (*PollResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_task_service_1133a2fd800ff6c6, []int{1} +} +func (m *PollResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PollResponse.Unmarshal(m, b) +} +func (m *PollResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PollResponse.Marshal(b, m, deterministic) +} +func (dst *PollResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PollResponse.Merge(dst, src) +} +func (m *PollResponse) XXX_Size() int { + return xxx_messageInfo_PollResponse.Size(m) +} +func (m *PollResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PollResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PollResponse proto.InternalMessageInfo + +func (m *PollResponse) GetTask() *model.Task { + if m != nil { + return m.Task + } + return nil +} + +type BatchPollRequest struct { + TaskType string `protobuf:"bytes,1,opt,name=task_type,json=taskType,proto3" json:"task_type,omitempty"` + WorkerId string `protobuf:"bytes,2,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"` + Domain string `protobuf:"bytes,3,opt,name=domain,proto3" json:"domain,omitempty"` + Count int32 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` + Timeout int32 `protobuf:"varint,5,opt,name=timeout,proto3" json:"timeout,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BatchPollRequest) Reset() { *m = BatchPollRequest{} } +func (m *BatchPollRequest) String() string { return proto.CompactTextString(m) } +func (*BatchPollRequest) ProtoMessage() {} +func (*BatchPollRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_task_service_1133a2fd800ff6c6, []int{2} +} +func (m *BatchPollRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BatchPollRequest.Unmarshal(m, b) +} +func (m *BatchPollRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BatchPollRequest.Marshal(b, m, deterministic) +} +func (dst *BatchPollRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BatchPollRequest.Merge(dst, src) +} +func (m *BatchPollRequest) XXX_Size() int { + return xxx_messageInfo_BatchPollRequest.Size(m) +} +func (m *BatchPollRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BatchPollRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BatchPollRequest proto.InternalMessageInfo + +func (m *BatchPollRequest) GetTaskType() string { + if m != nil { + return m.TaskType + } + return "" +} + +func (m *BatchPollRequest) GetWorkerId() string { + if m != nil { + return m.WorkerId + } + return "" +} + +func (m *BatchPollRequest) GetDomain() string { + if m != nil { + return m.Domain + } + return "" +} + +func (m *BatchPollRequest) GetCount() int32 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *BatchPollRequest) GetTimeout() int32 { + if m != nil { + return m.Timeout + } + return 0 +} + +type TasksInProgressRequest struct { + TaskType string `protobuf:"bytes,1,opt,name=task_type,json=taskType,proto3" json:"task_type,omitempty"` + StartKey string `protobuf:"bytes,2,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` + Count int32 `protobuf:"varint,3,opt,name=count,proto3" json:"count,omitempty"` + 
XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TasksInProgressRequest) Reset() { *m = TasksInProgressRequest{} } +func (m *TasksInProgressRequest) String() string { return proto.CompactTextString(m) } +func (*TasksInProgressRequest) ProtoMessage() {} +func (*TasksInProgressRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_task_service_1133a2fd800ff6c6, []int{3} +} +func (m *TasksInProgressRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TasksInProgressRequest.Unmarshal(m, b) +} +func (m *TasksInProgressRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TasksInProgressRequest.Marshal(b, m, deterministic) +} +func (dst *TasksInProgressRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TasksInProgressRequest.Merge(dst, src) +} +func (m *TasksInProgressRequest) XXX_Size() int { + return xxx_messageInfo_TasksInProgressRequest.Size(m) +} +func (m *TasksInProgressRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TasksInProgressRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TasksInProgressRequest proto.InternalMessageInfo + +func (m *TasksInProgressRequest) GetTaskType() string { + if m != nil { + return m.TaskType + } + return "" +} + +func (m *TasksInProgressRequest) GetStartKey() string { + if m != nil { + return m.StartKey + } + return "" +} + +func (m *TasksInProgressRequest) GetCount() int32 { + if m != nil { + return m.Count + } + return 0 +} + +type TasksInProgressResponse struct { + Tasks []*model.Task `protobuf:"bytes,1,rep,name=tasks,proto3" json:"tasks,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TasksInProgressResponse) Reset() { *m = TasksInProgressResponse{} } +func (m *TasksInProgressResponse) String() string { return proto.CompactTextString(m) } +func (*TasksInProgressResponse) ProtoMessage() {} +func (*TasksInProgressResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_task_service_1133a2fd800ff6c6, []int{4} +} +func (m *TasksInProgressResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TasksInProgressResponse.Unmarshal(m, b) +} +func (m *TasksInProgressResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TasksInProgressResponse.Marshal(b, m, deterministic) +} +func (dst *TasksInProgressResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TasksInProgressResponse.Merge(dst, src) +} +func (m *TasksInProgressResponse) XXX_Size() int { + return xxx_messageInfo_TasksInProgressResponse.Size(m) +} +func (m *TasksInProgressResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TasksInProgressResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TasksInProgressResponse proto.InternalMessageInfo + +func (m *TasksInProgressResponse) GetTasks() []*model.Task { + if m != nil { + return m.Tasks + } + return nil +} + +type PendingTaskRequest struct { + WorkflowId string `protobuf:"bytes,1,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + TaskRefName string `protobuf:"bytes,2,opt,name=task_ref_name,json=taskRefName,proto3" json:"task_ref_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PendingTaskRequest) Reset() { *m = PendingTaskRequest{} } +func (m *PendingTaskRequest) String() string { return proto.CompactTextString(m) } +func 
(*PendingTaskRequest) ProtoMessage() {} +func (*PendingTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_task_service_1133a2fd800ff6c6, []int{5} +} +func (m *PendingTaskRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PendingTaskRequest.Unmarshal(m, b) +} +func (m *PendingTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PendingTaskRequest.Marshal(b, m, deterministic) +} +func (dst *PendingTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PendingTaskRequest.Merge(dst, src) +} +func (m *PendingTaskRequest) XXX_Size() int { + return xxx_messageInfo_PendingTaskRequest.Size(m) +} +func (m *PendingTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PendingTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PendingTaskRequest proto.InternalMessageInfo + +func (m *PendingTaskRequest) GetWorkflowId() string { + if m != nil { + return m.WorkflowId + } + return "" +} + +func (m *PendingTaskRequest) GetTaskRefName() string { + if m != nil { + return m.TaskRefName + } + return "" +} + +type PendingTaskResponse struct { + Task *model.Task `protobuf:"bytes,1,opt,name=task,proto3" json:"task,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PendingTaskResponse) Reset() { *m = PendingTaskResponse{} } +func (m *PendingTaskResponse) String() string { return proto.CompactTextString(m) } +func (*PendingTaskResponse) ProtoMessage() {} +func (*PendingTaskResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_task_service_1133a2fd800ff6c6, []int{6} +} +func (m *PendingTaskResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PendingTaskResponse.Unmarshal(m, b) +} +func (m *PendingTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PendingTaskResponse.Marshal(b, m, deterministic) +} +func (dst *PendingTaskResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PendingTaskResponse.Merge(dst, src) +} +func (m *PendingTaskResponse) XXX_Size() int { + return xxx_messageInfo_PendingTaskResponse.Size(m) +} +func (m *PendingTaskResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PendingTaskResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PendingTaskResponse proto.InternalMessageInfo + +func (m *PendingTaskResponse) GetTask() *model.Task { + if m != nil { + return m.Task + } + return nil +} + +type UpdateTaskRequest struct { + Result *model.TaskResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateTaskRequest) Reset() { *m = UpdateTaskRequest{} } +func (m *UpdateTaskRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateTaskRequest) ProtoMessage() {} +func (*UpdateTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_task_service_1133a2fd800ff6c6, []int{7} +} +func (m *UpdateTaskRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateTaskRequest.Unmarshal(m, b) +} +func (m *UpdateTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateTaskRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateTaskRequest.Merge(dst, src) +} +func (m *UpdateTaskRequest) XXX_Size() int { + return xxx_messageInfo_UpdateTaskRequest.Size(m) +} +func (m 
*UpdateTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateTaskRequest proto.InternalMessageInfo + +func (m *UpdateTaskRequest) GetResult() *model.TaskResult { + if m != nil { + return m.Result + } + return nil +} + +type UpdateTaskResponse struct { + TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateTaskResponse) Reset() { *m = UpdateTaskResponse{} } +func (m *UpdateTaskResponse) String() string { return proto.CompactTextString(m) } +func (*UpdateTaskResponse) ProtoMessage() {} +func (*UpdateTaskResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_task_service_1133a2fd800ff6c6, []int{8} +} +func (m *UpdateTaskResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateTaskResponse.Unmarshal(m, b) +} +func (m *UpdateTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateTaskResponse.Marshal(b, m, deterministic) +} +func (dst *UpdateTaskResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateTaskResponse.Merge(dst, src) +} +func (m *UpdateTaskResponse) XXX_Size() int { + return xxx_messageInfo_UpdateTaskResponse.Size(m) +} +func (m *UpdateTaskResponse) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateTaskResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateTaskResponse proto.InternalMessageInfo + +func (m *UpdateTaskResponse) GetTaskId() string { + if m != nil { + return m.TaskId + } + return "" +} + +type AckTaskRequest struct { + TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + WorkerId string `protobuf:"bytes,2,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AckTaskRequest) Reset() { *m = AckTaskRequest{} } +func (m *AckTaskRequest) String() string { return proto.CompactTextString(m) } +func (*AckTaskRequest) ProtoMessage() {} +func (*AckTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_task_service_1133a2fd800ff6c6, []int{9} +} +func (m *AckTaskRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AckTaskRequest.Unmarshal(m, b) +} +func (m *AckTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AckTaskRequest.Marshal(b, m, deterministic) +} +func (dst *AckTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AckTaskRequest.Merge(dst, src) +} +func (m *AckTaskRequest) XXX_Size() int { + return xxx_messageInfo_AckTaskRequest.Size(m) +} +func (m *AckTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AckTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AckTaskRequest proto.InternalMessageInfo + +func (m *AckTaskRequest) GetTaskId() string { + if m != nil { + return m.TaskId + } + return "" +} + +func (m *AckTaskRequest) GetWorkerId() string { + if m != nil { + return m.WorkerId + } + return "" +} + +type AckTaskResponse struct { + Ack bool `protobuf:"varint,1,opt,name=ack,proto3" json:"ack,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AckTaskResponse) Reset() { *m = AckTaskResponse{} } +func (m *AckTaskResponse) String() string { return proto.CompactTextString(m) } +func 
(*AckTaskResponse) ProtoMessage() {} +func (*AckTaskResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_task_service_1133a2fd800ff6c6, []int{10} +} +func (m *AckTaskResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AckTaskResponse.Unmarshal(m, b) +} +func (m *AckTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AckTaskResponse.Marshal(b, m, deterministic) +} +func (dst *AckTaskResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AckTaskResponse.Merge(dst, src) +} +func (m *AckTaskResponse) XXX_Size() int { + return xxx_messageInfo_AckTaskResponse.Size(m) +} +func (m *AckTaskResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AckTaskResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AckTaskResponse proto.InternalMessageInfo + +func (m *AckTaskResponse) GetAck() bool { + if m != nil { + return m.Ack + } + return false +} + +type AddLogRequest struct { + TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + Log string `protobuf:"bytes,2,opt,name=log,proto3" json:"log,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AddLogRequest) Reset() { *m = AddLogRequest{} } +func (m *AddLogRequest) String() string { return proto.CompactTextString(m) } +func (*AddLogRequest) ProtoMessage() {} +func (*AddLogRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_task_service_1133a2fd800ff6c6, []int{11} +} +func (m *AddLogRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AddLogRequest.Unmarshal(m, b) +} +func (m *AddLogRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AddLogRequest.Marshal(b, m, deterministic) +} +func (dst *AddLogRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddLogRequest.Merge(dst, src) +} +func (m *AddLogRequest) XXX_Size() int { + return xxx_messageInfo_AddLogRequest.Size(m) +} +func (m *AddLogRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AddLogRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AddLogRequest proto.InternalMessageInfo + +func (m *AddLogRequest) GetTaskId() string { + if m != nil { + return m.TaskId + } + return "" +} + +func (m *AddLogRequest) GetLog() string { + if m != nil { + return m.Log + } + return "" +} + +type AddLogResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AddLogResponse) Reset() { *m = AddLogResponse{} } +func (m *AddLogResponse) String() string { return proto.CompactTextString(m) } +func (*AddLogResponse) ProtoMessage() {} +func (*AddLogResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_task_service_1133a2fd800ff6c6, []int{12} +} +func (m *AddLogResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AddLogResponse.Unmarshal(m, b) +} +func (m *AddLogResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AddLogResponse.Marshal(b, m, deterministic) +} +func (dst *AddLogResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddLogResponse.Merge(dst, src) +} +func (m *AddLogResponse) XXX_Size() int { + return xxx_messageInfo_AddLogResponse.Size(m) +} +func (m *AddLogResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AddLogResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AddLogResponse proto.InternalMessageInfo + +type GetTaskLogsRequest struct { + TaskId string 
`protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetTaskLogsRequest) Reset() { *m = GetTaskLogsRequest{} } +func (m *GetTaskLogsRequest) String() string { return proto.CompactTextString(m) } +func (*GetTaskLogsRequest) ProtoMessage() {} +func (*GetTaskLogsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_task_service_1133a2fd800ff6c6, []int{13} +} +func (m *GetTaskLogsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetTaskLogsRequest.Unmarshal(m, b) +} +func (m *GetTaskLogsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetTaskLogsRequest.Marshal(b, m, deterministic) +} +func (dst *GetTaskLogsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetTaskLogsRequest.Merge(dst, src) +} +func (m *GetTaskLogsRequest) XXX_Size() int { + return xxx_messageInfo_GetTaskLogsRequest.Size(m) +} +func (m *GetTaskLogsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetTaskLogsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetTaskLogsRequest proto.InternalMessageInfo + +func (m *GetTaskLogsRequest) GetTaskId() string { + if m != nil { + return m.TaskId + } + return "" +} + +type GetTaskLogsResponse struct { + Logs []*model.TaskExecLog `protobuf:"bytes,1,rep,name=logs,proto3" json:"logs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetTaskLogsResponse) Reset() { *m = GetTaskLogsResponse{} } +func (m *GetTaskLogsResponse) String() string { return proto.CompactTextString(m) } +func (*GetTaskLogsResponse) ProtoMessage() {} +func (*GetTaskLogsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_task_service_1133a2fd800ff6c6, []int{14} +} +func (m *GetTaskLogsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetTaskLogsResponse.Unmarshal(m, b) +} +func (m *GetTaskLogsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetTaskLogsResponse.Marshal(b, m, deterministic) +} +func (dst *GetTaskLogsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetTaskLogsResponse.Merge(dst, src) +} +func (m *GetTaskLogsResponse) XXX_Size() int { + return xxx_messageInfo_GetTaskLogsResponse.Size(m) +} +func (m *GetTaskLogsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetTaskLogsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetTaskLogsResponse proto.InternalMessageInfo + +func (m *GetTaskLogsResponse) GetLogs() []*model.TaskExecLog { + if m != nil { + return m.Logs + } + return nil +} + +type GetTaskRequest struct { + TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetTaskRequest) Reset() { *m = GetTaskRequest{} } +func (m *GetTaskRequest) String() string { return proto.CompactTextString(m) } +func (*GetTaskRequest) ProtoMessage() {} +func (*GetTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_task_service_1133a2fd800ff6c6, []int{15} +} +func (m *GetTaskRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetTaskRequest.Unmarshal(m, b) +} +func (m *GetTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetTaskRequest.Marshal(b, m, deterministic) +} +func 
(dst *GetTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetTaskRequest.Merge(dst, src) +} +func (m *GetTaskRequest) XXX_Size() int { + return xxx_messageInfo_GetTaskRequest.Size(m) +} +func (m *GetTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetTaskRequest proto.InternalMessageInfo + +func (m *GetTaskRequest) GetTaskId() string { + if m != nil { + return m.TaskId + } + return "" +} + +type GetTaskResponse struct { + Task *model.Task `protobuf:"bytes,1,opt,name=task,proto3" json:"task,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetTaskResponse) Reset() { *m = GetTaskResponse{} } +func (m *GetTaskResponse) String() string { return proto.CompactTextString(m) } +func (*GetTaskResponse) ProtoMessage() {} +func (*GetTaskResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_task_service_1133a2fd800ff6c6, []int{16} +} +func (m *GetTaskResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetTaskResponse.Unmarshal(m, b) +} +func (m *GetTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetTaskResponse.Marshal(b, m, deterministic) +} +func (dst *GetTaskResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetTaskResponse.Merge(dst, src) +} +func (m *GetTaskResponse) XXX_Size() int { + return xxx_messageInfo_GetTaskResponse.Size(m) +} +func (m *GetTaskResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetTaskResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetTaskResponse proto.InternalMessageInfo + +func (m *GetTaskResponse) GetTask() *model.Task { + if m != nil { + return m.Task + } + return nil +} + +type RemoveTaskRequest struct { + TaskType string `protobuf:"bytes,1,opt,name=task_type,json=taskType,proto3" json:"task_type,omitempty"` + TaskId string `protobuf:"bytes,2,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RemoveTaskRequest) Reset() { *m = RemoveTaskRequest{} } +func (m *RemoveTaskRequest) String() string { return proto.CompactTextString(m) } +func (*RemoveTaskRequest) ProtoMessage() {} +func (*RemoveTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_task_service_1133a2fd800ff6c6, []int{17} +} +func (m *RemoveTaskRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RemoveTaskRequest.Unmarshal(m, b) +} +func (m *RemoveTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RemoveTaskRequest.Marshal(b, m, deterministic) +} +func (dst *RemoveTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoveTaskRequest.Merge(dst, src) +} +func (m *RemoveTaskRequest) XXX_Size() int { + return xxx_messageInfo_RemoveTaskRequest.Size(m) +} +func (m *RemoveTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RemoveTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoveTaskRequest proto.InternalMessageInfo + +func (m *RemoveTaskRequest) GetTaskType() string { + if m != nil { + return m.TaskType + } + return "" +} + +func (m *RemoveTaskRequest) GetTaskId() string { + if m != nil { + return m.TaskId + } + return "" +} + +type RemoveTaskResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RemoveTaskResponse) Reset() { *m = 
RemoveTaskResponse{} } +func (m *RemoveTaskResponse) String() string { return proto.CompactTextString(m) } +func (*RemoveTaskResponse) ProtoMessage() {} +func (*RemoveTaskResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_task_service_1133a2fd800ff6c6, []int{18} +} +func (m *RemoveTaskResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RemoveTaskResponse.Unmarshal(m, b) +} +func (m *RemoveTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RemoveTaskResponse.Marshal(b, m, deterministic) +} +func (dst *RemoveTaskResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoveTaskResponse.Merge(dst, src) +} +func (m *RemoveTaskResponse) XXX_Size() int { + return xxx_messageInfo_RemoveTaskResponse.Size(m) +} +func (m *RemoveTaskResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RemoveTaskResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoveTaskResponse proto.InternalMessageInfo + +type QueueSizesRequest struct { + TaskTypes []string `protobuf:"bytes,1,rep,name=task_types,json=taskTypes,proto3" json:"task_types,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *QueueSizesRequest) Reset() { *m = QueueSizesRequest{} } +func (m *QueueSizesRequest) String() string { return proto.CompactTextString(m) } +func (*QueueSizesRequest) ProtoMessage() {} +func (*QueueSizesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_task_service_1133a2fd800ff6c6, []int{19} +} +func (m *QueueSizesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_QueueSizesRequest.Unmarshal(m, b) +} +func (m *QueueSizesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_QueueSizesRequest.Marshal(b, m, deterministic) +} +func (dst *QueueSizesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueueSizesRequest.Merge(dst, src) +} +func (m *QueueSizesRequest) XXX_Size() int { + return xxx_messageInfo_QueueSizesRequest.Size(m) +} +func (m *QueueSizesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueueSizesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueueSizesRequest proto.InternalMessageInfo + +func (m *QueueSizesRequest) GetTaskTypes() []string { + if m != nil { + return m.TaskTypes + } + return nil +} + +type QueueSizesResponse struct { + QueueForTask map[string]int32 `protobuf:"bytes,1,rep,name=queue_for_task,json=queueForTask,proto3" json:"queue_for_task,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *QueueSizesResponse) Reset() { *m = QueueSizesResponse{} } +func (m *QueueSizesResponse) String() string { return proto.CompactTextString(m) } +func (*QueueSizesResponse) ProtoMessage() {} +func (*QueueSizesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_task_service_1133a2fd800ff6c6, []int{20} +} +func (m *QueueSizesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_QueueSizesResponse.Unmarshal(m, b) +} +func (m *QueueSizesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_QueueSizesResponse.Marshal(b, m, deterministic) +} +func (dst *QueueSizesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueueSizesResponse.Merge(dst, src) +} +func (m *QueueSizesResponse) XXX_Size() int { + return 
xxx_messageInfo_QueueSizesResponse.Size(m) +} +func (m *QueueSizesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueueSizesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueueSizesResponse proto.InternalMessageInfo + +func (m *QueueSizesResponse) GetQueueForTask() map[string]int32 { + if m != nil { + return m.QueueForTask + } + return nil +} + +type QueueInfoRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *QueueInfoRequest) Reset() { *m = QueueInfoRequest{} } +func (m *QueueInfoRequest) String() string { return proto.CompactTextString(m) } +func (*QueueInfoRequest) ProtoMessage() {} +func (*QueueInfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_task_service_1133a2fd800ff6c6, []int{21} +} +func (m *QueueInfoRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_QueueInfoRequest.Unmarshal(m, b) +} +func (m *QueueInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_QueueInfoRequest.Marshal(b, m, deterministic) +} +func (dst *QueueInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueueInfoRequest.Merge(dst, src) +} +func (m *QueueInfoRequest) XXX_Size() int { + return xxx_messageInfo_QueueInfoRequest.Size(m) +} +func (m *QueueInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueueInfoRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueueInfoRequest proto.InternalMessageInfo + +type QueueInfoResponse struct { + Queues map[string]int64 `protobuf:"bytes,1,rep,name=queues,proto3" json:"queues,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *QueueInfoResponse) Reset() { *m = QueueInfoResponse{} } +func (m *QueueInfoResponse) String() string { return proto.CompactTextString(m) } +func (*QueueInfoResponse) ProtoMessage() {} +func (*QueueInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_task_service_1133a2fd800ff6c6, []int{22} +} +func (m *QueueInfoResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_QueueInfoResponse.Unmarshal(m, b) +} +func (m *QueueInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_QueueInfoResponse.Marshal(b, m, deterministic) +} +func (dst *QueueInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueueInfoResponse.Merge(dst, src) +} +func (m *QueueInfoResponse) XXX_Size() int { + return xxx_messageInfo_QueueInfoResponse.Size(m) +} +func (m *QueueInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueueInfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueueInfoResponse proto.InternalMessageInfo + +func (m *QueueInfoResponse) GetQueues() map[string]int64 { + if m != nil { + return m.Queues + } + return nil +} + +type QueueAllInfoRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *QueueAllInfoRequest) Reset() { *m = QueueAllInfoRequest{} } +func (m *QueueAllInfoRequest) String() string { return proto.CompactTextString(m) } +func (*QueueAllInfoRequest) ProtoMessage() {} +func (*QueueAllInfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_task_service_1133a2fd800ff6c6, []int{23} +} +func (m *QueueAllInfoRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_QueueAllInfoRequest.Unmarshal(m, b) +} 
+func (m *QueueAllInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_QueueAllInfoRequest.Marshal(b, m, deterministic) +} +func (dst *QueueAllInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueueAllInfoRequest.Merge(dst, src) +} +func (m *QueueAllInfoRequest) XXX_Size() int { + return xxx_messageInfo_QueueAllInfoRequest.Size(m) +} +func (m *QueueAllInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueueAllInfoRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueueAllInfoRequest proto.InternalMessageInfo + +type QueueAllInfoResponse struct { + Queues map[string]*QueueAllInfoResponse_QueueInfo `protobuf:"bytes,1,rep,name=queues,proto3" json:"queues,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *QueueAllInfoResponse) Reset() { *m = QueueAllInfoResponse{} } +func (m *QueueAllInfoResponse) String() string { return proto.CompactTextString(m) } +func (*QueueAllInfoResponse) ProtoMessage() {} +func (*QueueAllInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_task_service_1133a2fd800ff6c6, []int{24} +} +func (m *QueueAllInfoResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_QueueAllInfoResponse.Unmarshal(m, b) +} +func (m *QueueAllInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_QueueAllInfoResponse.Marshal(b, m, deterministic) +} +func (dst *QueueAllInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueueAllInfoResponse.Merge(dst, src) +} +func (m *QueueAllInfoResponse) XXX_Size() int { + return xxx_messageInfo_QueueAllInfoResponse.Size(m) +} +func (m *QueueAllInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueueAllInfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueueAllInfoResponse proto.InternalMessageInfo + +func (m *QueueAllInfoResponse) GetQueues() map[string]*QueueAllInfoResponse_QueueInfo { + if m != nil { + return m.Queues + } + return nil +} + +type QueueAllInfoResponse_ShardInfo struct { + Size int64 `protobuf:"varint,1,opt,name=size,proto3" json:"size,omitempty"` + Uacked int64 `protobuf:"varint,2,opt,name=uacked,proto3" json:"uacked,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *QueueAllInfoResponse_ShardInfo) Reset() { *m = QueueAllInfoResponse_ShardInfo{} } +func (m *QueueAllInfoResponse_ShardInfo) String() string { return proto.CompactTextString(m) } +func (*QueueAllInfoResponse_ShardInfo) ProtoMessage() {} +func (*QueueAllInfoResponse_ShardInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_task_service_1133a2fd800ff6c6, []int{24, 0} +} +func (m *QueueAllInfoResponse_ShardInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_QueueAllInfoResponse_ShardInfo.Unmarshal(m, b) +} +func (m *QueueAllInfoResponse_ShardInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_QueueAllInfoResponse_ShardInfo.Marshal(b, m, deterministic) +} +func (dst *QueueAllInfoResponse_ShardInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueueAllInfoResponse_ShardInfo.Merge(dst, src) +} +func (m *QueueAllInfoResponse_ShardInfo) XXX_Size() int { + return xxx_messageInfo_QueueAllInfoResponse_ShardInfo.Size(m) +} +func (m *QueueAllInfoResponse_ShardInfo) XXX_DiscardUnknown() { + 
xxx_messageInfo_QueueAllInfoResponse_ShardInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_QueueAllInfoResponse_ShardInfo proto.InternalMessageInfo + +func (m *QueueAllInfoResponse_ShardInfo) GetSize() int64 { + if m != nil { + return m.Size + } + return 0 +} + +func (m *QueueAllInfoResponse_ShardInfo) GetUacked() int64 { + if m != nil { + return m.Uacked + } + return 0 +} + +type QueueAllInfoResponse_QueueInfo struct { + Shards map[string]*QueueAllInfoResponse_ShardInfo `protobuf:"bytes,1,rep,name=shards,proto3" json:"shards,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *QueueAllInfoResponse_QueueInfo) Reset() { *m = QueueAllInfoResponse_QueueInfo{} } +func (m *QueueAllInfoResponse_QueueInfo) String() string { return proto.CompactTextString(m) } +func (*QueueAllInfoResponse_QueueInfo) ProtoMessage() {} +func (*QueueAllInfoResponse_QueueInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_task_service_1133a2fd800ff6c6, []int{24, 1} +} +func (m *QueueAllInfoResponse_QueueInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_QueueAllInfoResponse_QueueInfo.Unmarshal(m, b) +} +func (m *QueueAllInfoResponse_QueueInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_QueueAllInfoResponse_QueueInfo.Marshal(b, m, deterministic) +} +func (dst *QueueAllInfoResponse_QueueInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueueAllInfoResponse_QueueInfo.Merge(dst, src) +} +func (m *QueueAllInfoResponse_QueueInfo) XXX_Size() int { + return xxx_messageInfo_QueueAllInfoResponse_QueueInfo.Size(m) +} +func (m *QueueAllInfoResponse_QueueInfo) XXX_DiscardUnknown() { + xxx_messageInfo_QueueAllInfoResponse_QueueInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_QueueAllInfoResponse_QueueInfo proto.InternalMessageInfo + +func (m *QueueAllInfoResponse_QueueInfo) GetShards() map[string]*QueueAllInfoResponse_ShardInfo { + if m != nil { + return m.Shards + } + return nil +} + +func init() { + proto.RegisterType((*PollRequest)(nil), "conductor.grpc.tasks.PollRequest") + proto.RegisterType((*PollResponse)(nil), "conductor.grpc.tasks.PollResponse") + proto.RegisterType((*BatchPollRequest)(nil), "conductor.grpc.tasks.BatchPollRequest") + proto.RegisterType((*TasksInProgressRequest)(nil), "conductor.grpc.tasks.TasksInProgressRequest") + proto.RegisterType((*TasksInProgressResponse)(nil), "conductor.grpc.tasks.TasksInProgressResponse") + proto.RegisterType((*PendingTaskRequest)(nil), "conductor.grpc.tasks.PendingTaskRequest") + proto.RegisterType((*PendingTaskResponse)(nil), "conductor.grpc.tasks.PendingTaskResponse") + proto.RegisterType((*UpdateTaskRequest)(nil), "conductor.grpc.tasks.UpdateTaskRequest") + proto.RegisterType((*UpdateTaskResponse)(nil), "conductor.grpc.tasks.UpdateTaskResponse") + proto.RegisterType((*AckTaskRequest)(nil), "conductor.grpc.tasks.AckTaskRequest") + proto.RegisterType((*AckTaskResponse)(nil), "conductor.grpc.tasks.AckTaskResponse") + proto.RegisterType((*AddLogRequest)(nil), "conductor.grpc.tasks.AddLogRequest") + proto.RegisterType((*AddLogResponse)(nil), "conductor.grpc.tasks.AddLogResponse") + proto.RegisterType((*GetTaskLogsRequest)(nil), "conductor.grpc.tasks.GetTaskLogsRequest") + proto.RegisterType((*GetTaskLogsResponse)(nil), "conductor.grpc.tasks.GetTaskLogsResponse") + proto.RegisterType((*GetTaskRequest)(nil), "conductor.grpc.tasks.GetTaskRequest") 
+ proto.RegisterType((*GetTaskResponse)(nil), "conductor.grpc.tasks.GetTaskResponse") + proto.RegisterType((*RemoveTaskRequest)(nil), "conductor.grpc.tasks.RemoveTaskRequest") + proto.RegisterType((*RemoveTaskResponse)(nil), "conductor.grpc.tasks.RemoveTaskResponse") + proto.RegisterType((*QueueSizesRequest)(nil), "conductor.grpc.tasks.QueueSizesRequest") + proto.RegisterType((*QueueSizesResponse)(nil), "conductor.grpc.tasks.QueueSizesResponse") + proto.RegisterMapType((map[string]int32)(nil), "conductor.grpc.tasks.QueueSizesResponse.QueueForTaskEntry") + proto.RegisterType((*QueueInfoRequest)(nil), "conductor.grpc.tasks.QueueInfoRequest") + proto.RegisterType((*QueueInfoResponse)(nil), "conductor.grpc.tasks.QueueInfoResponse") + proto.RegisterMapType((map[string]int64)(nil), "conductor.grpc.tasks.QueueInfoResponse.QueuesEntry") + proto.RegisterType((*QueueAllInfoRequest)(nil), "conductor.grpc.tasks.QueueAllInfoRequest") + proto.RegisterType((*QueueAllInfoResponse)(nil), "conductor.grpc.tasks.QueueAllInfoResponse") + proto.RegisterMapType((map[string]*QueueAllInfoResponse_QueueInfo)(nil), "conductor.grpc.tasks.QueueAllInfoResponse.QueuesEntry") + proto.RegisterType((*QueueAllInfoResponse_ShardInfo)(nil), "conductor.grpc.tasks.QueueAllInfoResponse.ShardInfo") + proto.RegisterType((*QueueAllInfoResponse_QueueInfo)(nil), "conductor.grpc.tasks.QueueAllInfoResponse.QueueInfo") + proto.RegisterMapType((map[string]*QueueAllInfoResponse_ShardInfo)(nil), "conductor.grpc.tasks.QueueAllInfoResponse.QueueInfo.ShardsEntry") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// TaskServiceClient is the client API for TaskService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type TaskServiceClient interface { + // GET /poll/{tasktype} + Poll(ctx context.Context, in *PollRequest, opts ...grpc.CallOption) (*PollResponse, error) + // /poll/batch/{tasktype} + BatchPoll(ctx context.Context, in *BatchPollRequest, opts ...grpc.CallOption) (TaskService_BatchPollClient, error) + // GET /in_progress/{tasktype} + GetTasksInProgress(ctx context.Context, in *TasksInProgressRequest, opts ...grpc.CallOption) (*TasksInProgressResponse, error) + // GET /in_progress/{workflowId}/{taskRefName} + GetPendingTaskForWorkflow(ctx context.Context, in *PendingTaskRequest, opts ...grpc.CallOption) (*PendingTaskResponse, error) + // POST / + UpdateTask(ctx context.Context, in *UpdateTaskRequest, opts ...grpc.CallOption) (*UpdateTaskResponse, error) + // POST /{taskId}/ack + AckTask(ctx context.Context, in *AckTaskRequest, opts ...grpc.CallOption) (*AckTaskResponse, error) + // POST /{taskId}/log + AddLog(ctx context.Context, in *AddLogRequest, opts ...grpc.CallOption) (*AddLogResponse, error) + // GET {taskId}/log + GetTaskLogs(ctx context.Context, in *GetTaskLogsRequest, opts ...grpc.CallOption) (*GetTaskLogsResponse, error) + // GET /{taskId} + GetTask(ctx context.Context, in *GetTaskRequest, opts ...grpc.CallOption) (*GetTaskResponse, error) + // DELETE /queue/{taskType}/{taskId} + RemoveTaskFromQueue(ctx context.Context, in *RemoveTaskRequest, opts ...grpc.CallOption) (*RemoveTaskResponse, error) + // GET /queue/sizes + GetQueueSizesForTasks(ctx context.Context, in *QueueSizesRequest, opts ...grpc.CallOption) (*QueueSizesResponse, error) + // GET /queue/all + GetQueueInfo(ctx context.Context, in *QueueInfoRequest, opts ...grpc.CallOption) (*QueueInfoResponse, error) + // GET /queue/all/verbose + GetQueueAllInfo(ctx context.Context, in *QueueAllInfoRequest, opts ...grpc.CallOption) (*QueueAllInfoResponse, error) +} + +type taskServiceClient struct { + cc *grpc.ClientConn +} + +func NewTaskServiceClient(cc *grpc.ClientConn) TaskServiceClient { + return &taskServiceClient{cc} +} + +func (c *taskServiceClient) Poll(ctx context.Context, in *PollRequest, opts ...grpc.CallOption) (*PollResponse, error) { + out := new(PollResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.tasks.TaskService/Poll", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *taskServiceClient) BatchPoll(ctx context.Context, in *BatchPollRequest, opts ...grpc.CallOption) (TaskService_BatchPollClient, error) { + stream, err := c.cc.NewStream(ctx, &_TaskService_serviceDesc.Streams[0], "/conductor.grpc.tasks.TaskService/BatchPoll", opts...) + if err != nil { + return nil, err + } + x := &taskServiceBatchPollClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type TaskService_BatchPollClient interface { + Recv() (*model.Task, error) + grpc.ClientStream +} + +type taskServiceBatchPollClient struct { + grpc.ClientStream +} + +func (x *taskServiceBatchPollClient) Recv() (*model.Task, error) { + m := new(model.Task) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *taskServiceClient) GetTasksInProgress(ctx context.Context, in *TasksInProgressRequest, opts ...grpc.CallOption) (*TasksInProgressResponse, error) { + out := new(TasksInProgressResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.tasks.TaskService/GetTasksInProgress", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *taskServiceClient) GetPendingTaskForWorkflow(ctx context.Context, in *PendingTaskRequest, opts ...grpc.CallOption) (*PendingTaskResponse, error) { + out := new(PendingTaskResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.tasks.TaskService/GetPendingTaskForWorkflow", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *taskServiceClient) UpdateTask(ctx context.Context, in *UpdateTaskRequest, opts ...grpc.CallOption) (*UpdateTaskResponse, error) { + out := new(UpdateTaskResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.tasks.TaskService/UpdateTask", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *taskServiceClient) AckTask(ctx context.Context, in *AckTaskRequest, opts ...grpc.CallOption) (*AckTaskResponse, error) { + out := new(AckTaskResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.tasks.TaskService/AckTask", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *taskServiceClient) AddLog(ctx context.Context, in *AddLogRequest, opts ...grpc.CallOption) (*AddLogResponse, error) { + out := new(AddLogResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.tasks.TaskService/AddLog", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *taskServiceClient) GetTaskLogs(ctx context.Context, in *GetTaskLogsRequest, opts ...grpc.CallOption) (*GetTaskLogsResponse, error) { + out := new(GetTaskLogsResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.tasks.TaskService/GetTaskLogs", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *taskServiceClient) GetTask(ctx context.Context, in *GetTaskRequest, opts ...grpc.CallOption) (*GetTaskResponse, error) { + out := new(GetTaskResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.tasks.TaskService/GetTask", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *taskServiceClient) RemoveTaskFromQueue(ctx context.Context, in *RemoveTaskRequest, opts ...grpc.CallOption) (*RemoveTaskResponse, error) { + out := new(RemoveTaskResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.tasks.TaskService/RemoveTaskFromQueue", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *taskServiceClient) GetQueueSizesForTasks(ctx context.Context, in *QueueSizesRequest, opts ...grpc.CallOption) (*QueueSizesResponse, error) { + out := new(QueueSizesResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.tasks.TaskService/GetQueueSizesForTasks", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *taskServiceClient) GetQueueInfo(ctx context.Context, in *QueueInfoRequest, opts ...grpc.CallOption) (*QueueInfoResponse, error) { + out := new(QueueInfoResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.tasks.TaskService/GetQueueInfo", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *taskServiceClient) GetQueueAllInfo(ctx context.Context, in *QueueAllInfoRequest, opts ...grpc.CallOption) (*QueueAllInfoResponse, error) { + out := new(QueueAllInfoResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.tasks.TaskService/GetQueueAllInfo", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// TaskServiceServer is the server API for TaskService service. 
+type TaskServiceServer interface { + // GET /poll/{tasktype} + Poll(context.Context, *PollRequest) (*PollResponse, error) + // /poll/batch/{tasktype} + BatchPoll(*BatchPollRequest, TaskService_BatchPollServer) error + // GET /in_progress/{tasktype} + GetTasksInProgress(context.Context, *TasksInProgressRequest) (*TasksInProgressResponse, error) + // GET /in_progress/{workflowId}/{taskRefName} + GetPendingTaskForWorkflow(context.Context, *PendingTaskRequest) (*PendingTaskResponse, error) + // POST / + UpdateTask(context.Context, *UpdateTaskRequest) (*UpdateTaskResponse, error) + // POST /{taskId}/ack + AckTask(context.Context, *AckTaskRequest) (*AckTaskResponse, error) + // POST /{taskId}/log + AddLog(context.Context, *AddLogRequest) (*AddLogResponse, error) + // GET {taskId}/log + GetTaskLogs(context.Context, *GetTaskLogsRequest) (*GetTaskLogsResponse, error) + // GET /{taskId} + GetTask(context.Context, *GetTaskRequest) (*GetTaskResponse, error) + // DELETE /queue/{taskType}/{taskId} + RemoveTaskFromQueue(context.Context, *RemoveTaskRequest) (*RemoveTaskResponse, error) + // GET /queue/sizes + GetQueueSizesForTasks(context.Context, *QueueSizesRequest) (*QueueSizesResponse, error) + // GET /queue/all + GetQueueInfo(context.Context, *QueueInfoRequest) (*QueueInfoResponse, error) + // GET /queue/all/verbose + GetQueueAllInfo(context.Context, *QueueAllInfoRequest) (*QueueAllInfoResponse, error) +} + +func RegisterTaskServiceServer(s *grpc.Server, srv TaskServiceServer) { + s.RegisterService(&_TaskService_serviceDesc, srv) +} + +func _TaskService_Poll_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PollRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TaskServiceServer).Poll(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.tasks.TaskService/Poll", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TaskServiceServer).Poll(ctx, req.(*PollRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TaskService_BatchPoll_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(BatchPollRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(TaskServiceServer).BatchPoll(m, &taskServiceBatchPollServer{stream}) +} + +type TaskService_BatchPollServer interface { + Send(*model.Task) error + grpc.ServerStream +} + +type taskServiceBatchPollServer struct { + grpc.ServerStream +} + +func (x *taskServiceBatchPollServer) Send(m *model.Task) error { + return x.ServerStream.SendMsg(m) +} + +func _TaskService_GetTasksInProgress_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(TasksInProgressRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TaskServiceServer).GetTasksInProgress(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.tasks.TaskService/GetTasksInProgress", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TaskServiceServer).GetTasksInProgress(ctx, req.(*TasksInProgressRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TaskService_GetPendingTaskForWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PendingTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TaskServiceServer).GetPendingTaskForWorkflow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.tasks.TaskService/GetPendingTaskForWorkflow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TaskServiceServer).GetPendingTaskForWorkflow(ctx, req.(*PendingTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TaskService_UpdateTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TaskServiceServer).UpdateTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.tasks.TaskService/UpdateTask", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TaskServiceServer).UpdateTask(ctx, req.(*UpdateTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TaskService_AckTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AckTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TaskServiceServer).AckTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.tasks.TaskService/AckTask", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TaskServiceServer).AckTask(ctx, req.(*AckTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TaskService_AddLog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AddLogRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TaskServiceServer).AddLog(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.tasks.TaskService/AddLog", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TaskServiceServer).AddLog(ctx, req.(*AddLogRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TaskService_GetTaskLogs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetTaskLogsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TaskServiceServer).GetTaskLogs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.tasks.TaskService/GetTaskLogs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TaskServiceServer).GetTaskLogs(ctx, req.(*GetTaskLogsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TaskService_GetTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TaskServiceServer).GetTask(ctx, in) + } + info := 
&grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.tasks.TaskService/GetTask", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TaskServiceServer).GetTask(ctx, req.(*GetTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TaskService_RemoveTaskFromQueue_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TaskServiceServer).RemoveTaskFromQueue(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.tasks.TaskService/RemoveTaskFromQueue", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TaskServiceServer).RemoveTaskFromQueue(ctx, req.(*RemoveTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TaskService_GetQueueSizesForTasks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueueSizesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TaskServiceServer).GetQueueSizesForTasks(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.tasks.TaskService/GetQueueSizesForTasks", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TaskServiceServer).GetQueueSizesForTasks(ctx, req.(*QueueSizesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TaskService_GetQueueInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueueInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TaskServiceServer).GetQueueInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.tasks.TaskService/GetQueueInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TaskServiceServer).GetQueueInfo(ctx, req.(*QueueInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TaskService_GetQueueAllInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueueAllInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TaskServiceServer).GetQueueAllInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.tasks.TaskService/GetQueueAllInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TaskServiceServer).GetQueueAllInfo(ctx, req.(*QueueAllInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _TaskService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "conductor.grpc.tasks.TaskService", + HandlerType: (*TaskServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Poll", + Handler: _TaskService_Poll_Handler, + }, + { + MethodName: "GetTasksInProgress", + Handler: _TaskService_GetTasksInProgress_Handler, + }, + { + MethodName: "GetPendingTaskForWorkflow", + Handler: _TaskService_GetPendingTaskForWorkflow_Handler, + }, + { + MethodName: "UpdateTask", + Handler: 
_TaskService_UpdateTask_Handler, + }, + { + MethodName: "AckTask", + Handler: _TaskService_AckTask_Handler, + }, + { + MethodName: "AddLog", + Handler: _TaskService_AddLog_Handler, + }, + { + MethodName: "GetTaskLogs", + Handler: _TaskService_GetTaskLogs_Handler, + }, + { + MethodName: "GetTask", + Handler: _TaskService_GetTask_Handler, + }, + { + MethodName: "RemoveTaskFromQueue", + Handler: _TaskService_RemoveTaskFromQueue_Handler, + }, + { + MethodName: "GetQueueSizesForTasks", + Handler: _TaskService_GetQueueSizesForTasks_Handler, + }, + { + MethodName: "GetQueueInfo", + Handler: _TaskService_GetQueueInfo_Handler, + }, + { + MethodName: "GetQueueAllInfo", + Handler: _TaskService_GetQueueAllInfo_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "BatchPoll", + Handler: _TaskService_BatchPoll_Handler, + ServerStreams: true, + }, + }, + Metadata: "grpc/task_service.proto", +} + +func init() { + proto.RegisterFile("grpc/task_service.proto", fileDescriptor_task_service_1133a2fd800ff6c6) +} + +var fileDescriptor_task_service_1133a2fd800ff6c6 = []byte{ + // 1114 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xdd, 0x72, 0xdb, 0x54, + 0x10, 0x1e, 0xc5, 0x89, 0x53, 0xaf, 0x93, 0xd4, 0x39, 0xf9, 0x33, 0x2a, 0x0c, 0x41, 0x2d, 0x6d, + 0x02, 0x54, 0xe9, 0x24, 0x0c, 0xd0, 0x0c, 0x33, 0x6d, 0x32, 0x43, 0x82, 0x69, 0xe8, 0x04, 0xa5, + 0x94, 0x9f, 0x1b, 0xa3, 0x48, 0xc7, 0xb2, 0xb0, 0xac, 0xe3, 0x48, 0x47, 0x69, 0xd2, 0xe7, 0xe0, + 0x8e, 0x07, 0xe1, 0x4d, 0x78, 0x07, 0x5e, 0x81, 0x2b, 0xe6, 0xfc, 0x48, 0x3a, 0x8e, 0x25, 0xdb, + 0x19, 0xa6, 0x77, 0x3a, 0xab, 0xdd, 0xfd, 0xbe, 0x5d, 0xad, 0xf7, 0x3b, 0x86, 0x0d, 0x2f, 0x1a, + 0x38, 0x3b, 0xd4, 0x8e, 0x7b, 0xed, 0x18, 0x47, 0x97, 0xbe, 0x83, 0xcd, 0x41, 0x44, 0x28, 0x41, + 0xab, 0x0e, 0x09, 0xdd, 0xc4, 0xa1, 0x24, 0x32, 0x99, 0x8b, 0xc9, 0x5c, 0x62, 0x7d, 0xa3, 0x4f, + 0x5c, 0x1c, 0x70, 0x7f, 0x7c, 0x85, 0x9d, 0x80, 0x78, 0xc2, 0x5d, 0x5f, 0xcf, 0x5f, 0x44, 0x38, + 0x4e, 0x02, 0x2a, 0xed, 0x8d, 0xdc, 0x2e, 0x2c, 0x46, 0x1b, 0xea, 0xa7, 0x24, 0x08, 0x2c, 0x7c, + 0x91, 0xe0, 0x98, 0xa2, 0x7b, 0x50, 0xe3, 0xe8, 0xf4, 0x7a, 0x80, 0x9b, 0xda, 0xa6, 0xb6, 0x55, + 0xb3, 0xee, 0x30, 0xc3, 0xab, 0xeb, 0x01, 0x66, 0x2f, 0xdf, 0x90, 0xa8, 0x87, 0xa3, 0xb6, 0xef, + 0x36, 0x67, 0xc4, 0x4b, 0x61, 0x68, 0xb9, 0x68, 0x1d, 0xaa, 0x2e, 0xe9, 0xdb, 0x7e, 0xd8, 0xac, + 0xf0, 0x37, 0xf2, 0x64, 0x3c, 0x85, 0x05, 0x01, 0x10, 0x0f, 0x48, 0x18, 0x63, 0xb4, 0x0d, 0xb3, + 0x2c, 0x21, 0x4f, 0x5e, 0xdf, 0x5d, 0x33, 0xf3, 0xc2, 0x38, 0x21, 0xf3, 0x95, 0x1d, 0xf7, 0x2c, + 0xee, 0x62, 0xfc, 0xa1, 0x41, 0xe3, 0xd0, 0xa6, 0x4e, 0xf7, 0x9d, 0x32, 0x44, 0xab, 0x30, 0xe7, + 0x90, 0x24, 0xa4, 0xcd, 0xd9, 0x4d, 0x6d, 0x6b, 0xce, 0x12, 0x07, 0xd4, 0x84, 0x79, 0xea, 0xf7, + 0x31, 0x49, 0x68, 0x73, 0x8e, 0xdb, 0xd3, 0xa3, 0xd1, 0x85, 0x75, 0x46, 0x32, 0x6e, 0x85, 0xa7, + 0x11, 0xf1, 0x22, 0x1c, 0xc7, 0xd3, 0x72, 0x8b, 0xa9, 0x1d, 0xd1, 0x76, 0x0f, 0x5f, 0xa7, 0xdc, + 0xb8, 0xe1, 0x05, 0xbe, 0xce, 0x39, 0x54, 0x14, 0x0e, 0xc6, 0x11, 0x6c, 0x8c, 0x20, 0xc9, 0x36, + 0x7e, 0x0a, 0x73, 0x7c, 0x06, 0x9a, 0xda, 0x66, 0xa5, 0xbc, 0x8f, 0xc2, 0xc7, 0xf8, 0x05, 0xd0, + 0x29, 0x0e, 0x5d, 0x3f, 0xf4, 0xb8, 0x55, 0xb2, 0xfd, 0x10, 0xea, 0xac, 0x37, 0x9d, 0x80, 0xbc, + 0x61, 0xed, 0x12, 0x7c, 0x21, 0x35, 0xb5, 0x5c, 0x64, 0xc0, 0x22, 0x2f, 0x27, 0xc2, 0x9d, 0x76, + 0x68, 0xf7, 0xb1, 0x64, 0x5d, 0xa7, 0x3c, 0x49, 0xe7, 0xa5, 0xdd, 0xc7, 0xc6, 0x73, 0x58, 0x19, + 0x4a, 0x7d, 0xfb, 0xaf, 0xfc, 0x2d, 0x2c, 0xff, 0x38, 0x70, 0x6d, 
0x8a, 0x55, 0x6e, 0x7b, 0x50, + 0x15, 0x83, 0x2b, 0x33, 0xdc, 0x2b, 0xce, 0xc0, 0x5d, 0x2c, 0xe9, 0x6a, 0x3c, 0x06, 0xa4, 0x66, + 0x92, 0x54, 0x36, 0x60, 0x9e, 0x57, 0x91, 0x95, 0x58, 0x65, 0xc7, 0x96, 0x6b, 0x1c, 0xc1, 0xd2, + 0x81, 0xd3, 0x53, 0x51, 0xcb, 0x5c, 0xc7, 0xce, 0x95, 0x71, 0x1f, 0xee, 0x66, 0x79, 0x24, 0x66, + 0x03, 0x2a, 0xb6, 0x23, 0xaa, 0xbf, 0x63, 0xb1, 0x47, 0x63, 0x1f, 0x16, 0x0f, 0x5c, 0xf7, 0x84, + 0x78, 0x13, 0xb1, 0x1a, 0x50, 0x09, 0x88, 0x27, 0x51, 0xd8, 0xa3, 0xd1, 0x80, 0xa5, 0x34, 0x56, + 0xe4, 0x67, 0x95, 0x1e, 0x63, 0xca, 0x20, 0x4f, 0x88, 0x17, 0x4f, 0x4a, 0x69, 0x1c, 0xc3, 0xca, + 0x90, 0xbb, 0x64, 0xf9, 0x04, 0x66, 0x03, 0xe2, 0xa5, 0x23, 0xf4, 0x7e, 0x61, 0x8b, 0xbf, 0xb9, + 0xc2, 0x0e, 0x43, 0xe6, 0x9e, 0xc6, 0x36, 0x2c, 0xc9, 0x44, 0x13, 0x31, 0xbf, 0x86, 0xbb, 0x99, + 0xeb, 0xed, 0x87, 0xa2, 0x05, 0xcb, 0x16, 0xee, 0x93, 0xcb, 0xa1, 0xa1, 0x18, 0xfb, 0xf3, 0x52, + 0x88, 0xcc, 0x0c, 0x11, 0x59, 0x05, 0xa4, 0xa6, 0x92, 0x1d, 0xdc, 0x85, 0xe5, 0x1f, 0x12, 0x9c, + 0xe0, 0x33, 0xff, 0x2d, 0xce, 0x1a, 0xf8, 0x01, 0x40, 0x06, 0x20, 0xda, 0x52, 0xb3, 0x6a, 0x29, + 0x42, 0x6c, 0xfc, 0xa5, 0x01, 0x52, 0x83, 0x64, 0x59, 0xbf, 0xc1, 0xd2, 0x05, 0xb3, 0xb6, 0x3b, + 0x24, 0x6a, 0xcb, 0x02, 0x59, 0x43, 0xf7, 0xcd, 0xa2, 0xa5, 0x6d, 0x8e, 0x66, 0x10, 0xa6, 0x23, + 0x12, 0xf1, 0x86, 0x87, 0x34, 0xba, 0xb6, 0x16, 0x2e, 0x14, 0x93, 0xfe, 0x4c, 0x92, 0x55, 0x5d, + 0xd8, 0x9c, 0xb0, 0x4d, 0x22, 0xfa, 0xc0, 0x1e, 0xd9, 0x12, 0xb9, 0xb4, 0x83, 0x44, 0xfc, 0x4e, + 0xe7, 0x2c, 0x71, 0xd8, 0x9f, 0xf9, 0x4a, 0x33, 0x10, 0x34, 0x78, 0x82, 0x56, 0xd8, 0x21, 0xb2, + 0x58, 0xe3, 0x4f, 0x4d, 0x66, 0x15, 0x46, 0x59, 0xcc, 0x0b, 0xa8, 0x72, 0xe8, 0x74, 0x2a, 0xf6, + 0xc6, 0x14, 0xa1, 0x06, 0x0a, 0x4b, 0x2c, 0xd8, 0xcb, 0x14, 0xfa, 0x53, 0xa8, 0x2b, 0xe6, 0x49, + 0x8c, 0x2b, 0x2a, 0xe3, 0x35, 0x58, 0xe1, 0xa1, 0x07, 0x41, 0xa0, 0x92, 0xfe, 0xa7, 0x02, 0xab, + 0xc3, 0x76, 0xc9, 0xfb, 0xe5, 0x0d, 0xde, 0x5f, 0x8c, 0xe1, 0x7d, 0x23, 0xb6, 0x90, 0xfa, 0x97, + 0x50, 0x3b, 0xeb, 0xda, 0x91, 0xcb, 0x1c, 0x11, 0x82, 0xd9, 0xd8, 0x7f, 0x2b, 0x66, 0xae, 0x62, + 0xf1, 0x67, 0xa6, 0x26, 0x89, 0xed, 0xf4, 0xb0, 0x2b, 0xb9, 0xcb, 0x93, 0xfe, 0xb7, 0x06, 0xb5, + 0xac, 0x3b, 0xe8, 0x67, 0xa8, 0xc6, 0x2c, 0x4d, 0x4a, 0xeb, 0xf9, 0x6d, 0x69, 0x31, 0x8b, 0xc9, + 0x99, 0xa4, 0x04, 0x45, 0x3e, 0x9d, 0x40, 0x5d, 0x31, 0x17, 0xf4, 0xf6, 0x3b, 0xb5, 0xb7, 0xf5, + 0xdd, 0xcf, 0x6f, 0x81, 0x9c, 0x55, 0xae, 0x7c, 0x11, 0x06, 0x38, 0xfe, 0x63, 0xfe, 0x0f, 0xc0, + 0x7c, 0x9c, 0x72, 0xc0, 0xdd, 0x7f, 0x6b, 0x50, 0x67, 0xe3, 0x7e, 0x26, 0x6e, 0x42, 0xe8, 0x7b, + 0x98, 0x65, 0x17, 0x01, 0xf4, 0x51, 0x71, 0x62, 0xe5, 0x92, 0xa0, 0x1b, 0xe3, 0x5c, 0xe4, 0xc4, + 0x9c, 0x40, 0x2d, 0xbb, 0x5c, 0xa0, 0x87, 0xc5, 0x01, 0x37, 0x6f, 0x1f, 0x7a, 0xf1, 0xd2, 0x7a, + 0xa2, 0xa1, 0x8b, 0x6c, 0x23, 0x2b, 0x6a, 0x8d, 0x3e, 0x2b, 0x4e, 0x5b, 0x7c, 0x7d, 0xd0, 0x1f, + 0x4f, 0xe9, 0x2d, 0x0b, 0x18, 0xc0, 0x7b, 0xc7, 0x98, 0x2a, 0xea, 0x7b, 0x44, 0xa2, 0x9f, 0xa4, + 0x7a, 0xa3, 0xad, 0x92, 0x0e, 0x8c, 0x5c, 0x03, 0xf4, 0xed, 0x29, 0x3c, 0x25, 0x62, 0x1b, 0x20, + 0x17, 0x58, 0xf4, 0xa8, 0x38, 0x70, 0x44, 0xcc, 0xf5, 0xad, 0xc9, 0x8e, 0x12, 0xe0, 0x35, 0xcc, + 0x4b, 0x29, 0x45, 0x0f, 0x8a, 0x83, 0x86, 0x15, 0x5b, 0xff, 0x78, 0x82, 0x97, 0xcc, 0x7b, 0x06, + 0x55, 0xa1, 0xa0, 0xe8, 0x7e, 0x49, 0x80, 0xaa, 0xcd, 0xfa, 0x83, 0xf1, 0x4e, 0x32, 0xe9, 0x39, + 0xd4, 0x15, 0x55, 0x2d, 0xeb, 0xf8, 0xa8, 0x4e, 0x97, 0x75, 0xbc, 0x48, 0xa2, 0x5f, 0xc3, 0xbc, + 0x34, 0x97, 0x35, 0x64, 0x58, 0x8f, 0xcb, 0x1a, 0x72, 0x53, 0x8a, 0xbb, 0xb0, 0x92, 0x8b, 
0xe2, + 0x51, 0x44, 0xfa, 0xfc, 0x17, 0x58, 0xf6, 0x49, 0x47, 0xa4, 0xb8, 0xec, 0x93, 0x8e, 0x0a, 0x2d, + 0xfa, 0x1d, 0xd6, 0x8e, 0x31, 0xcd, 0x45, 0x4f, 0x6a, 0x58, 0x5c, 0x86, 0x35, 0xa2, 0xca, 0x65, + 0x58, 0x05, 0x4a, 0xdc, 0x86, 0x85, 0x14, 0x8b, 0x6f, 0xdf, 0x87, 0x13, 0xc5, 0x4b, 0x20, 0x3c, + 0x9a, 0x52, 0xe4, 0x50, 0x97, 0x5f, 0x6a, 0xd4, 0x15, 0x86, 0xb6, 0xa7, 0x59, 0x73, 0x02, 0xe6, + 0x93, 0xe9, 0x37, 0xe2, 0x61, 0x00, 0xba, 0x43, 0xfa, 0x66, 0x88, 0x69, 0x27, 0xf0, 0xaf, 0x6e, + 0x04, 0x1e, 0x2e, 0x2a, 0x7b, 0xf1, 0xf4, 0xfc, 0xd7, 0x67, 0x9e, 0x4f, 0xbb, 0xc9, 0xb9, 0xe9, + 0x90, 0xfe, 0x8e, 0x8c, 0xd8, 0xc9, 0x22, 0x76, 0x9c, 0xc0, 0xc7, 0x21, 0xdd, 0xf1, 0x08, 0xff, + 0x73, 0x99, 0xdb, 0xb3, 0xff, 0x9a, 0xf1, 0x79, 0x95, 0xef, 0xb2, 0xbd, 0xff, 0x02, 0x00, 0x00, + 0xff, 0xff, 0xc5, 0xda, 0xa9, 0x5e, 0x80, 0x0e, 0x00, 0x00, +} diff --git a/client/gogrpc/conductor/grpc/workflows/workflow_service.pb.go b/client/gogrpc/conductor/grpc/workflows/workflow_service.pb.go new file mode 100644 index 0000000000..51abf61f5c --- /dev/null +++ b/client/gogrpc/conductor/grpc/workflows/workflow_service.pb.go @@ -0,0 +1,1822 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc/workflow_service.proto + +package workflows // import "github.com/netflix/conductor/client/gogrpc/conductor/grpc/workflows" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import search "github.com/netflix/conductor/client/gogrpc/conductor/grpc/search" +import model "github.com/netflix/conductor/client/gogrpc/conductor/model" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type StartWorkflowResponse struct { + WorkflowId string `protobuf:"bytes,1,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartWorkflowResponse) Reset() { *m = StartWorkflowResponse{} } +func (m *StartWorkflowResponse) String() string { return proto.CompactTextString(m) } +func (*StartWorkflowResponse) ProtoMessage() {} +func (*StartWorkflowResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_workflow_service_ad4bde2e77de2037, []int{0} +} +func (m *StartWorkflowResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartWorkflowResponse.Unmarshal(m, b) +} +func (m *StartWorkflowResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartWorkflowResponse.Marshal(b, m, deterministic) +} +func (dst *StartWorkflowResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartWorkflowResponse.Merge(dst, src) +} +func (m *StartWorkflowResponse) XXX_Size() int { + return xxx_messageInfo_StartWorkflowResponse.Size(m) +} +func (m *StartWorkflowResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StartWorkflowResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StartWorkflowResponse proto.InternalMessageInfo + +func (m *StartWorkflowResponse) GetWorkflowId() string { + if m != nil { + return m.WorkflowId + } + return "" +} + +type GetWorkflowsRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + CorrelationId []string `protobuf:"bytes,2,rep,name=correlation_id,json=correlationId,proto3" json:"correlation_id,omitempty"` + IncludeClosed bool `protobuf:"varint,3,opt,name=include_closed,json=includeClosed,proto3" json:"include_closed,omitempty"` + IncludeTasks bool `protobuf:"varint,4,opt,name=include_tasks,json=includeTasks,proto3" json:"include_tasks,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetWorkflowsRequest) Reset() { *m = GetWorkflowsRequest{} } +func (m *GetWorkflowsRequest) String() string { return proto.CompactTextString(m) } +func (*GetWorkflowsRequest) ProtoMessage() {} +func (*GetWorkflowsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_workflow_service_ad4bde2e77de2037, []int{1} +} +func (m *GetWorkflowsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetWorkflowsRequest.Unmarshal(m, b) +} +func (m *GetWorkflowsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetWorkflowsRequest.Marshal(b, m, deterministic) +} +func (dst *GetWorkflowsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetWorkflowsRequest.Merge(dst, src) +} +func (m *GetWorkflowsRequest) XXX_Size() int { + return xxx_messageInfo_GetWorkflowsRequest.Size(m) +} +func (m *GetWorkflowsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetWorkflowsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetWorkflowsRequest proto.InternalMessageInfo + +func (m *GetWorkflowsRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GetWorkflowsRequest) GetCorrelationId() []string { + if m != nil { + return m.CorrelationId + } + return nil +} + +func (m *GetWorkflowsRequest) GetIncludeClosed() bool { + if m != nil { + return m.IncludeClosed + } + return false +} + +func (m *GetWorkflowsRequest) 
GetIncludeTasks() bool { + if m != nil { + return m.IncludeTasks + } + return false +} + +type GetWorkflowsResponse struct { + WorkflowsById map[string]*GetWorkflowsResponse_Workflows `protobuf:"bytes,1,rep,name=workflows_by_id,json=workflowsById,proto3" json:"workflows_by_id,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetWorkflowsResponse) Reset() { *m = GetWorkflowsResponse{} } +func (m *GetWorkflowsResponse) String() string { return proto.CompactTextString(m) } +func (*GetWorkflowsResponse) ProtoMessage() {} +func (*GetWorkflowsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_workflow_service_ad4bde2e77de2037, []int{2} +} +func (m *GetWorkflowsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetWorkflowsResponse.Unmarshal(m, b) +} +func (m *GetWorkflowsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetWorkflowsResponse.Marshal(b, m, deterministic) +} +func (dst *GetWorkflowsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetWorkflowsResponse.Merge(dst, src) +} +func (m *GetWorkflowsResponse) XXX_Size() int { + return xxx_messageInfo_GetWorkflowsResponse.Size(m) +} +func (m *GetWorkflowsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetWorkflowsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetWorkflowsResponse proto.InternalMessageInfo + +func (m *GetWorkflowsResponse) GetWorkflowsById() map[string]*GetWorkflowsResponse_Workflows { + if m != nil { + return m.WorkflowsById + } + return nil +} + +type GetWorkflowsResponse_Workflows struct { + Workflows []*model.Workflow `protobuf:"bytes,1,rep,name=workflows,proto3" json:"workflows,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetWorkflowsResponse_Workflows) Reset() { *m = GetWorkflowsResponse_Workflows{} } +func (m *GetWorkflowsResponse_Workflows) String() string { return proto.CompactTextString(m) } +func (*GetWorkflowsResponse_Workflows) ProtoMessage() {} +func (*GetWorkflowsResponse_Workflows) Descriptor() ([]byte, []int) { + return fileDescriptor_workflow_service_ad4bde2e77de2037, []int{2, 0} +} +func (m *GetWorkflowsResponse_Workflows) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetWorkflowsResponse_Workflows.Unmarshal(m, b) +} +func (m *GetWorkflowsResponse_Workflows) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetWorkflowsResponse_Workflows.Marshal(b, m, deterministic) +} +func (dst *GetWorkflowsResponse_Workflows) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetWorkflowsResponse_Workflows.Merge(dst, src) +} +func (m *GetWorkflowsResponse_Workflows) XXX_Size() int { + return xxx_messageInfo_GetWorkflowsResponse_Workflows.Size(m) +} +func (m *GetWorkflowsResponse_Workflows) XXX_DiscardUnknown() { + xxx_messageInfo_GetWorkflowsResponse_Workflows.DiscardUnknown(m) +} + +var xxx_messageInfo_GetWorkflowsResponse_Workflows proto.InternalMessageInfo + +func (m *GetWorkflowsResponse_Workflows) GetWorkflows() []*model.Workflow { + if m != nil { + return m.Workflows + } + return nil +} + +type GetWorkflowStatusRequest struct { + WorkflowId string `protobuf:"bytes,1,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + IncludeTasks bool 
`protobuf:"varint,2,opt,name=include_tasks,json=includeTasks,proto3" json:"include_tasks,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetWorkflowStatusRequest) Reset() { *m = GetWorkflowStatusRequest{} } +func (m *GetWorkflowStatusRequest) String() string { return proto.CompactTextString(m) } +func (*GetWorkflowStatusRequest) ProtoMessage() {} +func (*GetWorkflowStatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_workflow_service_ad4bde2e77de2037, []int{3} +} +func (m *GetWorkflowStatusRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetWorkflowStatusRequest.Unmarshal(m, b) +} +func (m *GetWorkflowStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetWorkflowStatusRequest.Marshal(b, m, deterministic) +} +func (dst *GetWorkflowStatusRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetWorkflowStatusRequest.Merge(dst, src) +} +func (m *GetWorkflowStatusRequest) XXX_Size() int { + return xxx_messageInfo_GetWorkflowStatusRequest.Size(m) +} +func (m *GetWorkflowStatusRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetWorkflowStatusRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetWorkflowStatusRequest proto.InternalMessageInfo + +func (m *GetWorkflowStatusRequest) GetWorkflowId() string { + if m != nil { + return m.WorkflowId + } + return "" +} + +func (m *GetWorkflowStatusRequest) GetIncludeTasks() bool { + if m != nil { + return m.IncludeTasks + } + return false +} + +type GetWorkflowStatusResponse struct { + Workflow *model.Workflow `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetWorkflowStatusResponse) Reset() { *m = GetWorkflowStatusResponse{} } +func (m *GetWorkflowStatusResponse) String() string { return proto.CompactTextString(m) } +func (*GetWorkflowStatusResponse) ProtoMessage() {} +func (*GetWorkflowStatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_workflow_service_ad4bde2e77de2037, []int{4} +} +func (m *GetWorkflowStatusResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetWorkflowStatusResponse.Unmarshal(m, b) +} +func (m *GetWorkflowStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetWorkflowStatusResponse.Marshal(b, m, deterministic) +} +func (dst *GetWorkflowStatusResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetWorkflowStatusResponse.Merge(dst, src) +} +func (m *GetWorkflowStatusResponse) XXX_Size() int { + return xxx_messageInfo_GetWorkflowStatusResponse.Size(m) +} +func (m *GetWorkflowStatusResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetWorkflowStatusResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetWorkflowStatusResponse proto.InternalMessageInfo + +func (m *GetWorkflowStatusResponse) GetWorkflow() *model.Workflow { + if m != nil { + return m.Workflow + } + return nil +} + +type RemoveWorkflowRequest struct { + WorkflodId string `protobuf:"bytes,1,opt,name=workflod_id,json=workflodId,proto3" json:"workflod_id,omitempty"` + ArchiveWorkflow bool `protobuf:"varint,2,opt,name=archive_workflow,json=archiveWorkflow,proto3" json:"archive_workflow,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RemoveWorkflowRequest) Reset() { *m = 
RemoveWorkflowRequest{} } +func (m *RemoveWorkflowRequest) String() string { return proto.CompactTextString(m) } +func (*RemoveWorkflowRequest) ProtoMessage() {} +func (*RemoveWorkflowRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_workflow_service_ad4bde2e77de2037, []int{5} +} +func (m *RemoveWorkflowRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RemoveWorkflowRequest.Unmarshal(m, b) +} +func (m *RemoveWorkflowRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RemoveWorkflowRequest.Marshal(b, m, deterministic) +} +func (dst *RemoveWorkflowRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoveWorkflowRequest.Merge(dst, src) +} +func (m *RemoveWorkflowRequest) XXX_Size() int { + return xxx_messageInfo_RemoveWorkflowRequest.Size(m) +} +func (m *RemoveWorkflowRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RemoveWorkflowRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoveWorkflowRequest proto.InternalMessageInfo + +func (m *RemoveWorkflowRequest) GetWorkflodId() string { + if m != nil { + return m.WorkflodId + } + return "" +} + +func (m *RemoveWorkflowRequest) GetArchiveWorkflow() bool { + if m != nil { + return m.ArchiveWorkflow + } + return false +} + +type RemoveWorkflowResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RemoveWorkflowResponse) Reset() { *m = RemoveWorkflowResponse{} } +func (m *RemoveWorkflowResponse) String() string { return proto.CompactTextString(m) } +func (*RemoveWorkflowResponse) ProtoMessage() {} +func (*RemoveWorkflowResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_workflow_service_ad4bde2e77de2037, []int{6} +} +func (m *RemoveWorkflowResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RemoveWorkflowResponse.Unmarshal(m, b) +} +func (m *RemoveWorkflowResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RemoveWorkflowResponse.Marshal(b, m, deterministic) +} +func (dst *RemoveWorkflowResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoveWorkflowResponse.Merge(dst, src) +} +func (m *RemoveWorkflowResponse) XXX_Size() int { + return xxx_messageInfo_RemoveWorkflowResponse.Size(m) +} +func (m *RemoveWorkflowResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RemoveWorkflowResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoveWorkflowResponse proto.InternalMessageInfo + +type GetRunningWorkflowsRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Version int32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + StartTime int64 `protobuf:"varint,3,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + EndTime int64 `protobuf:"varint,4,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetRunningWorkflowsRequest) Reset() { *m = GetRunningWorkflowsRequest{} } +func (m *GetRunningWorkflowsRequest) String() string { return proto.CompactTextString(m) } +func (*GetRunningWorkflowsRequest) ProtoMessage() {} +func (*GetRunningWorkflowsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_workflow_service_ad4bde2e77de2037, []int{7} +} +func (m *GetRunningWorkflowsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetRunningWorkflowsRequest.Unmarshal(m, b) +} 
+func (m *GetRunningWorkflowsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetRunningWorkflowsRequest.Marshal(b, m, deterministic) +} +func (dst *GetRunningWorkflowsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetRunningWorkflowsRequest.Merge(dst, src) +} +func (m *GetRunningWorkflowsRequest) XXX_Size() int { + return xxx_messageInfo_GetRunningWorkflowsRequest.Size(m) +} +func (m *GetRunningWorkflowsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetRunningWorkflowsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetRunningWorkflowsRequest proto.InternalMessageInfo + +func (m *GetRunningWorkflowsRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GetRunningWorkflowsRequest) GetVersion() int32 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *GetRunningWorkflowsRequest) GetStartTime() int64 { + if m != nil { + return m.StartTime + } + return 0 +} + +func (m *GetRunningWorkflowsRequest) GetEndTime() int64 { + if m != nil { + return m.EndTime + } + return 0 +} + +type GetRunningWorkflowsResponse struct { + WorkflowIds []string `protobuf:"bytes,1,rep,name=workflow_ids,json=workflowIds,proto3" json:"workflow_ids,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetRunningWorkflowsResponse) Reset() { *m = GetRunningWorkflowsResponse{} } +func (m *GetRunningWorkflowsResponse) String() string { return proto.CompactTextString(m) } +func (*GetRunningWorkflowsResponse) ProtoMessage() {} +func (*GetRunningWorkflowsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_workflow_service_ad4bde2e77de2037, []int{8} +} +func (m *GetRunningWorkflowsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetRunningWorkflowsResponse.Unmarshal(m, b) +} +func (m *GetRunningWorkflowsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetRunningWorkflowsResponse.Marshal(b, m, deterministic) +} +func (dst *GetRunningWorkflowsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetRunningWorkflowsResponse.Merge(dst, src) +} +func (m *GetRunningWorkflowsResponse) XXX_Size() int { + return xxx_messageInfo_GetRunningWorkflowsResponse.Size(m) +} +func (m *GetRunningWorkflowsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetRunningWorkflowsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetRunningWorkflowsResponse proto.InternalMessageInfo + +func (m *GetRunningWorkflowsResponse) GetWorkflowIds() []string { + if m != nil { + return m.WorkflowIds + } + return nil +} + +type DecideWorkflowRequest struct { + WorkflowId string `protobuf:"bytes,1,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DecideWorkflowRequest) Reset() { *m = DecideWorkflowRequest{} } +func (m *DecideWorkflowRequest) String() string { return proto.CompactTextString(m) } +func (*DecideWorkflowRequest) ProtoMessage() {} +func (*DecideWorkflowRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_workflow_service_ad4bde2e77de2037, []int{9} +} +func (m *DecideWorkflowRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DecideWorkflowRequest.Unmarshal(m, b) +} +func (m *DecideWorkflowRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_DecideWorkflowRequest.Marshal(b, m, deterministic) +} +func (dst *DecideWorkflowRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DecideWorkflowRequest.Merge(dst, src) +} +func (m *DecideWorkflowRequest) XXX_Size() int { + return xxx_messageInfo_DecideWorkflowRequest.Size(m) +} +func (m *DecideWorkflowRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DecideWorkflowRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DecideWorkflowRequest proto.InternalMessageInfo + +func (m *DecideWorkflowRequest) GetWorkflowId() string { + if m != nil { + return m.WorkflowId + } + return "" +} + +type DecideWorkflowResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DecideWorkflowResponse) Reset() { *m = DecideWorkflowResponse{} } +func (m *DecideWorkflowResponse) String() string { return proto.CompactTextString(m) } +func (*DecideWorkflowResponse) ProtoMessage() {} +func (*DecideWorkflowResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_workflow_service_ad4bde2e77de2037, []int{10} +} +func (m *DecideWorkflowResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DecideWorkflowResponse.Unmarshal(m, b) +} +func (m *DecideWorkflowResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DecideWorkflowResponse.Marshal(b, m, deterministic) +} +func (dst *DecideWorkflowResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DecideWorkflowResponse.Merge(dst, src) +} +func (m *DecideWorkflowResponse) XXX_Size() int { + return xxx_messageInfo_DecideWorkflowResponse.Size(m) +} +func (m *DecideWorkflowResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DecideWorkflowResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DecideWorkflowResponse proto.InternalMessageInfo + +type PauseWorkflowRequest struct { + WorkflowId string `protobuf:"bytes,1,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PauseWorkflowRequest) Reset() { *m = PauseWorkflowRequest{} } +func (m *PauseWorkflowRequest) String() string { return proto.CompactTextString(m) } +func (*PauseWorkflowRequest) ProtoMessage() {} +func (*PauseWorkflowRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_workflow_service_ad4bde2e77de2037, []int{11} +} +func (m *PauseWorkflowRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PauseWorkflowRequest.Unmarshal(m, b) +} +func (m *PauseWorkflowRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PauseWorkflowRequest.Marshal(b, m, deterministic) +} +func (dst *PauseWorkflowRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PauseWorkflowRequest.Merge(dst, src) +} +func (m *PauseWorkflowRequest) XXX_Size() int { + return xxx_messageInfo_PauseWorkflowRequest.Size(m) +} +func (m *PauseWorkflowRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PauseWorkflowRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PauseWorkflowRequest proto.InternalMessageInfo + +func (m *PauseWorkflowRequest) GetWorkflowId() string { + if m != nil { + return m.WorkflowId + } + return "" +} + +type PauseWorkflowResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PauseWorkflowResponse) Reset() { *m = PauseWorkflowResponse{} } +func (m *PauseWorkflowResponse) 
String() string { return proto.CompactTextString(m) } +func (*PauseWorkflowResponse) ProtoMessage() {} +func (*PauseWorkflowResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_workflow_service_ad4bde2e77de2037, []int{12} +} +func (m *PauseWorkflowResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PauseWorkflowResponse.Unmarshal(m, b) +} +func (m *PauseWorkflowResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PauseWorkflowResponse.Marshal(b, m, deterministic) +} +func (dst *PauseWorkflowResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PauseWorkflowResponse.Merge(dst, src) +} +func (m *PauseWorkflowResponse) XXX_Size() int { + return xxx_messageInfo_PauseWorkflowResponse.Size(m) +} +func (m *PauseWorkflowResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PauseWorkflowResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PauseWorkflowResponse proto.InternalMessageInfo + +type ResumeWorkflowRequest struct { + WorkflowId string `protobuf:"bytes,1,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResumeWorkflowRequest) Reset() { *m = ResumeWorkflowRequest{} } +func (m *ResumeWorkflowRequest) String() string { return proto.CompactTextString(m) } +func (*ResumeWorkflowRequest) ProtoMessage() {} +func (*ResumeWorkflowRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_workflow_service_ad4bde2e77de2037, []int{13} +} +func (m *ResumeWorkflowRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResumeWorkflowRequest.Unmarshal(m, b) +} +func (m *ResumeWorkflowRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResumeWorkflowRequest.Marshal(b, m, deterministic) +} +func (dst *ResumeWorkflowRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResumeWorkflowRequest.Merge(dst, src) +} +func (m *ResumeWorkflowRequest) XXX_Size() int { + return xxx_messageInfo_ResumeWorkflowRequest.Size(m) +} +func (m *ResumeWorkflowRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ResumeWorkflowRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ResumeWorkflowRequest proto.InternalMessageInfo + +func (m *ResumeWorkflowRequest) GetWorkflowId() string { + if m != nil { + return m.WorkflowId + } + return "" +} + +type ResumeWorkflowResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResumeWorkflowResponse) Reset() { *m = ResumeWorkflowResponse{} } +func (m *ResumeWorkflowResponse) String() string { return proto.CompactTextString(m) } +func (*ResumeWorkflowResponse) ProtoMessage() {} +func (*ResumeWorkflowResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_workflow_service_ad4bde2e77de2037, []int{14} +} +func (m *ResumeWorkflowResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResumeWorkflowResponse.Unmarshal(m, b) +} +func (m *ResumeWorkflowResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResumeWorkflowResponse.Marshal(b, m, deterministic) +} +func (dst *ResumeWorkflowResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResumeWorkflowResponse.Merge(dst, src) +} +func (m *ResumeWorkflowResponse) XXX_Size() int { + return xxx_messageInfo_ResumeWorkflowResponse.Size(m) +} +func (m *ResumeWorkflowResponse) XXX_DiscardUnknown() { + 
xxx_messageInfo_ResumeWorkflowResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ResumeWorkflowResponse proto.InternalMessageInfo + +type SkipTaskRequest struct { + WorkflowId string `protobuf:"bytes,1,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + TaskReferenceName string `protobuf:"bytes,2,opt,name=task_reference_name,json=taskReferenceName,proto3" json:"task_reference_name,omitempty"` + Request *model.SkipTaskRequest `protobuf:"bytes,3,opt,name=request,proto3" json:"request,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SkipTaskRequest) Reset() { *m = SkipTaskRequest{} } +func (m *SkipTaskRequest) String() string { return proto.CompactTextString(m) } +func (*SkipTaskRequest) ProtoMessage() {} +func (*SkipTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_workflow_service_ad4bde2e77de2037, []int{15} +} +func (m *SkipTaskRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SkipTaskRequest.Unmarshal(m, b) +} +func (m *SkipTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SkipTaskRequest.Marshal(b, m, deterministic) +} +func (dst *SkipTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SkipTaskRequest.Merge(dst, src) +} +func (m *SkipTaskRequest) XXX_Size() int { + return xxx_messageInfo_SkipTaskRequest.Size(m) +} +func (m *SkipTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SkipTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SkipTaskRequest proto.InternalMessageInfo + +func (m *SkipTaskRequest) GetWorkflowId() string { + if m != nil { + return m.WorkflowId + } + return "" +} + +func (m *SkipTaskRequest) GetTaskReferenceName() string { + if m != nil { + return m.TaskReferenceName + } + return "" +} + +func (m *SkipTaskRequest) GetRequest() *model.SkipTaskRequest { + if m != nil { + return m.Request + } + return nil +} + +type SkipTaskResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SkipTaskResponse) Reset() { *m = SkipTaskResponse{} } +func (m *SkipTaskResponse) String() string { return proto.CompactTextString(m) } +func (*SkipTaskResponse) ProtoMessage() {} +func (*SkipTaskResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_workflow_service_ad4bde2e77de2037, []int{16} +} +func (m *SkipTaskResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SkipTaskResponse.Unmarshal(m, b) +} +func (m *SkipTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SkipTaskResponse.Marshal(b, m, deterministic) +} +func (dst *SkipTaskResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SkipTaskResponse.Merge(dst, src) +} +func (m *SkipTaskResponse) XXX_Size() int { + return xxx_messageInfo_SkipTaskResponse.Size(m) +} +func (m *SkipTaskResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SkipTaskResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SkipTaskResponse proto.InternalMessageInfo + +type RerunWorkflowResponse struct { + WorkflowId string `protobuf:"bytes,1,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RerunWorkflowResponse) Reset() { *m = RerunWorkflowResponse{} } +func (m *RerunWorkflowResponse) String() string { return proto.CompactTextString(m) } +func 
(*RerunWorkflowResponse) ProtoMessage() {} +func (*RerunWorkflowResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_workflow_service_ad4bde2e77de2037, []int{17} +} +func (m *RerunWorkflowResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RerunWorkflowResponse.Unmarshal(m, b) +} +func (m *RerunWorkflowResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RerunWorkflowResponse.Marshal(b, m, deterministic) +} +func (dst *RerunWorkflowResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RerunWorkflowResponse.Merge(dst, src) +} +func (m *RerunWorkflowResponse) XXX_Size() int { + return xxx_messageInfo_RerunWorkflowResponse.Size(m) +} +func (m *RerunWorkflowResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RerunWorkflowResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RerunWorkflowResponse proto.InternalMessageInfo + +func (m *RerunWorkflowResponse) GetWorkflowId() string { + if m != nil { + return m.WorkflowId + } + return "" +} + +type RestartWorkflowRequest struct { + WorkflowId string `protobuf:"bytes,1,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RestartWorkflowRequest) Reset() { *m = RestartWorkflowRequest{} } +func (m *RestartWorkflowRequest) String() string { return proto.CompactTextString(m) } +func (*RestartWorkflowRequest) ProtoMessage() {} +func (*RestartWorkflowRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_workflow_service_ad4bde2e77de2037, []int{18} +} +func (m *RestartWorkflowRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RestartWorkflowRequest.Unmarshal(m, b) +} +func (m *RestartWorkflowRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RestartWorkflowRequest.Marshal(b, m, deterministic) +} +func (dst *RestartWorkflowRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RestartWorkflowRequest.Merge(dst, src) +} +func (m *RestartWorkflowRequest) XXX_Size() int { + return xxx_messageInfo_RestartWorkflowRequest.Size(m) +} +func (m *RestartWorkflowRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RestartWorkflowRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RestartWorkflowRequest proto.InternalMessageInfo + +func (m *RestartWorkflowRequest) GetWorkflowId() string { + if m != nil { + return m.WorkflowId + } + return "" +} + +type RestartWorkflowResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RestartWorkflowResponse) Reset() { *m = RestartWorkflowResponse{} } +func (m *RestartWorkflowResponse) String() string { return proto.CompactTextString(m) } +func (*RestartWorkflowResponse) ProtoMessage() {} +func (*RestartWorkflowResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_workflow_service_ad4bde2e77de2037, []int{19} +} +func (m *RestartWorkflowResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RestartWorkflowResponse.Unmarshal(m, b) +} +func (m *RestartWorkflowResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RestartWorkflowResponse.Marshal(b, m, deterministic) +} +func (dst *RestartWorkflowResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RestartWorkflowResponse.Merge(dst, src) +} +func (m *RestartWorkflowResponse) XXX_Size() int { + return xxx_messageInfo_RestartWorkflowResponse.Size(m) 
+} +func (m *RestartWorkflowResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RestartWorkflowResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RestartWorkflowResponse proto.InternalMessageInfo + +type RetryWorkflowRequest struct { + WorkflowId string `protobuf:"bytes,1,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RetryWorkflowRequest) Reset() { *m = RetryWorkflowRequest{} } +func (m *RetryWorkflowRequest) String() string { return proto.CompactTextString(m) } +func (*RetryWorkflowRequest) ProtoMessage() {} +func (*RetryWorkflowRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_workflow_service_ad4bde2e77de2037, []int{20} +} +func (m *RetryWorkflowRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RetryWorkflowRequest.Unmarshal(m, b) +} +func (m *RetryWorkflowRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RetryWorkflowRequest.Marshal(b, m, deterministic) +} +func (dst *RetryWorkflowRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RetryWorkflowRequest.Merge(dst, src) +} +func (m *RetryWorkflowRequest) XXX_Size() int { + return xxx_messageInfo_RetryWorkflowRequest.Size(m) +} +func (m *RetryWorkflowRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RetryWorkflowRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RetryWorkflowRequest proto.InternalMessageInfo + +func (m *RetryWorkflowRequest) GetWorkflowId() string { + if m != nil { + return m.WorkflowId + } + return "" +} + +type RetryWorkflowResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RetryWorkflowResponse) Reset() { *m = RetryWorkflowResponse{} } +func (m *RetryWorkflowResponse) String() string { return proto.CompactTextString(m) } +func (*RetryWorkflowResponse) ProtoMessage() {} +func (*RetryWorkflowResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_workflow_service_ad4bde2e77de2037, []int{21} +} +func (m *RetryWorkflowResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RetryWorkflowResponse.Unmarshal(m, b) +} +func (m *RetryWorkflowResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RetryWorkflowResponse.Marshal(b, m, deterministic) +} +func (dst *RetryWorkflowResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RetryWorkflowResponse.Merge(dst, src) +} +func (m *RetryWorkflowResponse) XXX_Size() int { + return xxx_messageInfo_RetryWorkflowResponse.Size(m) +} +func (m *RetryWorkflowResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RetryWorkflowResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RetryWorkflowResponse proto.InternalMessageInfo + +type ResetWorkflowCallbacksRequest struct { + WorkflowId string `protobuf:"bytes,1,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResetWorkflowCallbacksRequest) Reset() { *m = ResetWorkflowCallbacksRequest{} } +func (m *ResetWorkflowCallbacksRequest) String() string { return proto.CompactTextString(m) } +func (*ResetWorkflowCallbacksRequest) ProtoMessage() {} +func (*ResetWorkflowCallbacksRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_workflow_service_ad4bde2e77de2037, []int{22} +} +func (m 
*ResetWorkflowCallbacksRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResetWorkflowCallbacksRequest.Unmarshal(m, b) +} +func (m *ResetWorkflowCallbacksRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResetWorkflowCallbacksRequest.Marshal(b, m, deterministic) +} +func (dst *ResetWorkflowCallbacksRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResetWorkflowCallbacksRequest.Merge(dst, src) +} +func (m *ResetWorkflowCallbacksRequest) XXX_Size() int { + return xxx_messageInfo_ResetWorkflowCallbacksRequest.Size(m) +} +func (m *ResetWorkflowCallbacksRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ResetWorkflowCallbacksRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ResetWorkflowCallbacksRequest proto.InternalMessageInfo + +func (m *ResetWorkflowCallbacksRequest) GetWorkflowId() string { + if m != nil { + return m.WorkflowId + } + return "" +} + +type ResetWorkflowCallbacksResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResetWorkflowCallbacksResponse) Reset() { *m = ResetWorkflowCallbacksResponse{} } +func (m *ResetWorkflowCallbacksResponse) String() string { return proto.CompactTextString(m) } +func (*ResetWorkflowCallbacksResponse) ProtoMessage() {} +func (*ResetWorkflowCallbacksResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_workflow_service_ad4bde2e77de2037, []int{23} +} +func (m *ResetWorkflowCallbacksResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResetWorkflowCallbacksResponse.Unmarshal(m, b) +} +func (m *ResetWorkflowCallbacksResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResetWorkflowCallbacksResponse.Marshal(b, m, deterministic) +} +func (dst *ResetWorkflowCallbacksResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResetWorkflowCallbacksResponse.Merge(dst, src) +} +func (m *ResetWorkflowCallbacksResponse) XXX_Size() int { + return xxx_messageInfo_ResetWorkflowCallbacksResponse.Size(m) +} +func (m *ResetWorkflowCallbacksResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ResetWorkflowCallbacksResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ResetWorkflowCallbacksResponse proto.InternalMessageInfo + +type TerminateWorkflowRequest struct { + WorkflowId string `protobuf:"bytes,1,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + Reason string `protobuf:"bytes,2,opt,name=reason,proto3" json:"reason,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TerminateWorkflowRequest) Reset() { *m = TerminateWorkflowRequest{} } +func (m *TerminateWorkflowRequest) String() string { return proto.CompactTextString(m) } +func (*TerminateWorkflowRequest) ProtoMessage() {} +func (*TerminateWorkflowRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_workflow_service_ad4bde2e77de2037, []int{24} +} +func (m *TerminateWorkflowRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TerminateWorkflowRequest.Unmarshal(m, b) +} +func (m *TerminateWorkflowRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TerminateWorkflowRequest.Marshal(b, m, deterministic) +} +func (dst *TerminateWorkflowRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TerminateWorkflowRequest.Merge(dst, src) +} +func (m *TerminateWorkflowRequest) XXX_Size() int { + return 
xxx_messageInfo_TerminateWorkflowRequest.Size(m) +} +func (m *TerminateWorkflowRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TerminateWorkflowRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TerminateWorkflowRequest proto.InternalMessageInfo + +func (m *TerminateWorkflowRequest) GetWorkflowId() string { + if m != nil { + return m.WorkflowId + } + return "" +} + +func (m *TerminateWorkflowRequest) GetReason() string { + if m != nil { + return m.Reason + } + return "" +} + +type TerminateWorkflowResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TerminateWorkflowResponse) Reset() { *m = TerminateWorkflowResponse{} } +func (m *TerminateWorkflowResponse) String() string { return proto.CompactTextString(m) } +func (*TerminateWorkflowResponse) ProtoMessage() {} +func (*TerminateWorkflowResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_workflow_service_ad4bde2e77de2037, []int{25} +} +func (m *TerminateWorkflowResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TerminateWorkflowResponse.Unmarshal(m, b) +} +func (m *TerminateWorkflowResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TerminateWorkflowResponse.Marshal(b, m, deterministic) +} +func (dst *TerminateWorkflowResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TerminateWorkflowResponse.Merge(dst, src) +} +func (m *TerminateWorkflowResponse) XXX_Size() int { + return xxx_messageInfo_TerminateWorkflowResponse.Size(m) +} +func (m *TerminateWorkflowResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TerminateWorkflowResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TerminateWorkflowResponse proto.InternalMessageInfo + +type WorkflowSummarySearchResult struct { + TotalHits int64 `protobuf:"varint,1,opt,name=total_hits,json=totalHits,proto3" json:"total_hits,omitempty"` + Results []*model.WorkflowSummary `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WorkflowSummarySearchResult) Reset() { *m = WorkflowSummarySearchResult{} } +func (m *WorkflowSummarySearchResult) String() string { return proto.CompactTextString(m) } +func (*WorkflowSummarySearchResult) ProtoMessage() {} +func (*WorkflowSummarySearchResult) Descriptor() ([]byte, []int) { + return fileDescriptor_workflow_service_ad4bde2e77de2037, []int{26} +} +func (m *WorkflowSummarySearchResult) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WorkflowSummarySearchResult.Unmarshal(m, b) +} +func (m *WorkflowSummarySearchResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WorkflowSummarySearchResult.Marshal(b, m, deterministic) +} +func (dst *WorkflowSummarySearchResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowSummarySearchResult.Merge(dst, src) +} +func (m *WorkflowSummarySearchResult) XXX_Size() int { + return xxx_messageInfo_WorkflowSummarySearchResult.Size(m) +} +func (m *WorkflowSummarySearchResult) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowSummarySearchResult.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowSummarySearchResult proto.InternalMessageInfo + +func (m *WorkflowSummarySearchResult) GetTotalHits() int64 { + if m != nil { + return m.TotalHits + } + return 0 +} + +func (m *WorkflowSummarySearchResult) GetResults() []*model.WorkflowSummary { + if m != nil { + return m.Results + } 
+ return nil +} + +func init() { + proto.RegisterType((*StartWorkflowResponse)(nil), "conductor.grpc.workflows.StartWorkflowResponse") + proto.RegisterType((*GetWorkflowsRequest)(nil), "conductor.grpc.workflows.GetWorkflowsRequest") + proto.RegisterType((*GetWorkflowsResponse)(nil), "conductor.grpc.workflows.GetWorkflowsResponse") + proto.RegisterMapType((map[string]*GetWorkflowsResponse_Workflows)(nil), "conductor.grpc.workflows.GetWorkflowsResponse.WorkflowsByIdEntry") + proto.RegisterType((*GetWorkflowsResponse_Workflows)(nil), "conductor.grpc.workflows.GetWorkflowsResponse.Workflows") + proto.RegisterType((*GetWorkflowStatusRequest)(nil), "conductor.grpc.workflows.GetWorkflowStatusRequest") + proto.RegisterType((*GetWorkflowStatusResponse)(nil), "conductor.grpc.workflows.GetWorkflowStatusResponse") + proto.RegisterType((*RemoveWorkflowRequest)(nil), "conductor.grpc.workflows.RemoveWorkflowRequest") + proto.RegisterType((*RemoveWorkflowResponse)(nil), "conductor.grpc.workflows.RemoveWorkflowResponse") + proto.RegisterType((*GetRunningWorkflowsRequest)(nil), "conductor.grpc.workflows.GetRunningWorkflowsRequest") + proto.RegisterType((*GetRunningWorkflowsResponse)(nil), "conductor.grpc.workflows.GetRunningWorkflowsResponse") + proto.RegisterType((*DecideWorkflowRequest)(nil), "conductor.grpc.workflows.DecideWorkflowRequest") + proto.RegisterType((*DecideWorkflowResponse)(nil), "conductor.grpc.workflows.DecideWorkflowResponse") + proto.RegisterType((*PauseWorkflowRequest)(nil), "conductor.grpc.workflows.PauseWorkflowRequest") + proto.RegisterType((*PauseWorkflowResponse)(nil), "conductor.grpc.workflows.PauseWorkflowResponse") + proto.RegisterType((*ResumeWorkflowRequest)(nil), "conductor.grpc.workflows.ResumeWorkflowRequest") + proto.RegisterType((*ResumeWorkflowResponse)(nil), "conductor.grpc.workflows.ResumeWorkflowResponse") + proto.RegisterType((*SkipTaskRequest)(nil), "conductor.grpc.workflows.SkipTaskRequest") + proto.RegisterType((*SkipTaskResponse)(nil), "conductor.grpc.workflows.SkipTaskResponse") + proto.RegisterType((*RerunWorkflowResponse)(nil), "conductor.grpc.workflows.RerunWorkflowResponse") + proto.RegisterType((*RestartWorkflowRequest)(nil), "conductor.grpc.workflows.RestartWorkflowRequest") + proto.RegisterType((*RestartWorkflowResponse)(nil), "conductor.grpc.workflows.RestartWorkflowResponse") + proto.RegisterType((*RetryWorkflowRequest)(nil), "conductor.grpc.workflows.RetryWorkflowRequest") + proto.RegisterType((*RetryWorkflowResponse)(nil), "conductor.grpc.workflows.RetryWorkflowResponse") + proto.RegisterType((*ResetWorkflowCallbacksRequest)(nil), "conductor.grpc.workflows.ResetWorkflowCallbacksRequest") + proto.RegisterType((*ResetWorkflowCallbacksResponse)(nil), "conductor.grpc.workflows.ResetWorkflowCallbacksResponse") + proto.RegisterType((*TerminateWorkflowRequest)(nil), "conductor.grpc.workflows.TerminateWorkflowRequest") + proto.RegisterType((*TerminateWorkflowResponse)(nil), "conductor.grpc.workflows.TerminateWorkflowResponse") + proto.RegisterType((*WorkflowSummarySearchResult)(nil), "conductor.grpc.workflows.WorkflowSummarySearchResult") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// WorkflowServiceClient is the client API for WorkflowService service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type WorkflowServiceClient interface { + // POST / + StartWorkflow(ctx context.Context, in *model.StartWorkflowRequest, opts ...grpc.CallOption) (*StartWorkflowResponse, error) + // GET /{name}/correlated/{correlationId} + GetWorkflows(ctx context.Context, in *GetWorkflowsRequest, opts ...grpc.CallOption) (*GetWorkflowsResponse, error) + // GET /{workflowId} + GetWorkflowStatus(ctx context.Context, in *GetWorkflowStatusRequest, opts ...grpc.CallOption) (*model.Workflow, error) + // DELETE /{workflodId}/remove + RemoveWorkflow(ctx context.Context, in *RemoveWorkflowRequest, opts ...grpc.CallOption) (*RemoveWorkflowResponse, error) + // GET /running/{name} + GetRunningWorkflows(ctx context.Context, in *GetRunningWorkflowsRequest, opts ...grpc.CallOption) (*GetRunningWorkflowsResponse, error) + // PUT /decide/{workflowId} + DecideWorkflow(ctx context.Context, in *DecideWorkflowRequest, opts ...grpc.CallOption) (*DecideWorkflowResponse, error) + // PUT /{workflowId}/pause + PauseWorkflow(ctx context.Context, in *PauseWorkflowRequest, opts ...grpc.CallOption) (*PauseWorkflowResponse, error) + // PUT /{workflowId}/resume + ResumeWorkflow(ctx context.Context, in *ResumeWorkflowRequest, opts ...grpc.CallOption) (*ResumeWorkflowResponse, error) + // PUT /{workflowId}/skiptask/{taskReferenceName} + SkipTaskFromWorkflow(ctx context.Context, in *SkipTaskRequest, opts ...grpc.CallOption) (*SkipTaskResponse, error) + // POST /{workflowId}/rerun + RerunWorkflow(ctx context.Context, in *model.RerunWorkflowRequest, opts ...grpc.CallOption) (*RerunWorkflowResponse, error) + // POST /{workflowId}/restart + RestartWorkflow(ctx context.Context, in *RestartWorkflowRequest, opts ...grpc.CallOption) (*RestartWorkflowResponse, error) + // POST /{workflowId}/retry + RetryWorkflow(ctx context.Context, in *RetryWorkflowRequest, opts ...grpc.CallOption) (*RetryWorkflowResponse, error) + // POST /{workflowId}/resetcallbacks + ResetWorkflowCallbacks(ctx context.Context, in *ResetWorkflowCallbacksRequest, opts ...grpc.CallOption) (*ResetWorkflowCallbacksResponse, error) + // DELETE /{workflowId} + TerminateWorkflow(ctx context.Context, in *TerminateWorkflowRequest, opts ...grpc.CallOption) (*TerminateWorkflowResponse, error) + // GET /search + Search(ctx context.Context, in *search.Request, opts ...grpc.CallOption) (*WorkflowSummarySearchResult, error) + SearchByTasks(ctx context.Context, in *search.Request, opts ...grpc.CallOption) (*WorkflowSummarySearchResult, error) +} + +type workflowServiceClient struct { + cc *grpc.ClientConn +} + +func NewWorkflowServiceClient(cc *grpc.ClientConn) WorkflowServiceClient { + return &workflowServiceClient{cc} +} + +func (c *workflowServiceClient) StartWorkflow(ctx context.Context, in *model.StartWorkflowRequest, opts ...grpc.CallOption) (*StartWorkflowResponse, error) { + out := new(StartWorkflowResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.workflows.WorkflowService/StartWorkflow", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowServiceClient) GetWorkflows(ctx context.Context, in *GetWorkflowsRequest, opts ...grpc.CallOption) (*GetWorkflowsResponse, error) { + out := new(GetWorkflowsResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.workflows.WorkflowService/GetWorkflows", in, out, opts...)
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowServiceClient) GetWorkflowStatus(ctx context.Context, in *GetWorkflowStatusRequest, opts ...grpc.CallOption) (*model.Workflow, error) { + out := new(model.Workflow) + err := c.cc.Invoke(ctx, "/conductor.grpc.workflows.WorkflowService/GetWorkflowStatus", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowServiceClient) RemoveWorkflow(ctx context.Context, in *RemoveWorkflowRequest, opts ...grpc.CallOption) (*RemoveWorkflowResponse, error) { + out := new(RemoveWorkflowResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.workflows.WorkflowService/RemoveWorkflow", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowServiceClient) GetRunningWorkflows(ctx context.Context, in *GetRunningWorkflowsRequest, opts ...grpc.CallOption) (*GetRunningWorkflowsResponse, error) { + out := new(GetRunningWorkflowsResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.workflows.WorkflowService/GetRunningWorkflows", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowServiceClient) DecideWorkflow(ctx context.Context, in *DecideWorkflowRequest, opts ...grpc.CallOption) (*DecideWorkflowResponse, error) { + out := new(DecideWorkflowResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.workflows.WorkflowService/DecideWorkflow", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowServiceClient) PauseWorkflow(ctx context.Context, in *PauseWorkflowRequest, opts ...grpc.CallOption) (*PauseWorkflowResponse, error) { + out := new(PauseWorkflowResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.workflows.WorkflowService/PauseWorkflow", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowServiceClient) ResumeWorkflow(ctx context.Context, in *ResumeWorkflowRequest, opts ...grpc.CallOption) (*ResumeWorkflowResponse, error) { + out := new(ResumeWorkflowResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.workflows.WorkflowService/ResumeWorkflow", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowServiceClient) SkipTaskFromWorkflow(ctx context.Context, in *SkipTaskRequest, opts ...grpc.CallOption) (*SkipTaskResponse, error) { + out := new(SkipTaskResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.workflows.WorkflowService/SkipTaskFromWorkflow", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowServiceClient) RerunWorkflow(ctx context.Context, in *model.RerunWorkflowRequest, opts ...grpc.CallOption) (*RerunWorkflowResponse, error) { + out := new(RerunWorkflowResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.workflows.WorkflowService/RerunWorkflow", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowServiceClient) RestartWorkflow(ctx context.Context, in *RestartWorkflowRequest, opts ...grpc.CallOption) (*RestartWorkflowResponse, error) { + out := new(RestartWorkflowResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.workflows.WorkflowService/RestartWorkflow", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowServiceClient) RetryWorkflow(ctx context.Context, in *RetryWorkflowRequest, opts ...grpc.CallOption) (*RetryWorkflowResponse, error) { + out := new(RetryWorkflowResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.workflows.WorkflowService/RetryWorkflow", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowServiceClient) ResetWorkflowCallbacks(ctx context.Context, in *ResetWorkflowCallbacksRequest, opts ...grpc.CallOption) (*ResetWorkflowCallbacksResponse, error) { + out := new(ResetWorkflowCallbacksResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.workflows.WorkflowService/ResetWorkflowCallbacks", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowServiceClient) TerminateWorkflow(ctx context.Context, in *TerminateWorkflowRequest, opts ...grpc.CallOption) (*TerminateWorkflowResponse, error) { + out := new(TerminateWorkflowResponse) + err := c.cc.Invoke(ctx, "/conductor.grpc.workflows.WorkflowService/TerminateWorkflow", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowServiceClient) Search(ctx context.Context, in *search.Request, opts ...grpc.CallOption) (*WorkflowSummarySearchResult, error) { + out := new(WorkflowSummarySearchResult) + err := c.cc.Invoke(ctx, "/conductor.grpc.workflows.WorkflowService/Search", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowServiceClient) SearchByTasks(ctx context.Context, in *search.Request, opts ...grpc.CallOption) (*WorkflowSummarySearchResult, error) { + out := new(WorkflowSummarySearchResult) + err := c.cc.Invoke(ctx, "/conductor.grpc.workflows.WorkflowService/SearchByTasks", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// WorkflowServiceServer is the server API for WorkflowService service. 
+type WorkflowServiceServer interface { + // POST / + StartWorkflow(context.Context, *model.StartWorkflowRequest) (*StartWorkflowResponse, error) + // GET /{name}/correlated/{correlationId} + GetWorkflows(context.Context, *GetWorkflowsRequest) (*GetWorkflowsResponse, error) + // GET /{workflowId} + GetWorkflowStatus(context.Context, *GetWorkflowStatusRequest) (*model.Workflow, error) + // DELETE /{workflodId}/remove + RemoveWorkflow(context.Context, *RemoveWorkflowRequest) (*RemoveWorkflowResponse, error) + // GET /running/{name} + GetRunningWorkflows(context.Context, *GetRunningWorkflowsRequest) (*GetRunningWorkflowsResponse, error) + // PUT /decide/{workflowId} + DecideWorkflow(context.Context, *DecideWorkflowRequest) (*DecideWorkflowResponse, error) + // PUT /{workflowId}/pause + PauseWorkflow(context.Context, *PauseWorkflowRequest) (*PauseWorkflowResponse, error) + // PUT /{workflowId}/resume + ResumeWorkflow(context.Context, *ResumeWorkflowRequest) (*ResumeWorkflowResponse, error) + // PUT /{workflowId}/skiptask/{taskReferenceName} + SkipTaskFromWorkflow(context.Context, *SkipTaskRequest) (*SkipTaskResponse, error) + // POST /{workflowId}/rerun + RerunWorkflow(context.Context, *model.RerunWorkflowRequest) (*RerunWorkflowResponse, error) + // POST /{workflowId}/restart + RestartWorkflow(context.Context, *RestartWorkflowRequest) (*RestartWorkflowResponse, error) + // POST /{workflowId}/retry + RetryWorkflow(context.Context, *RetryWorkflowRequest) (*RetryWorkflowResponse, error) + // POST /{workflowId}/resetcallbacks + ResetWorkflowCallbacks(context.Context, *ResetWorkflowCallbacksRequest) (*ResetWorkflowCallbacksResponse, error) + // DELETE /{workflowId} + TerminateWorkflow(context.Context, *TerminateWorkflowRequest) (*TerminateWorkflowResponse, error) + // GET /search + Search(context.Context, *search.Request) (*WorkflowSummarySearchResult, error) + SearchByTasks(context.Context, *search.Request) (*WorkflowSummarySearchResult, error) +} + +func RegisterWorkflowServiceServer(s *grpc.Server, srv WorkflowServiceServer) { + s.RegisterService(&_WorkflowService_serviceDesc, srv) +} + +func _WorkflowService_StartWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(model.StartWorkflowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowServiceServer).StartWorkflow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.workflows.WorkflowService/StartWorkflow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowServiceServer).StartWorkflow(ctx, req.(*model.StartWorkflowRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _WorkflowService_GetWorkflows_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetWorkflowsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowServiceServer).GetWorkflows(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.workflows.WorkflowService/GetWorkflows", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowServiceServer).GetWorkflows(ctx, req.(*GetWorkflowsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func
_WorkflowService_GetWorkflowStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetWorkflowStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowServiceServer).GetWorkflowStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.workflows.WorkflowService/GetWorkflowStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowServiceServer).GetWorkflowStatus(ctx, req.(*GetWorkflowStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _WorkflowService_RemoveWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveWorkflowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowServiceServer).RemoveWorkflow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.workflows.WorkflowService/RemoveWorkflow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowServiceServer).RemoveWorkflow(ctx, req.(*RemoveWorkflowRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _WorkflowService_GetRunningWorkflows_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRunningWorkflowsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowServiceServer).GetRunningWorkflows(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.workflows.WorkflowService/GetRunningWorkflows", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowServiceServer).GetRunningWorkflows(ctx, req.(*GetRunningWorkflowsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _WorkflowService_DecideWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DecideWorkflowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowServiceServer).DecideWorkflow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.workflows.WorkflowService/DecideWorkflow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowServiceServer).DecideWorkflow(ctx, req.(*DecideWorkflowRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _WorkflowService_PauseWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PauseWorkflowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowServiceServer).PauseWorkflow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.workflows.WorkflowService/PauseWorkflow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowServiceServer).PauseWorkflow(ctx, req.(*PauseWorkflowRequest)) + } + return interceptor(ctx, in, info, 
handler) +} + +func _WorkflowService_ResumeWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ResumeWorkflowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowServiceServer).ResumeWorkflow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.workflows.WorkflowService/ResumeWorkflow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowServiceServer).ResumeWorkflow(ctx, req.(*ResumeWorkflowRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _WorkflowService_SkipTaskFromWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SkipTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowServiceServer).SkipTaskFromWorkflow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.workflows.WorkflowService/SkipTaskFromWorkflow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowServiceServer).SkipTaskFromWorkflow(ctx, req.(*SkipTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _WorkflowService_RerunWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(model.RerunWorkflowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowServiceServer).RerunWorkflow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.workflows.WorkflowService/RerunWorkflow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowServiceServer).RerunWorkflow(ctx, req.(*model.RerunWorkflowRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _WorkflowService_RestartWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RestartWorkflowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowServiceServer).RestartWorkflow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.workflows.WorkflowService/RestartWorkflow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowServiceServer).RestartWorkflow(ctx, req.(*RestartWorkflowRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _WorkflowService_RetryWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RetryWorkflowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowServiceServer).RetryWorkflow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.workflows.WorkflowService/RetryWorkflow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowServiceServer).RetryWorkflow(ctx, req.(*RetryWorkflowRequest)) + } + return interceptor(ctx, in, info, handler) +} 
+ +func _WorkflowService_ResetWorkflowCallbacks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ResetWorkflowCallbacksRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowServiceServer).ResetWorkflowCallbacks(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.workflows.WorkflowService/ResetWorkflowCallbacks", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowServiceServer).ResetWorkflowCallbacks(ctx, req.(*ResetWorkflowCallbacksRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _WorkflowService_TerminateWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(TerminateWorkflowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowServiceServer).TerminateWorkflow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.workflows.WorkflowService/TerminateWorkflow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowServiceServer).TerminateWorkflow(ctx, req.(*TerminateWorkflowRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _WorkflowService_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(search.Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowServiceServer).Search(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.workflows.WorkflowService/Search", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowServiceServer).Search(ctx, req.(*search.Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _WorkflowService_SearchByTasks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(search.Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowServiceServer).SearchByTasks(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/conductor.grpc.workflows.WorkflowService/SearchByTasks", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowServiceServer).SearchByTasks(ctx, req.(*search.Request)) + } + return interceptor(ctx, in, info, handler) +} + +var _WorkflowService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "conductor.grpc.workflows.WorkflowService", + HandlerType: (*WorkflowServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "StartWorkflow", + Handler: _WorkflowService_StartWorkflow_Handler, + }, + { + MethodName: "GetWorkflows", + Handler: _WorkflowService_GetWorkflows_Handler, + }, + { + MethodName: "GetWorkflowStatus", + Handler: _WorkflowService_GetWorkflowStatus_Handler, + }, + { + MethodName: "RemoveWorkflow", + Handler: _WorkflowService_RemoveWorkflow_Handler, + }, + { + MethodName: "GetRunningWorkflows", + Handler: _WorkflowService_GetRunningWorkflows_Handler, + }, + { + MethodName: "DecideWorkflow", + Handler: 
_WorkflowService_DecideWorkflow_Handler, + }, + { + MethodName: "PauseWorkflow", + Handler: _WorkflowService_PauseWorkflow_Handler, + }, + { + MethodName: "ResumeWorkflow", + Handler: _WorkflowService_ResumeWorkflow_Handler, + }, + { + MethodName: "SkipTaskFromWorkflow", + Handler: _WorkflowService_SkipTaskFromWorkflow_Handler, + }, + { + MethodName: "RerunWorkflow", + Handler: _WorkflowService_RerunWorkflow_Handler, + }, + { + MethodName: "RestartWorkflow", + Handler: _WorkflowService_RestartWorkflow_Handler, + }, + { + MethodName: "RetryWorkflow", + Handler: _WorkflowService_RetryWorkflow_Handler, + }, + { + MethodName: "ResetWorkflowCallbacks", + Handler: _WorkflowService_ResetWorkflowCallbacks_Handler, + }, + { + MethodName: "TerminateWorkflow", + Handler: _WorkflowService_TerminateWorkflow_Handler, + }, + { + MethodName: "Search", + Handler: _WorkflowService_Search_Handler, + }, + { + MethodName: "SearchByTasks", + Handler: _WorkflowService_SearchByTasks_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "grpc/workflow_service.proto", +} + +func init() { + proto.RegisterFile("grpc/workflow_service.proto", fileDescriptor_workflow_service_ad4bde2e77de2037) +} + +var fileDescriptor_workflow_service_ad4bde2e77de2037 = []byte{ + // 1121 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x58, 0xcd, 0x6e, 0xdb, 0x46, + 0x10, 0x86, 0xa4, 0xc4, 0xb6, 0xc6, 0x96, 0x7f, 0x36, 0xb6, 0x43, 0xd3, 0x48, 0xaa, 0xb2, 0x08, + 0xe0, 0x14, 0x28, 0xd5, 0x2a, 0x0d, 0xac, 0xe6, 0x94, 0xda, 0x69, 0x53, 0x5f, 0x82, 0x60, 0x65, + 0xa0, 0x40, 0x2f, 0x2c, 0x45, 0xae, 0x65, 0x42, 0xfc, 0x51, 0x77, 0x97, 0x72, 0x54, 0xf4, 0x54, + 0xf4, 0xd6, 0x57, 0x28, 0xfa, 0x20, 0xbd, 0xf6, 0xc5, 0x0a, 0x2e, 0x97, 0x94, 0x48, 0xad, 0x18, + 0xc9, 0x40, 0x6f, 0xd2, 0xcc, 0x7c, 0x33, 0xb3, 0x33, 0xb3, 0xf3, 0xad, 0x04, 0xa7, 0x43, 0x3a, + 0x76, 0x3a, 0x77, 0x11, 0x1d, 0xdd, 0xf8, 0xd1, 0x9d, 0xc5, 0x08, 0x9d, 0x78, 0x0e, 0x31, 0xc7, + 0x34, 0xe2, 0x11, 0xd2, 0x9c, 0x28, 0x74, 0x63, 0x87, 0x47, 0xd4, 0x4c, 0xcc, 0xcc, 0xcc, 0x8c, + 0xe9, 0x07, 0x02, 0xc6, 0x88, 0x4d, 0x9d, 0xdb, 0xd4, 0x58, 0x3f, 0x0c, 0x22, 0x97, 0xf8, 0xb9, + 0x2b, 0x29, 0x3d, 0x2d, 0x4a, 0x59, 0x1c, 0x04, 0x36, 0x9d, 0x16, 0x95, 0x6c, 0xe4, 0x8d, 0xb9, + 0xcd, 0x46, 0x94, 0xfc, 0x12, 0x13, 0xc6, 0xa5, 0xb2, 0x2d, 0x95, 0xdc, 0xa6, 0x3c, 0x83, 0x2b, + 0x2d, 0x28, 0xa1, 0x71, 0xa8, 0xb4, 0x30, 0x7a, 0x70, 0xd4, 0x4f, 0xf0, 0x3f, 0x4a, 0x2d, 0x26, + 0x6c, 0x1c, 0x85, 0x8c, 0xa0, 0x4f, 0x60, 0x3b, 0x3f, 0xb3, 0xe7, 0x6a, 0xb5, 0x76, 0xed, 0xac, + 0x89, 0x21, 0x13, 0x5d, 0xb9, 0xc6, 0x5f, 0x35, 0x78, 0xf4, 0x96, 0xe4, 0x40, 0x86, 0x53, 0xbf, + 0x08, 0xc1, 0x83, 0xd0, 0x0e, 0x88, 0x44, 0x88, 0xcf, 0xe8, 0x19, 0xec, 0x3a, 0x11, 0xa5, 0xc4, + 0xb7, 0xb9, 0x17, 0x85, 0x89, 0xbf, 0x7a, 0xbb, 0x71, 0xd6, 0xc4, 0xad, 0x39, 0xe9, 0x95, 0x9b, + 0x98, 0x79, 0xa1, 0xe3, 0xc7, 0x2e, 0xb1, 0x1c, 0x3f, 0x62, 0xc4, 0xd5, 0x1a, 0xed, 0xda, 0xd9, + 0x16, 0x6e, 0x49, 0xe9, 0xa5, 0x10, 0xa2, 0xcf, 0x20, 0x13, 0x58, 0x49, 0x51, 0x98, 0xf6, 0x40, + 0x58, 0xed, 0x48, 0xe1, 0x75, 0x22, 0x33, 0xfe, 0xad, 0xc3, 0x61, 0x31, 0x3d, 0x79, 0x30, 0x0f, + 0xf6, 0xf2, 0x5a, 0x5b, 0x83, 0x69, 0x7a, 0xb8, 0xc6, 0xd9, 0x76, 0xf7, 0x5b, 0x73, 0x59, 0x33, + 0x4d, 0x95, 0x23, 0x33, 0x97, 0x5c, 0x4c, 0xaf, 0xdc, 0xef, 0x42, 0x4e, 0xa7, 0xb8, 0x75, 0x37, + 0x2f, 0xd3, 0xdf, 0x40, 0x33, 0x37, 0x42, 0xe7, 0xd0, 0xcc, 0xb5, 0x32, 0xe2, 0xc9, 0x5c, 0x44, + 0xd1, 0x8e, 0xdc, 0x27, 0x9e, 0xd9, 0xea, 0xbf, 0x02, 0x5a, 0x0c, 0x85, 0xf6, 0xa1, 
0x31, 0x22, + 0x53, 0x59, 0xe5, 0xe4, 0x23, 0x7a, 0x07, 0x0f, 0x27, 0xb6, 0x1f, 0x13, 0xad, 0xde, 0xae, 0x9d, + 0x6d, 0x77, 0x7b, 0xf7, 0x3d, 0x0e, 0x4e, 0xdd, 0xbc, 0xaa, 0xf7, 0x6a, 0xc6, 0xcf, 0xa0, 0xcd, + 0x19, 0xf7, 0xb9, 0xcd, 0xe3, 0xbc, 0xd1, 0x1f, 0x9b, 0x90, 0xc5, 0x3e, 0xd5, 0x15, 0x7d, 0xc2, + 0x70, 0xa2, 0x88, 0x20, 0x7b, 0xf5, 0x12, 0xb6, 0x32, 0x7f, 0xc2, 0x7f, 0x65, 0xc9, 0x72, 0x53, + 0xc3, 0x81, 0x23, 0x4c, 0x82, 0x68, 0x42, 0x66, 0x53, 0x5d, 0x4e, 0xd9, 0x5d, 0x4c, 0xd9, 0xbd, + 0x72, 0xd1, 0x73, 0xd8, 0x4f, 0x2e, 0xac, 0x37, 0x21, 0x56, 0x1e, 0x38, 0xcd, 0x7a, 0x4f, 0xca, + 0x33, 0x97, 0x86, 0x06, 0xc7, 0xe5, 0x20, 0x69, 0xd6, 0xc6, 0x1f, 0x35, 0xd0, 0xdf, 0x12, 0x8e, + 0xe3, 0x30, 0xf4, 0xc2, 0xe1, 0x4a, 0x17, 0x44, 0x83, 0xcd, 0x09, 0xa1, 0xcc, 0x8b, 0x42, 0x11, + 0xee, 0x21, 0xce, 0xbe, 0xa2, 0x27, 0x00, 0xe2, 0x82, 0x5b, 0xdc, 0x0b, 0x88, 0xb8, 0x0f, 0x0d, + 0xdc, 0x14, 0x92, 0x6b, 0x2f, 0x20, 0xe8, 0x04, 0xb6, 0x48, 0xe8, 0xa6, 0xca, 0x07, 0x42, 0xb9, + 0x49, 0x42, 0x37, 0x51, 0x19, 0xaf, 0xe1, 0x54, 0x99, 0x85, 0xac, 0xed, 0xa7, 0xb0, 0x33, 0xd7, + 0xbe, 0x74, 0x24, 0x9b, 0x78, 0x7b, 0xd6, 0x3f, 0x96, 0x2c, 0x87, 0x37, 0xc4, 0xf1, 0xdc, 0x8a, + 0x3a, 0x2e, 0x59, 0x0e, 0x1a, 0x1c, 0x97, 0x91, 0xb2, 0x38, 0xe7, 0x70, 0xf8, 0xde, 0x8e, 0xd9, + 0xfa, 0x2e, 0x1f, 0xc3, 0x51, 0x09, 0x28, 0x3d, 0xf6, 0x92, 0x6e, 0xb3, 0x38, 0xb8, 0x57, 0x96, + 0x65, 0xa4, 0xf4, 0xf9, 0x77, 0x0d, 0xf6, 0xfa, 0x23, 0x6f, 0x9c, 0xcc, 0xe8, 0xca, 0xf3, 0x6e, + 0xc2, 0xa3, 0x64, 0xce, 0x2d, 0x4a, 0x6e, 0x08, 0x25, 0xa1, 0x43, 0x2c, 0xd1, 0xe7, 0xba, 0x30, + 0x3c, 0xe0, 0xc2, 0x95, 0xd4, 0xbc, 0x4b, 0x9a, 0xfe, 0x0a, 0x36, 0xe5, 0x32, 0x16, 0x7d, 0xdd, + 0xee, 0xb6, 0x17, 0x86, 0xbb, 0x94, 0x03, 0xce, 0x00, 0x06, 0x82, 0xfd, 0x99, 0x6e, 0xbe, 0x10, + 0x34, 0x0e, 0xd7, 0xdf, 0xe5, 0xdf, 0x88, 0x42, 0x14, 0x79, 0x60, 0xc5, 0x1a, 0x9e, 0xc0, 0xe3, + 0x05, 0xe8, 0xac, 0xd5, 0x98, 0x70, 0x3a, 0xbd, 0x4f, 0xab, 0x4b, 0x40, 0xe9, 0xf1, 0x35, 0x3c, + 0xc1, 0x84, 0xcd, 0xd6, 0xc5, 0xa5, 0xed, 0xfb, 0x03, 0xdb, 0x19, 0xad, 0xbc, 0x93, 0x8c, 0x36, + 0x3c, 0x5d, 0xe6, 0x41, 0xc6, 0xe8, 0x83, 0x76, 0x4d, 0x68, 0xe0, 0x85, 0x36, 0x5f, 0x7b, 0xa2, + 0xd0, 0x31, 0x6c, 0x50, 0x62, 0x33, 0x79, 0x8d, 0x9b, 0x58, 0x7e, 0x33, 0x4e, 0xe1, 0x44, 0xe1, + 0x54, 0x46, 0xfc, 0x00, 0xa7, 0xf9, 0xfe, 0x4b, 0xd9, 0xbf, 0x2f, 0x9e, 0x0d, 0xc9, 0x6c, 0xfa, + 0x3c, 0xd9, 0x00, 0x3c, 0xe2, 0xb6, 0x6f, 0xdd, 0x7a, 0x9c, 0x89, 0x98, 0x0d, 0xdc, 0x14, 0x92, + 0x1f, 0x3c, 0xce, 0xd2, 0x29, 0x4a, 0x0c, 0x99, 0x20, 0x55, 0xd5, 0x14, 0x95, 0xbc, 0xe3, 0x0c, + 0xd0, 0xfd, 0x67, 0x17, 0xf6, 0x72, 0x65, 0xfa, 0xb0, 0x41, 0x43, 0x68, 0x15, 0x5e, 0x04, 0xe8, + 0xd9, 0xe2, 0x54, 0x2a, 0x26, 0x45, 0xef, 0x2c, 0xe7, 0x1b, 0xf5, 0x0b, 0x23, 0x80, 0x9d, 0x79, + 0x22, 0x42, 0x5f, 0xac, 0x4a, 0x58, 0x69, 0x3c, 0x73, 0x3d, 0x7e, 0x43, 0x03, 0x38, 0x58, 0x20, + 0x1a, 0xd4, 0x5d, 0xc9, 0x49, 0x81, 0xf7, 0xf4, 0xe5, 0x14, 0x84, 0x18, 0xec, 0x16, 0x39, 0x01, + 0x55, 0x54, 0x45, 0x49, 0x51, 0xfa, 0x97, 0xab, 0x03, 0xe4, 0xc1, 0x7e, 0x4f, 0x1f, 0x62, 0xe5, + 0x45, 0x8f, 0xbe, 0xae, 0x3c, 0xdb, 0x12, 0x76, 0xd2, 0x5f, 0xae, 0x89, 0x92, 0x49, 0x30, 0xd8, + 0x2d, 0x2e, 0xfc, 0xaa, 0x93, 0x2b, 0x49, 0xa5, 0xea, 0xe4, 0x6a, 0x2e, 0x41, 0x63, 0x68, 0x15, + 0x28, 0x01, 0x55, 0xcc, 0x84, 0x8a, 0x74, 0xaa, 0x66, 0x56, 0xc9, 0x35, 0x69, 0x83, 0xe7, 0x19, + 0xa3, 0xba, 0xc1, 0x0a, 0x56, 0xaa, 0x6e, 0xb0, 0x8a, 0x8c, 0x50, 0x00, 0x87, 0xd9, 0xae, 0xff, + 0x9e, 0x46, 0x41, 0x1e, 0xfa, 0x79, 0xc5, 0x8d, 0x2b, 0xf2, 0x86, 0xfe, 0xf9, 0x2a, 0xa6, 0x32, + 0xdc, 0x10, 
0x5a, 0x05, 0x1a, 0x51, 0x2c, 0x80, 0x12, 0xcd, 0x7c, 0xb4, 0x98, 0x6a, 0x5a, 0x9a, + 0xc0, 0x5e, 0x89, 0x3a, 0x50, 0x75, 0x71, 0x54, 0x6b, 0xe7, 0xab, 0x35, 0x10, 0xb3, 0xb1, 0x29, + 0xd0, 0x4b, 0xd5, 0xd8, 0xa8, 0x08, 0xac, 0xfa, 0xa4, 0x0a, 0xde, 0x42, 0x7f, 0xd6, 0x04, 0xc1, + 0x2a, 0x68, 0x07, 0x9d, 0x57, 0xe6, 0xbf, 0x9c, 0xea, 0xf4, 0xde, 0xfa, 0x40, 0x99, 0xcd, 0x6f, + 0x70, 0xb0, 0x40, 0x46, 0x55, 0x9b, 0x70, 0x19, 0x1d, 0xea, 0x2f, 0xd6, 0xc2, 0xc8, 0xe8, 0x16, + 0x6c, 0xa4, 0xf4, 0x86, 0x9e, 0x96, 0xe1, 0xf2, 0xd7, 0xf2, 0x0a, 0xab, 0xa8, 0x8a, 0x2f, 0x6f, + 0xa0, 0x95, 0x7e, 0xbf, 0x98, 0x8a, 0x9f, 0x18, 0xff, 0x53, 0x9c, 0x0b, 0x0e, 0xba, 0x13, 0x05, + 0x66, 0x48, 0xf8, 0x8d, 0xef, 0x7d, 0x28, 0xf9, 0xb8, 0x38, 0x28, 0xf1, 0xea, 0xfb, 0xc1, 0x4f, + 0x97, 0x43, 0x8f, 0xdf, 0xc6, 0x03, 0xd3, 0x89, 0x82, 0x8e, 0x44, 0x75, 0x72, 0x54, 0xc7, 0xf1, + 0x3d, 0x12, 0xf2, 0xce, 0x30, 0x12, 0x7f, 0x1c, 0xcc, 0xe4, 0x85, 0xbf, 0x1f, 0xd8, 0x60, 0x43, + 0xdc, 0xc0, 0x17, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x39, 0x12, 0x2a, 0x80, 0x97, 0x10, 0x00, + 0x00, +} diff --git a/client/gogrpc/conductor/model/dynamicforkjointask.pb.go b/client/gogrpc/conductor/model/dynamicforkjointask.pb.go new file mode 100644 index 0000000000..8c7110ef78 --- /dev/null +++ b/client/gogrpc/conductor/model/dynamicforkjointask.pb.go @@ -0,0 +1,124 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: model/dynamicforkjointask.proto + +package model // import "github.com/netflix/conductor/client/gogrpc/conductor/model" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _struct "github.com/golang/protobuf/ptypes/struct" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type DynamicForkJoinTask struct { + TaskName string `protobuf:"bytes,1,opt,name=task_name,json=taskName,proto3" json:"task_name,omitempty"` + WorkflowName string `protobuf:"bytes,2,opt,name=workflow_name,json=workflowName,proto3" json:"workflow_name,omitempty"` + ReferenceName string `protobuf:"bytes,3,opt,name=reference_name,json=referenceName,proto3" json:"reference_name,omitempty"` + Input map[string]*_struct.Value `protobuf:"bytes,4,rep,name=input,proto3" json:"input,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DynamicForkJoinTask) Reset() { *m = DynamicForkJoinTask{} } +func (m *DynamicForkJoinTask) String() string { return proto.CompactTextString(m) } +func (*DynamicForkJoinTask) ProtoMessage() {} +func (*DynamicForkJoinTask) Descriptor() ([]byte, []int) { + return fileDescriptor_dynamicforkjointask_300c110eb897c85e, []int{0} +} +func (m *DynamicForkJoinTask) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DynamicForkJoinTask.Unmarshal(m, b) +} +func (m *DynamicForkJoinTask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DynamicForkJoinTask.Marshal(b, m, deterministic) +} +func (dst *DynamicForkJoinTask) XXX_Merge(src proto.Message) { + xxx_messageInfo_DynamicForkJoinTask.Merge(dst, src) +} +func (m *DynamicForkJoinTask) XXX_Size() int { + return xxx_messageInfo_DynamicForkJoinTask.Size(m) +} +func (m *DynamicForkJoinTask) XXX_DiscardUnknown() { + xxx_messageInfo_DynamicForkJoinTask.DiscardUnknown(m) +} + +var xxx_messageInfo_DynamicForkJoinTask proto.InternalMessageInfo + +func (m *DynamicForkJoinTask) GetTaskName() string { + if m != nil { + return m.TaskName + } + return "" +} + +func (m *DynamicForkJoinTask) GetWorkflowName() string { + if m != nil { + return m.WorkflowName + } + return "" +} + +func (m *DynamicForkJoinTask) GetReferenceName() string { + if m != nil { + return m.ReferenceName + } + return "" +} + +func (m *DynamicForkJoinTask) GetInput() map[string]*_struct.Value { + if m != nil { + return m.Input + } + return nil +} + +func (m *DynamicForkJoinTask) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func init() { + proto.RegisterType((*DynamicForkJoinTask)(nil), "conductor.proto.DynamicForkJoinTask") + proto.RegisterMapType((map[string]*_struct.Value)(nil), "conductor.proto.DynamicForkJoinTask.InputEntry") +} + +func init() { + proto.RegisterFile("model/dynamicforkjointask.proto", fileDescriptor_dynamicforkjointask_300c110eb897c85e) +} + +var fileDescriptor_dynamicforkjointask_300c110eb897c85e = []byte{ + // 325 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0x4f, 0x4b, 0x03, 0x31, + 0x10, 0xc5, 0xe9, 0x3f, 0xb1, 0xa9, 0x55, 0x89, 0x28, 0xa5, 0x15, 0x2c, 0x8a, 0xd0, 0x83, 0x24, + 0x50, 0x2f, 0xd2, 0x63, 0xb1, 0x82, 0x1e, 0xa4, 0x14, 0xf1, 0xe0, 0x45, 0x76, 0xd3, 0xd9, 0x35, + 0xee, 0x6e, 0x66, 0x49, 0x13, 0xeb, 0x7e, 0x26, 0xbf, 0xa4, 0x6c, 0xd2, 0x56, 0x29, 0xbd, 0x4d, + 0xde, 0xfc, 0xde, 0xe4, 0x65, 0x42, 0x2e, 0x32, 0x9c, 0x43, 0xca, 0xe7, 0x85, 0x0a, 0x32, 0x29, + 0x22, 0xd4, 0xc9, 0x27, 0x4a, 0x65, 0x82, 0x45, 0xc2, 0x72, 0x8d, 0x06, 0xe9, 0x91, 0x40, 0x35, + 0xb7, 0xc2, 0xa0, 0xf6, 0x42, 
0xf7, 0x3c, 0x46, 0x8c, 0x53, 0xe0, 0xee, 0x14, 0xda, 0x88, 0x2f, + 0x8c, 0xb6, 0xc2, 0xf8, 0xee, 0xe5, 0x4f, 0x95, 0x9c, 0xdc, 0xfb, 0x61, 0x0f, 0xa8, 0x93, 0x27, + 0x94, 0xea, 0x25, 0x58, 0x24, 0xb4, 0x47, 0x9a, 0xe5, 0xd0, 0x77, 0x15, 0x64, 0xd0, 0xa9, 0xf4, + 0x2b, 0x83, 0xe6, 0x6c, 0xbf, 0x14, 0x9e, 0x83, 0x0c, 0xe8, 0x15, 0x69, 0x2f, 0x51, 0x27, 0x51, + 0x8a, 0x4b, 0x0f, 0x54, 0x1d, 0x70, 0xb0, 0x16, 0x1d, 0x74, 0x4d, 0x0e, 0x35, 0x44, 0xa0, 0x41, + 0x09, 0xf0, 0x54, 0xcd, 0x51, 0xed, 0x8d, 0xea, 0xb0, 0x09, 0x69, 0x48, 0x95, 0x5b, 0xd3, 0xa9, + 0xf7, 0x6b, 0x83, 0xd6, 0x90, 0xb3, 0xad, 0xfc, 0x6c, 0x47, 0x3a, 0xf6, 0x58, 0x3a, 0x26, 0xca, + 0xe8, 0x62, 0xe6, 0xdd, 0x94, 0x92, 0xba, 0x29, 0x72, 0xe8, 0x34, 0xdc, 0x1d, 0xae, 0xee, 0x4e, + 0x09, 0xf9, 0x03, 0xe9, 0x31, 0xa9, 0x25, 0x50, 0xac, 0xde, 0x52, 0x96, 0xf4, 0x86, 0x34, 0xbe, + 0x82, 0xd4, 0xfa, 0xf8, 0xad, 0xe1, 0x19, 0xf3, 0x9b, 0x62, 0xeb, 0x4d, 0xb1, 0xd7, 0xb2, 0x3b, + 0xf3, 0xd0, 0xa8, 0x7a, 0x57, 0x19, 0xe7, 0xa4, 0x27, 0x30, 0x63, 0x0a, 0x4c, 0x94, 0xca, 0xef, + 0xed, 0xa8, 0xe3, 0xd3, 0x1d, 0x59, 0xa7, 0xe1, 0xdb, 0x28, 0x96, 0xe6, 0xc3, 0x86, 0x4c, 0x60, + 0xc6, 0x57, 0x56, 0xbe, 0xb1, 0x72, 0x91, 0x4a, 0x50, 0x86, 0xc7, 0x18, 0xeb, 0x5c, 0xfc, 0xd3, + 0xdd, 0x47, 0x87, 0x7b, 0x6e, 0xf2, 0xed, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6d, 0xe6, 0x21, + 0x30, 0xf8, 0x01, 0x00, 0x00, +} diff --git a/client/gogrpc/conductor/model/dynamicforkjointasklist.pb.go b/client/gogrpc/conductor/model/dynamicforkjointasklist.pb.go new file mode 100644 index 0000000000..c02fc9b0d6 --- /dev/null +++ b/client/gogrpc/conductor/model/dynamicforkjointasklist.pb.go @@ -0,0 +1,82 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: model/dynamicforkjointasklist.proto + +package model // import "github.com/netflix/conductor/client/gogrpc/conductor/model" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type DynamicForkJoinTaskList struct { + DynamicTasks []*DynamicForkJoinTask `protobuf:"bytes,1,rep,name=dynamic_tasks,json=dynamicTasks,proto3" json:"dynamic_tasks,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DynamicForkJoinTaskList) Reset() { *m = DynamicForkJoinTaskList{} } +func (m *DynamicForkJoinTaskList) String() string { return proto.CompactTextString(m) } +func (*DynamicForkJoinTaskList) ProtoMessage() {} +func (*DynamicForkJoinTaskList) Descriptor() ([]byte, []int) { + return fileDescriptor_dynamicforkjointasklist_4634756916e85673, []int{0} +} +func (m *DynamicForkJoinTaskList) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DynamicForkJoinTaskList.Unmarshal(m, b) +} +func (m *DynamicForkJoinTaskList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DynamicForkJoinTaskList.Marshal(b, m, deterministic) +} +func (dst *DynamicForkJoinTaskList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DynamicForkJoinTaskList.Merge(dst, src) +} +func (m *DynamicForkJoinTaskList) XXX_Size() int { + return xxx_messageInfo_DynamicForkJoinTaskList.Size(m) +} +func (m *DynamicForkJoinTaskList) XXX_DiscardUnknown() { + xxx_messageInfo_DynamicForkJoinTaskList.DiscardUnknown(m) +} + +var xxx_messageInfo_DynamicForkJoinTaskList proto.InternalMessageInfo + +func (m *DynamicForkJoinTaskList) GetDynamicTasks() []*DynamicForkJoinTask { + if m != nil { + return m.DynamicTasks + } + return nil +} + +func init() { + proto.RegisterType((*DynamicForkJoinTaskList)(nil), "conductor.proto.DynamicForkJoinTaskList") +} + +func init() { + proto.RegisterFile("model/dynamicforkjointasklist.proto", fileDescriptor_dynamicforkjointasklist_4634756916e85673) +} + +var fileDescriptor_dynamicforkjointasklist_4634756916e85673 = []byte{ + // 200 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0xce, 0xcd, 0x4f, 0x49, + 0xcd, 0xd1, 0x4f, 0xa9, 0xcc, 0x4b, 0xcc, 0xcd, 0x4c, 0x4e, 0xcb, 0x2f, 0xca, 0xce, 0xca, 0xcf, + 0xcc, 0x2b, 0x49, 0x2c, 0xce, 0xce, 0xc9, 0x2c, 0x2e, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, + 0xe2, 0x4f, 0xce, 0xcf, 0x4b, 0x29, 0x4d, 0x2e, 0xc9, 0x2f, 0x82, 0x08, 0x48, 0xc9, 0xe3, 0xd4, + 0x05, 0x51, 0xa0, 0x94, 0xc2, 0x25, 0xee, 0x02, 0x91, 0x74, 0xcb, 0x2f, 0xca, 0xf6, 0xca, 0xcf, + 0xcc, 0x0b, 0x49, 0x2c, 0xce, 0xf6, 0xc9, 0x2c, 0x2e, 0x11, 0xf2, 0xe4, 0xe2, 0x85, 0xea, 0x8b, + 0x07, 0x69, 0x28, 0x96, 0x60, 0x54, 0x60, 0xd6, 0xe0, 0x36, 0x52, 0xd1, 0x43, 0xb3, 0x44, 0x0f, + 0x8b, 0x01, 0x41, 0x3c, 0x50, 0xad, 0x20, 0x4e, 0xb1, 0x53, 0x09, 0x97, 0x74, 0x72, 0x7e, 0xae, + 0x5e, 0x5e, 0x6a, 0x49, 0x5a, 0x4e, 0x66, 0x05, 0xba, 0x01, 0x4e, 0x92, 0x38, 0x9c, 0x10, 0x90, + 0x14, 0x65, 0x95, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x0f, 0xd5, 0xae, + 0x0f, 0xd7, 0xae, 0x9f, 0x9c, 0x93, 0x99, 0x9a, 0x57, 0xa2, 0x9f, 0x9e, 0x9f, 0x5e, 0x54, 0x90, + 0x8c, 0x24, 0x0e, 0xf6, 0x75, 0x12, 0x1b, 0xd8, 0x74, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, + 0x76, 0xa8, 0x2e, 0xed, 0x3b, 0x01, 0x00, 0x00, +} diff --git a/client/gogrpc/conductor/model/eventexecution.pb.go b/client/gogrpc/conductor/model/eventexecution.pb.go new file mode 100644 index 0000000000..7c76aa9853 --- /dev/null +++ b/client/gogrpc/conductor/model/eventexecution.pb.go @@ -0,0 +1,185 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: model/eventexecution.proto + +package model // import "github.com/netflix/conductor/client/gogrpc/conductor/model" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _struct "github.com/golang/protobuf/ptypes/struct" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type EventExecution_Status int32 + +const ( + EventExecution_IN_PROGRESS EventExecution_Status = 0 + EventExecution_COMPLETED EventExecution_Status = 1 + EventExecution_FAILED EventExecution_Status = 2 + EventExecution_SKIPPED EventExecution_Status = 3 +) + +var EventExecution_Status_name = map[int32]string{ + 0: "IN_PROGRESS", + 1: "COMPLETED", + 2: "FAILED", + 3: "SKIPPED", +} +var EventExecution_Status_value = map[string]int32{ + "IN_PROGRESS": 0, + "COMPLETED": 1, + "FAILED": 2, + "SKIPPED": 3, +} + +func (x EventExecution_Status) String() string { + return proto.EnumName(EventExecution_Status_name, int32(x)) +} +func (EventExecution_Status) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_eventexecution_461922c614048c4a, []int{0, 0} +} + +type EventExecution struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + MessageId string `protobuf:"bytes,2,opt,name=message_id,json=messageId,proto3" json:"message_id,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Event string `protobuf:"bytes,4,opt,name=event,proto3" json:"event,omitempty"` + Created int64 `protobuf:"varint,5,opt,name=created,proto3" json:"created,omitempty"` + Status EventExecution_Status `protobuf:"varint,6,opt,name=status,proto3,enum=conductor.proto.EventExecution_Status" json:"status,omitempty"` + Action EventHandler_Action_Type `protobuf:"varint,7,opt,name=action,proto3,enum=conductor.proto.EventHandler_Action_Type" json:"action,omitempty"` + Output map[string]*_struct.Value `protobuf:"bytes,8,rep,name=output,proto3" json:"output,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EventExecution) Reset() { *m = EventExecution{} } +func (m *EventExecution) String() string { return proto.CompactTextString(m) } +func (*EventExecution) ProtoMessage() {} +func (*EventExecution) Descriptor() ([]byte, []int) { + return fileDescriptor_eventexecution_461922c614048c4a, []int{0} +} +func (m *EventExecution) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EventExecution.Unmarshal(m, b) +} +func (m *EventExecution) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EventExecution.Marshal(b, m, deterministic) +} +func (dst *EventExecution) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventExecution.Merge(dst, src) +} +func (m *EventExecution) XXX_Size() int { + return xxx_messageInfo_EventExecution.Size(m) +} +func (m *EventExecution) XXX_DiscardUnknown() { + xxx_messageInfo_EventExecution.DiscardUnknown(m) +} + +var xxx_messageInfo_EventExecution proto.InternalMessageInfo + +func (m *EventExecution) GetId() 
string { + if m != nil { + return m.Id + } + return "" +} + +func (m *EventExecution) GetMessageId() string { + if m != nil { + return m.MessageId + } + return "" +} + +func (m *EventExecution) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *EventExecution) GetEvent() string { + if m != nil { + return m.Event + } + return "" +} + +func (m *EventExecution) GetCreated() int64 { + if m != nil { + return m.Created + } + return 0 +} + +func (m *EventExecution) GetStatus() EventExecution_Status { + if m != nil { + return m.Status + } + return EventExecution_IN_PROGRESS +} + +func (m *EventExecution) GetAction() EventHandler_Action_Type { + if m != nil { + return m.Action + } + return EventHandler_Action_START_WORKFLOW +} + +func (m *EventExecution) GetOutput() map[string]*_struct.Value { + if m != nil { + return m.Output + } + return nil +} + +func init() { + proto.RegisterType((*EventExecution)(nil), "conductor.proto.EventExecution") + proto.RegisterMapType((map[string]*_struct.Value)(nil), "conductor.proto.EventExecution.OutputEntry") + proto.RegisterEnum("conductor.proto.EventExecution_Status", EventExecution_Status_name, EventExecution_Status_value) +} + +func init() { + proto.RegisterFile("model/eventexecution.proto", fileDescriptor_eventexecution_461922c614048c4a) +} + +var fileDescriptor_eventexecution_461922c614048c4a = []byte{ + // 434 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xc1, 0x8b, 0xd3, 0x40, + 0x14, 0xc6, 0x4d, 0xb2, 0x4d, 0xed, 0x0b, 0x76, 0xc3, 0x20, 0x32, 0x54, 0x85, 0xb2, 0x07, 0xa9, + 0x28, 0x13, 0xa8, 0x17, 0xd9, 0x83, 0xd0, 0xdd, 0x46, 0x2d, 0xae, 0x36, 0xa6, 0x8b, 0x07, 0x2f, + 0x4b, 0x3a, 0x79, 0x9b, 0x0d, 0x9b, 0xcc, 0x94, 0x64, 0x66, 0xd9, 0xfe, 0xb9, 0xfe, 0x27, 0xd2, + 0x49, 0x22, 0xdd, 0x22, 0xec, 0x6d, 0xde, 0xf7, 0x7d, 0xbf, 0xe4, 0xbd, 0x37, 0x03, 0xa3, 0x52, + 0xa6, 0x58, 0x04, 0x78, 0x87, 0x42, 0xe1, 0x3d, 0x72, 0xad, 0x72, 0x29, 0xd8, 0xa6, 0x92, 0x4a, + 0x92, 0x63, 0x2e, 0x45, 0xaa, 0xb9, 0x92, 0x55, 0x23, 0x8c, 0xe8, 0x5e, 0xf8, 0x26, 0x11, 0x69, + 0x81, 0x9d, 0xf3, 0x2a, 0x93, 0x32, 0x2b, 0x30, 0x30, 0xd5, 0x5a, 0x5f, 0x07, 0xb5, 0xaa, 0x34, + 0x57, 0x8d, 0x7b, 0xf2, 0xc7, 0x81, 0x61, 0xb8, 0x83, 0xc2, 0xee, 0x0f, 0x64, 0x08, 0x76, 0x9e, + 0x52, 0x6b, 0x6c, 0x4d, 0x06, 0xb1, 0x9d, 0xa7, 0xe4, 0x35, 0x40, 0x89, 0x75, 0x9d, 0x64, 0x78, + 0x95, 0xa7, 0xd4, 0x36, 0xfa, 0xa0, 0x55, 0x16, 0x29, 0x21, 0x70, 0x24, 0x92, 0x12, 0xa9, 0x63, + 0x0c, 0x73, 0x26, 0xcf, 0xa1, 0x67, 0x3a, 0xa1, 0x47, 0x46, 0x6c, 0x0a, 0x42, 0xa1, 0xcf, 0x2b, + 0x4c, 0x14, 0xa6, 0xb4, 0x37, 0xb6, 0x26, 0x4e, 0xdc, 0x95, 0xe4, 0x13, 0xb8, 0xb5, 0x4a, 0x94, + 0xae, 0xa9, 0x3b, 0xb6, 0x26, 0xc3, 0xe9, 0x1b, 0x76, 0x30, 0x1f, 0x7b, 0xd8, 0x23, 0x5b, 0x99, + 0x74, 0xdc, 0x52, 0x64, 0x06, 0x6e, 0xc2, 0x77, 0x06, 0xed, 0x1b, 0xfe, 0xed, 0xff, 0xf9, 0xaf, + 0xed, 0x62, 0x66, 0x26, 0xcb, 0x2e, 0xb7, 0x1b, 0x8c, 0x5b, 0x90, 0x9c, 0x83, 0x2b, 0xb5, 0xda, + 0x68, 0x45, 0x9f, 0x8e, 0x9d, 0x89, 0x37, 0x7d, 0xf7, 0x58, 0x0b, 0x4b, 0x93, 0x0e, 0x85, 0xaa, + 0xb6, 0x71, 0x8b, 0x8e, 0x7e, 0x82, 0xb7, 0x27, 0x13, 0x1f, 0x9c, 0x5b, 0xdc, 0xb6, 0xab, 0xdc, + 0x1d, 0xc9, 0x7b, 0xe8, 0xdd, 0x25, 0x85, 0x46, 0xb3, 0x46, 0x6f, 0xfa, 0x82, 0x35, 0x97, 0xc3, + 0xba, 0xcb, 0x61, 0xbf, 0x76, 0x6e, 0xdc, 0x84, 0x4e, 0xed, 0x8f, 0xd6, 0xc9, 0x0c, 0xdc, 0x66, + 0x58, 0x72, 0x0c, 0xde, 0xe2, 0xc7, 0x55, 0x14, 0x2f, 0xbf, 0xc4, 0xe1, 0x6a, 0xe5, 0x3f, 0x21, + 0xcf, 0x60, 0x70, 0xbe, 0xfc, 0x1e, 0x5d, 0x84, 0x97, 0xe1, 
0xdc, 0xb7, 0x08, 0x80, 0xfb, 0x79, + 0xb6, 0xb8, 0x08, 0xe7, 0xbe, 0x4d, 0x3c, 0xe8, 0xaf, 0xbe, 0x2d, 0xa2, 0x28, 0x9c, 0xfb, 0xce, + 0xd9, 0x2d, 0xbc, 0xe4, 0xb2, 0x64, 0x02, 0xd5, 0x75, 0x91, 0xdf, 0x1f, 0xce, 0x75, 0xe6, 0x3f, + 0x1c, 0x2c, 0x5a, 0xff, 0x3e, 0xcd, 0x72, 0x75, 0xa3, 0xd7, 0x8c, 0xcb, 0x32, 0x68, 0xa9, 0xe0, + 0x1f, 0x15, 0xf0, 0x22, 0x47, 0xa1, 0x82, 0x4c, 0x66, 0xd5, 0x86, 0xef, 0xe9, 0xe6, 0x05, 0xae, + 0x5d, 0xf3, 0xd1, 0x0f, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x30, 0x90, 0x3d, 0xc6, 0xbe, 0x02, + 0x00, 0x00, +} diff --git a/client/gogrpc/conductor/model/eventhandler.pb.go b/client/gogrpc/conductor/model/eventhandler.pb.go new file mode 100644 index 0000000000..6a268b097f --- /dev/null +++ b/client/gogrpc/conductor/model/eventhandler.pb.go @@ -0,0 +1,379 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: model/eventhandler.proto + +package model // import "github.com/netflix/conductor/client/gogrpc/conductor/model" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import any "github.com/golang/protobuf/ptypes/any" +import _struct "github.com/golang/protobuf/ptypes/struct" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type EventHandler_Action_Type int32 + +const ( + EventHandler_Action_START_WORKFLOW EventHandler_Action_Type = 0 + EventHandler_Action_COMPLETE_TASK EventHandler_Action_Type = 1 + EventHandler_Action_FAIL_TASK EventHandler_Action_Type = 2 +) + +var EventHandler_Action_Type_name = map[int32]string{ + 0: "START_WORKFLOW", + 1: "COMPLETE_TASK", + 2: "FAIL_TASK", +} +var EventHandler_Action_Type_value = map[string]int32{ + "START_WORKFLOW": 0, + "COMPLETE_TASK": 1, + "FAIL_TASK": 2, +} + +func (x EventHandler_Action_Type) String() string { + return proto.EnumName(EventHandler_Action_Type_name, int32(x)) +} +func (EventHandler_Action_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_eventhandler_9283dbc454fc79ea, []int{0, 2, 0} +} + +type EventHandler struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Event string `protobuf:"bytes,2,opt,name=event,proto3" json:"event,omitempty"` + Condition string `protobuf:"bytes,3,opt,name=condition,proto3" json:"condition,omitempty"` + Actions []*EventHandler_Action `protobuf:"bytes,4,rep,name=actions,proto3" json:"actions,omitempty"` + Active bool `protobuf:"varint,5,opt,name=active,proto3" json:"active,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EventHandler) Reset() { *m = EventHandler{} } +func (m *EventHandler) String() string { return proto.CompactTextString(m) } +func (*EventHandler) ProtoMessage() {} +func (*EventHandler) Descriptor() ([]byte, []int) { + return fileDescriptor_eventhandler_9283dbc454fc79ea, []int{0} +} +func (m *EventHandler) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EventHandler.Unmarshal(m, b) +} +func (m *EventHandler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EventHandler.Marshal(b, m, deterministic) +} +func (dst 
*EventHandler) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventHandler.Merge(dst, src) +} +func (m *EventHandler) XXX_Size() int { + return xxx_messageInfo_EventHandler.Size(m) +} +func (m *EventHandler) XXX_DiscardUnknown() { + xxx_messageInfo_EventHandler.DiscardUnknown(m) +} + +var xxx_messageInfo_EventHandler proto.InternalMessageInfo + +func (m *EventHandler) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *EventHandler) GetEvent() string { + if m != nil { + return m.Event + } + return "" +} + +func (m *EventHandler) GetCondition() string { + if m != nil { + return m.Condition + } + return "" +} + +func (m *EventHandler) GetActions() []*EventHandler_Action { + if m != nil { + return m.Actions + } + return nil +} + +func (m *EventHandler) GetActive() bool { + if m != nil { + return m.Active + } + return false +} + +type EventHandler_StartWorkflow struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Version int32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + CorrelationId string `protobuf:"bytes,3,opt,name=correlation_id,json=correlationId,proto3" json:"correlation_id,omitempty"` + Input map[string]*_struct.Value `protobuf:"bytes,4,rep,name=input,proto3" json:"input,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + InputMessage *any.Any `protobuf:"bytes,5,opt,name=input_message,json=inputMessage,proto3" json:"input_message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EventHandler_StartWorkflow) Reset() { *m = EventHandler_StartWorkflow{} } +func (m *EventHandler_StartWorkflow) String() string { return proto.CompactTextString(m) } +func (*EventHandler_StartWorkflow) ProtoMessage() {} +func (*EventHandler_StartWorkflow) Descriptor() ([]byte, []int) { + return fileDescriptor_eventhandler_9283dbc454fc79ea, []int{0, 0} +} +func (m *EventHandler_StartWorkflow) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EventHandler_StartWorkflow.Unmarshal(m, b) +} +func (m *EventHandler_StartWorkflow) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EventHandler_StartWorkflow.Marshal(b, m, deterministic) +} +func (dst *EventHandler_StartWorkflow) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventHandler_StartWorkflow.Merge(dst, src) +} +func (m *EventHandler_StartWorkflow) XXX_Size() int { + return xxx_messageInfo_EventHandler_StartWorkflow.Size(m) +} +func (m *EventHandler_StartWorkflow) XXX_DiscardUnknown() { + xxx_messageInfo_EventHandler_StartWorkflow.DiscardUnknown(m) +} + +var xxx_messageInfo_EventHandler_StartWorkflow proto.InternalMessageInfo + +func (m *EventHandler_StartWorkflow) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *EventHandler_StartWorkflow) GetVersion() int32 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *EventHandler_StartWorkflow) GetCorrelationId() string { + if m != nil { + return m.CorrelationId + } + return "" +} + +func (m *EventHandler_StartWorkflow) GetInput() map[string]*_struct.Value { + if m != nil { + return m.Input + } + return nil +} + +func (m *EventHandler_StartWorkflow) GetInputMessage() *any.Any { + if m != nil { + return m.InputMessage + } + return nil +} + +type EventHandler_TaskDetails struct { + WorkflowId string `protobuf:"bytes,1,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + 
TaskRefName string `protobuf:"bytes,2,opt,name=task_ref_name,json=taskRefName,proto3" json:"task_ref_name,omitempty"` + Output map[string]*_struct.Value `protobuf:"bytes,3,rep,name=output,proto3" json:"output,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + OutputMessage *any.Any `protobuf:"bytes,4,opt,name=output_message,json=outputMessage,proto3" json:"output_message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EventHandler_TaskDetails) Reset() { *m = EventHandler_TaskDetails{} } +func (m *EventHandler_TaskDetails) String() string { return proto.CompactTextString(m) } +func (*EventHandler_TaskDetails) ProtoMessage() {} +func (*EventHandler_TaskDetails) Descriptor() ([]byte, []int) { + return fileDescriptor_eventhandler_9283dbc454fc79ea, []int{0, 1} +} +func (m *EventHandler_TaskDetails) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EventHandler_TaskDetails.Unmarshal(m, b) +} +func (m *EventHandler_TaskDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EventHandler_TaskDetails.Marshal(b, m, deterministic) +} +func (dst *EventHandler_TaskDetails) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventHandler_TaskDetails.Merge(dst, src) +} +func (m *EventHandler_TaskDetails) XXX_Size() int { + return xxx_messageInfo_EventHandler_TaskDetails.Size(m) +} +func (m *EventHandler_TaskDetails) XXX_DiscardUnknown() { + xxx_messageInfo_EventHandler_TaskDetails.DiscardUnknown(m) +} + +var xxx_messageInfo_EventHandler_TaskDetails proto.InternalMessageInfo + +func (m *EventHandler_TaskDetails) GetWorkflowId() string { + if m != nil { + return m.WorkflowId + } + return "" +} + +func (m *EventHandler_TaskDetails) GetTaskRefName() string { + if m != nil { + return m.TaskRefName + } + return "" +} + +func (m *EventHandler_TaskDetails) GetOutput() map[string]*_struct.Value { + if m != nil { + return m.Output + } + return nil +} + +func (m *EventHandler_TaskDetails) GetOutputMessage() *any.Any { + if m != nil { + return m.OutputMessage + } + return nil +} + +type EventHandler_Action struct { + Action EventHandler_Action_Type `protobuf:"varint,1,opt,name=action,proto3,enum=conductor.proto.EventHandler_Action_Type" json:"action,omitempty"` + StartWorkflow *EventHandler_StartWorkflow `protobuf:"bytes,2,opt,name=start_workflow,json=startWorkflow,proto3" json:"start_workflow,omitempty"` + CompleteTask *EventHandler_TaskDetails `protobuf:"bytes,3,opt,name=complete_task,json=completeTask,proto3" json:"complete_task,omitempty"` + FailTask *EventHandler_TaskDetails `protobuf:"bytes,4,opt,name=fail_task,json=failTask,proto3" json:"fail_task,omitempty"` + ExpandInlineJson bool `protobuf:"varint,5,opt,name=expand_inline_json,json=expandInlineJson,proto3" json:"expand_inline_json,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EventHandler_Action) Reset() { *m = EventHandler_Action{} } +func (m *EventHandler_Action) String() string { return proto.CompactTextString(m) } +func (*EventHandler_Action) ProtoMessage() {} +func (*EventHandler_Action) Descriptor() ([]byte, []int) { + return fileDescriptor_eventhandler_9283dbc454fc79ea, []int{0, 2} +} +func (m *EventHandler_Action) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EventHandler_Action.Unmarshal(m, b) +} +func (m *EventHandler_Action) XXX_Marshal(b []byte, deterministic 
bool) ([]byte, error) { + return xxx_messageInfo_EventHandler_Action.Marshal(b, m, deterministic) +} +func (dst *EventHandler_Action) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventHandler_Action.Merge(dst, src) +} +func (m *EventHandler_Action) XXX_Size() int { + return xxx_messageInfo_EventHandler_Action.Size(m) +} +func (m *EventHandler_Action) XXX_DiscardUnknown() { + xxx_messageInfo_EventHandler_Action.DiscardUnknown(m) +} + +var xxx_messageInfo_EventHandler_Action proto.InternalMessageInfo + +func (m *EventHandler_Action) GetAction() EventHandler_Action_Type { + if m != nil { + return m.Action + } + return EventHandler_Action_START_WORKFLOW +} + +func (m *EventHandler_Action) GetStartWorkflow() *EventHandler_StartWorkflow { + if m != nil { + return m.StartWorkflow + } + return nil +} + +func (m *EventHandler_Action) GetCompleteTask() *EventHandler_TaskDetails { + if m != nil { + return m.CompleteTask + } + return nil +} + +func (m *EventHandler_Action) GetFailTask() *EventHandler_TaskDetails { + if m != nil { + return m.FailTask + } + return nil +} + +func (m *EventHandler_Action) GetExpandInlineJson() bool { + if m != nil { + return m.ExpandInlineJson + } + return false +} + +func init() { + proto.RegisterType((*EventHandler)(nil), "conductor.proto.EventHandler") + proto.RegisterType((*EventHandler_StartWorkflow)(nil), "conductor.proto.EventHandler.StartWorkflow") + proto.RegisterMapType((map[string]*_struct.Value)(nil), "conductor.proto.EventHandler.StartWorkflow.InputEntry") + proto.RegisterType((*EventHandler_TaskDetails)(nil), "conductor.proto.EventHandler.TaskDetails") + proto.RegisterMapType((map[string]*_struct.Value)(nil), "conductor.proto.EventHandler.TaskDetails.OutputEntry") + proto.RegisterType((*EventHandler_Action)(nil), "conductor.proto.EventHandler.Action") + proto.RegisterEnum("conductor.proto.EventHandler_Action_Type", EventHandler_Action_Type_name, EventHandler_Action_Type_value) +} + +func init() { + proto.RegisterFile("model/eventhandler.proto", fileDescriptor_eventhandler_9283dbc454fc79ea) +} + +var fileDescriptor_eventhandler_9283dbc454fc79ea = []byte{ + // 665 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x6f, 0x4f, 0xd3, 0x40, + 0x18, 0x77, 0x7f, 0x61, 0x4f, 0xe9, 0x9c, 0x17, 0x42, 0xea, 0x24, 0x91, 0x10, 0x4d, 0x30, 0x92, + 0x36, 0x99, 0xd1, 0x28, 0x1a, 0x93, 0xa1, 0x23, 0x4e, 0x06, 0xc3, 0x63, 0x91, 0xc4, 0x37, 0xcd, + 0xad, 0xbd, 0x8d, 0xba, 0xee, 0x6e, 0x69, 0xaf, 0x83, 0x7d, 0x1e, 0x3f, 0x81, 0x9f, 0xc0, 0xf7, + 0x7e, 0x2a, 0x73, 0x77, 0x2d, 0x14, 0x34, 0x28, 0x89, 0xef, 0x9e, 0xbf, 0xbf, 0xe7, 0x77, 0xbf, + 0xe7, 0x69, 0xc1, 0x9a, 0x72, 0x9f, 0x86, 0x0e, 0x9d, 0x53, 0x26, 0x4e, 0x09, 0xf3, 0x43, 0x1a, + 0xd9, 0xb3, 0x88, 0x0b, 0x8e, 0xee, 0x7a, 0x9c, 0xf9, 0x89, 0x27, 0x78, 0x1a, 0x68, 0xae, 0x8f, + 0x39, 0x1f, 0x87, 0xd4, 0x51, 0xde, 0x30, 0x19, 0x39, 0xb1, 0x88, 0x12, 0x4f, 0xa4, 0xd9, 0xfb, + 0xd7, 0xb3, 0x84, 0x2d, 0x74, 0x6a, 0xf3, 0x67, 0x0d, 0x56, 0x3a, 0x72, 0xc0, 0x07, 0x3d, 0x00, + 0x21, 0x28, 0x33, 0x32, 0xa5, 0x56, 0x61, 0xa3, 0xb0, 0x55, 0xc3, 0xca, 0x46, 0xab, 0x50, 0x51, + 0x24, 0xac, 0xa2, 0x0a, 0x6a, 0x07, 0xad, 0x43, 0x4d, 0xd2, 0x08, 0x44, 0xc0, 0x99, 0x55, 0x52, + 0x99, 0xcb, 0x00, 0x7a, 0x0b, 0x4b, 0xc4, 0x93, 0x56, 0x6c, 0x95, 0x37, 0x4a, 0x5b, 0x46, 0xeb, + 0x91, 0x7d, 0x8d, 0xb4, 0x9d, 0x9f, 0x6b, 0xb7, 0x55, 0x31, 0xce, 0x9a, 0xd0, 0x1a, 0x54, 0xa5, + 0x39, 0xa7, 0x56, 0x65, 0xa3, 0xb0, 0xb5, 0x8c, 0x53, 0xaf, 0xf9, 0xa3, 0x08, 0xe6, 0xb1, 0x20, + 0x91, 
0x38, 0xe1, 0xd1, 0x64, 0x14, 0xf2, 0xb3, 0x3f, 0x32, 0xb6, 0x60, 0x69, 0x4e, 0xa3, 0x58, + 0x32, 0x93, 0x9c, 0x2b, 0x38, 0x73, 0xd1, 0x63, 0xa8, 0x7b, 0x3c, 0x8a, 0x68, 0x48, 0xe4, 0x1c, + 0x37, 0xf0, 0x53, 0xea, 0x66, 0x2e, 0xda, 0xf5, 0x51, 0x0f, 0x2a, 0x01, 0x9b, 0x25, 0x22, 0x25, + 0xff, 0xe2, 0x66, 0xf2, 0x57, 0x08, 0xd9, 0x5d, 0xd9, 0xd8, 0x61, 0x22, 0x5a, 0x60, 0x0d, 0x82, + 0x5e, 0x81, 0xa9, 0x0c, 0x77, 0x4a, 0xe3, 0x98, 0x8c, 0xf5, 0x9b, 0x8c, 0xd6, 0xaa, 0xad, 0x17, + 0x63, 0x67, 0x8b, 0xb1, 0xdb, 0x6c, 0x81, 0x57, 0x54, 0xe9, 0x81, 0xae, 0x6c, 0x1e, 0x01, 0x5c, + 0xe2, 0xa1, 0x06, 0x94, 0x26, 0x74, 0x91, 0x3e, 0x55, 0x9a, 0x68, 0x1b, 0x2a, 0x73, 0x12, 0x26, + 0x54, 0xbd, 0xd3, 0x68, 0xad, 0xfd, 0x06, 0xf9, 0x59, 0x66, 0xb1, 0x2e, 0xda, 0x29, 0xbe, 0x2c, + 0x34, 0xbf, 0x17, 0xc1, 0x18, 0x90, 0x78, 0xf2, 0x9e, 0x0a, 0x12, 0x84, 0x31, 0x7a, 0x08, 0xc6, + 0x59, 0x4a, 0x5d, 0xca, 0xa1, 0xb1, 0x21, 0x0b, 0x75, 0x7d, 0xb4, 0x09, 0xa6, 0x20, 0xf1, 0xc4, + 0x8d, 0xe8, 0xc8, 0x55, 0x4a, 0xeb, 0x33, 0x30, 0x64, 0x10, 0xd3, 0xd1, 0xa1, 0x14, 0xfc, 0x00, + 0xaa, 0x3c, 0x11, 0x52, 0xb0, 0x92, 0x12, 0xec, 0xf9, 0xcd, 0x82, 0xe5, 0xe6, 0xdb, 0x7d, 0xd5, + 0xa7, 0xf5, 0x4a, 0x41, 0xd0, 0x6b, 0xa8, 0x6b, 0xeb, 0x42, 0xb1, 0xf2, 0x0d, 0x8a, 0x99, 0xba, + 0x36, 0x93, 0xec, 0x13, 0x18, 0x39, 0xcc, 0xff, 0xa2, 0xd9, 0xb7, 0x12, 0x54, 0xf5, 0x85, 0xa2, + 0xb6, 0x3e, 0x4c, 0xce, 0x14, 0x62, 0xbd, 0xf5, 0xe4, 0x5f, 0xee, 0xda, 0x1e, 0x2c, 0x66, 0x14, + 0xa7, 0x8d, 0x08, 0x43, 0x3d, 0x96, 0x17, 0xe3, 0x66, 0x22, 0xa7, 0x44, 0x9e, 0xde, 0xe2, 0xca, + 0xb0, 0x19, 0x5f, 0xf9, 0x0a, 0x0e, 0xc1, 0xf4, 0xf8, 0x74, 0x16, 0x52, 0x41, 0x5d, 0xb9, 0x18, + 0x75, 0xd6, 0xc6, 0xdf, 0xd8, 0xe5, 0xf6, 0x80, 0x57, 0xb2, 0x7e, 0x19, 0x44, 0x7b, 0x50, 0x1b, + 0x91, 0x20, 0xd4, 0x58, 0xe5, 0xdb, 0x62, 0x2d, 0xcb, 0x5e, 0x85, 0xb3, 0x0d, 0x88, 0x9e, 0xcf, + 0x08, 0xf3, 0xdd, 0x80, 0x85, 0x01, 0xa3, 0xee, 0xd7, 0x98, 0xb3, 0xf4, 0x9b, 0x6e, 0xe8, 0x4c, + 0x57, 0x25, 0x3e, 0xc6, 0x9c, 0x6d, 0xbe, 0x81, 0xb2, 0x54, 0x0a, 0x21, 0xa8, 0x1f, 0x0f, 0xda, + 0x78, 0xe0, 0x9e, 0xf4, 0xf1, 0xfe, 0x5e, 0xaf, 0x7f, 0xd2, 0xb8, 0x83, 0xee, 0x81, 0xf9, 0xae, + 0x7f, 0x70, 0xd4, 0xeb, 0x0c, 0x3a, 0xee, 0xa0, 0x7d, 0xbc, 0xdf, 0x28, 0x20, 0x13, 0x6a, 0x7b, + 0xed, 0x6e, 0x4f, 0xbb, 0xc5, 0xdd, 0x00, 0x1e, 0x78, 0x7c, 0x6a, 0x33, 0x2a, 0x46, 0x61, 0x70, + 0x7e, 0x9d, 0xed, 0x6e, 0x3d, 0x4f, 0xf7, 0x68, 0xf8, 0x65, 0x67, 0x1c, 0x88, 0xd3, 0x64, 0x68, + 0x7b, 0x7c, 0xea, 0xa4, 0x3d, 0xce, 0x45, 0x8f, 0xe3, 0x85, 0x01, 0x65, 0xc2, 0x19, 0xf3, 0x71, + 0x34, 0xf3, 0x72, 0x71, 0xf5, 0x53, 0x1e, 0x56, 0x15, 0xe4, 0xb3, 0x5f, 0x01, 0x00, 0x00, 0xff, + 0xff, 0x15, 0xb8, 0xa4, 0xd6, 0xa4, 0x05, 0x00, 0x00, +} diff --git a/client/gogrpc/conductor/model/polldata.pb.go b/client/gogrpc/conductor/model/polldata.pb.go new file mode 100644 index 0000000000..3b569f532b --- /dev/null +++ b/client/gogrpc/conductor/model/polldata.pb.go @@ -0,0 +1,106 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: model/polldata.proto + +package model // import "github.com/netflix/conductor/client/gogrpc/conductor/model" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type PollData struct { + QueueName string `protobuf:"bytes,1,opt,name=queue_name,json=queueName,proto3" json:"queue_name,omitempty"` + Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"` + WorkerId string `protobuf:"bytes,3,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"` + LastPollTime int64 `protobuf:"varint,4,opt,name=last_poll_time,json=lastPollTime,proto3" json:"last_poll_time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PollData) Reset() { *m = PollData{} } +func (m *PollData) String() string { return proto.CompactTextString(m) } +func (*PollData) ProtoMessage() {} +func (*PollData) Descriptor() ([]byte, []int) { + return fileDescriptor_polldata_c64f15389955536a, []int{0} +} +func (m *PollData) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PollData.Unmarshal(m, b) +} +func (m *PollData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PollData.Marshal(b, m, deterministic) +} +func (dst *PollData) XXX_Merge(src proto.Message) { + xxx_messageInfo_PollData.Merge(dst, src) +} +func (m *PollData) XXX_Size() int { + return xxx_messageInfo_PollData.Size(m) +} +func (m *PollData) XXX_DiscardUnknown() { + xxx_messageInfo_PollData.DiscardUnknown(m) +} + +var xxx_messageInfo_PollData proto.InternalMessageInfo + +func (m *PollData) GetQueueName() string { + if m != nil { + return m.QueueName + } + return "" +} + +func (m *PollData) GetDomain() string { + if m != nil { + return m.Domain + } + return "" +} + +func (m *PollData) GetWorkerId() string { + if m != nil { + return m.WorkerId + } + return "" +} + +func (m *PollData) GetLastPollTime() int64 { + if m != nil { + return m.LastPollTime + } + return 0 +} + +func init() { + proto.RegisterType((*PollData)(nil), "conductor.proto.PollData") +} + +func init() { proto.RegisterFile("model/polldata.proto", fileDescriptor_polldata_c64f15389955536a) } + +var fileDescriptor_polldata_c64f15389955536a = []byte{ + // 229 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x90, 0xc1, 0x4a, 0x03, 0x31, + 0x10, 0x86, 0x59, 0x2b, 0xa5, 0x3b, 0x88, 0x42, 0x10, 0x59, 0x28, 0x42, 0x11, 0x0f, 0x3d, 0x6d, + 0x0e, 0xde, 0x3c, 0x16, 0x2f, 0x5e, 0xa4, 0x14, 0x4f, 0x5e, 0x96, 0x6c, 0x32, 0xae, 0xc1, 0x4c, + 0x66, 0x8d, 0x13, 0xf4, 0x01, 0x7c, 0x70, 0xd9, 0xb4, 0x8a, 0x78, 0x9c, 0xef, 0xff, 0x99, 0xf9, + 0x18, 0x38, 0x27, 0x76, 0x18, 0xf4, 0xc8, 0x21, 0x38, 0x23, 0xa6, 0x1d, 0x13, 0x0b, 0xab, 0x33, + 0xcb, 0xd1, 0x65, 0x2b, 0x9c, 0xf6, 0xe0, 0xea, 0xab, 0x82, 0xc5, 0x96, 0x43, 0xb8, 0x33, 0x62, + 0xd4, 0x25, 0xc0, 0x5b, 0xc6, 0x8c, 0x5d, 0x34, 0x84, 0x4d, 0xb5, 0xaa, 0xd6, 0xf5, 0xae, 0x2e, + 0xe4, 0xc1, 0x10, 0xaa, 0x0b, 0x98, 0x3b, 0x26, 0xe3, 0x63, 0x73, 0x54, 0xa2, 0xc3, 0xa4, 0x96, + 0x50, 0x7f, 0x70, 0x7a, 0xc5, 0xd4, 0x79, 0xd7, 0xcc, 0x4a, 0xb4, 0xd8, 0x83, 0x7b, 0xa7, 0xae, + 0xe1, 0x34, 0x98, 0x77, 0xe9, 0x26, 0x91, 0x4e, 0x3c, 0x61, 0x73, 0xbc, 0xaa, 0xd6, 0xb3, 0xdd, + 0xc9, 0x44, 0xa7, 0xcb, 0x8f, 0x9e, 0x70, 0x83, 0xb0, 0xb4, 0x4c, 0x6d, 0x44, 0x79, 0x0e, 0xfe, + 0xb3, 0xfd, 0x67, 0xb9, 0x81, 0x1f, 0xc5, 0x6d, 0xff, 0x74, 0x3b, 0x78, 0x79, 0xc9, 0x7d, 0x6b, + 0x99, 0xf4, 0xa1, 0xaf, 0x7f, 0xfb, 0xda, 0x06, 0x8f, 0x51, 0xf4, 0xc0, 
0x43, 0x1a, 0xed, 0x1f, + 0x5e, 0xbe, 0xd1, 0xcf, 0xcb, 0xba, 0x9b, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x94, 0x37, 0x71, + 0xb0, 0x1d, 0x01, 0x00, 0x00, +} diff --git a/client/gogrpc/conductor/model/rerunworkflowrequest.pb.go b/client/gogrpc/conductor/model/rerunworkflowrequest.pb.go new file mode 100644 index 0000000000..cf04669222 --- /dev/null +++ b/client/gogrpc/conductor/model/rerunworkflowrequest.pb.go @@ -0,0 +1,128 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: model/rerunworkflowrequest.proto + +package model // import "github.com/netflix/conductor/client/gogrpc/conductor/model" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _struct "github.com/golang/protobuf/ptypes/struct" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type RerunWorkflowRequest struct { + ReRunFromWorkflowId string `protobuf:"bytes,1,opt,name=re_run_from_workflow_id,json=reRunFromWorkflowId,proto3" json:"re_run_from_workflow_id,omitempty"` + WorkflowInput map[string]*_struct.Value `protobuf:"bytes,2,rep,name=workflow_input,json=workflowInput,proto3" json:"workflow_input,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + ReRunFromTaskId string `protobuf:"bytes,3,opt,name=re_run_from_task_id,json=reRunFromTaskId,proto3" json:"re_run_from_task_id,omitempty"` + TaskInput map[string]*_struct.Value `protobuf:"bytes,4,rep,name=task_input,json=taskInput,proto3" json:"task_input,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + CorrelationId string `protobuf:"bytes,5,opt,name=correlation_id,json=correlationId,proto3" json:"correlation_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RerunWorkflowRequest) Reset() { *m = RerunWorkflowRequest{} } +func (m *RerunWorkflowRequest) String() string { return proto.CompactTextString(m) } +func (*RerunWorkflowRequest) ProtoMessage() {} +func (*RerunWorkflowRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_rerunworkflowrequest_ec6d727a6700f219, []int{0} +} +func (m *RerunWorkflowRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RerunWorkflowRequest.Unmarshal(m, b) +} +func (m *RerunWorkflowRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RerunWorkflowRequest.Marshal(b, m, deterministic) +} +func (dst *RerunWorkflowRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RerunWorkflowRequest.Merge(dst, src) +} +func (m *RerunWorkflowRequest) XXX_Size() int { + return xxx_messageInfo_RerunWorkflowRequest.Size(m) +} +func (m *RerunWorkflowRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RerunWorkflowRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RerunWorkflowRequest proto.InternalMessageInfo + +func (m *RerunWorkflowRequest) GetReRunFromWorkflowId() string { + if m != nil { + return m.ReRunFromWorkflowId + } + return "" +} + +func (m *RerunWorkflowRequest) GetWorkflowInput() map[string]*_struct.Value { + if m != nil { + return 
m.WorkflowInput + } + return nil +} + +func (m *RerunWorkflowRequest) GetReRunFromTaskId() string { + if m != nil { + return m.ReRunFromTaskId + } + return "" +} + +func (m *RerunWorkflowRequest) GetTaskInput() map[string]*_struct.Value { + if m != nil { + return m.TaskInput + } + return nil +} + +func (m *RerunWorkflowRequest) GetCorrelationId() string { + if m != nil { + return m.CorrelationId + } + return "" +} + +func init() { + proto.RegisterType((*RerunWorkflowRequest)(nil), "conductor.proto.RerunWorkflowRequest") + proto.RegisterMapType((map[string]*_struct.Value)(nil), "conductor.proto.RerunWorkflowRequest.TaskInputEntry") + proto.RegisterMapType((map[string]*_struct.Value)(nil), "conductor.proto.RerunWorkflowRequest.WorkflowInputEntry") +} + +func init() { + proto.RegisterFile("model/rerunworkflowrequest.proto", fileDescriptor_rerunworkflowrequest_ec6d727a6700f219) +} + +var fileDescriptor_rerunworkflowrequest_ec6d727a6700f219 = []byte{ + // 369 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x92, 0x4f, 0xef, 0xd2, 0x30, + 0x18, 0xc7, 0x33, 0xf8, 0x61, 0x42, 0x09, 0x60, 0x8a, 0x41, 0x82, 0x1e, 0x16, 0x13, 0x13, 0x0e, + 0xa4, 0x4b, 0x90, 0x03, 0xe1, 0x48, 0xa2, 0x09, 0x37, 0x33, 0x89, 0x1a, 0x2f, 0xcb, 0xd6, 0x75, + 0x73, 0xd9, 0xd6, 0x07, 0x9e, 0xb5, 0x22, 0xaf, 0xc0, 0xb7, 0x6d, 0xd6, 0x6d, 0x30, 0x91, 0x83, + 0x07, 0x6f, 0xeb, 0xb3, 0xef, 0x9f, 0xcf, 0x9e, 0x8e, 0xd8, 0x39, 0x84, 0x22, 0x73, 0x50, 0xa0, + 0x96, 0x67, 0xc0, 0x34, 0xca, 0xe0, 0x8c, 0xe2, 0xa4, 0x45, 0xa1, 0xd8, 0x11, 0x41, 0x01, 0x1d, + 0x73, 0x90, 0xa1, 0xe6, 0x0a, 0xb0, 0x1a, 0xcc, 0x5f, 0xc7, 0x00, 0x71, 0x26, 0x1c, 0x73, 0x0a, + 0x74, 0xe4, 0x14, 0x0a, 0x35, 0xaf, 0xe5, 0x6f, 0x7e, 0x3d, 0x91, 0x17, 0x6e, 0x99, 0xf6, 0xa5, + 0x4e, 0x73, 0xab, 0x34, 0xba, 0x26, 0x2f, 0x51, 0x78, 0xa8, 0xa5, 0x17, 0x21, 0xe4, 0x5e, 0x53, + 0xe6, 0x25, 0xe1, 0xcc, 0xb2, 0xad, 0x45, 0xdf, 0x9d, 0xa0, 0x70, 0xb5, 0xfc, 0x80, 0x90, 0x37, + 0xd6, 0x7d, 0x48, 0x3d, 0x32, 0xba, 0x29, 0xe5, 0x51, 0xab, 0x59, 0xc7, 0xee, 0x2e, 0x06, 0xab, + 0x0d, 0xbb, 0xc3, 0x62, 0x8f, 0x4a, 0xd9, 0x35, 0xa9, 0xb4, 0xbe, 0x97, 0x0a, 0x2f, 0xee, 0xf0, + 0xdc, 0x9e, 0xd1, 0x25, 0x99, 0xb4, 0xb1, 0x94, 0x5f, 0xa4, 0x25, 0x52, 0xd7, 0x20, 0x8d, 0xaf, + 0x48, 0x07, 0xbf, 0x48, 0xf7, 0x21, 0xfd, 0x44, 0x48, 0xa5, 0x30, 0x28, 0x4f, 0x06, 0x65, 0xfd, + 0x6f, 0x28, 0x26, 0xe1, 0x86, 0xd1, 0x57, 0xcd, 0x99, 0xbe, 0x25, 0x23, 0x0e, 0x88, 0x22, 0xf3, + 0x55, 0x02, 0xb2, 0x6c, 0xef, 0x99, 0xf6, 0x61, 0x6b, 0xba, 0x0f, 0xe7, 0x5f, 0x09, 0xfd, 0xfb, + 0x73, 0xe8, 0x73, 0xd2, 0x4d, 0xc5, 0xa5, 0x5e, 0x61, 0xf9, 0x48, 0x97, 0xa4, 0xf7, 0xc3, 0xcf, + 0xb4, 0x98, 0x75, 0x6c, 0x6b, 0x31, 0x58, 0x4d, 0x59, 0x75, 0x5f, 0xac, 0xb9, 0x2f, 0xf6, 0xb9, + 0x7c, 0xeb, 0x56, 0xa2, 0x6d, 0x67, 0x63, 0xcd, 0x0f, 0x64, 0xf4, 0x27, 0xdd, 0xff, 0x48, 0xdd, + 0x9d, 0xc8, 0x2b, 0x0e, 0x39, 0x93, 0x42, 0x45, 0x59, 0xf2, 0xf3, 0x7e, 0x49, 0xbb, 0xe9, 0xa3, + 0x2d, 0x7d, 0x0c, 0xbe, 0x6d, 0xe3, 0x44, 0x7d, 0xd7, 0x01, 0xe3, 0x90, 0x3b, 0xb5, 0xd7, 0xb9, + 0x7a, 0x1d, 0x9e, 0x25, 0x42, 0x2a, 0x27, 0x86, 0x18, 0x8f, 0xbc, 0x35, 0x37, 0xbf, 0x71, 0xf0, + 0xcc, 0x44, 0xbf, 0xfb, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xce, 0x3a, 0x9b, 0x51, 0xd6, 0x02, 0x00, + 0x00, +} diff --git a/client/gogrpc/conductor/model/skiptaskrequest.pb.go b/client/gogrpc/conductor/model/skiptaskrequest.pb.go new file mode 100644 index 0000000000..42cd06bc1d --- /dev/null +++ b/client/gogrpc/conductor/model/skiptaskrequest.pb.go @@ -0,0 +1,119 @@ +// Code generated by 
protoc-gen-go. DO NOT EDIT. +// source: model/skiptaskrequest.proto + +package model // import "github.com/netflix/conductor/client/gogrpc/conductor/model" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import any "github.com/golang/protobuf/ptypes/any" +import _struct "github.com/golang/protobuf/ptypes/struct" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type SkipTaskRequest struct { + TaskInput map[string]*_struct.Value `protobuf:"bytes,1,rep,name=task_input,json=taskInput,proto3" json:"task_input,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + TaskOutput map[string]*_struct.Value `protobuf:"bytes,2,rep,name=task_output,json=taskOutput,proto3" json:"task_output,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + TaskInputMessage *any.Any `protobuf:"bytes,3,opt,name=task_input_message,json=taskInputMessage,proto3" json:"task_input_message,omitempty"` + TaskOutputMessage *any.Any `protobuf:"bytes,4,opt,name=task_output_message,json=taskOutputMessage,proto3" json:"task_output_message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SkipTaskRequest) Reset() { *m = SkipTaskRequest{} } +func (m *SkipTaskRequest) String() string { return proto.CompactTextString(m) } +func (*SkipTaskRequest) ProtoMessage() {} +func (*SkipTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_skiptaskrequest_3fbc52032537d94c, []int{0} +} +func (m *SkipTaskRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SkipTaskRequest.Unmarshal(m, b) +} +func (m *SkipTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SkipTaskRequest.Marshal(b, m, deterministic) +} +func (dst *SkipTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SkipTaskRequest.Merge(dst, src) +} +func (m *SkipTaskRequest) XXX_Size() int { + return xxx_messageInfo_SkipTaskRequest.Size(m) +} +func (m *SkipTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SkipTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SkipTaskRequest proto.InternalMessageInfo + +func (m *SkipTaskRequest) GetTaskInput() map[string]*_struct.Value { + if m != nil { + return m.TaskInput + } + return nil +} + +func (m *SkipTaskRequest) GetTaskOutput() map[string]*_struct.Value { + if m != nil { + return m.TaskOutput + } + return nil +} + +func (m *SkipTaskRequest) GetTaskInputMessage() *any.Any { + if m != nil { + return m.TaskInputMessage + } + return nil +} + +func (m *SkipTaskRequest) GetTaskOutputMessage() *any.Any { + if m != nil { + return m.TaskOutputMessage + } + return nil +} + +func init() { + proto.RegisterType((*SkipTaskRequest)(nil), "conductor.proto.SkipTaskRequest") + proto.RegisterMapType((map[string]*_struct.Value)(nil), "conductor.proto.SkipTaskRequest.TaskInputEntry") + proto.RegisterMapType((map[string]*_struct.Value)(nil), "conductor.proto.SkipTaskRequest.TaskOutputEntry") +} + +func init() { + 
proto.RegisterFile("model/skiptaskrequest.proto", fileDescriptor_skiptaskrequest_3fbc52032537d94c) +} + +var fileDescriptor_skiptaskrequest_3fbc52032537d94c = []byte{ + // 348 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x92, 0xbd, 0x4e, 0xc3, 0x30, + 0x14, 0x85, 0x95, 0x06, 0x90, 0xea, 0x4a, 0xb4, 0x35, 0x08, 0x85, 0x94, 0xa1, 0x62, 0xea, 0x80, + 0x6c, 0x54, 0x16, 0xd4, 0x8d, 0x0a, 0x06, 0x06, 0xfe, 0x42, 0x61, 0x60, 0xa9, 0x12, 0xd7, 0x0d, + 0x51, 0x12, 0x3b, 0xc4, 0x36, 0x22, 0xcf, 0xcc, 0x4b, 0xa0, 0xd8, 0xfd, 0xc3, 0x48, 0x88, 0x81, + 0xcd, 0xbe, 0xf7, 0x9e, 0xef, 0x9e, 0x23, 0x1b, 0xf4, 0x72, 0x3e, 0xa3, 0x19, 0x16, 0x69, 0x52, + 0xc8, 0x50, 0xa4, 0x25, 0x7d, 0x53, 0x54, 0x48, 0x54, 0x94, 0x5c, 0x72, 0xd8, 0x26, 0x9c, 0xcd, + 0x14, 0x91, 0xbc, 0x34, 0x05, 0xff, 0x28, 0xe6, 0x3c, 0xce, 0x28, 0xd6, 0xb7, 0x48, 0xcd, 0xb1, + 0x90, 0xa5, 0x22, 0x8b, 0x71, 0xff, 0xd0, 0xee, 0x86, 0xac, 0x32, 0xad, 0xe3, 0x4f, 0x17, 0xb4, + 0x1f, 0xd3, 0xa4, 0x98, 0x84, 0x22, 0x0d, 0xcc, 0x0e, 0x78, 0x0b, 0x40, 0xbd, 0x72, 0x9a, 0xb0, + 0x42, 0x49, 0xcf, 0xe9, 0xbb, 0x83, 0xd6, 0x10, 0x23, 0x6b, 0x25, 0xb2, 0x54, 0xa8, 0x3e, 0x5f, + 0xd7, 0x8a, 0x2b, 0x26, 0xcb, 0x2a, 0x68, 0xca, 0xe5, 0x1d, 0x3e, 0x80, 0x96, 0xe6, 0x71, 0x25, + 0x6b, 0x60, 0x43, 0x03, 0x4f, 0xff, 0x04, 0xbc, 0xd3, 0x12, 0x43, 0xd4, 0xa6, 0x4c, 0x01, 0x8e, + 0x01, 0x5c, 0x5b, 0x9c, 0xe6, 0x54, 0x88, 0x30, 0xa6, 0x9e, 0xdb, 0x77, 0x06, 0xad, 0xe1, 0x3e, + 0x32, 0x71, 0xd1, 0x32, 0x2e, 0xba, 0x60, 0x55, 0xd0, 0x59, 0xf9, 0xb9, 0x31, 0xd3, 0xf0, 0x12, + 0xec, 0x6d, 0xd8, 0x5a, 0x41, 0xb6, 0x7e, 0x81, 0x74, 0xd7, 0x16, 0x16, 0x14, 0x7f, 0x02, 0x76, + 0xbf, 0x27, 0x87, 0x1d, 0xe0, 0xa6, 0xb4, 0xf2, 0x9c, 0xbe, 0x33, 0x68, 0x06, 0xf5, 0x11, 0x9e, + 0x80, 0xed, 0xf7, 0x30, 0x53, 0xd4, 0x6b, 0x68, 0xf6, 0xc1, 0x0f, 0xf6, 0x73, 0xdd, 0x0d, 0xcc, + 0xd0, 0xa8, 0x71, 0xee, 0xf8, 0x4f, 0xa0, 0x6d, 0xc5, 0xff, 0x0f, 0xec, 0x38, 0x03, 0x3d, 0xc2, + 0x73, 0xc4, 0xa8, 0x9c, 0x67, 0xc9, 0x87, 0xfd, 0x02, 0xe3, 0xae, 0xf5, 0x04, 0xf7, 0xd1, 0xcb, + 0x28, 0x4e, 0xe4, 0xab, 0x8a, 0x10, 0xe1, 0x39, 0x5e, 0xc8, 0xf0, 0x4a, 0x86, 0x49, 0x96, 0x50, + 0x26, 0x71, 0xcc, 0xe3, 0xb2, 0x20, 0x1b, 0x75, 0xfd, 0x77, 0xa3, 0x1d, 0x4d, 0x3d, 0xfb, 0x0a, + 0x00, 0x00, 0xff, 0xff, 0xcb, 0xb6, 0xee, 0xfd, 0xcb, 0x02, 0x00, 0x00, +} diff --git a/client/gogrpc/conductor/model/startworkflowrequest.pb.go b/client/gogrpc/conductor/model/startworkflowrequest.pb.go new file mode 100644 index 0000000000..bd2ca8e26f --- /dev/null +++ b/client/gogrpc/conductor/model/startworkflowrequest.pb.go @@ -0,0 +1,137 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: model/startworkflowrequest.proto + +package model // import "github.com/netflix/conductor/client/gogrpc/conductor/model" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _struct "github.com/golang/protobuf/ptypes/struct" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type StartWorkflowRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Version int32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + CorrelationId string `protobuf:"bytes,3,opt,name=correlation_id,json=correlationId,proto3" json:"correlation_id,omitempty"` + Input map[string]*_struct.Value `protobuf:"bytes,4,rep,name=input,proto3" json:"input,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + TaskToDomain map[string]string `protobuf:"bytes,5,rep,name=task_to_domain,json=taskToDomain,proto3" json:"task_to_domain,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + WorkflowDef *WorkflowDef `protobuf:"bytes,6,opt,name=workflow_def,json=workflowDef,proto3" json:"workflow_def,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartWorkflowRequest) Reset() { *m = StartWorkflowRequest{} } +func (m *StartWorkflowRequest) String() string { return proto.CompactTextString(m) } +func (*StartWorkflowRequest) ProtoMessage() {} +func (*StartWorkflowRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_startworkflowrequest_57b778443ff5f3ba, []int{0} +} +func (m *StartWorkflowRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartWorkflowRequest.Unmarshal(m, b) +} +func (m *StartWorkflowRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartWorkflowRequest.Marshal(b, m, deterministic) +} +func (dst *StartWorkflowRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartWorkflowRequest.Merge(dst, src) +} +func (m *StartWorkflowRequest) XXX_Size() int { + return xxx_messageInfo_StartWorkflowRequest.Size(m) +} +func (m *StartWorkflowRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StartWorkflowRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StartWorkflowRequest proto.InternalMessageInfo + +func (m *StartWorkflowRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *StartWorkflowRequest) GetVersion() int32 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *StartWorkflowRequest) GetCorrelationId() string { + if m != nil { + return m.CorrelationId + } + return "" +} + +func (m *StartWorkflowRequest) GetInput() map[string]*_struct.Value { + if m != nil { + return m.Input + } + return nil +} + +func (m *StartWorkflowRequest) GetTaskToDomain() map[string]string { + if m != nil { + return m.TaskToDomain + } + return nil +} + +func (m *StartWorkflowRequest) GetWorkflowDef() *WorkflowDef { + if m != nil { + return m.WorkflowDef + } + return nil +} + +func init() { + proto.RegisterType((*StartWorkflowRequest)(nil), "conductor.proto.StartWorkflowRequest") + proto.RegisterMapType((map[string]*_struct.Value)(nil), "conductor.proto.StartWorkflowRequest.InputEntry") + proto.RegisterMapType((map[string]string)(nil), "conductor.proto.StartWorkflowRequest.TaskToDomainEntry") +} + +func init() { + proto.RegisterFile("model/startworkflowrequest.proto", fileDescriptor_startworkflowrequest_57b778443ff5f3ba) +} + +var fileDescriptor_startworkflowrequest_57b778443ff5f3ba = []byte{ + // 396 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x51, 0xab, 0xd3, 0x30, + 0x14, 0x80, 0xe9, 0xed, 0xed, 0x95, 0x9b, 0x5e, 
0xaf, 0x1a, 0x2e, 0xd7, 0x32, 0xf7, 0x50, 0x04, + 0xa1, 0x0f, 0x92, 0xca, 0x7c, 0x50, 0xf6, 0x32, 0x18, 0x53, 0xd8, 0xdb, 0xa8, 0x43, 0x41, 0x90, + 0xd2, 0xa6, 0x69, 0x0d, 0x6d, 0x73, 0xb6, 0x34, 0xdd, 0xdc, 0x1f, 0xf6, 0x77, 0x48, 0xd3, 0xd6, + 0x95, 0x6d, 0x0f, 0xf7, 0x2d, 0xe7, 0x24, 0xdf, 0x97, 0x93, 0x73, 0x82, 0xdc, 0x12, 0x12, 0x56, + 0xf8, 0x95, 0x8a, 0xa4, 0xda, 0x83, 0xcc, 0xd3, 0x02, 0xf6, 0x92, 0x6d, 0x6b, 0x56, 0x29, 0xb2, + 0x91, 0xa0, 0x00, 0xbf, 0xa0, 0x20, 0x92, 0x9a, 0x2a, 0x90, 0x6d, 0x62, 0xf4, 0xba, 0x45, 0xfa, + 0xd3, 0x09, 0x4b, 0xbb, 0x8d, 0x71, 0x06, 0x90, 0x15, 0xcc, 0xd7, 0x51, 0x5c, 0xa7, 0x7e, 0xa5, + 0x64, 0x4d, 0x3b, 0xcf, 0xdb, 0xbf, 0x26, 0x7a, 0xf8, 0xd6, 0x5c, 0xf3, 0xa3, 0x03, 0x83, 0xf6, + 0x1a, 0x8c, 0xd1, 0xb5, 0x88, 0x4a, 0xe6, 0x18, 0xae, 0xe1, 0xdd, 0x06, 0x7a, 0x8d, 0x1d, 0xf4, + 0x6c, 0xc7, 0x64, 0xc5, 0x41, 0x38, 0x57, 0xae, 0xe1, 0x59, 0x41, 0x1f, 0xe2, 0x77, 0xe8, 0x9e, + 0x82, 0x94, 0xac, 0x88, 0x14, 0x07, 0x11, 0xf2, 0xc4, 0x31, 0x35, 0xf7, 0x7c, 0x90, 0x5d, 0x26, + 0xf8, 0x2b, 0xb2, 0xb8, 0xd8, 0xd4, 0xca, 0xb9, 0x76, 0x4d, 0xcf, 0x9e, 0x7c, 0x20, 0x27, 0xaf, + 0x20, 0x97, 0x4a, 0x21, 0xcb, 0x06, 0xf9, 0x22, 0x94, 0x3c, 0x04, 0x2d, 0x8e, 0x7f, 0xa1, 0x7b, + 0x15, 0x55, 0x79, 0xa8, 0x20, 0x4c, 0xa0, 0x8c, 0xb8, 0x70, 0x2c, 0x2d, 0xfc, 0xf4, 0x34, 0xe1, + 0x3a, 0xaa, 0xf2, 0x35, 0x2c, 0x34, 0xd9, 0x7a, 0xef, 0xd4, 0x20, 0x85, 0x67, 0xe8, 0xae, 0xef, + 0x63, 0x98, 0xb0, 0xd4, 0xb9, 0x71, 0x0d, 0xcf, 0x9e, 0x8c, 0xcf, 0xe4, 0xbd, 0x77, 0xc1, 0xd2, + 0xc0, 0xde, 0x1f, 0x83, 0xd1, 0x0a, 0xa1, 0x63, 0xd1, 0xf8, 0x25, 0x32, 0x73, 0x76, 0xe8, 0x3a, + 0xd9, 0x2c, 0xf1, 0x7b, 0x64, 0xed, 0xa2, 0xa2, 0x66, 0xba, 0x8d, 0xf6, 0xe4, 0x91, 0xb4, 0x33, + 0x22, 0xfd, 0x8c, 0xc8, 0xf7, 0x66, 0x37, 0x68, 0x0f, 0x4d, 0xaf, 0x3e, 0x1b, 0xa3, 0x19, 0x7a, + 0x75, 0x56, 0xf5, 0x05, 0xf1, 0xc3, 0x50, 0x7c, 0x3b, 0x10, 0xcc, 0xb7, 0xe8, 0x0d, 0x85, 0x92, + 0x08, 0xa6, 0xd2, 0x82, 0xff, 0x39, 0x7d, 0xca, 0xfc, 0xf1, 0x52, 0xa3, 0x56, 0xf1, 0xcf, 0x69, + 0xc6, 0xd5, 0xef, 0x3a, 0x26, 0x14, 0x4a, 0xbf, 0x63, 0xfd, 0xff, 0xac, 0x4f, 0x0b, 0xce, 0x84, + 0xf2, 0x33, 0xc8, 0xe4, 0x86, 0x0e, 0xf2, 0xfa, 0x2f, 0xc6, 0x37, 0x5a, 0xfd, 0xf1, 0x5f, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x26, 0x3f, 0xa7, 0x2d, 0xce, 0x02, 0x00, 0x00, +} diff --git a/client/gogrpc/conductor/model/subworkflowparams.pb.go b/client/gogrpc/conductor/model/subworkflowparams.pb.go new file mode 100644 index 0000000000..ac4a65deb1 --- /dev/null +++ b/client/gogrpc/conductor/model/subworkflowparams.pb.go @@ -0,0 +1,89 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: model/subworkflowparams.proto + +package model // import "github.com/netflix/conductor/client/gogrpc/conductor/model" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type SubWorkflowParams struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Version int32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SubWorkflowParams) Reset() { *m = SubWorkflowParams{} } +func (m *SubWorkflowParams) String() string { return proto.CompactTextString(m) } +func (*SubWorkflowParams) ProtoMessage() {} +func (*SubWorkflowParams) Descriptor() ([]byte, []int) { + return fileDescriptor_subworkflowparams_957362175bba13c8, []int{0} +} +func (m *SubWorkflowParams) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SubWorkflowParams.Unmarshal(m, b) +} +func (m *SubWorkflowParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SubWorkflowParams.Marshal(b, m, deterministic) +} +func (dst *SubWorkflowParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubWorkflowParams.Merge(dst, src) +} +func (m *SubWorkflowParams) XXX_Size() int { + return xxx_messageInfo_SubWorkflowParams.Size(m) +} +func (m *SubWorkflowParams) XXX_DiscardUnknown() { + xxx_messageInfo_SubWorkflowParams.DiscardUnknown(m) +} + +var xxx_messageInfo_SubWorkflowParams proto.InternalMessageInfo + +func (m *SubWorkflowParams) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *SubWorkflowParams) GetVersion() int32 { + if m != nil { + return m.Version + } + return 0 +} + +func init() { + proto.RegisterType((*SubWorkflowParams)(nil), "conductor.proto.SubWorkflowParams") +} + +func init() { + proto.RegisterFile("model/subworkflowparams.proto", fileDescriptor_subworkflowparams_957362175bba13c8) +} + +var fileDescriptor_subworkflowparams_957362175bba13c8 = []byte{ + // 183 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcd, 0xcd, 0x4f, 0x49, + 0xcd, 0xd1, 0x2f, 0x2e, 0x4d, 0x2a, 0xcf, 0x2f, 0xca, 0x4e, 0xcb, 0xc9, 0x2f, 0x2f, 0x48, 0x2c, + 0x4a, 0xcc, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4f, 0xce, 0xcf, 0x4b, 0x29, + 0x4d, 0x2e, 0xc9, 0x2f, 0x82, 0x08, 0x28, 0x39, 0x72, 0x09, 0x06, 0x97, 0x26, 0x85, 0x43, 0xd5, + 0x06, 0x80, 0xd5, 0x0a, 0x09, 0x71, 0xb1, 0xe4, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, 0x6a, + 0x70, 0x06, 0x81, 0xd9, 0x42, 0x12, 0x5c, 0xec, 0x65, 0xa9, 0x45, 0xc5, 0x99, 0xf9, 0x79, 0x12, + 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x30, 0xae, 0x53, 0x1e, 0x97, 0x74, 0x72, 0x7e, 0xae, 0x5e, + 0x5e, 0x6a, 0x49, 0x5a, 0x4e, 0x66, 0x85, 0x1e, 0x9a, 0x0d, 0x4e, 0xc2, 0x18, 0xe6, 0x07, 0x24, + 0x45, 0x59, 0xa5, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0x43, 0x35, 0xea, + 0xc3, 0x35, 0xea, 0x27, 0xe7, 0x64, 0xa6, 0xe6, 0x95, 0xe8, 0xa7, 0xe7, 0xa7, 0x17, 0x15, 0x24, + 0x23, 0x89, 0x83, 0xfd, 0x96, 0xc4, 0x06, 0x36, 0xd7, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x52, + 0x9c, 0xc5, 0x01, 0xeb, 0x00, 0x00, 0x00, +} diff --git a/client/gogrpc/conductor/model/task.pb.go b/client/gogrpc/conductor/model/task.pb.go new file mode 100644 index 0000000000..0c10b923ff --- /dev/null +++ b/client/gogrpc/conductor/model/task.pb.go @@ -0,0 +1,422 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: model/task.proto + +package model // import "github.com/netflix/conductor/client/gogrpc/conductor/model" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import any "github.com/golang/protobuf/ptypes/any" +import _struct "github.com/golang/protobuf/ptypes/struct" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Task_Status int32 + +const ( + Task_IN_PROGRESS Task_Status = 0 + Task_CANCELED Task_Status = 1 + Task_FAILED Task_Status = 2 + Task_FAILED_WITH_TERMINAL_ERROR Task_Status = 3 + Task_COMPLETED Task_Status = 4 + Task_COMPLETED_WITH_ERRORS Task_Status = 5 + Task_SCHEDULED Task_Status = 6 + Task_TIMED_OUT Task_Status = 7 + Task_READY_FOR_RERUN Task_Status = 8 + Task_SKIPPED Task_Status = 9 +) + +var Task_Status_name = map[int32]string{ + 0: "IN_PROGRESS", + 1: "CANCELED", + 2: "FAILED", + 3: "FAILED_WITH_TERMINAL_ERROR", + 4: "COMPLETED", + 5: "COMPLETED_WITH_ERRORS", + 6: "SCHEDULED", + 7: "TIMED_OUT", + 8: "READY_FOR_RERUN", + 9: "SKIPPED", +} +var Task_Status_value = map[string]int32{ + "IN_PROGRESS": 0, + "CANCELED": 1, + "FAILED": 2, + "FAILED_WITH_TERMINAL_ERROR": 3, + "COMPLETED": 4, + "COMPLETED_WITH_ERRORS": 5, + "SCHEDULED": 6, + "TIMED_OUT": 7, + "READY_FOR_RERUN": 8, + "SKIPPED": 9, +} + +func (x Task_Status) String() string { + return proto.EnumName(Task_Status_name, int32(x)) +} +func (Task_Status) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_task_755b02a04c3b7e73, []int{0, 0} +} + +type Task struct { + TaskType string `protobuf:"bytes,1,opt,name=task_type,json=taskType,proto3" json:"task_type,omitempty"` + Status Task_Status `protobuf:"varint,2,opt,name=status,proto3,enum=conductor.proto.Task_Status" json:"status,omitempty"` + InputData map[string]*_struct.Value `protobuf:"bytes,3,rep,name=input_data,json=inputData,proto3" json:"input_data,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + ReferenceTaskName string `protobuf:"bytes,4,opt,name=reference_task_name,json=referenceTaskName,proto3" json:"reference_task_name,omitempty"` + RetryCount int32 `protobuf:"varint,5,opt,name=retry_count,json=retryCount,proto3" json:"retry_count,omitempty"` + Seq int32 `protobuf:"varint,6,opt,name=seq,proto3" json:"seq,omitempty"` + CorrelationId string `protobuf:"bytes,7,opt,name=correlation_id,json=correlationId,proto3" json:"correlation_id,omitempty"` + PollCount int32 `protobuf:"varint,8,opt,name=poll_count,json=pollCount,proto3" json:"poll_count,omitempty"` + TaskDefName string `protobuf:"bytes,9,opt,name=task_def_name,json=taskDefName,proto3" json:"task_def_name,omitempty"` + ScheduledTime int64 `protobuf:"varint,10,opt,name=scheduled_time,json=scheduledTime,proto3" json:"scheduled_time,omitempty"` + StartTime int64 `protobuf:"varint,11,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + EndTime int64 `protobuf:"varint,12,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + UpdateTime int64 `protobuf:"varint,13,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` + StartDelayInSeconds int32 
`protobuf:"varint,14,opt,name=start_delay_in_seconds,json=startDelayInSeconds,proto3" json:"start_delay_in_seconds,omitempty"` + RetriedTaskId string `protobuf:"bytes,15,opt,name=retried_task_id,json=retriedTaskId,proto3" json:"retried_task_id,omitempty"` + Retried bool `protobuf:"varint,16,opt,name=retried,proto3" json:"retried,omitempty"` + Executed bool `protobuf:"varint,17,opt,name=executed,proto3" json:"executed,omitempty"` + CallbackFromWorker bool `protobuf:"varint,18,opt,name=callback_from_worker,json=callbackFromWorker,proto3" json:"callback_from_worker,omitempty"` + ResponseTimeoutSeconds int32 `protobuf:"varint,19,opt,name=response_timeout_seconds,json=responseTimeoutSeconds,proto3" json:"response_timeout_seconds,omitempty"` + WorkflowInstanceId string `protobuf:"bytes,20,opt,name=workflow_instance_id,json=workflowInstanceId,proto3" json:"workflow_instance_id,omitempty"` + WorkflowType string `protobuf:"bytes,21,opt,name=workflow_type,json=workflowType,proto3" json:"workflow_type,omitempty"` + TaskId string `protobuf:"bytes,22,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + ReasonForIncompletion string `protobuf:"bytes,23,opt,name=reason_for_incompletion,json=reasonForIncompletion,proto3" json:"reason_for_incompletion,omitempty"` + CallbackAfterSeconds int64 `protobuf:"varint,24,opt,name=callback_after_seconds,json=callbackAfterSeconds,proto3" json:"callback_after_seconds,omitempty"` + WorkerId string `protobuf:"bytes,25,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"` + OutputData map[string]*_struct.Value `protobuf:"bytes,26,rep,name=output_data,json=outputData,proto3" json:"output_data,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + WorkflowTask *WorkflowTask `protobuf:"bytes,27,opt,name=workflow_task,json=workflowTask,proto3" json:"workflow_task,omitempty"` + Domain string `protobuf:"bytes,28,opt,name=domain,proto3" json:"domain,omitempty"` + InputMessage *any.Any `protobuf:"bytes,29,opt,name=input_message,json=inputMessage,proto3" json:"input_message,omitempty"` + OutputMessage *any.Any `protobuf:"bytes,30,opt,name=output_message,json=outputMessage,proto3" json:"output_message,omitempty"` + RateLimitPerSecond int32 `protobuf:"varint,31,opt,name=rate_limit_per_second,json=rateLimitPerSecond,proto3" json:"rate_limit_per_second,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Task) Reset() { *m = Task{} } +func (m *Task) String() string { return proto.CompactTextString(m) } +func (*Task) ProtoMessage() {} +func (*Task) Descriptor() ([]byte, []int) { + return fileDescriptor_task_755b02a04c3b7e73, []int{0} +} +func (m *Task) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Task.Unmarshal(m, b) +} +func (m *Task) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Task.Marshal(b, m, deterministic) +} +func (dst *Task) XXX_Merge(src proto.Message) { + xxx_messageInfo_Task.Merge(dst, src) +} +func (m *Task) XXX_Size() int { + return xxx_messageInfo_Task.Size(m) +} +func (m *Task) XXX_DiscardUnknown() { + xxx_messageInfo_Task.DiscardUnknown(m) +} + +var xxx_messageInfo_Task proto.InternalMessageInfo + +func (m *Task) GetTaskType() string { + if m != nil { + return m.TaskType + } + return "" +} + +func (m *Task) GetStatus() Task_Status { + if m != nil { + return m.Status + } + return Task_IN_PROGRESS +} + +func (m *Task) GetInputData() map[string]*_struct.Value { + 
if m != nil { + return m.InputData + } + return nil +} + +func (m *Task) GetReferenceTaskName() string { + if m != nil { + return m.ReferenceTaskName + } + return "" +} + +func (m *Task) GetRetryCount() int32 { + if m != nil { + return m.RetryCount + } + return 0 +} + +func (m *Task) GetSeq() int32 { + if m != nil { + return m.Seq + } + return 0 +} + +func (m *Task) GetCorrelationId() string { + if m != nil { + return m.CorrelationId + } + return "" +} + +func (m *Task) GetPollCount() int32 { + if m != nil { + return m.PollCount + } + return 0 +} + +func (m *Task) GetTaskDefName() string { + if m != nil { + return m.TaskDefName + } + return "" +} + +func (m *Task) GetScheduledTime() int64 { + if m != nil { + return m.ScheduledTime + } + return 0 +} + +func (m *Task) GetStartTime() int64 { + if m != nil { + return m.StartTime + } + return 0 +} + +func (m *Task) GetEndTime() int64 { + if m != nil { + return m.EndTime + } + return 0 +} + +func (m *Task) GetUpdateTime() int64 { + if m != nil { + return m.UpdateTime + } + return 0 +} + +func (m *Task) GetStartDelayInSeconds() int32 { + if m != nil { + return m.StartDelayInSeconds + } + return 0 +} + +func (m *Task) GetRetriedTaskId() string { + if m != nil { + return m.RetriedTaskId + } + return "" +} + +func (m *Task) GetRetried() bool { + if m != nil { + return m.Retried + } + return false +} + +func (m *Task) GetExecuted() bool { + if m != nil { + return m.Executed + } + return false +} + +func (m *Task) GetCallbackFromWorker() bool { + if m != nil { + return m.CallbackFromWorker + } + return false +} + +func (m *Task) GetResponseTimeoutSeconds() int32 { + if m != nil { + return m.ResponseTimeoutSeconds + } + return 0 +} + +func (m *Task) GetWorkflowInstanceId() string { + if m != nil { + return m.WorkflowInstanceId + } + return "" +} + +func (m *Task) GetWorkflowType() string { + if m != nil { + return m.WorkflowType + } + return "" +} + +func (m *Task) GetTaskId() string { + if m != nil { + return m.TaskId + } + return "" +} + +func (m *Task) GetReasonForIncompletion() string { + if m != nil { + return m.ReasonForIncompletion + } + return "" +} + +func (m *Task) GetCallbackAfterSeconds() int64 { + if m != nil { + return m.CallbackAfterSeconds + } + return 0 +} + +func (m *Task) GetWorkerId() string { + if m != nil { + return m.WorkerId + } + return "" +} + +func (m *Task) GetOutputData() map[string]*_struct.Value { + if m != nil { + return m.OutputData + } + return nil +} + +func (m *Task) GetWorkflowTask() *WorkflowTask { + if m != nil { + return m.WorkflowTask + } + return nil +} + +func (m *Task) GetDomain() string { + if m != nil { + return m.Domain + } + return "" +} + +func (m *Task) GetInputMessage() *any.Any { + if m != nil { + return m.InputMessage + } + return nil +} + +func (m *Task) GetOutputMessage() *any.Any { + if m != nil { + return m.OutputMessage + } + return nil +} + +func (m *Task) GetRateLimitPerSecond() int32 { + if m != nil { + return m.RateLimitPerSecond + } + return 0 +} + +func init() { + proto.RegisterType((*Task)(nil), "conductor.proto.Task") + proto.RegisterMapType((map[string]*_struct.Value)(nil), "conductor.proto.Task.InputDataEntry") + proto.RegisterMapType((map[string]*_struct.Value)(nil), "conductor.proto.Task.OutputDataEntry") + proto.RegisterEnum("conductor.proto.Task_Status", Task_Status_name, Task_Status_value) +} + +func init() { proto.RegisterFile("model/task.proto", fileDescriptor_task_755b02a04c3b7e73) } + +var fileDescriptor_task_755b02a04c3b7e73 = []byte{ + // 1004 bytes of a gzipped 
FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0x5d, 0x73, 0xda, 0x46, + 0x14, 0x2d, 0xb1, 0xcd, 0xc7, 0xc5, 0x80, 0xb2, 0xb6, 0xf1, 0x1a, 0xdb, 0x31, 0xe3, 0xd6, 0x1d, + 0x1e, 0x3a, 0x90, 0x3a, 0x99, 0x4e, 0x9a, 0x3e, 0x61, 0x90, 0x1b, 0x4d, 0x6d, 0xc3, 0x08, 0x5c, + 0x4f, 0xfb, 0xb2, 0xb3, 0x96, 0x16, 0xa2, 0x41, 0xd2, 0xd2, 0xd5, 0xaa, 0x09, 0xbf, 0xa7, 0xbf, + 0xa3, 0xff, 0xad, 0xb3, 0xbb, 0x48, 0xa1, 0x4e, 0xa6, 0x4f, 0x7d, 0xdb, 0x3d, 0xe7, 0xdc, 0xcb, + 0xbd, 0x47, 0x7b, 0x2f, 0x60, 0x45, 0xdc, 0x67, 0x61, 0x4f, 0xd2, 0x64, 0xd1, 0x5d, 0x0a, 0x2e, + 0x39, 0x6a, 0x78, 0x3c, 0xf6, 0x53, 0x4f, 0x72, 0x61, 0x80, 0x16, 0x36, 0x92, 0x0f, 0x5c, 0x2c, + 0x66, 0x21, 0xff, 0xf0, 0x49, 0xda, 0x3a, 0x99, 0x73, 0x3e, 0x0f, 0x59, 0x4f, 0xdf, 0x1e, 0xd3, + 0x59, 0x2f, 0x91, 0x22, 0xf5, 0xe4, 0x9a, 0x3d, 0x7a, 0xca, 0xd2, 0x78, 0x65, 0xa8, 0xf3, 0xbf, + 0x6a, 0xb0, 0x3d, 0xa5, 0xc9, 0x02, 0x1d, 0x43, 0x45, 0xe5, 0x23, 0x72, 0xb5, 0x64, 0xb8, 0xd0, + 0x2e, 0x74, 0x2a, 0x6e, 0x59, 0x01, 0xd3, 0xd5, 0x92, 0xa1, 0xd7, 0x50, 0x4c, 0x24, 0x95, 0x69, + 0x82, 0x9f, 0xb5, 0x0b, 0x9d, 0xfa, 0xe5, 0x49, 0xf7, 0x49, 0x69, 0x5d, 0x95, 0xa3, 0x3b, 0xd1, + 0x1a, 0x77, 0xad, 0x45, 0x03, 0x80, 0x20, 0x5e, 0xa6, 0x92, 0xf8, 0x54, 0x52, 0xbc, 0xd5, 0xde, + 0xea, 0x54, 0x2f, 0xbf, 0xf9, 0x72, 0xa4, 0xa3, 0x74, 0x43, 0x2a, 0xa9, 0x1d, 0x4b, 0xb1, 0x72, + 0x2b, 0x41, 0x76, 0x47, 0x5d, 0xd8, 0x13, 0x6c, 0xc6, 0x04, 0x8b, 0x3d, 0x46, 0x74, 0x85, 0x31, + 0x8d, 0x18, 0xde, 0xd6, 0x15, 0x3e, 0xcf, 0x29, 0x95, 0xe5, 0x8e, 0x46, 0x0c, 0x9d, 0x41, 0x55, + 0x30, 0x29, 0x56, 0xc4, 0xe3, 0x69, 0x2c, 0xf1, 0x4e, 0xbb, 0xd0, 0xd9, 0x71, 0x41, 0x43, 0x03, + 0x85, 0x20, 0x0b, 0xb6, 0x12, 0xf6, 0x07, 0x2e, 0x6a, 0x42, 0x1d, 0xd1, 0x05, 0xd4, 0x3d, 0x2e, + 0x04, 0x0b, 0xa9, 0x0c, 0x78, 0x4c, 0x02, 0x1f, 0x97, 0x74, 0xf6, 0xda, 0x06, 0xea, 0xf8, 0xe8, + 0x14, 0x60, 0xc9, 0xc3, 0x70, 0x9d, 0xb8, 0xac, 0xe3, 0x2b, 0x0a, 0x31, 0x79, 0xcf, 0xa1, 0xa6, + 0xcb, 0xf3, 0xd9, 0xcc, 0x94, 0x58, 0xd1, 0x49, 0xaa, 0x0a, 0x1c, 0xb2, 0x99, 0x2e, 0xee, 0x02, + 0xea, 0x89, 0xf7, 0x9e, 0xf9, 0x69, 0xc8, 0x7c, 0x22, 0x83, 0x88, 0x61, 0x68, 0x17, 0x3a, 0x5b, + 0x6e, 0x2d, 0x47, 0xa7, 0x41, 0xc4, 0xd4, 0x2f, 0x25, 0x92, 0x0a, 0x69, 0x24, 0x55, 0x2d, 0xa9, + 0x68, 0x44, 0xd3, 0x47, 0x50, 0x66, 0xf1, 0x3a, 0x7e, 0x57, 0x93, 0x25, 0x16, 0x9b, 0xc8, 0x33, + 0xa8, 0xa6, 0x4b, 0x9f, 0x4a, 0x66, 0xd8, 0x9a, 0x66, 0xc1, 0x40, 0x5a, 0xf0, 0x0a, 0x9a, 0x26, + 0xb5, 0xcf, 0x42, 0xba, 0x22, 0x41, 0x4c, 0x12, 0xa6, 0xbe, 0x48, 0x82, 0xeb, 0xba, 0xa1, 0x3d, + 0xcd, 0x0e, 0x15, 0xe9, 0xc4, 0x13, 0x43, 0xa1, 0x6f, 0xa1, 0xa1, 0x0c, 0x0c, 0x54, 0xd1, 0xaa, + 0xc5, 0xc0, 0xc7, 0x0d, 0xe3, 0xd0, 0x1a, 0x56, 0xee, 0x3b, 0x3e, 0xc2, 0x50, 0x5a, 0x03, 0xd8, + 0x6a, 0x17, 0x3a, 0x65, 0x37, 0xbb, 0xa2, 0x16, 0x94, 0xd9, 0x47, 0xe6, 0xa5, 0x92, 0xf9, 0xf8, + 0xb9, 0xa6, 0xf2, 0x3b, 0x7a, 0x09, 0xfb, 0x1e, 0x0d, 0xc3, 0x47, 0xea, 0x2d, 0xc8, 0x4c, 0xf0, + 0x88, 0xa8, 0xf7, 0xcd, 0x04, 0x46, 0x5a, 0x87, 0x32, 0xee, 0x5a, 0xf0, 0xe8, 0x41, 0x33, 0xe8, + 0x0d, 0x60, 0xc1, 0x92, 0x25, 0x8f, 0x13, 0xd3, 0x27, 0x4f, 0x65, 0xde, 0xc6, 0x9e, 0x6e, 0xa3, + 0x99, 0xf1, 0x53, 0x43, 0x67, 0x9d, 0xbc, 0x84, 0xfd, 0x6c, 0x7a, 0x48, 0x10, 0x27, 0x92, 0xaa, + 0x57, 0x15, 0xf8, 0x78, 0x5f, 0xb7, 0x83, 0x32, 0xce, 0x59, 0x53, 0x8e, 0x8f, 0xbe, 0x86, 0x5a, + 0x1e, 0xa1, 0x67, 0xe3, 0x40, 0x4b, 0x77, 0x33, 0x50, 0xcf, 0xc7, 0x21, 0x94, 0x32, 0x63, 0x9a, + 0x9a, 0x2e, 0x4a, 0xe3, 0xc8, 0x0f, 0x70, 0x28, 0x18, 0x4d, 0x78, 0x4c, 0x66, 0x5c, 0x90, 0x20, + 
0xf6, 0x78, 0xb4, 0x0c, 0x99, 0x7a, 0x50, 0xf8, 0x50, 0x0b, 0x0f, 0x0c, 0x7d, 0xcd, 0x85, 0xb3, + 0x41, 0xa2, 0xd7, 0xd0, 0xcc, 0x3d, 0xa1, 0x33, 0xc9, 0x44, 0xde, 0x1f, 0xd6, 0x9f, 0x34, 0x77, + 0xac, 0xaf, 0xc8, 0xac, 0xbb, 0x63, 0xa8, 0x18, 0xef, 0x54, 0x21, 0x47, 0x66, 0x86, 0x0d, 0xe0, + 0xf8, 0xe8, 0x1a, 0xaa, 0x3c, 0x95, 0xf9, 0x38, 0xb6, 0xf4, 0x38, 0x5e, 0x7c, 0x79, 0x1c, 0x47, + 0x5a, 0xf8, 0x69, 0x1e, 0x81, 0xe7, 0x00, 0xba, 0xda, 0x34, 0x84, 0x26, 0x0b, 0x7c, 0xdc, 0x2e, + 0x74, 0xaa, 0x97, 0xa7, 0x9f, 0x65, 0x7a, 0xc8, 0x1c, 0xa2, 0xc9, 0x62, 0xc3, 0x2f, 0xb5, 0x6c, + 0x9a, 0x50, 0xf4, 0x79, 0x44, 0x83, 0x18, 0x9f, 0x18, 0xbb, 0xcc, 0x0d, 0xfd, 0x08, 0x35, 0xb3, + 0x31, 0x22, 0x96, 0x24, 0x74, 0xce, 0xf0, 0xa9, 0xce, 0xbd, 0xdf, 0x35, 0x0b, 0xac, 0x9b, 0x2d, + 0xb0, 0x6e, 0x3f, 0x5e, 0xb9, 0xbb, 0x5a, 0x7a, 0x6b, 0x94, 0xe8, 0x27, 0xa8, 0xaf, 0xdb, 0xcb, + 0x62, 0x5f, 0xfc, 0x47, 0x6c, 0xcd, 0x68, 0xb3, 0xe0, 0xef, 0xe1, 0x40, 0xa8, 0xa1, 0x09, 0x83, + 0x28, 0x90, 0x64, 0x99, 0xdb, 0x8d, 0xcf, 0xf4, 0x6b, 0x42, 0x8a, 0xbc, 0x51, 0xdc, 0x38, 0x33, + 0xbb, 0x35, 0x85, 0xfa, 0xbf, 0x97, 0x96, 0x5a, 0x2c, 0x0b, 0xb6, 0x5a, 0xef, 0x4e, 0x75, 0x44, + 0xdf, 0xc1, 0xce, 0x9f, 0x34, 0x4c, 0x99, 0xde, 0x9a, 0xd5, 0xcb, 0xe6, 0x67, 0xa5, 0xfc, 0xaa, + 0x58, 0xd7, 0x88, 0xde, 0x3e, 0x7b, 0x53, 0x68, 0xdd, 0x43, 0xe3, 0x89, 0xf7, 0xff, 0x47, 0xda, + 0xf3, 0xbf, 0x0b, 0x50, 0x34, 0xcb, 0x19, 0x35, 0xa0, 0xea, 0xdc, 0x91, 0xb1, 0x3b, 0xfa, 0xd9, + 0xb5, 0x27, 0x13, 0xeb, 0x2b, 0xb4, 0x0b, 0xe5, 0x41, 0xff, 0x6e, 0x60, 0xdf, 0xd8, 0x43, 0xab, + 0x80, 0x00, 0x8a, 0xd7, 0x7d, 0x47, 0x9d, 0x9f, 0xa1, 0x17, 0xd0, 0x32, 0x67, 0xf2, 0xe0, 0x4c, + 0xdf, 0x91, 0xa9, 0xed, 0xde, 0x3a, 0x77, 0xfd, 0x1b, 0x62, 0xbb, 0xee, 0xc8, 0xb5, 0xb6, 0x50, + 0x0d, 0x2a, 0x83, 0xd1, 0xed, 0xf8, 0xc6, 0x9e, 0xda, 0x43, 0x6b, 0x1b, 0x1d, 0xc1, 0x41, 0x7e, + 0x35, 0x11, 0x5a, 0x38, 0xb1, 0x76, 0x94, 0x72, 0x32, 0x78, 0x67, 0x0f, 0xef, 0x55, 0xe2, 0xa2, + 0xba, 0x4e, 0x9d, 0x5b, 0x7b, 0x48, 0x46, 0xf7, 0x53, 0xab, 0x84, 0xf6, 0xa0, 0xe1, 0xda, 0xfd, + 0xe1, 0x6f, 0xe4, 0x7a, 0xe4, 0x12, 0xd7, 0x76, 0xef, 0xef, 0xac, 0x32, 0xaa, 0x42, 0x69, 0xf2, + 0x8b, 0x33, 0x1e, 0xdb, 0x43, 0xab, 0x72, 0x45, 0xe1, 0xd8, 0xe3, 0x51, 0x37, 0x66, 0x72, 0x16, + 0x06, 0x1f, 0x9f, 0xbe, 0xb4, 0xab, 0xa2, 0x7a, 0x54, 0xe3, 0xc7, 0xdf, 0xdf, 0xce, 0x03, 0xf9, + 0x3e, 0x7d, 0xec, 0x7a, 0x3c, 0xea, 0xad, 0xb5, 0xbd, 0x5c, 0xdb, 0xf3, 0xc2, 0x80, 0xc5, 0xb2, + 0x37, 0xe7, 0x73, 0xb1, 0xf4, 0x36, 0x70, 0xfd, 0xa7, 0xfa, 0x58, 0xd4, 0xa9, 0x5e, 0xfd, 0x13, + 0x00, 0x00, 0xff, 0xff, 0xe9, 0xb6, 0x2c, 0x87, 0x87, 0x07, 0x00, 0x00, +} diff --git a/client/gogrpc/conductor/model/taskdef.pb.go b/client/gogrpc/conductor/model/taskdef.pb.go new file mode 100644 index 0000000000..8113caeefb --- /dev/null +++ b/client/gogrpc/conductor/model/taskdef.pb.go @@ -0,0 +1,254 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: model/taskdef.proto + +package model // import "github.com/netflix/conductor/client/gogrpc/conductor/model" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _struct "github.com/golang/protobuf/ptypes/struct" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type TaskDef_RetryLogic int32 + +const ( + TaskDef_FIXED TaskDef_RetryLogic = 0 + TaskDef_EXPONENTIAL_BACKOFF TaskDef_RetryLogic = 1 +) + +var TaskDef_RetryLogic_name = map[int32]string{ + 0: "FIXED", + 1: "EXPONENTIAL_BACKOFF", +} +var TaskDef_RetryLogic_value = map[string]int32{ + "FIXED": 0, + "EXPONENTIAL_BACKOFF": 1, +} + +func (x TaskDef_RetryLogic) String() string { + return proto.EnumName(TaskDef_RetryLogic_name, int32(x)) +} +func (TaskDef_RetryLogic) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_taskdef_3520607b0d645224, []int{0, 0} +} + +type TaskDef_TimeoutPolicy int32 + +const ( + TaskDef_RETRY TaskDef_TimeoutPolicy = 0 + TaskDef_TIME_OUT_WF TaskDef_TimeoutPolicy = 1 + TaskDef_ALERT_ONLY TaskDef_TimeoutPolicy = 2 +) + +var TaskDef_TimeoutPolicy_name = map[int32]string{ + 0: "RETRY", + 1: "TIME_OUT_WF", + 2: "ALERT_ONLY", +} +var TaskDef_TimeoutPolicy_value = map[string]int32{ + "RETRY": 0, + "TIME_OUT_WF": 1, + "ALERT_ONLY": 2, +} + +func (x TaskDef_TimeoutPolicy) String() string { + return proto.EnumName(TaskDef_TimeoutPolicy_name, int32(x)) +} +func (TaskDef_TimeoutPolicy) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_taskdef_3520607b0d645224, []int{0, 1} +} + +type TaskDef struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + RetryCount int32 `protobuf:"varint,3,opt,name=retry_count,json=retryCount,proto3" json:"retry_count,omitempty"` + TimeoutSeconds int64 `protobuf:"varint,4,opt,name=timeout_seconds,json=timeoutSeconds,proto3" json:"timeout_seconds,omitempty"` + InputKeys []string `protobuf:"bytes,5,rep,name=input_keys,json=inputKeys,proto3" json:"input_keys,omitempty"` + OutputKeys []string `protobuf:"bytes,6,rep,name=output_keys,json=outputKeys,proto3" json:"output_keys,omitempty"` + TimeoutPolicy TaskDef_TimeoutPolicy `protobuf:"varint,7,opt,name=timeout_policy,json=timeoutPolicy,proto3,enum=conductor.proto.TaskDef_TimeoutPolicy" json:"timeout_policy,omitempty"` + RetryLogic TaskDef_RetryLogic `protobuf:"varint,8,opt,name=retry_logic,json=retryLogic,proto3,enum=conductor.proto.TaskDef_RetryLogic" json:"retry_logic,omitempty"` + RetryDelaySeconds int32 `protobuf:"varint,9,opt,name=retry_delay_seconds,json=retryDelaySeconds,proto3" json:"retry_delay_seconds,omitempty"` + ResponseTimeoutSeconds int32 `protobuf:"varint,10,opt,name=response_timeout_seconds,json=responseTimeoutSeconds,proto3" json:"response_timeout_seconds,omitempty"` + ConcurrentExecLimit int32 `protobuf:"varint,11,opt,name=concurrent_exec_limit,json=concurrentExecLimit,proto3" json:"concurrent_exec_limit,omitempty"` + InputTemplate map[string]*_struct.Value `protobuf:"bytes,12,rep,name=input_template,json=inputTemplate,proto3" json:"input_template,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + RateLimitPerSecond int32 `protobuf:"varint,13,opt,name=rate_limit_per_second,json=rateLimitPerSecond,proto3" json:"rate_limit_per_second,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TaskDef) Reset() { *m = TaskDef{} } +func (m *TaskDef) String() string { return proto.CompactTextString(m) } +func (*TaskDef) ProtoMessage() {} +func 
(*TaskDef) Descriptor() ([]byte, []int) { + return fileDescriptor_taskdef_3520607b0d645224, []int{0} +} +func (m *TaskDef) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TaskDef.Unmarshal(m, b) +} +func (m *TaskDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TaskDef.Marshal(b, m, deterministic) +} +func (dst *TaskDef) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskDef.Merge(dst, src) +} +func (m *TaskDef) XXX_Size() int { + return xxx_messageInfo_TaskDef.Size(m) +} +func (m *TaskDef) XXX_DiscardUnknown() { + xxx_messageInfo_TaskDef.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskDef proto.InternalMessageInfo + +func (m *TaskDef) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *TaskDef) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *TaskDef) GetRetryCount() int32 { + if m != nil { + return m.RetryCount + } + return 0 +} + +func (m *TaskDef) GetTimeoutSeconds() int64 { + if m != nil { + return m.TimeoutSeconds + } + return 0 +} + +func (m *TaskDef) GetInputKeys() []string { + if m != nil { + return m.InputKeys + } + return nil +} + +func (m *TaskDef) GetOutputKeys() []string { + if m != nil { + return m.OutputKeys + } + return nil +} + +func (m *TaskDef) GetTimeoutPolicy() TaskDef_TimeoutPolicy { + if m != nil { + return m.TimeoutPolicy + } + return TaskDef_RETRY +} + +func (m *TaskDef) GetRetryLogic() TaskDef_RetryLogic { + if m != nil { + return m.RetryLogic + } + return TaskDef_FIXED +} + +func (m *TaskDef) GetRetryDelaySeconds() int32 { + if m != nil { + return m.RetryDelaySeconds + } + return 0 +} + +func (m *TaskDef) GetResponseTimeoutSeconds() int32 { + if m != nil { + return m.ResponseTimeoutSeconds + } + return 0 +} + +func (m *TaskDef) GetConcurrentExecLimit() int32 { + if m != nil { + return m.ConcurrentExecLimit + } + return 0 +} + +func (m *TaskDef) GetInputTemplate() map[string]*_struct.Value { + if m != nil { + return m.InputTemplate + } + return nil +} + +func (m *TaskDef) GetRateLimitPerSecond() int32 { + if m != nil { + return m.RateLimitPerSecond + } + return 0 +} + +func init() { + proto.RegisterType((*TaskDef)(nil), "conductor.proto.TaskDef") + proto.RegisterMapType((map[string]*_struct.Value)(nil), "conductor.proto.TaskDef.InputTemplateEntry") + proto.RegisterEnum("conductor.proto.TaskDef_RetryLogic", TaskDef_RetryLogic_name, TaskDef_RetryLogic_value) + proto.RegisterEnum("conductor.proto.TaskDef_TimeoutPolicy", TaskDef_TimeoutPolicy_name, TaskDef_TimeoutPolicy_value) +} + +func init() { proto.RegisterFile("model/taskdef.proto", fileDescriptor_taskdef_3520607b0d645224) } + +var fileDescriptor_taskdef_3520607b0d645224 = []byte{ + // 593 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x53, 0x51, 0x6f, 0xd3, 0x4c, + 0x10, 0xac, 0x9b, 0xa6, 0xfd, 0xb2, 0xf9, 0x92, 0x86, 0x8b, 0x5a, 0xac, 0x02, 0xc2, 0x2a, 0x12, + 0x44, 0x02, 0xd9, 0x10, 0x5e, 0xaa, 0xf2, 0xd4, 0x36, 0x8e, 0x14, 0x35, 0x6d, 0x22, 0x63, 0xa0, + 0xe5, 0xc5, 0x72, 0x2e, 0x1b, 0x63, 0xc5, 0xf6, 0x59, 0xe7, 0x33, 0xaa, 0xff, 0x23, 0x3f, 0x0a, + 0xdd, 0xd9, 0x69, 0xd3, 0xa0, 0xbe, 0xdd, 0xcd, 0xcc, 0xcd, 0xee, 0x8e, 0xd7, 0xd0, 0x8d, 0xd9, + 0x1c, 0x23, 0x4b, 0xf8, 0xd9, 0x72, 0x8e, 0x0b, 0x33, 0xe5, 0x4c, 0x30, 0xb2, 0x4f, 0x59, 0x32, + 0xcf, 0xa9, 0x60, 0xbc, 0x04, 0x8e, 0x5e, 0x06, 0x8c, 0x05, 0x11, 0x5a, 0xea, 0x36, 0xcb, 0x17, + 0x56, 0x26, 0x78, 0x4e, 0x45, 0xc9, 0x1e, 0xff, 0xd9, 0x85, 0x3d, 0xd7, 0xcf, 0x96, 
0x03, 0x5c, + 0x10, 0x02, 0x3b, 0x89, 0x1f, 0xa3, 0xae, 0x19, 0x5a, 0xaf, 0xe1, 0xa8, 0x33, 0x31, 0xa0, 0x39, + 0xc7, 0x8c, 0xf2, 0x30, 0x15, 0x21, 0x4b, 0xf4, 0x6d, 0x45, 0xad, 0x43, 0xe4, 0x35, 0x34, 0x39, + 0x0a, 0x5e, 0x78, 0x94, 0xe5, 0x89, 0xd0, 0x6b, 0x86, 0xd6, 0xab, 0x3b, 0xa0, 0xa0, 0x0b, 0x89, + 0x90, 0x77, 0xb0, 0x2f, 0xc2, 0x18, 0x59, 0x2e, 0xbc, 0x0c, 0x65, 0x77, 0x99, 0xbe, 0x63, 0x68, + 0xbd, 0x9a, 0xd3, 0xae, 0xe0, 0xaf, 0x25, 0x4a, 0x5e, 0x01, 0x84, 0x49, 0x9a, 0x0b, 0x6f, 0x89, + 0x45, 0xa6, 0xd7, 0x8d, 0x5a, 0xaf, 0xe1, 0x34, 0x14, 0x72, 0x89, 0x45, 0x26, 0x0b, 0xb1, 0x5c, + 0xdc, 0xf3, 0xbb, 0x8a, 0x87, 0x12, 0x52, 0x82, 0x2b, 0x58, 0x39, 0x7a, 0x29, 0x8b, 0x42, 0x5a, + 0xe8, 0x7b, 0x86, 0xd6, 0x6b, 0xf7, 0xdf, 0x9a, 0x1b, 0x99, 0x98, 0xd5, 0xc4, 0xa6, 0x5b, 0xca, + 0xa7, 0x4a, 0xed, 0xb4, 0xc4, 0xfa, 0x95, 0x0c, 0x56, 0x83, 0x45, 0x2c, 0x08, 0xa9, 0xfe, 0x9f, + 0xf2, 0x7a, 0xf3, 0xa4, 0x97, 0x23, 0xb5, 0x63, 0x29, 0xad, 0xa6, 0x57, 0x67, 0x62, 0x42, 0xb7, + 0x74, 0x99, 0x63, 0xe4, 0x17, 0xf7, 0x09, 0x34, 0x54, 0x4c, 0xcf, 0x14, 0x35, 0x90, 0xcc, 0x2a, + 0x84, 0x13, 0xd0, 0x39, 0x66, 0x29, 0x4b, 0x32, 0xf4, 0x36, 0x63, 0x03, 0xf5, 0xe8, 0x70, 0xc5, + 0xbb, 0x8f, 0xe3, 0xeb, 0xc3, 0x01, 0x65, 0x09, 0xcd, 0x39, 0xc7, 0x44, 0x78, 0x78, 0x87, 0xd4, + 0x8b, 0xc2, 0x38, 0x14, 0x7a, 0x53, 0x3d, 0xeb, 0x3e, 0x90, 0xf6, 0x1d, 0xd2, 0xb1, 0xa4, 0x88, + 0x03, 0xed, 0x32, 0x72, 0x81, 0x71, 0x1a, 0xf9, 0x02, 0xf5, 0xff, 0x8d, 0x5a, 0xaf, 0xd9, 0x7f, + 0xff, 0xe4, 0x98, 0x23, 0x29, 0x77, 0x2b, 0xb5, 0x9d, 0x08, 0x5e, 0x38, 0xad, 0x70, 0x1d, 0x23, + 0x9f, 0xe0, 0x80, 0xfb, 0x02, 0xcb, 0xe2, 0x5e, 0x8a, 0xbc, 0xea, 0x5f, 0x6f, 0xa9, 0x3e, 0x88, + 0x24, 0x55, 0xf5, 0x29, 0xf2, 0xb2, 0xf7, 0xa3, 0x1b, 0x20, 0xff, 0xfa, 0x92, 0x0e, 0xd4, 0x96, + 0x58, 0x54, 0xeb, 0x28, 0x8f, 0xe4, 0x03, 0xd4, 0x7f, 0xfb, 0x51, 0x8e, 0x6a, 0x0f, 0x9b, 0xfd, + 0x43, 0xb3, 0xdc, 0x6d, 0x73, 0xb5, 0xdb, 0xe6, 0x77, 0xc9, 0x3a, 0xa5, 0xe8, 0x74, 0xfb, 0x44, + 0x3b, 0xfe, 0x08, 0xf0, 0xf0, 0x61, 0x48, 0x03, 0xea, 0xc3, 0xd1, 0x8d, 0x3d, 0xe8, 0x6c, 0x91, + 0xe7, 0xd0, 0xb5, 0x6f, 0xa6, 0x93, 0x6b, 0xfb, 0xda, 0x1d, 0x9d, 0x8d, 0xbd, 0xf3, 0xb3, 0x8b, + 0xcb, 0xc9, 0x70, 0xd8, 0xd1, 0x8e, 0xbf, 0x40, 0xeb, 0xd1, 0x5a, 0xc8, 0x47, 0x8e, 0xed, 0x3a, + 0xb7, 0x9d, 0x2d, 0xb2, 0x0f, 0x4d, 0x77, 0x74, 0x65, 0x7b, 0x93, 0x6f, 0xae, 0xf7, 0x63, 0xd8, + 0xd1, 0x48, 0x1b, 0xe0, 0x6c, 0x6c, 0x3b, 0xae, 0x37, 0xb9, 0x1e, 0xdf, 0x76, 0xb6, 0xcf, 0xe7, + 0xf0, 0x82, 0xb2, 0xd8, 0x4c, 0x50, 0x2c, 0xa2, 0xf0, 0x6e, 0x33, 0xc4, 0xf3, 0x46, 0x95, 0xe2, + 0x74, 0xf6, 0xf3, 0x34, 0x08, 0xc5, 0xaf, 0x7c, 0x66, 0x52, 0x16, 0x5b, 0x95, 0xdc, 0xba, 0x97, + 0x5b, 0x34, 0x0a, 0x31, 0x11, 0x56, 0xc0, 0x02, 0x9e, 0xd2, 0x35, 0x5c, 0xfd, 0xf1, 0xb3, 0x5d, + 0xe5, 0xf6, 0xf9, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc2, 0xd8, 0x2b, 0x35, 0x01, 0x04, 0x00, + 0x00, +} diff --git a/client/gogrpc/conductor/model/taskexeclog.pb.go b/client/gogrpc/conductor/model/taskexeclog.pb.go new file mode 100644 index 0000000000..6c65dd502e --- /dev/null +++ b/client/gogrpc/conductor/model/taskexeclog.pb.go @@ -0,0 +1,98 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: model/taskexeclog.proto + +package model // import "github.com/netflix/conductor/client/gogrpc/conductor/model" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type TaskExecLog struct { + Log string `protobuf:"bytes,1,opt,name=log,proto3" json:"log,omitempty"` + TaskId string `protobuf:"bytes,2,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + CreatedTime int64 `protobuf:"varint,3,opt,name=created_time,json=createdTime,proto3" json:"created_time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TaskExecLog) Reset() { *m = TaskExecLog{} } +func (m *TaskExecLog) String() string { return proto.CompactTextString(m) } +func (*TaskExecLog) ProtoMessage() {} +func (*TaskExecLog) Descriptor() ([]byte, []int) { + return fileDescriptor_taskexeclog_31ce5708c84ca255, []int{0} +} +func (m *TaskExecLog) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TaskExecLog.Unmarshal(m, b) +} +func (m *TaskExecLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TaskExecLog.Marshal(b, m, deterministic) +} +func (dst *TaskExecLog) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskExecLog.Merge(dst, src) +} +func (m *TaskExecLog) XXX_Size() int { + return xxx_messageInfo_TaskExecLog.Size(m) +} +func (m *TaskExecLog) XXX_DiscardUnknown() { + xxx_messageInfo_TaskExecLog.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskExecLog proto.InternalMessageInfo + +func (m *TaskExecLog) GetLog() string { + if m != nil { + return m.Log + } + return "" +} + +func (m *TaskExecLog) GetTaskId() string { + if m != nil { + return m.TaskId + } + return "" +} + +func (m *TaskExecLog) GetCreatedTime() int64 { + if m != nil { + return m.CreatedTime + } + return 0 +} + +func init() { + proto.RegisterType((*TaskExecLog)(nil), "conductor.proto.TaskExecLog") +} + +func init() { + proto.RegisterFile("model/taskexeclog.proto", fileDescriptor_taskexeclog_31ce5708c84ca255) +} + +var fileDescriptor_taskexeclog_31ce5708c84ca255 = []byte{ + // 205 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x8f, 0x31, 0x4b, 0xc4, 0x40, + 0x10, 0x85, 0x89, 0x81, 0x13, 0xf7, 0x14, 0x65, 0x9b, 0x0b, 0xd8, 0x9c, 0x56, 0x57, 0xed, 0x16, + 0x76, 0x96, 0x07, 0x16, 0x82, 0x85, 0x84, 0x54, 0x5a, 0x84, 0x64, 0x76, 0xdc, 0x2c, 0xd9, 0xcd, + 0x84, 0xcd, 0x04, 0xf2, 0xf3, 0x25, 0x31, 0x48, 0xb8, 0x6e, 0xe6, 0x83, 0xf7, 0x3e, 0x9e, 0x38, + 0x04, 0x32, 0xe8, 0x35, 0x57, 0x43, 0x8b, 0x13, 0x82, 0x27, 0xab, 0xfa, 0x48, 0x4c, 0xf2, 0x1e, + 0xa8, 0x33, 0x23, 0x30, 0xc5, 0x3f, 0xf0, 0xfc, 0x2d, 0xf6, 0x45, 0x35, 0xb4, 0x6f, 0x13, 0xc2, + 0x07, 0x59, 0xf9, 0x20, 0x52, 0x4f, 0x36, 0x4b, 0x8e, 0xc9, 0xe9, 0x26, 0x9f, 0x4f, 0x79, 0x10, + 0xd7, 0x73, 0x4d, 0xe9, 0x4c, 0x76, 0xb5, 0xd0, 0xdd, 0xfc, 0xbe, 0x1b, 0xf9, 0x24, 0x6e, 0x21, + 0x62, 0xc5, 0x68, 0x4a, 0x76, 0x01, 0xb3, 0xf4, 0x98, 0x9c, 0xd2, 0x7c, 0xbf, 0xb2, 0xc2, 0x05, + 0x3c, 0x37, 0xe2, 0x11, 0x28, 0xa8, 0x0e, 0xf9, 0xc7, 0xbb, 0x49, 0x5d, 0xb8, 0xcf, 0x77, 0x1b, + 0xf3, 0x67, 0xfd, 0xf5, 0x6a, 0x1d, 0x37, 0x63, 0xad, 0x80, 0x82, 0x5e, 0x23, 0xfa, 0x3f, 0xa2, + 0xc1, 0x3b, 0xec, 0x58, 0x5b, 0xb2, 0xb1, 0x87, 0x0d, 0x5f, 0x96, 0xd6, 0xbb, 0xa5, 0xf1, 0xe5, + 0x37, 0x00, 0x00, 0xff, 0xff, 
0x78, 0x61, 0x87, 0x8e, 0xf9, 0x00, 0x00, 0x00, +} diff --git a/client/gogrpc/conductor/model/taskresult.pb.go b/client/gogrpc/conductor/model/taskresult.pb.go new file mode 100644 index 0000000000..8e90a79e56 --- /dev/null +++ b/client/gogrpc/conductor/model/taskresult.pb.go @@ -0,0 +1,192 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: model/taskresult.proto + +package model // import "github.com/netflix/conductor/client/gogrpc/conductor/model" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import any "github.com/golang/protobuf/ptypes/any" +import _struct "github.com/golang/protobuf/ptypes/struct" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type TaskResult_Status int32 + +const ( + TaskResult_IN_PROGRESS TaskResult_Status = 0 + TaskResult_FAILED TaskResult_Status = 1 + TaskResult_FAILED_WITH_TERMINAL_ERROR TaskResult_Status = 2 + TaskResult_COMPLETED TaskResult_Status = 3 + TaskResult_SCHEDULED TaskResult_Status = 4 +) + +var TaskResult_Status_name = map[int32]string{ + 0: "IN_PROGRESS", + 1: "FAILED", + 2: "FAILED_WITH_TERMINAL_ERROR", + 3: "COMPLETED", + 4: "SCHEDULED", +} +var TaskResult_Status_value = map[string]int32{ + "IN_PROGRESS": 0, + "FAILED": 1, + "FAILED_WITH_TERMINAL_ERROR": 2, + "COMPLETED": 3, + "SCHEDULED": 4, +} + +func (x TaskResult_Status) String() string { + return proto.EnumName(TaskResult_Status_name, int32(x)) +} +func (TaskResult_Status) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_taskresult_50ab9135b69785bc, []int{0, 0} +} + +type TaskResult struct { + WorkflowInstanceId string `protobuf:"bytes,1,opt,name=workflow_instance_id,json=workflowInstanceId,proto3" json:"workflow_instance_id,omitempty"` + TaskId string `protobuf:"bytes,2,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + ReasonForIncompletion string `protobuf:"bytes,3,opt,name=reason_for_incompletion,json=reasonForIncompletion,proto3" json:"reason_for_incompletion,omitempty"` + CallbackAfterSeconds int64 `protobuf:"varint,4,opt,name=callback_after_seconds,json=callbackAfterSeconds,proto3" json:"callback_after_seconds,omitempty"` + WorkerId string `protobuf:"bytes,5,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"` + Status TaskResult_Status `protobuf:"varint,6,opt,name=status,proto3,enum=conductor.proto.TaskResult_Status" json:"status,omitempty"` + OutputData map[string]*_struct.Value `protobuf:"bytes,7,rep,name=output_data,json=outputData,proto3" json:"output_data,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + OutputMessage *any.Any `protobuf:"bytes,8,opt,name=output_message,json=outputMessage,proto3" json:"output_message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TaskResult) Reset() { *m = TaskResult{} } +func (m *TaskResult) String() string { return proto.CompactTextString(m) } +func (*TaskResult) ProtoMessage() {} +func (*TaskResult) Descriptor() ([]byte, []int) { + return fileDescriptor_taskresult_50ab9135b69785bc, []int{0} +} 
+func (m *TaskResult) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TaskResult.Unmarshal(m, b) +} +func (m *TaskResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TaskResult.Marshal(b, m, deterministic) +} +func (dst *TaskResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskResult.Merge(dst, src) +} +func (m *TaskResult) XXX_Size() int { + return xxx_messageInfo_TaskResult.Size(m) +} +func (m *TaskResult) XXX_DiscardUnknown() { + xxx_messageInfo_TaskResult.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskResult proto.InternalMessageInfo + +func (m *TaskResult) GetWorkflowInstanceId() string { + if m != nil { + return m.WorkflowInstanceId + } + return "" +} + +func (m *TaskResult) GetTaskId() string { + if m != nil { + return m.TaskId + } + return "" +} + +func (m *TaskResult) GetReasonForIncompletion() string { + if m != nil { + return m.ReasonForIncompletion + } + return "" +} + +func (m *TaskResult) GetCallbackAfterSeconds() int64 { + if m != nil { + return m.CallbackAfterSeconds + } + return 0 +} + +func (m *TaskResult) GetWorkerId() string { + if m != nil { + return m.WorkerId + } + return "" +} + +func (m *TaskResult) GetStatus() TaskResult_Status { + if m != nil { + return m.Status + } + return TaskResult_IN_PROGRESS +} + +func (m *TaskResult) GetOutputData() map[string]*_struct.Value { + if m != nil { + return m.OutputData + } + return nil +} + +func (m *TaskResult) GetOutputMessage() *any.Any { + if m != nil { + return m.OutputMessage + } + return nil +} + +func init() { + proto.RegisterType((*TaskResult)(nil), "conductor.proto.TaskResult") + proto.RegisterMapType((map[string]*_struct.Value)(nil), "conductor.proto.TaskResult.OutputDataEntry") + proto.RegisterEnum("conductor.proto.TaskResult_Status", TaskResult_Status_name, TaskResult_Status_value) +} + +func init() { proto.RegisterFile("model/taskresult.proto", fileDescriptor_taskresult_50ab9135b69785bc) } + +var fileDescriptor_taskresult_50ab9135b69785bc = []byte{ + // 517 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x93, 0xdf, 0x6e, 0xda, 0x30, + 0x14, 0xc6, 0x17, 0xa0, 0x69, 0x39, 0xac, 0x05, 0x59, 0x8c, 0x66, 0x74, 0x9a, 0x10, 0x57, 0x48, + 0x9b, 0x92, 0x89, 0x4d, 0xd3, 0xc4, 0xae, 0x68, 0x49, 0xd7, 0x48, 0x50, 0x90, 0xa1, 0x9b, 0xb4, + 0x9b, 0xc8, 0x38, 0x26, 0x8b, 0x08, 0x31, 0xb2, 0x9d, 0x75, 0x3c, 0xf0, 0xde, 0x63, 0x4a, 0x1c, + 0xda, 0x8a, 0x49, 0xbd, 0xf3, 0x39, 0xbf, 0xef, 0x7c, 0x3a, 0x7f, 0x64, 0x68, 0x6d, 0x78, 0xc0, + 0x62, 0x47, 0x11, 0xb9, 0x16, 0x4c, 0xa6, 0xb1, 0xb2, 0xb7, 0x82, 0x2b, 0x8e, 0xea, 0x94, 0x27, + 0x41, 0x4a, 0x15, 0x17, 0x3a, 0xd1, 0x7e, 0x13, 0x72, 0x1e, 0xc6, 0xcc, 0xc9, 0xa3, 0x65, 0xba, + 0x72, 0xa4, 0x12, 0x29, 0x2d, 0xe4, 0xed, 0xd7, 0x87, 0x94, 0x24, 0x3b, 0x8d, 0xba, 0x7f, 0x2b, + 0x00, 0x0b, 0x22, 0xd7, 0x38, 0xb7, 0x47, 0x1f, 0xa0, 0x79, 0xcf, 0xc5, 0x7a, 0x15, 0xf3, 0x7b, + 0x3f, 0x4a, 0xa4, 0x22, 0x09, 0x65, 0x7e, 0x14, 0x58, 0x46, 0xc7, 0xe8, 0x55, 0x31, 0xda, 0x33, + 0xaf, 0x40, 0x5e, 0x80, 0xce, 0xe1, 0x38, 0x6b, 0x2f, 0x13, 0x95, 0x72, 0x91, 0x99, 0x85, 0x5e, + 0x80, 0x3e, 0xc3, 0xb9, 0x60, 0x44, 0xf2, 0xc4, 0x5f, 0x71, 0xe1, 0x47, 0x09, 0xe5, 0x9b, 0x6d, + 0xcc, 0x54, 0xc4, 0x13, 0xab, 0x9c, 0x0b, 0x5f, 0x69, 0x7c, 0xcd, 0x85, 0xf7, 0x04, 0xa2, 0x4f, + 0xd0, 0xa2, 0x24, 0x8e, 0x97, 0x84, 0xae, 0x7d, 0xb2, 0x52, 0x4c, 0xf8, 0x92, 0x65, 0xe3, 0x4a, + 0xab, 0xd2, 0x31, 0x7a, 0x65, 0xdc, 0xdc, 0xd3, 0x61, 0x06, 0xe7, 0x9a, 0xa1, 0x0b, 0xa8, 0x66, + 0xcd, 0x31, 0x91, 
0x35, 0x72, 0x94, 0xfb, 0x9f, 0xe8, 0x84, 0x17, 0xa0, 0x01, 0x98, 0x52, 0x11, + 0x95, 0x4a, 0xcb, 0xec, 0x18, 0xbd, 0xb3, 0x7e, 0xd7, 0x3e, 0xd8, 0x9f, 0xfd, 0xb8, 0x02, 0x7b, + 0x9e, 0x2b, 0x71, 0x51, 0x81, 0xc6, 0x50, 0xe3, 0xa9, 0xda, 0xa6, 0xca, 0x0f, 0x88, 0x22, 0xd6, + 0x71, 0xa7, 0xdc, 0xab, 0xf5, 0xdf, 0x3d, 0x67, 0x30, 0xcd, 0xe5, 0x23, 0xa2, 0x88, 0x9b, 0x28, + 0xb1, 0xc3, 0xc0, 0x1f, 0x12, 0xe8, 0x2b, 0x9c, 0x15, 0x6e, 0x1b, 0x26, 0x25, 0x09, 0x99, 0x75, + 0xd2, 0x31, 0x7a, 0xb5, 0x7e, 0xd3, 0xd6, 0x27, 0xb2, 0xf7, 0x27, 0xb2, 0x87, 0xc9, 0x0e, 0x9f, + 0x6a, 0xed, 0x44, 0x4b, 0xdb, 0x77, 0x50, 0x3f, 0xf0, 0x46, 0x0d, 0x28, 0xaf, 0xd9, 0xae, 0x38, + 0x4f, 0xf6, 0x44, 0xef, 0xe1, 0xe8, 0x37, 0x89, 0x53, 0x96, 0x5f, 0xa3, 0xd6, 0x6f, 0xfd, 0x67, + 0xfc, 0x3d, 0xa3, 0x58, 0x8b, 0x06, 0xa5, 0x2f, 0x46, 0x97, 0x82, 0xa9, 0x67, 0x46, 0x75, 0xa8, + 0x79, 0xb7, 0xfe, 0x0c, 0x4f, 0xbf, 0x61, 0x77, 0x3e, 0x6f, 0xbc, 0x40, 0x00, 0xe6, 0xf5, 0xd0, + 0x1b, 0xbb, 0xa3, 0x86, 0x81, 0xde, 0x42, 0x5b, 0xbf, 0xfd, 0x1f, 0xde, 0xe2, 0xc6, 0x5f, 0xb8, + 0x78, 0xe2, 0xdd, 0x0e, 0xc7, 0xbe, 0x8b, 0xf1, 0x14, 0x37, 0x4a, 0xe8, 0x14, 0xaa, 0x57, 0xd3, + 0xc9, 0x6c, 0xec, 0x2e, 0xdc, 0x51, 0xa3, 0x9c, 0x85, 0xf3, 0xab, 0x1b, 0x77, 0x74, 0x97, 0x55, + 0x57, 0x2e, 0x43, 0xb8, 0xa0, 0x7c, 0x63, 0x27, 0x4c, 0xad, 0xe2, 0xe8, 0xcf, 0xe1, 0xfa, 0x2e, + 0x5f, 0x3e, 0xee, 0x6f, 0xb6, 0xfc, 0x39, 0x08, 0x23, 0xf5, 0x2b, 0x5d, 0xda, 0x94, 0x6f, 0x9c, + 0xa2, 0xc2, 0x79, 0xa8, 0x70, 0x68, 0x1c, 0xb1, 0x44, 0x39, 0x21, 0x0f, 0xc5, 0x96, 0x3e, 0xc9, + 0xe7, 0x7f, 0x65, 0x69, 0xe6, 0x86, 0x1f, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xff, 0xd5, 0x82, + 0xee, 0x3b, 0x03, 0x00, 0x00, +} diff --git a/client/gogrpc/conductor/model/tasksummary.pb.go b/client/gogrpc/conductor/model/tasksummary.pb.go new file mode 100644 index 0000000000..94078ec7bf --- /dev/null +++ b/client/gogrpc/conductor/model/tasksummary.pb.go @@ -0,0 +1,217 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: model/tasksummary.proto + +package model // import "github.com/netflix/conductor/client/gogrpc/conductor/model" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type TaskSummary struct { + WorkflowId string `protobuf:"bytes,1,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + WorkflowType string `protobuf:"bytes,2,opt,name=workflow_type,json=workflowType,proto3" json:"workflow_type,omitempty"` + CorrelationId string `protobuf:"bytes,3,opt,name=correlation_id,json=correlationId,proto3" json:"correlation_id,omitempty"` + ScheduledTime string `protobuf:"bytes,4,opt,name=scheduled_time,json=scheduledTime,proto3" json:"scheduled_time,omitempty"` + StartTime string `protobuf:"bytes,5,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + UpdateTime string `protobuf:"bytes,6,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` + EndTime string `protobuf:"bytes,7,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + Status Task_Status `protobuf:"varint,8,opt,name=status,proto3,enum=conductor.proto.Task_Status" json:"status,omitempty"` + ReasonForIncompletion string `protobuf:"bytes,9,opt,name=reason_for_incompletion,json=reasonForIncompletion,proto3" json:"reason_for_incompletion,omitempty"` + ExecutionTime int64 `protobuf:"varint,10,opt,name=execution_time,json=executionTime,proto3" json:"execution_time,omitempty"` + QueueWaitTime int64 `protobuf:"varint,11,opt,name=queue_wait_time,json=queueWaitTime,proto3" json:"queue_wait_time,omitempty"` + TaskDefName string `protobuf:"bytes,12,opt,name=task_def_name,json=taskDefName,proto3" json:"task_def_name,omitempty"` + TaskType string `protobuf:"bytes,13,opt,name=task_type,json=taskType,proto3" json:"task_type,omitempty"` + Input string `protobuf:"bytes,14,opt,name=input,proto3" json:"input,omitempty"` + Output string `protobuf:"bytes,15,opt,name=output,proto3" json:"output,omitempty"` + TaskId string `protobuf:"bytes,16,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TaskSummary) Reset() { *m = TaskSummary{} } +func (m *TaskSummary) String() string { return proto.CompactTextString(m) } +func (*TaskSummary) ProtoMessage() {} +func (*TaskSummary) Descriptor() ([]byte, []int) { + return fileDescriptor_tasksummary_ccb082d5e959585d, []int{0} +} +func (m *TaskSummary) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TaskSummary.Unmarshal(m, b) +} +func (m *TaskSummary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TaskSummary.Marshal(b, m, deterministic) +} +func (dst *TaskSummary) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskSummary.Merge(dst, src) +} +func (m *TaskSummary) XXX_Size() int { + return xxx_messageInfo_TaskSummary.Size(m) +} +func (m *TaskSummary) XXX_DiscardUnknown() { + xxx_messageInfo_TaskSummary.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskSummary proto.InternalMessageInfo + +func (m *TaskSummary) GetWorkflowId() string { + if m != nil { + return m.WorkflowId + } + return "" +} + +func (m *TaskSummary) GetWorkflowType() string { + if m != nil { + return m.WorkflowType + } + return "" +} + +func (m *TaskSummary) GetCorrelationId() string { + if m != nil { + return m.CorrelationId + } + return "" +} + +func (m *TaskSummary) GetScheduledTime() string { + if m != nil { + return m.ScheduledTime + } + return "" +} + +func (m *TaskSummary) GetStartTime() string { + if m != nil { + return m.StartTime + } + return "" +} + +func (m *TaskSummary) 
GetUpdateTime() string { + if m != nil { + return m.UpdateTime + } + return "" +} + +func (m *TaskSummary) GetEndTime() string { + if m != nil { + return m.EndTime + } + return "" +} + +func (m *TaskSummary) GetStatus() Task_Status { + if m != nil { + return m.Status + } + return Task_IN_PROGRESS +} + +func (m *TaskSummary) GetReasonForIncompletion() string { + if m != nil { + return m.ReasonForIncompletion + } + return "" +} + +func (m *TaskSummary) GetExecutionTime() int64 { + if m != nil { + return m.ExecutionTime + } + return 0 +} + +func (m *TaskSummary) GetQueueWaitTime() int64 { + if m != nil { + return m.QueueWaitTime + } + return 0 +} + +func (m *TaskSummary) GetTaskDefName() string { + if m != nil { + return m.TaskDefName + } + return "" +} + +func (m *TaskSummary) GetTaskType() string { + if m != nil { + return m.TaskType + } + return "" +} + +func (m *TaskSummary) GetInput() string { + if m != nil { + return m.Input + } + return "" +} + +func (m *TaskSummary) GetOutput() string { + if m != nil { + return m.Output + } + return "" +} + +func (m *TaskSummary) GetTaskId() string { + if m != nil { + return m.TaskId + } + return "" +} + +func init() { + proto.RegisterType((*TaskSummary)(nil), "conductor.proto.TaskSummary") +} + +func init() { + proto.RegisterFile("model/tasksummary.proto", fileDescriptor_tasksummary_ccb082d5e959585d) +} + +var fileDescriptor_tasksummary_ccb082d5e959585d = []byte{ + // 446 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x92, 0xcf, 0x8b, 0x13, 0x31, + 0x14, 0xc7, 0xa9, 0xbb, 0xdb, 0x1f, 0xaf, 0x3b, 0xed, 0x32, 0xa8, 0x1d, 0x5d, 0x65, 0xcb, 0x8a, + 0xd2, 0xd3, 0x14, 0x54, 0x3c, 0x78, 0x5c, 0x44, 0xe8, 0x45, 0xa4, 0x5b, 0x10, 0xbc, 0x0c, 0x69, + 0xf2, 0xa6, 0x0d, 0x9d, 0x24, 0x63, 0x26, 0xa1, 0xdb, 0x3f, 0xcf, 0xff, 0x4c, 0xf2, 0x32, 0x5b, + 0xcb, 0x1e, 0xf3, 0xf9, 0x7e, 0xf2, 0x92, 0xf7, 0x12, 0x98, 0x28, 0x23, 0xb0, 0x9a, 0x3b, 0xd6, + 0xec, 0x1a, 0xaf, 0x14, 0xb3, 0x87, 0xbc, 0xb6, 0xc6, 0x99, 0x74, 0xcc, 0x8d, 0x16, 0x9e, 0x3b, + 0x63, 0x23, 0x78, 0x7d, 0xf5, 0xdf, 0x8c, 0xe4, 0xf6, 0xef, 0x39, 0x0c, 0x57, 0xac, 0xd9, 0xdd, + 0xc7, 0x8d, 0xe9, 0x0d, 0x0c, 0xf7, 0xc6, 0xee, 0xca, 0xca, 0xec, 0x0b, 0x29, 0xb2, 0xce, 0xb4, + 0x33, 0x1b, 0x2c, 0xe1, 0x11, 0x2d, 0x44, 0xfa, 0x0e, 0x92, 0xa3, 0xe0, 0x0e, 0x35, 0x66, 0xcf, + 0x48, 0xb9, 0x7c, 0x84, 0xab, 0x43, 0x8d, 0xe9, 0x7b, 0x18, 0x71, 0x63, 0x2d, 0x56, 0xcc, 0x49, + 0xa3, 0x43, 0xa1, 0x33, 0xb2, 0x92, 0x13, 0xba, 0x10, 0x41, 0x6b, 0xf8, 0x16, 0x85, 0xaf, 0x50, + 0x14, 0x4e, 0x2a, 0xcc, 0xce, 0xa3, 0x76, 0xa4, 0x2b, 0xa9, 0x30, 0x7d, 0x0b, 0xd0, 0x38, 0x66, + 0x5d, 0x54, 0x2e, 0x48, 0x19, 0x10, 0xa1, 0xf8, 0x06, 0x86, 0xbe, 0x16, 0xcc, 0x61, 0xcc, 0xbb, + 0xf1, 0xca, 0x11, 0x91, 0xf0, 0x0a, 0xfa, 0xa8, 0xdb, 0x03, 0x7a, 0x94, 0xf6, 0x50, 0xc7, 0xd2, + 0x9f, 0xa1, 0xdb, 0x38, 0xe6, 0x7c, 0x93, 0xf5, 0xa7, 0x9d, 0xd9, 0xe8, 0xe3, 0x9b, 0xfc, 0xc9, + 0xc8, 0xf2, 0x30, 0x9c, 0xfc, 0x9e, 0x9c, 0x65, 0xeb, 0xa6, 0x5f, 0x60, 0x62, 0x91, 0x35, 0x46, + 0x17, 0xa5, 0xb1, 0x85, 0xd4, 0xdc, 0xa8, 0xba, 0xc2, 0xd0, 0x54, 0x36, 0xa0, 0xfa, 0x2f, 0x62, + 0xfc, 0xdd, 0xd8, 0xc5, 0x49, 0x18, 0xfa, 0xc5, 0x07, 0xe4, 0x9e, 0x86, 0x42, 0xd7, 0x81, 0x69, + 0x67, 0x76, 0xb6, 0x4c, 0x8e, 0x94, 0x2e, 0xf5, 0x01, 0xc6, 0x7f, 0x3c, 0x7a, 0x2c, 0xf6, 0x4c, + 0xb6, 0x4d, 0x0f, 0xa3, 0x47, 0xf8, 0x17, 0x93, 0xb1, 0xf1, 0x5b, 0x48, 0xc2, 0x4b, 0x16, 0x02, + 0xcb, 0x42, 0x33, 0x85, 0xd9, 0x25, 0x1d, 0x3e, 0x0c, 0xf0, 0x1b, 0x96, 0x3f, 0x98, 0xc2, 0xf4, + 0x1a, 0x06, 0xe4, 
0xd0, 0x53, 0x25, 0x94, 0xf7, 0x03, 0xa0, 0x67, 0x7a, 0x0e, 0x17, 0x52, 0xd7, + 0xde, 0x65, 0x23, 0x0a, 0xe2, 0x22, 0x7d, 0x09, 0x5d, 0xe3, 0x5d, 0xc0, 0x63, 0xc2, 0xed, 0x2a, + 0x9d, 0x40, 0x8f, 0x4a, 0x49, 0x91, 0x5d, 0xc5, 0x20, 0x2c, 0x17, 0xe2, 0x6e, 0x0b, 0xd7, 0xdc, + 0xa8, 0x5c, 0xa3, 0x2b, 0x2b, 0xf9, 0xf0, 0x74, 0x82, 0x77, 0xc9, 0xc9, 0xff, 0xfa, 0xb9, 0xfe, + 0xfd, 0x75, 0x23, 0xdd, 0xd6, 0xaf, 0x73, 0x6e, 0xd4, 0xbc, 0xdd, 0x32, 0x3f, 0x6e, 0x99, 0xf3, + 0x4a, 0xa2, 0x76, 0xf3, 0x8d, 0xd9, 0xd8, 0x9a, 0x9f, 0x70, 0xfa, 0xb8, 0xeb, 0x2e, 0x55, 0xfc, + 0xf4, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x32, 0xdb, 0x34, 0x28, 0xf2, 0x02, 0x00, 0x00, +} diff --git a/client/gogrpc/conductor/model/workflow.pb.go b/client/gogrpc/conductor/model/workflow.pb.go new file mode 100644 index 0000000000..f0dfc42f9d --- /dev/null +++ b/client/gogrpc/conductor/model/workflow.pb.go @@ -0,0 +1,289 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: model/workflow.proto + +package model // import "github.com/netflix/conductor/client/gogrpc/conductor/model" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _struct "github.com/golang/protobuf/ptypes/struct" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Workflow_WorkflowStatus int32 + +const ( + Workflow_RUNNING Workflow_WorkflowStatus = 0 + Workflow_COMPLETED Workflow_WorkflowStatus = 1 + Workflow_FAILED Workflow_WorkflowStatus = 2 + Workflow_TIMED_OUT Workflow_WorkflowStatus = 3 + Workflow_TERMINATED Workflow_WorkflowStatus = 4 + Workflow_PAUSED Workflow_WorkflowStatus = 5 +) + +var Workflow_WorkflowStatus_name = map[int32]string{ + 0: "RUNNING", + 1: "COMPLETED", + 2: "FAILED", + 3: "TIMED_OUT", + 4: "TERMINATED", + 5: "PAUSED", +} +var Workflow_WorkflowStatus_value = map[string]int32{ + "RUNNING": 0, + "COMPLETED": 1, + "FAILED": 2, + "TIMED_OUT": 3, + "TERMINATED": 4, + "PAUSED": 5, +} + +func (x Workflow_WorkflowStatus) String() string { + return proto.EnumName(Workflow_WorkflowStatus_name, int32(x)) +} +func (Workflow_WorkflowStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_workflow_b47c95a464502efa, []int{0, 0} +} + +type Workflow struct { + Status Workflow_WorkflowStatus `protobuf:"varint,1,opt,name=status,proto3,enum=conductor.proto.Workflow_WorkflowStatus" json:"status,omitempty"` + EndTime int64 `protobuf:"varint,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + WorkflowId string `protobuf:"bytes,3,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + ParentWorkflowId string `protobuf:"bytes,4,opt,name=parent_workflow_id,json=parentWorkflowId,proto3" json:"parent_workflow_id,omitempty"` + ParentWorkflowTaskId string `protobuf:"bytes,5,opt,name=parent_workflow_task_id,json=parentWorkflowTaskId,proto3" json:"parent_workflow_task_id,omitempty"` + Tasks []*Task `protobuf:"bytes,6,rep,name=tasks,proto3" json:"tasks,omitempty"` + Input map[string]*_struct.Value `protobuf:"bytes,8,rep,name=input,proto3" json:"input,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Output 
map[string]*_struct.Value `protobuf:"bytes,9,rep,name=output,proto3" json:"output,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + WorkflowType string `protobuf:"bytes,10,opt,name=workflow_type,json=workflowType,proto3" json:"workflow_type,omitempty"` + Version int32 `protobuf:"varint,11,opt,name=version,proto3" json:"version,omitempty"` + CorrelationId string `protobuf:"bytes,12,opt,name=correlation_id,json=correlationId,proto3" json:"correlation_id,omitempty"` + ReRunFromWorkflowId string `protobuf:"bytes,13,opt,name=re_run_from_workflow_id,json=reRunFromWorkflowId,proto3" json:"re_run_from_workflow_id,omitempty"` + ReasonForIncompletion string `protobuf:"bytes,14,opt,name=reason_for_incompletion,json=reasonForIncompletion,proto3" json:"reason_for_incompletion,omitempty"` + SchemaVersion int32 `protobuf:"varint,15,opt,name=schema_version,json=schemaVersion,proto3" json:"schema_version,omitempty"` + Event string `protobuf:"bytes,16,opt,name=event,proto3" json:"event,omitempty"` + TaskToDomain map[string]string `protobuf:"bytes,17,rep,name=task_to_domain,json=taskToDomain,proto3" json:"task_to_domain,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + FailedReferenceTaskNames []string `protobuf:"bytes,18,rep,name=failed_reference_task_names,json=failedReferenceTaskNames,proto3" json:"failed_reference_task_names,omitempty"` + WorkflowDefinition *WorkflowDef `protobuf:"bytes,19,opt,name=workflow_definition,json=workflowDefinition,proto3" json:"workflow_definition,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Workflow) Reset() { *m = Workflow{} } +func (m *Workflow) String() string { return proto.CompactTextString(m) } +func (*Workflow) ProtoMessage() {} +func (*Workflow) Descriptor() ([]byte, []int) { + return fileDescriptor_workflow_b47c95a464502efa, []int{0} +} +func (m *Workflow) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Workflow.Unmarshal(m, b) +} +func (m *Workflow) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Workflow.Marshal(b, m, deterministic) +} +func (dst *Workflow) XXX_Merge(src proto.Message) { + xxx_messageInfo_Workflow.Merge(dst, src) +} +func (m *Workflow) XXX_Size() int { + return xxx_messageInfo_Workflow.Size(m) +} +func (m *Workflow) XXX_DiscardUnknown() { + xxx_messageInfo_Workflow.DiscardUnknown(m) +} + +var xxx_messageInfo_Workflow proto.InternalMessageInfo + +func (m *Workflow) GetStatus() Workflow_WorkflowStatus { + if m != nil { + return m.Status + } + return Workflow_RUNNING +} + +func (m *Workflow) GetEndTime() int64 { + if m != nil { + return m.EndTime + } + return 0 +} + +func (m *Workflow) GetWorkflowId() string { + if m != nil { + return m.WorkflowId + } + return "" +} + +func (m *Workflow) GetParentWorkflowId() string { + if m != nil { + return m.ParentWorkflowId + } + return "" +} + +func (m *Workflow) GetParentWorkflowTaskId() string { + if m != nil { + return m.ParentWorkflowTaskId + } + return "" +} + +func (m *Workflow) GetTasks() []*Task { + if m != nil { + return m.Tasks + } + return nil +} + +func (m *Workflow) GetInput() map[string]*_struct.Value { + if m != nil { + return m.Input + } + return nil +} + +func (m *Workflow) GetOutput() map[string]*_struct.Value { + if m != nil { + return m.Output + } + return nil +} + +func (m *Workflow) GetWorkflowType() string { + if m != nil { + return m.WorkflowType + 
} + return "" +} + +func (m *Workflow) GetVersion() int32 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *Workflow) GetCorrelationId() string { + if m != nil { + return m.CorrelationId + } + return "" +} + +func (m *Workflow) GetReRunFromWorkflowId() string { + if m != nil { + return m.ReRunFromWorkflowId + } + return "" +} + +func (m *Workflow) GetReasonForIncompletion() string { + if m != nil { + return m.ReasonForIncompletion + } + return "" +} + +func (m *Workflow) GetSchemaVersion() int32 { + if m != nil { + return m.SchemaVersion + } + return 0 +} + +func (m *Workflow) GetEvent() string { + if m != nil { + return m.Event + } + return "" +} + +func (m *Workflow) GetTaskToDomain() map[string]string { + if m != nil { + return m.TaskToDomain + } + return nil +} + +func (m *Workflow) GetFailedReferenceTaskNames() []string { + if m != nil { + return m.FailedReferenceTaskNames + } + return nil +} + +func (m *Workflow) GetWorkflowDefinition() *WorkflowDef { + if m != nil { + return m.WorkflowDefinition + } + return nil +} + +func init() { + proto.RegisterType((*Workflow)(nil), "conductor.proto.Workflow") + proto.RegisterMapType((map[string]*_struct.Value)(nil), "conductor.proto.Workflow.InputEntry") + proto.RegisterMapType((map[string]*_struct.Value)(nil), "conductor.proto.Workflow.OutputEntry") + proto.RegisterMapType((map[string]string)(nil), "conductor.proto.Workflow.TaskToDomainEntry") + proto.RegisterEnum("conductor.proto.Workflow_WorkflowStatus", Workflow_WorkflowStatus_name, Workflow_WorkflowStatus_value) +} + +func init() { proto.RegisterFile("model/workflow.proto", fileDescriptor_workflow_b47c95a464502efa) } + +var fileDescriptor_workflow_b47c95a464502efa = []byte{ + // 727 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0x5f, 0x4f, 0xe3, 0x46, + 0x14, 0xc5, 0x6b, 0xb2, 0x09, 0xe4, 0x86, 0x64, 0xbd, 0x43, 0xb6, 0xb8, 0xec, 0x4a, 0x8d, 0xb6, + 0x5d, 0xc9, 0xd2, 0x22, 0x47, 0x4a, 0xff, 0xa8, 0x42, 0x42, 0x2d, 0x34, 0xa1, 0xb2, 0x44, 0x42, + 0x30, 0x06, 0xa4, 0xbe, 0x58, 0x8e, 0x3d, 0x0e, 0x56, 0xec, 0x99, 0x68, 0x3c, 0x86, 0xe6, 0x6b, + 0xf6, 0x13, 0x55, 0x33, 0x63, 0x13, 0x07, 0x9a, 0xb7, 0x7d, 0xcb, 0xdc, 0xfb, 0x3b, 0x27, 0x77, + 0xce, 0xcc, 0x18, 0xba, 0x29, 0x0d, 0x71, 0xd2, 0x7f, 0xa2, 0x6c, 0x11, 0x25, 0xf4, 0xc9, 0x5a, + 0x32, 0xca, 0x29, 0x7a, 0x1b, 0x50, 0x12, 0xe6, 0x01, 0xa7, 0x4c, 0x15, 0x8e, 0x0e, 0x37, 0xb1, + 0x10, 0x47, 0x45, 0x43, 0x57, 0x0d, 0xee, 0x67, 0x8b, 0xa2, 0xf2, 0x71, 0x4e, 0xe9, 0x3c, 0xc1, + 0x7d, 0xb9, 0x9a, 0xe5, 0x51, 0x3f, 0xe3, 0x2c, 0x0f, 0xb8, 0xea, 0x7e, 0xfa, 0xb7, 0x09, 0x7b, + 0xf7, 0x85, 0x0b, 0xfa, 0x03, 0x1a, 0x19, 0xf7, 0x79, 0x9e, 0x19, 0x5a, 0x4f, 0x33, 0x3b, 0x03, + 0xd3, 0x7a, 0xf1, 0xbf, 0x56, 0x89, 0x3e, 0xff, 0xb8, 0x91, 0xbc, 0x53, 0xe8, 0xd0, 0x77, 0xb0, + 0x87, 0x49, 0xe8, 0xf1, 0x38, 0xc5, 0xc6, 0x4e, 0x4f, 0x33, 0x6b, 0xce, 0x2e, 0x26, 0xa1, 0x1b, + 0xa7, 0x18, 0x7d, 0x0f, 0xad, 0x72, 0x5c, 0x2f, 0x0e, 0x8d, 0x5a, 0x4f, 0x33, 0x9b, 0x0e, 0x94, + 0x25, 0x3b, 0x44, 0xc7, 0x80, 0x96, 0x3e, 0xc3, 0x84, 0x7b, 0x55, 0xee, 0x8d, 0xe4, 0x74, 0xd5, + 0xb9, 0x5f, 0xd3, 0xbf, 0xc0, 0xe1, 0x4b, 0x5a, 0x6c, 0x5a, 0x48, 0xea, 0x52, 0xd2, 0xdd, 0x94, + 0xb8, 0x7e, 0xb6, 0xb0, 0x43, 0xf4, 0x05, 0xea, 0x02, 0xcb, 0x8c, 0x46, 0xaf, 0x66, 0xb6, 0x06, + 0xef, 0x5f, 0xed, 0x50, 0x70, 0x8e, 0x62, 0xd0, 0x09, 0xd4, 0x63, 0xb2, 0xcc, 0xb9, 0xb1, 0x27, + 0xe1, 0x1f, 0xb7, 0xc7, 0x61, 0x0b, 0x6c, 0x44, 0x38, 0x5b, 0x39, 0x4a, 0x82, 0x4e, 0xa1, 0x41, + 0x73, 0x2e, 0xc4, 
0x4d, 0x29, 0xfe, 0xbc, 0x5d, 0x7c, 0x25, 0x39, 0xa5, 0x2e, 0x44, 0xe8, 0x07, + 0x68, 0xaf, 0xf7, 0xb5, 0x5a, 0x62, 0x03, 0xe4, 0xa6, 0xf6, 0xcb, 0xa2, 0xbb, 0x5a, 0x62, 0x64, + 0xc0, 0xee, 0x23, 0x66, 0x59, 0x4c, 0x89, 0xd1, 0xea, 0x69, 0x66, 0xdd, 0x29, 0x97, 0xe8, 0x33, + 0x74, 0x02, 0xca, 0x18, 0x4e, 0x7c, 0x1e, 0x53, 0x22, 0x42, 0xd9, 0x97, 0xfa, 0x76, 0xa5, 0x6a, + 0x87, 0xe8, 0x67, 0x38, 0x64, 0xd8, 0x63, 0x39, 0xf1, 0x22, 0x46, 0xd3, 0x8d, 0xdc, 0xdb, 0x92, + 0x3f, 0x60, 0xd8, 0xc9, 0xc9, 0x05, 0xa3, 0x69, 0x25, 0xfa, 0x5f, 0x85, 0xca, 0xcf, 0x28, 0xf1, + 0x22, 0xca, 0xbc, 0x98, 0x04, 0x34, 0x5d, 0x26, 0x58, 0x58, 0x1a, 0x1d, 0xa9, 0x7a, 0xaf, 0xda, + 0x17, 0x94, 0xd9, 0x95, 0xa6, 0x18, 0x2a, 0x0b, 0x1e, 0x70, 0xea, 0x7b, 0xe5, 0xd4, 0x6f, 0xe5, + 0xd4, 0x6d, 0x55, 0xbd, 0x2b, 0x66, 0xef, 0x42, 0x1d, 0x3f, 0x62, 0xc2, 0x0d, 0x5d, 0x9a, 0xa9, + 0x05, 0xba, 0x86, 0x8e, 0x3c, 0x5f, 0x4e, 0xbd, 0x90, 0xa6, 0x7e, 0x4c, 0x8c, 0x77, 0x32, 0xd7, + 0x2f, 0xdb, 0x73, 0x15, 0x47, 0xe9, 0xd2, 0xa1, 0xa4, 0x55, 0xba, 0xfb, 0xbc, 0x52, 0x42, 0xa7, + 0xf0, 0x21, 0xf2, 0xe3, 0x04, 0x87, 0x1e, 0xc3, 0x11, 0x66, 0x98, 0x04, 0x58, 0xdd, 0x21, 0xe2, + 0xa7, 0x38, 0x33, 0x50, 0xaf, 0x66, 0x36, 0x1d, 0x43, 0x21, 0x4e, 0x49, 0x08, 0xd3, 0x89, 0xe8, + 0xa3, 0x31, 0x1c, 0x3c, 0x07, 0x16, 0xe2, 0x28, 0x26, 0xb1, 0x8c, 0xe0, 0xa0, 0xa7, 0x99, 0xad, + 0xc1, 0xc7, 0xad, 0x63, 0x0d, 0x71, 0xe4, 0xa0, 0xa7, 0xf5, 0xa2, 0xd0, 0x1d, 0x4d, 0x01, 0xd6, + 0xb7, 0x08, 0xe9, 0x50, 0x5b, 0xe0, 0x95, 0x7c, 0x87, 0x4d, 0x47, 0xfc, 0x44, 0xc7, 0x50, 0x7f, + 0xf4, 0x93, 0x5c, 0xbd, 0xab, 0xd6, 0xe0, 0x5b, 0x4b, 0xbd, 0x6b, 0xab, 0x7c, 0xd7, 0xd6, 0x9d, + 0xe8, 0x3a, 0x0a, 0x3a, 0xd9, 0xf9, 0x4d, 0x3b, 0xba, 0x86, 0x56, 0xe5, 0x6a, 0x7d, 0x15, 0xcb, + 0xdf, 0xe1, 0xdd, 0xab, 0x54, 0xff, 0xc7, 0xb8, 0x5b, 0x35, 0x6e, 0x56, 0x0c, 0x3e, 0x05, 0xd0, + 0xd9, 0xfc, 0x74, 0xa0, 0x16, 0xec, 0x3a, 0xb7, 0x93, 0x89, 0x3d, 0xf9, 0x4b, 0xff, 0x06, 0xb5, + 0xa1, 0xf9, 0xe7, 0xd5, 0x78, 0x7a, 0x39, 0x72, 0x47, 0x43, 0x5d, 0x43, 0x00, 0x8d, 0x8b, 0x33, + 0xfb, 0x72, 0x34, 0xd4, 0x77, 0x44, 0xcb, 0xb5, 0xc7, 0xa3, 0xa1, 0x77, 0x75, 0xeb, 0xea, 0x35, + 0xd4, 0x01, 0x70, 0x47, 0xce, 0xd8, 0x9e, 0x9c, 0x09, 0xf4, 0x8d, 0x40, 0xa7, 0x67, 0xb7, 0x37, + 0xa3, 0xa1, 0x5e, 0x3f, 0xc7, 0xf0, 0x21, 0xa0, 0xa9, 0x45, 0x30, 0x8f, 0x92, 0xf8, 0x9f, 0x97, + 0x27, 0x71, 0x0e, 0xe5, 0x04, 0xd3, 0xd9, 0xdf, 0x27, 0xf3, 0x98, 0x3f, 0xe4, 0x33, 0x2b, 0xa0, + 0x69, 0xbf, 0xe0, 0xfb, 0xcf, 0x7c, 0x3f, 0x48, 0x62, 0x4c, 0x78, 0x7f, 0x4e, 0xe7, 0x6c, 0x19, + 0x54, 0xea, 0xf2, 0x23, 0x3b, 0x6b, 0x48, 0xbb, 0x9f, 0xfe, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x1f, + 0xe2, 0x8d, 0x7f, 0xb4, 0x05, 0x00, 0x00, +} diff --git a/client/gogrpc/conductor/model/workflowdef.pb.go b/client/gogrpc/conductor/model/workflowdef.pb.go new file mode 100644 index 0000000000..00cd04c69b --- /dev/null +++ b/client/gogrpc/conductor/model/workflowdef.pb.go @@ -0,0 +1,161 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: model/workflowdef.proto + +package model // import "github.com/netflix/conductor/client/gogrpc/conductor/model" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _struct "github.com/golang/protobuf/ptypes/struct" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type WorkflowDef struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + Version int32 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"` + Tasks []*WorkflowTask `protobuf:"bytes,4,rep,name=tasks,proto3" json:"tasks,omitempty"` + InputParameters []string `protobuf:"bytes,5,rep,name=input_parameters,json=inputParameters,proto3" json:"input_parameters,omitempty"` + OutputParameters map[string]*_struct.Value `protobuf:"bytes,6,rep,name=output_parameters,json=outputParameters,proto3" json:"output_parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + FailureWorkflow string `protobuf:"bytes,7,opt,name=failure_workflow,json=failureWorkflow,proto3" json:"failure_workflow,omitempty"` + SchemaVersion int32 `protobuf:"varint,8,opt,name=schema_version,json=schemaVersion,proto3" json:"schema_version,omitempty"` + Restartable bool `protobuf:"varint,9,opt,name=restartable,proto3" json:"restartable,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WorkflowDef) Reset() { *m = WorkflowDef{} } +func (m *WorkflowDef) String() string { return proto.CompactTextString(m) } +func (*WorkflowDef) ProtoMessage() {} +func (*WorkflowDef) Descriptor() ([]byte, []int) { + return fileDescriptor_workflowdef_7fb3769e2566471c, []int{0} +} +func (m *WorkflowDef) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WorkflowDef.Unmarshal(m, b) +} +func (m *WorkflowDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WorkflowDef.Marshal(b, m, deterministic) +} +func (dst *WorkflowDef) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowDef.Merge(dst, src) +} +func (m *WorkflowDef) XXX_Size() int { + return xxx_messageInfo_WorkflowDef.Size(m) +} +func (m *WorkflowDef) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowDef.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowDef proto.InternalMessageInfo + +func (m *WorkflowDef) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *WorkflowDef) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *WorkflowDef) GetVersion() int32 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *WorkflowDef) GetTasks() []*WorkflowTask { + if m != nil { + return m.Tasks + } + return nil +} + +func (m *WorkflowDef) GetInputParameters() []string { + if m != nil { + return m.InputParameters + } + return nil +} + +func (m *WorkflowDef) GetOutputParameters() map[string]*_struct.Value { + if m != nil { + return m.OutputParameters + } + return nil +} + +func (m *WorkflowDef) GetFailureWorkflow() string { + if m != nil { + return m.FailureWorkflow + } + return "" +} + +func (m *WorkflowDef) GetSchemaVersion() int32 { + if m != nil { + return m.SchemaVersion + } + return 0 +} + +func (m *WorkflowDef) GetRestartable() bool { + if m != nil { + return m.Restartable + } + return false +} + +func init() { + proto.RegisterType((*WorkflowDef)(nil), "conductor.proto.WorkflowDef") + proto.RegisterMapType((map[string]*_struct.Value)(nil), "conductor.proto.WorkflowDef.OutputParametersEntry") +} + +func init() { + 
proto.RegisterFile("model/workflowdef.proto", fileDescriptor_workflowdef_7fb3769e2566471c) +} + +var fileDescriptor_workflowdef_7fb3769e2566471c = []byte{ + // 404 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xdf, 0x6b, 0xd5, 0x30, + 0x14, 0xc7, 0xe9, 0xba, 0x6e, 0xbb, 0x29, 0xf3, 0x5e, 0x03, 0x6a, 0x98, 0x0a, 0x45, 0x10, 0x2a, + 0x48, 0x0a, 0x77, 0x2f, 0xb2, 0xc7, 0x31, 0x9f, 0x1d, 0x45, 0x26, 0xe8, 0x43, 0x49, 0xd3, 0xd3, + 0xde, 0xd2, 0xb4, 0x29, 0xf9, 0xb1, 0xb9, 0xbf, 0xda, 0x7f, 0x41, 0x9a, 0xb6, 0xb3, 0x16, 0x7d, + 0xcb, 0xf9, 0x7c, 0xcf, 0x2f, 0xbe, 0x27, 0xe8, 0x55, 0x2b, 0x0b, 0x10, 0xc9, 0x83, 0x54, 0x4d, + 0x29, 0xe4, 0x43, 0x01, 0x25, 0xed, 0x95, 0x34, 0x12, 0x6f, 0xb9, 0xec, 0x0a, 0xcb, 0x8d, 0x54, + 0x23, 0xb8, 0x20, 0x7f, 0x67, 0x1a, 0xa6, 0x9b, 0x49, 0x79, 0x53, 0x49, 0x59, 0x09, 0x48, 0x5c, + 0x94, 0xdb, 0x32, 0xd1, 0x46, 0x59, 0x6e, 0x46, 0xf5, 0xdd, 0x2f, 0x1f, 0x85, 0xdf, 0xa6, 0xa2, + 0x1b, 0x28, 0x31, 0x46, 0xc7, 0x1d, 0x6b, 0x81, 0x78, 0x91, 0x17, 0x6f, 0x52, 0xf7, 0xc6, 0x11, + 0x0a, 0x0b, 0xd0, 0x5c, 0xd5, 0xbd, 0xa9, 0x65, 0x47, 0x8e, 0x9c, 0xb4, 0x44, 0x98, 0xa0, 0xd3, + 0x7b, 0x50, 0x7a, 0x50, 0xfd, 0xc8, 0x8b, 0x83, 0x74, 0x0e, 0xf1, 0x25, 0x0a, 0x86, 0x5d, 0x34, + 0x39, 0x8e, 0xfc, 0x38, 0xdc, 0xbf, 0xa5, 0xab, 0xc5, 0xe9, 0x3c, 0xfc, 0x2b, 0xd3, 0x4d, 0x3a, + 0xe6, 0xe2, 0x0f, 0x68, 0x57, 0x77, 0xbd, 0x35, 0x59, 0xcf, 0x14, 0x6b, 0xc1, 0x80, 0xd2, 0x24, + 0x88, 0xfc, 0x78, 0x93, 0x6e, 0x1d, 0xbf, 0x7d, 0xc2, 0x38, 0x43, 0xcf, 0xa5, 0x35, 0xab, 0xdc, + 0x13, 0x37, 0x6b, 0xff, 0xdf, 0x59, 0x37, 0x50, 0xd2, 0x2f, 0xae, 0xea, 0x4f, 0xa7, 0xcf, 0x9d, + 0x51, 0x8f, 0xe9, 0x4e, 0xae, 0xf0, 0xb0, 0x4b, 0xc9, 0x6a, 0x61, 0x15, 0x64, 0xb3, 0xb9, 0xe4, + 0xd4, 0x39, 0xb0, 0x9d, 0xf8, 0xdc, 0x15, 0xbf, 0x47, 0xcf, 0x34, 0x3f, 0x40, 0xcb, 0xb2, 0xd9, + 0x8c, 0x33, 0x67, 0xc6, 0xf9, 0x48, 0xef, 0x26, 0x4b, 0x22, 0x14, 0x2a, 0xd0, 0x86, 0x29, 0xc3, + 0x72, 0x01, 0x64, 0x13, 0x79, 0xf1, 0x59, 0xba, 0x44, 0x17, 0x3f, 0xd0, 0x8b, 0x7f, 0xae, 0x87, + 0x77, 0xc8, 0x6f, 0xe0, 0x71, 0x3a, 0xce, 0xf0, 0xc4, 0x1f, 0x51, 0x70, 0xcf, 0x84, 0x05, 0x77, + 0x95, 0x70, 0xff, 0x92, 0x8e, 0xd7, 0xa6, 0xf3, 0xb5, 0xe9, 0xdd, 0xa0, 0xa6, 0x63, 0xd2, 0xd5, + 0xd1, 0x27, 0xef, 0xfa, 0x80, 0x5e, 0x73, 0xd9, 0xd2, 0x0e, 0x4c, 0x29, 0xea, 0x9f, 0x6b, 0x8f, + 0xae, 0xcf, 0x17, 0x26, 0xdd, 0xe6, 0xdf, 0xaf, 0xaa, 0xda, 0x1c, 0x6c, 0x4e, 0xb9, 0x6c, 0x93, + 0xa9, 0x24, 0x79, 0x2a, 0x49, 0xb8, 0xa8, 0xa1, 0x33, 0x49, 0x25, 0x2b, 0xd5, 0xf3, 0x05, 0x77, + 0x9f, 0x31, 0x3f, 0x71, 0x1d, 0x2f, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x70, 0x4f, 0x0f, 0xeb, + 0xc6, 0x02, 0x00, 0x00, +} diff --git a/client/gogrpc/conductor/model/workflowsummary.pb.go b/client/gogrpc/conductor/model/workflowsummary.pb.go new file mode 100644 index 0000000000..63847d00b8 --- /dev/null +++ b/client/gogrpc/conductor/model/workflowsummary.pb.go @@ -0,0 +1,200 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: model/workflowsummary.proto + +package model // import "github.com/netflix/conductor/client/gogrpc/conductor/model" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type WorkflowSummary struct { + WorkflowType string `protobuf:"bytes,1,opt,name=workflow_type,json=workflowType,proto3" json:"workflow_type,omitempty"` + Version int32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + WorkflowId string `protobuf:"bytes,3,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + CorrelationId string `protobuf:"bytes,4,opt,name=correlation_id,json=correlationId,proto3" json:"correlation_id,omitempty"` + StartTime string `protobuf:"bytes,5,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + UpdateTime string `protobuf:"bytes,6,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` + EndTime string `protobuf:"bytes,7,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + Status Workflow_WorkflowStatus `protobuf:"varint,8,opt,name=status,proto3,enum=conductor.proto.Workflow_WorkflowStatus" json:"status,omitempty"` + Input string `protobuf:"bytes,9,opt,name=input,proto3" json:"input,omitempty"` + Output string `protobuf:"bytes,10,opt,name=output,proto3" json:"output,omitempty"` + ReasonForIncompletion string `protobuf:"bytes,11,opt,name=reason_for_incompletion,json=reasonForIncompletion,proto3" json:"reason_for_incompletion,omitempty"` + ExecutionTime int64 `protobuf:"varint,12,opt,name=execution_time,json=executionTime,proto3" json:"execution_time,omitempty"` + Event string `protobuf:"bytes,13,opt,name=event,proto3" json:"event,omitempty"` + FailedReferenceTaskNames string `protobuf:"bytes,14,opt,name=failed_reference_task_names,json=failedReferenceTaskNames,proto3" json:"failed_reference_task_names,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WorkflowSummary) Reset() { *m = WorkflowSummary{} } +func (m *WorkflowSummary) String() string { return proto.CompactTextString(m) } +func (*WorkflowSummary) ProtoMessage() {} +func (*WorkflowSummary) Descriptor() ([]byte, []int) { + return fileDescriptor_workflowsummary_4b94875bbd67cbd7, []int{0} +} +func (m *WorkflowSummary) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WorkflowSummary.Unmarshal(m, b) +} +func (m *WorkflowSummary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WorkflowSummary.Marshal(b, m, deterministic) +} +func (dst *WorkflowSummary) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowSummary.Merge(dst, src) +} +func (m *WorkflowSummary) XXX_Size() int { + return xxx_messageInfo_WorkflowSummary.Size(m) +} +func (m *WorkflowSummary) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowSummary.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowSummary proto.InternalMessageInfo + +func (m *WorkflowSummary) GetWorkflowType() string { + if m != nil { + return m.WorkflowType + } + return "" +} + +func (m *WorkflowSummary) GetVersion() int32 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *WorkflowSummary) GetWorkflowId() string { + if m != nil { + return m.WorkflowId + } + return "" +} + +func (m *WorkflowSummary) GetCorrelationId() string { + if m != nil { + return m.CorrelationId + } + return "" +} + +func (m *WorkflowSummary) GetStartTime() string { + if m != nil { + return m.StartTime + } + return "" +} + +func (m *WorkflowSummary) GetUpdateTime() string { + if m != 
nil { + return m.UpdateTime + } + return "" +} + +func (m *WorkflowSummary) GetEndTime() string { + if m != nil { + return m.EndTime + } + return "" +} + +func (m *WorkflowSummary) GetStatus() Workflow_WorkflowStatus { + if m != nil { + return m.Status + } + return Workflow_RUNNING +} + +func (m *WorkflowSummary) GetInput() string { + if m != nil { + return m.Input + } + return "" +} + +func (m *WorkflowSummary) GetOutput() string { + if m != nil { + return m.Output + } + return "" +} + +func (m *WorkflowSummary) GetReasonForIncompletion() string { + if m != nil { + return m.ReasonForIncompletion + } + return "" +} + +func (m *WorkflowSummary) GetExecutionTime() int64 { + if m != nil { + return m.ExecutionTime + } + return 0 +} + +func (m *WorkflowSummary) GetEvent() string { + if m != nil { + return m.Event + } + return "" +} + +func (m *WorkflowSummary) GetFailedReferenceTaskNames() string { + if m != nil { + return m.FailedReferenceTaskNames + } + return "" +} + +func init() { + proto.RegisterType((*WorkflowSummary)(nil), "conductor.proto.WorkflowSummary") +} + +func init() { + proto.RegisterFile("model/workflowsummary.proto", fileDescriptor_workflowsummary_4b94875bbd67cbd7) +} + +var fileDescriptor_workflowsummary_4b94875bbd67cbd7 = []byte{ + // 428 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x92, 0x51, 0x8b, 0xd3, 0x40, + 0x10, 0xc7, 0x89, 0x77, 0x6d, 0xaf, 0x7b, 0xd7, 0x1e, 0x2e, 0x55, 0x57, 0x8b, 0x58, 0x14, 0x21, + 0x4f, 0x09, 0x28, 0xf8, 0x20, 0x08, 0x72, 0x0f, 0x42, 0x5f, 0x44, 0x72, 0x05, 0xc1, 0x97, 0xb0, + 0xdd, 0x4c, 0xea, 0xd2, 0x64, 0x27, 0xec, 0x6e, 0xee, 0xda, 0x4f, 0xe2, 0xd7, 0x95, 0xcc, 0x36, + 0xa5, 0xd7, 0xb7, 0xcc, 0xef, 0xff, 0x9b, 0xdd, 0xcc, 0xb0, 0x6c, 0x5e, 0x63, 0x01, 0x55, 0xfa, + 0x88, 0x76, 0x5b, 0x56, 0xf8, 0xe8, 0xda, 0xba, 0x96, 0x76, 0x9f, 0x34, 0x16, 0x3d, 0xf2, 0x5b, + 0x85, 0xa6, 0x68, 0x95, 0x47, 0x1b, 0xc0, 0x9b, 0xd9, 0x53, 0x3b, 0xd0, 0xf7, 0xff, 0x2e, 0xd9, + 0xed, 0xef, 0x03, 0xba, 0x0f, 0x07, 0xf0, 0x0f, 0x6c, 0xd2, 0x5b, 0xb9, 0xdf, 0x37, 0x20, 0xa2, + 0x45, 0x14, 0x8f, 0xb3, 0x9b, 0x1e, 0xae, 0xf6, 0x0d, 0x70, 0xc1, 0x46, 0x0f, 0x60, 0x9d, 0x46, + 0x23, 0x9e, 0x2d, 0xa2, 0x78, 0x90, 0xf5, 0x25, 0x7f, 0xc7, 0xae, 0x8f, 0xed, 0xba, 0x10, 0x17, + 0xd4, 0xcc, 0x7a, 0xb4, 0x2c, 0xf8, 0x47, 0x36, 0x55, 0x68, 0x2d, 0x54, 0xd2, 0x6b, 0x34, 0x9d, + 0x73, 0x49, 0xce, 0xe4, 0x84, 0x2e, 0x0b, 0xfe, 0x96, 0x31, 0xe7, 0xa5, 0xf5, 0xb9, 0xd7, 0x35, + 0x88, 0x01, 0x29, 0x63, 0x22, 0x2b, 0x5d, 0x43, 0x77, 0x4d, 0xdb, 0x14, 0xd2, 0x43, 0xc8, 0x87, + 0xe1, 0x9a, 0x80, 0x48, 0x78, 0xcd, 0xae, 0xc0, 0x14, 0x21, 0x1d, 0x51, 0x3a, 0x02, 0x53, 0x50, + 0xf4, 0x9d, 0x0d, 0x9d, 0x97, 0xbe, 0x75, 0xe2, 0x6a, 0x11, 0xc5, 0xd3, 0x4f, 0x71, 0x72, 0xb6, + 0xad, 0xa4, 0xdf, 0xc9, 0xf1, 0xe3, 0x9e, 0xfc, 0xec, 0xd0, 0xc7, 0x67, 0x6c, 0xa0, 0x4d, 0xd3, + 0x7a, 0x31, 0xa6, 0x93, 0x43, 0xc1, 0x5f, 0xb2, 0x21, 0xb6, 0xbe, 0xc3, 0x8c, 0xf0, 0xa1, 0xe2, + 0x5f, 0xd8, 0x2b, 0x0b, 0xd2, 0xa1, 0xc9, 0x4b, 0xb4, 0xb9, 0x36, 0x0a, 0xeb, 0xa6, 0x82, 0x6e, + 0x4e, 0x71, 0x4d, 0xe2, 0x8b, 0x10, 0xff, 0x40, 0xbb, 0x3c, 0x09, 0xbb, 0x4d, 0xc1, 0x0e, 0x54, + 0x4b, 0x7b, 0xa2, 0x41, 0x6e, 0x16, 0x51, 0x7c, 0x91, 0x4d, 0x8e, 0x94, 0xc6, 0x99, 0xb1, 0x01, + 0x3c, 0x80, 0xf1, 0x62, 0x12, 0x7e, 0x86, 0x0a, 0xfe, 0x8d, 0xcd, 0x4b, 0xa9, 0x2b, 0x28, 0x72, + 0x0b, 0x25, 0x58, 0x30, 0x0a, 0x72, 0x2f, 0xdd, 0x36, 0x37, 0xb2, 0x06, 0x27, 0xa6, 0xe4, 0x8a, + 0xa0, 0x64, 0xbd, 0xb1, 0x92, 0x6e, 0xfb, 0xb3, 0xcb, 0xef, 0x2a, 0x36, 0x57, 0x58, 
0x27, 0x06, + 0x7c, 0x59, 0xe9, 0xdd, 0xf9, 0x82, 0xee, 0x9e, 0x9f, 0xbd, 0x9a, 0x5f, 0xeb, 0x3f, 0x5f, 0x37, + 0xda, 0xff, 0x6d, 0xd7, 0x89, 0xc2, 0x3a, 0x3d, 0xb4, 0xa5, 0xc7, 0xb6, 0x54, 0x55, 0x1a, 0x8c, + 0x4f, 0x37, 0xb8, 0xb1, 0x8d, 0x3a, 0xe1, 0xf4, 0x2c, 0xd7, 0x43, 0x3a, 0xf5, 0xf3, 0xff, 0x00, + 0x00, 0x00, 0xff, 0xff, 0xa0, 0xee, 0x86, 0xf0, 0xd4, 0x02, 0x00, 0x00, +} diff --git a/client/gogrpc/conductor/model/workflowtask.pb.go b/client/gogrpc/conductor/model/workflowtask.pb.go new file mode 100644 index 0000000000..54ed68c273 --- /dev/null +++ b/client/gogrpc/conductor/model/workflowtask.pb.go @@ -0,0 +1,308 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: model/workflowtask.proto + +package model // import "github.com/netflix/conductor/client/gogrpc/conductor/model" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _struct "github.com/golang/protobuf/ptypes/struct" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type WorkflowTask struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + TaskReferenceName string `protobuf:"bytes,2,opt,name=task_reference_name,json=taskReferenceName,proto3" json:"task_reference_name,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + InputParameters map[string]*_struct.Value `protobuf:"bytes,4,rep,name=input_parameters,json=inputParameters,proto3" json:"input_parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` + DynamicTaskNameParam string `protobuf:"bytes,6,opt,name=dynamic_task_name_param,json=dynamicTaskNameParam,proto3" json:"dynamic_task_name_param,omitempty"` + CaseValueParam string `protobuf:"bytes,7,opt,name=case_value_param,json=caseValueParam,proto3" json:"case_value_param,omitempty"` + CaseExpression string `protobuf:"bytes,8,opt,name=case_expression,json=caseExpression,proto3" json:"case_expression,omitempty"` + DecisionCases map[string]*WorkflowTask_WorkflowTaskList `protobuf:"bytes,9,rep,name=decision_cases,json=decisionCases,proto3" json:"decision_cases,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + DynamicForkTasksParam string `protobuf:"bytes,10,opt,name=dynamic_fork_tasks_param,json=dynamicForkTasksParam,proto3" json:"dynamic_fork_tasks_param,omitempty"` + DynamicForkTasksInputParamName string `protobuf:"bytes,11,opt,name=dynamic_fork_tasks_input_param_name,json=dynamicForkTasksInputParamName,proto3" json:"dynamic_fork_tasks_input_param_name,omitempty"` + DefaultCase []*WorkflowTask `protobuf:"bytes,12,rep,name=default_case,json=defaultCase,proto3" json:"default_case,omitempty"` + ForkTasks []*WorkflowTask_WorkflowTaskList `protobuf:"bytes,13,rep,name=fork_tasks,json=forkTasks,proto3" json:"fork_tasks,omitempty"` + StartDelay int32 `protobuf:"varint,14,opt,name=start_delay,json=startDelay,proto3" json:"start_delay,omitempty"` + SubWorkflowParam *SubWorkflowParams 
`protobuf:"bytes,15,opt,name=sub_workflow_param,json=subWorkflowParam,proto3" json:"sub_workflow_param,omitempty"` + JoinOn []string `protobuf:"bytes,16,rep,name=join_on,json=joinOn,proto3" json:"join_on,omitempty"` + Sink string `protobuf:"bytes,17,opt,name=sink,proto3" json:"sink,omitempty"` + Optional bool `protobuf:"varint,18,opt,name=optional,proto3" json:"optional,omitempty"` + TaskDefinition *TaskDef `protobuf:"bytes,19,opt,name=task_definition,json=taskDefinition,proto3" json:"task_definition,omitempty"` + RateLimited bool `protobuf:"varint,20,opt,name=rate_limited,json=rateLimited,proto3" json:"rate_limited,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WorkflowTask) Reset() { *m = WorkflowTask{} } +func (m *WorkflowTask) String() string { return proto.CompactTextString(m) } +func (*WorkflowTask) ProtoMessage() {} +func (*WorkflowTask) Descriptor() ([]byte, []int) { + return fileDescriptor_workflowtask_5d520fab7400c6cf, []int{0} +} +func (m *WorkflowTask) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WorkflowTask.Unmarshal(m, b) +} +func (m *WorkflowTask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WorkflowTask.Marshal(b, m, deterministic) +} +func (dst *WorkflowTask) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowTask.Merge(dst, src) +} +func (m *WorkflowTask) XXX_Size() int { + return xxx_messageInfo_WorkflowTask.Size(m) +} +func (m *WorkflowTask) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowTask.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowTask proto.InternalMessageInfo + +func (m *WorkflowTask) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *WorkflowTask) GetTaskReferenceName() string { + if m != nil { + return m.TaskReferenceName + } + return "" +} + +func (m *WorkflowTask) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *WorkflowTask) GetInputParameters() map[string]*_struct.Value { + if m != nil { + return m.InputParameters + } + return nil +} + +func (m *WorkflowTask) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *WorkflowTask) GetDynamicTaskNameParam() string { + if m != nil { + return m.DynamicTaskNameParam + } + return "" +} + +func (m *WorkflowTask) GetCaseValueParam() string { + if m != nil { + return m.CaseValueParam + } + return "" +} + +func (m *WorkflowTask) GetCaseExpression() string { + if m != nil { + return m.CaseExpression + } + return "" +} + +func (m *WorkflowTask) GetDecisionCases() map[string]*WorkflowTask_WorkflowTaskList { + if m != nil { + return m.DecisionCases + } + return nil +} + +func (m *WorkflowTask) GetDynamicForkTasksParam() string { + if m != nil { + return m.DynamicForkTasksParam + } + return "" +} + +func (m *WorkflowTask) GetDynamicForkTasksInputParamName() string { + if m != nil { + return m.DynamicForkTasksInputParamName + } + return "" +} + +func (m *WorkflowTask) GetDefaultCase() []*WorkflowTask { + if m != nil { + return m.DefaultCase + } + return nil +} + +func (m *WorkflowTask) GetForkTasks() []*WorkflowTask_WorkflowTaskList { + if m != nil { + return m.ForkTasks + } + return nil +} + +func (m *WorkflowTask) GetStartDelay() int32 { + if m != nil { + return m.StartDelay + } + return 0 +} + +func (m *WorkflowTask) GetSubWorkflowParam() *SubWorkflowParams { + if m != nil { + return m.SubWorkflowParam + } + return nil +} + +func (m *WorkflowTask) GetJoinOn() 
[]string { + if m != nil { + return m.JoinOn + } + return nil +} + +func (m *WorkflowTask) GetSink() string { + if m != nil { + return m.Sink + } + return "" +} + +func (m *WorkflowTask) GetOptional() bool { + if m != nil { + return m.Optional + } + return false +} + +func (m *WorkflowTask) GetTaskDefinition() *TaskDef { + if m != nil { + return m.TaskDefinition + } + return nil +} + +func (m *WorkflowTask) GetRateLimited() bool { + if m != nil { + return m.RateLimited + } + return false +} + +type WorkflowTask_WorkflowTaskList struct { + Tasks []*WorkflowTask `protobuf:"bytes,1,rep,name=tasks,proto3" json:"tasks,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WorkflowTask_WorkflowTaskList) Reset() { *m = WorkflowTask_WorkflowTaskList{} } +func (m *WorkflowTask_WorkflowTaskList) String() string { return proto.CompactTextString(m) } +func (*WorkflowTask_WorkflowTaskList) ProtoMessage() {} +func (*WorkflowTask_WorkflowTaskList) Descriptor() ([]byte, []int) { + return fileDescriptor_workflowtask_5d520fab7400c6cf, []int{0, 0} +} +func (m *WorkflowTask_WorkflowTaskList) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WorkflowTask_WorkflowTaskList.Unmarshal(m, b) +} +func (m *WorkflowTask_WorkflowTaskList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WorkflowTask_WorkflowTaskList.Marshal(b, m, deterministic) +} +func (dst *WorkflowTask_WorkflowTaskList) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowTask_WorkflowTaskList.Merge(dst, src) +} +func (m *WorkflowTask_WorkflowTaskList) XXX_Size() int { + return xxx_messageInfo_WorkflowTask_WorkflowTaskList.Size(m) +} +func (m *WorkflowTask_WorkflowTaskList) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowTask_WorkflowTaskList.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowTask_WorkflowTaskList proto.InternalMessageInfo + +func (m *WorkflowTask_WorkflowTaskList) GetTasks() []*WorkflowTask { + if m != nil { + return m.Tasks + } + return nil +} + +func init() { + proto.RegisterType((*WorkflowTask)(nil), "conductor.proto.WorkflowTask") + proto.RegisterMapType((map[string]*WorkflowTask_WorkflowTaskList)(nil), "conductor.proto.WorkflowTask.DecisionCasesEntry") + proto.RegisterMapType((map[string]*_struct.Value)(nil), "conductor.proto.WorkflowTask.InputParametersEntry") + proto.RegisterType((*WorkflowTask_WorkflowTaskList)(nil), "conductor.proto.WorkflowTask.WorkflowTaskList") +} + +func init() { + proto.RegisterFile("model/workflowtask.proto", fileDescriptor_workflowtask_5d520fab7400c6cf) +} + +var fileDescriptor_workflowtask_5d520fab7400c6cf = []byte{ + // 708 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x51, 0x6f, 0xd3, 0x3a, + 0x14, 0x56, 0xd7, 0x75, 0x5b, 0x4f, 0xbb, 0xb6, 0xf3, 0x76, 0xef, 0xac, 0xde, 0xbb, 0x7b, 0xcb, + 0x78, 0xa0, 0x0f, 0x28, 0x45, 0x9d, 0x10, 0x68, 0x4f, 0x30, 0x3a, 0x10, 0x62, 0xc0, 0x14, 0x10, + 0x93, 0x26, 0xa1, 0x28, 0x4d, 0x9c, 0x62, 0x9a, 0xc6, 0x91, 0xed, 0xb0, 0xf5, 0xaf, 0xf0, 0x6b, + 0x91, 0x8f, 0x93, 0x36, 0xeb, 0xa6, 0x09, 0xde, 0xec, 0xef, 0x7c, 0xdf, 0x39, 0xe7, 0xb3, 0x7d, + 0x0c, 0x74, 0x26, 0x42, 0x16, 0x0f, 0xae, 0x84, 0x9c, 0x46, 0xb1, 0xb8, 0xd2, 0xbe, 0x9a, 0x3a, + 0xa9, 0x14, 0x5a, 0x90, 0x76, 0x20, 0x92, 0x30, 0x0b, 0xb4, 0x90, 0x16, 0xe8, 0xee, 0x5a, 0xaa, + 0xa1, 0x84, 0x2c, 0xca, 0xc1, 0x03, 0x0b, 0xaa, 0x6c, 0x5c, 0xa4, 0x48, 0x7d, 0xe9, 0xcf, 0x54, + 0x1e, 0xfe, 0x77, 0x22, 0xc4, 
0x24, 0x66, 0x03, 0xdc, 0x8d, 0xb3, 0x68, 0xa0, 0xb4, 0xcc, 0x02, + 0x6d, 0xa3, 0x87, 0x3f, 0x01, 0x9a, 0x17, 0xb9, 0xec, 0xb3, 0xaf, 0xa6, 0x84, 0xc0, 0x7a, 0xe2, + 0xcf, 0x18, 0xad, 0xf4, 0x2a, 0xfd, 0xba, 0x8b, 0x6b, 0xe2, 0xc0, 0xae, 0x29, 0xe9, 0x49, 0x16, + 0x31, 0xc9, 0x92, 0x80, 0x79, 0x48, 0x59, 0x43, 0xca, 0x8e, 0x09, 0xb9, 0x45, 0xe4, 0x83, 0xe1, + 0xf7, 0xa0, 0x11, 0x32, 0x15, 0x48, 0x9e, 0x6a, 0x2e, 0x12, 0x5a, 0x45, 0x5e, 0x19, 0x22, 0x5f, + 0xa1, 0xc3, 0x93, 0x34, 0xd3, 0x1e, 0xb6, 0xca, 0x34, 0x93, 0x8a, 0xae, 0xf7, 0xaa, 0xfd, 0xc6, + 0x70, 0xe8, 0xac, 0x98, 0x76, 0xca, 0xed, 0x39, 0x6f, 0x8d, 0xea, 0x7c, 0x21, 0x3a, 0x4d, 0xb4, + 0x9c, 0xbb, 0x6d, 0x7e, 0x13, 0x35, 0x26, 0xf4, 0x3c, 0x65, 0xb4, 0x66, 0x4d, 0x98, 0x35, 0x79, + 0x0a, 0xfb, 0xe1, 0x3c, 0xf1, 0x67, 0x3c, 0xf0, 0xd0, 0x8c, 0xb1, 0x60, 0xcb, 0xd3, 0x0d, 0xa4, + 0xed, 0xe5, 0x61, 0x53, 0xc7, 0xd8, 0xc0, 0x7c, 0xa4, 0x0f, 0x9d, 0xc0, 0x57, 0xcc, 0xfb, 0xe1, + 0xc7, 0x59, 0xc1, 0xdf, 0x44, 0x7e, 0xcb, 0xe0, 0x5f, 0x0c, 0x6c, 0x99, 0x8f, 0xa0, 0x8d, 0x4c, + 0x76, 0x9d, 0x4a, 0xa6, 0x94, 0x71, 0xbe, 0xb5, 0x24, 0x9e, 0x2e, 0x50, 0x72, 0x01, 0xad, 0x90, + 0x05, 0xdc, 0xac, 0x3d, 0x13, 0x52, 0xb4, 0x8e, 0xd6, 0x9f, 0xdc, 0x6f, 0x7d, 0x94, 0x6b, 0x5e, + 0x19, 0x89, 0x35, 0xbe, 0x1d, 0x96, 0x31, 0xf2, 0x0c, 0x68, 0x61, 0x31, 0x12, 0x72, 0x8a, 0x3e, + 0x55, 0xde, 0x33, 0x60, 0x2b, 0x7f, 0xe5, 0xf1, 0xd7, 0x42, 0x4e, 0x4d, 0x52, 0x65, 0x5b, 0x7f, + 0x07, 0x0f, 0xef, 0x10, 0x96, 0x6e, 0xc8, 0x5e, 0x78, 0x03, 0x73, 0xfc, 0xb7, 0x9a, 0x63, 0x79, + 0x27, 0x78, 0xfb, 0x2f, 0xa0, 0x19, 0xb2, 0xc8, 0xcf, 0x62, 0x8d, 0xee, 0x68, 0x13, 0xcd, 0x1d, + 0xdc, 0x6b, 0xce, 0xbc, 0x0e, 0x94, 0x18, 0x23, 0xe4, 0x3d, 0xc0, 0xb2, 0x0d, 0xba, 0x8d, 0x7a, + 0xe7, 0xfe, 0xc3, 0x29, 0x6f, 0xce, 0xb8, 0xd2, 0x6e, 0x3d, 0x2a, 0xda, 0x23, 0xff, 0x43, 0x43, + 0x69, 0x5f, 0x6a, 0x2f, 0x64, 0xb1, 0x3f, 0xa7, 0xad, 0x5e, 0xa5, 0x5f, 0x73, 0x01, 0xa1, 0x91, + 0x41, 0xc8, 0x39, 0x10, 0x95, 0x8d, 0xbd, 0x62, 0x7c, 0xf2, 0x13, 0x6b, 0xf7, 0x2a, 0xfd, 0xc6, + 0xf0, 0xf0, 0x56, 0xdd, 0x4f, 0xd9, 0xb8, 0xa8, 0x86, 0xa6, 0x95, 0xdb, 0x51, 0x2b, 0x10, 0xd9, + 0x87, 0xcd, 0xef, 0x82, 0x27, 0x9e, 0x48, 0x68, 0xa7, 0x57, 0xed, 0xd7, 0xdd, 0x0d, 0xb3, 0xfd, + 0x98, 0x98, 0x97, 0xa9, 0x78, 0x32, 0xa5, 0x3b, 0xf6, 0x65, 0x9a, 0x35, 0xe9, 0xc2, 0x96, 0xc0, + 0xb1, 0xf0, 0x63, 0x4a, 0x7a, 0x95, 0xfe, 0x96, 0xbb, 0xd8, 0x93, 0x97, 0xd0, 0xc6, 0xd7, 0x1a, + 0xb2, 0x88, 0x27, 0x1c, 0xc7, 0x69, 0x17, 0xfb, 0xa2, 0xb7, 0xfa, 0x32, 0x66, 0x47, 0x2c, 0x72, + 0x5b, 0xda, 0x2e, 0x72, 0x3e, 0x79, 0x00, 0x4d, 0xe9, 0x6b, 0xe6, 0xc5, 0x7c, 0xc6, 0x35, 0x0b, + 0xe9, 0x1e, 0x96, 0x68, 0x18, 0xec, 0xcc, 0x42, 0xdd, 0x37, 0xd0, 0x59, 0x3d, 0x40, 0x72, 0x04, + 0x35, 0x7b, 0xfe, 0x95, 0xdf, 0xb9, 0x3f, 0xcb, 0xed, 0x5e, 0xc2, 0xde, 0x5d, 0x13, 0x4a, 0x3a, + 0x50, 0x9d, 0xb2, 0x79, 0xfe, 0xa9, 0x98, 0x25, 0x79, 0x0c, 0x35, 0x1c, 0x29, 0xfc, 0x45, 0x1a, + 0xc3, 0xbf, 0x1d, 0xfb, 0x4d, 0x39, 0xc5, 0x37, 0xe5, 0xe0, 0x64, 0xb9, 0x96, 0x74, 0xbc, 0xf6, + 0xbc, 0xd2, 0x4d, 0x81, 0xdc, 0x1e, 0x81, 0x3b, 0x32, 0x8f, 0x6e, 0x66, 0xfe, 0xd3, 0x87, 0xb3, + 0xac, 0x78, 0xc2, 0xe1, 0x9f, 0x40, 0xcc, 0x9c, 0x84, 0xe9, 0x28, 0xe6, 0xd7, 0xab, 0x79, 0x4e, + 0x5a, 0x65, 0xed, 0xf9, 0xf8, 0xf2, 0x78, 0xc2, 0xf5, 0xb7, 0x6c, 0xec, 0x04, 0x62, 0x36, 0xc8, + 0x35, 0x83, 0x85, 0x66, 0x10, 0xc4, 0x9c, 0x25, 0x7a, 0x30, 0x11, 0x13, 0x99, 0x06, 0x25, 0x1c, + 0x7f, 0xef, 0xf1, 0x06, 0xa6, 0x3c, 0xfa, 0x15, 0x00, 0x00, 0xff, 0xff, 0xa9, 0x25, 0x52, 0x75, + 0x0d, 0x06, 0x00, 0x00, +} diff --git 
a/client/gogrpc/conductor/worker.go b/client/gogrpc/conductor/worker.go
new file mode 100644
index 0000000000..63a67fb8bd
--- /dev/null
+++ b/client/gogrpc/conductor/worker.go
@@ -0,0 +1,177 @@
+package conductor
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"runtime"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/netflix/conductor/client/gogrpc/conductor/grpc/tasks"
+	"github.com/netflix/conductor/client/gogrpc/conductor/model"
+)
+
+// An Executor executes the logic required to resolve a task. Each Worker
+// instance uses an Executor to run the polled tasks.
+type Executor interface {
+	// Execute attempts to resolve the given Task and returns a TaskResult
+	// with its output. The given Context carries a deadline which must be
+	// enforced by the implementation.
+	// This function will be called by the Worker for each incoming Task,
+	// and must be threadsafe as it can be called by several goroutines
+	// concurrently.
+	Execute(context.Context, *model.Task) (*model.TaskResult, error)
+
+	// ConnectionError is called by a Worker whenever there's an error with
+	// a GRPC connection. The GRPC error is passed in as its only argument.
+	// If this function returns nil, the Worker will continue retrying the
+	// connection; if it returns a non-nil error, the Worker will stop its
+	// execution and return the given error as the result of the Worker.Run
+	// function.
+	ConnectionError(error) error
+}
+
+// A Worker uses a TasksClient to poll the Conductor server for new tasks and
+// executes them using an Executor instance, returning the result of each task
+// to the upstream server.
+// The Worker struct must be created manually with the desired settings and then
+// run with Worker.Run.
+// Client implementations usually run a single Worker per process, or one worker
+// per task type if a process needs to execute tasks of different types. The
+// Concurrency field allows the worker to execute tasks concurrently in several
+// goroutines.
+type Worker struct {
+	// TaskType is the identifier for the type of tasks that this worker can
+	// execute. It is sent to Conductor when polling for new tasks.
+	TaskType string
+
+	// TaskTimeout is the total duration that a task will be executed for. This
+	// includes the time required to poll, execute and return the task's results.
+	// If not set, tasks will not time out.
+	TaskTimeout time.Duration
+
+	// Identifier is a unique identifier for this worker. If not set, it defaults
+	// to the local hostname.
+	Identifier string
+
+	// Concurrency is the number of goroutines that will poll for tasks and execute
+	// them concurrently. If not set, it defaults to GOMAXPROCS, a sensible default.
+	Concurrency int
+
+	// Executor is an instance of an Executor that will actually run the logic required
+	// for each task. See conductor.Executor.
+	Executor Executor
+
+	// Client is an instance of a conductor.Client that implements a Task service.
+	// See conductor.Client
+	Client TasksClient
+
+	waitThreads  sync.WaitGroup
+	active       int32 // atomic
+	shutdown     chan struct{}
+	shutdownFlag sync.Once
+	result       error
+}
+
+// Run executes the main loop of the Worker, spawning several goroutines to poll and
+// resolve tasks from a Conductor server.
+// This is a blocking call that will not return until Worker.Shutdown is called from
+// another goroutine. When shutting down cleanly, this function returns nil; otherwise
+// an error is returned if there's been a problem with the GRPC connection and the Worker
+// cannot continue running.
+func (worker *Worker) Run() error {
+	if worker.TaskType == "" {
+		return fmt.Errorf("conductor: missing field 'TaskType'")
+	}
+	if worker.Executor == nil {
+		return fmt.Errorf("conductor: missing field 'Executor'")
+	}
+	if worker.Client == nil {
+		return fmt.Errorf("conductor: missing field 'Client'")
+	}
+	if worker.Identifier == "" {
+		hostname, err := os.Hostname()
+		if err != nil {
+			return err
+		}
+		worker.Identifier = fmt.Sprintf("%s (conductor-go)", hostname)
+	}
+	if worker.Concurrency == 0 {
+		worker.Concurrency = runtime.GOMAXPROCS(0)
+	}
+
+	worker.active = 0
+	worker.result = nil
+	worker.shutdown = make(chan struct{})
+	worker.waitThreads.Add(worker.Concurrency)
+
+	for i := 0; i < worker.Concurrency; i++ {
+		go worker.thread()
+	}
+
+	worker.waitThreads.Wait()
+	return worker.result
+}
+
+// Shutdown stops this worker gracefully. This function is thread-safe and may
+// be called from any goroutine. Only the first call to Shutdown will have
+// an effect.
+func (worker *Worker) Shutdown() {
+	worker.shutdownOnce(nil)
+}
+
+func (worker *Worker) shutdownOnce(err error) {
+	worker.shutdownFlag.Do(func() {
+		worker.result = err
+		close(worker.shutdown)
+		worker.waitThreads.Wait()
+		worker.Client.Shutdown()
+	})
+}
+
+func (worker *Worker) onError(err error) {
+	userErr := worker.Executor.ConnectionError(err)
+	if userErr != nil {
+		worker.shutdownOnce(userErr)
+	}
+}
+
+func (worker *Worker) runTask(req *tasks.PollRequest) error {
+	// Only enforce a deadline when TaskTimeout is set; a zero timeout would
+	// otherwise produce an already-expired context.
+	ctx := context.Background()
+	if worker.TaskTimeout > 0 {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, worker.TaskTimeout)
+		defer cancel()
+	}
+
+	task, err := worker.Client.Tasks().Poll(ctx, req)
+	if err != nil {
+		return err
+	}
+
+	result, err := worker.Executor.Execute(ctx, task.Task)
+	// TODO: what if the task failed?
+	if err == nil {
+		request := tasks.UpdateTaskRequest{Result: result}
+		_, err := worker.Client.Tasks().UpdateTask(context.Background(), &request)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (worker *Worker) thread() {
+	defer worker.waitThreads.Done()
+
+	pollRequest := &tasks.PollRequest{
+		TaskType: worker.TaskType,
+		WorkerId: worker.Identifier,
+	}
+
+	// Keep polling and executing tasks until the shutdown channel is closed.
+	for {
+		select {
+		case <-worker.shutdown:
+			return
+		default:
+			atomic.AddInt32(&worker.active, 1)
+			err := worker.runTask(pollRequest)
+			if err != nil {
+				worker.onError(err)
+			}
+			atomic.AddInt32(&worker.active, -1)
+		}
+	}
+}
diff --git a/client/gogrpc/conductor/worker_test.go b/client/gogrpc/conductor/worker_test.go
new file mode 100644
index 0000000000..152197c2e9
--- /dev/null
+++ b/client/gogrpc/conductor/worker_test.go
@@ -0,0 +1,179 @@
+package conductor
+
+import (
+	"context"
+	"flag"
+	"fmt"
+	"io"
+	"math/rand"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/golang/protobuf/ptypes/empty"
+	pb "github.com/netflix/conductor/client/gogrpc/conductor/grpc"
+	"github.com/netflix/conductor/client/gogrpc/conductor/model"
+	"google.golang.org/grpc"
+
+	"github.com/stretchr/testify/assert"
+)
+
+var doTrace = flag.Bool("dotrace", false, "print tracing information")
+
+func trace(format string, args ...interface{}) {
+	if *doTrace {
+		fmt.Printf(format, args...)
+ } +} + +type fakeTaskService struct { + latency time.Duration + shutdown chan struct{} + mu sync.Mutex + completed map[string]bool + result error +} + +func randomTaskID() string { + return fmt.Sprintf("task-%08x", rand.Int63()) +} + +var ErrNotImplemented = fmt.Errorf("API call not implemented") + +func (s *fakeTaskService) newTask(req *pb.PollRequest) (*model.Task, error) { + id := randomTaskID() + + s.mu.Lock() + s.completed[id] = false + s.mu.Unlock() + + return &model.Task{ + TaskType: req.GetTaskType(), + Status: model.Task_SCHEDULED, + TaskId: id, + }, nil +} + +func (s *fakeTaskService) updateTask(res *model.TaskResult) (*pb.TaskUpdateResponse, error) { + id := res.GetTaskId() + + s.mu.Lock() + if _, found := s.completed[id]; !found { + panic("missing task: " + id) + } + s.completed[id] = true + s.mu.Unlock() + + return &pb.TaskUpdateResponse{ + TaskId: id, + }, nil +} + +func (s *fakeTaskService) Poll(ctx context.Context, in *pb.PollRequest, opts ...grpc.CallOption) (*model.Task, error) { + select { + case <-time.After(s.latency): + return s.newTask(in) + case <-s.shutdown: + return nil, s.result + } +} +func (s *fakeTaskService) PollStream(ctx context.Context, opts ...grpc.CallOption) (pb.TaskService_PollStreamClient, error) { + return nil, ErrNotImplemented +} +func (s *fakeTaskService) GetTasksInProgress(ctx context.Context, in *pb.TasksInProgressRequest, opts ...grpc.CallOption) (*pb.TasksInProgressResponse, error) { + return nil, ErrNotImplemented +} +func (s *fakeTaskService) GetPendingTaskForWorkflow(ctx context.Context, in *pb.PendingTaskRequest, opts ...grpc.CallOption) (*model.Task, error) { + return nil, ErrNotImplemented +} +func (s *fakeTaskService) UpdateTask(ctx context.Context, in *model.TaskResult, opts ...grpc.CallOption) (*pb.TaskUpdateResponse, error) { + select { + case <-time.After(s.latency): + return s.updateTask(in) + case <-s.shutdown: + return nil, s.result + } +} +func (s *fakeTaskService) AckTask(ctx context.Context, in *pb.AckTaskRequest, opts ...grpc.CallOption) (*pb.AckTaskResponse, error) { + return nil, ErrNotImplemented +} +func (s *fakeTaskService) AddLog(ctx context.Context, in *pb.AddLogRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + return nil, ErrNotImplemented +} +func (s *fakeTaskService) GetLogs(ctx context.Context, in *pb.TaskId, opts ...grpc.CallOption) (*pb.GetLogsResponse, error) { + return nil, ErrNotImplemented +} + +type fakeTaskClient struct { + tasks *fakeTaskService +} + +func (c *fakeTaskClient) Tasks() pb.TaskServiceClient { + return c.tasks +} + +func (c *fakeTaskClient) forceShutdown(err error) { + c.tasks.result = err + close(c.tasks.shutdown) +} + +func (c *fakeTaskClient) Shutdown() { + c.tasks.result = io.EOF + close(c.tasks.shutdown) +} + +func newFakeTaskClient(latency time.Duration) *fakeTaskClient { + return &fakeTaskClient{ + tasks: &fakeTaskService{ + shutdown: make(chan struct{}), + latency: latency, + }, + } +} + +type slowExecutor struct { + mu sync.Mutex + recv []*model.Task + delay time.Duration +} + +func (exe *slowExecutor) Execute(ctx context.Context, m *model.Task) (*model.TaskResult, error) { + exe.mu.Lock() + exe.recv = append(exe.recv, m) + exe.mu.Unlock() + + time.Sleep(exe.delay) + return &model.TaskResult{ + TaskId: m.GetTaskId(), + Status: model.TaskResult_COMPLETED, + }, nil +} + +func (exe *slowExecutor) ConnectionError(err error) error { + panic(err) +} + +func TestWorkerInterface(t *testing.T) { + mock := newFakeTaskClient(200 * time.Millisecond) + exec := &slowExecutor{ + delay: 
100 * time.Millisecond, + } + + worker := &Worker{ + TaskType: "fake-task", + Concurrency: 4, + Executor: exec, + Client: mock, + } + + time.AfterFunc(1*time.Second, func() { + worker.Shutdown() + }) + + assert.NoError(t, worker.Run()) + + for id, completed := range mock.tasks.completed { + assert.Truef(t, completed, "task %s was not reported as completed", id) + } + assert.Equal(t, len(mock.tasks.completed), len(exec.recv)) +} diff --git a/client/src/main/java/com/netflix/conductor/client/http/ClientBase.java b/client/src/main/java/com/netflix/conductor/client/http/ClientBase.java index 90684d47a8..3d81fb40ac 100644 --- a/client/src/main/java/com/netflix/conductor/client/http/ClientBase.java +++ b/client/src/main/java/com/netflix/conductor/client/http/ClientBase.java @@ -16,8 +16,6 @@ package com.netflix.conductor.client.http; -import com.fasterxml.jackson.annotation.JsonInclude.Include; -import com.fasterxml.jackson.databind.DeserializationFeature; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.jaxrs.json.JacksonJsonProvider; import com.google.common.base.Preconditions; @@ -27,6 +25,7 @@ import com.netflix.conductor.client.exceptions.ErrorResponse; import com.netflix.conductor.common.run.ExternalStorageLocation; import com.netflix.conductor.common.utils.ExternalPayloadStorage; +import com.netflix.conductor.common.utils.JsonMapperProvider; import com.sun.jersey.api.client.Client; import com.sun.jersey.api.client.ClientHandler; import com.sun.jersey.api.client.ClientHandlerException; @@ -80,12 +79,7 @@ protected ClientBase(ClientConfig config, ClientHandler handler) { } protected ClientBase(ClientConfig config, ConductorClientConfiguration clientConfiguration, ClientHandler handler) { - objectMapper = new ObjectMapper(); - objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); - objectMapper.configure(DeserializationFeature.FAIL_ON_IGNORED_PROPERTIES, false); - objectMapper.configure(DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES, false); - objectMapper.setSerializationInclusion(Include.NON_NULL); - objectMapper.setSerializationInclusion(Include.NON_EMPTY); + objectMapper = new JsonMapperProvider().get(); JacksonJsonProvider provider = new JacksonJsonProvider(objectMapper); config.getSingletons().add(provider); diff --git a/client/src/main/java/com/netflix/conductor/client/http/WorkflowClient.java b/client/src/main/java/com/netflix/conductor/client/http/WorkflowClient.java index 096d901c91..83cb51bf55 100644 --- a/client/src/main/java/com/netflix/conductor/client/http/WorkflowClient.java +++ b/client/src/main/java/com/netflix/conductor/client/http/WorkflowClient.java @@ -247,7 +247,7 @@ public List getWorkflows(String name, String correlationId, boolean in */ private void populateWorkflowOutput(Workflow workflow) { if (StringUtils.isNotBlank(workflow.getExternalOutputPayloadStoragePath())) { - WorkflowTaskMetrics.incrementExternalPayloadUsedCount(workflow.getWorkflowType(), ExternalPayloadStorage.Operation.READ.name(), ExternalPayloadStorage.PayloadType.WORKFLOW_OUTPUT.name()); + WorkflowTaskMetrics.incrementExternalPayloadUsedCount(workflow.getWorkflowName(), ExternalPayloadStorage.Operation.READ.name(), ExternalPayloadStorage.PayloadType.WORKFLOW_OUTPUT.name()); workflow.setOutput(downloadFromExternalStorage(ExternalPayloadStorage.PayloadType.WORKFLOW_OUTPUT, workflow.getExternalOutputPayloadStoragePath())); } } diff --git a/client/src/test/java/com/netflix/conductor/client/metadata/workflow/TestWorkflowTask.java 
b/client/src/test/java/com/netflix/conductor/client/metadata/workflow/TestWorkflowTask.java index 5dd82164d6..dbd4868036 100644 --- a/client/src/test/java/com/netflix/conductor/client/metadata/workflow/TestWorkflowTask.java +++ b/client/src/test/java/com/netflix/conductor/client/metadata/workflow/TestWorkflowTask.java @@ -17,11 +17,11 @@ import static org.junit.Assert.*; +import com.netflix.conductor.common.metadata.workflow.TaskType; import org.junit.Test; import com.fasterxml.jackson.databind.ObjectMapper; import com.netflix.conductor.common.metadata.workflow.WorkflowTask; -import com.netflix.conductor.common.metadata.workflow.WorkflowTask.Type; /** * @@ -45,7 +45,7 @@ public void test() throws Exception { assertEquals(task.getType(), read.getType()); task = new WorkflowTask(); - task.setWorkflowTaskType(Type.SUB_WORKFLOW); + task.setWorkflowTaskType(TaskType.SUB_WORKFLOW); task.setName("name"); json = om.writeValueAsString(task); @@ -54,7 +54,7 @@ public void test() throws Exception { assertNotNull(read); assertEquals(task.getName(), read.getName()); assertEquals(task.getType(), read.getType()); - assertEquals(Type.SUB_WORKFLOW.name(), read.getType()); + assertEquals(TaskType.SUB_WORKFLOW.name(), read.getType()); } } diff --git a/common/build.gradle b/common/build.gradle index 281bedba4d..fc0b019e11 100644 --- a/common/build.gradle +++ b/common/build.gradle @@ -1,4 +1,33 @@ +buildscript { + repositories { + jcenter() + } + dependencies { + classpath "com.github.vmg.protogen:protogen-codegen:${revProtogenCodegen}" + } +} + dependencies { compile "com.github.rholder:guava-retrying:${revGuavaRetrying}" compile "org.slf4j:slf4j-api:${revSlf4j}" -} \ No newline at end of file + compile "com.google.protobuf:protobuf-java:${revProtoBuf}" + compile "com.fasterxml.jackson.core:jackson-databind:${revJacksonDatabind}" + compile "com.fasterxml.jackson.core:jackson-core:${revJacksonCore}" + compile "javax.inject:javax.inject:${revJavaxInject}" + compile "com.github.vmg.protogen:protogen-annotations:${revProtogenAnnotations}" +} + +import com.github.vmg.protogen.ProtoGenTask; + +task protogen(dependsOn: jar, type: ProtoGenTask) { + protoPackage = "conductor.proto" + javaPackage = "com.netflix.conductor.proto" + goPackage = "github.com/netflix/conductor/client/gogrpc/conductor/model" + + protosDir = new File("${rootDir}/grpc/src/main/proto") + mapperDir = new File("${rootDir}/grpc/src/main/java/com/netflix/conductor/grpc") + mapperPackage = "com.netflix.conductor.grpc"; + + sourceJar = jar.archivePath + sourcePackage = "com.netflix.conductor.common" +} diff --git a/common/src/main/java/com/netflix/conductor/common/metadata/events/EventExecution.java b/common/src/main/java/com/netflix/conductor/common/metadata/events/EventExecution.java index e034cd7146..43f96d241b 100644 --- a/common/src/main/java/com/netflix/conductor/common/metadata/events/EventExecution.java +++ b/common/src/main/java/com/netflix/conductor/common/metadata/events/EventExecution.java @@ -21,36 +21,47 @@ import java.util.HashMap; import java.util.Map; +import com.github.vmg.protogen.annotations.*; import com.netflix.conductor.common.metadata.events.EventHandler.Action; /** * @author Viren * */ +@ProtoMessage public class EventExecution { + @ProtoEnum public enum Status { IN_PROGRESS, COMPLETED, FAILED, SKIPPED } - + + @ProtoField(id = 1) private String id; - + + @ProtoField(id = 2) private String messageId; - + + @ProtoField(id = 3) private String name; - + + @ProtoField(id = 4) private String event; - + + @ProtoField(id = 5) private 
long created; - + + @ProtoField(id = 6) private Status status; - + + @ProtoField(id = 7) private Action.Type action; - + + @ProtoField(id = 8) private Map output = new HashMap<>(); public EventExecution() { - + } public EventExecution(String id, String messageId) { diff --git a/common/src/main/java/com/netflix/conductor/common/metadata/events/EventHandler.java b/common/src/main/java/com/netflix/conductor/common/metadata/events/EventHandler.java index 775e6cbfe2..0f1216011c 100644 --- a/common/src/main/java/com/netflix/conductor/common/metadata/events/EventHandler.java +++ b/common/src/main/java/com/netflix/conductor/common/metadata/events/EventHandler.java @@ -18,6 +18,9 @@ */ package com.netflix.conductor.common.metadata.events; +import com.google.protobuf.Any; +import com.github.vmg.protogen.annotations.*; + import java.util.HashMap; import java.util.LinkedList; import java.util.List; @@ -27,16 +30,22 @@ * @author Viren * Defines an event handler */ +@ProtoMessage public class EventHandler { + @ProtoField(id = 1) private String name; - + + @ProtoField(id = 2) private String event; - + + @ProtoField(id = 3) private String condition; + @ProtoField(id = 4) private List actions = new LinkedList<>(); - + + @ProtoField(id = 5) private boolean active; public EventHandler() { @@ -119,19 +128,26 @@ public void setActive(boolean active) { } + @ProtoMessage public static class Action { - - public enum Type { start_workflow, complete_task, fail_task } - + + @ProtoEnum + public enum Type { START_WORKFLOW, COMPLETE_TASK, FAIL_TASK } + + @ProtoField(id = 1) private Type action; - - private StartWorkflow start_workflow; - - private TaskDetails complete_task; - - private TaskDetails fail_task; - - private boolean expandInlineJSON; + + @ProtoField(id = 2) + private StartWorkflow startWorkflow; + + @ProtoField(id = 3) + private TaskDetails completeTask; + + @ProtoField(id = 4) + private TaskDetails failTask; + + @ProtoField(id = 5) + private boolean expandInlineJson; /** * @return the action @@ -149,75 +165,82 @@ public void setAction(Type action) { } /** - * @return the start_workflow + * @return the startWorkflow */ - public StartWorkflow getStart_workflow() { - return start_workflow; + public StartWorkflow getStartWorkflow() { + return startWorkflow; } /** - * @param start_workflow the start_workflow to set + * @param startWorkflow the startWorkflow to set * */ - public void setStart_workflow(StartWorkflow start_workflow) { - this.start_workflow = start_workflow; + public void setStartWorkflow(StartWorkflow startWorkflow) { + this.startWorkflow = startWorkflow; } /** - * @return the complete_task + * @return the completeTask */ - public TaskDetails getComplete_task() { - return complete_task; + public TaskDetails getCompleteTask() { + return completeTask; } /** - * @param complete_task the complete_task to set + * @param completeTask the completeTask to set * */ - public void setComplete_task(TaskDetails complete_task) { - this.complete_task = complete_task; + public void setCompleteTask(TaskDetails completeTask) { + this.completeTask = completeTask; } /** - * @return the fail_task + * @return the failTask */ - public TaskDetails getFail_task() { - return fail_task; + public TaskDetails getFailTask() { + return failTask; } /** - * @param fail_task the fail_task to set + * @param failTask the failTask to set * */ - public void setFail_task(TaskDetails fail_task) { - this.fail_task = fail_task; + public void setFailTask(TaskDetails failTask) { + this.failTask = failTask; } /** * - * @param 
expandInlineJSON when set to true, the in-lined JSON strings are expanded to a full json document + * @param expandInlineJson when set to true, the in-lined JSON strings are expanded to a full json document */ - public void setExpandInlineJSON(boolean expandInlineJSON) { - this.expandInlineJSON = expandInlineJSON; + public void setExpandInlineJson(boolean expandInlineJson) { + this.expandInlineJson = expandInlineJson; } /** * * @return true if the json strings within the payload should be expanded. */ - public boolean isExpandInlineJSON() { - return expandInlineJSON; + public boolean isExpandInlineJson() { + return expandInlineJson; } } - + + @ProtoMessage public static class TaskDetails { - + + @ProtoField(id = 1) private String workflowId; - + + @ProtoField(id = 2) private String taskRefName; - + + @ProtoField(id = 3) private Map output = new HashMap<>(); + @ProtoField(id = 4) + private Any outputMessage; + /** * @return the workflowId */ @@ -262,21 +285,34 @@ public Map getOutput() { public void setOutput(Map output) { this.output = output; } - - - + + public Any getOutputMessage() { + return outputMessage; + } + + public void setOutputMessage(Any outputMessage) { + this.outputMessage = outputMessage; + } } - + + @ProtoMessage public static class StartWorkflow { - + + @ProtoField(id = 1) private String name; - + + @ProtoField(id = 2) private Integer version; - + + @ProtoField(id = 3) private String correlationId; - + + @ProtoField(id = 4) private Map input = new HashMap<>(); + @ProtoField(id = 5) + private Any inputMessage; + /** * @return the name */ @@ -337,8 +373,14 @@ public Map getInput() { public void setInput(Map input) { this.input = input; } - - + + public Any getInputMessage() { + return inputMessage; + } + + public void setInputMessage(Any inputMessage) { + this.inputMessage = inputMessage; + } } } diff --git a/common/src/main/java/com/netflix/conductor/common/metadata/tasks/PollData.java b/common/src/main/java/com/netflix/conductor/common/metadata/tasks/PollData.java index 7b8e0bc06b..5d8a135297 100644 --- a/common/src/main/java/com/netflix/conductor/common/metadata/tasks/PollData.java +++ b/common/src/main/java/com/netflix/conductor/common/metadata/tasks/PollData.java @@ -1,5 +1,7 @@ package com.netflix.conductor.common.metadata.tasks; +import com.github.vmg.protogen.annotations.*; + /** * Copyright 2016 Netflix, Inc. * @@ -15,10 +17,18 @@ * See the License for the specific language governing permissions and * limitations under the License. 
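For orientation on the annotation pattern that PollData and the rest of the model classes in this patch follow: @ProtoMessage marks a POJO that the protogen task configured in common/build.gradle should mirror as a protobuf message (under conductor.proto / com.netflix.conductor.proto, with mapper code in com.netflix.conductor.grpc), and each @ProtoField(id = n) supplies that property's protobuf field number. Because those numbers become part of the gRPC wire format, retired ids are commented out rather than reused (see Task id 31 and TaskDef id 13 further down). A minimal hypothetical sketch of the annotation usage, not part of this patch:

import com.github.vmg.protogen.annotations.ProtoField;
import com.github.vmg.protogen.annotations.ProtoMessage;

// Hypothetical class, for illustration only; protogen would emit a matching
// "message ExampleRecord" with field numbers taken from the ids below.
@ProtoMessage
public class ExampleRecord {

    @ProtoField(id = 1)   // becomes field number 1 in the generated .proto
    private String name;

    @ProtoField(id = 2)   // becomes field number 2; never renumber once released
    private long createdTime;

    public String getName() { return name; }
    public void setName(String name) { this.name = name; }
    public long getCreatedTime() { return createdTime; }
    public void setCreatedTime(long createdTime) { this.createdTime = createdTime; }
}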
*/ +@ProtoMessage public class PollData { + @ProtoField(id = 1) String queueName; + + @ProtoField(id = 2) String domain; + + @ProtoField(id = 3) String workerId; + + @ProtoField(id = 4) long lastPollTime; public PollData() { @@ -98,6 +108,14 @@ public synchronized boolean equals(Object obj) { return false; return true; } - - + + @Override + public String toString() { + return "PollData{" + + "queueName='" + queueName + '\'' + + ", domain='" + domain + '\'' + + ", workerId='" + workerId + '\'' + + ", lastPollTime=" + lastPollTime + + '}'; + } } diff --git a/common/src/main/java/com/netflix/conductor/common/metadata/tasks/Task.java b/common/src/main/java/com/netflix/conductor/common/metadata/tasks/Task.java index 7455398254..fa0ecae2b0 100644 --- a/common/src/main/java/com/netflix/conductor/common/metadata/tasks/Task.java +++ b/common/src/main/java/com/netflix/conductor/common/metadata/tasks/Task.java @@ -15,13 +15,21 @@ */ package com.netflix.conductor.common.metadata.tasks; +import com.github.vmg.protogen.annotations.ProtoEnum; +import com.github.vmg.protogen.annotations.ProtoField; +import com.github.vmg.protogen.annotations.ProtoMessage; +import com.google.protobuf.Any; +import com.netflix.conductor.common.metadata.workflow.TaskType; import com.netflix.conductor.common.metadata.workflow.WorkflowTask; import java.util.HashMap; import java.util.Map; +import java.util.Optional; +@ProtoMessage public class Task { + @ProtoEnum public enum Status { IN_PROGRESS(false, true, true), @@ -60,80 +68,122 @@ public boolean isRetriable() { } } + @ProtoField(id = 1) private String taskType; + @ProtoField(id = 2) private Status status; + @ProtoField(id = 3) private Map inputData = new HashMap<>(); + @ProtoField(id = 4) private String referenceTaskName; + @ProtoField(id = 5) private int retryCount; + @ProtoField(id = 6) private int seq; + @ProtoField(id = 7) private String correlationId; + @ProtoField(id = 8) private int pollCount; + @ProtoField(id = 9) private String taskDefName; /** * Time when the task was scheduled */ + @ProtoField(id = 10) private long scheduledTime; /** * Time when the task was first polled */ + @ProtoField(id = 11) private long startTime; /** * Time when the task completed executing */ + @ProtoField(id = 12) private long endTime; /** * Time when the task was last updated */ + @ProtoField(id = 13) private long updateTime; + @ProtoField(id = 14) private int startDelayInSeconds; + @ProtoField(id = 15) private String retriedTaskId; + @ProtoField(id = 16) private boolean retried; + @ProtoField(id = 17) private boolean executed; + @ProtoField(id = 18) private boolean callbackFromWorker = true; + @ProtoField(id = 19) private int responseTimeoutSeconds; + @ProtoField(id = 20) private String workflowInstanceId; + @ProtoField(id = 21) private String workflowType; + @ProtoField(id = 22) private String taskId; + @ProtoField(id = 23) private String reasonForIncompletion; + @ProtoField(id = 24) private long callbackAfterSeconds; + @ProtoField(id = 25) private String workerId; + @ProtoField(id = 26) private Map outputData = new HashMap<>(); + @ProtoField(id = 27) private WorkflowTask workflowTask; + @ProtoField(id = 28) private String domain; + @ProtoField(id = 29) + private Any inputMessage; + + @ProtoField(id = 30) + private Any outputMessage; + + // This field is deprecated, do not reuse id 31. 
+ //@ProtoField(id = 31) + //private int rateLimitPerSecond; + + @ProtoField(id = 32) private int rateLimitPerFrequency; + @ProtoField(id = 33) private int rateLimitFrequencyInSeconds; + @ProtoField(id = 34) private String externalInputPayloadStoragePath; + @ProtoField(id = 35) private String externalOutputPayloadStoragePath; public Task() { @@ -141,7 +191,7 @@ public Task() { /** * @return Type of the task - * @see WorkflowTask.Type + * @see TaskType */ public String getTaskType() { return taskType; @@ -547,14 +597,38 @@ public void setDomain(String domain) { this.domain = domain; } - public int getRateLimitPerFrequency() { - return rateLimitPerFrequency; + public Any getInputMessage() { + return inputMessage; + } + + public void setInputMessage(Any inputMessage) { + this.inputMessage = inputMessage; } public void setRateLimitPerFrequency(int rateLimitPerFrequency) { this.rateLimitPerFrequency = rateLimitPerFrequency; } + public Any getOutputMessage() { + return outputMessage; + } + + public void setOutputMessage(Any outputMessage) { + this.outputMessage = outputMessage; + } + + /** + * @return {@link Optional} containing the task definition if available + */ + public Optional getTaskDefinition() { + return Optional.ofNullable(this.getWorkflowTask()) + .map(workflowTask -> workflowTask.getTaskDefinition()); + } + + public int getRateLimitPerFrequency() { + return rateLimitPerFrequency; + } + public int getRateLimitFrequencyInSeconds() { return rateLimitFrequencyInSeconds; } @@ -612,10 +686,13 @@ public Task copy() { copy.setWorkerId(workerId); copy.setWorkflowTask(workflowTask); copy.setDomain(domain); + copy.setInputMessage(inputMessage); + copy.setOutputMessage(outputMessage); copy.setRateLimitPerFrequency(rateLimitPerFrequency); copy.setRateLimitFrequencyInSeconds(rateLimitFrequencyInSeconds); copy.setExternalInputPayloadStoragePath(externalInputPayloadStoragePath); copy.setExternalOutputPayloadStoragePath(externalOutputPayloadStoragePath); + return copy; } @@ -651,6 +728,8 @@ public String toString() { ", outputData=" + outputData + ", workflowTask=" + workflowTask + ", domain='" + domain + '\'' + + ", inputMessage='" + inputMessage + '\'' + + ", outputMessage='" + outputMessage + '\'' + ", rateLimitPerFrequency=" + rateLimitPerFrequency + ", rateLimitFrequencyInSeconds=" + rateLimitFrequencyInSeconds + ", externalInputPayloadStoragePath='" + externalInputPayloadStoragePath + '\'' + diff --git a/common/src/main/java/com/netflix/conductor/common/metadata/tasks/TaskDef.java b/common/src/main/java/com/netflix/conductor/common/metadata/tasks/TaskDef.java index 30e2eea075..4a372a2898 100644 --- a/common/src/main/java/com/netflix/conductor/common/metadata/tasks/TaskDef.java +++ b/common/src/main/java/com/netflix/conductor/common/metadata/tasks/TaskDef.java @@ -18,59 +18,80 @@ */ package com.netflix.conductor.common.metadata.tasks; +import com.github.vmg.protogen.annotations.ProtoEnum; +import com.github.vmg.protogen.annotations.ProtoField; +import com.github.vmg.protogen.annotations.ProtoMessage; +import com.netflix.conductor.common.metadata.Auditable; + import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; -import com.netflix.conductor.common.metadata.Auditable; - /** * @author Viren * Defines a workflow task definition */ +@ProtoMessage public class TaskDef extends Auditable { + @ProtoEnum + public static enum TimeoutPolicy {RETRY, TIME_OUT_WF, ALERT_ONLY} + @ProtoEnum + public static enum RetryLogic {FIXED, 
EXPONENTIAL_BACKOFF} - - public enum TimeoutPolicy {RETRY, TIME_OUT_WF, ALERT_ONLY;} - - - public enum RetryLogic {FIXED, EXPONENTIAL_BACKOFF;} private static final int ONE_HOUR = 60 * 60; /** * Unique name identifying the task. The name is unique across */ + @ProtoField(id = 1) private String name; + @ProtoField(id = 2) private String description; + @ProtoField(id = 3) private int retryCount = 3; // Default + @ProtoField(id = 4) private long timeoutSeconds; + @ProtoField(id = 5) private List inputKeys = new ArrayList(); + @ProtoField(id = 6) private List outputKeys = new ArrayList(); + @ProtoField(id = 7) private TimeoutPolicy timeoutPolicy = TimeoutPolicy.TIME_OUT_WF; + @ProtoField(id = 8) private RetryLogic retryLogic = RetryLogic.FIXED; + @ProtoField(id = 9) private int retryDelaySeconds = 60; + @ProtoField(id = 10) private int responseTimeoutSeconds = ONE_HOUR; + @ProtoField(id = 11) private Integer concurrentExecLimit; + @ProtoField(id = 12) + private Map inputTemplate = new HashMap<>(); + + // This field is deprecated, do not use id 13. +// @ProtoField(id = 13) +// private Integer rateLimitPerSecond; + + @ProtoField(id = 14) private Integer rateLimitPerFrequency; + @ProtoField(id = 15) private Integer rateLimitFrequencyInSeconds; - private Map inputTemplate = new HashMap<>(); - public TaskDef() { } diff --git a/common/src/main/java/com/netflix/conductor/common/metadata/tasks/TaskExecLog.java b/common/src/main/java/com/netflix/conductor/common/metadata/tasks/TaskExecLog.java index 092b45ac69..ba98ce2a30 100644 --- a/common/src/main/java/com/netflix/conductor/common/metadata/tasks/TaskExecLog.java +++ b/common/src/main/java/com/netflix/conductor/common/metadata/tasks/TaskExecLog.java @@ -18,16 +18,22 @@ */ package com.netflix.conductor.common.metadata.tasks; +import com.github.vmg.protogen.annotations.*; + /** * @author Viren * Model that represents the task's execution log. */ +@ProtoMessage public class TaskExecLog { - + + @ProtoField(id = 1) private String log; - + + @ProtoField(id = 2) private String taskId; - + + @ProtoField(id = 3) private long createdTime; public TaskExecLog() {} diff --git a/common/src/main/java/com/netflix/conductor/common/metadata/tasks/TaskResult.java b/common/src/main/java/com/netflix/conductor/common/metadata/tasks/TaskResult.java index aecc801e8e..2ec2b9a0d6 100644 --- a/common/src/main/java/com/netflix/conductor/common/metadata/tasks/TaskResult.java +++ b/common/src/main/java/com/netflix/conductor/common/metadata/tasks/TaskResult.java @@ -16,6 +16,9 @@ package com.netflix.conductor.common.metadata.tasks; +import com.google.protobuf.Any; +import com.github.vmg.protogen.annotations.*; + import java.util.HashMap; import java.util.List; import java.util.Map; @@ -26,26 +29,38 @@ * Result of the task execution. 
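The TaskResult defined next (like Task, SkipTaskRequest and the EventHandler actions above) gains a com.google.protobuf.Any field so gRPC clients can carry an already-serialized proto payload alongside, or instead of, the JSON output map. A rough sketch of how a worker might populate it; the helper class and the use of StringValue as the payload are hypothetical stand-ins, not part of this patch:

import com.google.protobuf.Any;
import com.google.protobuf.StringValue;
import com.netflix.conductor.common.metadata.tasks.TaskResult;

// Hypothetical worker-side helper: pack an arbitrary protobuf message into the
// new Any-typed output instead of (or in addition to) the outputData map.
public class AnyOutputExample {
    public static TaskResult completedWithProtoPayload() {
        TaskResult result = new TaskResult();
        result.setStatus(TaskResult.Status.COMPLETED);
        result.setOutputMessage(Any.pack(StringValue.newBuilder().setValue("done").build()));
        return result;
    }
}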
* */ +@ProtoMessage public class TaskResult { + @ProtoEnum public enum Status { IN_PROGRESS, FAILED, FAILED_WITH_TERMINAL_ERROR, COMPLETED, SCHEDULED; //SCHEDULED is added for the backward compatibility and should NOT be used when updating the task result } + @ProtoField(id = 1) private String workflowInstanceId; + @ProtoField(id = 2) private String taskId; + @ProtoField(id = 3) private String reasonForIncompletion; + @ProtoField(id = 4) private long callbackAfterSeconds; + @ProtoField(id = 5) private String workerId; + @ProtoField(id = 6) private Status status; + @ProtoField(id = 7) private Map outputData = new HashMap<>(); + @ProtoField(id = 8) + private Any outputMessage; + private List logs = new CopyOnWriteArrayList<>(); private String externalOutputPayloadStoragePath; @@ -165,6 +180,14 @@ public TaskResult addOutputData(String key, Object value) { return this; } + public Any getOutputMessage() { + return outputMessage; + } + + public void setOutputMessage(Any outputMessage) { + this.outputMessage = outputMessage; + } + /** * * @return Task execution logs @@ -218,6 +241,7 @@ public String toString() { ", workerId='" + workerId + '\'' + ", status=" + status + ", outputData=" + outputData + + ", outputMessage=" + outputMessage + ", logs=" + logs + ", externalOutputPayloadStoragePath='" + externalOutputPayloadStoragePath + '\'' + '}'; diff --git a/common/src/main/java/com/netflix/conductor/common/metadata/workflow/DynamicForkJoinTask.java b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/DynamicForkJoinTask.java index dbbf1dfe6f..655f6bad1a 100644 --- a/common/src/main/java/com/netflix/conductor/common/metadata/workflow/DynamicForkJoinTask.java +++ b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/DynamicForkJoinTask.java @@ -18,15 +18,25 @@ import java.util.HashMap; import java.util.Map; -import com.netflix.conductor.common.metadata.workflow.WorkflowTask.Type; +import com.github.vmg.protogen.annotations.*; +@ProtoMessage public class DynamicForkJoinTask { + @ProtoField(id = 1) private String taskName; + + @ProtoField(id = 2) private String workflowName; + + @ProtoField(id = 3) private String referenceName; + + @ProtoField(id = 4) private Map input = new HashMap<>(); - private String type = Type.SIMPLE.name(); + + @ProtoField(id = 5) + private String type = TaskType.SIMPLE.name(); public DynamicForkJoinTask() { } diff --git a/common/src/main/java/com/netflix/conductor/common/metadata/workflow/DynamicForkJoinTaskList.java b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/DynamicForkJoinTaskList.java index 88aa6355c3..a4e0b98553 100644 --- a/common/src/main/java/com/netflix/conductor/common/metadata/workflow/DynamicForkJoinTaskList.java +++ b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/DynamicForkJoinTaskList.java @@ -15,12 +15,16 @@ */ package com.netflix.conductor.common.metadata.workflow; +import com.github.vmg.protogen.annotations.*; + import java.util.ArrayList; import java.util.List; import java.util.Map; +@ProtoMessage public class DynamicForkJoinTaskList { - + + @ProtoField(id = 1) private List dynamicTasks = new ArrayList(); public void add(String taskName, String workflowName, String referenceName, Map input){ diff --git a/common/src/main/java/com/netflix/conductor/common/metadata/workflow/RerunWorkflowRequest.java b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/RerunWorkflowRequest.java index d6e4a9112c..3da1a7c89b 100644 --- 
a/common/src/main/java/com/netflix/conductor/common/metadata/workflow/RerunWorkflowRequest.java +++ b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/RerunWorkflowRequest.java @@ -15,18 +15,26 @@ */ package com.netflix.conductor.common.metadata.workflow; +import com.github.vmg.protogen.annotations.*; + import java.util.Map; +@ProtoMessage public class RerunWorkflowRequest { + @ProtoField(id = 1) private String reRunFromWorkflowId; + @ProtoField(id = 2) private Map workflowInput; - + + @ProtoField(id = 3) private String reRunFromTaskId; + @ProtoField(id = 4) private Map taskInput; + @ProtoField(id = 5) private String correlationId; public String getReRunFromWorkflowId() { diff --git a/common/src/main/java/com/netflix/conductor/common/metadata/workflow/SkipTaskRequest.java b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/SkipTaskRequest.java index 41cb3afe83..63725d393f 100644 --- a/common/src/main/java/com/netflix/conductor/common/metadata/workflow/SkipTaskRequest.java +++ b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/SkipTaskRequest.java @@ -15,13 +15,25 @@ */ package com.netflix.conductor.common.metadata.workflow; +import com.google.protobuf.Any; +import com.github.vmg.protogen.annotations.*; + import java.util.Map; +@ProtoMessage(toProto = false) public class SkipTaskRequest { + @ProtoField(id = 1) private Map taskInput; - + + @ProtoField(id = 2) private Map taskOutput; + @ProtoField(id = 3) + private Any taskInputMessage; + + @ProtoField(id = 4) + private Any taskOutputMessage; + public Map getTaskInput() { return taskInput; } @@ -37,6 +49,20 @@ public Map getTaskOutput() { public void setTaskOutput(Map taskOutput) { this.taskOutput = taskOutput; } - - + + public Any getTaskInputMessage() { + return taskInputMessage; + } + + public void setTaskInputMessage(Any taskInputMessage) { + this.taskInputMessage = taskInputMessage; + } + + public Any getTaskOutputMessage() { + return taskOutputMessage; + } + + public void setTaskOutputMessage(Any taskOutputMessage) { + this.taskOutputMessage = taskOutputMessage; + } } diff --git a/common/src/main/java/com/netflix/conductor/common/metadata/workflow/StartWorkflowRequest.java b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/StartWorkflowRequest.java index 9157aa3127..786415ee69 100644 --- a/common/src/main/java/com/netflix/conductor/common/metadata/workflow/StartWorkflowRequest.java +++ b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/StartWorkflowRequest.java @@ -1,17 +1,36 @@ package com.netflix.conductor.common.metadata.workflow; +import com.github.vmg.protogen.annotations.ProtoField; +import com.github.vmg.protogen.annotations.ProtoMessage; + import java.util.HashMap; import java.util.Map; +@ProtoMessage public class StartWorkflowRequest { + @ProtoField(id = 1) private String name; + + @ProtoField(id = 2) private Integer version; + + @ProtoField(id = 3) private String correlationId; - private String externalInputPayloadStoragePath; + + @ProtoField(id = 4) private Map input = new HashMap<>(); + + @ProtoField(id = 5) private Map taskToDomain = new HashMap<>(); - - public String getName() { + + @ProtoField(id = 6) + private WorkflowDef workflowDef; + + @ProtoField(id = 7) + private String externalInputPayloadStoragePath; + + + public String getName() { return name; } public void setName(String name) { @@ -73,6 +92,17 @@ public StartWorkflowRequest withTaskToDomain(Map taskToDomain) { this.taskToDomain = taskToDomain; return this; } - - + + 
public WorkflowDef getWorkflowDef() { + return workflowDef; + } + + public void setWorkflowDef(WorkflowDef workflowDef) { + this.workflowDef = workflowDef; + } + + public StartWorkflowRequest withWorkflowDef(WorkflowDef workflowDef) { + this.workflowDef = workflowDef; + return this; + } } diff --git a/common/src/main/java/com/netflix/conductor/common/metadata/workflow/SubWorkflowParams.java b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/SubWorkflowParams.java index 2953ab4bea..cb9a8d99c5 100644 --- a/common/src/main/java/com/netflix/conductor/common/metadata/workflow/SubWorkflowParams.java +++ b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/SubWorkflowParams.java @@ -18,16 +18,20 @@ */ package com.netflix.conductor.common.metadata.workflow; +import com.github.vmg.protogen.annotations.*; + /** * @author Viren * */ +@ProtoMessage public class SubWorkflowParams { + @ProtoField(id = 1) private String name; - //QQ why is this an object ?? - private Object version; + @ProtoField(id = 2) + private Integer version; /** * @return the name @@ -46,14 +50,14 @@ public void setName(String name) { /** * @return the version */ - public Object getVersion() { + public Integer getVersion() { return version; } /** * @param version the version to set */ - public void setVersion(Object version) { + public void setVersion(Integer version) { this.version = version; } diff --git a/common/src/main/java/com/netflix/conductor/common/metadata/workflow/TaskType.java b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/TaskType.java new file mode 100644 index 0000000000..866b3b3ea6 --- /dev/null +++ b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/TaskType.java @@ -0,0 +1,56 @@ +package com.netflix.conductor.common.metadata.workflow; + +import com.github.vmg.protogen.annotations.ProtoEnum; + +import java.util.HashSet; +import java.util.Set; + +@ProtoEnum +public enum TaskType { + + SIMPLE(true), + DYNAMIC(true), + FORK_JOIN(true), + FORK_JOIN_DYNAMIC(true), + DECISION(true), + JOIN(true), + SUB_WORKFLOW(true), + EVENT(true), + WAIT(true), + USER_DEFINED(false); + + /** + * TaskType constants representing each of the possible enumeration values. + * Motivation: to not have any hardcoded/inline strings used in the code. + * Example of use: CoreModule + */ + public static final String TASK_TYPE_DECISION = "DECISION"; + public static final String TASK_TYPE_DYNAMIC = "DYNAMIC"; + public static final String TASK_TYPE_JOIN = "JOIN"; + public static final String TASK_TYPE_FORK_JOIN_DYNAMIC = "FORK_JOIN_DYNAMIC"; + public static final String TASK_TYPE_EVENT = "EVENT"; + public static final String TASK_TYPE_WAIT = "WAIT"; + public static final String TASK_TYPE_SUB_WORKFLOW = "SUB_WORKFLOW"; + public static final String TASK_TYPE_FORK_JOIN = "FORK_JOIN"; + public static final String TASK_TYPE_USER_DEFINED = "USER_DEFINED"; + public static final String TASK_TYPE_SIMPLE = "SIMPLE"; + + private boolean isSystemTask; + + TaskType(boolean isSystemTask) { + this.isSystemTask = isSystemTask; + } + + /* + * TODO: Update code to use only enums rather than Strings. + * This method is only used as a helper until the transition is done. 
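The isSystemTask(String) helper defined just below this comment is what lets existing call sites keep passing task-type strings around while the nested WorkflowTask.Type enum is retired in favour of this shared TaskType enum (see the WorkflowTask and client test changes elsewhere in the patch). A small sketch of the migrated call pattern; the snippet itself is hypothetical and not part of the patch:

import com.netflix.conductor.common.metadata.workflow.TaskType;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;

// Hypothetical call site after the rename: the enum constants and the string helper
// now live on TaskType instead of the removed WorkflowTask.Type.
public class TaskTypeExample {
    public static void main(String[] args) {
        WorkflowTask subWorkflow = new WorkflowTask();
        subWorkflow.setWorkflowTaskType(TaskType.SUB_WORKFLOW); // was WorkflowTask.Type.SUB_WORKFLOW

        System.out.println(TaskType.isSystemTask(subWorkflow.getType())); // true
        System.out.println(TaskType.isSystemTask("MY_CUSTOM_TASK"));      // false: unknown names count as user-defined
    }
}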
+ */ + public static boolean isSystemTask(String name) { + try { + TaskType taskType = TaskType.valueOf(name); + return taskType.isSystemTask; + } catch (IllegalArgumentException iae) { + return false; + } + } +} diff --git a/common/src/main/java/com/netflix/conductor/common/metadata/workflow/WorkflowDef.java b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/WorkflowDef.java index 95a1f2eeb3..f68765fa17 100644 --- a/common/src/main/java/com/netflix/conductor/common/metadata/workflow/WorkflowDef.java +++ b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/WorkflowDef.java @@ -23,35 +23,49 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Optional; +import com.github.vmg.protogen.annotations.ProtoField; +import com.github.vmg.protogen.annotations.ProtoMessage; +import com.google.common.base.MoreObjects; import com.netflix.conductor.common.metadata.Auditable; /** * @author Viren * */ +@ProtoMessage public class WorkflowDef extends Auditable { + @ProtoField(id = 1) private String name; - + + @ProtoField(id = 2) private String description; - + + @ProtoField(id = 3) private int version = 1; - - private LinkedList tasks = new LinkedList(); - - private List inputParameters = new LinkedList(); - + + @ProtoField(id = 4) + private List tasks = new LinkedList<>(); + + @ProtoField(id = 5) + private List inputParameters = new LinkedList<>(); + + @ProtoField(id = 6) private Map outputParameters = new HashMap<>(); + @ProtoField(id = 7) private String failureWorkflow; + @ProtoField(id = 8) + private int schemaVersion = 1; + //By default a workflow is restartable + @ProtoField(id = 9) private boolean restartable = true; - - private int schemaVersion = 1; - + /** * @return the name */ @@ -83,14 +97,14 @@ public void setDescription(String description) { /** * @return the tasks */ - public LinkedList getTasks() { + public List getTasks() { return tasks; } /** * @param tasks the tasks to set */ - public void setTasks(LinkedList tasks) { + public void setTasks(List tasks) { this.tasks = tasks; } @@ -215,20 +229,64 @@ public WorkflowTask getNextTask(String taskReferenceName){ } public WorkflowTask getTaskByRefName(String taskReferenceName){ - Optional found = all().stream() - .filter(wft -> wft.getTaskReferenceName().equals(taskReferenceName)) + Optional found = collectTasks().stream() + .filter(workflowTask -> workflowTask.getTaskReferenceName().equals(taskReferenceName)) .findFirst(); if(found.isPresent()){ return found.get(); } return null; } - - public List all(){ - List all = new LinkedList<>(); - for(WorkflowTask wft : tasks){ - all.addAll(wft.all()); + + public List collectTasks() { + List tasks = new LinkedList<>(); + for (WorkflowTask workflowTask : this.tasks) { + tasks.addAll(workflowTask.collectTasks()); } - return all; + return tasks; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + WorkflowDef that = (WorkflowDef) o; + return getVersion() == that.getVersion() && + getSchemaVersion() == that.getSchemaVersion() && + Objects.equals(getName(), that.getName()) && + Objects.equals(getDescription(), that.getDescription()) && + Objects.equals(getTasks(), that.getTasks()) && + Objects.equals(getInputParameters(), that.getInputParameters()) && + Objects.equals(getOutputParameters(), that.getOutputParameters()) && + Objects.equals(getFailureWorkflow(), that.getFailureWorkflow()); + } + + @Override + public 
int hashCode() { + return Objects.hash( + getName(), + getDescription(), + getVersion(), + getTasks(), + getInputParameters(), + getOutputParameters(), + getFailureWorkflow(), + getSchemaVersion() + ); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(getClass()) + .add("name", name) + .add("description", description) + .add("version", version) + .add("tasks", tasks) + .add("inputParameters", inputParameters) + .add("outputParameters", outputParameters) + .add("failureWorkflow", failureWorkflow) + .add("schemaVersion", schemaVersion) + .add("restartable", restartable) + .toString(); } } diff --git a/common/src/main/java/com/netflix/conductor/common/metadata/workflow/WorkflowTask.java b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/WorkflowTask.java index 160729151f..c3686a0a74 100644 --- a/common/src/main/java/com/netflix/conductor/common/metadata/workflow/WorkflowTask.java +++ b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/WorkflowTask.java @@ -18,15 +18,18 @@ */ package com.netflix.conductor.common.metadata.workflow; +import com.github.vmg.protogen.annotations.ProtoField; +import com.github.vmg.protogen.annotations.ProtoMessage; +import com.netflix.conductor.common.metadata.tasks.TaskDef; + import java.util.Collection; import java.util.HashMap; -import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; -import java.util.Set; +import java.util.Objects; /** * @author Viren @@ -34,74 +37,89 @@ * This is the task definition definied as part of the {@link WorkflowDef}. The tasks definied in the Workflow definition are saved * as part of {@link WorkflowDef#tasks} */ +@ProtoMessage public class WorkflowTask { - public enum Type { - SIMPLE, DYNAMIC, FORK_JOIN, FORK_JOIN_DYNAMIC, DECISION, JOIN, SUB_WORKFLOW, EVENT, WAIT, USER_DEFINED; - - private static Set systemTasks = new HashSet<>(); - static { - systemTasks.add(Type.SIMPLE.name()); - systemTasks.add(Type.DYNAMIC.name()); - systemTasks.add(Type.FORK_JOIN.name()); - systemTasks.add(Type.FORK_JOIN_DYNAMIC.name()); - systemTasks.add(Type.DECISION.name()); - systemTasks.add(Type.JOIN.name()); - systemTasks.add(Type.SUB_WORKFLOW.name()); - systemTasks.add(Type.EVENT.name()); - systemTasks.add(Type.WAIT.name()); - //Do NOT add USER_DEFINED here... - } - - public static boolean isSystemTask(String name) { - return systemTasks.contains(name); - } - } - + @ProtoField(id = 1) private String name; - + + @ProtoField(id = 2) private String taskReferenceName; + @ProtoField(id = 3) private String description; //Key: Name of the input parameter. MUST be one of the keys defined in TaskDef (e.g. fileName) //Value: mapping of the parameter from another task (e.g. 
task1.someOutputParameterAsFileName) + @ProtoField(id = 4) private Map inputParameters = new HashMap(); - private String type = Type.SIMPLE.name(); + @ProtoField(id = 5) + private String type = TaskType.SIMPLE.name(); + @ProtoField(id = 6) private String dynamicTaskNameParam; - + + @ProtoField(id = 7) private String caseValueParam; - + + @ProtoField(id = 8) private String caseExpression; - + + @ProtoMessage(wrapper = true) + public static class WorkflowTaskList { + public List getTasks() { + return tasks; + } + + public void setTasks(List tasks) { + this.tasks = tasks; + } + + @ProtoField(id = 1) + private List tasks; + } + //Populates for the tasks of the decision type + @ProtoField(id = 9) private Map> decisionCases = new LinkedHashMap<>(); - + @Deprecated private String dynamicForkJoinTasksParam; - + + @ProtoField(id = 10) private String dynamicForkTasksParam; - + + @ProtoField(id = 11) private String dynamicForkTasksInputParamName; - + + @ProtoField(id = 12) private List defaultCase = new LinkedList<>(); - + + @ProtoField(id = 13) private List> forkTasks = new LinkedList<>(); - + + @ProtoField(id = 14) private int startDelay; //No. of seconds (at-least) to wait before starting a task. - private SubWorkflowParams subWorkflow; - + @ProtoField(id = 15) + private SubWorkflowParams subWorkflowParam; + + @ProtoField(id = 16) private List joinOn = new LinkedList<>(); - + + @ProtoField(id = 17) private String sink; - - private Boolean optional; + @ProtoField(id = 18) + private boolean optional = false; + + @ProtoField(id = 19) + private TaskDef taskDefinition; + + @ProtoField(id = 20) private Boolean rateLimited; - + /** * @return the name */ @@ -165,7 +183,7 @@ public String getType() { return type; } - public void setWorkflowTaskType(Type type) { + public void setWorkflowTaskType(TaskType type) { this.type = type.name(); } @@ -313,14 +331,14 @@ public void setCaseExpression(String caseExpression) { * @return the subWorkflow */ public SubWorkflowParams getSubWorkflowParam() { - return subWorkflow; + return subWorkflowParam; } /** * @param subWorkflow the subWorkflowParam to set */ public void setSubWorkflowParam(SubWorkflowParams subWorkflow) { - this.subWorkflow = subWorkflow; + this.subWorkflowParam = subWorkflow; } /** @@ -354,26 +372,33 @@ public void setSink(String sink) { } /** - * + * * @return If the task is optional. When set to true, the workflow execution continues even when the task is in failed status. */ - public Boolean getOptional() { + public boolean isOptional() { return optional; } - + /** - * - * @return true if the task is optional. False otherwise. 
+ * + * @return Task definition associated to the Workflow Task */ - public boolean isOptional() { - return (optional != null && optional.booleanValue()); + public TaskDef getTaskDefinition() { + return taskDefinition; } - + + /** + * @param taskDefinition Task definition + */ + public void setTaskDefinition(TaskDef taskDefinition) { + this.taskDefinition = taskDefinition; + } + /** * * @param optional when set to true, the task is marked as optional */ - public void setOptional(Boolean optional) { + public void setOptional(boolean optional) { this.optional = optional; } @@ -385,89 +410,91 @@ public void setRateLimited(Boolean rateLimited) { this.rateLimited = rateLimited; } - public boolean isRateLimited() { - return (rateLimited !=null && rateLimited.booleanValue()); + public Boolean isRateLimited() { + return rateLimited != null && rateLimited; } - private Collection> children(){ - Collection> v1 = new LinkedList<>(); - Type tt = Type.USER_DEFINED; - if(Type.isSystemTask(type)) { - tt = Type.valueOf(type); + private Collection> children() { + Collection> workflowTaskLists = new LinkedList<>(); + TaskType taskType = TaskType.USER_DEFINED; + if (TaskType.isSystemTask(type)) { + taskType = TaskType.valueOf(type); } - - switch(tt){ + + switch (taskType) { case DECISION: - v1.addAll(decisionCases.values()); - v1.add(defaultCase); + workflowTaskLists.addAll(decisionCases.values()); + workflowTaskLists.add(defaultCase); break; case FORK_JOIN: - v1.addAll(forkTasks); + workflowTaskLists.addAll(forkTasks); break; default: break; } - return v1; - + return workflowTaskLists; + } - - public List all(){ - List all = new LinkedList<>(); - all.add(this); - for (List wfts : children() ){ - for(WorkflowTask wft : wfts){ - all.addAll(wft.all()); + + public List collectTasks() { + List tasks = new LinkedList<>(); + tasks.add(this); + for (List workflowTaskList : children()) { + for (WorkflowTask workflowTask : workflowTaskList) { + tasks.addAll(workflowTask.collectTasks()); } } - return all; + return tasks; } - - public WorkflowTask next(String taskReferenceName, WorkflowTask parent){ - Type tt = Type.USER_DEFINED; - if(Type.isSystemTask(type)) { - tt = Type.valueOf(type); + + public WorkflowTask next(String taskReferenceName, WorkflowTask parent) { + TaskType taskType = TaskType.USER_DEFINED; + if (TaskType.isSystemTask(type)) { + taskType = TaskType.valueOf(type); } - - switch(tt){ + + switch (taskType) { case DECISION: - for (List wfts : children() ){ + for (List wfts : children()) { Iterator it = wfts.iterator(); - while(it.hasNext()){ + while (it.hasNext()) { WorkflowTask task = it.next(); - if(task.getTaskReferenceName().equals(taskReferenceName)){ + if (task.getTaskReferenceName().equals(taskReferenceName)) { break; } WorkflowTask nextTask = task.next(taskReferenceName, this); - if(nextTask != null){ + if (nextTask != null) { return nextTask; } - if(task.has(taskReferenceName)){ + if (task.has(taskReferenceName)) { break; } } - if(it.hasNext()) { return it.next(); } + if (it.hasNext()) { + return it.next(); + } } break; case FORK_JOIN: boolean found = false; - for (List wfts : children() ){ + for (List wfts : children()) { Iterator it = wfts.iterator(); - while(it.hasNext()){ + while (it.hasNext()) { WorkflowTask task = it.next(); - if(task.getTaskReferenceName().equals(taskReferenceName)){ + if (task.getTaskReferenceName().equals(taskReferenceName)) { found = true; break; } WorkflowTask nextTask = task.next(taskReferenceName, this); - if(nextTask != null){ + if (nextTask != null) { return 
nextTask; } } - if(it.hasNext()) { - return it.next(); + if (it.hasNext()) { + return it.next(); } - if(found && parent != null){ - return parent.next(this.taskReferenceName, parent); //we need to return join task... -- get my sibling from my parent.. + if (found && parent != null) { + return parent.next(this.taskReferenceName, parent); //we need to return join task... -- get my sibling from my parent.. } } break; @@ -486,9 +513,9 @@ public boolean has(String taskReferenceName){ return true; } - Type tt = Type.USER_DEFINED; - if(Type.isSystemTask(type)) { - tt = Type.valueOf(type); + TaskType tt = TaskType.USER_DEFINED; + if(TaskType.isSystemTask(type)) { + tt = TaskType.valueOf(type); } switch(tt){ @@ -511,35 +538,6 @@ public boolean has(String taskReferenceName){ } - public boolean has2(String taskReferenceName){ - - if(this.getTaskReferenceName().equals(taskReferenceName)){ - return true; - } - Type tt = Type.USER_DEFINED; - if(Type.isSystemTask(type)) { - tt = Type.valueOf(type); - } - - switch(tt){ - - case DECISION: - case FORK_JOIN: - for(List childx : children()){ - for(WorkflowTask child : childx){ - if(child.getTaskReferenceName().equals(taskReferenceName)){ - return true; - } - } - } - break; - default: - break; - } - return false; - - } - public WorkflowTask get(String taskReferenceName){ if(this.getTaskReferenceName().equals(taskReferenceName)){ @@ -561,4 +559,56 @@ public WorkflowTask get(String taskReferenceName){ public String toString() { return name + "/" + taskReferenceName; } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + WorkflowTask that = (WorkflowTask) o; + return getStartDelay() == that.getStartDelay() && + isOptional() == that.isOptional() && + Objects.equals(getName(), that.getName()) && + Objects.equals(getTaskReferenceName(), that.getTaskReferenceName()) && + Objects.equals(getDescription(), that.getDescription()) && + Objects.equals(getInputParameters(), that.getInputParameters()) && + Objects.equals(getType(), that.getType()) && + Objects.equals(getDynamicTaskNameParam(), that.getDynamicTaskNameParam()) && + Objects.equals(getCaseValueParam(), that.getCaseValueParam()) && + Objects.equals(getCaseExpression(), that.getCaseExpression()) && + Objects.equals(getDecisionCases(), that.getDecisionCases()) && + Objects.equals(getDynamicForkJoinTasksParam(), that.getDynamicForkJoinTasksParam()) && + Objects.equals(getDynamicForkTasksParam(), that.getDynamicForkTasksParam()) && + Objects.equals(getDynamicForkTasksInputParamName(), that.getDynamicForkTasksInputParamName()) && + Objects.equals(getDefaultCase(), that.getDefaultCase()) && + Objects.equals(getForkTasks(), that.getForkTasks()) && + Objects.equals(getSubWorkflowParam(), that.getSubWorkflowParam()) && + Objects.equals(getJoinOn(), that.getJoinOn()) && + Objects.equals(getSink(), that.getSink()); + } + + @Override + public int hashCode() { + + return Objects.hash( + getName(), + getTaskReferenceName(), + getDescription(), + getInputParameters(), + getType(), + getDynamicTaskNameParam(), + getCaseValueParam(), + getCaseExpression(), + getDecisionCases(), + getDynamicForkJoinTasksParam(), + getDynamicForkTasksParam(), + getDynamicForkTasksInputParamName(), + getDefaultCase(), + getForkTasks(), + getStartDelay(), + getSubWorkflowParam(), + getJoinOn(), + getSink(), + isOptional() + ); + } } diff --git a/common/src/main/java/com/netflix/conductor/common/run/TaskSummary.java 
b/common/src/main/java/com/netflix/conductor/common/run/TaskSummary.java index 383da3a8a5..f0fc91361c 100644 --- a/common/src/main/java/com/netflix/conductor/common/run/TaskSummary.java +++ b/common/src/main/java/com/netflix/conductor/common/run/TaskSummary.java @@ -22,6 +22,7 @@ import java.util.Date; import java.util.TimeZone; +import com.github.vmg.protogen.annotations.*; import com.netflix.conductor.common.metadata.tasks.Task; import com.netflix.conductor.common.metadata.tasks.Task.Status; @@ -29,45 +30,62 @@ * @author Viren * */ +@ProtoMessage(fromProto = false) public class TaskSummary { /** * The time should be stored as GMT */ private static final TimeZone gmt = TimeZone.getTimeZone("GMT"); - + + @ProtoField(id = 1) private String workflowId; + @ProtoField(id = 2) private String workflowType; - + + @ProtoField(id = 3) private String correlationId; - + + @ProtoField(id = 4) private String scheduledTime; - + + @ProtoField(id = 5) private String startTime; - + + @ProtoField(id = 6) private String updateTime; - + + @ProtoField(id = 7) private String endTime; - + + @ProtoField(id = 8) private Status status; - + + @ProtoField(id = 9) private String reasonForIncompletion; - + + @ProtoField(id = 10) private long executionTime; - + + @ProtoField(id = 11) private long queueWaitTime; - + + @ProtoField(id = 12) private String taskDefName; - + + @ProtoField(id = 13) private String taskType; - + + @ProtoField(id = 14) private String input; - + + @ProtoField(id = 15) private String output; - + + @ProtoField(id = 16) private String taskId; - + public TaskSummary(Task task) { SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'"); diff --git a/common/src/main/java/com/netflix/conductor/common/run/Workflow.java b/common/src/main/java/com/netflix/conductor/common/run/Workflow.java index e70624ccc3..08b032c939 100644 --- a/common/src/main/java/com/netflix/conductor/common/run/Workflow.java +++ b/common/src/main/java/com/netflix/conductor/common/run/Workflow.java @@ -1,33 +1,37 @@ /* * Copyright 2016 Netflix, Inc. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
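Workflow, defined next, now embeds its WorkflowDef, and the accessors added further down (getWorkflowName, getWorkflowVersion, getSchemaVersion) read from that embedded definition when present, falling back to the now-deprecated workflowType/version/schemaVersion fields only for records persisted before this change. A hypothetical sketch of that fallback, not part of the patch:

import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.run.Workflow;

// Hypothetical illustration of the old-vs-new resolution of workflow name and version.
public class WorkflowNameFallbackExample {
    public static void main(String[] args) {
        Workflow workflow = new Workflow();
        workflow.setWorkflowType("legacy_flow"); // deprecated field, e.g. a record written before this change
        workflow.setVersion(2);
        System.out.println(workflow.getWorkflowName());    // "legacy_flow" (no embedded definition yet)

        WorkflowDef definition = new WorkflowDef();
        definition.setName("legacy_flow");
        definition.setVersion(3);
        workflow.setWorkflowDefinition(definition);
        System.out.println(workflow.getWorkflowVersion()); // 3, taken from the embedded definition
    }
}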
*/ package com.netflix.conductor.common.run; +import com.github.vmg.protogen.annotations.ProtoEnum; +import com.github.vmg.protogen.annotations.ProtoField; +import com.github.vmg.protogen.annotations.ProtoMessage; import com.netflix.conductor.common.metadata.Auditable; import com.netflix.conductor.common.metadata.tasks.Task; +import com.netflix.conductor.common.metadata.workflow.WorkflowDef; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; +@ProtoMessage public class Workflow extends Auditable{ + @ProtoEnum public enum WorkflowStatus { RUNNING(false, false), COMPLETED(true, true), FAILED(true, false), TIMED_OUT(true, false), TERMINATED(true, false), PAUSED(false, true); @@ -48,43 +52,67 @@ public boolean isSuccessful(){ return successful; } } - + @ProtoField(id = 1) private WorkflowStatus status = WorkflowStatus.RUNNING; - private long endTime; + @ProtoField(id = 2) + private long endTime; - private String workflowId; + @ProtoField(id = 3) + private String workflowId; - private String parentWorkflowId; + @ProtoField(id = 4) + private String parentWorkflowId; - private String parentWorkflowTaskId; + @ProtoField(id = 5) + private String parentWorkflowTaskId; - private List tasks = new LinkedList<>(); + @ProtoField(id = 6) + private List tasks = new LinkedList<>(); - private Map input = new HashMap<>(); + @ProtoField(id = 8) + private Map input = new HashMap<>(); - private Map output = new HashMap<>();; + @ProtoField(id = 9) + private Map output = new HashMap<>();; - private String workflowType; + @ProtoField(id = 10) + @Deprecated + private String workflowType; - private int version; + @ProtoField(id = 11) + @Deprecated + private int version; - private String correlationId; + @ProtoField(id = 12) + private String correlationId; + @ProtoField(id = 13) private String reRunFromWorkflowId; + @ProtoField(id = 14) private String reasonForIncompletion; - private int schemaVersion; + @ProtoField(id = 15) + @Deprecated + private int schemaVersion; + @ProtoField(id = 16) private String event; + @ProtoField(id = 17) private Map taskToDomain = new HashMap<>(); + @ProtoField(id = 18) private Set failedReferenceTaskNames = new HashSet<>(); - private String externalInputPayloadStoragePath; + @ProtoField(id = 19) + private WorkflowDef workflowDefinition; + + @ProtoField(id = 20) + private String externalInputPayloadStoragePath; + @ProtoField(id = 21) private String externalOutputPayloadStoragePath; public Workflow(){ @@ -214,6 +242,7 @@ public void setCorrelationId(String correlationId) { * * @return Workflow Type / Definition */ + @Deprecated public String getWorkflowType() { return workflowType; } @@ -222,20 +251,23 @@ public String getWorkflowType() { * * @param workflowType Workflow type */ + @Deprecated public void setWorkflowType(String workflowType) { this.workflowType = workflowType; } - /** * @return the version */ + @Deprecated public int getVersion() { return version; } + /** * @param version the version to set */ + @Deprecated public void setVersion(int version) { this.version = version; } @@ -262,6 +294,7 @@ public void setReasonForIncompletion(String reasonForIncompletion) { public String getParentWorkflowId() { return parentWorkflowId; } + /** * @param parentWorkflowId the parentWorkflowId to set */ @@ -275,21 +308,27 @@ public void setParentWorkflowId(String parentWorkflowId) { public String getParentWorkflowTaskId() { return 
parentWorkflowTaskId; } + /** * @param parentWorkflowTaskId the parentWorkflowTaskId to set */ public void setParentWorkflowTaskId(String parentWorkflowTaskId) { this.parentWorkflowTaskId = parentWorkflowTaskId; } + /** * @return the schemaVersion Version of the schema for the workflow definition */ - public int getSchemaVersion() { - return schemaVersion; - } + public int getSchemaVersion() { + return getWorkflowDefinition() != null ? + getWorkflowDefinition().getSchemaVersion() : + schemaVersion; + } + /** * @param schemaVersion the schemaVersion to set */ + @Deprecated public void setSchemaVersion(int schemaVersion) { this.schemaVersion = schemaVersion; } @@ -318,6 +357,15 @@ public void setFailedReferenceTaskNames(Set failedReferenceTaskNames) { this.failedReferenceTaskNames = failedReferenceTaskNames; } + public WorkflowDef getWorkflowDefinition() { + return workflowDefinition; + } + + public void setWorkflowDefinition(WorkflowDef workflowDefinition) { + this.workflowDefinition = workflowDefinition; + } + + /** * @return the external storage path of the workflow input payload */ @@ -339,6 +387,26 @@ public String getExternalOutputPayloadStoragePath() { return externalOutputPayloadStoragePath; } + /** + * Convenience method for accessing the workflow definition name. + * @return the workflow definition name. + */ + public String getWorkflowName() { + return getWorkflowDefinition() != null ? + getWorkflowDefinition().getName() : + workflowType; + } + + /** + * Convenience method for accessing the workflow definition version. + * @return the workflow definition version. + */ + public int getWorkflowVersion() { + return getWorkflowDefinition() != null ? + getWorkflowDefinition().getVersion() : + version; + } + /** * @param externalOutputPayloadStoragePath the external storage path where the workflow output payload is stored */ @@ -385,12 +453,11 @@ public Workflow copy() { copy.setParentWorkflowId(parentWorkflowId); copy.setParentWorkflowTaskId(parentWorkflowTaskId); copy.setReRunFromWorkflowId(reRunFromWorkflowId); - copy.setWorkflowType(workflowType); - copy.setVersion(version); copy.setCorrelationId(correlationId); copy.setEvent(event); copy.setReasonForIncompletion(reasonForIncompletion); - copy.setSchemaVersion(schemaVersion); + copy.setWorkflowDefinition(workflowDefinition); + copy.setTasks(tasks.stream() .map(Task::copy) .collect(Collectors.toList())); @@ -399,6 +466,59 @@ public Workflow copy() { @Override public String toString() { - return workflowType + "." + version + "/" + workflowId + "." + status; - } + return getWorkflowName() + "." + getWorkflowVersion() + "/" + workflowId + "." 
+ status; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Workflow workflow = (Workflow) o; + return getEndTime() == workflow.getEndTime() && + getWorkflowVersion() == workflow.getWorkflowVersion() && + getSchemaVersion() == workflow.getSchemaVersion() && + getStatus() == workflow.getStatus() && + Objects.equals(getWorkflowId(), workflow.getWorkflowId()) && + Objects.equals(getParentWorkflowId(), workflow.getParentWorkflowId()) && + Objects.equals(getParentWorkflowTaskId(), workflow.getParentWorkflowTaskId()) && + Objects.equals(getTasks(), workflow.getTasks()) && + Objects.equals(getInput(), workflow.getInput()) && + Objects.equals(getOutput(), workflow.getOutput()) && + Objects.equals(getWorkflowName(), workflow.getWorkflowName()) && + Objects.equals(getCorrelationId(), workflow.getCorrelationId()) && + Objects.equals(getReRunFromWorkflowId(), workflow.getReRunFromWorkflowId()) && + Objects.equals(getReasonForIncompletion(), workflow.getReasonForIncompletion()) && + Objects.equals(getEvent(), workflow.getEvent()) && + Objects.equals(getTaskToDomain(), workflow.getTaskToDomain()) && + Objects.equals(getFailedReferenceTaskNames(), workflow.getFailedReferenceTaskNames()) && + Objects.equals(getExternalInputPayloadStoragePath(), workflow.getExternalInputPayloadStoragePath()) && + Objects.equals(getExternalOutputPayloadStoragePath(), workflow.getExternalOutputPayloadStoragePath()) && + Objects.equals(getWorkflowDefinition(), workflow.getWorkflowDefinition()); + } + + @Override + public int hashCode() { + return Objects.hash( + getStatus(), + getEndTime(), + getWorkflowId(), + getParentWorkflowId(), + getParentWorkflowTaskId(), + getTasks(), + getInput(), + getOutput(), + getWorkflowName(), + getWorkflowVersion(), + getCorrelationId(), + getReRunFromWorkflowId(), + getReasonForIncompletion(), + getSchemaVersion(), + getEvent(), + getTaskToDomain(), + getFailedReferenceTaskNames(), + getWorkflowDefinition(), + getExternalInputPayloadStoragePath(), + getExternalOutputPayloadStoragePath() + ); + } } diff --git a/common/src/main/java/com/netflix/conductor/common/run/WorkflowSummary.java b/common/src/main/java/com/netflix/conductor/common/run/WorkflowSummary.java index 43afb30174..d174a7d1f6 100644 --- a/common/src/main/java/com/netflix/conductor/common/run/WorkflowSummary.java +++ b/common/src/main/java/com/netflix/conductor/common/run/WorkflowSummary.java @@ -23,6 +23,7 @@ import java.util.TimeZone; import java.util.stream.Collectors; +import com.github.vmg.protogen.annotations.*; import com.netflix.conductor.common.run.Workflow.WorkflowStatus; /** @@ -30,39 +31,54 @@ * * @author Viren */ +@ProtoMessage public class WorkflowSummary { /** * The time should be stored as GMT */ private static final TimeZone gmt = TimeZone.getTimeZone("GMT"); - + + @ProtoField(id = 1) private String workflowType; - + + @ProtoField(id = 2) private int version; - + + @ProtoField(id = 3) private String workflowId; - + + @ProtoField(id = 4) private String correlationId; - + + @ProtoField(id = 5) private String startTime; - + + @ProtoField(id = 6) private String updateTime; - + + @ProtoField(id = 7) private String endTime; - + + @ProtoField(id = 8) private WorkflowStatus status; - + + @ProtoField(id = 9) private String input; - + + @ProtoField(id = 10) private String output; - + + @ProtoField(id = 11) private String reasonForIncompletion; - + + @ProtoField(id = 12) private long executionTime; - + + @ProtoField(id = 13) 
private String event; + @ProtoField(id = 14) private String failedReferenceTaskNames = ""; public WorkflowSummary() { @@ -73,8 +89,8 @@ public WorkflowSummary(Workflow workflow) { SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'"); sdf.setTimeZone(gmt); - this.workflowType = workflow.getWorkflowType(); - this.version = workflow.getVersion(); + this.workflowType = workflow.getWorkflowName(); + this.version = workflow.getWorkflowVersion(); this.workflowId = workflow.getWorkflowId(); this.correlationId = workflow.getCorrelationId(); if(workflow.getCreateTime() != null){ @@ -214,4 +230,52 @@ public String getFailedReferenceTaskNames() { public void setFailedReferenceTaskNames(String failedReferenceTaskNames) { this.failedReferenceTaskNames = failedReferenceTaskNames; } + + public void setWorkflowType(String workflowType) { + this.workflowType = workflowType; + } + + public void setVersion(int version) { + this.version = version; + } + + public void setWorkflowId(String workflowId) { + this.workflowId = workflowId; + } + + public void setCorrelationId(String correlationId) { + this.correlationId = correlationId; + } + + public void setStartTime(String startTime) { + this.startTime = startTime; + } + + public void setUpdateTime(String updateTime) { + this.updateTime = updateTime; + } + + public void setEndTime(String endTime) { + this.endTime = endTime; + } + + public void setStatus(WorkflowStatus status) { + this.status = status; + } + + public void setInput(String input) { + this.input = input; + } + + public void setOutput(String output) { + this.output = output; + } + + public void setReasonForIncompletion(String reasonForIncompletion) { + this.reasonForIncompletion = reasonForIncompletion; + } + + public void setExecutionTime(long executionTime) { + this.executionTime = executionTime; + } } diff --git a/common/src/main/java/com/netflix/conductor/common/utils/JsonMapperProvider.java b/common/src/main/java/com/netflix/conductor/common/utils/JsonMapperProvider.java new file mode 100644 index 0000000000..597b0ed926 --- /dev/null +++ b/common/src/main/java/com/netflix/conductor/common/utils/JsonMapperProvider.java @@ -0,0 +1,146 @@ +package com.netflix.conductor.common.utils; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.*; +import com.fasterxml.jackson.databind.module.SimpleModule; +import com.google.protobuf.Any; +import com.google.protobuf.ByteString; +import com.google.protobuf.Message; + +import javax.inject.Provider; +import java.io.IOException; + +public class JsonMapperProvider implements Provider { + public JsonMapperProvider() {} + + /** + * JsonProtoModule can be registered into an {@link ObjectMapper} + * to enable the serialization and deserialization of ProtoBuf objects + * from/to JSON. + * + * Right now this module only provides (de)serialization for the {@link Any} + * ProtoBuf type, as this is the only ProtoBuf object which we're currently + * exposing through the REST API. + * + * {@see AnySerializer}, {@see AnyDeserializer} + */ + private static class JsonProtoModule extends SimpleModule { + private final static String JSON_TYPE = "@type"; + private final static String JSON_VALUE = "@value"; + + /** + * AnySerializer converts a ProtoBuf {@link Any} object into its JSON + * representation. + * + * This is not a canonical ProtoBuf JSON representation. 
Let us + * explain what we're trying to accomplish here: + * + * The {@link Any} ProtoBuf message is a type in the PB standard library that + * can store any other arbitrary ProtoBuf message in a type-safe way, even + * when the server has no knowledge of the schema of the stored message. + * + * It accomplishes this by storing a tuple of information: a URL-like type + * declaration for the stored message, and the serialized binary encoding + * of the stored message itself. Language specific implementations of ProtoBuf + * provide helper methods to encode and decode arbitrary messages into an + * {@link Any} object ({@link Any#pack(Message)} in Java). + * + * We want to expose these {@link Any} objects in the REST API because they've + * been introduced as part of the new GRPC interface to Conductor, but unfortunately + * we cannot encode them using their canonical ProtoBuf JSON encoding. According to + * the docs: + * + * The JSON representation of an `Any` value uses the regular + * representation of the deserialized, embedded message, with an + * additional field `@type` which contains the type URL. Example: + * + * package google.profile; + * message Person { + * string first_name = 1; + * string last_name = 2; + * } + * { + * "@type": "type.googleapis.com/google.profile.Person", + * "firstName": <string>, + * "lastName": <string> + * } + * + * In order to accomplish this representation, the PB-JSON encoder needs to have + * knowledge of all the ProtoBuf messages that could be serialized inside the + * {@link Any} message. This is not possible to accomplish inside the Conductor server, + * which is simply passing through arbitrary payloads from/to clients. + * + * Consequently, to actually expose the Message through the REST API, we must create + * a custom encoding that contains the raw data of the serialized message, as we are + * not able to deserialize it on the server. We simply return a dictionary with + * '@type' and '@value' keys, where '@type' is identical to the canonical representation, + * but '@value' contains a base64 encoded string with the binary data of the serialized + * message. + * + * Since all the provided Conductor clients are required to know this encoding, it's always + * possible to re-build the original {@link Any} message regardless of the client's language. + * + * {@see AnyDeserializer} + */ + protected class AnySerializer extends JsonSerializer<Any> { + @Override + public void serialize(Any value, JsonGenerator jgen, SerializerProvider provider) + throws IOException, JsonProcessingException { + jgen.writeStartObject(); + jgen.writeStringField(JSON_TYPE, value.getTypeUrl()); + jgen.writeBinaryField(JSON_VALUE, value.getValue().toByteArray()); + jgen.writeEndObject(); + } + } + + /** + * AnyDeserializer converts the custom JSON representation of an {@link Any} value + * into its original form. + * + * {@see AnySerializer} for details on this representation.
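To make the custom encoding concrete, here is a minimal round-trip sketch built on the JsonMapperProvider added in this change; the Struct payload is only an illustrative stand-in for whatever message a client would actually pack, and the printed type URL reflects protobuf's default type.googleapis.com prefix:

    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.google.protobuf.Any;
    import com.google.protobuf.Struct;
    import com.google.protobuf.Value;
    import com.netflix.conductor.common.utils.JsonMapperProvider;

    public class AnyEncodingExample {
        public static void main(String[] args) throws Exception {
            // ObjectMapper with the JsonProtoModule registered
            ObjectMapper mapper = new JsonMapperProvider().get();

            // Pack an arbitrary message into an Any (Struct is just a stand-in here)
            Struct payload = Struct.newBuilder()
                    .putFields("key", Value.newBuilder().setStringValue("value1").build())
                    .build();
            Any any = Any.pack(payload);

            // Produces {"@type":"type.googleapis.com/google.protobuf.Struct","@value":"<base64>"}
            String json = mapper.writeValueAsString(any);
            System.out.println(json);

            // A client that knows the convention can rebuild and unpack the original message
            Any decoded = mapper.readValue(json, Any.class);
            Struct unpacked = decoded.unpack(Struct.class);
            System.out.println(unpacked.getFieldsMap());
        }
    }

Note that the server never needs the Struct schema; only the clients that pack and unpack do, which is exactly the property the '@type'/'@value' dictionary preserves.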
+ */ + protected class AnyDeserializer extends JsonDeserializer { + @Override + public Any deserialize(JsonParser p, DeserializationContext ctxt) + throws IOException, JsonProcessingException { + JsonNode root = p.getCodec().readTree(p); + JsonNode type = root.get(JSON_TYPE); + JsonNode value = root.get(JSON_VALUE); + + if (type == null || !type.isTextual()) { + throw ctxt.reportMappingException("invalid '@type' field when deserializing ProtoBuf Any object"); + } + + if (value == null || !value.isTextual()) { + throw ctxt.reportMappingException("invalid '@value' field when deserializing ProtoBuf Any object"); + } + + return Any.newBuilder() + .setTypeUrl(type.textValue()) + .setValue(ByteString.copyFrom(value.binaryValue())) + .build(); + } + } + + public JsonProtoModule() { + super("ConductorJsonProtoModule"); + addSerializer(Any.class, new AnySerializer()); + addDeserializer(Any.class, new AnyDeserializer()); + } + } + + @Override + public ObjectMapper get() { + final ObjectMapper objectMapper = new ObjectMapper(); + objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + objectMapper.configure(DeserializationFeature.FAIL_ON_IGNORED_PROPERTIES, false); + objectMapper.configure(DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES, false); + objectMapper.setSerializationInclusion(JsonInclude.Include.NON_NULL); + objectMapper.setSerializationInclusion(JsonInclude.Include.NON_EMPTY); + objectMapper.registerModule(new JsonProtoModule()); + return objectMapper; + } +} diff --git a/common/src/test/java/com/netflix/conductor/common/tasks/TestTask.java b/common/src/test/java/com/netflix/conductor/common/tasks/TestTask.java index 9cf5d79478..1485bf849a 100644 --- a/common/src/test/java/com/netflix/conductor/common/tasks/TestTask.java +++ b/common/src/test/java/com/netflix/conductor/common/tasks/TestTask.java @@ -19,11 +19,16 @@ package com.netflix.conductor.common.tasks; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; import java.util.Arrays; import java.util.Set; import java.util.stream.Collectors; +import com.netflix.conductor.common.metadata.tasks.TaskDef; +import com.netflix.conductor.common.metadata.workflow.WorkflowTask; import org.junit.Test; import com.netflix.conductor.common.metadata.tasks.Task; @@ -59,4 +64,22 @@ public void test() { } } } + + @Test + public void testTaskDefinitionIfAvailable() { + Task task = new Task(); + task.setStatus(Status.FAILED); + assertEquals(Status.FAILED, task.getStatus()); + + assertNull(task.getWorkflowTask()); + assertFalse(task.getTaskDefinition().isPresent()); + + WorkflowTask workflowTask = new WorkflowTask(); + TaskDef taskDefinition = new TaskDef(); + workflowTask.setTaskDefinition(taskDefinition); + task.setWorkflowTask(workflowTask); + + assertTrue(task.getTaskDefinition().isPresent()); + assertEquals(taskDefinition, task.getTaskDefinition().get()); + } } diff --git a/common/src/test/java/com/netflix/conductor/common/workflow/TestWorkflowDef.java b/common/src/test/java/com/netflix/conductor/common/workflow/TestWorkflowDef.java index 7e77e5f01c..75bd698d88 100644 --- a/common/src/test/java/com/netflix/conductor/common/workflow/TestWorkflowDef.java +++ b/common/src/test/java/com/netflix/conductor/common/workflow/TestWorkflowDef.java @@ -18,9 +18,9 @@ */ package com.netflix.conductor.common.workflow; +import com.netflix.conductor.common.metadata.workflow.TaskType; import 
com.netflix.conductor.common.metadata.workflow.WorkflowDef; import com.netflix.conductor.common.metadata.workflow.WorkflowTask; -import com.netflix.conductor.common.metadata.workflow.WorkflowTask.Type; import org.junit.Test; import java.util.ArrayList; @@ -61,7 +61,7 @@ public void test() { wf.setDescription(COND_TASK_WF); WorkflowTask subCaseTask = new WorkflowTask(); - subCaseTask.setType(Type.DECISION.name()); + subCaseTask.setType(TaskType.DECISION.name()); subCaseTask.setCaseValueParam("case2"); subCaseTask.setName("case2"); subCaseTask.setTaskReferenceName("case2"); @@ -72,7 +72,7 @@ public void test() { WorkflowTask caseTask = new WorkflowTask(); - caseTask.setType(Type.DECISION.name()); + caseTask.setType(TaskType.DECISION.name()); caseTask.setCaseValueParam("case"); caseTask.setName("case"); caseTask.setTaskReferenceName("case"); diff --git a/common/src/test/java/com/netflix/conductor/common/workflow/TestWorkflowTask.java b/common/src/test/java/com/netflix/conductor/common/workflow/TestWorkflowTask.java index c38e0e0fbd..46eb8353c6 100644 --- a/common/src/test/java/com/netflix/conductor/common/workflow/TestWorkflowTask.java +++ b/common/src/test/java/com/netflix/conductor/common/workflow/TestWorkflowTask.java @@ -21,10 +21,10 @@ import static org.junit.Assert.*; +import com.netflix.conductor.common.metadata.workflow.TaskType; import org.junit.Test; import com.netflix.conductor.common.metadata.workflow.WorkflowTask; -import com.netflix.conductor.common.metadata.workflow.WorkflowTask.Type; /** * @author Viren @@ -35,10 +35,10 @@ public class TestWorkflowTask { @Test public void test() { WorkflowTask wt = new WorkflowTask(); - wt.setWorkflowTaskType(Type.DECISION); + wt.setWorkflowTaskType(TaskType.DECISION); assertNotNull(wt.getType()); - assertEquals(Type.DECISION.name(), wt.getType()); + assertEquals(TaskType.DECISION.name(), wt.getType()); } @Test diff --git a/contribs/build.gradle b/contribs/build.gradle index ee6019c4da..d2e767de9f 100644 --- a/contribs/build.gradle +++ b/contribs/build.gradle @@ -13,8 +13,8 @@ dependencies { compile "io.nats:java-nats-streaming:${revNatsStreaming}" - provided "javax.ws.rs:jsr311-api:${revJsr311Api}" - provided "io.swagger:swagger-jaxrs:${revSwagger}" + compileOnly "javax.ws.rs:jsr311-api:${revJsr311Api}" + compile "io.swagger:swagger-jaxrs:${revSwagger}" testCompile "org.eclipse.jetty:jetty-server:${revJetteyServer}" testCompile "org.eclipse.jetty:jetty-servlet:${revJettyServlet}" diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/ContribsModule.java b/contribs/src/main/java/com/netflix/conductor/contribs/ContribsModule.java index 6ebdc932b0..e5c1e8b1dc 100644 --- a/contribs/src/main/java/com/netflix/conductor/contribs/ContribsModule.java +++ b/contribs/src/main/java/com/netflix/conductor/contribs/ContribsModule.java @@ -18,6 +18,8 @@ */ package com.netflix.conductor.contribs; +import static com.netflix.conductor.core.events.EventQueues.EVENT_QUEUE_PROVIDERS_QUALIFIER; + import java.util.HashMap; import java.util.Map; @@ -55,7 +57,7 @@ protected void configure() { @ProvidesIntoMap @StringMapKey("sqs") @Singleton - @Named("EventQueueProviders") + @Named(EVENT_QUEUE_PROVIDERS_QUALIFIER) public EventQueueProvider getSQSEventQueueProvider(AmazonSQSClient amazonSQSClient) { return new SQSEventQueueProvider(amazonSQSClient); } diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/NatsModule.java b/contribs/src/main/java/com/netflix/conductor/contribs/NatsModule.java index 2f09cd1c21..81f6ab02a1 100644 --- 
a/contribs/src/main/java/com/netflix/conductor/contribs/NatsModule.java +++ b/contribs/src/main/java/com/netflix/conductor/contribs/NatsModule.java @@ -18,6 +18,8 @@ */ package com.netflix.conductor.contribs; +import static com.netflix.conductor.core.events.EventQueues.EVENT_QUEUE_PROVIDERS_QUALIFIER; + import com.google.inject.AbstractModule; import com.google.inject.Singleton; import com.google.inject.multibindings.ProvidesIntoMap; @@ -45,7 +47,7 @@ protected void configure() { @ProvidesIntoMap @StringMapKey("nats") @Singleton - @Named("EventQueueProviders") + @Named(EVENT_QUEUE_PROVIDERS_QUALIFIER) public EventQueueProvider getNATSEventQueueProvider(Configuration configuration) { return new NATSEventQueueProvider(configuration); } diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/NatsStreamModule.java b/contribs/src/main/java/com/netflix/conductor/contribs/NatsStreamModule.java index 0ff8ac311d..b589588462 100644 --- a/contribs/src/main/java/com/netflix/conductor/contribs/NatsStreamModule.java +++ b/contribs/src/main/java/com/netflix/conductor/contribs/NatsStreamModule.java @@ -18,6 +18,8 @@ */ package com.netflix.conductor.contribs; +import static com.netflix.conductor.core.events.EventQueues.EVENT_QUEUE_PROVIDERS_QUALIFIER; + import com.google.inject.AbstractModule; import com.google.inject.Singleton; import com.google.inject.multibindings.ProvidesIntoMap; @@ -26,8 +28,6 @@ import com.netflix.conductor.core.config.Configuration; import com.netflix.conductor.core.events.EventQueueProvider; import com.netflix.conductor.core.events.nats.NATSStreamEventQueueProvider; -import com.netflix.conductor.core.events.queue.dyno.DynoEventQueueProvider; -import com.netflix.conductor.dao.QueueDAO; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -47,7 +47,7 @@ protected void configure() { @ProvidesIntoMap @StringMapKey("nats_stream") @Singleton - @Named("EventQueueProviders") + @Named(EVENT_QUEUE_PROVIDERS_QUALIFIER) public EventQueueProvider geNATSStreamEventQueueProvider(Configuration configuration) { return new NATSStreamEventQueueProvider(configuration); } diff --git a/contribs/src/test/java/com/netflix/conductor/contribs/http/TestHttpTask.java b/contribs/src/test/java/com/netflix/conductor/contribs/http/TestHttpTask.java index 1f5b604845..ab3cfe69b8 100644 --- a/contribs/src/test/java/com/netflix/conductor/contribs/http/TestHttpTask.java +++ b/contribs/src/test/java/com/netflix/conductor/contribs/http/TestHttpTask.java @@ -1,49 +1,36 @@ /** * Copyright 2016 Netflix, Inc. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations under the License. */ /** - * + * */ package com.netflix.conductor.contribs.http; +import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; import com.netflix.conductor.common.metadata.tasks.Task; import com.netflix.conductor.common.metadata.tasks.Task.Status; +import com.netflix.conductor.common.metadata.workflow.TaskType; import com.netflix.conductor.common.metadata.workflow.WorkflowDef; import com.netflix.conductor.common.metadata.workflow.WorkflowTask; -import com.netflix.conductor.common.metadata.workflow.WorkflowTask.Type; import com.netflix.conductor.common.run.Workflow; import com.netflix.conductor.contribs.http.HttpTask.Input; import com.netflix.conductor.core.config.Configuration; import com.netflix.conductor.core.execution.DeciderService; import com.netflix.conductor.core.execution.ParametersUtils; import com.netflix.conductor.core.execution.WorkflowExecutor; -import com.netflix.conductor.core.execution.mapper.DecisionTaskMapper; -import com.netflix.conductor.core.execution.mapper.DynamicTaskMapper; -import com.netflix.conductor.core.execution.mapper.EventTaskMapper; -import com.netflix.conductor.core.execution.mapper.ForkJoinDynamicTaskMapper; -import com.netflix.conductor.core.execution.mapper.ForkJoinTaskMapper; -import com.netflix.conductor.core.execution.mapper.JoinTaskMapper; -import com.netflix.conductor.core.execution.mapper.SimpleTaskMapper; -import com.netflix.conductor.core.execution.mapper.SubWorkflowTaskMapper; import com.netflix.conductor.core.execution.mapper.TaskMapper; -import com.netflix.conductor.core.execution.mapper.UserDefinedTaskMapper; -import com.netflix.conductor.core.execution.mapper.WaitTaskMapper; import com.netflix.conductor.core.utils.ExternalPayloadStorageUtils; -import com.netflix.conductor.dao.MetadataDAO; import com.netflix.conductor.dao.QueueDAO; import org.eclipse.jetty.server.Request; import org.eclipse.jetty.server.Server; @@ -79,337 +66,331 @@ @SuppressWarnings("unchecked") public class TestHttpTask { - private static final String ERROR_RESPONSE = "Something went wrong!"; - - private static final String TEXT_RESPONSE = "Text Response"; - - private static final double NUM_RESPONSE = 42.42d; - - private static String JSON_RESPONSE; - - private HttpTask httpTask; - - private WorkflowExecutor workflowExecutor; - - private Configuration config; - - private Workflow workflow = new Workflow(); - - private static Server server; - - private static ObjectMapper objectMapper = new ObjectMapper(); - - @BeforeClass - public static void init() throws Exception { - - Map map = new HashMap<>(); - map.put("key", "value1"); - map.put("num", 42); - JSON_RESPONSE = objectMapper.writeValueAsString(map); - - server = new Server(7009); - ServletContextHandler servletContextHandler = new ServletContextHandler(server, "/", ServletContextHandler.SESSIONS); - servletContextHandler.setHandler(new EchoHandler()); - server.start(); - } - - @AfterClass - public static void cleanup() { - if(server != null) { - try { - server.stop(); - } catch (Exception e) { - e.printStackTrace(); - } - } - } - - @Before - public void setup() { - RestClientManager rcm = new RestClientManager(); - workflowExecutor = mock(WorkflowExecutor.class); - config = mock(Configuration.class); - when(config.getServerId()).thenReturn("test_server_id"); - httpTask = new HttpTask(rcm, config); - } - - @Test - 
public void testPost() { - - Task task = new Task(); - Input input = new Input(); - input.setUri("http://localhost:7009/post"); - Map body = new HashMap<>(); - body.put("input_key1", "value1"); - body.put("input_key2", 45.3d); - input.setBody(body); - input.setMethod("POST"); - task.getInputData().put(HttpTask.REQUEST_PARAMETER_NAME, input); - - httpTask.start(workflow, task, workflowExecutor); - assertEquals(task.getReasonForIncompletion(), Task.Status.COMPLETED, task.getStatus()); - Map hr = (Map) task.getOutputData().get("response"); - Object response = hr.get("body"); - assertEquals(Task.Status.COMPLETED, task.getStatus()); - assertTrue("response is: " + response, response instanceof Map); - Map map = (Map) response; - Set inputKeys = body.keySet(); - Set responseKeys = map.keySet(); - inputKeys.containsAll(responseKeys); - responseKeys.containsAll(inputKeys); - } - - - @Test - public void testPostNoContent() { - - Task task = new Task(); - Input input = new Input(); - input.setUri("http://localhost:7009/post2"); - Map body = new HashMap<>(); - body.put("input_key1", "value1"); - body.put("input_key2", 45.3d); - input.setBody(body); - input.setMethod("POST"); - task.getInputData().put(HttpTask.REQUEST_PARAMETER_NAME, input); - - httpTask.start(workflow, task, workflowExecutor); - assertEquals(task.getReasonForIncompletion(), Task.Status.COMPLETED, task.getStatus()); - Map hr = (Map) task.getOutputData().get("response"); - Object response = hr.get("body"); - assertEquals(Task.Status.COMPLETED, task.getStatus()); - assertNull("response is: " + response, response); - } - - @Test - public void testFailure() { - - Task task = new Task(); - Input input = new Input(); - input.setUri("http://localhost:7009/failure"); - input.setMethod("GET"); - task.getInputData().put(HttpTask.REQUEST_PARAMETER_NAME, input); - - httpTask.start(workflow, task, workflowExecutor); - assertEquals("Task output: " + task.getOutputData(), Task.Status.FAILED, task.getStatus()); - assertEquals(ERROR_RESPONSE, task.getReasonForIncompletion()); - - task.setStatus(Status.SCHEDULED); - task.getInputData().remove(HttpTask.REQUEST_PARAMETER_NAME); - httpTask.start(workflow, task, workflowExecutor); - assertEquals(Task.Status.FAILED, task.getStatus()); - assertEquals(HttpTask.MISSING_REQUEST, task.getReasonForIncompletion()); - } - - @Test - public void testTextGET() { - - Task task = new Task(); - Input input = new Input(); - input.setUri("http://localhost:7009/text"); - input.setMethod("GET"); - task.getInputData().put(HttpTask.REQUEST_PARAMETER_NAME, input); - - httpTask.start(workflow, task, workflowExecutor); - Map hr = (Map) task.getOutputData().get("response"); - Object response = hr.get("body"); - assertEquals(Task.Status.COMPLETED, task.getStatus()); - assertEquals(TEXT_RESPONSE, response); - } - - @Test - public void testNumberGET() { - - Task task = new Task(); - Input input = new Input(); - input.setUri("http://localhost:7009/numeric"); - input.setMethod("GET"); - task.getInputData().put(HttpTask.REQUEST_PARAMETER_NAME, input); - - httpTask.start(workflow, task, workflowExecutor); - Map hr = (Map) task.getOutputData().get("response"); - Object response = hr.get("body"); - assertEquals(Task.Status.COMPLETED, task.getStatus()); - assertEquals(NUM_RESPONSE, response); - assertTrue(response instanceof Number); - } - - @Test - public void testJsonGET() throws Exception { - - Task task = new Task(); - Input input = new Input(); - input.setUri("http://localhost:7009/json"); - input.setMethod("GET"); - 
task.getInputData().put(HttpTask.REQUEST_PARAMETER_NAME, input); - - httpTask.start(workflow, task, workflowExecutor); - Map hr = (Map) task.getOutputData().get("response"); - Object response = hr.get("body"); - assertEquals(Task.Status.COMPLETED, task.getStatus()); - assertTrue(response instanceof Map); - Map map = (Map) response; - assertEquals(JSON_RESPONSE, objectMapper.writeValueAsString(map)); - } - - @Test - public void testExecute() { - - Task task = new Task(); - Input input = new Input(); - input.setUri("http://localhost:7009/json"); - input.setMethod("GET"); - task.getInputData().put(HttpTask.REQUEST_PARAMETER_NAME, input); - task.setStatus(Status.SCHEDULED); - task.setScheduledTime(0); - boolean executed = httpTask.execute(workflow, task, workflowExecutor); - assertFalse(executed); - } - - @Test - public void testOptional() { - Task task = new Task(); - Input input = new Input(); - input.setUri("http://localhost:7009/failure"); - input.setMethod("GET"); - task.getInputData().put(HttpTask.REQUEST_PARAMETER_NAME, input); - - httpTask.start(workflow, task, workflowExecutor); - assertEquals("Task output: " + task.getOutputData(), Task.Status.FAILED, task.getStatus()); - assertEquals(ERROR_RESPONSE, task.getReasonForIncompletion()); - assertTrue(!task.getStatus().isSuccessful()); - - task.setStatus(Status.SCHEDULED); - task.getInputData().remove(HttpTask.REQUEST_PARAMETER_NAME); - task.setReferenceTaskName("t1"); - httpTask.start(workflow, task, workflowExecutor); - assertEquals(Task.Status.FAILED, task.getStatus()); - assertEquals(HttpTask.MISSING_REQUEST, task.getReasonForIncompletion()); - assertTrue(!task.getStatus().isSuccessful()); - - Workflow workflow = new Workflow(); - workflow.getTasks().add(task); - - WorkflowDef def = new WorkflowDef(); - WorkflowTask wft = new WorkflowTask(); - wft.setOptional(true); - wft.setName("HTTP"); - wft.setWorkflowTaskType(Type.USER_DEFINED); - wft.setTaskReferenceName("t1"); - def.getTasks().add(wft); - MetadataDAO metadataDAO = mock(MetadataDAO.class); - QueueDAO queueDAO = mock(QueueDAO.class); - ExternalPayloadStorageUtils externalPayloadStorageUtils = mock(ExternalPayloadStorageUtils.class); - ParametersUtils parametersUtils = mock(ParametersUtils.class); - Map taskMappers = new HashMap<>(); - taskMappers.put("DECISION", new DecisionTaskMapper()); - taskMappers.put("DYNAMIC", new DynamicTaskMapper(parametersUtils, metadataDAO)); - taskMappers.put("FORK_JOIN", new ForkJoinTaskMapper()); - taskMappers.put("JOIN", new JoinTaskMapper()); - taskMappers.put("FORK_JOIN_DYNAMIC", new ForkJoinDynamicTaskMapper(parametersUtils, objectMapper)); - taskMappers.put("USER_DEFINED", new UserDefinedTaskMapper(parametersUtils, metadataDAO)); - taskMappers.put("SIMPLE", new SimpleTaskMapper(parametersUtils, metadataDAO)); - taskMappers.put("SUB_WORKFLOW", new SubWorkflowTaskMapper(parametersUtils, metadataDAO)); - taskMappers.put("EVENT", new EventTaskMapper(parametersUtils)); - taskMappers.put("WAIT", new WaitTaskMapper(parametersUtils)); - new DeciderService(metadataDAO, parametersUtils, queueDAO, externalPayloadStorageUtils, taskMappers).decide(workflow, def); - - System.out.println(workflow.getTasks()); - System.out.println(workflow.getStatus()); - } - - @Test - public void testOAuth() { - Task task = new Task(); - Input input = new Input(); - input.setUri("http://localhost:7009/oauth"); - input.setMethod("POST"); - input.setOauthConsumerKey("someKey"); - input.setOauthConsumerSecret("someSecret"); - 
task.getInputData().put(HttpTask.REQUEST_PARAMETER_NAME, input); - - httpTask.start(workflow, task, workflowExecutor); - - Map response = (Map) task.getOutputData().get("response"); - Map body = (Map) response.get("body"); - - assertEquals("someKey", body.get("oauth_consumer_key")); - assertTrue("Should have OAuth nonce", body.containsKey("oauth_nonce")); - assertTrue("Should have OAuth signature", body.containsKey("oauth_signature")); - assertTrue("Should have OAuth signature method", body.containsKey("oauth_signature_method")); - assertTrue("Should have OAuth oauth_timestamp", body.containsKey("oauth_timestamp")); - assertTrue("Should have OAuth oauth_version", body.containsKey("oauth_version")); - - assertEquals("Task output: " + task.getOutputData(), Status.COMPLETED, task.getStatus()); - } - - private static class EchoHandler extends AbstractHandler { - - private TypeReference> mapOfObj = new TypeReference>() {}; - - @Override - public void handle(String target, Request baseRequest, HttpServletRequest request, HttpServletResponse response) - throws IOException { - if(request.getMethod().equals("GET") && request.getRequestURI().equals("/text")) { - PrintWriter writer = response.getWriter(); - writer.print(TEXT_RESPONSE); - writer.flush(); - writer.close(); - } else if(request.getMethod().equals("GET") && request.getRequestURI().equals("/json")) { - response.addHeader("Content-Type", "application/json"); - PrintWriter writer = response.getWriter(); - writer.print(JSON_RESPONSE); - writer.flush(); - writer.close(); - } else if(request.getMethod().equals("GET") && request.getRequestURI().equals("/failure")) { - response.addHeader("Content-Type", "text/plain"); - response.setStatus(500); - PrintWriter writer = response.getWriter(); - writer.print(ERROR_RESPONSE); - writer.flush(); - writer.close(); - } else if(request.getMethod().equals("POST") && request.getRequestURI().equals("/post")) { - response.addHeader("Content-Type", "application/json"); - BufferedReader reader = request.getReader(); - Map input = objectMapper.readValue(reader, mapOfObj); - Set keys = input.keySet(); - for(String key : keys) { - input.put(key, key); - } - PrintWriter writer = response.getWriter(); - writer.print(objectMapper.writeValueAsString(input)); - writer.flush(); - writer.close(); - } else if(request.getMethod().equals("POST") && request.getRequestURI().equals("/post2")) { - response.addHeader("Content-Type", "application/json"); - response.setStatus(204); - BufferedReader reader = request.getReader(); - Map input = objectMapper.readValue(reader, mapOfObj); - Set keys = input.keySet(); - System.out.println(keys); - response.getWriter().close(); - - } else if(request.getMethod().equals("GET") && request.getRequestURI().equals("/numeric")) { - PrintWriter writer = response.getWriter(); - writer.print(NUM_RESPONSE); - writer.flush(); - writer.close(); - } else if(request.getMethod().equals("POST") && request.getRequestURI().equals("/oauth")) { - //echo back oauth parameters generated in the Authorization header in the response - Map params = parseOauthParameters(request); - response.addHeader("Content-Type", "application/json"); - PrintWriter writer = response.getWriter(); - writer.print(objectMapper.writeValueAsString(params)); - writer.flush(); - writer.close(); - } - } - - private Map parseOauthParameters(HttpServletRequest request) { - String paramString = request.getHeader("Authorization").replaceAll("^OAuth (.*)", "$1"); - return Arrays.stream(paramString.split("\\s*,\\s*")) - .map(pair -> 
pair.split("=")) - .collect(Collectors.toMap(o -> o[0], o -> o[1].replaceAll("\"",""))); - } - } + private static final String ERROR_RESPONSE = "Something went wrong!"; + + private static final String TEXT_RESPONSE = "Text Response"; + + private static final double NUM_RESPONSE = 42.42d; + + private static String JSON_RESPONSE; + + private HttpTask httpTask; + + private WorkflowExecutor workflowExecutor; + private Configuration config; + + private Workflow workflow = new Workflow(); + + private static Server server; + + private static ObjectMapper objectMapper = new ObjectMapper(); + + @BeforeClass + public static void init() throws Exception { + + Map map = new HashMap<>(); + map.put("key", "value1"); + map.put("num", 42); + JSON_RESPONSE = objectMapper.writeValueAsString(map); + + server = new Server(7009); + ServletContextHandler servletContextHandler = new ServletContextHandler(server, "/", ServletContextHandler.SESSIONS); + servletContextHandler.setHandler(new EchoHandler()); + server.start(); + } + + @AfterClass + public static void cleanup() { + if (server != null) { + try { + server.stop(); + } catch (Exception e) { + e.printStackTrace(); + } + } + } + + @Before + public void setup() { + workflowExecutor = mock(WorkflowExecutor.class); + config = mock(Configuration.class); + RestClientManager rcm = new RestClientManager(); + when(config.getServerId()).thenReturn("test_server_id"); + httpTask = new HttpTask(rcm, config); + } + + @Test + public void testPost() { + + Task task = new Task(); + Input input = new Input(); + input.setUri("http://localhost:7009/post"); + Map body = new HashMap<>(); + body.put("input_key1", "value1"); + body.put("input_key2", 45.3d); + input.setBody(body); + input.setMethod("POST"); + task.getInputData().put(HttpTask.REQUEST_PARAMETER_NAME, input); + + httpTask.start(workflow, task, workflowExecutor); + assertEquals(task.getReasonForIncompletion(), Task.Status.COMPLETED, task.getStatus()); + Map hr = (Map) task.getOutputData().get("response"); + Object response = hr.get("body"); + assertEquals(Task.Status.COMPLETED, task.getStatus()); + assertTrue("response is: " + response, response instanceof Map); + Map map = (Map) response; + Set inputKeys = body.keySet(); + Set responseKeys = map.keySet(); + inputKeys.containsAll(responseKeys); + responseKeys.containsAll(inputKeys); + } + + + @Test + public void testPostNoContent() { + + Task task = new Task(); + Input input = new Input(); + input.setUri("http://localhost:7009/post2"); + Map body = new HashMap<>(); + body.put("input_key1", "value1"); + body.put("input_key2", 45.3d); + input.setBody(body); + input.setMethod("POST"); + task.getInputData().put(HttpTask.REQUEST_PARAMETER_NAME, input); + + httpTask.start(workflow, task, workflowExecutor); + assertEquals(task.getReasonForIncompletion(), Task.Status.COMPLETED, task.getStatus()); + Map hr = (Map) task.getOutputData().get("response"); + Object response = hr.get("body"); + assertEquals(Task.Status.COMPLETED, task.getStatus()); + assertNull("response is: " + response, response); + } + + @Test + public void testFailure() { + + Task task = new Task(); + Input input = new Input(); + input.setUri("http://localhost:7009/failure"); + input.setMethod("GET"); + task.getInputData().put(HttpTask.REQUEST_PARAMETER_NAME, input); + + httpTask.start(workflow, task, workflowExecutor); + assertEquals("Task output: " + task.getOutputData(), Task.Status.FAILED, task.getStatus()); + assertEquals(ERROR_RESPONSE, task.getReasonForIncompletion()); + + task.setStatus(Status.SCHEDULED); 
+ task.getInputData().remove(HttpTask.REQUEST_PARAMETER_NAME); + httpTask.start(workflow, task, workflowExecutor); + assertEquals(Task.Status.FAILED, task.getStatus()); + assertEquals(HttpTask.MISSING_REQUEST, task.getReasonForIncompletion()); + } + + @Test + public void testTextGET() { + + Task task = new Task(); + Input input = new Input(); + input.setUri("http://localhost:7009/text"); + input.setMethod("GET"); + task.getInputData().put(HttpTask.REQUEST_PARAMETER_NAME, input); + + httpTask.start(workflow, task, workflowExecutor); + Map hr = (Map) task.getOutputData().get("response"); + Object response = hr.get("body"); + assertEquals(Task.Status.COMPLETED, task.getStatus()); + assertEquals(TEXT_RESPONSE, response); + } + + @Test + public void testNumberGET() { + + Task task = new Task(); + Input input = new Input(); + input.setUri("http://localhost:7009/numeric"); + input.setMethod("GET"); + task.getInputData().put(HttpTask.REQUEST_PARAMETER_NAME, input); + + httpTask.start(workflow, task, workflowExecutor); + Map hr = (Map) task.getOutputData().get("response"); + Object response = hr.get("body"); + assertEquals(Task.Status.COMPLETED, task.getStatus()); + assertEquals(NUM_RESPONSE, response); + assertTrue(response instanceof Number); + } + + @Test + public void testJsonGET() throws JsonProcessingException { + + Task task = new Task(); + Input input = new Input(); + input.setUri("http://localhost:7009/json"); + input.setMethod("GET"); + task.getInputData().put(HttpTask.REQUEST_PARAMETER_NAME, input); + + httpTask.start(workflow, task, workflowExecutor); + Map hr = (Map) task.getOutputData().get("response"); + Object response = hr.get("body"); + assertEquals(Task.Status.COMPLETED, task.getStatus()); + assertTrue(response instanceof Map); + Map map = (Map) response; + assertEquals(JSON_RESPONSE, objectMapper.writeValueAsString(map)); + } + + @Test + public void testExecute() { + + Task task = new Task(); + Input input = new Input(); + input.setUri("http://localhost:7009/json"); + input.setMethod("GET"); + task.getInputData().put(HttpTask.REQUEST_PARAMETER_NAME, input); + task.setStatus(Status.SCHEDULED); + task.setScheduledTime(0); + boolean executed = httpTask.execute(workflow, task, workflowExecutor); + assertFalse(executed); + + } + + @Test + public void testOptional() { + Task task = new Task(); + Input input = new Input(); + input.setUri("http://localhost:7009/failure"); + input.setMethod("GET"); + task.getInputData().put(HttpTask.REQUEST_PARAMETER_NAME, input); + + httpTask.start(workflow, task, workflowExecutor); + assertEquals("Task output: " + task.getOutputData(), Task.Status.FAILED, task.getStatus()); + assertEquals(ERROR_RESPONSE, task.getReasonForIncompletion()); + assertTrue(!task.getStatus().isSuccessful()); + + task.setStatus(Status.SCHEDULED); + task.getInputData().remove(HttpTask.REQUEST_PARAMETER_NAME); + task.setReferenceTaskName("t1"); + httpTask.start(workflow, task, workflowExecutor); + assertEquals(Task.Status.FAILED, task.getStatus()); + assertEquals(HttpTask.MISSING_REQUEST, task.getReasonForIncompletion()); + assertTrue(!task.getStatus().isSuccessful()); + + WorkflowTask workflowTask = new WorkflowTask(); + workflowTask.setOptional(true); + workflowTask.setName("HTTP"); + workflowTask.setWorkflowTaskType(TaskType.USER_DEFINED); + workflowTask.setTaskReferenceName("t1"); + + WorkflowDef def = new WorkflowDef(); + def.getTasks().add(workflowTask); + + Workflow workflow = new Workflow(); + workflow.setWorkflowDefinition(def); + workflow.getTasks().add(task); + + 
QueueDAO queueDAO = mock(QueueDAO.class); + ExternalPayloadStorageUtils externalPayloadStorageUtils = mock(ExternalPayloadStorageUtils.class); + ParametersUtils parametersUtils = mock(ParametersUtils.class); + + Map taskMappers = new HashMap<>(); + new DeciderService(parametersUtils, queueDAO, externalPayloadStorageUtils, taskMappers).decide(workflow); + + System.out.println(workflow.getTasks()); + System.out.println(workflow.getStatus()); + } + + @Test + public void testOAuth() { + Task task = new Task(); + Input input = new Input(); + input.setUri("http://localhost:7009/oauth"); + input.setMethod("POST"); + input.setOauthConsumerKey("someKey"); + input.setOauthConsumerSecret("someSecret"); + task.getInputData().put(HttpTask.REQUEST_PARAMETER_NAME, input); + + httpTask.start(workflow, task, workflowExecutor); + + Map response = (Map) task.getOutputData().get("response"); + Map body = (Map) response.get("body"); + + assertEquals("someKey", body.get("oauth_consumer_key")); + assertTrue("Should have OAuth nonce", body.containsKey("oauth_nonce")); + assertTrue("Should have OAuth signature", body.containsKey("oauth_signature")); + assertTrue("Should have OAuth signature method", body.containsKey("oauth_signature_method")); + assertTrue("Should have OAuth oauth_timestamp", body.containsKey("oauth_timestamp")); + assertTrue("Should have OAuth oauth_version", body.containsKey("oauth_version")); + + assertEquals("Task output: " + task.getOutputData(), Status.COMPLETED, task.getStatus()); + } + + private static class EchoHandler extends AbstractHandler { + + private TypeReference> mapOfObj = new TypeReference>() { + }; + + @Override + public void handle(String target, Request baseRequest, HttpServletRequest request, HttpServletResponse response) + throws IOException { + if (request.getMethod().equals("GET") && request.getRequestURI().equals("/text")) { + PrintWriter writer = response.getWriter(); + writer.print(TEXT_RESPONSE); + writer.flush(); + writer.close(); + } else if (request.getMethod().equals("GET") && request.getRequestURI().equals("/json")) { + response.addHeader("Content-Type", "application/json"); + PrintWriter writer = response.getWriter(); + writer.print(JSON_RESPONSE); + writer.flush(); + writer.close(); + } else if (request.getMethod().equals("GET") && request.getRequestURI().equals("/failure")) { + response.addHeader("Content-Type", "text/plain"); + response.setStatus(500); + PrintWriter writer = response.getWriter(); + writer.print(ERROR_RESPONSE); + writer.flush(); + writer.close(); + } else if (request.getMethod().equals("POST") && request.getRequestURI().equals("/post")) { + response.addHeader("Content-Type", "application/json"); + BufferedReader reader = request.getReader(); + Map input = objectMapper.readValue(reader, mapOfObj); + Set keys = input.keySet(); + for (String key : keys) { + input.put(key, key); + } + PrintWriter writer = response.getWriter(); + writer.print(objectMapper.writeValueAsString(input)); + writer.flush(); + writer.close(); + } else if (request.getMethod().equals("POST") && request.getRequestURI().equals("/post2")) { + response.addHeader("Content-Type", "application/json"); + response.setStatus(204); + BufferedReader reader = request.getReader(); + Map input = objectMapper.readValue(reader, mapOfObj); + Set keys = input.keySet(); + System.out.println(keys); + response.getWriter().close(); + + } else if (request.getMethod().equals("GET") && request.getRequestURI().equals("/numeric")) { + PrintWriter writer = response.getWriter(); + 
writer.print(NUM_RESPONSE); + writer.flush(); + writer.close(); + } else if (request.getMethod().equals("POST") && request.getRequestURI().equals("/oauth")) { + //echo back oauth parameters generated in the Authorization header in the response + Map params = parseOauthParameters(request); + response.addHeader("Content-Type", "application/json"); + PrintWriter writer = response.getWriter(); + writer.print(objectMapper.writeValueAsString(params)); + writer.flush(); + writer.close(); + } + } + + private Map parseOauthParameters(HttpServletRequest request) { + String paramString = request.getHeader("Authorization").replaceAll("^OAuth (.*)", "$1"); + return Arrays.stream(paramString.split("\\s*,\\s*")) + .map(pair -> pair.split("=")) + .collect(Collectors.toMap(o -> o[0], o -> o[1].replaceAll("\"", ""))); + } + } } diff --git a/core/build.gradle b/core/build.gradle index 17a764f97a..dcd22b1583 100644 --- a/core/build.gradle +++ b/core/build.gradle @@ -8,7 +8,6 @@ dependencies { compile "com.netflix.servo:servo-core:${revServo}" compile "com.netflix.spectator:spectator-api:${revSpectator}" - compile "com.fasterxml.jackson.core:jackson-databind:${revJacksonDatabind}" compile "com.fasterxml.jackson.core:jackson-core:${revJacksonCore}" compile "com.jayway.jsonpath:json-path:${revJsonPath}" diff --git a/core/src/main/java/com/netflix/conductor/core/config/Configuration.java b/core/src/main/java/com/netflix/conductor/core/config/Configuration.java index ab2c795e62..25632367b7 100644 --- a/core/src/main/java/com/netflix/conductor/core/config/Configuration.java +++ b/core/src/main/java/com/netflix/conductor/core/config/Configuration.java @@ -1,19 +1,15 @@ /* * Copyright 2016 Netflix, Inc. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. */ - package com.netflix.conductor.core.config; import com.google.inject.AbstractModule; @@ -23,65 +19,148 @@ /** * @author Viren - * */ public interface Configuration { - - /** - * - * @return time frequency in seconds, at which the workflow sweeper should run to evaluate running workflows. - */ - int getSweepFrequency(); - - /** - * - * @return when set to true, the sweep is disabled - */ - boolean disableSweep(); - - - /** - * - * @return when set to true, the background task workers executing async system tasks (eg HTTP) are disabled - * - */ - boolean disableAsyncWorkers(); - - /** - * - * @return ID of the server. Can be host name, IP address or any other meaningful identifier. 
Used for logging - */ - String getServerId(); + String DB_PROPERTY_NAME = "db"; + String DB_DEFAULT_VALUE = "memory"; - /** - * - * @return Current environment. e.g. test, prod - */ - String getEnvironment(); + String SWEEP_FREQUENCY_PROPERTY_NAME = "decider.sweep.frequency.seconds"; + int SWEEP_FREQUENCY_DEFAULT_VALUE = 30; - /** - * - * @return name of the stack under which the app is running. e.g. devint, testintg, staging, prod etc. - */ - String getStack(); + String SWEEP_DISABLE_PROPERTY_NAME = "decider.sweep.disable"; + // FIXME This really should be typed correctly. + String SWEEP_DISABLE_DEFAULT_VALUE = "false"; - /** - * - * @return APP ID. Used for logging - */ - String getAppId(); + String DISABLE_ASYNC_WORKERS_PROPERTY_NAME = "conductor.disable.async.workers"; + // FIXME This really should be typed correctly. + String DISABLE_ASYNC_WORKERS_DEFAULT_VALUE = "false"; - /** - * - * @return Data center region. if hosting on Amazon the value is something like us-east-1, us-west-2 etc. - */ - String getRegion(); + String ENVIRONMENT_PROPERTY_NAME = "environment"; + String ENVIRONMENT_DEFAULT_VALUE = "test"; + + String STACK_PROPERTY_NAME = "STACK"; + String STACK_DEFAULT_VALUE = "test"; + + String APP_ID_PROPERTY_NAME = "APP_ID"; + String APP_ID_DEFAULT_VALUE = "conductor"; - /** - * - * @return Availability zone / rack. for AWS deployments, the value is something like us-east-1a, etc. - */ - String getAvailabilityZone(); + String REGION_PROPERTY_NAME = "EC2_REGION"; + String REGION_DEFAULT_VALUE = "us-east-1"; + + String AVAILABILITY_ZONE_PROPERTY_NAME = "EC2_AVAILABILITY_ZONE"; + String AVAILABILITY_ZONE_DEFAULT_VALUE = "us-east-1c"; + + String JERSEY_ENABLED_PROPERTY_NAME = "conductor.jersey.enabled"; + boolean JERSEY_ENABLED_DEFAULT_VALUE = true; + + String ADDITIONAL_MODULES_PROPERTY_NAME = "conductor.additional.modules"; + + //TODO add constants for input/output external payload related properties. + + default DB getDB() { + return DB.valueOf(getDBString()); + } + + default String getDBString() { + return getProperty(DB_PROPERTY_NAME, DB_DEFAULT_VALUE).toUpperCase(); + } + + /** + * @return time frequency in seconds, at which the workflow sweeper should run to evaluate running workflows. + */ + int getSweepFrequency(); + + /** + * @return when set to true, the sweep is disabled + */ + boolean disableSweep(); + + + /** + * @return when set to true, the background task workers executing async system tasks (eg HTTP) are disabled + */ + boolean disableAsyncWorkers(); + + /** + * @return ID of the server. Can be host name, IP address or any other meaningful identifier. Used for logging + */ + String getServerId(); + + /** + * @return Current environment. e.g. test, prod + */ + String getEnvironment(); + + /** + * @return name of the stack under which the app is running. e.g. devint, testintg, staging, prod etc. + */ + String getStack(); + + /** + * @return APP ID. Used for logging + */ + String getAppId(); + + /** + * @return Data center region. if hosting on Amazon the value is something like us-east-1, us-west-2 etc. + */ + String getRegion(); + + /** + * @return Availability zone / rack. for AWS deployments, the value is something like us-east-1a, etc. + */ + String getAvailabilityZone(); + + default boolean getJerseyEnabled() { + return getBooleanProperty(JERSEY_ENABLED_PROPERTY_NAME, JERSEY_ENABLED_DEFAULT_VALUE); + } + + /** + * @param name Name of the property + * @param defaultValue Default value when not specified + * @return User defined integer property. 
+ */ + int getIntProperty(String name, int defaultValue); + + /** + * @param name Name of the property + * @param defaultValue Default value when not specified + * @return User defined string property. + */ + String getProperty(String name, String defaultValue); + + boolean getBooleanProperty(String name, boolean defaultValue); + + default boolean getBoolProperty(String name, boolean defaultValue) { + String value = getProperty(name, null); + if (null == value || value.trim().length() == 0) { + return defaultValue; + } + return Boolean.valueOf(value.trim()); + } + + /** + * @return Returns all the configurations in a map. + */ + Map getAll(); + + /** + * @return Provides a list of additional modules to configure. Use this to inject additional modules that should be + * loaded as part of the Conductor server initialization If you are creating custom tasks + * (com.netflix.conductor.core.execution.tasks.WorkflowSystemTask) then initialize them as part of the custom + * modules. + */ + default List getAdditionalModules() { + return null; + } + + + /** + * @param name Name of the property + * @param defaultValue Default value when not specified + * @return User defined Long property. + */ + long getLongProperty(String name, long defaultValue); /** * @@ -131,46 +210,8 @@ public interface Configuration { */ Long getMaxTaskOutputPayloadSizeThresholdKB(); - /** - * - * @param name Name of the property - * @param defaultValue Default value when not specified - * @return User defined integer property. - */ - int getIntProperty(String name, int defaultValue); - - - /** - * - * @param name Name of the property - * @param defaultValue Default value when not specified - * @return User defined Long property. - */ - long getLongProperty(String name, long defaultValue); - - /** - * - * @param name Name of the property - * @param defaultValue Default value when not specified - * @return User defined string property. - */ - String getProperty(String name, String defaultValue); - - - /** - * - * @return Returns all the configurations in a map. - */ - Map getAll(); - - /** - * - * @return Provides a list of additional modules to configure. - * Use this to inject additional modules that should be loaded as part of the Conductor server initialization - * If you are creating custom tasks (com.netflix.conductor.core.execution.tasks.WorkflowSystemTask) then initialize them as part of the custom modules. 
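The additional-modules hook documented here is wired up in SystemPropertiesConfiguration later in this change: the conductor.additional.modules property takes a comma-separated list of class names, each of which must have a no-arg constructor and extend Guice's AbstractModule (anything else is logged and skipped). A hedged sketch, with com.example.MyExtensionsModule standing in for a real module:

    package com.example;

    import com.google.inject.AbstractModule;

    // Hypothetical extension module; enable it with
    //   -Dconductor.additional.modules=com.example.MyExtensionsModule
    // (or a comma-separated list of several such classes).
    public class MyExtensionsModule extends AbstractModule {
        @Override
        protected void configure() {
            // Bind custom WorkflowSystemTask implementations, event queue providers, etc. here.
        }
    }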
- */ - default List getAdditionalModules() { - return null; - } + enum DB { + REDIS, DYNOMITE, MEMORY, REDIS_CLUSTER, MYSQL + } } diff --git a/core/src/main/java/com/netflix/conductor/core/config/CoreModule.java b/core/src/main/java/com/netflix/conductor/core/config/CoreModule.java index 30297331d7..5b2a4e3bb6 100644 --- a/core/src/main/java/com/netflix/conductor/core/config/CoreModule.java +++ b/core/src/main/java/com/netflix/conductor/core/config/CoreModule.java @@ -49,11 +49,26 @@ import com.netflix.conductor.dao.MetadataDAO; import com.netflix.conductor.dao.QueueDAO; +import static com.netflix.conductor.common.metadata.workflow.TaskType.TASK_TYPE_DECISION; +import static com.netflix.conductor.common.metadata.workflow.TaskType.TASK_TYPE_DYNAMIC; +import static com.netflix.conductor.common.metadata.workflow.TaskType.TASK_TYPE_EVENT; +import static com.netflix.conductor.common.metadata.workflow.TaskType.TASK_TYPE_FORK_JOIN; +import static com.netflix.conductor.common.metadata.workflow.TaskType.TASK_TYPE_FORK_JOIN_DYNAMIC; +import static com.netflix.conductor.common.metadata.workflow.TaskType.TASK_TYPE_JOIN; +import static com.netflix.conductor.common.metadata.workflow.TaskType.TASK_TYPE_SIMPLE; +import static com.netflix.conductor.common.metadata.workflow.TaskType.TASK_TYPE_SUB_WORKFLOW; +import static com.netflix.conductor.common.metadata.workflow.TaskType.TASK_TYPE_USER_DEFINED; +import static com.netflix.conductor.common.metadata.workflow.TaskType.TASK_TYPE_WAIT; +import static com.netflix.conductor.core.events.EventQueues.EVENT_QUEUE_PROVIDERS_QUALIFIER; + /** * @author Viren */ public class CoreModule extends AbstractModule { + public static final String CONDUCTOR_QUALIFIER = "conductor"; + public static final String TASK_MAPPERS_QUALIFIER = "TaskMappers"; + @Override protected void configure() { install(MultibindingsScanner.asModule()); @@ -72,91 +87,91 @@ public ParametersUtils getParameterUtils() { } @ProvidesIntoMap - @StringMapKey("conductor") + @StringMapKey(CONDUCTOR_QUALIFIER) @Singleton - @Named("EventQueueProviders") + @Named(EVENT_QUEUE_PROVIDERS_QUALIFIER) public EventQueueProvider getDynoEventQueueProvider(QueueDAO queueDAO, Configuration configuration) { return new DynoEventQueueProvider(queueDAO, configuration); } @ProvidesIntoMap - @StringMapKey("DECISION") + @StringMapKey(TASK_TYPE_DECISION) @Singleton - @Named("TaskMappers") + @Named(TASK_MAPPERS_QUALIFIER) public TaskMapper getDecisionTaskMapper() { return new DecisionTaskMapper(); } @ProvidesIntoMap - @StringMapKey("DYNAMIC") + @StringMapKey(TASK_TYPE_DYNAMIC) @Singleton - @Named("TaskMappers") - public TaskMapper getDynamicTaskMapper(ParametersUtils parametersUtils, MetadataDAO metadataDAO) { - return new DynamicTaskMapper(parametersUtils, metadataDAO); + @Named(TASK_MAPPERS_QUALIFIER) + public TaskMapper getDynamicTaskMapper(ParametersUtils parametersUtils) { + return new DynamicTaskMapper(parametersUtils); } @ProvidesIntoMap - @StringMapKey("JOIN") + @StringMapKey(TASK_TYPE_JOIN) @Singleton - @Named("TaskMappers") + @Named(TASK_MAPPERS_QUALIFIER) public TaskMapper getJoinTaskMapper() { return new JoinTaskMapper(); } @ProvidesIntoMap - @StringMapKey("FORK_JOIN_DYNAMIC") + @StringMapKey(TASK_TYPE_FORK_JOIN_DYNAMIC) @Singleton - @Named("TaskMappers") - public TaskMapper getForkJoinDynamicTaskMapper(ParametersUtils parametersUtils, ObjectMapper objectMapper) { - return new ForkJoinDynamicTaskMapper(parametersUtils, objectMapper); + @Named(TASK_MAPPERS_QUALIFIER) + public TaskMapper 
getForkJoinDynamicTaskMapper(ParametersUtils parametersUtils, ObjectMapper objectMapper, MetadataDAO metadataDAO) { + return new ForkJoinDynamicTaskMapper(parametersUtils, objectMapper, metadataDAO); } @ProvidesIntoMap - @StringMapKey("EVENT") + @StringMapKey(TASK_TYPE_EVENT) @Singleton - @Named("TaskMappers") + @Named(TASK_MAPPERS_QUALIFIER) public TaskMapper getEventTaskMapper(ParametersUtils parametersUtils) { return new EventTaskMapper(parametersUtils); } @ProvidesIntoMap - @StringMapKey("WAIT") + @StringMapKey(TASK_TYPE_WAIT) @Singleton - @Named("TaskMappers") + @Named(TASK_MAPPERS_QUALIFIER) public TaskMapper getWaitTaskMapper(ParametersUtils parametersUtils) { return new WaitTaskMapper(parametersUtils); } @ProvidesIntoMap + @StringMapKey(TASK_TYPE_SUB_WORKFLOW) @Singleton - @StringMapKey("SUB_WORKFLOW") - @Named("TaskMappers") - public TaskMapper getSubWorkflowTaskMapper(ParametersUtils parametersUtils, MetadataDAO metadataDAO) { - return new SubWorkflowTaskMapper(parametersUtils, metadataDAO); + @Named(TASK_MAPPERS_QUALIFIER) + public TaskMapper getSubWorkflowTaskMapper(ParametersUtils parametersUtils) { + return new SubWorkflowTaskMapper(parametersUtils); } @ProvidesIntoMap + @StringMapKey(TASK_TYPE_FORK_JOIN) @Singleton - @StringMapKey("FORK_JOIN") - @Named("TaskMappers") + @Named(TASK_MAPPERS_QUALIFIER) public TaskMapper getForkJoinTaskMapper() { return new ForkJoinTaskMapper(); } @ProvidesIntoMap - @StringMapKey("USER_DEFINED") + @StringMapKey(TASK_TYPE_USER_DEFINED) @Singleton - @Named("TaskMappers") - public TaskMapper getUserDefinedTaskMapper(ParametersUtils parametersUtils, MetadataDAO metadataDAO) { - return new UserDefinedTaskMapper(parametersUtils, metadataDAO); + @Named(TASK_MAPPERS_QUALIFIER) + public TaskMapper getUserDefinedTaskMapper(ParametersUtils parametersUtils) { + return new UserDefinedTaskMapper(parametersUtils); } @ProvidesIntoMap - @StringMapKey("SIMPLE") + @StringMapKey(TASK_TYPE_SIMPLE) @Singleton - @Named("TaskMappers") - public TaskMapper getSimpleTaskMapper(ParametersUtils parametersUtils, MetadataDAO metadataDAO) { - return new SimpleTaskMapper(parametersUtils, metadataDAO); + @Named(TASK_MAPPERS_QUALIFIER) + public TaskMapper getSimpleTaskMapper(ParametersUtils parametersUtils) { + return new SimpleTaskMapper(parametersUtils); } } diff --git a/core/src/main/java/com/netflix/conductor/core/config/SystemPropertiesConfiguration.java b/core/src/main/java/com/netflix/conductor/core/config/SystemPropertiesConfiguration.java new file mode 100644 index 0000000000..1379669be8 --- /dev/null +++ b/core/src/main/java/com/netflix/conductor/core/config/SystemPropertiesConfiguration.java @@ -0,0 +1,211 @@ +/** + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
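Returning briefly to the CoreModule bindings above: because task mappers are collected through a Guice map binding keyed by task type and qualified with TASK_MAPPERS_QUALIFIER, an extension module can contribute a mapper for its own task type without touching CoreModule. A rough sketch, assuming the MultibindingsScanner installed by CoreModule also picks up extension modules; the CRON task type is invented for illustration, and WaitTaskMapper is reused only so the sketch compiles:

    import javax.inject.Named;

    import com.google.inject.AbstractModule;
    import com.google.inject.Singleton;
    import com.google.inject.multibindings.ProvidesIntoMap;
    import com.google.inject.multibindings.StringMapKey;
    import com.netflix.conductor.core.execution.ParametersUtils;
    import com.netflix.conductor.core.execution.mapper.TaskMapper;
    import com.netflix.conductor.core.execution.mapper.WaitTaskMapper;

    import static com.netflix.conductor.core.config.CoreModule.TASK_MAPPERS_QUALIFIER;

    public class CronTaskModule extends AbstractModule {
        @Override
        protected void configure() {
            // No explicit bindings needed; the annotated provider method below does the work.
        }

        @ProvidesIntoMap
        @StringMapKey("CRON")               // hypothetical custom task type
        @Singleton
        @Named(TASK_MAPPERS_QUALIFIER)
        public TaskMapper getCronTaskMapper(ParametersUtils parametersUtils) {
            // A real extension would return its own TaskMapper; WaitTaskMapper stands in here.
            return new WaitTaskMapper(parametersUtils);
        }
    }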
+ */ +/** + * + */ +package com.netflix.conductor.core.config; + +import com.google.inject.AbstractModule; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Properties; + +/** + * @author Viren + * + */ +public class SystemPropertiesConfiguration implements Configuration { + + private static Logger logger = LoggerFactory.getLogger(SystemPropertiesConfiguration.class); + + @Override + public int getSweepFrequency() { + return getIntProperty(SWEEP_FREQUENCY_PROPERTY_NAME, SWEEP_FREQUENCY_DEFAULT_VALUE); + } + + @Override + public boolean disableSweep() { + String disable = getProperty(SWEEP_DISABLE_PROPERTY_NAME, SWEEP_DISABLE_DEFAULT_VALUE); + return Boolean.getBoolean(disable); + } + + @Override + public boolean disableAsyncWorkers() { + String disable = getProperty(DISABLE_ASYNC_WORKERS_PROPERTY_NAME, DISABLE_ASYNC_WORKERS_DEFAULT_VALUE); + return Boolean.getBoolean(disable); + } + + @Override + public String getServerId() { + try { + return InetAddress.getLocalHost().getHostName(); + } catch (UnknownHostException e) { + return "unknown"; + } + } + + @Override + public String getEnvironment() { + return getProperty(ENVIRONMENT_PROPERTY_NAME, ENVIRONMENT_DEFAULT_VALUE); + } + + @Override + public String getStack() { + return getProperty(STACK_PROPERTY_NAME, STACK_DEFAULT_VALUE); + } + + @Override + public String getAppId() { + return getProperty(APP_ID_PROPERTY_NAME, APP_ID_DEFAULT_VALUE); + } + + @Override + public String getRegion() { + return getProperty(REGION_PROPERTY_NAME, REGION_DEFAULT_VALUE); + } + + @Override + public String getAvailabilityZone() { + return getProperty(AVAILABILITY_ZONE_PROPERTY_NAME, AVAILABILITY_ZONE_DEFAULT_VALUE); + } + + @Override + public int getIntProperty(String key, int defaultValue) { + String val = getProperty(key, Integer.toString(defaultValue)); + try { + defaultValue = Integer.parseInt(val); + } catch (NumberFormatException e) { + } + return defaultValue; + } + + @Override + public long getLongProperty(String key, long defaultValue) { + String val = getProperty(key, Long.toString(defaultValue)); + try { + defaultValue = Integer.parseInt(val); + } catch (NumberFormatException e) { + } + return defaultValue; + } + + + @Override + public Long getWorkflowInputPayloadSizeThresholdKB() { + return getLongProperty("conductor.workflow.input.payload.threshold.kb", 5120L); + } + + @Override + public Long getMaxWorkflowInputPayloadSizeThresholdKB() { + return getLongProperty("conductor.max.workflow.input.payload.threshold.kb", 10240L); + } + + @Override + public Long getWorkflowOutputPayloadSizeThresholdKB() { + return getLongProperty("conductor.workflow.output.payload.threshold.kb", 5120L); + } + + @Override + public Long getMaxWorkflowOutputPayloadSizeThresholdKB() { + return getLongProperty("conductor.max.workflow.output.payload.threshold.kb", 10240L); + } + + @Override + public Long getTaskInputPayloadSizeThresholdKB() { + return getLongProperty("conductor.task.input.payload.threshold.kb", 3072L); + } + + @Override + public Long getMaxTaskInputPayloadSizeThresholdKB() { + return getLongProperty("conductor.max.task.input.payload.threshold.kb", 10240L); + } + + @Override + public Long getTaskOutputPayloadSizeThresholdKB() { + return 
getLongProperty("conductor.task.output.payload.threshold.kb", 3072L); + } + + public Long getMaxTaskOutputPayloadSizeThresholdKB() { + return getLongProperty("conductor.max.task.output.payload.threshold.kb", 10240L); + } + + @Override + public String getProperty(String key, String defaultValue) { + + String val = null; + try { + val = System.getenv(key.replace('.', '_')); + if (val == null || val.isEmpty()) { + val = Optional.ofNullable(System.getProperty(key)).orElse(defaultValue); + } + } catch (Exception e) { + logger.error(e.getMessage(), e); + } + return val; + } + + @Override + public boolean getBooleanProperty(String name, boolean defaultValue) { + String val = getProperty(name, null); + + if (val != null) { + return Boolean.parseBoolean(val); + } else { + return defaultValue; + } + } + + @Override + public Map getAll() { + Map map = new HashMap<>(); + Properties props = System.getProperties(); + props.entrySet().forEach(entry -> map.put(entry.getKey().toString(), entry.getValue())); + return map; + } + + @Override + public List getAdditionalModules() { + + String additionalModuleClasses = getProperty(ADDITIONAL_MODULES_PROPERTY_NAME, null); + + List modules = new LinkedList<>(); + + if (!StringUtils.isEmpty(additionalModuleClasses)) { + try { + String[] classes = additionalModuleClasses.split(","); + for (String clazz : classes) { + Object moduleObj = Class.forName(clazz).newInstance(); + if (moduleObj instanceof AbstractModule) { + AbstractModule abstractModule = (AbstractModule) moduleObj; + modules.add(abstractModule); + } else { + logger.error(clazz + " does not implement " + AbstractModule.class.getName() + ", skipping..."); + } + } + } catch (Exception e) { + logger.warn(e.getMessage(), e); + } + } + + return modules; + } +} diff --git a/core/src/main/java/com/netflix/conductor/core/events/ActionProcessor.java b/core/src/main/java/com/netflix/conductor/core/events/ActionProcessor.java index 7aaeffdb17..9fe36c982b 100644 --- a/core/src/main/java/com/netflix/conductor/core/events/ActionProcessor.java +++ b/core/src/main/java/com/netflix/conductor/core/events/ActionProcessor.java @@ -25,12 +25,10 @@ import com.netflix.conductor.common.metadata.tasks.Task; import com.netflix.conductor.common.metadata.tasks.Task.Status; import com.netflix.conductor.common.metadata.tasks.TaskResult; -import com.netflix.conductor.common.metadata.workflow.WorkflowDef; import com.netflix.conductor.common.run.Workflow; import com.netflix.conductor.core.execution.ParametersUtils; import com.netflix.conductor.core.execution.WorkflowExecutor; import com.netflix.conductor.core.utils.JsonUtils; -import com.netflix.conductor.service.MetadataService; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -46,97 +44,95 @@ */ @Singleton public class ActionProcessor { - private static final Logger logger = LoggerFactory.getLogger(ActionProcessor.class); - - private final WorkflowExecutor executor; - private final MetadataService metadataService; - private final ParametersUtils parametersUtils; - - private final JsonUtils jsonUtils = new JsonUtils(); - - @Inject - public ActionProcessor(WorkflowExecutor executor, MetadataService metadataService, ParametersUtils parametersUtils) { - this.executor = executor; - this.metadataService = metadataService; - this.parametersUtils = parametersUtils; - } - - public Map execute(Action action, Object payloadObject, String event, String messageId) { - - logger.debug("Executing action: {} for event: {} with messageId:{}", action.getAction(), event, messageId); - - Object 
jsonObject = payloadObject; - if (action.isExpandInlineJSON()) { - jsonObject = jsonUtils.expand(payloadObject); - } - - switch (action.getAction()) { - case start_workflow: - return startWorkflow(action, jsonObject, event, messageId); - case complete_task: - return completeTask(action, jsonObject, action.getComplete_task(), Status.COMPLETED, event, messageId); - case fail_task: - return completeTask(action, jsonObject, action.getFail_task(), Status.FAILED, event, messageId); - default: - break; - } - throw new UnsupportedOperationException("Action not supported " + action.getAction() + " for event " + event); - } - - @VisibleForTesting - Map completeTask(Action action, Object payload, TaskDetails taskDetails, Status status, String event, String messageId) { - - Map input = new HashMap<>(); - input.put("workflowId", taskDetails.getWorkflowId()); - input.put("taskRefName", taskDetails.getTaskRefName()); - input.putAll(taskDetails.getOutput()); - - Map replaced = parametersUtils.replace(input, payload); - String workflowId = "" + replaced.get("workflowId"); - String taskRefName = "" + replaced.get("taskRefName"); - Workflow found = executor.getWorkflow(workflowId, true); - if (found == null) { - replaced.put("error", "No workflow found with ID: " + workflowId); - return replaced; - } - Task task = found.getTaskByRefName(taskRefName); - if (task == null) { - replaced.put("error", "No task found with reference name: " + taskRefName + ", workflowId: " + workflowId); - return replaced; - } - - task.setStatus(status); - task.setOutputData(replaced); - task.getOutputData().put("conductor.event.messageId", messageId); - task.getOutputData().put("conductor.event.name", event); - - try { - executor.updateTask(new TaskResult(task)); - } catch (RuntimeException e) { - logger.error("Error updating task: {} in workflow: {} in action: {} for event: {} for message: {}", taskDetails.getTaskRefName(), taskDetails.getWorkflowId(), action.getAction(), event, messageId, e); - replaced.put("error", e.getMessage()); - throw e; - } - return replaced; - } - - private Map startWorkflow(Action action, Object payload, String event, String messageId) { - StartWorkflow params = action.getStart_workflow(); - Map output = new HashMap<>(); - try { - WorkflowDef def = metadataService.getWorkflowDef(params.getName(), params.getVersion()); - Map inputParams = params.getInput(); - Map workflowInput = parametersUtils.replace(inputParams, payload); - workflowInput.put("conductor.event.messageId", messageId); - workflowInput.put("conductor.event.name", event); - - String id = executor.startWorkflow(def.getName(), def.getVersion(), params.getCorrelationId(), workflowInput, event); - output.put("workflowId", id); - } catch (RuntimeException e) { - logger.error("Error starting workflow: {}, version: {}, for event: {} for message: {}", params.getName(), params.getVersion(), event, messageId, e); - output.put("error", e.getMessage()); - throw e; - } - return output; - } -} + private static final Logger logger = LoggerFactory.getLogger(ActionProcessor.class); + + private final WorkflowExecutor executor; + private final ParametersUtils parametersUtils; + private final JsonUtils jsonUtils = new JsonUtils(); + + @Inject + public ActionProcessor(WorkflowExecutor executor, ParametersUtils parametersUtils) { + this.executor = executor; + this.parametersUtils = parametersUtils; + } + + public Map execute(Action action, Object payloadObject, String event, String messageId) { + + logger.debug("Executing action: {} for event: {} with 
messageId:{}", action.getAction(), event, messageId); + + Object jsonObject = payloadObject; + if (action.isExpandInlineJson()) { + jsonObject = jsonUtils.expand(payloadObject); + } + + switch (action.getAction()) { + case START_WORKFLOW: + return startWorkflow(action, jsonObject, event, messageId); + case COMPLETE_TASK: + return completeTask(action, jsonObject, action.getCompleteTask(), Status.COMPLETED, event, messageId); + case FAIL_TASK: + return completeTask(action, jsonObject, action.getFailTask(), Status.FAILED, event, messageId); + default: + break; + } + throw new UnsupportedOperationException("Action not supported " + action.getAction() + " for event " + event); + } + + @VisibleForTesting + Map completeTask(Action action, Object payload, TaskDetails taskDetails, Status status, String event, String messageId) { + + Map input = new HashMap<>(); + input.put("workflowId", taskDetails.getWorkflowId()); + input.put("taskRefName", taskDetails.getTaskRefName()); + input.putAll(taskDetails.getOutput()); + + Map replaced = parametersUtils.replace(input, payload); + String workflowId = "" + replaced.get("workflowId"); + String taskRefName = "" + replaced.get("taskRefName"); + Workflow found = executor.getWorkflow(workflowId, true); + if (found == null) { + replaced.put("error", "No workflow found with ID: " + workflowId); + return replaced; + } + Task task = found.getTaskByRefName(taskRefName); + if (task == null) { + replaced.put("error", "No task found with reference name: " + taskRefName + ", workflowId: " + workflowId); + return replaced; + } + + task.setStatus(status); + task.setOutputData(replaced); + task.setOutputMessage(taskDetails.getOutputMessage()); + task.getOutputData().put("conductor.event.messageId", messageId); + task.getOutputData().put("conductor.event.name", event); + + try { + executor.updateTask(new TaskResult(task)); + } catch (RuntimeException e) { + logger.error("Error updating task: {} in workflow: {} in action: {} for event: {} for message: {}", taskDetails.getTaskRefName(), taskDetails.getWorkflowId(), action.getAction(), event, messageId, e); + replaced.put("error", e.getMessage()); + throw e; + } + return replaced; + } + + private Map startWorkflow(Action action, Object payload, String event, String messageId) { + StartWorkflow params = action.getStartWorkflow(); + Map output = new HashMap<>(); + try { + Map inputParams = params.getInput(); + Map workflowInput = parametersUtils.replace(inputParams, payload); + workflowInput.put("conductor.event.messageId", messageId); + workflowInput.put("conductor.event.name", event); + + String id = executor.startWorkflow(params.getName(), params.getVersion(), params.getCorrelationId(), workflowInput, event); + output.put("workflowId", id); + + } catch (RuntimeException e) { + logger.error("Error starting workflow: {}, version: {}, for event: {} for message: {}", params.getName(), params.getVersion(), event, messageId, e); + output.put("error", e.getMessage()); + throw e; + } + return output; + } +} \ No newline at end of file diff --git a/core/src/main/java/com/netflix/conductor/core/events/EventQueues.java b/core/src/main/java/com/netflix/conductor/core/events/EventQueues.java index 0c865fd97b..3e4daf5290 100644 --- a/core/src/main/java/com/netflix/conductor/core/events/EventQueues.java +++ b/core/src/main/java/com/netflix/conductor/core/events/EventQueues.java @@ -36,6 +36,7 @@ */ @Singleton public class EventQueues { + public static final String EVENT_QUEUE_PROVIDERS_QUALIFIER = "EventQueueProviders"; private static final 
Logger logger = LoggerFactory.getLogger(EventQueues.class); @@ -44,7 +45,7 @@ public class EventQueues { private final Map providers; @Inject - public EventQueues(@Named("EventQueueProviders") Map providers, ParametersUtils parametersUtils) { + public EventQueues(@Named(EVENT_QUEUE_PROVIDERS_QUALIFIER) Map providers, ParametersUtils parametersUtils) { this.providers = providers; this.parametersUtils = parametersUtils; } diff --git a/core/src/main/java/com/netflix/conductor/core/execution/DeciderService.java b/core/src/main/java/com/netflix/conductor/core/execution/DeciderService.java index df6a88bc6f..d7879dce27 100644 --- a/core/src/main/java/com/netflix/conductor/core/execution/DeciderService.java +++ b/core/src/main/java/com/netflix/conductor/core/execution/DeciderService.java @@ -1,4 +1,4 @@ -/* +/** * Copyright 2016 Netflix, Inc. *

* Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,15 +13,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ +/** + * + */ package com.netflix.conductor.core.execution; import com.google.common.annotations.VisibleForTesting; import com.netflix.conductor.common.metadata.tasks.Task; import com.netflix.conductor.common.metadata.tasks.Task.Status; import com.netflix.conductor.common.metadata.tasks.TaskDef; +import com.netflix.conductor.common.metadata.workflow.TaskType; import com.netflix.conductor.common.metadata.workflow.WorkflowDef; import com.netflix.conductor.common.metadata.workflow.WorkflowTask; -import com.netflix.conductor.common.metadata.workflow.WorkflowTask.Type; import com.netflix.conductor.common.run.Workflow; import com.netflix.conductor.common.run.Workflow.WorkflowStatus; import com.netflix.conductor.common.utils.ExternalPayloadStorage; @@ -30,7 +33,6 @@ import com.netflix.conductor.core.utils.ExternalPayloadStorageUtils; import com.netflix.conductor.core.utils.IDGenerator; import com.netflix.conductor.core.utils.QueueUtils; -import com.netflix.conductor.dao.MetadataDAO; import com.netflix.conductor.dao.QueueDAO; import com.netflix.conductor.metrics.Monitors; import org.apache.commons.lang3.StringUtils; @@ -48,7 +50,6 @@ import java.util.Map; import java.util.Optional; import java.util.Set; -import java.util.function.Function; import java.util.function.Predicate; import java.util.stream.Collectors; @@ -69,8 +70,6 @@ public class DeciderService { private static Logger logger = LoggerFactory.getLogger(DeciderService.class); - private final MetadataDAO metadataDAO; - private final QueueDAO queueDAO; private final ParametersUtils parametersUtils; @@ -79,15 +78,13 @@ public class DeciderService { private final ExternalPayloadStorageUtils externalPayloadStorageUtils; - @SuppressWarnings("ConstantConditions") private final Predicate isNonPendingTask = task -> !task.isRetried() && !task.getStatus().equals(SKIPPED) && !task.isExecuted() || SystemTaskType.isBuiltIn(task.getTaskType()); @Inject - public DeciderService(MetadataDAO metadataDAO, ParametersUtils parametersUtils, QueueDAO queueDAO, + public DeciderService(ParametersUtils parametersUtils, QueueDAO queueDAO, ExternalPayloadStorageUtils externalPayloadStorageUtils, @Named("TaskMappers") Map taskMappers) { - this.metadataDAO = metadataDAO; this.queueDAO = queueDAO; this.parametersUtils = parametersUtils; this.taskMappers = taskMappers; @@ -95,9 +92,8 @@ public DeciderService(MetadataDAO metadataDAO, ParametersUtils parametersUtils, } //QQ public method validation of the input params - public DeciderOutcome decide(Workflow workflow, WorkflowDef workflowDef) throws TerminateWorkflowException { + public DeciderOutcome decide(Workflow workflow) throws TerminateWorkflowException { - workflow.setSchemaVersion(workflowDef.getSchemaVersion()); //In case of a new workflow the list of tasks will be empty final List tasks = workflow.getTasks(); //In case of a new workflow the list of executedTasks will also be empty @@ -108,15 +104,15 @@ public DeciderOutcome decide(Workflow workflow, WorkflowDef workflowDef) throws List tasksToBeScheduled = new LinkedList<>(); if (executedTasks.isEmpty()) { //this is the flow that the new workflow will go through - tasksToBeScheduled = startWorkflow(workflow, workflowDef); + tasksToBeScheduled = startWorkflow(workflow); if (tasksToBeScheduled == null) { tasksToBeScheduled = new LinkedList<>(); } } - return decide(workflowDef, 
workflow, tasksToBeScheduled); + return decide(workflow, tasksToBeScheduled); } - private DeciderOutcome decide(final WorkflowDef workflowDef, final Workflow workflow, List preScheduledTasks) throws TerminateWorkflowException { + private DeciderOutcome decide(final Workflow workflow, List preScheduledTasks) throws TerminateWorkflowException { DeciderOutcome outcome = new DeciderOutcome(); @@ -131,7 +127,7 @@ private DeciderOutcome decide(final WorkflowDef workflowDef, final Workflow work return outcome; } - // Filter the list of tasks and include only tasks that are not retried, not executed, + // Filter the list of tasks and include only tasks that are not retried, not executed // marked to be skipped and not part of System tasks that is DECISION, FORK, JOIN // This list will be empty for a new workflow being started List pendingTasks = workflow.getTasks() @@ -147,10 +143,11 @@ private DeciderOutcome decide(final WorkflowDef workflowDef, final Workflow work .map(Task::getReferenceTaskName) .collect(Collectors.toSet()); - //Traverse the pre-scheduled tasks to a linkedHasMap - Map tasksToBeScheduled = preScheduledTasks.stream() - .collect(Collectors.toMap(Task::getReferenceTaskName, Function.identity(), - (element1, element2) -> element2, LinkedHashMap::new)); + Map tasksToBeScheduled = new LinkedHashMap<>(); + + preScheduledTasks.forEach(preScheduledTask -> { + tasksToBeScheduled.put(preScheduledTask.getReferenceTaskName(), preScheduledTask); + }); // A new workflow does not enter this code branch for (Task pendingTask : pendingTasks) { @@ -160,24 +157,25 @@ private DeciderOutcome decide(final WorkflowDef workflowDef, final Workflow work executedTaskRefNames.remove(pendingTask.getReferenceTaskName()); } - TaskDef taskDefinition = metadataDAO.getTaskDef(pendingTask.getTaskDefName()); - if (taskDefinition != null) { - checkForTimeout(taskDefinition, pendingTask); + Optional taskDefinition = pendingTask.getTaskDefinition(); + + if (taskDefinition.isPresent()) { + checkForTimeout(taskDefinition.get(), pendingTask); // If the task has not been updated for "responseTimeoutSeconds" then mark task as TIMED_OUT - if (isResponseTimedOut(taskDefinition, pendingTask)) { - timeoutTask(taskDefinition, pendingTask); + if (isResponseTimedOut(taskDefinition.get(), pendingTask)) { + timeoutTask(taskDefinition.get(), pendingTask); } } if (!pendingTask.getStatus().isSuccessful()) { WorkflowTask workflowTask = pendingTask.getWorkflowTask(); if (workflowTask == null) { - workflowTask = workflowDef.getTaskByRefName(pendingTask.getReferenceTaskName()); + workflowTask = workflow.getWorkflowDefinition().getTaskByRefName(pendingTask.getReferenceTaskName()); } if (workflowTask != null && workflowTask.isOptional()) { pendingTask.setStatus(COMPLETED_WITH_ERRORS); } else { - Task retryTask = retry(taskDefinition, workflowTask, pendingTask, workflow); + Task retryTask = retry(taskDefinition.orElse(null), workflowTask, pendingTask, workflow); tasksToBeScheduled.put(retryTask.getReferenceTaskName(), retryTask); executedTaskRefNames.remove(retryTask.getReferenceTaskName()); outcome.tasksToBeUpdated.add(pendingTask); @@ -186,13 +184,14 @@ private DeciderOutcome decide(final WorkflowDef workflowDef, final Workflow work if (!pendingTask.isExecuted() && !pendingTask.isRetried() && pendingTask.getStatus().isTerminal()) { pendingTask.setExecuted(true); - List nextTasks = getNextTask(workflowDef, workflow, pendingTask); + List nextTasks = getNextTask(workflow, pendingTask); nextTasks.forEach(nextTask -> 
tasksToBeScheduled.putIfAbsent(nextTask.getReferenceTaskName(), nextTask)); outcome.tasksToBeUpdated.add(pendingTask); - logger.debug("Scheduling Tasks from {}, next = {} for workflow: {}", pendingTask.getTaskDefName(), + logger.debug("Scheduling Tasks from {}, next = {} for workflowId: {}", pendingTask.getTaskDefName(), nextTasks.stream() .map(Task::getTaskDefName) - .collect(Collectors.toList()), workflow.getWorkflowId()); + .collect(Collectors.toList()), + workflow.getWorkflowId()); } } @@ -203,10 +202,11 @@ private DeciderOutcome decide(final WorkflowDef workflowDef, final Workflow work if (!unScheduledTasks.isEmpty()) { logger.debug("Scheduling Tasks {} for workflow: {}", unScheduledTasks.stream() .map(Task::getTaskDefName) - .collect(Collectors.toList()), workflow.getWorkflowId()); + .collect(Collectors.toList()), + workflow.getWorkflowId()); outcome.tasksToBeScheduled.addAll(unScheduledTasks); } - if (outcome.tasksToBeScheduled.isEmpty() && checkForWorkflowCompletion(workflowDef, workflow)) { + if (outcome.tasksToBeScheduled.isEmpty() && checkForWorkflowCompletion(workflow)) { logger.debug("Marking workflow as complete. workflow=" + workflow.getWorkflowId() + ", tasks=" + workflow.getTasks()); outcome.isComplete = true; } @@ -214,7 +214,8 @@ private DeciderOutcome decide(final WorkflowDef workflowDef, final Workflow work return outcome; } - private List startWorkflow(Workflow workflow, WorkflowDef workflowDef) throws TerminateWorkflowException { + private List startWorkflow(Workflow workflow) throws TerminateWorkflowException { + final WorkflowDef workflowDef = workflow.getWorkflowDefinition(); logger.debug("Starting workflow " + workflowDef.getName() + "/" + workflow.getWorkflowId()); //The tasks will be empty in case of new workflow @@ -226,14 +227,15 @@ private List startWorkflow(Workflow workflow, WorkflowDef workflowDef) thr throw new TerminateWorkflowException("No tasks found to be executed", WorkflowStatus.COMPLETED); } - WorkflowTask taskToSchedule = workflowDef.getTasks().getFirst(); //Nothing is running yet - so schedule the first task - //Loop until a non-skipped task is found + WorkflowTask taskToSchedule = workflowDef.getTasks().get(0); //Nothing isSystemTask running yet - so schedule the first task + //Loop until a non-skipped task isSystemTask found + while (isTaskSkipped(taskToSchedule, workflow)) { taskToSchedule = workflowDef.getNextTask(taskToSchedule.getTaskReferenceName()); } //In case of a new workflow a the first non-skippable task will be scheduled - return getTasksToBeScheduled(workflowDef, workflow, taskToSchedule, 0); + return getTasksToBeScheduled(workflow, taskToSchedule, 0); } // Get the first task to schedule @@ -253,6 +255,7 @@ private List startWorkflow(Workflow workflow, WorkflowDef workflowDef) thr }); return Collections.singletonList(rerunFromTask); + } /** @@ -269,11 +272,12 @@ void updateWorkflowOutput(final Workflow workflow, @Nullable Task task) { } Task last = Optional.ofNullable(task).orElse(allTasks.get(allTasks.size() - 1)); - WorkflowDef def = metadataDAO.get(workflow.getWorkflowType(), workflow.getVersion()); + + WorkflowDef workflowDef = workflow.getWorkflowDefinition(); Map output; - if (!def.getOutputParameters().isEmpty()) { + if (workflowDef.getOutputParameters() != null && !workflowDef.getOutputParameters().isEmpty()) { Workflow workflowInstance = populateWorkflowAndTaskData(workflow); - output = parametersUtils.getTaskInput(def.getOutputParameters(), workflowInstance, null, null); + output = 
parametersUtils.getTaskInput(workflowDef.getOutputParameters(), workflowInstance, null, null); } else if (StringUtils.isNotBlank(last.getExternalOutputPayloadStoragePath())) { output = externalPayloadStorageUtils.downloadPayload(last.getExternalOutputPayloadStoragePath()); Monitors.recordExternalPayloadStorageUsage(last.getTaskDefName(), ExternalPayloadStorage.Operation.READ.toString(), ExternalPayloadStorage.PayloadType.TASK_OUTPUT.toString()); @@ -285,8 +289,7 @@ void updateWorkflowOutput(final Workflow workflow, @Nullable Task task) { externalPayloadStorageUtils.verifyAndUpload(workflow, ExternalPayloadStorage.PayloadType.WORKFLOW_OUTPUT); } - private boolean checkForWorkflowCompletion(final WorkflowDef def, final Workflow workflow) throws TerminateWorkflowException { - + private boolean checkForWorkflowCompletion(final Workflow workflow) throws TerminateWorkflowException { List allTasks = workflow.getTasks(); if (allTasks.isEmpty()) { return false; @@ -295,8 +298,8 @@ private boolean checkForWorkflowCompletion(final WorkflowDef def, final Workflow Map taskStatusMap = new HashMap<>(); workflow.getTasks().forEach(task -> taskStatusMap.put(task.getReferenceTaskName(), task.getStatus())); - LinkedList wftasks = def.getTasks(); - boolean allCompletedSuccessfully = wftasks.stream().parallel().allMatch(wftask -> { + List workflowTasks = workflow.getWorkflowDefinition().getTasks(); + boolean allCompletedSuccessfully = workflowTasks.stream().parallel().allMatch(wftask -> { Status status = taskStatusMap.get(wftask.getTaskReferenceName()); return status != null && status.isSuccessful() && status.isTerminal(); }); @@ -306,14 +309,16 @@ private boolean checkForWorkflowCompletion(final WorkflowDef def, final Workflow .allMatch(Status::isTerminal); boolean noPendingSchedule = workflow.getTasks().stream().parallel().filter(wftask -> { - String next = getNextTasksToBeScheduled(def, workflow, wftask); + String next = getNextTasksToBeScheduled(workflow, wftask); return next != null && !taskStatusMap.containsKey(next); }).collect(Collectors.toList()).isEmpty(); return allCompletedSuccessfully && noPendingTasks && noPendingSchedule; } - private List getNextTask(WorkflowDef def, Workflow workflow, Task task) { + @VisibleForTesting + List getNextTask(Workflow workflow, Task task) { + final WorkflowDef workflowDef = workflow.getWorkflowDefinition(); // Get the following task after the last completed task if (SystemTaskType.is(task.getTaskType()) && SystemTaskType.DECISION.name().equals(task.getTaskType())) { @@ -323,18 +328,19 @@ private List getNextTask(WorkflowDef def, Workflow workflow, Task task) { } String taskReferenceName = task.getReferenceTaskName(); - WorkflowTask taskToSchedule = def.getNextTask(taskReferenceName); + WorkflowTask taskToSchedule = workflowDef.getNextTask(taskReferenceName); while (isTaskSkipped(taskToSchedule, workflow)) { - taskToSchedule = def.getNextTask(taskToSchedule.getTaskReferenceName()); + taskToSchedule = workflowDef.getNextTask(taskToSchedule.getTaskReferenceName()); } if (taskToSchedule != null) { - return getTasksToBeScheduled(def, workflow, taskToSchedule, 0); + return getTasksToBeScheduled(workflow, taskToSchedule, 0); } return Collections.emptyList(); } - private String getNextTasksToBeScheduled(WorkflowDef def, Workflow workflow, Task task) { + private String getNextTasksToBeScheduled(Workflow workflow, Task task) { + final WorkflowDef def = workflow.getWorkflowDefinition(); String taskReferenceName = task.getReferenceTaskName(); WorkflowTask taskToSchedule = 
def.getNextTask(taskReferenceName); @@ -377,6 +383,9 @@ Task retry(TaskDef taskDefinition, WorkflowTask workflowTask, Task task, Workflo rescheduled.setStatus(SCHEDULED); rescheduled.setPollCount(0); rescheduled.setInputData(new HashMap<>()); + rescheduled.getInputData().putAll(task.getInputData()); + + if (StringUtils.isNotBlank(task.getExternalInputPayloadStoragePath())) { rescheduled.setExternalInputPayloadStoragePath(task.getExternalInputPayloadStoragePath()); } else { @@ -406,7 +415,7 @@ Workflow populateWorkflowAndTaskData(Workflow workflow) { if (StringUtils.isNotBlank(workflow.getExternalInputPayloadStoragePath())) { // download the workflow input from external storage here and plug it into the workflow Map workflowInputParams = externalPayloadStorageUtils.downloadPayload(workflow.getExternalInputPayloadStoragePath()); - Monitors.recordExternalPayloadStorageUsage(workflow.getWorkflowType(), ExternalPayloadStorage.Operation.READ.toString(), ExternalPayloadStorage.PayloadType.WORKFLOW_INPUT.toString()); + Monitors.recordExternalPayloadStorageUsage(workflow.getWorkflowName(), ExternalPayloadStorage.Operation.READ.toString(), ExternalPayloadStorage.PayloadType.WORKFLOW_INPUT.toString()); workflowInstance.setInput(workflowInputParams); workflowInstance.setExternalInputPayloadStoragePath(null); } @@ -466,11 +475,14 @@ void checkForTimeout(TaskDef taskDef, Task task) { @VisibleForTesting boolean isResponseTimedOut(TaskDef taskDefinition, Task task) { - if (taskDefinition == null) { logger.warn("missing task type : {}, workflowId= {}", task.getTaskDefName(), task.getWorkflowInstanceId()); return false; } + if (task.getStatus().isTerminal() || !task.getStatus().equals(IN_PROGRESS) || taskDefinition.getResponseTimeoutSeconds() == 0) { + return false; + } + if (!task.getStatus().equals(IN_PROGRESS) || taskDefinition.getResponseTimeoutSeconds() == 0) { return false; } @@ -503,43 +515,39 @@ private void timeoutTask(TaskDef taskDef, Task task) { task.setReasonForIncompletion(reason); } - public List getTasksToBeScheduled(WorkflowDef workflowDef, Workflow workflow, + public List getTasksToBeScheduled(Workflow workflow, WorkflowTask taskToSchedule, int retryCount) { - return getTasksToBeScheduled(workflowDef, workflow, taskToSchedule, retryCount, null); + return getTasksToBeScheduled(workflow, taskToSchedule, retryCount, null); } - public List getTasksToBeScheduled(WorkflowDef workflowDefinition, Workflow workflow, + public List getTasksToBeScheduled(Workflow workflow, WorkflowTask taskToSchedule, int retryCount, String retriedTaskId) { - - Workflow workflowInstance = populateWorkflowAndTaskData(workflow); + workflow = populateWorkflowAndTaskData(workflow); Map input = parametersUtils.getTaskInput(taskToSchedule.getInputParameters(), - workflowInstance, null, null); + workflow, null, null); - Type taskType = Type.USER_DEFINED; + TaskType taskType = TaskType.USER_DEFINED; String type = taskToSchedule.getType(); - if (Type.isSystemTask(type)) { - taskType = Type.valueOf(type); + if (TaskType.isSystemTask(type)) { + taskType = TaskType.valueOf(type); } // get in progress tasks for this workflow instance - List inProgressTasks = workflowInstance.getTasks().stream() + List inProgressTasks = workflow.getTasks().stream() .filter(runningTask -> runningTask.getStatus().equals(Status.IN_PROGRESS)) .map(Task::getReferenceTaskName) .collect(Collectors.toList()); - TaskDef taskDef = Optional.ofNullable(taskToSchedule.getName()) - .map(metadataDAO::getTaskDef) - .orElse(null); - + String taskId = 
IDGenerator.generate(); TaskMapperContext taskMapperContext = TaskMapperContext.newBuilder() - .withWorkflowDefinition(workflowDefinition) - .withWorkflowInstance(workflowInstance) - .withTaskDefinition(taskDef) + .withWorkflowDefinition(workflow.getWorkflowDefinition()) + .withWorkflowInstance(workflow) + .withTaskDefinition(taskToSchedule.getTaskDefinition()) .withTaskToSchedule(taskToSchedule) .withTaskInput(input) .withRetryCount(retryCount) .withRetryTaskId(retriedTaskId) - .withTaskId(IDGenerator.generate()) + .withTaskId(taskId) .withDeciderService(this) .build(); @@ -555,22 +563,24 @@ public List getTasksToBeScheduled(WorkflowDef workflowDefinition, Workflow private boolean isTaskSkipped(WorkflowTask taskToSchedule, Workflow workflow) { try { - boolean retval = false; + boolean isTaskSkipped = false; if (taskToSchedule != null) { Task t = workflow.getTaskByRefName(taskToSchedule.getTaskReferenceName()); if (t == null) { - retval = false; + isTaskSkipped = false; } else if (t.getStatus().equals(SKIPPED)) { - retval = true; + isTaskSkipped = true; } } - return retval; + return isTaskSkipped; } catch (Exception e) { throw new TerminateWorkflowException(e.getMessage()); } + } - static class DeciderOutcome { + + public static class DeciderOutcome { List tasksToBeScheduled = new LinkedList<>(); @@ -582,5 +592,6 @@ static class DeciderOutcome { private DeciderOutcome() { } + } -} +} \ No newline at end of file diff --git a/core/src/main/java/com/netflix/conductor/core/execution/ParametersUtils.java b/core/src/main/java/com/netflix/conductor/core/execution/ParametersUtils.java index 7326f501e6..7db08828a4 100644 --- a/core/src/main/java/com/netflix/conductor/core/execution/ParametersUtils.java +++ b/core/src/main/java/com/netflix/conductor/core/execution/ParametersUtils.java @@ -84,8 +84,8 @@ public Map getTaskInputV2(Map input, Workflow wo workflowParams.put("workflowId", workflow.getWorkflowId()); workflowParams.put("parentWorkflowId", workflow.getParentWorkflowId()); workflowParams.put("parentWorkflowTaskId", workflow.getParentWorkflowTaskId()); - workflowParams.put("workflowType", workflow.getWorkflowType()); - workflowParams.put("version", workflow.getVersion()); + workflowParams.put("workflowType", workflow.getWorkflowName()); + workflowParams.put("version", workflow.getWorkflowVersion()); workflowParams.put("correlationId", workflow.getCorrelationId()); workflowParams.put("reasonForIncompletion", workflow.getReasonForIncompletion()); workflowParams.put("schemaVersion", workflow.getSchemaVersion()); diff --git a/core/src/main/java/com/netflix/conductor/core/execution/WorkflowExecutor.java b/core/src/main/java/com/netflix/conductor/core/execution/WorkflowExecutor.java index 23cff1e7df..a5320eb3fb 100644 --- a/core/src/main/java/com/netflix/conductor/core/execution/WorkflowExecutor.java +++ b/core/src/main/java/com/netflix/conductor/core/execution/WorkflowExecutor.java @@ -1,17 +1,14 @@ /* * Copyright 2016 Netflix, Inc. *

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at *

* http://www.apache.org/licenses/LICENSE-2.0 *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.core.execution; @@ -24,6 +21,7 @@ import com.netflix.conductor.common.metadata.tasks.TaskResult; import com.netflix.conductor.common.metadata.workflow.RerunWorkflowRequest; import com.netflix.conductor.common.metadata.workflow.SkipTaskRequest; +import com.netflix.conductor.common.metadata.workflow.TaskType; import com.netflix.conductor.common.metadata.workflow.WorkflowDef; import com.netflix.conductor.common.metadata.workflow.WorkflowTask; import com.netflix.conductor.common.run.Workflow; @@ -31,9 +29,9 @@ import com.netflix.conductor.core.WorkflowContext; import com.netflix.conductor.core.config.Configuration; import com.netflix.conductor.core.execution.ApplicationException.Code; -import com.netflix.conductor.core.execution.DeciderService.DeciderOutcome; import com.netflix.conductor.core.execution.tasks.SubWorkflow; import com.netflix.conductor.core.execution.tasks.WorkflowSystemTask; +import com.netflix.conductor.core.metadata.MetadataMapperService; import com.netflix.conductor.core.utils.IDGenerator; import com.netflix.conductor.core.utils.QueueUtils; import com.netflix.conductor.dao.ExecutionDAO; @@ -53,7 +51,6 @@ import java.util.Map; import java.util.Objects; import java.util.Optional; -import java.util.Set; import java.util.function.Predicate; import java.util.stream.Collectors; @@ -71,7 +68,6 @@ import static java.util.Comparator.comparingInt; import static java.util.stream.Collectors.groupingBy; import static java.util.stream.Collectors.maxBy; -import static java.util.stream.Collectors.toSet; /** * @author Viren Workflow services provider interface @@ -91,76 +87,206 @@ public class WorkflowExecutor { private final Configuration config; + private final MetadataMapperService metadataMapperService; private final ParametersUtils parametersUtils; - public static final String deciderQueue = "_deciderQueue"; + private int activeWorkerLastPollInSecs; + + public static final String DECIDER_QUEUE = "_deciderQueue"; - private int activeWorkerLastPollnSecs; @Inject - public WorkflowExecutor(DeciderService deciderService, MetadataDAO metadataDAO, ExecutionDAO executionDAO, - QueueDAO queueDAO, ParametersUtils parametersUtils, Configuration config) { + public WorkflowExecutor( + DeciderService deciderService, + MetadataDAO metadataDAO, + ExecutionDAO executionDAO, + QueueDAO queueDAO, + MetadataMapperService metadataMapperService, + ParametersUtils parametersUtils, + Configuration config + ) { this.deciderService = deciderService; this.metadataDAO = metadataDAO; this.executionDAO = executionDAO; this.queueDAO = queueDAO; this.config = config; + this.metadataMapperService = metadataMapperService; + this.activeWorkerLastPollInSecs = config.getIntProperty("tasks.active.worker.lastpoll", 10); this.parametersUtils = parametersUtils; + } - activeWorkerLastPollnSecs = config.getIntProperty("tasks.active.worker.lastpoll", 10); + /** + * 
@throws ApplicationException + */ + public String startWorkflow(String name, Integer version, String correlationId, Map input, String externalInputPayloadStoragePath) { + return startWorkflow(name, version, correlationId, input, externalInputPayloadStoragePath, null); } - public String startWorkflow(String name, int version, String correlationId, Map input) { - return startWorkflow(name, version, correlationId, input, null); + /** + * @throws ApplicationException + */ + public String startWorkflow(String name, Integer version, String correlationId, Map input, String externalInputPayloadStoragePath, String event) { + return startWorkflow( + name, + version, + input, + externalInputPayloadStoragePath, + correlationId, + null, + null, + event + ); } - public String startWorkflow(String name, int version, String correlationId, Map input, String event) { - return startWorkflow(name, version, input, correlationId, null, null, event); + /** + * @throws ApplicationException + */ + public String startWorkflow( + String name, + Integer version, + String correlationId, + Map input, + String externalInputPayloadStoragePath, + String event, + Map taskToDomain + ) { + return startWorkflow( + name, + version, + input, + externalInputPayloadStoragePath, + correlationId, + null, + null, + event, + taskToDomain + ); } - public String startWorkflow(String name, int version, String correlationId, Map input, String externalInputPayloadStoragePath, String event, Map taskToDomain) { - return startWorkflow(name, version, input, externalInputPayloadStoragePath, correlationId, null, null, event, taskToDomain); + /** + * @throws ApplicationException + */ + public String startWorkflow( + String name, + Integer version, + Map input, + String externalInputPayloadStoragePath, + String correlationId, + String parentWorkflowId, + String parentWorkflowTaskId, + String event + ) { + return startWorkflow( + name, + version, + input, + externalInputPayloadStoragePath, + correlationId, + parentWorkflowId, + parentWorkflowTaskId, + event, + null + ); } - public String startWorkflow(String name, int version, Map input, String correlationId, String parentWorkflowId, String parentWorkflowTaskId, String event) { - return startWorkflow(name, version, input, null, correlationId, parentWorkflowId, parentWorkflowTaskId, event, null); + /** + * @throws ApplicationException + */ + public String startWorkflow( + WorkflowDef workflowDefinition, + Map workflowInput, + String externalInputPayloadStoragePath, + String correlationId, + String event, + Map taskToDomain + ) { + return startWorkflow( + workflowDefinition, + workflowInput, + externalInputPayloadStoragePath, + correlationId, + null, + null, + event, + taskToDomain + ); } - private final Predicate validateLastPolledTime = pd -> pd.getLastPollTime() > System.currentTimeMillis() - (activeWorkerLastPollnSecs * 1000); + /** + * @throws ApplicationException + */ + public String startWorkflow( + String name, + Integer version, + Map workflowInput, + String externalInputPayloadStoragePath, + String correlationId, + String parentWorkflowId, + String parentWorkflowTaskId, + String event, + Map taskToDomain + ) { + WorkflowDef workflowDefinition = metadataMapperService.lookupForWorkflowDefinition(name, version); + + return startWorkflow( + workflowDefinition, + workflowInput, + externalInputPayloadStoragePath, + correlationId, + parentWorkflowId, + parentWorkflowTaskId, + event, + taskToDomain + ); + } + + private final Predicate validateLastPolledTime = pd -> pd.getLastPollTime() > 
System.currentTimeMillis() - (activeWorkerLastPollInSecs * 1000); private final Predicate isSystemTask = task -> SystemTaskType.is(task.getTaskType()); private final Predicate isNonTerminalTask = task -> !task.getStatus().isTerminal(); - public String startWorkflow(String workflowName, int workflowVersion, Map workflowInput, - String externalInputPayloadStoragePath, String correlationId, String parentWorkflowId, - String parentWorkflowTaskId, String event, Map taskToDomain) { + /** + * @throws ApplicationException + */ + public String startWorkflow( + WorkflowDef workflowDefinition, + Map workflowInput, + String externalInputPayloadStoragePath, + String correlationId, + String parentWorkflowId, + String parentWorkflowTaskId, + String event, + Map taskToDomain + ) { + workflowDefinition = metadataMapperService.populateTaskDefinitions(workflowDefinition); // perform validations - validateWorkflow(workflowName, workflowVersion, workflowInput, externalInputPayloadStoragePath); + validateWorkflow(workflowDefinition, workflowInput, externalInputPayloadStoragePath); //A random UUID is assigned to the work flow instance String workflowId = IDGenerator.generate(); // Persist the Workflow - Workflow workflow = new Workflow(); - workflow.setWorkflowId(workflowId); - workflow.setCorrelationId(correlationId); - workflow.setWorkflowType(workflowName); - workflow.setVersion(workflowVersion); - workflow.setInput(workflowInput); - workflow.setExternalInputPayloadStoragePath(externalInputPayloadStoragePath); - workflow.setStatus(WorkflowStatus.RUNNING); - workflow.setParentWorkflowId(parentWorkflowId); - workflow.setParentWorkflowTaskId(parentWorkflowTaskId); - workflow.setOwnerApp(WorkflowContext.get().getClientApp()); - workflow.setCreateTime(System.currentTimeMillis()); - workflow.setUpdatedBy(null); - workflow.setUpdateTime(null); - workflow.setEvent(event); - workflow.setTaskToDomain(taskToDomain); - executionDAO.createWorkflow(workflow); - logger.info("A new instance of workflow {} created with workflow id {}", workflowName, workflowId); + Workflow wf = new Workflow(); + wf.setWorkflowId(workflowId); + wf.setCorrelationId(correlationId); + wf.setWorkflowDefinition(workflowDefinition); + wf.setInput(workflowInput); + wf.setExternalInputPayloadStoragePath(externalInputPayloadStoragePath); + wf.setStatus(WorkflowStatus.RUNNING); + wf.setParentWorkflowId(parentWorkflowId); + wf.setParentWorkflowTaskId(parentWorkflowTaskId); + wf.setOwnerApp(WorkflowContext.get().getClientApp()); + wf.setCreateTime(System.currentTimeMillis()); + wf.setUpdatedBy(null); + wf.setUpdateTime(null); + wf.setEvent(event); + wf.setTaskToDomain(taskToDomain); + + executionDAO.createWorkflow(wf); + logger.info("A new instance of workflow {} created with workflow id {}", wf.getWorkflowName(), workflowId); + //then decide to see if anything needs to be done as part of the workflow decide(workflowId); @@ -172,38 +298,25 @@ public String startWorkflow(String workflowName, int workflowVersion, Map workflowInput, String externalStoragePath) { + private void validateWorkflow(WorkflowDef workflowDef, Map workflowInput, String externalStoragePath) { try { - //Check if the workflow definition is valid - WorkflowDef workflowDefinition = metadataDAO.get(workflowName, workflowVersion); - if (workflowDefinition == null) { - logger.error("There is no workflow defined with name {} and version {}", workflowName, workflowVersion); - throw new ApplicationException(Code.NOT_FOUND, "No such workflow defined. 
name=" + workflowName + ", version=" + workflowVersion); - } - - //because everything else is a system defined task - Set missingTaskDefs = workflowDefinition.all().stream() - .filter(task -> task.getType().equals(WorkflowTask.Type.SIMPLE.name())) - .map(WorkflowTask::getName) - .filter(task -> metadataDAO.getTaskDef(task) == null) - .collect(toSet()); - - if (!missingTaskDefs.isEmpty()) { - logger.error("Cannot find the task definitions for the following tasks used in workflow: {}", missingTaskDefs); - throw new ApplicationException(INVALID_INPUT, "Cannot find the task definitions for the following tasks used in workflow: " + missingTaskDefs); - } - //Check if the input to the workflow is not null if (workflowInput == null && StringUtils.isBlank(externalStoragePath)) { - logger.error("The input for the workflow {} cannot be NULL", workflowName); + logger.error("The input for the workflow '{}' cannot be NULL", workflowDef.getName()); throw new ApplicationException(INVALID_INPUT, "NULL input passed when starting workflow"); } } catch (Exception e) { - Monitors.recordWorkflowStartError(workflowName, WorkflowContext.get().getClientApp()); + Monitors.recordWorkflowStartError(workflowDef.getName(), WorkflowContext.get().getClientApp()); throw e; } } + + /** + * @param workflowId + * @return + * @throws ApplicationException + */ public String resetCallbacksForInProgressTasks(String workflowId) { Workflow workflow = executionDAO.getWorkflow(workflowId, true); if (workflow.getStatus().isTerminal()) { @@ -233,14 +346,27 @@ public String rerun(RerunWorkflowRequest request) { return request.getReRunFromWorkflowId(); } + /** + * @param workflowId the id of the workflow to be restarted + * @throws ApplicationException in the following cases: + *

<ul>
+ * <li>Workflow is not in a terminal state</li>
+ * <li>Workflow definition is not found</li>
+ * <li>Workflow is deemed non-restartable as per workflow definition</li>
+ * </ul>
+ */ public void rewind(String workflowId) { Workflow workflow = executionDAO.getWorkflow(workflowId, true); if (!workflow.getStatus().isTerminal()) { throw new ApplicationException(CONFLICT, "Workflow is still running. status=" + workflow.getStatus()); } - WorkflowDef workflowDef = metadataDAO.get(workflow.getWorkflowType(), workflow.getVersion()); - if (!workflowDef.isRestartable() && workflow.getStatus().equals(WorkflowStatus.COMPLETED)) { // Can only restart non completed workflows when the configuration is set to false + WorkflowDef workflowDef = Optional.ofNullable(workflow.getWorkflowDefinition()) + .orElse(metadataDAO.get(workflow.getWorkflowName(), workflow.getWorkflowVersion()) + .orElseThrow(() -> new ApplicationException(NOT_FOUND, String.format("Unable to find definition for %s", workflowId))) + ); + + if (!workflowDef.isRestartable() && workflow.getStatus().equals(WorkflowStatus.COMPLETED)) { // Can only restart non-completed workflows when the configuration is set to false throw new ApplicationException(CONFLICT, String.format("WorkflowId: %s is an instance of WorkflowDef: %s and version: %d and is non restartable", workflowId, workflowDef.getName(), workflowDef.getVersion())); } @@ -261,6 +387,7 @@ public void rewind(String workflowId) { * Gets the last instance of each failed task and reschedule each * Gets all cancelled tasks and schedule all of them except JOIN (join should change status to INPROGRESS) * Switch workflow back to RUNNING status and aall decider. + * * @param workflowId */ public void retry(String workflowId) { @@ -275,7 +402,8 @@ public void retry(String workflowId) { List failedTasks = getFailedTasksToRetry(workflow); List cancelledTasks = workflow.getTasks().stream() - .filter(x->CANCELED.equals(x.getStatus())).collect(Collectors.toList()); + .filter(t -> CANCELED.equals(t.getStatus())) + .collect(Collectors.toList()); if (failedTasks.isEmpty()) { throw new ApplicationException(CONFLICT, @@ -287,12 +415,12 @@ public void retry(String workflowId) { }); // Reschedule the cancelled task but if the join is cancelled set that to in progress - cancelledTasks.forEach(cancelledTask -> { - if (cancelledTask.getTaskType().equalsIgnoreCase(WorkflowTask.Type.JOIN.toString())) { - cancelledTask.setStatus(IN_PROGRESS); - executionDAO.updateTask(cancelledTask); + cancelledTasks.forEach(task -> { + if (task.getTaskType().equalsIgnoreCase(TaskType.JOIN.toString())) { + task.setStatus(IN_PROGRESS); + executionDAO.updateTask(task); } else { - rescheduledTasks.add(taskToBeRescheduled(cancelledTask)); + rescheduledTasks.add(taskToBeRescheduled(task)); } }); @@ -308,6 +436,7 @@ public void retry(String workflowId) { /** * Get all failed and cancelled tasks. * for failed tasks - get one for each task reference name(latest failed using seq id) + * * @param workflow * @return list of latest failed tasks, one for each task reference reference type. 
*/ @@ -316,11 +445,15 @@ List getFailedTasksToRetry(Workflow workflow) { return workflow.getTasks().stream() .filter(x -> FAILED.equals(x.getStatus())) .collect(groupingBy(Task::getReferenceTaskName, maxBy(comparingInt(Task::getSeq)))) - .values().stream().filter(Optional::isPresent).map(Optional::get).collect(Collectors.toList()); + .values().stream() + .filter(Optional::isPresent) + .map(Optional::get) + .collect(Collectors.toList()); } /** * Reschedule a task + * * @param task failed or cancelled task * @return new instance of a task with "SCHEDULED" status */ @@ -344,15 +477,19 @@ public Task getPendingTaskByWorkflow(String taskReferenceName, String workflowId .findFirst() // There can only be one task by a given reference name running at a time. .orElse(null); } - + + /** + * @param wf + * @throws ApplicationException + */ @VisibleForTesting void completeWorkflow(Workflow wf) { logger.debug("Completing workflow execution for {}", wf.getWorkflowId()); Workflow workflow = executionDAO.getWorkflow(wf.getWorkflowId(), false); if (workflow.getStatus().equals(WorkflowStatus.COMPLETED)) { - executionDAO.removeFromPendingWorkflow(workflow.getWorkflowType(), workflow.getWorkflowId()); + executionDAO.removeFromPendingWorkflow(workflow.getWorkflowName(), workflow.getWorkflowId()); logger.info("Workflow has already been completed. Current status={}, workflowId= {}", workflow.getStatus(), wf.getWorkflowId()); return; } @@ -362,7 +499,13 @@ void completeWorkflow(Workflow wf) { throw new ApplicationException(CONFLICT, msg); } + // FIXME Backwards compatibility for legacy workflows already running. + // This code will be removed in a future version. + if (workflow.getWorkflowDefinition() == null) { + workflow = metadataMapperService.populateWorkflowWithDefinitions(workflow); + } deciderService.updateWorkflowOutput(wf, null); + workflow.setStatus(WorkflowStatus.COMPLETED); workflow.setOutput(wf.getOutput()); workflow.setExternalOutputPayloadStoragePath(wf.getExternalOutputPayloadStoragePath()); @@ -373,7 +516,10 @@ void completeWorkflow(Workflow wf) { // If the following task, for some reason fails, the sweep will take care of this again! 
if (workflow.getParentWorkflowId() != null) { Workflow parent = executionDAO.getWorkflow(workflow.getParentWorkflowId(), false); - WorkflowDef parentDef = metadataDAO.get(parent.getWorkflowType(), parent.getVersion()); + WorkflowDef parentDef = Optional.ofNullable(parent.getWorkflowDefinition()) + .orElse(metadataDAO.get(parent.getWorkflowName(), parent.getWorkflowVersion()) + .orElseThrow(() -> new ApplicationException(NOT_FOUND, String.format("Unable to find parent workflow definition for %s", wf.getWorkflowId()))) + ); logger.debug("Completed sub-workflow {}, deciding parent workflow {}", wf.getWorkflowId(), wf.getParentWorkflowId()); Task parentWorkflowTask = executionDAO.getTask(workflow.getParentWorkflowTaskId()); @@ -386,8 +532,8 @@ void completeWorkflow(Workflow wf) { } decide(parent.getWorkflowId()); } - Monitors.recordWorkflowCompletion(workflow.getWorkflowType(), workflow.getEndTime() - workflow.getStartTime(), wf.getOwnerApp()); - queueDAO.remove(deciderQueue, workflow.getWorkflowId()); //remove from the sweep queue + Monitors.recordWorkflowCompletion(workflow.getWorkflowName(), workflow.getEndTime() - workflow.getStartTime(), wf.getOwnerApp()); + queueDAO.remove(DECIDER_QUEUE, workflow.getWorkflowId()); //remove from the sweep queue logger.debug("Removed workflow {} from decider queue", wf.getWorkflowId()); } @@ -397,12 +543,22 @@ public void terminateWorkflow(String workflowId, String reason) { terminateWorkflow(workflow, reason, null); } + /** + * @param workflow + * @param reason + * @param failureWorkflow + * @throws ApplicationException + */ public void terminateWorkflow(Workflow workflow, String reason, String failureWorkflow) { - if (!workflow.getStatus().isTerminal()) { workflow.setStatus(WorkflowStatus.TERMINATED); } + // FIXME Backwards compatibility for legacy workflows already running. + // This code will be removed in a future version. + if (workflow.getWorkflowDefinition() == null) { + workflow = metadataMapperService.populateWorkflowWithDefinitions(workflow); + } deciderService.updateWorkflowOutput(workflow, null); String workflowId = workflow.getWorkflowId(); @@ -415,8 +571,16 @@ public void terminateWorkflow(Workflow workflow, String reason, String failureWo // Cancel the ones which are not completed yet.... 
task.setStatus(CANCELED); if (isSystemTask.test(task)) { - WorkflowSystemTask workflowSystemTask = WorkflowSystemTask.get(task.getTaskType()); - workflowSystemTask.cancel(workflow, task, this); + WorkflowSystemTask stt = WorkflowSystemTask.get(task.getTaskType()); + try { + stt.cancel(workflow, task, this); + } catch (Exception e) { + throw new ApplicationException( + Code.INTERNAL_ERROR, + String.format("Error canceling systems task: %s", stt.getName()), + e + ); + } } executionDAO.updateTask(task); } @@ -438,8 +602,20 @@ public void terminateWorkflow(Workflow workflow, String reason, String failureWo input.put("failureStatus", workflow.getStatus().toString()); try { - WorkflowDef latestFailureWorkflow = metadataDAO.getLatest(failureWorkflow); - String failureWFId = startWorkflow(failureWorkflow, latestFailureWorkflow.getVersion(), workflowId, input); + WorkflowDef latestFailureWorkflow = metadataDAO.getLatest(failureWorkflow) + .orElseThrow(() -> + new RuntimeException("Failure Workflow Definition not found for: " + failureWorkflow) + ); + + String failureWFId = startWorkflow( + latestFailureWorkflow, + input, + null, + workflowId, + null, + null + ); + workflow.getOutput().put("conductor.failure_workflow", failureWFId); } catch (Exception e) { logger.error("Failed to start error workflow", e); @@ -448,14 +624,17 @@ public void terminateWorkflow(Workflow workflow, String reason, String failureWo } } - queueDAO.remove(deciderQueue, workflow.getWorkflowId()); //remove from the sweep queue - executionDAO.removeFromPendingWorkflow(workflow.getWorkflowType(), workflow.getWorkflowId()); + queueDAO.remove(DECIDER_QUEUE, workflow.getWorkflowId()); //remove from the sweep queue + executionDAO.removeFromPendingWorkflow(workflow.getWorkflowName(), workflow.getWorkflowId()); // Send to atlas - Monitors.recordWorkflowTermination(workflow.getWorkflowType(), workflow.getStatus(), workflow.getOwnerApp()); + Monitors.recordWorkflowTermination(workflow.getWorkflowName(), workflow.getStatus(), workflow.getOwnerApp()); } - + /** + * @param taskResult the task result to be updated + * @throws ApplicationException + */ public void updateTask(TaskResult taskResult) { if (taskResult == null) { logger.info("null task given for update"); @@ -464,6 +643,13 @@ public void updateTask(TaskResult taskResult) { String workflowId = taskResult.getWorkflowInstanceId(); Workflow workflowInstance = executionDAO.getWorkflow(workflowId); + + // FIXME Backwards compatibility for legacy workflows already running. + // This code will be removed in a future version. 
+ if (workflowInstance.getWorkflowDefinition() == null) { + workflowInstance = metadataMapperService.populateWorkflowWithDefinitions(workflowInstance); + } + Task task = executionDAO.getTask(taskResult.getTaskId()); logger.debug("Task: {} belonging to Workflow {} being updated", task, workflowInstance); @@ -477,13 +663,15 @@ public void updateTask(TaskResult taskResult) { task.setStatus(COMPLETED); } task.setOutputData(taskResult.getOutputData()); + task.setOutputMessage(taskResult.getOutputMessage()); + task.setExternalOutputPayloadStoragePath(taskResult.getExternalOutputPayloadStoragePath()); task.setReasonForIncompletion(taskResult.getReasonForIncompletion()); task.setWorkerId(taskResult.getWorkerId()); executionDAO.updateTask(task); String msg = String.format("Workflow %s is already completed as %s, task=%s, reason=%s", workflowInstance.getWorkflowId(), workflowInstance.getStatus(), task.getTaskType(), workflowInstance.getReasonForIncompletion()); logger.info(msg); - Monitors.recordUpdateConflict(task.getTaskType(), workflowInstance.getWorkflowType(), workflowInstance.getStatus()); + Monitors.recordUpdateConflict(task.getTaskType(), workflowInstance.getWorkflowName(), workflowInstance.getStatus()); return; } @@ -494,12 +682,13 @@ public void updateTask(TaskResult taskResult) { String msg = String.format("Task is already completed as %s@%d, workflow status=%s, workflowId=%s, taskId=%s", task.getStatus(), task.getEndTime(), workflowInstance.getStatus(), workflowInstance.getWorkflowId(), task.getTaskId()); logger.info(msg); - Monitors.recordUpdateConflict(task.getTaskType(), workflowInstance.getWorkflowType(), task.getStatus()); + Monitors.recordUpdateConflict(task.getTaskType(), workflowInstance.getWorkflowName(), task.getStatus()); return; } task.setStatus(valueOf(taskResult.getStatus().name())); task.setOutputData(taskResult.getOutputData()); + task.setOutputMessage(taskResult.getOutputMessage()); task.setExternalOutputPayloadStoragePath(taskResult.getExternalOutputPayloadStoragePath()); task.setReasonForIncompletion(taskResult.getReasonForIncompletion()); task.setWorkerId(taskResult.getWorkerId()); @@ -550,7 +739,6 @@ public void updateTask(TaskResult taskResult) { Monitors.recordTaskExecutionTime(task.getTaskDefName(), duration, true, task.getStatus()); Monitors.recordTaskExecutionTime(task.getTaskDefName(), lastDuration, false, task.getStatus()); } - } public List getTasks(String taskType, String startKey, int count) { @@ -565,7 +753,7 @@ public List getRunningWorkflows(String workflowName) { public List getWorkflows(String name, Integer version, Long startTime, Long endTime) { List workflowsByType = executionDAO.getWorkflowsByType(name, startTime, endTime); return workflowsByType.stream() - .filter(workflow -> workflow.getVersion() == version) + .filter(workflow -> workflow.getWorkflowVersion() == version) .map(Workflow::getWorkflowId) .collect(Collectors.toList()); @@ -577,16 +765,20 @@ public List getRunningWorkflowIds(String workflowName) { /** * @param workflowId ID of the workflow to evaluate the state for - * @return true if the workflow is in terminal state, false otherwise. + * @return true if the workflow has completed (success or failed), false otherwise. + * @throws ApplicationException If there was an error - caller should retry in this case. 
*/ public boolean decide(String workflowId) { // If it is a new workflow, the tasks will be still empty even though include tasks is true Workflow workflow = executionDAO.getWorkflow(workflowId, true); - WorkflowDef workflowDef = metadataDAO.get(workflow.getWorkflowType(), workflow.getVersion()); + + // FIXME Backwards compatibility for legacy workflows already running. + // This code will be removed in a future version. + workflow = metadataMapperService.populateWorkflowWithDefinitions(workflow); try { - DeciderOutcome outcome = deciderService.decide(workflow, workflowDef); + DeciderService.DeciderOutcome outcome = deciderService.decide(workflow); if (outcome.isComplete) { completeWorkflow(workflow); return true; @@ -606,9 +798,18 @@ public boolean decide(String workflowId) { for (Task task : tasksToBeScheduled) { if (isSystemTask.and(isNonTerminalTask).test(task)) { WorkflowSystemTask workflowSystemTask = WorkflowSystemTask.get(task.getTaskType()); - if (!workflowSystemTask.isAsync() && workflowSystemTask.execute(workflow, task, this)) { - tasksToBeUpdated.add(task); - stateChanged = true; + + try { + if (!workflowSystemTask.isAsync() && workflowSystemTask.execute(workflow, task, this)) { + tasksToBeUpdated.add(task); + stateChanged = true; + } + } catch (Exception e) { + throw new ApplicationException( + Code.INTERNAL_ERROR, + String.format("Unable to start system task: %s", workflowSystemTask.getName()), + e + ); } } } @@ -618,7 +819,7 @@ public boolean decide(String workflowId) { if (!outcome.tasksToBeUpdated.isEmpty() || !outcome.tasksToBeScheduled.isEmpty()) { executionDAO.updateTasks(tasksToBeUpdated); executionDAO.updateWorkflow(workflow); - queueDAO.push(deciderQueue, workflow.getWorkflowId(), config.getSweepFrequency()); + queueDAO.push(DECIDER_QUEUE, workflow.getWorkflowId(), config.getSweepFrequency()); } if (stateChanged) { @@ -626,8 +827,8 @@ public boolean decide(String workflowId) { } } catch (TerminateWorkflowException twe) { - logger.info("Execution terminated of workflow: {} of type: {}", workflowId, workflowDef.getName(), twe); - terminate(workflowDef, workflow, twe); + logger.info("Execution terminated of workflow: {} of type: {}", workflowId, workflow.getWorkflowDefinition().getName(), twe); + terminate(workflow, twe); return true; } catch (RuntimeException e) { logger.error("Error deciding workflow: {}", workflowId, e); @@ -636,6 +837,9 @@ public boolean decide(String workflowId) { return false; } + /** + * @throws ApplicationException + */ public void pauseWorkflow(String workflowId) { WorkflowStatus status = WorkflowStatus.PAUSED; Workflow workflow = executionDAO.getWorkflow(workflowId, false); @@ -649,30 +853,44 @@ public void pauseWorkflow(String workflowId) { executionDAO.updateWorkflow(workflow); } + /** + * @param workflowId + * @throws IllegalStateException + */ public void resumeWorkflow(String workflowId) { Workflow workflow = executionDAO.getWorkflow(workflowId, false); if (!workflow.getStatus().equals(WorkflowStatus.PAUSED)) { - throw new IllegalStateException("The workflow " + workflowId + " is not PAUSED so cannot resume"); + throw new IllegalStateException("The workflow " + workflowId + " is not PAUSED so cannot resume. 
" + + "Current status is " + workflow.getStatus().name()); } workflow.setStatus(WorkflowStatus.RUNNING); executionDAO.updateWorkflow(workflow); decide(workflowId); } + /** + * @param workflowId + * @param taskReferenceName + * @param skipTaskRequest + * @throws IllegalStateException + */ public void skipTaskFromWorkflow(String workflowId, String taskReferenceName, SkipTaskRequest skipTaskRequest) { Workflow wf = executionDAO.getWorkflow(workflowId, true); + // FIXME Backwards compatibility for legacy workflows already running. + // This code will be removed in a future version. + wf = metadataMapperService.populateWorkflowWithDefinitions(wf); + // If the wf is not running then cannot skip any task if (!wf.getStatus().equals(WorkflowStatus.RUNNING)) { String errorMsg = String.format("The workflow %s is not running so the task referenced by %s cannot be skipped", workflowId, taskReferenceName); throw new IllegalStateException(errorMsg); } // Check if the reference name is as per the workflowdef - WorkflowDef wfd = metadataDAO.get(wf.getWorkflowType(), wf.getVersion()); - WorkflowTask wft = wfd.getTaskByRefName(taskReferenceName); + WorkflowTask wft = wf.getWorkflowDefinition().getTaskByRefName(taskReferenceName); if (wft == null) { - String errorMsg = String.format("The task referenced by %s does not exist in the WorkflowDefinition %s", taskReferenceName, wf.getWorkflowType()); + String errorMsg = String.format("The task referenced by %s does not exist in the WorkflowDefinition %s", taskReferenceName, wf.getWorkflowName()); throw new IllegalStateException(errorMsg); } // If the task is already started the again it cannot be skipped @@ -693,6 +911,8 @@ public void skipTaskFromWorkflow(String workflowId, String taskReferenceName, Sk if (skipTaskRequest != null) { theTask.setInputData(skipTaskRequest.getTaskInput()); theTask.setOutputData(skipTaskRequest.getTaskOutput()); + theTask.setInputMessage(skipTaskRequest.getTaskInputMessage()); + theTask.setOutputMessage(skipTaskRequest.getTaskOutputMessage()); } executionDAO.createTasks(Collections.singletonList(theTask)); decide(workflowId); @@ -702,8 +922,7 @@ public Workflow getWorkflow(String workflowId, boolean includeTasks) { return executionDAO.getWorkflow(workflowId, includeTasks); } - - private void addTaskToQueue(Task task) { + public void addTaskToQueue(Task task) { // put in queue String taskQueueName = QueueUtils.getQueueName(task); queueDAO.remove(taskQueueName, task.getTaskId()); @@ -796,7 +1015,7 @@ private void setTaskDomains(List tasks, Workflow wf) { String[] domains = domainstr.split(","); tasks.forEach(task -> { // Filter out SystemTask - if (!WorkflowTask.Type.isSystemTask(task.getTaskType())) { + if (!TaskType.isSystemTask(task.getTaskType())) { // Check which domain worker is polling // Set the task domain task.setDomain(getActiveDomain(task.getTaskType(), domains)); @@ -805,7 +1024,7 @@ private void setTaskDomains(List tasks, Workflow wf) { } else { tasks.forEach(task -> { - if (!WorkflowTask.Type.isSystemTask(task.getTaskType())) { + if (!TaskType.isSystemTask(task.getTaskType())) { String taskDomainstr = taskToDomain.get(task.getTaskType()); if (taskDomainstr != null) { task.setDomain(getActiveDomain(task.getTaskType(), taskDomainstr.split(","))); @@ -839,6 +1058,7 @@ private long getTaskDuration(long s, Task task) { @VisibleForTesting boolean scheduleTask(Workflow workflow, List tasks) { + if (tasks == null || tasks.isEmpty()) { return false; } @@ -876,7 +1096,16 @@ boolean scheduleTask(Workflow workflow, List tasks) { } 
task.setStartTime(System.currentTimeMillis()); if (!workflowSystemTask.isAsync()) { - workflowSystemTask.start(workflow, task, this); + try { + workflowSystemTask.start(workflow, task, this); + } catch (Exception e) { + String message = String.format( + "Unable to start task {id: %s, name: %s}", + task.getTaskId(), + task.getTaskDefName() + ); + throw new ApplicationException(Code.INTERNAL_ERROR, message, e); + } startedSystemTasks = true; executionDAO.updateTask(task); } else { @@ -894,12 +1123,12 @@ private void addTaskToQueue(final List tasks) { } } - private void terminate(final WorkflowDef def, final Workflow workflow, TerminateWorkflowException tw) { + private void terminate(final Workflow workflow, TerminateWorkflowException tw) { if (!workflow.getStatus().isTerminal()) { workflow.setStatus(tw.workflowStatus); } - String failureWorkflow = def.getFailureWorkflow(); + String failureWorkflow = workflow.getWorkflowDefinition().getFailureWorkflow(); if (failureWorkflow != null) { if (failureWorkflow.startsWith("$")) { String[] paramPathComponents = failureWorkflow.split("\\."); diff --git a/core/src/main/java/com/netflix/conductor/core/execution/WorkflowSweeper.java b/core/src/main/java/com/netflix/conductor/core/execution/WorkflowSweeper.java index a0decc9141..f3236636e6 100644 --- a/core/src/main/java/com/netflix/conductor/core/execution/WorkflowSweeper.java +++ b/core/src/main/java/com/netflix/conductor/core/execution/WorkflowSweeper.java @@ -80,8 +80,8 @@ public void init(WorkflowExecutor workflowExecutor) { logger.info("Workflow sweep is disabled."); return; } - List workflowIds = queueDAO.pop(WorkflowExecutor.deciderQueue, 2 * executorThreadPoolSize, 2000); - int currentQueueSize = queueDAO.getSize(WorkflowExecutor.deciderQueue); + List workflowIds = queueDAO.pop(WorkflowExecutor.DECIDER_QUEUE, 2 * executorThreadPoolSize, 2000); + int currentQueueSize = queueDAO.getSize(WorkflowExecutor.DECIDER_QUEUE); logger.debug("Sweeper's current deciderqueue size: {}.", currentQueueSize); int retrievedWorkflows = (workflowIds != null) ? 
workflowIds.size() : 0; logger.debug("Sweeper retrieved {} workflows from the decider queue.", retrievedWorkflows); @@ -108,15 +108,15 @@ public void sweep(List workflowIds, WorkflowExecutor workflowExecutor) t } boolean done = workflowExecutor.decide(workflowId); if(!done) { - queueDAO.setUnackTimeout(WorkflowExecutor.deciderQueue, workflowId, config.getSweepFrequency() * 1000); + queueDAO.setUnackTimeout(WorkflowExecutor.DECIDER_QUEUE, workflowId, config.getSweepFrequency() * 1000); } else { - queueDAO.remove(WorkflowExecutor.deciderQueue, workflowId); + queueDAO.remove(WorkflowExecutor.DECIDER_QUEUE, workflowId); } } catch (ApplicationException e) { if(e.getCode().equals(Code.NOT_FOUND)) { logger.error("Workflow NOT found for id: " + workflowId, e); - queueDAO.remove(WorkflowExecutor.deciderQueue, workflowId); + queueDAO.remove(WorkflowExecutor.DECIDER_QUEUE, workflowId); } } catch (Exception e) { diff --git a/core/src/main/java/com/netflix/conductor/core/execution/mapper/DecisionTaskMapper.java b/core/src/main/java/com/netflix/conductor/core/execution/mapper/DecisionTaskMapper.java index c2f9ac88ff..3ec5dd1c46 100644 --- a/core/src/main/java/com/netflix/conductor/core/execution/mapper/DecisionTaskMapper.java +++ b/core/src/main/java/com/netflix/conductor/core/execution/mapper/DecisionTaskMapper.java @@ -18,6 +18,7 @@ import com.google.common.annotations.VisibleForTesting; import com.netflix.conductor.common.metadata.tasks.Task; +import com.netflix.conductor.common.metadata.workflow.TaskType; import com.netflix.conductor.common.metadata.workflow.WorkflowDef; import com.netflix.conductor.common.metadata.workflow.WorkflowTask; import com.netflix.conductor.common.run.Workflow; @@ -35,7 +36,7 @@ /** - * An implementation of {@link TaskMapper} to map a {@link WorkflowTask} of type {@link WorkflowTask.Type#DECISION} + * An implementation of {@link TaskMapper} to map a {@link WorkflowTask} of type {@link TaskType#DECISION} * to a List {@link Task} starting with Task of type {@link SystemTaskType#DECISION} which is marked as IN_PROGRESS, * followed by the list of {@link Task} based on the case expression evaluation in the Decision task. */ @@ -44,7 +45,7 @@ public class DecisionTaskMapper implements TaskMapper { Logger logger = LoggerFactory.getLogger(DecisionTaskMapper.class); /** - * This method gets the list of tasks that need to scheduled when the the task to scheduled is of type {@link WorkflowTask.Type#DECISION}. + * This method gets the list of tasks that need to scheduled when the the task to scheduled is of type {@link TaskType#DECISION}. 
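For illustration, a small self-contained sketch of the case-expression idea behind the DECISION mapping described here: the evaluated case value selects one named branch of tasks to schedule next, with a default branch as fallback. All names below (DecisionSketch, pickBranch, the branch maps) are hypothetical and only mirror the structure, not the actual mapper API.

import java.util.Collections;
import java.util.List;
import java.util.Map;

// Illustrative branch selection for a DECISION-style task: the evaluated case value picks
// one named branch (or the default branch) whose tasks are scheduled next.
public class DecisionSketch {

    static List<String> pickBranch(String caseValue,
                                   Map<String, List<String>> decisionCases,
                                   List<String> defaultBranch) {
        // Look up the branch matching the case expression result; fall back to the default case.
        List<String> selected = decisionCases.get(caseValue);
        return selected != null ? selected : defaultBranch;
    }

    public static void main(String[] args) {
        Map<String, List<String>> cases = Map.of(
                "approve", List.of("notify_user", "archive_request"),
                "reject",  List.of("notify_user"));
        // With caseValue "approve", the first branch's two tasks would be scheduled.
        System.out.println(pickBranch("approve", cases, Collections.emptyList()));
    }
}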
* * @param taskMapperContext: A wrapper class containing the {@link WorkflowTask}, {@link WorkflowDef}, {@link Workflow} and a string representation of the TaskId * @return List of tasks in the following order: @@ -67,7 +68,6 @@ public List getMappedTasks(TaskMapperContext taskMapperContext) { List tasksToBeScheduled = new LinkedList<>(); WorkflowTask taskToSchedule = taskMapperContext.getTaskToSchedule(); Workflow workflowInstance = taskMapperContext.getWorkflowInstance(); - WorkflowDef workflowDefinition = taskMapperContext.getWorkflowDefinition(); Map taskInput = taskMapperContext.getTaskInput(); int retryCount = taskMapperContext.getRetryCount(); String taskId = taskMapperContext.getTaskId(); @@ -81,7 +81,7 @@ public List getMappedTasks(TaskMapperContext taskMapperContext) { decisionTask.setTaskDefName(SystemTaskType.DECISION.name()); decisionTask.setReferenceTaskName(taskToSchedule.getTaskReferenceName()); decisionTask.setWorkflowInstanceId(workflowInstance.getWorkflowId()); - decisionTask.setWorkflowType(workflowInstance.getWorkflowType()); + decisionTask.setWorkflowType(workflowInstance.getWorkflowName()); decisionTask.setCorrelationId(workflowInstance.getCorrelationId()); decisionTask.setScheduledTime(System.currentTimeMillis()); decisionTask.setEndTime(System.currentTimeMillis()); @@ -103,8 +103,7 @@ public List getMappedTasks(TaskMapperContext taskMapperContext) { if (selectedTasks != null && !selectedTasks.isEmpty()) { WorkflowTask selectedTask = selectedTasks.get(0); //Schedule the first task to be executed... //TODO break out this recursive call using function composition of what needs to be done and then walk back the condition tree - List caseTasks = taskMapperContext.getDeciderService() - .getTasksToBeScheduled(workflowDefinition, workflowInstance, selectedTask, retryCount, taskMapperContext.getRetryTaskId()); + List caseTasks = taskMapperContext.getDeciderService().getTasksToBeScheduled(workflowInstance, selectedTask, retryCount, taskMapperContext.getRetryTaskId()); tasksToBeScheduled.addAll(caseTasks); decisionTask.getInputData().put("hasChildren", "true"); } diff --git a/core/src/main/java/com/netflix/conductor/core/execution/mapper/DynamicTaskMapper.java b/core/src/main/java/com/netflix/conductor/core/execution/mapper/DynamicTaskMapper.java index 7d4e732ac2..0618dfb23c 100644 --- a/core/src/main/java/com/netflix/conductor/core/execution/mapper/DynamicTaskMapper.java +++ b/core/src/main/java/com/netflix/conductor/core/execution/mapper/DynamicTaskMapper.java @@ -19,12 +19,12 @@ import com.google.common.annotations.VisibleForTesting; import com.netflix.conductor.common.metadata.tasks.Task; import com.netflix.conductor.common.metadata.tasks.TaskDef; +import com.netflix.conductor.common.metadata.workflow.TaskType; import com.netflix.conductor.common.metadata.workflow.WorkflowDef; import com.netflix.conductor.common.metadata.workflow.WorkflowTask; import com.netflix.conductor.common.run.Workflow; import com.netflix.conductor.core.execution.ParametersUtils; import com.netflix.conductor.core.execution.TerminateWorkflowException; -import com.netflix.conductor.dao.MetadataDAO; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -35,19 +35,16 @@ /** - * An implementation of {@link TaskMapper} to map a {@link WorkflowTask} of type {@link WorkflowTask.Type#DYNAMIC} + * An implementation of {@link TaskMapper} to map a {@link WorkflowTask} of type {@link TaskType#DYNAMIC} * to a {@link Task} based on definition derived from the dynamic task name defined in {@link 
WorkflowTask#getInputParameters()} */ public class DynamicTaskMapper implements TaskMapper { public static final Logger logger = LoggerFactory.getLogger(DynamicTaskMapper.class); - private MetadataDAO metadataDAO; - private ParametersUtils parametersUtils; - public DynamicTaskMapper(ParametersUtils parametersUtils, MetadataDAO metadataDAO) { - this.metadataDAO = metadataDAO; + public DynamicTaskMapper(ParametersUtils parametersUtils) { this.parametersUtils = parametersUtils; } @@ -80,7 +77,7 @@ public List getMappedTasks(TaskMapperContext taskMapperContext) throws Ter dynamicTask.setReferenceTaskName(taskToSchedule.getTaskReferenceName()); dynamicTask.setInputData(input); dynamicTask.setWorkflowInstanceId(workflowInstance.getWorkflowId()); - dynamicTask.setWorkflowType(workflowInstance.getWorkflowType()); + dynamicTask.setWorkflowType(workflowInstance.getWorkflowName()); dynamicTask.setStatus(Task.Status.SCHEDULED); dynamicTask.setTaskType(taskToSchedule.getType()); dynamicTask.setTaskDefName(taskToSchedule.getName()); @@ -116,15 +113,15 @@ String getDynamicTaskName(Map taskInput, String taskNameParam) t } /** - * This method gets the TaskDefinition from the MetadataDao based on the {@link WorkflowTask#getName()} + * This method gets the TaskDefinition for a specific {@link WorkflowTask} * * @param taskToSchedule: An instance of {@link WorkflowTask} which has the name of the using which the {@link TaskDef} can be retrieved. - * @throws TerminateWorkflowException : in case of no work flow definition available in the {@link MetadataDAO} + * @throws TerminateWorkflowException : in case of no work flow definition available * @return: An instance of TaskDefinition */ @VisibleForTesting TaskDef getDynamicTaskDefinition(WorkflowTask taskToSchedule) throws TerminateWorkflowException { //TODO this is a common pattern in code base can be moved to DAO - return Optional.ofNullable(metadataDAO.getTaskDef(taskToSchedule.getName())) + return Optional.ofNullable(taskToSchedule.getTaskDefinition()) .orElseThrow(() -> { String reason = String.format("Invalid task specified. 
Cannot find task by name %s in the task definitions", taskToSchedule.getName()); diff --git a/core/src/main/java/com/netflix/conductor/core/execution/mapper/EventTaskMapper.java b/core/src/main/java/com/netflix/conductor/core/execution/mapper/EventTaskMapper.java index 29b010f282..2a2cd3a808 100644 --- a/core/src/main/java/com/netflix/conductor/core/execution/mapper/EventTaskMapper.java +++ b/core/src/main/java/com/netflix/conductor/core/execution/mapper/EventTaskMapper.java @@ -58,7 +58,7 @@ public List getMappedTasks(TaskMapperContext taskMapperContext) { eventTask.setTaskDefName(taskToSchedule.getName()); eventTask.setReferenceTaskName(taskToSchedule.getTaskReferenceName()); eventTask.setWorkflowInstanceId(workflowInstance.getWorkflowId()); - eventTask.setWorkflowType(workflowInstance.getWorkflowType()); + eventTask.setWorkflowType(workflowInstance.getWorkflowName()); eventTask.setCorrelationId(workflowInstance.getCorrelationId()); eventTask.setScheduledTime(System.currentTimeMillis()); eventTask.setEndTime(System.currentTimeMillis()); diff --git a/core/src/main/java/com/netflix/conductor/core/execution/mapper/ForkJoinDynamicTaskMapper.java b/core/src/main/java/com/netflix/conductor/core/execution/mapper/ForkJoinDynamicTaskMapper.java index ae5b2f539f..ea7b7b4b44 100644 --- a/core/src/main/java/com/netflix/conductor/core/execution/mapper/ForkJoinDynamicTaskMapper.java +++ b/core/src/main/java/com/netflix/conductor/core/execution/mapper/ForkJoinDynamicTaskMapper.java @@ -20,13 +20,16 @@ import com.google.common.annotations.VisibleForTesting; import com.netflix.conductor.common.metadata.tasks.Task; import com.netflix.conductor.common.metadata.workflow.DynamicForkJoinTaskList; +import com.netflix.conductor.common.metadata.workflow.TaskType; import com.netflix.conductor.common.metadata.workflow.WorkflowDef; import com.netflix.conductor.common.metadata.workflow.WorkflowTask; import com.netflix.conductor.common.run.Workflow; import com.netflix.conductor.core.execution.ParametersUtils; import com.netflix.conductor.core.execution.SystemTaskType; import com.netflix.conductor.core.execution.TerminateWorkflowException; +import com.netflix.conductor.core.metadata.MetadataMapperService; import com.netflix.conductor.core.utils.IDGenerator; +import com.netflix.conductor.dao.MetadataDAO; import org.apache.commons.lang3.tuple.ImmutablePair; import org.apache.commons.lang3.tuple.Pair; import org.slf4j.Logger; @@ -40,7 +43,7 @@ import java.util.stream.Collectors; /** - * An implementation of {@link TaskMapper} to map a {@link WorkflowTask} of type {@link WorkflowTask.Type#FORK_JOIN_DYNAMIC} + * An implementation of {@link TaskMapper} to map a {@link WorkflowTask} of type {@link TaskType#FORK_JOIN_DYNAMIC} * to a LinkedList of {@link Task} beginning with a {@link SystemTaskType#FORK}, followed by the user defined dynamic tasks and * a {@link SystemTaskType#JOIN} at the end */ @@ -48,20 +51,23 @@ public class ForkJoinDynamicTaskMapper implements TaskMapper { public static final Logger logger = LoggerFactory.getLogger(ForkJoinDynamicTaskMapper.class); - private ParametersUtils parametersUtils; + private final ParametersUtils parametersUtils; - private ObjectMapper objectMapper; + private final ObjectMapper objectMapper; + + private final MetadataDAO metadataDAO; private static final TypeReference> ListOfWorkflowTasks = new TypeReference>() { }; - public ForkJoinDynamicTaskMapper(ParametersUtils parametersUtils, ObjectMapper objectMapper) { + public ForkJoinDynamicTaskMapper(ParametersUtils parametersUtils, 
ObjectMapper objectMapper, MetadataDAO metadataDAO) { this.parametersUtils = parametersUtils; this.objectMapper = objectMapper; + this.metadataDAO = metadataDAO; } /** - * This method gets the list of tasks that need to scheduled when the the task to scheduled is of type {@link WorkflowTask.Type#FORK_JOIN_DYNAMIC}. + * This method gets the list of tasks that need to scheduled when the the task to scheduled is of type {@link TaskType#FORK_JOIN_DYNAMIC}. * Creates a Fork Task, followed by the Dynamic tasks and a final JOIN task. *
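To make the resulting ordering concrete, a minimal standalone sketch of the fork / dynamic-tasks / join sequence this mapper produces, including the check that a join must follow the dynamic fork. The DynamicForkSketch class and the task names are hypothetical and only mirror the shape of the scheduled list, not the real Task objects.

import java.util.ArrayList;
import java.util.List;

// Illustrative ordering for a dynamic fork: one FORK marker task, then the dynamically
// supplied tasks, and finally the JOIN that must follow them in the definition.
public class DynamicForkSketch {

    static List<String> schedule(List<String> dynamicTaskRefs, String joinTaskRef) {
        if (joinTaskRef == null) {
            // Mirrors the validation in the mapper: a dynamic fork must be followed by a join.
            throw new IllegalStateException("Dynamic fork is not followed by a join task");
        }
        List<String> scheduled = new ArrayList<>();
        scheduled.add("FORK");              // system task marking the fork point
        scheduled.addAll(dynamicTaskRefs);  // the user-supplied forked tasks
        scheduled.add(joinTaskRef);         // join waits for all forked branches
        return scheduled;
    }

    public static void main(String[] args) {
        System.out.println(schedule(List.of("encode_720p", "encode_1080p"), "join_encodes"));
        // -> [FORK, encode_720p, encode_1080p, join_encodes]
    }
}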

The definitions of the dynamic forks that need to be scheduled are available in the {@link WorkflowTask#getInputParameters()} * which are accessed using the {@link TaskMapperContext#getTaskToSchedule()}. The dynamic fork task definitions are referred by a key value either by @@ -74,7 +80,7 @@ public ForkJoinDynamicTaskMapper(ParametersUtils parametersUtils, ObjectMapper o *

  • If the input parameter representing the Dynamic fork tasks is available as part of {@link WorkflowTask#getDynamicForkJoinTasksParam()} then * the input for the dynamic tasks is available in the payload of the tasks definition. *
  • - *
  • A check is performed that the next following task in the {@link WorkflowDef} is a {@link WorkflowTask.Type#JOIN}
  • + *
  • A check is performed that the next following task in the {@link WorkflowDef} is a {@link TaskType#JOIN}
  • * * * @@ -82,7 +88,7 @@ public ForkJoinDynamicTaskMapper(ParametersUtils parametersUtils, ObjectMapper o * @throws TerminateWorkflowException In case of: *
      *
    • - * When the task after {@link WorkflowTask.Type#FORK_JOIN_DYNAMIC} is not a {@link WorkflowTask.Type#JOIN} + * When the task after {@link TaskType#FORK_JOIN_DYNAMIC} is not a {@link TaskType#JOIN} *
    • *
    • * When the input parameters for the dynamic tasks are not of type {@link Map} @@ -107,7 +113,6 @@ public List getMappedTasks(TaskMapperContext taskMapperContext) throws Ter WorkflowTask taskToSchedule = taskMapperContext.getTaskToSchedule(); Workflow workflowInstance = taskMapperContext.getWorkflowInstance(); - WorkflowDef workflowDef = taskMapperContext.getWorkflowDefinition(); String taskId = taskMapperContext.getTaskId(); int retryCount = taskMapperContext.getRetryCount(); @@ -130,7 +135,7 @@ public List getMappedTasks(TaskMapperContext taskMapperContext) throws Ter //Add each dynamic task to the mapped tasks and also get the last dynamic task in the list, // which indicates that the following task after that needs to be a join task for (WorkflowTask wft : dynForkTasks) {//TODO this is a cyclic dependency, break it out using function composition - List forkedTasks = taskMapperContext.getDeciderService().getTasksToBeScheduled(workflowDef, workflowInstance, wft, retryCount); + List forkedTasks = taskMapperContext.getDeciderService().getTasksToBeScheduled(workflowInstance, wft, retryCount); for (Task forkedTask : forkedTasks) { Map forkedTaskInput = tasksInput.get(forkedTask.getReferenceTaskName()); forkedTask.getInputData().putAll(forkedTaskInput); @@ -143,8 +148,11 @@ public List getMappedTasks(TaskMapperContext taskMapperContext) throws Ter //From the workflow definition get the next task and make sure that it is a JOIN task. //The dynamic fork tasks need to be followed by a join task - WorkflowTask joinWorkflowTask = workflowDef.getNextTask(taskToSchedule.getTaskReferenceName()); - if (joinWorkflowTask == null || !joinWorkflowTask.getType().equals(WorkflowTask.Type.JOIN.name())) { + WorkflowTask joinWorkflowTask = workflowInstance + .getWorkflowDefinition() + .getNextTask(taskToSchedule.getTaskReferenceName()); + + if (joinWorkflowTask == null || !joinWorkflowTask.getType().equals(TaskType.JOIN.name())) { throw new TerminateWorkflowException("Dynamic join definition is not followed by a join task. Check the blueprint"); } @@ -162,7 +170,7 @@ public List getMappedTasks(TaskMapperContext taskMapperContext) throws Ter * This method creates a FORK task and adds the list of dynamic fork tasks keyed by "forkedTaskDefs" and * their names keyed by "forkedTasks" into {@link Task#getInputData()} * - * @param taskToSchedule A {@link WorkflowTask} representing {@link WorkflowTask.Type#FORK_JOIN_DYNAMIC} + * @param taskToSchedule A {@link WorkflowTask} representing {@link TaskType#FORK_JOIN_DYNAMIC} * @param workflowInstance: A instance of the {@link Workflow} which represents the workflow being executed. * @param taskId: The string representation of {@link java.util.UUID} which will be set as the taskId. * @param dynForkTasks: The list of dynamic forked tasks, the reference names of these tasks will be added to the forkDynamicTask @@ -194,7 +202,7 @@ Task createDynamicForkTask(WorkflowTask taskToSchedule, Workflow workflowInstanc * at the end to add a join task to be scheduled after all the fork tasks * * @param workflowInstance: A instance of the {@link Workflow} which represents the workflow being executed. 
- * @param joinWorkflowTask: A instance of {@link WorkflowTask} which is of type {@link WorkflowTask.Type#JOIN} + * @param joinWorkflowTask: A instance of {@link WorkflowTask} which is of type {@link TaskType#JOIN} * @param joinInput: The input which is set in the {@link Task#setInputData(Map)} * @return: a new instance of {@link Task} representing a {@link SystemTaskType#JOIN} */ @@ -205,7 +213,7 @@ Task createJoinTask(Workflow workflowInstance, WorkflowTask joinWorkflowTask, Ha joinTask.setTaskDefName(SystemTaskType.JOIN.name()); joinTask.setReferenceTaskName(joinWorkflowTask.getTaskReferenceName()); joinTask.setWorkflowInstanceId(workflowInstance.getWorkflowId()); - joinTask.setWorkflowType(workflowInstance.getWorkflowType()); + joinTask.setWorkflowType(workflowInstance.getWorkflowName()); joinTask.setCorrelationId(workflowInstance.getCorrelationId()); joinTask.setScheduledTime(System.currentTimeMillis()); joinTask.setEndTime(System.currentTimeMillis()); @@ -233,6 +241,11 @@ Pair, Map>> getDynamicForkTasksAn Map input = parametersUtils.getTaskInput(taskToSchedule.getInputParameters(), workflowInstance, null, null); Object dynamicForkTasksJson = input.get(dynamicForkTaskParam); List dynamicForkWorkflowTasks = objectMapper.convertValue(dynamicForkTasksJson, ListOfWorkflowTasks); + for (WorkflowTask workflowTask : dynamicForkWorkflowTasks) { + if (MetadataMapperService.shouldPopulateDefinition(workflowTask)) { + workflowTask.setTaskDefinition(metadataDAO.getTaskDef(workflowTask.getName())); + } + } Object dynamicForkTasksInput = input.get(taskToSchedule.getDynamicForkTasksInputParamName()); if (!(dynamicForkTasksInput instanceof Map)) { throw new TerminateWorkflowException("Input to the dynamically forked tasks is not a map -> expecting a map of K,V but found " + dynamicForkTasksInput); @@ -272,6 +285,10 @@ Pair, Map>> getDynamicForkJoinTas dynamicForkJoinWorkflowTask.setTaskReferenceName(dynamicForkJoinTask.getReferenceName()); dynamicForkJoinWorkflowTask.setName(dynamicForkJoinTask.getTaskName()); dynamicForkJoinWorkflowTask.setType(dynamicForkJoinTask.getType()); + if (MetadataMapperService.shouldPopulateDefinition(dynamicForkJoinWorkflowTask)) { + dynamicForkJoinWorkflowTask.setTaskDefinition( + metadataDAO.getTaskDef(dynamicForkJoinTask.getTaskName())); + } return dynamicForkJoinWorkflowTask; }) .collect(Collectors.toCollection(LinkedList::new)); diff --git a/core/src/main/java/com/netflix/conductor/core/execution/mapper/ForkJoinTaskMapper.java b/core/src/main/java/com/netflix/conductor/core/execution/mapper/ForkJoinTaskMapper.java index 64058d4460..3f4b85ab9c 100644 --- a/core/src/main/java/com/netflix/conductor/core/execution/mapper/ForkJoinTaskMapper.java +++ b/core/src/main/java/com/netflix/conductor/core/execution/mapper/ForkJoinTaskMapper.java @@ -17,6 +17,7 @@ package com.netflix.conductor.core.execution.mapper; import com.netflix.conductor.common.metadata.tasks.Task; +import com.netflix.conductor.common.metadata.workflow.TaskType; import com.netflix.conductor.common.metadata.workflow.WorkflowDef; import com.netflix.conductor.common.metadata.workflow.WorkflowTask; import com.netflix.conductor.common.run.Workflow; @@ -30,7 +31,7 @@ import java.util.Map; /** - * An implementation of {@link TaskMapper} to map a {@link WorkflowTask} of type {@link WorkflowTask.Type#FORK_JOIN} + * An implementation of {@link TaskMapper} to map a {@link WorkflowTask} of type {@link TaskType#FORK_JOIN} * to a LinkedList of {@link Task} beginning with a completed {@link SystemTaskType#FORK}, followed 
by the user defined fork tasks */ public class ForkJoinTaskMapper implements TaskMapper { @@ -38,7 +39,7 @@ public class ForkJoinTaskMapper implements TaskMapper { public static final Logger logger = LoggerFactory.getLogger(ForkJoinTaskMapper.class); /** - * This method gets the list of tasks that need to scheduled when the the task to scheduled is of type {@link WorkflowTask.Type#FORK_JOIN}. + * This method gets the list of tasks that need to scheduled when the the task to scheduled is of type {@link TaskType#FORK_JOIN}. * * @param taskMapperContext: A wrapper class containing the {@link WorkflowTask}, {@link WorkflowDef}, {@link Workflow} and a string representation of the TaskId * @return List of tasks in the following order: @@ -50,7 +51,7 @@ public class ForkJoinTaskMapper implements TaskMapper { * Might be any kind of task, but in most cases is a UserDefinedTask with {@link Task.Status#SCHEDULED} *
    • *
    - * @throws TerminateWorkflowException When the task after {@link WorkflowTask.Type#FORK_JOIN} is not a {@link WorkflowTask.Type#JOIN} + * @throws TerminateWorkflowException When the task after {@link TaskType#FORK_JOIN} is not a {@link TaskType#JOIN} */ @Override public List getMappedTasks(TaskMapperContext taskMapperContext) throws TerminateWorkflowException { @@ -63,7 +64,6 @@ public List getMappedTasks(TaskMapperContext taskMapperContext) throws Ter int retryCount = taskMapperContext.getRetryCount(); String taskId = taskMapperContext.getTaskId(); - WorkflowDef workflowDef = taskMapperContext.getWorkflowDefinition(); List tasksToBeScheduled = new LinkedList<>(); Task forkTask = new Task(); @@ -71,7 +71,7 @@ public List getMappedTasks(TaskMapperContext taskMapperContext) throws Ter forkTask.setTaskDefName(SystemTaskType.FORK.name()); forkTask.setReferenceTaskName(taskToSchedule.getTaskReferenceName()); forkTask.setWorkflowInstanceId(workflowInstance.getWorkflowId()); - forkTask.setWorkflowType(workflowInstance.getWorkflowType()); + forkTask.setWorkflowType(workflowInstance.getWorkflowName()); forkTask.setCorrelationId(workflowInstance.getCorrelationId()); forkTask.setScheduledTime(System.currentTimeMillis()); forkTask.setEndTime(System.currentTimeMillis()); @@ -85,12 +85,15 @@ public List getMappedTasks(TaskMapperContext taskMapperContext) throws Ter for (List wfts : forkTasks) { WorkflowTask wft = wfts.get(0); List tasks2 = taskMapperContext.getDeciderService() - .getTasksToBeScheduled(workflowDef, workflowInstance, wft, retryCount); + .getTasksToBeScheduled(workflowInstance, wft, retryCount); tasksToBeScheduled.addAll(tasks2); } - WorkflowTask joinWorkflowTask = workflowDef.getNextTask(taskToSchedule.getTaskReferenceName()); - if (joinWorkflowTask == null || !joinWorkflowTask.getType().equals(WorkflowTask.Type.JOIN.name())) { + WorkflowTask joinWorkflowTask = workflowInstance + .getWorkflowDefinition() + .getNextTask(taskToSchedule.getTaskReferenceName()); + + if (joinWorkflowTask == null || !joinWorkflowTask.getType().equals(TaskType.JOIN.name())) { throw new TerminateWorkflowException("Dynamic join definition is not followed by a join task. 
Check the blueprint"); } return tasksToBeScheduled; diff --git a/core/src/main/java/com/netflix/conductor/core/execution/mapper/JoinTaskMapper.java b/core/src/main/java/com/netflix/conductor/core/execution/mapper/JoinTaskMapper.java index b88c5731e9..991ec6d80d 100644 --- a/core/src/main/java/com/netflix/conductor/core/execution/mapper/JoinTaskMapper.java +++ b/core/src/main/java/com/netflix/conductor/core/execution/mapper/JoinTaskMapper.java @@ -17,6 +17,7 @@ package com.netflix.conductor.core.execution.mapper; import com.netflix.conductor.common.metadata.tasks.Task; +import com.netflix.conductor.common.metadata.workflow.TaskType; import com.netflix.conductor.common.metadata.workflow.WorkflowDef; import com.netflix.conductor.common.metadata.workflow.WorkflowTask; import com.netflix.conductor.common.run.Workflow; @@ -30,7 +31,7 @@ import java.util.Map; /** - * An implementation of {@link TaskMapper} to map a {@link WorkflowTask} of type {@link WorkflowTask.Type#JOIN} + * An implementation of {@link TaskMapper} to map a {@link WorkflowTask} of type {@link TaskType#JOIN} * to a {@link Task} of type {@link SystemTaskType#JOIN} */ public class JoinTaskMapper implements TaskMapper { @@ -38,7 +39,7 @@ public class JoinTaskMapper implements TaskMapper { public static final Logger logger = LoggerFactory.getLogger(JoinTaskMapper.class); /** - * This method maps {@link TaskMapper} to map a {@link WorkflowTask} of type {@link WorkflowTask.Type#JOIN} to a {@link Task} of type {@link SystemTaskType#JOIN} + * This method maps {@link TaskMapper} to map a {@link WorkflowTask} of type {@link TaskType#JOIN} to a {@link Task} of type {@link SystemTaskType#JOIN} * with a status of {@link Task.Status#IN_PROGRESS} * * @param taskMapperContext: A wrapper class containing the {@link WorkflowTask}, {@link WorkflowDef}, {@link Workflow} and a string representation of the TaskId @@ -62,7 +63,7 @@ public List getMappedTasks(TaskMapperContext taskMapperContext) { joinTask.setReferenceTaskName(taskToSchedule.getTaskReferenceName()); joinTask.setWorkflowInstanceId(workflowInstance.getWorkflowId()); joinTask.setCorrelationId(workflowInstance.getCorrelationId()); - joinTask.setWorkflowType(workflowInstance.getWorkflowType()); + joinTask.setWorkflowType(workflowInstance.getWorkflowName()); joinTask.setScheduledTime(System.currentTimeMillis()); joinTask.setEndTime(System.currentTimeMillis()); joinTask.setInputData(joinInput); diff --git a/core/src/main/java/com/netflix/conductor/core/execution/mapper/SimpleTaskMapper.java b/core/src/main/java/com/netflix/conductor/core/execution/mapper/SimpleTaskMapper.java index 8da5dd2e76..9565a6bd0e 100644 --- a/core/src/main/java/com/netflix/conductor/core/execution/mapper/SimpleTaskMapper.java +++ b/core/src/main/java/com/netflix/conductor/core/execution/mapper/SimpleTaskMapper.java @@ -19,12 +19,12 @@ import com.netflix.conductor.common.metadata.tasks.Task; import com.netflix.conductor.common.metadata.tasks.TaskDef; +import com.netflix.conductor.common.metadata.workflow.TaskType; import com.netflix.conductor.common.metadata.workflow.WorkflowDef; import com.netflix.conductor.common.metadata.workflow.WorkflowTask; import com.netflix.conductor.common.run.Workflow; import com.netflix.conductor.core.execution.ParametersUtils; import com.netflix.conductor.core.execution.TerminateWorkflowException; -import com.netflix.conductor.dao.MetadataDAO; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -35,7 +35,7 @@ /** - * An implementation of {@link TaskMapper} to map a {@link 
WorkflowTask} of type {@link WorkflowTask.Type#SIMPLE} + * An implementation of {@link TaskMapper} to map a {@link WorkflowTask} of type {@link TaskType#SIMPLE} * to a {@link Task} with status {@link Task.Status#SCHEDULED}. NOTE: There is not type defined for simples task. */ public class SimpleTaskMapper implements TaskMapper { @@ -43,19 +43,17 @@ public class SimpleTaskMapper implements TaskMapper { public static final Logger logger = LoggerFactory.getLogger(SimpleTaskMapper.class); private ParametersUtils parametersUtils; - private MetadataDAO metadataDAO; - public SimpleTaskMapper(ParametersUtils parametersUtils, MetadataDAO metadataDAO) { + public SimpleTaskMapper(ParametersUtils parametersUtils) { this.parametersUtils = parametersUtils; - this.metadataDAO = metadataDAO; } /** - * This method maps a {@link WorkflowTask} of type {@link WorkflowTask.Type#SIMPLE} + * This method maps a {@link WorkflowTask} of type {@link TaskType#SIMPLE} * to a {@link Task} * * @param taskMapperContext: A wrapper class containing the {@link WorkflowTask}, {@link WorkflowDef}, {@link Workflow} and a string representation of the TaskId - * @throws TerminateWorkflowException In case if the task definition does not exist in the {@link MetadataDAO} + * @throws TerminateWorkflowException In case if the task definition does not exist * @return: a List with just one simple task */ @Override @@ -68,9 +66,9 @@ public List getMappedTasks(TaskMapperContext taskMapperContext) throws Ter int retryCount = taskMapperContext.getRetryCount(); String retriedTaskId = taskMapperContext.getRetryTaskId(); - TaskDef taskDefinition = Optional.ofNullable(metadataDAO.getTaskDef(taskToSchedule.getName())) + TaskDef taskDefinition = Optional.ofNullable(taskToSchedule.getTaskDefinition()) .orElseThrow(() -> { - String reason = String.format("Invalid task specified. Cannot find task by name %s in the task definitions", taskToSchedule.getName()); + String reason = String.format("Invalid task. Task %s does not have a definition", taskToSchedule.getName()); return new TerminateWorkflowException(reason); }); @@ -81,7 +79,7 @@ public List getMappedTasks(TaskMapperContext taskMapperContext) throws Ter simpleTask.setReferenceTaskName(taskToSchedule.getTaskReferenceName()); simpleTask.setInputData(input); simpleTask.setWorkflowInstanceId(workflowInstance.getWorkflowId()); - simpleTask.setWorkflowType(workflowInstance.getWorkflowType()); + simpleTask.setWorkflowType(workflowInstance.getWorkflowName()); simpleTask.setStatus(Task.Status.SCHEDULED); simpleTask.setTaskType(taskToSchedule.getName()); simpleTask.setTaskDefName(taskToSchedule.getName()); diff --git a/core/src/main/java/com/netflix/conductor/core/execution/mapper/SubWorkflowTaskMapper.java b/core/src/main/java/com/netflix/conductor/core/execution/mapper/SubWorkflowTaskMapper.java index 0a3448e31b..84a9501f85 100644 --- a/core/src/main/java/com/netflix/conductor/core/execution/mapper/SubWorkflowTaskMapper.java +++ b/core/src/main/java/com/netflix/conductor/core/execution/mapper/SubWorkflowTaskMapper.java @@ -1,17 +1,14 @@ /** * Copyright 2018 Netflix, Inc. *

    - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at *

    * http://www.apache.org/licenses/LICENSE-2.0 *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. */ @@ -20,35 +17,31 @@ import com.google.common.annotations.VisibleForTesting; import com.netflix.conductor.common.metadata.tasks.Task; import com.netflix.conductor.common.metadata.workflow.SubWorkflowParams; -import com.netflix.conductor.common.metadata.workflow.WorkflowDef; import com.netflix.conductor.common.metadata.workflow.WorkflowTask; import com.netflix.conductor.common.run.Workflow; import com.netflix.conductor.core.execution.ParametersUtils; import com.netflix.conductor.core.execution.TerminateWorkflowException; import com.netflix.conductor.core.execution.tasks.SubWorkflow; -import com.netflix.conductor.dao.MetadataDAO; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.inject.Inject; import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; +import javax.inject.Inject; + public class SubWorkflowTaskMapper implements TaskMapper { public static final Logger logger = LoggerFactory.getLogger(SubWorkflowTaskMapper.class); private ParametersUtils parametersUtils; - private MetadataDAO metadataDAO; - @Inject - public SubWorkflowTaskMapper(ParametersUtils parametersUtils, MetadataDAO metadataDAO) { + public SubWorkflowTaskMapper(ParametersUtils parametersUtils) { this.parametersUtils = parametersUtils; - this.metadataDAO = metadataDAO; } @Override @@ -64,19 +57,17 @@ public List getMappedTasks(TaskMapperContext taskMapperContext) { String subWorkflowName = resolvedParams.get("name").toString(); - Integer subWorkflowVersion = getSubWorkflowVersion(resolvedParams, subWorkflowName); - Task subWorkflowTask = new Task(); subWorkflowTask.setTaskType(SubWorkflow.NAME); subWorkflowTask.setTaskDefName(taskToSchedule.getName()); subWorkflowTask.setReferenceTaskName(taskToSchedule.getTaskReferenceName()); subWorkflowTask.setWorkflowInstanceId(workflowInstance.getWorkflowId()); - subWorkflowTask.setWorkflowType(workflowInstance.getWorkflowType()); + subWorkflowTask.setWorkflowType(workflowInstance.getWorkflowName()); subWorkflowTask.setCorrelationId(workflowInstance.getCorrelationId()); subWorkflowTask.setScheduledTime(System.currentTimeMillis()); subWorkflowTask.setEndTime(System.currentTimeMillis()); subWorkflowTask.getInputData().put("subWorkflowName", subWorkflowName); - subWorkflowTask.getInputData().put("subWorkflowVersion", subWorkflowVersion); + subWorkflowTask.getInputData().put("subWorkflowVersion", subWorkflowParams.getVersion()); subWorkflowTask.getInputData().put("workflowInput", taskMapperContext.getTaskInput()); subWorkflowTask.setTaskId(taskId); subWorkflowTask.setStatus(Task.Status.SCHEDULED); @@ -101,26 +92,11 @@ Map getSubWorkflowInputParameters(Workflow workflowInstance, Sub Map params = new HashMap<>(); params.put("name", subWorkflowParams.getName()); - Object version = subWorkflowParams.getVersion(); + Integer version = subWorkflowParams.getVersion(); if 
(version != null) { - params.put("version", version.toString()); + params.put("version", version); } return parametersUtils.getTaskInputV2(params, workflowInstance, null, null); } - @VisibleForTesting - Integer getSubWorkflowVersion(Map resolvedParams, String subWorkflowName) throws TerminateWorkflowException { - return Optional.ofNullable(resolvedParams.get("version")) - .map(Object::toString) - .map(Integer::parseInt) - .orElseGet( - () -> Optional.ofNullable(metadataDAO.getLatest(subWorkflowName)) - .map(WorkflowDef::getVersion) - .orElseThrow(() -> { - String reason = String.format("The Task %s defined as a sub-workflow has no workflow definition available ", subWorkflowName); - logger.error(reason); - return new TerminateWorkflowException(reason); - })); - } - } diff --git a/core/src/main/java/com/netflix/conductor/core/execution/mapper/TaskMapperContext.java b/core/src/main/java/com/netflix/conductor/core/execution/mapper/TaskMapperContext.java index 92935a3a24..5d2e2cf0e1 100644 --- a/core/src/main/java/com/netflix/conductor/core/execution/mapper/TaskMapperContext.java +++ b/core/src/main/java/com/netflix/conductor/core/execution/mapper/TaskMapperContext.java @@ -29,7 +29,6 @@ */ public class TaskMapperContext { - private WorkflowDef workflowDefinition; private Workflow workflowInstance; private TaskDef taskDefinition; private WorkflowTask taskToSchedule; @@ -40,7 +39,6 @@ public class TaskMapperContext { private DeciderService deciderService; private TaskMapperContext(Builder builder) { - workflowDefinition = builder.workflowDefinition; workflowInstance = builder.workflowInstance; taskDefinition = builder.taskDefinition; taskToSchedule = builder.taskToSchedule; @@ -70,7 +68,7 @@ public static Builder newBuilder(TaskMapperContext copy) { } public WorkflowDef getWorkflowDefinition() { - return workflowDefinition; + return workflowInstance.getWorkflowDefinition(); } public Workflow getWorkflowInstance() { @@ -109,7 +107,7 @@ public DeciderService getDeciderService() { @Override public String toString() { return "TaskMapperContext{" + - "workflowDefinition=" + workflowDefinition + + "workflowDefinition=" + getWorkflowDefinition() + ", workflowInstance=" + workflowInstance + ", taskToSchedule=" + taskToSchedule + ", taskInput=" + taskInput + diff --git a/core/src/main/java/com/netflix/conductor/core/execution/mapper/UserDefinedTaskMapper.java b/core/src/main/java/com/netflix/conductor/core/execution/mapper/UserDefinedTaskMapper.java index 1890bfee88..8010510b8d 100644 --- a/core/src/main/java/com/netflix/conductor/core/execution/mapper/UserDefinedTaskMapper.java +++ b/core/src/main/java/com/netflix/conductor/core/execution/mapper/UserDefinedTaskMapper.java @@ -18,12 +18,12 @@ import com.netflix.conductor.common.metadata.tasks.Task; import com.netflix.conductor.common.metadata.tasks.TaskDef; +import com.netflix.conductor.common.metadata.workflow.TaskType; import com.netflix.conductor.common.metadata.workflow.WorkflowDef; import com.netflix.conductor.common.metadata.workflow.WorkflowTask; import com.netflix.conductor.common.run.Workflow; import com.netflix.conductor.core.execution.ParametersUtils; import com.netflix.conductor.core.execution.TerminateWorkflowException; -import com.netflix.conductor.dao.MetadataDAO; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -33,27 +33,25 @@ import java.util.Optional; /** - * An implementation of {@link TaskMapper} to map a {@link WorkflowTask} of type {@link WorkflowTask.Type#USER_DEFINED} - * to a {@link Task} of type {@link 
WorkflowTask.Type#USER_DEFINED} with {@link Task.Status#SCHEDULED} + * An implementation of {@link TaskMapper} to map a {@link WorkflowTask} of type {@link TaskType#USER_DEFINED} + * to a {@link Task} of type {@link TaskType#USER_DEFINED} with {@link Task.Status#SCHEDULED} */ public class UserDefinedTaskMapper implements TaskMapper { public static final Logger logger = LoggerFactory.getLogger(UserDefinedTaskMapper.class); private ParametersUtils parametersUtils; - private MetadataDAO metadataDAO; - public UserDefinedTaskMapper(ParametersUtils parametersUtils, MetadataDAO metadataDAO) { + public UserDefinedTaskMapper(ParametersUtils parametersUtils) { this.parametersUtils = parametersUtils; - this.metadataDAO = metadataDAO; } /** - * This method maps a {@link WorkflowTask} of type {@link WorkflowTask.Type#USER_DEFINED} + * This method maps a {@link WorkflowTask} of type {@link TaskType#USER_DEFINED} * to a {@link Task} in a {@link Task.Status#SCHEDULED} state * * @param taskMapperContext: A wrapper class containing the {@link WorkflowTask}, {@link WorkflowDef}, {@link Workflow} and a string representation of the TaskId - * @throws TerminateWorkflowException In case if the task definition does not exist in the {@link MetadataDAO} + * @throws TerminateWorkflowException In case if the task definition does not exist * @return: a List with just one User defined task */ @Override @@ -79,7 +77,7 @@ public List getMappedTasks(TaskMapperContext taskMapperContext) throws Ter userDefinedTask.setTaskDefName(taskToSchedule.getName()); userDefinedTask.setReferenceTaskName(taskToSchedule.getTaskReferenceName()); userDefinedTask.setWorkflowInstanceId(workflowInstance.getWorkflowId()); - userDefinedTask.setWorkflowType(workflowInstance.getWorkflowType()); + userDefinedTask.setWorkflowType(workflowInstance.getWorkflowName()); userDefinedTask.setCorrelationId(workflowInstance.getCorrelationId()); userDefinedTask.setScheduledTime(System.currentTimeMillis()); userDefinedTask.setTaskId(taskId); diff --git a/core/src/main/java/com/netflix/conductor/core/execution/mapper/WaitTaskMapper.java b/core/src/main/java/com/netflix/conductor/core/execution/mapper/WaitTaskMapper.java index edc3b153d6..5613a0ad9b 100644 --- a/core/src/main/java/com/netflix/conductor/core/execution/mapper/WaitTaskMapper.java +++ b/core/src/main/java/com/netflix/conductor/core/execution/mapper/WaitTaskMapper.java @@ -17,6 +17,7 @@ package com.netflix.conductor.core.execution.mapper; import com.netflix.conductor.common.metadata.tasks.Task; +import com.netflix.conductor.common.metadata.workflow.TaskType; import com.netflix.conductor.common.metadata.workflow.WorkflowTask; import com.netflix.conductor.common.run.Workflow; import com.netflix.conductor.core.execution.ParametersUtils; @@ -31,7 +32,7 @@ /** - * An implementation of {@link TaskMapper} to map a {@link WorkflowTask} of type {@link WorkflowTask.Type#WAIT} + * An implementation of {@link TaskMapper} to map a {@link WorkflowTask} of type {@link TaskType#WAIT} * to a {@link Task} of type {@link Wait} with {@link Task.Status#IN_PROGRESS} */ public class WaitTaskMapper implements TaskMapper { @@ -61,7 +62,7 @@ public List getMappedTasks(TaskMapperContext taskMapperContext) { waitTask.setTaskDefName(taskMapperContext.getTaskToSchedule().getName()); waitTask.setReferenceTaskName(taskMapperContext.getTaskToSchedule().getTaskReferenceName()); waitTask.setWorkflowInstanceId(workflowInstance.getWorkflowId()); - waitTask.setWorkflowType(workflowInstance.getWorkflowType()); + 
waitTask.setWorkflowType(workflowInstance.getWorkflowName()); waitTask.setCorrelationId(workflowInstance.getCorrelationId()); waitTask.setScheduledTime(System.currentTimeMillis()); waitTask.setEndTime(System.currentTimeMillis()); diff --git a/core/src/main/java/com/netflix/conductor/core/execution/tasks/Event.java b/core/src/main/java/com/netflix/conductor/core/execution/tasks/Event.java index 53040005e1..a20f694aae 100644 --- a/core/src/main/java/com/netflix/conductor/core/execution/tasks/Event.java +++ b/core/src/main/java/com/netflix/conductor/core/execution/tasks/Event.java @@ -65,8 +65,8 @@ public void start(Workflow workflow, Task task, WorkflowExecutor provider) { Map payload = new HashMap<>(task.getInputData()); payload.put("workflowInstanceId", workflow.getWorkflowId()); - payload.put("workflowType", workflow.getWorkflowType()); - payload.put("workflowVersion", workflow.getVersion()); + payload.put("workflowType", workflow.getWorkflowName()); + payload.put("workflowVersion", workflow.getWorkflowVersion()); payload.put("correlationId", workflow.getCorrelationId()); String payloadJson; @@ -116,10 +116,10 @@ ObservableQueue getQueue(Workflow workflow, Task task) { if(sinkValue.startsWith("conductor")) { if("conductor".equals(sinkValue)) { - queueName = sinkValue + ":" + workflow.getWorkflowType() + ":" + task.getReferenceTaskName(); + queueName = sinkValue + ":" + workflow.getWorkflowName() + ":" + task.getReferenceTaskName(); } else if(sinkValue.startsWith("conductor:")) { queueName = sinkValue.replaceAll("conductor:", ""); - queueName = "conductor:" + workflow.getWorkflowType() + ":" + queueName; + queueName = "conductor:" + workflow.getWorkflowName() + ":" + queueName; } else { task.setStatus(Status.FAILED); task.setReasonForIncompletion("Invalid / Unsupported sink specified: " + sinkValue); diff --git a/core/src/main/java/com/netflix/conductor/core/metadata/MetadataMapperService.java b/core/src/main/java/com/netflix/conductor/core/metadata/MetadataMapperService.java new file mode 100644 index 0000000000..7636d8138c --- /dev/null +++ b/core/src/main/java/com/netflix/conductor/core/metadata/MetadataMapperService.java @@ -0,0 +1,160 @@ +package com.netflix.conductor.core.metadata; + +import com.google.common.annotations.VisibleForTesting; +import com.google.inject.Singleton; +import com.netflix.conductor.common.metadata.tasks.Task; +import com.netflix.conductor.common.metadata.workflow.SubWorkflowParams; +import com.netflix.conductor.common.metadata.workflow.TaskType; +import com.netflix.conductor.common.metadata.workflow.WorkflowDef; +import com.netflix.conductor.common.metadata.workflow.WorkflowTask; +import com.netflix.conductor.common.run.Workflow; +import com.netflix.conductor.core.WorkflowContext; +import com.netflix.conductor.core.execution.ApplicationException; +import com.netflix.conductor.core.execution.TerminateWorkflowException; +import com.netflix.conductor.dao.MetadataDAO; +import com.netflix.conductor.metrics.Monitors; +import com.netflix.conductor.service.utils.ServiceUtils; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.inject.Inject; +import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; + +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkNotNull; + +/** + * Populates metadata definitions within workflow objects. 
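As a rough standalone sketch of the populate-up-front idea this new service introduces: SIMPLE task references that do not yet carry an embedded definition get one attached from a single metadata lookup, so later execution never has to re-query the store. TaskRef and DefinitionLookup below are hypothetical stand-ins for the real metadata classes, and the single-task exception is a simplification of the aggregate error the service reports.

import java.util.List;
import java.util.Map;
import java.util.Optional;

// Illustrative, standalone version of "populate definitions up front": attach a definition
// to every SIMPLE task reference that is still missing one.
public class PopulateDefinitionsSketch {

    static class TaskRef {
        String name;
        String type;        // e.g. "SIMPLE", "DECISION", "SUB_WORKFLOW"
        Object definition;  // null until populated
        TaskRef(String name, String type) { this.name = name; this.type = type; }
    }

    interface DefinitionLookup {
        Optional<Object> findTaskDef(String taskName);
    }

    static boolean shouldPopulate(TaskRef task) {
        return "SIMPLE".equals(task.type) && task.definition == null;
    }

    static void populate(List<TaskRef> tasks, DefinitionLookup lookup) {
        for (TaskRef task : tasks) {
            if (shouldPopulate(task)) {
                task.definition = lookup.findTaskDef(task.name)
                        .orElseThrow(() -> new IllegalStateException(
                                "Missing task definition for: " + task.name));
            }
        }
    }

    public static void main(String[] args) {
        DefinitionLookup lookup = name -> Optional.of((Object) Map.of("name", name, "retries", 3));
        List<TaskRef> tasks = List.of(new TaskRef("resize_image", "SIMPLE"),
                                      new TaskRef("route", "DECISION"));
        populate(tasks, lookup);   // only "resize_image" is populated; "route" is left untouched
    }
}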
+ * Benefits of loading and populating metadata definitions upfront could be: + * - Immutable definitions within a workflow execution with the added benefit of guaranteeing consistency at runtime. + * - Stress is reduced on the storage layer + */ +@Singleton +public class MetadataMapperService { + + public static final Logger logger = LoggerFactory.getLogger(MetadataMapperService.class); + + private final MetadataDAO metadataDAO; + + @Inject + public MetadataMapperService(MetadataDAO metadataDAO) { + this.metadataDAO = metadataDAO; + } + + public WorkflowDef lookupForWorkflowDefinition(String name, Integer version) { + Optional potentialDef = + version == null ? lookupLatestWorkflowDefinition(name) : lookupWorkflowDefinition(name, version); + + //Check if the workflow definition is valid + WorkflowDef workflowDefinition = potentialDef + .orElseThrow(() -> { + logger.error("There is no workflow defined with name {} and version {}", name, version); + return new ApplicationException( + ApplicationException.Code.NOT_FOUND, + String.format("No such workflow defined. name=%s, version=%s", name, version) + ); + } + ); + return workflowDefinition; + } + + @VisibleForTesting + Optional lookupWorkflowDefinition(String workflowName, int workflowVersion) { + ServiceUtils.checkNotNullOrEmpty(workflowName, "Workflow name must be specified when searching for a definition"); + return metadataDAO.get(workflowName, workflowVersion); + } + + @VisibleForTesting + Optional lookupLatestWorkflowDefinition(String workflowName) { + ServiceUtils.checkNotNullOrEmpty(workflowName, "Workflow name must be specified when searching for a definition"); + return metadataDAO.getLatest(workflowName); + } + + public Workflow populateWorkflowWithDefinitions(Workflow workflow) { + + WorkflowDef workflowDefinition = Optional.ofNullable(workflow.getWorkflowDefinition()) + .orElseGet(() -> { + WorkflowDef wd = lookupForWorkflowDefinition(workflow.getWorkflowName(), workflow.getWorkflowVersion()); + workflow.setWorkflowDefinition(wd); + return wd; + }); + + workflowDefinition.collectTasks().forEach( + workflowTask -> { + if (shouldPopulateDefinition(workflowTask)) { + workflowTask.setTaskDefinition(metadataDAO.getTaskDef(workflowTask.getName())); + } else if (workflowTask.getType().equals(TaskType.SUB_WORKFLOW.name())) { + populateVersionForSubWorkflow(workflowTask); + } + } + ); + + checkNotEmptyDefinitions(workflowDefinition); + + return workflow; + } + + public WorkflowDef populateTaskDefinitions(WorkflowDef workflowDefinition) { + workflowDefinition.collectTasks().forEach( + this::populateWorkflowTaskWithDefinition + ); + checkNotEmptyDefinitions(workflowDefinition); + return workflowDefinition; + } + + private WorkflowTask populateWorkflowTaskWithDefinition(WorkflowTask workflowTask) { + if (shouldPopulateDefinition(workflowTask)) { + workflowTask.setTaskDefinition(metadataDAO.getTaskDef(workflowTask.getName())); + } else if (workflowTask.getType().equals(TaskType.SUB_WORKFLOW.name())) { + populateVersionForSubWorkflow(workflowTask); + } + return workflowTask; + } + + private void populateVersionForSubWorkflow(WorkflowTask workflowTask) { + SubWorkflowParams subworkflowParams = workflowTask.getSubWorkflowParam(); + if (subworkflowParams.getVersion() == null) { + String subWorkflowName = subworkflowParams.getName(); + Integer subWorkflowVersion = + metadataDAO.getLatest(subWorkflowName) + .map(WorkflowDef::getVersion) + .orElseThrow( + () -> { + String reason = String.format("The Task %s defined as a sub-workflow has no 
workflow definition available ", subWorkflowName); + logger.error(reason); + return new TerminateWorkflowException(reason); + } + ); + subworkflowParams.setVersion(subWorkflowVersion); + } + } + + private void checkNotEmptyDefinitions(WorkflowDef workflowDefinition) { + + // Obtain the names of the tasks with missing definitions + Set missingTaskDefinitionNames = workflowDefinition.collectTasks().stream() + .filter(MetadataMapperService::shouldPopulateDefinition) + .map(WorkflowTask::getName) + .collect(Collectors.toSet()); + + if (!missingTaskDefinitionNames.isEmpty()) { + logger.error("Cannot find the task definitions for the following tasks used in workflow: {}", missingTaskDefinitionNames); + Monitors.recordWorkflowStartError(workflowDefinition.getName(), WorkflowContext.get().getClientApp()); + throw new ApplicationException(ApplicationException.Code.INVALID_INPUT, "Cannot find the task definitions for the following tasks used in workflow: " + missingTaskDefinitionNames); + } + } + + public Task populateTaskWithDefinition(Task task) { + populateWorkflowTaskWithDefinition(task.getWorkflowTask()); + return task; + } + + public static boolean shouldPopulateDefinition(WorkflowTask workflowTask) { + return workflowTask.getType().equals(TaskType.SIMPLE.name()) && + workflowTask.getTaskDefinition() == null; + } + +} diff --git a/core/src/main/java/com/netflix/conductor/dao/IndexDAO.java b/core/src/main/java/com/netflix/conductor/dao/IndexDAO.java index 632040c75d..1c872cdb62 100644 --- a/core/src/main/java/com/netflix/conductor/dao/IndexDAO.java +++ b/core/src/main/java/com/netflix/conductor/dao/IndexDAO.java @@ -26,159 +26,157 @@ import java.util.concurrent.CompletableFuture; /** - * + * * @author Viren * DAO to index the workflow and task details for searching. */ public interface IndexDAO { - /** - * This method should return an unique identifier of the indexed doc - * @param workflow Workflow to be indexed - * - */ - void indexWorkflow(Workflow workflow); - - /** - * - * /** - * This method should return an unique identifier of the indexed doc - * @param workflow Workflow to be indexed - * @return CompletableFuture of type void - */ - CompletableFuture asyncIndexWorkflow(Workflow workflow); - - /** - * @param task Task to be indexed - */ - void indexTask(Task task); - - /** - * - * @param task Task to be indexed asynchronously - * @return CompletableFuture of type void - */ - CompletableFuture asyncIndexTask(Task task); - - /** - * - * @param query SQL like query for workflow search parameters. - * @param freeText Additional query in free text. Lucene syntax - * @param start start start index for pagination - * @param count count # of workflow ids to be returned - * @param sort sort options - * @return List of workflow ids for the matching query - */ - SearchResult searchWorkflows(String query, String freeText, int start, int count, List sort); - - - /** - * - * @param query SQL like query for task search parameters. - * @param freeText Additional query in free text. 
Lucene syntax - * @param start start start index for pagination - * @param count count # of task ids to be returned - * @param sort sort options - * @return List of workflow ids for the matching query - */ - SearchResult searchTasks(String query, String freeText, int start, int count, List sort); - - /** - * Remove the workflow index - * @param workflowId workflow to be removed - */ - void removeWorkflow(String workflowId); - - /** - * Remove the workflow index - * @param workflowId workflow to be removed - * @return CompletableFuture of type void - */ - CompletableFuture asyncRemoveWorkflow(String workflowId); - - - /** - * - * Updates the index - * @param workflowInstanceId id of the workflow - * @param keys keys to be updated - * @param values values. Number of keys and values MUST match. - */ - void updateWorkflow(String workflowInstanceId, String[] keys, Object[] values); - - /** - * Updates the index - * @param workflowInstanceId id of the workflow - * @param keys keys to be updated - * @param values values. Number of keys and values MUST match. - * @return CompletableFuture of type void - */ - CompletableFuture asyncUpdateWorkflow(String workflowInstanceId, String[] keys, Object[] values); - - - /** - * Retrieves a specific field from the index - * @param workflowInstanceId id of the workflow - * @param key field to be retrieved - * @return value of the field as string - */ - String get(String workflowInstanceId, String key); - - /** - * @param logs Task Execution logs to be indexed - */ - void addTaskExecutionLogs(List logs); - - /** - * - * @param logs Task Execution logs to be indexed - * @return CompletableFuture of type void - */ - CompletableFuture asyncAddTaskExecutionLogs(List logs); - - /** - * - * @param taskId Id of the task for which to fetch the execution logs - * @return Returns the task execution logs for given task id - */ - List getTaskExecutionLogs(String taskId); - - - /** - * @param eventExecution Event Execution to be indexed - */ - void addEventExecution(EventExecution eventExecution); - - - /** - * - * @param eventExecution Event Execution to be indexed - * @return CompletableFuture of type void - */ - CompletableFuture asyncAddEventExecution(EventExecution eventExecution); - - /** - * Adds an incoming external message into the index - * @param queue Name of the registered queue - * @param msg Message - */ - void addMessage(String queue, Message msg); - - /** - * Search for Workflows completed or failed beyond archiveTtlDays - * @param indexName Name of the index to search - * @param archiveTtlDays Archival Time to Live - * @return List of worlflow Ids matching the pattern - */ - List searchArchivableWorkflows(String indexName, long archiveTtlDays); - - /** - * Search for RUNNING workflows changed in the last lastModifiedHoursAgoFrom to lastModifiedHoursAgoTo hours - * @param lastModifiedHoursAgoFrom - last updated date should be lastModifiedHoursAgoFrom hours ago or later - * @param lastModifiedHoursAgoTo - last updated date should be lastModifiedHoursAgoTo hours ago or earlier - * * - * @return List of workflow Ids matching the pattern - */ - List searchRecentRunningWorkflows(int lastModifiedHoursAgoFrom, int lastModifiedHoursAgoTo); + /** + * Setup method in charge or initializing/populating the index. 
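A note on the setup() hook added to IndexDAO here: concrete implementations are expected to create or migrate their indices inside it before the service starts serving traffic. A hypothetical bootstrap sketch; only IndexDAO.setup() comes from this interface, the IndexBootstrapSketch class and init method are illustrative:

    import com.netflix.conductor.dao.IndexDAO;

    public class IndexBootstrapSketch {
        // Call once at application startup, before any indexing or search requests.
        public static void init(IndexDAO indexDAO) {
            try {
                indexDAO.setup(); // declared as 'void setup() throws Exception' on the interface
            } catch (Exception e) {
                throw new IllegalStateException("Index setup failed", e);
            }
        }
    }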
+ */ + void setup() throws Exception; + + /** + * This method should return an unique identifier of the indexed doc + * @param workflow Workflow to be indexed + * + */ + void indexWorkflow(Workflow workflow); + + /** + * This method should return an unique identifier of the indexed doc + * @param workflow Workflow to be indexed + * @return CompletableFuture of type void + */ + CompletableFuture asyncIndexWorkflow(Workflow workflow); + + /** + * @param task Task to be indexed + */ + void indexTask(Task task); + + /** + * + * @param task Task to be indexed asynchronously + * @return CompletableFuture of type void + */ + CompletableFuture asyncIndexTask(Task task); + + /** + * + * @param query SQL like query for workflow search parameters. + * @param freeText Additional query in free text. Lucene syntax + * @param start start start index for pagination + * @param count count # of workflow ids to be returned + * @param sort sort options + * @return List of workflow ids for the matching query + */ + SearchResult searchWorkflows(String query, String freeText, int start, int count, List sort); + + /** + * + * @param query SQL like query for task search parameters. + * @param freeText Additional query in free text. Lucene syntax + * @param start start start index for pagination + * @param count count # of task ids to be returned + * @param sort sort options + * @return List of workflow ids for the matching query + */ + SearchResult searchTasks(String query, String freeText, int start, int count, List sort); + + /** + * Remove the workflow index + * @param workflowId workflow to be removed + */ + void removeWorkflow(String workflowId); + + /** + * Remove the workflow index + * @param workflowId workflow to be removed + * @return CompletableFuture of type void + */ + CompletableFuture asyncRemoveWorkflow(String workflowId); + + /** + * + * Updates the index + * @param workflowInstanceId id of the workflow + * @param keys keys to be updated + * @param values values. Number of keys and values MUST match. + */ + void updateWorkflow(String workflowInstanceId, String[] keys, Object[] values); + + /** + * Updates the index + * @param workflowInstanceId id of the workflow + * @param keys keys to be updated + * @param values values. Number of keys and values MUST match. 
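The async variants on this interface (asyncIndexWorkflow, asyncIndexTask, asyncUpdateWorkflow, and so on) are documented as returning a CompletableFuture of type void, which lets callers batch and compose indexing work. A small sketch under that assumption; the AsyncIndexSketch class and indexWorkflowAndTasks method are illustrative names:

    import java.util.concurrent.CompletableFuture;

    import com.netflix.conductor.common.run.Workflow;
    import com.netflix.conductor.dao.IndexDAO;

    public class AsyncIndexSketch {
        // Index the workflow document and each of its tasks without blocking per call,
        // then wait once for the whole batch.
        public static void indexWorkflowAndTasks(IndexDAO indexDAO, Workflow workflow) {
            CompletableFuture<Void> workflowFuture = indexDAO.asyncIndexWorkflow(workflow);
            CompletableFuture<?>[] taskFutures = workflow.getTasks().stream()
                    .map(indexDAO::asyncIndexTask)
                    .toArray(CompletableFuture[]::new);
            CompletableFuture.allOf(workflowFuture, CompletableFuture.allOf(taskFutures)).join();
        }
    }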
+ * @return CompletableFuture of type void + */ + CompletableFuture asyncUpdateWorkflow(String workflowInstanceId, String[] keys, Object[] values); + + /** + * Retrieves a specific field from the index + * @param workflowInstanceId id of the workflow + * @param key field to be retrieved + * @return value of the field as string + */ + String get(String workflowInstanceId, String key); + + /** + * @param logs Task Execution logs to be indexed + */ + void addTaskExecutionLogs(List logs); + + /** + * + * @param logs Task Execution logs to be indexed + * @return CompletableFuture of type void + */ + CompletableFuture asyncAddTaskExecutionLogs(List logs); + + /** + * + * @param taskId Id of the task for which to fetch the execution logs + * @return Returns the task execution logs for given task id + */ + List getTaskExecutionLogs(String taskId); + + /** + * @param eventExecution Event Execution to be indexed + */ + void addEventExecution(EventExecution eventExecution); + + /** + * + * @param eventExecution Event Execution to be indexed + * @return CompletableFuture of type void + */ + CompletableFuture asyncAddEventExecution(EventExecution eventExecution); + + /** + * Adds an incoming external message into the index + * @param queue Name of the registered queue + * @param msg Message + */ + void addMessage(String queue, Message msg); + + /** + * Search for Workflows completed or failed beyond archiveTtlDays + * @param indexName Name of the index to search + * @param archiveTtlDays Archival Time to Live + * @return List of worlflow Ids matching the pattern + */ + List searchArchivableWorkflows(String indexName, long archiveTtlDays); + + /** + * Search for RUNNING workflows changed in the last lastModifiedHoursAgoFrom to lastModifiedHoursAgoTo hours + * @param lastModifiedHoursAgoFrom - last updated date should be lastModifiedHoursAgoFrom hours ago or later + * @param lastModifiedHoursAgoTo - last updated date should be lastModifiedHoursAgoTo hours ago or earlier + * * + * @return List of workflow Ids matching the pattern + */ + List searchRecentRunningWorkflows(int lastModifiedHoursAgoFrom, int lastModifiedHoursAgoTo); } \ No newline at end of file diff --git a/core/src/main/java/com/netflix/conductor/dao/MetadataDAO.java b/core/src/main/java/com/netflix/conductor/dao/MetadataDAO.java index c5aa3772a7..45ec64703e 100644 --- a/core/src/main/java/com/netflix/conductor/dao/MetadataDAO.java +++ b/core/src/main/java/com/netflix/conductor/dao/MetadataDAO.java @@ -14,7 +14,7 @@ * limitations under the License. */ /** - * + * */ package com.netflix.conductor.dao; @@ -23,6 +23,7 @@ import com.netflix.conductor.common.metadata.workflow.WorkflowDef; import java.util.List; +import java.util.Optional; /** * @author Viren @@ -30,133 +31,104 @@ */ public interface MetadataDAO { - /** - * - * @param taskDef task definition to be created - * @return name of the task definition - * - */ - String createTaskDef(TaskDef taskDef); - - /** - * - * @param taskDef task definition to be updated. 
- * @return name of the task definition - * - */ - String updateTaskDef(TaskDef taskDef); - - /** - * - * @param name Name of the task - * @return Task Definition - * - */ - TaskDef getTaskDef(String name); - - /** - * - * @return All the task definitions - * - */ - List getAllTaskDefs(); - - /** - * - * @param name Name of the task - */ - void removeTaskDef(String name); - - /** - * - * @param def workflow definition - * - */ - void create(WorkflowDef def); - - /** - * - * @param def workflow definition - * - */ - void update(WorkflowDef def); - - /** - * - * @param name Name of the workflow - * @return Workflow Definition - * - */ - WorkflowDef getLatest(String name); - - /** - * - * @param name Name of the workflow - * @param version version - * @return workflow definition - * - */ - WorkflowDef get(String name, int version); - - /** - * - * @param name Name of the workflow definition to be removed - * @param version Version of the workflow definition to be removed - */ - void removeWorkflowDef(String name, Integer version); - - /** - * - * @return Names of all the workflows - * - */ - List findAll(); - - /** - * - * @return List of all the workflow definitions - * - */ - List getAll(); - - /** - * - * @param name name of the workflow - * @return List of all the workflow definitions - * - */ - List getAllVersions(String name); - - /** - * - * @param eventHandler Event handler to be added. - * Will throw an exception if an event handler already exists with the name - */ - void addEventHandler(EventHandler eventHandler); - - /** - * - * @param eventHandler Event handler to be updated. - */ - void updateEventHandler(EventHandler eventHandler); - - /** - * - * @param name Removes the event handler from the system - */ - void removeEventHandlerStatus(String name); - - /** - * - * @return All the event handlers registered in the system - */ - List getEventHandlers(); - - /** - * - * @param event name of the event - * @param activeOnly if true, returns only the active handlers - * @return Returns the list of all the event handlers for a given event - */ - List getEventHandlersForEvent(String event, boolean activeOnly); + /** + * @param taskDef task definition to be created + * @return name of the task definition + */ + String createTaskDef(TaskDef taskDef); + + /** + * @param taskDef task definition to be updated. 
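As the rewritten interface below shows, getLatest and get now return Optional rather than a nullable WorkflowDef, so callers decide explicitly how to handle a missing definition. A compact sketch of the new lookup pattern, assuming Optional<WorkflowDef> as the javadoc states; the OptionalLookupSketch wrapper and the exception choice are illustrative:

    import java.util.Optional;

    import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
    import com.netflix.conductor.dao.MetadataDAO;

    class OptionalLookupSketch {
        // Fetch the latest registered version of a workflow definition, failing loudly if absent.
        static WorkflowDef latestOrFail(MetadataDAO metadataDAO, String name) {
            Optional<WorkflowDef> latest = metadataDAO.getLatest(name);
            return latest.orElseThrow(() ->
                    new IllegalArgumentException("No workflow definition registered under name: " + name));
        }
    }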
+ * @return name of the task definition + */ + String updateTaskDef(TaskDef taskDef); + + /** + * @param name Name of the task + * @return Task Definition + */ + TaskDef getTaskDef(String name); + + /** + * @return All the task definitions + */ + List getAllTaskDefs(); + + /** + * @param name Name of the task + */ + void removeTaskDef(String name); + + /** + * @param def workflow definition + */ + void create(WorkflowDef def); + + /** + * @param def workflow definition + */ + void update(WorkflowDef def); + + /** + * @param name Name of the workflow + * @return Workflow Definition + */ + Optional getLatest(String name); + + /** + * @param name Name of the workflow + * @param version version + * @return workflow definition + */ + Optional get(String name, int version); + + /** + * @param name Name of the workflow definition to be removed + * @param version Version of the workflow definition to be removed + */ + void removeWorkflowDef(String name, Integer version); + + /** + * @return Names of all the workflows + */ + List findAll(); + + /** + * @return List of all the workflow definitions + */ + List getAll(); + + /** + * @param name name of the workflow + * @return List of all the workflow definitions + */ + List getAllVersions(String name); + + /** + * @param eventHandler Event handler to be added. Will throw an exception if an event handler already exists with + * the name + */ + void addEventHandler(EventHandler eventHandler); + + /** + * @param eventHandler Event handler to be updated. + */ + void updateEventHandler(EventHandler eventHandler); + + /** + * @param name Removes the event handler from the system + */ + void removeEventHandlerStatus(String name); + + /** + * @return All the event handlers registered in the system + */ + List getEventHandlers(); + + /** + * @param event name of the event + * @param activeOnly if true, returns only the active handlers + * @return Returns the list of all the event handlers for a given event + */ + List getEventHandlersForEvent(String event, boolean activeOnly); } diff --git a/core/src/main/java/com/netflix/conductor/metrics/Monitors.java b/core/src/main/java/com/netflix/conductor/metrics/Monitors.java index 71aec0f3cf..c77efc9076 100644 --- a/core/src/main/java/com/netflix/conductor/metrics/Monitors.java +++ b/core/src/main/java/com/netflix/conductor/metrics/Monitors.java @@ -18,12 +18,6 @@ */ package com.netflix.conductor.metrics; -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; - import com.netflix.conductor.common.metadata.tasks.Task; import com.netflix.conductor.common.metadata.tasks.Task.Status; import com.netflix.conductor.common.run.Workflow.WorkflowStatus; @@ -36,6 +30,12 @@ import com.netflix.spectator.api.Timer; import com.netflix.spectator.api.histogram.PercentileTimer; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; + /** * @author Viren * diff --git a/core/src/main/java/com/netflix/conductor/service/AdminService.java b/core/src/main/java/com/netflix/conductor/service/AdminService.java index 4c3a13b337..a11bcb13a8 100644 --- a/core/src/main/java/com/netflix/conductor/service/AdminService.java +++ b/core/src/main/java/com/netflix/conductor/service/AdminService.java @@ -39,7 +39,7 @@ @Trace public class AdminService { - private static Logger logger = 
LoggerFactory.getLogger(AdminService.class); + private static Logger LOGGER = LoggerFactory.getLogger(AdminService.class); private final Configuration config; @@ -66,7 +66,7 @@ public AdminService(Configuration config, ExecutionService executionService, Que this.version = prop.getProperty("Implementation-Version"); this.buildDate = prop.getProperty("Build-Date"); } catch (Exception e) { - logger.error(e.getMessage(), e); + LOGGER.error(e.getMessage(), e); } } @@ -106,7 +106,7 @@ public List getListOfPendingTask(String taskType, Integer start, Integer c */ public String requeueSweep(String workflowId) { ServiceUtils.checkNotNullOrEmpty(workflowId, "WorkflowId cannot be null or empty."); - boolean pushed = queueDAO.pushIfNotExists(WorkflowExecutor.deciderQueue, workflowId, config.getSweepFrequency()); + boolean pushed = queueDAO.pushIfNotExists(WorkflowExecutor.DECIDER_QUEUE, workflowId, config.getSweepFrequency()); return pushed + "." + workflowId; } } diff --git a/core/src/main/java/com/netflix/conductor/service/ExecutionService.java b/core/src/main/java/com/netflix/conductor/service/ExecutionService.java index 821159a844..70035772aa 100644 --- a/core/src/main/java/com/netflix/conductor/service/ExecutionService.java +++ b/core/src/main/java/com/netflix/conductor/service/ExecutionService.java @@ -36,6 +36,7 @@ import com.netflix.conductor.core.execution.ApplicationException; import com.netflix.conductor.core.execution.SystemTaskType; import com.netflix.conductor.core.execution.WorkflowExecutor; +import com.netflix.conductor.core.metadata.MetadataMapperService; import com.netflix.conductor.core.utils.QueueUtils; import com.netflix.conductor.dao.ExecutionDAO; import com.netflix.conductor.dao.IndexDAO; @@ -90,15 +91,25 @@ public class ExecutionService { private static final int POLL_COUNT_ONE = 1; + private MetadataMapperService metadataMapperService; + private static final int POLLING_TIMEOUT_IN_MS = 100; @Inject - public ExecutionService(WorkflowExecutor wfProvider, ExecutionDAO executionDAO, QueueDAO queueDAO, MetadataDAO metadataDAO, IndexDAO indexDAO, Configuration config, ExternalPayloadStorage externalPayloadStorage) { - this.workflowExecutor = wfProvider; + public ExecutionService(WorkflowExecutor workflowExecutor, + ExecutionDAO executionDAO, + QueueDAO queueDAO, + MetadataDAO metadataDAO, + MetadataMapperService metadataMapperService, + IndexDAO indexDAO, + Configuration config, + ExternalPayloadStorage externalPayloadStorage) { + this.workflowExecutor = workflowExecutor; this.executionDAO = executionDAO; this.queueDAO = queueDAO; this.metadataDAO = metadataDAO; this.indexDAO = indexDAO; + this.metadataMapperService = metadataMapperService; this.externalPayloadStorage = externalPayloadStorage; this.taskRequeueTimeout = config.getIntProperty("task.requeue.timeout", 60_000); this.maxSearchSize = config.getIntProperty("workflow.max.search.size", 5_000); @@ -200,7 +211,9 @@ public List getTasks(String taskType, String startKey, int count) { } public Task getTask(String taskId) { - return executionDAO.getTask(taskId); + return Optional.ofNullable(executionDAO.getTask(taskId)) + .map(t -> metadataMapperService.populateTaskWithDefinition(t)) + .orElse(null); } public Task getPendingTaskForWorkflow(String taskReferenceName, String workflowId) { @@ -320,7 +333,7 @@ public List getWorkflowInstances(String workflowName, String correlati List workflows = executionDAO.getWorkflowsByCorrelationId(correlationId, includeTasks); List result = new LinkedList<>(); for (Workflow wf : workflows) { - if 
(wf.getWorkflowType().equals(workflowName) && (includeClosed || wf.getStatus().equals(Workflow.WorkflowStatus.RUNNING))) { + if (wf.getWorkflowName().equals(workflowName) && (includeClosed || wf.getStatus().equals(Workflow.WorkflowStatus.RUNNING))) { result.add(wf); } } diff --git a/core/src/main/java/com/netflix/conductor/service/Lifecycle.java b/core/src/main/java/com/netflix/conductor/service/Lifecycle.java new file mode 100644 index 0000000000..8680ee1599 --- /dev/null +++ b/core/src/main/java/com/netflix/conductor/service/Lifecycle.java @@ -0,0 +1,33 @@ +package com.netflix.conductor.service; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This interface provides a means to help handle objects, especially those that are injected, that have a lifecycle + * component. Guice explicitly does not support this and recommends a patter much like this. This should be used by + * anything that needs to create resources or clean them up when the application is started or stopped, such as server + * listeners, clients, etc. + * + * @see ModulesShouldBeFastAndSideEffectFree + */ +public interface Lifecycle { + + Logger logger = LoggerFactory.getLogger(Lifecycle.class); + + default void start() throws Exception { + registerShutdownHook(); + } + + void stop() throws Exception; + + default void registerShutdownHook() { + Runtime.getRuntime().addShutdownHook(new Thread(() -> { + try { + stop(); + } catch (Exception e) { + logger.error("Error when trying to shutdown a lifecycle component: " + this.getClass().getName(), e); + } + })); + } +} diff --git a/core/src/main/java/com/netflix/conductor/service/MetadataService.java b/core/src/main/java/com/netflix/conductor/service/MetadataService.java index a55c6b14b5..7f51c2f6d7 100644 --- a/core/src/main/java/com/netflix/conductor/service/MetadataService.java +++ b/core/src/main/java/com/netflix/conductor/service/MetadataService.java @@ -34,6 +34,7 @@ import javax.inject.Inject; import javax.inject.Singleton; import java.util.List; +import java.util.Optional; /** * @author Viren @@ -42,11 +43,9 @@ @Singleton @Trace public class MetadataService { - private final MetadataDAO metadataDAO; private final EventQueues eventQueues; - @Inject public MetadataService(MetadataDAO metadataDAO, EventQueues eventQueues) { this.metadataDAO = metadataDAO; @@ -54,7 +53,6 @@ public MetadataService(MetadataDAO metadataDAO, EventQueues eventQueues) { } /** - * * @param taskDefinitions Task Definitions to register */ public void registerTaskDef(List taskDefinitions) { @@ -73,7 +71,6 @@ public void registerTaskDef(List taskDefinitions) { } /** - * * @param taskDefinition Task Definition to be updated */ public void updateTaskDef(TaskDef taskDefinition) { @@ -87,7 +84,6 @@ public void updateTaskDef(TaskDef taskDefinition) { } /** - * * @param taskType Remove task definition */ public void unregisterTaskDef(String taskType) { @@ -95,7 +91,6 @@ public void unregisterTaskDef(String taskType) { } /** - * * @return List of all the registered tasks */ public List getTaskDefs() { @@ -103,7 +98,6 @@ public List getTaskDefs() { } /** - * * @param taskType Task to retrieve * @return Task Definition */ @@ -117,7 +111,6 @@ public TaskDef getTaskDef(String taskType) { } /** - * * @param def Workflow definition to be updated */ public void updateWorkflowDef(WorkflowDef def) { @@ -141,32 +134,27 @@ public void updateWorkflowDef(List workflowDefList) { } /** - * - * @param name Name of the workflow to retrieve + * @param name Name of the workflow to retrieve * @param version 
Optional. Version. If null, then retrieves the latest * @return Workflow definition */ public WorkflowDef getWorkflowDef(String name, Integer version) { - WorkflowDef workflowDef = null; + Optional workflowDef; if (version == null) { workflowDef = metadataDAO.getLatest(name); } else { workflowDef = metadataDAO.get(name, version); } - if(workflowDef == null){ - throw new ApplicationException(Code.NOT_FOUND, - String.format("No such workflow found by name: %s, version: %d", name, version)); - } - return workflowDef; + return workflowDef.orElseThrow(() -> new ApplicationException(Code.NOT_FOUND, + String.format("No such workflow found by name: %s, version: %d", name, version))); } /** - * * @param name Name of the workflow to retrieve * @return Latest version of the workflow definition */ - public WorkflowDef getLatestWorkflow(String name) { + public Optional getLatestWorkflow(String name) { return metadataDAO.getLatest(name); } @@ -198,9 +186,8 @@ public void unregisterWorkflowDef(String name, Integer version) { } /** - * * @param eventHandler Event handler to be added. - * Will throw an exception if an event handler already exists with the name + * Will throw an exception if an event handler already exists with the name */ public void addEventHandler(EventHandler eventHandler) { validateEvent(eventHandler); @@ -208,7 +195,6 @@ public void addEventHandler(EventHandler eventHandler) { } /** - * * @param eventHandler Event handler to be updated. */ public void updateEventHandler(EventHandler eventHandler) { @@ -217,7 +203,6 @@ public void updateEventHandler(EventHandler eventHandler) { } /** - * * @param name Removes the event handler from the system */ public void removeEventHandlerStatus(String name) { @@ -225,7 +210,6 @@ public void removeEventHandlerStatus(String name) { } /** - * * @return All the event handlers registered in the system */ public List getEventHandlers() { @@ -233,8 +217,7 @@ public List getEventHandlers() { } /** - * - * @param event name of the event + * @param event name of the event * @param activeOnly if true, returns only the active handlers * @return Returns the list of all the event handlers for a given event */ @@ -250,4 +233,5 @@ public void validateEvent(EventHandler eh) { String event = eh.getEvent(); eventQueues.getQueue(event); } + } diff --git a/core/src/main/java/com/netflix/conductor/service/TaskService.java b/core/src/main/java/com/netflix/conductor/service/TaskService.java index 5f0d3a59bb..17c14d52c3 100644 --- a/core/src/main/java/com/netflix/conductor/service/TaskService.java +++ b/core/src/main/java/com/netflix/conductor/service/TaskService.java @@ -48,7 +48,7 @@ @Trace public class TaskService { - private static final Logger logger = LoggerFactory.getLogger(TaskService.class); + private static final Logger LOGGER = LoggerFactory.getLogger(TaskService.class); private final ExecutionService executionService; @@ -70,10 +70,10 @@ public TaskService(ExecutionService executionService, QueueDAO queueDAO) { */ public Task poll(String taskType, String workerId, String domain) { ServiceUtils.checkNotNullOrEmpty(taskType, "TaskType cannot be null or empty."); - logger.debug("Task being polled: /tasks/poll/{}?{}&{}", taskType, workerId, domain); + LOGGER.debug("Task being polled: /tasks/poll/{}?{}&{}", taskType, workerId, domain); Task task = executionService.getLastPollTask(taskType, workerId, domain); if (task != null) { - logger.debug("The Task {} being returned for /tasks/poll/{}?{}&{}", task, taskType, workerId, domain); + LOGGER.debug("The Task {} being 
returned for /tasks/poll/{}?{}&{}", task, taskType, workerId, domain); } Monitors.recordTaskPollCount(taskType, domain, 1); return task; @@ -92,7 +92,7 @@ public Task poll(String taskType, String workerId, String domain) { public List batchPoll(String taskType, String workerId, String domain, Integer count, Integer timeout) { ServiceUtils.checkNotNullOrEmpty(taskType, "TaskType cannot be null or empty."); List polledTasks = executionService.poll(taskType, workerId, domain, count, timeout); - logger.debug("The Tasks {} being returned for /tasks/poll/{}?{}&{}", + LOGGER.debug("The Tasks {} being returned for /tasks/poll/{}?{}&{}", polledTasks.stream() .map(Task::getTaskId) .collect(Collectors.toList()), taskType, workerId, domain); @@ -136,9 +136,9 @@ public String updateTask(TaskResult taskResult) { ServiceUtils.checkNotNull(taskResult, "TaskResult cannot be null or empty."); ServiceUtils.checkNotNullOrEmpty(taskResult.getWorkflowInstanceId(), "Workflow Id cannot be null or empty"); ServiceUtils.checkNotNullOrEmpty(taskResult.getTaskId(), "Task ID cannot be null or empty"); - logger.debug("Update Task: {} with callback time: {}", taskResult, taskResult.getCallbackAfterSeconds()); + LOGGER.debug("Update Task: {} with callback time: {}", taskResult, taskResult.getCallbackAfterSeconds()); executionService.updateTask(taskResult); - logger.debug("Task: {} updated successfully with callback time: {}", taskResult, taskResult.getCallbackAfterSeconds()); + LOGGER.debug("Task: {} updated successfully with callback time: {}", taskResult, taskResult.getCallbackAfterSeconds()); return taskResult.getTaskId(); } @@ -151,7 +151,7 @@ public String updateTask(TaskResult taskResult) { */ public String ackTaskReceived(String taskId, String workerId) { ServiceUtils.checkNotNullOrEmpty(taskId, "TaskId cannot be null or empty."); - logger.debug("Ack received for task: {} from worker: {}", taskId, workerId); + LOGGER.debug("Ack received for task: {} from worker: {}", taskId, workerId); return String.valueOf(executionService.ackTaskReceived(taskId)); } diff --git a/core/src/main/java/com/netflix/conductor/service/WorkflowService.java b/core/src/main/java/com/netflix/conductor/service/WorkflowService.java index 40f1b20986..2811bc5622 100644 --- a/core/src/main/java/com/netflix/conductor/service/WorkflowService.java +++ b/core/src/main/java/com/netflix/conductor/service/WorkflowService.java @@ -66,16 +66,36 @@ public WorkflowService(WorkflowExecutor workflowExecutor, ExecutionService execu public String startWorkflow(StartWorkflowRequest startWorkflowRequest) { ServiceUtils.checkNotNull(startWorkflowRequest, "StartWorkflowRequest cannot be null"); ServiceUtils.checkNotNullOrEmpty(startWorkflowRequest.getName(), "Workflow name cannot be null or empty"); - WorkflowDef workflowDef = metadataService.getWorkflowDef(startWorkflowRequest.getName(), startWorkflowRequest.getVersion()); + + WorkflowDef workflowDef = startWorkflowRequest.getWorkflowDef(); + if (workflowDef == null) { - throw new ApplicationException(ApplicationException.Code.NOT_FOUND, - String.format("No such workflow found by name: %s, version: %d", startWorkflowRequest.getName(), - startWorkflowRequest.getVersion())); - } - return workflowExecutor.startWorkflow(workflowDef.getName(), workflowDef.getVersion(), - startWorkflowRequest.getCorrelationId(), startWorkflowRequest.getInput(), startWorkflowRequest.getExternalInputPayloadStoragePath(), null, - startWorkflowRequest.getTaskToDomain()); + workflowDef = 
metadataService.getWorkflowDef(startWorkflowRequest.getName(), startWorkflowRequest.getVersion()); + if (workflowDef == null) { + throw new ApplicationException(ApplicationException.Code.NOT_FOUND, + String.format("No such workflow found by name: %s, version: %d", startWorkflowRequest.getName(), + startWorkflowRequest.getVersion())); + } + return workflowExecutor.startWorkflow( + startWorkflowRequest.getName(), + startWorkflowRequest.getVersion(), + startWorkflowRequest.getCorrelationId(), + startWorkflowRequest.getInput(), + startWorkflowRequest.getExternalInputPayloadStoragePath(), + null, + startWorkflowRequest.getTaskToDomain() + ); + } else { + return workflowExecutor.startWorkflow( + startWorkflowRequest.getWorkflowDef(), + startWorkflowRequest.getInput(), + startWorkflowRequest.getExternalInputPayloadStoragePath(), + startWorkflowRequest.getCorrelationId(), + null, + startWorkflowRequest.getTaskToDomain() + ); + } } /** diff --git a/core/src/test/java/com/netflix/conductor/core/events/TestEventProcessor.java b/core/src/test/java/com/netflix/conductor/core/events/TestEventProcessor.java index 2de2196a93..2f7e78957d 100644 --- a/core/src/test/java/com/netflix/conductor/core/events/TestEventProcessor.java +++ b/core/src/test/java/com/netflix/conductor/core/events/TestEventProcessor.java @@ -34,7 +34,6 @@ import com.netflix.conductor.core.execution.ParametersUtils; import com.netflix.conductor.core.execution.TestConfiguration; import com.netflix.conductor.core.execution.WorkflowExecutor; -import com.netflix.conductor.core.utils.DummyPayloadStorage; import com.netflix.conductor.service.ExecutionService; import com.netflix.conductor.service.MetadataService; import org.junit.Before; @@ -115,18 +114,18 @@ public void testEventProcessor() { eventHandler.setActive(true); Action startWorkflowAction = new Action(); - startWorkflowAction.setAction(Type.start_workflow); - startWorkflowAction.setStart_workflow(new StartWorkflow()); - startWorkflowAction.getStart_workflow().setName("workflow_x"); - startWorkflowAction.getStart_workflow().setVersion(1); + startWorkflowAction.setAction(Type.START_WORKFLOW); + startWorkflowAction.setStartWorkflow(new StartWorkflow()); + startWorkflowAction.getStartWorkflow().setName("workflow_x"); + startWorkflowAction.getStartWorkflow().setVersion(1); eventHandler.getActions().add(startWorkflowAction); Action completeTaskAction = new Action(); - completeTaskAction.setAction(Type.complete_task); - completeTaskAction.setComplete_task(new TaskDetails()); - completeTaskAction.getComplete_task().setTaskRefName("task_x"); - completeTaskAction.getComplete_task().setWorkflowId(UUID.randomUUID().toString()); - completeTaskAction.getComplete_task().setOutput(new HashMap<>()); + completeTaskAction.setAction(Type.COMPLETE_TASK); + completeTaskAction.setCompleteTask(new TaskDetails()); + completeTaskAction.getCompleteTask().setTaskRefName("task_x"); + completeTaskAction.getCompleteTask().setWorkflowId(UUID.randomUUID().toString()); + completeTaskAction.getCompleteTask().setOutput(new HashMap<>()); eventHandler.getActions().add(completeTaskAction); eventHandler.setEvent(event); @@ -141,7 +140,7 @@ public void testEventProcessor() { doAnswer((Answer) invocation -> { started.set(true); return id; - }).when(workflowExecutor).startWorkflow(startWorkflowAction.getStart_workflow().getName(), startWorkflowAction.getStart_workflow().getVersion(), startWorkflowAction.getStart_workflow().getCorrelationId(), startWorkflowAction.getStart_workflow().getInput(), event); + 
}).when(workflowExecutor).startWorkflow(startWorkflowAction.getStartWorkflow().getName(), startWorkflowAction.getStartWorkflow().getVersion(), startWorkflowAction.getStartWorkflow().getCorrelationId(), startWorkflowAction.getStartWorkflow().getInput(), event); AtomicBoolean completed = new AtomicBoolean(false); doAnswer((Answer) invocation -> { @@ -150,17 +149,17 @@ public void testEventProcessor() { }).when(workflowExecutor).updateTask(any()); Task task = new Task(); - task.setReferenceTaskName(completeTaskAction.getComplete_task().getTaskRefName()); + task.setReferenceTaskName(completeTaskAction.getCompleteTask().getTaskRefName()); Workflow workflow = new Workflow(); workflow.setTasks(Collections.singletonList(task)); - when(workflowExecutor.getWorkflow(completeTaskAction.getComplete_task().getWorkflowId(), true)).thenReturn(workflow); + when(workflowExecutor.getWorkflow(completeTaskAction.getCompleteTask().getWorkflowId(), true)).thenReturn(workflow); WorkflowDef workflowDef = new WorkflowDef(); - workflowDef.setVersion(startWorkflowAction.getStart_workflow().getVersion()); - workflowDef.setName(startWorkflowAction.getStart_workflow().getName()); + workflowDef.setVersion(startWorkflowAction.getStartWorkflow().getVersion()); + workflowDef.setName(startWorkflowAction.getStartWorkflow().getName()); when(metadataService.getWorkflowDef(any(), any())).thenReturn(workflowDef); - ActionProcessor actionProcessor = new ActionProcessor(workflowExecutor, metadataService, parametersUtils); + ActionProcessor actionProcessor = new ActionProcessor(workflowExecutor, parametersUtils); EventProcessor eventProcessor = new EventProcessor(executionService, metadataService, actionProcessor, eventQueues, new TestConfiguration()); assertNotNull(eventProcessor.getQueues()); @@ -193,12 +192,12 @@ public void testEventHandlerWithCondition() { startWorkflowInput.put("param2", "SQS-${MessageId}"); Action startWorkflowAction = new Action(); - startWorkflowAction.setAction(Type.start_workflow); - startWorkflowAction.setStart_workflow(new StartWorkflow()); - startWorkflowAction.getStart_workflow().setName("cms_artwork_automation"); - startWorkflowAction.getStart_workflow().setVersion(1); - startWorkflowAction.getStart_workflow().setInput(startWorkflowInput); - startWorkflowAction.setExpandInlineJSON(true); + startWorkflowAction.setAction(Type.START_WORKFLOW); + startWorkflowAction.setStartWorkflow(new StartWorkflow()); + startWorkflowAction.getStartWorkflow().setName("cms_artwork_automation"); + startWorkflowAction.getStartWorkflow().setVersion(1); + startWorkflowAction.getStartWorkflow().setInput(startWorkflowInput); + startWorkflowAction.setExpandInlineJson(true); eventHandler.getActions().add(startWorkflowAction); eventHandler.setEvent(event); @@ -213,13 +212,13 @@ public void testEventHandlerWithCondition() { doAnswer((Answer) invocation -> { started.set(true); return id; - }).when(workflowExecutor).startWorkflow(startWorkflowAction.getStart_workflow().getName(), startWorkflowAction.getStart_workflow().getVersion(), startWorkflowAction.getStart_workflow().getCorrelationId(), startWorkflowAction.getStart_workflow().getInput(), event); + }).when(workflowExecutor).startWorkflow(startWorkflowAction.getStartWorkflow().getName(), startWorkflowAction.getStartWorkflow().getVersion(), startWorkflowAction.getStartWorkflow().getCorrelationId(), startWorkflowAction.getStartWorkflow().getInput(), event); WorkflowDef workflowDef = new WorkflowDef(); - workflowDef.setName(startWorkflowAction.getStart_workflow().getName()); + 
workflowDef.setName(startWorkflowAction.getStartWorkflow().getName()); when(metadataService.getWorkflowDef(any(), any())).thenReturn(workflowDef); - ActionProcessor actionProcessor = new ActionProcessor(workflowExecutor, metadataService, parametersUtils); + ActionProcessor actionProcessor = new ActionProcessor(workflowExecutor, parametersUtils); EventProcessor eventProcessor = new EventProcessor(executionService, metadataService, actionProcessor, eventQueues, new TestConfiguration()); assertNotNull(eventProcessor.getQueues()); @@ -237,11 +236,11 @@ public void testEventProcessorWithRetriableError() { eventHandler.setEvent(event); Action completeTaskAction = new Action(); - completeTaskAction.setAction(Type.complete_task); - completeTaskAction.setComplete_task(new TaskDetails()); - completeTaskAction.getComplete_task().setTaskRefName("task_x"); - completeTaskAction.getComplete_task().setWorkflowId(UUID.randomUUID().toString()); - completeTaskAction.getComplete_task().setOutput(new HashMap<>()); + completeTaskAction.setAction(Type.COMPLETE_TASK); + completeTaskAction.setCompleteTask(new TaskDetails()); + completeTaskAction.getCompleteTask().setTaskRefName("task_x"); + completeTaskAction.getCompleteTask().setWorkflowId(UUID.randomUUID().toString()); + completeTaskAction.getCompleteTask().setOutput(new HashMap<>()); eventHandler.getActions().add(completeTaskAction); when(queue.rePublishIfNoAck()).thenReturn(false); @@ -268,11 +267,11 @@ public void testEventProcessorWithNonRetriableError() { eventHandler.setEvent(event); Action completeTaskAction = new Action(); - completeTaskAction.setAction(Type.complete_task); - completeTaskAction.setComplete_task(new TaskDetails()); - completeTaskAction.getComplete_task().setTaskRefName("task_x"); - completeTaskAction.getComplete_task().setWorkflowId(UUID.randomUUID().toString()); - completeTaskAction.getComplete_task().setOutput(new HashMap<>()); + completeTaskAction.setAction(Type.COMPLETE_TASK); + completeTaskAction.setCompleteTask(new TaskDetails()); + completeTaskAction.getCompleteTask().setTaskRefName("task_x"); + completeTaskAction.getCompleteTask().setWorkflowId(UUID.randomUUID().toString()); + completeTaskAction.getCompleteTask().setOutput(new HashMap<>()); eventHandler.getActions().add(completeTaskAction); when(metadataService.getEventHandlers()).thenReturn(Collections.singletonList(eventHandler)); @@ -325,7 +324,7 @@ public void testExecuteNonRetriableApplicationException() { eventExecution.setStatus(EventExecution.Status.IN_PROGRESS); eventExecution.setEvent("event"); Action action = new Action(); - action.setAction(Type.start_workflow); + action.setAction(Type.START_WORKFLOW); eventProcessor.execute(eventExecution, action, "payload"); assertEquals(1, executeInvoked.get()); @@ -346,10 +345,11 @@ public void testExecuteRetriableApplicationException() { eventExecution.setStatus(EventExecution.Status.IN_PROGRESS); eventExecution.setEvent("event"); Action action = new Action(); - action.setAction(Type.start_workflow); + action.setAction(Type.START_WORKFLOW); eventProcessor.execute(eventExecution, action, "payload"); assertEquals(3, executeInvoked.get()); assertNull(eventExecution.getOutput().get("exception")); } } + diff --git a/core/src/test/java/com/netflix/conductor/core/execution/TestConfiguration.java b/core/src/test/java/com/netflix/conductor/core/execution/TestConfiguration.java index 6eb5c1a6b1..503921bed1 100644 --- a/core/src/test/java/com/netflix/conductor/core/execution/TestConfiguration.java +++ 
b/core/src/test/java/com/netflix/conductor/core/execution/TestConfiguration.java @@ -64,8 +64,13 @@ public String getAppId() { public String getProperty(String string, String def) { return "dummy"; } - - @Override + + @Override + public boolean getBooleanProperty(String name, boolean defaultValue) { + return false; + } + + @Override public String getAvailabilityZone() { return "us-east-1a"; } diff --git a/core/src/test/java/com/netflix/conductor/core/execution/TestDeciderOutcomes.java b/core/src/test/java/com/netflix/conductor/core/execution/TestDeciderOutcomes.java index 168b4813eb..2122759d46 100644 --- a/core/src/test/java/com/netflix/conductor/core/execution/TestDeciderOutcomes.java +++ b/core/src/test/java/com/netflix/conductor/core/execution/TestDeciderOutcomes.java @@ -1,17 +1,14 @@ /** * Copyright 2016 Netflix, Inc. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
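Before the TestDeciderOutcomes changes that follow, note the pattern they all move to: the Workflow object now carries its WorkflowDef via setWorkflowDefinition, so DeciderService.decide is called with the workflow alone instead of a (workflow, def) pair. A compact sketch of that call pattern, assuming only the setter and decide signature visible in the updated test; the DecidePatternSketch wrapper is illustrative:

    import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
    import com.netflix.conductor.common.run.Workflow;
    import com.netflix.conductor.core.execution.DeciderService;
    import com.netflix.conductor.core.execution.DeciderService.DeciderOutcome;

    class DecidePatternSketch {
        static DeciderOutcome decideOnce(DeciderService deciderService) {
            WorkflowDef def = new WorkflowDef();
            def.setName("test");
            def.setSchemaVersion(2);

            Workflow workflow = new Workflow();
            workflow.setWorkflowDefinition(def);                // the definition now travels with the execution
            workflow.setStartTime(System.currentTimeMillis());

            return deciderService.decide(workflow);             // no separate WorkflowDef argument any more
        }
    }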
*/ /** * @@ -24,9 +21,9 @@ import com.netflix.conductor.common.metadata.tasks.Task; import com.netflix.conductor.common.metadata.tasks.Task.Status; import com.netflix.conductor.common.metadata.tasks.TaskDef; +import com.netflix.conductor.common.metadata.workflow.TaskType; import com.netflix.conductor.common.metadata.workflow.WorkflowDef; import com.netflix.conductor.common.metadata.workflow.WorkflowTask; -import com.netflix.conductor.common.metadata.workflow.WorkflowTask.Type; import com.netflix.conductor.common.run.Workflow; import com.netflix.conductor.core.config.Configuration; import com.netflix.conductor.core.execution.DeciderService.DeciderOutcome; @@ -47,6 +44,7 @@ import com.netflix.conductor.dao.QueueDAO; import org.junit.Before; import org.junit.Test; +import org.mockito.Mockito; import java.io.InputStream; import java.util.Arrays; @@ -61,6 +59,7 @@ import static org.junit.Assert.assertNotSame; import static org.junit.Assert.assertTrue; import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -70,383 +69,399 @@ */ public class TestDeciderOutcomes { - private DeciderService deciderService; + private MetadataDAO metadataDAO; + private DeciderService deciderService; - private static ObjectMapper objectMapper = new ObjectMapper(); + private static ObjectMapper objectMapper = new ObjectMapper(); - static { - objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + static { + objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); objectMapper.configure(DeserializationFeature.FAIL_ON_IGNORED_PROPERTIES, false); objectMapper.configure(DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES, false); objectMapper.setSerializationInclusion(Include.NON_NULL); objectMapper.setSerializationInclusion(Include.NON_EMPTY); - } - - - @Before - public void init() { - - MetadataDAO metadataDAO = mock(MetadataDAO.class); - QueueDAO queueDAO = mock(QueueDAO.class); - ExternalPayloadStorageUtils externalPayloadStorageUtils = mock(ExternalPayloadStorageUtils.class); - Configuration configuration = mock(Configuration.class); - when(configuration.getTaskInputPayloadSizeThresholdKB()).thenReturn(10L); - when(configuration.getMaxTaskInputPayloadSizeThresholdKB()).thenReturn(10240L); - - TaskDef taskDef = new TaskDef(); - taskDef.setRetryCount(1); - taskDef.setName("mockTaskDef"); - taskDef.setResponseTimeoutSeconds(60*60); - when(metadataDAO.getTaskDef(any())).thenReturn(taskDef); - ParametersUtils parametersUtils = new ParametersUtils(); - Map taskMappers = new HashMap<>(); - taskMappers.put("DECISION", new DecisionTaskMapper()); - taskMappers.put("DYNAMIC", new DynamicTaskMapper(parametersUtils, metadataDAO)); - taskMappers.put("FORK_JOIN", new ForkJoinTaskMapper()); - taskMappers.put("JOIN", new JoinTaskMapper()); - taskMappers.put("FORK_JOIN_DYNAMIC", new ForkJoinDynamicTaskMapper(parametersUtils, objectMapper)); - taskMappers.put("USER_DEFINED", new UserDefinedTaskMapper(parametersUtils, metadataDAO)); - taskMappers.put("SIMPLE", new SimpleTaskMapper(parametersUtils, metadataDAO)); - taskMappers.put("SUB_WORKFLOW", new SubWorkflowTaskMapper(parametersUtils, metadataDAO)); - taskMappers.put("EVENT", new EventTaskMapper(parametersUtils)); - taskMappers.put("WAIT", new WaitTaskMapper(parametersUtils)); - - this.deciderService = new DeciderService(metadataDAO, parametersUtils, queueDAO, externalPayloadStorageUtils, taskMappers); - } - - @Test - 
public void testWorkflowWithNoTasks() throws Exception { - InputStream stream = TestDeciderOutcomes.class.getResourceAsStream("/conditional_flow.json"); - WorkflowDef def = objectMapper.readValue(stream, WorkflowDef.class); - assertNotNull(def); - - Workflow workflow = new Workflow(); - workflow.setWorkflowType(def.getName()); - workflow.setStartTime(0); - workflow.getInput().put("param1", "nested"); - workflow.getInput().put("param2", "one"); - - DeciderOutcome outcome = deciderService.decide(workflow, def); - assertNotNull(outcome); - assertFalse(outcome.isComplete); - assertTrue(outcome.tasksToBeUpdated.isEmpty()); - assertEquals(3, outcome.tasksToBeScheduled.size()); - System.out.println(outcome.tasksToBeScheduled); - - outcome.tasksToBeScheduled.forEach(t -> t.setStatus(Status.COMPLETED)); - workflow.getTasks().addAll(outcome.tasksToBeScheduled); - outcome = deciderService.decide(workflow, def); - assertFalse(outcome.isComplete); - assertEquals(outcome.tasksToBeUpdated.toString(), 3, outcome.tasksToBeUpdated.size()); - assertEquals(1, outcome.tasksToBeScheduled.size()); - assertEquals("junit_task_3", outcome.tasksToBeScheduled.get(0).getTaskDefName()); - System.out.println(outcome.tasksToBeScheduled); - } - - - @Test - public void testRetries() { - WorkflowTask task = new WorkflowTask(); - task.setName("test_task"); - task.setType("USER_TASK"); - task.setTaskReferenceName("t0"); - task.getInputParameters().put("taskId", "${CPEWF_TASK_ID}"); - task.getInputParameters().put("requestId", "${workflow.input.requestId}"); - - WorkflowDef def = new WorkflowDef(); - def.setName("test"); - def.getTasks().add(task); - def.setSchemaVersion(2); - - Workflow workflow = new Workflow(); - workflow.getInput().put("requestId", 123); - workflow.setStartTime(System.currentTimeMillis()); - - DeciderOutcome outcome = deciderService.decide(workflow, def); - assertNotNull(outcome); - assertEquals(1, outcome.tasksToBeScheduled.size()); - assertEquals(task.getTaskReferenceName(), outcome.tasksToBeScheduled.get(0).getReferenceTaskName()); - - String task1Id = outcome.tasksToBeScheduled.get(0).getTaskId(); - assertEquals(task1Id, outcome.tasksToBeScheduled.get(0).getInputData().get("taskId")); - assertEquals(123, outcome.tasksToBeScheduled.get(0).getInputData().get("requestId")); - - outcome.tasksToBeScheduled.get(0).setStatus(Status.FAILED); - workflow.getTasks().addAll(outcome.tasksToBeScheduled); - - outcome = deciderService.decide(workflow, def); - assertNotNull(outcome); - assertEquals(1, outcome.tasksToBeUpdated.size()); - assertEquals(1, outcome.tasksToBeScheduled.size()); - assertEquals(task1Id, outcome.tasksToBeUpdated.get(0).getTaskId()); - assertNotSame(task1Id, outcome.tasksToBeScheduled.get(0).getTaskId()); - assertEquals(outcome.tasksToBeScheduled.get(0).getTaskId(), outcome.tasksToBeScheduled.get(0).getInputData().get("taskId")); - assertEquals(task1Id, outcome.tasksToBeScheduled.get(0).getRetriedTaskId()); - assertEquals(123, outcome.tasksToBeScheduled.get(0).getInputData().get("requestId")); - - WorkflowTask fork = new WorkflowTask(); - fork.setName("fork0"); - fork.setWorkflowTaskType(Type.FORK_JOIN_DYNAMIC); - fork.setTaskReferenceName("fork0"); - fork.setDynamicForkTasksInputParamName("forkedInputs"); - fork.setDynamicForkTasksParam("forks"); - fork.getInputParameters().put("forks", "${workflow.input.forks}"); - fork.getInputParameters().put("forkedInputs", "${workflow.input.forkedInputs}"); - - WorkflowTask join = new WorkflowTask(); - join.setName("join0"); - join.setType("JOIN"); - 
join.setTaskReferenceName("join0"); - - def.getTasks().clear(); - def.getTasks().add(fork); - def.getTasks().add(join); - - List forks = new LinkedList<>(); - Map> forkedInputs = new HashMap<>(); - - for(int i = 0; i < 1; i++) { - WorkflowTask wft = new WorkflowTask(); - wft.setName("f" + i); - wft.setTaskReferenceName("f" + i); - wft.setWorkflowTaskType(Type.SIMPLE); - wft.getInputParameters().put("requestId", "${workflow.input.requestId}"); - wft.getInputParameters().put("taskId", "${CPEWF_TASK_ID}"); - forks.add(wft); - Map input = new HashMap<>(); - input.put("k", "v"); - input.put("k1", 1); - forkedInputs.put(wft.getTaskReferenceName(), input); - } - workflow = new Workflow(); - workflow.getInput().put("requestId", 123); - workflow.setStartTime(System.currentTimeMillis()); - workflow.getInput().put("forks", forks); - workflow.getInput().put("forkedInputs", forkedInputs); - - outcome = deciderService.decide(workflow, def); - assertNotNull(outcome); - assertEquals(3, outcome.tasksToBeScheduled.size()); - assertEquals(0, outcome.tasksToBeUpdated.size()); - - assertEquals("v", outcome.tasksToBeScheduled.get(1).getInputData().get("k")); - assertEquals(1, outcome.tasksToBeScheduled.get(1).getInputData().get("k1")); - assertEquals(outcome.tasksToBeScheduled.get(1).getTaskId(), outcome.tasksToBeScheduled.get(1).getInputData().get("taskId")); - System.out.println(outcome.tasksToBeScheduled.get(1).getInputData()); - - task1Id = outcome.tasksToBeScheduled.get(1).getTaskId(); - outcome.tasksToBeScheduled.get(1).setStatus(Status.FAILED); - - for(Task taskToBeScheduled : outcome.tasksToBeScheduled) { - taskToBeScheduled.setUpdateTime(System.currentTimeMillis()); - } - workflow.getTasks().addAll(outcome.tasksToBeScheduled); - - outcome = deciderService.decide(workflow, def); - assertTrue(outcome.tasksToBeScheduled.stream().anyMatch(task1 -> task1.getReferenceTaskName().equals("f0"))); - - //noinspection ConstantConditions - Task task1 = outcome.tasksToBeScheduled.stream().filter(t -> t.getReferenceTaskName().equals("f0")).findFirst().get(); - assertEquals("v", task1.getInputData().get("k")); - assertEquals(1, task1.getInputData().get("k1")); - assertEquals(task1.getTaskId(), task1.getInputData().get("taskId")); - assertNotSame(task1Id, task1.getTaskId()); - assertEquals(task1Id, task1.getRetriedTaskId()); - System.out.println(task1.getInputData()); - } - - @Test - public void testOptional() { - WorkflowDef def = new WorkflowDef(); - def.setName("test"); - - WorkflowTask task1 = new WorkflowTask(); - task1.setName("task0"); - task1.setType("SIMPLE"); - task1.setTaskReferenceName("t0"); - task1.getInputParameters().put("taskId", "${CPEWF_TASK_ID}"); - task1.setOptional(true); - - WorkflowTask task2 = new WorkflowTask(); - task2.setName("task1"); - task2.setType("SIMPLE"); - task2.setTaskReferenceName("t1"); - - def.getTasks().add(task1); - def.getTasks().add(task2); - def.setSchemaVersion(2); - - - Workflow workflow = new Workflow(); - workflow.setStartTime(System.currentTimeMillis()); - DeciderOutcome outcome = deciderService.decide(workflow, def); - assertNotNull(outcome); - - System.out.println("Schedule after starting: " + outcome.tasksToBeScheduled); - assertEquals(1, outcome.tasksToBeScheduled.size()); - assertEquals(task1.getTaskReferenceName(), outcome.tasksToBeScheduled.get(0).getReferenceTaskName()); - System.out.println("TaskId of the scheduled task in input: " + outcome.tasksToBeScheduled.get(0).getInputData()); - String task1Id = outcome.tasksToBeScheduled.get(0).getTaskId(); - 
assertEquals(task1Id, outcome.tasksToBeScheduled.get(0).getInputData().get("taskId")); - - workflow.getTasks().addAll(outcome.tasksToBeScheduled); - workflow.getTasks().get(0).setStatus(Status.FAILED); - - outcome = deciderService.decide(workflow, def); - - assertNotNull(outcome); - System.out.println("Schedule: " + outcome.tasksToBeScheduled); - System.out.println("Update: " + outcome.tasksToBeUpdated); - - assertEquals(1, outcome.tasksToBeUpdated.size()); - assertEquals(1, outcome.tasksToBeScheduled.size()); - - assertEquals(Task.Status.COMPLETED_WITH_ERRORS, workflow.getTasks().get(0).getStatus()); - assertEquals(task1Id, outcome.tasksToBeUpdated.get(0).getTaskId()); - assertEquals(task2.getTaskReferenceName(), outcome.tasksToBeScheduled.get(0).getReferenceTaskName()); - - } - - @Test - public void testOptionalWithDynamicFork() { - WorkflowDef workflowDef = new WorkflowDef(); - workflowDef.setName("test"); - - WorkflowTask task1 = new WorkflowTask(); - task1.setName("fork0"); - task1.setWorkflowTaskType(Type.FORK_JOIN_DYNAMIC); - task1.setTaskReferenceName("fork0"); - task1.setDynamicForkTasksInputParamName("forkedInputs"); - task1.setDynamicForkTasksParam("forks"); - task1.getInputParameters().put("forks", "${workflow.input.forks}"); - task1.getInputParameters().put("forkedInputs", "${workflow.input.forkedInputs}"); - - WorkflowTask task2 = new WorkflowTask(); - task2.setName("join0"); - task2.setType("JOIN"); - task2.setTaskReferenceName("join0"); - - workflowDef.getTasks().add(task1); - workflowDef.getTasks().add(task2); - workflowDef.setSchemaVersion(2); - - - Workflow workflow = new Workflow(); - List forkedTasks = new LinkedList<>(); - Map> forkedInputs = new HashMap<>(); - - for(int i = 0; i < 3; i++) { - WorkflowTask workflowTask = new WorkflowTask(); - workflowTask.setName("f" + i); - workflowTask.setTaskReferenceName("f" + i); - workflowTask.setWorkflowTaskType(Type.SIMPLE); - workflowTask.setOptional(true); - forkedTasks.add(workflowTask); - - forkedInputs.put(workflowTask.getTaskReferenceName(), new HashMap<>()); - } - workflow.getInput().put("forks", forkedTasks); - workflow.getInput().put("forkedInputs", forkedInputs); - - - workflow.setStartTime(System.currentTimeMillis()); - DeciderOutcome outcome = deciderService.decide(workflow, workflowDef); - assertNotNull(outcome); - assertEquals(5, outcome.tasksToBeScheduled.size()); - assertEquals(0, outcome.tasksToBeUpdated.size()); - assertEquals(SystemTaskType.FORK.name(), outcome.tasksToBeScheduled.get(0).getTaskType()); - assertEquals(Task.Status.COMPLETED, outcome.tasksToBeScheduled.get(0).getStatus()); - for(int i = 1; i < 4; i++) { - assertEquals(Task.Status.SCHEDULED, outcome.tasksToBeScheduled.get(i).getStatus()); - assertEquals("f"+ (i-1), outcome.tasksToBeScheduled.get(i).getTaskDefName()); - outcome.tasksToBeScheduled.get(i).setStatus(Status.FAILED); //let's mark them as failure - } - assertEquals(Task.Status.IN_PROGRESS, outcome.tasksToBeScheduled.get(4).getStatus()); - workflow.getTasks().clear(); - workflow.getTasks().addAll(outcome.tasksToBeScheduled); - - for(Task taskToBeScheduled : outcome.tasksToBeScheduled) { - taskToBeScheduled.setUpdateTime(System.currentTimeMillis()); - } - outcome = deciderService.decide(workflow, workflowDef); - assertNotNull(outcome); - assertEquals(SystemTaskType.JOIN.name(), outcome.tasksToBeScheduled.get(0).getTaskType()); - for(int i = 1; i < 4; i++) { - assertEquals(Task.Status.COMPLETED_WITH_ERRORS, outcome.tasksToBeUpdated.get(i).getStatus()); - assertEquals("f"+ (i-1), 
outcome.tasksToBeUpdated.get(i).getTaskDefName()); - } - assertEquals(Task.Status.IN_PROGRESS, outcome.tasksToBeScheduled.get(0).getStatus()); - new Join().execute(workflow, outcome.tasksToBeScheduled.get(0), null); - assertEquals(Task.Status.COMPLETED, outcome.tasksToBeScheduled.get(0).getStatus()); - - outcome.tasksToBeScheduled.stream().map(task -> task.getStatus() + ":" + task.getTaskType() + ":").forEach(System.out::println); - outcome.tasksToBeUpdated.stream().map(task -> task.getStatus() + ":" + task.getTaskType() + ":").forEach(System.out::println); - } - - @Test - public void testDecisionCases() { - WorkflowDef def = new WorkflowDef(); - def.setName("test"); - - WorkflowTask even = new WorkflowTask(); - even.setName("even"); - even.setType("SIMPLE"); - even.setTaskReferenceName("even"); - - WorkflowTask odd = new WorkflowTask(); - odd.setName("odd"); - odd.setType("SIMPLE"); - odd.setTaskReferenceName("odd"); - - WorkflowTask defaultt = new WorkflowTask(); - defaultt.setName("defaultt"); - defaultt.setType("SIMPLE"); - defaultt.setTaskReferenceName("defaultt"); - - - WorkflowTask decide = new WorkflowTask(); - decide.setName("decide"); - decide.setWorkflowTaskType(Type.DECISION); - decide.setTaskReferenceName("d0"); - decide.getInputParameters().put("Id", "${workflow.input.Id}"); - decide.getInputParameters().put("location", "${workflow.input.location}"); - decide.setCaseExpression("if ($.Id == null) 'bad input'; else if ( ($.Id != null && $.Id % 2 == 0) || $.location == 'usa') 'even'; else 'odd'; "); - - decide.getDecisionCases().put("even", Arrays.asList(even)); - decide.getDecisionCases().put("odd", Arrays.asList(odd)); - decide.setDefaultCase(Arrays.asList(defaultt)); - - def.getTasks().add(decide); - def.setSchemaVersion(2); - - - Workflow workflow = new Workflow(); - workflow.setStartTime(System.currentTimeMillis()); - DeciderOutcome outcome = deciderService.decide(workflow, def); - assertNotNull(outcome); - - System.out.println("Schedule after starting: " + outcome.tasksToBeScheduled); - assertEquals(2, outcome.tasksToBeScheduled.size()); - assertEquals(decide.getTaskReferenceName(), outcome.tasksToBeScheduled.get(0).getReferenceTaskName()); - assertEquals(defaultt.getTaskReferenceName(), outcome.tasksToBeScheduled.get(1).getReferenceTaskName()); //default - System.out.println(outcome.tasksToBeScheduled.get(0).getOutputData().get("caseOutput")); - assertEquals(Arrays.asList("bad input"), outcome.tasksToBeScheduled.get(0).getOutputData().get("caseOutput")); - - workflow.getInput().put("Id", 9); - workflow.getInput().put("location", "usa"); - outcome = deciderService.decide(workflow, def); - assertEquals(2, outcome.tasksToBeScheduled.size()); - assertEquals(decide.getTaskReferenceName(), outcome.tasksToBeScheduled.get(0).getReferenceTaskName()); - assertEquals(even.getTaskReferenceName(), outcome.tasksToBeScheduled.get(1).getReferenceTaskName()); //even because of location == usa - assertEquals(Arrays.asList("even"), outcome.tasksToBeScheduled.get(0).getOutputData().get("caseOutput")); - - workflow.getInput().put("Id", 9); - workflow.getInput().put("location", "canada"); - outcome = deciderService.decide(workflow, def); - assertEquals(2, outcome.tasksToBeScheduled.size()); - assertEquals(decide.getTaskReferenceName(), outcome.tasksToBeScheduled.get(0).getReferenceTaskName()); - assertEquals(odd.getTaskReferenceName(), outcome.tasksToBeScheduled.get(1).getReferenceTaskName()); //odd - assertEquals(Arrays.asList("odd"), 
outcome.tasksToBeScheduled.get(0).getOutputData().get("caseOutput")); - } - + } + + @Before + public void init() { + metadataDAO = mock(MetadataDAO.class); + QueueDAO queueDAO = mock(QueueDAO.class); + ExternalPayloadStorageUtils externalPayloadStorageUtils = mock(ExternalPayloadStorageUtils.class); + Configuration configuration = mock(Configuration.class); + when(configuration.getTaskInputPayloadSizeThresholdKB()).thenReturn(10L); + when(configuration.getMaxTaskInputPayloadSizeThresholdKB()).thenReturn(10240L); + + TaskDef taskDef = new TaskDef(); + taskDef.setRetryCount(1); + taskDef.setName("mockTaskDef"); + taskDef.setResponseTimeoutSeconds(60 * 60); + when(metadataDAO.getTaskDef(anyString())).thenReturn(taskDef); + ParametersUtils parametersUtils = new ParametersUtils(); + Map taskMappers = new HashMap<>(); + taskMappers.put("DECISION", new DecisionTaskMapper()); + taskMappers.put("DYNAMIC", new DynamicTaskMapper(parametersUtils)); + taskMappers.put("FORK_JOIN", new ForkJoinTaskMapper()); + taskMappers.put("JOIN", new JoinTaskMapper()); + taskMappers.put("FORK_JOIN_DYNAMIC", new ForkJoinDynamicTaskMapper(parametersUtils, objectMapper, metadataDAO)); + taskMappers.put("USER_DEFINED", new UserDefinedTaskMapper(parametersUtils)); + taskMappers.put("SIMPLE", new SimpleTaskMapper(parametersUtils)); + taskMappers.put("SUB_WORKFLOW", new SubWorkflowTaskMapper(parametersUtils)); + taskMappers.put("EVENT", new EventTaskMapper(parametersUtils)); + taskMappers.put("WAIT", new WaitTaskMapper(parametersUtils)); + + this.deciderService = new DeciderService(parametersUtils, queueDAO, externalPayloadStorageUtils, taskMappers); + } + + @Test + public void testWorkflowWithNoTasks() throws Exception { + InputStream stream = TestDeciderOutcomes.class.getResourceAsStream("/conditional_flow.json"); + WorkflowDef def = objectMapper.readValue(stream, WorkflowDef.class); + assertNotNull(def); + + Workflow workflow = new Workflow(); + workflow.setWorkflowDefinition(def); + workflow.setStartTime(0); + workflow.getInput().put("param1", "nested"); + workflow.getInput().put("param2", "one"); + + DeciderOutcome outcome = deciderService.decide(workflow); + assertNotNull(outcome); + assertFalse(outcome.isComplete); + assertTrue(outcome.tasksToBeUpdated.isEmpty()); + assertEquals(3, outcome.tasksToBeScheduled.size()); + System.out.println(outcome.tasksToBeScheduled); + + outcome.tasksToBeScheduled.forEach(t -> t.setStatus(Status.COMPLETED)); + workflow.getTasks().addAll(outcome.tasksToBeScheduled); + outcome = deciderService.decide(workflow); + assertFalse(outcome.isComplete); + assertEquals(outcome.tasksToBeUpdated.toString(), 3, outcome.tasksToBeUpdated.size()); + assertEquals(1, outcome.tasksToBeScheduled.size()); + assertEquals("junit_task_3", outcome.tasksToBeScheduled.get(0).getTaskDefName()); + System.out.println(outcome.tasksToBeScheduled); + } + + + @Test + public void testRetries() { + WorkflowDef def = new WorkflowDef(); + def.setName("test"); + + WorkflowTask workflowTask = new WorkflowTask(); + workflowTask.setName("test_task"); + workflowTask.setType("USER_TASK"); + workflowTask.setTaskReferenceName("t0"); + workflowTask.getInputParameters().put("taskId", "${CPEWF_TASK_ID}"); + workflowTask.getInputParameters().put("requestId", "${workflow.input.requestId}"); + workflowTask.setTaskDefinition(new TaskDef("test_task")); + + def.getTasks().add(workflowTask); + def.setSchemaVersion(2); + + Workflow workflow = new Workflow(); + workflow.setWorkflowDefinition(def); + workflow.getInput().put("requestId", 123); 
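The rewritten tests above follow one pattern used throughout this diff: the Workflow now carries its own WorkflowDef, and the decider is called with a single argument. A minimal sketch of that pattern, assuming the deciderService wiring from the init() method above:

    WorkflowDef def = new WorkflowDef();
    def.setName("example");
    def.setSchemaVersion(2);

    Workflow workflow = new Workflow();
    workflow.setWorkflowDefinition(def);                       // replaces setWorkflowType()/setVersion()
    workflow.setStartTime(System.currentTimeMillis());

    DeciderOutcome outcome = deciderService.decide(workflow);  // was deciderService.decide(workflow, def)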
+ workflow.setStartTime(System.currentTimeMillis()); + DeciderOutcome outcome = deciderService.decide(workflow); + assertNotNull(outcome); + + assertEquals(1, outcome.tasksToBeScheduled.size()); + assertEquals(workflowTask.getTaskReferenceName(), outcome.tasksToBeScheduled.get(0).getReferenceTaskName()); + + String task1Id = outcome.tasksToBeScheduled.get(0).getTaskId(); + assertEquals(task1Id, outcome.tasksToBeScheduled.get(0).getInputData().get("taskId")); + assertEquals(123, outcome.tasksToBeScheduled.get(0).getInputData().get("requestId")); + + outcome.tasksToBeScheduled.get(0).setStatus(Status.FAILED); + workflow.getTasks().addAll(outcome.tasksToBeScheduled); + + outcome = deciderService.decide(workflow); + assertNotNull(outcome); + + assertEquals(1, outcome.tasksToBeUpdated.size()); + assertEquals(1, outcome.tasksToBeScheduled.size()); + assertEquals(task1Id, outcome.tasksToBeUpdated.get(0).getTaskId()); + assertNotSame(task1Id, outcome.tasksToBeScheduled.get(0).getTaskId()); + assertEquals(outcome.tasksToBeScheduled.get(0).getTaskId(), outcome.tasksToBeScheduled.get(0).getInputData().get("taskId")); + assertEquals(task1Id, outcome.tasksToBeScheduled.get(0).getRetriedTaskId()); + assertEquals(123, outcome.tasksToBeScheduled.get(0).getInputData().get("requestId")); + + + WorkflowTask fork = new WorkflowTask(); + fork.setName("fork0"); + fork.setWorkflowTaskType(TaskType.FORK_JOIN_DYNAMIC); + fork.setTaskReferenceName("fork0"); + fork.setDynamicForkTasksInputParamName("forkedInputs"); + fork.setDynamicForkTasksParam("forks"); + fork.getInputParameters().put("forks", "${workflow.input.forks}"); + fork.getInputParameters().put("forkedInputs", "${workflow.input.forkedInputs}"); + + WorkflowTask join = new WorkflowTask(); + join.setName("join0"); + join.setType("JOIN"); + join.setTaskReferenceName("join0"); + + def.getTasks().clear(); + def.getTasks().add(fork); + def.getTasks().add(join); + + List forks = new LinkedList<>(); + Map> forkedInputs = new HashMap<>(); + + for (int i = 0; i < 1; i++) { + WorkflowTask wft = new WorkflowTask(); + wft.setName("f" + i); + wft.setTaskReferenceName("f" + i); + wft.setWorkflowTaskType(TaskType.SIMPLE); + wft.getInputParameters().put("requestId", "${workflow.input.requestId}"); + wft.getInputParameters().put("taskId", "${CPEWF_TASK_ID}"); + wft.setTaskDefinition(new TaskDef("f" + i)); + forks.add(wft); + Map input = new HashMap<>(); + input.put("k", "v"); + input.put("k1", 1); + forkedInputs.put(wft.getTaskReferenceName(), input); + } + workflow = new Workflow(); + workflow.setWorkflowDefinition(def); + workflow.getInput().put("requestId", 123); + workflow.setStartTime(System.currentTimeMillis()); + + workflow.getInput().put("forks", forks); + workflow.getInput().put("forkedInputs", forkedInputs); + + outcome = deciderService.decide(workflow); + assertNotNull(outcome); + assertEquals(3, outcome.tasksToBeScheduled.size()); + assertEquals(0, outcome.tasksToBeUpdated.size()); + + assertEquals("v", outcome.tasksToBeScheduled.get(1).getInputData().get("k")); + assertEquals(1, outcome.tasksToBeScheduled.get(1).getInputData().get("k1")); + assertEquals(outcome.tasksToBeScheduled.get(1).getTaskId(), outcome.tasksToBeScheduled.get(1).getInputData().get("taskId")); + System.out.println(outcome.tasksToBeScheduled.get(1).getInputData()); + task1Id = outcome.tasksToBeScheduled.get(1).getTaskId(); + + outcome.tasksToBeScheduled.get(1).setStatus(Status.FAILED); + for(Task taskToBeScheduled : outcome.tasksToBeScheduled) { + 
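The dynamic-fork setup in testRetries builds two workflow inputs: "forks", the list of tasks to spawn, and "forkedInputs", a map from each task's reference name to its input. A sketch of that setup with the generic type parameters written out explicitly:

    List<WorkflowTask> forks = new LinkedList<>();
    Map<String, Map<String, Object>> forkedInputs = new HashMap<>();

    WorkflowTask wft = new WorkflowTask();
    wft.setName("f0");
    wft.setTaskReferenceName("f0");
    wft.setWorkflowTaskType(TaskType.SIMPLE);
    wft.setTaskDefinition(new TaskDef("f0"));
    forks.add(wft);

    Map<String, Object> input = new HashMap<>();
    input.put("k", "v");
    forkedInputs.put(wft.getTaskReferenceName(), input);

    workflow.getInput().put("forks", forks);
    workflow.getInput().put("forkedInputs", forkedInputs);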
taskToBeScheduled.setUpdateTime(System.currentTimeMillis()); + } + workflow.getTasks().addAll(outcome.tasksToBeScheduled); + + outcome = deciderService.decide(workflow); + assertTrue(outcome.tasksToBeScheduled.stream().anyMatch(task1 -> task1.getReferenceTaskName().equals("f0"))); + + //noinspection ConstantConditions + Task task1 = outcome.tasksToBeScheduled.stream().filter(t -> t.getReferenceTaskName().equals("f0")).findFirst().get(); + assertEquals("v", task1.getInputData().get("k")); + assertEquals(1, task1.getInputData().get("k1")); + assertEquals(task1.getTaskId(), task1.getInputData().get("taskId")); + assertNotSame(task1Id, task1.getTaskId()); + assertEquals(task1Id, task1.getRetriedTaskId()); + System.out.println(task1.getInputData()); + + } + + @Test + public void testOptional() { + WorkflowDef def = new WorkflowDef(); + def.setName("test"); + + WorkflowTask task1 = new WorkflowTask(); + task1.setName("task0"); + task1.setType("SIMPLE"); + task1.setTaskReferenceName("t0"); + task1.getInputParameters().put("taskId", "${CPEWF_TASK_ID}"); + task1.setOptional(true); + task1.setTaskDefinition(new TaskDef("task0")); + + WorkflowTask task2 = new WorkflowTask(); + task2.setName("task1"); + task2.setType("SIMPLE"); + task2.setTaskReferenceName("t1"); + task2.setTaskDefinition(new TaskDef("task1")); + + def.getTasks().add(task1); + def.getTasks().add(task2); + def.setSchemaVersion(2); + + + Workflow workflow = new Workflow(); + workflow.setWorkflowDefinition(def); + workflow.setStartTime(System.currentTimeMillis()); + DeciderOutcome outcome = deciderService.decide(workflow); + assertNotNull(outcome); + + System.out.println("Schedule after starting: " + outcome.tasksToBeScheduled); + assertEquals(1, outcome.tasksToBeScheduled.size()); + assertEquals(task1.getTaskReferenceName(), outcome.tasksToBeScheduled.get(0).getReferenceTaskName()); + System.out.println("TaskId of the scheduled task in input: " + outcome.tasksToBeScheduled.get(0).getInputData()); + String task1Id = outcome.tasksToBeScheduled.get(0).getTaskId(); + assertEquals(task1Id, outcome.tasksToBeScheduled.get(0).getInputData().get("taskId")); + + workflow.getTasks().addAll(outcome.tasksToBeScheduled); + workflow.getTasks().get(0).setStatus(Status.FAILED); + + outcome = deciderService.decide(workflow); + + assertNotNull(outcome); + System.out.println("Schedule: " + outcome.tasksToBeScheduled); + System.out.println("Update: " + outcome.tasksToBeUpdated); + + assertEquals(1, outcome.tasksToBeUpdated.size()); + assertEquals(1, outcome.tasksToBeScheduled.size()); + + assertEquals(Task.Status.COMPLETED_WITH_ERRORS, workflow.getTasks().get(0).getStatus()); + assertEquals(task1Id, outcome.tasksToBeUpdated.get(0).getTaskId()); + assertEquals(task2.getTaskReferenceName(), outcome.tasksToBeScheduled.get(0).getReferenceTaskName()); + + } + + @Test + public void testOptionalWithDynamicFork() { + WorkflowDef def = new WorkflowDef(); + def.setName("test"); + + WorkflowTask task1 = new WorkflowTask(); + task1.setName("fork0"); + task1.setWorkflowTaskType(TaskType.FORK_JOIN_DYNAMIC); + task1.setTaskReferenceName("fork0"); + task1.setDynamicForkTasksInputParamName("forkedInputs"); + task1.setDynamicForkTasksParam("forks"); + task1.getInputParameters().put("forks", "${workflow.input.forks}"); + task1.getInputParameters().put("forkedInputs", "${workflow.input.forkedInputs}"); + + WorkflowTask task2 = new WorkflowTask(); + task2.setName("join0"); + task2.setType("JOIN"); + task2.setTaskReferenceName("join0"); + + def.getTasks().add(task1); + 
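Another recurring change visible above: instead of stubbing metadataDAO.getTaskDef(...) for every task name, the tests attach the definition directly to the WorkflowTask. A short sketch of that pattern (the names here are illustrative):

    WorkflowTask task = new WorkflowTask();
    task.setName("example_task");
    task.setTaskReferenceName("t0");
    task.setType("SIMPLE");
    task.setTaskDefinition(new TaskDef("example_task"));  // embedded definition, no DAO lookup needed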
def.getTasks().add(task2); + def.setSchemaVersion(2); + + + Workflow workflow = new Workflow(); + workflow.setWorkflowDefinition(def); + List forks = new LinkedList<>(); + Map> forkedInputs = new HashMap<>(); + + for (int i = 0; i < 3; i++) { + WorkflowTask workflowTask = new WorkflowTask(); + workflowTask.setName("f" + i); + workflowTask.setTaskReferenceName("f" + i); + workflowTask.setWorkflowTaskType(TaskType.SIMPLE); + workflowTask.setOptional(true); + workflowTask.setTaskDefinition(new TaskDef("f" + i)); + forks.add(workflowTask); + + forkedInputs.put(workflowTask.getTaskReferenceName(), new HashMap<>()); + } + workflow.getInput().put("forks", forks); + workflow.getInput().put("forkedInputs", forkedInputs); + + + workflow.setStartTime(System.currentTimeMillis()); + DeciderOutcome outcome = deciderService.decide(workflow); + assertNotNull(outcome); + assertEquals(5, outcome.tasksToBeScheduled.size()); + assertEquals(0, outcome.tasksToBeUpdated.size()); + + assertEquals(SystemTaskType.FORK.name(), outcome.tasksToBeScheduled.get(0).getTaskType()); + assertEquals(Task.Status.COMPLETED, outcome.tasksToBeScheduled.get(0).getStatus()); + for (int i = 1; i < 4; i++) { + assertEquals(Task.Status.SCHEDULED, outcome.tasksToBeScheduled.get(i).getStatus()); + assertEquals("f" + (i - 1), outcome.tasksToBeScheduled.get(i).getTaskDefName()); + outcome.tasksToBeScheduled.get(i).setStatus(Status.FAILED); //let's mark them as failure + } + assertEquals(Task.Status.IN_PROGRESS, outcome.tasksToBeScheduled.get(4).getStatus()); + workflow.getTasks().clear(); + workflow.getTasks().addAll(outcome.tasksToBeScheduled); + + for(Task taskToBeScheduled : outcome.tasksToBeScheduled) { + taskToBeScheduled.setUpdateTime(System.currentTimeMillis()); + } + + outcome = deciderService.decide(workflow); + assertNotNull(outcome); + assertEquals(SystemTaskType.JOIN.name(), outcome.tasksToBeScheduled.get(0).getTaskType()); + for (int i = 1; i < 4; i++) { + assertEquals(Task.Status.COMPLETED_WITH_ERRORS, outcome.tasksToBeUpdated.get(i).getStatus()); + assertEquals("f" + (i - 1), outcome.tasksToBeUpdated.get(i).getTaskDefName()); + } + assertEquals(Task.Status.IN_PROGRESS, outcome.tasksToBeScheduled.get(0).getStatus()); + new Join().execute(workflow, outcome.tasksToBeScheduled.get(0), null); + assertEquals(Task.Status.COMPLETED, outcome.tasksToBeScheduled.get(0).getStatus()); + + outcome.tasksToBeScheduled.stream().map(task -> task.getStatus() + ":" + task.getTaskType() + ":").forEach(System.out::println); + outcome.tasksToBeUpdated.stream().map(task -> task.getStatus() + ":" + task.getTaskType() + ":").forEach(System.out::println); + } + + @Test + public void testDecisionCases() { + WorkflowDef def = new WorkflowDef(); + def.setName("test"); + + WorkflowTask even = new WorkflowTask(); + even.setName("even"); + even.setType("SIMPLE"); + even.setTaskReferenceName("even"); + even.setTaskDefinition(new TaskDef("even")); + + WorkflowTask odd = new WorkflowTask(); + odd.setName("odd"); + odd.setType("SIMPLE"); + odd.setTaskReferenceName("odd"); + odd.setTaskDefinition(new TaskDef("odd")); + + WorkflowTask defaultt = new WorkflowTask(); + defaultt.setName("defaultt"); + defaultt.setType("SIMPLE"); + defaultt.setTaskReferenceName("defaultt"); + defaultt.setTaskDefinition(new TaskDef("defaultt")); + + WorkflowTask decide = new WorkflowTask(); + decide.setName("decide"); + decide.setWorkflowTaskType(TaskType.DECISION); + decide.setTaskReferenceName("d0"); + decide.getInputParameters().put("Id", "${workflow.input.Id}"); + 
decide.getInputParameters().put("location", "${workflow.input.location}"); + decide.setCaseExpression("if ($.Id == null) 'bad input'; else if ( ($.Id != null && $.Id % 2 == 0) || $.location == 'usa') 'even'; else 'odd'; "); + + decide.getDecisionCases().put("even", Arrays.asList(even)); + decide.getDecisionCases().put("odd", Arrays.asList(odd)); + decide.setDefaultCase(Arrays.asList(defaultt)); + + def.getTasks().add(decide); + def.setSchemaVersion(2); + + + Workflow workflow = new Workflow(); + workflow.setWorkflowDefinition(def); + workflow.setStartTime(System.currentTimeMillis()); + DeciderOutcome outcome = deciderService.decide(workflow); + assertNotNull(outcome); + + System.out.println("Schedule after starting: " + outcome.tasksToBeScheduled); + assertEquals(2, outcome.tasksToBeScheduled.size()); + assertEquals(decide.getTaskReferenceName(), outcome.tasksToBeScheduled.get(0).getReferenceTaskName()); + assertEquals(defaultt.getTaskReferenceName(), outcome.tasksToBeScheduled.get(1).getReferenceTaskName()); //default + System.out.println(outcome.tasksToBeScheduled.get(0).getOutputData().get("caseOutput")); + assertEquals(Arrays.asList("bad input"), outcome.tasksToBeScheduled.get(0).getOutputData().get("caseOutput")); + + workflow.getInput().put("Id", 9); + workflow.getInput().put("location", "usa"); + outcome = deciderService.decide(workflow); + assertEquals(2, outcome.tasksToBeScheduled.size()); + assertEquals(decide.getTaskReferenceName(), outcome.tasksToBeScheduled.get(0).getReferenceTaskName()); + assertEquals(even.getTaskReferenceName(), outcome.tasksToBeScheduled.get(1).getReferenceTaskName()); //even because of location == usa + assertEquals(Arrays.asList("even"), outcome.tasksToBeScheduled.get(0).getOutputData().get("caseOutput")); + + workflow.getInput().put("Id", 9); + workflow.getInput().put("location", "canada"); + outcome = deciderService.decide(workflow); + assertEquals(2, outcome.tasksToBeScheduled.size()); + assertEquals(decide.getTaskReferenceName(), outcome.tasksToBeScheduled.get(0).getReferenceTaskName()); + assertEquals(odd.getTaskReferenceName(), outcome.tasksToBeScheduled.get(1).getReferenceTaskName()); //odd + assertEquals(Arrays.asList("odd"), outcome.tasksToBeScheduled.get(0).getOutputData().get("caseOutput")); + } } diff --git a/core/src/test/java/com/netflix/conductor/core/execution/TestDeciderService.java b/core/src/test/java/com/netflix/conductor/core/execution/TestDeciderService.java index 3c627152a2..ca8ef4fbf1 100644 --- a/core/src/test/java/com/netflix/conductor/core/execution/TestDeciderService.java +++ b/core/src/test/java/com/netflix/conductor/core/execution/TestDeciderService.java @@ -1,36 +1,32 @@ /** * Copyright 2016 Netflix, Inc. *

- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
  *
  * http://www.apache.org/licenses/LICENSE-2.0
  *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. */ /** * */ package com.netflix.conductor.core.execution; -import com.fasterxml.jackson.annotation.JsonInclude.Include; -import com.fasterxml.jackson.databind.DeserializationFeature; import com.fasterxml.jackson.databind.ObjectMapper; import com.netflix.conductor.common.metadata.tasks.Task; import com.netflix.conductor.common.metadata.tasks.Task.Status; import com.netflix.conductor.common.metadata.tasks.TaskDef; import com.netflix.conductor.common.metadata.tasks.TaskDef.TimeoutPolicy; import com.netflix.conductor.common.metadata.workflow.SubWorkflowParams; +import com.netflix.conductor.common.metadata.workflow.TaskType; import com.netflix.conductor.common.metadata.workflow.WorkflowDef; import com.netflix.conductor.common.metadata.workflow.WorkflowTask; -import com.netflix.conductor.common.metadata.workflow.WorkflowTask.Type; import com.netflix.conductor.common.run.Workflow; import com.netflix.conductor.common.run.Workflow.WorkflowStatus; +import com.netflix.conductor.common.utils.JsonMapperProvider; import com.netflix.conductor.core.execution.DeciderService.DeciderOutcome; import com.netflix.conductor.core.execution.mapper.DecisionTaskMapper; import com.netflix.conductor.core.execution.mapper.DynamicTaskMapper; @@ -56,6 +52,7 @@ import org.junit.Test; import org.junit.rules.ExpectedException; +import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; import java.util.Arrays; @@ -64,6 +61,7 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -85,31 +83,20 @@ /** * @author Viren + * */ @SuppressWarnings("Duplicates") public class TestDeciderService { - private Workflow workflow; - private DeciderService deciderService; private ParametersUtils parametersUtils; - - private static Registry registry; - private MetadataDAO metadataDAO; - private ExternalPayloadStorageUtils externalPayloadStorageUtils; - private static ObjectMapper objectMapper = new ObjectMapper(); + private static Registry registry; - static { - objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); - objectMapper.configure(DeserializationFeature.FAIL_ON_IGNORED_PROPERTIES, false); - objectMapper.configure(DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES, false); - objectMapper.setSerializationInclusion(Include.NON_NULL); - objectMapper.setSerializationInclusion(Include.NON_EMPTY); - } + private static ObjectMapper objectMapper = new JsonMapperProvider().get(); @Rule public ExpectedException exception = ExpectedException.none(); @@ -123,65 +110,39 @@ public static void init() { @Before public void setup() { metadataDAO = mock(MetadataDAO.class); - QueueDAO queueDAO = mock(QueueDAO.class); externalPayloadStorageUtils = mock(ExternalPayloadStorageUtils.class); 
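The hand-configured static ObjectMapper is dropped in favour of the shared provider referenced above; a one-line sketch of the replacement:

    private static ObjectMapper objectMapper = new JsonMapperProvider().get();

Presumably this keeps the serialization settings (fail-on-unknown-properties, inclusion rules, and so on) in one place instead of repeating the configure(...) calls in every test class.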
+ QueueDAO queueDAO = mock(QueueDAO.class); TaskDef taskDef = new TaskDef(); + WorkflowDef workflowDef = new WorkflowDef(); + workflowDef.setName("TestDeciderService"); + workflowDef.setVersion(1); + when(metadataDAO.getTaskDef(any())).thenReturn(taskDef); - when(metadataDAO.getLatest(any())).thenReturn(workflowDef); + when(metadataDAO.getLatest(any())).thenReturn(Optional.of(workflowDef)); parametersUtils = new ParametersUtils(); Map taskMappers = new HashMap<>(); taskMappers.put("DECISION", new DecisionTaskMapper()); - taskMappers.put("DYNAMIC", new DynamicTaskMapper(parametersUtils, metadataDAO)); + taskMappers.put("DYNAMIC", new DynamicTaskMapper(parametersUtils)); taskMappers.put("FORK_JOIN", new ForkJoinTaskMapper()); taskMappers.put("JOIN", new JoinTaskMapper()); - taskMappers.put("FORK_JOIN_DYNAMIC", new ForkJoinDynamicTaskMapper(parametersUtils, objectMapper)); - taskMappers.put("USER_DEFINED", new UserDefinedTaskMapper(parametersUtils, metadataDAO)); - taskMappers.put("SIMPLE", new SimpleTaskMapper(parametersUtils, metadataDAO)); - taskMappers.put("SUB_WORKFLOW", new SubWorkflowTaskMapper(parametersUtils, metadataDAO)); + taskMappers.put("FORK_JOIN_DYNAMIC", new ForkJoinDynamicTaskMapper(parametersUtils, objectMapper, metadataDAO)); + taskMappers.put("USER_DEFINED", new UserDefinedTaskMapper(parametersUtils)); + taskMappers.put("SIMPLE", new SimpleTaskMapper(parametersUtils)); + taskMappers.put("SUB_WORKFLOW", new SubWorkflowTaskMapper(parametersUtils)); taskMappers.put("EVENT", new EventTaskMapper(parametersUtils)); taskMappers.put("WAIT", new WaitTaskMapper(parametersUtils)); - deciderService = new DeciderService(metadataDAO, parametersUtils, queueDAO, externalPayloadStorageUtils, taskMappers); - - workflow = new Workflow(); - workflow.getInput().put("requestId", "request id 001"); - workflow.getInput().put("hasAwards", true); - workflow.getInput().put("channelMapping", 5); - Map name = new HashMap<>(); - name.put("name", "The Who"); - name.put("year", 1970); - Map name2 = new HashMap<>(); - name2.put("name", "The Doors"); - name2.put("year", 1975); - - List names = new LinkedList<>(); - names.add(name); - names.add(name2); - - workflow.getOutput().put("name", name); - workflow.getOutput().put("names", names); - workflow.getOutput().put("awards", 200); - - Task task = new Task(); - task.setReferenceTaskName("task2"); - task.getOutputData().put("location", "http://location"); - task.setStatus(Status.COMPLETED); - - Task task2 = new Task(); - task2.setReferenceTaskName("task3"); - task2.getOutputData().put("refId", "abcddef_1234_7890_aaffcc"); - task2.setStatus(Status.SCHEDULED); - - workflow.getTasks().add(task); - workflow.getTasks().add(task2); + deciderService = new DeciderService(parametersUtils, queueDAO, externalPayloadStorageUtils, taskMappers); } @Test public void testGetTaskInputV2() { + Workflow workflow = createDefaultWorkflow(); + + workflow.getWorkflowDefinition().setSchemaVersion(2); - workflow.setSchemaVersion(2); Map ip = new HashMap<>(); ip.put("workflowInputParam", "${workflow.input.requestId}"); ip.put("taskOutputParam", "${task2.output.location}"); @@ -208,12 +169,12 @@ public void testGetTaskInputV2() { assertNull(taskInput.get("taskOutputParam3")); assertNull(taskInput.get("nullValue")); assertEquals(workflow.getTasks().get(0).getStatus().name(), taskInput.get("task2Status")); //task2 and task3 are the tasks respectively -// System.out.println(taskInput); - workflow.setSchemaVersion(1); } @Test - public void testGetTaskInputV2Partial() throws Exception { + 
public void testGetTaskInputV2Partial() { + Workflow workflow = createDefaultWorkflow(); + System.setProperty("EC2_INSTANCE", "i-123abcdef990"); Map wfi = new HashMap<>(); Map wfmap = new HashMap<>(); @@ -232,7 +193,7 @@ public void testGetTaskInputV2Partial() throws Exception { wfi.put(ref, io); }); - workflow.setSchemaVersion(2); + workflow.getWorkflowDefinition().setSchemaVersion(2); Map ip = new HashMap<>(); ip.put("workflowInputParam", "${workflow.input.requestId}"); @@ -274,14 +235,11 @@ public void testGetTaskInputV2Partial() throws Exception { assertEquals("The Doors", taskInput.get("secondName")); assertEquals("The Band is: The Doors-\ti-123abcdef990", taskInput.get("concatenatedName")); -// System.out.println(new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT).writeValueAsString(taskInput)); - assertEquals("request id 001", taskInput.get("workflowInputParam")); assertEquals("http://location", taskInput.get("taskOutputParam")); assertNull(taskInput.get("taskOutputParam3")); assertNotNull(taskInput.get("partial")); assertEquals("http://location/something?host=i-123abcdef990", taskInput.get("partial")); - workflow.setSchemaVersion(1); } @SuppressWarnings("unchecked") @@ -306,16 +264,20 @@ public void testGetTaskInput() { json.add(m2); ip.put("complexJson", json); + WorkflowDef def = new WorkflowDef(); + def.setName("testGetTaskInput"); + def.setSchemaVersion(2); + Workflow workflow = new Workflow(); + workflow.setWorkflowDefinition(def); workflow.getInput().put("requestId", "request id 001"); Task task = new Task(); task.setReferenceTaskName("task2"); task.getOutputData().put("location", "http://location"); task.getOutputData().put("isPersonActive", true); workflow.getTasks().add(task); - workflow.setSchemaVersion(2); Map taskInput = parametersUtils.getTaskInput(ip, workflow, null, null); -// System.out.println(taskInput.get("complexJson")); + assertNotNull(taskInput); assertTrue(taskInput.containsKey("workflowInputParam")); assertTrue(taskInput.containsKey("taskOutputParam")); @@ -334,14 +296,18 @@ public void testGetTaskInputV1() { ip.put("workflowInputParam", "workflow.input.requestId"); ip.put("taskOutputParam", "task2.output.location"); + WorkflowDef def = new WorkflowDef(); + def.setSchemaVersion(1); + Workflow workflow = new Workflow(); + workflow.setWorkflowDefinition(def); + workflow.getInput().put("requestId", "request id 001"); Task task = new Task(); task.setReferenceTaskName("task2"); task.getOutputData().put("location", "http://location"); task.getOutputData().put("isPersonActive", true); workflow.getTasks().add(task); - workflow.setSchemaVersion(1); Map taskInput = parametersUtils.getTaskInput(ip, workflow, null, null); assertNotNull(taskInput); @@ -380,7 +346,7 @@ public void testGetNextTask() { WorkflowTask taskAfterT3 = def.getNextTask("t3"); assertNotNull(taskAfterT3); - assertEquals(Type.DECISION.name(), taskAfterT3.getType()); + assertEquals(TaskType.DECISION.name(), taskAfterT3.getType()); assertEquals("d1", taskAfterT3.getTaskReferenceName()); WorkflowTask taskAfterT4 = def.getNextTask("t4"); @@ -414,14 +380,13 @@ public void testCaseStatement() { WorkflowDef def = createConditionalWF(); Workflow wf = new Workflow(); + wf.setWorkflowDefinition(def); wf.setCreateTime(0L); wf.setWorkflowId("a"); wf.setCorrelationId("b"); - wf.setWorkflowType(def.getName()); - wf.setVersion(def.getVersion()); wf.setStatus(WorkflowStatus.RUNNING); - DeciderOutcome outcome = deciderService.decide(wf, def); + DeciderOutcome outcome = deciderService.decide(wf); List 
scheduledTasks = outcome.tasksToBeScheduled; assertNotNull(scheduledTasks); assertEquals(2, scheduledTasks.size()); @@ -520,8 +485,8 @@ public void testTaskTimeout() { @SuppressWarnings("unchecked") @Test public void testConcurrentTaskInputCalc() throws InterruptedException { - TaskDef def = new TaskDef(); + Map inputMap = new HashMap<>(); inputMap.put("path", "${workflow.input.inputLocation}"); inputMap.put("type", "${workflow.input.sourceType}"); @@ -550,7 +515,13 @@ public void testConcurrentTaskInputCalc() throws InterruptedException { workflowInput.put("inputLocation", "baggins://inputlocation/" + x); workflowInput.put("sourceType", "MuxedSource"); workflowInput.put("channelMapping", x); + + WorkflowDef workflowDef = new WorkflowDef(); + workflowDef.setName("testConcurrentTaskInputCalc"); + workflowDef.setVersion(1); + Workflow workflow = new Workflow(); + workflow.setWorkflowDefinition(workflowDef); workflow.setInput(workflowInput); Map taskInput = parametersUtils.getTaskInputV2(new HashMap<>(), workflow, null, def); @@ -563,7 +534,6 @@ public void testConcurrentTaskInputCalc() throws InterruptedException { Object cmObj = reqInput.get(0).get("channelMapping"); assertNotNull(cmObj); if (!(cmObj instanceof Number)) { -// System.out.println("Not a number @ " + x + ", found: " + cmObj.getClass()); result[x] = -1; } else { Number channelMapping = (Number) cmObj; @@ -586,15 +556,15 @@ public void testConcurrentTaskInputCalc() throws InterruptedException { for (int i = 0; i < result.length; i++) { assertEquals(i, result[i]); } -// System.out.println("Done"); } @SuppressWarnings("unchecked") @Test public void testTaskRetry() { + Workflow workflow = createDefaultWorkflow(); - workflow.setSchemaVersion(2); + workflow.getWorkflowDefinition().setSchemaVersion(2); Map inputParams = new HashMap<>(); inputParams.put("workflowInputParam", "${workflow.input.requestId}"); @@ -621,8 +591,12 @@ public void testTaskRetry() { workflowTask.getInputParameters().put("env", env); Task task2 = deciderService.retry(taskDef, workflowTask, task, workflow); + System.out.println(task.getTaskId() + ":\n" + task.getInputData()); + System.out.println(task2.getTaskId() + ":\n" + task2.getInputData()); + assertEquals("t1", task.getInputData().get("task_id")); assertEquals("t1", ((Map) task.getInputData().get("env")).get("env_task_id")); + assertNotSame(task.getTaskId(), task2.getTaskId()); assertEquals(task2.getTaskId(), task2.getInputData().get("task_id")); assertEquals(task2.getTaskId(), ((Map) task2.getInputData().get("env")).get("env_task_id")); @@ -631,22 +605,17 @@ public void testTaskRetry() { task3.getInputData().putAll(taskInput); task3.setStatus(Status.FAILED_WITH_TERMINAL_ERROR); task3.setTaskId("t1"); - - when(metadataDAO.get(anyString(), anyInt())).thenReturn(new WorkflowDef()); + when(metadataDAO.get(anyString(), anyInt())).thenReturn(Optional.of(new WorkflowDef())); exception.expect(TerminateWorkflowException.class); deciderService.retry(taskDef, workflowTask, task3, workflow); - } @Test - public void testFork() throws Exception { + public void testFork() throws IOException { InputStream stream = TestDeciderService.class.getResourceAsStream("/test.json"); Workflow workflow = objectMapper.readValue(stream, Workflow.class); - InputStream defs = TestDeciderService.class.getResourceAsStream("/def.json"); - WorkflowDef def = objectMapper.readValue(defs, WorkflowDef.class); - - DeciderOutcome outcome = deciderService.decide(workflow, def); + DeciderOutcome outcome = deciderService.decide(workflow); 
assertFalse(outcome.isComplete); assertEquals(5, outcome.tasksToBeScheduled.size()); assertEquals(1, outcome.tasksToBeUpdated.size()); @@ -657,8 +626,7 @@ public void testDecideSuccessfulWorkflow() { WorkflowDef workflowDef = createLinearWorkflow(); Workflow workflow = new Workflow(); - workflow.setWorkflowType(workflowDef.getName()); - workflow.setVersion(workflowDef.getVersion()); + workflow.setWorkflowDefinition(workflowDef); workflow.setStatus(WorkflowStatus.RUNNING); Task task1 = new Task(); @@ -671,9 +639,9 @@ public void testDecideSuccessfulWorkflow() { workflow.getTasks().add(task1); - DeciderOutcome deciderOutcome = deciderService.decide(workflow, workflowDef); + DeciderOutcome deciderOutcome = deciderService.decide(workflow); assertNotNull(deciderOutcome); - assertTrue(workflow.getTaskByRefName("s1").isExecuted()); + assertFalse(workflow.getTaskByRefName("s1").isRetried()); assertEquals(1, deciderOutcome.tasksToBeUpdated.size()); assertEquals("s1", deciderOutcome.tasksToBeUpdated.get(0).getReferenceTaskName()); @@ -691,7 +659,7 @@ public void testDecideSuccessfulWorkflow() { task2.setStatus(Status.COMPLETED); workflow.getTasks().add(task2); - deciderOutcome = deciderService.decide(workflow, workflowDef); + deciderOutcome = deciderService.decide(workflow); assertNotNull(deciderOutcome); assertTrue(workflow.getTaskByRefName("s2").isExecuted()); assertFalse(workflow.getTaskByRefName("s2").isRetried()); @@ -707,8 +675,7 @@ public void testDecideFailedTask() { WorkflowDef workflowDef = createLinearWorkflow(); Workflow workflow = new Workflow(); - workflow.setWorkflowType(workflowDef.getName()); - workflow.setVersion(workflowDef.getVersion()); + workflow.setWorkflowDefinition(workflowDef); workflow.setStatus(WorkflowStatus.RUNNING); Task task = new Task(); @@ -719,9 +686,15 @@ public void testDecideFailedTask() { task.setExecuted(false); task.setStatus(Status.FAILED); + WorkflowTask workflowTask = new WorkflowTask(); + workflowTask.setTaskReferenceName("s1"); + workflowTask.setName("junit_task_l1"); + workflowTask.setTaskDefinition(new TaskDef("junit_task_l1")); + task.setWorkflowTask(workflowTask); + workflow.getTasks().add(task); - DeciderOutcome deciderOutcome = deciderService.decide(workflow, workflowDef); + DeciderOutcome deciderOutcome = deciderService.decide(workflow); assertNotNull(deciderOutcome); assertFalse(workflow.getTaskByRefName("s1").isExecuted()); assertTrue(workflow.getTaskByRefName("s1").isRetried()); @@ -738,30 +711,33 @@ public void testGetTasksToBeScheduled() { WorkflowDef workflowDef = createLinearWorkflow(); Workflow workflow = new Workflow(); - workflow.setWorkflowType(workflowDef.getName()); - workflow.setVersion(workflowDef.getVersion()); + workflow.setWorkflowDefinition(workflowDef); workflow.setStatus(WorkflowStatus.RUNNING); WorkflowTask workflowTask1 = new WorkflowTask(); + workflowTask1.setName("s1"); workflowTask1.setTaskReferenceName("s1"); - workflowTask1.setType(Type.SIMPLE.name()); + workflowTask1.setType(TaskType.SIMPLE.name()); + workflowTask1.setTaskDefinition(new TaskDef("s1")); - List tasksToBeScheduled = deciderService.getTasksToBeScheduled(workflowDef, workflow, workflowTask1, 0, null); + List tasksToBeScheduled = deciderService.getTasksToBeScheduled(workflow, workflowTask1, 0, null); assertNotNull(tasksToBeScheduled); assertEquals(1, tasksToBeScheduled.size()); assertEquals("s1", tasksToBeScheduled.get(0).getReferenceTaskName()); WorkflowTask workflowTask2 = new WorkflowTask(); + workflowTask2.setName("s2"); 
workflowTask2.setTaskReferenceName("s2"); - workflowTask2.setType(Type.SIMPLE.name()); - tasksToBeScheduled = deciderService.getTasksToBeScheduled(workflowDef, workflow, workflowTask2, 0, null); + workflowTask2.setType(TaskType.SIMPLE.name()); + workflowTask2.setTaskDefinition(new TaskDef("s2")); + tasksToBeScheduled = deciderService.getTasksToBeScheduled(workflow, workflowTask2, 0, null); assertNotNull(tasksToBeScheduled); assertEquals(1, tasksToBeScheduled.size()); assertEquals("s2", tasksToBeScheduled.get(0).getReferenceTaskName()); } @Test - public void testIsResponseTimedOut() { + public void testIsResponsedTimeOut() { TaskDef taskDef = new TaskDef(); taskDef.setName("test_rt"); taskDef.setResponseTimeoutSeconds(10); @@ -782,63 +758,52 @@ public void testPopulateWorkflowAndTaskData() { String workflowInputPath = "workflow/input/test.json"; String taskInputPath = "task/input/test.json"; String taskOutputPath = "task/output/test.json"; - Map workflowParams = new HashMap<>(); workflowParams.put("key1", "value1"); workflowParams.put("key2", 100); when(externalPayloadStorageUtils.downloadPayload(workflowInputPath)).thenReturn(workflowParams); - Map taskInputParams = new HashMap<>(); taskInputParams.put("key", "taskInput"); when(externalPayloadStorageUtils.downloadPayload(taskInputPath)).thenReturn(taskInputParams); - Map taskOutputParams = new HashMap<>(); taskOutputParams.put("key", "taskOutput"); when(externalPayloadStorageUtils.downloadPayload(taskOutputPath)).thenReturn(taskOutputParams); - Task task = new Task(); task.setExternalInputPayloadStoragePath(taskInputPath); task.setExternalOutputPayloadStoragePath(taskOutputPath); - Workflow workflow = new Workflow(); workflow.setExternalInputPayloadStoragePath(workflowInputPath); workflow.getTasks().add(task); - Workflow workflowInstance = deciderService.populateWorkflowAndTaskData(workflow); assertNotNull(workflowInstance); - assertTrue(workflow.getInput().isEmpty()); assertNotNull(workflowInstance.getInput()); assertEquals(workflowParams, workflowInstance.getInput()); - assertTrue(workflow.getTasks().get(0).getInputData().isEmpty()); assertNotNull(workflowInstance.getTasks().get(0).getInputData()); assertEquals(taskInputParams, workflowInstance.getTasks().get(0).getInputData()); - assertTrue(workflow.getTasks().get(0).getOutputData().isEmpty()); assertNotNull(workflowInstance.getTasks().get(0).getOutputData()); assertEquals(taskOutputParams, workflowInstance.getTasks().get(0).getOutputData()); - assertNull(workflowInstance.getExternalInputPayloadStoragePath()); assertNull(workflowInstance.getTasks().get(0).getExternalInputPayloadStoragePath()); assertNull(workflowInstance.getTasks().get(0).getExternalOutputPayloadStoragePath()); } - @SuppressWarnings("unchecked") @Test public void testUpdateWorkflowOutput() { Workflow workflow = new Workflow(); + workflow.setWorkflowDefinition(new WorkflowDef()); deciderService.updateWorkflowOutput(workflow, null); assertNotNull(workflow.getOutput()); assertTrue(workflow.getOutput().isEmpty()); - Task task = new Task(); Map taskOutput = new HashMap<>(); taskOutput.put("taskKey", "taskValue"); task.setOutputData(taskOutput); workflow.getTasks().add(task); WorkflowDef workflowDef = new WorkflowDef(); - when(metadataDAO.get(anyString(), anyInt())).thenReturn(workflowDef); + when(metadataDAO.get(anyString(), anyInt())).thenReturn(Optional.of(workflowDef)); deciderService.updateWorkflowOutput(workflow, null); assertNotNull(workflow.getOutput()); assertEquals("taskValue", workflow.getOutput().get("taskKey")); 
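Several mocks above switch from returning a bare WorkflowDef to returning an Optional, reflecting the new MetadataDAO contract. A small sketch of the mock and of how a caller might unwrap it (the orElseThrow handling is an assumption for illustration):

    import java.util.Optional;

    when(metadataDAO.get(anyString(), anyInt())).thenReturn(Optional.of(new WorkflowDef()));
    when(metadataDAO.getLatest(any())).thenReturn(Optional.of(workflowDef));

    WorkflowDef resolved = metadataDAO.get("some_workflow", 1)
            .orElseThrow(() -> new RuntimeException("No workflow definition found for some_workflow"));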
@@ -853,6 +818,7 @@ private WorkflowDef createConditionalWF() { inputParams1.put("p2", "workflow.input.param2"); workflowTask1.setInputParameters(inputParams1); workflowTask1.setTaskReferenceName("t1"); + workflowTask1.setTaskDefinition(new TaskDef("junit_task_1")); WorkflowTask workflowTask2 = new WorkflowTask(); workflowTask2.setName("junit_task_2"); @@ -860,6 +826,7 @@ private WorkflowDef createConditionalWF() { inputParams2.put("tp1", "workflow.input.param1"); workflowTask2.setInputParameters(inputParams2); workflowTask2.setTaskReferenceName("t2"); + workflowTask2.setTaskDefinition(new TaskDef("junit_task_2")); WorkflowTask workflowTask3 = new WorkflowTask(); workflowTask3.setName("junit_task_3"); @@ -867,6 +834,7 @@ private WorkflowDef createConditionalWF() { inputParams2.put("tp3", "workflow.input.param2"); workflowTask3.setInputParameters(inputParams3); workflowTask3.setTaskReferenceName("t3"); + workflowTask3.setTaskDefinition(new TaskDef("junit_task_3")); WorkflowDef workflowDef = new WorkflowDef(); workflowDef.setName("Conditional Workflow"); @@ -874,7 +842,7 @@ private WorkflowDef createConditionalWF() { workflowDef.setInputParameters(Arrays.asList("param1", "param2")); WorkflowTask decisionTask2 = new WorkflowTask(); - decisionTask2.setType(Type.DECISION.name()); + decisionTask2.setType(TaskType.DECISION.name()); decisionTask2.setCaseValueParam("case"); decisionTask2.setName("conditional2"); decisionTask2.setTaskReferenceName("conditional2"); @@ -886,7 +854,7 @@ private WorkflowDef createConditionalWF() { WorkflowTask decisionTask = new WorkflowTask(); - decisionTask.setType(Type.DECISION.name()); + decisionTask.setType(TaskType.DECISION.name()); decisionTask.setCaseValueParam("case"); decisionTask.setName("conditional"); decisionTask.setTaskReferenceName("conditional"); @@ -901,11 +869,12 @@ private WorkflowDef createConditionalWF() { WorkflowTask notifyTask = new WorkflowTask(); notifyTask.setName("junit_task_4"); notifyTask.setTaskReferenceName("junit_task_4"); + notifyTask.setTaskDefinition(new TaskDef("junit_task_4")); WorkflowTask finalDecisionTask = new WorkflowTask(); finalDecisionTask.setName("finalcondition"); finalDecisionTask.setTaskReferenceName("tf"); - finalDecisionTask.setType(Type.DECISION.name()); + finalDecisionTask.setType(TaskType.DECISION.name()); finalDecisionTask.setCaseValueParam("finalCase"); Map fi = new HashMap<>(); fi.put("finalCase", "workflow.input.finalCase"); @@ -926,11 +895,13 @@ private WorkflowDef createLinearWorkflow() { workflowTask1.setName("junit_task_l1"); workflowTask1.setInputParameters(inputParams); workflowTask1.setTaskReferenceName("s1"); + workflowTask1.setTaskDefinition(new TaskDef("junit_task_l1")); WorkflowTask workflowTask2 = new WorkflowTask(); workflowTask2.setName("junit_task_l2"); workflowTask2.setInputParameters(inputParams); workflowTask2.setTaskReferenceName("s2"); + workflowTask2.setTaskDefinition(new TaskDef("junit_task_l2")); WorkflowDef workflowDef = new WorkflowDef(); workflowDef.setSchemaVersion(2); @@ -941,6 +912,49 @@ private WorkflowDef createLinearWorkflow() { return workflowDef; } + private Workflow createDefaultWorkflow() { + + WorkflowDef workflowDef = new WorkflowDef(); + workflowDef.setName("TestDeciderService"); + workflowDef.setVersion(1); + + Workflow workflow = new Workflow(); + workflow.setWorkflowDefinition(workflowDef); + + workflow.getInput().put("requestId", "request id 001"); + workflow.getInput().put("hasAwards", true); + workflow.getInput().put("channelMapping", 5); + Map name = new 
HashMap<>(); + name.put("name", "The Who"); + name.put("year", 1970); + Map name2 = new HashMap<>(); + name2.put("name", "The Doors"); + name2.put("year", 1975); + + List names = new LinkedList<>(); + names.add(name); + names.add(name2); + + workflow.getOutput().put("name", name); + workflow.getOutput().put("names", names); + workflow.getOutput().put("awards", 200); + + Task task = new Task(); + task.setReferenceTaskName("task2"); + task.getOutputData().put("location", "http://location"); + task.setStatus(Status.COMPLETED); + + Task task2 = new Task(); + task2.setReferenceTaskName("task3"); + task2.getOutputData().put("refId", "abcddef_1234_7890_aaffcc"); + task2.setStatus(Status.SCHEDULED); + + workflow.getTasks().add(task); + workflow.getTasks().add(task2); + + return workflow; + } + private WorkflowDef createNestedWorkflow() { WorkflowDef workflowDef = new WorkflowDef(); @@ -960,11 +974,12 @@ private WorkflowDef createNestedWorkflow() { workflowTask.setName("junit_task_" + i); workflowTask.setInputParameters(inputParams); workflowTask.setTaskReferenceName("t" + i); + workflowTask.setTaskDefinition(new TaskDef("junit_task_" + i)); tasks.add(workflowTask); } WorkflowTask decisionTask = new WorkflowTask(); - decisionTask.setType(Type.DECISION.name()); + decisionTask.setType(TaskType.DECISION.name()); decisionTask.setName("Decision"); decisionTask.setTaskReferenceName("d1"); decisionTask.setDefaultCase(Collections.singletonList(tasks.get(8))); @@ -976,26 +991,26 @@ private WorkflowDef createNestedWorkflow() { WorkflowDef subWorkflowDef = createLinearWorkflow(); WorkflowTask subWorkflow = new WorkflowTask(); - subWorkflow.setType(Type.SUB_WORKFLOW.name()); + subWorkflow.setType(TaskType.SUB_WORKFLOW.name()); SubWorkflowParams subWorkflowParams = new SubWorkflowParams(); subWorkflowParams.setName(subWorkflowDef.getName()); subWorkflow.setSubWorkflowParam(subWorkflowParams); subWorkflow.setTaskReferenceName("sw1"); WorkflowTask forkTask2 = new WorkflowTask(); - forkTask2.setType(Type.FORK_JOIN.name()); + forkTask2.setType(TaskType.FORK_JOIN.name()); forkTask2.setName("second fork"); forkTask2.setTaskReferenceName("fork2"); forkTask2.getForkTasks().add(Arrays.asList(tasks.get(2), tasks.get(4))); forkTask2.getForkTasks().add(Arrays.asList(tasks.get(3), decisionTask)); WorkflowTask joinTask2 = new WorkflowTask(); - joinTask2.setType(Type.JOIN.name()); + joinTask2.setType(TaskType.JOIN.name()); joinTask2.setTaskReferenceName("join2"); joinTask2.setJoinOn(Arrays.asList("t4", "d1")); WorkflowTask forkTask1 = new WorkflowTask(); - forkTask1.setType(Type.FORK_JOIN.name()); + forkTask1.setType(TaskType.FORK_JOIN.name()); forkTask1.setTaskReferenceName("fork1"); forkTask1.getForkTasks().add(Collections.singletonList(tasks.get(1))); forkTask1.getForkTasks().add(Arrays.asList(forkTask2, joinTask2)); @@ -1003,7 +1018,7 @@ private WorkflowDef createNestedWorkflow() { WorkflowTask joinTask1 = new WorkflowTask(); - joinTask1.setType(Type.JOIN.name()); + joinTask1.setType(TaskType.JOIN.name()); joinTask1.setTaskReferenceName("join1"); joinTask1.setJoinOn(Arrays.asList("t1", "fork2")); @@ -1013,4 +1028,5 @@ private WorkflowDef createNestedWorkflow() { return workflowDef; } + } diff --git a/core/src/test/java/com/netflix/conductor/core/execution/TestWorkflowDef.java b/core/src/test/java/com/netflix/conductor/core/execution/TestWorkflowDef.java index 0eb5b9caff..5282837cdc 100644 --- a/core/src/test/java/com/netflix/conductor/core/execution/TestWorkflowDef.java +++ 
b/core/src/test/java/com/netflix/conductor/core/execution/TestWorkflowDef.java @@ -31,7 +31,7 @@ import com.netflix.conductor.common.metadata.workflow.WorkflowDef; import com.netflix.conductor.common.metadata.workflow.WorkflowTask; -import com.netflix.conductor.common.metadata.workflow.WorkflowTask.Type; +import com.netflix.conductor.common.metadata.workflow.TaskType; /** * @author Viren @@ -50,7 +50,7 @@ public void test(){ WorkflowTask task3 = create("decision_task_1"); def.getTasks().add(task3); - task3.setType(Type.DECISION.name()); + task3.setType(TaskType.DECISION.name()); task3.getDecisionCases().put("Case1", Arrays.asList(create("case_1_task_1"), create("case_1_task_2"))); task3.getDecisionCases().put("Case2", Arrays.asList(create("case_2_task_1"), create("case_2_task_2"))); task3.getDecisionCases().put("Case3", Arrays.asList(deciderTask("decision_task_2", toMap("Case31", "case31_task_1", "case_31_task_2"), Arrays.asList("case3_def_task")))); @@ -98,7 +98,7 @@ private WorkflowTask create(String name){ private WorkflowTask deciderTask(String name, Map> decisions, List defaultTasks){ WorkflowTask task = create(name); - task.setType(Type.DECISION.name()); + task.setType(TaskType.DECISION.name()); decisions.entrySet().forEach(e -> { List tasks = new LinkedList<>(); e.getValue().forEach(taskName -> tasks.add(create(taskName))); diff --git a/core/src/test/java/com/netflix/conductor/core/execution/TestWorkflowExecutor.java b/core/src/test/java/com/netflix/conductor/core/execution/TestWorkflowExecutor.java index 87a4e13c25..d5ec4a0096 100644 --- a/core/src/test/java/com/netflix/conductor/core/execution/TestWorkflowExecutor.java +++ b/core/src/test/java/com/netflix/conductor/core/execution/TestWorkflowExecutor.java @@ -21,9 +21,9 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.netflix.conductor.common.metadata.tasks.Task; import com.netflix.conductor.common.metadata.tasks.Task.Status; +import com.netflix.conductor.common.metadata.workflow.TaskType; import com.netflix.conductor.common.metadata.workflow.WorkflowDef; import com.netflix.conductor.common.metadata.workflow.WorkflowTask; -import com.netflix.conductor.common.metadata.workflow.WorkflowTask.Type; import com.netflix.conductor.common.run.Workflow; import com.netflix.conductor.core.execution.mapper.DecisionTaskMapper; import com.netflix.conductor.core.execution.mapper.DynamicTaskMapper; @@ -38,6 +38,7 @@ import com.netflix.conductor.core.execution.mapper.WaitTaskMapper; import com.netflix.conductor.core.execution.tasks.Wait; import com.netflix.conductor.core.execution.tasks.WorkflowSystemTask; +import com.netflix.conductor.core.metadata.MetadataMapperService; import com.netflix.conductor.core.utils.ExternalPayloadStorageUtils; import com.netflix.conductor.core.utils.IDGenerator; import com.netflix.conductor.dao.ExecutionDAO; @@ -53,6 +54,7 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -89,17 +91,19 @@ public void init() { ParametersUtils parametersUtils = new ParametersUtils(); Map taskMappers = new HashMap<>(); taskMappers.put("DECISION", new DecisionTaskMapper()); - taskMappers.put("DYNAMIC", new DynamicTaskMapper(parametersUtils, metadataDAO)); + taskMappers.put("DYNAMIC", new DynamicTaskMapper(parametersUtils)); taskMappers.put("FORK_JOIN", new ForkJoinTaskMapper()); taskMappers.put("JOIN", new JoinTaskMapper()); - 
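The nested WorkflowTask.Type enum gives way to the top-level TaskType across these test files; the mechanical change is an import swap plus the new qualifier, for example:

    // before
    import com.netflix.conductor.common.metadata.workflow.WorkflowTask.Type;
    task.setType(Type.DECISION.name());

    // after
    import com.netflix.conductor.common.metadata.workflow.TaskType;
    task.setType(TaskType.DECISION.name());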
taskMappers.put("FORK_JOIN_DYNAMIC", new ForkJoinDynamicTaskMapper(parametersUtils, objectMapper)); - taskMappers.put("USER_DEFINED", new UserDefinedTaskMapper(parametersUtils, metadataDAO)); - taskMappers.put("SIMPLE", new SimpleTaskMapper(parametersUtils, metadataDAO)); - taskMappers.put("SUB_WORKFLOW", new SubWorkflowTaskMapper(parametersUtils, metadataDAO)); + taskMappers.put("FORK_JOIN_DYNAMIC", new ForkJoinDynamicTaskMapper(parametersUtils, objectMapper, metadataDAO)); + taskMappers.put("USER_DEFINED", new UserDefinedTaskMapper(parametersUtils)); + taskMappers.put("SIMPLE", new SimpleTaskMapper(parametersUtils)); + taskMappers.put("SUB_WORKFLOW", new SubWorkflowTaskMapper(parametersUtils)); taskMappers.put("EVENT", new EventTaskMapper(parametersUtils)); taskMappers.put("WAIT", new WaitTaskMapper(parametersUtils)); - DeciderService deciderService = new DeciderService(metadataDAO, parametersUtils, queueDAO, externalPayloadStorageUtils, taskMappers); - workflowExecutor = new WorkflowExecutor(deciderService, metadataDAO, executionDAO, queueDAO, parametersUtils, config); + + DeciderService deciderService = new DeciderService(parametersUtils, queueDAO, externalPayloadStorageUtils, taskMappers); + MetadataMapperService metadataMapperService = new MetadataMapperService(metadataDAO); + workflowExecutor = new WorkflowExecutor(deciderService, metadataDAO, executionDAO, queueDAO, metadataMapperService, parametersUtils, config); } @Test @@ -139,15 +143,15 @@ public void start(Workflow workflow, Task task, WorkflowExecutor executor) { List tasks = new LinkedList<>(); WorkflowTask taskToSchedule = new WorkflowTask(); - taskToSchedule.setWorkflowTaskType(Type.USER_DEFINED); + taskToSchedule.setWorkflowTaskType(TaskType.USER_DEFINED); taskToSchedule.setType("HTTP"); WorkflowTask taskToSchedule2 = new WorkflowTask(); - taskToSchedule2.setWorkflowTaskType(Type.USER_DEFINED); + taskToSchedule2.setWorkflowTaskType(TaskType.USER_DEFINED); taskToSchedule2.setType("HTTP2"); WorkflowTask wait = new WorkflowTask(); - wait.setWorkflowTaskType(Type.WAIT); + wait.setWorkflowTaskType(TaskType.WAIT); wait.setType("WAIT"); wait.setTaskReferenceName("wait"); @@ -226,9 +230,12 @@ public void start(Workflow workflow, Task task, WorkflowExecutor executor) { @Test @SuppressWarnings("unchecked") public void testCompleteWorkflow() { + WorkflowDef def = new WorkflowDef(); + def.setName("test"); + Workflow workflow = new Workflow(); + workflow.setWorkflowDefinition(def); workflow.setWorkflowId("1"); - workflow.setWorkflowType("test"); workflow.setStatus(Workflow.WorkflowStatus.RUNNING); workflow.setOwnerApp("junit_test"); workflow.setStartTime(10L); @@ -369,6 +376,7 @@ public void testRetryWorkflow() { Workflow workflow = new Workflow(); workflow.setWorkflowId("testRetryWorkflowId"); workflow.setWorkflowType("testRetryWorkflowId"); + workflow.setVersion(1); workflow.setOwnerApp("junit_testRetryWorkflowId"); workflow.setStartTime(10L); workflow.setEndTime(100L); @@ -403,7 +411,7 @@ public void testRetryWorkflow() { Task task_1_1 = new Task(); task_1_1.setTaskId(UUID.randomUUID().toString()); task_1_1.setSeq(20); - task_1_1.setTaskType(Type.SIMPLE.toString()); + task_1_1.setTaskType(TaskType.SIMPLE.toString()); task_1_1.setStatus(Status.CANCELED); task_1_1.setTaskDefName("task1"); task_1_1.setReferenceTaskName("task1_ref1"); @@ -411,7 +419,7 @@ public void testRetryWorkflow() { Task task_1_2 = new Task(); task_1_2.setTaskId(UUID.randomUUID().toString()); task_1_2.setSeq(21); - task_1_2.setTaskType(Type.SIMPLE.toString()); + 
task_1_2.setTaskType(TaskType.SIMPLE.toString()); task_1_2.setStatus(Status.FAILED); task_1_2.setTaskDefName("task1"); task_1_2.setReferenceTaskName("task1_ref1"); @@ -420,7 +428,7 @@ public void testRetryWorkflow() { task_2_1.setTaskId(UUID.randomUUID().toString()); task_2_1.setSeq(22); task_2_1.setStatus(Status.FAILED); - task_2_1.setTaskType(Type.SIMPLE.toString()); + task_2_1.setTaskType(TaskType.SIMPLE.toString()); task_2_1.setTaskDefName("task2"); task_2_1.setReferenceTaskName("task2_ref1"); @@ -429,7 +437,7 @@ public void testRetryWorkflow() { task_3_1.setTaskId(UUID.randomUUID().toString()); task_3_1.setSeq(23); task_3_1.setStatus(Status.CANCELED); - task_3_1.setTaskType(Type.SIMPLE.toString()); + task_3_1.setTaskType(TaskType.SIMPLE.toString()); task_3_1.setTaskDefName("task3"); task_3_1.setReferenceTaskName("task3_ref1"); @@ -437,7 +445,7 @@ public void testRetryWorkflow() { task_4_1.setTaskId(UUID.randomUUID().toString()); task_4_1.setSeq(122); task_4_1.setStatus(Status.FAILED); - task_4_1.setTaskType(Type.SIMPLE.toString()); + task_4_1.setTaskType(TaskType.SIMPLE.toString()); task_4_1.setTaskDefName("task1"); task_4_1.setReferenceTaskName("task4_refABC"); @@ -447,7 +455,7 @@ public void testRetryWorkflow() { //when: when(executionDAO.getWorkflow(anyString(), anyBoolean())).thenReturn(workflow); WorkflowDef workflowDef = new WorkflowDef(); - when(metadataDAO.get(anyString(), anyInt())).thenReturn(workflowDef); + when(metadataDAO.get(anyString(), anyInt())).thenReturn(Optional.of(workflowDef)); workflowExecutor.retry(workflow.getWorkflowId()); diff --git a/core/src/test/java/com/netflix/conductor/core/execution/mapper/DecisionTaskMapperTest.java b/core/src/test/java/com/netflix/conductor/core/execution/mapper/DecisionTaskMapperTest.java index 0b0dde29fc..9be10afc9d 100644 --- a/core/src/test/java/com/netflix/conductor/core/execution/mapper/DecisionTaskMapperTest.java +++ b/core/src/test/java/com/netflix/conductor/core/execution/mapper/DecisionTaskMapperTest.java @@ -2,6 +2,7 @@ import com.netflix.conductor.common.metadata.tasks.Task; import com.netflix.conductor.common.metadata.tasks.TaskDef; +import com.netflix.conductor.common.metadata.workflow.TaskType; import com.netflix.conductor.common.metadata.workflow.WorkflowDef; import com.netflix.conductor.common.metadata.workflow.WorkflowTask; import com.netflix.conductor.common.run.Workflow; @@ -78,7 +79,7 @@ public void getMappedTasks() { //Decision task instance WorkflowTask decisionTask = new WorkflowTask(); - decisionTask.setType(WorkflowTask.Type.DECISION.name()); + decisionTask.setType(TaskType.DECISION.name()); decisionTask.setName("Decision"); decisionTask.setTaskReferenceName("decisionTask"); decisionTask.setDefaultCase(Arrays.asList(task1)); @@ -90,11 +91,14 @@ public void getMappedTasks() { decisionCases.put("odd", Arrays.asList(task3)); decisionTask.setDecisionCases(decisionCases); //Workflow instance + WorkflowDef workflowDef = new WorkflowDef(); + workflowDef.setSchemaVersion(2); + Workflow workflowInstance = new Workflow(); + workflowInstance.setWorkflowDefinition(workflowDef); Map workflowInput = new HashMap<>(); workflowInput.put("Id", "22"); workflowInstance.setInput(workflowInput); - workflowInstance.setSchemaVersion(2); Map body = new HashMap<>(); body.put("input", taskDefinitionInput); @@ -104,12 +108,11 @@ public void getMappedTasks() { workflowInstance, null, null); - WorkflowDef workflowDef = new WorkflowDef(); Task theTask = new Task(); theTask.setReferenceTaskName("Foo"); 
theTask.setTaskId(IDGenerator.generate()); - when(deciderService.getTasksToBeScheduled(workflowDef, workflowInstance, task2, 0, null)) + when(deciderService.getTasksToBeScheduled(workflowInstance, task2, 0, null)) .thenReturn(Arrays.asList(theTask)); TaskMapperContext taskMapperContext = TaskMapperContext.newBuilder() @@ -136,7 +139,7 @@ public void getMappedTasks() { public void getEvaluatedCaseValue() { WorkflowTask decisionTask = new WorkflowTask(); - decisionTask.setType(WorkflowTask.Type.DECISION.name()); + decisionTask.setType(TaskType.DECISION.name()); decisionTask.setName("Decision"); decisionTask.setTaskReferenceName("decisionTask"); decisionTask.setInputParameters(ip1); @@ -148,6 +151,7 @@ public void getEvaluatedCaseValue() { decisionTask.setDecisionCases(decisionCases); Workflow workflowInstance = new Workflow(); + workflowInstance.setWorkflowDefinition(new WorkflowDef()); Map workflowInput = new HashMap<>(); workflowInput.put("p1", "workflow.input.param1"); workflowInput.put("p2", "workflow.input.param2"); @@ -173,7 +177,7 @@ public void getEvaluatedCaseValueUsingExpression() { //Decision task instance WorkflowTask decisionTask = new WorkflowTask(); - decisionTask.setType(WorkflowTask.Type.DECISION.name()); + decisionTask.setType(TaskType.DECISION.name()); decisionTask.setName("Decision"); decisionTask.setTaskReferenceName("decisionTask"); decisionTask.setDefaultCase(Arrays.asList(task1)); @@ -184,12 +188,16 @@ public void getEvaluatedCaseValueUsingExpression() { decisionCases.put("even", Arrays.asList(task2)); decisionCases.put("odd", Arrays.asList(task3)); decisionTask.setDecisionCases(decisionCases); + //Workflow instance + WorkflowDef def = new WorkflowDef(); + def.setSchemaVersion(2); + Workflow workflowInstance = new Workflow(); + workflowInstance.setWorkflowDefinition(def); Map workflowInput = new HashMap<>(); workflowInput.put("Id", "22"); workflowInstance.setInput(workflowInput); - workflowInstance.setSchemaVersion(2); Map body = new HashMap<>(); body.put("input", taskDefinitionInput); @@ -215,7 +223,7 @@ public void getEvaluatedCaseValueException() { //Decision task instance WorkflowTask decisionTask = new WorkflowTask(); - decisionTask.setType(WorkflowTask.Type.DECISION.name()); + decisionTask.setType(TaskType.DECISION.name()); decisionTask.setName("Decision"); decisionTask.setTaskReferenceName("decisionTask"); decisionTask.setDefaultCase(Arrays.asList(task1)); @@ -226,12 +234,16 @@ public void getEvaluatedCaseValueException() { decisionCases.put("even", Arrays.asList(task2)); decisionCases.put("odd", Arrays.asList(task3)); decisionTask.setDecisionCases(decisionCases); + //Workflow instance + WorkflowDef def = new WorkflowDef(); + def.setSchemaVersion(2); + Workflow workflowInstance = new Workflow(); + workflowInstance.setWorkflowDefinition(def); Map workflowInput = new HashMap<>(); workflowInput.put(".Id", "22"); workflowInstance.setInput(workflowInput); - workflowInstance.setSchemaVersion(2); Map body = new HashMap<>(); body.put("input", taskDefinitionInput); diff --git a/core/src/test/java/com/netflix/conductor/core/execution/mapper/DynamicTaskMapperTest.java b/core/src/test/java/com/netflix/conductor/core/execution/mapper/DynamicTaskMapperTest.java index 153dbca1a4..f0ef216bbb 100644 --- a/core/src/test/java/com/netflix/conductor/core/execution/mapper/DynamicTaskMapperTest.java +++ b/core/src/test/java/com/netflix/conductor/core/execution/mapper/DynamicTaskMapperTest.java @@ -8,7 +8,6 @@ import com.netflix.conductor.core.execution.ParametersUtils; import 
com.netflix.conductor.core.execution.TerminateWorkflowException; import com.netflix.conductor.core.utils.IDGenerator; -import com.netflix.conductor.dao.MetadataDAO; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -18,7 +17,7 @@ import java.util.List; import java.util.Map; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyMap; import static org.mockito.Matchers.anyString; @@ -27,18 +26,16 @@ public class DynamicTaskMapperTest { - MetadataDAO metadataDAO; - ParametersUtils parametersUtils; - DynamicTaskMapper dynamicTaskMapper; + private ParametersUtils parametersUtils; + private DynamicTaskMapper dynamicTaskMapper; @Rule public ExpectedException expectedException = ExpectedException.none(); @Before public void setUp() throws Exception { - metadataDAO = mock(MetadataDAO.class); parametersUtils = mock(ParametersUtils.class); - dynamicTaskMapper = new DynamicTaskMapper(parametersUtils, metadataDAO); + dynamicTaskMapper = new DynamicTaskMapper(parametersUtils); } @Test @@ -49,18 +46,23 @@ public void getMappedTasks() throws Exception { workflowTask.setDynamicTaskNameParam("dynamicTaskName"); TaskDef taskDef = new TaskDef(); taskDef.setName("DynoTask"); + workflowTask.setTaskDefinition(taskDef); Map taskInput = new HashMap<>(); taskInput.put("dynamicTaskName", "DynoTask"); - when(metadataDAO.getTaskDef("DynoTask")).thenReturn(taskDef); when(parametersUtils.getTaskInput(anyMap(), any(Workflow.class), any(TaskDef.class), anyString())).thenReturn(taskInput); String taskId = IDGenerator.generate(); + + Workflow workflow = new Workflow(); + WorkflowDef workflowDef = new WorkflowDef(); + workflow.setWorkflowDefinition(workflowDef); + TaskMapperContext taskMapperContext = TaskMapperContext.newBuilder() - .withWorkflowDefinition(new WorkflowDef()) - .withWorkflowInstance(new Workflow()) - .withTaskDefinition(new TaskDef()) + .withWorkflowInstance(workflow) + .withWorkflowDefinition(workflowDef) + .withTaskDefinition(workflowTask.getTaskDefinition()) .withTaskToSchedule(workflowTask) .withTaskInput(taskInput) .withRetryCount(0) @@ -104,7 +106,7 @@ public void getDynamicTaskDefinition() throws Exception { workflowTask.setName("Foo"); TaskDef taskDef = new TaskDef(); taskDef.setName("Foo"); - when(metadataDAO.getTaskDef("Foo")).thenReturn(taskDef); + workflowTask.setTaskDefinition(taskDef); //when TaskDef dynamicTaskDefinition = dynamicTaskMapper.getDynamicTaskDefinition(workflowTask); @@ -119,8 +121,6 @@ public void getDynamicTaskDefinitionNull() { WorkflowTask workflowTask = new WorkflowTask(); workflowTask.setName("Foo"); - when(metadataDAO.getTaskDef("Foo")).thenReturn(null); - expectedException.expect(TerminateWorkflowException.class); expectedException.expectMessage(String.format("Invalid task specified. 
Cannot find task by name %s in the task definitions", workflowTask.getName())); @@ -129,4 +129,4 @@ public void getDynamicTaskDefinitionNull() { } -} \ No newline at end of file +} diff --git a/core/src/test/java/com/netflix/conductor/core/execution/mapper/EventTaskMapperTest.java b/core/src/test/java/com/netflix/conductor/core/execution/mapper/EventTaskMapperTest.java index ed29aab38a..849081e548 100644 --- a/core/src/test/java/com/netflix/conductor/core/execution/mapper/EventTaskMapperTest.java +++ b/core/src/test/java/com/netflix/conductor/core/execution/mapper/EventTaskMapperTest.java @@ -22,8 +22,6 @@ public class EventTaskMapperTest { - - @Test public void getMappedTasks() throws Exception { ParametersUtils parametersUtils = Mockito.mock(ParametersUtils.class); @@ -38,9 +36,13 @@ public void getMappedTasks() throws Exception { when(parametersUtils.getTaskInput(anyMap(), any(Workflow.class), any(TaskDef.class), anyString())).thenReturn(eventTaskInput); + WorkflowDef wd = new WorkflowDef(); + Workflow w = new Workflow(); + w.setWorkflowDefinition(wd); + TaskMapperContext taskMapperContext = TaskMapperContext.newBuilder() - .withWorkflowDefinition(new WorkflowDef()) - .withWorkflowInstance(new Workflow()) + .withWorkflowDefinition(wd) + .withWorkflowInstance(w) .withTaskDefinition(new TaskDef()) .withTaskToSchedule(taskToBeScheduled) .withRetryCount(0) @@ -55,4 +57,4 @@ public void getMappedTasks() throws Exception { } -} \ No newline at end of file +} diff --git a/core/src/test/java/com/netflix/conductor/core/execution/mapper/ForkJoinDynamicTaskMapperTest.java b/core/src/test/java/com/netflix/conductor/core/execution/mapper/ForkJoinDynamicTaskMapperTest.java index ff9ce3eaf9..13b4f365ab 100644 --- a/core/src/test/java/com/netflix/conductor/core/execution/mapper/ForkJoinDynamicTaskMapperTest.java +++ b/core/src/test/java/com/netflix/conductor/core/execution/mapper/ForkJoinDynamicTaskMapperTest.java @@ -5,6 +5,7 @@ import com.netflix.conductor.common.metadata.tasks.Task; import com.netflix.conductor.common.metadata.tasks.TaskDef; import com.netflix.conductor.common.metadata.workflow.DynamicForkJoinTaskList; +import com.netflix.conductor.common.metadata.workflow.TaskType; import com.netflix.conductor.common.metadata.workflow.WorkflowDef; import com.netflix.conductor.common.metadata.workflow.WorkflowTask; import com.netflix.conductor.common.run.Workflow; @@ -13,6 +14,7 @@ import com.netflix.conductor.core.execution.SystemTaskType; import com.netflix.conductor.core.execution.TerminateWorkflowException; import com.netflix.conductor.core.utils.IDGenerator; +import com.netflix.conductor.dao.MetadataDAO; import org.apache.commons.lang3.tuple.Pair; import org.junit.Before; import org.junit.Rule; @@ -36,6 +38,7 @@ public class ForkJoinDynamicTaskMapperTest { + private MetadataDAO metadataDAO; private ParametersUtils parametersUtils; private ObjectMapper objectMapper; private DeciderService deciderService; @@ -46,12 +49,13 @@ public class ForkJoinDynamicTaskMapperTest { @Before - public void setUp() { + public void setUp() throws Exception { + metadataDAO = Mockito.mock(MetadataDAO.class); parametersUtils = Mockito.mock(ParametersUtils.class); objectMapper = Mockito.mock(ObjectMapper.class); deciderService = Mockito.mock(DeciderService.class); - forkJoinDynamicTaskMapper = new ForkJoinDynamicTaskMapper(parametersUtils, objectMapper); + forkJoinDynamicTaskMapper = new ForkJoinDynamicTaskMapper(parametersUtils, objectMapper, metadataDAO); } @@ -65,9 +69,10 @@ public void getMappedTasksException() { 
def.setInputParameters(Arrays.asList("param1", "param2")); Workflow workflowInstance = new Workflow(); + workflowInstance.setWorkflowDefinition(def); WorkflowTask dynamicForkJoinToSchedule = new WorkflowTask(); - dynamicForkJoinToSchedule.setType(WorkflowTask.Type.FORK_JOIN_DYNAMIC.name()); + dynamicForkJoinToSchedule.setType(TaskType.FORK_JOIN_DYNAMIC.name()); dynamicForkJoinToSchedule.setTaskReferenceName("dynamicfanouttask"); dynamicForkJoinToSchedule.setDynamicForkTasksParam("dynamicTasks"); dynamicForkJoinToSchedule.setDynamicForkTasksInputParamName("dynamicTasksInput"); @@ -76,7 +81,7 @@ public void getMappedTasksException() { WorkflowTask join = new WorkflowTask(); - join.setType(WorkflowTask.Type.JOIN.name()); + join.setType(TaskType.JOIN.name()); join.setTaskReferenceName("dynamictask_join"); def.getTasks().add(dynamicForkJoinToSchedule); @@ -113,12 +118,11 @@ public void getMappedTasksException() { Task simpleTask2 = new Task(); simpleTask2.setReferenceTaskName("xdt2"); - when(deciderService.getTasksToBeScheduled(def, workflowInstance, wt2, 0 )).thenReturn(Arrays.asList(simpleTask1)); - when(deciderService.getTasksToBeScheduled(def, workflowInstance, wt3, 0 )).thenReturn(Arrays.asList(simpleTask2)); + when(deciderService.getTasksToBeScheduled(workflowInstance, wt2, 0 )).thenReturn(Arrays.asList(simpleTask1)); + when(deciderService.getTasksToBeScheduled(workflowInstance, wt3, 0 )).thenReturn(Arrays.asList(simpleTask2)); String taskId = IDGenerator.generate(); - //TaskMapperContext taskMapperContext = new TaskMapperContext(def, workflowInstance, dynamicForkJoinToSchedule, null,0, null, taskId, deciderService); TaskMapperContext taskMapperContext = TaskMapperContext.newBuilder() .withWorkflowDefinition(def) .withWorkflowInstance(workflowInstance) @@ -127,6 +131,7 @@ public void getMappedTasksException() { .withTaskId(taskId) .withDeciderService(deciderService) .build(); + //then expectedException.expect(TerminateWorkflowException.class); forkJoinDynamicTaskMapper.getMappedTasks(taskMapperContext); @@ -143,9 +148,10 @@ public void getMappedTasks() { def.setInputParameters(Arrays.asList("param1", "param2")); Workflow workflowInstance = new Workflow(); + workflowInstance.setWorkflowDefinition(def); WorkflowTask dynamicForkJoinToSchedule = new WorkflowTask(); - dynamicForkJoinToSchedule.setType(WorkflowTask.Type.FORK_JOIN_DYNAMIC.name()); + dynamicForkJoinToSchedule.setType(TaskType.FORK_JOIN_DYNAMIC.name()); dynamicForkJoinToSchedule.setTaskReferenceName("dynamicfanouttask"); dynamicForkJoinToSchedule.setDynamicForkTasksParam("dynamicTasks"); dynamicForkJoinToSchedule.setDynamicForkTasksInputParamName("dynamicTasksInput"); @@ -154,7 +160,7 @@ public void getMappedTasks() { WorkflowTask join = new WorkflowTask(); - join.setType(WorkflowTask.Type.JOIN.name()); + join.setType(TaskType.JOIN.name()); join.setTaskReferenceName("dynamictask_join"); def.getTasks().add(dynamicForkJoinToSchedule); @@ -182,7 +188,6 @@ public void getMappedTasks() { //when when(parametersUtils.getTaskInput(anyMap(), any(Workflow.class), any(TaskDef.class), anyString())) .thenReturn(dynamicTasksInput); - when(objectMapper.convertValue(anyMap(),any(TypeReference.class))).thenReturn(Arrays.asList(wt2, wt3)); @@ -192,8 +197,8 @@ public void getMappedTasks() { Task simpleTask2 = new Task(); simpleTask2.setReferenceTaskName("xdt2"); - when(deciderService.getTasksToBeScheduled(def, workflowInstance, wt2, 0 )).thenReturn(Arrays.asList(simpleTask1)); - when(deciderService.getTasksToBeScheduled(def, workflowInstance, wt3, 0 
)).thenReturn(Arrays.asList(simpleTask2)); + when(deciderService.getTasksToBeScheduled(workflowInstance, wt2, 0 )).thenReturn(Arrays.asList(simpleTask1)); + when(deciderService.getTasksToBeScheduled(workflowInstance, wt3, 0 )).thenReturn(Arrays.asList(simpleTask2)); String taskId = IDGenerator.generate(); TaskMapperContext taskMapperContext = TaskMapperContext.newBuilder() @@ -222,7 +227,7 @@ public void getMappedTasks() { public void getDynamicForkJoinTasksAndInput() { //Given WorkflowTask dynamicForkJoinToSchedule = new WorkflowTask(); - dynamicForkJoinToSchedule.setType(WorkflowTask.Type.FORK_JOIN_DYNAMIC.name()); + dynamicForkJoinToSchedule.setType(TaskType.FORK_JOIN_DYNAMIC.name()); dynamicForkJoinToSchedule.setTaskReferenceName("dynamicfanouttask"); dynamicForkJoinToSchedule.setDynamicForkJoinTasksParam("dynamicTasks"); dynamicForkJoinToSchedule.getInputParameters().put("dynamicTasks", "dt1.output.dynamicTasks"); @@ -260,7 +265,7 @@ public void getDynamicForkJoinTasksAndInput() { public void getDynamicForkJoinTasksAndInputException() { //Given WorkflowTask dynamicForkJoinToSchedule = new WorkflowTask(); - dynamicForkJoinToSchedule.setType(WorkflowTask.Type.FORK_JOIN_DYNAMIC.name()); + dynamicForkJoinToSchedule.setType(TaskType.FORK_JOIN_DYNAMIC.name()); dynamicForkJoinToSchedule.setTaskReferenceName("dynamicfanouttask"); dynamicForkJoinToSchedule.setDynamicForkJoinTasksParam("dynamicTasks"); dynamicForkJoinToSchedule.getInputParameters().put("dynamicTasks", "dt1.output.dynamicTasks"); @@ -297,7 +302,7 @@ public void getDynamicForkJoinTasksAndInputException() { public void getDynamicForkTasksAndInput() { //Given WorkflowTask dynamicForkJoinToSchedule = new WorkflowTask(); - dynamicForkJoinToSchedule.setType(WorkflowTask.Type.FORK_JOIN_DYNAMIC.name()); + dynamicForkJoinToSchedule.setType(TaskType.FORK_JOIN_DYNAMIC.name()); dynamicForkJoinToSchedule.setTaskReferenceName("dynamicfanouttask"); dynamicForkJoinToSchedule.setDynamicForkTasksParam("dynamicTasks"); dynamicForkJoinToSchedule.setDynamicForkTasksInputParamName("dynamicTasksInput"); @@ -340,7 +345,7 @@ public void getDynamicForkTasksAndInputException() { //Given WorkflowTask dynamicForkJoinToSchedule = new WorkflowTask(); - dynamicForkJoinToSchedule.setType(WorkflowTask.Type.FORK_JOIN_DYNAMIC.name()); + dynamicForkJoinToSchedule.setType(TaskType.FORK_JOIN_DYNAMIC.name()); dynamicForkJoinToSchedule.setTaskReferenceName("dynamicfanouttask"); dynamicForkJoinToSchedule.setDynamicForkTasksParam("dynamicTasks"); dynamicForkJoinToSchedule.setDynamicForkTasksInputParamName("dynamicTasksInput"); @@ -376,4 +381,4 @@ public void getDynamicForkTasksAndInputException() { forkJoinDynamicTaskMapper.getDynamicForkTasksAndInput(dynamicForkJoinToSchedule, new Workflow(), "dynamicTasks"); } -} \ No newline at end of file +} diff --git a/core/src/test/java/com/netflix/conductor/core/execution/mapper/ForkJoinTaskMapperTest.java b/core/src/test/java/com/netflix/conductor/core/execution/mapper/ForkJoinTaskMapperTest.java index 1e3573b121..feb19d817f 100644 --- a/core/src/test/java/com/netflix/conductor/core/execution/mapper/ForkJoinTaskMapperTest.java +++ b/core/src/test/java/com/netflix/conductor/core/execution/mapper/ForkJoinTaskMapperTest.java @@ -1,6 +1,7 @@ package com.netflix.conductor.core.execution.mapper; import com.netflix.conductor.common.metadata.tasks.Task; +import com.netflix.conductor.common.metadata.workflow.TaskType; import com.netflix.conductor.common.metadata.workflow.WorkflowDef; import 
com.netflix.conductor.common.metadata.workflow.WorkflowTask; import com.netflix.conductor.common.run.Workflow; @@ -23,9 +24,9 @@ public class ForkJoinTaskMapperTest { - DeciderService deciderService; + private DeciderService deciderService; - ForkJoinTaskMapper forkJoinTaskMapper; + private ForkJoinTaskMapper forkJoinTaskMapper; @Rule public ExpectedException expectedException = ExpectedException.none(); @@ -46,7 +47,7 @@ public void getMappedTasks() throws Exception { def.setInputParameters(Arrays.asList("param1", "param2")); WorkflowTask forkTask = new WorkflowTask(); - forkTask.setType(WorkflowTask.Type.FORK_JOIN.name()); + forkTask.setType(TaskType.FORK_JOIN.name()); forkTask.setTaskReferenceName("forktask"); WorkflowTask wft1 = new WorkflowTask(); @@ -80,7 +81,7 @@ public void getMappedTasks() throws Exception { def.getTasks().add(forkTask); WorkflowTask join = new WorkflowTask(); - join.setType(WorkflowTask.Type.JOIN.name()); + join.setType(TaskType.JOIN.name()); join.setTaskReferenceName("forktask_join"); join.setJoinOn(Arrays.asList("t3","t2")); @@ -88,6 +89,7 @@ public void getMappedTasks() throws Exception { def.getTasks().add(wft4); Workflow workflow = new Workflow(); + workflow.setWorkflowDefinition(def); Task task1 = new Task(); task1.setReferenceTaskName(wft1.getTaskReferenceName()); @@ -95,11 +97,10 @@ public void getMappedTasks() throws Exception { Task task3 = new Task(); task3.setReferenceTaskName(wft3.getTaskReferenceName()); - Mockito.when(deciderService.getTasksToBeScheduled(def, workflow, wft1,0)).thenReturn(Arrays.asList(task1)); - Mockito.when(deciderService.getTasksToBeScheduled(def, workflow, wft2,0)).thenReturn(Arrays.asList(task3)); + Mockito.when(deciderService.getTasksToBeScheduled(workflow, wft1,0)).thenReturn(Arrays.asList(task1)); + Mockito.when(deciderService.getTasksToBeScheduled(workflow, wft2,0)).thenReturn(Arrays.asList(task3)); String taskId = IDGenerator.generate(); - TaskMapperContext taskMapperContext = TaskMapperContext.newBuilder() .withWorkflowDefinition(def) .withWorkflowInstance(workflow) @@ -127,7 +128,7 @@ public void getMappedTasksException() throws Exception { def.setInputParameters(Arrays.asList("param1", "param2")); WorkflowTask forkTask = new WorkflowTask(); - forkTask.setType(WorkflowTask.Type.FORK_JOIN.name()); + forkTask.setType(TaskType.FORK_JOIN.name()); forkTask.setTaskReferenceName("forktask"); WorkflowTask wft1 = new WorkflowTask(); @@ -161,13 +162,14 @@ public void getMappedTasksException() throws Exception { def.getTasks().add(forkTask); WorkflowTask join = new WorkflowTask(); - join.setType(WorkflowTask.Type.JOIN.name()); + join.setType(TaskType.JOIN.name()); join.setTaskReferenceName("forktask_join"); join.setJoinOn(Arrays.asList("t3","t2")); def.getTasks().add(wft4); Workflow workflow = new Workflow(); + workflow.setWorkflowDefinition(def); Task task1 = new Task(); task1.setReferenceTaskName(wft1.getTaskReferenceName()); @@ -175,8 +177,8 @@ public void getMappedTasksException() throws Exception { Task task3 = new Task(); task3.setReferenceTaskName(wft3.getTaskReferenceName()); - Mockito.when(deciderService.getTasksToBeScheduled(def, workflow, wft1,0)).thenReturn(Arrays.asList(task1)); - Mockito.when(deciderService.getTasksToBeScheduled(def, workflow, wft2,0)).thenReturn(Arrays.asList(task3)); + Mockito.when(deciderService.getTasksToBeScheduled(workflow, wft1,0)).thenReturn(Arrays.asList(task1)); + Mockito.when(deciderService.getTasksToBeScheduled(workflow, wft2,0)).thenReturn(Arrays.asList(task3)); String taskId = 
IDGenerator.generate(); @@ -188,10 +190,11 @@ public void getMappedTasksException() throws Exception { .withTaskId(taskId) .withDeciderService(deciderService) .build(); + expectedException.expect(TerminateWorkflowException.class); expectedException.expectMessage("Dynamic join definition is not followed by a join task. Check the blueprint"); forkJoinTaskMapper.getMappedTasks(taskMapperContext); } -} \ No newline at end of file +} diff --git a/core/src/test/java/com/netflix/conductor/core/execution/mapper/JoinTaskMapperTest.java b/core/src/test/java/com/netflix/conductor/core/execution/mapper/JoinTaskMapperTest.java index c652f3a55b..455881db6a 100644 --- a/core/src/test/java/com/netflix/conductor/core/execution/mapper/JoinTaskMapperTest.java +++ b/core/src/test/java/com/netflix/conductor/core/execution/mapper/JoinTaskMapperTest.java @@ -2,6 +2,7 @@ import com.netflix.conductor.common.metadata.tasks.Task; import com.netflix.conductor.common.metadata.tasks.TaskDef; +import com.netflix.conductor.common.metadata.workflow.TaskType; import com.netflix.conductor.common.metadata.workflow.WorkflowDef; import com.netflix.conductor.common.metadata.workflow.WorkflowTask; import com.netflix.conductor.common.run.Workflow; @@ -10,10 +11,10 @@ import org.junit.Test; import java.util.Arrays; -import java.util.HashMap; import java.util.List; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; public class JoinTaskMapperTest { @@ -22,14 +23,18 @@ public class JoinTaskMapperTest { public void getMappedTasks() throws Exception { WorkflowTask taskToSchedule = new WorkflowTask(); - taskToSchedule.setType(WorkflowTask.Type.JOIN.name()); + taskToSchedule.setType(TaskType.JOIN.name()); taskToSchedule.setJoinOn(Arrays.asList("task1, task2")); String taskId = IDGenerator.generate(); + WorkflowDef wd = new WorkflowDef(); + Workflow w = new Workflow(); + w.setWorkflowDefinition(wd); + TaskMapperContext taskMapperContext = TaskMapperContext.newBuilder() - .withWorkflowDefinition(new WorkflowDef()) - .withWorkflowInstance(new Workflow()) + .withWorkflowDefinition(wd) + .withWorkflowInstance(w) .withTaskDefinition(new TaskDef()) .withTaskToSchedule(taskToSchedule) .withRetryCount(0) @@ -42,4 +47,4 @@ public void getMappedTasks() throws Exception { assertEquals(SystemTaskType.JOIN.name(), mappedTasks.get(0).getTaskType()); } -} \ No newline at end of file +} diff --git a/core/src/test/java/com/netflix/conductor/core/execution/mapper/SimpleTaskMapperTest.java b/core/src/test/java/com/netflix/conductor/core/execution/mapper/SimpleTaskMapperTest.java index db8287c4ea..85a3b53f37 100644 --- a/core/src/test/java/com/netflix/conductor/core/execution/mapper/SimpleTaskMapperTest.java +++ b/core/src/test/java/com/netflix/conductor/core/execution/mapper/SimpleTaskMapperTest.java @@ -8,7 +8,6 @@ import com.netflix.conductor.core.execution.ParametersUtils; import com.netflix.conductor.core.execution.TerminateWorkflowException; import com.netflix.conductor.core.utils.IDGenerator; -import com.netflix.conductor.dao.MetadataDAO; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -17,17 +16,14 @@ import java.util.HashMap; import java.util.List; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; public class SimpleTaskMapperTest { - ParametersUtils parametersUtils; - MetadataDAO 
metadataDAO; - - //subject - SimpleTaskMapper simpleTaskMapper; + private ParametersUtils parametersUtils; + private SimpleTaskMapper simpleTaskMapper; @Rule public ExpectedException expectedException = ExpectedException.none(); @@ -35,8 +31,7 @@ public class SimpleTaskMapperTest { @Before public void setUp() throws Exception { parametersUtils = mock(ParametersUtils.class); - metadataDAO = mock(MetadataDAO.class); - simpleTaskMapper = new SimpleTaskMapper(parametersUtils, metadataDAO); + simpleTaskMapper = new SimpleTaskMapper(parametersUtils); } @Test @@ -44,14 +39,18 @@ public void getMappedTasks() throws Exception { WorkflowTask taskToSchedule = new WorkflowTask(); taskToSchedule.setName("simple_task"); + taskToSchedule.setTaskDefinition(new TaskDef("simple_task")); String taskId = IDGenerator.generate(); String retriedTaskId = IDGenerator.generate(); - when(metadataDAO.getTaskDef("simple_task")).thenReturn(new TaskDef()); + WorkflowDef wd = new WorkflowDef(); + Workflow w = new Workflow(); + w.setWorkflowDefinition(wd); + TaskMapperContext taskMapperContext = TaskMapperContext.newBuilder() - .withWorkflowDefinition(new WorkflowDef()) - .withWorkflowInstance(new Workflow()) + .withWorkflowDefinition(wd) + .withWorkflowInstance(w) .withTaskDefinition(new TaskDef()) .withTaskToSchedule(taskToSchedule) .withTaskInput(new HashMap<>()) @@ -74,9 +73,13 @@ public void getMappedTasksException() throws Exception { String taskId = IDGenerator.generate(); String retriedTaskId = IDGenerator.generate(); + WorkflowDef wd = new WorkflowDef(); + Workflow w = new Workflow(); + w.setWorkflowDefinition(wd); + TaskMapperContext taskMapperContext = TaskMapperContext.newBuilder() - .withWorkflowDefinition(new WorkflowDef()) - .withWorkflowInstance(new Workflow()) + .withWorkflowDefinition(wd) + .withWorkflowInstance(w) .withTaskDefinition(new TaskDef()) .withTaskToSchedule(taskToSchedule) .withTaskInput(new HashMap<>()) @@ -85,10 +88,9 @@ public void getMappedTasksException() throws Exception { .withTaskId(taskId) .build(); - when(metadataDAO.getTaskDef("simple_task")).thenReturn(null); //then expectedException.expect(TerminateWorkflowException.class); - expectedException.expectMessage(String.format("Invalid task specified. Cannot find task by name %s in the task definitions", taskToSchedule.getName())); + expectedException.expectMessage(String.format("Invalid task. 
Task %s does not have a definition", taskToSchedule.getName())); //when simpleTaskMapper.getMappedTasks(taskMapperContext); @@ -96,4 +98,4 @@ public void getMappedTasksException() throws Exception { } -} \ No newline at end of file +} diff --git a/core/src/test/java/com/netflix/conductor/core/execution/mapper/SubWorkflowTaskMapperTest.java b/core/src/test/java/com/netflix/conductor/core/execution/mapper/SubWorkflowTaskMapperTest.java index d879a451c2..8cfa5a993a 100644 --- a/core/src/test/java/com/netflix/conductor/core/execution/mapper/SubWorkflowTaskMapperTest.java +++ b/core/src/test/java/com/netflix/conductor/core/execution/mapper/SubWorkflowTaskMapperTest.java @@ -43,7 +43,7 @@ public class SubWorkflowTaskMapperTest { public void setUp() { parametersUtils = mock(ParametersUtils.class); metadataDAO = mock(MetadataDAO.class); - subWorkflowTaskMapper = new SubWorkflowTaskMapper(parametersUtils, metadataDAO); + subWorkflowTaskMapper = new SubWorkflowTaskMapper(parametersUtils); deciderService = mock(DeciderService.class); } @@ -53,20 +53,20 @@ public void getMappedTasks() { //Given WorkflowDef workflowDef = new WorkflowDef(); Workflow workflowInstance = new Workflow(); + workflowInstance.setWorkflowDefinition(workflowDef); WorkflowTask taskToSchedule = new WorkflowTask(); SubWorkflowParams subWorkflowParams = new SubWorkflowParams(); subWorkflowParams.setName("Foo"); - subWorkflowParams.setVersion("2"); + subWorkflowParams.setVersion(2); taskToSchedule.setSubWorkflowParam(subWorkflowParams); Map taskInput = new HashMap<>(); Map subWorkflowParamMap = new HashMap<>(); subWorkflowParamMap.put("name","FooWorkFlow"); - subWorkflowParamMap.put("version","2"); + subWorkflowParamMap.put("version",2); when(parametersUtils.getTaskInputV2(anyMap(), any(Workflow.class), anyString(), any(TaskDef.class))) .thenReturn(subWorkflowParamMap); - //When TaskMapperContext taskMapperContext = TaskMapperContext.newBuilder() .withWorkflowDefinition(workflowDef) @@ -95,7 +95,7 @@ public void getSubWorkflowParams() { WorkflowTask workflowTask = new WorkflowTask(); SubWorkflowParams subWorkflowParams = new SubWorkflowParams(); subWorkflowParams.setName("Foo"); - subWorkflowParams.setVersion("2"); + subWorkflowParams.setVersion(2); workflowTask.setSubWorkflowParam(subWorkflowParams); assertEquals(subWorkflowParams, subWorkflowTaskMapper.getSubWorkflowParams(workflowTask)); @@ -112,41 +112,4 @@ public void getExceptionWhenNoSubWorkflowParamsPassed() { subWorkflowTaskMapper.getSubWorkflowParams(workflowTask); } - - - @Test - public void getSubWorkflowVersion() { - Map subWorkflowParamMap = new HashMap<>(); - subWorkflowParamMap.put("name","FooWorkFlow"); - subWorkflowParamMap.put("version","2"); - - Integer version = subWorkflowTaskMapper.getSubWorkflowVersion(subWorkflowParamMap, "FooWorkFlow"); - - assertEquals(version, Integer.valueOf(2)); - } - - @Test - public void getSubworkflowVersionFromMeta() { - Map subWorkflowParamMap = new HashMap<>(); - WorkflowDef workflowDef = new WorkflowDef(); - workflowDef.setName("FooWorkFlow"); - workflowDef.setVersion(2); - when(metadataDAO.getLatest(any())).thenReturn(workflowDef); - - Integer version = subWorkflowTaskMapper.getSubWorkflowVersion(subWorkflowParamMap, "FooWorkFlow"); - - assertEquals(version, Integer.valueOf(2)); - } - - @Test - public void getSubworkflowVersionFromMetaException() { - Map subWorkflowParamMap = new HashMap<>(); - when(metadataDAO.getLatest(any())).thenReturn(null); - - expectedException.expect(TerminateWorkflowException.class); - 
expectedException.expectMessage(String.format("The Task %s defined as a sub-workflow has no workflow definition available ", "FooWorkFlow")); - - subWorkflowTaskMapper.getSubWorkflowVersion(subWorkflowParamMap, "FooWorkFlow"); - } - -} \ No newline at end of file +} diff --git a/core/src/test/java/com/netflix/conductor/core/execution/mapper/UserDefinedTaskMapperTest.java b/core/src/test/java/com/netflix/conductor/core/execution/mapper/UserDefinedTaskMapperTest.java index 3bdab8f5a4..f55416c311 100644 --- a/core/src/test/java/com/netflix/conductor/core/execution/mapper/UserDefinedTaskMapperTest.java +++ b/core/src/test/java/com/netflix/conductor/core/execution/mapper/UserDefinedTaskMapperTest.java @@ -2,13 +2,13 @@ import com.netflix.conductor.common.metadata.tasks.Task; import com.netflix.conductor.common.metadata.tasks.TaskDef; +import com.netflix.conductor.common.metadata.workflow.TaskType; import com.netflix.conductor.common.metadata.workflow.WorkflowDef; import com.netflix.conductor.common.metadata.workflow.WorkflowTask; import com.netflix.conductor.common.run.Workflow; import com.netflix.conductor.core.execution.ParametersUtils; import com.netflix.conductor.core.execution.TerminateWorkflowException; import com.netflix.conductor.core.utils.IDGenerator; -import com.netflix.conductor.dao.MetadataDAO; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -17,17 +17,13 @@ import java.util.HashMap; import java.util.List; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; public class UserDefinedTaskMapperTest { - ParametersUtils parametersUtils; - MetadataDAO metadataDAO; - - //subject - UserDefinedTaskMapper userDefinedTaskMapper; + private ParametersUtils parametersUtils; + private UserDefinedTaskMapper userDefinedTaskMapper; @Rule public ExpectedException expectedException = ExpectedException.none(); @@ -35,8 +31,7 @@ public class UserDefinedTaskMapperTest { @Before public void setUp() throws Exception { parametersUtils = mock(ParametersUtils.class); - metadataDAO = mock(MetadataDAO.class); - userDefinedTaskMapper = new UserDefinedTaskMapper(parametersUtils, metadataDAO); + userDefinedTaskMapper = new UserDefinedTaskMapper(parametersUtils); } @Test @@ -44,14 +39,18 @@ public void getMappedTasks() throws Exception { //Given WorkflowTask taskToSchedule = new WorkflowTask(); taskToSchedule.setName("user_task"); - taskToSchedule.setType(WorkflowTask.Type.USER_DEFINED.name()); + taskToSchedule.setType(TaskType.USER_DEFINED.name()); + taskToSchedule.setTaskDefinition(new TaskDef("user_task")); String taskId = IDGenerator.generate(); String retriedTaskId = IDGenerator.generate(); - when(metadataDAO.getTaskDef("user_task")).thenReturn(new TaskDef()); + + Workflow workflow = new Workflow(); + WorkflowDef workflowDef = new WorkflowDef(); + workflow.setWorkflowDefinition(workflowDef); TaskMapperContext taskMapperContext = TaskMapperContext.newBuilder() - .withWorkflowDefinition(new WorkflowDef()) - .withWorkflowInstance(new Workflow()) + .withWorkflowDefinition(workflowDef) + .withWorkflowInstance(workflow) .withTaskDefinition(new TaskDef()) .withTaskToSchedule(taskToSchedule) .withTaskInput(new HashMap<>()) @@ -65,7 +64,7 @@ public void getMappedTasks() throws Exception { //Then assertEquals(1, mappedTasks.size()); - assertEquals(WorkflowTask.Type.USER_DEFINED.name(), mappedTasks.get(0).getTaskType()); + assertEquals(TaskType.USER_DEFINED.name(), 
mappedTasks.get(0).getTaskType()); } @Test @@ -73,14 +72,17 @@ public void getMappedTasksException() throws Exception { //Given WorkflowTask taskToSchedule = new WorkflowTask(); taskToSchedule.setName("user_task"); - taskToSchedule.setType(WorkflowTask.Type.USER_DEFINED.name()); + taskToSchedule.setType(TaskType.USER_DEFINED.name()); String taskId = IDGenerator.generate(); String retriedTaskId = IDGenerator.generate(); - when(metadataDAO.getTaskDef("user_task")).thenReturn(null); + + Workflow workflow = new Workflow(); + WorkflowDef workflowDef = new WorkflowDef(); + workflow.setWorkflowDefinition(workflowDef); TaskMapperContext taskMapperContext = TaskMapperContext.newBuilder() - .withWorkflowDefinition(new WorkflowDef()) - .withWorkflowInstance(new Workflow()) + .withWorkflowDefinition(workflowDef) + .withWorkflowInstance(workflow) .withTaskToSchedule(taskToSchedule) .withTaskInput(new HashMap<>()) .withRetryCount(0) @@ -96,4 +98,4 @@ public void getMappedTasksException() throws Exception { } -} \ No newline at end of file +} diff --git a/core/src/test/java/com/netflix/conductor/core/execution/mapper/WaitTaskMapperTest.java b/core/src/test/java/com/netflix/conductor/core/execution/mapper/WaitTaskMapperTest.java index df6f61def5..a2d6d0ef6b 100644 --- a/core/src/test/java/com/netflix/conductor/core/execution/mapper/WaitTaskMapperTest.java +++ b/core/src/test/java/com/netflix/conductor/core/execution/mapper/WaitTaskMapperTest.java @@ -2,6 +2,7 @@ import com.netflix.conductor.common.metadata.tasks.Task; import com.netflix.conductor.common.metadata.tasks.TaskDef; +import com.netflix.conductor.common.metadata.workflow.TaskType; import com.netflix.conductor.common.metadata.workflow.WorkflowDef; import com.netflix.conductor.common.metadata.workflow.WorkflowTask; import com.netflix.conductor.common.run.Workflow; @@ -23,14 +24,17 @@ public void getMappedTasks() { //Given WorkflowTask taskToSchedule = new WorkflowTask(); taskToSchedule.setName("Wait_task"); - taskToSchedule.setType(WorkflowTask.Type.WAIT.name()); + taskToSchedule.setType(TaskType.WAIT.name()); String taskId = IDGenerator.generate(); ParametersUtils parametersUtils = new ParametersUtils(); + Workflow workflow = new Workflow(); + WorkflowDef workflowDef = new WorkflowDef(); + workflow.setWorkflowDefinition(workflowDef); TaskMapperContext taskMapperContext = TaskMapperContext.newBuilder() - .withWorkflowDefinition(new WorkflowDef()) - .withWorkflowInstance(new Workflow()) + .withWorkflowDefinition(workflowDef) + .withWorkflowInstance(workflow) .withTaskDefinition(new TaskDef()) .withTaskToSchedule(taskToSchedule) .withTaskInput(new HashMap<>()) diff --git a/core/src/test/java/com/netflix/conductor/core/execution/tasks/TestEvent.java b/core/src/test/java/com/netflix/conductor/core/execution/tasks/TestEvent.java index 325dc3f029..6c14c4700b 100644 --- a/core/src/test/java/com/netflix/conductor/core/execution/tasks/TestEvent.java +++ b/core/src/test/java/com/netflix/conductor/core/execution/tasks/TestEvent.java @@ -1,26 +1,24 @@ /** * Copyright 2017 Netflix, Inc. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. */ /** - * + * */ package com.netflix.conductor.core.execution.tasks; import com.netflix.conductor.common.metadata.tasks.Task; import com.netflix.conductor.common.metadata.tasks.Task.Status; -import com.netflix.conductor.common.metadata.workflow.WorkflowTask; +import com.netflix.conductor.common.metadata.workflow.TaskType; +import com.netflix.conductor.common.metadata.workflow.WorkflowDef; import com.netflix.conductor.common.run.Workflow; import com.netflix.conductor.core.events.EventQueueProvider; import com.netflix.conductor.core.events.EventQueues; @@ -55,227 +53,234 @@ */ public class TestEvent { - private EventQueues eventQueues; - private ParametersUtils parametersUtils; - - @Before - public void setup() { - Map providers = new HashMap<>(); - providers.put("sqs", new MockQueueProvider("sqs")); - providers.put("conductor", new MockQueueProvider("conductor")); - parametersUtils = new ParametersUtils(); - eventQueues = new EventQueues(providers, parametersUtils); - } - - @Test - public void testEvent() { - System.setProperty("QUEUE_NAME", "queue_name_001"); - String eventt = "queue_${QUEUE_NAME}"; - String event = parametersUtils.replace(eventt).toString(); - assertNotNull(event); - assertEquals("queue_queue_name_001", event); - - eventt = "queue_9"; - event = parametersUtils.replace(eventt).toString(); - assertNotNull(event); - assertEquals(eventt, event); - } - - @Test - public void testSinkParam() { - String sink = "sqs:queue_name"; - - Workflow workflow = new Workflow(); - workflow.setWorkflowType("wf0"); - - Task task1 = new Task(); - task1.setReferenceTaskName("t1"); - task1.getOutputData().put("q", "t1_queue"); - workflow.getTasks().add(task1); - - Task task2 = new Task(); - task2.setReferenceTaskName("t2"); - task2.getOutputData().put("q", "task2_queue"); - workflow.getTasks().add(task2); - - Task task = new Task(); - task.setReferenceTaskName("event"); - task.getInputData().put("sink", sink); - task.setTaskType(WorkflowTask.Type.EVENT.name()); - workflow.getTasks().add(task); - - Event event = new Event(eventQueues, parametersUtils); - ObservableQueue queue = event.getQueue(workflow, task); - assertNotNull(task.getReasonForIncompletion(), queue); - assertEquals("queue_name", queue.getName()); - assertEquals("sqs", queue.getType()); - - sink = "sqs:${t1.output.q}"; - task.getInputData().put("sink", sink); - queue = event.getQueue(workflow, task); - assertNotNull(queue); - assertEquals("t1_queue", queue.getName()); - assertEquals("sqs", queue.getType()); - System.out.println(task.getOutputData().get("event_produced")); - - sink = "sqs:${t2.output.q}"; - task.getInputData().put("sink", sink); - queue = event.getQueue(workflow, task); - assertNotNull(queue); - assertEquals("task2_queue", queue.getName()); - 
assertEquals("sqs", queue.getType()); - System.out.println(task.getOutputData().get("event_produced")); - - sink = "conductor"; - task.getInputData().put("sink", sink); - queue = event.getQueue(workflow, task); - assertNotNull(queue); - assertEquals(workflow.getWorkflowType() + ":" + task.getReferenceTaskName(), queue.getName()); - assertEquals("conductor", queue.getType()); - System.out.println(task.getOutputData().get("event_produced")); - - sink = "sqs:static_value"; - task.getInputData().put("sink", sink); - queue = event.getQueue(workflow, task); - assertNotNull(queue); - assertEquals("static_value", queue.getName()); - assertEquals("sqs", queue.getType()); - assertEquals(sink, task.getOutputData().get("event_produced")); - System.out.println(task.getOutputData().get("event_produced")); - - sink = "bad:queue"; - task.getInputData().put("sink", sink); - queue = event.getQueue(workflow, task); - assertNull(queue); - assertEquals(Task.Status.FAILED, task.getStatus()); - } - - @SuppressWarnings("unchecked") - @Test - public void test() { - Workflow workflow = new Workflow(); - workflow.setWorkflowType("testWorkflow"); - workflow.setVersion(2); - - Task task = new Task(); - task.getInputData().put("sink", "conductor"); - task.setReferenceTaskName("task0"); - task.setTaskId("task_id_0"); - - QueueDAO dao = mock(QueueDAO.class); - String[] publishedQueue = new String[1]; - List publishedMessages = new LinkedList<>(); - - doAnswer((Answer) invocation -> { - String queueName = invocation.getArgumentAt(0, String.class); - System.out.println(queueName); - publishedQueue[0] = queueName; - List messages = invocation.getArgumentAt(1, List.class); - publishedMessages.addAll(messages); - return null; - }).when(dao).push(any(), any()); - - doAnswer((Answer>) invocation -> { - String messageId = invocation.getArgumentAt(1, String.class); - if(publishedMessages.get(0).getId().equals(messageId)) { - publishedMessages.remove(0); - return Collections.singletonList(messageId); - } - return null; - }).when(dao).remove(any(), any()); - - Map providers = new HashMap<>(); - providers.put("conductor", new DynoEventQueueProvider(dao, new TestConfiguration())); - eventQueues = new EventQueues(providers, parametersUtils); - Event event = new Event(eventQueues, parametersUtils); - event.start(workflow, task, null); - - assertEquals(Task.Status.COMPLETED, task.getStatus()); - assertNotNull(task.getOutputData()); - assertEquals("conductor:" + workflow.getWorkflowType() + ":" + task.getReferenceTaskName(), task.getOutputData().get("event_produced")); - assertEquals(task.getOutputData().get("event_produced"), "conductor:" + publishedQueue[0]); - assertEquals(1, publishedMessages.size()); - assertEquals(task.getTaskId(), publishedMessages.get(0).getId()); - assertNotNull(publishedMessages.get(0).getPayload()); - - event.cancel(workflow, task, null); - assertTrue(publishedMessages.isEmpty()); - } - - - @Test - public void testFailures() { - Event event = new Event(eventQueues, parametersUtils); - Workflow workflow = new Workflow(); - workflow.setWorkflowType("testWorkflow"); - workflow.setVersion(2); - - Task task = new Task(); - task.setReferenceTaskName("task0"); - task.setTaskId("task_id_0"); - - event.start(workflow, task, null); - assertEquals(Task.Status.FAILED, task.getStatus()); - assertNotNull(task.getReasonForIncompletion()); - System.out.println(task.getReasonForIncompletion()); - - task.getInputData().put("sink", "bad_sink"); - task.setStatus(Status.SCHEDULED); - - event.start(workflow, task, null); - 
assertEquals(Task.Status.FAILED, task.getStatus()); - assertNotNull(task.getReasonForIncompletion()); - System.out.println(task.getReasonForIncompletion()); - - task.setStatus(Status.SCHEDULED); - task.setScheduledTime(System.currentTimeMillis()); - event.execute(workflow, task, null); - assertEquals(Task.Status.SCHEDULED, task.getStatus()); - - task.setScheduledTime(System.currentTimeMillis() - 610_000); - event.start(workflow, task, null); - assertEquals(Task.Status.FAILED, task.getStatus()); - } - - @Test - public void testDynamicSinks() { - - Event event = new Event(eventQueues, parametersUtils); - Workflow workflow = new Workflow(); - workflow.setWorkflowType("testWorkflow"); - workflow.setVersion(2); - - Task task = new Task(); - task.setReferenceTaskName("task0"); - task.setTaskId("task_id_0"); - task.setStatus(Status.IN_PROGRESS); - task.getInputData().put("sink", "conductor:some_arbitary_queue"); - - - ObservableQueue queue = event.getQueue(workflow, task); - assertEquals(Task.Status.IN_PROGRESS, task.getStatus()); - assertNotNull(queue); - assertEquals("testWorkflow:some_arbitary_queue", queue.getName()); - assertEquals("testWorkflow:some_arbitary_queue", queue.getURI()); - assertEquals("conductor", queue.getType()); - assertEquals("conductor:testWorkflow:some_arbitary_queue", task.getOutputData().get("event_produced")); - - task.getInputData().put("sink", "conductor"); - queue = event.getQueue(workflow, task); - assertEquals("not in progress: " + task.getReasonForIncompletion(), Task.Status.IN_PROGRESS, task.getStatus()); - assertNotNull(queue); - assertEquals("testWorkflow:task0", queue.getName()); - - task.getInputData().put("sink", "sqs:my_sqs_queue_name"); - queue = event.getQueue(workflow, task); - assertEquals("not in progress: " + task.getReasonForIncompletion(), Task.Status.IN_PROGRESS, task.getStatus()); - assertNotNull(queue); - assertEquals("my_sqs_queue_name", queue.getName()); - assertEquals("sqs", queue.getType()); - - task.getInputData().put("sink", "sns:my_sqs_queue_name"); - queue = event.getQueue(workflow, task); - assertEquals(Task.Status.FAILED, task.getStatus()); - } - + WorkflowDef testWorkflowDefinition; + + private EventQueues eventQueues; + private ParametersUtils parametersUtils; + + @Before + public void setup() { + Map providers = new HashMap<>(); + providers.put("sqs", new MockQueueProvider("sqs")); + providers.put("conductor", new MockQueueProvider("conductor")); + + parametersUtils = new ParametersUtils(); + eventQueues = new EventQueues(providers, parametersUtils); + + testWorkflowDefinition = new WorkflowDef(); + testWorkflowDefinition.setName("testWorkflow"); + testWorkflowDefinition.setVersion(2); + } + + @Test + public void testEvent() { + System.setProperty("QUEUE_NAME", "queue_name_001"); + String eventt = "queue_${QUEUE_NAME}"; + String event = parametersUtils.replace(eventt).toString(); + assertNotNull(event); + assertEquals("queue_queue_name_001", event); + + eventt = "queue_9"; + event = parametersUtils.replace(eventt).toString(); + assertNotNull(event); + assertEquals(eventt, event); + } + + @Test + public void testSinkParam() { + String sink = "sqs:queue_name"; + + WorkflowDef def = new WorkflowDef(); + def.setName("wf0"); + + Workflow workflow = new Workflow(); + workflow.setWorkflowDefinition(def); + + Task task1 = new Task(); + task1.setReferenceTaskName("t1"); + task1.getOutputData().put("q", "t1_queue"); + workflow.getTasks().add(task1); + + Task task2 = new Task(); + task2.setReferenceTaskName("t2"); + 
task2.getOutputData().put("q", "task2_queue"); + workflow.getTasks().add(task2); + + Task task = new Task(); + task.setReferenceTaskName("event"); + task.getInputData().put("sink", sink); + task.setTaskType(TaskType.EVENT.name()); + workflow.getTasks().add(task); + + Event event = new Event(eventQueues, parametersUtils); + ObservableQueue queue = event.getQueue(workflow, task); + assertNotNull(task.getReasonForIncompletion(), queue); + assertEquals("queue_name", queue.getName()); + assertEquals("sqs", queue.getType()); + + sink = "sqs:${t1.output.q}"; + task.getInputData().put("sink", sink); + queue = event.getQueue(workflow, task); + assertNotNull(queue); + assertEquals("t1_queue", queue.getName()); + assertEquals("sqs", queue.getType()); + System.out.println(task.getOutputData().get("event_produced")); + + sink = "sqs:${t2.output.q}"; + task.getInputData().put("sink", sink); + queue = event.getQueue(workflow, task); + assertNotNull(queue); + assertEquals("task2_queue", queue.getName()); + assertEquals("sqs", queue.getType()); + System.out.println(task.getOutputData().get("event_produced")); + + sink = "conductor"; + task.getInputData().put("sink", sink); + queue = event.getQueue(workflow, task); + assertNotNull(queue); + assertEquals(workflow.getWorkflowName() + ":" + task.getReferenceTaskName(), queue.getName()); + assertEquals("conductor", queue.getType()); + System.out.println(task.getOutputData().get("event_produced")); + + sink = "sqs:static_value"; + task.getInputData().put("sink", sink); + queue = event.getQueue(workflow, task); + assertNotNull(queue); + assertEquals("static_value", queue.getName()); + assertEquals("sqs", queue.getType()); + assertEquals(sink, task.getOutputData().get("event_produced")); + System.out.println(task.getOutputData().get("event_produced")); + + sink = "bad:queue"; + task.getInputData().put("sink", sink); + queue = event.getQueue(workflow, task); + assertNull(queue); + assertEquals(Task.Status.FAILED, task.getStatus()); + } + + @SuppressWarnings("unchecked") + @Test + public void test() throws Exception { + Workflow workflow = new Workflow(); + workflow.setWorkflowDefinition(testWorkflowDefinition); + + Task task = new Task(); + task.getInputData().put("sink", "conductor"); + task.setReferenceTaskName("task0"); + task.setTaskId("task_id_0"); + + QueueDAO dao = mock(QueueDAO.class); + String[] publishedQueue = new String[1]; + List publishedMessages = new LinkedList<>(); + + doAnswer((Answer) invocation -> { + String queueName = invocation.getArgumentAt(0, String.class); + System.out.println(queueName); + publishedQueue[0] = queueName; + List messages = invocation.getArgumentAt(1, List.class); + publishedMessages.addAll(messages); + return null; + }).when(dao).push(any(), any()); + + doAnswer((Answer>) invocation -> { + String messageId = invocation.getArgumentAt(1, String.class); + if(publishedMessages.get(0).getId().equals(messageId)) { + publishedMessages.remove(0); + return Collections.singletonList(messageId); + } + return null; + }).when(dao).remove(any(), any()); + + Map providers = new HashMap<>(); + providers.put("conductor", new DynoEventQueueProvider(dao, new TestConfiguration())); + eventQueues = new EventQueues(providers, parametersUtils); + Event event = new Event(eventQueues, parametersUtils); + event.start(workflow, task, null); + + assertEquals(Task.Status.COMPLETED, task.getStatus()); + assertNotNull(task.getOutputData()); + assertEquals("conductor:" + workflow.getWorkflowName() + ":" + task.getReferenceTaskName(), 
task.getOutputData().get("event_produced")); + assertEquals(task.getOutputData().get("event_produced"), "conductor:" + publishedQueue[0]); + assertEquals(1, publishedMessages.size()); + assertEquals(task.getTaskId(), publishedMessages.get(0).getId()); + assertNotNull(publishedMessages.get(0).getPayload()); + + event.cancel(workflow, task, null); + assertTrue(publishedMessages.isEmpty()); + } + + + @Test + public void testFailures() { + Event event = new Event(eventQueues, parametersUtils); + Workflow workflow = new Workflow(); + workflow.setWorkflowDefinition(testWorkflowDefinition); + + Task task = new Task(); + task.setReferenceTaskName("task0"); + task.setTaskId("task_id_0"); + + event.start(workflow, task, null); + assertEquals(Task.Status.FAILED, task.getStatus()); + assertTrue(task.getReasonForIncompletion() != null); + System.out.println(task.getReasonForIncompletion()); + + task.getInputData().put("sink", "bad_sink"); + task.setStatus(Status.SCHEDULED); + + event.start(workflow, task, null); + assertEquals(Task.Status.FAILED, task.getStatus()); + assertTrue(task.getReasonForIncompletion() != null); + System.out.println(task.getReasonForIncompletion()); + + task.setStatus(Status.SCHEDULED); + task.setScheduledTime(System.currentTimeMillis()); + event.execute(workflow, task, null); + assertEquals(Task.Status.SCHEDULED, task.getStatus()); + + task.setScheduledTime(System.currentTimeMillis() - 610_000); + event.start(workflow, task, null); + assertEquals(Task.Status.FAILED, task.getStatus()); + } + + @Test + public void testDynamicSinks() { + Event event = new Event(eventQueues, parametersUtils); + Workflow workflow = new Workflow(); + workflow.setWorkflowDefinition(testWorkflowDefinition); + + Task task = new Task(); + task.setReferenceTaskName("task0"); + task.setTaskId("task_id_0"); + task.setStatus(Status.IN_PROGRESS); + task.getInputData().put("sink", "conductor:some_arbitary_queue"); + + + ObservableQueue queue = event.getQueue(workflow, task); + assertEquals(Task.Status.IN_PROGRESS, task.getStatus()); + assertNotNull(queue); + assertEquals("testWorkflow:some_arbitary_queue", queue.getName()); + assertEquals("testWorkflow:some_arbitary_queue", queue.getURI()); + assertEquals("conductor", queue.getType()); + assertEquals("conductor:testWorkflow:some_arbitary_queue", task.getOutputData().get("event_produced")); + + task.getInputData().put("sink", "conductor"); + queue = event.getQueue(workflow, task); + assertEquals("not in progress: " + task.getReasonForIncompletion(), Task.Status.IN_PROGRESS, task.getStatus()); + assertNotNull(queue); + assertEquals("testWorkflow:task0", queue.getName()); + + task.getInputData().put("sink", "sqs:my_sqs_queue_name"); + queue = event.getQueue(workflow, task); + assertEquals("not in progress: " + task.getReasonForIncompletion(), Task.Status.IN_PROGRESS, task.getStatus()); + assertNotNull(queue); + assertEquals("my_sqs_queue_name", queue.getName()); + assertEquals("sqs", queue.getType()); + + task.getInputData().put("sink", "sns:my_sqs_queue_name"); + queue = event.getQueue(workflow, task); + assertEquals(Task.Status.FAILED, task.getStatus()); + + } + } diff --git a/core/src/test/java/com/netflix/conductor/core/metadata/MetadataMapperServiceTest.java b/core/src/test/java/com/netflix/conductor/core/metadata/MetadataMapperServiceTest.java new file mode 100644 index 0000000000..1816bf1ce6 --- /dev/null +++ b/core/src/test/java/com/netflix/conductor/core/metadata/MetadataMapperServiceTest.java @@ -0,0 +1,240 @@ +package 
com.netflix.conductor.core.metadata; + +import com.google.common.collect.ImmutableList; +import com.netflix.conductor.common.metadata.tasks.TaskDef; +import com.netflix.conductor.common.metadata.workflow.SubWorkflowParams; +import com.netflix.conductor.common.metadata.workflow.TaskType; +import com.netflix.conductor.common.metadata.workflow.WorkflowDef; +import com.netflix.conductor.common.metadata.workflow.WorkflowTask; +import com.netflix.conductor.core.execution.ApplicationException; +import com.netflix.conductor.core.execution.TerminateWorkflowException; +import com.netflix.conductor.dao.MetadataDAO; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; + +import java.util.List; +import java.util.Optional; + +import static junit.framework.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; + +@RunWith(MockitoJUnitRunner.class) +public class MetadataMapperServiceTest { + + @Mock + private MetadataDAO metadataDAO; + + @InjectMocks + private MetadataMapperService metadataMapperService; + + @Test + public void testMetadataPopulationOnSimpleTask() { + String nameTaskDefinition = "task1"; + TaskDef taskDefinition = createTaskDefinition(nameTaskDefinition); + WorkflowTask workflowTask = createWorkflowTask(nameTaskDefinition); + + when(metadataDAO.getTaskDef(nameTaskDefinition)).thenReturn(taskDefinition); + + WorkflowDef workflowDefinition = createWorkflowDefinition("testMetadataPopulation"); + workflowDefinition.setTasks(ImmutableList.of(workflowTask)); + + metadataMapperService.populateTaskDefinitions(workflowDefinition); + + assertEquals(1, workflowDefinition.getTasks().size()); + WorkflowTask populatedWorkflowTask = workflowDefinition.getTasks().get(0); + assertNotNull(populatedWorkflowTask.getTaskDefinition()); + verify(metadataDAO).getTaskDef(nameTaskDefinition); + } + + @Test + public void testNoMetadataPopulationOnEmbeddedTaskDefinition() { + String nameTaskDefinition = "task2"; + TaskDef taskDefinition = createTaskDefinition(nameTaskDefinition); + WorkflowTask workflowTask = createWorkflowTask(nameTaskDefinition); + workflowTask.setTaskDefinition(taskDefinition); + + WorkflowDef workflowDefinition = createWorkflowDefinition("testMetadataPopulation"); + workflowDefinition.setTasks(ImmutableList.of(workflowTask)); + + metadataMapperService.populateTaskDefinitions(workflowDefinition); + + assertEquals(1, workflowDefinition.getTasks().size()); + WorkflowTask populatedWorkflowTask = workflowDefinition.getTasks().get(0); + assertNotNull(populatedWorkflowTask.getTaskDefinition()); + verifyZeroInteractions(metadataDAO); + } + + @Test + public void testMetadataPopulationOnlyOnNecessaryWorkflowTasks() { + String nameTaskDefinition1 = "task4"; + TaskDef taskDefinition = createTaskDefinition(nameTaskDefinition1); + WorkflowTask workflowTask1 = createWorkflowTask(nameTaskDefinition1); + workflowTask1.setTaskDefinition(taskDefinition); + + String nameTaskDefinition2 = "task5"; + WorkflowTask workflowTask2 = createWorkflowTask(nameTaskDefinition2); + + WorkflowDef workflowDefinition = createWorkflowDefinition("testMetadataPopulation"); + workflowDefinition.setTasks(ImmutableList.of(workflowTask1, workflowTask2)); + + 
when(metadataDAO.getTaskDef(nameTaskDefinition2)).thenReturn(taskDefinition);
+
+ metadataMapperService.populateTaskDefinitions(workflowDefinition);
+
+ assertEquals(2, workflowDefinition.getTasks().size());
+ List<WorkflowTask> workflowTasks = workflowDefinition.getTasks();
+ assertNotNull(workflowTasks.get(0).getTaskDefinition());
+ assertNotNull(workflowTasks.get(1).getTaskDefinition());
+
+ verify(metadataDAO).getTaskDef(nameTaskDefinition2);
+ verifyNoMoreInteractions(metadataDAO);
+ }
+
+ @Test(expected = ApplicationException.class)
+ public void testMetadataPopulationMissingDefinitions() {
+ String nameTaskDefinition1 = "task4";
+ WorkflowTask workflowTask1 = createWorkflowTask(nameTaskDefinition1);
+
+ String nameTaskDefinition2 = "task5";
+ WorkflowTask workflowTask2 = createWorkflowTask(nameTaskDefinition2);
+
+ TaskDef taskDefinition = createTaskDefinition(nameTaskDefinition1);
+
+ WorkflowDef workflowDefinition = createWorkflowDefinition("testMetadataPopulation");
+ workflowDefinition.setTasks(ImmutableList.of(workflowTask1, workflowTask2));
+
+ when(metadataDAO.getTaskDef(nameTaskDefinition1)).thenReturn(taskDefinition);
+ when(metadataDAO.getTaskDef(nameTaskDefinition2)).thenReturn(null);
+
+ metadataMapperService.populateTaskDefinitions(workflowDefinition);
+ }
+
+ @Test
+ public void testVersionPopulationForSubworkflowTaskIfVersionIsNotAvailable() {
+ String nameTaskDefinition = "taskSubworkflow6";
+ String workflowDefinitionName = "subworkflow";
+ Integer version = 3;
+
+ WorkflowDef subWorkflowDefinition = createWorkflowDefinition("workflowDefinitionName");
+ subWorkflowDefinition.setVersion(version);
+
+ WorkflowTask workflowTask = createWorkflowTask(nameTaskDefinition);
+ workflowTask.setWorkflowTaskType(TaskType.SUB_WORKFLOW);
+ SubWorkflowParams subWorkflowParams = new SubWorkflowParams();
+ subWorkflowParams.setName(workflowDefinitionName);
+ workflowTask.setSubWorkflowParam(subWorkflowParams);
+
+ WorkflowDef workflowDefinition = createWorkflowDefinition("testMetadataPopulation");
+ workflowDefinition.setTasks(ImmutableList.of(workflowTask));
+
+ when(metadataDAO.getLatest(workflowDefinitionName)).thenReturn(Optional.of(subWorkflowDefinition));
+
+ metadataMapperService.populateTaskDefinitions(workflowDefinition);
+
+ assertEquals(1, workflowDefinition.getTasks().size());
+ List<WorkflowTask> workflowTasks = workflowDefinition.getTasks();
+ SubWorkflowParams params = workflowTasks.get(0).getSubWorkflowParam();
+
+ assertEquals(workflowDefinitionName, params.getName());
+ assertEquals(version, params.getVersion());
+
+ verify(metadataDAO).getLatest(workflowDefinitionName);
+ verifyNoMoreInteractions(metadataDAO);
+ }
+
+ @Test
+ public void testNoVersionPopulationForSubworkflowTaskIfAvailable() {
+ String nameTaskDefinition = "taskSubworkflow7";
+ String workflowDefinitionName = "subworkflow";
+ Integer version = 2;
+
+ WorkflowTask workflowTask = createWorkflowTask(nameTaskDefinition);
+ workflowTask.setWorkflowTaskType(TaskType.SUB_WORKFLOW);
+ SubWorkflowParams subWorkflowParams = new SubWorkflowParams();
+ subWorkflowParams.setName(workflowDefinitionName);
+ subWorkflowParams.setVersion(version);
+ workflowTask.setSubWorkflowParam(subWorkflowParams);
+
+ WorkflowDef workflowDefinition = createWorkflowDefinition("testMetadataPopulation");
+ workflowDefinition.setTasks(ImmutableList.of(workflowTask));
+
+ metadataMapperService.populateTaskDefinitions(workflowDefinition);
+
+ assertEquals(1, workflowDefinition.getTasks().size());
+ List<WorkflowTask> workflowTasks = workflowDefinition.getTasks();
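+ // The SubWorkflowParams here already carries an explicit version, so the mapper is expected to
+ // leave it untouched and skip any getLatest() lookup (hence the verifyZeroInteractions below).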
+ SubWorkflowParams params = workflowTasks.get(0).getSubWorkflowParam();
+
+ assertEquals(workflowDefinitionName, params.getName());
+ assertEquals(version, params.getVersion());
+
+ verifyZeroInteractions(metadataDAO);
+ }
+
+
+ @Test(expected = TerminateWorkflowException.class)
+ public void testExceptionWhenWorkflowDefinitionNotAvailable() {
+ String nameTaskDefinition = "taskSubworkflow8";
+ String workflowDefinitionName = "subworkflow";
+
+ WorkflowTask workflowTask = createWorkflowTask(nameTaskDefinition);
+ workflowTask.setWorkflowTaskType(TaskType.SUB_WORKFLOW);
+ SubWorkflowParams subWorkflowParams = new SubWorkflowParams();
+ subWorkflowParams.setName(workflowDefinitionName);
+ workflowTask.setSubWorkflowParam(subWorkflowParams);
+
+ WorkflowDef workflowDefinition = createWorkflowDefinition("testMetadataPopulation");
+ workflowDefinition.setTasks(ImmutableList.of(workflowTask));
+
+ when(metadataDAO.getLatest(workflowDefinitionName)).thenReturn(Optional.empty());
+
+ metadataMapperService.populateTaskDefinitions(workflowDefinition);
+
+ verify(metadataDAO).getLatest(workflowDefinitionName);
+ }
+
+ @Test(expected = ApplicationException.class)
+ public void testLookupWorkflowDefinition() {
+ String workflowName = "test";
+ when(metadataDAO.get(workflowName, 0)).thenReturn(Optional.of(new WorkflowDef()));
+ Optional<WorkflowDef> optionalWorkflowDef = metadataMapperService.lookupWorkflowDefinition(workflowName, 0);
+ assertTrue(optionalWorkflowDef.isPresent());
+
+ metadataMapperService.lookupWorkflowDefinition(null, 0);
+ }
+
+ @Test(expected = ApplicationException.class)
+ public void testLookupLatestWorkflowDefinition() {
+ String workflowName = "test";
+ when(metadataDAO.getLatest(workflowName)).thenReturn(Optional.of(new WorkflowDef()));
+ Optional<WorkflowDef> optionalWorkflowDef = metadataMapperService.lookupLatestWorkflowDefinition(workflowName);
+ assertTrue(optionalWorkflowDef.isPresent());
+
+ metadataMapperService.lookupLatestWorkflowDefinition(null);
+ }
+
+ private WorkflowDef createWorkflowDefinition(String name) {
+ WorkflowDef workflowDefinition = new WorkflowDef();
+ workflowDefinition.setName(name);
+ return workflowDefinition;
+ }
+
+ private WorkflowTask createWorkflowTask(String name) {
+ WorkflowTask workflowTask = new WorkflowTask();
+ workflowTask.setName(name);
+ workflowTask.setType(TaskType.SIMPLE.name());
+ return workflowTask;
+ }
+
+ private TaskDef createTaskDefinition(String name) {
+ TaskDef taskDefinition = new TaskDef(name);
+ return taskDefinition;
+ }
+}
diff --git a/core/src/test/java/com/netflix/conductor/core/utils/JsonMapperProviderTest.java b/core/src/test/java/com/netflix/conductor/core/utils/JsonMapperProviderTest.java
new file mode 100644
index 0000000000..ced225cf1a
--- /dev/null
+++ b/core/src/test/java/com/netflix/conductor/core/utils/JsonMapperProviderTest.java
@@ -0,0 +1,42 @@
+package com.netflix.conductor.core.utils;
+
+import com.fasterxml.jackson.core.JsonGenerationException;
+import com.fasterxml.jackson.databind.JsonMappingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.protobuf.Any;
+import com.google.protobuf.Struct;
+import com.google.protobuf.Value;
+import com.netflix.conductor.common.utils.JsonMapperProvider;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringWriter;
+
+import static org.junit.Assert.*;
+
+public class JsonMapperProviderTest {
+ @Test
+ public void testSimpleMapping() throws JsonGenerationException, JsonMappingException, IOException {
+ ObjectMapper m = new JsonMapperProvider().get();
+ assertTrue(m.canSerialize(Any.class));
+
+ Struct struct1 = Struct.newBuilder().putFields(
+ "some-key", Value.newBuilder().setStringValue("some-value").build()
+ ).build();
+
+ Any source = Any.pack(struct1);
+
+ StringWriter buf = new StringWriter();
+ m.writer().writeValue(buf, source);
+
+ Any dest = m.reader().forType(Any.class).readValue(buf.toString());
+ assertEquals(source.getTypeUrl(), dest.getTypeUrl());
+
+ Struct struct2 = dest.unpack(Struct.class);
+ assertTrue(struct2.containsFields("some-key"));
+ assertEquals(
+ struct1.getFieldsOrThrow("some-key").getStringValue(),
+ struct2.getFieldsOrThrow("some-key").getStringValue()
+ );
+ }
+}
\ No newline at end of file
diff --git a/core/src/test/java/com/netflix/conductor/dao/ExecutionDAOTest.java b/core/src/test/java/com/netflix/conductor/dao/ExecutionDAOTest.java
new file mode 100644
index 0000000000..3d228b50f5
--- /dev/null
+++ b/core/src/test/java/com/netflix/conductor/dao/ExecutionDAOTest.java
@@ -0,0 +1,413 @@
+package com.netflix.conductor.dao;
+
+import com.netflix.conductor.common.metadata.tasks.PollData;
+import com.netflix.conductor.common.metadata.tasks.Task;
+import com.netflix.conductor.common.metadata.tasks.TaskDef;
+import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
+import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
+import com.netflix.conductor.common.run.Workflow;
+import com.netflix.conductor.core.execution.ApplicationException;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+import java.util.stream.Collectors;
+
+import static junit.framework.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+public abstract class ExecutionDAOTest {
+
+ abstract protected ExecutionDAO getExecutionDAO();
+
+ @Rule
+ public ExpectedException expected = ExpectedException.none();
+
+ @Test
+ public void testTaskExceedsLimit() {
+ TaskDef taskDefinition = new TaskDef();
+ taskDefinition.setName("task1");
+ taskDefinition.setConcurrentExecLimit(1);
+
+ WorkflowTask workflowTask = new WorkflowTask();
+ workflowTask.setName("task1");
+ workflowTask.setTaskDefinition(taskDefinition);
+ workflowTask.setTaskDefinition(taskDefinition);
+
+ List<Task> tasks = new LinkedList<>();
+ for (int i = 0; i < 15; i++) {
+ Task task = new Task();
+ task.setScheduledTime(1L);
+ task.setSeq(1);
+ task.setTaskId("t_" + i);
+ task.setWorkflowInstanceId("workflow_" + i);
+ task.setReferenceTaskName("task1");
+ task.setTaskDefName("task1");
+ tasks.add(task);
+ task.setStatus(Task.Status.SCHEDULED);
+ task.setWorkflowTask(workflowTask);
+ }
+
+ getExecutionDAO().createTasks(tasks);
+ assertFalse(getExecutionDAO().exceedsInProgressLimit(tasks.get(0)));
+ tasks.get(0).setStatus(Task.Status.IN_PROGRESS);
+ getExecutionDAO().updateTask(tasks.get(0));
+
+ for (Task task : tasks) {
+ assertTrue(getExecutionDAO().exceedsInProgressLimit(task));
+ }
+ }
+
+ @Test
+ public void testCreateTaskException() {
+ Task task = new Task();
+ task.setScheduledTime(1L);
+ task.setSeq(1);
+ task.setTaskId("t1");
+ task.setTaskDefName("task1");
+
+ expected.expect(ApplicationException.class);
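+ // With the ExpectedException rule, the first createTasks() call below is expected to throw and
+ // immediately end this test, so the statements after it never run; the second expectation is
+ // effectively exercised by testCreateTaskException2 instead. A rough equivalent without the rule
+ // (hypothetical sketch only) might be:
+ //   try {
+ //       getExecutionDAO().createTasks(Collections.singletonList(task));
+ //       org.junit.Assert.fail("expected ApplicationException");
+ //   } catch (ApplicationException e) {
+ //       assertTrue(e.getMessage().contains("Workflow instance id cannot be null"));
+ //   }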
expected.expectMessage("Workflow instance id cannot be null"); + getExecutionDAO().createTasks(Collections.singletonList(task)); + + task.setWorkflowInstanceId("wfid"); + expected.expect(ApplicationException.class); + expected.expectMessage("Task reference name cannot be null"); + getExecutionDAO().createTasks(Collections.singletonList(task)); + } + + @Test + public void testCreateTaskException2() { + Task task = new Task(); + task.setScheduledTime(1L); + task.setSeq(1); + task.setTaskId("t1"); + task.setTaskDefName("task1"); + task.setWorkflowInstanceId("wfid"); + + expected.expect(ApplicationException.class); + expected.expectMessage("Task reference name cannot be null"); + getExecutionDAO().createTasks(Collections.singletonList(task)); + } + + @Test + public void testPollData() { + getExecutionDAO().updateLastPoll("taskDef", null, "workerId1"); + PollData pd = getExecutionDAO().getPollData("taskDef", null); + assertNotNull(pd); + assertTrue(pd.getLastPollTime() > 0); + assertEquals(pd.getQueueName(), "taskDef"); + assertEquals(pd.getDomain(), null); + assertEquals(pd.getWorkerId(), "workerId1"); + + getExecutionDAO().updateLastPoll("taskDef", "domain1", "workerId1"); + pd = getExecutionDAO().getPollData("taskDef", "domain1"); + assertNotNull(pd); + assertTrue(pd.getLastPollTime() > 0); + assertEquals(pd.getQueueName(), "taskDef"); + assertEquals(pd.getDomain(), "domain1"); + assertEquals(pd.getWorkerId(), "workerId1"); + + List pData = getExecutionDAO().getPollData("taskDef"); + assertEquals(pData.size(), 2); + + pd = getExecutionDAO().getPollData("taskDef", "domain2"); + assertTrue(pd == null); + } + + @Test + public void testTaskCreateDups() { + List tasks = new LinkedList<>(); + String workflowId = UUID.randomUUID().toString(); + + for (int i = 0; i < 3; i++) { + Task task = new Task(); + task.setScheduledTime(1L); + task.setSeq(1); + task.setTaskId(workflowId + "_t" + i); + task.setReferenceTaskName("t" + i); + task.setRetryCount(0); + task.setWorkflowInstanceId(workflowId); + task.setTaskDefName("task" + i); + task.setStatus(Task.Status.IN_PROGRESS); + tasks.add(task); + } + + //Let's insert a retried task + Task task = new Task(); + task.setScheduledTime(1L); + task.setSeq(1); + task.setTaskId(workflowId + "_t" + 2); + task.setReferenceTaskName("t" + 2); + task.setRetryCount(1); + task.setWorkflowInstanceId(workflowId); + task.setTaskDefName("task" + 2); + task.setStatus(Task.Status.IN_PROGRESS); + tasks.add(task); + + //Duplicate task! + task = new Task(); + task.setScheduledTime(1L); + task.setSeq(1); + task.setTaskId(workflowId + "_t" + 1); + task.setReferenceTaskName("t" + 1); + task.setRetryCount(0); + task.setWorkflowInstanceId(workflowId); + task.setTaskDefName("task" + 1); + task.setStatus(Task.Status.IN_PROGRESS); + tasks.add(task); + + List created = getExecutionDAO().createTasks(tasks); + assertEquals(tasks.size() - 1, created.size()); //1 less + + Set srcIds = tasks.stream().map(t -> t.getReferenceTaskName() + "." + t.getRetryCount()).collect(Collectors.toSet()); + Set createdIds = created.stream().map(t -> t.getReferenceTaskName() + "." 
+
+ assertEquals(srcIds, createdIds);
+
+ List<Task> pending = getExecutionDAO().getPendingTasksByWorkflow("task0", workflowId);
+ assertNotNull(pending);
+ assertEquals(1, pending.size());
+ assertTrue(EqualsBuilder.reflectionEquals(tasks.get(0), pending.get(0)));
+
+ List<Task> found = getExecutionDAO().getTasks(tasks.get(0).getTaskDefName(), null, 1);
+ assertNotNull(found);
+ assertEquals(1, found.size());
+ assertTrue(EqualsBuilder.reflectionEquals(tasks.get(0), found.get(0)));
+ }
+
+ @Test
+ public void testTaskOps() {
+ List<Task> tasks = new LinkedList<>();
+ String workflowId = UUID.randomUUID().toString();
+
+ for (int i = 0; i < 3; i++) {
+ Task task = new Task();
+ task.setScheduledTime(1L);
+ task.setSeq(1);
+ task.setTaskId(workflowId + "_t" + i);
+ task.setReferenceTaskName("testTaskOps" + i);
+ task.setRetryCount(0);
+ task.setWorkflowInstanceId(workflowId);
+ task.setTaskDefName("testTaskOps" + i);
+ task.setStatus(Task.Status.IN_PROGRESS);
+ tasks.add(task);
+ }
+
+ for (int i = 0; i < 3; i++) {
+ Task task = new Task();
+ task.setScheduledTime(1L);
+ task.setSeq(1);
+ task.setTaskId("x" + workflowId + "_t" + i);
+ task.setReferenceTaskName("testTaskOps" + i);
+ task.setRetryCount(0);
+ task.setWorkflowInstanceId("x" + workflowId);
+ task.setTaskDefName("testTaskOps" + i);
+ task.setStatus(Task.Status.IN_PROGRESS);
+ getExecutionDAO().createTasks(Arrays.asList(task));
+ }
+
+
+ List<Task> created = getExecutionDAO().createTasks(tasks);
+ assertEquals(tasks.size(), created.size());
+
+ List<Task> pending = getExecutionDAO().getPendingTasksForTaskType(tasks.get(0).getTaskDefName());
+ assertNotNull(pending);
+ assertEquals(2, pending.size());
+ //Pending list can come in any order. finding the one we are looking for and then comparing
+ Task matching = pending.stream().filter(task -> task.getTaskId().equals(tasks.get(0).getTaskId())).findAny().get();
+ assertTrue(EqualsBuilder.reflectionEquals(matching, tasks.get(0)));
+
+ List<Task> update = new LinkedList<>();
+ for (int i = 0; i < 3; i++) {
+ Task found = getExecutionDAO().getTask(workflowId + "_t" + i);
+ assertNotNull(found);
+ found.getOutputData().put("updated", true);
+ found.setStatus(Task.Status.COMPLETED);
+ update.add(found);
+ }
+ getExecutionDAO().updateTasks(update);
+
+ List<String> taskIds = tasks.stream().map(Task::getTaskId).collect(Collectors.toList());
+ List<Task> found = getExecutionDAO().getTasks(taskIds);
+ assertEquals(taskIds.size(), found.size());
+ found.forEach(task -> {
+ assertTrue(task.getOutputData().containsKey("updated"));
+ assertEquals(true, task.getOutputData().get("updated"));
+ getExecutionDAO().removeTask(task.getTaskId());
+ });
+
+ found = getExecutionDAO().getTasks(taskIds);
+ assertTrue(found.isEmpty());
+ }
+
+ @Test
+ public void testPending() {
+ WorkflowDef def = new WorkflowDef();
+ def.setName("pending_count_test");
+
+ Workflow workflow = createTestWorkflow();
+ workflow.setWorkflowDefinition(def);
+
+ String idBase = workflow.getWorkflowId();
+ generateWorkflows(workflow, idBase, 10);
+
+ long count = getExecutionDAO().getPendingWorkflowCount(def.getName());
+ assertEquals(10, count);
+
+ for (int i = 0; i < 10; i++) {
+ getExecutionDAO().removeFromPendingWorkflow(def.getName(), "x" + i + idBase);
+ }
+ count = getExecutionDAO().getPendingWorkflowCount(def.getName());
+ assertEquals(0, count);
+ }
+
+ @Test
+ public void complexExecutionTest() {
+ Workflow workflow = createTestWorkflow();
+
+ String workflowId = getExecutionDAO().createWorkflow(workflow);
+ List<Task> created = getExecutionDAO().createTasks(workflow.getTasks());
+ assertEquals(workflow.getTasks().size(), created.size());
+
+ Workflow workflowWithTasks = getExecutionDAO().getWorkflow(workflow.getWorkflowId(), true);
+ assertEquals(workflowWithTasks.getWorkflowId(), workflowId);
+ assertTrue(!workflowWithTasks.getTasks().isEmpty());
+
+ assertEquals(workflow.getWorkflowId(), workflowId);
+ Workflow found = getExecutionDAO().getWorkflow(workflowId, false);
+ assertTrue(found.getTasks().isEmpty());
+
+ workflow.getTasks().clear();
+ assertEquals(workflow, found);
+
+ workflow.getInput().put("updated", true);
+ getExecutionDAO().updateWorkflow(workflow);
+ found = getExecutionDAO().getWorkflow(workflowId);
+ assertNotNull(found);
+ assertTrue(found.getInput().containsKey("updated"));
+ assertEquals(true, found.getInput().get("updated"));
+
+ List<String> running = getExecutionDAO().getRunningWorkflowIds(workflow.getWorkflowName());
+ assertNotNull(running);
+ assertTrue(running.isEmpty());
+
+ workflow.setStatus(Workflow.WorkflowStatus.RUNNING);
+ getExecutionDAO().updateWorkflow(workflow);
+
+ running = getExecutionDAO().getRunningWorkflowIds(workflow.getWorkflowName());
+ assertNotNull(running);
+ assertEquals(1, running.size());
+ assertEquals(workflow.getWorkflowId(), running.get(0));
+
+ List<Workflow> pending = getExecutionDAO().getPendingWorkflowsByType(workflow.getWorkflowName());
+ assertNotNull(pending);
+ assertEquals(1, pending.size());
+ assertEquals(3, pending.get(0).getTasks().size());
+ pending.get(0).getTasks().clear();
+ assertEquals(workflow, pending.get(0));
+
+ workflow.setStatus(Workflow.WorkflowStatus.COMPLETED);
+ getExecutionDAO().updateWorkflow(workflow);
+ running = getExecutionDAO().getRunningWorkflowIds(workflow.getWorkflowName());
+ assertNotNull(running);
+ assertTrue(running.isEmpty());
+
+ List<Workflow> bytime = getExecutionDAO().getWorkflowsByType(workflow.getWorkflowName(), System.currentTimeMillis(), System.currentTimeMillis() + 100);
+ assertNotNull(bytime);
+ assertTrue(bytime.isEmpty());
+
+ bytime = getExecutionDAO().getWorkflowsByType(workflow.getWorkflowName(), workflow.getCreateTime() - 10, workflow.getCreateTime() + 10);
+ assertNotNull(bytime);
+ assertEquals(1, bytime.size());
+ }
+
+ protected Workflow createTestWorkflow() {
+ WorkflowDef def = new WorkflowDef();
+ def.setName("Junit Workflow");
+ def.setVersion(3);
+ def.setSchemaVersion(2);
+
+ Workflow workflow = new Workflow();
+ workflow.setWorkflowDefinition(def);
+ workflow.setCorrelationId("correlationX");
+ workflow.setCreatedBy("junit_tester");
+ workflow.setEndTime(200L);
+
+ Map<String, Object> input = new HashMap<>();
+ input.put("param1", "param1 value");
+ input.put("param2", 100);
+ workflow.setInput(input);
+
+ Map<String, Object> output = new HashMap<>();
+ output.put("ouput1", "output 1 value");
+ output.put("op2", 300);
+ workflow.setOutput(output);
+
+ workflow.setOwnerApp("workflow");
+ workflow.setParentWorkflowId("parentWorkflowId");
+ workflow.setParentWorkflowTaskId("parentWFTaskId");
+ workflow.setReasonForIncompletion("missing recipe");
+ workflow.setReRunFromWorkflowId("re-run from id1");
+ workflow.setStartTime(90L);
+ workflow.setStatus(Workflow.WorkflowStatus.FAILED);
+ workflow.setWorkflowId("workflow0");
+
+ List<Task> tasks = new LinkedList<>();
+
+ Task task = new Task();
+ task.setScheduledTime(1L);
+ task.setSeq(1);
+ task.setTaskId("t1");
+ task.setReferenceTaskName("t1");
+ task.setWorkflowInstanceId(workflow.getWorkflowId());
+ task.setTaskDefName("task1");
+
+ Task task2 = new Task();
+ task2.setScheduledTime(2L);
task2.setSeq(2); + task2.setTaskId("t2"); + task2.setReferenceTaskName("t2"); + task2.setWorkflowInstanceId(workflow.getWorkflowId()); + task2.setTaskDefName("task2"); + + Task task3 = new Task(); + task3.setScheduledTime(2L); + task3.setSeq(3); + task3.setTaskId("t3"); + task3.setReferenceTaskName("t3"); + task3.setWorkflowInstanceId(workflow.getWorkflowId()); + task3.setTaskDefName("task3"); + + tasks.add(task); + tasks.add(task2); + tasks.add(task3); + + workflow.setTasks(tasks); + + workflow.setUpdatedBy("junit_tester"); + workflow.setUpdateTime(800L); + + return workflow; + } + + protected void generateWorkflows(Workflow base, String baseId, int count) { + for (int i = 0; i < count; i++) { + base.setWorkflowId("x" + i + baseId); + base.setCorrelationId("corr001"); + base.setStatus(Workflow.WorkflowStatus.RUNNING); + getExecutionDAO().createWorkflow(base); + } + } +} diff --git a/core/src/test/java/com/netflix/conductor/service/WorkflowServiceTest.java b/core/src/test/java/com/netflix/conductor/service/WorkflowServiceTest.java index 33955f4957..f0eea22335 100644 --- a/core/src/test/java/com/netflix/conductor/service/WorkflowServiceTest.java +++ b/core/src/test/java/com/netflix/conductor/service/WorkflowServiceTest.java @@ -75,23 +75,6 @@ public void testStartWorkflow() { assertEquals("w112", workflowService.startWorkflow(startWorkflowRequest)); } - @Test(expected = ApplicationException.class) - public void testApplicationExceptionStartWorkflowMessage() { - try { - when(mockMetadata.getWorkflowDef(anyString(), anyInt())).thenReturn(null); - - StartWorkflowRequest startWorkflowRequest = new StartWorkflowRequest(); - startWorkflowRequest.setName("w123"); - startWorkflowRequest.setVersion(1); - workflowService.startWorkflow(startWorkflowRequest); - } catch (ApplicationException ex) { - String message = "No such workflow found by name: w123, version: 1"; - assertEquals(message, ex.getMessage()); - throw ex; - } - fail("ApplicationException did not throw!"); - } - @Test public void testStartWorkflowParam() { WorkflowDef workflowDef = new WorkflowDef(); diff --git a/core/src/test/resources/conditional_flow.json b/core/src/test/resources/conditional_flow.json index 2f057b756b..d3345892e2 100644 --- a/core/src/test/resources/conditional_flow.json +++ b/core/src/test/resources/conditional_flow.json @@ -31,13 +31,51 @@ "p2": "workflow.input.param2" }, "type": "SIMPLE", - "startDelay": 0 + "startDelay": 0, + "taskDefinition": { + "ownerApp": null, + "createTime": null, + "updateTime": null, + "createdBy": null, + "updatedBy": null, + "name": "junit_task_1", + "description": "junit_task_1", + "retryCount": 1, + "timeoutSeconds": 0, + "inputKeys": [], + "outputKeys": [], + "timeoutPolicy": "TIME_OUT_WF", + "retryLogic": "FIXED", + "retryDelaySeconds": 60, + "responseTimeoutSeconds": 3600, + "concurrentExecLimit": null, + "inputTemplate": {} + } }, { "name": "junit_task_3", "taskReferenceName": "t3", "type": "SIMPLE", - "startDelay": 0 + "startDelay": 0, + "taskDefinition": { + "ownerApp": null, + "createTime": null, + "updateTime": null, + "createdBy": null, + "updatedBy": null, + "name": "junit_task_3", + "description": "junit_task_3", + "retryCount": 1, + "timeoutSeconds": 0, + "inputKeys": [], + "outputKeys": [], + "timeoutPolicy": "TIME_OUT_WF", + "retryLogic": "FIXED", + "retryDelaySeconds": 60, + "responseTimeoutSeconds": 3600, + "concurrentExecLimit": null, + "inputTemplate": {} + } } ], "two": [ @@ -49,7 +87,26 @@ "tp3": "workflow.input.param2" }, "type": "SIMPLE", - "startDelay": 0 + 
"startDelay": 0, + "taskDefinition": { + "ownerApp": null, + "createTime": null, + "updateTime": null, + "createdBy": null, + "updatedBy": null, + "name": "junit_task_2", + "description": "junit_task_2", + "retryCount": 1, + "timeoutSeconds": 0, + "inputKeys": [], + "outputKeys": [], + "timeoutPolicy": "TIME_OUT_WF", + "retryLogic": "FIXED", + "retryDelaySeconds": 60, + "responseTimeoutSeconds": 3600, + "concurrentExecLimit": null, + "inputTemplate": {} + } } ] }, @@ -61,7 +118,26 @@ "name": "junit_task_3", "taskReferenceName": "t3", "type": "SIMPLE", - "startDelay": 0 + "startDelay": 0, + "taskDefinition": { + "ownerApp": null, + "createTime": null, + "updateTime": null, + "createdBy": null, + "updatedBy": null, + "name": "junit_task_3", + "description": "junit_task_3", + "retryCount": 1, + "timeoutSeconds": 0, + "inputKeys": [], + "outputKeys": [], + "timeoutPolicy": "TIME_OUT_WF", + "retryLogic": "FIXED", + "retryDelaySeconds": 60, + "responseTimeoutSeconds": 3600, + "concurrentExecLimit": null, + "inputTemplate": {} + } } ] }, @@ -74,7 +150,26 @@ "tp3": "workflow.input.param2" }, "type": "SIMPLE", - "startDelay": 0 + "startDelay": 0, + "taskDefinition": { + "ownerApp": null, + "createTime": null, + "updateTime": null, + "createdBy": null, + "updatedBy": null, + "name": "junit_task_2", + "description": "junit_task_2", + "retryCount": 1, + "timeoutSeconds": 0, + "inputKeys": [], + "outputKeys": [], + "timeoutPolicy": "TIME_OUT_WF", + "retryLogic": "FIXED", + "retryDelaySeconds": 60, + "responseTimeoutSeconds": 3600, + "concurrentExecLimit": null, + "inputTemplate": {} + } } ], "startDelay": 0 @@ -93,7 +188,26 @@ "name": "junit_task_4", "taskReferenceName": "junit_task_4", "type": "SIMPLE", - "startDelay": 0 + "startDelay": 0, + "taskDefinition": { + "ownerApp": null, + "createTime": null, + "updateTime": null, + "createdBy": null, + "updatedBy": null, + "name": "junit_task_4", + "description": "junit_task_4", + "retryCount": 1, + "timeoutSeconds": 0, + "inputKeys": [], + "outputKeys": [], + "timeoutPolicy": "TIME_OUT_WF", + "retryLogic": "FIXED", + "retryDelaySeconds": 60, + "responseTimeoutSeconds": 3600, + "concurrentExecLimit": null, + "inputTemplate": {} + } } ] }, diff --git a/core/src/test/resources/def.json b/core/src/test/resources/def.json deleted file mode 100644 index 393807aaa8..0000000000 --- a/core/src/test/resources/def.json +++ /dev/null @@ -1 +0,0 @@ 
-{"createTime":1477681181098,"updateTime":1502738273998,"name":"performance_test_1","description":"performance_test_1","version":1,"tasks":[{"name":"perf_task_1","taskReferenceName":"perf_task_1","inputParameters":{"mod":"${workflow.input.mod}","oddEven":"${workflow.input.oddEven}"},"type":"SIMPLE","startDelay":0},{"name":"dyntask","taskReferenceName":"perf_task_2","inputParameters":{"taskToExecute":"${workflow.input.task2Name}"},"type":"DYNAMIC","dynamicTaskNameParam":"taskToExecute","startDelay":0},{"name":"perf_task_3","taskReferenceName":"perf_task_3","inputParameters":{"mod":"${perf_task_2.output.mod}","oddEven":"${perf_task_2.output.oddEven}"},"type":"SIMPLE","startDelay":0},{"name":"get_from_es","taskReferenceName":"get_es_1","type":"HTTP","startDelay":0},{"name":"oddEvenDecision","taskReferenceName":"oddEvenDecision","inputParameters":{"oddEven":"${perf_task_3.output.oddEven}"},"type":"DECISION","caseValueParam":"oddEven","decisionCases":{"0":[{"name":"perf_task_4","taskReferenceName":"perf_task_4","inputParameters":{"mod":"${perf_task_3.output.mod}","oddEven":"${perf_task_3.output.oddEven}"},"type":"SIMPLE","startDelay":0},{"name":"dynamic_fanout","taskReferenceName":"fanout1","inputParameters":{"dynamicTasks":"${perf_task_4.output.dynamicTasks}","input":"${perf_task_4.output.inputs}"},"type":"FORK_JOIN_DYNAMIC","dynamicForkTasksParam":"dynamicTasks","dynamicForkTasksInputParamName":"input","startDelay":0},{"name":"dynamic_join","taskReferenceName":"join1","type":"JOIN","startDelay":0},{"name":"perf_task_5","taskReferenceName":"perf_task_5","inputParameters":{"mod":"${perf_task_4.output.mod}","oddEven":"${perf_task_4.output.oddEven}"},"type":"SIMPLE","startDelay":0},{"name":"perf_task_6","taskReferenceName":"perf_task_6","inputParameters":{"mod":"${perf_task_5.output.mod}","oddEven":"${perf_task_5.output.oddEven}"},"type":"SIMPLE","startDelay":0}],"1":[{"name":"perf_task_7","taskReferenceName":"perf_task_7","inputParameters":{"mod":"${perf_task_3.output.mod}","oddEven":"${perf_task_3.output.oddEven}"},"type":"SIMPLE","startDelay":0},{"name":"perf_task_8","taskReferenceName":"perf_task_8","inputParameters":{"mod":"${perf_task_7.output.mod}","oddEven":"${perf_task_7.output.oddEven}"},"type":"SIMPLE","startDelay":0},{"name":"perf_task_9","taskReferenceName":"perf_task_9","inputParameters":{"mod":"${perf_task_8.output.mod}","oddEven":"${perf_task_8.output.oddEven}"},"type":"SIMPLE","startDelay":0},{"name":"modDecision","taskReferenceName":"modDecision","inputParameters":{"mod":"${perf_task_8.output.mod}"},"type":"DECISION","caseValueParam":"mod","decisionCases":{"0":[{"name":"perf_task_12","taskReferenceName":"perf_task_12","inputParameters":{"mod":"${perf_task_9.output.mod}","oddEven":"${perf_task_9.output.oddEven}"},"type":"SIMPLE","startDelay":0},{"name":"perf_task_13","taskReferenceName":"perf_task_13","inputParameters":{"mod":"${perf_task_12.output.mod}","oddEven":"${perf_task_12.output.oddEven}"},"type":"SIMPLE","startDelay":0},{"name":"sub_workflow_x","taskReferenceName":"wf1","inputParameters":{"mod":"${perf_task_12.output.mod}","oddEven":"${perf_task_12.output.oddEven}"},"type":"SUB_WORKFLOW","startDelay":0,"subWorkflowParam":{"name":"sub_flow_1","version":1}}],"1":[{"name":"perf_task_15","taskReferenceName":"perf_task_15","inputParameters":{"mod":"${perf_task_9.output.mod}","oddEven":"${perf_task_9.output.oddEven}"},"type":"SIMPLE","startDelay":0},{"name":"perf_task_16","taskReferenceName":"perf_task_16","inputParameters":{"mod":"${perf_task_15.output.mod}","oddEven":"${perf_
task_15.output.oddEven}"},"type":"SIMPLE","startDelay":0},{"name":"sub_workflow_x","taskReferenceName":"wf2","inputParameters":{"mod":"${perf_task_12.output.mod}","oddEven":"${perf_task_12.output.oddEven}"},"type":"SUB_WORKFLOW","startDelay":0,"subWorkflowParam":{"name":"sub_flow_1","version":1}}],"4":[{"name":"perf_task_18","taskReferenceName":"perf_task_18","inputParameters":{"mod":"${perf_task_9.output.mod}","oddEven":"${perf_task_9.output.oddEven}"},"type":"SIMPLE","startDelay":0},{"name":"perf_task_19","taskReferenceName":"perf_task_19","inputParameters":{"mod":"${perf_task_18.output.mod}","oddEven":"${perf_task_18.output.oddEven}"},"type":"SIMPLE","startDelay":0}],"5":[{"name":"perf_task_21","taskReferenceName":"perf_task_21","inputParameters":{"mod":"${perf_task_9.output.mod}","oddEven":"${perf_task_9.output.oddEven}"},"type":"SIMPLE","startDelay":0},{"name":"sub_workflow_x","taskReferenceName":"wf3","inputParameters":{"mod":"${perf_task_12.output.mod}","oddEven":"${perf_task_12.output.oddEven}"},"type":"SUB_WORKFLOW","startDelay":0,"subWorkflowParam":{"name":"sub_flow_1","version":1}},{"name":"perf_task_22","taskReferenceName":"perf_task_22","inputParameters":{"mod":"${perf_task_21.output.mod}","oddEven":"${perf_task_21.output.oddEven}"},"type":"SIMPLE","startDelay":0}]},"defaultCase":[{"name":"perf_task_24","taskReferenceName":"perf_task_24","inputParameters":{"mod":"${perf_task_9.output.mod}","oddEven":"${perf_task_9.output.oddEven}"},"type":"SIMPLE","startDelay":0},{"name":"sub_workflow_x","taskReferenceName":"wf4","inputParameters":{"mod":"${perf_task_12.output.mod}","oddEven":"${perf_task_12.output.oddEven}"},"type":"SUB_WORKFLOW","startDelay":0,"subWorkflowParam":{"name":"sub_flow_1","version":1}},{"name":"perf_task_25","taskReferenceName":"perf_task_25","inputParameters":{"mod":"${perf_task_24.output.mod}","oddEven":"${perf_task_24.output.oddEven}"},"type":"SIMPLE","startDelay":0}],"startDelay":0}]},"startDelay":0},{"name":"perf_task_28","taskReferenceName":"perf_task_28","inputParameters":{"mod":"${perf_task_3.output.mod}","oddEven":"${perf_task_3.output.oddEven}"},"type":"SIMPLE","startDelay":0},{"name":"perf_task_29","taskReferenceName":"perf_task_29","inputParameters":{"mod":"${perf_task_28.output.mod}","oddEven":"${perf_task_28.output.oddEven}"},"type":"SIMPLE","startDelay":0},{"name":"perf_task_30","taskReferenceName":"perf_task_30","inputParameters":{"mod":"${perf_task_29.output.mod}","oddEven":"${perf_task_29.output.oddEven}"},"type":"SIMPLE","startDelay":0}],"schemaVersion":2} \ No newline at end of file diff --git a/core/src/test/resources/test.json b/core/src/test/resources/test.json index 523a73f11e..e2c1a8b5da 100644 --- a/core/src/test/resources/test.json +++ b/core/src/test/resources/test.json @@ -1,941 +1,1277 @@ { - "ownerApp": "cpeworkflowtests", - "createTime": 1505587453961, - "updateTime": 1505588471071, - "status": "RUNNING", - "endTime": 0, - "workflowId": "46e2d0d7-0809-40f2-9f22-bed9d41f6613", - "tasks": [ - { - "taskType": "perf_task_1", - "status": "COMPLETED", - "inputData": { - "mod": "0", - "oddEven": "0" - }, - "referenceTaskName": "perf_task_1", - "retryCount": 0, - "seq": 1, - "correlationId": "1505587453950", - "pollCount": 1, - "taskDefName": "perf_task_1", - "scheduledTime": 1505587453972, - "startTime": 1505587455481, - "endTime": 1505587455539, - "updateTime": 1505587455539, - "startDelayInSeconds": 0, - "retried": false, - "executed": true, - "callbackFromWorker": true, - "responseTimeoutSeconds": 3600, - "workflowInstanceId": 
"46e2d0d7-0809-40f2-9f22-bed9d41f6613", - "taskId": "3a54e268-0054-4eab-aea2-e54d1b89896c", - "callbackAfterSeconds": 0, - "outputData": { - "mod": "5", - "oddEven": "1", - "inputs": { - "subflow_0": { - "mod": 4, - "oddEven": 0 - }, - "subflow_4": { - "mod": 4, - "oddEven": 0 - }, - "subflow_2": { - "mod": 4, - "oddEven": 0 - } - }, - "dynamicTasks": [ - { - "name": null, - "taskReferenceName": "subflow_0", - "description": null, - "inputParameters": null, - "type": "SUB_WORKFLOW", - "dynamicTaskNameParam": null, - "caseValueParam": null, - "caseExpression": null, - "decisionCases": { - - }, - "dynamicForkJoinTasksParam": null, - "dynamicForkTasksParam": null, - "dynamicForkTasksInputParamName": null, - "defaultCase": [], - "forkTasks": [], - "startDelay": 0, - "joinOn": [], - "sink": null, - "optional": null, - "subWorkflowParam": { - "name": "sub_flow_1", - "version": null - } - }, - { - "name": null, - "taskReferenceName": "subflow_2", - "description": null, - "inputParameters": null, - "type": "SUB_WORKFLOW", - "dynamicTaskNameParam": null, - "caseValueParam": null, - "caseExpression": null, - "decisionCases": { - - }, - "dynamicForkJoinTasksParam": null, - "dynamicForkTasksParam": null, - "dynamicForkTasksInputParamName": null, - "defaultCase": [], - "forkTasks": [], - "startDelay": 0, - "joinOn": [], - "sink": null, - "optional": null, - "subWorkflowParam": { - "name": "sub_flow_1", - "version": null - } - }, - { - "name": null, - "taskReferenceName": "subflow_4", - "description": null, - "inputParameters": null, - "type": "SUB_WORKFLOW", - "dynamicTaskNameParam": null, - "caseValueParam": null, - "caseExpression": null, - "decisionCases": { - - }, - "dynamicForkJoinTasksParam": null, - "dynamicForkTasksParam": null, - "dynamicForkTasksInputParamName": null, - "defaultCase": [], - "forkTasks": [], - "startDelay": 0, - "joinOn": [], - "sink": null, - "optional": null, - "subWorkflowParam": { - "name": "sub_flow_1", - "version": null - } - } - ], - "attempt": 1 - }, - "workflowTask": { - "name": "perf_task_1", - "taskReferenceName": "perf_task_1", - "inputParameters": { - "mod": "${workflow.input.mod}", - "oddEven": "${workflow.input.oddEven}" - }, - "type": "SIMPLE", - "startDelay": 0 - }, - "queueWaitTime": 1509, - "taskStatus": "COMPLETED" - }, - { - "taskType": "perf_task_10", - "status": "COMPLETED", - "inputData": { - "taskToExecute": "perf_task_10" - }, - "referenceTaskName": "perf_task_2", - "retryCount": 0, - "seq": 2, - "correlationId": "1505587453950", - "pollCount": 1, - "taskDefName": "perf_task_10", - "scheduledTime": 1505587455517, - "startTime": 1505587457017, - "endTime": 1505587457075, - "updateTime": 1505587457075, - "startDelayInSeconds": 0, - "retried": false, - "executed": true, - "callbackFromWorker": true, - "responseTimeoutSeconds": 3600, - "workflowInstanceId": "46e2d0d7-0809-40f2-9f22-bed9d41f6613", - "taskId": "3731c3ee-f918-42b7-8bb3-fb016fc0ecae", - "callbackAfterSeconds": 0, - "outputData": { - "mod": "1", - "oddEven": "1", - "inputs": { - "subflow_0": { - "mod": 4, - "oddEven": 0 - }, - "subflow_4": { - "mod": 4, - "oddEven": 0 - }, - "subflow_2": { - "mod": 4, - "oddEven": 0 - } - }, - "dynamicTasks": [ - { - "name": null, - "taskReferenceName": "subflow_0", - "description": null, - "inputParameters": null, - "type": "SUB_WORKFLOW", - "dynamicTaskNameParam": null, - "caseValueParam": null, - "caseExpression": null, - "decisionCases": { - - }, - "dynamicForkJoinTasksParam": null, - "dynamicForkTasksParam": null, - "dynamicForkTasksInputParamName": null, 
- "defaultCase": [], - "forkTasks": [], - "startDelay": 0, - "joinOn": [], - "sink": null, - "optional": null, - "subWorkflowParam": { - "name": "sub_flow_1", - "version": null - } - }, - { - "name": null, - "taskReferenceName": "subflow_2", - "description": null, - "inputParameters": null, - "type": "SUB_WORKFLOW", - "dynamicTaskNameParam": null, - "caseValueParam": null, - "caseExpression": null, - "decisionCases": { - - }, - "dynamicForkJoinTasksParam": null, - "dynamicForkTasksParam": null, - "dynamicForkTasksInputParamName": null, - "defaultCase": [], - "forkTasks": [], - "startDelay": 0, - "joinOn": [], - "sink": null, - "optional": null, - "subWorkflowParam": { - "name": "sub_flow_1", - "version": null - } - }, - { - "name": null, - "taskReferenceName": "subflow_4", - "description": null, - "inputParameters": null, - "type": "SUB_WORKFLOW", - "dynamicTaskNameParam": null, - "caseValueParam": null, - "caseExpression": null, - "decisionCases": { - - }, - "dynamicForkJoinTasksParam": null, - "dynamicForkTasksParam": null, - "dynamicForkTasksInputParamName": null, - "defaultCase": [], - "forkTasks": [], - "startDelay": 0, - "joinOn": [], - "sink": null, - "optional": null, - "subWorkflowParam": { - "name": "sub_flow_1", - "version": null - } - } - ], - "attempt": 1 - }, - "workflowTask": { - "name": "perf_task_10", - "taskReferenceName": "perf_task_2", - "inputParameters": { - "taskToExecute": "${workflow.input.task2Name}" - }, - "type": "DYNAMIC", - "dynamicTaskNameParam": "taskToExecute", - "startDelay": 0 - }, - "queueWaitTime": 1500, - "taskStatus": "COMPLETED" - }, - { - "taskType": "perf_task_3", - "status": "COMPLETED", - "inputData": { - "mod": "1", - "oddEven": "1" - }, - "referenceTaskName": "perf_task_3", - "retryCount": 0, - "seq": 3, - "correlationId": "1505587453950", - "pollCount": 1, - "taskDefName": "perf_task_3", - "scheduledTime": 1505587457064, - "startTime": 1505587459498, - "endTime": 1505587459560, - "updateTime": 1505587459560, - "startDelayInSeconds": 0, - "retried": false, - "executed": true, - "callbackFromWorker": true, - "responseTimeoutSeconds": 3600, - "workflowInstanceId": "46e2d0d7-0809-40f2-9f22-bed9d41f6613", - "taskId": "738370d6-596f-4ae5-95bf-ca635c7f10dd", - "callbackAfterSeconds": 0, - "outputData": { - "mod": "6", - "oddEven": "0", - "inputs": { - "subflow_0": { - "mod": 4, - "oddEven": 0 - }, - "subflow_4": { - "mod": 4, - "oddEven": 0 - }, - "subflow_2": { - "mod": 4, - "oddEven": 0 - } - }, - "dynamicTasks": [ - { - "name": null, - "taskReferenceName": "subflow_0", - "description": null, - "inputParameters": null, - "type": "SUB_WORKFLOW", - "dynamicTaskNameParam": null, - "caseValueParam": null, - "caseExpression": null, - "decisionCases": { - - }, - "dynamicForkJoinTasksParam": null, - "dynamicForkTasksParam": null, - "dynamicForkTasksInputParamName": null, - "defaultCase": [], - "forkTasks": [], - "startDelay": 0, - "joinOn": [], - "sink": null, - "optional": null, - "subWorkflowParam": { - "name": "sub_flow_1", - "version": null - } - }, - { - "name": null, - "taskReferenceName": "subflow_2", - "description": null, - "inputParameters": null, - "type": "SUB_WORKFLOW", - "dynamicTaskNameParam": null, - "caseValueParam": null, - "caseExpression": null, - "decisionCases": { - - }, - "dynamicForkJoinTasksParam": null, - "dynamicForkTasksParam": null, - "dynamicForkTasksInputParamName": null, - "defaultCase": [], - "forkTasks": [], - "startDelay": 0, - "joinOn": [], - "sink": null, - "optional": null, - "subWorkflowParam": { - "name": 
"sub_flow_1", - "version": null - } - }, - { - "name": null, - "taskReferenceName": "subflow_4", - "description": null, - "inputParameters": null, - "type": "SUB_WORKFLOW", - "dynamicTaskNameParam": null, - "caseValueParam": null, - "caseExpression": null, - "decisionCases": { - - }, - "dynamicForkJoinTasksParam": null, - "dynamicForkTasksParam": null, - "dynamicForkTasksInputParamName": null, - "defaultCase": [], - "forkTasks": [], - "startDelay": 0, - "joinOn": [], - "sink": null, - "optional": null, - "subWorkflowParam": { - "name": "sub_flow_1", - "version": null - } - } - ], - "attempt": 1 - }, - "workflowTask": { - "name": "perf_task_3", - "taskReferenceName": "perf_task_3", - "inputParameters": { - "mod": "${perf_task_2.output.mod}", - "oddEven": "${perf_task_2.output.oddEven}" - }, - "type": "SIMPLE", - "startDelay": 0 - }, - "queueWaitTime": 2434, - "taskStatus": "COMPLETED" - }, - { - "taskType": "HTTP", - "status": "COMPLETED", - "inputData": { - "http_request": { - "uri": "/wfe_perf/workflow/_search?q=status:RUNNING&size=0&beta", - "method": "GET", - "vipAddress": "es_cpe_wfe.us-east-1.cloud.netflix.com" - } - }, - "referenceTaskName": "get_es_1", - "retryCount": 0, - "seq": 4, - "correlationId": "1505587453950", - "pollCount": 1, - "taskDefName": "get_from_es", - "scheduledTime": 1505587459547, - "startTime": 1505587459996, - "endTime": 1505587460250, - "updateTime": 1505587460250, - "startDelayInSeconds": 0, - "retried": false, - "executed": true, - "callbackFromWorker": true, - "responseTimeoutSeconds": 0, - "workflowInstanceId": "46e2d0d7-0809-40f2-9f22-bed9d41f6613", - "taskId": "64b49d62-1dfb-4290-94d4-971b4d033f33", - "callbackAfterSeconds": 0, - "workerId": "i-04c53d07aba5b5e9c", - "outputData": { - "response": { - "headers": { - "Content-Length": [ - "121" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ] - }, - "reasonPhrase": "OK", - "body": { - "took": 1, - "timed_out": false, - "_shards": { - "total": 6, - "successful": 6, - "failed": 0 - }, - "hits": { - "total": 1, - "max_score": 0.0, - "hits": [] - } - }, - "statusCode": 200 - } - }, - "workflowTask": { - "name": "get_from_es", - "taskReferenceName": "get_es_1", - "type": "HTTP", - "startDelay": 0 - }, - "queueWaitTime": 449, - "taskStatus": "COMPLETED" - }, - { - "taskType": "DECISION", - "status": "COMPLETED", - "inputData": { - "hasChildren": "true", - "case": "0" - }, - "referenceTaskName": "oddEvenDecision", - "retryCount": 0, - "seq": 5, - "correlationId": "1505587453950", - "pollCount": 0, - "taskDefName": "DECISION", - "scheduledTime": 1505587460216, - "startTime": 1505587460241, - "endTime": 1505587460274, - "updateTime": 1505587460274, - "startDelayInSeconds": 0, - "retried": false, - "executed": true, - "callbackFromWorker": true, - "responseTimeoutSeconds": 0, - "workflowInstanceId": "46e2d0d7-0809-40f2-9f22-bed9d41f6613", - "taskId": "5a596a36-09eb-4a11-a952-01ab5a7c362f", - "callbackAfterSeconds": 0, - "outputData": { - "caseOutput": [ - "0" - ] - }, - "workflowTask": { - "name": "oddEvenDecision", - "taskReferenceName": "oddEvenDecision", - "inputParameters": { - "oddEven": "${perf_task_3.output.oddEven}" - }, - "type": "DECISION", - "caseValueParam": "oddEven", - "decisionCases": { - "0": [ - { - "name": "perf_task_4", - "taskReferenceName": "perf_task_4", - "inputParameters": { - "mod": "${perf_task_3.output.mod}", - "oddEven": "${perf_task_3.output.oddEven}" - }, - "type": "SIMPLE", - "startDelay": 0 - }, - { - "name": "dynamic_fanout", - "taskReferenceName": "fanout1", - 
"inputParameters": { - "dynamicTasks": "${perf_task_4.output.dynamicTasks}", - "input": "${perf_task_4.output.inputs}" - }, - "type": "FORK_JOIN_DYNAMIC", - "dynamicForkTasksParam": "dynamicTasks", - "dynamicForkTasksInputParamName": "input", - "startDelay": 0 - }, - { - "name": "dynamic_join", - "taskReferenceName": "join1", - "type": "JOIN", - "startDelay": 0 - }, - { - "name": "perf_task_5", - "taskReferenceName": "perf_task_5", - "inputParameters": { - "mod": "${perf_task_4.output.mod}", - "oddEven": "${perf_task_4.output.oddEven}" - }, - "type": "SIMPLE", - "startDelay": 0 - }, - { - "name": "perf_task_6", - "taskReferenceName": "perf_task_6", - "inputParameters": { - "mod": "${perf_task_5.output.mod}", - "oddEven": "${perf_task_5.output.oddEven}" - }, - "type": "SIMPLE", - "startDelay": 0 - } - ], - "1": [ - { - "name": "perf_task_7", - "taskReferenceName": "perf_task_7", - "inputParameters": { - "mod": "${perf_task_3.output.mod}", - "oddEven": "${perf_task_3.output.oddEven}" - }, - "type": "SIMPLE", - "startDelay": 0 - }, - { - "name": "perf_task_8", - "taskReferenceName": "perf_task_8", - "inputParameters": { - "mod": "${perf_task_7.output.mod}", - "oddEven": "${perf_task_7.output.oddEven}" - }, - "type": "SIMPLE", - "startDelay": 0 - }, - { - "name": "perf_task_9", - "taskReferenceName": "perf_task_9", - "inputParameters": { - "mod": "${perf_task_8.output.mod}", - "oddEven": "${perf_task_8.output.oddEven}" - }, - "type": "SIMPLE", - "startDelay": 0 - }, - { - "name": "modDecision", - "taskReferenceName": "modDecision", - "inputParameters": { - "mod": "${perf_task_8.output.mod}" - }, - "type": "DECISION", - "caseValueParam": "mod", - "decisionCases": { - "0": [ - { - "name": "perf_task_12", - "taskReferenceName": "perf_task_12", - "inputParameters": { - "mod": "${perf_task_9.output.mod}", - "oddEven": "${perf_task_9.output.oddEven}" - }, - "type": "SIMPLE", - "startDelay": 0 - }, - { - "name": "perf_task_13", - "taskReferenceName": "perf_task_13", - "inputParameters": { - "mod": "${perf_task_12.output.mod}", - "oddEven": "${perf_task_12.output.oddEven}" - }, - "type": "SIMPLE", - "startDelay": 0 - }, - { - "name": "sub_workflow_x", - "taskReferenceName": "wf1", - "inputParameters": { - "mod": "${perf_task_12.output.mod}", - "oddEven": "${perf_task_12.output.oddEven}" - }, - "type": "SUB_WORKFLOW", - "startDelay": 0, - "subWorkflowParam": { - "name": "sub_flow_1", - "version": 1 - } - } - ], - "1": [ - { - "name": "perf_task_15", - "taskReferenceName": "perf_task_15", - "inputParameters": { - "mod": "${perf_task_9.output.mod}", - "oddEven": "${perf_task_9.output.oddEven}" - }, - "type": "SIMPLE", - "startDelay": 0 - }, - { - "name": "perf_task_16", - "taskReferenceName": "perf_task_16", - "inputParameters": { - "mod": "${perf_task_15.output.mod}", - "oddEven": "${perf_task_15.output.oddEven}" - }, - "type": "SIMPLE", - "startDelay": 0 - }, - { - "name": "sub_workflow_x", - "taskReferenceName": "wf2", - "inputParameters": { - "mod": "${perf_task_12.output.mod}", - "oddEven": "${perf_task_12.output.oddEven}" - }, - "type": "SUB_WORKFLOW", - "startDelay": 0, - "subWorkflowParam": { - "name": "sub_flow_1", - "version": 1 - } - } - ], - "4": [ - { - "name": "perf_task_18", - "taskReferenceName": "perf_task_18", - "inputParameters": { - "mod": "${perf_task_9.output.mod}", - "oddEven": "${perf_task_9.output.oddEven}" - }, - "type": "SIMPLE", - "startDelay": 0 - }, - { - "name": "perf_task_19", - "taskReferenceName": "perf_task_19", - "inputParameters": { - "mod": 
"${perf_task_18.output.mod}", - "oddEven": "${perf_task_18.output.oddEven}" - }, - "type": "SIMPLE", - "startDelay": 0 - } - ], - "5": [ - { - "name": "perf_task_21", - "taskReferenceName": "perf_task_21", - "inputParameters": { - "mod": "${perf_task_9.output.mod}", - "oddEven": "${perf_task_9.output.oddEven}" - }, - "type": "SIMPLE", - "startDelay": 0 - }, - { - "name": "sub_workflow_x", - "taskReferenceName": "wf3", - "inputParameters": { - "mod": "${perf_task_12.output.mod}", - "oddEven": "${perf_task_12.output.oddEven}" - }, - "type": "SUB_WORKFLOW", - "startDelay": 0, - "subWorkflowParam": { - "name": "sub_flow_1", - "version": 1 - } - }, - { - "name": "perf_task_22", - "taskReferenceName": "perf_task_22", - "inputParameters": { - "mod": "${perf_task_21.output.mod}", - "oddEven": "${perf_task_21.output.oddEven}" - }, - "type": "SIMPLE", - "startDelay": 0 - } - ] - }, - "defaultCase": [ - { - "name": "perf_task_24", - "taskReferenceName": "perf_task_24", - "inputParameters": { - "mod": "${perf_task_9.output.mod}", - "oddEven": "${perf_task_9.output.oddEven}" - }, - "type": "SIMPLE", - "startDelay": 0 - }, - { - "name": "sub_workflow_x", - "taskReferenceName": "wf4", - "inputParameters": { - "mod": "${perf_task_12.output.mod}", - "oddEven": "${perf_task_12.output.oddEven}" - }, - "type": "SUB_WORKFLOW", - "startDelay": 0, - "subWorkflowParam": { - "name": "sub_flow_1", - "version": 1 - } - }, - { - "name": "perf_task_25", - "taskReferenceName": "perf_task_25", - "inputParameters": { - "mod": "${perf_task_24.output.mod}", - "oddEven": "${perf_task_24.output.oddEven}" - }, - "type": "SIMPLE", - "startDelay": 0 - } - ], - "startDelay": 0 - } - ] - }, - "startDelay": 0 - }, - "queueWaitTime": 25, - "taskStatus": "COMPLETED" - }, - { - "taskType": "perf_task_4", - "status": "COMPLETED", - "inputData": { - "mod": "6", - "oddEven": "0" - }, - "referenceTaskName": "perf_task_4", - "retryCount": 0, - "seq": 6, - "correlationId": "1505587453950", - "pollCount": 1, - "taskDefName": "perf_task_4", - "scheduledTime": 1505587460234, - "startTime": 1505587463699, - "endTime": 1505587463718, - "updateTime": 1505587463718, - "startDelayInSeconds": 0, - "retried": false, - "executed": false, - "callbackFromWorker": true, - "responseTimeoutSeconds": 3600, - "workflowInstanceId": "46e2d0d7-0809-40f2-9f22-bed9d41f6613", - "taskId": "1bf3da08-9d16-4f8a-98c3-4a6efee0e03a", - "callbackAfterSeconds": 0, - "outputData": { - "mod": "9", - "oddEven": "1", - "inputs": { - "subflow_0": { - "mod": 4, - "oddEven": 0 - }, - "subflow_4": { - "mod": 4, - "oddEven": 0 - }, - "subflow_2": { - "mod": 4, - "oddEven": 0 - } - }, - "dynamicTasks": [ - { - "name": null, - "taskReferenceName": "subflow_0", - "description": null, - "inputParameters": null, - "type": "SUB_WORKFLOW", - "dynamicTaskNameParam": null, - "caseValueParam": null, - "caseExpression": null, - "decisionCases": { - - }, - "dynamicForkJoinTasksParam": null, - "dynamicForkTasksParam": null, - "dynamicForkTasksInputParamName": null, - "defaultCase": [], - "forkTasks": [], - "startDelay": 0, - "joinOn": [], - "sink": null, - "optional": null, - "subWorkflowParam": { - "name": "sub_flow_1", - "version": null - } - }, - { - "name": null, - "taskReferenceName": "subflow_2", - "description": null, - "inputParameters": null, - "type": "SUB_WORKFLOW", - "dynamicTaskNameParam": null, - "caseValueParam": null, - "caseExpression": null, - "decisionCases": { - - }, - "dynamicForkJoinTasksParam": null, - "dynamicForkTasksParam": null, - "dynamicForkTasksInputParamName": 
null, - "defaultCase": [], - "forkTasks": [], - "startDelay": 0, - "joinOn": [], - "sink": null, - "optional": null, - "subWorkflowParam": { - "name": "sub_flow_1", - "version": null - } - }, - { - "name": null, - "taskReferenceName": "subflow_4", - "description": null, - "inputParameters": null, - "type": "SUB_WORKFLOW", - "dynamicTaskNameParam": null, - "caseValueParam": null, - "caseExpression": null, - "decisionCases": { - - }, - "dynamicForkJoinTasksParam": null, - "dynamicForkTasksParam": null, - "dynamicForkTasksInputParamName": null, - "defaultCase": [], - "forkTasks": [], - "startDelay": 0, - "joinOn": [], - "sink": null, - "optional": null, - "subWorkflowParam": { - "name": "sub_flow_1", - "version": null - } - } - ], - "attempt": 1 - }, - "workflowTask": { - "name": "perf_task_4", - "taskReferenceName": "perf_task_4", - "inputParameters": { - "mod": "${perf_task_3.output.mod}", - "oddEven": "${perf_task_3.output.oddEven}" - }, - "type": "SIMPLE", - "startDelay": 0 - }, - "queueWaitTime": 3465, - "taskStatus": "COMPLETED" - } - ], - "input": { - "mod": "0", - "oddEven": "0", - "task2Name": "perf_task_10" - }, - "workflowType": "performance_test_1", - "version": 1, - "correlationId": "1505587453950", - "schemaVersion": 2, - "taskToDomain": { - "*": "beta" - }, - "startTime": 1505587453961 -} \ No newline at end of file + "ownerApp": "cpeworkflowtests", + "createTime": 1505587453961, + "updateTime": 1505588471071, + "status": "RUNNING", + "endTime": 0, + "workflowId": "46e2d0d7-0809-40f2-9f22-bed9d41f6613", + "tasks": [ + { + "taskType": "perf_task_1", + "status": "COMPLETED", + "inputData": { + "mod": "0", + "oddEven": "0" + }, + "referenceTaskName": "perf_task_1", + "retryCount": 0, + "seq": 1, + "correlationId": "1505587453950", + "pollCount": 1, + "taskDefName": "perf_task_1", + "scheduledTime": 1505587453972, + "startTime": 1505587455481, + "endTime": 1505587455539, + "updateTime": 1505587455539, + "startDelayInSeconds": 0, + "retried": false, + "executed": true, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": "46e2d0d7-0809-40f2-9f22-bed9d41f6613", + "taskId": "3a54e268-0054-4eab-aea2-e54d1b89896c", + "callbackAfterSeconds": 0, + "outputData": { + "mod": "5", + "oddEven": "1", + "inputs": { + "subflow_0": { + "mod": 4, + "oddEven": 0 + }, + "subflow_4": { + "mod": 4, + "oddEven": 0 + }, + "subflow_2": { + "mod": 4, + "oddEven": 0 + } + }, + "dynamicTasks": [ + { + "name": null, + "taskReferenceName": "subflow_0", + "description": null, + "inputParameters": null, + "type": "SUB_WORKFLOW", + "dynamicTaskNameParam": null, + "caseValueParam": null, + "caseExpression": null, + "decisionCases": { + }, + "dynamicForkJoinTasksParam": null, + "dynamicForkTasksParam": null, + "dynamicForkTasksInputParamName": null, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "sink": null, + "optional": null, + "subWorkflowParam": { + "name": "sub_flow_1", + "version": null + } + }, + { + "name": null, + "taskReferenceName": "subflow_2", + "description": null, + "inputParameters": null, + "type": "SUB_WORKFLOW", + "dynamicTaskNameParam": null, + "caseValueParam": null, + "caseExpression": null, + "decisionCases": { + }, + "dynamicForkJoinTasksParam": null, + "dynamicForkTasksParam": null, + "dynamicForkTasksInputParamName": null, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "sink": null, + "optional": null, + "subWorkflowParam": { + "name": "sub_flow_1", + "version": null + } + }, + { + "name": null, 
+ "taskReferenceName": "subflow_4", + "description": null, + "inputParameters": null, + "type": "SUB_WORKFLOW", + "dynamicTaskNameParam": null, + "caseValueParam": null, + "caseExpression": null, + "decisionCases": { + }, + "dynamicForkJoinTasksParam": null, + "dynamicForkTasksParam": null, + "dynamicForkTasksInputParamName": null, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "sink": null, + "optional": null, + "subWorkflowParam": { + "name": "sub_flow_1", + "version": null + } + } + ], + "attempt": 1 + }, + "workflowTask": { + "name": "perf_task_1", + "taskReferenceName": "perf_task_1", + "inputParameters": { + "mod": "${workflow.input.mod}", + "oddEven": "${workflow.input.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + }, + "queueWaitTime": 1509, + "taskStatus": "COMPLETED" + }, + { + "taskType": "perf_task_10", + "status": "COMPLETED", + "inputData": { + "taskToExecute": "perf_task_10" + }, + "referenceTaskName": "perf_task_2", + "retryCount": 0, + "seq": 2, + "correlationId": "1505587453950", + "pollCount": 1, + "taskDefName": "perf_task_10", + "scheduledTime": 1505587455517, + "startTime": 1505587457017, + "endTime": 1505587457075, + "updateTime": 1505587457075, + "startDelayInSeconds": 0, + "retried": false, + "executed": true, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": "46e2d0d7-0809-40f2-9f22-bed9d41f6613", + "taskId": "3731c3ee-f918-42b7-8bb3-fb016fc0ecae", + "callbackAfterSeconds": 0, + "outputData": { + "mod": "1", + "oddEven": "1", + "inputs": { + "subflow_0": { + "mod": 4, + "oddEven": 0 + }, + "subflow_4": { + "mod": 4, + "oddEven": 0 + }, + "subflow_2": { + "mod": 4, + "oddEven": 0 + } + }, + "dynamicTasks": [ + { + "name": null, + "taskReferenceName": "subflow_0", + "description": null, + "inputParameters": null, + "type": "SUB_WORKFLOW", + "dynamicTaskNameParam": null, + "caseValueParam": null, + "caseExpression": null, + "decisionCases": { + }, + "dynamicForkJoinTasksParam": null, + "dynamicForkTasksParam": null, + "dynamicForkTasksInputParamName": null, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "sink": null, + "optional": null, + "subWorkflowParam": { + "name": "sub_flow_1", + "version": null + } + }, + { + "name": null, + "taskReferenceName": "subflow_2", + "description": null, + "inputParameters": null, + "type": "SUB_WORKFLOW", + "dynamicTaskNameParam": null, + "caseValueParam": null, + "caseExpression": null, + "decisionCases": { + }, + "dynamicForkJoinTasksParam": null, + "dynamicForkTasksParam": null, + "dynamicForkTasksInputParamName": null, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "sink": null, + "optional": null, + "subWorkflowParam": { + "name": "sub_flow_1", + "version": null + } + }, + { + "name": null, + "taskReferenceName": "subflow_4", + "description": null, + "inputParameters": null, + "type": "SUB_WORKFLOW", + "dynamicTaskNameParam": null, + "caseValueParam": null, + "caseExpression": null, + "decisionCases": { + }, + "dynamicForkJoinTasksParam": null, + "dynamicForkTasksParam": null, + "dynamicForkTasksInputParamName": null, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "sink": null, + "optional": null, + "subWorkflowParam": { + "name": "sub_flow_1", + "version": null + } + } + ], + "attempt": 1 + }, + "workflowTask": { + "name": "perf_task_10", + "taskReferenceName": "perf_task_2", + "inputParameters": { + "taskToExecute": "${workflow.input.task2Name}" + }, + "type": 
"DYNAMIC", + "dynamicTaskNameParam": "taskToExecute", + "startDelay": 0 + }, + "queueWaitTime": 1500, + "taskStatus": "COMPLETED" + }, + { + "taskType": "perf_task_3", + "status": "COMPLETED", + "inputData": { + "mod": "1", + "oddEven": "1" + }, + "referenceTaskName": "perf_task_3", + "retryCount": 0, + "seq": 3, + "correlationId": "1505587453950", + "pollCount": 1, + "taskDefName": "perf_task_3", + "scheduledTime": 1505587457064, + "startTime": 1505587459498, + "endTime": 1505587459560, + "updateTime": 1505587459560, + "startDelayInSeconds": 0, + "retried": false, + "executed": true, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": "46e2d0d7-0809-40f2-9f22-bed9d41f6613", + "taskId": "738370d6-596f-4ae5-95bf-ca635c7f10dd", + "callbackAfterSeconds": 0, + "outputData": { + "mod": "6", + "oddEven": "0", + "inputs": { + "subflow_0": { + "mod": 4, + "oddEven": 0 + }, + "subflow_4": { + "mod": 4, + "oddEven": 0 + }, + "subflow_2": { + "mod": 4, + "oddEven": 0 + } + }, + "dynamicTasks": [ + { + "name": null, + "taskReferenceName": "subflow_0", + "description": null, + "inputParameters": null, + "type": "SUB_WORKFLOW", + "dynamicTaskNameParam": null, + "caseValueParam": null, + "caseExpression": null, + "decisionCases": { + }, + "dynamicForkJoinTasksParam": null, + "dynamicForkTasksParam": null, + "dynamicForkTasksInputParamName": null, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "sink": null, + "optional": null, + "subWorkflowParam": { + "name": "sub_flow_1", + "version": null + } + }, + { + "name": null, + "taskReferenceName": "subflow_2", + "description": null, + "inputParameters": null, + "type": "SUB_WORKFLOW", + "dynamicTaskNameParam": null, + "caseValueParam": null, + "caseExpression": null, + "decisionCases": { + }, + "dynamicForkJoinTasksParam": null, + "dynamicForkTasksParam": null, + "dynamicForkTasksInputParamName": null, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "sink": null, + "optional": null, + "subWorkflowParam": { + "name": "sub_flow_1", + "version": null + } + }, + { + "name": null, + "taskReferenceName": "subflow_4", + "description": null, + "inputParameters": null, + "type": "SUB_WORKFLOW", + "dynamicTaskNameParam": null, + "caseValueParam": null, + "caseExpression": null, + "decisionCases": { + }, + "dynamicForkJoinTasksParam": null, + "dynamicForkTasksParam": null, + "dynamicForkTasksInputParamName": null, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "sink": null, + "optional": null, + "subWorkflowParam": { + "name": "sub_flow_1", + "version": null + } + } + ], + "attempt": 1 + }, + "workflowTask": { + "name": "perf_task_3", + "taskReferenceName": "perf_task_3", + "inputParameters": { + "mod": "${perf_task_2.output.mod}", + "oddEven": "${perf_task_2.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + }, + "queueWaitTime": 2434, + "taskStatus": "COMPLETED" + }, + { + "taskType": "HTTP", + "status": "COMPLETED", + "inputData": { + "http_request": { + "uri": "/wfe_perf/workflow/_search?q=status:RUNNING&size=0&beta", + "method": "GET", + "vipAddress": "es_cpe_wfe.us-east-1.cloud.netflix.com" + } + }, + "referenceTaskName": "get_es_1", + "retryCount": 0, + "seq": 4, + "correlationId": "1505587453950", + "pollCount": 1, + "taskDefName": "get_from_es", + "scheduledTime": 1505587459547, + "startTime": 1505587459996, + "endTime": 1505587460250, + "updateTime": 1505587460250, + "startDelayInSeconds": 0, + "retried": false, + "executed": 
true, + "callbackFromWorker": true, + "responseTimeoutSeconds": 0, + "workflowInstanceId": "46e2d0d7-0809-40f2-9f22-bed9d41f6613", + "taskId": "64b49d62-1dfb-4290-94d4-971b4d033f33", + "callbackAfterSeconds": 0, + "workerId": "i-04c53d07aba5b5e9c", + "outputData": { + "response": { + "headers": { + "Content-Length": [ + "121" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ] + }, + "reasonPhrase": "OK", + "body": { + "took": 1, + "timed_out": false, + "_shards": { + "total": 6, + "successful": 6, + "failed": 0 + }, + "hits": { + "total": 1, + "max_score": 0.0, + "hits": [] + } + }, + "statusCode": 200 + } + }, + "workflowTask": { + "name": "get_from_es", + "taskReferenceName": "get_es_1", + "type": "HTTP", + "startDelay": 0 + }, + "queueWaitTime": 449, + "taskStatus": "COMPLETED" + }, + { + "taskType": "DECISION", + "status": "COMPLETED", + "inputData": { + "hasChildren": "true", + "case": "0" + }, + "referenceTaskName": "oddEvenDecision", + "retryCount": 0, + "seq": 5, + "correlationId": "1505587453950", + "pollCount": 0, + "taskDefName": "DECISION", + "scheduledTime": 1505587460216, + "startTime": 1505587460241, + "endTime": 1505587460274, + "updateTime": 1505587460274, + "startDelayInSeconds": 0, + "retried": false, + "executed": true, + "callbackFromWorker": true, + "responseTimeoutSeconds": 0, + "workflowInstanceId": "46e2d0d7-0809-40f2-9f22-bed9d41f6613", + "taskId": "5a596a36-09eb-4a11-a952-01ab5a7c362f", + "callbackAfterSeconds": 0, + "outputData": { + "caseOutput": [ + "0" + ] + }, + "workflowTask": { + "name": "oddEvenDecision", + "taskReferenceName": "oddEvenDecision", + "inputParameters": { + "oddEven": "${perf_task_3.output.oddEven}" + }, + "type": "DECISION", + "caseValueParam": "oddEven", + "decisionCases": { + "0": [ + { + "name": "perf_task_4", + "taskReferenceName": "perf_task_4", + "inputParameters": { + "mod": "${perf_task_3.output.mod}", + "oddEven": "${perf_task_3.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + }, + { + "name": "dynamic_fanout", + "taskReferenceName": "fanout1", + "inputParameters": { + "dynamicTasks": "${perf_task_4.output.dynamicTasks}", + "input": "${perf_task_4.output.inputs}" + }, + "type": "FORK_JOIN_DYNAMIC", + "dynamicForkTasksParam": "dynamicTasks", + "dynamicForkTasksInputParamName": "input", + "startDelay": 0 + }, + { + "name": "dynamic_join", + "taskReferenceName": "join1", + "type": "JOIN", + "startDelay": 0 + }, + { + "name": "perf_task_5", + "taskReferenceName": "perf_task_5", + "inputParameters": { + "mod": "${perf_task_4.output.mod}", + "oddEven": "${perf_task_4.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + }, + { + "name": "perf_task_6", + "taskReferenceName": "perf_task_6", + "inputParameters": { + "mod": "${perf_task_5.output.mod}", + "oddEven": "${perf_task_5.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + } + ], + "1": [ + { + "name": "perf_task_7", + "taskReferenceName": "perf_task_7", + "inputParameters": { + "mod": "${perf_task_3.output.mod}", + "oddEven": "${perf_task_3.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + }, + { + "name": "perf_task_8", + "taskReferenceName": "perf_task_8", + "inputParameters": { + "mod": "${perf_task_7.output.mod}", + "oddEven": "${perf_task_7.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + }, + { + "name": "perf_task_9", + "taskReferenceName": "perf_task_9", + "inputParameters": { + "mod": "${perf_task_8.output.mod}", + "oddEven": "${perf_task_8.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + }, 
+ { + "name": "modDecision", + "taskReferenceName": "modDecision", + "inputParameters": { + "mod": "${perf_task_8.output.mod}" + }, + "type": "DECISION", + "caseValueParam": "mod", + "decisionCases": { + "0": [ + { + "name": "perf_task_12", + "taskReferenceName": "perf_task_12", + "inputParameters": { + "mod": "${perf_task_9.output.mod}", + "oddEven": "${perf_task_9.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + }, + { + "name": "perf_task_13", + "taskReferenceName": "perf_task_13", + "inputParameters": { + "mod": "${perf_task_12.output.mod}", + "oddEven": "${perf_task_12.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + }, + { + "name": "sub_workflow_x", + "taskReferenceName": "wf1", + "inputParameters": { + "mod": "${perf_task_12.output.mod}", + "oddEven": "${perf_task_12.output.oddEven}" + }, + "type": "SUB_WORKFLOW", + "startDelay": 0, + "subWorkflowParam": { + "name": "sub_flow_1", + "version": 1 + } + } + ], + "1": [ + { + "name": "perf_task_15", + "taskReferenceName": "perf_task_15", + "inputParameters": { + "mod": "${perf_task_9.output.mod}", + "oddEven": "${perf_task_9.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + }, + { + "name": "perf_task_16", + "taskReferenceName": "perf_task_16", + "inputParameters": { + "mod": "${perf_task_15.output.mod}", + "oddEven": "${perf_task_15.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + }, + { + "name": "sub_workflow_x", + "taskReferenceName": "wf2", + "inputParameters": { + "mod": "${perf_task_12.output.mod}", + "oddEven": "${perf_task_12.output.oddEven}" + }, + "type": "SUB_WORKFLOW", + "startDelay": 0, + "subWorkflowParam": { + "name": "sub_flow_1", + "version": 1 + } + } + ], + "4": [ + { + "name": "perf_task_18", + "taskReferenceName": "perf_task_18", + "inputParameters": { + "mod": "${perf_task_9.output.mod}", + "oddEven": "${perf_task_9.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + }, + { + "name": "perf_task_19", + "taskReferenceName": "perf_task_19", + "inputParameters": { + "mod": "${perf_task_18.output.mod}", + "oddEven": "${perf_task_18.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + } + ], + "5": [ + { + "name": "perf_task_21", + "taskReferenceName": "perf_task_21", + "inputParameters": { + "mod": "${perf_task_9.output.mod}", + "oddEven": "${perf_task_9.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + }, + { + "name": "sub_workflow_x", + "taskReferenceName": "wf3", + "inputParameters": { + "mod": "${perf_task_12.output.mod}", + "oddEven": "${perf_task_12.output.oddEven}" + }, + "type": "SUB_WORKFLOW", + "startDelay": 0, + "subWorkflowParam": { + "name": "sub_flow_1", + "version": 1 + } + }, + { + "name": "perf_task_22", + "taskReferenceName": "perf_task_22", + "inputParameters": { + "mod": "${perf_task_21.output.mod}", + "oddEven": "${perf_task_21.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + } + ] + }, + "defaultCase": [ + { + "name": "perf_task_24", + "taskReferenceName": "perf_task_24", + "inputParameters": { + "mod": "${perf_task_9.output.mod}", + "oddEven": "${perf_task_9.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + }, + { + "name": "sub_workflow_x", + "taskReferenceName": "wf4", + "inputParameters": { + "mod": "${perf_task_12.output.mod}", + "oddEven": "${perf_task_12.output.oddEven}" + }, + "type": "SUB_WORKFLOW", + "startDelay": 0, + "subWorkflowParam": { + "name": "sub_flow_1", + "version": 1 + } + }, + { + "name": "perf_task_25", + "taskReferenceName": "perf_task_25", + "inputParameters": { + 
"mod": "${perf_task_24.output.mod}", + "oddEven": "${perf_task_24.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + } + ], + "startDelay": 0 + } + ] + }, + "startDelay": 0 + }, + "queueWaitTime": 25, + "taskStatus": "COMPLETED" + }, + { + "taskType": "perf_task_4", + "status": "COMPLETED", + "inputData": { + "mod": "6", + "oddEven": "0" + }, + "referenceTaskName": "perf_task_4", + "retryCount": 0, + "seq": 6, + "correlationId": "1505587453950", + "pollCount": 1, + "taskDefName": "perf_task_4", + "scheduledTime": 1505587460234, + "startTime": 1505587463699, + "endTime": 1505587463718, + "updateTime": 1505587463718, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": "46e2d0d7-0809-40f2-9f22-bed9d41f6613", + "taskId": "1bf3da08-9d16-4f8a-98c3-4a6efee0e03a", + "callbackAfterSeconds": 0, + "outputData": { + "mod": "9", + "oddEven": "1", + "inputs": { + "subflow_0": { + "mod": 4, + "oddEven": 0 + }, + "subflow_4": { + "mod": 4, + "oddEven": 0 + }, + "subflow_2": { + "mod": 4, + "oddEven": 0 + } + }, + "dynamicTasks": [ + { + "name": null, + "taskReferenceName": "subflow_0", + "description": null, + "inputParameters": null, + "type": "SUB_WORKFLOW", + "dynamicTaskNameParam": null, + "caseValueParam": null, + "caseExpression": null, + "decisionCases": { + }, + "dynamicForkJoinTasksParam": null, + "dynamicForkTasksParam": null, + "dynamicForkTasksInputParamName": null, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "sink": null, + "optional": null, + "subWorkflowParam": { + "name": "sub_flow_1", + "version": null + } + }, + { + "name": null, + "taskReferenceName": "subflow_2", + "description": null, + "inputParameters": null, + "type": "SUB_WORKFLOW", + "dynamicTaskNameParam": null, + "caseValueParam": null, + "caseExpression": null, + "decisionCases": { + }, + "dynamicForkJoinTasksParam": null, + "dynamicForkTasksParam": null, + "dynamicForkTasksInputParamName": null, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "sink": null, + "optional": null, + "subWorkflowParam": { + "name": "sub_flow_1", + "version": null + } + }, + { + "name": null, + "taskReferenceName": "subflow_4", + "description": null, + "inputParameters": null, + "type": "SUB_WORKFLOW", + "dynamicTaskNameParam": null, + "caseValueParam": null, + "caseExpression": null, + "decisionCases": { + }, + "dynamicForkJoinTasksParam": null, + "dynamicForkTasksParam": null, + "dynamicForkTasksInputParamName": null, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "sink": null, + "optional": null, + "subWorkflowParam": { + "name": "sub_flow_1", + "version": null + } + } + ], + "attempt": 1 + }, + "workflowTask": { + "name": "perf_task_4", + "taskReferenceName": "perf_task_4", + "inputParameters": { + "mod": "${perf_task_3.output.mod}", + "oddEven": "${perf_task_3.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + }, + "queueWaitTime": 3465, + "taskStatus": "COMPLETED" + } + ], + "input": { + "mod": "0", + "oddEven": "0", + "task2Name": "perf_task_10" + }, + "workflowType": "performance_test_1", + "version": 1, + "correlationId": "1505587453950", + "schemaVersion": 2, + "taskToDomain": { + "*": "beta" + }, + "startTime": 1505587453961, + "workflowDefinition": { + "createTime": 1477681181098, + "updateTime": 1502738273998, + "name": "performance_test_1", + "description": "performance_test_1", + "version": 1, + "tasks": [ + { + "name": 
"perf_task_1", + "taskReferenceName": "perf_task_1", + "inputParameters": { + "mod": "${workflow.input.mod}", + "oddEven": "${workflow.input.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + }, + { + "name": "dyntask", + "taskReferenceName": "perf_task_2", + "inputParameters": { + "taskToExecute": "${workflow.input.task2Name}" + }, + "type": "DYNAMIC", + "dynamicTaskNameParam": "taskToExecute", + "startDelay": 0 + }, + { + "name": "perf_task_3", + "taskReferenceName": "perf_task_3", + "inputParameters": { + "mod": "${perf_task_2.output.mod}", + "oddEven": "${perf_task_2.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + }, + { + "name": "get_from_es", + "taskReferenceName": "get_es_1", + "type": "HTTP", + "startDelay": 0 + }, + { + "name": "oddEvenDecision", + "taskReferenceName": "oddEvenDecision", + "inputParameters": { + "oddEven": "${perf_task_3.output.oddEven}" + }, + "type": "DECISION", + "caseValueParam": "oddEven", + "decisionCases": { + "0": [ + { + "name": "perf_task_4", + "taskReferenceName": "perf_task_4", + "inputParameters": { + "mod": "${perf_task_3.output.mod}", + "oddEven": "${perf_task_3.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + }, + { + "name": "dynamic_fanout", + "taskReferenceName": "fanout1", + "inputParameters": { + "dynamicTasks": "${perf_task_4.output.dynamicTasks}", + "input": "${perf_task_4.output.inputs}" + }, + "type": "FORK_JOIN_DYNAMIC", + "dynamicForkTasksParam": "dynamicTasks", + "dynamicForkTasksInputParamName": "input", + "startDelay": 0 + }, + { + "name": "dynamic_join", + "taskReferenceName": "join1", + "type": "JOIN", + "startDelay": 0 + }, + { + "name": "perf_task_5", + "taskReferenceName": "perf_task_5", + "inputParameters": { + "mod": "${perf_task_4.output.mod}", + "oddEven": "${perf_task_4.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + }, + { + "name": "perf_task_6", + "taskReferenceName": "perf_task_6", + "inputParameters": { + "mod": "${perf_task_5.output.mod}", + "oddEven": "${perf_task_5.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + } + ], + "1": [ + { + "name": "perf_task_7", + "taskReferenceName": "perf_task_7", + "inputParameters": { + "mod": "${perf_task_3.output.mod}", + "oddEven": "${perf_task_3.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + }, + { + "name": "perf_task_8", + "taskReferenceName": "perf_task_8", + "inputParameters": { + "mod": "${perf_task_7.output.mod}", + "oddEven": "${perf_task_7.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + }, + { + "name": "perf_task_9", + "taskReferenceName": "perf_task_9", + "inputParameters": { + "mod": "${perf_task_8.output.mod}", + "oddEven": "${perf_task_8.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + }, + { + "name": "modDecision", + "taskReferenceName": "modDecision", + "inputParameters": { + "mod": "${perf_task_8.output.mod}" + }, + "type": "DECISION", + "caseValueParam": "mod", + "decisionCases": { + "0": [ + { + "name": "perf_task_12", + "taskReferenceName": "perf_task_12", + "inputParameters": { + "mod": "${perf_task_9.output.mod}", + "oddEven": "${perf_task_9.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + }, + { + "name": "perf_task_13", + "taskReferenceName": "perf_task_13", + "inputParameters": { + "mod": "${perf_task_12.output.mod}", + "oddEven": "${perf_task_12.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + }, + { + "name": "sub_workflow_x", + "taskReferenceName": "wf1", + "inputParameters": { + "mod": "${perf_task_12.output.mod}", + "oddEven": 
"${perf_task_12.output.oddEven}" + }, + "type": "SUB_WORKFLOW", + "startDelay": 0, + "subWorkflowParam": { + "name": "sub_flow_1", + "version": 1 + } + } + ], + "1": [ + { + "name": "perf_task_15", + "taskReferenceName": "perf_task_15", + "inputParameters": { + "mod": "${perf_task_9.output.mod}", + "oddEven": "${perf_task_9.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + }, + { + "name": "perf_task_16", + "taskReferenceName": "perf_task_16", + "inputParameters": { + "mod": "${perf_task_15.output.mod}", + "oddEven": "${perf_task_15.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + }, + { + "name": "sub_workflow_x", + "taskReferenceName": "wf2", + "inputParameters": { + "mod": "${perf_task_12.output.mod}", + "oddEven": "${perf_task_12.output.oddEven}" + }, + "type": "SUB_WORKFLOW", + "startDelay": 0, + "subWorkflowParam": { + "name": "sub_flow_1", + "version": 1 + } + } + ], + "4": [ + { + "name": "perf_task_18", + "taskReferenceName": "perf_task_18", + "inputParameters": { + "mod": "${perf_task_9.output.mod}", + "oddEven": "${perf_task_9.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + }, + { + "name": "perf_task_19", + "taskReferenceName": "perf_task_19", + "inputParameters": { + "mod": "${perf_task_18.output.mod}", + "oddEven": "${perf_task_18.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + } + ], + "5": [ + { + "name": "perf_task_21", + "taskReferenceName": "perf_task_21", + "inputParameters": { + "mod": "${perf_task_9.output.mod}", + "oddEven": "${perf_task_9.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + }, + { + "name": "sub_workflow_x", + "taskReferenceName": "wf3", + "inputParameters": { + "mod": "${perf_task_12.output.mod}", + "oddEven": "${perf_task_12.output.oddEven}" + }, + "type": "SUB_WORKFLOW", + "startDelay": 0, + "subWorkflowParam": { + "name": "sub_flow_1", + "version": 1 + } + }, + { + "name": "perf_task_22", + "taskReferenceName": "perf_task_22", + "inputParameters": { + "mod": "${perf_task_21.output.mod}", + "oddEven": "${perf_task_21.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + } + ] + }, + "defaultCase": [ + { + "name": "perf_task_24", + "taskReferenceName": "perf_task_24", + "inputParameters": { + "mod": "${perf_task_9.output.mod}", + "oddEven": "${perf_task_9.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + }, + { + "name": "sub_workflow_x", + "taskReferenceName": "wf4", + "inputParameters": { + "mod": "${perf_task_12.output.mod}", + "oddEven": "${perf_task_12.output.oddEven}" + }, + "type": "SUB_WORKFLOW", + "startDelay": 0, + "subWorkflowParam": { + "name": "sub_flow_1", + "version": 1 + } + }, + { + "name": "perf_task_25", + "taskReferenceName": "perf_task_25", + "inputParameters": { + "mod": "${perf_task_24.output.mod}", + "oddEven": "${perf_task_24.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + } + ], + "startDelay": 0 + } + ] + }, + "startDelay": 0 + }, + { + "name": "perf_task_28", + "taskReferenceName": "perf_task_28", + "inputParameters": { + "mod": "${perf_task_3.output.mod}", + "oddEven": "${perf_task_3.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + }, + { + "name": "perf_task_29", + "taskReferenceName": "perf_task_29", + "inputParameters": { + "mod": "${perf_task_28.output.mod}", + "oddEven": "${perf_task_28.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + }, + { + "name": "perf_task_30", + "taskReferenceName": "perf_task_30", + "inputParameters": { + "mod": "${perf_task_29.output.mod}", + "oddEven": 
"${perf_task_29.output.oddEven}" + }, + "type": "SIMPLE", + "startDelay": 0 + } + ], + "schemaVersion": 2 + } +} diff --git a/docker/grpc/Makefile b/docker/grpc/Makefile new file mode 100644 index 0000000000..142b96b4a9 --- /dev/null +++ b/docker/grpc/Makefile @@ -0,0 +1,20 @@ + +clean-db: + docker volume rm grpc_conductor_mysql + +compose-build: + docker-compose -f docker-compose.dependencies.yaml -f docker-compose.apps.yaml build + +dependencies-up: + docker-compose -f docker-compose.dependencies.yaml up -d + +dependencies-down: + docker-compose -f docker-compose.dependencies.yaml down + +stack-up: + docker-compose -f docker-compose.dependencies.yaml up -d + sleep 30 + docker-compose -f docker-compose.dependencies.yaml -f docker-compose.apps.yaml up + +stack-down: + docker-compose -f docker-compose.dependencies.yaml -f docker-compose.apps.yaml down diff --git a/docker/grpc/docker-compose.apps.yaml b/docker/grpc/docker-compose.apps.yaml new file mode 100644 index 0000000000..d9cc32622e --- /dev/null +++ b/docker/grpc/docker-compose.apps.yaml @@ -0,0 +1,33 @@ +version: '3.4' + +services: + + conductor-server: + environment: + - CONFIG_PROP=config-mysql-grpc.properties + image: conductor:server + build: + context: ../../ + dockerfile: docker/server/Dockerfile + ports: + - 8080:8080 + - 8090:8090 + networks: + - internal + depends_on: + - mysql + - elasticsearch + + conductor-ui: + environment: + - WF_SERVER=http://conductor-server:8080/api/ + image: conductor:ui + build: + context: ../../ + dockerfile: docker/ui/Dockerfile + ports: + - 5000:5000 + networks: + - internal + depends_on: + - conductor-server diff --git a/docker/grpc/docker-compose.dependencies.yaml b/docker/grpc/docker-compose.dependencies.yaml new file mode 100644 index 0000000000..b413e64a4f --- /dev/null +++ b/docker/grpc/docker-compose.dependencies.yaml @@ -0,0 +1,39 @@ +version: '3.4' + +services: + + mysql: + image: mysql:5.6 + environment: + MYSQL_ROOT_PASSWORD: 12345 + MYSQL_DATABASE: conductor + MYSQL_USER: conductor + MYSQL_PASSWORD: password + volumes: + - type: volume + source: conductor_mysql + target: /var/lib/mysql + networks: + - internal + ports: + - 3306:3306 + + # https://www.elastic.co/guide/en/elasticsearch/reference/5.6/docker.html + elasticsearch: + image: docker.elastic.co/elasticsearch/elasticsearch:5.6.8 + environment: + - "ES_JAVA_OPTS=-Xms512m -Xmx512m" + - transport.host=0.0.0.0 + - discovery.type=single-node + - xpack.security.enabled=false + networks: + - internal + ports: + - 9200:9200 + - 9300:9300 + +volumes: + conductor_mysql: + +networks: + internal: diff --git a/docker/server/Dockerfile b/docker/server/Dockerfile index f369b21eca..496c2098ae 100644 --- a/docker/server/Dockerfile +++ b/docker/server/Dockerfile @@ -17,6 +17,7 @@ COPY ./server/build/libs/conductor-server-*-all.jar /app/libs RUN chmod +x /app/startup.sh EXPOSE 8080 +EXPOSE 8090 CMD [ "/app/startup.sh" ] ENTRYPOINT [ "/bin/sh"] diff --git a/docker/server/config/config-local.properties b/docker/server/config/config-local.properties index b9cced6448..b72d893d56 100755 --- a/docker/server/config/config-local.properties +++ b/docker/server/config/config-local.properties @@ -1,3 +1,7 @@ +# Servers. +conductor.jetty.server.enabled=true +conductor.grpc.server.enabled=false + # Database persistence model. Possible values are memory, redis, and dynomite. # If ommitted, the persistence used is memory # @@ -25,6 +29,13 @@ queues.dynomite.threads=10 # For Dynomite, this is 22122 by default or the local redis-server port used by Dynomite. 
queues.dynomite.nonQuorum.port=22122 +# Elastic search instance type. Possible values are memory and external. +# If not specified, the instance type will be embedded in memory +# +# memory: The instance is created in memory and lost when the server dies. Useful for development and testing. +# external: Elastic search instance runs outside of the server. Data is persisted and does not get lost when +# the server dies. Useful for more stable environments like staging or production. +workflow.elasticsearch.instanceType=external # Transport address to elasticsearch workflow.elasticsearch.url=localhost:9300 diff --git a/docker/server/config/config-mysql-grpc.properties b/docker/server/config/config-mysql-grpc.properties new file mode 100755 index 0000000000..e8aff4c808 --- /dev/null +++ b/docker/server/config/config-mysql-grpc.properties @@ -0,0 +1,31 @@ +# Servers. +conductor.jetty.server.enabled=true +conductor.grpc.server.enabled=true + +# Database persistence model. Possible values are memory, redis, and dynomite. +# If omitted, the persistence used is memory +# +# memory : The data is stored in memory and lost when the server dies. Useful for testing or demo +# redis : non-Dynomite based redis instance +# dynomite : Dynomite cluster. Use this for HA configuration. + +db=mysql + +jdbc.url=jdbc:mysql://mysql:3306/conductor + +# Elastic search instance type. Possible values are memory and external. +# If not specified, the instance type will be embedded in memory +# +# memory: The instance is created in memory and lost when the server dies. Useful for development and testing. +# external: Elastic search instance runs outside of the server. Data is persisted and does not get lost when +# the server dies. Useful for more stable environments like staging or production. +workflow.elasticsearch.instanceType=external + +# Transport address to elasticsearch +workflow.elasticsearch.url=elasticsearch:9300 + +# Name of the elasticsearch cluster +workflow.elasticsearch.index.name=conductor + +# Additional modules (optional) +# conductor.additional.modules=class_extending_com.google.inject.AbstractModule diff --git a/docker/server/config/config-mysql.properties b/docker/server/config/config-mysql.properties new file mode 100755 index 0000000000..7c8a0f88b6 --- /dev/null +++ b/docker/server/config/config-mysql.properties @@ -0,0 +1,34 @@ +# Servers. +conductor.jetty.server.enabled=true +conductor.grpc.server.enabled=false + +# Database persistence model. Possible values are memory, redis, and dynomite. +# If omitted, the persistence used is memory +# +# memory : The data is stored in memory and lost when the server dies. Useful for testing or demo +# redis : non-Dynomite based redis instance +# dynomite : Dynomite cluster. Use this for HA configuration. + +db=mysql + +jdbc.url=jdbc:mysql://mysql:3306/conductor + +# Elastic search instance type. Possible values are memory and external. +# If not specified, the instance type will be embedded in memory +# +# memory: The instance is created in memory and lost when the server dies. Useful for development and testing. +# external: Elastic search instance runs outside of the server. Data is persisted and does not get lost when +# the server dies. Useful for more stable environments like staging or production. 
+workflow.elasticsearch.instanceType=external + +# Transport address to elasticsearch +workflow.elasticsearch.url=elasticsearch:9300 + +# Name of the elasticsearch cluster +workflow.elasticsearch.index.name=conductor + +# Additional modules (optional) +# conductor.additional.modules=class_extending_com.google.inject.AbstractModule + +# Load sample kitchen sink workflow +loadSample=true diff --git a/docker/server/config/config.properties b/docker/server/config/config.properties index a03f97afa4..d66187dd3f 100755 --- a/docker/server/config/config.properties +++ b/docker/server/config/config.properties @@ -1,3 +1,7 @@ +# Servers. +conductor.jetty.server.enabled=true +conductor.grpc.server.enabled=false + # Database persistence model. Possible values are memory, redis, and dynomite. # If ommitted, the persistence used is memory # @@ -28,6 +32,13 @@ queues.dynomite.threads=10 # For Dynomite, this is 22122 by default or the local redis-server port used by Dynomite. queues.dynomite.nonQuorum.port=22122 +# Elastic search instance type. Possible values are memory and external. +# If not specified, the instance type will be embedded in memory +# +# memory: The instance is created in memory and lost when the server dies. Useful for development and testing. +# external: Elastic search instance runs outside of the server. Data is persisted and does not get lost when +# the server dies. Useful for more stable environments like staging or production. +workflow.elasticsearch.instanceType=external # Transport address to elasticsearch workflow.elasticsearch.url=es:9300 diff --git a/docker/ui/Dockerfile b/docker/ui/Dockerfile index db53cb2ef3..ab9d198ee6 100644 --- a/docker/ui/Dockerfile +++ b/docker/ui/Dockerfile @@ -1,7 +1,7 @@ # # conductor:ui - Netflix conductor UI # -FROM node:alpine +FROM node:9-alpine MAINTAINER Netflix OSS # Install the required packages for the node build diff --git a/docs/docs/events/index.md b/docs/docs/events/index.md index 9b83552e2d..25351d468b 100644 --- a/docs/docs/events/index.md +++ b/docs/docs/events/index.md @@ -66,8 +66,8 @@ Given the following payload in the message: ```json { - "action": "start_workflow", - "start_workflow": { + "action": "startWorkflow", + "startWorkflow": { "name": "WORKFLOW_NAME", "version": "input": { @@ -81,15 +81,15 @@ Given the following payload in the message: ```json { - "action": "complete_task", - "complete_task": { + "action": "completeTask", + "completeTask": { "workflowId": "${source.externalId.workflowId}", "taskRefName": "task_1", "output": { "response": "${source.result}" } }, - "expandInlineJSON": true + "expandInlineJson": true } ``` @@ -97,21 +97,21 @@ Given the following payload in the message: ```json { - "action": "fail_task", - "fail_task": { + "action": "failTask", + "failTask": { "workflowId": "${source.externalId.workflowId}", "taskRefName": "task_1", "output": { "response": "${source.result}" } }, - "expandInlineJSON": true + "expandInlineJson": true } ``` Input for starting a workflow and output when completing / failing task follows the same [expressions](/metadata/#wiring-inputs-and-outputs) used for wiring workflow inputs. !!!info "Expanding stringified JSON elements in payload" - `expandInlineJSON` property, when set to true will expand the inlined stringified JSON elements in the payload to JSON documents and replace the string value with JSON document. 
+ `expandInlineJson` property, when set to true will expand the inlined stringified JSON elements in the payload to JSON documents and replace the string value with JSON document. This feature allows such elements to be used with JSON path expressions. ## Extending diff --git a/elasticsearch/build.gradle b/elasticsearch/build.gradle new file mode 100644 index 0000000000..5e0b388fea --- /dev/null +++ b/elasticsearch/build.gradle @@ -0,0 +1,6 @@ +dependencies { + compile project(':conductor-core') + + compile "com.google.inject:guice:${revGuice}" + +} diff --git a/es2-persistence/README.md b/es2-persistence/README.md deleted file mode 100644 index 2e7c86296e..0000000000 --- a/es2-persistence/README.md +++ /dev/null @@ -1,3 +0,0 @@ -## Usage - -Set `workflow.elasticsearch.version=2` in Server module's configuration options. diff --git a/es2-persistence/build.gradle b/es2-persistence/build.gradle deleted file mode 100644 index 2bfcd96526..0000000000 --- a/es2-persistence/build.gradle +++ /dev/null @@ -1,41 +0,0 @@ -plugins { - id 'com.github.johnrengelman.plugin-shadow' version '2.0.3' -} - -configurations { - // Prevent shaded dependencies from being published, while keeping them available to tests - shadow.extendsFrom compileOnly - testRuntime.extendsFrom compileOnly -} - -dependencies { - compile project(':conductor-core') - - //ES5 Dependency - compile "org.apache.logging.log4j:log4j-api:${revLog4jApi}" - compile "org.apache.logging.log4j:log4j-core:${revLog4jCore}" - - compileOnly "commons-io:commons-io:${revCommonsIo}" - - compileOnly "org.elasticsearch:elasticsearch:${revElasticSearch2}" - compileOnly "com.github.rholder:guava-retrying:${revGuavaRetrying}" -} - -// Drop the classifier and delete jar task actions to replace the regular jar artifact with the shadow artifact -shadowJar { - configurations = [project.configurations.shadow] - classifier = null - - // Service files are not included by default. 
- mergeServiceFiles { - include 'META-INF/services/*' - include 'META-INF/maven/*' - } -} - -jar.enabled = false -jar.dependsOn shadowJar - -configureRelocationShadowJar { - prefix = 'conductor' -} \ No newline at end of file diff --git a/es2-persistence/dependencies.lock b/es2-persistence/dependencies.lock deleted file mode 100644 index 25a979cb31..0000000000 --- a/es2-persistence/dependencies.lock +++ /dev/null @@ -1,962 +0,0 @@ -{ - "compile": { - "com.amazonaws:aws-java-sdk-s3": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "1.11.86" - }, - "com.fasterxml.jackson.core:jackson-core": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "2.7.5" - }, - "com.fasterxml.jackson.core:jackson-databind": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "2.7.5" - }, - "com.github.rholder:guava-retrying": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-common" - ], - "locked": "2.0.0" - }, - "com.google.inject.extensions:guice-multibindings": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "4.1.0" - }, - "com.google.inject:guice": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "4.1.0" - }, - "com.jayway.jsonpath:json-path": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "2.2.0" - }, - "com.netflix.conductor:conductor-common": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "project": true - }, - "com.netflix.conductor:conductor-core": { - "project": true - }, - "com.netflix.servo:servo-core": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "0.12.17" - }, - "com.netflix.spectator:spectator-api": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "0.68.0" - }, - "com.spotify:completable-futures": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "0.3.1" - }, - "io.reactivex:rxjava": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "1.2.2" - }, - "org.apache.commons:commons-lang3": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "3.0" - }, - "org.apache.logging.log4j:log4j-api": { - "locked": "2.9.1", - "requested": "2.9.1" - }, - "org.apache.logging.log4j:log4j-core": { - "locked": "2.9.1", - "requested": "2.9.1" - }, - "org.slf4j:slf4j-api": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-common" - ], - "locked": "1.7.25" - } - }, - "compileClasspath": { - "com.amazonaws:aws-java-sdk-s3": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "1.11.86" - }, - "com.fasterxml.jackson.core:jackson-core": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "2.8.1" - }, - "com.fasterxml.jackson.core:jackson-databind": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "2.7.5" - }, - "com.github.rholder:guava-retrying": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-common" - ], - "locked": "2.0.0", - "requested": "2.0.0" - }, - "com.google.inject.extensions:guice-multibindings": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "4.1.0" - }, - "com.google.inject:guice": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": 
"4.1.0" - }, - "com.jayway.jsonpath:json-path": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "2.2.0" - }, - "com.netflix.conductor:conductor-common": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "project": true - }, - "com.netflix.conductor:conductor-core": { - "project": true - }, - "com.netflix.servo:servo-core": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "0.12.17" - }, - "com.netflix.spectator:spectator-api": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "0.68.0" - }, - "com.spotify:completable-futures": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "0.3.1" - }, - "commons-io:commons-io": { - "locked": "2.4", - "requested": "2.4" - }, - "io.reactivex:rxjava": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "1.2.2" - }, - "org.apache.commons:commons-lang3": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "3.0" - }, - "org.apache.logging.log4j:log4j-api": { - "locked": "2.9.1", - "requested": "2.9.1" - }, - "org.apache.logging.log4j:log4j-core": { - "locked": "2.9.1", - "requested": "2.9.1" - }, - "org.elasticsearch:elasticsearch": { - "locked": "2.4.6", - "requested": "2.4.6" - }, - "org.slf4j:slf4j-api": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-common" - ], - "locked": "1.7.25" - } - }, - "compileOnly": { - "com.github.rholder:guava-retrying": { - "locked": "2.0.0", - "requested": "2.0.0" - }, - "commons-io:commons-io": { - "locked": "2.4", - "requested": "2.4" - }, - "org.elasticsearch:elasticsearch": { - "locked": "2.4.6", - "requested": "2.4.6" - } - }, - "default": { - "com.amazonaws:aws-java-sdk-s3": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "1.11.86" - }, - "com.fasterxml.jackson.core:jackson-core": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "2.7.5" - }, - "com.fasterxml.jackson.core:jackson-databind": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "2.7.5" - }, - "com.github.rholder:guava-retrying": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-common" - ], - "locked": "2.0.0" - }, - "com.google.inject.extensions:guice-multibindings": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "4.1.0" - }, - "com.google.inject:guice": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "4.1.0" - }, - "com.jayway.jsonpath:json-path": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "2.2.0" - }, - "com.netflix.conductor:conductor-common": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "project": true - }, - "com.netflix.conductor:conductor-core": { - "project": true - }, - "com.netflix.servo:servo-core": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "0.12.17" - }, - "com.netflix.spectator:spectator-api": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "0.68.0" - }, - "com.spotify:completable-futures": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "0.3.1" - }, - "io.reactivex:rxjava": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "1.2.2" 
- }, - "org.apache.commons:commons-lang3": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "3.0" - }, - "org.apache.logging.log4j:log4j-api": { - "locked": "2.9.1", - "requested": "2.9.1" - }, - "org.apache.logging.log4j:log4j-core": { - "locked": "2.9.1", - "requested": "2.9.1" - }, - "org.slf4j:slf4j-api": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-common" - ], - "locked": "1.7.25" - } - }, - "runtime": { - "com.amazonaws:aws-java-sdk-s3": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "1.11.86" - }, - "com.fasterxml.jackson.core:jackson-core": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "2.7.5" - }, - "com.fasterxml.jackson.core:jackson-databind": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "2.7.5" - }, - "com.github.rholder:guava-retrying": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-common" - ], - "locked": "2.0.0" - }, - "com.google.inject.extensions:guice-multibindings": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "4.1.0" - }, - "com.google.inject:guice": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "4.1.0" - }, - "com.jayway.jsonpath:json-path": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "2.2.0" - }, - "com.netflix.conductor:conductor-common": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "project": true - }, - "com.netflix.conductor:conductor-core": { - "project": true - }, - "com.netflix.servo:servo-core": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "0.12.17" - }, - "com.netflix.spectator:spectator-api": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "0.68.0" - }, - "com.spotify:completable-futures": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "0.3.1" - }, - "io.reactivex:rxjava": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "1.2.2" - }, - "org.apache.commons:commons-lang3": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "3.0" - }, - "org.apache.logging.log4j:log4j-api": { - "locked": "2.9.1", - "requested": "2.9.1" - }, - "org.apache.logging.log4j:log4j-core": { - "locked": "2.9.1", - "requested": "2.9.1" - }, - "org.slf4j:slf4j-api": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-common" - ], - "locked": "1.7.25" - } - }, - "runtimeClasspath": { - "com.amazonaws:aws-java-sdk-s3": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "1.11.86" - }, - "com.fasterxml.jackson.core:jackson-core": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "2.7.5" - }, - "com.fasterxml.jackson.core:jackson-databind": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "2.7.5" - }, - "com.github.rholder:guava-retrying": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-common" - ], - "locked": "2.0.0" - }, - "com.google.inject.extensions:guice-multibindings": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "4.1.0" - }, - "com.google.inject:guice": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": 
"4.1.0" - }, - "com.jayway.jsonpath:json-path": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "2.2.0" - }, - "com.netflix.conductor:conductor-common": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "project": true - }, - "com.netflix.conductor:conductor-core": { - "project": true - }, - "com.netflix.servo:servo-core": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "0.12.17" - }, - "com.netflix.spectator:spectator-api": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "0.68.0" - }, - "com.spotify:completable-futures": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "0.3.1" - }, - "io.reactivex:rxjava": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "1.2.2" - }, - "org.apache.commons:commons-lang3": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "3.0" - }, - "org.apache.logging.log4j:log4j-api": { - "locked": "2.9.1", - "requested": "2.9.1" - }, - "org.apache.logging.log4j:log4j-core": { - "locked": "2.9.1", - "requested": "2.9.1" - }, - "org.slf4j:slf4j-api": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-common" - ], - "locked": "1.7.25" - } - }, - "shadow": { - "com.github.rholder:guava-retrying": { - "locked": "2.0.0", - "requested": "2.0.0" - }, - "commons-io:commons-io": { - "locked": "2.4", - "requested": "2.4" - }, - "org.elasticsearch:elasticsearch": { - "locked": "2.4.6", - "requested": "2.4.6" - } - }, - "testCompile": { - "com.amazonaws:aws-java-sdk-s3": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "1.11.86" - }, - "com.fasterxml.jackson.core:jackson-core": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "2.7.5" - }, - "com.fasterxml.jackson.core:jackson-databind": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "2.7.5" - }, - "com.github.rholder:guava-retrying": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-common" - ], - "locked": "2.0.0" - }, - "com.google.inject.extensions:guice-multibindings": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "4.1.0" - }, - "com.google.inject:guice": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "4.1.0" - }, - "com.jayway.jsonpath:json-path": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "2.2.0" - }, - "com.netflix.conductor:conductor-common": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "project": true - }, - "com.netflix.conductor:conductor-core": { - "project": true - }, - "com.netflix.servo:servo-core": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "0.12.17" - }, - "com.netflix.spectator:spectator-api": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "0.68.0" - }, - "com.spotify:completable-futures": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "0.3.1" - }, - "io.reactivex:rxjava": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "1.2.2" - }, - "junit:junit-dep": { - "locked": "4.10", - "requested": "4.10" - }, - "org.apache.commons:commons-lang3": { - "firstLevelTransitive": [ - 
"com.netflix.conductor:conductor-core" - ], - "locked": "3.0" - }, - "org.apache.logging.log4j:log4j-api": { - "locked": "2.9.1", - "requested": "2.9.1" - }, - "org.apache.logging.log4j:log4j-core": { - "locked": "2.9.1", - "requested": "2.9.1" - }, - "org.mockito:mockito-all": { - "locked": "1.10.0", - "requested": "1.10.0" - }, - "org.slf4j:slf4j-api": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-common" - ], - "locked": "1.7.25" - } - }, - "testCompileClasspath": { - "com.amazonaws:aws-java-sdk-s3": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "1.11.86" - }, - "com.fasterxml.jackson.core:jackson-core": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "2.7.5" - }, - "com.fasterxml.jackson.core:jackson-databind": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "2.7.5" - }, - "com.github.rholder:guava-retrying": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-common" - ], - "locked": "2.0.0" - }, - "com.google.inject.extensions:guice-multibindings": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "4.1.0" - }, - "com.google.inject:guice": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "4.1.0" - }, - "com.jayway.jsonpath:json-path": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "2.2.0" - }, - "com.netflix.conductor:conductor-common": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "project": true - }, - "com.netflix.conductor:conductor-core": { - "project": true - }, - "com.netflix.servo:servo-core": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "0.12.17" - }, - "com.netflix.spectator:spectator-api": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "0.68.0" - }, - "com.spotify:completable-futures": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "0.3.1" - }, - "io.reactivex:rxjava": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "1.2.2" - }, - "junit:junit-dep": { - "locked": "4.10", - "requested": "4.10" - }, - "org.apache.commons:commons-lang3": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "3.0" - }, - "org.apache.logging.log4j:log4j-api": { - "locked": "2.9.1", - "requested": "2.9.1" - }, - "org.apache.logging.log4j:log4j-core": { - "locked": "2.9.1", - "requested": "2.9.1" - }, - "org.mockito:mockito-all": { - "locked": "1.10.0", - "requested": "1.10.0" - }, - "org.slf4j:slf4j-api": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-common" - ], - "locked": "1.7.25" - } - }, - "testRuntime": { - "com.amazonaws:aws-java-sdk-s3": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "1.11.86" - }, - "com.fasterxml.jackson.core:jackson-core": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "2.8.1" - }, - "com.fasterxml.jackson.core:jackson-databind": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "2.7.5" - }, - "com.github.rholder:guava-retrying": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-common" - ], - "locked": "2.0.0", - "requested": "2.0.0" - }, - "com.google.inject.extensions:guice-multibindings": { - "firstLevelTransitive": [ - 
"com.netflix.conductor:conductor-core" - ], - "locked": "4.1.0" - }, - "com.google.inject:guice": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "4.1.0" - }, - "com.jayway.jsonpath:json-path": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "2.2.0" - }, - "com.netflix.conductor:conductor-common": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "project": true - }, - "com.netflix.conductor:conductor-core": { - "project": true - }, - "com.netflix.servo:servo-core": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "0.12.17" - }, - "com.netflix.spectator:spectator-api": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "0.68.0" - }, - "com.spotify:completable-futures": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "0.3.1" - }, - "commons-io:commons-io": { - "locked": "2.4", - "requested": "2.4" - }, - "io.reactivex:rxjava": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "1.2.2" - }, - "junit:junit-dep": { - "locked": "4.10", - "requested": "4.10" - }, - "org.apache.commons:commons-lang3": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "3.0" - }, - "org.apache.logging.log4j:log4j-api": { - "locked": "2.9.1", - "requested": "2.9.1" - }, - "org.apache.logging.log4j:log4j-core": { - "locked": "2.9.1", - "requested": "2.9.1" - }, - "org.elasticsearch:elasticsearch": { - "locked": "2.4.6", - "requested": "2.4.6" - }, - "org.mockito:mockito-all": { - "locked": "1.10.0", - "requested": "1.10.0" - }, - "org.slf4j:slf4j-api": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-common" - ], - "locked": "1.7.25" - } - }, - "testRuntimeClasspath": { - "com.amazonaws:aws-java-sdk-s3": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "1.11.86" - }, - "com.fasterxml.jackson.core:jackson-core": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "2.8.1" - }, - "com.fasterxml.jackson.core:jackson-databind": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "2.7.5" - }, - "com.github.rholder:guava-retrying": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-common" - ], - "locked": "2.0.0", - "requested": "2.0.0" - }, - "com.google.inject.extensions:guice-multibindings": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "4.1.0" - }, - "com.google.inject:guice": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "4.1.0" - }, - "com.jayway.jsonpath:json-path": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "2.2.0" - }, - "com.netflix.conductor:conductor-common": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "project": true - }, - "com.netflix.conductor:conductor-core": { - "project": true - }, - "com.netflix.servo:servo-core": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "0.12.17" - }, - "com.netflix.spectator:spectator-api": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "0.68.0" - }, - "com.spotify:completable-futures": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "0.3.1" - }, - 
"commons-io:commons-io": { - "locked": "2.4", - "requested": "2.4" - }, - "io.reactivex:rxjava": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "1.2.2" - }, - "junit:junit-dep": { - "locked": "4.10", - "requested": "4.10" - }, - "org.apache.commons:commons-lang3": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-core" - ], - "locked": "3.0" - }, - "org.apache.logging.log4j:log4j-api": { - "locked": "2.9.1", - "requested": "2.9.1" - }, - "org.apache.logging.log4j:log4j-core": { - "locked": "2.9.1", - "requested": "2.9.1" - }, - "org.elasticsearch:elasticsearch": { - "locked": "2.4.6", - "requested": "2.4.6" - }, - "org.mockito:mockito-all": { - "locked": "1.10.0", - "requested": "1.10.0" - }, - "org.slf4j:slf4j-api": { - "firstLevelTransitive": [ - "com.netflix.conductor:conductor-common" - ], - "locked": "1.7.25" - } - } -} \ No newline at end of file diff --git a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/EmbeddedElasticSearch.java b/es2-persistence/src/main/java/com/netflix/conductor/dao/es/EmbeddedElasticSearch.java deleted file mode 100644 index 31574a84b0..0000000000 --- a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/EmbeddedElasticSearch.java +++ /dev/null @@ -1,141 +0,0 @@ -/** - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.netflix.conductor.dao.es; - -import org.apache.commons.io.FileUtils; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.Node; -import org.elasticsearch.node.NodeBuilder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.nio.file.FileSystems; -import java.nio.file.Files; -import java.nio.file.Path; - -public class EmbeddedElasticSearch { - - private static final String ES_PATH_DATA = "path.data"; - - private static final String ES_PATH_HOME = "path.home"; - - private static final Logger logger = LoggerFactory.getLogger(EmbeddedElasticSearch.class); - - public static final int DEFAULT_PORT = 9200; - public static final String DEFAULT_CLUSTER_NAME = "elasticsearch_test"; - public static final String DEFAULT_HOST = "127.0.0.1"; - public static final String DEFAULT_SETTING_FILE = "embedded-es.yml"; - - private static Node instance; - private static Client client; - private static File dataDir; - - public static void start() throws Exception { - start(DEFAULT_CLUSTER_NAME, DEFAULT_HOST, DEFAULT_PORT, true); - } - - public static synchronized void start(String clusterName, String host, int port, boolean enableTransportClient) throws Exception{ - - if (instance != null && !instance.isClosed()) { - logger.info("Elastic Search is already running on port {}", getPort()); - return; - } - - final Settings settings = getSettings(clusterName, host, port, enableTransportClient); - setupDataDir(settings); - - logger.info("Starting ElasticSearch for cluster {} ", settings.get("cluster.name")); - instance = NodeBuilder.nodeBuilder().data(true).local(enableTransportClient ? false : true).settings(settings).client(false).node(); - instance.start(); - Runtime.getRuntime().addShutdownHook(new Thread() { - @Override - public void run() { - instance.close(); - } - }); - logger.info("ElasticSearch cluster {} started in local mode on port {}", instance.settings().get("cluster.name"), getPort()); - client = instance.client(); - } - - private static void setupDataDir(Settings settings) { - String path = settings.get(ES_PATH_DATA); - cleanDataDir(path); - createDataDir(path); - } - - public static void cleanDataDir(String path) { - try { - logger.info("Deleting contents of data dir {}", path); - File f = new File(path); - if (f.exists()) { - FileUtils.cleanDirectory(new File(path)); - } - } catch (IOException e) { - logger.error("Failed to delete ES data dir"); - } - } - - private static Settings getSettings(String clusterName, String host, int port, boolean enableTransportClient) throws IOException { - dataDir = Files.createTempDirectory(clusterName+"_"+System.currentTimeMillis()+"data").toFile(); - File homeDir = Files.createTempDirectory(clusterName+"_"+System.currentTimeMillis()+"-home").toFile(); - return Settings.builder() - .put("cluster.name", clusterName) - .put("http.host", host) - .put("http.port", port) - .put(ES_PATH_DATA, dataDir.getAbsolutePath()) - .put(ES_PATH_HOME, homeDir.getAbsolutePath()) - .put("http.enabled", true) - .put("script.inline", "on") - .put("script.indexed", "on") - .build(); - } - - private static void createDataDir(String dataDirLoc) { - try { - Path dataDirPath = FileSystems.getDefault().getPath(dataDirLoc); - Files.createDirectories(dataDirPath); - dataDir = dataDirPath.toFile(); - } catch (IOException e) { - logger.error("Failed to create data dir"); - } - } - - public static Client getClient() { - if (instance == 
null || instance.isClosed()) { - logger.error("Embedded ElasticSearch is not Initialized and started, please call start() method first"); - return null; - } - return client; - } - - private static String getPort() { - return instance.settings().get("http.port"); - } - - public static synchronized void stop() { - - if (instance != null && !instance.isClosed()) { - String port = getPort(); - logger.info("Stopping Elastic Search"); - instance.close(); - logger.info("Elastic Search on port {} stopped", port); - } - - } -} diff --git a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/ElasticSearchDAO.java b/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/ElasticSearchDAO.java deleted file mode 100644 index 8237839819..0000000000 --- a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/ElasticSearchDAO.java +++ /dev/null @@ -1,546 +0,0 @@ -/** - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * - */ -package com.netflix.conductor.dao.es.index; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.base.Predicate; -import com.netflix.conductor.annotations.Trace; -import com.netflix.conductor.common.metadata.events.EventExecution; -import com.netflix.conductor.common.metadata.tasks.Task; -import com.netflix.conductor.common.metadata.tasks.TaskExecLog; -import com.netflix.conductor.common.run.SearchResult; -import com.netflix.conductor.common.run.TaskSummary; -import com.netflix.conductor.common.run.Workflow; -import com.netflix.conductor.common.run.WorkflowSummary; -import com.netflix.conductor.core.config.Configuration; -import com.netflix.conductor.core.events.queue.Message; -import com.netflix.conductor.core.execution.ApplicationException; -import com.netflix.conductor.core.execution.ApplicationException.Code; -import com.netflix.conductor.dao.IndexDAO; -import com.netflix.conductor.dao.es.index.query.parser.Expression; -import com.netflix.conductor.dao.es.index.query.parser.ParserException; -import com.netflix.conductor.dao.es.utils.RetryUtil; -import com.netflix.conductor.metrics.Monitors; -import org.apache.commons.io.IOUtils; -import org.apache.commons.lang3.StringUtils; -import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; -import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; -import org.elasticsearch.action.bulk.BulkRequestBuilder; -import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.action.delete.DeleteRequest; -import org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.get.GetRequest; -import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.update.UpdateRequest; -import org.elasticsearch.action.update.UpdateResponse; -import org.elasticsearch.client.Client; 
-import org.elasticsearch.index.IndexNotFoundException; -import org.elasticsearch.index.get.GetField; -import org.elasticsearch.index.query.BoolQueryBuilder; -import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.index.query.QueryStringQueryBuilder; -import org.elasticsearch.indices.IndexAlreadyExistsException; -import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.SearchHits; -import org.elasticsearch.search.sort.SortBuilders; -import org.elasticsearch.search.sort.SortOrder; -import org.joda.time.DateTime; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.inject.Inject; -import javax.inject.Singleton; -import java.io.InputStream; -import java.text.SimpleDateFormat; -import java.time.LocalDate; -import java.time.temporal.ChronoUnit; -import java.util.ArrayList; -import java.util.Date; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.TimeZone; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - -/** - * @author Viren - * - */ -@Trace -@Singleton -public class ElasticSearchDAO implements IndexDAO { - - private static Logger logger = LoggerFactory.getLogger(ElasticSearchDAO.class); - - private static final String WORKFLOW_DOC_TYPE = "workflow"; - - private static final String TASK_DOC_TYPE = "task"; - - private static final String LOG_DOC_TYPE = "task_log"; - - private static final String EVENT_DOC_TYPE = "event"; - - private static final String MSG_DOC_TYPE = "message"; - - private static final String className = ElasticSearchDAO.class.getSimpleName(); - - private static final int RETRY_COUNT = 3; - - private String indexName; - - private String logIndexName; - - private String logIndexPrefix; - - private ObjectMapper objectMapper; - - private Client elasticSearchClient; - - - private static final TimeZone GMT = TimeZone.getTimeZone("GMT"); - - private static final SimpleDateFormat SIMPLE_DATE_FORMAT = new SimpleDateFormat("yyyyMMWW"); - - private final ExecutorService executorService; - - static { - SIMPLE_DATE_FORMAT.setTimeZone(GMT); - } - - @Inject - public ElasticSearchDAO(Client elasticSearchClient, Configuration config, ObjectMapper objectMapper) { - this.objectMapper = objectMapper; - this.elasticSearchClient = elasticSearchClient; - this.indexName = config.getProperty("workflow.elasticsearch.index.name", null); - - try { - - initIndex(); - updateIndexName(config); - Executors.newScheduledThreadPool(1).scheduleAtFixedRate(() -> updateIndexName(config), 0, 1, TimeUnit.HOURS); - - } catch (Exception e) { - logger.error(e.getMessage(), e); - } - int corePoolSize = 6; - int maximumPoolSize = 12; - long keepAliveTime = 1L; - this.executorService = new ThreadPoolExecutor(corePoolSize, - maximumPoolSize, - keepAliveTime, - TimeUnit.MINUTES, - new LinkedBlockingQueue<>()); - } - - private void updateIndexName(Configuration config) { - this.logIndexPrefix = config.getProperty("workflow.elasticsearch.tasklog.index.name", "task_log"); - this.logIndexName = this.logIndexPrefix + "_" + SIMPLE_DATE_FORMAT.format(new Date()); - - try { - 
elasticSearchClient.admin().indices().prepareGetIndex().addIndices(logIndexName).execute().actionGet(); - } catch (IndexNotFoundException infe) { - try { - elasticSearchClient.admin().indices().prepareCreate(logIndexName).execute().actionGet(); - } catch (IndexAlreadyExistsException ignored) { - - } catch (Exception e) { - logger.error(e.getMessage(), e); - } - } - } - - /** - * Initializes the index with required templates and mappings. - */ - private void initIndex() throws Exception { - - //0. Add the tasklog template - GetIndexTemplatesResponse result = elasticSearchClient.admin().indices().prepareGetTemplates("tasklog_template").execute().actionGet(); - if (result.getIndexTemplates().isEmpty()) { - logger.info("Creating the index template 'tasklog_template'"); - InputStream stream = ElasticSearchDAO.class.getResourceAsStream("/template_tasklog.json"); - byte[] templateSource = IOUtils.toByteArray(stream); - - try { - elasticSearchClient.admin().indices().preparePutTemplate("tasklog_template").setSource(templateSource).execute().actionGet(); - } catch (Exception e) { - logger.error("Failed to init tasklog_template", e); - } - } - - //1. Create the required index - try { - elasticSearchClient.admin().indices().prepareGetIndex().addIndices(indexName).execute().actionGet(); - } catch (IndexNotFoundException infe) { - try { - elasticSearchClient.admin().indices().prepareCreate(indexName).execute().actionGet(); - } catch (IndexAlreadyExistsException ignored) { - } - } - - //2. Add Mappings for the workflow document type - GetMappingsResponse getMappingsResponse = elasticSearchClient.admin().indices().prepareGetMappings(indexName).addTypes(WORKFLOW_DOC_TYPE).execute().actionGet(); - if (getMappingsResponse.mappings().isEmpty()) { - logger.info("Adding the workflow type mappings"); - InputStream stream = ElasticSearchDAO.class.getResourceAsStream("/mappings_docType_workflow.json"); - byte[] bytes = IOUtils.toByteArray(stream); - String source = new String(bytes); - try { - elasticSearchClient.admin().indices().preparePutMapping(indexName).setType(WORKFLOW_DOC_TYPE).setSource(source).execute().actionGet(); - } catch (Exception e) { - logger.error(e.getMessage(), e); - } - } - - //3. 
Add Mappings for task document type - getMappingsResponse = elasticSearchClient.admin().indices().prepareGetMappings(indexName).addTypes(TASK_DOC_TYPE).execute().actionGet(); - if (getMappingsResponse.mappings().isEmpty()) { - logger.info("Adding the task type mappings"); - InputStream stream = ElasticSearchDAO.class.getResourceAsStream("/mappings_docType_task.json"); - byte[] bytes = IOUtils.toByteArray(stream); - String source = new String(bytes); - try { - elasticSearchClient.admin().indices().preparePutMapping(indexName).setType(TASK_DOC_TYPE).setSource(source).execute().actionGet(); - } catch (Exception e) { - logger.error(e.getMessage(), e); - } - } - } - - @Override - public void indexWorkflow(Workflow workflow) { - try { - - String id = workflow.getWorkflowId(); - WorkflowSummary summary = new WorkflowSummary(workflow); - byte[] doc = objectMapper.writeValueAsBytes(summary); - UpdateRequest req = new UpdateRequest(indexName, WORKFLOW_DOC_TYPE, id); - req.doc(doc); - req.upsert(doc); - req.retryOnConflict(5); - updateWithRetry(req, "Index workflow into doc_type workflow"); - } catch (Throwable e) { - logger.error("Indexing failed {}", e.getMessage(), e); - } - } - - @Override - public CompletableFuture asyncIndexWorkflow(Workflow workflow) { - return CompletableFuture.runAsync(() -> indexWorkflow(workflow), executorService); - } - - @Override - public void indexTask(Task task) { - try { - String id = task.getTaskId(); - TaskSummary summary = new TaskSummary(task); - byte[] doc = objectMapper.writeValueAsBytes(summary); - - UpdateRequest req = new UpdateRequest(indexName, TASK_DOC_TYPE, id); - req.doc(doc); - req.upsert(doc); - updateWithRetry(req, "Index task into doc_type of task"); - } catch (Throwable e) { - logger.error("Indexing failed {}", e.getMessage(), e); - } - } - - @Override - public CompletableFuture asyncIndexTask(Task task) { - return CompletableFuture.runAsync(() -> indexTask(task), executorService); - } - - @Override - public void addTaskExecutionLogs(List taskExecLogs) { - - if (taskExecLogs.isEmpty()) { - return; - } - try { - BulkRequestBuilder bulkRequestBuilder = elasticSearchClient.prepareBulk(); - for (TaskExecLog taskExecLog : taskExecLogs) { - IndexRequest request = new IndexRequest(logIndexName, LOG_DOC_TYPE); - request.source(objectMapper.writeValueAsBytes(taskExecLog)); - bulkRequestBuilder.add(request); - } - new RetryUtil().retryOnException(() -> bulkRequestBuilder.execute().actionGet(), - null, BulkResponse::hasFailures, RETRY_COUNT, "Indexing all execution logs into doc_type task", "addTaskExecutionLogs"); - } catch (Throwable e) { - logger.error("Indexing failed {}", e.getMessage(), e); - } - - } - - @Override - public CompletableFuture asyncAddTaskExecutionLogs(List logs) { - return CompletableFuture.runAsync(() -> addTaskExecutionLogs(logs), executorService); - } - - @Override - public List getTaskExecutionLogs(String taskId) { - - try { - - QueryBuilder qf; - Expression expression = Expression.fromString("taskId='" + taskId + "'"); - qf = expression.getFilterBuilder(); - - BoolQueryBuilder filterQuery = QueryBuilders.boolQuery().must(qf); - QueryStringQueryBuilder stringQuery = QueryBuilders.queryStringQuery("*"); - BoolQueryBuilder fq = QueryBuilders.boolQuery().must(stringQuery).must(filterQuery); - - final SearchRequestBuilder srb = elasticSearchClient.prepareSearch(logIndexPrefix + "*").setQuery(fq).setTypes(LOG_DOC_TYPE).addSort(SortBuilders.fieldSort("createdTime").order(SortOrder.ASC).unmappedType("long")); - SearchResponse response = 
srb.execute().actionGet(); - SearchHit[] hits = response.getHits().getHits(); - List logs = new ArrayList<>(hits.length); - for (SearchHit hit : hits) { - String source = hit.getSourceAsString(); - TaskExecLog tel = objectMapper.readValue(source, TaskExecLog.class); - logs.add(tel); - } - - return logs; - - } catch (Exception e) { - logger.error(e.getMessage(), e); - } - - return null; - } - - @Override - public void addMessage(String queue, Message msg) { - - // Run all indexing other than workflow indexing in a separate threadpool - Map doc = new HashMap<>(); - doc.put("messageId", msg.getId()); - doc.put("payload", msg.getPayload()); - doc.put("queue", queue); - doc.put("created", System.currentTimeMillis()); - IndexRequest request = new IndexRequest(logIndexName, MSG_DOC_TYPE); - request.source(doc); - new RetryUtil<>().retryOnException(() -> elasticSearchClient.index(request).actionGet(), null, - null, RETRY_COUNT, "Indexing document in for docType: message", "addMessage"); - - } - - @Override - public void addEventExecution(EventExecution eventExecution) { - try { - byte[] doc = objectMapper.writeValueAsBytes(eventExecution); - String id = eventExecution.getName() + "." + eventExecution.getEvent() + "." + eventExecution.getMessageId() + "." + eventExecution.getId(); - UpdateRequest req = new UpdateRequest(logIndexName, EVENT_DOC_TYPE, id); - req.doc(doc); - req.upsert(doc); - req.retryOnConflict(5); - updateWithRetry(req, "Update Event execution for doc_type event"); - } catch (Throwable e) { - logger.error("Indexing failed {}", e.getMessage(), e); - } - } - - - @Override - public CompletableFuture asyncAddEventExecution(EventExecution eventExecution) { - return CompletableFuture.runAsync(() -> addEventExecution(eventExecution), executorService); - } - - private void updateWithRetry(UpdateRequest request, String operationDescription) { - try { - new RetryUtil().retryOnException( - () -> elasticSearchClient.update(request).actionGet(), - null, - null, - RETRY_COUNT, - operationDescription, - "updateWithRetry" - ); - } catch (Exception e) { - Monitors.error(className, "index"); - logger.error("Indexing failed for {}, {}", request.index(), request.type(), e); - } - } - - @Override - public SearchResult searchWorkflows(String query, String freeText, int start, int count, List sort) { - try { - return search(query, start, count, sort, freeText, WORKFLOW_DOC_TYPE); - } catch (ParserException e) { - throw new ApplicationException(Code.BACKEND_ERROR, e.getMessage(), e); - } - } - - @Override - public SearchResult searchTasks(String query, String freeText, int start, int count, List sort) { - try { - return search(query, start, count, sort, freeText, TASK_DOC_TYPE); - } catch (ParserException e) { - throw new ApplicationException(Code.BACKEND_ERROR, e.getMessage(), e); - } - } - - @Override - public void removeWorkflow(String workflowId) { - try { - - DeleteRequest req = new DeleteRequest(indexName, WORKFLOW_DOC_TYPE, workflowId); - DeleteResponse response = elasticSearchClient.delete(req).actionGet(); - if (!response.isFound()) { - logger.error("Index removal failed - document not found by id " + workflowId); - } - } catch (Throwable e) { - logger.error("Index removal failed failed {}", e.getMessage(), e); - Monitors.error(className, "remove"); - } - } - - @Override - public CompletableFuture asyncRemoveWorkflow(String workflowId) { - return CompletableFuture.runAsync(() -> removeWorkflow(workflowId), executorService); - } - - @Override - public void updateWorkflow(String workflowInstanceId, 
String[] keys, Object[] values) { - if (keys.length != values.length) { - throw new IllegalArgumentException("Number of keys and values should be same."); - } - - UpdateRequest request = new UpdateRequest(indexName, WORKFLOW_DOC_TYPE, workflowInstanceId); - Map source = IntStream.range(0, keys.length).boxed() - .collect(Collectors.toMap(i -> keys[i], i -> values[i])); - request.doc(source); - logger.debug("Updating workflow {} with {}", workflowInstanceId, source); - new RetryUtil<>().retryOnException(() -> elasticSearchClient.update(request).actionGet(), null, null, - RETRY_COUNT, "Updating index for doc_type workflow", "updateWorkflow"); - - } - - @Override - public CompletableFuture asyncUpdateWorkflow(String workflowInstanceId, String[] keys, Object[] values) { - return CompletableFuture.runAsync(() -> updateWorkflow(workflowInstanceId, keys, values), executorService); - } - - @Override - public String get(String workflowInstanceId, String fieldToGet) { - Object value = null; - GetRequest request = new GetRequest(indexName, WORKFLOW_DOC_TYPE, workflowInstanceId).fields(fieldToGet); - GetResponse response = elasticSearchClient.get(request).actionGet(); - Map fields = response.getFields(); - if (fields == null) { - return null; - } - GetField field = fields.get(fieldToGet); - if (field != null) value = field.getValue(); - if (value != null) { - return value.toString(); - } - return null; - } - - private SearchResult search(String structuredQuery, int start, int size, List sortOptions, String freeTextQuery, String docType) throws ParserException { - QueryBuilder qf = QueryBuilders.matchAllQuery(); - if (StringUtils.isNotEmpty(structuredQuery)) { - Expression expression = Expression.fromString(structuredQuery); - qf = expression.getFilterBuilder(); - } - - BoolQueryBuilder filterQuery = QueryBuilders.boolQuery().must(qf); - QueryStringQueryBuilder stringQuery = QueryBuilders.queryStringQuery(freeTextQuery); - BoolQueryBuilder fq = QueryBuilders.boolQuery().must(stringQuery).must(filterQuery); - final SearchRequestBuilder srb = elasticSearchClient.prepareSearch(indexName).setQuery(fq).setTypes(docType).setNoFields().setFrom(start).setSize(size); - if (sortOptions != null) { - sortOptions.forEach(sortOption -> { - SortOrder order = SortOrder.ASC; - String field = sortOption; - int indx = sortOption.indexOf(':'); - if (indx > 0) { //Can't be 0, need the field name at-least - field = sortOption.substring(0, indx); - order = SortOrder.valueOf(sortOption.substring(indx + 1)); - } - srb.addSort(field, order); - }); - } - List result = new LinkedList<>(); - SearchResponse response = srb.execute().actionGet(); - response.getHits().forEach(hit -> result.add(hit.getId())); - long count = response.getHits().getTotalHits(); - return new SearchResult<>(count, result); - } - - - @Override - public List searchArchivableWorkflows(String indexName, long archiveTtlDays) { - QueryBuilder q = QueryBuilders.boolQuery() - .must(QueryBuilders.rangeQuery("endTime").lt(LocalDate.now().minusDays(archiveTtlDays))) - .should(QueryBuilders.termQuery("status", "COMPLETED")) - .should(QueryBuilders.termQuery("status", "FAILED")) - .mustNot(QueryBuilders.existsQuery("archived")) - .minimumNumberShouldMatch(1); - SearchRequestBuilder s = elasticSearchClient.prepareSearch(indexName) - .setTypes("workflow") - .setQuery(q) - .setSize(1000); - - SearchResponse response = s.execute().actionGet(); - - SearchHits hits = response.getHits(); - List ids = new LinkedList<>(); - for (SearchHit hit : hits.getHits()) { - 
ids.add(hit.getId()); - } - return ids; - } - - //copy paste from com.netflix.conductor.dao.es.index.ElasticSearchDAO5.searchRecentIncompletedWorkflows - public List searchRecentRunningWorkflows(int lastModifiedHoursAgoFrom, int lastModifiedHoursAgoTo) { - DateTime dateTime = new DateTime(); - QueryBuilder q = QueryBuilders.boolQuery() - .must(QueryBuilders.rangeQuery("updateTime") - .gt(dateTime.minusHours(lastModifiedHoursAgoFrom))) - .must(QueryBuilders.rangeQuery("updateTime") - .lt(dateTime.minusHours(lastModifiedHoursAgoTo))) - .must(QueryBuilders.termQuery("status", "RUNNING")); - - SearchRequestBuilder s = elasticSearchClient.prepareSearch(indexName) - .setTypes("workflow") - .setQuery(q) - .setSize(5000) - .addSort("updateTime", SortOrder.ASC); - - SearchResponse response = s.execute().actionGet(); - SearchHits hits = response.getHits(); - List ids = new LinkedList<>(); - for (SearchHit hit : hits.getHits()) { - ids.add(hit.getId()); - } - return ids; - } -} \ No newline at end of file diff --git a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/ElasticSearchModule.java b/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/ElasticSearchModule.java deleted file mode 100644 index c8dccc7cc4..0000000000 --- a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/ElasticSearchModule.java +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * - */ -package com.netflix.conductor.dao.es.index; - -import com.google.inject.AbstractModule; -import com.google.inject.Provides; -import com.netflix.conductor.core.config.Configuration; -import com.netflix.conductor.dao.IndexDAO; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.InetSocketTransportAddress; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.inject.Singleton; -import java.net.InetAddress; - - -/** - * @author Viren - * Provider for the elasticsearch transport client - */ -public class ElasticSearchModule extends AbstractModule { - - private static Logger log = LoggerFactory.getLogger(ElasticSearchDAO.class); - - @Provides - @Singleton - public Client getClient(Configuration config) throws Exception { - - String clusterAddress = config.getProperty("workflow.elasticsearch.url", ""); - if(clusterAddress.equals("")) { - log.warn("workflow.elasticsearch.url is not set. 
Indexing will remain DISABLED."); - } - - Settings.Builder settings = Settings.settingsBuilder(); - settings.put("client.transport.ignore_cluster_name", true); - settings.put("client.transport.sniff", true); - - TransportClient tc = TransportClient.builder().settings(settings).build(); - String[] hosts = clusterAddress.split(","); - for (String host : hosts) { - String[] hostparts = host.split(":"); - String hostname = hostparts[0]; - int hostport = 9200; - if (hostparts.length == 2) hostport = Integer.parseInt(hostparts[1]); - tc.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName(hostname), hostport)); - } - return tc; - - } - - @Override - protected void configure() { - bind(IndexDAO.class).to(ElasticSearchDAO.class); - } -} diff --git a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/AbstractNode.java b/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/AbstractNode.java deleted file mode 100644 index 4588a2ee24..0000000000 --- a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/AbstractNode.java +++ /dev/null @@ -1,187 +0,0 @@ -/** - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * - */ -package com.netflix.conductor.dao.es.index.query.parser; - -import java.io.InputStream; -import java.math.BigDecimal; -import java.util.HashSet; -import java.util.Set; -import java.util.regex.Pattern; - - -/** - * @author Viren - * - */ -public abstract class AbstractNode { - - public static final Pattern WHITESPACE = Pattern.compile("\\s"); - - protected static Set comparisonOprs = new HashSet(); - - static { - comparisonOprs.add('>'); - comparisonOprs.add('<'); - comparisonOprs.add('='); - } - - protected InputStream is; - - - - protected AbstractNode(InputStream is) throws ParserException { - this.is = is; - this.parse(); - } - - protected boolean isNumber(String test){ - try{ - //If you can convert to a big decimal value, then it is a number. - new BigDecimal(test); - return true; - - }catch(NumberFormatException e){ - //Ignore - } - return false; - } - - protected boolean isBoolOpr(byte[] buffer){ - if(buffer.length > 1 && buffer[0] == 'O' && buffer[1] == 'R'){ - return true; - }else if(buffer.length > 2 && buffer[0] == 'A' && buffer[1] == 'N' && buffer[2] == 'D'){ - return true; - } - return false; - } - - protected boolean isComparisonOpr(byte[] buffer){ - if(buffer[0] == 'I' && buffer[1] == 'N'){ - return true; - }else if(buffer[0] == '!' 
&& buffer[1] == '='){ - return true; - }else{ - return comparisonOprs.contains((char)buffer[0]); - } - - } - - protected byte[] peek(int length) throws Exception { - return read(length, true); - } - - protected byte[] read(int length) throws Exception { - return read(length, false); - } - - protected String readToken() throws Exception { - skipWhitespace(); - StringBuilder sb = new StringBuilder(); - while(is.available() > 0){ - char c = (char) peek(1)[0]; - if(c == ' ' || c == '\t' || c == '\n' || c == '\r'){ - is.skip(1); - break; - }else if(c == '=' || c == '>' || c == '<' || c == '!'){ - //do not skip - break; - } - sb.append(c); - is.skip(1); - } - return sb.toString().trim(); - } - - protected boolean isNumeric(char c) { - if (c == '-' || c == 'e' || (c >= '0' && c <= '9') || c == '.'){ - return true; - } - return false; - } - - protected void assertExpected(byte[] found, String expected) throws ParserException { - assertExpected(new String(found), expected); - } - - protected void assertExpected(String found, String expected) throws ParserException { - if(!found.equals(expected)){ - throw new ParserException("Expected " + expected + ", found " + found); - } - } - protected void assertExpected(char found, char expected) throws ParserException { - if(found != expected){ - throw new ParserException("Expected " + expected + ", found " + found); - } - } - - protected static void efor(int length, FunctionThrowingException consumer) throws Exception { - for(int i = 0; i < length; i++){ - consumer.accept(i); - } - } - - protected abstract void _parse() throws Exception; - - //Public stuff here - private void parse() throws ParserException { - //skip white spaces - skipWhitespace(); - try{ - _parse(); - }catch(Exception e){ - System.out.println("\t" + this.getClass().getSimpleName() + "->" + this.toString()); - if(!(e instanceof ParserException)){ - throw new ParserException("Error parsing", e); - }else{ - throw (ParserException)e; - } - } - skipWhitespace(); - } - - //Private methods - - private byte[] read(int length, boolean peekOnly) throws Exception { - byte[] buf = new byte[length]; - if(peekOnly){ - is.mark(length); - } - efor(length, (Integer c)-> buf[c] = (byte) is.read()); - if(peekOnly){ - is.reset(); - } - return buf; - } - - protected void skipWhitespace() throws ParserException { - try{ - while(is.available() > 0){ - byte c = peek(1)[0]; - if(c == ' ' || c == '\t' || c == '\n' || c == '\r'){ - //skip - read(1); - }else{ - break; - } - } - }catch(Exception e){ - throw new ParserException(e.getMessage(), e); - } - } -} diff --git a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/ComparisonOp.java b/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/ComparisonOp.java deleted file mode 100644 index 12ecf80f27..0000000000 --- a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/ComparisonOp.java +++ /dev/null @@ -1,78 +0,0 @@ -/** - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * - */ -package com.netflix.conductor.dao.es.index.query.parser; - -import java.io.InputStream; - -/** - * @author Viren - */ -public class ComparisonOp extends AbstractNode { - - public static enum Operators { - BETWEEN("BETWEEN"), EQUALS("="), LESS_THAN("<"), GREATER_THAN(">"), IN("IN"), NOT_EQUALS("!="), IS("IS"); - - private String value; - Operators(String value){ - this.value = value; - } - - public String value(){ - return value; - } - } - - private static final int betwnLen = Operators.BETWEEN.value().length(); - - private String value; - - public ComparisonOp(InputStream is) throws ParserException { - super(is); - } - - @Override - protected void _parse() throws Exception { - byte[] peeked = peek(betwnLen); - if(peeked[0] == '=' || peeked[0] == '>' || peeked[0] == '<'){ - this.value = new String(peeked, 0, 1); - }else if(peeked[0] == 'I' && peeked[1] == 'N'){ - this.value = "IN"; - }else if(peeked[0] == 'I' && peeked[1] == 'S'){ - this.value = "IS"; - }else if(peeked[0] == '!' && peeked[1] == '='){ - this.value = "!="; - }else if(peeked.length == betwnLen && new String(peeked).equals(Operators.BETWEEN.value())){ - this.value = Operators.BETWEEN.value(); - }else{ - throw new ParserException("Expecting an operator (=, >, <, !=, BETWEEN, IN), but found none. Peeked=>" + new String(peeked)); - } - - read(this.value.length()); - } - - @Override - public String toString(){ - return " " + value + " "; - } - - public String getOperator(){ - return value; - } - -} diff --git a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/ConstValue.java b/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/ConstValue.java deleted file mode 100644 index 5ba72cccc8..0000000000 --- a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/ConstValue.java +++ /dev/null @@ -1,135 +0,0 @@ -/** - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * - */ -package com.netflix.conductor.dao.es.index.query.parser; - -import java.io.InputStream; - - -/** - * @author Viren - * Constant value can be: - *
      - *
    1. List of values (a,b,c) - *
    2. Range of values (m AND n) - *
    3. A value (x) - *
    4. A value is either a string or a number - *
    - * - */ -public class ConstValue extends AbstractNode { - - public static enum SystemConsts { - NULL("null"), NOT_NULL("not null"); - private String value; - SystemConsts(String value){ - this.value = value; - } - - public String value(){ - return value; - } - } - - private Object value; - - private SystemConsts sysConsts; - - public ConstValue(InputStream is) throws ParserException { - super(is); - } - - @Override - protected void _parse() throws Exception { - byte[] peeked = peek(4); - String sp = new String(peeked).trim(); - //Read a constant value (number or a string) - if(peeked[0] == '"' || peeked[0] == '\''){ - this.value = readString(is); - } else if(sp.toLowerCase().startsWith("not")){ - this.value = SystemConsts.NOT_NULL.value(); - sysConsts = SystemConsts.NOT_NULL; - read(SystemConsts.NOT_NULL.value().length()); - } else if(sp.equalsIgnoreCase(SystemConsts.NULL.value())){ - this.value = SystemConsts.NULL.value(); - sysConsts = SystemConsts.NULL; - read(SystemConsts.NULL.value().length()); - } else{ - this.value = readNumber(is); - } - } - - private String readNumber(InputStream is) throws Exception { - StringBuilder sb = new StringBuilder(); - while(is.available() > 0){ - is.mark(1); - char c = (char) is.read(); - if(!isNumeric(c)){ - is.reset(); - break; - }else{ - sb.append(c); - } - } - String numValue = sb.toString().trim(); - return numValue; - } - /** - * Reads an escaped string - * @throws Exception - */ - private String readString(InputStream is) throws Exception { - char delim = (char)read(1)[0]; - StringBuilder sb = new StringBuilder(); - boolean valid = false; - while(is.available() > 0){ - char c = (char) is.read(); - if(c == delim){ - valid = true; - break; - } else if(c == '\\'){ - // read the next character as part of the value - c = (char) is.read(); - sb.append(c); - } else{ - sb.append(c); - } - } - if(!valid){ - throw new ParserException("String constant is not quoted with <" + delim + "> : " + sb.toString()); - } - return "\"" + sb.toString() + "\""; - } - - public Object getValue(){ - return value; - } - - @Override - public String toString(){ - return ""+value; - } - - public boolean isSysConstant(){ - return this.sysConsts != null; - } - - public SystemConsts getSysConstant(){ - return this.sysConsts; - } -} diff --git a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/Expression.java b/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/Expression.java deleted file mode 100644 index 1229cdfb38..0000000000 --- a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/Expression.java +++ /dev/null @@ -1,122 +0,0 @@ -/** - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -/** - * - */ -package com.netflix.conductor.dao.es.index.query.parser; - -import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.QueryBuilders; - -import java.io.BufferedInputStream; -import java.io.ByteArrayInputStream; -import java.io.InputStream; - -/** - * @author Viren - * - */ -public class Expression extends AbstractNode implements FilterProvider { - - private NameValue nameVal; - - private GroupedExpression ge; - - private BooleanOp op; - - private Expression rhs; - - public Expression(InputStream is) throws ParserException { - super(is); - } - - @Override - protected void _parse() throws Exception { - byte[] peeked = peek(1); - - if(peeked[0] == '('){ - this.ge = new GroupedExpression(is); - }else{ - this.nameVal = new NameValue(is); - } - - peeked = peek(3); - if( isBoolOpr(peeked) ){ - //we have an expression next - this.op = new BooleanOp(is); - this.rhs = new Expression(is); - } - } - - public boolean isBinaryExpr(){ - return this.op != null; - } - - public BooleanOp getOperator(){ - return this.op; - } - - public Expression getRightHandSide(){ - return this.rhs; - } - - public boolean isNameValue(){ - return this.nameVal != null; - } - - public NameValue getNameValue(){ - return this.nameVal; - } - - public GroupedExpression getGroupedExpression(){ - return this.ge; - } - - @Override - public QueryBuilder getFilterBuilder(){ - QueryBuilder lhs = null; - if(nameVal != null){ - lhs = nameVal.getFilterBuilder(); - }else{ - lhs = ge.getFilterBuilder(); - } - - if(this.isBinaryExpr()){ - QueryBuilder rhsFilter = rhs.getFilterBuilder(); - if(this.op.isAnd()){ - return QueryBuilders.boolQuery().must(lhs).must(rhsFilter); - }else{ - return QueryBuilders.boolQuery().should(lhs).should(rhsFilter); - } - }else{ - return lhs; - } - - } - - @Override - public String toString(){ - if(isBinaryExpr()){ - return "" + (nameVal==null?ge:nameVal) + op + rhs; - }else{ - return "" + (nameVal==null?ge:nameVal); - } - } - - public static Expression fromString(String value) throws ParserException{ - return new Expression(new BufferedInputStream(new ByteArrayInputStream(value.getBytes()))); - } -} diff --git a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/FilterProvider.java b/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/FilterProvider.java deleted file mode 100644 index fd0a809a41..0000000000 --- a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/FilterProvider.java +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -/** - * - */ -package com.netflix.conductor.dao.es.index.query.parser; - -import org.elasticsearch.index.query.QueryBuilder; - -/** - * @author Viren - * - */ -public interface FilterProvider { - - /** - * - * @return FilterBuilder for elasticsearch - */ - public QueryBuilder getFilterBuilder(); - -} diff --git a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/FunctionThrowingException.java b/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/FunctionThrowingException.java deleted file mode 100644 index b32b7f0733..0000000000 --- a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/FunctionThrowingException.java +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * - */ -package com.netflix.conductor.dao.es.index.query.parser; - -/** - * @author Viren - * - */ -@FunctionalInterface -public interface FunctionThrowingException { - - void accept(T t) throws Exception; - -} diff --git a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/GroupedExpression.java b/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/GroupedExpression.java deleted file mode 100644 index 31bb3296db..0000000000 --- a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/GroupedExpression.java +++ /dev/null @@ -1,67 +0,0 @@ -/** - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -/** - * - */ -package com.netflix.conductor.dao.es.index.query.parser; - -import org.elasticsearch.index.query.QueryBuilder; - -import java.io.InputStream; - -/** - * @author Viren - * - */ -public class GroupedExpression extends AbstractNode implements FilterProvider { - - private Expression expression; - - public GroupedExpression(InputStream is) throws ParserException { - super(is); - } - - @Override - protected void _parse() throws Exception { - byte[] peeked = read(1); - assertExpected(peeked, "("); - - this.expression = new Expression(is); - - peeked = read(1); - assertExpected(peeked, ")"); - - } - - @Override - public String toString(){ - return "(" + expression + ")"; - } - - /** - * @return the expression - */ - public Expression getExpression() { - return expression; - } - - @Override - public QueryBuilder getFilterBuilder() { - return expression.getFilterBuilder(); - } - - -} diff --git a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/ListConst.java b/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/ListConst.java deleted file mode 100644 index 964c69652a..0000000000 --- a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/ListConst.java +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * - */ -package com.netflix.conductor.dao.es.index.query.parser; - -import java.io.InputStream; -import java.util.LinkedList; -import java.util.List; - - -/** - * @author Viren - * List of constants - * - */ -public class ListConst extends AbstractNode { - - private List values; - - public ListConst(InputStream is) throws ParserException { - super(is); - } - - @Override - protected void _parse() throws Exception { - byte[] peeked = read(1); - assertExpected(peeked, "("); - this.values = readList(); - } - - private List readList() throws Exception { - List list = new LinkedList(); - boolean valid = false; - char c; - - StringBuilder sb = new StringBuilder(); - while(is.available() > 0){ - c = (char) is.read(); - if(c == ')'){ - valid = true; - break; - }else if(c == ','){ - list.add(sb.toString().trim()); - sb = new StringBuilder(); - }else{ - sb.append(c); - } - } - list.add(sb.toString().trim()); - if(!valid){ - throw new ParserException("Expected ')' but never encountered in the stream"); - } - return list; - } - - public List getList(){ - return (List) values; - } - - @Override - public String toString(){ - return values.toString(); - } -} diff --git a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/NameValue.java b/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/NameValue.java deleted file mode 100644 index cc15d01811..0000000000 --- a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/NameValue.java +++ /dev/null @@ -1,117 +0,0 @@ -/** - * Copyright 2016 Netflix, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * - */ -package com.netflix.conductor.dao.es.index.query.parser; - -import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.QueryBuilders; - -import java.io.InputStream; - -/** - * @author Viren - *
    - * Represents an expression of the form as below:
    - * key OPR value 
    - * OPR is the comparison operator which could be on the following:
    - * 	>, <, = , !=, IN, BETWEEN
    - * 
    - */ -public class NameValue extends AbstractNode implements FilterProvider { - - private Name name; - - private ComparisonOp op; - - private ConstValue value; - - private Range range; - - private ListConst valueList; - - public NameValue(InputStream is) throws ParserException { - super(is); - } - - @Override - protected void _parse() throws Exception { - this.name = new Name(is); - this.op = new ComparisonOp(is); - - if(this.op.getOperator().equals(ComparisonOp.Operators.BETWEEN.value())){ - this.range = new Range(is); - }if(this.op.getOperator().equals(ComparisonOp.Operators.IN.value())){ - this.valueList = new ListConst(is); - }else{ - this.value = new ConstValue(is); - } - } - - @Override - public String toString() { - return "" + name + op + value; - } - - /** - * @return the name - */ - public Name getName() { - return name; - } - - /** - * @return the op - */ - public ComparisonOp getOp() { - return op; - } - - /** - * @return the value - */ - public ConstValue getValue() { - return value; - } - - @Override - public QueryBuilder getFilterBuilder(){ - if(op.getOperator().equals(ComparisonOp.Operators.EQUALS.value())){ - return QueryBuilders.queryStringQuery(name.getName() + ":" + value.getValue().toString()); - }else if(op.getOperator().equals(ComparisonOp.Operators.BETWEEN.value())){ - return QueryBuilders.rangeQuery(name.getName()).from(range.getLow()).to(range.getHigh()); - }else if(op.getOperator().equals(ComparisonOp.Operators.IN.value())){ - return QueryBuilders.termsQuery(name.getName(), valueList.getList()); - }else if(op.getOperator().equals(ComparisonOp.Operators.NOT_EQUALS.value())){ - return QueryBuilders.queryStringQuery("NOT " + name.getName() + ":" + value.getValue().toString()); - }else if(op.getOperator().equals(ComparisonOp.Operators.GREATER_THAN.value())){ - return QueryBuilders.rangeQuery(name.getName()).from(value.getValue()).includeLower(false).includeUpper(false); - }else if(op.getOperator().equals(ComparisonOp.Operators.IS.value())){ - if(value.getSysConstant().equals(ConstValue.SystemConsts.NULL)){ - return QueryBuilders.boolQuery().mustNot(QueryBuilders.boolQuery().must(QueryBuilders.matchAllQuery()).mustNot(QueryBuilders.existsQuery(name.getName()))); - } else if(value.getSysConstant().equals(ConstValue.SystemConsts.NOT_NULL)){ - return QueryBuilders.boolQuery().mustNot(QueryBuilders.boolQuery().must(QueryBuilders.matchAllQuery()).must(QueryBuilders.existsQuery(name.getName()))); - } - }else if(op.getOperator().equals(ComparisonOp.Operators.LESS_THAN.value())){ - return QueryBuilders.rangeQuery(name.getName()).to(value.getValue()).includeLower(false).includeUpper(false); - } - - throw new IllegalStateException("Incorrect/unsupported operators"); - } - - -} diff --git a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/ParserException.java b/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/ParserException.java deleted file mode 100644 index bbf1f63f0e..0000000000 --- a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/ParserException.java +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * - */ -package com.netflix.conductor.dao.es.index.query.parser; - -/** - * @author Viren - * - */ -@SuppressWarnings("serial") -public class ParserException extends Exception { - - public ParserException(String message) { - super(message); - } - - public ParserException(String message, Throwable cause) { - super(message, cause); - } - -} diff --git a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/Range.java b/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/Range.java deleted file mode 100644 index 78005be516..0000000000 --- a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/Range.java +++ /dev/null @@ -1,90 +0,0 @@ -/** - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * - */ -package com.netflix.conductor.dao.es.index.query.parser; - -import java.io.InputStream; - - -/** - * @author Viren - * - */ -public class Range extends AbstractNode { - - private String low; - - private String high; - - public Range(InputStream is) throws ParserException { - super(is); - } - - @Override - protected void _parse() throws Exception { - this.low = readNumber(is); - - skipWhitespace(); - byte[] peeked = read(3); - assertExpected(peeked, "AND"); - skipWhitespace(); - - String num = readNumber(is); - if(num == null || "".equals(num)){ - throw new ParserException("Missing the upper range value..."); - } - this.high = num; - - } - - private String readNumber(InputStream is) throws Exception { - StringBuilder sb = new StringBuilder(); - while(is.available() > 0){ - is.mark(1); - char c = (char) is.read(); - if(!isNumeric(c)){ - is.reset(); - break; - }else{ - sb.append(c); - } - } - String numValue = sb.toString().trim(); - return numValue; - } - - - /** - * @return the low - */ - public String getLow() { - return low; - } - - /** - * @return the high - */ - public String getHigh() { - return high; - } - - @Override - public String toString(){ - return low + " AND " + high; - } -} diff --git a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/utils/RetryUtil.java b/es2-persistence/src/main/java/com/netflix/conductor/dao/es/utils/RetryUtil.java deleted file mode 100644 index 5020087e6b..0000000000 --- a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/utils/RetryUtil.java +++ /dev/null @@ -1,127 +0,0 @@ -/** - * Copyright 2018 Netflix, Inc. - *

    - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
    - * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.netflix.conductor.dao.es.utils; - -import com.github.rholder.retry.Attempt; -import com.github.rholder.retry.BlockStrategies; -import com.github.rholder.retry.RetryException; -import com.github.rholder.retry.RetryListener; -import com.github.rholder.retry.Retryer; -import com.github.rholder.retry.RetryerBuilder; -import com.github.rholder.retry.StopStrategies; -import com.github.rholder.retry.WaitStrategies; -import com.google.common.base.Predicate; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Optional; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Supplier; - -/** - * Utility class that deals with retries in case of transient failures. - * - * Note: - * Create a new {@link RetryUtil} for every operation that needs to retried for the stated retries. - * - * Limitations: - *

      - *
    • - * The current implementation does not make a distinction between transient and non transient errors. - * There is no categorization of transient and non transient failure in Conductor. - * Once the exception hierarchy is available in Conductor, this class implementation can be changed to - * short circuit the non transient errors. - *
    • - * Currently only couple of wait strategies are implemented {@link WaitStrategies#exponentialWait()} and - * {@link WaitStrategies#randomWait(long, TimeUnit)} with fixed attributes for each of the strategies. - *
    • - * The retry limit is not configurable and is hard coded to 3 - *
    - * - * @param The type of the object that will be returned by the flaky supplier function - */ -public class RetryUtil { - - private static final Logger logger = LoggerFactory.getLogger(RetryUtil.class); - - private AtomicInteger internalNumberOfRetries = new AtomicInteger(); - - /** - * A helper method which has the ability to execute a flaky supplier function and retry in case of failures. - * - * @param supplierCommand: Any function that is flaky and needs multiple retries. - * @param throwablePredicate: A Guava {@link Predicate} housing the exceptional - * criteria to perform informed filtering before retrying. - * @param resultRetryPredicate: a predicate to be evaluated for a valid condition of the expected result - * @param retryCount: Number of times the function is to be retried before failure - * @param shortDescription: A short description of the function that will be used in logging and error propagation. - * The intention of this description is to provide context for Operability. - * @param operationName: The name of the function for traceability in logs - * @return an instance of return type of the supplierCommand - * @throws RuntimeException in case of failed attempts to get T, which needs to be returned by the supplierCommand. - * The instance of the returned exception has: - *
      - *
    • A message with shortDescription and operationName with the number of retries made
    • And a reference to the original exception generated during the last {@link Attempt} of the retry
    - */ - @SuppressWarnings("Guava") - public T retryOnException(Supplier supplierCommand, - Predicate throwablePredicate, - Predicate resultRetryPredicate, - int retryCount, - String shortDescription, String operationName) throws RuntimeException { - - Retryer retryer = RetryerBuilder.newBuilder() - .retryIfException(Optional.ofNullable(throwablePredicate).orElse(exception -> true)) - .retryIfResult(Optional.ofNullable(resultRetryPredicate).orElse(result -> false)) - .withWaitStrategy(WaitStrategies.join( - WaitStrategies.exponentialWait(1000, 90, TimeUnit.SECONDS), - WaitStrategies.randomWait(100, TimeUnit.MILLISECONDS, 500, TimeUnit.MILLISECONDS) - )) - .withStopStrategy(StopStrategies.stopAfterAttempt(retryCount)) - .withBlockStrategy(BlockStrategies.threadSleepStrategy()) - .withRetryListener(new RetryListener() { - @Override - public void onRetry(Attempt attempt) { - logger.debug("Attempt # {}, {} millis since first attempt. Operation: {}, description:{}", - attempt.getAttemptNumber(), attempt.getDelaySinceFirstAttempt(), operationName, shortDescription); - internalNumberOfRetries.incrementAndGet(); - } - }) - .build(); - - try { - return retryer.call(supplierCommand::get); - } catch (ExecutionException executionException) { - String errorMessage = String.format("Operation '%s:%s' failed for the %d time in RetryUtil", operationName, - shortDescription, internalNumberOfRetries.get()); - logger.debug(errorMessage); - throw new RuntimeException(errorMessage, executionException.getCause()); - } catch (RetryException retryException) { - String errorMessage = String.format("Operation '%s:%s' failed after retrying %d times, retry limit %d", operationName, - shortDescription, internalNumberOfRetries.get(), 3); - logger.debug(errorMessage, retryException.getLastFailedAttempt().getExceptionCause()); - throw new RuntimeException(errorMessage, retryException.getLastFailedAttempt().getExceptionCause()); - } - } -} diff --git a/es2-persistence/src/test/java/com/netflix/conductor/dao/es/index/query/parser/AbstractParserTest.java b/es2-persistence/src/test/java/com/netflix/conductor/dao/es/index/query/parser/AbstractParserTest.java deleted file mode 100644 index 8a5412dc97..0000000000 --- a/es2-persistence/src/test/java/com/netflix/conductor/dao/es/index/query/parser/AbstractParserTest.java +++ /dev/null @@ -1,34 +0,0 @@ -package com.netflix.conductor.dao.es.index.query.parser; /** - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -/** - * - */ - -import java.io.BufferedInputStream; -import java.io.ByteArrayInputStream; -import java.io.InputStream; - -/** - * @author Viren - * - */ -public abstract class AbstractParserTest { - - protected InputStream getInputStream(String expression) { - return new BufferedInputStream(new ByteArrayInputStream(expression.getBytes())); - } - -} diff --git a/es2-persistence/src/test/java/com/netflix/conductor/dao/es/index/query/parser/TestBooleanOp.java b/es2-persistence/src/test/java/com/netflix/conductor/dao/es/index/query/parser/TestBooleanOp.java deleted file mode 100644 index 2f235dbba9..0000000000 --- a/es2-persistence/src/test/java/com/netflix/conductor/dao/es/index/query/parser/TestBooleanOp.java +++ /dev/null @@ -1,51 +0,0 @@ -package com.netflix.conductor.dao.es.index.query.parser; /** - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * - */ - -import org.junit.Test; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; - -/** - * @author Viren - * - */ -public class TestBooleanOp extends AbstractParserTest { - - @Test - public void test() throws Exception { - String[] tests = new String[]{"AND", "OR"}; - for(String test : tests){ - BooleanOp name = new BooleanOp(getInputStream(test)); - String nameVal = name.getOperator(); - assertNotNull(nameVal); - assertEquals(test, nameVal); - } - } - - @Test(expected=ParserException.class) - public void testInvalid() throws Exception { - String test = "<"; - BooleanOp name = new BooleanOp(getInputStream(test)); - String nameVal = name.getOperator(); - assertNotNull(nameVal); - assertEquals(test, nameVal); - - } -} diff --git a/es2-persistence/src/test/java/com/netflix/conductor/dao/es/index/query/parser/TestComparisonOp.java b/es2-persistence/src/test/java/com/netflix/conductor/dao/es/index/query/parser/TestComparisonOp.java deleted file mode 100644 index c9feeb6d75..0000000000 --- a/es2-persistence/src/test/java/com/netflix/conductor/dao/es/index/query/parser/TestComparisonOp.java +++ /dev/null @@ -1,50 +0,0 @@ -package com.netflix.conductor.dao.es.index.query.parser; /** - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -/** - * - */ - -import org.junit.Test; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; - -/** - * @author Viren - * - */ -public class TestComparisonOp extends AbstractParserTest { - - @Test - public void test() throws Exception { - String[] tests = new String[]{"<",">","=","!=","IN"}; - for(String test : tests){ - ComparisonOp name = new ComparisonOp(getInputStream(test)); - String nameVal = name.getOperator(); - assertNotNull(nameVal); - assertEquals(test, nameVal); - } - } - - @Test(expected=ParserException.class) - public void testInvalidOp() throws Exception { - String test = "AND"; - ComparisonOp name = new ComparisonOp(getInputStream(test)); - String nameVal = name.getOperator(); - assertNotNull(nameVal); - assertEquals(test, nameVal); - } -} diff --git a/es2-persistence/src/test/java/com/netflix/conductor/dao/es/index/query/parser/TestConstValue.java b/es2-persistence/src/test/java/com/netflix/conductor/dao/es/index/query/parser/TestConstValue.java deleted file mode 100644 index 2a6676e3ba..0000000000 --- a/es2-persistence/src/test/java/com/netflix/conductor/dao/es/index/query/parser/TestConstValue.java +++ /dev/null @@ -1,102 +0,0 @@ -package com.netflix.conductor.dao.es.index.query.parser; /** - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * - */ - -import org.junit.Test; - -import java.util.List; - -import static org.junit.Assert.*; - -/** - * @author Viren - * - */ -public class TestConstValue extends AbstractParserTest { - - @Test - public void testStringConst() throws Exception { - String test = "'string value'"; - String expected = test.replaceAll("'", "\""); //Quotes are removed but then the result is double quoted. 
- ConstValue cv = new ConstValue(getInputStream(test)); - assertNotNull(cv.getValue()); - assertEquals(expected, cv.getValue()); - assertTrue(cv.getValue() instanceof String); - - test = "\"string value\""; - cv = new ConstValue(getInputStream(test)); - assertNotNull(cv.getValue()); - assertEquals(expected, cv.getValue()); - assertTrue(cv.getValue() instanceof String); - } - - @Test - public void testSystemConst() throws Exception { - String test = "null"; - ConstValue cv = new ConstValue(getInputStream(test)); - assertNotNull(cv.getValue()); - assertTrue(cv.getValue() instanceof String); - assertEquals(cv.getSysConstant(), ConstValue.SystemConsts.NULL); - test = "null"; - - test = "not null"; - cv = new ConstValue(getInputStream(test)); - assertNotNull(cv.getValue()); - assertEquals(cv.getSysConstant(), ConstValue.SystemConsts.NOT_NULL); - } - - @Test(expected=ParserException.class) - public void testInvalid() throws Exception { - String test = "'string value"; - new ConstValue(getInputStream(test)); - } - - - @Test - public void testNumConst() throws Exception { - String test = "12345.89"; - ConstValue cv = new ConstValue(getInputStream(test)); - assertNotNull(cv.getValue()); - assertTrue(cv.getValue() instanceof String); //Numeric values are stored as string as we are just passing thru them to ES - assertEquals(test, cv.getValue()); - } - - @Test - public void testRange() throws Exception { - String test = "50 AND 100"; - Range range = new Range(getInputStream(test)); - assertEquals("50", range.getLow()); - assertEquals("100", range.getHigh()); - } - - @Test(expected=ParserException.class) - public void testBadRange() throws Exception { - String test = "50 AND"; - new Range(getInputStream(test)); - } - - @Test - public void testArray() throws Exception { - String test = "(1, 3, 'name', 'value2')"; - ListConst lc = new ListConst(getInputStream(test)); - List list = lc.getList(); - assertEquals(4, list.size()); - assertTrue(list.contains("1")); - assertEquals("'value2'", list.get(3)); //Values are preserved as it is... - } -} diff --git a/es2-persistence/src/test/java/com/netflix/conductor/dao/es/index/query/parser/TestExpression.java b/es2-persistence/src/test/java/com/netflix/conductor/dao/es/index/query/parser/TestExpression.java deleted file mode 100644 index 25cc6b6ab1..0000000000 --- a/es2-persistence/src/test/java/com/netflix/conductor/dao/es/index/query/parser/TestExpression.java +++ /dev/null @@ -1,152 +0,0 @@ -package com.netflix.conductor.dao.es.index.query.parser; /** - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -/** - * - */ - -import org.junit.Test; - -import java.io.BufferedInputStream; -import java.io.ByteArrayInputStream; -import java.io.InputStream; - -import static org.junit.Assert.*; - -/** - * @author Viren - * - */ -public class TestExpression extends AbstractParserTest { - - @Test - public void test() throws Exception{ - String test = "type='IMAGE' AND subType ='sdp' AND (metadata.width > 50 OR metadata.height > 50)"; - //test = "type='IMAGE' AND subType ='sdp'"; - //test = "(metadata.type = 'IMAGE')"; - InputStream is = new BufferedInputStream(new ByteArrayInputStream(test.getBytes())); - Expression expr = new Expression(is); - - System.out.println(expr); - - assertTrue(expr.isBinaryExpr()); - assertNull(expr.getGroupedExpression()); - assertNotNull(expr.getNameValue()); - - NameValue nv = expr.getNameValue(); - assertEquals("type", nv.getName().getName()); - assertEquals("=", nv.getOp().getOperator()); - assertEquals("\"IMAGE\"", nv.getValue().getValue()); - - Expression rhs = expr.getRightHandSide(); - assertNotNull(rhs); - assertTrue(rhs.isBinaryExpr()); - - nv = rhs.getNameValue(); - assertNotNull(nv); //subType = sdp - assertNull(rhs.getGroupedExpression()); - assertEquals("subType", nv.getName().getName()); - assertEquals("=", nv.getOp().getOperator()); - assertEquals("\"sdp\"", nv.getValue().getValue()); - - assertEquals("AND", rhs.getOperator().getOperator()); - rhs = rhs.getRightHandSide(); - assertNotNull(rhs); - assertFalse(rhs.isBinaryExpr()); - GroupedExpression ge = rhs.getGroupedExpression(); - assertNotNull(ge); - expr = ge.getExpression(); - assertNotNull(expr); - - assertTrue(expr.isBinaryExpr()); - nv = expr.getNameValue(); - assertNotNull(nv); - assertEquals("metadata.width", nv.getName().getName()); - assertEquals(">", nv.getOp().getOperator()); - assertEquals("50", nv.getValue().getValue()); - - - - assertEquals("OR", expr.getOperator().getOperator()); - rhs = expr.getRightHandSide(); - assertNotNull(rhs); - assertFalse(rhs.isBinaryExpr()); - nv = rhs.getNameValue(); - assertNotNull(nv); - - assertEquals("metadata.height", nv.getName().getName()); - assertEquals(">", nv.getOp().getOperator()); - assertEquals("50", nv.getValue().getValue()); - - } - - @Test - public void testWithSysConstants() throws Exception{ - String test = "type='IMAGE' AND subType ='sdp' AND description IS null"; - InputStream is = new BufferedInputStream(new ByteArrayInputStream(test.getBytes())); - Expression expr = new Expression(is); - - System.out.println(expr); - - assertTrue(expr.isBinaryExpr()); - assertNull(expr.getGroupedExpression()); - assertNotNull(expr.getNameValue()); - - NameValue nv = expr.getNameValue(); - assertEquals("type", nv.getName().getName()); - assertEquals("=", nv.getOp().getOperator()); - assertEquals("\"IMAGE\"", nv.getValue().getValue()); - - Expression rhs = expr.getRightHandSide(); - assertNotNull(rhs); - assertTrue(rhs.isBinaryExpr()); - - nv = rhs.getNameValue(); - assertNotNull(nv); //subType = sdp - assertNull(rhs.getGroupedExpression()); - assertEquals("subType", nv.getName().getName()); - assertEquals("=", nv.getOp().getOperator()); - assertEquals("\"sdp\"", nv.getValue().getValue()); - - assertEquals("AND", rhs.getOperator().getOperator()); - rhs = rhs.getRightHandSide(); - assertNotNull(rhs); - assertFalse(rhs.isBinaryExpr()); - GroupedExpression ge = rhs.getGroupedExpression(); - assertNull(ge); - nv = rhs.getNameValue(); - assertNotNull(nv); - assertEquals("description", nv.getName().getName()); - assertEquals("IS", nv.getOp().getOperator()); - 
ConstValue cv = nv.getValue(); - assertNotNull(cv); - assertEquals(cv.getSysConstant(), ConstValue.SystemConsts.NULL); - - test = "description IS not null"; - is = new BufferedInputStream(new ByteArrayInputStream(test.getBytes())); - expr = new Expression(is); - - System.out.println(expr); - nv = expr.getNameValue(); - assertNotNull(nv); - assertEquals("description", nv.getName().getName()); - assertEquals("IS", nv.getOp().getOperator()); - cv = nv.getValue(); - assertNotNull(cv); - assertEquals(cv.getSysConstant(), ConstValue.SystemConsts.NOT_NULL); - - } - -} diff --git a/es2-persistence/src/test/java/com/netflix/conductor/dao/es/index/query/parser/TestGroupedExpression.java b/es2-persistence/src/test/java/com/netflix/conductor/dao/es/index/query/parser/TestGroupedExpression.java deleted file mode 100644 index a7fc26b935..0000000000 --- a/es2-persistence/src/test/java/com/netflix/conductor/dao/es/index/query/parser/TestGroupedExpression.java +++ /dev/null @@ -1,32 +0,0 @@ -package com.netflix.conductor.dao.es.index.query.parser; /** - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * - */ - -import org.junit.Test; - -/** - * @author Viren - * - */ -public class TestGroupedExpression { - - @Test - public void test(){ - - } -} diff --git a/es2-persistence/src/test/java/com/netflix/conductor/dao/es/index/query/parser/TestName.java b/es2-persistence/src/test/java/com/netflix/conductor/dao/es/index/query/parser/TestName.java deleted file mode 100644 index af61f92d25..0000000000 --- a/es2-persistence/src/test/java/com/netflix/conductor/dao/es/index/query/parser/TestName.java +++ /dev/null @@ -1,39 +0,0 @@ -package com.netflix.conductor.dao.es.index.query.parser; /** - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -/** - * - */ - -import org.junit.Test; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; - -/** - * @author Viren - * - */ -public class TestName extends AbstractParserTest { - - @Test - public void test() throws Exception{ - String test = "metadata.en_US.lang "; - Name name = new Name(getInputStream(test)); - String nameVal = name.getName(); - assertNotNull(nameVal); - assertEquals(test.trim(), nameVal); - } -} diff --git a/es5-persistence/README.md b/es5-persistence/README.md index dce62966b2..272dd6d516 100644 --- a/es5-persistence/README.md +++ b/es5-persistence/README.md @@ -1,3 +1,2 @@ ## Usage -Set `workflow.elasticsearch.version=5` in Server module's configuration options. \ No newline at end of file diff --git a/es5-persistence/build.gradle b/es5-persistence/build.gradle index 05e0cd6935..a36128e0b4 100644 --- a/es5-persistence/build.gradle +++ b/es5-persistence/build.gradle @@ -1,13 +1,13 @@ dependencies { + compile project(':conductor-core') - compile project(':conductor-core') - compile "com.google.inject:guice:${revGuice}" - - //ES5 Dependency - compile "org.apache.logging.log4j:log4j-api:${revLog4jApi}" - compile "org.apache.logging.log4j:log4j-core:${revLog4jCore}" + compile "commons-io:commons-io:${revCommonsIo}" compile "org.elasticsearch:elasticsearch:${revElasticSearch5}" compile "org.elasticsearch.client:transport:${revElasticSearch5}" - compile "commons-io:commons-io:${revCommonsIo}" + compile "org.elasticsearch.client:elasticsearch-rest-high-level-client:${revElasticSearch5}" + + //ES5 Dependency + compile "org.apache.logging.log4j:log4j-api:${revLog4jApi}" + compile "org.apache.logging.log4j:log4j-core:${revLog4jCore}" } diff --git a/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/EmbeddedElasticSearchV5.java b/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/EmbeddedElasticSearchV5.java deleted file mode 100644 index c106ff006d..0000000000 --- a/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/EmbeddedElasticSearchV5.java +++ /dev/null @@ -1,164 +0,0 @@ -/** - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.netflix.conductor.dao.es5; - -import org.apache.commons.io.FileUtils; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.Node; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.nio.file.FileSystems; -import java.nio.file.Files; -import java.nio.file.Path; - -import java.util.Collection; - -import org.elasticsearch.node.InternalSettingsPreparer; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.transport.Netty4Plugin; - -import static java.util.Collections.singletonList; - - -public class EmbeddedElasticSearchV5 { - - private static final String ES_PATH_DATA = "path.data"; - - private static final String ES_PATH_HOME = "path.home"; - - private static final Logger logger = LoggerFactory.getLogger(EmbeddedElasticSearchV5.class); - - public static final int DEFAULT_PORT = 9200; - public static final String DEFAULT_CLUSTER_NAME = "elasticsearch_test"; - public static final String DEFAULT_HOST = "127.0.0.1"; - public static final String DEFAULT_SETTING_FILE = "embedded-es.yml"; - - private static Node instance; - private static Client client; - private static File dataDir; - - private static class PluginConfigurableNode extends Node { - public PluginConfigurableNode(Settings preparedSettings, Collection> classpathPlugins) { - super(InternalSettingsPreparer.prepareEnvironment(preparedSettings, null), classpathPlugins); - } - } - - public static void start() throws Exception { - start(DEFAULT_CLUSTER_NAME, DEFAULT_HOST, DEFAULT_PORT, true); - } - - public static synchronized void start(String clusterName, String host, int port, boolean enableTransportClient) throws Exception { - - if (instance != null && !instance.isClosed()) { - logger.info("Elastic Search is already running on port {}", getPort()); - return; - } - - final Settings settings = getSettings(clusterName, host, port, enableTransportClient); - setupDataDir(settings); - - logger.info("Starting ElasticSearch for cluster {} ", settings.get("cluster.name")); - instance = new PluginConfigurableNode(settings, singletonList(Netty4Plugin.class)); - instance.start(); - Runtime.getRuntime().addShutdownHook(new Thread() { - @Override - public void run() { - try { - instance.close(); - } catch (IOException e) { - logger.error("Error closing ElasticSearch"); - } - } - }); - logger.info("ElasticSearch cluster {} started in local mode on port {}", instance.settings().get("cluster.name"), getPort()); - client = instance.client(); - } - - private static void setupDataDir(Settings settings) { - String path = settings.get(ES_PATH_DATA); - cleanDataDir(path); - createDataDir(path); - } - - public static void cleanDataDir(String path) { - try { - logger.info("Deleting contents of data dir {}", path); - File f = new File(path); - if (f.exists()) { - FileUtils.cleanDirectory(new File(path)); - } - } catch (IOException e) { - logger.error("Failed to delete ES data dir"); - } - } - - private static Settings getSettings(String clusterName, String host, int port, boolean enableTransportClient) throws IOException { - dataDir = Files.createTempDirectory(clusterName + "_" + System.currentTimeMillis() + "data").toFile(); - File homeDir = Files.createTempDirectory(clusterName + "_" + System.currentTimeMillis() + "-home").toFile(); - Settings.Builder settingsBuilder = Settings.builder() - .put("cluster.name", clusterName) - .put("http.host", host) - .put("http.port", port) - 
.put(ES_PATH_DATA, dataDir.getAbsolutePath()) - .put(ES_PATH_HOME, homeDir.getAbsolutePath()) - .put("http.enabled", true) - .put("script.inline", true) - .put("script.stored", true) - .put("node.data", true) - .put("http.enabled", true) - .put("http.type", "netty4") - .put("transport.type", "netty4"); - - return settingsBuilder.build(); - } - - private static void createDataDir(String dataDirLoc) { - try { - Path dataDirPath = FileSystems.getDefault().getPath(dataDirLoc); - Files.createDirectories(dataDirPath); - dataDir = dataDirPath.toFile(); - } catch (IOException e) { - logger.error("Failed to create data dir"); - } - } - - public static Client getClient() { - if (instance == null || instance.isClosed()) { - logger.error("Embedded ElasticSearch is not Initialized and started, please call start() method first"); - return null; - } - return client; - } - - private static String getPort() { - return instance.settings().get("http.port"); - } - - public static synchronized void stop() throws Exception { - - if (instance != null && !instance.isClosed()) { - String port = getPort(); - logger.info("Stopping Elastic Search"); - instance.close(); - logger.info("Elastic Search on port {} stopped", port); - } - - } -} \ No newline at end of file diff --git a/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/ElasticSearchDAOV5.java b/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/ElasticSearchDAOV5.java index 2c1444678d..2e8a184c54 100644 --- a/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/ElasticSearchDAOV5.java +++ b/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/ElasticSearchDAOV5.java @@ -14,7 +14,7 @@ * limitations under the License. */ /** - * + * */ package com.netflix.conductor.dao.es5.index; @@ -27,14 +27,14 @@ import com.netflix.conductor.common.run.TaskSummary; import com.netflix.conductor.common.run.Workflow; import com.netflix.conductor.common.run.WorkflowSummary; -import com.netflix.conductor.core.config.Configuration; +import com.netflix.conductor.common.utils.RetryUtil; import com.netflix.conductor.core.events.queue.Message; import com.netflix.conductor.core.execution.ApplicationException; import com.netflix.conductor.core.execution.ApplicationException.Code; -import com.netflix.conductor.common.utils.RetryUtil; import com.netflix.conductor.dao.IndexDAO; import com.netflix.conductor.dao.es5.index.query.parser.Expression; -import com.netflix.conductor.dao.es5.index.query.parser.ParserException; +import com.netflix.conductor.elasticsearch.ElasticSearchConfiguration; +import com.netflix.conductor.elasticsearch.query.parser.ParserException; import com.netflix.conductor.metrics.Monitors; import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.StringUtils; @@ -57,7 +57,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexNotFoundException; -import org.elasticsearch.index.get.GetField; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; @@ -99,444 +98,453 @@ @Singleton public class ElasticSearchDAOV5 implements IndexDAO { - private static Logger logger = LoggerFactory.getLogger(ElasticSearchDAOV5.class); - - private static final String WORKFLOW_DOC_TYPE = "workflow"; - - private static final String TASK_DOC_TYPE = "task"; - - private static final String LOG_DOC_TYPE = "task_log"; - - private static final String 
EVENT_DOC_TYPE = "event"; - - private static final String MSG_DOC_TYPE = "message"; - - private static final String className = ElasticSearchDAOV5.class.getSimpleName(); - - private static final int RETRY_COUNT = 3; - - private String indexName; - - private String logIndexName; - - private String logIndexPrefix; - - private ObjectMapper objectMapper; - - private Client elasticSearchClient; - - - private static final TimeZone GMT = TimeZone.getTimeZone("GMT"); - + private static Logger logger = LoggerFactory.getLogger(ElasticSearchDAOV5.class); + + private static final String WORKFLOW_DOC_TYPE = "workflow"; + + private static final String TASK_DOC_TYPE = "task"; + + private static final String LOG_DOC_TYPE = "task_log"; + + private static final String EVENT_DOC_TYPE = "event"; + + private static final String MSG_DOC_TYPE = "message"; + + private static final String className = ElasticSearchDAOV5.class.getSimpleName(); + + private static final int RETRY_COUNT = 3; + + private String indexName; + + private String logIndexName; + + private String logIndexPrefix; + + private ObjectMapper objectMapper; + + private Client elasticSearchClient; + + + private static final TimeZone GMT = TimeZone.getTimeZone("GMT"); + private static final SimpleDateFormat SIMPLE_DATE_FORMAT = new SimpleDateFormat("yyyyMMww"); - private final ExecutorService executorService; - + private final ExecutorService executorService; + static { - SIMPLE_DATE_FORMAT.setTimeZone(GMT); + SIMPLE_DATE_FORMAT.setTimeZone(GMT); + } + + @Inject + public ElasticSearchDAOV5(Client elasticSearchClient, ElasticSearchConfiguration config, ObjectMapper objectMapper) { + this.objectMapper = objectMapper; + this.elasticSearchClient = elasticSearchClient; + this.indexName = config.getIndexName(); + this.logIndexPrefix = config.getTasklogIndexName(); + + int corePoolSize = 6; + int maximumPoolSize = 12; + long keepAliveTime = 1L; + this.executorService = new ThreadPoolExecutor(corePoolSize, + maximumPoolSize, + keepAliveTime, + TimeUnit.MINUTES, + new LinkedBlockingQueue<>()); } - - @Inject - public ElasticSearchDAOV5(Client elasticSearchClient, Configuration config, ObjectMapper objectMapper) { - this.objectMapper = objectMapper; - this.elasticSearchClient = elasticSearchClient; - this.indexName = config.getProperty("workflow.elasticsearch.index.name", null); - - try { - - initIndex(); - updateIndexName(config); - Executors.newScheduledThreadPool(1).scheduleAtFixedRate(() -> updateIndexName(config), 0, 1, TimeUnit.HOURS); - - } catch (Exception e) { - logger.error(e.getMessage(), e); - } - - int corePoolSize = 6; - int maximumPoolSize = 12; - long keepAliveTime = 1L; - this.executorService = new ThreadPoolExecutor(corePoolSize, - maximumPoolSize, - keepAliveTime, - TimeUnit.MINUTES, - new LinkedBlockingQueue<>()); - } - - private void updateIndexName(Configuration config) { - this.logIndexPrefix = config.getProperty("workflow.elasticsearch.tasklog.index.name", "task_log"); - this.logIndexName = this.logIndexPrefix + "_" + SIMPLE_DATE_FORMAT.format(new Date()); - - try { - elasticSearchClient.admin().indices().prepareGetIndex().addIndices(logIndexName).execute().actionGet(); - } catch (IndexNotFoundException infe) { - try { - elasticSearchClient.admin().indices().prepareCreate(logIndexName).execute().actionGet(); - } catch (ResourceAlreadyExistsException ilee) { - - } catch (Exception e) { - logger.error("Failed to update log index name: {}", logIndexName, e); - } - } - } - - /** - * Initializes the index with required templates and mappings. 
- */ - private void initIndex() throws Exception { - - //0. Add the tasklog template - GetIndexTemplatesResponse result = elasticSearchClient.admin().indices().prepareGetTemplates("tasklog_template").execute().actionGet(); - if(result.getIndexTemplates().isEmpty()) { - logger.info("Creating the index template 'tasklog_template'"); - InputStream stream = ElasticSearchDAOV5.class.getResourceAsStream("/template_tasklog.json"); - byte[] templateSource = IOUtils.toByteArray(stream); - - try { - elasticSearchClient.admin().indices().preparePutTemplate("tasklog_template").setSource(templateSource, XContentType.JSON).execute().actionGet(); - }catch(Exception e) { - logger.error("Failed to init tasklog_template", e); - } - } - - //1. Create the required index - try { - elasticSearchClient.admin().indices().prepareGetIndex().addIndices(indexName).execute().actionGet(); - }catch(IndexNotFoundException infe) { - try { - elasticSearchClient.admin().indices().prepareCreate(indexName).execute().actionGet(); - }catch(ResourceAlreadyExistsException done) {} - } - - //2. Add Mappings for the workflow document type - GetMappingsResponse getMappingsResponse = elasticSearchClient.admin().indices().prepareGetMappings(indexName).addTypes(WORKFLOW_DOC_TYPE).execute().actionGet(); - if(getMappingsResponse.mappings().isEmpty()) { - logger.info("Adding the workflow type mappings"); - InputStream stream = ElasticSearchDAOV5.class.getResourceAsStream("/mappings_docType_workflow.json"); - byte[] bytes = IOUtils.toByteArray(stream); - String source = new String(bytes); - try { - elasticSearchClient.admin().indices().preparePutMapping(indexName).setType(WORKFLOW_DOC_TYPE).setSource(source).execute().actionGet(); - }catch(Exception e) { - logger.error("Failed to init index mappings", e); - } - } - - //3. 
Add Mappings for task document type - getMappingsResponse = elasticSearchClient.admin().indices().prepareGetMappings(indexName).addTypes(TASK_DOC_TYPE).execute().actionGet(); - if (getMappingsResponse.mappings().isEmpty()) { - logger.info("Adding the task type mappings"); - InputStream stream = ElasticSearchDAOV5.class.getResourceAsStream("/mappings_docType_task.json"); - byte[] bytes = IOUtils.toByteArray(stream); - String source = new String(bytes); - try { - elasticSearchClient.admin().indices().preparePutMapping(indexName).setType(TASK_DOC_TYPE).setSource(source).execute().actionGet(); - } catch (Exception e) { - logger.error(e.getMessage(), e); - } - } - } - - @Override - public void indexWorkflow(Workflow workflow) { - try { - - String id = workflow.getWorkflowId(); - WorkflowSummary summary = new WorkflowSummary(workflow); - byte[] doc = objectMapper.writeValueAsBytes(summary); - - UpdateRequest req = new UpdateRequest(indexName, WORKFLOW_DOC_TYPE, id); - req.doc(doc, XContentType.JSON); - req.upsert(doc, XContentType.JSON); - req.retryOnConflict(5); - updateWithRetry(req, "Index workflow into doc_type workflow"); - - } catch (Throwable e) { - logger.error("Failed to index workflow: {}", workflow.getWorkflowId(), e); - } - } - - @Override - public CompletableFuture asyncIndexWorkflow(Workflow workflow) { - return CompletableFuture.runAsync(() -> indexWorkflow(workflow), executorService); - } - - @Override - public void indexTask(Task task) { - try { - - String id = task.getTaskId(); - TaskSummary summary = new TaskSummary(task); - byte[] doc = objectMapper.writeValueAsBytes(summary); - - UpdateRequest req = new UpdateRequest(indexName, TASK_DOC_TYPE, id); - req.doc(doc, XContentType.JSON); - req.upsert(doc, XContentType.JSON); - updateWithRetry(req, "Index workflow into doc_type workflow"); - - } catch (Throwable e) { - logger.error("Failed to index task: {}", task.getTaskId(), e); - } - } - - @Override - public CompletableFuture asyncIndexTask(Task task) { - return CompletableFuture.runAsync(() -> indexTask(task), executorService); - } - - - @Override - public void addTaskExecutionLogs(List taskExecLogs) { - if (taskExecLogs.isEmpty()) { - return; - } - try { - BulkRequestBuilder bulkRequestBuilder = elasticSearchClient.prepareBulk(); - for (TaskExecLog log : taskExecLogs) { - IndexRequest request = new IndexRequest(logIndexName, LOG_DOC_TYPE); - request.source(objectMapper.writeValueAsBytes(log), XContentType.JSON); - bulkRequestBuilder.add(request); - } - new RetryUtil().retryOnException(() -> bulkRequestBuilder.execute().actionGet(), null , - BulkResponse::hasFailures, RETRY_COUNT, "Indexing all execution logs into doc_type task", "addTaskExecutionLogs"); - } catch (Throwable e) { - List taskIds = taskExecLogs.stream().map(TaskExecLog::getTaskId).collect(Collectors.toList()); - logger.error("Failed to index task execution logs for tasks: ", taskIds, e); - } - } - - @Override - public CompletableFuture asyncAddTaskExecutionLogs(List logs) { - return CompletableFuture.runAsync(() -> addTaskExecutionLogs(logs), executorService); - } - - @Override - public List getTaskExecutionLogs(String taskId) { - try { - Expression expression = Expression.fromString("taskId='" + taskId + "'"); - QueryBuilder queryBuilder = expression.getFilterBuilder(); - - BoolQueryBuilder filterQuery = QueryBuilders.boolQuery().must(queryBuilder); - QueryStringQueryBuilder stringQuery = QueryBuilders.queryStringQuery("*"); - BoolQueryBuilder fq = QueryBuilders.boolQuery().must(stringQuery).must(filterQuery); - 
- final SearchRequestBuilder srb = elasticSearchClient.prepareSearch(logIndexPrefix + "*").setQuery(fq).setTypes(LOG_DOC_TYPE).addSort(SortBuilders.fieldSort("createdTime").order(SortOrder.ASC)); - SearchResponse response = srb.execute().actionGet(); - SearchHit[] hits = response.getHits().getHits(); - List logs = new ArrayList<>(hits.length); - for(SearchHit hit : hits) { - String source = hit.getSourceAsString(); - TaskExecLog tel = objectMapper.readValue(source, TaskExecLog.class); - logs.add(tel); - } - - return logs; - - }catch(Exception e) { - logger.error("Failed to get task execution logs for task: {}", taskId, e); - } - - return null; - } - - @Override - public void addMessage(String queue, Message message) { - Map doc = new HashMap<>(); - doc.put("messageId", message.getId()); - doc.put("payload", message.getPayload()); - doc.put("queue", queue); - doc.put("created", System.currentTimeMillis()); - IndexRequest request = new IndexRequest(logIndexName, MSG_DOC_TYPE); - request.source(doc); - try { - new RetryUtil<>().retryOnException(() -> elasticSearchClient.index(request).actionGet(), null, - null, RETRY_COUNT, "Indexing document in for docType: message", "addMessage"); - } catch (Throwable e) { - logger.error("Failed to index message: {}", message.getId(), e); - } - } - - @Override - public void addEventExecution(EventExecution eventExecution) { - try { - byte[] doc = objectMapper.writeValueAsBytes(eventExecution); - String id = eventExecution.getName() + "." + eventExecution.getEvent() + "." + eventExecution.getMessageId() + "." + eventExecution.getId(); - UpdateRequest req = new UpdateRequest(logIndexName, EVENT_DOC_TYPE, id); - req.doc(doc, XContentType.JSON); - req.upsert(doc, XContentType.JSON); - req.retryOnConflict(5); - updateWithRetry(req, "Update Event execution for doc_type event"); - } catch (Throwable e) { - logger.error("Failed to index event execution: {}", eventExecution.getId(), e); - } - } - - @Override - public CompletableFuture asyncAddEventExecution(EventExecution eventExecution) { - return CompletableFuture.runAsync(() -> addEventExecution(eventExecution), executorService); - } - - private void updateWithRetry(UpdateRequest request, String operationDescription) { - try { - new RetryUtil().retryOnException(() -> elasticSearchClient.update(request).actionGet(), null, - null, RETRY_COUNT, operationDescription, "updateWithRetry"); - } catch (Exception e) { - Monitors.error(className, "index"); - logger.error("Failed to index {} for request type: {}", request.index(), request.type(), e); - } - } - - @Override - public SearchResult searchWorkflows(String query, String freeText, int start, int count, List sort) { - try { - - return search(query, start, count, sort, freeText, WORKFLOW_DOC_TYPE); - - } catch (ParserException e) { - throw new ApplicationException(Code.BACKEND_ERROR, e.getMessage(), e); - } - } - - @Override - public SearchResult searchTasks(String query, String freeText, int start, int count, List sort) { - try { - return search(query, start, count, sort, freeText, TASK_DOC_TYPE); - } catch (ParserException e) { - throw new ApplicationException(Code.BACKEND_ERROR, e.getMessage(), e); - } - } - - @Override - public void removeWorkflow(String workflowId) { - try { - DeleteRequest request = new DeleteRequest(indexName, WORKFLOW_DOC_TYPE, workflowId); - DeleteResponse response = elasticSearchClient.delete(request).actionGet(); - if (response.getResult() == DocWriteResponse.Result.DELETED) { - logger.error("Index removal failed - document not found by id: 
{}", workflowId); - } - } catch (Throwable e) { - logger.error("Failed to remove workflow {} from index", workflowId, e); - Monitors.error(className, "remove"); - } - } - - @Override - public CompletableFuture asyncRemoveWorkflow(String workflowId) { - return CompletableFuture.runAsync(() -> removeWorkflow(workflowId), executorService); - } - - @Override - public void updateWorkflow(String workflowInstanceId, String[] keys, Object[] values) { - if (keys.length != values.length) { - throw new ApplicationException(Code.INVALID_INPUT, "Number of keys and values do not match"); - } - - UpdateRequest request = new UpdateRequest(indexName, WORKFLOW_DOC_TYPE, workflowInstanceId); - Map source = IntStream.range(0, keys.length).boxed() - .collect(Collectors.toMap(i -> keys[i], i -> values[i])); - request.doc(source); - logger.debug("Updating workflow {} with {}", workflowInstanceId, source); - new RetryUtil<>().retryOnException(() -> elasticSearchClient.update(request), null, null, RETRY_COUNT, - "Updating index for doc_type workflow", "updateWorkflow"); - } - - @Override - public CompletableFuture asyncUpdateWorkflow(String workflowInstanceId, String[] keys, Object[] values) { - return CompletableFuture.runAsync(() -> updateWorkflow(workflowInstanceId, keys, values), executorService); - } - - @Override - public String get(String workflowInstanceId, String fieldToGet) { - GetRequest request = new GetRequest(indexName, WORKFLOW_DOC_TYPE, workflowInstanceId) - .fetchSourceContext(new FetchSourceContext(true, new String[]{fieldToGet}, Strings.EMPTY_ARRAY)); - GetResponse response = elasticSearchClient.get(request).actionGet(); - - if (response.isExists()){ - Map sourceAsMap = response.getSourceAsMap(); - if (sourceAsMap.containsKey(fieldToGet)){ - return sourceAsMap.get(fieldToGet).toString(); - } - } - - logger.debug("Unable to find Workflow: {} in ElasticSearch index: {}.", workflowInstanceId, indexName); - return null; - } - - private SearchResult search(String structuredQuery, int start, int size, List sortOptions, String freeTextQuery, String docType) throws ParserException { - QueryBuilder queryBuilder = QueryBuilders.matchAllQuery(); - if(StringUtils.isNotEmpty(structuredQuery)) { - Expression expression = Expression.fromString(structuredQuery); - queryBuilder = expression.getFilterBuilder(); - } - - BoolQueryBuilder filterQuery = QueryBuilders.boolQuery().must(queryBuilder); - QueryStringQueryBuilder stringQuery = QueryBuilders.queryStringQuery(freeTextQuery); - BoolQueryBuilder fq = QueryBuilders.boolQuery().must(stringQuery).must(filterQuery); - final SearchRequestBuilder srb = elasticSearchClient.prepareSearch(indexName).setQuery(fq).setTypes(docType).storedFields("_id").setFrom(start).setSize(size); - if(sortOptions != null){ - sortOptions.forEach(sortOption -> { - SortOrder order = SortOrder.ASC; - String field = sortOption; - int indx = sortOption.indexOf(':'); - if(indx > 0){ //Can't be 0, need the field name at-least - field = sortOption.substring(0, indx); - order = SortOrder.valueOf(sortOption.substring(indx+1)); - } - srb.addSort(field, order); - }); - } - List result = new LinkedList(); - SearchResponse response = srb.get(); - response.getHits().forEach(hit -> { - result.add(hit.getId()); - }); - long count = response.getHits().getTotalHits(); - return new SearchResult(count, result); - } - - @Override - public List searchArchivableWorkflows(String indexName, long archiveTtlDays) { - QueryBuilder q = QueryBuilders.boolQuery() - 
.must(QueryBuilders.rangeQuery("endTime").lt(LocalDate.now().minusDays(archiveTtlDays).toString())) - .should(QueryBuilders.termQuery("status", "COMPLETED")) - .should(QueryBuilders.termQuery("status", "FAILED")) - .mustNot(QueryBuilders.existsQuery("archived")) - .minimumShouldMatch(1); - SearchRequestBuilder s = elasticSearchClient.prepareSearch(indexName) - .setTypes("workflow") - .setQuery(q) - .setSize(1000); - SearchResponse response = s.execute().actionGet(); - SearchHits hits = response.getHits(); - List ids = new LinkedList<>(); - for (SearchHit hit : hits.getHits()) { - ids.add(hit.getId()); - } - return ids; - } - - public List searchRecentRunningWorkflows(int lastModifiedHoursAgoFrom, int lastModifiedHoursAgoTo) { - DateTime dateTime = new DateTime(); - QueryBuilder q = QueryBuilders.boolQuery() - .must(QueryBuilders.rangeQuery("updateTime") - .gt(dateTime.minusHours(lastModifiedHoursAgoFrom))) - .must(QueryBuilders.rangeQuery("updateTime") - .lt(dateTime.minusHours(lastModifiedHoursAgoTo))) - .must(QueryBuilders.termQuery("status", "RUNNING")); - - SearchRequestBuilder s = elasticSearchClient.prepareSearch(indexName) - .setTypes("workflow") - .setQuery(q) - .setSize(5000) + + @Override + public void setup() throws Exception { + elasticSearchClient.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().get(); + + try { + + initIndex(); + updateIndexName(); + Executors.newScheduledThreadPool(1).scheduleAtFixedRate(() -> updateIndexName(), 0, 1, TimeUnit.HOURS); + + } catch (Exception e) { + logger.error(e.getMessage(), e); + } + + //1. Create the required index + try { + elasticSearchClient.admin().indices().prepareGetIndex().addIndices(indexName).execute().actionGet(); + }catch(IndexNotFoundException infe) { + try { + elasticSearchClient.admin().indices().prepareCreate(indexName).execute().actionGet(); + }catch(ResourceAlreadyExistsException done) {} + } + + //2. Add Mappings for the workflow document type + GetMappingsResponse getMappingsResponse = elasticSearchClient.admin().indices().prepareGetMappings(indexName).addTypes(WORKFLOW_DOC_TYPE).execute().actionGet(); + if(getMappingsResponse.mappings().isEmpty()) { + logger.info("Adding the workflow type mappings"); + InputStream stream = ElasticSearchDAOV5.class.getResourceAsStream("/mappings_docType_workflow.json"); + byte[] bytes = IOUtils.toByteArray(stream); + String source = new String(bytes); + try { + elasticSearchClient.admin().indices().preparePutMapping(indexName).setType(WORKFLOW_DOC_TYPE).setSource(source).execute().actionGet(); + }catch(Exception e) { + logger.error("Failed to init index mappings", e); + } + } + + //3. 
Add Mappings for task document type + getMappingsResponse = elasticSearchClient.admin().indices().prepareGetMappings(indexName).addTypes(TASK_DOC_TYPE).execute().actionGet(); + if (getMappingsResponse.mappings().isEmpty()) { + logger.info("Adding the task type mappings"); + InputStream stream = ElasticSearchDAOV5.class.getResourceAsStream("/mappings_docType_task.json"); + byte[] bytes = IOUtils.toByteArray(stream); + String source = new String(bytes); + try { + elasticSearchClient.admin().indices().preparePutMapping(indexName).setType(TASK_DOC_TYPE).setSource(source).execute().actionGet(); + } catch (Exception e) { + logger.error(e.getMessage(), e); + } + } + } + + private void updateIndexName() { + this.logIndexName = this.logIndexPrefix + "_" + SIMPLE_DATE_FORMAT.format(new Date()); + + try { + elasticSearchClient.admin().indices().prepareGetIndex().addIndices(logIndexName).execute().actionGet(); + } catch (IndexNotFoundException infe) { + try { + elasticSearchClient.admin().indices().prepareCreate(logIndexName).execute().actionGet(); + } catch (ResourceAlreadyExistsException ilee) { + + } catch (Exception e) { + logger.error("Failed to update log index name: {}", logIndexName, e); + } + } + } + + /** + * Initializes the index with required templates and mappings. + */ + private void initIndex() throws Exception { + + //0. Add the tasklog template + GetIndexTemplatesResponse result = elasticSearchClient.admin().indices().prepareGetTemplates("tasklog_template").execute().actionGet(); + if(result.getIndexTemplates().isEmpty()) { + logger.info("Creating the index template 'tasklog_template'"); + InputStream stream = ElasticSearchDAOV5.class.getResourceAsStream("/template_tasklog.json"); + byte[] templateSource = IOUtils.toByteArray(stream); + + try { + elasticSearchClient.admin().indices().preparePutTemplate("tasklog_template").setSource(templateSource, XContentType.JSON).execute().actionGet(); + }catch(Exception e) { + logger.error("Failed to init tasklog_template", e); + } + } + + } + + @Override + public void indexWorkflow(Workflow workflow) { + try { + + String id = workflow.getWorkflowId(); + WorkflowSummary summary = new WorkflowSummary(workflow); + byte[] doc = objectMapper.writeValueAsBytes(summary); + + UpdateRequest req = new UpdateRequest(indexName, WORKFLOW_DOC_TYPE, id); + req.doc(doc, XContentType.JSON); + req.upsert(doc, XContentType.JSON); + req.retryOnConflict(5); + updateWithRetry(req, "Index workflow into doc_type workflow"); + + } catch (Throwable e) { + logger.error("Failed to index workflow: {}", workflow.getWorkflowId(), e); + } + } + + @Override + public CompletableFuture asyncIndexWorkflow(Workflow workflow) { + return CompletableFuture.runAsync(() -> indexWorkflow(workflow), executorService); + } + + @Override + public void indexTask(Task task) { + try { + + String id = task.getTaskId(); + TaskSummary summary = new TaskSummary(task); + byte[] doc = objectMapper.writeValueAsBytes(summary); + + UpdateRequest req = new UpdateRequest(indexName, TASK_DOC_TYPE, id); + req.doc(doc, XContentType.JSON); + req.upsert(doc, XContentType.JSON); + updateWithRetry(req, "Index workflow into doc_type workflow"); + + } catch (Throwable e) { + logger.error("Failed to index task: {}", task.getTaskId(), e); + } + } + + @Override + public CompletableFuture asyncIndexTask(Task task) { + return CompletableFuture.runAsync(() -> indexTask(task), executorService); + } + + + @Override + public void addTaskExecutionLogs(List taskExecLogs) { + if (taskExecLogs.isEmpty()) { + return; + } + try { 
+ BulkRequestBuilder bulkRequestBuilder = elasticSearchClient.prepareBulk(); + for (TaskExecLog log : taskExecLogs) { + IndexRequest request = new IndexRequest(logIndexName, LOG_DOC_TYPE); + request.source(objectMapper.writeValueAsBytes(log), XContentType.JSON); + bulkRequestBuilder.add(request); + } + new RetryUtil().retryOnException(() -> bulkRequestBuilder.execute().actionGet(), null , + BulkResponse::hasFailures, RETRY_COUNT, "Indexing all execution logs into doc_type task", "addTaskExecutionLogs"); + } catch (Throwable e) { + List taskIds = taskExecLogs.stream().map(TaskExecLog::getTaskId).collect(Collectors.toList()); + logger.error("Failed to index task execution logs for tasks: ", taskIds, e); + } + } + + @Override + public CompletableFuture asyncAddTaskExecutionLogs(List logs) { + return CompletableFuture.runAsync(() -> addTaskExecutionLogs(logs), executorService); + } + + @Override + public List getTaskExecutionLogs(String taskId) { + try { + Expression expression = Expression.fromString("taskId='" + taskId + "'"); + QueryBuilder queryBuilder = expression.getFilterBuilder(); + + BoolQueryBuilder filterQuery = QueryBuilders.boolQuery().must(queryBuilder); + QueryStringQueryBuilder stringQuery = QueryBuilders.queryStringQuery("*"); + BoolQueryBuilder fq = QueryBuilders.boolQuery().must(stringQuery).must(filterQuery); + + final SearchRequestBuilder srb = elasticSearchClient.prepareSearch(logIndexPrefix + "*") + .setQuery(fq).setTypes(LOG_DOC_TYPE) + .addSort(SortBuilders.fieldSort("createdTime") + .order(SortOrder.ASC)); + SearchResponse response = srb.execute().actionGet(); + SearchHit[] hits = response.getHits().getHits(); + List logs = new ArrayList<>(hits.length); + for(SearchHit hit : hits) { + String source = hit.getSourceAsString(); + TaskExecLog tel = objectMapper.readValue(source, TaskExecLog.class); + logs.add(tel); + } + + return logs; + + }catch(Exception e) { + logger.error("Failed to get task execution logs for task: {}", taskId, e); + } + + return null; + } + + @Override + public void addMessage(String queue, Message message) { + Map doc = new HashMap<>(); + doc.put("messageId", message.getId()); + doc.put("payload", message.getPayload()); + doc.put("queue", queue); + doc.put("created", System.currentTimeMillis()); + IndexRequest request = new IndexRequest(logIndexName, MSG_DOC_TYPE); + request.source(doc); + try { + new RetryUtil<>().retryOnException(() -> elasticSearchClient.index(request).actionGet(), null, + null, RETRY_COUNT, "Indexing document in for docType: message", "addMessage"); + } catch (Throwable e) { + logger.error("Failed to index message: {}", message.getId(), e); + } + } + + @Override + public void addEventExecution(EventExecution eventExecution) { + try { + byte[] doc = objectMapper.writeValueAsBytes(eventExecution); + String id = eventExecution.getName() + "." + eventExecution.getEvent() + "." + eventExecution.getMessageId() + "." 
+ eventExecution.getId(); + UpdateRequest req = new UpdateRequest(logIndexName, EVENT_DOC_TYPE, id); + req.doc(doc, XContentType.JSON); + req.upsert(doc, XContentType.JSON); + req.retryOnConflict(5); + updateWithRetry(req, "Update Event execution for doc_type event"); + } catch (Throwable e) { + logger.error("Failed to index event execution: {}", eventExecution.getId(), e); + } + } + + @Override + public CompletableFuture asyncAddEventExecution(EventExecution eventExecution) { + return CompletableFuture.runAsync(() -> addEventExecution(eventExecution), executorService); + } + + private void updateWithRetry(UpdateRequest request, String operationDescription) { + try { + new RetryUtil().retryOnException(() -> elasticSearchClient.update(request).actionGet(), null, + null, RETRY_COUNT, operationDescription, "updateWithRetry"); + } catch (Exception e) { + Monitors.error(className, "index"); + logger.error("Failed to index {} for request type: {}", request.index(), request.type(), e); + } + } + + @Override + public SearchResult searchWorkflows(String query, String freeText, int start, int count, List sort) { + try { + + return search(query, start, count, sort, freeText, WORKFLOW_DOC_TYPE); + + } catch (ParserException e) { + throw new ApplicationException(Code.BACKEND_ERROR, e.getMessage(), e); + } + } + + @Override + public SearchResult searchTasks(String query, String freeText, int start, int count, List sort) { + try { + return search(query, start, count, sort, freeText, TASK_DOC_TYPE); + } catch (ParserException e) { + throw new ApplicationException(Code.BACKEND_ERROR, e.getMessage(), e); + } + } + + @Override + public void removeWorkflow(String workflowId) { + try { + DeleteRequest request = new DeleteRequest(indexName, WORKFLOW_DOC_TYPE, workflowId); + DeleteResponse response = elasticSearchClient.delete(request).actionGet(); + if (response.getResult() == DocWriteResponse.Result.DELETED) { + logger.error("Index removal failed - document not found by id: {}", workflowId); + } + } catch (Throwable e) { + logger.error("Failed to remove workflow {} from index", workflowId, e); + Monitors.error(className, "remove"); + } + } + + @Override + public CompletableFuture asyncRemoveWorkflow(String workflowId) { + return CompletableFuture.runAsync(() -> removeWorkflow(workflowId), executorService); + } + + @Override + public void updateWorkflow(String workflowInstanceId, String[] keys, Object[] values) { + if (keys.length != values.length) { + throw new ApplicationException(Code.INVALID_INPUT, "Number of keys and values do not match"); + } + + UpdateRequest request = new UpdateRequest(indexName, WORKFLOW_DOC_TYPE, workflowInstanceId); + Map source = IntStream.range(0, keys.length).boxed() + .collect(Collectors.toMap(i -> keys[i], i -> values[i])); + request.doc(source); + logger.debug("Updating workflow {} with {}", workflowInstanceId, source); + new RetryUtil<>().retryOnException(() -> elasticSearchClient.update(request), null, null, RETRY_COUNT, + "Updating index for doc_type workflow", "updateWorkflow"); + } + + @Override + public CompletableFuture asyncUpdateWorkflow(String workflowInstanceId, String[] keys, Object[] values) { + return CompletableFuture.runAsync(() -> updateWorkflow(workflowInstanceId, keys, values), executorService); + } + + @Override + public String get(String workflowInstanceId, String fieldToGet) { + GetRequest request = new GetRequest(indexName, WORKFLOW_DOC_TYPE, workflowInstanceId) + .fetchSourceContext(new FetchSourceContext(true, new String[]{fieldToGet}, 
Strings.EMPTY_ARRAY)); + GetResponse response = elasticSearchClient.get(request).actionGet(); + + if (response.isExists()){ + Map sourceAsMap = response.getSourceAsMap(); + if (sourceAsMap.containsKey(fieldToGet)){ + return sourceAsMap.get(fieldToGet).toString(); + } + } + + logger.debug("Unable to find Workflow: {} in ElasticSearch index: {}.", workflowInstanceId, indexName); + return null; + } + + private SearchResult search(String structuredQuery, int start, int size, List sortOptions, String freeTextQuery, String docType) throws ParserException { + QueryBuilder queryBuilder = QueryBuilders.matchAllQuery(); + if(StringUtils.isNotEmpty(structuredQuery)) { + Expression expression = Expression.fromString(structuredQuery); + queryBuilder = expression.getFilterBuilder(); + } + + BoolQueryBuilder filterQuery = QueryBuilders.boolQuery().must(queryBuilder); + QueryStringQueryBuilder stringQuery = QueryBuilders.queryStringQuery(freeTextQuery); + BoolQueryBuilder fq = QueryBuilders.boolQuery().must(stringQuery).must(filterQuery); + final SearchRequestBuilder srb = elasticSearchClient.prepareSearch(indexName).setQuery(fq).setTypes(docType).storedFields("_id").setFrom(start).setSize(size); + if(sortOptions != null){ + sortOptions.forEach(sortOption -> { + SortOrder order = SortOrder.ASC; + String field = sortOption; + int indx = sortOption.indexOf(':'); + if(indx > 0){ //Can't be 0, need the field name at-least + field = sortOption.substring(0, indx); + order = SortOrder.valueOf(sortOption.substring(indx+1)); + } + srb.addSort(field, order); + }); + } + List result = new LinkedList(); + SearchResponse response = srb.get(); + response.getHits().forEach(hit -> { + result.add(hit.getId()); + }); + long count = response.getHits().getTotalHits(); + return new SearchResult(count, result); + } + + @Override + public List searchArchivableWorkflows(String indexName, long archiveTtlDays) { + QueryBuilder q = QueryBuilders.boolQuery() + .must(QueryBuilders.rangeQuery("endTime").lt(LocalDate.now().minusDays(archiveTtlDays).toString())) + .should(QueryBuilders.termQuery("status", "COMPLETED")) + .should(QueryBuilders.termQuery("status", "FAILED")) + .mustNot(QueryBuilders.existsQuery("archived")) + .minimumShouldMatch(1); + SearchRequestBuilder s = elasticSearchClient.prepareSearch(indexName) + .setTypes("workflow") + .setQuery(q) + .setSize(1000); + SearchResponse response = s.execute().actionGet(); + SearchHits hits = response.getHits(); + List ids = new LinkedList<>(); + for (SearchHit hit : hits.getHits()) { + ids.add(hit.getId()); + } + return ids; + } + + public List searchRecentRunningWorkflows(int lastModifiedHoursAgoFrom, int lastModifiedHoursAgoTo) { + DateTime dateTime = new DateTime(); + QueryBuilder q = QueryBuilders.boolQuery() + .must(QueryBuilders.rangeQuery("updateTime") + .gt(dateTime.minusHours(lastModifiedHoursAgoFrom))) + .must(QueryBuilders.rangeQuery("updateTime") + .lt(dateTime.minusHours(lastModifiedHoursAgoTo))) + .must(QueryBuilders.termQuery("status", "RUNNING")); + + SearchRequestBuilder s = elasticSearchClient.prepareSearch(indexName) + .setTypes("workflow") + .setQuery(q) + .setSize(5000) .addSort("updateTime",SortOrder.ASC); - SearchResponse response = s.execute().actionGet(); - SearchHits hits = response.getHits(); - List ids = new LinkedList<>(); - for (SearchHit hit : hits.getHits()) { - ids.add(hit.getId()); - } - return ids; - } + SearchResponse response = s.execute().actionGet(); + SearchHits hits = response.getHits(); + List ids = new LinkedList<>(); + for (SearchHit hit : 
hits.getHits()) { + ids.add(hit.getId()); + } + return ids; + } -} +} \ No newline at end of file diff --git a/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/ElasticSearchModuleV5.java b/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/ElasticSearchModuleV5.java deleted file mode 100644 index ac63184934..0000000000 --- a/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/ElasticSearchModuleV5.java +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * - */ -package com.netflix.conductor.dao.es5.index; - -import java.net.InetAddress; - -import javax.inject.Singleton; - -import com.netflix.conductor.dao.IndexDAO; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.InetSocketTransportAddress; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.elasticsearch.transport.client.PreBuiltTransportClient; -import com.google.inject.AbstractModule; -import com.google.inject.Provides; -import com.netflix.conductor.core.config.Configuration; - - -/** - * @author Viren - * Provider for the elasticsearch transport client - */ -public class ElasticSearchModuleV5 extends AbstractModule { - - private static Logger log = LoggerFactory.getLogger(ElasticSearchModuleV5.class); - - @Provides - @Singleton - public Client getClient(Configuration config) throws Exception { - - String clusterAddress = config.getProperty("workflow.elasticsearch.url", ""); - if(clusterAddress.equals("")) { - log.warn("workflow.elasticsearch.url is not set. Indexing will remain DISABLED."); - } - - Settings settings = Settings.builder() - .put("client.transport.ignore_cluster_name",true) - .put("client.transport.sniff", true) - .build(); - - TransportClient tc = new PreBuiltTransportClient(settings); - String[] hosts = clusterAddress.split(","); - for (String host : hosts) { - String[] hostparts = host.split(":"); - String hostname = hostparts[0]; - int hostport = 9200; - if (hostparts.length == 2) hostport = Integer.parseInt(hostparts[1]); - tc.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName(hostname), hostport)); - } - return tc; - - } - - @Override - protected void configure() { - bind(IndexDAO.class).to(ElasticSearchDAOV5.class); - } -} diff --git a/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/BooleanOp.java b/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/BooleanOp.java deleted file mode 100644 index 95808989d7..0000000000 --- a/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/BooleanOp.java +++ /dev/null @@ -1,64 +0,0 @@ -/** - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
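For reference, the boolean query assembled in searchArchivableWorkflows above can be previewed on its own; this is a minimal sketch that rebuilds the same query with the ES 5.x QueryBuilders API and prints its JSON, assuming a 30-day TTL and java.time.LocalDate:

import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

import java.time.LocalDate;

public class ArchiveQueryPreview {
    public static void main(String[] args) {
        long archiveTtlDays = 30; // arbitrary TTL for illustration
        // Same shape as the query in searchArchivableWorkflows: finished workflows
        // older than the TTL that have not been marked as archived yet.
        QueryBuilder q = QueryBuilders.boolQuery()
                .must(QueryBuilders.rangeQuery("endTime")
                        .lt(LocalDate.now().minusDays(archiveTtlDays).toString()))
                .should(QueryBuilders.termQuery("status", "COMPLETED"))
                .should(QueryBuilders.termQuery("status", "FAILED"))
                .mustNot(QueryBuilders.existsQuery("archived"))
                .minimumShouldMatch(1);
        // QueryBuilder.toString() renders the query as JSON in the ES 5.x client.
        System.out.println(q);
    }
}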
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * - */ -package com.netflix.conductor.dao.es5.index.query.parser; - -import java.io.InputStream; - -/** - * @author Viren - * - */ -public class BooleanOp extends AbstractNode { - - private String value; - - public BooleanOp(InputStream is) throws ParserException { - super(is); - } - - @Override - protected void _parse() throws Exception { - byte[] buffer = peek(3); - if(buffer.length > 1 && buffer[0] == 'O' && buffer[1] == 'R'){ - this.value = "OR"; - }else if(buffer.length > 2 && buffer[0] == 'A' && buffer[1] == 'N' && buffer[2] == 'D'){ - this.value = "AND"; - }else { - throw new ParserException("No valid boolean operator found..."); - } - read(this.value.length()); - } - - @Override - public String toString(){ - return " " + value + " "; - } - - public String getOperator(){ - return value; - } - - public boolean isAnd(){ - return "AND".equals(value); - } - - public boolean isOr(){ - return "OR".equals(value); - } -} diff --git a/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/Expression.java b/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/Expression.java index de6dbba0e2..a2d170491f 100644 --- a/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/Expression.java +++ b/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/Expression.java @@ -18,13 +18,17 @@ */ package com.netflix.conductor.dao.es5.index.query.parser; -import java.io.BufferedInputStream; -import java.io.ByteArrayInputStream; -import java.io.InputStream; +import com.netflix.conductor.elasticsearch.query.parser.AbstractNode; +import com.netflix.conductor.elasticsearch.query.parser.BooleanOp; +import com.netflix.conductor.elasticsearch.query.parser.ParserException; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; +import java.io.BufferedInputStream; +import java.io.ByteArrayInputStream; +import java.io.InputStream; + /** * @author Viren * diff --git a/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/GroupedExpression.java b/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/GroupedExpression.java index e325d505cc..3b59eaa4fe 100644 --- a/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/GroupedExpression.java +++ b/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/GroupedExpression.java @@ -1,67 +1,67 @@ /** * Copyright 2016 Netflix, Inc. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. */ /** - * + * */ package com.netflix.conductor.dao.es5.index.query.parser; -import java.io.InputStream; +import com.netflix.conductor.elasticsearch.query.parser.AbstractNode; +import com.netflix.conductor.elasticsearch.query.parser.ParserException; import org.elasticsearch.index.query.QueryBuilder; +import java.io.InputStream; + /** * @author Viren - * + * */ public class GroupedExpression extends AbstractNode implements FilterProvider { - private Expression expression; - - public GroupedExpression(InputStream is) throws ParserException { - super(is); - } + private Expression expression; + + public GroupedExpression(InputStream is) throws ParserException { + super(is); + } + + @Override + protected void _parse() throws Exception { + byte[] peeked = read(1); + assertExpected(peeked, "("); + + this.expression = new Expression(is); + + peeked = read(1); + assertExpected(peeked, ")"); + + } + + @Override + public String toString() { + return "(" + expression + ")"; + } - @Override - protected void _parse() throws Exception { - byte[] peeked = read(1); - assertExpected(peeked, "("); - - this.expression = new Expression(is); - - peeked = read(1); - assertExpected(peeked, ")"); - - } - - @Override - public String toString(){ - return "(" + expression + ")"; - } + /** + * @return the expression + */ + public Expression getExpression() { + return expression; + } - /** - * @return the expression - */ - public Expression getExpression() { - return expression; - } + @Override + public QueryBuilder getFilterBuilder() { + return expression.getFilterBuilder(); + } - @Override - public QueryBuilder getFilterBuilder() { - return expression.getFilterBuilder(); - } - } diff --git a/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/Name.java b/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/Name.java deleted file mode 100644 index b7b9fb9a18..0000000000 --- a/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/Name.java +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -/** - * - */ -package com.netflix.conductor.dao.es5.index.query.parser; - -import java.io.InputStream; - -/** - * @author Viren - * Represents the name of the field to be searched against. - */ -public class Name extends AbstractNode { - - private String value; - - public Name(InputStream is) throws ParserException { - super(is); - } - - @Override - protected void _parse() throws Exception { - this.value = readToken(); - } - - @Override - public String toString(){ - return value; - } - - public String getName(){ - return value; - } - -} diff --git a/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/NameValue.java b/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/NameValue.java index 7b8127b99a..05cd829b50 100644 --- a/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/NameValue.java +++ b/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/NameValue.java @@ -1,29 +1,33 @@ /** * Copyright 2016 Netflix, Inc. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
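For reference, the structured-query DSL consumed by Expression.fromString (used in the search() method above) can be tried on its own; a minimal sketch with a made-up query string, which prints the resulting Elasticsearch filter as JSON:

import com.netflix.conductor.dao.es5.index.query.parser.Expression;

import org.elasticsearch.index.query.QueryBuilder;

public class StructuredQueryPreview {
    public static void main(String[] args) throws Exception {
        // Arbitrary sample of the structured-query DSL handled by Expression;
        // field names and values here are made up for illustration.
        String structuredQuery = "status IN (COMPLETED,FAILED) AND workflowType='http_test'";
        Expression expression = Expression.fromString(structuredQuery);
        QueryBuilder filter = expression.getFilterBuilder();
        // The filter is a regular ES QueryBuilder; toString() renders it as JSON.
        System.out.println(filter);
    }
}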
*/ /** - * + * */ package com.netflix.conductor.dao.es5.index.query.parser; -import java.io.InputStream; +import com.netflix.conductor.elasticsearch.query.parser.AbstractNode; +import com.netflix.conductor.elasticsearch.query.parser.ComparisonOp; +import com.netflix.conductor.elasticsearch.query.parser.ComparisonOp.Operators; +import com.netflix.conductor.elasticsearch.query.parser.ConstValue; +import com.netflix.conductor.elasticsearch.query.parser.ListConst; +import com.netflix.conductor.elasticsearch.query.parser.Name; +import com.netflix.conductor.elasticsearch.query.parser.ParserException; +import com.netflix.conductor.elasticsearch.query.parser.Range; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; -import com.netflix.conductor.dao.es5.index.query.parser.ComparisonOp.Operators; +import java.io.InputStream; /** * @author Viren @@ -36,84 +40,85 @@ */ public class NameValue extends AbstractNode implements FilterProvider { - private Name name; - - private ComparisonOp op; - - private ConstValue value; - - private Range range; - - private ListConst valueList; - - public NameValue(InputStream is) throws ParserException { - super(is); - } - - @Override - protected void _parse() throws Exception { - this.name = new Name(is); - this.op = new ComparisonOp(is); - - if(this.op.getOperator().equals(Operators.BETWEEN.value())){ - this.range = new Range(is); - }if(this.op.getOperator().equals(Operators.IN.value())){ - this.valueList = new ListConst(is); - }else{ - this.value = new ConstValue(is); - } - } - - @Override - public String toString() { - return "" + name + op + value; - } - - /** - * @return the name - */ - public Name getName() { - return name; - } - - /** - * @return the op - */ - public ComparisonOp getOp() { - return op; - } - - /** - * @return the value - */ - public ConstValue getValue() { - return value; - } - - @Override - public QueryBuilder getFilterBuilder(){ - if(op.getOperator().equals(Operators.EQUALS.value())){ - return QueryBuilders.queryStringQuery(name.getName() + ":" + value.getValue().toString()); - }else if(op.getOperator().equals(Operators.BETWEEN.value())){ - return QueryBuilders.rangeQuery(name.getName()).from(range.getLow()).to(range.getHigh()); - }else if(op.getOperator().equals(Operators.IN.value())){ - return QueryBuilders.termsQuery(name.getName(), valueList.getList()); - }else if(op.getOperator().equals(Operators.NOT_EQUALS.value())){ - return QueryBuilders.queryStringQuery("NOT " + name.getName() + ":" + value.getValue().toString()); - }else if(op.getOperator().equals(Operators.GREATER_THAN.value())){ - return QueryBuilders.rangeQuery(name.getName()).from(value.getValue()).includeLower(false).includeUpper(false); - }else if(op.getOperator().equals(Operators.IS.value())){ - if(value.getSysConstant().equals(ConstValue.SystemConsts.NULL)){ - return QueryBuilders.boolQuery().mustNot(QueryBuilders.boolQuery().must(QueryBuilders.matchAllQuery()).mustNot(QueryBuilders.existsQuery(name.getName()))); - } else if(value.getSysConstant().equals(ConstValue.SystemConsts.NOT_NULL)){ - return QueryBuilders.boolQuery().mustNot(QueryBuilders.boolQuery().must(QueryBuilders.matchAllQuery()).must(QueryBuilders.existsQuery(name.getName()))); - } - }else if(op.getOperator().equals(Operators.LESS_THAN.value())){ - return QueryBuilders.rangeQuery(name.getName()).to(value.getValue()).includeLower(false).includeUpper(false); - } - - throw new IllegalStateException("Incorrect/unsupported operators"); - } - - + private Name name; + 
+ private ComparisonOp op; + + private ConstValue value; + + private Range range; + + private ListConst valueList; + + public NameValue(InputStream is) throws ParserException { + super(is); + } + + @Override + protected void _parse() throws Exception { + this.name = new Name(is); + this.op = new ComparisonOp(is); + + if (this.op.getOperator().equals(Operators.BETWEEN.value())) { + this.range = new Range(is); + } + if (this.op.getOperator().equals(Operators.IN.value())) { + this.valueList = new ListConst(is); + } else { + this.value = new ConstValue(is); + } + } + + @Override + public String toString() { + return "" + name + op + value; + } + + /** + * @return the name + */ + public Name getName() { + return name; + } + + /** + * @return the op + */ + public ComparisonOp getOp() { + return op; + } + + /** + * @return the value + */ + public ConstValue getValue() { + return value; + } + + @Override + public QueryBuilder getFilterBuilder() { + if (op.getOperator().equals(Operators.EQUALS.value())) { + return QueryBuilders.queryStringQuery(name.getName() + ":" + value.getValue().toString()); + } else if (op.getOperator().equals(Operators.BETWEEN.value())) { + return QueryBuilders.rangeQuery(name.getName()).from(range.getLow()).to(range.getHigh()); + } else if (op.getOperator().equals(Operators.IN.value())) { + return QueryBuilders.termsQuery(name.getName(), valueList.getList()); + } else if (op.getOperator().equals(Operators.NOT_EQUALS.value())) { + return QueryBuilders.queryStringQuery("NOT " + name.getName() + ":" + value.getValue().toString()); + } else if (op.getOperator().equals(Operators.GREATER_THAN.value())) { + return QueryBuilders.rangeQuery(name.getName()).from(value.getValue()).includeLower(false).includeUpper(false); + } else if (op.getOperator().equals(Operators.IS.value())) { + if (value.getSysConstant().equals(ConstValue.SystemConsts.NULL)) { + return QueryBuilders.boolQuery().mustNot(QueryBuilders.boolQuery().must(QueryBuilders.matchAllQuery()).mustNot(QueryBuilders.existsQuery(name.getName()))); + } else if (value.getSysConstant().equals(ConstValue.SystemConsts.NOT_NULL)) { + return QueryBuilders.boolQuery().mustNot(QueryBuilders.boolQuery().must(QueryBuilders.matchAllQuery()).must(QueryBuilders.existsQuery(name.getName()))); + } + } else if (op.getOperator().equals(Operators.LESS_THAN.value())) { + return QueryBuilders.rangeQuery(name.getName()).to(value.getValue()).includeLower(false).includeUpper(false); + } + + throw new IllegalStateException("Incorrect/unsupported operators"); + } + + } diff --git a/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/ElasticSearchConfiguration.java b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/ElasticSearchConfiguration.java new file mode 100644 index 0000000000..a2ec17666e --- /dev/null +++ b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/ElasticSearchConfiguration.java @@ -0,0 +1,104 @@ +package com.netflix.conductor.elasticsearch; + +import com.google.common.base.Strings; +import com.netflix.conductor.core.config.Configuration; + +import java.net.URI; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; + +public interface ElasticSearchConfiguration extends Configuration { + + String ELASTICSEARCH_PROPERTY_NAME = "workflow.elasticsearch.instanceType"; + ElasticSearchInstanceType ELASTICSEARCH_INSTANCE_TYPE_DEFAULT_VALUE = ElasticSearchInstanceType.MEMORY; + + String ELASTIC_SEARCH_URL_PROPERTY_NAME = "workflow.elasticsearch.url"; + String 
ELASTIC_SEARCH_URL_DEFAULT_VALUE = "localhost:9300"; + + String ELASTIC_SEARCH_INDEX_NAME_PROPERTY_NAME = "workflow.elasticsearch.index.name"; + String ELASTIC_SEARCH_INDEX_NAME_DEFAULT_VALUE = "conductor"; + + String TASK_LOG_INDEX_NAME_PROPERTY_NAME = "workflow.elasticsearch.tasklog.index.name"; + String TASK_LOG_INDEX_NAME_DEFAULT_VALUE = "task_log"; + + String EMBEDDED_DATA_PATH_PROPERTY_NAME = "workflow.elasticsearch.embedded.data.path"; + String EMBEDDED_DATA_PATH_DEFAULT_VALUE = "path.data"; + + String EMBEDDED_HOME_PATH_PROPERTY_NAME = "workflow.elasticsearch.embedded.data.home"; + String EMBEDDED_HOME_PATH_DEFAULT_VALUE = "path.home"; + + String EMBEDDED_PORT_PROPERTY_NAME = "workflow.elasticsearch.embedded.port"; + int EMBEDDED_PORT_DEFAULT_VALUE = 9200; + + String EMBEDDED_CLUSTER_NAME_PROPERTY_NAME = "workflow.elasticsearch.embedded.cluster.name"; + String EMBEDDED_CLUSTER_NAME_DEFAULT_VALUE = "elasticsearch_test"; + + String EMBEDDED_HOST_PROPERTY_NAME = "workflow.elasticsearch.embedded.host"; + String EMBEDDED_HOST_DEFAULT_VALUE = "127.0.0.1"; + + String EMBEDDED_SETTINGS_FILE_PROPERTY_NAME = "workflow.elasticsearch.embedded.settings.file"; + String EMBEDDED_SETTINGS_FILE_DEFAULT_VALUE = "embedded-es.yml"; + + default String getURL() { + return getProperty(ELASTIC_SEARCH_URL_PROPERTY_NAME, ELASTIC_SEARCH_URL_DEFAULT_VALUE); + } + + default List getURIs(){ + + String clusterAddress = getURL(); + + String[] hosts = clusterAddress.split(","); + + return Arrays.stream(hosts).map( host -> + (host.startsWith("http://") || host.startsWith("tcp://")) ? URI.create(host) : URI.create("tcp://" + host) + ).collect(Collectors.toList()); + } + + default String getIndexName() { + return getProperty(ELASTIC_SEARCH_INDEX_NAME_PROPERTY_NAME, ELASTIC_SEARCH_INDEX_NAME_DEFAULT_VALUE); + } + + default String getTasklogIndexName() { + return getProperty(TASK_LOG_INDEX_NAME_PROPERTY_NAME, TASK_LOG_INDEX_NAME_DEFAULT_VALUE); + } + + default String getEmbeddedDataPath() { + return getProperty(EMBEDDED_DATA_PATH_PROPERTY_NAME, EMBEDDED_DATA_PATH_DEFAULT_VALUE); + } + + default String getEmbeddedHomePath() { + return getProperty(EMBEDDED_HOME_PATH_PROPERTY_NAME, EMBEDDED_HOME_PATH_DEFAULT_VALUE); + } + + default int getEmbeddedPort() { + return getIntProperty(EMBEDDED_PORT_PROPERTY_NAME, EMBEDDED_PORT_DEFAULT_VALUE); + + } + + default String getEmbeddedClusterName() { + return getProperty(EMBEDDED_CLUSTER_NAME_PROPERTY_NAME, EMBEDDED_CLUSTER_NAME_DEFAULT_VALUE); + } + + default String getEmbeddedHost() { + return getProperty(EMBEDDED_HOST_PROPERTY_NAME, EMBEDDED_HOST_DEFAULT_VALUE); + } + + default String getEmbeddedSettingsFile() { + return getProperty(EMBEDDED_SETTINGS_FILE_PROPERTY_NAME, EMBEDDED_SETTINGS_FILE_DEFAULT_VALUE); + } + + default ElasticSearchInstanceType getElasticSearchInstanceType() { + ElasticSearchInstanceType elasticSearchInstanceType = ELASTICSEARCH_INSTANCE_TYPE_DEFAULT_VALUE; + String instanceTypeConfig = getProperty(ELASTICSEARCH_PROPERTY_NAME, ""); + if (!Strings.isNullOrEmpty(instanceTypeConfig)) { + elasticSearchInstanceType = ElasticSearchInstanceType.valueOf(instanceTypeConfig.toUpperCase()); + } + return elasticSearchInstanceType; + } + + enum ElasticSearchInstanceType { + MEMORY, EXTERNAL + } + +} diff --git a/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/ElasticSearchModule.java b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/ElasticSearchModule.java new file mode 100644 index 0000000000..b385442b66 --- /dev/null +++ 
b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/ElasticSearchModule.java @@ -0,0 +1,14 @@ +package com.netflix.conductor.elasticsearch; + +import com.google.inject.AbstractModule; +import com.google.inject.Singleton; + +import org.elasticsearch.client.Client; + +public class ElasticSearchModule extends AbstractModule { + @Override + protected void configure() { + bind(ElasticSearchConfiguration.class).to(SystemPropertiesElasticSearchConfiguration.class); + bind(Client.class).toProvider(ElasticSearchTransportClientProvider.class).in(Singleton.class); + } +} diff --git a/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/ElasticSearchRestClientProvider.java b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/ElasticSearchRestClientProvider.java new file mode 100644 index 0000000000..a21e0bc2a6 --- /dev/null +++ b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/ElasticSearchRestClientProvider.java @@ -0,0 +1,34 @@ +package com.netflix.conductor.elasticsearch; + +import org.apache.http.HttpHost; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.RestHighLevelClient; + +import java.net.URI; +import java.util.List; +import java.util.stream.Collectors; + +import javax.inject.Inject; +import javax.inject.Provider; + +public class ElasticSearchRestClientProvider implements Provider { + private final ElasticSearchConfiguration configuration; + + @Inject + public ElasticSearchRestClientProvider(ElasticSearchConfiguration configuration) { + this.configuration = configuration; + } + + @Override + public RestHighLevelClient get() { + RestClient lowLevelRestClient = RestClient.builder(convertToHttpHosts(configuration.getURIs())).build(); + return new RestHighLevelClient(lowLevelRestClient); + } + + private HttpHost[] convertToHttpHosts(List hosts) { + List list = hosts.stream().map(host -> + new HttpHost(host.getHost(), host.getPort())) + .collect(Collectors.toList()); + return list.toArray(new HttpHost[0]); + } +} diff --git a/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/ElasticSearchTransportClientProvider.java b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/ElasticSearchTransportClientProvider.java new file mode 100644 index 0000000000..4e60ac4460 --- /dev/null +++ b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/ElasticSearchTransportClientProvider.java @@ -0,0 +1,58 @@ +package com.netflix.conductor.elasticsearch; + +import com.google.inject.ProvisionException; + +import org.elasticsearch.client.Client; +import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.InetSocketTransportAddress; +import org.elasticsearch.transport.client.PreBuiltTransportClient; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.net.InetAddress; +import java.net.URI; +import java.net.UnknownHostException; +import java.util.List; +import java.util.Optional; + +import javax.inject.Inject; +import javax.inject.Provider; + +public class ElasticSearchTransportClientProvider implements Provider { + private static final Logger logger = LoggerFactory.getLogger(ElasticSearchTransportClientProvider.class); + + private final ElasticSearchConfiguration configuration; + + @Inject + public ElasticSearchTransportClientProvider(ElasticSearchConfiguration configuration) { + this.configuration = configuration; + } + + @Override + public Client get() { + + Settings 
settings = Settings.builder() + .put("client.transport.ignore_cluster_name", true) + .put("client.transport.sniff", true) + .build(); + + TransportClient tc = new PreBuiltTransportClient(settings); + + List clusterAddresses = configuration.getURIs(); + + if (clusterAddresses.isEmpty()) { + logger.warn(ElasticSearchConfiguration.ELASTIC_SEARCH_URL_PROPERTY_NAME + + " is not set. Indexing will remain DISABLED."); + } + for (URI hostAddress : clusterAddresses) { + int port = Optional.ofNullable(hostAddress.getPort()).orElse(9200); + try { + tc.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName(hostAddress.getHost()), port)); + } catch (UnknownHostException uhe){ + throw new ProvisionException("Invalid host" + hostAddress.getHost(), uhe); + } + } + return tc; + } +} diff --git a/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/EmbeddedElasticSearch.java b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/EmbeddedElasticSearch.java new file mode 100644 index 0000000000..578309fb6d --- /dev/null +++ b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/EmbeddedElasticSearch.java @@ -0,0 +1,41 @@ +package com.netflix.conductor.elasticsearch; + +import com.netflix.conductor.service.Lifecycle; + +import org.apache.commons.io.FileUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.nio.file.FileSystems; +import java.nio.file.Files; +import java.nio.file.Path; + +public interface EmbeddedElasticSearch extends Lifecycle { + Logger logger = LoggerFactory.getLogger(EmbeddedElasticSearch.class); + + default void cleanDataDir(String path) { + File dataDir = new File(path); + + try { + logger.info("Deleting contents of data dir {}", path); + if (dataDir.exists()) { + FileUtils.cleanDirectory(dataDir); + } + } catch (IOException e) { + logger.error(String.format("Failed to delete ES data dir: %s", dataDir.getAbsolutePath()), e); + } + } + + default File createDataDir(String dataDirLoc) throws IOException { + Path dataDirPath = FileSystems.getDefault().getPath(dataDirLoc); + Files.createDirectories(dataDirPath); + return dataDirPath.toFile(); + } + + default File setupDataDir(String path) throws IOException { + cleanDataDir(path); + return createDataDir(path); + } +} diff --git a/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/EmbeddedElasticSearchProvider.java b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/EmbeddedElasticSearchProvider.java new file mode 100644 index 0000000000..8f22fbd633 --- /dev/null +++ b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/EmbeddedElasticSearchProvider.java @@ -0,0 +1,8 @@ +package com.netflix.conductor.elasticsearch; + +import java.util.Optional; + +import javax.inject.Provider; + +public interface EmbeddedElasticSearchProvider extends Provider> { +} diff --git a/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/SystemPropertiesElasticSearchConfiguration.java b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/SystemPropertiesElasticSearchConfiguration.java new file mode 100644 index 0000000000..33b59d982e --- /dev/null +++ b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/SystemPropertiesElasticSearchConfiguration.java @@ -0,0 +1,7 @@ +package com.netflix.conductor.elasticsearch; + +import com.netflix.conductor.core.config.SystemPropertiesConfiguration; + +public class SystemPropertiesElasticSearchConfiguration + extends 
SystemPropertiesConfiguration implements ElasticSearchConfiguration { +} diff --git a/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/es5/ElasticSearchV5Module.java b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/es5/ElasticSearchV5Module.java new file mode 100644 index 0000000000..3592fd1bca --- /dev/null +++ b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/es5/ElasticSearchV5Module.java @@ -0,0 +1,38 @@ +/** + * Copyright 2016 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +/** + * + */ +package com.netflix.conductor.elasticsearch.es5; + +import com.google.inject.AbstractModule; + +import com.netflix.conductor.dao.IndexDAO; +import com.netflix.conductor.dao.es5.index.ElasticSearchDAOV5; +import com.netflix.conductor.elasticsearch.ElasticSearchModule; +import com.netflix.conductor.elasticsearch.EmbeddedElasticSearchProvider; + + +/** + * @author Viren + * Provider for the elasticsearch transport client + */ +public class ElasticSearchV5Module extends AbstractModule { + + @Override + protected void configure() { + install(new ElasticSearchModule()); + bind(IndexDAO.class).to(ElasticSearchDAOV5.class); + bind(EmbeddedElasticSearchProvider.class).to(EmbeddedElasticSearchV5Provider.class); + } +} diff --git a/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/es5/EmbeddedElasticSearchV5.java b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/es5/EmbeddedElasticSearchV5.java new file mode 100644 index 0000000000..9fec326b12 --- /dev/null +++ b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/es5/EmbeddedElasticSearchV5.java @@ -0,0 +1,129 @@ +/** + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
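For reference, the address parsing introduced by ElasticSearchConfiguration.getURIs() can be exercised in isolation; a minimal sketch that mirrors the default-method logic, with made-up host names (the transport client provider then reads host and port from each URI):

import java.net.URI;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class ClusterAddressPreview {
    public static void main(String[] args) {
        // Hypothetical value of workflow.elasticsearch.url; same parsing as the
        // getURIs() default method in ElasticSearchConfiguration above.
        String clusterAddress = "es1.example.com:9300,tcp://es2.example.com:9300";

        List<URI> uris = Arrays.stream(clusterAddress.split(","))
                .map(host -> (host.startsWith("http://") || host.startsWith("tcp://"))
                        ? URI.create(host)
                        : URI.create("tcp://" + host))
                .collect(Collectors.toList());

        // Prints tcp://es1.example.com:9300 and tcp://es2.example.com:9300,
        // each with its host and port as seen by the transport client provider.
        uris.forEach(uri ->
                System.out.println(uri + " -> host=" + uri.getHost() + ", port=" + uri.getPort()));
    }
}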
+ */ +package com.netflix.conductor.elasticsearch.es5; + +import com.netflix.conductor.elasticsearch.ElasticSearchConfiguration; +import com.netflix.conductor.elasticsearch.EmbeddedElasticSearch; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.node.InternalSettingsPreparer; +import org.elasticsearch.node.Node; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.transport.Netty4Plugin; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.util.Collection; + +import static java.util.Collections.singletonList; + + +public class EmbeddedElasticSearchV5 implements EmbeddedElasticSearch { + + private static final Logger logger = LoggerFactory.getLogger(EmbeddedElasticSearchV5.class); + + private final String clusterName; + private final String host; + private final int port; + + private Node instance; + private File dataDir; + + public EmbeddedElasticSearchV5(String clusterName, String host, int port){ + this.clusterName = clusterName; + this.host = host; + this.port = port; + } + + private class PluginConfigurableNode extends Node { + public PluginConfigurableNode(Settings preparedSettings, Collection> classpathPlugins) { + super(InternalSettingsPreparer.prepareEnvironment(preparedSettings, null), classpathPlugins); + } + } + + @Override + public void start() throws Exception { + start(clusterName, host, port); + } + + public synchronized void start(String clusterName, String host, int port) throws Exception { + + if (instance != null) { + String msg = String.format( + "An instance of this Embedded Elastic Search server is already running on port: %d. " + + "It must be stopped before you can call start again.", + getPort() + ); + logger.error(msg); + throw new IllegalStateException(msg); + } + + final Settings settings = getSettings(clusterName, host, port); + dataDir = setupDataDir(settings.get(ElasticSearchConfiguration.EMBEDDED_DATA_PATH_DEFAULT_VALUE)); + + logger.info("Starting ElasticSearch for cluster {} ", settings.get("cluster.name")); + instance = new PluginConfigurableNode(settings, singletonList(Netty4Plugin.class)); + instance.start(); + Runtime.getRuntime().addShutdownHook(new Thread(() -> { + try { + if (instance != null) { + instance.close(); + } + } catch (IOException e) { + logger.error("Error closing ElasticSearch"); + } + })); + logger.info("ElasticSearch cluster {} started in local mode on port {}", instance.settings().get("cluster.name"), getPort()); + } + + private Settings getSettings(String clusterName, String host, int port) throws IOException { + dataDir = Files.createTempDirectory(clusterName + "_" + System.currentTimeMillis() + "data").toFile(); + File homeDir = Files.createTempDirectory(clusterName + "_" + System.currentTimeMillis() + "-home").toFile(); + Settings.Builder settingsBuilder = Settings.builder() + .put("cluster.name", clusterName) + .put("http.host", host) + .put("http.port", port) + .put("transport.tcp.port", port + 100) + .put(ElasticSearchConfiguration.EMBEDDED_DATA_PATH_DEFAULT_VALUE, dataDir.getAbsolutePath()) + .put(ElasticSearchConfiguration.EMBEDDED_HOME_PATH_DEFAULT_VALUE, homeDir.getAbsolutePath()) + .put("http.enabled", true) + .put("script.inline", true) + .put("script.stored", true) + .put("node.data", true) + .put("http.enabled", true) + .put("http.type", "netty4") + .put("transport.type", "netty4"); + + return settingsBuilder.build(); + } + + private String getPort() { + return 
instance.settings().get("http.port"); + } + + @Override + public synchronized void stop() throws Exception { + + if (instance != null && !instance.isClosed()) { + String port = getPort(); + logger.info("Stopping Elastic Search"); + instance.close(); + instance = null; + logger.info("Elastic Search on port {} stopped", port); + } + + } +} diff --git a/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/es5/EmbeddedElasticSearchV5Provider.java b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/es5/EmbeddedElasticSearchV5Provider.java new file mode 100644 index 0000000000..19dabec1b8 --- /dev/null +++ b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/es5/EmbeddedElasticSearchV5Provider.java @@ -0,0 +1,32 @@ +package com.netflix.conductor.elasticsearch.es5; + +import com.netflix.conductor.elasticsearch.ElasticSearchConfiguration; +import com.netflix.conductor.elasticsearch.EmbeddedElasticSearch; +import com.netflix.conductor.elasticsearch.EmbeddedElasticSearchProvider; + +import javax.inject.Inject; +import java.util.Optional; + +public class EmbeddedElasticSearchV5Provider implements EmbeddedElasticSearchProvider { + private final ElasticSearchConfiguration configuration; + + @Inject + public EmbeddedElasticSearchV5Provider(ElasticSearchConfiguration configuration) { + this.configuration = configuration; + } + + @Override + public Optional get() { + return isEmbedded() ? Optional.of( + new EmbeddedElasticSearchV5( + configuration.getEmbeddedClusterName(), + configuration.getEmbeddedHost(), + configuration.getEmbeddedPort() + ) + ) : Optional.empty(); + } + + private boolean isEmbedded() { + return configuration.getElasticSearchInstanceType().equals(ElasticSearchConfiguration.ElasticSearchInstanceType.MEMORY); + } +} diff --git a/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/AbstractNode.java b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/query/parser/AbstractNode.java similarity index 98% rename from es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/AbstractNode.java rename to es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/query/parser/AbstractNode.java index 8cd7741940..1ca29e9587 100644 --- a/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/AbstractNode.java +++ b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/query/parser/AbstractNode.java @@ -16,7 +16,7 @@ /** * */ -package com.netflix.conductor.dao.es5.index.query.parser; +package com.netflix.conductor.elasticsearch.query.parser; import java.io.InputStream; import java.math.BigDecimal; diff --git a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/BooleanOp.java b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/query/parser/BooleanOp.java similarity index 96% rename from es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/BooleanOp.java rename to es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/query/parser/BooleanOp.java index 99c66e08e3..f8f2f0862f 100644 --- a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/BooleanOp.java +++ b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/query/parser/BooleanOp.java @@ -16,7 +16,7 @@ /** * */ -package com.netflix.conductor.dao.es.index.query.parser; +package com.netflix.conductor.elasticsearch.query.parser; import java.io.InputStream; diff --git 
a/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/ComparisonOp.java b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/query/parser/ComparisonOp.java similarity index 97% rename from es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/ComparisonOp.java rename to es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/query/parser/ComparisonOp.java index 55a86cc8f1..e1eebed806 100644 --- a/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/ComparisonOp.java +++ b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/query/parser/ComparisonOp.java @@ -16,7 +16,7 @@ /** * */ -package com.netflix.conductor.dao.es5.index.query.parser; +package com.netflix.conductor.elasticsearch.query.parser; import java.io.InputStream; diff --git a/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/ConstValue.java b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/query/parser/ConstValue.java similarity index 98% rename from es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/ConstValue.java rename to es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/query/parser/ConstValue.java index 3612714890..9e081e0518 100644 --- a/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/ConstValue.java +++ b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/query/parser/ConstValue.java @@ -16,7 +16,7 @@ /** * */ -package com.netflix.conductor.dao.es5.index.query.parser; +package com.netflix.conductor.elasticsearch.query.parser; import java.io.InputStream; diff --git a/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/FunctionThrowingException.java b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/query/parser/FunctionThrowingException.java similarity index 92% rename from es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/FunctionThrowingException.java rename to es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/query/parser/FunctionThrowingException.java index b0624fc2ff..82ec52472d 100644 --- a/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/FunctionThrowingException.java +++ b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/query/parser/FunctionThrowingException.java @@ -16,7 +16,7 @@ /** * */ -package com.netflix.conductor.dao.es5.index.query.parser; +package com.netflix.conductor.elasticsearch.query.parser; /** * @author Viren diff --git a/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/ListConst.java b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/query/parser/ListConst.java similarity index 96% rename from es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/ListConst.java rename to es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/query/parser/ListConst.java index 22bbbb23a5..29f0443fde 100644 --- a/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/ListConst.java +++ b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/query/parser/ListConst.java @@ -16,7 +16,7 @@ /** * */ -package com.netflix.conductor.dao.es5.index.query.parser; +package com.netflix.conductor.elasticsearch.query.parser; import java.io.InputStream; import java.util.LinkedList; diff --git 
a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/Name.java b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/query/parser/Name.java similarity index 94% rename from es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/Name.java rename to es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/query/parser/Name.java index 1ab0f23d61..7831a57a80 100644 --- a/es2-persistence/src/main/java/com/netflix/conductor/dao/es/index/query/parser/Name.java +++ b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/query/parser/Name.java @@ -16,7 +16,7 @@ /** * */ -package com.netflix.conductor.dao.es.index.query.parser; +package com.netflix.conductor.elasticsearch.query.parser; import java.io.InputStream; diff --git a/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/ParserException.java b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/query/parser/ParserException.java similarity index 93% rename from es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/ParserException.java rename to es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/query/parser/ParserException.java index 445e3090fd..02f226a907 100644 --- a/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/ParserException.java +++ b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/query/parser/ParserException.java @@ -16,7 +16,7 @@ /** * */ -package com.netflix.conductor.dao.es5.index.query.parser; +package com.netflix.conductor.elasticsearch.query.parser; /** * @author Viren diff --git a/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/Range.java b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/query/parser/Range.java similarity index 96% rename from es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/Range.java rename to es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/query/parser/Range.java index 25e585a50f..896db71296 100644 --- a/es5-persistence/src/main/java/com/netflix/conductor/dao/es5/index/query/parser/Range.java +++ b/es5-persistence/src/main/java/com/netflix/conductor/elasticsearch/query/parser/Range.java @@ -16,7 +16,7 @@ /** * */ -package com.netflix.conductor.dao.es5.index.query.parser; +package com.netflix.conductor.elasticsearch.query.parser; import java.io.InputStream; diff --git a/es5-persistence/src/test/java/com/netflix/conductor/dao/es5/index/query/parser/TestExpression.java b/es5-persistence/src/test/java/com/netflix/conductor/dao/es5/index/query/parser/TestExpression.java index e76bf6669a..3a282864b4 100644 --- a/es5-persistence/src/test/java/com/netflix/conductor/dao/es5/index/query/parser/TestExpression.java +++ b/es5-persistence/src/test/java/com/netflix/conductor/dao/es5/index/query/parser/TestExpression.java @@ -18,13 +18,20 @@ */ package com.netflix.conductor.dao.es5.index.query.parser; +import com.netflix.conductor.elasticsearch.query.parser.AbstractParserTest; +import com.netflix.conductor.elasticsearch.query.parser.ConstValue; + import org.junit.Test; import java.io.BufferedInputStream; import java.io.ByteArrayInputStream; import java.io.InputStream; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static 
org.junit.Assert.assertTrue; /** * @author Viren diff --git a/es5-persistence/src/test/java/com/netflix/conductor/dao/es5/index/query/parser/AbstractParserTest.java b/es5-persistence/src/test/java/com/netflix/conductor/elasticsearch/query/parser/AbstractParserTest.java similarity index 94% rename from es5-persistence/src/test/java/com/netflix/conductor/dao/es5/index/query/parser/AbstractParserTest.java rename to es5-persistence/src/test/java/com/netflix/conductor/elasticsearch/query/parser/AbstractParserTest.java index 0a48142f7d..cd4c318a80 100644 --- a/es5-persistence/src/test/java/com/netflix/conductor/dao/es5/index/query/parser/AbstractParserTest.java +++ b/es5-persistence/src/test/java/com/netflix/conductor/elasticsearch/query/parser/AbstractParserTest.java @@ -16,7 +16,7 @@ /** * */ -package com.netflix.conductor.dao.es5.index.query.parser; +package com.netflix.conductor.elasticsearch.query.parser; import java.io.BufferedInputStream; import java.io.ByteArrayInputStream; diff --git a/es5-persistence/src/test/java/com/netflix/conductor/dao/es5/index/query/parser/TestBooleanOp.java b/es5-persistence/src/test/java/com/netflix/conductor/elasticsearch/query/parser/TestBooleanOp.java similarity index 95% rename from es5-persistence/src/test/java/com/netflix/conductor/dao/es5/index/query/parser/TestBooleanOp.java rename to es5-persistence/src/test/java/com/netflix/conductor/elasticsearch/query/parser/TestBooleanOp.java index 428fdd2c67..9c0ef2acb0 100644 --- a/es5-persistence/src/test/java/com/netflix/conductor/dao/es5/index/query/parser/TestBooleanOp.java +++ b/es5-persistence/src/test/java/com/netflix/conductor/elasticsearch/query/parser/TestBooleanOp.java @@ -16,7 +16,7 @@ /** * */ -package com.netflix.conductor.dao.es5.index.query.parser; +package com.netflix.conductor.elasticsearch.query.parser; import org.junit.Test; diff --git a/es5-persistence/src/test/java/com/netflix/conductor/dao/es5/index/query/parser/TestComparisonOp.java b/es5-persistence/src/test/java/com/netflix/conductor/elasticsearch/query/parser/TestComparisonOp.java similarity index 95% rename from es5-persistence/src/test/java/com/netflix/conductor/dao/es5/index/query/parser/TestComparisonOp.java rename to es5-persistence/src/test/java/com/netflix/conductor/elasticsearch/query/parser/TestComparisonOp.java index 633d3fb02a..39d954a0f8 100644 --- a/es5-persistence/src/test/java/com/netflix/conductor/dao/es5/index/query/parser/TestComparisonOp.java +++ b/es5-persistence/src/test/java/com/netflix/conductor/elasticsearch/query/parser/TestComparisonOp.java @@ -16,7 +16,8 @@ /** * */ -package com.netflix.conductor.dao.es5.index.query.parser; +package com.netflix.conductor.elasticsearch.query.parser; + import org.junit.Test; diff --git a/es5-persistence/src/test/java/com/netflix/conductor/dao/es5/index/query/parser/TestConstValue.java b/es5-persistence/src/test/java/com/netflix/conductor/elasticsearch/query/parser/TestConstValue.java similarity index 98% rename from es5-persistence/src/test/java/com/netflix/conductor/dao/es5/index/query/parser/TestConstValue.java rename to es5-persistence/src/test/java/com/netflix/conductor/elasticsearch/query/parser/TestConstValue.java index bae8477014..8cc81641a3 100644 --- a/es5-persistence/src/test/java/com/netflix/conductor/dao/es5/index/query/parser/TestConstValue.java +++ b/es5-persistence/src/test/java/com/netflix/conductor/elasticsearch/query/parser/TestConstValue.java @@ -16,7 +16,7 @@ /** * */ -package com.netflix.conductor.dao.es5.index.query.parser; +package 
com.netflix.conductor.elasticsearch.query.parser; import org.junit.Test; diff --git a/es5-persistence/src/test/java/com/netflix/conductor/dao/es5/index/query/parser/TestName.java b/es5-persistence/src/test/java/com/netflix/conductor/elasticsearch/query/parser/TestName.java similarity index 94% rename from es5-persistence/src/test/java/com/netflix/conductor/dao/es5/index/query/parser/TestName.java rename to es5-persistence/src/test/java/com/netflix/conductor/elasticsearch/query/parser/TestName.java index 7bd4c9ee8b..d3ea73c145 100644 --- a/es5-persistence/src/test/java/com/netflix/conductor/dao/es5/index/query/parser/TestName.java +++ b/es5-persistence/src/test/java/com/netflix/conductor/elasticsearch/query/parser/TestName.java @@ -16,7 +16,7 @@ /** * */ -package com.netflix.conductor.dao.es5.index.query.parser; +package com.netflix.conductor.elasticsearch.query.parser; import org.junit.Test; diff --git a/grpc-client/build.gradle b/grpc-client/build.gradle new file mode 100644 index 0000000000..033fdd2fd3 --- /dev/null +++ b/grpc-client/build.gradle @@ -0,0 +1,9 @@ +dependencies { + compile project(':conductor-common') + compile project(':conductor-core') + compile project(':conductor-grpc') + + compile "io.grpc:grpc-netty:${revGrpc}" + compile "log4j:log4j:1.2.17" + testCompile group: 'junit', name: 'junit', version: '4.12' +} diff --git a/grpc-client/src/main/java/com/netflix/conductor/client/grpc/ClientBase.java b/grpc-client/src/main/java/com/netflix/conductor/client/grpc/ClientBase.java new file mode 100644 index 0000000000..26a161936f --- /dev/null +++ b/grpc-client/src/main/java/com/netflix/conductor/client/grpc/ClientBase.java @@ -0,0 +1,29 @@ +package com.netflix.conductor.client.grpc; + +import com.netflix.conductor.grpc.ProtoMapper; +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.concurrent.TimeUnit; + +public abstract class ClientBase { + private static Logger logger = LoggerFactory.getLogger(ClientBase.class); + protected static ProtoMapper protoMapper = ProtoMapper.INSTANCE; + + protected final ManagedChannel channel; + + public ClientBase(String address, int port) { + this(ManagedChannelBuilder.forAddress(address, port).usePlaintext()); + } + + public ClientBase(ManagedChannelBuilder builder) { + channel = builder.build(); + } + + public void shutdown() throws InterruptedException { + channel.shutdown().awaitTermination(5, TimeUnit.SECONDS); + } + +} diff --git a/grpc-client/src/main/java/com/netflix/conductor/client/grpc/MetadataClient.java b/grpc-client/src/main/java/com/netflix/conductor/client/grpc/MetadataClient.java new file mode 100644 index 0000000000..df854c2652 --- /dev/null +++ b/grpc-client/src/main/java/com/netflix/conductor/client/grpc/MetadataClient.java @@ -0,0 +1,131 @@ +package com.netflix.conductor.client.grpc; + +import com.google.common.base.Preconditions; +import com.netflix.conductor.common.metadata.tasks.TaskDef; +import com.netflix.conductor.common.metadata.workflow.WorkflowDef; +import com.netflix.conductor.grpc.MetadataServiceGrpc; +import com.netflix.conductor.grpc.MetadataServicePb; +import com.netflix.conductor.proto.WorkflowDefPb; +import org.apache.commons.lang3.StringUtils; + +import javax.annotation.Nullable; +import java.util.List; +import java.util.Optional; + +public class MetadataClient extends ClientBase { + private MetadataServiceGrpc.MetadataServiceBlockingStub stub; + + public MetadataClient(String address, int port) { + 
super(address, port); + this.stub = MetadataServiceGrpc.newBlockingStub(this.channel); + } + + /** + * Register a workflow definition with the server + * + * @param workflowDef the workflow definition + */ + public void registerWorkflowDef(WorkflowDef workflowDef) { + Preconditions.checkNotNull(workflowDef, "Worfklow definition cannot be null"); + stub.createWorkflow( + MetadataServicePb.CreateWorkflowRequest.newBuilder() + .setWorkflow(protoMapper.toProto(workflowDef)) + .build() + ); + } + + /** + * Updates a list of existing workflow definitions + * + * @param workflowDefs List of workflow definitions to be updated + */ + public void updateWorkflowDefs(List workflowDefs) { + Preconditions.checkNotNull(workflowDefs, "Workflow defs list cannot be null"); + stub.updateWorkflows( + MetadataServicePb.UpdateWorkflowsRequest.newBuilder() + .addAllDefs( + workflowDefs.stream().map(protoMapper::toProto)::iterator + ) + .build() + ); + } + + /** + * Retrieve the workflow definition + * + * @param name the name of the workflow + * @param version the version of the workflow def + * @return Workflow definition for the given workflow and version + */ + public WorkflowDef getWorkflowDef(String name, @Nullable Integer version) { + Preconditions.checkArgument(StringUtils.isNotBlank(name), "name cannot be blank"); + + MetadataServicePb.GetWorkflowRequest.Builder request = + MetadataServicePb.GetWorkflowRequest.newBuilder() + .setName(name); + + if (version != null) + request.setVersion(version); + + return protoMapper.fromProto(stub.getWorkflow(request.build()).getWorkflow()); + } + + /** + * Registers a list of task types with the conductor server + * + * @param taskDefs List of task types to be registered. + */ + public void registerTaskDefs(List taskDefs) { + Preconditions.checkNotNull(taskDefs, "Task defs list cannot be null"); + stub.createTasks(MetadataServicePb.CreateTasksRequest.newBuilder() + .addAllDefs( + taskDefs.stream().map(protoMapper::toProto)::iterator + ) + .build() + ); + } + + /** + * Updates an existing task definition + * + * @param taskDef the task definition to be updated + */ + public void updateTaskDef(TaskDef taskDef) { + Preconditions.checkNotNull(taskDef, "Task definition cannot be null"); + stub.updateTask( + MetadataServicePb.UpdateTaskRequest.newBuilder() + .setTask(protoMapper.toProto(taskDef)) + .build() + ); + } + + /** + * Retrieve the task definition of a given task type + * + * @param taskType type of task for which to retrieve the definition + * @return Task Definition for the given task type + */ + public TaskDef getTaskDef(String taskType) { + Preconditions.checkArgument(StringUtils.isNotBlank(taskType), "Task type cannot be blank"); + return protoMapper.fromProto( + stub.getTask(MetadataServicePb.GetTaskRequest.newBuilder() + .setTaskType(taskType) + .build() + ).getTask() + ); + } + + /** + * Removes the task definition of a task type from the conductor server. + * Use with caution. + * + * @param taskType Task type to be unregistered. 
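A minimal usage sketch for the gRPC MetadataClient above, assuming a Conductor gRPC server reachable at localhost:8090; the task and workflow names are hypothetical:

import com.netflix.conductor.client.grpc.MetadataClient;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;

import java.util.Collections;

public class MetadataClientExample {
    public static void main(String[] args) throws InterruptedException {
        // Assumed gRPC endpoint of a running Conductor server.
        MetadataClient metadataClient = new MetadataClient("localhost", 8090);

        // Register a hypothetical task type.
        TaskDef taskDef = new TaskDef();
        taskDef.setName("encode_video");
        metadataClient.registerTaskDefs(Collections.singletonList(taskDef));

        // Register a one-task workflow definition that references it.
        WorkflowTask task = new WorkflowTask();
        task.setName("encode_video");
        task.setTaskReferenceName("encode");

        WorkflowDef workflowDef = new WorkflowDef();
        workflowDef.setName("encode_and_publish");
        workflowDef.setVersion(1);
        workflowDef.getTasks().add(task);
        metadataClient.registerWorkflowDef(workflowDef);

        metadataClient.shutdown();
    }
}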
+ */ + public void unregisterTaskDef(String taskType) { + Preconditions.checkArgument(StringUtils.isNotBlank(taskType), "Task type cannot be blank"); + stub.deleteTask(MetadataServicePb.DeleteTaskRequest.newBuilder() + .setTaskType(taskType) + .build() + ); + } +} diff --git a/grpc-client/src/main/java/com/netflix/conductor/client/grpc/TaskClient.java b/grpc-client/src/main/java/com/netflix/conductor/client/grpc/TaskClient.java new file mode 100644 index 0000000000..7e2b62786d --- /dev/null +++ b/grpc-client/src/main/java/com/netflix/conductor/client/grpc/TaskClient.java @@ -0,0 +1,244 @@ +package com.netflix.conductor.client.grpc; + +import com.google.common.base.Preconditions; +import com.google.common.collect.Iterators; +import com.google.common.collect.Lists; +import com.netflix.conductor.common.metadata.tasks.Task; +import com.netflix.conductor.common.metadata.tasks.TaskExecLog; +import com.netflix.conductor.common.metadata.tasks.TaskResult; +import com.netflix.conductor.grpc.MetadataServicePb; +import com.netflix.conductor.grpc.TaskServiceGrpc; +import com.netflix.conductor.grpc.TaskServicePb; +import com.netflix.conductor.proto.TaskPb; +import org.apache.commons.lang3.StringUtils; + +import javax.annotation.Nullable; +import java.util.*; +import java.util.stream.Collectors; + +public class TaskClient extends ClientBase { + private TaskServiceGrpc.TaskServiceBlockingStub stub; + + public TaskClient(String address, int port) { + super(address, port); + this.stub = TaskServiceGrpc.newBlockingStub(this.channel); + } + + /** + * Perform a poll for a task of a specific task type. + * + * @param taskType The taskType to poll for + * @param domain The domain of the task type + * @param workerId Name of the client worker. Used for logging. + * @return Task waiting to be executed. + */ + public Task pollTask(String taskType, String workerId, String domain) { + Preconditions.checkArgument(StringUtils.isNotBlank(taskType), "Task type cannot be blank"); + Preconditions.checkArgument(StringUtils.isNotBlank(domain), "Domain cannot be blank"); + Preconditions.checkArgument(StringUtils.isNotBlank(workerId), "Worker id cannot be blank"); + + TaskServicePb.PollResponse response = stub.poll( + TaskServicePb.PollRequest.newBuilder() + .setTaskType(taskType) + .setWorkerId(workerId) + .setDomain(domain) + .build() + ); + return protoMapper.fromProto(response.getTask()); + } + + /** + * Perform a batch poll for tasks by task type. Batch size is configurable by count. + * + * @param taskType Type of task to poll for + * @param workerId Name of the client worker. Used for logging. + * @param count Maximum number of tasks to be returned. Actual number of tasks returned can be less than this number. + * @param timeoutInMillisecond Long poll wait timeout. + * @return List of tasks awaiting to be executed. + */ + public List batchPollTasksByTaskType(String taskType, String workerId, int count, int timeoutInMillisecond) { + return Lists.newArrayList(batchPollTasksByTaskTypeAsync(taskType, workerId, count, timeoutInMillisecond)); + } + + /** + * Perform a batch poll for tasks by task type. Batch size is configurable by count. + * Returns an iterator that streams tasks as they become available through GRPC. + * + * @param taskType Type of task to poll for + * @param workerId Name of the client worker. Used for logging. + * @param count Maximum number of tasks to be returned. Actual number of tasks returned can be less than this number. + * @param timeoutInMillisecond Long poll wait timeout. 
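     *
     * The returned iterator is backed by the server-streaming batchPoll call, so tasks are
     * converted from their protobuf form lazily as the server emits them. A draining sketch,
     * where the task type, worker id, and process() helper are illustrative assumptions:
     *
     *   Iterator<Task> tasks = client.batchPollTasksByTaskTypeAsync("encode_video", "worker-1", 10, 1000);
     *   while (tasks.hasNext()) {
     *       process(tasks.next());
     *   }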
+ * @return Iterator of tasks awaiting to be executed. + */ + public Iterator batchPollTasksByTaskTypeAsync(String taskType, String workerId, int count, int timeoutInMillisecond) { + Preconditions.checkArgument(StringUtils.isNotBlank(taskType), "Task type cannot be blank"); + Preconditions.checkArgument(StringUtils.isNotBlank(workerId), "Worker id cannot be blank"); + Preconditions.checkArgument(count > 0, "Count must be greater than 0"); + + Iterator it = stub.batchPoll( + TaskServicePb.BatchPollRequest.newBuilder() + .setTaskType(taskType) + .setWorkerId(workerId) + .setCount(count) + .setTimeout(timeoutInMillisecond) + .build() + ); + + return Iterators.transform(it, protoMapper::fromProto); + } + + /** + * Retrieve pending tasks by type + * + * @param taskType Type of task + * @param startKey id of the task from where to return the results. NULL to start from the beginning. + * @param count number of tasks to retrieve + * @return Returns the list of PENDING tasks by type, starting with a given task Id. + */ + public List getPendingTasksByType(String taskType, @Nullable String startKey, @Nullable Integer count) { + Preconditions.checkArgument(StringUtils.isNotBlank(taskType), "Task type cannot be blank"); + + TaskServicePb.TasksInProgressRequest.Builder request = TaskServicePb.TasksInProgressRequest.newBuilder(); + request.setTaskType(taskType); + if (startKey != null) { + request.setStartKey(startKey); + } + if (count != null) { + request.setCount(count); + } + + return stub.getTasksInProgress(request.build()) + .getTasksList() + .stream() + .map(protoMapper::fromProto) + .collect(Collectors.toList()); + } + + /** + * Retrieve pending task identified by reference name for a workflow + * + * @param workflowId Workflow instance id + * @param taskReferenceName reference name of the task + * @return Returns the pending workflow task identified by the reference name + */ + public Task getPendingTaskForWorkflow(String workflowId, String taskReferenceName) { + Preconditions.checkArgument(StringUtils.isNotBlank(workflowId), "Workflow id cannot be blank"); + Preconditions.checkArgument(StringUtils.isNotBlank(taskReferenceName), "Task reference name cannot be blank"); + + TaskServicePb.PendingTaskResponse response = stub.getPendingTaskForWorkflow( + TaskServicePb.PendingTaskRequest.newBuilder() + .setWorkflowId(workflowId) + .setTaskRefName(taskReferenceName) + .build() + ); + return protoMapper.fromProto(response.getTask()); + } + + /** + * Updates the result of a task execution. + * + * @param taskResult TaskResults to be updated. + */ + public void updateTask(TaskResult taskResult) { + Preconditions.checkNotNull(taskResult, "Task result cannot be null"); + stub.updateTask(TaskServicePb.UpdateTaskRequest.newBuilder() + .setResult(protoMapper.toProto(taskResult)) + .build() + ); + } + + /** + * Ack for the task poll. + * + * @param taskId Id of the task to be polled + * @param workerId user identified worker. + * @return true if the task was found with the given ID and acknowledged. False otherwise. If the server returns false, the client should NOT attempt to ack again. 
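     *
     * A sketch of a simple poll-ack-execute-update loop built from this client, where the
     * task type, worker id, and domain are illustrative assumptions:
     *
     *   Task task = client.pollTask("encode_video", "worker-1", "mydomain");
     *   if (client.ack(task.getTaskId(), "worker-1")) {
     *       TaskResult result = new TaskResult(task);
     *       result.setStatus(TaskResult.Status.COMPLETED);
     *       client.updateTask(result);
     *   }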
+ */ + public boolean ack(String taskId, @Nullable String workerId) { + Preconditions.checkArgument(StringUtils.isNotBlank(taskId), "Task id cannot be blank"); + + TaskServicePb.AckTaskRequest.Builder request = TaskServicePb.AckTaskRequest.newBuilder(); + request.setTaskId(taskId); + if (workerId != null) { + request.setWorkerId(workerId); + } + + return stub.ackTask(request.build()).getAck(); + } + + /** + * Log execution messages for a task. + * + * @param taskId id of the task + * @param logMessage the message to be logged + */ + public void logMessageForTask(String taskId, String logMessage) { + Preconditions.checkArgument(StringUtils.isNotBlank(taskId), "Task id cannot be blank"); + stub.addLog( + TaskServicePb.AddLogRequest.newBuilder() + .setTaskId(taskId) + .setLog(logMessage) + .build() + ); + } + + /** + * Fetch execution logs for a task. + * + * @param taskId id of the task. + */ + public List getTaskLogs(String taskId) { + Preconditions.checkArgument(StringUtils.isNotBlank(taskId), "Task id cannot be blank"); + return stub.getTaskLogs( + TaskServicePb.GetTaskLogsRequest.newBuilder().setTaskId(taskId).build() + ).getLogsList() + .stream() + .map(protoMapper::fromProto) + .collect(Collectors.toList()); + } + + /** + * Retrieve information about the task + * + * @param taskId ID of the task + * @return Task details + */ + public Task getTaskDetails(String taskId) { + Preconditions.checkArgument(StringUtils.isNotBlank(taskId), "Task id cannot be blank"); + return protoMapper.fromProto( + stub.getTask(TaskServicePb.GetTaskRequest.newBuilder() + .setTaskId(taskId) + .build() + ).getTask() + ); + } + + /** + * Removes a task from a taskType queue + * + * @param taskType the taskType to identify the queue + * @param taskId the id of the task to be removed + */ + public void removeTaskFromQueue(String taskType, String taskId) { + Preconditions.checkArgument(StringUtils.isNotBlank(taskType), "Task type cannot be blank"); + Preconditions.checkArgument(StringUtils.isNotBlank(taskId), "Task id cannot be blank"); + stub.removeTaskFromQueue( + TaskServicePb.RemoveTaskRequest.newBuilder() + .setTaskType(taskType) + .setTaskId(taskId) + .build() + ); + } + + public int getQueueSizeForTask(String taskType) { + Preconditions.checkArgument(StringUtils.isNotBlank(taskType), "Task type cannot be blank"); + + TaskServicePb.QueueSizesResponse sizes = stub.getQueueSizesForTasks( + TaskServicePb.QueueSizesRequest.newBuilder() + .addTaskTypes(taskType) + .build() + ); + + return sizes.getQueueForTaskOrDefault(taskType, 0); + } +} diff --git a/grpc-client/src/main/java/com/netflix/conductor/client/grpc/WorkflowClient.java b/grpc-client/src/main/java/com/netflix/conductor/client/grpc/WorkflowClient.java new file mode 100644 index 0000000000..55d7438345 --- /dev/null +++ b/grpc-client/src/main/java/com/netflix/conductor/client/grpc/WorkflowClient.java @@ -0,0 +1,308 @@ +package com.netflix.conductor.client.grpc; + +import com.google.common.base.Preconditions; +import com.netflix.conductor.common.metadata.workflow.RerunWorkflowRequest; +import com.netflix.conductor.common.metadata.workflow.StartWorkflowRequest; +import com.netflix.conductor.common.run.SearchResult; +import com.netflix.conductor.common.run.Workflow; +import com.netflix.conductor.common.run.WorkflowSummary; +import com.netflix.conductor.grpc.SearchPb; +import com.netflix.conductor.grpc.WorkflowServiceGrpc; +import com.netflix.conductor.grpc.WorkflowServicePb; +import com.netflix.conductor.proto.WorkflowPb; +import 
org.apache.commons.lang3.StringUtils; + +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import java.util.*; +import java.util.stream.Collectors; + +public class WorkflowClient extends ClientBase { + private WorkflowServiceGrpc.WorkflowServiceBlockingStub stub; + + public WorkflowClient(String address, int port) { + super(address, port); + this.stub = WorkflowServiceGrpc.newBlockingStub(this.channel); + } + + /** + * Starts a workflow + * + * @param startWorkflowRequest the {@link StartWorkflowRequest} object to start the workflow + * @return the id of the workflow instance that can be used for tracking + */ + public String startWorkflow(StartWorkflowRequest startWorkflowRequest) { + Preconditions.checkNotNull(startWorkflowRequest, "StartWorkflowRequest cannot be null"); + return stub.startWorkflow( + protoMapper.toProto(startWorkflowRequest) + ).getWorkflowId(); + } + + /** + * Retrieve a workflow by workflow id + * + * @param workflowId the id of the workflow + * @param includeTasks specify if the tasks in the workflow need to be returned + * @return the requested workflow + */ + public Workflow getWorkflow(String workflowId, boolean includeTasks) { + Preconditions.checkArgument(StringUtils.isNotBlank(workflowId), "workflow id cannot be blank"); + WorkflowPb.Workflow workflow = stub.getWorkflowStatus( + WorkflowServicePb.GetWorkflowStatusRequest.newBuilder() + .setWorkflowId(workflowId) + .setIncludeTasks(includeTasks) + .build() + ); + return protoMapper.fromProto(workflow); + } + + /** + * Retrieve all workflows for a given correlation id and name + * + * @param name the name of the workflow + * @param correlationId the correlation id + * @param includeClosed specify if all workflows are to be returned or only running workflows + * @param includeTasks specify if the tasks in the workflow need to be returned + * @return list of workflows for the given correlation id and name + */ + public List getWorkflows(String name, String correlationId, boolean includeClosed, boolean includeTasks) { + Preconditions.checkArgument(StringUtils.isNotBlank(name), "name cannot be blank"); + Preconditions.checkArgument(StringUtils.isNotBlank(correlationId), "correlationId cannot be blank"); + + WorkflowServicePb.GetWorkflowsResponse workflows = stub.getWorkflows( + WorkflowServicePb.GetWorkflowsRequest.newBuilder() + .setName(name) + .addCorrelationId(correlationId) + .setIncludeClosed(includeClosed) + .setIncludeTasks(includeTasks) + .build() + ); + + if (!workflows.containsWorkflowsById(correlationId)) { + return Collections.emptyList(); + } + + return workflows.getWorkflowsByIdOrThrow(correlationId) + .getWorkflowsList().stream() + .map(protoMapper::fromProto) + .collect(Collectors.toList()); + } + + /** + * Removes a workflow from the system + * + * @param workflowId the id of the workflow to be deleted + * @param archiveWorkflow flag to indicate if the workflow should be archived before deletion + */ + public void deleteWorkflow(String workflowId, boolean archiveWorkflow) { + Preconditions.checkArgument(StringUtils.isNotBlank(workflowId), "Workflow id cannot be blank"); + stub.removeWorkflow( + WorkflowServicePb.RemoveWorkflowRequest.newBuilder() + .setWorkflodId(workflowId) + .setArchiveWorkflow(archiveWorkflow) + .build() + ); + } + + /* + * Retrieve all running workflow instances for a given name and version + * + * @param workflowName the name of the workflow + * @param version the version of the wokflow definition. Defaults to 1. 
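     *                A null version is sent to the server as version 1 (see the builder call
     *                below); for example, with an assumed workflow name:
     *                List<String> running = client.getRunningWorkflow("encode_and_deploy", null);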
+ * @return the list of running workflow instances + */ + public List getRunningWorkflow(String workflowName, @Nullable Integer version) { + Preconditions.checkArgument(StringUtils.isNotBlank(workflowName), "Workflow name cannot be blank"); + + WorkflowServicePb.GetRunningWorkflowsResponse workflows = stub.getRunningWorkflows( + WorkflowServicePb.GetRunningWorkflowsRequest.newBuilder() + .setName(workflowName) + .setVersion(version == null ? 1 : version) + .build() + ); + return workflows.getWorkflowIdsList(); + } + + /** + * Retrieve all workflow instances for a given workflow name between a specific time period + * + * @param workflowName the name of the workflow + * @param version the version of the workflow definition. Defaults to 1. + * @param startTime the start time of the period + * @param endTime the end time of the period + * @return returns a list of workflows created during the specified during the time period + */ + public List getWorkflowsByTimePeriod(String workflowName, int version, Long startTime, Long endTime) { + Preconditions.checkArgument(StringUtils.isNotBlank(workflowName), "Workflow name cannot be blank"); + Preconditions.checkNotNull(startTime, "Start time cannot be null"); + Preconditions.checkNotNull(endTime, "End time cannot be null"); + // TODO + return null; + } + + /* + * Starts the decision task for the given workflow instance + * + * @param workflowId the id of the workflow instance + */ + public void runDecider(String workflowId) { + Preconditions.checkArgument(StringUtils.isNotBlank(workflowId), "workflow id cannot be blank"); + stub.decideWorkflow(WorkflowServicePb.DecideWorkflowRequest.newBuilder() + .setWorkflowId(workflowId) + .build() + ); + } + + /** + * Pause a workflow by workflow id + * + * @param workflowId the workflow id of the workflow to be paused + */ + public void pauseWorkflow(String workflowId) { + Preconditions.checkArgument(StringUtils.isNotBlank(workflowId), "workflow id cannot be blank"); + stub.pauseWorkflow(WorkflowServicePb.PauseWorkflowRequest.newBuilder() + .setWorkflowId(workflowId) + .build() + ); + } + + /** + * Resume a paused workflow by workflow id + * + * @param workflowId the workflow id of the paused workflow + */ + public void resumeWorkflow(String workflowId) { + Preconditions.checkArgument(StringUtils.isNotBlank(workflowId), "workflow id cannot be blank"); + stub.resumeWorkflow(WorkflowServicePb.ResumeWorkflowRequest.newBuilder() + .setWorkflowId(workflowId) + .build() + ); + } + + /** + * Skips a given task from a current RUNNING workflow + * + * @param workflowId the id of the workflow instance + * @param taskReferenceName the reference name of the task to be skipped + */ + public void skipTaskFromWorkflow(String workflowId, String taskReferenceName) { + Preconditions.checkArgument(StringUtils.isNotBlank(workflowId), "workflow id cannot be blank"); + Preconditions.checkArgument(StringUtils.isNotBlank(taskReferenceName), "Task reference name cannot be blank"); + stub.skipTaskFromWorkflow(WorkflowServicePb.SkipTaskRequest.newBuilder() + .setWorkflowId(workflowId) + .setTaskReferenceName(taskReferenceName) + .build() + ); + } + + /** + * Reruns the workflow from a specific task + * + * @param rerunWorkflowRequest the request containing the task to rerun from + * @return the id of the workflow + */ + public String rerunWorkflow(RerunWorkflowRequest rerunWorkflowRequest) { + Preconditions.checkNotNull(rerunWorkflowRequest, "RerunWorkflowRequest cannot be null"); + return stub.rerunWorkflow( + 
protoMapper.toProto(rerunWorkflowRequest) + ).getWorkflowId(); + } + + /** + * Restart a completed workflow + * + * @param workflowId the workflow id of the workflow to be restarted + */ + public void restart(String workflowId) { + Preconditions.checkArgument(StringUtils.isNotBlank(workflowId), "workflow id cannot be blank"); + stub.restartWorkflow(WorkflowServicePb.RestartWorkflowRequest.newBuilder() + .setWorkflowId(workflowId) + .build() + ); + } + + /** + * Retries the last failed task in a workflow + * + * @param workflowId the workflow id of the workflow with the failed task + */ + public void retryLastFailedTask(String workflowId) { + Preconditions.checkArgument(StringUtils.isNotBlank(workflowId), "workflow id cannot be blank"); + stub.retryWorkflow(WorkflowServicePb.RetryWorkflowRequest.newBuilder() + .setWorkflowId(workflowId) + .build() + ); + } + + + /** + * Resets the callback times of all IN PROGRESS tasks to 0 for the given workflow + * + * @param workflowId the id of the workflow + */ + public void resetCallbacksForInProgressTasks(String workflowId) { + Preconditions.checkArgument(StringUtils.isNotBlank(workflowId), "workflow id cannot be blank"); + stub.resetWorkflowCallbacks(WorkflowServicePb.ResetWorkflowCallbacksRequest.newBuilder() + .setWorkflowId(workflowId) + .build() + ); + } + + /** + * Terminates the execution of the given workflow instance + * + * @param workflowId the id of the workflow to be terminated + * @param reason the reason to be logged and displayed + */ + public void terminateWorkflow(String workflowId, String reason) { + Preconditions.checkArgument(StringUtils.isNotBlank(workflowId), "workflow id cannot be blank"); + stub.terminateWorkflow(WorkflowServicePb.TerminateWorkflowRequest.newBuilder() + .setWorkflowId(workflowId) + .setReason(reason) + .build() + ); + } + + /** + * Search for workflows based on payload + * + * @param query the search query + * @return the {@link SearchResult} containing the {@link WorkflowSummary} that match the query + */ + public SearchResult search(@Nonnull String query) { + return search(null, null, null, null, query); + } + + /** + * Paginated search for workflows based on payload + * + * @param start start value of page + * @param size number of workflows to be returned + * @param sort sort order + * @param freeText additional free text query + * @param query the search query + * @return the {@link SearchResult} containing the {@link WorkflowSummary} that match the query + */ + public SearchResult search( + @Nullable Integer start, @Nullable Integer size, + @Nullable String sort, @Nullable String freeText, @Nonnull String query) { + Preconditions.checkNotNull(query, "query cannot be null"); + + SearchPb.Request.Builder request = SearchPb.Request.newBuilder(); + request.setQuery(query); + if (start != null) + request.setStart(start); + if (size != null) + request.setSize(size); + if (sort != null) + request.setSort(sort); + if (freeText != null) + request.setFreeText(freeText); + + WorkflowServicePb.WorkflowSummarySearchResult result = stub.search(request.build()); + return new SearchResult( + result.getTotalHits(), + result.getResultsList().stream().map(protoMapper::fromProto).collect(Collectors.toList()) + ); + } +} diff --git a/grpc-server/build.gradle b/grpc-server/build.gradle new file mode 100644 index 0000000000..333da15731 --- /dev/null +++ b/grpc-server/build.gradle @@ -0,0 +1,18 @@ +plugins { + // FIXME This is temporary until the server module refactoring is completed. 
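    // The Shadow plugin below builds a self-contained ("fat") jar of the gRPC server and its
    // runtime dependencies, presumably so it can be run standalone until that refactoring lands.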
+ id 'com.github.johnrengelman.shadow' version '1.2.3' +} + +dependencies { + compile project(':conductor-common') + compile project(':conductor-core') + compile project(':conductor-grpc') + + compile "io.grpc:grpc-netty:${revGrpc}" + compile "io.grpc:grpc-services:${revGrpc}" + compile "log4j:log4j:1.2.17" + + testCompile "io.grpc:grpc-testing:${revGrpc}" + testCompile "org.mockito:mockito-all:${revMockito}" + testCompile 'org.testinfected.hamcrest-matchers:all-matchers:1.8' +} diff --git a/grpc-server/src/main/java/com/netflix/conductor/grpc/server/GRPCModule.java b/grpc-server/src/main/java/com/netflix/conductor/grpc/server/GRPCModule.java new file mode 100644 index 0000000000..55f9488765 --- /dev/null +++ b/grpc-server/src/main/java/com/netflix/conductor/grpc/server/GRPCModule.java @@ -0,0 +1,32 @@ +package com.netflix.conductor.grpc.server; + +import com.google.inject.AbstractModule; + +import com.netflix.conductor.grpc.EventServiceGrpc; +import com.netflix.conductor.grpc.MetadataServiceGrpc; +import com.netflix.conductor.grpc.TaskServiceGrpc; +import com.netflix.conductor.grpc.WorkflowServiceGrpc; +import com.netflix.conductor.grpc.server.service.EventServiceImpl; +import com.netflix.conductor.grpc.server.service.HealthServiceImpl; +import com.netflix.conductor.grpc.server.service.MetadataServiceImpl; +import com.netflix.conductor.grpc.server.service.TaskServiceImpl; +import com.netflix.conductor.grpc.server.service.WorkflowServiceImpl; + +import io.grpc.health.v1.HealthGrpc; + +public class GRPCModule extends AbstractModule { + + @Override + protected void configure() { + + bind(HealthGrpc.HealthImplBase.class).to(HealthServiceImpl.class); + + bind(EventServiceGrpc.EventServiceImplBase.class).to(EventServiceImpl.class); + bind(MetadataServiceGrpc.MetadataServiceImplBase.class).to(MetadataServiceImpl.class); + bind(TaskServiceGrpc.TaskServiceImplBase.class).to(TaskServiceImpl.class); + bind(WorkflowServiceGrpc.WorkflowServiceImplBase.class).to(WorkflowServiceImpl.class); + + bind(GRPCServerConfiguration.class).to(GRPCServerSystemConfiguration.class); + bind(GRPCServerProvider.class); + } +} diff --git a/grpc-server/src/main/java/com/netflix/conductor/grpc/server/GRPCServer.java b/grpc-server/src/main/java/com/netflix/conductor/grpc/server/GRPCServer.java new file mode 100644 index 0000000000..9067e04035 --- /dev/null +++ b/grpc-server/src/main/java/com/netflix/conductor/grpc/server/GRPCServer.java @@ -0,0 +1,41 @@ +package com.netflix.conductor.grpc.server; + +import com.netflix.conductor.service.Lifecycle; +import io.grpc.BindableService; +import io.grpc.Server; +import io.grpc.ServerBuilder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.inject.Singleton; +import java.io.IOException; +import java.util.Arrays; + +@Singleton +public class GRPCServer implements Lifecycle { + + private static final Logger logger = LoggerFactory.getLogger(GRPCServer.class); + + private final Server server; + + public GRPCServer(int port, BindableService... 
services) { + ServerBuilder builder = ServerBuilder.forPort(port); + Arrays.stream(services).forEach(builder::addService); + server = builder.build(); + } + + @Override + public void start() throws IOException { + registerShutdownHook(); + server.start(); + logger.info("grpc: Server started, listening on " + server.getPort()); + } + + @Override + public void stop() { + if (server != null) { + logger.info("grpc: server shutting down"); + server.shutdown(); + } + } +} diff --git a/grpc-server/src/main/java/com/netflix/conductor/grpc/server/GRPCServerConfiguration.java b/grpc-server/src/main/java/com/netflix/conductor/grpc/server/GRPCServerConfiguration.java new file mode 100644 index 0000000000..a81b83b21e --- /dev/null +++ b/grpc-server/src/main/java/com/netflix/conductor/grpc/server/GRPCServerConfiguration.java @@ -0,0 +1,26 @@ +package com.netflix.conductor.grpc.server; + +import com.netflix.conductor.core.config.Configuration; + +public interface GRPCServerConfiguration extends Configuration { + String ENABLED_PROPERTY_NAME = "conductor.grpc.server.enabled"; + boolean ENABLED_DEFAULT_VALUE = false; + + String PORT_PROPERTY_NAME = "conductor.grpc.server.port"; + int PORT_DEFAULT_VALUE = 8090; + + String SERVICE_REFLECTION_ENABLED_PROPERTY_NAME = "conductor.grpc.server.reflection.enabled"; + boolean SERVICE_REFLECTION_ENABLED_DEFAULT_VALUE = true; + + default boolean isEnabled(){ + return getBooleanProperty(ENABLED_PROPERTY_NAME, ENABLED_DEFAULT_VALUE); + } + + default int getPort(){ + return getIntProperty(PORT_PROPERTY_NAME, PORT_DEFAULT_VALUE); + } + + default boolean isReflectionEnabled() { + return getBooleanProperty(SERVICE_REFLECTION_ENABLED_PROPERTY_NAME, SERVICE_REFLECTION_ENABLED_DEFAULT_VALUE); + } +} diff --git a/grpc-server/src/main/java/com/netflix/conductor/grpc/server/GRPCServerProvider.java b/grpc-server/src/main/java/com/netflix/conductor/grpc/server/GRPCServerProvider.java new file mode 100644 index 0000000000..414e1660fc --- /dev/null +++ b/grpc-server/src/main/java/com/netflix/conductor/grpc/server/GRPCServerProvider.java @@ -0,0 +1,69 @@ +package com.netflix.conductor.grpc.server; + +import com.google.common.collect.ImmutableList; +import com.netflix.conductor.grpc.EventServiceGrpc; +import com.netflix.conductor.grpc.MetadataServiceGrpc; +import com.netflix.conductor.grpc.TaskServiceGrpc; +import com.netflix.conductor.grpc.WorkflowServiceGrpc; + +import java.util.Optional; + +import javax.inject.Inject; +import javax.inject.Provider; + +import io.grpc.BindableService; +import io.grpc.health.v1.HealthGrpc; +import io.grpc.protobuf.services.ProtoReflectionService; + +public class GRPCServerProvider implements Provider> { + + private final GRPCServerConfiguration configuration; + private final BindableService healthServiceImpl; + private final BindableService eventServiceImpl; + private final BindableService metadataServiceImpl; + private final BindableService taskServiceImpl; + private final BindableService workflowServiceImpl; + + @Inject + public GRPCServerProvider( + GRPCServerConfiguration grpcServerConfiguration, + HealthGrpc.HealthImplBase healthServiceImpl, + EventServiceGrpc.EventServiceImplBase eventServiceImpl, + MetadataServiceGrpc.MetadataServiceImplBase metadataServiceImpl, + TaskServiceGrpc.TaskServiceImplBase taskServiceImpl, + WorkflowServiceGrpc.WorkflowServiceImplBase workflowServiceImpl + ) { + this.configuration = grpcServerConfiguration; + this.healthServiceImpl = healthServiceImpl; + + this.eventServiceImpl = eventServiceImpl; + 
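        // These service implementations come from the bindings declared in GRPCModule, and the
        // server is only built when the gRPC endpoint is enabled (see get() below). An
        // illustrative configuration sketch using the property names from GRPCServerConfiguration:
        //
        //   conductor.grpc.server.enabled=true
        //   conductor.grpc.server.port=8090
        //   conductor.grpc.server.reflection.enabled=true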
this.metadataServiceImpl = metadataServiceImpl; + this.taskServiceImpl = taskServiceImpl; + this.workflowServiceImpl = workflowServiceImpl; + } + + @Override + public Optional get() { + return configuration.isEnabled() ? + Optional.of(buildGRPCServer(configuration)) + : Optional.empty(); + } + + private GRPCServer buildGRPCServer(GRPCServerConfiguration grpcServerConfiguration) { + ImmutableList.Builder services = ImmutableList.builder().add( + healthServiceImpl, + eventServiceImpl, + metadataServiceImpl, + taskServiceImpl, + workflowServiceImpl); + + if (grpcServerConfiguration.isReflectionEnabled()) { + services.add(ProtoReflectionService.newInstance()); + } + + return new GRPCServer( + grpcServerConfiguration.getPort(), + services.build().toArray(new BindableService[]{}) + ); + } +} diff --git a/grpc-server/src/main/java/com/netflix/conductor/grpc/server/GRPCServerSystemConfiguration.java b/grpc-server/src/main/java/com/netflix/conductor/grpc/server/GRPCServerSystemConfiguration.java new file mode 100644 index 0000000000..a9ff4900ad --- /dev/null +++ b/grpc-server/src/main/java/com/netflix/conductor/grpc/server/GRPCServerSystemConfiguration.java @@ -0,0 +1,6 @@ +package com.netflix.conductor.grpc.server; + +import com.netflix.conductor.core.config.SystemPropertiesConfiguration; + +public class GRPCServerSystemConfiguration extends SystemPropertiesConfiguration implements GRPCServerConfiguration { +} diff --git a/grpc-server/src/main/java/com/netflix/conductor/grpc/server/service/EventServiceImpl.java b/grpc-server/src/main/java/com/netflix/conductor/grpc/server/service/EventServiceImpl.java new file mode 100644 index 0000000000..8f302b230b --- /dev/null +++ b/grpc-server/src/main/java/com/netflix/conductor/grpc/server/service/EventServiceImpl.java @@ -0,0 +1,101 @@ +package com.netflix.conductor.grpc.server.service; + +import com.netflix.conductor.core.events.EventProcessor; +import com.netflix.conductor.core.events.EventQueues; +import com.netflix.conductor.grpc.EventServiceGrpc; +import com.netflix.conductor.grpc.EventServicePb; +import com.netflix.conductor.grpc.ProtoMapper; +import com.netflix.conductor.proto.EventHandlerPb; +import com.netflix.conductor.service.MetadataService; + +import java.util.Map; + +import javax.inject.Inject; + +import io.grpc.stub.StreamObserver; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class EventServiceImpl extends EventServiceGrpc.EventServiceImplBase { + private static final Logger LOGGER = LoggerFactory.getLogger(EventServiceImpl.class); + private static final ProtoMapper PROTO_MAPPER = ProtoMapper.INSTANCE; + private static final GRPCHelper GRPC_HELPER = new GRPCHelper(LOGGER); + + private final MetadataService service; + private final EventProcessor ep; + private final EventQueues eventQueues; + + @Inject + public EventServiceImpl(MetadataService service, EventProcessor ep, EventQueues eventQueues) { + this.service = service; + this.ep = ep; + this.eventQueues = eventQueues; + } + + @Override + public void addEventHandler(EventServicePb.AddEventHandlerRequest req, StreamObserver response) { + service.addEventHandler(PROTO_MAPPER.fromProto(req.getHandler())); + response.onNext(EventServicePb.AddEventHandlerResponse.getDefaultInstance()); + response.onCompleted(); + } + + @Override + public void updateEventHandler(EventServicePb.UpdateEventHandlerRequest req, StreamObserver response) { + service.updateEventHandler(PROTO_MAPPER.fromProto(req.getHandler())); + 
response.onNext(EventServicePb.UpdateEventHandlerResponse.getDefaultInstance()); + response.onCompleted(); + } + + @Override + public void removeEventHandler(EventServicePb.RemoveEventHandlerRequest req, StreamObserver response) { + service.removeEventHandlerStatus(req.getName()); + response.onNext(EventServicePb.RemoveEventHandlerResponse.getDefaultInstance()); + response.onCompleted(); + } + + @Override + public void getEventHandlers(EventServicePb.GetEventHandlersRequest req, StreamObserver response) { + service.getEventHandlers().stream().map(PROTO_MAPPER::toProto).forEach(response::onNext); + response.onCompleted(); + } + + @Override + public void getEventHandlersForEvent(EventServicePb.GetEventHandlersForEventRequest req, StreamObserver response) { + service.getEventHandlersForEvent(req.getEvent(), req.getActiveOnly()) + .stream().map(PROTO_MAPPER::toProto).forEach(response::onNext); + response.onCompleted(); + } + + @Override + public void getQueues(EventServicePb.GetQueuesRequest req, StreamObserver response) { + response.onNext( + EventServicePb.GetQueuesResponse.newBuilder() + .putAllEventToQueueUri(ep.getQueues()) + .build() + ); + response.onCompleted(); + } + + @Override + public void getQueueSizes(EventServicePb.GetQueueSizesRequest req, StreamObserver response) { + EventServicePb.GetQueueSizesResponse.Builder builder = EventServicePb.GetQueueSizesResponse.newBuilder(); + for (Map.Entry> pair : ep.getQueueSizes().entrySet()) { + builder.putEventToQueueInfo(pair.getKey(), + EventServicePb.GetQueueSizesResponse.QueueInfo.newBuilder() + .putAllQueueSizes(pair.getValue()).build() + ); + } + response.onNext(builder.build()); + response.onCompleted(); + } + + @Override + public void getQueueProviders(EventServicePb.GetQueueProvidersRequest req, StreamObserver response) { + response.onNext( + EventServicePb.GetQueueProvidersResponse.newBuilder() + .addAllProviders(eventQueues.getProviders()) + .build() + ); + response.onCompleted(); + } +} diff --git a/grpc-server/src/main/java/com/netflix/conductor/grpc/server/service/GRPCHelper.java b/grpc-server/src/main/java/com/netflix/conductor/grpc/server/service/GRPCHelper.java new file mode 100644 index 0000000000..d57a73548c --- /dev/null +++ b/grpc-server/src/main/java/com/netflix/conductor/grpc/server/service/GRPCHelper.java @@ -0,0 +1,158 @@ +package com.netflix.conductor.grpc.server.service; + +import com.google.protobuf.Empty; +import com.google.rpc.DebugInfo; +import io.grpc.Metadata; +import io.grpc.Status; +import io.grpc.StatusException; +import io.grpc.protobuf.lite.ProtoLiteUtils; +import io.grpc.stub.StreamObserver; +import org.apache.commons.lang3.exception.ExceptionUtils; +import org.slf4j.Logger; + +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import java.util.Arrays; + +public class GRPCHelper { + private final Logger logger; + + private static final Metadata.Key STATUS_DETAILS_KEY = + Metadata.Key.of( + "grpc-status-details-bin", + ProtoLiteUtils.metadataMarshaller(DebugInfo.getDefaultInstance())); + + public GRPCHelper(Logger log) { + this.logger = log; + } + + /** + * Converts an internal exception thrown by Conductor into an StatusException + * that uses modern "Status" metadata for GRPC. + * + * Note that this is trickier than it ought to be because the GRPC APIs have + * not been upgraded yet. Here's a quick breakdown of how this works in practice: + * + * Reporting a "status" result back to a client with GRPC is pretty straightforward. 
+ * GRPC implementations simply serialize the status into several HTTP/2 trailer headers that + * are sent back to the client before shutting down the HTTP/2 stream. + * + * - 'grpc-status', which is a string representation of a {@link com.google.rpc.Code} + * - 'grpc-message', which is the description of the returned status + * - 'grpc-status-details-bin' (optional), which is an arbitrary payload with a serialized + * ProtoBuf object, containing an accurate description of the error in case the status is not + * successful. + * + * By convention, Google provides a default set of ProtoBuf messages for the most common + * error cases. Here, we'll be using {@link DebugInfo}, as we're reporting an internal + * Java exception which we couldn't properly handle. + * + * Now, how do we go about sending all those headers _and_ the {@link DebugInfo} payload + * using the Java GRPC API? + * + * The only way we can return an error with the Java API is by passing an instance of + * {@link io.grpc.StatusException} or {@link io.grpc.StatusRuntimeException} to + * {@link StreamObserver#onError(Throwable)}. The easiest way to create either of these + * exceptions is by using the {@link Status} class and one of its predefined code + * identifiers (in this case, {@link Status#INTERNAL} because we're reporting an internal + * exception). The {@link Status} class has setters to set its most relevant attributes, + * namely those that will be automatically serialized into the 'grpc-status' and 'grpc-message' + * trailers in the response. There is, however, no setter to pass an arbitrary ProtoBuf message + * to be serialized into a `grpc-status-details-bin` trailer. This feature exists in the other + * language implementations but it hasn't been brought to Java yet. + * + * Fortunately, {@link Status#asException(Metadata)} exists, allowing us to pass any amount + * of arbitrary trailers before we close the response. So we're using this API to manually + * craft the 'grpc-status-detail-bin' trailer, in the same way that the GRPC server implementations + * for Go and C++ craft and serialize the header. This will allow us to access the metadata + * cleanly from Go and C++ clients by using the 'details' method which _has_ been implemented + * in those two clients. + * + * @param t The exception to convert + * @return an instance of {@link StatusException} which will properly serialize all its + * headers into the response. + */ + private StatusException throwableToStatusException(Throwable t) { + String[] frames = ExceptionUtils.getStackFrames(t); + Metadata metadata = new Metadata(); + metadata.put(STATUS_DETAILS_KEY, + DebugInfo.newBuilder() + .addAllStackEntries(Arrays.asList(frames)) + .setDetail(ExceptionUtils.getMessage(t)) + .build() + ); + + return Status.INTERNAL + .withDescription(t.getMessage()) + .withCause(t) + .asException(metadata); + } + + void onError(StreamObserver response, Throwable t) { + logger.error("internal exception during GRPC request", t); + response.onError(throwableToStatusException(t)); + } + + /** + * Convert a non-null String instance to a possibly null String instance + * based on ProtoBuf's rules for optional arguments. + * + * This helper converts an String instance from a ProtoBuf object into a + * possibly null String. In ProtoBuf objects, String fields are not + * nullable, but an empty String field is considered to be "missing". + * + * The internal Conductor APIs expect missing arguments to be passed + * as null values, so this helper performs such conversion. 
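     * For example, optional("") yields null, while a non-empty value such as optional("abc")
     * is returned unchanged.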
+ * + * @param str a string from a ProtoBuf object + * @return the original string, or null + */ + String optional(@Nonnull String str) { + return str.isEmpty() ? null : str; + } + + /** + * Check if a given non-null String instance is "missing" according to ProtoBuf's + * missing field rules. If the String is missing, the given default value will be + * returned. Otherwise, the string itself will be returned. + * + * @param str the input String + * @param defaults the default value for the string + * @return 'str' if it is not empty according to ProtoBuf rules; 'defaults' otherwise + */ + String optionalOr(@Nonnull String str, String defaults) { + return str.isEmpty() ? defaults : str; + } + + /** + * Convert a non-null Integer instance to a possibly null Integer instance + * based on ProtoBuf's rules for optional arguments. + * + * This helper converts an Integer instance from a ProtoBuf object into a + * possibly null Integer. In ProtoBuf objects, Integer fields are not + * nullable, but a zero-value Integer field is considered to be "missing". + * + * The internal Conductor APIs expect missing arguments to be passed + * as null values, so this helper performs such conversion. + * + * @param i an Integer from a ProtoBuf object + * @return the original Integer, or null + */ + Integer optional(@Nonnull Integer i) { + return i == 0 ? null : i; + } + + /** + * Check if a given non-null Integer instance is "missing" according to ProtoBuf's + * missing field rules. If the Integer is missing (i.e. if it has a zero-value), + * the given default value will be returned. Otherwise, the Integer itself will be + * returned. + * + * @param i the input Integer + * @param defaults the default value for the Integer + * @return 'i' if it is not a zero-value according to ProtoBuf rules; 'defaults' otherwise + */ + Integer optionalOr(@Nonnull Integer i, int defaults) { + return i == 0 ? 
defaults : i; + } +} diff --git a/grpc-server/src/main/java/com/netflix/conductor/grpc/server/service/HealthServiceImpl.java b/grpc-server/src/main/java/com/netflix/conductor/grpc/server/service/HealthServiceImpl.java new file mode 100644 index 0000000000..1aca1f8781 --- /dev/null +++ b/grpc-server/src/main/java/com/netflix/conductor/grpc/server/service/HealthServiceImpl.java @@ -0,0 +1,44 @@ +package com.netflix.conductor.grpc.server.service; + +import com.netflix.runtime.health.api.HealthCheckAggregator; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.inject.Inject; + +import io.grpc.health.v1.HealthCheckRequest; +import io.grpc.health.v1.HealthCheckResponse; +import io.grpc.health.v1.HealthGrpc; +import io.grpc.stub.StreamObserver; + +public class HealthServiceImpl extends HealthGrpc.HealthImplBase { + private static final Logger LOGGER = LoggerFactory.getLogger(HealthServiceImpl.class); + private static final GRPCHelper GRPC_HELPER = new GRPCHelper(LOGGER); + + private final HealthCheckAggregator healthCheck; + + @Inject + public HealthServiceImpl(HealthCheckAggregator healthCheck) { + this.healthCheck = healthCheck; + } + + @Override + public void check(HealthCheckRequest request, StreamObserver responseObserver) { + try { + if (healthCheck.check().get().isHealthy()) { + responseObserver.onNext( + HealthCheckResponse.newBuilder().setStatus(HealthCheckResponse.ServingStatus.SERVING).build() + ); + } else { + responseObserver.onNext( + HealthCheckResponse.newBuilder().setStatus(HealthCheckResponse.ServingStatus.NOT_SERVING).build() + ); + } + } catch (Exception ex) { + GRPC_HELPER.onError(responseObserver, ex); + } finally { + responseObserver.onCompleted(); + } + } +} diff --git a/grpc-server/src/main/java/com/netflix/conductor/grpc/server/service/MetadataServiceImpl.java b/grpc-server/src/main/java/com/netflix/conductor/grpc/server/service/MetadataServiceImpl.java new file mode 100644 index 0000000000..f47522eaa7 --- /dev/null +++ b/grpc-server/src/main/java/com/netflix/conductor/grpc/server/service/MetadataServiceImpl.java @@ -0,0 +1,115 @@ +package com.netflix.conductor.grpc.server.service; + +import com.netflix.conductor.common.metadata.tasks.TaskDef; +import com.netflix.conductor.common.metadata.workflow.WorkflowDef; +import com.netflix.conductor.core.execution.ApplicationException; +import com.netflix.conductor.grpc.MetadataServiceGrpc; +import com.netflix.conductor.grpc.MetadataServicePb; +import com.netflix.conductor.grpc.ProtoMapper; +import com.netflix.conductor.grpc.WorkflowServicePb; +import com.netflix.conductor.proto.TaskDefPb; +import com.netflix.conductor.proto.WorkflowDefPb; +import com.netflix.conductor.service.MetadataService; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.List; +import java.util.stream.Collectors; + +import javax.inject.Inject; + +import io.grpc.Status; +import io.grpc.stub.StreamObserver; + +public class MetadataServiceImpl extends MetadataServiceGrpc.MetadataServiceImplBase { + private static final Logger LOGGER = LoggerFactory.getLogger(MetadataServiceImpl.class); + private static final ProtoMapper PROTO_MAPPER = ProtoMapper.INSTANCE; + private static final GRPCHelper GRPC_HELPER = new GRPCHelper(LOGGER); + + private final MetadataService service; + + @Inject + public MetadataServiceImpl(MetadataService service) { + this.service = service; + } + + @Override + public void createWorkflow(MetadataServicePb.CreateWorkflowRequest req, StreamObserver response) { + WorkflowDef workflow = 
PROTO_MAPPER.fromProto(req.getWorkflow()); + service.registerWorkflowDef(workflow); + response.onNext(MetadataServicePb.CreateWorkflowResponse.getDefaultInstance()); + response.onCompleted(); + } + + @Override + public void updateWorkflows(MetadataServicePb.UpdateWorkflowsRequest req, StreamObserver response) { + List workflows = req.getDefsList().stream() + .map(PROTO_MAPPER::fromProto).collect(Collectors.toList()); + + service.updateWorkflowDef(workflows); + response.onNext(MetadataServicePb.UpdateWorkflowsResponse.getDefaultInstance()); + response.onCompleted(); + } + + @Override + public void getWorkflow(MetadataServicePb.GetWorkflowRequest req, StreamObserver response) { + try { + WorkflowDef workflowDef = service.getWorkflowDef(req.getName(), GRPC_HELPER.optional(req.getVersion())); + WorkflowDefPb.WorkflowDef workflow = PROTO_MAPPER.toProto(workflowDef); + response.onNext(MetadataServicePb.GetWorkflowResponse.newBuilder() + .setWorkflow(workflow) + .build() + ); + response.onCompleted(); + } catch (ApplicationException e) { + // TODO replace this with gRPC exception interceptor. + response.onError(Status.NOT_FOUND + .withDescription("No such workflow found by name=" + req.getName()) + .asRuntimeException() + ); + } + } + + @Override + public void createTasks(MetadataServicePb.CreateTasksRequest req, StreamObserver response) { + service.registerTaskDef( + req.getDefsList().stream().map(PROTO_MAPPER::fromProto).collect(Collectors.toList()) + ); + response.onNext(MetadataServicePb.CreateTasksResponse.getDefaultInstance()); + response.onCompleted(); + } + + @Override + public void updateTask(MetadataServicePb.UpdateTaskRequest req, StreamObserver response) { + TaskDef task = PROTO_MAPPER.fromProto(req.getTask()); + service.updateTaskDef(task); + response.onNext(MetadataServicePb.UpdateTaskResponse.getDefaultInstance()); + response.onCompleted(); + } + + @Override + public void getTask(MetadataServicePb.GetTaskRequest req, StreamObserver response) { + TaskDef def = service.getTaskDef(req.getTaskType()); + if (def != null) { + TaskDefPb.TaskDef task = PROTO_MAPPER.toProto(def); + response.onNext(MetadataServicePb.GetTaskResponse.newBuilder() + .setTask(task) + .build() + ); + response.onCompleted(); + } else { + response.onError(Status.NOT_FOUND + .withDescription("No such TaskDef found by taskType=" + req.getTaskType()) + .asRuntimeException() + ); + } + } + + @Override + public void deleteTask(MetadataServicePb.DeleteTaskRequest req, StreamObserver response) { + service.unregisterTaskDef(req.getTaskType()); + response.onNext(MetadataServicePb.DeleteTaskResponse.getDefaultInstance()); + response.onCompleted(); + } +} diff --git a/grpc-server/src/main/java/com/netflix/conductor/grpc/server/service/TaskServiceImpl.java b/grpc-server/src/main/java/com/netflix/conductor/grpc/server/service/TaskServiceImpl.java new file mode 100644 index 0000000000..b0c519cc0b --- /dev/null +++ b/grpc-server/src/main/java/com/netflix/conductor/grpc/server/service/TaskServiceImpl.java @@ -0,0 +1,256 @@ +package com.netflix.conductor.grpc.server.service; + +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import com.netflix.conductor.common.metadata.tasks.TaskExecLog; +import com.netflix.conductor.common.metadata.tasks.TaskResult; +import com.netflix.conductor.grpc.ProtoMapper; +import com.netflix.conductor.proto.TaskPb; +import com.netflix.conductor.grpc.TaskServiceGrpc; +import 
com.netflix.conductor.grpc.TaskServicePb; +import io.grpc.Status; +import io.grpc.stub.StreamObserver; + +import com.netflix.conductor.common.metadata.tasks.Task; +import com.netflix.conductor.service.ExecutionService; +import com.netflix.conductor.core.config.Configuration; +import com.netflix.conductor.dao.QueueDAO; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.inject.Inject; + +public class TaskServiceImpl extends TaskServiceGrpc.TaskServiceImplBase { + private static final Logger LOGGER = LoggerFactory.getLogger(TaskServiceImpl.class); + private static final ProtoMapper PROTO_MAPPER = ProtoMapper.INSTANCE; + private static final GRPCHelper GRPC_HELPER = new GRPCHelper(LOGGER); + + private static final int MAX_TASK_COUNT = 100; + private static final int POLL_TIMEOUT_MS = 100; + private static final int MAX_POLL_TIMEOUT_MS = 5000; + + private final ExecutionService taskService; + private final QueueDAO queues; + + @Inject + public TaskServiceImpl(ExecutionService taskService, QueueDAO queues, Configuration config) { + this.taskService = taskService; + this.queues = queues; + } + + @Override + public void poll(TaskServicePb.PollRequest req, StreamObserver response) { + try { + List tasks = taskService.poll(req.getTaskType(), req.getWorkerId(), + GRPC_HELPER.optional(req.getDomain()), 1, POLL_TIMEOUT_MS); + if (!tasks.isEmpty()) { + TaskPb.Task t = PROTO_MAPPER.toProto(tasks.get(0)); + response.onNext(TaskServicePb.PollResponse.newBuilder() + .setTask(t) + .build() + ); + } + response.onCompleted(); + } catch (Exception e) { + GRPC_HELPER.onError(response, e); + } + } + + @Override + public void batchPoll(TaskServicePb.BatchPollRequest req, StreamObserver response) { + final int count = GRPC_HELPER.optionalOr(req.getCount(), 1); + final int timeout = GRPC_HELPER.optionalOr(req.getTimeout(), POLL_TIMEOUT_MS); + + if (timeout > MAX_POLL_TIMEOUT_MS) { + response.onError(Status.INVALID_ARGUMENT + .withDescription("longpoll timeout cannot be longer than " + MAX_POLL_TIMEOUT_MS + "ms") + .asRuntimeException() + ); + return; + } + + try { + List polledTasks = taskService.poll(req.getTaskType(), req.getWorkerId(), + GRPC_HELPER.optional(req.getDomain()), count, timeout); + LOGGER.info("polled tasks: "+polledTasks); + polledTasks.stream().map(PROTO_MAPPER::toProto).forEach(response::onNext); + response.onCompleted(); + } catch (Exception e) { + GRPC_HELPER.onError(response, e); + } + } + + @Override + public void getTasksInProgress(TaskServicePb.TasksInProgressRequest req, StreamObserver response) { + final String startKey = GRPC_HELPER.optional(req.getStartKey()); + final int count = GRPC_HELPER.optionalOr(req.getCount(), MAX_TASK_COUNT); + + try { + response.onNext( + TaskServicePb.TasksInProgressResponse.newBuilder().addAllTasks( + taskService.getTasks(req.getTaskType(), startKey, count) + .stream() + .map(PROTO_MAPPER::toProto)::iterator + ).build() + ); + response.onCompleted(); + } catch (Exception e) { + GRPC_HELPER.onError(response, e); + } + } + + @Override + public void getPendingTaskForWorkflow(TaskServicePb.PendingTaskRequest req, StreamObserver response) { + try { + Task t = taskService.getPendingTaskForWorkflow(req.getTaskRefName(), req.getWorkflowId()); + response.onNext( + TaskServicePb.PendingTaskResponse.newBuilder() + .setTask(PROTO_MAPPER.toProto(t)) + .build() + ); + response.onCompleted(); + } catch (Exception e) { + GRPC_HELPER.onError(response, e); + } + } + + @Override + public void updateTask(TaskServicePb.UpdateTaskRequest req, 
StreamObserver response) { + try { + TaskResult task = PROTO_MAPPER.fromProto(req.getResult()); + taskService.updateTask(task); + + response.onNext( + TaskServicePb.UpdateTaskResponse.newBuilder() + .setTaskId(task.getTaskId()) + .build() + ); + response.onCompleted(); + } catch (Exception e) { + GRPC_HELPER.onError(response, e); + } + } + + @Override + public void ackTask(TaskServicePb.AckTaskRequest req, StreamObserver response) { + try { + boolean ack = taskService.ackTaskReceived(req.getTaskId()); + response.onNext(TaskServicePb.AckTaskResponse.newBuilder().setAck(ack).build()); + response.onCompleted(); + } catch (Exception e) { + GRPC_HELPER.onError(response, e); + } + } + + @Override + public void addLog(TaskServicePb.AddLogRequest req, StreamObserver response) { + taskService.log(req.getTaskId(), req.getLog()); + response.onNext(TaskServicePb.AddLogResponse.getDefaultInstance()); + response.onCompleted(); + } + + @Override + public void getTaskLogs(TaskServicePb.GetTaskLogsRequest req, StreamObserver response) { + List logs = taskService.getTaskLogs(req.getTaskId()); + response.onNext(TaskServicePb.GetTaskLogsResponse.newBuilder() + .addAllLogs(logs.stream().map(PROTO_MAPPER::toProto)::iterator) + .build() + ); + response.onCompleted(); + } + + @Override + public void getTask(TaskServicePb.GetTaskRequest req, StreamObserver response) { + try { + Task task = taskService.getTask(req.getTaskId()); + if (task == null) { + response.onError(Status.NOT_FOUND + .withDescription("No such task found by id="+req.getTaskId()) + .asRuntimeException() + ); + } else { + response.onNext( + TaskServicePb.GetTaskResponse.newBuilder() + .setTask(PROTO_MAPPER.toProto(task)) + .build() + ); + response.onCompleted(); + } + } catch (Exception e) { + GRPC_HELPER.onError(response, e); + } + + } + + @Override + public void removeTaskFromQueue(TaskServicePb.RemoveTaskRequest req, StreamObserver response) { + taskService.removeTaskfromQueue(req.getTaskId()); + response.onNext(TaskServicePb.RemoveTaskResponse.getDefaultInstance()); + response.onCompleted(); + } + + @Override + public void getQueueSizesForTasks(TaskServicePb.QueueSizesRequest req, StreamObserver response) { + Map sizes = taskService.getTaskQueueSizes(req.getTaskTypesList()); + response.onNext( + TaskServicePb.QueueSizesResponse.newBuilder() + .putAllQueueForTask(sizes) + .build() + ); + response.onCompleted(); + } + + @Override + public void getQueueInfo(TaskServicePb.QueueInfoRequest req, StreamObserver response) { + Map queueInfo = queues.queuesDetail().entrySet().stream() + .sorted(Comparator.comparing(Map.Entry::getKey)) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue, (v1, v2) -> v1, HashMap::new)); + + response.onNext( + TaskServicePb.QueueInfoResponse.newBuilder() + .putAllQueues(queueInfo) + .build() + ); + response.onCompleted(); + } + + @Override + public void getQueueAllInfo(TaskServicePb.QueueAllInfoRequest req, StreamObserver response) { + Map>> info = queues.queuesDetailVerbose(); + TaskServicePb.QueueAllInfoResponse.Builder queuesBuilder = TaskServicePb.QueueAllInfoResponse.newBuilder(); + + for (Map.Entry>> queue : info.entrySet()) { + final String queueName = queue.getKey(); + final Map> queueShards = queue.getValue(); + + TaskServicePb.QueueAllInfoResponse.QueueInfo.Builder queueInfoBuilder = + TaskServicePb.QueueAllInfoResponse.QueueInfo.newBuilder(); + + for (Map.Entry> shard : queueShards.entrySet()) { + final String shardName = shard.getKey(); + final Map shardInfo = shard.getValue(); + + // FIXME: make 
shardInfo an actual type + // shardInfo is an immutable map with predefined keys, so we can always + // access 'size' and 'uacked'. It would be better if shardInfo + // were actually a POJO. + queueInfoBuilder.putShards(shardName, + TaskServicePb.QueueAllInfoResponse.ShardInfo.newBuilder() + .setSize(shardInfo.get("size")) + .setUacked(shardInfo.get("uacked")) + .build() + ); + } + + queuesBuilder.putQueues(queueName, queueInfoBuilder.build()); + } + + response.onNext(queuesBuilder.build()); + response.onCompleted(); + } +} diff --git a/grpc-server/src/main/java/com/netflix/conductor/grpc/server/service/WorkflowServiceImpl.java b/grpc-server/src/main/java/com/netflix/conductor/grpc/server/service/WorkflowServiceImpl.java new file mode 100644 index 0000000000..5d1778b489 --- /dev/null +++ b/grpc-server/src/main/java/com/netflix/conductor/grpc/server/service/WorkflowServiceImpl.java @@ -0,0 +1,309 @@ +package com.netflix.conductor.grpc.server.service; + +import com.netflix.conductor.common.metadata.workflow.SkipTaskRequest; +import com.netflix.conductor.common.metadata.workflow.StartWorkflowRequest; +import com.netflix.conductor.common.run.SearchResult; +import com.netflix.conductor.common.run.Workflow; +import com.netflix.conductor.common.run.WorkflowSummary; +import com.netflix.conductor.core.config.Configuration; +import com.netflix.conductor.core.execution.ApplicationException; +import com.netflix.conductor.core.execution.WorkflowExecutor; +import com.netflix.conductor.grpc.ProtoMapper; +import com.netflix.conductor.grpc.SearchPb; +import com.netflix.conductor.grpc.WorkflowServiceGrpc; +import com.netflix.conductor.grpc.WorkflowServicePb; +import com.netflix.conductor.proto.RerunWorkflowRequestPb; +import com.netflix.conductor.proto.StartWorkflowRequestPb; +import com.netflix.conductor.proto.WorkflowPb; +import com.netflix.conductor.service.ExecutionService; +import com.netflix.conductor.service.MetadataService; +import io.grpc.Status; +import io.grpc.stub.StreamObserver; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.inject.Inject; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +public class WorkflowServiceImpl extends WorkflowServiceGrpc.WorkflowServiceImplBase { + private static final Logger LOGGER = LoggerFactory.getLogger(TaskServiceImpl.class); + private static final ProtoMapper PROTO_MAPPER = ProtoMapper.INSTANCE; + private static final GRPCHelper GRPC_HELPER = new GRPCHelper(LOGGER); + + private final WorkflowExecutor executor; + private final ExecutionService service; + private final MetadataService metadata; + private final int maxSearchSize; + + @Inject + public WorkflowServiceImpl(WorkflowExecutor executor, ExecutionService service, MetadataService metadata, Configuration config) { + this.executor = executor; + this.service = service; + this.metadata = metadata; + this.maxSearchSize = config.getIntProperty("workflow.max.search.size", 5_000); + } + + @Override + public void startWorkflow(StartWorkflowRequestPb.StartWorkflowRequest pbRequest, StreamObserver response) { + // TODO: better handling of optional 'version' + final StartWorkflowRequest request = PROTO_MAPPER.fromProto(pbRequest); + + try { + // TODO When moving to Java 9: Use ifPresentOrElse(Consumer action, Runnable emptyAction) + String id; + if (request.getWorkflowDef() == null) { + id = executor.startWorkflow( + request.getName(), + GRPC_HELPER.optional(request.getVersion()), + request.getCorrelationId(), + request.getInput(), + 
request.getExternalInputPayloadStoragePath(), + null, + request.getTaskToDomain()); + } else { + id = executor.startWorkflow( + request.getWorkflowDef(), + request.getInput(), + request.getExternalInputPayloadStoragePath(), + request.getCorrelationId(), + null, + request.getTaskToDomain()); + } + response.onNext(WorkflowServicePb.StartWorkflowResponse.newBuilder() + .setWorkflowId(id) + .build() + ); + response.onCompleted(); + } catch (ApplicationException ae) { + if (ae.getCode().equals(ApplicationException.Code.NOT_FOUND)) { + response.onError(Status.NOT_FOUND + .withDescription("No such workflow found by name="+request.getName()) + .asRuntimeException() + ); + } else { + GRPC_HELPER.onError(response, ae); + } + } + } + + @Override + public void getWorkflows(WorkflowServicePb.GetWorkflowsRequest req, StreamObserver response) { + final String name = req.getName(); + final boolean includeClosed = req.getIncludeClosed(); + final boolean includeTasks = req.getIncludeTasks(); + + WorkflowServicePb.GetWorkflowsResponse.Builder builder = WorkflowServicePb.GetWorkflowsResponse.newBuilder(); + + for (String correlationId : req.getCorrelationIdList()) { + List workflows = service.getWorkflowInstances(name, correlationId, includeClosed, includeTasks); + builder.putWorkflowsById(correlationId, + WorkflowServicePb.GetWorkflowsResponse.Workflows.newBuilder() + .addAllWorkflows(workflows.stream().map(PROTO_MAPPER::toProto)::iterator) + .build() + ); + } + + response.onNext(builder.build()); + response.onCompleted(); + } + + @Override + public void getWorkflowStatus(WorkflowServicePb.GetWorkflowStatusRequest req, StreamObserver response) { + try { + Workflow workflow = service.getExecutionStatus(req.getWorkflowId(), req.getIncludeTasks()); + response.onNext(PROTO_MAPPER.toProto(workflow)); + response.onCompleted(); + } catch (Exception e) { + GRPC_HELPER.onError(response, e); + } + } + + @Override + public void removeWorkflow(WorkflowServicePb.RemoveWorkflowRequest req, StreamObserver response) { + try { + service.removeWorkflow(req.getWorkflodId(), req.getArchiveWorkflow()); + response.onNext(WorkflowServicePb.RemoveWorkflowResponse.getDefaultInstance()); + response.onCompleted(); + } catch (Exception e) { + GRPC_HELPER.onError(response, e); + } + } + + @Override + public void getRunningWorkflows(WorkflowServicePb.GetRunningWorkflowsRequest req, StreamObserver response) { + try { + List workflowIds; + + if (req.getStartTime() != 0 && req.getEndTime() != 0) { + workflowIds = executor.getWorkflows(req.getName(), req.getVersion(), req.getStartTime(), req.getEndTime()); + } else { + workflowIds = executor.getRunningWorkflowIds(req.getName()); + } + + response.onNext( + WorkflowServicePb.GetRunningWorkflowsResponse.newBuilder() + .addAllWorkflowIds(workflowIds) + .build() + ); + response.onCompleted(); + } catch (Exception e) { + GRPC_HELPER.onError(response, e); + } + } + + @Override + public void decideWorkflow(WorkflowServicePb.DecideWorkflowRequest req, StreamObserver response) { + try { + executor.decide(req.getWorkflowId()); + response.onNext(WorkflowServicePb.DecideWorkflowResponse.getDefaultInstance()); + response.onCompleted(); + } catch (Exception e) { + GRPC_HELPER.onError(response, e); + } + } + + @Override + public void pauseWorkflow(WorkflowServicePb.PauseWorkflowRequest req, StreamObserver response) { + try { + executor.pauseWorkflow(req.getWorkflowId()); + response.onNext(WorkflowServicePb.PauseWorkflowResponse.getDefaultInstance()); + response.onCompleted(); + } catch (Exception e) { + 
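            // Every handler in these services follows the same error-handling shape: known
            // "not found" cases are mapped to Status.NOT_FOUND explicitly, and any other exception
            // is handed to GRPC_HELPER.onError(response, e) as below. GRPCHelper itself is not part
            // of this hunk; the following is only a sketch of the kind of exception-to-Status
            // translation such a helper might perform (names are illustrative, not the actual
            // implementation):
            //
            //     static void onError(io.grpc.stub.StreamObserver<?> response, Exception e) {
            //         String description = e.getMessage() != null ? e.getMessage() : e.getClass().getSimpleName();
            //         response.onError(io.grpc.Status.INTERNAL
            //                 .withDescription(description)
            //                 .withCause(e)
            //                 .asRuntimeException());
            //     }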
GRPC_HELPER.onError(response, e); + } + } + + @Override + public void resumeWorkflow(WorkflowServicePb.ResumeWorkflowRequest req, StreamObserver response) { + try { + executor.resumeWorkflow(req.getWorkflowId()); + response.onNext(WorkflowServicePb.ResumeWorkflowResponse.getDefaultInstance()); + response.onCompleted(); + } catch (Exception e) { + GRPC_HELPER.onError(response, e); + } + } + + @Override + public void skipTaskFromWorkflow(WorkflowServicePb.SkipTaskRequest req, StreamObserver response) { + try { + SkipTaskRequest skipTask = PROTO_MAPPER.fromProto(req.getRequest()); + executor.skipTaskFromWorkflow(req.getWorkflowId(), req.getTaskReferenceName(), skipTask); + response.onNext(WorkflowServicePb.SkipTaskResponse.getDefaultInstance()); + response.onCompleted(); + } catch (Exception e) { + GRPC_HELPER.onError(response, e); + } + } + + @Override + public void rerunWorkflow(RerunWorkflowRequestPb.RerunWorkflowRequest req, StreamObserver response) { + try { + String id = executor.rerun(PROTO_MAPPER.fromProto(req)); + response.onNext(WorkflowServicePb.RerunWorkflowResponse.newBuilder() + .setWorkflowId(id) + .build() + ); + response.onCompleted(); + } catch (Exception e) { + GRPC_HELPER.onError(response, e); + } + } + + @Override + public void restartWorkflow(WorkflowServicePb.RestartWorkflowRequest req, StreamObserver response) { + try { + executor.rewind(req.getWorkflowId()); + response.onNext(WorkflowServicePb.RestartWorkflowResponse.getDefaultInstance()); + response.onCompleted(); + } catch (Exception e) { + GRPC_HELPER.onError(response, e); + } + } + + @Override + public void retryWorkflow(WorkflowServicePb.RetryWorkflowRequest req, StreamObserver response) { + try { + executor.retry(req.getWorkflowId()); + response.onNext(WorkflowServicePb.RetryWorkflowResponse.getDefaultInstance()); + response.onCompleted(); + } catch (Exception e) { + GRPC_HELPER.onError(response, e); + } + } + + @Override + public void resetWorkflowCallbacks(WorkflowServicePb.ResetWorkflowCallbacksRequest req, StreamObserver response) { + try { + executor.resetCallbacksForInProgressTasks(req.getWorkflowId()); + response.onNext(WorkflowServicePb.ResetWorkflowCallbacksResponse.getDefaultInstance()); + response.onCompleted(); + } catch (Exception e) { + GRPC_HELPER.onError(response, e); + } + } + + @Override + public void terminateWorkflow(WorkflowServicePb.TerminateWorkflowRequest req, StreamObserver response) { + try { + executor.terminateWorkflow(req.getWorkflowId(), req.getReason()); + response.onNext(WorkflowServicePb.TerminateWorkflowResponse.getDefaultInstance()); + response.onCompleted(); + } catch (Exception e) { + GRPC_HELPER.onError(response, e); + } + } + + private void doSearch(boolean searchByTask, SearchPb.Request req, StreamObserver response) { + final int start = req.getStart(); + final int size = GRPC_HELPER.optionalOr(req.getSize(), maxSearchSize); + final List sort = convertSort(req.getSort()); + final String freeText = GRPC_HELPER.optionalOr(req.getFreeText(), "*"); + final String query = req.getQuery(); + + if (size > maxSearchSize) { + response.onError( + Status.INVALID_ARGUMENT + .withDescription("Cannot return more than "+maxSearchSize+" results") + .asRuntimeException() + ); + return; + } + + SearchResult search; + if (searchByTask) { + search = service.searchWorkflowByTasks(query, freeText, start, size, sort); + } else { + search = service.search(query, freeText, start, size, sort); + } + + response.onNext( + WorkflowServicePb.WorkflowSummarySearchResult.newBuilder() + 
.setTotalHits(search.getTotalHits()) + .addAllResults( + search.getResults().stream().map(PROTO_MAPPER::toProto)::iterator + ).build() + ); + response.onCompleted(); + } + + private List convertSort(String sortStr) { + List list = new ArrayList(); + if(sortStr != null && sortStr.length() != 0){ + list = Arrays.asList(sortStr.split("\\|")); + } + return list; + } + + @Override + public void search(SearchPb.Request request, StreamObserver responseObserver) { + doSearch(false, request, responseObserver); + } + + @Override + public void searchByTasks(SearchPb.Request request, StreamObserver responseObserver) { + doSearch(true, request, responseObserver); + } +} diff --git a/grpc-server/src/test/java/com/netflix/conductor/grpc/server/service/HealthServiceImplTest.java b/grpc-server/src/test/java/com/netflix/conductor/grpc/server/service/HealthServiceImplTest.java new file mode 100644 index 0000000000..8e320f6ddb --- /dev/null +++ b/grpc-server/src/test/java/com/netflix/conductor/grpc/server/service/HealthServiceImplTest.java @@ -0,0 +1,108 @@ +package com.netflix.conductor.grpc.server.service; + +import com.netflix.runtime.health.api.HealthCheckAggregator; +import com.netflix.runtime.health.api.HealthCheckStatus; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +import java.util.concurrent.CompletableFuture; + +import io.grpc.BindableService; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.grpc.health.v1.HealthCheckRequest; +import io.grpc.health.v1.HealthCheckResponse; +import io.grpc.health.v1.HealthGrpc; +import io.grpc.inprocess.InProcessChannelBuilder; +import io.grpc.inprocess.InProcessServerBuilder; +import io.grpc.testing.GrpcCleanupRule; + +import static org.hamcrest.Matchers.hasProperty; +import static org.hamcrest.Matchers.is; +import static org.junit.Assert.assertEquals; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class HealthServiceImplTest { + + @Rule + public final GrpcCleanupRule grpcCleanup = new GrpcCleanupRule(); + + @Rule + public ExpectedException thrown = ExpectedException.none(); + + @Test + public void healthServing() throws Exception { + // Generate a unique in-process server name. + String serverName = InProcessServerBuilder.generateName(); + HealthCheckAggregator hca = mock(HealthCheckAggregator.class); + CompletableFuture hcsf = mock(CompletableFuture.class); + HealthCheckStatus hcs = mock(HealthCheckStatus.class); + when(hcs.isHealthy()).thenReturn(true); + when(hcsf.get()).thenReturn(hcs); + when(hca.check()).thenReturn(hcsf); + HealthServiceImpl healthyService = new HealthServiceImpl(hca); + + addService(serverName, healthyService); + HealthGrpc.HealthBlockingStub blockingStub = HealthGrpc.newBlockingStub( + // Create a client channel and register for automatic graceful shutdown. + grpcCleanup.register(InProcessChannelBuilder.forName(serverName).directExecutor().build())); + + + HealthCheckResponse reply = blockingStub.check(HealthCheckRequest.newBuilder().build()); + + assertEquals(HealthCheckResponse.ServingStatus.SERVING, reply.getStatus()); + } + + @Test + public void healthNotServing() throws Exception { + // Generate a unique in-process server name. 
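        // As in healthServing() above, the HealthCheckAggregator is mocked; here it reports an
        // unhealthy status, so the health endpoint is expected to answer NOT_SERVING.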
+ String serverName = InProcessServerBuilder.generateName(); + HealthCheckAggregator hca = mock(HealthCheckAggregator.class); + CompletableFuture hcsf = mock(CompletableFuture.class); + HealthCheckStatus hcs = mock(HealthCheckStatus.class); + when(hcs.isHealthy()).thenReturn(false); + when(hcsf.get()).thenReturn(hcs); + when(hca.check()).thenReturn(hcsf); + HealthServiceImpl healthyService = new HealthServiceImpl(hca); + + addService(serverName, healthyService); + HealthGrpc.HealthBlockingStub blockingStub = HealthGrpc.newBlockingStub( + // Create a client channel and register for automatic graceful shutdown. + grpcCleanup.register(InProcessChannelBuilder.forName(serverName).directExecutor().build())); + + + HealthCheckResponse reply = blockingStub.check(HealthCheckRequest.newBuilder().build()); + + assertEquals(HealthCheckResponse.ServingStatus.NOT_SERVING, reply.getStatus()); + } + + @Test + public void healthException() throws Exception { + // Generate a unique in-process server name. + String serverName = InProcessServerBuilder.generateName(); + HealthCheckAggregator hca = mock(HealthCheckAggregator.class); + CompletableFuture hcsf = mock(CompletableFuture.class); + when(hcsf.get()).thenThrow(InterruptedException.class); + when(hca.check()).thenReturn(hcsf); + HealthServiceImpl healthyService = new HealthServiceImpl(hca); + + addService(serverName, healthyService); + HealthGrpc.HealthBlockingStub blockingStub = HealthGrpc.newBlockingStub( + // Create a client channel and register for automatic graceful shutdown. + grpcCleanup.register(InProcessChannelBuilder.forName(serverName).directExecutor().build())); + + thrown.expect(StatusRuntimeException.class); + thrown.expect(hasProperty("status", is(Status.INTERNAL))); + blockingStub.check(HealthCheckRequest.newBuilder().build()); + + } + + private void addService(String name, BindableService service) throws Exception { + // Create a server, add service, start, and register for automatic graceful shutdown. 
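        // directExecutor() runs gRPC callbacks on the calling thread rather than a separate
        // executor, which keeps these in-process tests deterministic.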
+ grpcCleanup.register(InProcessServerBuilder + .forName(name).directExecutor().addService(service).build().start()); + } +} diff --git a/grpc/build.gradle b/grpc/build.gradle new file mode 100644 index 0000000000..c653d82a19 --- /dev/null +++ b/grpc/build.gradle @@ -0,0 +1,52 @@ +buildscript { + dependencies { + classpath 'com.google.protobuf:protobuf-gradle-plugin:0.8.5' + } +} + +plugins { + id 'java' + id 'idea' + id "com.google.protobuf" version "0.8.5" +} + +repositories{ + maven { url "https://dl.bintray.com/chaos-systems/mvn" } +} + +dependencies { + compile project(':conductor-common') + compile project(':conductor-core') + + protobuf 'io.chaossystems.grpc:grpc-healthcheck:1.0.+:protos' + compile "com.google.api.grpc:proto-google-common-protos:1.0.0" + compile "io.grpc:grpc-protobuf:${revGrpc}" + compile "io.grpc:grpc-stub:${revGrpc}" + + compile "com.netflix.runtime:health-api:${revHealth}" +} + +protobuf { + protoc { + artifact = "com.google.protobuf:protoc:${revProtoBuf}" + } + plugins { + grpc { + artifact = "io.grpc:protoc-gen-grpc-java:${revGrpc}" + } + } + generateProtoTasks { + all()*.plugins { + grpc {} + } + } +} + +idea { + module { + sourceDirs += file("${projectDir}/build/generated/source/proto/main/java"); + sourceDirs += file("${projectDir}/build/generated/source/proto/main/grpc"); + } +} + +compileJava.dependsOn(tasks.getByPath(":conductor-common:protogen")) diff --git a/grpc/src/main/java/com/netflix/conductor/grpc/AbstractProtoMapper.java b/grpc/src/main/java/com/netflix/conductor/grpc/AbstractProtoMapper.java new file mode 100644 index 0000000000..3b3deebce2 --- /dev/null +++ b/grpc/src/main/java/com/netflix/conductor/grpc/AbstractProtoMapper.java @@ -0,0 +1,1202 @@ +package com.netflix.conductor.grpc; + +import com.google.protobuf.Any; +import com.google.protobuf.Value; +import com.netflix.conductor.common.metadata.events.EventExecution; +import com.netflix.conductor.common.metadata.events.EventHandler; +import com.netflix.conductor.common.metadata.tasks.PollData; +import com.netflix.conductor.common.metadata.tasks.Task; +import com.netflix.conductor.common.metadata.tasks.TaskDef; +import com.netflix.conductor.common.metadata.tasks.TaskExecLog; +import com.netflix.conductor.common.metadata.tasks.TaskResult; +import com.netflix.conductor.common.metadata.workflow.DynamicForkJoinTask; +import com.netflix.conductor.common.metadata.workflow.DynamicForkJoinTaskList; +import com.netflix.conductor.common.metadata.workflow.RerunWorkflowRequest; +import com.netflix.conductor.common.metadata.workflow.SkipTaskRequest; +import com.netflix.conductor.common.metadata.workflow.StartWorkflowRequest; +import com.netflix.conductor.common.metadata.workflow.SubWorkflowParams; +import com.netflix.conductor.common.metadata.workflow.WorkflowDef; +import com.netflix.conductor.common.metadata.workflow.WorkflowTask; +import com.netflix.conductor.common.run.TaskSummary; +import com.netflix.conductor.common.run.Workflow; +import com.netflix.conductor.common.run.WorkflowSummary; +import com.netflix.conductor.proto.DynamicForkJoinTaskListPb; +import com.netflix.conductor.proto.DynamicForkJoinTaskPb; +import com.netflix.conductor.proto.EventExecutionPb; +import com.netflix.conductor.proto.EventHandlerPb; +import com.netflix.conductor.proto.PollDataPb; +import com.netflix.conductor.proto.RerunWorkflowRequestPb; +import com.netflix.conductor.proto.SkipTaskRequestPb; +import com.netflix.conductor.proto.StartWorkflowRequestPb; +import com.netflix.conductor.proto.SubWorkflowParamsPb; +import 
com.netflix.conductor.proto.TaskDefPb; +import com.netflix.conductor.proto.TaskExecLogPb; +import com.netflix.conductor.proto.TaskPb; +import com.netflix.conductor.proto.TaskResultPb; +import com.netflix.conductor.proto.TaskSummaryPb; +import com.netflix.conductor.proto.WorkflowDefPb; +import com.netflix.conductor.proto.WorkflowPb; +import com.netflix.conductor.proto.WorkflowSummaryPb; +import com.netflix.conductor.proto.WorkflowTaskPb; +import java.lang.IllegalArgumentException; +import java.lang.Object; +import java.lang.String; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import javax.annotation.Generated; + +@Generated("com.github.vmg.protogen.ProtoGen") +public abstract class AbstractProtoMapper { + public EventExecutionPb.EventExecution toProto(EventExecution from) { + EventExecutionPb.EventExecution.Builder to = EventExecutionPb.EventExecution.newBuilder(); + if (from.getId() != null) { + to.setId( from.getId() ); + } + if (from.getMessageId() != null) { + to.setMessageId( from.getMessageId() ); + } + if (from.getName() != null) { + to.setName( from.getName() ); + } + if (from.getEvent() != null) { + to.setEvent( from.getEvent() ); + } + to.setCreated( from.getCreated() ); + if (from.getStatus() != null) { + to.setStatus( toProto( from.getStatus() ) ); + } + if (from.getAction() != null) { + to.setAction( toProto( from.getAction() ) ); + } + for (Map.Entry pair : from.getOutput().entrySet()) { + to.putOutput( pair.getKey(), toProto( pair.getValue() ) ); + } + return to.build(); + } + + public EventExecution fromProto(EventExecutionPb.EventExecution from) { + EventExecution to = new EventExecution(); + to.setId( from.getId() ); + to.setMessageId( from.getMessageId() ); + to.setName( from.getName() ); + to.setEvent( from.getEvent() ); + to.setCreated( from.getCreated() ); + to.setStatus( fromProto( from.getStatus() ) ); + to.setAction( fromProto( from.getAction() ) ); + Map outputMap = new HashMap(); + for (Map.Entry pair : from.getOutputMap().entrySet()) { + outputMap.put( pair.getKey(), fromProto( pair.getValue() ) ); + } + to.setOutput(outputMap); + return to; + } + + public EventExecutionPb.EventExecution.Status toProto(EventExecution.Status from) { + EventExecutionPb.EventExecution.Status to; + switch (from) { + case IN_PROGRESS: to = EventExecutionPb.EventExecution.Status.IN_PROGRESS; break; + case COMPLETED: to = EventExecutionPb.EventExecution.Status.COMPLETED; break; + case FAILED: to = EventExecutionPb.EventExecution.Status.FAILED; break; + case SKIPPED: to = EventExecutionPb.EventExecution.Status.SKIPPED; break; + default: throw new IllegalArgumentException("Unexpected enum constant: " + from); + } + return to; + } + + public EventExecution.Status fromProto(EventExecutionPb.EventExecution.Status from) { + EventExecution.Status to; + switch (from) { + case IN_PROGRESS: to = EventExecution.Status.IN_PROGRESS; break; + case COMPLETED: to = EventExecution.Status.COMPLETED; break; + case FAILED: to = EventExecution.Status.FAILED; break; + case SKIPPED: to = EventExecution.Status.SKIPPED; break; + default: throw new IllegalArgumentException("Unexpected enum constant: " + from); + } + return to; + } + + public EventHandlerPb.EventHandler toProto(EventHandler from) { + EventHandlerPb.EventHandler.Builder to = EventHandlerPb.EventHandler.newBuilder(); + if (from.getName() != null) { + to.setName( from.getName() ); + } + if (from.getEvent() != null) { + 
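        // The generated mappers guard every nullable POJO field like this because proto3 messages
        // have no notion of null and the protobuf builder setters throw a NullPointerException
        // when passed a null value.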
to.setEvent( from.getEvent() ); + } + if (from.getCondition() != null) { + to.setCondition( from.getCondition() ); + } + for (EventHandler.Action elem : from.getActions()) { + to.addActions( toProto(elem) ); + } + to.setActive( from.isActive() ); + return to.build(); + } + + public EventHandler fromProto(EventHandlerPb.EventHandler from) { + EventHandler to = new EventHandler(); + to.setName( from.getName() ); + to.setEvent( from.getEvent() ); + to.setCondition( from.getCondition() ); + to.setActions( from.getActionsList().stream().map(this::fromProto).collect(Collectors.toCollection(ArrayList::new)) ); + to.setActive( from.getActive() ); + return to; + } + + public EventHandlerPb.EventHandler.StartWorkflow toProto(EventHandler.StartWorkflow from) { + EventHandlerPb.EventHandler.StartWorkflow.Builder to = EventHandlerPb.EventHandler.StartWorkflow.newBuilder(); + if (from.getName() != null) { + to.setName( from.getName() ); + } + if (from.getVersion() != null) { + to.setVersion( from.getVersion() ); + } + if (from.getCorrelationId() != null) { + to.setCorrelationId( from.getCorrelationId() ); + } + for (Map.Entry pair : from.getInput().entrySet()) { + to.putInput( pair.getKey(), toProto( pair.getValue() ) ); + } + if (from.getInputMessage() != null) { + to.setInputMessage( toProto( from.getInputMessage() ) ); + } + return to.build(); + } + + public EventHandler.StartWorkflow fromProto(EventHandlerPb.EventHandler.StartWorkflow from) { + EventHandler.StartWorkflow to = new EventHandler.StartWorkflow(); + to.setName( from.getName() ); + to.setVersion( from.getVersion() ); + to.setCorrelationId( from.getCorrelationId() ); + Map inputMap = new HashMap(); + for (Map.Entry pair : from.getInputMap().entrySet()) { + inputMap.put( pair.getKey(), fromProto( pair.getValue() ) ); + } + to.setInput(inputMap); + if (from.hasInputMessage()) { + to.setInputMessage( fromProto( from.getInputMessage() ) ); + } + return to; + } + + public EventHandlerPb.EventHandler.TaskDetails toProto(EventHandler.TaskDetails from) { + EventHandlerPb.EventHandler.TaskDetails.Builder to = EventHandlerPb.EventHandler.TaskDetails.newBuilder(); + if (from.getWorkflowId() != null) { + to.setWorkflowId( from.getWorkflowId() ); + } + if (from.getTaskRefName() != null) { + to.setTaskRefName( from.getTaskRefName() ); + } + for (Map.Entry pair : from.getOutput().entrySet()) { + to.putOutput( pair.getKey(), toProto( pair.getValue() ) ); + } + if (from.getOutputMessage() != null) { + to.setOutputMessage( toProto( from.getOutputMessage() ) ); + } + return to.build(); + } + + public EventHandler.TaskDetails fromProto(EventHandlerPb.EventHandler.TaskDetails from) { + EventHandler.TaskDetails to = new EventHandler.TaskDetails(); + to.setWorkflowId( from.getWorkflowId() ); + to.setTaskRefName( from.getTaskRefName() ); + Map outputMap = new HashMap(); + for (Map.Entry pair : from.getOutputMap().entrySet()) { + outputMap.put( pair.getKey(), fromProto( pair.getValue() ) ); + } + to.setOutput(outputMap); + if (from.hasOutputMessage()) { + to.setOutputMessage( fromProto( from.getOutputMessage() ) ); + } + return to; + } + + public EventHandlerPb.EventHandler.Action toProto(EventHandler.Action from) { + EventHandlerPb.EventHandler.Action.Builder to = EventHandlerPb.EventHandler.Action.newBuilder(); + if (from.getAction() != null) { + to.setAction( toProto( from.getAction() ) ); + } + if (from.getStartWorkflow() != null) { + to.setStartWorkflow( toProto( from.getStartWorkflow() ) ); + } + if (from.getCompleteTask() != null) { + to.setCompleteTask( 
toProto( from.getCompleteTask() ) ); + } + if (from.getFailTask() != null) { + to.setFailTask( toProto( from.getFailTask() ) ); + } + to.setExpandInlineJson( from.isExpandInlineJson() ); + return to.build(); + } + + public EventHandler.Action fromProto(EventHandlerPb.EventHandler.Action from) { + EventHandler.Action to = new EventHandler.Action(); + to.setAction( fromProto( from.getAction() ) ); + if (from.hasStartWorkflow()) { + to.setStartWorkflow( fromProto( from.getStartWorkflow() ) ); + } + if (from.hasCompleteTask()) { + to.setCompleteTask( fromProto( from.getCompleteTask() ) ); + } + if (from.hasFailTask()) { + to.setFailTask( fromProto( from.getFailTask() ) ); + } + to.setExpandInlineJson( from.getExpandInlineJson() ); + return to; + } + + public EventHandlerPb.EventHandler.Action.Type toProto(EventHandler.Action.Type from) { + EventHandlerPb.EventHandler.Action.Type to; + switch (from) { + case START_WORKFLOW: to = EventHandlerPb.EventHandler.Action.Type.START_WORKFLOW; break; + case COMPLETE_TASK: to = EventHandlerPb.EventHandler.Action.Type.COMPLETE_TASK; break; + case FAIL_TASK: to = EventHandlerPb.EventHandler.Action.Type.FAIL_TASK; break; + default: throw new IllegalArgumentException("Unexpected enum constant: " + from); + } + return to; + } + + public EventHandler.Action.Type fromProto(EventHandlerPb.EventHandler.Action.Type from) { + EventHandler.Action.Type to; + switch (from) { + case START_WORKFLOW: to = EventHandler.Action.Type.START_WORKFLOW; break; + case COMPLETE_TASK: to = EventHandler.Action.Type.COMPLETE_TASK; break; + case FAIL_TASK: to = EventHandler.Action.Type.FAIL_TASK; break; + default: throw new IllegalArgumentException("Unexpected enum constant: " + from); + } + return to; + } + + public PollDataPb.PollData toProto(PollData from) { + PollDataPb.PollData.Builder to = PollDataPb.PollData.newBuilder(); + if (from.getQueueName() != null) { + to.setQueueName( from.getQueueName() ); + } + if (from.getDomain() != null) { + to.setDomain( from.getDomain() ); + } + if (from.getWorkerId() != null) { + to.setWorkerId( from.getWorkerId() ); + } + to.setLastPollTime( from.getLastPollTime() ); + return to.build(); + } + + public PollData fromProto(PollDataPb.PollData from) { + PollData to = new PollData(); + to.setQueueName( from.getQueueName() ); + to.setDomain( from.getDomain() ); + to.setWorkerId( from.getWorkerId() ); + to.setLastPollTime( from.getLastPollTime() ); + return to; + } + + public TaskPb.Task toProto(Task from) { + TaskPb.Task.Builder to = TaskPb.Task.newBuilder(); + if (from.getTaskType() != null) { + to.setTaskType( from.getTaskType() ); + } + if (from.getStatus() != null) { + to.setStatus( toProto( from.getStatus() ) ); + } + for (Map.Entry pair : from.getInputData().entrySet()) { + to.putInputData( pair.getKey(), toProto( pair.getValue() ) ); + } + if (from.getReferenceTaskName() != null) { + to.setReferenceTaskName( from.getReferenceTaskName() ); + } + to.setRetryCount( from.getRetryCount() ); + to.setSeq( from.getSeq() ); + if (from.getCorrelationId() != null) { + to.setCorrelationId( from.getCorrelationId() ); + } + to.setPollCount( from.getPollCount() ); + if (from.getTaskDefName() != null) { + to.setTaskDefName( from.getTaskDefName() ); + } + to.setScheduledTime( from.getScheduledTime() ); + to.setStartTime( from.getStartTime() ); + to.setEndTime( from.getEndTime() ); + to.setUpdateTime( from.getUpdateTime() ); + to.setStartDelayInSeconds( from.getStartDelayInSeconds() ); + if (from.getRetriedTaskId() != null) { + to.setRetriedTaskId( 
from.getRetriedTaskId() ); + } + to.setRetried( from.isRetried() ); + to.setExecuted( from.isExecuted() ); + to.setCallbackFromWorker( from.isCallbackFromWorker() ); + to.setResponseTimeoutSeconds( from.getResponseTimeoutSeconds() ); + if (from.getWorkflowInstanceId() != null) { + to.setWorkflowInstanceId( from.getWorkflowInstanceId() ); + } + if (from.getWorkflowType() != null) { + to.setWorkflowType( from.getWorkflowType() ); + } + if (from.getTaskId() != null) { + to.setTaskId( from.getTaskId() ); + } + if (from.getReasonForIncompletion() != null) { + to.setReasonForIncompletion( from.getReasonForIncompletion() ); + } + to.setCallbackAfterSeconds( from.getCallbackAfterSeconds() ); + if (from.getWorkerId() != null) { + to.setWorkerId( from.getWorkerId() ); + } + for (Map.Entry pair : from.getOutputData().entrySet()) { + to.putOutputData( pair.getKey(), toProto( pair.getValue() ) ); + } + if (from.getWorkflowTask() != null) { + to.setWorkflowTask( toProto( from.getWorkflowTask() ) ); + } + if (from.getDomain() != null) { + to.setDomain( from.getDomain() ); + } + if (from.getInputMessage() != null) { + to.setInputMessage( toProto( from.getInputMessage() ) ); + } + if (from.getOutputMessage() != null) { + to.setOutputMessage( toProto( from.getOutputMessage() ) ); + } + to.setRateLimitPerFrequency( from.getRateLimitPerFrequency() ); + to.setRateLimitFrequencyInSeconds( from.getRateLimitFrequencyInSeconds() ); + if (from.getExternalInputPayloadStoragePath() != null) { + to.setExternalInputPayloadStoragePath( from.getExternalInputPayloadStoragePath() ); + } + if (from.getExternalOutputPayloadStoragePath() != null) { + to.setExternalOutputPayloadStoragePath( from.getExternalOutputPayloadStoragePath() ); + } + return to.build(); + } + + public Task fromProto(TaskPb.Task from) { + Task to = new Task(); + to.setTaskType( from.getTaskType() ); + to.setStatus( fromProto( from.getStatus() ) ); + Map inputDataMap = new HashMap(); + for (Map.Entry pair : from.getInputDataMap().entrySet()) { + inputDataMap.put( pair.getKey(), fromProto( pair.getValue() ) ); + } + to.setInputData(inputDataMap); + to.setReferenceTaskName( from.getReferenceTaskName() ); + to.setRetryCount( from.getRetryCount() ); + to.setSeq( from.getSeq() ); + to.setCorrelationId( from.getCorrelationId() ); + to.setPollCount( from.getPollCount() ); + to.setTaskDefName( from.getTaskDefName() ); + to.setScheduledTime( from.getScheduledTime() ); + to.setStartTime( from.getStartTime() ); + to.setEndTime( from.getEndTime() ); + to.setUpdateTime( from.getUpdateTime() ); + to.setStartDelayInSeconds( from.getStartDelayInSeconds() ); + to.setRetriedTaskId( from.getRetriedTaskId() ); + to.setRetried( from.getRetried() ); + to.setExecuted( from.getExecuted() ); + to.setCallbackFromWorker( from.getCallbackFromWorker() ); + to.setResponseTimeoutSeconds( from.getResponseTimeoutSeconds() ); + to.setWorkflowInstanceId( from.getWorkflowInstanceId() ); + to.setWorkflowType( from.getWorkflowType() ); + to.setTaskId( from.getTaskId() ); + to.setReasonForIncompletion( from.getReasonForIncompletion() ); + to.setCallbackAfterSeconds( from.getCallbackAfterSeconds() ); + to.setWorkerId( from.getWorkerId() ); + Map outputDataMap = new HashMap(); + for (Map.Entry pair : from.getOutputDataMap().entrySet()) { + outputDataMap.put( pair.getKey(), fromProto( pair.getValue() ) ); + } + to.setOutputData(outputDataMap); + if (from.hasWorkflowTask()) { + to.setWorkflowTask( fromProto( from.getWorkflowTask() ) ); + } + to.setDomain( from.getDomain() ); + if 
(from.hasInputMessage()) { + to.setInputMessage( fromProto( from.getInputMessage() ) ); + } + if (from.hasOutputMessage()) { + to.setOutputMessage( fromProto( from.getOutputMessage() ) ); + } + to.setRateLimitPerFrequency( from.getRateLimitPerFrequency() ); + to.setRateLimitFrequencyInSeconds( from.getRateLimitFrequencyInSeconds() ); + to.setExternalInputPayloadStoragePath( from.getExternalInputPayloadStoragePath() ); + to.setExternalOutputPayloadStoragePath( from.getExternalOutputPayloadStoragePath() ); + return to; + } + + public TaskPb.Task.Status toProto(Task.Status from) { + TaskPb.Task.Status to; + switch (from) { + case IN_PROGRESS: to = TaskPb.Task.Status.IN_PROGRESS; break; + case CANCELED: to = TaskPb.Task.Status.CANCELED; break; + case FAILED: to = TaskPb.Task.Status.FAILED; break; + case FAILED_WITH_TERMINAL_ERROR: to = TaskPb.Task.Status.FAILED_WITH_TERMINAL_ERROR; break; + case COMPLETED: to = TaskPb.Task.Status.COMPLETED; break; + case COMPLETED_WITH_ERRORS: to = TaskPb.Task.Status.COMPLETED_WITH_ERRORS; break; + case SCHEDULED: to = TaskPb.Task.Status.SCHEDULED; break; + case TIMED_OUT: to = TaskPb.Task.Status.TIMED_OUT; break; + case READY_FOR_RERUN: to = TaskPb.Task.Status.READY_FOR_RERUN; break; + case SKIPPED: to = TaskPb.Task.Status.SKIPPED; break; + default: throw new IllegalArgumentException("Unexpected enum constant: " + from); + } + return to; + } + + public Task.Status fromProto(TaskPb.Task.Status from) { + Task.Status to; + switch (from) { + case IN_PROGRESS: to = Task.Status.IN_PROGRESS; break; + case CANCELED: to = Task.Status.CANCELED; break; + case FAILED: to = Task.Status.FAILED; break; + case FAILED_WITH_TERMINAL_ERROR: to = Task.Status.FAILED_WITH_TERMINAL_ERROR; break; + case COMPLETED: to = Task.Status.COMPLETED; break; + case COMPLETED_WITH_ERRORS: to = Task.Status.COMPLETED_WITH_ERRORS; break; + case SCHEDULED: to = Task.Status.SCHEDULED; break; + case TIMED_OUT: to = Task.Status.TIMED_OUT; break; + case READY_FOR_RERUN: to = Task.Status.READY_FOR_RERUN; break; + case SKIPPED: to = Task.Status.SKIPPED; break; + default: throw new IllegalArgumentException("Unexpected enum constant: " + from); + } + return to; + } + + public TaskDefPb.TaskDef toProto(TaskDef from) { + TaskDefPb.TaskDef.Builder to = TaskDefPb.TaskDef.newBuilder(); + if (from.getName() != null) { + to.setName( from.getName() ); + } + if (from.getDescription() != null) { + to.setDescription( from.getDescription() ); + } + to.setRetryCount( from.getRetryCount() ); + to.setTimeoutSeconds( from.getTimeoutSeconds() ); + to.addAllInputKeys( from.getInputKeys() ); + to.addAllOutputKeys( from.getOutputKeys() ); + if (from.getTimeoutPolicy() != null) { + to.setTimeoutPolicy( toProto( from.getTimeoutPolicy() ) ); + } + if (from.getRetryLogic() != null) { + to.setRetryLogic( toProto( from.getRetryLogic() ) ); + } + to.setRetryDelaySeconds( from.getRetryDelaySeconds() ); + to.setResponseTimeoutSeconds( from.getResponseTimeoutSeconds() ); + if (from.getConcurrentExecLimit() != null) { + to.setConcurrentExecLimit( from.getConcurrentExecLimit() ); + } + for (Map.Entry pair : from.getInputTemplate().entrySet()) { + to.putInputTemplate( pair.getKey(), toProto( pair.getValue() ) ); + } + if (from.getRateLimitPerFrequency() != null) { + to.setRateLimitPerFrequency( from.getRateLimitPerFrequency() ); + } + if (from.getRateLimitFrequencyInSeconds() != null) { + to.setRateLimitFrequencyInSeconds( from.getRateLimitFrequencyInSeconds() ); + } + return to.build(); + } + + public TaskDef fromProto(TaskDefPb.TaskDef 
from) { + TaskDef to = new TaskDef(); + to.setName( from.getName() ); + to.setDescription( from.getDescription() ); + to.setRetryCount( from.getRetryCount() ); + to.setTimeoutSeconds( from.getTimeoutSeconds() ); + to.setInputKeys( from.getInputKeysList().stream().collect(Collectors.toCollection(ArrayList::new)) ); + to.setOutputKeys( from.getOutputKeysList().stream().collect(Collectors.toCollection(ArrayList::new)) ); + to.setTimeoutPolicy( fromProto( from.getTimeoutPolicy() ) ); + to.setRetryLogic( fromProto( from.getRetryLogic() ) ); + to.setRetryDelaySeconds( from.getRetryDelaySeconds() ); + to.setResponseTimeoutSeconds( from.getResponseTimeoutSeconds() ); + to.setConcurrentExecLimit( from.getConcurrentExecLimit() ); + Map inputTemplateMap = new HashMap(); + for (Map.Entry pair : from.getInputTemplateMap().entrySet()) { + inputTemplateMap.put( pair.getKey(), fromProto( pair.getValue() ) ); + } + to.setInputTemplate(inputTemplateMap); + to.setRateLimitPerFrequency( from.getRateLimitPerFrequency() ); + to.setRateLimitFrequencyInSeconds( from.getRateLimitFrequencyInSeconds() ); + return to; + } + + public TaskDefPb.TaskDef.RetryLogic toProto(TaskDef.RetryLogic from) { + TaskDefPb.TaskDef.RetryLogic to; + switch (from) { + case FIXED: to = TaskDefPb.TaskDef.RetryLogic.FIXED; break; + case EXPONENTIAL_BACKOFF: to = TaskDefPb.TaskDef.RetryLogic.EXPONENTIAL_BACKOFF; break; + default: throw new IllegalArgumentException("Unexpected enum constant: " + from); + } + return to; + } + + public TaskDef.RetryLogic fromProto(TaskDefPb.TaskDef.RetryLogic from) { + TaskDef.RetryLogic to; + switch (from) { + case FIXED: to = TaskDef.RetryLogic.FIXED; break; + case EXPONENTIAL_BACKOFF: to = TaskDef.RetryLogic.EXPONENTIAL_BACKOFF; break; + default: throw new IllegalArgumentException("Unexpected enum constant: " + from); + } + return to; + } + + public TaskDefPb.TaskDef.TimeoutPolicy toProto(TaskDef.TimeoutPolicy from) { + TaskDefPb.TaskDef.TimeoutPolicy to; + switch (from) { + case RETRY: to = TaskDefPb.TaskDef.TimeoutPolicy.RETRY; break; + case TIME_OUT_WF: to = TaskDefPb.TaskDef.TimeoutPolicy.TIME_OUT_WF; break; + case ALERT_ONLY: to = TaskDefPb.TaskDef.TimeoutPolicy.ALERT_ONLY; break; + default: throw new IllegalArgumentException("Unexpected enum constant: " + from); + } + return to; + } + + public TaskDef.TimeoutPolicy fromProto(TaskDefPb.TaskDef.TimeoutPolicy from) { + TaskDef.TimeoutPolicy to; + switch (from) { + case RETRY: to = TaskDef.TimeoutPolicy.RETRY; break; + case TIME_OUT_WF: to = TaskDef.TimeoutPolicy.TIME_OUT_WF; break; + case ALERT_ONLY: to = TaskDef.TimeoutPolicy.ALERT_ONLY; break; + default: throw new IllegalArgumentException("Unexpected enum constant: " + from); + } + return to; + } + + public TaskExecLogPb.TaskExecLog toProto(TaskExecLog from) { + TaskExecLogPb.TaskExecLog.Builder to = TaskExecLogPb.TaskExecLog.newBuilder(); + if (from.getLog() != null) { + to.setLog( from.getLog() ); + } + if (from.getTaskId() != null) { + to.setTaskId( from.getTaskId() ); + } + to.setCreatedTime( from.getCreatedTime() ); + return to.build(); + } + + public TaskExecLog fromProto(TaskExecLogPb.TaskExecLog from) { + TaskExecLog to = new TaskExecLog(); + to.setLog( from.getLog() ); + to.setTaskId( from.getTaskId() ); + to.setCreatedTime( from.getCreatedTime() ); + return to; + } + + public TaskResultPb.TaskResult toProto(TaskResult from) { + TaskResultPb.TaskResult.Builder to = TaskResultPb.TaskResult.newBuilder(); + if (from.getWorkflowInstanceId() != null) { + to.setWorkflowInstanceId( 
from.getWorkflowInstanceId() ); + } + if (from.getTaskId() != null) { + to.setTaskId( from.getTaskId() ); + } + if (from.getReasonForIncompletion() != null) { + to.setReasonForIncompletion( from.getReasonForIncompletion() ); + } + to.setCallbackAfterSeconds( from.getCallbackAfterSeconds() ); + if (from.getWorkerId() != null) { + to.setWorkerId( from.getWorkerId() ); + } + if (from.getStatus() != null) { + to.setStatus( toProto( from.getStatus() ) ); + } + for (Map.Entry pair : from.getOutputData().entrySet()) { + to.putOutputData( pair.getKey(), toProto( pair.getValue() ) ); + } + if (from.getOutputMessage() != null) { + to.setOutputMessage( toProto( from.getOutputMessage() ) ); + } + return to.build(); + } + + public TaskResult fromProto(TaskResultPb.TaskResult from) { + TaskResult to = new TaskResult(); + to.setWorkflowInstanceId( from.getWorkflowInstanceId() ); + to.setTaskId( from.getTaskId() ); + to.setReasonForIncompletion( from.getReasonForIncompletion() ); + to.setCallbackAfterSeconds( from.getCallbackAfterSeconds() ); + to.setWorkerId( from.getWorkerId() ); + to.setStatus( fromProto( from.getStatus() ) ); + Map outputDataMap = new HashMap(); + for (Map.Entry pair : from.getOutputDataMap().entrySet()) { + outputDataMap.put( pair.getKey(), fromProto( pair.getValue() ) ); + } + to.setOutputData(outputDataMap); + if (from.hasOutputMessage()) { + to.setOutputMessage( fromProto( from.getOutputMessage() ) ); + } + return to; + } + + public TaskResultPb.TaskResult.Status toProto(TaskResult.Status from) { + TaskResultPb.TaskResult.Status to; + switch (from) { + case IN_PROGRESS: to = TaskResultPb.TaskResult.Status.IN_PROGRESS; break; + case FAILED: to = TaskResultPb.TaskResult.Status.FAILED; break; + case FAILED_WITH_TERMINAL_ERROR: to = TaskResultPb.TaskResult.Status.FAILED_WITH_TERMINAL_ERROR; break; + case COMPLETED: to = TaskResultPb.TaskResult.Status.COMPLETED; break; + case SCHEDULED: to = TaskResultPb.TaskResult.Status.SCHEDULED; break; + default: throw new IllegalArgumentException("Unexpected enum constant: " + from); + } + return to; + } + + public TaskResult.Status fromProto(TaskResultPb.TaskResult.Status from) { + TaskResult.Status to; + switch (from) { + case IN_PROGRESS: to = TaskResult.Status.IN_PROGRESS; break; + case FAILED: to = TaskResult.Status.FAILED; break; + case FAILED_WITH_TERMINAL_ERROR: to = TaskResult.Status.FAILED_WITH_TERMINAL_ERROR; break; + case COMPLETED: to = TaskResult.Status.COMPLETED; break; + case SCHEDULED: to = TaskResult.Status.SCHEDULED; break; + default: throw new IllegalArgumentException("Unexpected enum constant: " + from); + } + return to; + } + + public DynamicForkJoinTaskPb.DynamicForkJoinTask toProto(DynamicForkJoinTask from) { + DynamicForkJoinTaskPb.DynamicForkJoinTask.Builder to = DynamicForkJoinTaskPb.DynamicForkJoinTask.newBuilder(); + if (from.getTaskName() != null) { + to.setTaskName( from.getTaskName() ); + } + if (from.getWorkflowName() != null) { + to.setWorkflowName( from.getWorkflowName() ); + } + if (from.getReferenceName() != null) { + to.setReferenceName( from.getReferenceName() ); + } + for (Map.Entry pair : from.getInput().entrySet()) { + to.putInput( pair.getKey(), toProto( pair.getValue() ) ); + } + if (from.getType() != null) { + to.setType( from.getType() ); + } + return to.build(); + } + + public DynamicForkJoinTask fromProto(DynamicForkJoinTaskPb.DynamicForkJoinTask from) { + DynamicForkJoinTask to = new DynamicForkJoinTask(); + to.setTaskName( from.getTaskName() ); + to.setWorkflowName( from.getWorkflowName() ); + 
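        // Note the asymmetry with toProto(): protobuf getters never return null (unset strings come
        // back as "" and unset messages as default instances), so this direction needs no null guards.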
to.setReferenceName( from.getReferenceName() ); + Map inputMap = new HashMap(); + for (Map.Entry pair : from.getInputMap().entrySet()) { + inputMap.put( pair.getKey(), fromProto( pair.getValue() ) ); + } + to.setInput(inputMap); + to.setType( from.getType() ); + return to; + } + + public DynamicForkJoinTaskListPb.DynamicForkJoinTaskList toProto(DynamicForkJoinTaskList from) { + DynamicForkJoinTaskListPb.DynamicForkJoinTaskList.Builder to = DynamicForkJoinTaskListPb.DynamicForkJoinTaskList.newBuilder(); + for (DynamicForkJoinTask elem : from.getDynamicTasks()) { + to.addDynamicTasks( toProto(elem) ); + } + return to.build(); + } + + public DynamicForkJoinTaskList fromProto( + DynamicForkJoinTaskListPb.DynamicForkJoinTaskList from) { + DynamicForkJoinTaskList to = new DynamicForkJoinTaskList(); + to.setDynamicTasks( from.getDynamicTasksList().stream().map(this::fromProto).collect(Collectors.toCollection(ArrayList::new)) ); + return to; + } + + public RerunWorkflowRequestPb.RerunWorkflowRequest toProto(RerunWorkflowRequest from) { + RerunWorkflowRequestPb.RerunWorkflowRequest.Builder to = RerunWorkflowRequestPb.RerunWorkflowRequest.newBuilder(); + if (from.getReRunFromWorkflowId() != null) { + to.setReRunFromWorkflowId( from.getReRunFromWorkflowId() ); + } + for (Map.Entry pair : from.getWorkflowInput().entrySet()) { + to.putWorkflowInput( pair.getKey(), toProto( pair.getValue() ) ); + } + if (from.getReRunFromTaskId() != null) { + to.setReRunFromTaskId( from.getReRunFromTaskId() ); + } + for (Map.Entry pair : from.getTaskInput().entrySet()) { + to.putTaskInput( pair.getKey(), toProto( pair.getValue() ) ); + } + if (from.getCorrelationId() != null) { + to.setCorrelationId( from.getCorrelationId() ); + } + return to.build(); + } + + public RerunWorkflowRequest fromProto(RerunWorkflowRequestPb.RerunWorkflowRequest from) { + RerunWorkflowRequest to = new RerunWorkflowRequest(); + to.setReRunFromWorkflowId( from.getReRunFromWorkflowId() ); + Map workflowInputMap = new HashMap(); + for (Map.Entry pair : from.getWorkflowInputMap().entrySet()) { + workflowInputMap.put( pair.getKey(), fromProto( pair.getValue() ) ); + } + to.setWorkflowInput(workflowInputMap); + to.setReRunFromTaskId( from.getReRunFromTaskId() ); + Map taskInputMap = new HashMap(); + for (Map.Entry pair : from.getTaskInputMap().entrySet()) { + taskInputMap.put( pair.getKey(), fromProto( pair.getValue() ) ); + } + to.setTaskInput(taskInputMap); + to.setCorrelationId( from.getCorrelationId() ); + return to; + } + + public SkipTaskRequest fromProto(SkipTaskRequestPb.SkipTaskRequest from) { + SkipTaskRequest to = new SkipTaskRequest(); + Map taskInputMap = new HashMap(); + for (Map.Entry pair : from.getTaskInputMap().entrySet()) { + taskInputMap.put( pair.getKey(), fromProto( pair.getValue() ) ); + } + to.setTaskInput(taskInputMap); + Map taskOutputMap = new HashMap(); + for (Map.Entry pair : from.getTaskOutputMap().entrySet()) { + taskOutputMap.put( pair.getKey(), fromProto( pair.getValue() ) ); + } + to.setTaskOutput(taskOutputMap); + if (from.hasTaskInputMessage()) { + to.setTaskInputMessage( fromProto( from.getTaskInputMessage() ) ); + } + if (from.hasTaskOutputMessage()) { + to.setTaskOutputMessage( fromProto( from.getTaskOutputMessage() ) ); + } + return to; + } + + public StartWorkflowRequestPb.StartWorkflowRequest toProto(StartWorkflowRequest from) { + StartWorkflowRequestPb.StartWorkflowRequest.Builder to = StartWorkflowRequestPb.StartWorkflowRequest.newBuilder(); + if (from.getName() != null) { + to.setName( from.getName() ); + 
} + if (from.getVersion() != null) { + to.setVersion( from.getVersion() ); + } + if (from.getCorrelationId() != null) { + to.setCorrelationId( from.getCorrelationId() ); + } + for (Map.Entry pair : from.getInput().entrySet()) { + to.putInput( pair.getKey(), toProto( pair.getValue() ) ); + } + to.putAllTaskToDomain( from.getTaskToDomain() ); + if (from.getWorkflowDef() != null) { + to.setWorkflowDef( toProto( from.getWorkflowDef() ) ); + } + if (from.getExternalInputPayloadStoragePath() != null) { + to.setExternalInputPayloadStoragePath( from.getExternalInputPayloadStoragePath() ); + } + return to.build(); + } + + public StartWorkflowRequest fromProto(StartWorkflowRequestPb.StartWorkflowRequest from) { + StartWorkflowRequest to = new StartWorkflowRequest(); + to.setName( from.getName() ); + to.setVersion( from.getVersion() ); + to.setCorrelationId( from.getCorrelationId() ); + Map inputMap = new HashMap(); + for (Map.Entry pair : from.getInputMap().entrySet()) { + inputMap.put( pair.getKey(), fromProto( pair.getValue() ) ); + } + to.setInput(inputMap); + to.setTaskToDomain( from.getTaskToDomainMap() ); + if (from.hasWorkflowDef()) { + to.setWorkflowDef( fromProto( from.getWorkflowDef() ) ); + } + to.setExternalInputPayloadStoragePath( from.getExternalInputPayloadStoragePath() ); + return to; + } + + public SubWorkflowParamsPb.SubWorkflowParams toProto(SubWorkflowParams from) { + SubWorkflowParamsPb.SubWorkflowParams.Builder to = SubWorkflowParamsPb.SubWorkflowParams.newBuilder(); + if (from.getName() != null) { + to.setName( from.getName() ); + } + if (from.getVersion() != null) { + to.setVersion( from.getVersion() ); + } + return to.build(); + } + + public SubWorkflowParams fromProto(SubWorkflowParamsPb.SubWorkflowParams from) { + SubWorkflowParams to = new SubWorkflowParams(); + to.setName( from.getName() ); + to.setVersion( from.getVersion() ); + return to; + } + + public WorkflowDefPb.WorkflowDef toProto(WorkflowDef from) { + WorkflowDefPb.WorkflowDef.Builder to = WorkflowDefPb.WorkflowDef.newBuilder(); + if (from.getName() != null) { + to.setName( from.getName() ); + } + if (from.getDescription() != null) { + to.setDescription( from.getDescription() ); + } + to.setVersion( from.getVersion() ); + for (WorkflowTask elem : from.getTasks()) { + to.addTasks( toProto(elem) ); + } + to.addAllInputParameters( from.getInputParameters() ); + for (Map.Entry pair : from.getOutputParameters().entrySet()) { + to.putOutputParameters( pair.getKey(), toProto( pair.getValue() ) ); + } + if (from.getFailureWorkflow() != null) { + to.setFailureWorkflow( from.getFailureWorkflow() ); + } + to.setSchemaVersion( from.getSchemaVersion() ); + to.setRestartable( from.isRestartable() ); + return to.build(); + } + + public WorkflowDef fromProto(WorkflowDefPb.WorkflowDef from) { + WorkflowDef to = new WorkflowDef(); + to.setName( from.getName() ); + to.setDescription( from.getDescription() ); + to.setVersion( from.getVersion() ); + to.setTasks( from.getTasksList().stream().map(this::fromProto).collect(Collectors.toCollection(ArrayList::new)) ); + to.setInputParameters( from.getInputParametersList().stream().collect(Collectors.toCollection(ArrayList::new)) ); + Map outputParametersMap = new HashMap(); + for (Map.Entry pair : from.getOutputParametersMap().entrySet()) { + outputParametersMap.put( pair.getKey(), fromProto( pair.getValue() ) ); + } + to.setOutputParameters(outputParametersMap); + to.setFailureWorkflow( from.getFailureWorkflow() ); + to.setSchemaVersion( from.getSchemaVersion() ); + to.setRestartable( 
from.getRestartable() ); + return to; + } + + public WorkflowTaskPb.WorkflowTask toProto(WorkflowTask from) { + WorkflowTaskPb.WorkflowTask.Builder to = WorkflowTaskPb.WorkflowTask.newBuilder(); + if (from.getName() != null) { + to.setName( from.getName() ); + } + if (from.getTaskReferenceName() != null) { + to.setTaskReferenceName( from.getTaskReferenceName() ); + } + if (from.getDescription() != null) { + to.setDescription( from.getDescription() ); + } + for (Map.Entry pair : from.getInputParameters().entrySet()) { + to.putInputParameters( pair.getKey(), toProto( pair.getValue() ) ); + } + if (from.getType() != null) { + to.setType( from.getType() ); + } + if (from.getDynamicTaskNameParam() != null) { + to.setDynamicTaskNameParam( from.getDynamicTaskNameParam() ); + } + if (from.getCaseValueParam() != null) { + to.setCaseValueParam( from.getCaseValueParam() ); + } + if (from.getCaseExpression() != null) { + to.setCaseExpression( from.getCaseExpression() ); + } + for (Map.Entry> pair : from.getDecisionCases().entrySet()) { + to.putDecisionCases( pair.getKey(), toProto( pair.getValue() ) ); + } + if (from.getDynamicForkTasksParam() != null) { + to.setDynamicForkTasksParam( from.getDynamicForkTasksParam() ); + } + if (from.getDynamicForkTasksInputParamName() != null) { + to.setDynamicForkTasksInputParamName( from.getDynamicForkTasksInputParamName() ); + } + for (WorkflowTask elem : from.getDefaultCase()) { + to.addDefaultCase( toProto(elem) ); + } + for (List elem : from.getForkTasks()) { + to.addForkTasks( toProto(elem) ); + } + to.setStartDelay( from.getStartDelay() ); + if (from.getSubWorkflowParam() != null) { + to.setSubWorkflowParam( toProto( from.getSubWorkflowParam() ) ); + } + to.addAllJoinOn( from.getJoinOn() ); + if (from.getSink() != null) { + to.setSink( from.getSink() ); + } + to.setOptional( from.isOptional() ); + if (from.getTaskDefinition() != null) { + to.setTaskDefinition( toProto( from.getTaskDefinition() ) ); + } + if (from.isRateLimited() != null) { + to.setRateLimited( from.isRateLimited() ); + } + return to.build(); + } + + public WorkflowTask fromProto(WorkflowTaskPb.WorkflowTask from) { + WorkflowTask to = new WorkflowTask(); + to.setName( from.getName() ); + to.setTaskReferenceName( from.getTaskReferenceName() ); + to.setDescription( from.getDescription() ); + Map inputParametersMap = new HashMap(); + for (Map.Entry pair : from.getInputParametersMap().entrySet()) { + inputParametersMap.put( pair.getKey(), fromProto( pair.getValue() ) ); + } + to.setInputParameters(inputParametersMap); + to.setType( from.getType() ); + to.setDynamicTaskNameParam( from.getDynamicTaskNameParam() ); + to.setCaseValueParam( from.getCaseValueParam() ); + to.setCaseExpression( from.getCaseExpression() ); + Map> decisionCasesMap = new HashMap>(); + for (Map.Entry pair : from.getDecisionCasesMap().entrySet()) { + decisionCasesMap.put( pair.getKey(), fromProto( pair.getValue() ) ); + } + to.setDecisionCases(decisionCasesMap); + to.setDynamicForkTasksParam( from.getDynamicForkTasksParam() ); + to.setDynamicForkTasksInputParamName( from.getDynamicForkTasksInputParamName() ); + to.setDefaultCase( from.getDefaultCaseList().stream().map(this::fromProto).collect(Collectors.toCollection(ArrayList::new)) ); + to.setForkTasks( from.getForkTasksList().stream().map(this::fromProto).collect(Collectors.toCollection(ArrayList::new)) ); + to.setStartDelay( from.getStartDelay() ); + if (from.hasSubWorkflowParam()) { + to.setSubWorkflowParam( fromProto( from.getSubWorkflowParam() ) ); + } + to.setJoinOn( 
from.getJoinOnList().stream().collect(Collectors.toCollection(ArrayList::new)) ); + to.setSink( from.getSink() ); + to.setOptional( from.getOptional() ); + if (from.hasTaskDefinition()) { + to.setTaskDefinition( fromProto( from.getTaskDefinition() ) ); + } + to.setRateLimited( from.getRateLimited() ); + return to; + } + + public TaskSummaryPb.TaskSummary toProto(TaskSummary from) { + TaskSummaryPb.TaskSummary.Builder to = TaskSummaryPb.TaskSummary.newBuilder(); + if (from.getWorkflowId() != null) { + to.setWorkflowId( from.getWorkflowId() ); + } + if (from.getWorkflowType() != null) { + to.setWorkflowType( from.getWorkflowType() ); + } + if (from.getCorrelationId() != null) { + to.setCorrelationId( from.getCorrelationId() ); + } + if (from.getScheduledTime() != null) { + to.setScheduledTime( from.getScheduledTime() ); + } + if (from.getStartTime() != null) { + to.setStartTime( from.getStartTime() ); + } + if (from.getUpdateTime() != null) { + to.setUpdateTime( from.getUpdateTime() ); + } + if (from.getEndTime() != null) { + to.setEndTime( from.getEndTime() ); + } + if (from.getStatus() != null) { + to.setStatus( toProto( from.getStatus() ) ); + } + if (from.getReasonForIncompletion() != null) { + to.setReasonForIncompletion( from.getReasonForIncompletion() ); + } + to.setExecutionTime( from.getExecutionTime() ); + to.setQueueWaitTime( from.getQueueWaitTime() ); + if (from.getTaskDefName() != null) { + to.setTaskDefName( from.getTaskDefName() ); + } + if (from.getTaskType() != null) { + to.setTaskType( from.getTaskType() ); + } + if (from.getInput() != null) { + to.setInput( from.getInput() ); + } + if (from.getOutput() != null) { + to.setOutput( from.getOutput() ); + } + if (from.getTaskId() != null) { + to.setTaskId( from.getTaskId() ); + } + return to.build(); + } + + public WorkflowPb.Workflow toProto(Workflow from) { + WorkflowPb.Workflow.Builder to = WorkflowPb.Workflow.newBuilder(); + if (from.getStatus() != null) { + to.setStatus( toProto( from.getStatus() ) ); + } + to.setEndTime( from.getEndTime() ); + if (from.getWorkflowId() != null) { + to.setWorkflowId( from.getWorkflowId() ); + } + if (from.getParentWorkflowId() != null) { + to.setParentWorkflowId( from.getParentWorkflowId() ); + } + if (from.getParentWorkflowTaskId() != null) { + to.setParentWorkflowTaskId( from.getParentWorkflowTaskId() ); + } + for (Task elem : from.getTasks()) { + to.addTasks( toProto(elem) ); + } + for (Map.Entry pair : from.getInput().entrySet()) { + to.putInput( pair.getKey(), toProto( pair.getValue() ) ); + } + for (Map.Entry pair : from.getOutput().entrySet()) { + to.putOutput( pair.getKey(), toProto( pair.getValue() ) ); + } + if (from.getWorkflowType() != null) { + to.setWorkflowType( from.getWorkflowType() ); + } + to.setVersion( from.getVersion() ); + if (from.getCorrelationId() != null) { + to.setCorrelationId( from.getCorrelationId() ); + } + if (from.getReRunFromWorkflowId() != null) { + to.setReRunFromWorkflowId( from.getReRunFromWorkflowId() ); + } + if (from.getReasonForIncompletion() != null) { + to.setReasonForIncompletion( from.getReasonForIncompletion() ); + } + to.setSchemaVersion( from.getSchemaVersion() ); + if (from.getEvent() != null) { + to.setEvent( from.getEvent() ); + } + to.putAllTaskToDomain( from.getTaskToDomain() ); + to.addAllFailedReferenceTaskNames( from.getFailedReferenceTaskNames() ); + if (from.getWorkflowDefinition() != null) { + to.setWorkflowDefinition( toProto( from.getWorkflowDefinition() ) ); + } + if (from.getExternalInputPayloadStoragePath() != null) { + 
to.setExternalInputPayloadStoragePath( from.getExternalInputPayloadStoragePath() ); + } + if (from.getExternalOutputPayloadStoragePath() != null) { + to.setExternalOutputPayloadStoragePath( from.getExternalOutputPayloadStoragePath() ); + } + return to.build(); + } + + public Workflow fromProto(WorkflowPb.Workflow from) { + Workflow to = new Workflow(); + to.setStatus( fromProto( from.getStatus() ) ); + to.setEndTime( from.getEndTime() ); + to.setWorkflowId( from.getWorkflowId() ); + to.setParentWorkflowId( from.getParentWorkflowId() ); + to.setParentWorkflowTaskId( from.getParentWorkflowTaskId() ); + to.setTasks( from.getTasksList().stream().map(this::fromProto).collect(Collectors.toCollection(ArrayList::new)) ); + Map inputMap = new HashMap(); + for (Map.Entry pair : from.getInputMap().entrySet()) { + inputMap.put( pair.getKey(), fromProto( pair.getValue() ) ); + } + to.setInput(inputMap); + Map outputMap = new HashMap(); + for (Map.Entry pair : from.getOutputMap().entrySet()) { + outputMap.put( pair.getKey(), fromProto( pair.getValue() ) ); + } + to.setOutput(outputMap); + to.setWorkflowType( from.getWorkflowType() ); + to.setVersion( from.getVersion() ); + to.setCorrelationId( from.getCorrelationId() ); + to.setReRunFromWorkflowId( from.getReRunFromWorkflowId() ); + to.setReasonForIncompletion( from.getReasonForIncompletion() ); + to.setSchemaVersion( from.getSchemaVersion() ); + to.setEvent( from.getEvent() ); + to.setTaskToDomain( from.getTaskToDomainMap() ); + to.setFailedReferenceTaskNames( from.getFailedReferenceTaskNamesList().stream().collect(Collectors.toCollection(HashSet::new)) ); + if (from.hasWorkflowDefinition()) { + to.setWorkflowDefinition( fromProto( from.getWorkflowDefinition() ) ); + } + to.setExternalInputPayloadStoragePath( from.getExternalInputPayloadStoragePath() ); + to.setExternalOutputPayloadStoragePath( from.getExternalOutputPayloadStoragePath() ); + return to; + } + + public WorkflowPb.Workflow.WorkflowStatus toProto(Workflow.WorkflowStatus from) { + WorkflowPb.Workflow.WorkflowStatus to; + switch (from) { + case RUNNING: to = WorkflowPb.Workflow.WorkflowStatus.RUNNING; break; + case COMPLETED: to = WorkflowPb.Workflow.WorkflowStatus.COMPLETED; break; + case FAILED: to = WorkflowPb.Workflow.WorkflowStatus.FAILED; break; + case TIMED_OUT: to = WorkflowPb.Workflow.WorkflowStatus.TIMED_OUT; break; + case TERMINATED: to = WorkflowPb.Workflow.WorkflowStatus.TERMINATED; break; + case PAUSED: to = WorkflowPb.Workflow.WorkflowStatus.PAUSED; break; + default: throw new IllegalArgumentException("Unexpected enum constant: " + from); + } + return to; + } + + public Workflow.WorkflowStatus fromProto(WorkflowPb.Workflow.WorkflowStatus from) { + Workflow.WorkflowStatus to; + switch (from) { + case RUNNING: to = Workflow.WorkflowStatus.RUNNING; break; + case COMPLETED: to = Workflow.WorkflowStatus.COMPLETED; break; + case FAILED: to = Workflow.WorkflowStatus.FAILED; break; + case TIMED_OUT: to = Workflow.WorkflowStatus.TIMED_OUT; break; + case TERMINATED: to = Workflow.WorkflowStatus.TERMINATED; break; + case PAUSED: to = Workflow.WorkflowStatus.PAUSED; break; + default: throw new IllegalArgumentException("Unexpected enum constant: " + from); + } + return to; + } + + public WorkflowSummaryPb.WorkflowSummary toProto(WorkflowSummary from) { + WorkflowSummaryPb.WorkflowSummary.Builder to = WorkflowSummaryPb.WorkflowSummary.newBuilder(); + if (from.getWorkflowType() != null) { + to.setWorkflowType( from.getWorkflowType() ); + } + to.setVersion( from.getVersion() ); + if 
(from.getWorkflowId() != null) { + to.setWorkflowId( from.getWorkflowId() ); + } + if (from.getCorrelationId() != null) { + to.setCorrelationId( from.getCorrelationId() ); + } + if (from.getStartTime() != null) { + to.setStartTime( from.getStartTime() ); + } + if (from.getUpdateTime() != null) { + to.setUpdateTime( from.getUpdateTime() ); + } + if (from.getEndTime() != null) { + to.setEndTime( from.getEndTime() ); + } + if (from.getStatus() != null) { + to.setStatus( toProto( from.getStatus() ) ); + } + if (from.getInput() != null) { + to.setInput( from.getInput() ); + } + if (from.getOutput() != null) { + to.setOutput( from.getOutput() ); + } + if (from.getReasonForIncompletion() != null) { + to.setReasonForIncompletion( from.getReasonForIncompletion() ); + } + to.setExecutionTime( from.getExecutionTime() ); + if (from.getEvent() != null) { + to.setEvent( from.getEvent() ); + } + if (from.getFailedReferenceTaskNames() != null) { + to.setFailedReferenceTaskNames( from.getFailedReferenceTaskNames() ); + } + return to.build(); + } + + public WorkflowSummary fromProto(WorkflowSummaryPb.WorkflowSummary from) { + WorkflowSummary to = new WorkflowSummary(); + to.setWorkflowType( from.getWorkflowType() ); + to.setVersion( from.getVersion() ); + to.setWorkflowId( from.getWorkflowId() ); + to.setCorrelationId( from.getCorrelationId() ); + to.setStartTime( from.getStartTime() ); + to.setUpdateTime( from.getUpdateTime() ); + to.setEndTime( from.getEndTime() ); + to.setStatus( fromProto( from.getStatus() ) ); + to.setInput( from.getInput() ); + to.setOutput( from.getOutput() ); + to.setReasonForIncompletion( from.getReasonForIncompletion() ); + to.setExecutionTime( from.getExecutionTime() ); + to.setEvent( from.getEvent() ); + to.setFailedReferenceTaskNames( from.getFailedReferenceTaskNames() ); + return to; + } + + public abstract WorkflowTaskPb.WorkflowTask.WorkflowTaskList toProto(List in); + + public abstract List fromProto(WorkflowTaskPb.WorkflowTask.WorkflowTaskList in); + + public abstract Value toProto(Object in); + + public abstract Object fromProto(Value in); + + public abstract Any toProto(Any in); + + public abstract Any fromProto(Any in); +} diff --git a/grpc/src/main/java/com/netflix/conductor/grpc/ProtoMapper.java b/grpc/src/main/java/com/netflix/conductor/grpc/ProtoMapper.java new file mode 100644 index 0000000000..f39beb1355 --- /dev/null +++ b/grpc/src/main/java/com/netflix/conductor/grpc/ProtoMapper.java @@ -0,0 +1,147 @@ +package com.netflix.conductor.grpc; + +import com.google.protobuf.*; +import com.netflix.conductor.common.metadata.workflow.WorkflowTask; +import com.netflix.conductor.proto.WorkflowTaskPb; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * ProtoMapper implements conversion code between the internal models + * used by Conductor (POJOs) and their corresponding equivalents in + * the exposed Protocol Buffers interface. + * + * The vast majority of the mapping logic is implemented in the autogenerated + * {@link AbstractProtoMapper} class. This class only implements the custom + * logic for objects that need to be special cased in the API. + */ +public final class ProtoMapper extends AbstractProtoMapper { + public static final ProtoMapper INSTANCE = new ProtoMapper(); + + private ProtoMapper() {} + + /** + * Convert an {@link Object} instance into its equivalent {@link Value} + * ProtoBuf object. 
+ * + * The {@link Value} ProtoBuf message is a variant type that can define any + * value representable as a native JSON type. Consequently, this method expects + * the given {@link Object} instance to be a Java object instance of JSON-native + * value, namely: null, {@link Boolean}, {@link Double}, {@link String}, + * {@link Map}, {@link List}. + * + * Any other values will cause an exception to be thrown. + * See {@link ProtoMapper#fromProto(Value)} for the reverse mapping. + * + * @param val a Java object that can be represented natively in JSON + * @return an instance of a {@link Value} ProtoBuf message + */ + @Override + public Value toProto(Object val) { + Value.Builder builder = Value.newBuilder(); + + if (val == null) { + builder.setNullValue(NullValue.NULL_VALUE); + } else if (val instanceof Boolean) { + builder.setBoolValue((Boolean) val); + } else if (val instanceof Double) { + builder.setNumberValue((Double) val); + } else if (val instanceof String) { + builder.setStringValue((String) val); + } else if (val instanceof Map) { + Map<String, Object> map = (Map<String, Object>) val; + Struct.Builder struct = Struct.newBuilder(); + for (Map.Entry<String, Object> pair : map.entrySet()) { + struct.putFields(pair.getKey(), toProto(pair.getValue())); + } + builder.setStructValue(struct.build()); + } else if (val instanceof List) { + ListValue.Builder list = ListValue.newBuilder(); + for (Object obj : (List<?>) val) { + list.addValues(toProto(obj)); + } + builder.setListValue(list.build()); + } else { + throw new ClassCastException("cannot map to Value type: " + val); + } + return builder.build(); + } + + /** + * Convert a ProtoBuf {@link Value} message into its native Java object + * equivalent. + * + * See {@link ProtoMapper#toProto(Object)} for the reverse mapping and the + * possible values that can be returned from this method. + * + * @param any an instance of a ProtoBuf {@link Value} message + * @return a native Java object representing the value + */ + @Override + public Object fromProto(Value any) { + switch (any.getKindCase()) { + case NULL_VALUE: + return null; + case BOOL_VALUE: + return any.getBoolValue(); + case NUMBER_VALUE: + return any.getNumberValue(); + case STRING_VALUE: + return any.getStringValue(); + case STRUCT_VALUE: + Struct struct = any.getStructValue(); + Map<String, Object> map = new HashMap<>(); + for (Map.Entry<String, Value> pair : struct.getFieldsMap().entrySet()) { + map.put(pair.getKey(), fromProto(pair.getValue())); + } + return map; + case LIST_VALUE: + List<Object> list = new ArrayList<>(); + for (Value val : any.getListValue().getValuesList()) { + list.add(fromProto(val)); + } + return list; + default: + throw new ClassCastException("unset Value element: " + any); + } + } + + /** + * Convert a WorkflowTaskList message wrapper into a {@link List} instance + * with its contents. + * + * @param list an instance of a ProtoBuf message + * @return a list with the contents of the message + */ + @Override + public List<WorkflowTask> fromProto(WorkflowTaskPb.WorkflowTask.WorkflowTaskList list) { + return list.getTasksList().stream().map(this::fromProto).collect(Collectors.toList()); + } + + /** + * Convert a list of {@link WorkflowTask} instances into a ProtoBuf wrapper object.
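[Editor's note] A minimal round-trip sketch of the Value conversion above; it only uses the ProtoMapper.INSTANCE singleton introduced in this file, and the literal values are illustrative. Note that numbers must already be Doubles, since toProto(Object) has no Integer branch.

    // Illustrative only: converting a JSON-native map to google.protobuf.Value and back.
    import com.google.protobuf.Value;
    import com.netflix.conductor.grpc.ProtoMapper;

    import java.util.HashMap;
    import java.util.Map;

    public class ValueMappingExample {
        public static void main(String[] args) {
            Map<String, Object> input = new HashMap<>();
            input.put("taskName", "encode_video"); // -> Value.string_value
            input.put("retries", 3.0);             // -> Value.number_value (must be a Double, not an Integer)
            input.put("optional", null);           // -> Value.null_value

            Value proto = ProtoMapper.INSTANCE.toProto((Object) input);  // POJO -> protobuf Value (struct_value)
            Object roundTripped = ProtoMapper.INSTANCE.fromProto(proto); // protobuf Value -> POJO

            System.out.println(roundTripped); // a Map with the same three entries (iteration order may differ)
        }
    }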
+ * + * @param list a list of {@link WorkflowTask} instances + * @return a ProtoBuf message wrapping the contents of the list + */ + @Override + public WorkflowTaskPb.WorkflowTask.WorkflowTaskList toProto(List list) { + return WorkflowTaskPb.WorkflowTask.WorkflowTaskList.newBuilder() + .addAllTasks(list.stream().map(this::toProto)::iterator) + .build(); + } + + @Override + public Any toProto(Any in) { + return in; + } + + @Override + public Any fromProto(Any in) { + return in; + } +} diff --git a/grpc/src/main/proto/grpc/event_service.proto b/grpc/src/main/proto/grpc/event_service.proto new file mode 100644 index 0000000000..88ebb9e033 --- /dev/null +++ b/grpc/src/main/proto/grpc/event_service.proto @@ -0,0 +1,78 @@ +syntax = "proto3"; +package conductor.grpc.events; + +import "model/eventhandler.proto"; + +option java_package = "com.netflix.conductor.grpc"; +option java_outer_classname = "EventServicePb"; +option go_package = "github.com/netflix/conductor/client/gogrpc/conductor/grpc/events"; + +service EventService { + // POST / + rpc AddEventHandler(AddEventHandlerRequest) returns (AddEventHandlerResponse); + + // PUT / + rpc UpdateEventHandler(UpdateEventHandlerRequest) returns (UpdateEventHandlerResponse); + + // DELETE /{name} + rpc RemoveEventHandler(RemoveEventHandlerRequest) returns (RemoveEventHandlerResponse); + + // GET / + rpc GetEventHandlers(GetEventHandlersRequest) returns (stream conductor.proto.EventHandler); + + // GET /{name} + rpc GetEventHandlersForEvent(GetEventHandlersForEventRequest) returns (stream conductor.proto.EventHandler); + + // GET /queues + rpc GetQueues(GetQueuesRequest) returns (GetQueuesResponse); + rpc GetQueueSizes(GetQueueSizesRequest) returns (GetQueueSizesResponse); + + // GET /queues/providers + rpc GetQueueProviders(GetQueueProvidersRequest) returns (GetQueueProvidersResponse); +} + +message AddEventHandlerRequest { + conductor.proto.EventHandler handler = 1; +} + +message AddEventHandlerResponse {} + +message UpdateEventHandlerRequest { + conductor.proto.EventHandler handler = 1; +} + +message UpdateEventHandlerResponse {} + +message RemoveEventHandlerRequest { + string name = 1; +} + +message RemoveEventHandlerResponse {} + +message GetEventHandlersRequest {} + +message GetEventHandlersForEventRequest { + string event = 1; + bool active_only = 2; +} + +message GetQueuesRequest {} + +message GetQueuesResponse { + map event_to_queue_uri = 1; +} + +message GetQueueSizesRequest {} + +message GetQueueSizesResponse { + message QueueInfo { + map queue_sizes = 1; + } + map event_to_queue_info = 2; +} + +message GetQueueProvidersRequest {} + +message GetQueueProvidersResponse { + repeated string providers = 1; +} diff --git a/grpc/src/main/proto/grpc/metadata_service.proto b/grpc/src/main/proto/grpc/metadata_service.proto new file mode 100644 index 0000000000..1716c6bbec --- /dev/null +++ b/grpc/src/main/proto/grpc/metadata_service.proto @@ -0,0 +1,80 @@ +syntax = "proto3"; +package conductor.grpc.metadata; + +import "model/taskdef.proto"; +import "model/workflowdef.proto"; + +option java_package = "com.netflix.conductor.grpc"; +option java_outer_classname = "MetadataServicePb"; +option go_package = "github.com/netflix/conductor/client/gogrpc/conductor/grpc/metadata"; + +service MetadataService { + // POST /workflow + rpc CreateWorkflow(CreateWorkflowRequest) returns (CreateWorkflowResponse); + + // PUT /workflow + rpc UpdateWorkflows(UpdateWorkflowsRequest) returns (UpdateWorkflowsResponse); + + // GET /workflow/{name} + rpc 
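[Editor's note] In the event service above, GetEventHandlers and GetEventHandlersForEvent are server-streaming RPCs, so a client reads them as an iterator rather than a single response. A hedged sketch against the stub that protoc-gen-grpc-java would conventionally generate for this service; the EventServiceGrpc class name, host, and port are assumptions, not part of this change.

    // Sketch: consuming the streaming GetEventHandlers RPC from event_service.proto.
    import com.netflix.conductor.grpc.EventServiceGrpc;   // generated stub name is an assumption
    import com.netflix.conductor.grpc.EventServicePb;
    import com.netflix.conductor.proto.EventHandlerPb;
    import io.grpc.ManagedChannel;
    import io.grpc.ManagedChannelBuilder;

    import java.util.Iterator;

    public class ListEventHandlers {
        public static void main(String[] args) {
            ManagedChannel channel = ManagedChannelBuilder.forAddress("localhost", 8090) // address is a placeholder
                    .usePlaintext()
                    .build();
            EventServiceGrpc.EventServiceBlockingStub events = EventServiceGrpc.newBlockingStub(channel);

            // Server-streaming RPC: the blocking stub exposes the stream as an Iterator.
            Iterator<EventHandlerPb.EventHandler> handlers =
                    events.getEventHandlers(EventServicePb.GetEventHandlersRequest.getDefaultInstance());
            while (handlers.hasNext()) {
                System.out.println(handlers.next().getName());
            }
            channel.shutdown();
        }
    }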
GetWorkflow(GetWorkflowRequest) returns (GetWorkflowResponse); + + // POST /taskdefs + rpc CreateTasks(CreateTasksRequest) returns (CreateTasksResponse); + + // PUT /taskdefs + rpc UpdateTask(UpdateTaskRequest) returns (UpdateTaskResponse); + + // GET /taskdefs/{tasktype} + rpc GetTask(GetTaskRequest) returns (GetTaskResponse); + + // DELETE /taskdefs/{tasktype} + rpc DeleteTask(DeleteTaskRequest) returns (DeleteTaskResponse); +} + +message CreateWorkflowRequest { + conductor.proto.WorkflowDef workflow = 1; +} + +message CreateWorkflowResponse {} + +message UpdateWorkflowsRequest { + repeated conductor.proto.WorkflowDef defs = 1; +} + +message UpdateWorkflowsResponse {} + +message GetWorkflowRequest { + string name = 1; + int32 version = 2; +} + +message GetWorkflowResponse { + conductor.proto.WorkflowDef workflow = 1; +} + +message CreateTasksRequest { + repeated conductor.proto.TaskDef defs = 1; +} + +message CreateTasksResponse {} + +message UpdateTaskRequest { + conductor.proto.TaskDef task = 1; +} + +message UpdateTaskResponse {} + + +message GetTaskRequest { + string task_type = 1; +} + +message GetTaskResponse { + conductor.proto.TaskDef task = 1; +} + +message DeleteTaskRequest { + string task_type = 1; +} + +message DeleteTaskResponse {} diff --git a/grpc/src/main/proto/grpc/search.proto b/grpc/src/main/proto/grpc/search.proto new file mode 100644 index 0000000000..e9ad0f069c --- /dev/null +++ b/grpc/src/main/proto/grpc/search.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; +package conductor.grpc.search; + +option java_package = "com.netflix.conductor.grpc"; +option java_outer_classname = "SearchPb"; +option go_package = "github.com/netflix/conductor/client/gogrpc/conductor/grpc/search"; + +message Request { + int32 start = 1; + int32 size = 2; + string sort = 3; + string free_text = 4; + string query = 5; +} + diff --git a/grpc/src/main/proto/grpc/task_service.proto b/grpc/src/main/proto/grpc/task_service.proto new file mode 100644 index 0000000000..916e4745da --- /dev/null +++ b/grpc/src/main/proto/grpc/task_service.proto @@ -0,0 +1,162 @@ +syntax = "proto3"; +package conductor.grpc.tasks; + +import "model/taskexeclog.proto"; +import "model/taskresult.proto"; +import "model/task.proto"; + +option java_package = "com.netflix.conductor.grpc"; +option java_outer_classname = "TaskServicePb"; +option go_package = "github.com/netflix/conductor/client/gogrpc/conductor/grpc/tasks"; + +service TaskService { + // GET /poll/{tasktype} + rpc Poll(PollRequest) returns (PollResponse); + + // /poll/batch/{tasktype} + rpc BatchPoll(BatchPollRequest) returns (stream conductor.proto.Task); + + // GET /in_progress/{tasktype} + rpc GetTasksInProgress(TasksInProgressRequest) returns (TasksInProgressResponse); + + // GET /in_progress/{workflowId}/{taskRefName} + rpc GetPendingTaskForWorkflow(PendingTaskRequest) returns (PendingTaskResponse); + + // POST / + rpc UpdateTask(UpdateTaskRequest) returns (UpdateTaskResponse); + + // POST /{taskId}/ack + rpc AckTask(AckTaskRequest) returns (AckTaskResponse); + + // POST /{taskId}/log + rpc AddLog(AddLogRequest) returns (AddLogResponse); + + // GET {taskId}/log + rpc GetTaskLogs(GetTaskLogsRequest) returns (GetTaskLogsResponse); + + // GET /{taskId} + rpc GetTask(GetTaskRequest) returns (GetTaskResponse); + + // DELETE /queue/{taskType}/{taskId} + rpc RemoveTaskFromQueue(RemoveTaskRequest) returns (RemoveTaskResponse); + + // GET /queue/sizes + rpc GetQueueSizesForTasks(QueueSizesRequest) returns (QueueSizesResponse); + + // GET /queue/all + rpc 
GetQueueInfo(QueueInfoRequest) returns (QueueInfoResponse); + + // GET /queue/all/verbose + rpc GetQueueAllInfo(QueueAllInfoRequest) returns (QueueAllInfoResponse); +} + +message PollRequest { + string task_type = 1; + string worker_id = 2; + string domain = 3; +} + +message PollResponse { + conductor.proto.Task task = 1; +} + +message BatchPollRequest { + string task_type = 1; + string worker_id = 2; + string domain = 3; + int32 count = 4; + int32 timeout = 5; +} + +message TasksInProgressRequest { + string task_type = 1; + string start_key = 2; + int32 count = 3; +} + +message TasksInProgressResponse { + repeated conductor.proto.Task tasks = 1; +} + +message PendingTaskRequest { + string workflow_id = 1; + string task_ref_name = 2; +} + +message PendingTaskResponse { + conductor.proto.Task task = 1; +} + +message UpdateTaskRequest { + conductor.proto.TaskResult result = 1; +} + +message UpdateTaskResponse { + string task_id = 1; +} + +message AckTaskRequest { + string task_id = 1; + string worker_id = 2; +} + +message AckTaskResponse { + bool ack = 1; +} + +message AddLogRequest { + string task_id = 1; + string log = 2; +} + +message AddLogResponse {} + +message GetTaskLogsRequest { + string task_id = 1; +} + +message GetTaskLogsResponse { + repeated conductor.proto.TaskExecLog logs = 1; +} + +message GetTaskRequest { + string task_id = 1; +} + +message GetTaskResponse { + conductor.proto.Task task = 1; +} + +message RemoveTaskRequest { + string task_type = 1; + string task_id = 2; +} + +message RemoveTaskResponse {} + +message QueueSizesRequest { + repeated string task_types = 1; +} + +message QueueSizesResponse { + map queue_for_task = 1; +} + +message QueueInfoRequest {} + +message QueueInfoResponse { + map queues = 1; +} + +message QueueAllInfoRequest {} + +message QueueAllInfoResponse { + message ShardInfo { + int64 size = 1; + int64 uacked = 2; + } + message QueueInfo { + map shards = 1; + } + map queues = 1; +} \ No newline at end of file diff --git a/grpc/src/main/proto/grpc/workflow_service.proto b/grpc/src/main/proto/grpc/workflow_service.proto new file mode 100644 index 0000000000..a1644cdbfe --- /dev/null +++ b/grpc/src/main/proto/grpc/workflow_service.proto @@ -0,0 +1,166 @@ +syntax = "proto3"; +package conductor.grpc.workflows; + +import "grpc/search.proto"; +import "model/workflow.proto"; +import "model/workflowsummary.proto"; +import "model/skiptaskrequest.proto"; +import "model/startworkflowrequest.proto"; +import "model/rerunworkflowrequest.proto"; + +option java_package = "com.netflix.conductor.grpc"; +option java_outer_classname = "WorkflowServicePb"; +option go_package = "github.com/netflix/conductor/client/gogrpc/conductor/grpc/workflows"; + +service WorkflowService { + // POST / + rpc StartWorkflow(conductor.proto.StartWorkflowRequest) returns (StartWorkflowResponse); + + // GET /{name}/correlated/{correlationId} + rpc GetWorkflows(GetWorkflowsRequest) returns (GetWorkflowsResponse); + + // GET /{workflowId} + rpc GetWorkflowStatus(GetWorkflowStatusRequest) returns (conductor.proto.Workflow); + + // DELETE /{workflodId}/remove + rpc RemoveWorkflow(RemoveWorkflowRequest) returns (RemoveWorkflowResponse); + + // GET /running/{name} + rpc GetRunningWorkflows(GetRunningWorkflowsRequest) returns (GetRunningWorkflowsResponse); + + // PUT /decide/{workflowId} + rpc DecideWorkflow(DecideWorkflowRequest) returns (DecideWorkflowResponse); + + // PUT /{workflowId}/pause + rpc PauseWorkflow(PauseWorkflowRequest) returns (PauseWorkflowResponse); + + // PUT /{workflowId}/pause + 
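[Editor's note] Taken together, Poll and UpdateTask define the worker contract of the task service: poll for a task of a given type, do the work, then report a TaskResult. A rough sketch of that cycle, assuming the conventional generated stub name TaskServiceGrpc and a placeholder server address.

    // Sketch: the poll -> execute -> update cycle from task_service.proto.
    import com.netflix.conductor.grpc.TaskServiceGrpc;    // generated stub name is an assumption
    import com.netflix.conductor.grpc.TaskServicePb;
    import com.netflix.conductor.proto.TaskPb;
    import com.netflix.conductor.proto.TaskResultPb;
    import io.grpc.ManagedChannel;
    import io.grpc.ManagedChannelBuilder;

    public class SimpleWorker {
        public static void main(String[] args) {
            ManagedChannel channel = ManagedChannelBuilder.forAddress("localhost", 8090).usePlaintext().build();
            TaskServiceGrpc.TaskServiceBlockingStub tasks = TaskServiceGrpc.newBlockingStub(channel);

            // Equivalent of GET /poll/{tasktype}: ask for one task of a given type.
            TaskServicePb.PollResponse polled = tasks.poll(TaskServicePb.PollRequest.newBuilder()
                    .setTaskType("encode_video")
                    .setWorkerId("worker-1")
                    .build());

            if (polled.hasTask()) {
                TaskPb.Task task = polled.getTask();
                // ... perform the actual work here, then report the outcome ...
                tasks.updateTask(TaskServicePb.UpdateTaskRequest.newBuilder()
                        .setResult(TaskResultPb.TaskResult.newBuilder()
                                .setWorkflowInstanceId(task.getWorkflowInstanceId())
                                .setTaskId(task.getTaskId())
                                .setStatus(TaskResultPb.TaskResult.Status.COMPLETED))
                        .build());
            }
            channel.shutdown();
        }
    }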
rpc ResumeWorkflow(ResumeWorkflowRequest) returns (ResumeWorkflowResponse); + + // PUT /{workflowId}/skiptask/{taskReferenceName} + rpc SkipTaskFromWorkflow(SkipTaskRequest) returns (SkipTaskResponse); + + // POST /{workflowId}/rerun + rpc RerunWorkflow(conductor.proto.RerunWorkflowRequest) returns (RerunWorkflowResponse); + + // POST /{workflowId}/restart + rpc RestartWorkflow(RestartWorkflowRequest) returns (RestartWorkflowResponse); + + // POST /{workflowId}retry + rpc RetryWorkflow(RetryWorkflowRequest) returns (RetryWorkflowResponse); + + // POST /{workflowId}/resetcallbacks + rpc ResetWorkflowCallbacks(ResetWorkflowCallbacksRequest) returns (ResetWorkflowCallbacksResponse); + + // DELETE /{workflowId} + rpc TerminateWorkflow(TerminateWorkflowRequest) returns (TerminateWorkflowResponse); + + // GET /search + rpc Search(conductor.grpc.search.Request) returns (WorkflowSummarySearchResult); + rpc SearchByTasks(conductor.grpc.search.Request) returns (WorkflowSummarySearchResult); +} + +message StartWorkflowResponse { + string workflow_id = 1; +} + +message GetWorkflowsRequest { + string name = 1; + repeated string correlation_id = 2; + bool include_closed = 3; + bool include_tasks = 4; +} + +message GetWorkflowsResponse { + message Workflows { + repeated conductor.proto.Workflow workflows = 1; + } + map workflows_by_id = 1; +} + +message GetWorkflowStatusRequest { + string workflow_id = 1; + bool include_tasks = 2; +} + +message GetWorkflowStatusResponse { + conductor.proto.Workflow workflow = 1; +} + +message RemoveWorkflowRequest { + string workflod_id = 1; + bool archive_workflow = 2; +} + +message RemoveWorkflowResponse {} + +message GetRunningWorkflowsRequest { + string name = 1; + int32 version = 2; + int64 start_time = 3; + int64 end_time = 4; +} + +message GetRunningWorkflowsResponse { + repeated string workflow_ids = 1; +} + +message DecideWorkflowRequest { + string workflow_id = 1; +} + +message DecideWorkflowResponse {} + +message PauseWorkflowRequest { + string workflow_id = 1; +} + +message PauseWorkflowResponse {} + +message ResumeWorkflowRequest { + string workflow_id = 1; +} + +message ResumeWorkflowResponse {} + +message SkipTaskRequest { + string workflow_id = 1; + string task_reference_name = 2; + conductor.proto.SkipTaskRequest request = 3; +} + +message SkipTaskResponse {} + +message RerunWorkflowResponse { + string workflow_id = 1; +} + +message RestartWorkflowRequest { + string workflow_id = 1; +} + +message RestartWorkflowResponse {} + +message RetryWorkflowRequest { + string workflow_id = 1; +} + +message RetryWorkflowResponse {} + +message ResetWorkflowCallbacksRequest { + string workflow_id = 1; +} + +message ResetWorkflowCallbacksResponse {} + +message TerminateWorkflowRequest { + string workflow_id = 1; + string reason = 2; +} + +message TerminateWorkflowResponse {} + +message WorkflowSummarySearchResult { + int64 total_hits = 1; + repeated conductor.proto.WorkflowSummary results = 2; +} diff --git a/grpc/src/main/proto/model/dynamicforkjointask.proto b/grpc/src/main/proto/model/dynamicforkjointask.proto new file mode 100644 index 0000000000..12e66bb1e6 --- /dev/null +++ b/grpc/src/main/proto/model/dynamicforkjointask.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; +package conductor.proto; + +import "google/protobuf/struct.proto"; + +option java_package = "com.netflix.conductor.proto"; +option java_outer_classname = "DynamicForkJoinTaskPb"; +option go_package = "github.com/netflix/conductor/client/gogrpc/conductor/model"; + +message DynamicForkJoinTask { + string 
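[Editor's note] The workflow service above wraps the shared conductor.proto.StartWorkflowRequest and returns only the new workflow id. A hedged sketch of starting a workflow through the generated stub; the WorkflowServiceGrpc class name, the server address, and the assumption that the request's input map carries google.protobuf.Value values (implied by its struct.proto import) are mine, not part of this diff.

    // Sketch: starting a workflow via workflow_service.proto, reusing ProtoMapper for the input map.
    import com.netflix.conductor.grpc.ProtoMapper;
    import com.netflix.conductor.grpc.WorkflowServiceGrpc;   // generated stub name is an assumption
    import com.netflix.conductor.grpc.WorkflowServicePb;
    import com.netflix.conductor.proto.StartWorkflowRequestPb;
    import io.grpc.ManagedChannel;
    import io.grpc.ManagedChannelBuilder;

    public class StartWorkflowExample {
        public static void main(String[] args) {
            ManagedChannel channel = ManagedChannelBuilder.forAddress("localhost", 8090).usePlaintext().build();
            WorkflowServiceGrpc.WorkflowServiceBlockingStub workflows = WorkflowServiceGrpc.newBlockingStub(channel);

            StartWorkflowRequestPb.StartWorkflowRequest request =
                    StartWorkflowRequestPb.StartWorkflowRequest.newBuilder()
                            .setName("encode_and_deploy")       // illustrative workflow name
                            .setVersion(1)
                            .putInput("fileLocation", ProtoMapper.INSTANCE.toProto((Object) "s3://bucket/video.mp4"))
                            .build();

            WorkflowServicePb.StartWorkflowResponse response = workflows.startWorkflow(request);
            System.out.println("started workflow " + response.getWorkflowId());
            channel.shutdown();
        }
    }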
task_name = 1; + string workflow_name = 2; + string reference_name = 3; + map input = 4; + string type = 5; +} diff --git a/grpc/src/main/proto/model/dynamicforkjointasklist.proto b/grpc/src/main/proto/model/dynamicforkjointasklist.proto new file mode 100644 index 0000000000..3ac3f44d9e --- /dev/null +++ b/grpc/src/main/proto/model/dynamicforkjointasklist.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; +package conductor.proto; + +import "model/dynamicforkjointask.proto"; + +option java_package = "com.netflix.conductor.proto"; +option java_outer_classname = "DynamicForkJoinTaskListPb"; +option go_package = "github.com/netflix/conductor/client/gogrpc/conductor/model"; + +message DynamicForkJoinTaskList { + repeated DynamicForkJoinTask dynamic_tasks = 1; +} diff --git a/grpc/src/main/proto/model/eventexecution.proto b/grpc/src/main/proto/model/eventexecution.proto new file mode 100644 index 0000000000..e4aee81aa6 --- /dev/null +++ b/grpc/src/main/proto/model/eventexecution.proto @@ -0,0 +1,26 @@ +syntax = "proto3"; +package conductor.proto; + +import "model/eventhandler.proto"; +import "google/protobuf/struct.proto"; + +option java_package = "com.netflix.conductor.proto"; +option java_outer_classname = "EventExecutionPb"; +option go_package = "github.com/netflix/conductor/client/gogrpc/conductor/model"; + +message EventExecution { + enum Status { + IN_PROGRESS = 0; + COMPLETED = 1; + FAILED = 2; + SKIPPED = 3; + } + string id = 1; + string message_id = 2; + string name = 3; + string event = 4; + int64 created = 5; + EventExecution.Status status = 6; + EventHandler.Action.Type action = 7; + map output = 8; +} diff --git a/grpc/src/main/proto/model/eventhandler.proto b/grpc/src/main/proto/model/eventhandler.proto new file mode 100644 index 0000000000..5dd866230e --- /dev/null +++ b/grpc/src/main/proto/model/eventhandler.proto @@ -0,0 +1,42 @@ +syntax = "proto3"; +package conductor.proto; + +import "google/protobuf/struct.proto"; +import "google/protobuf/any.proto"; + +option java_package = "com.netflix.conductor.proto"; +option java_outer_classname = "EventHandlerPb"; +option go_package = "github.com/netflix/conductor/client/gogrpc/conductor/model"; + +message EventHandler { + message StartWorkflow { + string name = 1; + int32 version = 2; + string correlation_id = 3; + map input = 4; + google.protobuf.Any input_message = 5; + } + message TaskDetails { + string workflow_id = 1; + string task_ref_name = 2; + map output = 3; + google.protobuf.Any output_message = 4; + } + message Action { + enum Type { + START_WORKFLOW = 0; + COMPLETE_TASK = 1; + FAIL_TASK = 2; + } + EventHandler.Action.Type action = 1; + EventHandler.StartWorkflow start_workflow = 2; + EventHandler.TaskDetails complete_task = 3; + EventHandler.TaskDetails fail_task = 4; + bool expand_inline_json = 5; + } + string name = 1; + string event = 2; + string condition = 3; + repeated EventHandler.Action actions = 4; + bool active = 5; +} diff --git a/grpc/src/main/proto/model/polldata.proto b/grpc/src/main/proto/model/polldata.proto new file mode 100644 index 0000000000..59169430c3 --- /dev/null +++ b/grpc/src/main/proto/model/polldata.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; +package conductor.proto; + + +option java_package = "com.netflix.conductor.proto"; +option java_outer_classname = "PollDataPb"; +option go_package = "github.com/netflix/conductor/client/gogrpc/conductor/model"; + +message PollData { + string queue_name = 1; + string domain = 2; + string worker_id = 3; + int64 last_poll_time = 4; +} diff --git 
a/grpc/src/main/proto/model/rerunworkflowrequest.proto b/grpc/src/main/proto/model/rerunworkflowrequest.proto new file mode 100644 index 0000000000..280e8cfae6 --- /dev/null +++ b/grpc/src/main/proto/model/rerunworkflowrequest.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; +package conductor.proto; + +import "google/protobuf/struct.proto"; + +option java_package = "com.netflix.conductor.proto"; +option java_outer_classname = "RerunWorkflowRequestPb"; +option go_package = "github.com/netflix/conductor/client/gogrpc/conductor/model"; + +message RerunWorkflowRequest { + string re_run_from_workflow_id = 1; + map workflow_input = 2; + string re_run_from_task_id = 3; + map task_input = 4; + string correlation_id = 5; +} diff --git a/grpc/src/main/proto/model/skiptaskrequest.proto b/grpc/src/main/proto/model/skiptaskrequest.proto new file mode 100644 index 0000000000..323e5162fe --- /dev/null +++ b/grpc/src/main/proto/model/skiptaskrequest.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; +package conductor.proto; + +import "google/protobuf/struct.proto"; +import "google/protobuf/any.proto"; + +option java_package = "com.netflix.conductor.proto"; +option java_outer_classname = "SkipTaskRequestPb"; +option go_package = "github.com/netflix/conductor/client/gogrpc/conductor/model"; + +message SkipTaskRequest { + map task_input = 1; + map task_output = 2; + google.protobuf.Any task_input_message = 3; + google.protobuf.Any task_output_message = 4; +} diff --git a/grpc/src/main/proto/model/startworkflowrequest.proto b/grpc/src/main/proto/model/startworkflowrequest.proto new file mode 100644 index 0000000000..33eb1aa8be --- /dev/null +++ b/grpc/src/main/proto/model/startworkflowrequest.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; +package conductor.proto; + +import "model/workflowdef.proto"; +import "google/protobuf/struct.proto"; + +option java_package = "com.netflix.conductor.proto"; +option java_outer_classname = "StartWorkflowRequestPb"; +option go_package = "github.com/netflix/conductor/client/gogrpc/conductor/model"; + +message StartWorkflowRequest { + string name = 1; + int32 version = 2; + string correlation_id = 3; + map input = 4; + map task_to_domain = 5; + WorkflowDef workflow_def = 6; + string external_input_payload_storage_path = 7; +} diff --git a/grpc/src/main/proto/model/subworkflowparams.proto b/grpc/src/main/proto/model/subworkflowparams.proto new file mode 100644 index 0000000000..4a2005c37f --- /dev/null +++ b/grpc/src/main/proto/model/subworkflowparams.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; +package conductor.proto; + + +option java_package = "com.netflix.conductor.proto"; +option java_outer_classname = "SubWorkflowParamsPb"; +option go_package = "github.com/netflix/conductor/client/gogrpc/conductor/model"; + +message SubWorkflowParams { + string name = 1; + int32 version = 2; +} diff --git a/grpc/src/main/proto/model/task.proto b/grpc/src/main/proto/model/task.proto new file mode 100644 index 0000000000..3887f7d69b --- /dev/null +++ b/grpc/src/main/proto/model/task.proto @@ -0,0 +1,59 @@ +syntax = "proto3"; +package conductor.proto; + +import "model/workflowtask.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/any.proto"; + +option java_package = "com.netflix.conductor.proto"; +option java_outer_classname = "TaskPb"; +option go_package = "github.com/netflix/conductor/client/gogrpc/conductor/model"; + +message Task { + enum Status { + IN_PROGRESS = 0; + CANCELED = 1; + FAILED = 2; + FAILED_WITH_TERMINAL_ERROR = 3; + COMPLETED = 4; + COMPLETED_WITH_ERRORS = 5; + 
SCHEDULED = 6; + TIMED_OUT = 7; + READY_FOR_RERUN = 8; + SKIPPED = 9; + } + string task_type = 1; + Task.Status status = 2; + map input_data = 3; + string reference_task_name = 4; + int32 retry_count = 5; + int32 seq = 6; + string correlation_id = 7; + int32 poll_count = 8; + string task_def_name = 9; + int64 scheduled_time = 10; + int64 start_time = 11; + int64 end_time = 12; + int64 update_time = 13; + int32 start_delay_in_seconds = 14; + string retried_task_id = 15; + bool retried = 16; + bool executed = 17; + bool callback_from_worker = 18; + int32 response_timeout_seconds = 19; + string workflow_instance_id = 20; + string workflow_type = 21; + string task_id = 22; + string reason_for_incompletion = 23; + int64 callback_after_seconds = 24; + string worker_id = 25; + map output_data = 26; + WorkflowTask workflow_task = 27; + string domain = 28; + google.protobuf.Any input_message = 29; + google.protobuf.Any output_message = 30; + int32 rate_limit_per_frequency = 32; + int32 rate_limit_frequency_in_seconds = 33; + string external_input_payload_storage_path = 34; + string external_output_payload_storage_path = 35; +} diff --git a/grpc/src/main/proto/model/taskdef.proto b/grpc/src/main/proto/model/taskdef.proto new file mode 100644 index 0000000000..da9d13e311 --- /dev/null +++ b/grpc/src/main/proto/model/taskdef.proto @@ -0,0 +1,34 @@ +syntax = "proto3"; +package conductor.proto; + +import "google/protobuf/struct.proto"; + +option java_package = "com.netflix.conductor.proto"; +option java_outer_classname = "TaskDefPb"; +option go_package = "github.com/netflix/conductor/client/gogrpc/conductor/model"; + +message TaskDef { + enum RetryLogic { + FIXED = 0; + EXPONENTIAL_BACKOFF = 1; + } + enum TimeoutPolicy { + RETRY = 0; + TIME_OUT_WF = 1; + ALERT_ONLY = 2; + } + string name = 1; + string description = 2; + int32 retry_count = 3; + int64 timeout_seconds = 4; + repeated string input_keys = 5; + repeated string output_keys = 6; + TaskDef.TimeoutPolicy timeout_policy = 7; + TaskDef.RetryLogic retry_logic = 8; + int32 retry_delay_seconds = 9; + int32 response_timeout_seconds = 10; + int32 concurrent_exec_limit = 11; + map input_template = 12; + int32 rate_limit_per_frequency = 14; + int32 rate_limit_frequency_in_seconds = 15; +} diff --git a/grpc/src/main/proto/model/taskexeclog.proto b/grpc/src/main/proto/model/taskexeclog.proto new file mode 100644 index 0000000000..f67b2e4b2e --- /dev/null +++ b/grpc/src/main/proto/model/taskexeclog.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; +package conductor.proto; + + +option java_package = "com.netflix.conductor.proto"; +option java_outer_classname = "TaskExecLogPb"; +option go_package = "github.com/netflix/conductor/client/gogrpc/conductor/model"; + +message TaskExecLog { + string log = 1; + string task_id = 2; + int64 created_time = 3; +} diff --git a/grpc/src/main/proto/model/taskresult.proto b/grpc/src/main/proto/model/taskresult.proto new file mode 100644 index 0000000000..d94fd25716 --- /dev/null +++ b/grpc/src/main/proto/model/taskresult.proto @@ -0,0 +1,27 @@ +syntax = "proto3"; +package conductor.proto; + +import "google/protobuf/struct.proto"; +import "google/protobuf/any.proto"; + +option java_package = "com.netflix.conductor.proto"; +option java_outer_classname = "TaskResultPb"; +option go_package = "github.com/netflix/conductor/client/gogrpc/conductor/model"; + +message TaskResult { + enum Status { + IN_PROGRESS = 0; + FAILED = 1; + FAILED_WITH_TERMINAL_ERROR = 2; + COMPLETED = 3; + SCHEDULED = 4; + } + string workflow_instance_id = 1; + 
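[Editor's note] taskdef.proto above mirrors the TaskDef POJO, including its retry and timeout policies. A small illustrative sketch of the protobuf builder form; all values are made up for the example.

    // Sketch: a TaskDef message with retry and timeout settings.
    import com.netflix.conductor.proto.TaskDefPb;

    public class TaskDefExample {
        public static void main(String[] args) {
            TaskDefPb.TaskDef taskDef = TaskDefPb.TaskDef.newBuilder()
                    .setName("encode_video")
                    .setDescription("Transcodes an uploaded video")
                    .setRetryCount(3)
                    .setRetryLogic(TaskDefPb.TaskDef.RetryLogic.EXPONENTIAL_BACKOFF)
                    .setRetryDelaySeconds(60)
                    .setTimeoutSeconds(3600)
                    .setTimeoutPolicy(TaskDefPb.TaskDef.TimeoutPolicy.TIME_OUT_WF)
                    .setResponseTimeoutSeconds(600)
                    .setConcurrentExecLimit(10)
                    .build();
            System.out.println(taskDef);
        }
    }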
string task_id = 2; + string reason_for_incompletion = 3; + int64 callback_after_seconds = 4; + string worker_id = 5; + TaskResult.Status status = 6; + map output_data = 7; + google.protobuf.Any output_message = 8; +} diff --git a/grpc/src/main/proto/model/tasksummary.proto b/grpc/src/main/proto/model/tasksummary.proto new file mode 100644 index 0000000000..6ad99d0dc3 --- /dev/null +++ b/grpc/src/main/proto/model/tasksummary.proto @@ -0,0 +1,27 @@ +syntax = "proto3"; +package conductor.proto; + +import "model/task.proto"; + +option java_package = "com.netflix.conductor.proto"; +option java_outer_classname = "TaskSummaryPb"; +option go_package = "github.com/netflix/conductor/client/gogrpc/conductor/model"; + +message TaskSummary { + string workflow_id = 1; + string workflow_type = 2; + string correlation_id = 3; + string scheduled_time = 4; + string start_time = 5; + string update_time = 6; + string end_time = 7; + Task.Status status = 8; + string reason_for_incompletion = 9; + int64 execution_time = 10; + int64 queue_wait_time = 11; + string task_def_name = 12; + string task_type = 13; + string input = 14; + string output = 15; + string task_id = 16; +} diff --git a/grpc/src/main/proto/model/workflow.proto b/grpc/src/main/proto/model/workflow.proto new file mode 100644 index 0000000000..db785863f7 --- /dev/null +++ b/grpc/src/main/proto/model/workflow.proto @@ -0,0 +1,41 @@ +syntax = "proto3"; +package conductor.proto; + +import "model/workflowdef.proto"; +import "model/task.proto"; +import "google/protobuf/struct.proto"; + +option java_package = "com.netflix.conductor.proto"; +option java_outer_classname = "WorkflowPb"; +option go_package = "github.com/netflix/conductor/client/gogrpc/conductor/model"; + +message Workflow { + enum WorkflowStatus { + RUNNING = 0; + COMPLETED = 1; + FAILED = 2; + TIMED_OUT = 3; + TERMINATED = 4; + PAUSED = 5; + } + Workflow.WorkflowStatus status = 1; + int64 end_time = 2; + string workflow_id = 3; + string parent_workflow_id = 4; + string parent_workflow_task_id = 5; + repeated Task tasks = 6; + map input = 8; + map output = 9; + string workflow_type = 10; + int32 version = 11; + string correlation_id = 12; + string re_run_from_workflow_id = 13; + string reason_for_incompletion = 14; + int32 schema_version = 15; + string event = 16; + map task_to_domain = 17; + repeated string failed_reference_task_names = 18; + WorkflowDef workflow_definition = 19; + string external_input_payload_storage_path = 20; + string external_output_payload_storage_path = 21; +} diff --git a/grpc/src/main/proto/model/workflowdef.proto b/grpc/src/main/proto/model/workflowdef.proto new file mode 100644 index 0000000000..9e5be4f627 --- /dev/null +++ b/grpc/src/main/proto/model/workflowdef.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; +package conductor.proto; + +import "model/workflowtask.proto"; +import "google/protobuf/struct.proto"; + +option java_package = "com.netflix.conductor.proto"; +option java_outer_classname = "WorkflowDefPb"; +option go_package = "github.com/netflix/conductor/client/gogrpc/conductor/model"; + +message WorkflowDef { + string name = 1; + string description = 2; + int32 version = 3; + repeated WorkflowTask tasks = 4; + repeated string input_parameters = 5; + map output_parameters = 6; + string failure_workflow = 7; + int32 schema_version = 8; + bool restartable = 9; +} diff --git a/grpc/src/main/proto/model/workflowsummary.proto b/grpc/src/main/proto/model/workflowsummary.proto new file mode 100644 index 0000000000..6e3d4202a0 --- /dev/null +++ 
b/grpc/src/main/proto/model/workflowsummary.proto @@ -0,0 +1,25 @@ +syntax = "proto3"; +package conductor.proto; + +import "model/workflow.proto"; + +option java_package = "com.netflix.conductor.proto"; +option java_outer_classname = "WorkflowSummaryPb"; +option go_package = "github.com/netflix/conductor/client/gogrpc/conductor/model"; + +message WorkflowSummary { + string workflow_type = 1; + int32 version = 2; + string workflow_id = 3; + string correlation_id = 4; + string start_time = 5; + string update_time = 6; + string end_time = 7; + Workflow.WorkflowStatus status = 8; + string input = 9; + string output = 10; + string reason_for_incompletion = 11; + int64 execution_time = 12; + string event = 13; + string failed_reference_task_names = 14; +} diff --git a/grpc/src/main/proto/model/workflowtask.proto b/grpc/src/main/proto/model/workflowtask.proto new file mode 100644 index 0000000000..e16b54e9f4 --- /dev/null +++ b/grpc/src/main/proto/model/workflowtask.proto @@ -0,0 +1,36 @@ +syntax = "proto3"; +package conductor.proto; + +import "model/taskdef.proto"; +import "model/subworkflowparams.proto"; +import "google/protobuf/struct.proto"; + +option java_package = "com.netflix.conductor.proto"; +option java_outer_classname = "WorkflowTaskPb"; +option go_package = "github.com/netflix/conductor/client/gogrpc/conductor/model"; + +message WorkflowTask { + message WorkflowTaskList { + repeated WorkflowTask tasks = 1; + } + string name = 1; + string task_reference_name = 2; + string description = 3; + map input_parameters = 4; + string type = 5; + string dynamic_task_name_param = 6; + string case_value_param = 7; + string case_expression = 8; + map decision_cases = 9; + string dynamic_fork_tasks_param = 10; + string dynamic_fork_tasks_input_param_name = 11; + repeated WorkflowTask default_case = 12; + repeated WorkflowTask.WorkflowTaskList fork_tasks = 13; + int32 start_delay = 14; + SubWorkflowParams sub_workflow_param = 15; + repeated string join_on = 16; + string sink = 17; + bool optional = 18; + TaskDef task_definition = 19; + bool rate_limited = 20; +} diff --git a/jersey/build.gradle b/jersey/build.gradle index b6f77bc1bd..7e6ba28843 100644 --- a/jersey/build.gradle +++ b/jersey/build.gradle @@ -1,11 +1,13 @@ dependencies { - compile project(':conductor-common') - compile project(':conductor-core') - - compile "javax.ws.rs:jsr311-api:${revJsr311Api}" - compile "io.swagger:swagger-jaxrs:${revSwagger}" - compile "com.sun.jersey:jersey-bundle:${revJerseyBundle}" - - provided "javax.servlet:javax.servlet-api:${revServletApi}" + compile project(':conductor-common') + compile project(':conductor-core') + + compile "com.netflix.runtime:health-api:${revHealth}" + + compile "javax.ws.rs:jsr311-api:${revJsr311Api}" + compile "io.swagger:swagger-jaxrs:${revSwagger}" + compile "com.sun.jersey:jersey-bundle:${revJerseyBundle}" + + compileOnly "javax.servlet:javax.servlet-api:${revServletApi}" } diff --git a/jersey/src/main/java/com/netflix/conductor/server/resources/AdminResource.java b/jersey/src/main/java/com/netflix/conductor/server/resources/AdminResource.java index c2ccb140eb..13f0702295 100644 --- a/jersey/src/main/java/com/netflix/conductor/server/resources/AdminResource.java +++ b/jersey/src/main/java/com/netflix/conductor/server/resources/AdminResource.java @@ -16,8 +16,10 @@ package com.netflix.conductor.server.resources; -import java.util.List; -import java.util.Map; +import com.netflix.conductor.common.metadata.tasks.Task; +import com.netflix.conductor.service.AdminService; +import 
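[Editor's note] workflowtask.proto nests a WorkflowTaskList message because protobuf cannot declare a repeated field of repeated fields; each fork branch is therefore wrapped in its own list, which is the special case ProtoMapper handles by hand earlier in this change. A small sketch of a two-branch fork built with the generated builders; names and task types are illustrative.

    // Sketch: why fork_tasks is repeated WorkflowTaskList rather than a nested repeated field.
    import com.netflix.conductor.proto.WorkflowTaskPb;

    public class ForkTasksExample {
        public static void main(String[] args) {
            WorkflowTaskPb.WorkflowTask resize = WorkflowTaskPb.WorkflowTask.newBuilder()
                    .setName("resize_image").setTaskReferenceName("resize_image_ref").setType("SIMPLE").build();
            WorkflowTaskPb.WorkflowTask extract = WorkflowTaskPb.WorkflowTask.newBuilder()
                    .setName("extract_audio").setTaskReferenceName("extract_audio_ref").setType("SIMPLE").build();

            WorkflowTaskPb.WorkflowTask fork = WorkflowTaskPb.WorkflowTask.newBuilder()
                    .setName("media_fork")
                    .setTaskReferenceName("media_fork_ref")
                    .setType("FORK_JOIN")
                    // one WorkflowTaskList wrapper per parallel branch
                    .addForkTasks(WorkflowTaskPb.WorkflowTask.WorkflowTaskList.newBuilder().addTasks(resize))
                    .addForkTasks(WorkflowTaskPb.WorkflowTask.WorkflowTaskList.newBuilder().addTasks(extract))
                    .build();

            System.out.println(fork.getForkTasksCount() + " fork branches");
        }
    }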
io.swagger.annotations.Api; +import io.swagger.annotations.ApiOperation; import javax.inject.Inject; import javax.inject.Singleton; @@ -30,15 +32,8 @@ import javax.ws.rs.Produces; import javax.ws.rs.QueryParam; import javax.ws.rs.core.MediaType; - -import com.google.common.base.Preconditions; -import com.netflix.conductor.service.AdminService; -import org.apache.commons.lang3.StringUtils; - -import com.netflix.conductor.common.metadata.tasks.Task; - -import io.swagger.annotations.Api; -import io.swagger.annotations.ApiOperation; +import java.util.List; +import java.util.Map; /** * @author Viren @@ -50,7 +45,6 @@ @Consumes({ MediaType.APPLICATION_JSON }) @Singleton public class AdminResource { - private final AdminService adminService; @Inject diff --git a/jersey/src/main/java/com/netflix/conductor/server/resources/ApplicationExceptionMapper.java b/jersey/src/main/java/com/netflix/conductor/server/resources/ApplicationExceptionMapper.java index 76e91458ea..55206ee8a7 100644 --- a/jersey/src/main/java/com/netflix/conductor/server/resources/ApplicationExceptionMapper.java +++ b/jersey/src/main/java/com/netflix/conductor/server/resources/ApplicationExceptionMapper.java @@ -18,8 +18,13 @@ */ package com.netflix.conductor.server.resources; -import java.util.List; -import java.util.Map; +import com.google.common.annotations.VisibleForTesting; +import com.netflix.conductor.core.config.Configuration; +import com.netflix.conductor.core.execution.ApplicationException; +import com.netflix.conductor.metrics.Monitors; +import com.sun.jersey.api.core.HttpContext; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import javax.inject.Inject; import javax.inject.Singleton; @@ -28,18 +33,9 @@ import javax.ws.rs.core.Request; import javax.ws.rs.core.Response; import javax.ws.rs.core.UriInfo; -import javax.ws.rs.core.Variant; import javax.ws.rs.ext.ExceptionMapper; import javax.ws.rs.ext.Provider; - -import com.google.common.annotations.VisibleForTesting; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.netflix.conductor.core.config.Configuration; -import com.netflix.conductor.core.execution.ApplicationException; -import com.netflix.conductor.metrics.Monitors; -import com.sun.jersey.api.core.HttpContext; +import java.util.Map; /** * @author Viren @@ -48,11 +44,7 @@ @Provider @Singleton public class ApplicationExceptionMapper implements ExceptionMapper { - - private static Logger logger = LoggerFactory.getLogger(ApplicationExceptionMapper.class); - - private static List supportedMediaTypes = Variant.mediaTypes(MediaType.APPLICATION_JSON_TYPE, - MediaType.TEXT_HTML_TYPE, MediaType.TEXT_PLAIN_TYPE).add().build(); + private static Logger LOGGER = LoggerFactory.getLogger(ApplicationExceptionMapper.class); @Context private HttpContext context; @@ -62,7 +54,6 @@ public class ApplicationExceptionMapper implements ExceptionMapper request; - private String host; @Inject @@ -100,7 +91,7 @@ Request getRequest() { } private void logException(ApplicationException exception) { - logger.error(String.format("Error %s url: '%s'", exception.getClass().getSimpleName(), + LOGGER.error(String.format("Error %s url: '%s'", exception.getClass().getSimpleName(), getUriInfo().getPath()), exception); } diff --git a/jersey/src/main/java/com/netflix/conductor/server/resources/GenericExceptionMapper.java b/jersey/src/main/java/com/netflix/conductor/server/resources/GenericExceptionMapper.java index 38def7b5d7..990b41b5b0 100644 --- 
a/jersey/src/main/java/com/netflix/conductor/server/resources/GenericExceptionMapper.java +++ b/jersey/src/main/java/com/netflix/conductor/server/resources/GenericExceptionMapper.java @@ -15,8 +15,14 @@ */ package com.netflix.conductor.server.resources; -import java.util.List; -import java.util.Map; +import com.fasterxml.jackson.databind.exc.InvalidFormatException; +import com.netflix.conductor.core.config.Configuration; +import com.netflix.conductor.core.execution.ApplicationException; +import com.netflix.conductor.core.execution.ApplicationException.Code; +import com.netflix.conductor.metrics.Monitors; +import com.sun.jersey.api.core.HttpContext; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import javax.inject.Inject; import javax.inject.Singleton; @@ -24,19 +30,9 @@ import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; import javax.ws.rs.core.UriInfo; -import javax.ws.rs.core.Variant; import javax.ws.rs.ext.ExceptionMapper; import javax.ws.rs.ext.Provider; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.netflix.conductor.core.config.Configuration; -import com.netflix.conductor.core.execution.ApplicationException; -import com.netflix.conductor.core.execution.ApplicationException.Code; -import com.netflix.conductor.metrics.Monitors; -import com.sun.jersey.api.core.HttpContext; -import com.fasterxml.jackson.databind.exc.InvalidFormatException; +import java.util.Map; /** * @author Viren @@ -46,10 +42,8 @@ @Singleton public class GenericExceptionMapper implements ExceptionMapper { - private static Logger logger = LoggerFactory.getLogger(GenericExceptionMapper.class); - - private static List supportedMediaTypes = Variant.mediaTypes(MediaType.APPLICATION_JSON_TYPE, MediaType.TEXT_HTML_TYPE, MediaType.TEXT_PLAIN_TYPE).add().build(); - + private static final Logger LOGGER = LoggerFactory.getLogger(GenericExceptionMapper.class); + @Context private HttpContext context; @@ -65,7 +59,8 @@ public GenericExceptionMapper(Configuration config) { @Override public Response toResponse(Throwable exception) { - logger.error(String.format("Error %s url: '%s'", exception.getClass().getSimpleName(), uriInfo.getPath()), exception); + LOGGER.error(String.format("Error %s url: '%s'", exception.getClass().getSimpleName(), uriInfo.getPath()), exception); + Monitors.error("error", "error"); ApplicationException applicationException = null; diff --git a/jersey/src/main/java/com/netflix/conductor/server/resources/HealthCheckResource.java b/jersey/src/main/java/com/netflix/conductor/server/resources/HealthCheckResource.java new file mode 100644 index 0000000000..c23e5d21c0 --- /dev/null +++ b/jersey/src/main/java/com/netflix/conductor/server/resources/HealthCheckResource.java @@ -0,0 +1,31 @@ +package com.netflix.conductor.server.resources; + +import com.netflix.runtime.health.api.HealthCheckAggregator; +import com.netflix.runtime.health.api.HealthCheckStatus; + +import javax.inject.Inject; +import javax.inject.Singleton; +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.Produces; +import javax.ws.rs.core.MediaType; + +import io.swagger.annotations.Api; + +@Api(value = "/health", produces = MediaType.APPLICATION_JSON, tags = "Health Check") +@Path("/health") +@Produces({MediaType.APPLICATION_JSON}) +@Singleton +public class HealthCheckResource { + private final HealthCheckAggregator healthCheck; + + @Inject + public HealthCheckResource(HealthCheckAggregator healthCheck) { + this.healthCheck = healthCheck; + } + + @GET + public HealthCheckStatus 
doCheck() throws Exception { + return healthCheck.check().get(); + } +} diff --git a/jersey/src/main/java/com/netflix/conductor/server/resources/MetadataResource.java b/jersey/src/main/java/com/netflix/conductor/server/resources/MetadataResource.java index 0c62e0e4a0..7082ec116d 100644 --- a/jersey/src/main/java/com/netflix/conductor/server/resources/MetadataResource.java +++ b/jersey/src/main/java/com/netflix/conductor/server/resources/MetadataResource.java @@ -37,14 +37,12 @@ /** * @author Viren - * */ -@Api(value="/metadata", produces=MediaType.APPLICATION_JSON, consumes=MediaType.APPLICATION_JSON, tags="Metadata Management") +@Api(value = "/metadata", produces = MediaType.APPLICATION_JSON, consumes = MediaType.APPLICATION_JSON, tags = "Metadata Management") @Path("/metadata") @Produces({MediaType.APPLICATION_JSON}) @Consumes({MediaType.APPLICATION_JSON}) public class MetadataResource { - private final MetadataService metadataService; @Inject diff --git a/jersey/src/main/java/com/netflix/conductor/server/resources/TaskResource.java b/jersey/src/main/java/com/netflix/conductor/server/resources/TaskResource.java index d811c6da00..35f3457d90 100644 --- a/jersey/src/main/java/com/netflix/conductor/server/resources/TaskResource.java +++ b/jersey/src/main/java/com/netflix/conductor/server/resources/TaskResource.java @@ -25,8 +25,6 @@ import com.netflix.conductor.service.TaskService; import io.swagger.annotations.Api; import io.swagger.annotations.ApiOperation; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import javax.inject.Inject; import javax.inject.Singleton; @@ -44,7 +42,7 @@ import java.util.Map; /** - * + * * @author visingh * */ @@ -54,9 +52,6 @@ @Consumes({ MediaType.APPLICATION_JSON }) @Singleton public class TaskResource { - - private static final Logger logger = LoggerFactory.getLogger(TaskResource.class); - private final TaskService taskService; @Inject @@ -69,8 +64,8 @@ public TaskResource(TaskService taskService) { @ApiOperation("Poll for a task of a certain type") @Consumes({MediaType.WILDCARD}) public Task poll(@PathParam("tasktype") String taskType, - @QueryParam("workerid") String workerId, - @QueryParam("domain") String domain) { + @QueryParam("workerid") String workerId, + @QueryParam("domain") String domain) { return taskService.poll(taskType, workerId, domain); } @@ -91,8 +86,8 @@ public List batchPoll(@PathParam("tasktype") String taskType, @ApiOperation("Get in progress tasks. 
The results are paginated.") @Consumes({MediaType.WILDCARD}) public List getTasks(@PathParam("tasktype") String taskType, - @QueryParam("startKey") String startKey, - @QueryParam("count") @DefaultValue("100") Integer count) { + @QueryParam("startKey") String startKey, + @QueryParam("count") @DefaultValue("100") Integer count) { return taskService.getTasks(taskType, startKey, count); } @@ -101,7 +96,7 @@ public List getTasks(@PathParam("tasktype") String taskType, @ApiOperation("Get in progress task for a given workflow id.") @Consumes({MediaType.WILDCARD}) public Task getPendingTaskForWorkflow(@PathParam("workflowId") String workflowId, - @PathParam("taskRefName") String taskReferenceName) { + @PathParam("taskRefName") String taskReferenceName) { return taskService.getPendingTaskForWorkflow(workflowId, taskReferenceName); } @@ -116,22 +111,22 @@ public String updateTask(TaskResult taskResult) { @ApiOperation("Ack Task is received") @Consumes({MediaType.WILDCARD}) public String ack(@PathParam("taskId") String taskId, - @QueryParam("workerid") String workerId) { + @QueryParam("workerid") String workerId) { return taskService.ackTaskReceived(taskId, workerId); } - + @POST @Path("/{taskId}/log") @ApiOperation("Log Task Execution Details") public void log(@PathParam("taskId") String taskId, String log) { - taskService.log(taskId, log); + taskService.log(taskId, log); } - + @GET @Path("/{taskId}/log") @ApiOperation("Get Task Execution Logs") public List getTaskLogs(@PathParam("taskId") String taskId) { - return taskService.getTaskLogs(taskId); + return taskService.getTaskLogs(taskId); } @GET @@ -139,7 +134,7 @@ public List getTaskLogs(@PathParam("taskId") String taskId) { @ApiOperation("Get task by Id") @Consumes(MediaType.WILDCARD) public Task getTask(@PathParam("taskId") String taskId) { - return taskService.getTask(taskId); + return taskService.getTask(taskId); } @DELETE @@ -148,7 +143,7 @@ public Task getTask(@PathParam("taskId") String taskId) { @Consumes({MediaType.WILDCARD}) public void removeTaskFromQueue(@PathParam("taskType") String taskType, @PathParam("taskId") String taskId) { - taskService.removeTaskFromQueue(taskType, taskId); + taskService.removeTaskFromQueue(taskType, taskId); } @GET @@ -197,7 +192,7 @@ public List getAllPollData() { public String requeue() { return taskService.requeue(); } - + @POST @Path("/queue/requeue/{taskType}") @ApiOperation("Requeue pending tasks") @@ -206,19 +201,19 @@ public String requeue() { public String requeuePendingTask(@PathParam("taskType") String taskType) { return taskService.requeuePendingTask(taskType); } - + @ApiOperation(value="Search for tasks based in payload and other parameters", - notes="use sort options as sort=:ASC|DESC e.g. sort=name&sort=workflowId:DESC." + - " If order is not specified, defaults to ASC") + notes="use sort options as sort=:ASC|DESC e.g. sort=name&sort=workflowId:DESC." 
+ + " If order is not specified, defaults to ASC") @GET @Consumes(MediaType.WILDCARD) @Produces(MediaType.APPLICATION_JSON) @Path("/search") - public SearchResult search(@QueryParam("start") @DefaultValue("0") int start, - @QueryParam("size") @DefaultValue("100") int size, - @QueryParam("sort") String sort, - @QueryParam("freeText") @DefaultValue("*") String freeText, - @QueryParam("query") String query) { + public SearchResult search(@QueryParam("start") @DefaultValue("0") int start, + @QueryParam("size") @DefaultValue("100") int size, + @QueryParam("sort") String sort, + @QueryParam("freeText") @DefaultValue("*") String freeText, + @QueryParam("query") String query) { return taskService.search(start, size, sort, freeText, query); } @@ -229,4 +224,4 @@ public SearchResult search(@QueryParam("start") @DefaultValue("0") public ExternalStorageLocation getExternalStorageLocation(@QueryParam("path") String path) { return taskService.getExternalStorageLocation(path); } -} +} \ No newline at end of file diff --git a/jersey/src/main/java/com/netflix/conductor/server/resources/WorkflowResource.java b/jersey/src/main/java/com/netflix/conductor/server/resources/WorkflowResource.java index 0d7ed64c4b..5101aaa09b 100644 --- a/jersey/src/main/java/com/netflix/conductor/server/resources/WorkflowResource.java +++ b/jersey/src/main/java/com/netflix/conductor/server/resources/WorkflowResource.java @@ -250,4 +250,4 @@ public SearchResult searchWorkflowsByTasks(@QueryParam("start") public ExternalStorageLocation getExternalStorageLocation(@QueryParam("path") String path) { return workflowService.getExternalStorageLocation(path); } -} +} \ No newline at end of file diff --git a/mysql-persistence/build.gradle b/mysql-persistence/build.gradle index 5982268562..f7c5cdbe65 100644 --- a/mysql-persistence/build.gradle +++ b/mysql-persistence/build.gradle @@ -1,19 +1,20 @@ dependencies { - compile project(':conductor-core') - compile "com.google.inject:guice:${revGuice}" + compile project(':conductor-core') + compile "com.google.inject:guice:${revGuice}" - compile "commons-io:commons-io:${revCommonsIo}" - compile "mysql:mysql-connector-java:${revMySqlConnector}" - compile "com.zaxxer:HikariCP:${revHikariCP}" - compile "org.flywaydb:flyway-core:${revFlywayCore}" + compile "commons-io:commons-io:${revCommonsIo}" + compile "mysql:mysql-connector-java:${revMySqlConnector}" + compile "com.zaxxer:HikariCP:${revHikariCP}" + compile "org.flywaydb:flyway-core:${revFlywayCore}" - testCompile "ch.vorburger.mariaDB4j:mariaDB4j:${revMariaDB4j}" - //TODO Change the below deps to use the same version as one in versionsOfDependencies.gradle - testCompile 'ch.qos.logback:logback-core:1.2.3' - testCompile 'ch.qos.logback:logback-classic:1.2.3' + testCompile project(':conductor-core').sourceSets.test.output + testCompile "ch.vorburger.mariaDB4j:mariaDB4j:${revMariaDB4j}" + //TODO Change the below deps to use the same version as one in versionsOfDependencies.gradle + testCompile 'ch.qos.logback:logback-core:1.2.3' + testCompile 'ch.qos.logback:logback-classic:1.2.3' } test { - //the MySQL unit tests must run within the same JVM to share the same embedded DB - maxParallelForks = 1 + //the MySQL unit tests must run within the same JVM to share the same embedded DB + maxParallelForks = 1 } diff --git a/mysql-persistence/src/main/java/com/netflix/conductor/dao/mysql/MySQLBaseDAO.java b/mysql-persistence/src/main/java/com/netflix/conductor/dao/mysql/MySQLBaseDAO.java index 2869073750..3cba07cd47 100644 --- 
a/mysql-persistence/src/main/java/com/netflix/conductor/dao/mysql/MySQLBaseDAO.java +++ b/mysql-persistence/src/main/java/com/netflix/conductor/dao/mysql/MySQLBaseDAO.java @@ -5,6 +5,10 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.ImmutableList; import com.netflix.conductor.core.execution.ApplicationException; +import com.netflix.conductor.sql.ExecuteFunction; +import com.netflix.conductor.sql.QueryFunction; +import com.netflix.conductor.sql.TransactionalFunction; + import java.io.IOException; import java.sql.Connection; import java.sql.SQLException; diff --git a/mysql-persistence/src/main/java/com/netflix/conductor/dao/mysql/MySQLExecutionDAO.java b/mysql-persistence/src/main/java/com/netflix/conductor/dao/mysql/MySQLExecutionDAO.java index fa8cc56e8c..437a63b299 100644 --- a/mysql-persistence/src/main/java/com/netflix/conductor/dao/mysql/MySQLExecutionDAO.java +++ b/mysql-persistence/src/main/java/com/netflix/conductor/dao/mysql/MySQLExecutionDAO.java @@ -13,7 +13,6 @@ import com.netflix.conductor.core.execution.ApplicationException; import com.netflix.conductor.dao.ExecutionDAO; import com.netflix.conductor.dao.IndexDAO; -import com.netflix.conductor.dao.MetadataDAO; import com.netflix.conductor.metrics.Monitors; import javax.inject.Inject; @@ -25,6 +24,7 @@ import java.util.Date; import java.util.LinkedList; import java.util.List; +import java.util.Optional; import java.util.stream.Collectors; public class MySQLExecutionDAO extends MySQLBaseDAO implements ExecutionDAO { @@ -34,13 +34,10 @@ public class MySQLExecutionDAO extends MySQLBaseDAO implements ExecutionDAO { private IndexDAO indexer; - private MetadataDAO metadata; - @Inject - public MySQLExecutionDAO(IndexDAO indexer, MetadataDAO metadata, ObjectMapper om, DataSource dataSource) { + public MySQLExecutionDAO(IndexDAO indexer, ObjectMapper om, DataSource dataSource) { super(om, dataSource); this.indexer = indexer; - this.metadata = metadata; } private static String dateStr(Long timeInMs) { @@ -139,11 +136,14 @@ public boolean exceedsRateLimitPerFrequency(Task task) { @Override public boolean exceedsInProgressLimit(Task task) { - TaskDef taskDef = metadata.getTaskDef(task.getTaskDefName()); - if (taskDef == null) { + + Optional taskDefinition = task.getTaskDefinition(); + if (!taskDefinition.isPresent()) { return false; } + TaskDef taskDef = taskDefinition.get(); + int limit = taskDef.concurrencyLimit(); if (limit <= 0) { return false; @@ -267,7 +267,7 @@ public void removeWorkflow(String workflowId, boolean archiveWorkflow) { withTransaction(connection -> { removeWorkflowDefToWorkflowMapping(connection, wf); removeWorkflow(connection, workflowId); - removePendingWorkflow(connection, wf.getWorkflowType(), workflowId); + removePendingWorkflow(connection, wf.getWorkflowName(), workflowId); }); for (Task task : wf.getTasks()) { @@ -504,9 +504,9 @@ private String insertOrUpdateWorkflow(Workflow workflow, boolean update) { } if (terminal) { - removePendingWorkflow(tx, workflow.getWorkflowType(), workflow.getWorkflowId()); + removePendingWorkflow(tx, workflow.getWorkflowName(), workflow.getWorkflowId()); } else { - addPendingWorkflow(tx, workflow.getWorkflowType(), workflow.getWorkflowId()); + addPendingWorkflow(tx, workflow.getWorkflowName(), workflow.getWorkflowId()); } }); @@ -521,9 +521,9 @@ private void updateTask(Connection connection, Task task) { task.setEndTime(System.currentTimeMillis()); } - TaskDef taskDef = metadata.getTaskDef(task.getTaskDefName()); + Optional taskDefinition = 
task.getTaskDefinition(); - if (taskDef != null && taskDef.concurrencyLimit() > 0) { + if (taskDefinition.isPresent() && taskDefinition.get().concurrencyLimit() > 0) { boolean inProgress = task.getStatus() != null && task.getStatus().equals(Task.Status.IN_PROGRESS); updateInProgressStatus(connection, task, inProgress); } @@ -632,7 +632,7 @@ private void addWorkflowDefToWorkflowMapping(Connection connection, Workflow wor String INSERT_WORKFLOW_DEF_TO_WORKFLOW = "INSERT INTO workflow_def_to_workflow (workflow_def, date_str, workflow_id) VALUES (?, ?, ?)"; execute(connection, INSERT_WORKFLOW_DEF_TO_WORKFLOW, - q -> q.addParameter(workflow.getWorkflowType()).addParameter(dateStr(workflow.getCreateTime())) + q -> q.addParameter(workflow.getWorkflowName()).addParameter(dateStr(workflow.getCreateTime())) .addParameter(workflow.getWorkflowId()).executeUpdate()); } @@ -640,7 +640,7 @@ private void removeWorkflowDefToWorkflowMapping(Connection connection, Workflow String REMOVE_WORKFLOW_DEF_TO_WORKFLOW = "DELETE FROM workflow_def_to_workflow WHERE workflow_def = ? AND date_str = ? AND workflow_id = ?"; execute(connection, REMOVE_WORKFLOW_DEF_TO_WORKFLOW, - q -> q.addParameter(workflow.getWorkflowType()).addParameter(dateStr(workflow.getCreateTime())) + q -> q.addParameter(workflow.getWorkflowName()).addParameter(dateStr(workflow.getCreateTime())) .addParameter(workflow.getWorkflowId()).executeUpdate()); } diff --git a/mysql-persistence/src/main/java/com/netflix/conductor/dao/mysql/MySQLMetadataDAO.java b/mysql-persistence/src/main/java/com/netflix/conductor/dao/mysql/MySQLMetadataDAO.java index 4c43015864..eff70a2263 100644 --- a/mysql-persistence/src/main/java/com/netflix/conductor/dao/mysql/MySQLMetadataDAO.java +++ b/mysql-persistence/src/main/java/com/netflix/conductor/dao/mysql/MySQLMetadataDAO.java @@ -13,7 +13,11 @@ import javax.inject.Inject; import javax.sql.DataSource; import java.sql.Connection; -import java.util.*; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; @@ -33,11 +37,12 @@ public MySQLMetadataDAO(ObjectMapper om, DataSource dataSource, Configuration co int cacheRefreshTime = config.getIntProperty(PROP_TASKDEF_CACHE_REFRESH, DEFAULT_TASKDEF_CACHE_REFRESH_SECONDS); Executors.newSingleThreadScheduledExecutor() - .scheduleWithFixedDelay(this::refreshTaskDefs, cacheRefreshTime, cacheRefreshTime, TimeUnit.SECONDS); + .scheduleWithFixedDelay(this::refreshTaskDefs, cacheRefreshTime, cacheRefreshTime, TimeUnit.SECONDS); } @Override public String createTaskDef(TaskDef taskDef) { + validate(taskDef); if (null == taskDef.getCreateTime() || taskDef.getCreateTime() < 1) { taskDef.setCreateTime(System.currentTimeMillis()); } @@ -47,6 +52,7 @@ public String createTaskDef(TaskDef taskDef) { @Override public String updateTaskDef(TaskDef taskDef) { + validate(taskDef); taskDef.setUpdateTime(System.currentTimeMillis()); return insertOrUpdateTaskDef(taskDef); } @@ -85,6 +91,7 @@ public void removeTaskDef(String name) { @Override public void create(WorkflowDef def) { + validate(def); if (null == def.getCreateTime() || def.getCreateTime() == 0) { def.setCreateTime(System.currentTimeMillis()); } @@ -92,7 +99,7 @@ public void create(WorkflowDef def) { withTransaction(tx -> { if (workflowExists(tx, def)) { throw new ApplicationException(ApplicationException.Code.CONFLICT, - "Workflow with " + 
def.key() + " already exists!"); + "Workflow with " + def.key() + " already exists!"); } insertOrUpdateWorkflowDef(tx, def); @@ -101,26 +108,31 @@ public void create(WorkflowDef def) { @Override public void update(WorkflowDef def) { + validate(def); def.setUpdateTime(System.currentTimeMillis()); withTransaction(tx -> insertOrUpdateWorkflowDef(tx, def)); } @Override - public WorkflowDef getLatest(String name) { + public Optional getLatest(String name) { final String GET_LATEST_WORKFLOW_DEF_QUERY = "SELECT json_data FROM meta_workflow_def WHERE NAME = ? AND " + - "version = latest_version"; + "version = latest_version"; - return queryWithTransaction(GET_LATEST_WORKFLOW_DEF_QUERY, - q -> q.addParameter(name).executeAndFetchFirst(WorkflowDef.class)); + return Optional.ofNullable( + queryWithTransaction(GET_LATEST_WORKFLOW_DEF_QUERY, + q -> q.addParameter(name).executeAndFetchFirst(WorkflowDef.class)) + ); } @Override - public WorkflowDef get(String name, int version) { + public Optional get(String name, int version) { final String GET_WORKFLOW_DEF_QUERY = "SELECT json_data FROM meta_workflow_def WHERE NAME = ? AND version = ?"; - return queryWithTransaction(GET_WORKFLOW_DEF_QUERY, q -> q.addParameter(name) - .addParameter(version) - .executeAndFetchFirst(WorkflowDef.class)); + return Optional.ofNullable( + queryWithTransaction(GET_WORKFLOW_DEF_QUERY, q -> q.addParameter(name) + .addParameter(version) + .executeAndFetchFirst(WorkflowDef.class)) + ); } @Override @@ -150,7 +162,7 @@ public List getAll() { public List getAllLatest() { final String GET_ALL_LATEST_WORKFLOW_DEF_QUERY = "SELECT json_data FROM meta_workflow_def WHERE version = " + - "latest_version"; + "latest_version"; return queryWithTransaction(GET_ALL_LATEST_WORKFLOW_DEF_QUERY, q -> q.executeAndFetch(WorkflowDef.class)); } @@ -158,10 +170,10 @@ public List getAllLatest() { @Override public List getAllVersions(String name) { final String GET_ALL_VERSIONS_WORKFLOW_DEF_QUERY = "SELECT json_data FROM meta_workflow_def WHERE name = ? 
" + - "ORDER BY version"; + "ORDER BY version"; return queryWithTransaction(GET_ALL_VERSIONS_WORKFLOW_DEF_QUERY, - q -> q.addParameter(name).executeAndFetch(WorkflowDef.class)); + q -> q.addParameter(name).executeAndFetch(WorkflowDef.class)); } @Override @@ -169,19 +181,19 @@ public void addEventHandler(EventHandler eventHandler) { Preconditions.checkNotNull(eventHandler.getName(), "EventHandler name cannot be null"); final String INSERT_EVENT_HANDLER_QUERY = "INSERT INTO meta_event_handler (name, event, active, json_data) " + - "VALUES (?, ?, ?, ?)"; + "VALUES (?, ?, ?, ?)"; withTransaction(tx -> { if (getEventHandler(tx, eventHandler.getName()) != null) { throw new ApplicationException(ApplicationException.Code.CONFLICT, - "EventHandler with name " + eventHandler.getName() + " already exists!"); + "EventHandler with name " + eventHandler.getName() + " already exists!"); } execute(tx, INSERT_EVENT_HANDLER_QUERY, q -> q.addParameter(eventHandler.getName()) - .addParameter(eventHandler.getEvent()) - .addParameter(eventHandler.isActive()) - .addJsonParameter(eventHandler) - .executeUpdate()); + .addParameter(eventHandler.getEvent()) + .addParameter(eventHandler.isActive()) + .addJsonParameter(eventHandler) + .executeUpdate()); }); } @@ -191,22 +203,22 @@ public void updateEventHandler(EventHandler eventHandler) { //@formatter:off final String UPDATE_EVENT_HANDLER_QUERY = "UPDATE meta_event_handler SET " + - "event = ?, active = ?, json_data = ?, " + - "modified_on = CURRENT_TIMESTAMP WHERE name = ?"; + "event = ?, active = ?, json_data = ?, " + + "modified_on = CURRENT_TIMESTAMP WHERE name = ?"; //@formatter:on withTransaction(tx -> { EventHandler existing = getEventHandler(tx, eventHandler.getName()); if (existing == null) { throw new ApplicationException(ApplicationException.Code.NOT_FOUND, - "EventHandler with name " + eventHandler.getName() + " not found!"); + "EventHandler with name " + eventHandler.getName() + " not found!"); } execute(tx, UPDATE_EVENT_HANDLER_QUERY, q -> q.addParameter(eventHandler.getEvent()) - .addParameter(eventHandler.isActive()) - .addJsonParameter(eventHandler) - .addParameter(eventHandler.getName()) - .executeUpdate()); + .addParameter(eventHandler.isActive()) + .addJsonParameter(eventHandler) + .addParameter(eventHandler.getName()) + .executeUpdate()); }); } @@ -218,7 +230,7 @@ public void removeEventHandlerStatus(String name) { EventHandler existing = getEventHandler(tx, name); if (existing == null) { throw new ApplicationException(ApplicationException.Code.NOT_FOUND, - "EventHandler with name " + name + " not found!"); + "EventHandler with name " + name + " not found!"); } execute(tx, DELETE_EVENT_HANDLER_QUERY, q -> q.addParameter(name).executeDelete()); @@ -250,6 +262,28 @@ public List getEventHandlersForEvent(String event, boolean activeO }); } + /** + * Use {@link Preconditions} to check for required {@link TaskDef} fields, throwing a Runtime exception if + * validations fail. + * + * @param taskDef The {@code TaskDef} to check. + */ + private void validate(TaskDef taskDef) { + Preconditions.checkNotNull(taskDef, "TaskDef object cannot be null"); + Preconditions.checkNotNull(taskDef.getName(), "TaskDef name cannot be null"); + } + + /** + * Use {@link Preconditions} to check for required {@link WorkflowDef} fields, throwing a Runtime exception if + * validations fail. + * + * @param def The {@code WorkflowDef} to check. 
+ */ + private void validate(WorkflowDef def) { + Preconditions.checkNotNull(def, "WorkflowDef object cannot be null"); + Preconditions.checkNotNull(def.getName(), "WorkflowDef name cannot be null"); + } + /** * Retrieve a {@link EventHandler} by {@literal name}. * @@ -261,7 +295,7 @@ private EventHandler getEventHandler(Connection connection, String name) { final String READ_ONE_EVENT_HANDLER_QUERY = "SELECT json_data FROM meta_event_handler WHERE name = ?"; return query(connection, READ_ONE_EVENT_HANDLER_QUERY, - q -> q.addParameter(name).executeAndFetchFirst(EventHandler.class)); + q -> q.addParameter(name).executeAndFetchFirst(EventHandler.class)); } /** @@ -273,10 +307,10 @@ private EventHandler getEventHandler(Connection connection, String name) { */ private Boolean workflowExists(Connection connection, WorkflowDef def) { final String CHECK_WORKFLOW_DEF_EXISTS_QUERY = "SELECT COUNT(*) FROM meta_workflow_def WHERE name = ? AND " + - "version = ?"; + "version = ?"; return query(connection, CHECK_WORKFLOW_DEF_EXISTS_QUERY, - q -> q.addParameter(def.getName()).addParameter(def.getVersion()).exists()); + q -> q.addParameter(def.getName()).addParameter(def.getVersion()).exists()); } /** @@ -288,7 +322,7 @@ private Boolean workflowExists(Connection connection, WorkflowDef def) { */ private Optional getLatestVersion(Connection tx, WorkflowDef def) { final String GET_LATEST_WORKFLOW_DEF_VERSION = "SELECT max(version) AS version FROM meta_workflow_def WHERE " + - "name = ?"; + "name = ?"; Integer val = query(tx, GET_LATEST_WORKFLOW_DEF_VERSION, q -> { q.addParameter(def.getName()); @@ -312,7 +346,7 @@ private Optional getLatestVersion(Connection tx, WorkflowDef def) { */ private void updateLatestVersion(Connection tx, WorkflowDef def) { final String UPDATE_WORKFLOW_DEF_LATEST_VERSION_QUERY = "UPDATE meta_workflow_def SET latest_version = ? " + - "WHERE name = ?"; + "WHERE name = ?"; execute(tx, UPDATE_WORKFLOW_DEF_LATEST_VERSION_QUERY, q -> q.addParameter(def.getVersion()).addParameter(def.getName()).executeUpdate()); @@ -320,26 +354,26 @@ private void updateLatestVersion(Connection tx, WorkflowDef def) { private void insertOrUpdateWorkflowDef(Connection tx, WorkflowDef def) { final String INSERT_WORKFLOW_DEF_QUERY = "INSERT INTO meta_workflow_def (name, version, json_data) VALUES (?," + - " ?, ?)"; + " ?, ?)"; Optional version = getLatestVersion(tx, def); if (!version.isPresent() || version.get() < def.getVersion()) { execute(tx, INSERT_WORKFLOW_DEF_QUERY, q -> q.addParameter(def.getName()) - .addParameter(def.getVersion()) - .addJsonParameter(def) - .executeUpdate()); + .addParameter(def.getVersion()) + .addJsonParameter(def) + .executeUpdate()); } else { //@formatter:off final String UPDATE_WORKFLOW_DEF_QUERY = - "UPDATE meta_workflow_def " + - "SET json_data = ?, modified_on = CURRENT_TIMESTAMP " + - "WHERE name = ? AND version = ?"; + "UPDATE meta_workflow_def " + + "SET json_data = ?, modified_on = CURRENT_TIMESTAMP " + + "WHERE name = ? 
AND version = ?"; //@formatter:on execute(tx, UPDATE_WORKFLOW_DEF_QUERY, q -> q.addJsonParameter(def) - .addParameter(def.getName()) - .addParameter(def.getVersion()) - .executeUpdate()); + .addParameter(def.getName()) + .addParameter(def.getVersion()) + .executeUpdate()); } updateLatestVersion(tx, def); @@ -391,7 +425,7 @@ private TaskDef getTaskDefFromDB(String name) { final String READ_ONE_TASKDEF_QUERY = "SELECT json_data FROM meta_task_def WHERE name = ?"; return queryWithTransaction(READ_ONE_TASKDEF_QUERY, - q -> q.addParameter(name).executeAndFetchFirst(TaskDef.class)); + q -> q.addParameter(name).executeAndFetchFirst(TaskDef.class)); } private String insertOrUpdateTaskDef(TaskDef taskDef) { diff --git a/mysql-persistence/src/main/java/com/netflix/conductor/dao/mysql/MySQLWorkflowModule.java b/mysql-persistence/src/main/java/com/netflix/conductor/dao/mysql/MySQLWorkflowModule.java deleted file mode 100644 index d28cdafdd4..0000000000 --- a/mysql-persistence/src/main/java/com/netflix/conductor/dao/mysql/MySQLWorkflowModule.java +++ /dev/null @@ -1,74 +0,0 @@ -package com.netflix.conductor.dao.mysql; - -import javax.sql.DataSource; - -import org.flywaydb.core.Flyway; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.inject.AbstractModule; -import com.google.inject.Provides; -import com.google.inject.Singleton; -import com.netflix.conductor.core.config.Configuration; -import com.netflix.conductor.dao.ExecutionDAO; -import com.netflix.conductor.dao.MetadataDAO; -import com.netflix.conductor.dao.QueueDAO; -import com.zaxxer.hikari.HikariDataSource; - -/** - * @author mustafa - */ -public class MySQLWorkflowModule extends AbstractModule { - protected final Logger logger = LoggerFactory.getLogger(getClass()); - - @Provides - @Singleton - public DataSource getDataSource(Configuration config) { - HikariDataSource dataSource = new HikariDataSource(); - dataSource.setJdbcUrl(config.getProperty("jdbc.url", "jdbc:mysql://localhost:3306/conductor")); - dataSource.setUsername(config.getProperty("jdbc.username", "conductor")); - dataSource.setPassword(config.getProperty("jdbc.password", "password")); - dataSource.setAutoCommit(false); - - dataSource.setMaximumPoolSize(config.getIntProperty("jdbc.maxPoolSize", 20)); - dataSource.setMinimumIdle(config.getIntProperty("jdbc.minIdleSize", 5)); - dataSource.setIdleTimeout(config.getIntProperty("jdbc.idleTimeout", 1000*300)); - dataSource.setTransactionIsolation(config.getProperty("jdbc.isolationLevel", "TRANSACTION_REPEATABLE_READ")); - - flywayMigrate(config, dataSource); - - return dataSource; - } - - @Override - protected void configure() { - bind(MetadataDAO.class).to(MySQLMetadataDAO.class); - bind(ExecutionDAO.class).to(MySQLExecutionDAO.class); - bind(QueueDAO.class).to(MySQLQueueDAO.class); - } - - private void flywayMigrate(Configuration config, DataSource dataSource) { - boolean enabled = getBool(config.getProperty("flyway.enabled", "true"), true); - if(!enabled) { - logger.debug("Flyway migrations are disabled"); - return; - } - - String migrationTable = config.getProperty("flyway.table", null); - - Flyway flyway = new Flyway(); - if(null != migrationTable) { - logger.debug("Using Flyway migration table '{}'", migrationTable); - flyway.setTable(migrationTable); - } - - flyway.setDataSource(dataSource); - flyway.setPlaceholderReplacement(false); - flyway.migrate(); - } - - private boolean getBool(String value, boolean defaultValue) { - if(null == value || value.trim().length() == 0){ return defaultValue; } - return 
Boolean.valueOf(value.trim()); - } -} diff --git a/mysql-persistence/src/main/java/com/netflix/conductor/dao/mysql/Query.java b/mysql-persistence/src/main/java/com/netflix/conductor/dao/mysql/Query.java index 086c5490bd..2eb43db6df 100644 --- a/mysql-persistence/src/main/java/com/netflix/conductor/dao/mysql/Query.java +++ b/mysql-persistence/src/main/java/com/netflix/conductor/dao/mysql/Query.java @@ -1,26 +1,28 @@ package com.netflix.conductor.dao.mysql; -import static com.netflix.conductor.core.execution.ApplicationException.Code; - import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import com.netflix.conductor.core.execution.ApplicationException; +import com.netflix.conductor.sql.ResultSetHandler; + +import org.apache.commons.lang3.math.NumberUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import java.io.IOException; import java.sql.Connection; +import java.sql.Date; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Timestamp; import java.util.ArrayList; import java.util.Collection; -import java.sql.Date; import java.util.List; import java.util.concurrent.atomic.AtomicInteger; -import org.apache.commons.lang3.math.NumberUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import static com.netflix.conductor.core.execution.ApplicationException.Code; /** * Represents a {@link PreparedStatement} that is wrapped with convenience methods and utilities. diff --git a/mysql-persistence/src/main/java/com/netflix/conductor/mysql/MySQLConfiguration.java b/mysql-persistence/src/main/java/com/netflix/conductor/mysql/MySQLConfiguration.java new file mode 100644 index 0000000000..e0bfff664a --- /dev/null +++ b/mysql-persistence/src/main/java/com/netflix/conductor/mysql/MySQLConfiguration.java @@ -0,0 +1,96 @@ +package com.netflix.conductor.mysql; + +import com.netflix.conductor.core.config.Configuration; +import com.zaxxer.hikari.HikariConfig; + +import java.util.Optional; +import java.util.concurrent.TimeUnit; + +public interface MySQLConfiguration extends Configuration { + + String JDBC_URL_PROPERTY_NAME = "jdbc.url"; + String JDBC_URL_DEFAULT_VALUE = "jdbc:mysql://localhost:3306/conductor"; + + String JDBC_USER_NAME_PROPERTY_NAME = "jdbc.username"; + String JDBC_USER_NAME_DEFAULT_VALUE = "conductor"; + + String JDBC_PASSWORD_PROPERTY_NAME = "jdbc.password"; + String JDBC_PASSWORD_DEFAULT_VALUE = "password"; + + String FLYWAY_ENABLED_PROPERTY_NAME = "flyway.enabled"; + boolean FLYWAY_ENABLED_DEFAULT_VALUE = true; + + String FLYWAY_TABLE_PROPERTY_NAME = "flyway.table"; + Optional FLYWAY_TABLE_DEFAULT_VALUE = Optional.empty(); + + // The defaults are currently in line with the HikariConfig defaults, which are unfortunately private. 
+ String CONNECTION_POOL_MAX_SIZE_PROPERTY_NAME = "conductor.mysql.connection.pool.size.max"; + int CONNECTION_POOL_MAX_SIZE_DEFAULT_VALUE = -1; + + String CONNECTION_POOL_MINIMUM_IDLE_PROPERTY_NAME = "conductor.mysql.connection.pool.idle.min"; + int CONNECTION_POOL_MINIMUM_IDLE_DEFAULT_VALUE = -1; + + String CONNECTION_MAX_LIFETIME_PROPERTY_NAME = "conductor.mysql.connection.lifetime.max"; + long CONNECTION_MAX_LIFETIME_DEFAULT_VALUE = TimeUnit.MINUTES.toMillis(30); + + String CONNECTION_IDLE_TIMEOUT_PROPERTY_NAME = "conductor.mysql.connection.idle.timeout"; + long CONNECTION_IDLE_TIMEOUT_DEFAULT_VALUE = TimeUnit.MINUTES.toMillis(10); + + String CONNECTION_TIMEOUT_PROPERTY_NAME = "conductor.mysql.connection.timeout"; + long CONNECTION_TIMEOUT_DEFAULT_VALUE = TimeUnit.SECONDS.toMillis(30); + + String ISOLATION_LEVEL_PROPERTY_NAME = "conductor.mysql.transaction.isolation.level"; + String ISOLATION_LEVEL_DEFAULT_VALUE = ""; + + String AUTO_COMMIT_PROPERTY_NAME = "conductor.mysql.autocommit"; + // This is consistent with the current default when building the Hikari Client. + boolean AUTO_COMMIT_DEFAULT_VALUE = false; + + default String getJdbcUrl() { + return getProperty(JDBC_URL_PROPERTY_NAME, JDBC_URL_DEFAULT_VALUE); + } + + default String getJdbcUserName() { + return getProperty(JDBC_USER_NAME_PROPERTY_NAME, JDBC_USER_NAME_DEFAULT_VALUE); + } + + default String getJdbcPassword() { + return getProperty(JDBC_PASSWORD_PROPERTY_NAME, JDBC_PASSWORD_DEFAULT_VALUE); + } + + default boolean isFlywayEnabled() { + return getBoolProperty(FLYWAY_ENABLED_PROPERTY_NAME, FLYWAY_ENABLED_DEFAULT_VALUE); + } + + default Optional getFlywayTable() { + return Optional.ofNullable(getProperty(FLYWAY_TABLE_PROPERTY_NAME, null)); + } + + default int getConnectionPoolMaxSize() { + return getIntProperty(CONNECTION_POOL_MAX_SIZE_PROPERTY_NAME, CONNECTION_POOL_MAX_SIZE_DEFAULT_VALUE); + } + + default int getConnectionPoolMinIdle() { + return getIntProperty(CONNECTION_POOL_MINIMUM_IDLE_PROPERTY_NAME, CONNECTION_POOL_MINIMUM_IDLE_DEFAULT_VALUE); + } + + default long getConnectionMaxLifetime() { + return getLongProperty(CONNECTION_MAX_LIFETIME_PROPERTY_NAME, CONNECTION_MAX_LIFETIME_DEFAULT_VALUE); + } + + default long getConnectionIdleTimeout() { + return getLongProperty(CONNECTION_IDLE_TIMEOUT_PROPERTY_NAME, CONNECTION_IDLE_TIMEOUT_DEFAULT_VALUE); + } + + default long getConnectionTimeout() { + return getLongProperty(CONNECTION_TIMEOUT_PROPERTY_NAME, CONNECTION_TIMEOUT_DEFAULT_VALUE); + } + + default String getTransactionIsolationLevel() { + return getProperty(ISOLATION_LEVEL_PROPERTY_NAME, ISOLATION_LEVEL_DEFAULT_VALUE); + } + + default boolean isAutoCommit() { + return getBoolProperty(AUTO_COMMIT_PROPERTY_NAME, AUTO_COMMIT_DEFAULT_VALUE); + } +} diff --git a/mysql-persistence/src/main/java/com/netflix/conductor/mysql/MySQLDataSourceProvider.java b/mysql-persistence/src/main/java/com/netflix/conductor/mysql/MySQLDataSourceProvider.java new file mode 100644 index 0000000000..264334c538 --- /dev/null +++ b/mysql-persistence/src/main/java/com/netflix/conductor/mysql/MySQLDataSourceProvider.java @@ -0,0 +1,77 @@ +package com.netflix.conductor.mysql; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; + +import com.zaxxer.hikari.HikariConfig; +import com.zaxxer.hikari.HikariDataSource; + +import org.flywaydb.core.Flyway; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.concurrent.ThreadFactory; + +import javax.inject.Inject; +import javax.inject.Provider; +import 
javax.sql.DataSource; + +public class MySQLDataSourceProvider implements Provider { + private static final Logger logger = LoggerFactory.getLogger(MySQLDataSourceProvider.class); + + private final MySQLConfiguration configuration; + + @Inject + public MySQLDataSourceProvider(MySQLConfiguration configuration) { + this.configuration = configuration; + } + + @Override + public DataSource get() { + HikariDataSource dataSource = new HikariDataSource(createConfiguration()); + dataSource.setJdbcUrl(configuration.getJdbcUrl()); + dataSource.setUsername(configuration.getJdbcUserName()); + dataSource.setPassword(configuration.getJdbcPassword()); + dataSource.setAutoCommit(false); + flywayMigrate(dataSource); + + return dataSource; + } + + private HikariConfig createConfiguration(){ + HikariConfig cfg = new HikariConfig(); + cfg.setMaximumPoolSize(configuration.getConnectionPoolMaxSize()); + cfg.setMinimumIdle(configuration.getConnectionPoolMinIdle()); + cfg.setMaxLifetime(configuration.getConnectionMaxLifetime()); + cfg.setIdleTimeout(configuration.getConnectionIdleTimeout()); + cfg.setConnectionTimeout(configuration.getConnectionTimeout()); + cfg.setTransactionIsolation(configuration.getTransactionIsolationLevel()); + cfg.setAutoCommit(configuration.isAutoCommit()); + + ThreadFactory tf = new ThreadFactoryBuilder() + .setDaemon(true) + .setNameFormat("hikari-mysql-%d") + .build(); + + cfg.setThreadFactory(tf); + return cfg; + } + // TODO Move this into a class that has complete lifecycle for the connection, i.e. startup and shutdown. + private void flywayMigrate(DataSource dataSource) { + boolean enabled = configuration.isFlywayEnabled(); + if (!enabled) { + logger.debug("Flyway migrations are disabled"); + return; + } + + + Flyway flyway = new Flyway(); + configuration.getFlywayTable().ifPresent(tableName -> { + logger.debug("Using Flyway migration table '{}'", tableName); + flyway.setTable(tableName); + }); + + flyway.setDataSource(dataSource); + flyway.setPlaceholderReplacement(false); + flyway.migrate(); + } +} diff --git a/mysql-persistence/src/main/java/com/netflix/conductor/mysql/MySQLWorkflowModule.java b/mysql-persistence/src/main/java/com/netflix/conductor/mysql/MySQLWorkflowModule.java new file mode 100644 index 0000000000..4bc03fe9de --- /dev/null +++ b/mysql-persistence/src/main/java/com/netflix/conductor/mysql/MySQLWorkflowModule.java @@ -0,0 +1,29 @@ +package com.netflix.conductor.mysql; + +import com.google.inject.AbstractModule; +import com.google.inject.Scopes; + +import com.netflix.conductor.dao.ExecutionDAO; +import com.netflix.conductor.dao.MetadataDAO; +import com.netflix.conductor.dao.QueueDAO; +import com.netflix.conductor.dao.mysql.MySQLExecutionDAO; +import com.netflix.conductor.dao.mysql.MySQLMetadataDAO; +import com.netflix.conductor.dao.mysql.MySQLQueueDAO; + +import javax.sql.DataSource; + +/** + * @author mustafa + */ +public class MySQLWorkflowModule extends AbstractModule { + + @Override + protected void configure() { + bind(MySQLConfiguration.class).to(SystemPropertiesMySQLConfiguration.class); + bind(DataSource.class).toProvider(MySQLDataSourceProvider.class).in(Scopes.SINGLETON); + bind(MetadataDAO.class).to(MySQLMetadataDAO.class); + bind(ExecutionDAO.class).to(MySQLExecutionDAO.class); + bind(QueueDAO.class).to(MySQLQueueDAO.class); + } + +} diff --git a/mysql-persistence/src/main/java/com/netflix/conductor/mysql/SystemPropertiesMySQLConfiguration.java b/mysql-persistence/src/main/java/com/netflix/conductor/mysql/SystemPropertiesMySQLConfiguration.java new file 
mode 100644 index 0000000000..1ffa1e0cff --- /dev/null +++ b/mysql-persistence/src/main/java/com/netflix/conductor/mysql/SystemPropertiesMySQLConfiguration.java @@ -0,0 +1,6 @@ +package com.netflix.conductor.mysql; + +import com.netflix.conductor.core.config.SystemPropertiesConfiguration; + +public class SystemPropertiesMySQLConfiguration extends SystemPropertiesConfiguration implements MySQLConfiguration { +} diff --git a/mysql-persistence/src/main/java/com/netflix/conductor/dao/mysql/ExecuteFunction.java b/mysql-persistence/src/main/java/com/netflix/conductor/sql/ExecuteFunction.java similarity index 74% rename from mysql-persistence/src/main/java/com/netflix/conductor/dao/mysql/ExecuteFunction.java rename to mysql-persistence/src/main/java/com/netflix/conductor/sql/ExecuteFunction.java index ad3b4d7f94..f1cabce830 100644 --- a/mysql-persistence/src/main/java/com/netflix/conductor/dao/mysql/ExecuteFunction.java +++ b/mysql-persistence/src/main/java/com/netflix/conductor/sql/ExecuteFunction.java @@ -1,4 +1,6 @@ -package com.netflix.conductor.dao.mysql; +package com.netflix.conductor.sql; + +import com.netflix.conductor.dao.mysql.Query; import java.sql.SQLException; diff --git a/mysql-persistence/src/main/java/com/netflix/conductor/dao/mysql/QueryFunction.java b/mysql-persistence/src/main/java/com/netflix/conductor/sql/QueryFunction.java similarity index 74% rename from mysql-persistence/src/main/java/com/netflix/conductor/dao/mysql/QueryFunction.java rename to mysql-persistence/src/main/java/com/netflix/conductor/sql/QueryFunction.java index f2ada44783..6f6a304659 100644 --- a/mysql-persistence/src/main/java/com/netflix/conductor/dao/mysql/QueryFunction.java +++ b/mysql-persistence/src/main/java/com/netflix/conductor/sql/QueryFunction.java @@ -1,4 +1,6 @@ -package com.netflix.conductor.dao.mysql; +package com.netflix.conductor.sql; + +import com.netflix.conductor.dao.mysql.Query; import java.sql.SQLException; diff --git a/mysql-persistence/src/main/java/com/netflix/conductor/dao/mysql/ResultSetHandler.java b/mysql-persistence/src/main/java/com/netflix/conductor/sql/ResultSetHandler.java similarity index 77% rename from mysql-persistence/src/main/java/com/netflix/conductor/dao/mysql/ResultSetHandler.java rename to mysql-persistence/src/main/java/com/netflix/conductor/sql/ResultSetHandler.java index 28e80ce1af..ddaa145ad6 100644 --- a/mysql-persistence/src/main/java/com/netflix/conductor/dao/mysql/ResultSetHandler.java +++ b/mysql-persistence/src/main/java/com/netflix/conductor/sql/ResultSetHandler.java @@ -1,4 +1,6 @@ -package com.netflix.conductor.dao.mysql; +package com.netflix.conductor.sql; + +import com.netflix.conductor.dao.mysql.Query; import java.sql.ResultSet; import java.sql.SQLException; diff --git a/mysql-persistence/src/main/java/com/netflix/conductor/dao/mysql/TransactionalFunction.java b/mysql-persistence/src/main/java/com/netflix/conductor/sql/TransactionalFunction.java similarity index 86% rename from mysql-persistence/src/main/java/com/netflix/conductor/dao/mysql/TransactionalFunction.java rename to mysql-persistence/src/main/java/com/netflix/conductor/sql/TransactionalFunction.java index 4f8af2d372..00a6119316 100644 --- a/mysql-persistence/src/main/java/com/netflix/conductor/dao/mysql/TransactionalFunction.java +++ b/mysql-persistence/src/main/java/com/netflix/conductor/sql/TransactionalFunction.java @@ -1,4 +1,4 @@ -package com.netflix.conductor.dao.mysql; +package com.netflix.conductor.sql; import java.sql.Connection; import java.sql.SQLException; diff --git 
a/mysql-persistence/src/test/java/com/netflix/conductor/config/TestConfiguration.java b/mysql-persistence/src/test/java/com/netflix/conductor/config/TestConfiguration.java index 747fb85d3e..2fcfde56c9 100644 --- a/mysql-persistence/src/test/java/com/netflix/conductor/config/TestConfiguration.java +++ b/mysql-persistence/src/test/java/com/netflix/conductor/config/TestConfiguration.java @@ -17,7 +17,10 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; -import com.netflix.conductor.core.config.Configuration; + +import com.netflix.conductor.mysql.MySQLConfiguration; + +import java.util.Map; import java.util.Map; @@ -25,7 +28,7 @@ * @author Viren * */ -public class TestConfiguration implements Configuration { +public class TestConfiguration implements MySQLConfiguration { private Map testProperties = Maps.newHashMap(ImmutableMap.of("test", "dummy")); @@ -110,7 +113,12 @@ public String getProperty(String string, String def) { return val != null ? val : def; } - public void setProperty(String key, String value) { + @Override + public boolean getBooleanProperty(String name, boolean defaultValue) { + return false; + } + + public void setProperty(String key, String value) { testProperties.put(key, value); } diff --git a/mysql-persistence/src/test/java/com/netflix/conductor/dao/mysql/MySQLBaseDAOTest.java b/mysql-persistence/src/test/java/com/netflix/conductor/dao/mysql/MySQLBaseDAOTest.java deleted file mode 100644 index e98d11770f..0000000000 --- a/mysql-persistence/src/test/java/com/netflix/conductor/dao/mysql/MySQLBaseDAOTest.java +++ /dev/null @@ -1,106 +0,0 @@ -package com.netflix.conductor.dao.mysql; - -import com.fasterxml.jackson.annotation.JsonInclude; -import com.fasterxml.jackson.databind.DeserializationFeature; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.netflix.conductor.config.TestConfiguration; -import com.netflix.conductor.core.config.Configuration; -import com.zaxxer.hikari.HikariDataSource; - -import org.flywaydb.core.Flyway; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.concurrent.atomic.AtomicBoolean; - -import javax.sql.DataSource; - -import ch.vorburger.mariadb4j.DB; - - -@SuppressWarnings("Duplicates") -public class MySQLBaseDAOTest { - protected final Logger logger = LoggerFactory.getLogger(getClass()); - protected final DataSource dataSource; - protected final TestConfiguration testConfiguration = new TestConfiguration(); - protected final ObjectMapper objectMapper = createObjectMapper(); - protected final DB db = EmbeddedDatabase.INSTANCE.getDB(); - - static AtomicBoolean migrated = new AtomicBoolean(false); - - MySQLBaseDAOTest() { - testConfiguration.setProperty("jdbc.url", "jdbc:mysql://localhost:33307/conductor"); - testConfiguration.setProperty("jdbc.username", "root"); - testConfiguration.setProperty("jdbc.password", ""); - this.dataSource = getDataSource(testConfiguration); - } - - private DataSource getDataSource(Configuration config) { - - HikariDataSource dataSource = new HikariDataSource(); - dataSource.setJdbcUrl(config.getProperty("jdbc.url", "jdbc:mysql://localhost:33307/conductor")); - dataSource.setUsername(config.getProperty("jdbc.username", "conductor")); - dataSource.setPassword(config.getProperty("jdbc.password", "password")); - dataSource.setAutoCommit(false); - - // Prevent DB from getting exhausted during rapid testing - 
dataSource.setMaximumPoolSize(8); - - if (!migrated.get()) { - flywayMigrate(dataSource); - } - - return dataSource; - } - - private synchronized static void flywayMigrate(DataSource dataSource) { - if(migrated.get()) { - return; - } - - synchronized (MySQLBaseDAOTest.class) { - Flyway flyway = new Flyway(); - flyway.setDataSource(dataSource); - flyway.setPlaceholderReplacement(false); - flyway.migrate(); - migrated.getAndSet(true); - } - } - - private static ObjectMapper createObjectMapper() { - ObjectMapper om = new ObjectMapper(); - om.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); - om.configure(DeserializationFeature.FAIL_ON_IGNORED_PROPERTIES, false); - om.configure(DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES, false); - om.setSerializationInclusion(JsonInclude.Include.NON_NULL); - om.setSerializationInclusion(JsonInclude.Include.NON_EMPTY); - return om; - } - - protected void resetAllData() { - logger.info("Resetting data for test"); - try (Connection connection = dataSource.getConnection()) { - try(ResultSet rs = connection.prepareStatement("SHOW TABLES").executeQuery(); - PreparedStatement keysOn = connection.prepareStatement("SET FOREIGN_KEY_CHECKS=1")) { - try(PreparedStatement keysOff = connection.prepareStatement("SET FOREIGN_KEY_CHECKS=0")){ - keysOff.execute(); - while(rs.next()) { - String table = rs.getString(1); - try(PreparedStatement ps = connection.prepareStatement("TRUNCATE TABLE " + table)) { - ps.execute(); - } - } - } finally { - keysOn.execute(); - } - } - } catch (SQLException ex) { - logger.error(ex.getMessage(), ex); - throw new RuntimeException(ex); - } - } -} diff --git a/mysql-persistence/src/test/java/com/netflix/conductor/dao/mysql/MySQLDAOTestUtil.java b/mysql-persistence/src/test/java/com/netflix/conductor/dao/mysql/MySQLDAOTestUtil.java new file mode 100644 index 0000000000..f80b9c48d4 --- /dev/null +++ b/mysql-persistence/src/test/java/com/netflix/conductor/dao/mysql/MySQLDAOTestUtil.java @@ -0,0 +1,96 @@ +package com.netflix.conductor.dao.mysql; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.conductor.common.utils.JsonMapperProvider; +import com.netflix.conductor.config.TestConfiguration; +import com.netflix.conductor.core.config.Configuration; +import com.zaxxer.hikari.HikariDataSource; + +import org.flywaydb.core.Flyway; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; + +import javax.sql.DataSource; + + +@SuppressWarnings("Duplicates") +public class MySQLDAOTestUtil { + private static final Logger logger = LoggerFactory.getLogger(MySQLDAOTestUtil.class); + private final HikariDataSource dataSource; + private final TestConfiguration testConfiguration = new TestConfiguration(); + private final ObjectMapper objectMapper = new JsonMapperProvider().get(); + + MySQLDAOTestUtil(String dbName) throws Exception { + testConfiguration.setProperty("jdbc.url", "jdbc:mysql://localhost:33307/" + dbName); + testConfiguration.setProperty("jdbc.username", "root"); + testConfiguration.setProperty("jdbc.password", ""); + // Ensure the DB starts + EmbeddedDatabase.INSTANCE.getDB().createDB(dbName); + + this.dataSource = getDataSource(testConfiguration); + } + + private HikariDataSource getDataSource(Configuration config) { + + HikariDataSource dataSource = new HikariDataSource(); + dataSource.setJdbcUrl(config.getProperty("jdbc.url", "jdbc:mysql://localhost:33307/conductor")); + 
dataSource.setUsername(config.getProperty("jdbc.username", "conductor")); + dataSource.setPassword(config.getProperty("jdbc.password", "password")); + dataSource.setAutoCommit(false); + + // Prevent DB from getting exhausted during rapid testing + dataSource.setMaximumPoolSize(8); + + flywayMigrate(dataSource); + + return dataSource; + } + + private void flywayMigrate(DataSource dataSource) { + + Flyway flyway = new Flyway(); + flyway.setDataSource(dataSource); + flyway.setPlaceholderReplacement(false); + flyway.migrate(); + } + + public HikariDataSource getDataSource() { + return dataSource; + } + + public TestConfiguration getTestConfiguration() { + return testConfiguration; + } + + public ObjectMapper getObjectMapper() { + return objectMapper; + } + + public void resetAllData() { + logger.info("Resetting data for test"); + try (Connection connection = dataSource.getConnection()) { + try (ResultSet rs = connection.prepareStatement("SHOW TABLES").executeQuery(); + PreparedStatement keysOn = connection.prepareStatement("SET FOREIGN_KEY_CHECKS=1")) { + try (PreparedStatement keysOff = connection.prepareStatement("SET FOREIGN_KEY_CHECKS=0")) { + keysOff.execute(); + while (rs.next()) { + String table = rs.getString(1); + try (PreparedStatement ps = connection.prepareStatement("TRUNCATE TABLE " + table)) { + ps.execute(); + } + } + } finally { + keysOn.execute(); + } + } + } catch (SQLException ex) { + logger.error(ex.getMessage(), ex); + throw new RuntimeException(ex); + } + } +} diff --git a/mysql-persistence/src/test/java/com/netflix/conductor/dao/mysql/MySQLExecutionDAOTest.java b/mysql-persistence/src/test/java/com/netflix/conductor/dao/mysql/MySQLExecutionDAOTest.java index 1a168f535e..7a856f0f6c 100644 --- a/mysql-persistence/src/test/java/com/netflix/conductor/dao/mysql/MySQLExecutionDAOTest.java +++ b/mysql-persistence/src/test/java/com/netflix/conductor/dao/mysql/MySQLExecutionDAOTest.java @@ -1,451 +1,69 @@ package com.netflix.conductor.dao.mysql; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.mockito.Mockito.mock; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import java.util.stream.Collectors; +import com.netflix.conductor.common.metadata.workflow.WorkflowDef; +import com.netflix.conductor.common.run.Workflow; +import com.netflix.conductor.dao.ExecutionDAO; +import com.netflix.conductor.dao.ExecutionDAOTest; +import com.netflix.conductor.dao.IndexDAO; -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.junit.Assert; +import org.junit.After; import org.junit.Before; import org.junit.Rule; import org.junit.Test; -import org.junit.rules.ExpectedException; +import org.junit.rules.TestName; -import com.netflix.conductor.common.metadata.tasks.PollData; -import com.netflix.conductor.common.metadata.tasks.Task; -import com.netflix.conductor.common.metadata.tasks.Task.Status; -import com.netflix.conductor.common.metadata.tasks.TaskDef; -import com.netflix.conductor.common.run.Workflow; -import 
com.netflix.conductor.common.run.Workflow.WorkflowStatus; -import com.netflix.conductor.core.execution.ApplicationException; -import com.netflix.conductor.dao.IndexDAO; - -@SuppressWarnings("Duplicates") -public class MySQLExecutionDAOTest extends MySQLBaseDAOTest { - - private MySQLMetadataDAO metadata; - private MySQLExecutionDAO dao; - - @Before - public void setup() throws Exception { - metadata = new MySQLMetadataDAO(objectMapper, dataSource, testConfiguration); - dao = new MySQLExecutionDAO(mock(IndexDAO.class), metadata, objectMapper, dataSource); - resetAllData(); - } - - @Rule - public ExpectedException expected = ExpectedException.none(); - - @Test - public void testTaskExceedsLimit() throws Exception { - TaskDef def = new TaskDef(); - def.setName("task1"); - def.setConcurrentExecLimit(1); - metadata.createTaskDef(def); - - List tasks = new LinkedList<>(); - for(int i = 0; i < 15; i++) { - Task task = new Task(); - task.setScheduledTime(1L); - task.setSeq(1); - task.setTaskId("t_" + i); - task.setWorkflowInstanceId("workflow_" + i); - task.setReferenceTaskName("task1"); - task.setTaskDefName("task1"); - tasks.add(task); - task.setStatus(Status.SCHEDULED); - } - - dao.createTasks(tasks); - assertFalse(dao.exceedsInProgressLimit(tasks.get(0))); - tasks.get(0).setStatus(Status.IN_PROGRESS); - dao.updateTask(tasks.get(0)); - - for(Task task : tasks) { - assertTrue(dao.exceedsInProgressLimit(task)); - } - } - - @Test - public void testCreateTaskException() throws Exception { - Task task = new Task(); - task.setScheduledTime(1L); - task.setSeq(1); - task.setTaskId("t1"); - task.setTaskDefName("task1"); - - expected.expect(ApplicationException.class); - expected.expectMessage("Workflow instance id cannot be null"); - dao.createTasks(Collections.singletonList(task)); - - task.setWorkflowInstanceId("wfid"); - expected.expect(ApplicationException.class); - expected.expectMessage("Task reference name cannot be null"); - dao.createTasks(Collections.singletonList(task)); - } - - @Test - public void testCreateTaskException2() throws Exception { - Task task = new Task(); - task.setScheduledTime(1L); - task.setSeq(1); - task.setTaskId("t1"); - task.setTaskDefName("task1"); - task.setWorkflowInstanceId("wfid"); - - expected.expect(ApplicationException.class); - expected.expectMessage("Task reference name cannot be null"); - dao.createTasks(Collections.singletonList(task)); - } - - @Test - public void testPollData() throws Exception { - dao.updateLastPoll("taskDef", null, "workerId1"); - PollData pd = dao.getPollData("taskDef", null); - assertNotNull(pd); - assertTrue(pd.getLastPollTime() > 0); - assertEquals(pd.getQueueName(), "taskDef"); - assertEquals(pd.getDomain(), null); - assertEquals(pd.getWorkerId(), "workerId1"); - - dao.updateLastPoll("taskDef", "domain1", "workerId1"); - pd = dao.getPollData("taskDef", "domain1"); - assertNotNull(pd); - assertTrue(pd.getLastPollTime() > 0); - assertEquals(pd.getQueueName(), "taskDef"); - assertEquals(pd.getDomain(), "domain1"); - assertEquals(pd.getWorkerId(), "workerId1"); +import java.util.List; - List pData = dao.getPollData("taskDef"); - assertEquals(pData.size(), 2); +import static junit.framework.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.mockito.Mockito.mock; - pd = dao.getPollData("taskDef", "domain2"); - assertTrue(pd == null); - } - - @Test - public void testWith2THreads() throws InterruptedException, ExecutionException { - testPollDataWithParallelThreads(2); +@SuppressWarnings("Duplicates") +public 
class MySQLExecutionDAOTest extends ExecutionDAOTest { + + private MySQLDAOTestUtil testMySQL; + private MySQLExecutionDAO executionDAO; + + @Rule public TestName name = new TestName(); + + @Before + public void setup() throws Exception { + testMySQL = new MySQLDAOTestUtil(name.getMethodName()); + executionDAO = new MySQLExecutionDAO( + mock(IndexDAO.class), + testMySQL.getObjectMapper(), + testMySQL.getDataSource() + ); + testMySQL.resetAllData(); } - - - private void testPollDataWithParallelThreads(final int threadCount) throws InterruptedException, ExecutionException { - Callable task = new Callable() { - @Override - public PollData call() { - dao.updateLastPoll("taskDef", null, "workerId1"); - return dao.getPollData("taskDef", null); - } - }; - List> tasks = Collections.nCopies(threadCount, task); - - ExecutorService executorService = Executors.newFixedThreadPool(threadCount); - List> futures = executorService.invokeAll(tasks); - List resultList = new ArrayList(futures.size()); - // Check for exceptions - for (Future future : futures) { - // Throws an exception if an exception was thrown by the task. - PollData pollData = future.get(); - System.out.println(pollData); - if(pollData !=null) - resultList.add(future.get().getQueueName()); - } - // Validate the IDs - Assert.assertEquals(threadCount, futures.size()); - List expectedList = new ArrayList(threadCount); - for (long i = 1; i <= threadCount; i++) { - expectedList.add("taskDef"); - } - Collections.sort(resultList); - Assert.assertEquals(expectedList, resultList); + @After + public void teardown() throws Exception { + testMySQL.resetAllData(); + testMySQL.getDataSource().close(); } - @Test - public void testTaskCreateDups() throws Exception { - List tasks = new LinkedList<>(); - String workflowId = UUID.randomUUID().toString(); - - for(int i = 0; i < 3; i++) { - Task task = new Task(); - task.setScheduledTime(1L); - task.setSeq(1); - task.setTaskId(workflowId + "_t" + i); - task.setReferenceTaskName("t" + i); - task.setRetryCount(0); - task.setWorkflowInstanceId(workflowId); - task.setTaskDefName("task" + i); - task.setStatus(Status.IN_PROGRESS); - tasks.add(task); - } - - //Let's insert a retried task - Task task = new Task(); - task.setScheduledTime(1L); - task.setSeq(1); - task.setTaskId(workflowId + "_t" + 2); - task.setReferenceTaskName("t" + 2); - task.setRetryCount(1); - task.setWorkflowInstanceId(workflowId); - task.setTaskDefName("task" + 2); - task.setStatus(Status.IN_PROGRESS); - tasks.add(task); - - //Duplicate task! - task = new Task(); - task.setScheduledTime(1L); - task.setSeq(1); - task.setTaskId(workflowId + "_t" + 1); - task.setReferenceTaskName("t" + 1); - task.setRetryCount(0); - task.setWorkflowInstanceId(workflowId); - task.setTaskDefName("task" + 1); - task.setStatus(Status.IN_PROGRESS); - tasks.add(task); + @Test + public void testPendingByCorrelationId() throws Exception { - List created = dao.createTasks(tasks); - assertEquals(tasks.size()-1, created.size()); //1 less + WorkflowDef def = new WorkflowDef(); + def.setName("pending_count_correlation_jtest"); - Set srcIds = tasks.stream().map(t -> t.getReferenceTaskName() + "." + t.getRetryCount()).collect(Collectors.toSet()); - Set createdIds = created.stream().map(t -> t.getReferenceTaskName() + "." 
+ t.getRetryCount()).collect(Collectors.toSet()); + Workflow workflow = createTestWorkflow(); + workflow.setWorkflowDefinition(def); - assertEquals(srcIds, createdIds); + String idBase = workflow.getWorkflowId(); + generateWorkflows(workflow, idBase, 10); - List pending = dao.getPendingTasksByWorkflow("task0", workflowId); - assertNotNull(pending); - assertEquals(1, pending.size()); - assertTrue(EqualsBuilder.reflectionEquals(tasks.get(0), pending.get(0))); + List bycorrelationId = getExecutionDAO().getWorkflowsByCorrelationId("corr001", true); + assertNotNull(bycorrelationId); + assertEquals(10, bycorrelationId.size()); - List found = dao.getTasks(tasks.get(0).getTaskDefName(), null, 1); - assertNotNull(found); - assertEquals(1, found.size()); - assertTrue(EqualsBuilder.reflectionEquals(tasks.get(0), found.get(0))); } - @Test - public void testTaskOps() throws Exception { - List tasks = new LinkedList<>(); - String workflowId = UUID.randomUUID().toString(); - - for(int i = 0; i < 3; i++) { - Task task = new Task(); - task.setScheduledTime(1L); - task.setSeq(1); - task.setTaskId(workflowId + "_t" + i); - task.setReferenceTaskName("testTaskOps" + i); - task.setRetryCount(0); - task.setWorkflowInstanceId(workflowId); - task.setTaskDefName("testTaskOps" + i); - task.setStatus(Status.IN_PROGRESS); - tasks.add(task); - } - - for(int i = 0; i < 3; i++) { - Task task = new Task(); - task.setScheduledTime(1L); - task.setSeq(1); - task.setTaskId("x" + workflowId + "_t" + i); - task.setReferenceTaskName("testTaskOps" + i); - task.setRetryCount(0); - task.setWorkflowInstanceId("x" + workflowId); - task.setTaskDefName("testTaskOps" + i); - task.setStatus(Status.IN_PROGRESS); - dao.createTasks(Arrays.asList(task)); - } - - - List created = dao.createTasks(tasks); - assertEquals(tasks.size(), created.size()); - - List pending = dao.getPendingTasksForTaskType(tasks.get(0).getTaskDefName()); - assertNotNull(pending); - assertEquals(2, pending.size()); - //Pending list can come in any order. 
finding the one we are looking for and then comparing - Task matching = pending.stream().filter(task -> task.getTaskId().equals(tasks.get(0).getTaskId())).findAny().get(); - assertTrue(EqualsBuilder.reflectionEquals(matching, tasks.get(0))); - - List update = new LinkedList<>(); - for(int i = 0; i < 3; i++) { - Task found = dao.getTask(workflowId + "_t" + i); - assertNotNull(found); - found.getOutputData().put("updated", true); - found.setStatus(Status.COMPLETED); - update.add(found); - } - dao.updateTasks(update); - - List taskIds = tasks.stream().map(Task::getTaskId).collect(Collectors.toList()); - List found = dao.getTasks(taskIds); - assertEquals(taskIds.size(), found.size()); - found.forEach(task -> { - assertTrue(task.getOutputData().containsKey("updated")); - assertEquals(true, task.getOutputData().get("updated")); - dao.removeTask(task.getTaskId()); - }); - - found = dao.getTasks(taskIds); - assertTrue(found.isEmpty()); - } - - @Test - public void test() throws Exception { - Workflow workflow = new Workflow(); - workflow.setCorrelationId("correlationX"); - workflow.setCreatedBy("junit_tester"); - workflow.setEndTime(200L); - - Map input = new HashMap<>(); - input.put("param1", "param1 value"); - input.put("param2", 100); - workflow.setInput(input); - - Map output = new HashMap<>(); - output.put("ouput1", "output 1 value"); - output.put("op2", 300); - workflow.setOutput(output); - - workflow.setOwnerApp("workflow"); - workflow.setParentWorkflowId("parentWorkflowId"); - workflow.setParentWorkflowTaskId("parentWFTaskId"); - workflow.setReasonForIncompletion("missing recipe"); - workflow.setReRunFromWorkflowId("re-run from id1"); - workflow.setSchemaVersion(2); - workflow.setStartTime(90L); - workflow.setStatus(WorkflowStatus.FAILED); - workflow.setWorkflowId("workflow0"); - - List tasks = new LinkedList<>(); - - Task task = new Task(); - task.setScheduledTime(1L); - task.setSeq(1); - task.setTaskId("t1"); - task.setReferenceTaskName("t1"); - task.setWorkflowInstanceId(workflow.getWorkflowId()); - task.setTaskDefName("task1"); - - Task task2 = new Task(); - task2.setScheduledTime(2L); - task2.setSeq(2); - task2.setTaskId("t2"); - task2.setReferenceTaskName("t2"); - task2.setWorkflowInstanceId(workflow.getWorkflowId()); - task2.setTaskDefName("task2"); - - Task task3 = new Task(); - task3.setScheduledTime(2L); - task3.setSeq(3); - task3.setTaskId("t3"); - task3.setReferenceTaskName("t3"); - task3.setWorkflowInstanceId(workflow.getWorkflowId()); - task3.setTaskDefName("task3"); - - tasks.add(task); - tasks.add(task2); - tasks.add(task3); - - workflow.setTasks(tasks); - - workflow.setUpdatedBy("junit_tester"); - workflow.setUpdateTime(800L); - workflow.setVersion(3); - //workflow.setWorkflowId("wf0001"); - workflow.setWorkflowType("Junit Workflow"); - - String workflowId = dao.createWorkflow(workflow); - List created = dao.createTasks(tasks); - assertEquals(tasks.size(), created.size()); - - Workflow workflowWithTasks = dao.getWorkflow(workflow.getWorkflowId(), true); - assertEquals(workflowWithTasks.getWorkflowId(), workflowId); - assertTrue(!workflowWithTasks.getTasks().isEmpty()); - - assertEquals(workflow.getWorkflowId(), workflowId); - Workflow found = dao.getWorkflow(workflowId, false); - assertTrue(found.getTasks().isEmpty()); - - workflow.getTasks().clear(); - assertTrue(EqualsBuilder.reflectionEquals(workflow, found)); - - workflow.getInput().put("updated", true); - dao.updateWorkflow(workflow); - found = dao.getWorkflow(workflowId); - assertNotNull(found); - 
assertTrue(found.getInput().containsKey("updated")); - assertEquals(true, found.getInput().get("updated")); - - List running = dao.getRunningWorkflowIds(workflow.getWorkflowType()); - assertNotNull(running); - assertTrue(running.isEmpty()); - - workflow.setStatus(WorkflowStatus.RUNNING); - dao.updateWorkflow(workflow); - - running = dao.getRunningWorkflowIds(workflow.getWorkflowType()); - assertNotNull(running); - assertEquals(1, running.size()); - assertEquals(workflow.getWorkflowId(), running.get(0)); - - List pending = dao.getPendingWorkflowsByType(workflow.getWorkflowType()); - assertNotNull(pending); - assertEquals(1, pending.size()); - assertEquals(3, pending.get(0).getTasks().size()); - pending.get(0).getTasks().clear(); - assertTrue(EqualsBuilder.reflectionEquals(workflow, pending.get(0))); - - workflow.setStatus(WorkflowStatus.COMPLETED); - dao.updateWorkflow(workflow); - running = dao.getRunningWorkflowIds(workflow.getWorkflowType()); - assertNotNull(running); - assertTrue(running.isEmpty()); - - List bytime = dao.getWorkflowsByType(workflow.getWorkflowType(), System.currentTimeMillis(), System.currentTimeMillis()+100); - assertNotNull(bytime); - assertTrue(bytime.isEmpty()); - - bytime = dao.getWorkflowsByType(workflow.getWorkflowType(), workflow.getCreateTime() - 10, workflow.getCreateTime() + 10); - assertNotNull(bytime); - assertEquals(1, bytime.size()); - - String workflowName = "pending_count_test"; - String idBase = workflow.getWorkflowId(); - for(int i = 0; i < 10; i++) { - workflow.setWorkflowId("x" + i + idBase); - workflow.setCorrelationId("corr001"); - workflow.setStatus(WorkflowStatus.RUNNING); - workflow.setWorkflowType(workflowName); - dao.createWorkflow(workflow); - } - - List bycorrelationId = dao.getWorkflowsByCorrelationId("corr001", true); - assertNotNull(bycorrelationId); - assertEquals(10, bycorrelationId.size()); - - long count = dao.getPendingWorkflowCount(workflowName); - assertEquals(10, count); + @Override + public ExecutionDAO getExecutionDAO() { + return executionDAO; + } - for(int i = 0; i < 10; i++) { - dao.removeFromPendingWorkflow(workflowName, "x" + i + idBase); - } - count = dao.getPendingWorkflowCount(workflowName); - assertEquals(0, count); - } } diff --git a/mysql-persistence/src/test/java/com/netflix/conductor/dao/mysql/MySQLMetadataDAOTest.java b/mysql-persistence/src/test/java/com/netflix/conductor/dao/mysql/MySQLMetadataDAOTest.java index a65d45f3e2..96c3ef48f5 100644 --- a/mysql-persistence/src/test/java/com/netflix/conductor/dao/mysql/MySQLMetadataDAOTest.java +++ b/mysql-persistence/src/test/java/com/netflix/conductor/dao/mysql/MySQLMetadataDAOTest.java @@ -5,32 +5,46 @@ import com.netflix.conductor.common.metadata.workflow.WorkflowDef; import com.netflix.conductor.core.execution.ApplicationException; import org.apache.commons.lang3.builder.EqualsBuilder; +import org.junit.After; import org.junit.Before; +import org.junit.Rule; import org.junit.Test; +import org.junit.rules.TestName; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; import java.util.Arrays; import java.util.List; +import java.util.Optional; import java.util.Set; import java.util.UUID; import java.util.stream.Collectors; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; @SuppressWarnings("Duplicates") @RunWith(JUnit4.class) -public class MySQLMetadataDAOTest extends MySQLBaseDAOTest { 
+public class MySQLMetadataDAOTest { + private MySQLDAOTestUtil testUtil; private MySQLMetadataDAO dao; + @Rule + public TestName name = new TestName(); + @Before public void setup() throws Exception { - dao = new MySQLMetadataDAO(objectMapper, dataSource, testConfiguration); - resetAllData(); + testUtil = new MySQLDAOTestUtil(name.getMethodName()); + dao = new MySQLMetadataDAO(testUtil.getObjectMapper(), testUtil.getDataSource(), testUtil.getTestConfiguration()); + } + + @After + public void teardown() throws Exception { + testUtil.resetAllData(); + testUtil.getDataSource().close(); } @Test(expected=ApplicationException.class) @@ -63,7 +77,7 @@ public void testWorkflowDefOperations() throws Exception { assertEquals("test", all.get(0).getName()); assertEquals(1, all.get(0).getVersion()); - WorkflowDef found = dao.get("test", 1); + WorkflowDef found = dao.get("test", 1).get(); assertTrue(EqualsBuilder.reflectionEquals(def, found)); def.setVersion(2); @@ -75,7 +89,7 @@ public void testWorkflowDefOperations() throws Exception { assertEquals("test", all.get(0).getName()); assertEquals(1, all.get(0).getVersion()); - found = dao.getLatest(def.getName()); + found = dao.getLatest(def.getName()).get(); assertEquals(def.getName(), found.getName()); assertEquals(def.getVersion(), found.getVersion()); assertEquals(2, found.getVersion()); @@ -96,7 +110,7 @@ public void testWorkflowDefOperations() throws Exception { def.setDescription("updated"); dao.update(def); - found = dao.get(def.getName(), def.getVersion()); + found = dao.get(def.getName(), def.getVersion()).get(); assertEquals(def.getDescription(), found.getDescription()); List allnames = dao.findAll(); @@ -105,8 +119,8 @@ public void testWorkflowDefOperations() throws Exception { assertEquals(def.getName(), allnames.get(0)); dao.removeWorkflowDef("test", 1); - WorkflowDef deleted = dao.get("test", 1); - assertNull(deleted); + Optional deleted = dao.get("test", 1); + assertFalse(deleted.isPresent()); } @Test @@ -176,9 +190,9 @@ public void testEventHandlers() { eh.setName(UUID.randomUUID().toString()); eh.setActive(false); EventHandler.Action action = new EventHandler.Action(); - action.setAction(EventHandler.Action.Type.start_workflow); - action.setStart_workflow(new EventHandler.StartWorkflow()); - action.getStart_workflow().setName("workflow_x"); + action.setAction(EventHandler.Action.Type.START_WORKFLOW); + action.setStartWorkflow(new EventHandler.StartWorkflow()); + action.getStartWorkflow().setName("workflow_x"); eh.getActions().add(action); eh.setEvent(event1); diff --git a/mysql-persistence/src/test/java/com/netflix/conductor/dao/mysql/MySQLPushPopQueueDAOTest.java b/mysql-persistence/src/test/java/com/netflix/conductor/dao/mysql/MySQLPushPopQueueDAOTest.java index 1756e4a41c..e948038ffe 100644 --- a/mysql-persistence/src/test/java/com/netflix/conductor/dao/mysql/MySQLPushPopQueueDAOTest.java +++ b/mysql-persistence/src/test/java/com/netflix/conductor/dao/mysql/MySQLPushPopQueueDAOTest.java @@ -9,23 +9,36 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; +import org.junit.After; import org.junit.Assert; import org.junit.Before; +import org.junit.Rule; import org.junit.Test; +import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @SuppressWarnings("Duplicates") -public class MySQLPushPopQueueDAOTest extends MySQLBaseDAOTest { +public class MySQLPushPopQueueDAOTest { private static final Logger LOGGER = LoggerFactory.getLogger(MySQLPushPopQueueDAOTest.class); + private 
MySQLDAOTestUtil testUtil; private MySQLQueueDAO dao; + @Rule + public TestName name = new TestName(); + @Before public void setup() throws Exception { - dao = new MySQLQueueDAO(objectMapper, dataSource); - resetAllData(); + testUtil = new MySQLDAOTestUtil(name.getMethodName()); + dao = new MySQLQueueDAO(testUtil.getObjectMapper(), testUtil.getDataSource()); + } + + @After + public void teardown() throws Exception { + testUtil.resetAllData(); + testUtil.getDataSource().close(); } @Test diff --git a/mysql-persistence/src/test/java/com/netflix/conductor/dao/mysql/MySQLQueueDAOTest.java b/mysql-persistence/src/test/java/com/netflix/conductor/dao/mysql/MySQLQueueDAOTest.java index 92e8b7adfc..e1d64d06bf 100644 --- a/mysql-persistence/src/test/java/com/netflix/conductor/dao/mysql/MySQLQueueDAOTest.java +++ b/mysql-persistence/src/test/java/com/netflix/conductor/dao/mysql/MySQLQueueDAOTest.java @@ -2,10 +2,12 @@ import com.google.common.collect.ImmutableList; import com.netflix.conductor.core.events.queue.Message; +import org.junit.After; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; +import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -23,23 +25,33 @@ import static org.junit.Assert.fail; @SuppressWarnings("Duplicates") -public class MySQLQueueDAOTest extends MySQLBaseDAOTest { +public class MySQLQueueDAOTest { private static final Logger LOGGER = LoggerFactory.getLogger(MySQLQueueDAOTest.class); + private MySQLDAOTestUtil testUtil; private MySQLQueueDAO dao; + @Rule + public TestName name = new TestName(); + + @Rule + public ExpectedException expected = ExpectedException.none(); + @Before - public void setup() { - dao = new MySQLQueueDAO(objectMapper, dataSource); - resetAllData(); + public void setup() throws Exception { + testUtil = new MySQLDAOTestUtil(name.getMethodName()); + dao = new MySQLQueueDAO(testUtil.getObjectMapper(), testUtil.getDataSource()); } - @Rule - public ExpectedException expected = ExpectedException.none(); + @After + public void teardown() throws Exception { + testUtil.resetAllData(); + testUtil.getDataSource().close(); + } @Test - public void test() { + public void complexQueueTest() { String queueName = "TestQueue"; long offsetTimeInSecond = 0; @@ -146,9 +158,9 @@ public void pollMessagesTest() { // Assert that our un-popped messages match our expected size final long expectedSize = totalSize - firstPollSize - secondPollSize; - try(Connection c = dataSource.getConnection()) { + try(Connection c = testUtil.getDataSource().getConnection()) { String UNPOPPED = "SELECT COUNT(*) FROM queue_message WHERE queue_name = ? AND popped = false"; - try(Query q = new Query(objectMapper, c, UNPOPPED)) { + try(Query q = new Query(testUtil.getObjectMapper(), c, UNPOPPED)) { long count = q.addParameter(queueName).executeCount(); assertEquals("Remaining queue size mismatch", expectedSize, count); } @@ -225,9 +237,9 @@ else if(i == 6 || i == 7){ // Assert that our un-popped messages match our expected size final long expectedSize = totalSize - firstPollSize - secondPollSize; - try(Connection c = dataSource.getConnection()) { + try(Connection c = testUtil.getDataSource().getConnection()) { String UNPOPPED = "SELECT COUNT(*) FROM queue_message WHERE queue_name = ? 
AND popped = false"; - try(Query q = new Query(objectMapper, c, UNPOPPED)) { + try(Query q = new Query(testUtil.getObjectMapper(), c, UNPOPPED)) { long count = q.addParameter(queueName).executeCount(); assertEquals("Remaining queue size mismatch", expectedSize, count); } diff --git a/redis-persistence/build.gradle b/redis-persistence/build.gradle index fe7175ba1b..68fa8d37e9 100644 --- a/redis-persistence/build.gradle +++ b/redis-persistence/build.gradle @@ -7,7 +7,8 @@ dependencies { compile ("com.netflix.dyno:dyno-jedis:${revDynoJedis}") compile ("com.netflix.dyno-queues:dyno-queues-redis:${revDynoQueues}") - //In memory redis for unit testing - testCompile "org.rarefiedredis.redis:redis-java:${revRarefiedRedis}" + //In memory + compile "org.rarefiedredis.redis:redis-java:${revRarefiedRedis}" + testCompile project(':conductor-core').sourceSets.test.output } diff --git a/redis-persistence/src/main/java/com/netflix/conductor/dao/RedisWorkflowModule.java b/redis-persistence/src/main/java/com/netflix/conductor/dao/RedisWorkflowModule.java index 0021453dfb..f09a4daaae 100644 --- a/redis-persistence/src/main/java/com/netflix/conductor/dao/RedisWorkflowModule.java +++ b/redis-persistence/src/main/java/com/netflix/conductor/dao/RedisWorkflowModule.java @@ -11,55 +11,26 @@ * specific language governing permissions and limitations under the License. */ package com.netflix.conductor.dao; -/** - * - */ import com.google.inject.AbstractModule; -import com.netflix.conductor.core.config.Configuration; -import com.netflix.conductor.dao.dynomite.DynoProxy; import com.netflix.conductor.dao.dynomite.RedisExecutionDAO; import com.netflix.conductor.dao.dynomite.RedisMetadataDAO; import com.netflix.conductor.dao.dynomite.queue.DynoQueueDAO; -import com.netflix.dyno.connectionpool.HostSupplier; -import com.netflix.dyno.queues.shard.DynoShardSupplier; -import redis.clients.jedis.JedisCommands; - -import javax.inject.Inject; - +import com.netflix.conductor.dyno.DynoProxy; +import com.netflix.conductor.dyno.RedisQueuesProvider; +import com.netflix.dyno.queues.redis.RedisQueues; /** * @author Viren */ public class RedisWorkflowModule extends AbstractModule { - private final Configuration config; - private final JedisCommands dynomiteConnection; - private final HostSupplier hostSupplier; - - @Inject - public RedisWorkflowModule(Configuration config, JedisCommands dynomiteConnection, HostSupplier hostSupplier) { - this.config = config; - this.dynomiteConnection = dynomiteConnection; - this.hostSupplier = hostSupplier; - } @Override protected void configure() { - bind(MetadataDAO.class).to(RedisMetadataDAO.class); bind(ExecutionDAO.class).to(RedisExecutionDAO.class); bind(QueueDAO.class).to(DynoQueueDAO.class); - bind(DynoQueueDAO.class).toInstance(createQueueDAO()); - bind(DynoProxy.class).toInstance(new DynoProxy(dynomiteConnection)); - - } - - private DynoQueueDAO createQueueDAO() { - - String localDC = config.getAvailabilityZone(); - localDC = localDC.replaceAll(config.getRegion(), ""); - DynoShardSupplier ss = new DynoShardSupplier(hostSupplier, config.getRegion(), localDC); - - return new DynoQueueDAO(dynomiteConnection, dynomiteConnection, ss, config); + bind(RedisQueues.class).toProvider(RedisQueuesProvider.class).asEagerSingleton(); + bind(DynoProxy.class).asEagerSingleton(); } } diff --git a/redis-persistence/src/main/java/com/netflix/conductor/dao/dynomite/BaseDynoDAO.java b/redis-persistence/src/main/java/com/netflix/conductor/dao/dynomite/BaseDynoDAO.java index e2200998e9..4ff3509292 100644 --- 
a/redis-persistence/src/main/java/com/netflix/conductor/dao/dynomite/BaseDynoDAO.java +++ b/redis-persistence/src/main/java/com/netflix/conductor/dao/dynomite/BaseDynoDAO.java @@ -18,6 +18,7 @@ import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import com.netflix.conductor.core.config.Configuration; +import com.netflix.conductor.dyno.DynoProxy; import com.netflix.conductor.metrics.Monitors; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; diff --git a/redis-persistence/src/main/java/com/netflix/conductor/dao/dynomite/DynoProxy.java b/redis-persistence/src/main/java/com/netflix/conductor/dao/dynomite/DynoProxy.java deleted file mode 100644 index a17ec35eee..0000000000 --- a/redis-persistence/src/main/java/com/netflix/conductor/dao/dynomite/DynoProxy.java +++ /dev/null @@ -1,260 +0,0 @@ -/** - * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.conductor.dao.dynomite; - -import com.google.inject.Singleton; -import com.netflix.conductor.core.config.Configuration; -import com.netflix.discovery.DiscoveryClient; -import com.netflix.dyno.connectionpool.exception.DynoException; -import com.netflix.dyno.jedis.DynoJedisClient; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import redis.clients.jedis.JedisCommands; -import redis.clients.jedis.ScanParams; -import redis.clients.jedis.ScanResult; -import redis.clients.jedis.Tuple; -import redis.clients.jedis.params.sortedset.ZAddParams; - -import javax.inject.Inject; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Optional; -import java.util.Set; -import java.util.concurrent.ExecutionException; - -/** - * - * @author Viren Proxy for the Dynomite client - */ -@Singleton -public class DynoProxy { - - private static Logger logger = LoggerFactory.getLogger(DynoProxy.class); - - protected DiscoveryClient dc; - - protected JedisCommands dynoClient; - - @Inject - public DynoProxy(DiscoveryClient dc, Configuration config) throws DynoException, InterruptedException, ExecutionException { - this.dc = dc; - String cluster = config.getProperty("workflow.dynomite.cluster", null); - String applicationName = config.getAppId(); - this.dynoClient = new DynoJedisClient.Builder() - .withApplicationName(applicationName) - .withDynomiteClusterName(cluster) - .withDiscoveryClient(dc) - .build(); - } - - public DynoProxy(JedisCommands dynoClient) { - this.dynoClient = dynoClient; - } - - public Set zrange(String key, long start, long end) { - return dynoClient.zrange(key, start, end); - } - - public Set zrangeByScoreWithScores(String key, double maxScore, int count) { - return dynoClient.zrangeByScoreWithScores(key, 0, maxScore, 0, count); - } - - public Set zrangeByScore(String key, double maxScore, int count) { - return dynoClient.zrangeByScore(key, 0, maxScore, 0, count); - } - - public Set zrangeByScore(String key, double 
minScore, double maxScore, int count) { - return dynoClient.zrangeByScore(key, minScore, maxScore, 0, count); - } - - public ScanResult zscan(String key, int cursor) { - return dynoClient.zscan(key, "" + cursor); - } - - public String get(String key) { - return dynoClient.get(key); - } - - public Long zcard(String key) { - return dynoClient.zcard(key); - } - - public Long del(String key) { - return dynoClient.del(key); - } - - public Long zrem(String key, String member) { - return dynoClient.zrem(key, member); - } - - public long zremrangeByScore(String key, String start, String end) { return dynoClient.zremrangeByScore(key, start, end);} - - public long zcount(String key, double min, double max) { return dynoClient.zcount(key, min, max);} - - public String set(String key, String value) { - String retVal = dynoClient.set(key, value); - return retVal; - } - - public Long setnx(String key, String value) { - Long added = dynoClient.setnx(key, value); - return added; - } - - public Long zadd(String key, double score, String member) { - Long retVal = dynoClient.zadd(key, score, member); - return retVal; - } - - public Long zaddnx(String key, double score, String member) { - ZAddParams params = ZAddParams.zAddParams().nx(); - Long retVal = dynoClient.zadd(key, score, member, params); - return retVal; - } - - public Long hset(String key, String field, String value) { - Long retVal = dynoClient.hset(key, field, value); - return retVal; - } - - public Long hsetnx(String key, String field, String value) { - Long retVal = dynoClient.hsetnx(key, field, value); - return retVal; - } - - public Long hlen(String key) { - Long retVal = dynoClient.hlen(key); - return retVal; - } - - public String hget(String key, String field) { - return dynoClient.hget(key, field); - } - - public Optional optionalHget(String key, String field) { - return Optional.ofNullable(dynoClient.hget(key, field)); - } - - public Map hscan(String key, int count) { - Map m = new HashMap<>(); - int cursor = 0; - do { - ScanResult> sr = dynoClient.hscan(key, "" + cursor); - cursor = Integer.parseInt(sr.getStringCursor()); - for (Entry r : sr.getResult()) { - m.put(r.getKey(), r.getValue()); - } - if(m.size() > count) { - break; - } - } while (cursor > 0); - - return m; - } - - public Map hgetAll(String key) { - Map m = new HashMap<>(); - JedisCommands dyno = dynoClient; - int cursor = 0; - do { - ScanResult> sr = dyno.hscan(key, "" + cursor); - cursor = Integer.parseInt(sr.getStringCursor()); - for (Entry r : sr.getResult()) { - m.put(r.getKey(), r.getValue()); - } - } while (cursor > 0); - - return m; - } - - public List hvals(String key) { - logger.trace("hvals {}", key); - return dynoClient.hvals(key); - } - - public Set hkeys(String key) { - logger.trace("hkeys {}", key); - JedisCommands client = dynoClient; - Set keys = new HashSet<>(); - int cursor = 0; - do { - ScanResult> sr = client.hscan(key, "" + cursor); - cursor = Integer.parseInt(sr.getStringCursor()); - List> result = sr.getResult(); - for (Entry e : result) { - keys.add(e.getKey()); - } - } while (cursor > 0); - - return keys; - } - - public Long hdel(String key, String... 
fields) { - logger.trace("hdel {} {}", key, fields[0]); - return dynoClient.hdel(key, fields); - } - - public Long expire(String key, int seconds) { - return dynoClient.expire(key, seconds); - } - - public Boolean hexists(String key, String field) { - return dynoClient.hexists(key, field); - } - - public Long sadd(String key, String value) { - logger.trace("sadd {} {}", key, value); - Long retVal = dynoClient.sadd(key, value); - return retVal; - } - - public Long srem(String key, String member) { - logger.trace("srem {} {}", key, member); - Long retVal = dynoClient.srem(key, member); - return retVal; - } - - public boolean sismember(String key, String member) { - return dynoClient.sismember(key, member); - } - - public Set smembers(String key) { - logger.trace("smembers {}", key); - JedisCommands client = dynoClient; - Set r = new HashSet<>(); - int cursor = 0; - ScanParams sp = new ScanParams(); - sp.count(50); - - do { - ScanResult sr = client.sscan(key, "" + cursor, sp); - cursor = Integer.parseInt(sr.getStringCursor()); - r.addAll(sr.getResult()); - - } while (cursor > 0); - - return r; - - } - - public Long scard(String key) { - return dynoClient.scard(key); - } - -} diff --git a/redis-persistence/src/main/java/com/netflix/conductor/dao/dynomite/RedisExecutionDAO.java b/redis-persistence/src/main/java/com/netflix/conductor/dao/dynomite/RedisExecutionDAO.java index 3d91d42feb..1a8eaa8768 100644 --- a/redis-persistence/src/main/java/com/netflix/conductor/dao/dynomite/RedisExecutionDAO.java +++ b/redis-persistence/src/main/java/com/netflix/conductor/dao/dynomite/RedisExecutionDAO.java @@ -1,4 +1,4 @@ -/* +/** * Copyright 2016 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -34,9 +34,8 @@ import com.netflix.conductor.core.execution.ApplicationException.Code; import com.netflix.conductor.dao.ExecutionDAO; import com.netflix.conductor.dao.IndexDAO; -import com.netflix.conductor.dao.MetadataDAO; +import com.netflix.conductor.dyno.DynoProxy; import com.netflix.conductor.metrics.Monitors; -import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -84,14 +83,11 @@ public class RedisExecutionDAO extends BaseDynoDAO implements ExecutionDAO { private IndexDAO indexDAO; - private MetadataDAO metadataDA0; - @Inject public RedisExecutionDAO(DynoProxy dynoClient, ObjectMapper objectMapper, - IndexDAO indexDAO, MetadataDAO metadataDA0, Configuration config) { + IndexDAO indexDAO, Configuration config) { super(dynoClient, objectMapper, config); this.indexDAO = indexDAO; - this.metadataDA0 = metadataDA0; } @Override @@ -135,14 +131,10 @@ public List getTasks(String taskDefName, String startKey, int count) { @Override public List createTasks(List tasks) { - List created = new LinkedList<>(); + List tasksCreated = new LinkedList<>(); for (Task task : tasks) { - - Preconditions.checkNotNull(task, "task object cannot be null"); - Preconditions.checkNotNull(task.getTaskId(), "Task id cannot be null"); - Preconditions.checkNotNull(task.getWorkflowInstanceId(), "Workflow instance id cannot be null"); - Preconditions.checkNotNull(task.getReferenceTaskName(), "Task reference name cannot be null"); + validate(task); recordRedisDaoRequests("createTask", task.getTaskType(), task.getWorkflowType()); @@ -165,10 +157,10 @@ public List createTasks(List tasks) { inProgressTaskKey, task.getWorkflowInstanceId(), task.getTaskId(), task.getTaskType()); updateTask(task); - created.add(task); + 
tasksCreated.add(task); } - return created; + return tasksCreated; } @@ -187,9 +179,9 @@ public void updateTask(Task task) { task.setEndTime(System.currentTimeMillis()); } - TaskDef taskDef = metadataDA0.getTaskDef(task.getTaskDefName()); + Optional taskDefinition = task.getTaskDefinition(); - if(taskDef != null && taskDef.concurrencyLimit() > 0) { + if(taskDefinition.isPresent() && taskDefinition.get().concurrencyLimit() > 0) { if(task.getStatus() != null && task.getStatus().equals(Status.IN_PROGRESS)) { dynoClient.sadd(nsKey(TASKS_IN_PROGRESS_STATUS, task.getTaskDefName()), task.getTaskId()); @@ -207,9 +199,10 @@ public void updateTask(Task task) { } String payload = toJson(task); - recordRedisDaoPayloadSize("updateTask", payload.length(), Optional.ofNullable(taskDef) + recordRedisDaoPayloadSize("updateTask", payload.length(), taskDefinition .map(TaskDef::getName) .orElse("n/a"), task.getWorkflowType()); + recordRedisDaoRequests("updateTask", task.getTaskType(), task.getWorkflowType()); dynoClient.set(nsKey(TASK, task.getTaskId()), payload); logger.debug("Workflow task payload saved to TASK with taskKey: {}, workflowId: {}, taskId: {}, taskType: {} during updateTask", @@ -251,7 +244,7 @@ public boolean exceedsRateLimitPerFrequency(Task task) { int rateLimitFrequencyInSeconds = task.getRateLimitFrequencyInSeconds(); if (rateLimitPerFrequency <= 0 || rateLimitFrequencyInSeconds <=0) { logger.debug("Rate limit not applied to the Task: {} either rateLimitPerFrequency: {} or rateLimitFrequencyInSeconds: {} is 0 or less", - task, rateLimitPerFrequency, rateLimitFrequencyInSeconds); + task, rateLimitPerFrequency, rateLimitFrequencyInSeconds); return false; } else { logger.debug("Evaluating rate limiting for Task: {} with rateLimitPerFrequency: {} and rateLimitFrequencyInSeconds: {}", @@ -264,7 +257,6 @@ public boolean exceedsRateLimitPerFrequency(Task task) { dynoClient.zcount(key, currentTimeEpochMinusRateLimitBucket, currentTimeEpochMillis)); - if (currentBucketCount < rateLimitPerFrequency) { dynoClient.zadd(key, currentTimeEpochMillis, String.valueOf(currentTimeEpochMillis)); dynoClient.expire(key, rateLimitFrequencyInSeconds); @@ -280,13 +272,14 @@ public boolean exceedsRateLimitPerFrequency(Task task) { } } + @Override public boolean exceedsInProgressLimit(Task task) { - TaskDef taskDef = metadataDA0.getTaskDef(task.getTaskDefName()); - if(taskDef == null) { + Optional taskDefinition = task.getTaskDefinition(); + if(!taskDefinition.isPresent()) { return false; } - int limit = taskDef.concurrencyLimit(); + int limit = taskDefinition.get().concurrencyLimit(); if(limit <= 0) { return false; } @@ -311,7 +304,7 @@ public boolean exceedsInProgressLimit(Task task) { String inProgressKey = nsKey(TASKS_IN_PROGRESS_STATUS, task.getTaskDefName()); //Cleanup any items that are still present in the rate limit bucket but not in progress anymore! 
ids.stream().filter(id -> !dynoClient.sismember(inProgressKey, id)).forEach(id2 -> dynoClient.zrem(rateLimitKey, id2)); - Monitors.recordTaskConcurrentExecutionLimited(task.getTaskDefName(), limit); + Monitors.recordTaskRateLimited(task.getTaskDefName(), limit); } return rateLimited; } @@ -366,7 +359,7 @@ public List getTasks(List taskIds) { recordRedisDaoPayloadSize("getTask", jsonString.length(), task.getTaskType(), task.getWorkflowType()); return task; }) - .collect(Collectors.toCollection(LinkedList::new)); + .collect(Collectors.toList()); } @Override @@ -416,10 +409,10 @@ public void removeWorkflow(String workflowId, boolean archiveWorkflow) { recordRedisDaoRequests("removeWorkflow"); // Remove from lists - String key = nsKey(WORKFLOW_DEF_TO_WORKFLOWS, wf.getWorkflowType(), dateStr(wf.getCreateTime())); + String key = nsKey(WORKFLOW_DEF_TO_WORKFLOWS, wf.getWorkflowName(), dateStr(wf.getCreateTime())); dynoClient.srem(key, workflowId); dynoClient.srem(nsKey(CORR_ID_TO_WORKFLOWS, wf.getCorrelationId()), workflowId); - dynoClient.srem(nsKey(PENDING_WORKFLOWS, wf.getWorkflowType()), workflowId); + dynoClient.srem(nsKey(PENDING_WORKFLOWS, wf.getWorkflowName()), workflowId); // Remove the object dynoClient.del(nsKey(WORKFLOW, workflowId)); @@ -449,8 +442,8 @@ public Workflow getWorkflow(String workflowId, boolean includeTasks) { if(json != null) { workflow = readValue(json, Workflow.class); - recordRedisDaoRequests("getWorkflow", "n/a", workflow.getWorkflowType()); - recordRedisDaoPayloadSize("getWorkflow", json.length(),"n/a", workflow.getWorkflowType()); + recordRedisDaoRequests("getWorkflow", "n/a", workflow.getWorkflowName()); + recordRedisDaoPayloadSize("getWorkflow", json.length(),"n/a", workflow.getWorkflowName()); if (includeTasks) { List tasks = getTasksForWorkflow(workflowId); tasks.sort(Comparator.comparingLong(Task::getScheduledTime).thenComparingInt(Task::getSeq)); @@ -562,11 +555,11 @@ private String insertOrUpdateWorkflow(Workflow workflow, boolean update) { String payload = toJson(workflow); // Store the workflow object dynoClient.set(nsKey(WORKFLOW, workflow.getWorkflowId()), payload); - recordRedisDaoRequests("storeWorkflow", "n/a", workflow.getWorkflowType()); - recordRedisDaoPayloadSize("storeWorkflow", payload.length(), "n/a", workflow.getWorkflowType()); + recordRedisDaoRequests("storeWorkflow", "n/a", workflow.getWorkflowName()); + recordRedisDaoPayloadSize("storeWorkflow", payload.length(), "n/a", workflow.getWorkflowName()); if (!update) { // Add to list of workflows for a workflowdef - String key = nsKey(WORKFLOW_DEF_TO_WORKFLOWS, workflow.getWorkflowType(), dateStr(workflow.getCreateTime())); + String key = nsKey(WORKFLOW_DEF_TO_WORKFLOWS, workflow.getWorkflowName(), dateStr(workflow.getCreateTime())); dynoClient.sadd(key, workflow.getWorkflowId()); if (workflow.getCorrelationId() != null) { // Add to list of workflows for a correlationId @@ -575,9 +568,9 @@ private String insertOrUpdateWorkflow(Workflow workflow, boolean update) { } // Add or remove from the pending workflows if (workflow.getStatus().isTerminal()) { - dynoClient.srem(nsKey(PENDING_WORKFLOWS, workflow.getWorkflowType()), workflow.getWorkflowId()); + dynoClient.srem(nsKey(PENDING_WORKFLOWS, workflow.getWorkflowName()), workflow.getWorkflowId()); } else { - dynoClient.sadd(nsKey(PENDING_WORKFLOWS, workflow.getWorkflowType()), workflow.getWorkflowId()); + dynoClient.sadd(nsKey(PENDING_WORKFLOWS, workflow.getWorkflowName()), workflow.getWorkflowId()); } workflow.setTasks(tasks); @@ -763,4 +756,20 @@ 
public List getPollData(String taskDefName) { } return pollData; } + + /** + * Validates that the given task carries the identifiers required for persistence. + * + * @param task the task to validate; must have a task id, workflow instance id and reference task name + * @throws ApplicationException with code INVALID_INPUT if the task or any required field is null + */ + private void validate(Task task) { + try { + Preconditions.checkNotNull(task, "task object cannot be null"); + Preconditions.checkNotNull(task.getTaskId(), "Task id cannot be null"); + Preconditions.checkNotNull(task.getWorkflowInstanceId(), "Workflow instance id cannot be null"); + Preconditions.checkNotNull(task.getReferenceTaskName(), "Task reference name cannot be null"); + } catch (NullPointerException npe){ + throw new ApplicationException(Code.INVALID_INPUT, npe.getMessage(), npe); + } + } } diff --git a/redis-persistence/src/main/java/com/netflix/conductor/dao/dynomite/RedisMetadataDAO.java b/redis-persistence/src/main/java/com/netflix/conductor/dao/dynomite/RedisMetadataDAO.java index 7113844d6d..4a1863bfff 100644 --- a/redis-persistence/src/main/java/com/netflix/conductor/dao/dynomite/RedisMetadataDAO.java +++ b/redis-persistence/src/main/java/com/netflix/conductor/dao/dynomite/RedisMetadataDAO.java @@ -26,6 +26,7 @@ import com.netflix.conductor.core.execution.ApplicationException; import com.netflix.conductor.core.execution.ApplicationException.Code; import com.netflix.conductor.dao.MetadataDAO; +import com.netflix.conductor.dyno.DynoProxy; import com.netflix.conductor.metrics.Monitors; import org.apache.commons.lang.StringUtils; import org.slf4j.Logger; @@ -99,277 +100,277 @@ private void refreshTaskDefs() { Monitors.error(className, "refreshTaskDefs"); logger.error("refresh TaskDefs failed ", e); } - } - - @Override - public TaskDef getTaskDef(String name) { - return Optional.ofNullable(taskDefCache.get(name)) - .orElseGet(() -> getTaskDefFromDB(name)); - } - - private TaskDef getTaskDefFromDB(String name) { - Preconditions.checkNotNull(name, "TaskDef name cannot be null"); - - TaskDef taskDef = null; - String taskDefJsonStr = dynoClient.hget(nsKey(ALL_TASK_DEFS), name); - if (taskDefJsonStr != null) { - taskDef = readValue(taskDefJsonStr, TaskDef.class); - recordRedisDaoRequests("getTaskDef"); - recordRedisDaoPayloadSize("getTaskDef", taskDefJsonStr.length(), taskDef.getName(), "n/a"); - } - return taskDef; - } - - @Override - public List getAllTaskDefs() { - List allTaskDefs = new LinkedList(); - - recordRedisDaoRequests("getAllTaskDefs"); - Map taskDefs = dynoClient.hgetAll(nsKey(ALL_TASK_DEFS)); - int size = 0; - if (taskDefs.size() > 0) { - for (String taskDefJsonStr : taskDefs.values()) { - if (taskDefJsonStr != null) { - allTaskDefs.add(readValue(taskDefJsonStr, TaskDef.class)); - size += taskDefJsonStr.length(); - } - } - recordRedisDaoPayloadSize("getAllTaskDefs", size, "n/a", "n/a"); - } - - return allTaskDefs; - } - - @Override - public void removeTaskDef(String name) { - Preconditions.checkNotNull(name, "TaskDef name cannot be null"); - Long result = dynoClient.hdel(nsKey(ALL_TASK_DEFS), name); - if (!result.equals(1L)) { - throw new ApplicationException(Code.NOT_FOUND, "Cannot remove the task - no such task definition"); - } - recordRedisDaoRequests("removeTaskDef"); - refreshTaskDefs(); - } - - @Override - public void create(WorkflowDef def) { - if (dynoClient.hexists(nsKey(WORKFLOW_DEF, def.getName()), String.valueOf(def.getVersion()))) { - throw new ApplicationException(Code.CONFLICT, "Workflow with " + def.key() + " already exists!"); - } - def.setCreateTime(System.currentTimeMillis()); - _createOrUpdate(def); - } - - @Override - public void update(WorkflowDef def) { - def.setUpdateTime(System.currentTimeMillis()); - 
_createOrUpdate(def); - } - - private Optional getWorkflowMaxVersion(String workflowName) { - return dynoClient.hkeys(nsKey(WORKFLOW_DEF, workflowName)).stream() - .filter(key -> !key.equals(LATEST)) - .map(Integer::valueOf) - .max(Comparator.naturalOrder()); - } - - @Override - /* - * @param name Name of the workflow definition - * @return Latest version of workflow definition - * @see WorkflowDef - */ - public WorkflowDef getLatest(String name) { - Preconditions.checkNotNull(name, "WorkflowDef name cannot be null"); - WorkflowDef workflowDef = null; - - Optional optionalMaxVersion = getWorkflowMaxVersion(name); - - if (optionalMaxVersion.isPresent()) { - String latestdata = dynoClient.hget(nsKey(WORKFLOW_DEF, name), optionalMaxVersion.get().toString()); - if (latestdata != null) { - workflowDef = readValue(latestdata, WorkflowDef.class); - } - } - - return workflowDef; - } - - public List getAllVersions(String name) { - Preconditions.checkNotNull(name, "WorkflowDef name cannot be null"); - List workflows = new LinkedList(); - - recordRedisDaoRequests("getAllWorkflowDefsByName"); - Map workflowDefs = dynoClient.hgetAll(nsKey(WORKFLOW_DEF, name)); - int size = 0; - for (String key : workflowDefs.keySet()) { - if (key.equals(LATEST)) { - continue; - } - String workflowDef = workflowDefs.get(key); - workflows.add(readValue(workflowDef, WorkflowDef.class)); - size += workflowDef.length(); - } - recordRedisDaoPayloadSize("getAllWorkflowDefsByName", size, "n/a", name); - - return workflows; - } - - @Override - public WorkflowDef get(String name, int version) { - Preconditions.checkNotNull(name, "WorkflowDef name cannot be null"); - WorkflowDef def = null; - - recordRedisDaoRequests("getWorkflowDef"); - String workflowDefJsonString = dynoClient.hget(nsKey(WORKFLOW_DEF, name), String.valueOf(version)); - if (workflowDefJsonString != null) { - def = readValue(workflowDefJsonString, WorkflowDef.class); - recordRedisDaoPayloadSize("getWorkflowDef", workflowDefJsonString.length(), "n/a", name); - } - return def; - } - - @Override - public void removeWorkflowDef(String name, Integer version) { - Preconditions.checkArgument(StringUtils.isNotBlank(name), "WorkflowDef name cannot be null"); - Preconditions.checkNotNull(version, "Input version cannot be null"); - Long result = dynoClient.hdel(nsKey(WORKFLOW_DEF, name), String.valueOf(version)); - if (!result.equals(1L)) { - throw new ApplicationException(Code.NOT_FOUND, String.format("Cannot remove the workflow - no such workflow" + - " definition: %s version: %d", name, version)); - } - - // check if there are any more versions remaining if not delete the - // workflow name - Optional optionMaxVersion = getWorkflowMaxVersion(name); - - // delete workflow name - if (!optionMaxVersion.isPresent()) { - dynoClient.srem(nsKey(WORKFLOW_DEF_NAMES), name); - } - - recordRedisDaoRequests("removeWorkflowDef"); - } - - @Override - public List findAll() { - Set wfNames = dynoClient.smembers(nsKey(WORKFLOW_DEF_NAMES)); - return new ArrayList<>(wfNames); - } - - @Override - public List getAll() { - List workflows = new LinkedList(); - - // Get all from WORKFLOW_DEF_NAMES - recordRedisDaoRequests("getAllWorkflowDefs"); - Set wfNames = dynoClient.smembers(nsKey(WORKFLOW_DEF_NAMES)); - int size = 0; - for (String wfName : wfNames) { - Map workflowDefs = dynoClient.hgetAll(nsKey(WORKFLOW_DEF, wfName)); - for (String key : workflowDefs.keySet()) { - if (key.equals(LATEST)) { - continue; - } - String workflowDef = workflowDefs.get(key); - 
workflows.add(readValue(workflowDef, WorkflowDef.class)); - size += workflowDef.length(); - } - } - recordRedisDaoPayloadSize("getAllWorkflowDefs", size, "n/a", "n/a"); - return workflows; - } - - //Event Handler APIs - - @Override - public void addEventHandler(EventHandler eventHandler) { - Preconditions.checkNotNull(eventHandler.getName(), "Missing Name"); - if(getEventHandler(eventHandler.getName()) != null) { - throw new ApplicationException(Code.CONFLICT, "EventHandler with name " + eventHandler.getName() + " already exists!"); - } - index(eventHandler); - dynoClient.hset(nsKey(EVENT_HANDLERS), eventHandler.getName(), toJson(eventHandler)); - recordRedisDaoRequests("addEventHandler"); - } - - @Override - public void updateEventHandler(EventHandler eventHandler) { - Preconditions.checkNotNull(eventHandler.getName(), "Missing Name"); - EventHandler existing = getEventHandler(eventHandler.getName()); - if(existing == null) { - throw new ApplicationException(Code.NOT_FOUND, "EventHandler with name " + eventHandler.getName() + " not found!"); - } - index(eventHandler); - dynoClient.hset(nsKey(EVENT_HANDLERS), eventHandler.getName(), toJson(eventHandler)); - recordRedisDaoRequests("updateEventHandler"); - } - - @Override - public void removeEventHandlerStatus(String name) { - EventHandler existing = getEventHandler(name); - if(existing == null) { - throw new ApplicationException(Code.NOT_FOUND, "EventHandler with name " + name + " not found!"); - } - dynoClient.hdel(nsKey(EVENT_HANDLERS), name); - recordRedisDaoRequests("removeEventHandler"); - removeIndex(existing); - } - - @Override - public List getEventHandlers() { - Map all = dynoClient.hgetAll(nsKey(EVENT_HANDLERS)); - List handlers = new LinkedList<>(); - all.entrySet().forEach(e -> { - String json = e.getValue(); - EventHandler eh = readValue(json, EventHandler.class); - handlers.add(eh); - }); - recordRedisDaoRequests("getAllEventHandlers"); - return handlers; - } - - private void index(EventHandler eh) { - String event = eh.getEvent(); - String key = nsKey(EVENT_HANDLERS_BY_EVENT, event); - dynoClient.sadd(key, eh.getName()); - } - - private void removeIndex(EventHandler eh) { - String event = eh.getEvent(); - String key = nsKey(EVENT_HANDLERS_BY_EVENT, event); - dynoClient.srem(key, eh.getName()); - } - - @Override - public List getEventHandlersForEvent(String event, boolean activeOnly) { - String key = nsKey(EVENT_HANDLERS_BY_EVENT, event); - Set names = dynoClient.smembers(key); - List handlers = new LinkedList<>(); - for(String name : names) { - try { - EventHandler eventHandler = getEventHandler(name); - recordRedisDaoEventRequests("getEventHandler", event); - if(eventHandler.getEvent().equals(event) && (!activeOnly || eventHandler.isActive())) { - handlers.add(eventHandler); - } - } catch (ApplicationException ae) { - if(ae.getCode() == Code.NOT_FOUND) {} - throw ae; - } - } - return handlers; - } - - private EventHandler getEventHandler(String name) { - EventHandler eventHandler = null; - String json = dynoClient.hget(nsKey(EVENT_HANDLERS), name); - if (json != null) { - eventHandler = readValue(json, EventHandler.class); - } - return eventHandler; - - } + } + + @Override + public TaskDef getTaskDef(String name) { + return Optional.ofNullable(taskDefCache.get(name)) + .orElseGet(() -> getTaskDefFromDB(name)); + } + + private TaskDef getTaskDefFromDB(String name) { + Preconditions.checkNotNull(name, "TaskDef name cannot be null"); + + TaskDef taskDef = null; + String taskDefJsonStr = dynoClient.hget(nsKey(ALL_TASK_DEFS), 
name); + if (taskDefJsonStr != null) { + taskDef = readValue(taskDefJsonStr, TaskDef.class); + recordRedisDaoRequests("getTaskDef"); + recordRedisDaoPayloadSize("getTaskDef", taskDefJsonStr.length(), taskDef.getName(), "n/a"); + } + return taskDef; + } + + @Override + public List getAllTaskDefs() { + List allTaskDefs = new LinkedList(); + + recordRedisDaoRequests("getAllTaskDefs"); + Map taskDefs = dynoClient.hgetAll(nsKey(ALL_TASK_DEFS)); + int size = 0; + if (taskDefs.size() > 0) { + for (String taskDefJsonStr : taskDefs.values()) { + if (taskDefJsonStr != null) { + allTaskDefs.add(readValue(taskDefJsonStr, TaskDef.class)); + size += taskDefJsonStr.length(); + } + } + recordRedisDaoPayloadSize("getAllTaskDefs", size, "n/a", "n/a"); + } + + return allTaskDefs; + } + + @Override + public void removeTaskDef(String name) { + Preconditions.checkNotNull(name, "TaskDef name cannot be null"); + Long result = dynoClient.hdel(nsKey(ALL_TASK_DEFS), name); + if (!result.equals(1L)) { + throw new ApplicationException(Code.NOT_FOUND, "Cannot remove the task - no such task definition"); + } + recordRedisDaoRequests("removeTaskDef"); + refreshTaskDefs(); + } + + @Override + public void create(WorkflowDef def) { + if (dynoClient.hexists(nsKey(WORKFLOW_DEF, def.getName()), String.valueOf(def.getVersion()))) { + throw new ApplicationException(Code.CONFLICT, "Workflow with " + def.key() + " already exists!"); + } + def.setCreateTime(System.currentTimeMillis()); + _createOrUpdate(def); + } + + @Override + public void update(WorkflowDef def) { + def.setUpdateTime(System.currentTimeMillis()); + _createOrUpdate(def); + } + + @Override + /* + * @param name Name of the workflow definition + * @return Latest version of workflow definition + * @see WorkflowDef + */ + public Optional getLatest(String name) { + Preconditions.checkNotNull(name, "WorkflowDef name cannot be null"); + WorkflowDef workflowDef = null; + + Optional optionalMaxVersion = getWorkflowMaxVersion(name); + + if (optionalMaxVersion.isPresent()) { + String latestdata = dynoClient.hget(nsKey(WORKFLOW_DEF, name), optionalMaxVersion.get().toString()); + if (latestdata != null) { + workflowDef = readValue(latestdata, WorkflowDef.class); + } + } + + return Optional.ofNullable(workflowDef); + } + + private Optional getWorkflowMaxVersion(String workflowName) { + return dynoClient.hkeys(nsKey(WORKFLOW_DEF, workflowName)).stream() + .filter(key -> !key.equals(LATEST)) + .map(Integer::valueOf) + .max(Comparator.naturalOrder()); + } + + public List getAllVersions(String name) { + Preconditions.checkNotNull(name, "WorkflowDef name cannot be null"); + List workflows = new LinkedList(); + + recordRedisDaoRequests("getAllWorkflowDefsByName"); + Map workflowDefs = dynoClient.hgetAll(nsKey(WORKFLOW_DEF, name)); + int size = 0; + for (String key : workflowDefs.keySet()) { + if (key.equals(LATEST)) { + continue; + } + String workflowDef = workflowDefs.get(key); + workflows.add(readValue(workflowDef, WorkflowDef.class)); + size += workflowDef.length(); + } + recordRedisDaoPayloadSize("getAllWorkflowDefsByName", size, "n/a", name); + + return workflows; + } + + @Override + public Optional get(String name, int version) { + Preconditions.checkNotNull(name, "WorkflowDef name cannot be null"); + WorkflowDef def = null; + + recordRedisDaoRequests("getWorkflowDef"); + String workflowDefJsonString = dynoClient.hget(nsKey(WORKFLOW_DEF, name), String.valueOf(version)); + if (workflowDefJsonString != null) { + def = readValue(workflowDefJsonString, WorkflowDef.class); + 
recordRedisDaoPayloadSize("getWorkflowDef", workflowDefJsonString.length(), "n/a", name); + } + return Optional.ofNullable(def); + } + + @Override + public void removeWorkflowDef(String name, Integer version) { + Preconditions.checkArgument(StringUtils.isNotBlank(name), "WorkflowDef name cannot be null"); + Preconditions.checkNotNull(version, "Input version cannot be null"); + Long result = dynoClient.hdel(nsKey(WORKFLOW_DEF, name), String.valueOf(version)); + if (!result.equals(1L)) { + throw new ApplicationException(Code.NOT_FOUND, String.format("Cannot remove the workflow - no such workflow" + + " definition: %s version: %d", name, version)); + } + + // check if there are any more versions remaining if not delete the + // workflow name + Optional optionMaxVersion = getWorkflowMaxVersion(name); + + // delete workflow name + if (!optionMaxVersion.isPresent()) { + dynoClient.srem(nsKey(WORKFLOW_DEF_NAMES), name); + } + + recordRedisDaoRequests("removeWorkflowDef"); + } + + @Override + public List findAll() { + Set wfNames = dynoClient.smembers(nsKey(WORKFLOW_DEF_NAMES)); + return new ArrayList<>(wfNames); + } + + @Override + public List getAll() { + List workflows = new LinkedList(); + + // Get all from WORKFLOW_DEF_NAMES + recordRedisDaoRequests("getAllWorkflowDefs"); + Set wfNames = dynoClient.smembers(nsKey(WORKFLOW_DEF_NAMES)); + int size = 0; + for (String wfName : wfNames) { + Map workflowDefs = dynoClient.hgetAll(nsKey(WORKFLOW_DEF, wfName)); + for (String key : workflowDefs.keySet()) { + if (key.equals(LATEST)) { + continue; + } + String workflowDef = workflowDefs.get(key); + workflows.add(readValue(workflowDef, WorkflowDef.class)); + size += workflowDef.length(); + } + } + recordRedisDaoPayloadSize("getAllWorkflowDefs", size, "n/a", "n/a"); + return workflows; + } + + //Event Handler APIs + + @Override + public void addEventHandler(EventHandler eventHandler) { + Preconditions.checkNotNull(eventHandler.getName(), "Missing Name"); + if(getEventHandler(eventHandler.getName()) != null) { + throw new ApplicationException(Code.CONFLICT, "EventHandler with name " + eventHandler.getName() + " already exists!"); + } + index(eventHandler); + dynoClient.hset(nsKey(EVENT_HANDLERS), eventHandler.getName(), toJson(eventHandler)); + recordRedisDaoRequests("addEventHandler"); + } + + @Override + public void updateEventHandler(EventHandler eventHandler) { + Preconditions.checkNotNull(eventHandler.getName(), "Missing Name"); + EventHandler existing = getEventHandler(eventHandler.getName()); + if(existing == null) { + throw new ApplicationException(Code.NOT_FOUND, "EventHandler with name " + eventHandler.getName() + " not found!"); + } + index(eventHandler); + dynoClient.hset(nsKey(EVENT_HANDLERS), eventHandler.getName(), toJson(eventHandler)); + recordRedisDaoRequests("updateEventHandler"); + } + + @Override + public void removeEventHandlerStatus(String name) { + EventHandler existing = getEventHandler(name); + if(existing == null) { + throw new ApplicationException(Code.NOT_FOUND, "EventHandler with name " + name + " not found!"); + } + dynoClient.hdel(nsKey(EVENT_HANDLERS), name); + recordRedisDaoRequests("removeEventHandler"); + removeIndex(existing); + } + + @Override + public List getEventHandlers() { + Map all = dynoClient.hgetAll(nsKey(EVENT_HANDLERS)); + List handlers = new LinkedList<>(); + all.entrySet().forEach(e -> { + String json = e.getValue(); + EventHandler eh = readValue(json, EventHandler.class); + handlers.add(eh); + }); + recordRedisDaoRequests("getAllEventHandlers"); + return 
handlers; + } + + private void index(EventHandler eh) { + String event = eh.getEvent(); + String key = nsKey(EVENT_HANDLERS_BY_EVENT, event); + dynoClient.sadd(key, eh.getName()); + } + + private void removeIndex(EventHandler eh) { + String event = eh.getEvent(); + String key = nsKey(EVENT_HANDLERS_BY_EVENT, event); + dynoClient.srem(key, eh.getName()); + } + + @Override + public List getEventHandlersForEvent(String event, boolean activeOnly) { + String key = nsKey(EVENT_HANDLERS_BY_EVENT, event); + Set names = dynoClient.smembers(key); + List handlers = new LinkedList<>(); + for(String name : names) { + try { + EventHandler eventHandler = getEventHandler(name); + recordRedisDaoEventRequests("getEventHandler", event); + if(eventHandler.getEvent().equals(event) && (!activeOnly || eventHandler.isActive())) { + handlers.add(eventHandler); + } + } catch (ApplicationException ae) { + if(ae.getCode() == Code.NOT_FOUND) {} + throw ae; + } + } + return handlers; + } + + private EventHandler getEventHandler(String name) { + EventHandler eventHandler = null; + String json = dynoClient.hget(nsKey(EVENT_HANDLERS), name); + if (json != null) { + eventHandler = readValue(json, EventHandler.class); + } + return eventHandler; + + } private void _createOrUpdate(WorkflowDef workflowDef) { // First set the workflow def @@ -379,4 +380,5 @@ private void _createOrUpdate(WorkflowDef workflowDef) { dynoClient.sadd(nsKey(WORKFLOW_DEF_NAMES), workflowDef.getName()); recordRedisDaoRequests("storeWorkflowDef", "n/a", workflowDef.getName()); } + } diff --git a/redis-persistence/src/main/java/com/netflix/conductor/dao/dynomite/queue/DynoQueueDAO.java b/redis-persistence/src/main/java/com/netflix/conductor/dao/dynomite/queue/DynoQueueDAO.java index 85e35471d4..f7ce8ed8cb 100644 --- a/redis-persistence/src/main/java/com/netflix/conductor/dao/dynomite/queue/DynoQueueDAO.java +++ b/redis-persistence/src/main/java/com/netflix/conductor/dao/dynomite/queue/DynoQueueDAO.java @@ -1,17 +1,14 @@ /** * Copyright 2016 Netflix, Inc. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
*/ package com.netflix.conductor.dao.dynomite.queue; @@ -19,7 +16,6 @@ import com.netflix.conductor.dao.QueueDAO; import com.netflix.discovery.DiscoveryClient; import com.netflix.dyno.connectionpool.Host; -import com.netflix.dyno.connectionpool.Host.Status; import com.netflix.dyno.contrib.EurekaHostsSupplier; import com.netflix.dyno.jedis.DynoJedisClient; import com.netflix.dyno.queues.DynoQueue; @@ -45,177 +41,184 @@ @Singleton public class DynoQueueDAO implements QueueDAO { - private static Logger logger = LoggerFactory.getLogger(DynoQueueDAO.class); - - private RedisQueues queues; - - private JedisCommands dynoClient; - - private JedisCommands dynoClientRead; - - private ShardSupplier ss; - - private String domain; - - private Configuration config; - - @Inject - public DynoQueueDAO(DiscoveryClient dc, Configuration config) { - - logger.info("DynoQueueDAO::INIT"); - - this.config = config; - this.domain = config.getProperty("workflow.dyno.keyspace.domain", null); - String cluster = config.getProperty("workflow.dynomite.cluster", null); - final int readConnPort = config.getIntProperty("queues.dynomite.nonQuorum.port", 22122); - - EurekaHostsSupplier hostSupplier = new EurekaHostsSupplier(cluster, dc) { - @Override - public List getHosts() { - List hosts = super.getHosts(); - List updatedHosts = new ArrayList<>(hosts.size()); - hosts.forEach(host -> { - updatedHosts.add(new Host(host.getHostName(), host.getIpAddress(), readConnPort, host.getRack(), host.getDatacenter(), host.isUp() ? Status.Up : Status.Down)); - }); - return updatedHosts; - } - }; - - this.dynoClientRead = new DynoJedisClient.Builder().withApplicationName(config.getAppId()).withDynomiteClusterName(cluster).withHostSupplier(hostSupplier).build(); - DynoJedisClient dyno = new DynoJedisClient.Builder().withApplicationName(config.getAppId()).withDynomiteClusterName(cluster).withDiscoveryClient(dc).build(); - - this.dynoClient = dyno; - - String region = config.getRegion(); - String localDC = config.getAvailabilityZone(); - - if(localDC == null) { - throw new Error("Availability zone is not defined. Ensure Configuration.getAvailabilityZone() returns a non-null and non-empty value."); - } - - localDC = localDC.replaceAll(region, ""); - this.ss = new DynoShardSupplier(dyno.getConnPool().getConfiguration().getHostSupplier(), region, localDC); - init(); - } + private static Logger logger = LoggerFactory.getLogger(DynoQueueDAO.class); - public DynoQueueDAO(JedisCommands dynoClient, JedisCommands dynoClientRead, ShardSupplier shardSupplier, Configuration config) { - this.dynoClient = dynoClient; - this.dynoClientRead = dynoClientRead; - this.ss = shardSupplier; - this.config = config; - init(); - } + private RedisQueues queues; - public void init() { + private JedisCommands dynoClient; - String rootNamespace = config.getProperty("workflow.namespace.queue.prefix", null); - String stack = config.getStack(); - String prefix = rootNamespace + "." + stack; - if (domain != null) { - prefix = prefix + "." 
+ domain; - } - queues = new RedisQueues(dynoClient, dynoClientRead, prefix, ss, 60_000, 60_000); - logger.info("DynoQueueDAO initialized with prefix " + prefix + "!"); - } + private JedisCommands dynoClientRead; - @Override - public void push(String queueName, String id, long offsetTimeInSecond) { - Message msg = new Message(id, null); - msg.setTimeout(offsetTimeInSecond, TimeUnit.SECONDS); - queues.get(queueName).push(Collections.singletonList(msg)); - } + private ShardSupplier ss; - @Override - public void push(String queueName, List messages) { - List msgs = messages.stream() - .map(msg -> new Message(msg.getId(), msg.getPayload())) - .collect(Collectors.toList()); - queues.get(queueName).push(msgs); - } - - @Override - public boolean pushIfNotExists(String queueName, String id, long offsetTimeInSecond) { - DynoQueue queue = queues.get(queueName); - if (queue.get(id) != null) { - return false; - } - Message msg = new Message(id, null); - msg.setTimeout(offsetTimeInSecond, TimeUnit.SECONDS); - queue.push(Collections.singletonList(msg)); - return true; - } + private String domain; - @Override - public List pop(String queueName, int count, int timeout) { - List msg = queues.get(queueName).pop(count, timeout, TimeUnit.MILLISECONDS); - return msg.stream() - .map(Message::getId) - .collect(Collectors.toList()); - } + private Configuration config; - @Override - public List pollMessages(String queueName, int count, int timeout) { - List msgs = queues.get(queueName).pop(count, timeout, TimeUnit.MILLISECONDS); - return msgs.stream() - .map(msg -> new com.netflix.conductor.core.events.queue.Message(msg.getId(), msg.getPayload(), null)) - .collect(Collectors.toList()); - } - - @Override - public void remove(String queueName, String messageId) { - queues.get(queueName).remove(messageId); - } + @Inject + public DynoQueueDAO(RedisQueues queues) { + this.queues = queues; + } - @Override - public int getSize(String queueName) { - return (int) queues.get(queueName).size(); - } + @Deprecated + public DynoQueueDAO(DiscoveryClient dc, Configuration config) { - @Override - public boolean ack(String queueName, String messageId) { - return queues.get(queueName).ack(messageId); + logger.info("DynoQueueDAO::INIT"); - } - - @Override - public boolean setUnackTimeout(String queueName, String messageId, long timeout) { - return queues.get(queueName).setUnackTimeout(messageId, timeout); - } + this.config = config; + this.domain = config.getProperty("workflow.dyno.keyspace.domain", null); + String cluster = config.getProperty("workflow.dynomite.cluster", null); + final int readConnPort = config.getIntProperty("queues.dynomite.nonQuorum.port", 22122); - @Override - public void flush(String queueName) { - DynoQueue queue = queues.get(queueName); - if (queue != null) { - queue.clear(); - } - } + EurekaHostsSupplier hostSupplier = new EurekaHostsSupplier(cluster, dc) { + @Override + public List getHosts() { + List hosts = super.getHosts(); + List updatedHosts = new ArrayList<>(hosts.size()); + hosts.forEach(host -> { + updatedHosts.add(new Host(host.getHostName(), host.getIpAddress(), readConnPort, host.getRack(), host.getDatacenter(), host.isUp() ? 
Host.Status.Up : Host.Status.Down)); + }); + return updatedHosts; + } + }; - @Override - public Map queuesDetail() { - return queues.queues().stream() - .collect(Collectors.toMap(DynoQueue::getName, DynoQueue::size)); - } + this.dynoClientRead = new DynoJedisClient.Builder().withApplicationName(config.getAppId()).withDynomiteClusterName(cluster).withHostSupplier(hostSupplier).build(); + DynoJedisClient dyno = new DynoJedisClient.Builder().withApplicationName(config.getAppId()).withDynomiteClusterName(cluster).withDiscoveryClient(dc).build(); - @Override - public Map>> queuesDetailVerbose() { - return queues.queues().stream() - .collect(Collectors.toMap(DynoQueue::getName, DynoQueue::shardSizes)); - } - - public void processUnacks(String queueName) { - ((RedisDynoQueue)queues.get(queueName)).processUnacks(); - } + this.dynoClient = dyno; - @Override - public boolean setOffsetTime(String queueName, String id, long offsetTimeInSecond) { - DynoQueue queue = queues.get(queueName); - return queue.setTimeout(id, offsetTimeInSecond); - - } + String region = config.getRegion(); + String localDC = config.getAvailabilityZone(); + + if (localDC == null) { + throw new Error("Availability zone is not defined. Ensure Configuration.getAvailabilityZone() returns a non-null and non-empty value."); + } + + localDC = localDC.replaceAll(region, ""); + this.ss = new DynoShardSupplier(dyno.getConnPool().getConfiguration().getHostSupplier(), region, localDC); + init(); + } + + @Deprecated + public DynoQueueDAO(JedisCommands dynoClient, JedisCommands dynoClientRead, ShardSupplier ss, Configuration config) { + this.dynoClient = dynoClient; + this.dynoClientRead = dynoClientRead; + this.ss = ss; + this.config = config; + init(); + } + + @Deprecated + private void init() { + + String rootNamespace = config.getProperty("workflow.namespace.queue.prefix", null); + String stack = config.getStack(); + String prefix = rootNamespace + "." + stack; + if (domain != null) { + prefix = prefix + "." 
+ domain; + } + queues = new RedisQueues(dynoClient, dynoClientRead, prefix, ss, 60_000, 60_000); + logger.info("DynoQueueDAO initialized with prefix " + prefix + "!"); + } + + @Override + public void push(String queueName, String id, long offsetTimeInSecond) { + Message msg = new Message(id, null); + msg.setTimeout(offsetTimeInSecond, TimeUnit.SECONDS); + queues.get(queueName).push(Collections.singletonList(msg)); + } + + @Override + public void push(String queueName, List messages) { + List msgs = messages.stream() + .map(msg -> new Message(msg.getId(), msg.getPayload())) + .collect(Collectors.toList()); + queues.get(queueName).push(msgs); + } + + @Override + public boolean pushIfNotExists(String queueName, String id, long offsetTimeInSecond) { + DynoQueue queue = queues.get(queueName); + if (queue.get(id) != null) { + return false; + } + Message msg = new Message(id, null); + msg.setTimeout(offsetTimeInSecond, TimeUnit.SECONDS); + queue.push(Collections.singletonList(msg)); + return true; + } + + @Override + public List pop(String queueName, int count, int timeout) { + List msg = queues.get(queueName).pop(count, timeout, TimeUnit.MILLISECONDS); + return msg.stream() + .map(Message::getId) + .collect(Collectors.toList()); + } + + @Override + public List pollMessages(String queueName, int count, int timeout) { + List msgs = queues.get(queueName).pop(count, timeout, TimeUnit.MILLISECONDS); + return msgs.stream() + .map(msg -> new com.netflix.conductor.core.events.queue.Message(msg.getId(), msg.getPayload(), null)) + .collect(Collectors.toList()); + } + + @Override + public void remove(String queueName, String messageId) { + queues.get(queueName).remove(messageId); + } + + @Override + public int getSize(String queueName) { + return (int) queues.get(queueName).size(); + } + + @Override + public boolean ack(String queueName, String messageId) { + return queues.get(queueName).ack(messageId); + + } + + @Override + public boolean setUnackTimeout(String queueName, String messageId, long timeout) { + return queues.get(queueName).setUnackTimeout(messageId, timeout); + } + + @Override + public void flush(String queueName) { + DynoQueue queue = queues.get(queueName); + if (queue != null) { + queue.clear(); + } + } + + @Override + public Map queuesDetail() { + Map map = queues.queues().stream().collect(Collectors.toMap(queue -> queue.getName(), q -> q.size())); + return map; + } + + @Override + public Map>> queuesDetailVerbose() { + return queues.queues().stream() + .collect(Collectors.toMap(DynoQueue::getName, DynoQueue::shardSizes)); + } + + public void processUnacks(String queueName) { + ((RedisDynoQueue) queues.get(queueName)).processUnacks(); + } + + @Override + public boolean setOffsetTime(String queueName, String id, long offsetTimeInSecond) { + DynoQueue queue = queues.get(queueName); + return queue.setTimeout(id, offsetTimeInSecond); + + } @Override public boolean exists(String queueName, String id) { DynoQueue queue = queues.get(queueName); return Optional.ofNullable(queue.get(id)).isPresent(); } -} \ No newline at end of file +} diff --git a/redis-persistence/src/main/java/com/netflix/conductor/dyno/DynoProxy.java b/redis-persistence/src/main/java/com/netflix/conductor/dyno/DynoProxy.java new file mode 100644 index 0000000000..7f3415b0b7 --- /dev/null +++ b/redis-persistence/src/main/java/com/netflix/conductor/dyno/DynoProxy.java @@ -0,0 +1,265 @@ +/** + * Copyright 2016 Netflix, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.dyno; + +import com.google.inject.Singleton; + +import com.netflix.conductor.core.config.Configuration; +import com.netflix.discovery.DiscoveryClient; +import com.netflix.dyno.connectionpool.exception.DynoException; +import com.netflix.dyno.jedis.DynoJedisClient; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.ExecutionException; + +import javax.inject.Inject; + +import redis.clients.jedis.JedisCommands; +import redis.clients.jedis.ScanParams; +import redis.clients.jedis.ScanResult; +import redis.clients.jedis.Tuple; +import redis.clients.jedis.params.sortedset.ZAddParams; + +/** + * + * @author Viren Proxy for the Dynomite client + */ +@Singleton +public class DynoProxy { + + private static Logger logger = LoggerFactory.getLogger(DynoProxy.class); + + protected DiscoveryClient dc; + + protected JedisCommands dynoClient; + + @Inject + public DynoProxy(JedisCommands dynoClient) { + this.dynoClient = dynoClient; + } + + @Deprecated + /** + * @deprecated The preferred method of construction for this use case is via DynoProxyDiscoveryProvider. 
+ */ + public DynoProxy(DiscoveryClient dc, Configuration config) throws DynoException, InterruptedException, ExecutionException { + this.dc = dc; + String cluster = config.getProperty("workflow.dynomite.cluster", null); + String applicationName = config.getAppId(); + this.dynoClient = new DynoJedisClient.Builder() + .withApplicationName(applicationName) + .withDynomiteClusterName(cluster) + .withDiscoveryClient(dc) + .build(); + } + + public Set zrange(String key, long start, long end) { + return dynoClient.zrange(key, start, end); + } + + public Set zrangeByScoreWithScores(String key, double maxScore, int count) { + return dynoClient.zrangeByScoreWithScores(key, 0, maxScore, 0, count); + } + + public Set zrangeByScore(String key, double maxScore, int count) { + return dynoClient.zrangeByScore(key, 0, maxScore, 0, count); + } + + public Set zrangeByScore(String key, double minScore, double maxScore, int count) { + return dynoClient.zrangeByScore(key, minScore, maxScore, 0, count); + } + + public ScanResult zscan(String key, int cursor) { + return dynoClient.zscan(key, "" + cursor); + } + + public String get(String key) { + return dynoClient.get(key); + } + + public Long zcard(String key) { + return dynoClient.zcard(key); + } + + public Long del(String key) { + return dynoClient.del(key); + } + + public Long zrem(String key, String member) { + return dynoClient.zrem(key, member); + } + + public long zremrangeByScore(String key, String start, String end) { return dynoClient.zremrangeByScore(key, start, end);} + + public long zcount(String key, double min, double max) { return dynoClient.zcount(key, min, max);} + + public String set(String key, String value) { + String retVal = dynoClient.set(key, value); + return retVal; + } + + public Long setnx(String key, String value) { + Long added = dynoClient.setnx(key, value); + return added; + } + + public Long zadd(String key, double score, String member) { + Long retVal = dynoClient.zadd(key, score, member); + return retVal; + } + + public Long zaddnx(String key, double score, String member) { + ZAddParams params = ZAddParams.zAddParams().nx(); + Long retVal = dynoClient.zadd(key, score, member, params); + return retVal; + } + + public Long hset(String key, String field, String value) { + Long retVal = dynoClient.hset(key, field, value); + return retVal; + } + + public Long hsetnx(String key, String field, String value) { + Long retVal = dynoClient.hsetnx(key, field, value); + return retVal; + } + + public Long hlen(String key) { + Long retVal = dynoClient.hlen(key); + return retVal; + } + + public String hget(String key, String field) { + return dynoClient.hget(key, field); + } + + public Optional optionalHget(String key, String field) { + return Optional.ofNullable(dynoClient.hget(key, field)); + } + + public Map hscan(String key, int count) { + Map m = new HashMap<>(); + int cursor = 0; + do { + ScanResult> sr = dynoClient.hscan(key, "" + cursor); + cursor = Integer.parseInt(sr.getStringCursor()); + for (Entry r : sr.getResult()) { + m.put(r.getKey(), r.getValue()); + } + if (m.size() > count) { + break; + } + } while (cursor > 0); + + return m; + } + + public Map hgetAll(String key) { + Map m = new HashMap<>(); + JedisCommands dyno = dynoClient; + int cursor = 0; + do { + ScanResult> sr = dyno.hscan(key, "" + cursor); + cursor = Integer.parseInt(sr.getStringCursor()); + for (Entry r : sr.getResult()) { + m.put(r.getKey(), r.getValue()); + } + } while (cursor > 0); + + return m; + } + + public List hvals(String key) { + logger.trace("hvals 
{}", key); + return dynoClient.hvals(key); + } + + public Set hkeys(String key) { + logger.trace("hkeys {}", key); + JedisCommands client = dynoClient; + Set keys = new HashSet<>(); + int cursor = 0; + do { + ScanResult> sr = client.hscan(key, "" + cursor); + cursor = Integer.parseInt(sr.getStringCursor()); + List> result = sr.getResult(); + for (Entry e : result) { + keys.add(e.getKey()); + } + } while (cursor > 0); + + return keys; + } + + public Long hdel(String key, String... fields) { + logger.trace("hdel {} {}", key, fields[0]); + return dynoClient.hdel(key, fields); + } + + public Long expire(String key, int seconds) { + return dynoClient.expire(key, seconds); + } + + public Boolean hexists(String key, String field) { + return dynoClient.hexists(key, field); + } + + public Long sadd(String key, String value) { + logger.trace("sadd {} {}", key, value); + Long retVal = dynoClient.sadd(key, value); + return retVal; + } + + public Long srem(String key, String member) { + logger.trace("srem {} {}", key, member); + Long retVal = dynoClient.srem(key, member); + return retVal; + } + + public boolean sismember(String key, String member) { + return dynoClient.sismember(key, member); + } + + public Set smembers(String key) { + logger.trace("smembers {}", key); + JedisCommands client = dynoClient; + Set r = new HashSet<>(); + int cursor = 0; + ScanParams sp = new ScanParams(); + sp.count(50); + + do { + ScanResult sr = client.sscan(key, "" + cursor, sp); + cursor = Integer.parseInt(sr.getStringCursor()); + r.addAll(sr.getResult()); + + } while (cursor > 0); + + return r; + + } + + public Long scard(String key) { + return dynoClient.scard(key); + } + +} diff --git a/redis-persistence/src/main/java/com/netflix/conductor/dyno/DynoProxyDiscoveryProvider.java b/redis-persistence/src/main/java/com/netflix/conductor/dyno/DynoProxyDiscoveryProvider.java new file mode 100644 index 0000000000..753321d5f6 --- /dev/null +++ b/redis-persistence/src/main/java/com/netflix/conductor/dyno/DynoProxyDiscoveryProvider.java @@ -0,0 +1,30 @@ +package com.netflix.conductor.dyno; + +import com.netflix.discovery.DiscoveryClient; +import com.netflix.dyno.jedis.DynoJedisClient; + +import javax.inject.Inject; +import javax.inject.Provider; + +import redis.clients.jedis.JedisCommands; + +public class DynoProxyDiscoveryProvider implements Provider { + private final DiscoveryClient discoveryClient; + private final DynomiteConfiguration configuration; + + @Inject + public DynoProxyDiscoveryProvider(DiscoveryClient discoveryClient, DynomiteConfiguration configuration) { + this.discoveryClient = discoveryClient; + this.configuration = configuration; + } + + @Override + public JedisCommands get() { + return new DynoJedisClient + .Builder() + .withApplicationName(configuration.getAppId()) + .withDynomiteClusterName(configuration.getCluster()) + .withDiscoveryClient(discoveryClient) + .build(); + } +} diff --git a/redis-persistence/src/main/java/com/netflix/conductor/dyno/DynoShardSupplierProvider.java b/redis-persistence/src/main/java/com/netflix/conductor/dyno/DynoShardSupplierProvider.java new file mode 100644 index 0000000000..6b9eae08d4 --- /dev/null +++ b/redis-persistence/src/main/java/com/netflix/conductor/dyno/DynoShardSupplierProvider.java @@ -0,0 +1,35 @@ +package com.netflix.conductor.dyno; + +import com.google.inject.ProvisionException; +import com.netflix.dyno.connectionpool.HostSupplier; +import com.netflix.dyno.queues.ShardSupplier; +import com.netflix.dyno.queues.shard.DynoShardSupplier; + +import 
javax.inject.Inject; +import javax.inject.Provider; + +public class DynoShardSupplierProvider implements Provider { + + private final HostSupplier hostSupplier; + private final DynomiteConfiguration configuration; + + @Inject + public DynoShardSupplierProvider(HostSupplier hostSupplier, DynomiteConfiguration dynomiteConfiguration) { + this.hostSupplier = hostSupplier; + this.configuration = dynomiteConfiguration; + } + + @Override + public ShardSupplier get() { + if(configuration.getAvailabilityZone() == null) { + throw new ProvisionException( + "Availability zone is not defined. Ensure Configuration.getAvailabilityZone() returns a non-null " + + "and non-empty value." + ); + } + + String localDC = configuration.getAvailabilityZone().replaceAll(configuration.getRegion(), ""); + + return new DynoShardSupplier(hostSupplier, configuration.getRegion(), localDC); + } +} diff --git a/redis-persistence/src/main/java/com/netflix/conductor/dyno/DynomiteConfiguration.java b/redis-persistence/src/main/java/com/netflix/conductor/dyno/DynomiteConfiguration.java new file mode 100644 index 0000000000..9607b24acf --- /dev/null +++ b/redis-persistence/src/main/java/com/netflix/conductor/dyno/DynomiteConfiguration.java @@ -0,0 +1,65 @@ +package com.netflix.conductor.dyno; + +import com.netflix.conductor.core.config.Configuration; + +public interface DynomiteConfiguration extends Configuration { + // FIXME Are cluster and cluster name really different things? + String CLUSTER_PROPERTY_NAME = "workflow.dynomite.cluster"; + String CLUSTER_DEFAULT_VALUE = null; + + String CLUSTER_NAME_PROPERTY_NAME = "workflow.dynomite.cluster.name"; + String HOSTS_PROPERTY_NAME = "workflow.dynomite.cluster.hosts"; + + String MAX_CONNECTIONS_PER_HOST_PROPERTY_NAME = "workflow.dynomite.connection.maxConnsPerHost"; + int MAX_CONNECTIONS_PER_HOST_DEFAULT_VALUE = 10; + + String ROOT_NAMESPACE_PROPERTY_NAME = "workflow.namespace.queue.prefix"; + String ROOT_NAMESPACE_DEFAULT_VALUE = null; + + String DOMAIN_PROPERTY_NAME = "workflow.dyno.keyspace.domain"; + String DOMAIN_DEFAULT_VALUE = null; + + String NON_QUORUM_PORT_PROPERTY_NAME = "queues.dynomite.nonQuorum.port"; + int NON_QUORUM_PORT_DEFAULT_VALUE = 22122; + + default String getCluster() { + return getProperty(CLUSTER_PROPERTY_NAME, CLUSTER_DEFAULT_VALUE); + } + + default String getClusterName() { + return getProperty(CLUSTER_NAME_PROPERTY_NAME, ""); + } + + default String getHosts() { + return getProperty(HOSTS_PROPERTY_NAME, null); + } + + default String getRootNamespace() { + return getProperty(ROOT_NAMESPACE_PROPERTY_NAME, ROOT_NAMESPACE_DEFAULT_VALUE); + } + + default String getDomain() { + return getProperty(DOMAIN_PROPERTY_NAME, DOMAIN_DEFAULT_VALUE); + } + + default int getMaxConnectionsPerHost() { + return getIntProperty( + MAX_CONNECTIONS_PER_HOST_PROPERTY_NAME, + MAX_CONNECTIONS_PER_HOST_DEFAULT_VALUE + ); + } + + default int getNonQuorumPort() { + return getIntProperty(NON_QUORUM_PORT_PROPERTY_NAME, NON_QUORUM_PORT_DEFAULT_VALUE); + } + + default String getQueuePrefix() { + String prefix = getRootNamespace() + "." + getStack(); + + if (getDomain() != null) { + prefix = prefix + "." 
+ getDomain(); + } + + return prefix; + } +} diff --git a/redis-persistence/src/main/java/com/netflix/conductor/dyno/RedisQueuesDiscoveryProvider.java b/redis-persistence/src/main/java/com/netflix/conductor/dyno/RedisQueuesDiscoveryProvider.java new file mode 100644 index 0000000000..52689f1ea7 --- /dev/null +++ b/redis-persistence/src/main/java/com/netflix/conductor/dyno/RedisQueuesDiscoveryProvider.java @@ -0,0 +1,103 @@ +package com.netflix.conductor.dyno; + +import com.netflix.discovery.DiscoveryClient; +import com.netflix.dyno.connectionpool.Host; +import com.netflix.dyno.contrib.EurekaHostsSupplier; +import com.netflix.dyno.jedis.DynoJedisClient; +import com.netflix.dyno.queues.ShardSupplier; +import com.netflix.dyno.queues.redis.RedisQueues; +import com.netflix.dyno.queues.shard.DynoShardSupplier; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.inject.Inject; +import javax.inject.Provider; +import java.util.ArrayList; +import java.util.List; + +public class RedisQueuesDiscoveryProvider implements Provider { + + private static final Logger logger = LoggerFactory.getLogger(RedisQueuesDiscoveryProvider.class); + + private final DiscoveryClient discoveryClient; + private final DynomiteConfiguration configuration; + + @Inject + RedisQueuesDiscoveryProvider(DiscoveryClient discoveryClient, DynomiteConfiguration configuration) { + this.discoveryClient = discoveryClient; + this.configuration = configuration; + } + + @Override + public RedisQueues get() { + + logger.info("DynoQueueDAO::INIT"); + + String domain = configuration.getDomain(); + String cluster = configuration.getCluster(); + final int readConnPort = configuration.getNonQuorumPort(); + + EurekaHostsSupplier hostSupplier = new EurekaHostsSupplier(cluster, discoveryClient) { + @Override + public List getHosts() { + List hosts = super.getHosts(); + List updatedHosts = new ArrayList<>(hosts.size()); + hosts.forEach(host -> { + updatedHosts.add( + new Host( + host.getHostName(), + host.getIpAddress(), + readConnPort, + host.getRack(), + host.getDatacenter(), + host.isUp() ? Host.Status.Up : Host.Status.Down + ) + ); + }); + return updatedHosts; + } + }; + + DynoJedisClient dynoClient = new DynoJedisClient + .Builder() + .withApplicationName(configuration.getAppId()) + .withDynomiteClusterName(cluster) + .withDiscoveryClient(discoveryClient) + .build(); + + DynoJedisClient dynoClientRead = new DynoJedisClient + .Builder() + .withApplicationName(configuration.getAppId()) + .withDynomiteClusterName(cluster) + .withHostSupplier(hostSupplier) + .build(); + + String region = configuration.getRegion(); + String localDC = configuration.getAvailabilityZone(); + + if (localDC == null) { + throw new Error("Availability zone is not defined. 
" + + "Ensure Configuration.getAvailabilityZone() returns a non-null and non-empty value."); + } + + localDC = localDC.replaceAll(region, ""); + ShardSupplier ss = new DynoShardSupplier( + dynoClient.getConnPool().getConfiguration().getHostSupplier(), + region, + localDC + ); + + RedisQueues queues = new RedisQueues( + dynoClient, + dynoClientRead, + configuration.getQueuePrefix(), + ss, + 60_000, + 60_000 + ); + + logger.info("DynoQueueDAO initialized with prefix " + configuration.getQueuePrefix() + "!"); + + return queues; + } +} diff --git a/redis-persistence/src/main/java/com/netflix/conductor/dyno/RedisQueuesProvider.java b/redis-persistence/src/main/java/com/netflix/conductor/dyno/RedisQueuesProvider.java new file mode 100644 index 0000000000..c9521cdf64 --- /dev/null +++ b/redis-persistence/src/main/java/com/netflix/conductor/dyno/RedisQueuesProvider.java @@ -0,0 +1,54 @@ +package com.netflix.conductor.dyno; + +import com.netflix.dyno.queues.ShardSupplier; +import com.netflix.dyno.queues.redis.RedisQueues; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.inject.Inject; +import javax.inject.Named; +import javax.inject.Provider; + +import redis.clients.jedis.JedisCommands; + +public class RedisQueuesProvider implements Provider { + + public static final String READ_CLIENT_INJECTION_NAME = "DynoReadClient"; + + private static final Logger logger = LoggerFactory.getLogger(RedisQueuesProvider.class); + + private final JedisCommands dynoClient; + private final JedisCommands dynoClientRead; + private final ShardSupplier shardSupplier; + private final DynomiteConfiguration configuration; + + @Inject + public RedisQueuesProvider( + JedisCommands dynoClient, + @Named(READ_CLIENT_INJECTION_NAME) JedisCommands dynoClientRead, + ShardSupplier ss, + DynomiteConfiguration config + ) { + this.dynoClient = dynoClient; + this.dynoClientRead = dynoClientRead; + this.shardSupplier = ss; + this.configuration = config; + } + + @Override + public RedisQueues get() { + RedisQueues queues = new RedisQueues( + dynoClient, + dynoClientRead, + configuration.getQueuePrefix(), + shardSupplier, + 60_000, + 60_000 + ); + + logger.info("DynoQueueDAO initialized with prefix " + configuration.getQueuePrefix() + "!"); + + return queues; + } +} diff --git a/redis-persistence/src/main/java/com/netflix/conductor/dyno/SystemPropertiesDynomiteConfiguration.java b/redis-persistence/src/main/java/com/netflix/conductor/dyno/SystemPropertiesDynomiteConfiguration.java new file mode 100644 index 0000000000..029bcc10c7 --- /dev/null +++ b/redis-persistence/src/main/java/com/netflix/conductor/dyno/SystemPropertiesDynomiteConfiguration.java @@ -0,0 +1,6 @@ +package com.netflix.conductor.dyno; + +import com.netflix.conductor.core.config.SystemPropertiesConfiguration; + +public class SystemPropertiesDynomiteConfiguration extends SystemPropertiesConfiguration + implements DynomiteConfiguration {} diff --git a/redis-persistence/src/main/java/com/netflix/conductor/jedis/ConfigurationHostSupplierProvider.java b/redis-persistence/src/main/java/com/netflix/conductor/jedis/ConfigurationHostSupplierProvider.java new file mode 100644 index 0000000000..57e0a3f0be --- /dev/null +++ b/redis-persistence/src/main/java/com/netflix/conductor/jedis/ConfigurationHostSupplierProvider.java @@ -0,0 +1,59 @@ +package com.netflix.conductor.jedis; + +import com.netflix.conductor.dyno.DynomiteConfiguration; +import com.netflix.dyno.connectionpool.Host; +import com.netflix.dyno.connectionpool.HostSupplier; + +import 
org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; + +import javax.inject.Inject; +import javax.inject.Provider; + +public class ConfigurationHostSupplierProvider implements Provider { + private static Logger logger = LoggerFactory.getLogger(ConfigurationHostSupplierProvider.class); + + private final DynomiteConfiguration configuration; + + @Inject + public ConfigurationHostSupplierProvider(DynomiteConfiguration configuration) { + this.configuration = configuration; + } + + @Override + public HostSupplier get() { + return () -> parseHostsFromConfig(configuration); + } + + private List parseHostsFromConfig(DynomiteConfiguration configuration) { + String hosts = configuration.getHosts(); + if(hosts == null) { + // FIXME This type of validation probably doesn't belong here. + String message = String.format( + "Missing dynomite/redis hosts. Ensure '%s' has been set in the supplied configuration.", + DynomiteConfiguration.HOSTS_PROPERTY_NAME + ); + logger.error(message); + throw new RuntimeException(message); + } + return parseHostsFrom(hosts); + } + + private List parseHostsFrom(String hostConfig){ + List hostConfigs = Arrays.asList(hostConfig.split(";")); + + List hosts = hostConfigs.stream().map(hc -> { + String[] hostConfigValues = hc.split(":"); + String host = hostConfigValues[0]; + int port = Integer.parseInt(hostConfigValues[1]); + String rack = hostConfigValues[2]; + return new Host(host, port, rack, Host.Status.Up); + }).collect(Collectors.toList()); + + return hosts; + } +} diff --git a/redis-persistence/src/main/java/com/netflix/conductor/jedis/DynomiteJedisProvider.java b/redis-persistence/src/main/java/com/netflix/conductor/jedis/DynomiteJedisProvider.java new file mode 100644 index 0000000000..9652e98e8b --- /dev/null +++ b/redis-persistence/src/main/java/com/netflix/conductor/jedis/DynomiteJedisProvider.java @@ -0,0 +1,51 @@ +package com.netflix.conductor.jedis; + +import com.netflix.conductor.dyno.DynomiteConfiguration; +import com.netflix.dyno.connectionpool.HostSupplier; +import com.netflix.dyno.connectionpool.TokenMapSupplier; +import com.netflix.dyno.connectionpool.impl.ConnectionPoolConfigurationImpl; +import com.netflix.dyno.jedis.DynoJedisClient; + +import javax.inject.Inject; +import javax.inject.Provider; + +import redis.clients.jedis.JedisCommands; + +public class DynomiteJedisProvider implements Provider { + + private final HostSupplier hostSupplier; + private final TokenMapSupplier tokenMapSupplier; + private final DynomiteConfiguration configuration; + + @Inject + public DynomiteJedisProvider( + DynomiteConfiguration configuration, + HostSupplier hostSupplier, + TokenMapSupplier tokenMapSupplier + ){ + this.configuration = configuration; + this.hostSupplier = hostSupplier; + this.tokenMapSupplier = tokenMapSupplier; + } + + @Override + public JedisCommands get() { + ConnectionPoolConfigurationImpl connectionPoolConfiguration = + new ConnectionPoolConfigurationImpl(configuration.getClusterName()) + .withTokenSupplier(tokenMapSupplier) + .setLocalRack(configuration.getAvailabilityZone()) + .setLocalDataCenter(configuration.getRegion()) + .setSocketTimeout(0) + .setConnectTimeout(0) + .setMaxConnsPerHost( + configuration.getMaxConnectionsPerHost() + ); + + return new DynoJedisClient.Builder() + .withHostSupplier(hostSupplier) + .withApplicationName(configuration.getAppId()) + .withDynomiteClusterName(configuration.getClusterName()) + .withCPConfig(connectionPoolConfiguration) + 
.build(); + } +} diff --git a/redis-persistence/src/main/java/com/netflix/conductor/jedis/InMemoryJedisProvider.java b/redis-persistence/src/main/java/com/netflix/conductor/jedis/InMemoryJedisProvider.java new file mode 100644 index 0000000000..f8987db791 --- /dev/null +++ b/redis-persistence/src/main/java/com/netflix/conductor/jedis/InMemoryJedisProvider.java @@ -0,0 +1,16 @@ +package com.netflix.conductor.jedis; + +import javax.inject.Provider; +import javax.inject.Singleton; + +import redis.clients.jedis.JedisCommands; + +@Singleton +public class InMemoryJedisProvider implements Provider { + private final JedisCommands mock = new JedisMock(); + + @Override + public JedisCommands get() { + return mock; + } +} diff --git a/server/src/main/java/com/netflix/conductor/redis/utils/JedisMock.java b/redis-persistence/src/main/java/com/netflix/conductor/jedis/JedisMock.java similarity index 99% rename from server/src/main/java/com/netflix/conductor/redis/utils/JedisMock.java rename to redis-persistence/src/main/java/com/netflix/conductor/jedis/JedisMock.java index 0e3d4b67cd..5e7bac8a61 100644 --- a/server/src/main/java/com/netflix/conductor/redis/utils/JedisMock.java +++ b/redis-persistence/src/main/java/com/netflix/conductor/jedis/JedisMock.java @@ -16,7 +16,11 @@ /** * */ -package com.netflix.conductor.redis.utils; +package com.netflix.conductor.jedis; + +import org.rarefiedredis.redis.IRedisClient; +import org.rarefiedredis.redis.IRedisSortedSet.ZsetPair; +import org.rarefiedredis.redis.RedisMock; import java.util.ArrayList; import java.util.HashSet; @@ -27,10 +31,6 @@ import java.util.Set; import java.util.stream.Collectors; -import org.rarefiedredis.redis.IRedisClient; -import org.rarefiedredis.redis.IRedisSortedSet.ZsetPair; -import org.rarefiedredis.redis.RedisMock; - import redis.clients.jedis.Jedis; import redis.clients.jedis.ScanParams; import redis.clients.jedis.ScanResult; diff --git a/redis-persistence/src/main/java/com/netflix/conductor/jedis/LocalHostSupplierProvider.java b/redis-persistence/src/main/java/com/netflix/conductor/jedis/LocalHostSupplierProvider.java new file mode 100644 index 0000000000..1280bd424f --- /dev/null +++ b/redis-persistence/src/main/java/com/netflix/conductor/jedis/LocalHostSupplierProvider.java @@ -0,0 +1,25 @@ +package com.netflix.conductor.jedis; + +import com.google.common.collect.Lists; + +import com.netflix.conductor.core.config.Configuration; +import com.netflix.dyno.connectionpool.Host; +import com.netflix.dyno.connectionpool.HostSupplier; + +import javax.inject.Inject; +import javax.inject.Provider; + +public class LocalHostSupplierProvider implements Provider { + private final Configuration configuration; + + @Inject + public LocalHostSupplierProvider(Configuration configuration) { + this.configuration = configuration; + } + + @Override + public HostSupplier get() { + Host dynoHost = new Host("localhost", 0, configuration.getAvailabilityZone(), Host.Status.Up); + return ()-> Lists.newArrayList(dynoHost); + } +} diff --git a/redis-persistence/src/main/java/com/netflix/conductor/jedis/RedisClusterJedisProvider.java b/redis-persistence/src/main/java/com/netflix/conductor/jedis/RedisClusterJedisProvider.java new file mode 100644 index 0000000000..ef663469ac --- /dev/null +++ b/redis-persistence/src/main/java/com/netflix/conductor/jedis/RedisClusterJedisProvider.java @@ -0,0 +1,35 @@ +package com.netflix.conductor.jedis; + +import com.netflix.dyno.connectionpool.Host; +import com.netflix.dyno.connectionpool.HostSupplier; + +import 
org.apache.commons.pool2.impl.GenericObjectPoolConfig; + +import java.util.ArrayList; + +import javax.inject.Inject; +import javax.inject.Provider; + +import redis.clients.jedis.HostAndPort; +import redis.clients.jedis.JedisCluster; +import redis.clients.jedis.JedisCommands; + +public class RedisClusterJedisProvider implements Provider { + + private final HostSupplier hostSupplier; + + @Inject + public RedisClusterJedisProvider(HostSupplier hostSupplier){ + this.hostSupplier = hostSupplier; + } + + @Override + public JedisCommands get() { + // FIXME This doesn't seem very safe, but is how it was in the code this was moved from. + Host host = new ArrayList(hostSupplier.getHosts()).get(0); + GenericObjectPoolConfig poolConfig = new GenericObjectPoolConfig(); + poolConfig.setMinIdle(5); + poolConfig.setMaxTotal(1000); + return new JedisCluster(new HostAndPort(host.getHostName(), host.getPort()), poolConfig); + } +} diff --git a/redis-persistence/src/main/java/com/netflix/conductor/jedis/RedisJedisProvider.java b/redis-persistence/src/main/java/com/netflix/conductor/jedis/RedisJedisProvider.java new file mode 100644 index 0000000000..7c347d4227 --- /dev/null +++ b/redis-persistence/src/main/java/com/netflix/conductor/jedis/RedisJedisProvider.java @@ -0,0 +1,43 @@ +package com.netflix.conductor.jedis; + +import com.google.common.collect.Lists; + +import com.netflix.conductor.dyno.DynomiteConfiguration; +import com.netflix.dyno.connectionpool.Host; +import com.netflix.dyno.connectionpool.HostSupplier; + +import org.apache.commons.pool2.impl.GenericObjectPoolConfig; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.inject.Inject; +import javax.inject.Provider; + +import redis.clients.jedis.HostAndPort; +import redis.clients.jedis.JedisCluster; +import redis.clients.jedis.JedisCommands; + +public class RedisJedisProvider implements Provider { + private static Logger logger = LoggerFactory.getLogger(RedisJedisProvider.class); + + private final HostSupplier hostSupplier; + private final DynomiteConfiguration configuration; + + @Inject + public RedisJedisProvider(HostSupplier hostSupplier, DynomiteConfiguration configuration) { + this.hostSupplier = hostSupplier; + this.configuration = configuration; + } + + @Override + public JedisCommands get() { + // FIXME Do we really want to ignore all additional hosts? 
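One possible answer to the FIXME above, sketched purely for illustration: JedisCluster accepts a set of seed nodes and discovers the full topology from whichever seed is reachable, so the provider could pass every host from the HostSupplier instead of only the first one. This is not part of the change itself; the class below is a hypothetical alternative.

import com.netflix.dyno.connectionpool.HostSupplier;
import org.apache.commons.pool2.impl.GenericObjectPoolConfig;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisCluster;
import java.util.Set;
import java.util.stream.Collectors;

class AllSeedsJedisClusterFactory {
    static JedisCluster create(HostSupplier hostSupplier) {
        // Use every supplied host as a seed rather than just the first one.
        Set<HostAndPort> seeds = hostSupplier.getHosts().stream()
                .map(h -> new HostAndPort(h.getHostName(), h.getPort()))
                .collect(Collectors.toSet());
        GenericObjectPoolConfig poolConfig = new GenericObjectPoolConfig();
        poolConfig.setMinIdle(5);
        poolConfig.setMaxTotal(1000);
        return new JedisCluster(seeds, poolConfig);
    }
}

Seeding with all hosts keeps startup working when the first configured host happens to be down; behaviour after connecting is unchanged, since Jedis refreshes the slot map from the live cluster anyway.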
+ Host host = Lists.newArrayList(hostSupplier.getHosts()).get(0); + + GenericObjectPoolConfig poolConfig = new GenericObjectPoolConfig(); + poolConfig.setMinIdle(5); + poolConfig.setMaxTotal(1000); + logger.info("Starting conductor server using redis_cluster " + configuration.getClusterName()); + return new JedisCluster(new HostAndPort(host.getHostName(), host.getPort()), poolConfig); + } +} diff --git a/redis-persistence/src/main/java/com/netflix/conductor/jedis/TokenMapSupplierProvider.java b/redis-persistence/src/main/java/com/netflix/conductor/jedis/TokenMapSupplierProvider.java new file mode 100644 index 0000000000..627fed0729 --- /dev/null +++ b/redis-persistence/src/main/java/com/netflix/conductor/jedis/TokenMapSupplierProvider.java @@ -0,0 +1,44 @@ +package com.netflix.conductor.jedis; + +import com.google.common.collect.Lists; + +import com.netflix.dyno.connectionpool.Host; +import com.netflix.dyno.connectionpool.HostSupplier; +import com.netflix.dyno.connectionpool.TokenMapSupplier; +import com.netflix.dyno.connectionpool.impl.lb.HostToken; + +import java.util.Arrays; +import java.util.List; +import java.util.Set; + +import javax.inject.Inject; +import javax.inject.Provider; + +public class TokenMapSupplierProvider implements Provider { + private final HostSupplier hostSupplier; + + @Inject + public TokenMapSupplierProvider(HostSupplier hostSupplier) { + this.hostSupplier = hostSupplier; + } + + @Override + public TokenMapSupplier get() { + return new TokenMapSupplier() { + + // FIXME This isn't particularly safe, but it is equivalent to the existing code. + // FIXME It seems like we should be supply tokens for more than one host? + HostToken token = new HostToken(1L, Lists.newArrayList(hostSupplier.getHosts()).get(0)); + + @Override + public List getTokens(Set activeHosts) { + return Arrays.asList(token); + } + + @Override + public HostToken getTokenForHost(Host host, Set activeHosts) { + return token; + } + }; + } +} diff --git a/redis-persistence/src/test/java/com/netflix/conductor/config/TestConfiguration.java b/redis-persistence/src/test/java/com/netflix/conductor/config/TestConfiguration.java index 72b929859a..1c538ba7f3 100644 --- a/redis-persistence/src/test/java/com/netflix/conductor/config/TestConfiguration.java +++ b/redis-persistence/src/test/java/com/netflix/conductor/config/TestConfiguration.java @@ -19,10 +19,10 @@ */ -import java.util.Map; - import com.netflix.conductor.core.config.Configuration; +import java.util.Map; + /** * @author Viren * @@ -68,8 +68,13 @@ public String getAppId() { public String getProperty(String string, String def) { return "dummy"; } - - @Override + + @Override + public boolean getBooleanProperty(String name, boolean defaultValue) { + return false; + } + + @Override public String getAvailabilityZone() { return "us-east-1a"; } diff --git a/redis-persistence/src/test/java/com/netflix/conductor/dao/dynomite/BaseDynoDAOTest.java b/redis-persistence/src/test/java/com/netflix/conductor/dao/dynomite/BaseDynoDAOTest.java index 78f3f7c157..6650cc2991 100644 --- a/redis-persistence/src/test/java/com/netflix/conductor/dao/dynomite/BaseDynoDAOTest.java +++ b/redis-persistence/src/test/java/com/netflix/conductor/dao/dynomite/BaseDynoDAOTest.java @@ -2,6 +2,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.netflix.conductor.core.config.Configuration; +import com.netflix.conductor.dyno.DynoProxy; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; @@ -9,7 +10,7 @@ import org.mockito.Mockito; import 
org.mockito.runners.MockitoJUnitRunner; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; @RunWith(MockitoJUnitRunner.class) public class BaseDynoDAOTest { diff --git a/redis-persistence/src/test/java/com/netflix/conductor/dao/dynomite/DynoQueueDAOTest.java b/redis-persistence/src/test/java/com/netflix/conductor/dao/dynomite/DynoQueueDAOTest.java index a0a7904bcb..2484c89597 100644 --- a/redis-persistence/src/test/java/com/netflix/conductor/dao/dynomite/DynoQueueDAOTest.java +++ b/redis-persistence/src/test/java/com/netflix/conductor/dao/dynomite/DynoQueueDAOTest.java @@ -15,31 +15,30 @@ */ package com.netflix.conductor.dao.dynomite; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; - -import java.util.Arrays; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.stream.Collectors; - -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -import com.fasterxml.jackson.annotation.JsonInclude.Include; -import com.fasterxml.jackson.databind.DeserializationFeature; import com.fasterxml.jackson.databind.ObjectMapper; import com.netflix.conductor.config.TestConfiguration; +import com.netflix.conductor.common.utils.JsonMapperProvider; import com.netflix.conductor.dao.QueueDAO; import com.netflix.conductor.dao.dynomite.queue.DynoQueueDAO; import com.netflix.conductor.dao.redis.JedisMock; import com.netflix.dyno.queues.ShardSupplier; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + import redis.clients.jedis.JedisCommands; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + /** * * @author Viren @@ -49,15 +48,7 @@ public class DynoQueueDAOTest { private QueueDAO dao; - private static ObjectMapper om = new ObjectMapper(); - - static { - om.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); - om.configure(DeserializationFeature.FAIL_ON_IGNORED_PROPERTIES, false); - om.configure(DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES, false); - om.setSerializationInclusion(Include.NON_NULL); - om.setSerializationInclusion(Include.NON_EMPTY); - } + private static ObjectMapper om = new JsonMapperProvider().get(); @Before public void init() throws Exception { diff --git a/redis-persistence/src/test/java/com/netflix/conductor/dao/dynomite/RedisExecutionDAOTest.java b/redis-persistence/src/test/java/com/netflix/conductor/dao/dynomite/RedisExecutionDAOTest.java index 5a76cb5301..cd4ab2990b 100644 --- a/redis-persistence/src/test/java/com/netflix/conductor/dao/dynomite/RedisExecutionDAOTest.java +++ b/redis-persistence/src/test/java/com/netflix/conductor/dao/dynomite/RedisExecutionDAOTest.java @@ -1,55 +1,41 @@ /** * Copyright 2016 Netflix, Inc. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. */ /** * */ package com.netflix.conductor.dao.dynomite; -import com.fasterxml.jackson.annotation.JsonInclude.Include; -import com.fasterxml.jackson.databind.DeserializationFeature; import com.fasterxml.jackson.databind.ObjectMapper; -import com.netflix.conductor.common.metadata.tasks.PollData; import com.netflix.conductor.common.metadata.tasks.Task; import com.netflix.conductor.common.metadata.tasks.Task.Status; import com.netflix.conductor.common.metadata.tasks.TaskDef; -import com.netflix.conductor.common.run.Workflow; -import com.netflix.conductor.common.run.Workflow.WorkflowStatus; +import com.netflix.conductor.common.utils.JsonMapperProvider; import com.netflix.conductor.config.TestConfiguration; import com.netflix.conductor.core.config.Configuration; +import com.netflix.conductor.dao.ExecutionDAO; +import com.netflix.conductor.dao.ExecutionDAOTest; import com.netflix.conductor.dao.IndexDAO; import com.netflix.conductor.dao.redis.JedisMock; -import org.apache.commons.lang.builder.EqualsBuilder; +import com.netflix.conductor.dyno.DynoProxy; import org.junit.Before; -import org.junit.Rule; import org.junit.Test; -import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; import redis.clients.jedis.JedisCommands; -import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; -import java.util.LinkedList; import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import java.util.stream.Collectors; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; @@ -57,462 +43,88 @@ import static org.junit.Assert.assertTrue; import static org.mockito.Matchers.any; import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.when; +import static org.mockito.Mockito.mock; /** * @author Viren * */ @RunWith(MockitoJUnitRunner.class) -public class RedisExecutionDAOTest { +public class RedisExecutionDAOTest extends ExecutionDAOTest { - private RedisMetadataDAO metadataDAO; - - private RedisExecutionDAO executionDAO; + private RedisExecutionDAO executionDAO; @Mock private IndexDAO indexDAO; - private static ObjectMapper objectMapper = new ObjectMapper(); - - static { - objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); - objectMapper.configure(DeserializationFeature.FAIL_ON_IGNORED_PROPERTIES, false); - objectMapper.configure(DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES, false); - objectMapper.setSerializationInclusion(Include.NON_NULL); - objectMapper.setSerializationInclusion(Include.NON_EMPTY); - } - - @SuppressWarnings("unchecked") - @Before - public void init() { - Configuration config = new 
TestConfiguration(); - JedisCommands jedisMock = new JedisMock(); - DynoProxy dynoClient = new DynoProxy(jedisMock); - - metadataDAO = new RedisMetadataDAO(dynoClient, objectMapper, config); - executionDAO = new RedisExecutionDAO(dynoClient, objectMapper, indexDAO, metadataDAO, config); - - // Ignore indexing in Redis tests. - doNothing().when(indexDAO).indexTask(any(Task.class)); - } - - @Rule - public ExpectedException expected = ExpectedException.none(); - - @Test - public void testTaskExceedsLimit() { - - TaskDef def = new TaskDef(); - def.setName("task1"); - def.setConcurrentExecLimit(1); - metadataDAO.createTaskDef(def); - - List tasks = new LinkedList<>(); - for(int i = 0; i < 15; i++) { - Task task = new Task(); - task.setScheduledTime(1L); - task.setSeq(1); - task.setTaskId("t_" + i); - task.setWorkflowInstanceId("workflow_" + i); - task.setReferenceTaskName("task1"); - task.setTaskDefName("task1"); - tasks.add(task); - task.setStatus(Status.SCHEDULED); - } - - executionDAO.createTasks(tasks); - assertFalse(executionDAO.exceedsInProgressLimit(tasks.get(0))); - tasks.get(0).setStatus(Status.IN_PROGRESS); - executionDAO.updateTask(tasks.get(0)); - - for(Task task : tasks) { - assertTrue(executionDAO.exceedsInProgressLimit(task)); - } - - } - @Test - public void testCreateTaskException() { - Task task = new Task(); - task.setScheduledTime(1L); - task.setSeq(1); - task.setTaskId("t1"); - task.setTaskDefName("task1"); - expected.expect(NullPointerException.class); - expected.expectMessage("Workflow instance id cannot be null"); - executionDAO.createTasks(Arrays.asList(task)); - - task.setWorkflowInstanceId("wfid"); - expected.expect(NullPointerException.class); - expected.expectMessage("Task reference name cannot be nullss"); - executionDAO.createTasks(Arrays.asList(task)); - - } - - @Test - public void testCreateTaskException2() { - Task task = new Task(); - task.setScheduledTime(1L); - task.setSeq(1); - task.setTaskId("t1"); - task.setTaskDefName("task1"); - task.setWorkflowInstanceId("wfid"); - expected.expect(NullPointerException.class); - expected.expectMessage("Task reference name cannot be null"); - executionDAO.createTasks(Arrays.asList(task)); - } - - @Test - public void testPollData() { - executionDAO.updateLastPoll("taskDef", null, "workerId1"); - PollData pd = executionDAO.getPollData("taskDef", null); - assertNotNull(pd); - assertTrue(pd.getLastPollTime() > 0); - assertEquals(pd.getQueueName(), "taskDef"); - assertEquals(pd.getDomain(), null); - assertEquals(pd.getWorkerId(), "workerId1"); - - executionDAO.updateLastPoll("taskDef", "domain1", "workerId1"); - pd = executionDAO.getPollData("taskDef", "domain1"); - assertNotNull(pd); - assertTrue(pd.getLastPollTime() > 0); - assertEquals(pd.getQueueName(), "taskDef"); - assertEquals(pd.getDomain(), "domain1"); - assertEquals(pd.getWorkerId(), "workerId1"); - - List pData = executionDAO.getPollData("taskDef"); - assertEquals(pData.size(), 2); - - pd = executionDAO.getPollData("taskDef", "domain2"); - assertTrue(pd == null); - } - - @Test - public void testTaskCreateDups() { - List tasks = new LinkedList<>(); - String workflowId = UUID.randomUUID().toString(); - - for(int i = 0; i < 3; i++) { - Task task = new Task(); - task.setScheduledTime(1L); - task.setSeq(1); - task.setTaskId(workflowId + "_t" + i); - task.setReferenceTaskName("t" + i); - task.setRetryCount(0); - task.setWorkflowInstanceId(workflowId); - task.setTaskDefName("task" + i); - task.setStatus(Task.Status.IN_PROGRESS); - tasks.add(task); - } - - //Let's insert a 
retried task - Task task = new Task(); - task.setScheduledTime(1L); - task.setSeq(1); - task.setTaskId(workflowId + "_t" + 2); - task.setReferenceTaskName("t" + 2); - task.setRetryCount(1); - task.setWorkflowInstanceId(workflowId); - task.setTaskDefName("task" + 2); - task.setStatus(Task.Status.IN_PROGRESS); - tasks.add(task); - - //Duplicate task! - task = new Task(); - task.setScheduledTime(1L); - task.setSeq(1); - task.setTaskId(workflowId + "_t" + 1); - task.setReferenceTaskName("t" + 1); - task.setRetryCount(0); - task.setWorkflowInstanceId(workflowId); - task.setTaskDefName("task" + 1); - task.setStatus(Task.Status.IN_PROGRESS); - tasks.add(task); - - List created = executionDAO.createTasks(tasks); - assertEquals(tasks.size()-1, created.size()); //1 less - - Set srcIds = tasks.stream().map(t -> t.getReferenceTaskName() + "." + t.getRetryCount()).collect(Collectors.toSet()); - Set createdIds = created.stream().map(t -> t.getReferenceTaskName() + "." + t.getRetryCount()).collect(Collectors.toSet()); - - assertEquals(srcIds, createdIds); - - List pending = executionDAO.getPendingTasksByWorkflow("task0", workflowId); - assertNotNull(pending); - assertEquals(1, pending.size()); - assertTrue(EqualsBuilder.reflectionEquals(tasks.get(0), pending.get(0))); - - List found = executionDAO.getTasks(tasks.get(0).getTaskDefName(), null, 1); - assertNotNull(found); - assertEquals(1, found.size()); - assertTrue(EqualsBuilder.reflectionEquals(tasks.get(0), found.get(0))); - } - - @Test - public void testTaskOps() { - List tasks = new LinkedList<>(); - String workflowId = UUID.randomUUID().toString(); - - for(int i = 0; i < 3; i++) { - Task task = new Task(); - task.setScheduledTime(1L); - task.setSeq(1); - task.setTaskId(workflowId + "_t" + i); - task.setReferenceTaskName("testTaskOps" + i); - task.setRetryCount(0); - task.setWorkflowInstanceId(workflowId); - task.setTaskDefName("testTaskOps" + i); - task.setStatus(Task.Status.IN_PROGRESS); - tasks.add(task); - } - - for(int i = 0; i < 3; i++) { - Task task = new Task(); - task.setScheduledTime(1L); - task.setSeq(1); - task.setTaskId("x" + workflowId + "_t" + i); - task.setReferenceTaskName("testTaskOps" + i); - task.setRetryCount(0); - task.setWorkflowInstanceId("x" + workflowId); - task.setTaskDefName("testTaskOps" + i); - task.setStatus(Task.Status.IN_PROGRESS); - executionDAO.createTasks(Arrays.asList(task)); - } - - - List created = executionDAO.createTasks(tasks); - assertEquals(tasks.size(), created.size()); - - List pending = executionDAO.getPendingTasksForTaskType(tasks.get(0).getTaskDefName()); - assertNotNull(pending); - assertEquals(2, pending.size()); - //Pending list can come in any order. 
finding the one we are looking for and then comparing - Task matching = pending.stream().filter(task -> task.getTaskId().equals(tasks.get(0).getTaskId())).findAny().get(); - assertTrue(EqualsBuilder.reflectionEquals(matching, tasks.get(0))); - - List update = new LinkedList<>(); - for(int i = 0; i < 3; i++) { - Task found = executionDAO.getTask(workflowId + "_t" + i); - assertNotNull(found); - found.getOutputData().put("updated", true); - found.setStatus(Task.Status.COMPLETED); - update.add(found); - } - executionDAO.updateTasks(update); - - List taskIds = tasks.stream().map(Task::getTaskId).collect(Collectors.toList()); - List found = executionDAO.getTasks(taskIds); - assertEquals(taskIds.size(), found.size()); - found.forEach(task -> { - assertTrue(task.getOutputData().containsKey("updated")); - assertEquals(true, task.getOutputData().get("updated")); - executionDAO.removeTask(task.getTaskId()); - }); - - found = executionDAO.getTasks(taskIds); - assertTrue(found.isEmpty()); - } - - @Test - public void test() { - Workflow workflow = new Workflow(); - workflow.setCorrelationId("correlationX"); - workflow.setCreatedBy("junit_tester"); - workflow.setEndTime(200L); - - Map input = new HashMap<>(); - input.put("param1", "param1 value"); - input.put("param2", 100); - workflow.setInput(input); - - Map output = new HashMap<>(); - output.put("ouput1", "output 1 value"); - output.put("op2", 300); - workflow.setOutput(output); - - workflow.setOwnerApp("workflow"); - workflow.setParentWorkflowId("parentWorkflowId"); - workflow.setParentWorkflowTaskId("parentWFTaskId"); - workflow.setReasonForIncompletion("missing recipe"); - workflow.setReRunFromWorkflowId("re-run from id1"); - workflow.setSchemaVersion(2); - workflow.setStartTime(90L); - workflow.setStatus(WorkflowStatus.FAILED); - workflow.setWorkflowId("workflow0"); - - List tasks = new LinkedList<>(); - - Task task = new Task(); - task.setScheduledTime(1L); - task.setSeq(1); - task.setTaskId("t1"); - task.setReferenceTaskName("t1"); - task.setWorkflowInstanceId(workflow.getWorkflowId()); - task.setTaskDefName("task1"); - - Task task2 = new Task(); - task2.setScheduledTime(2L); - task2.setSeq(2); - task2.setTaskId("t2"); - task2.setReferenceTaskName("t2"); - task2.setWorkflowInstanceId(workflow.getWorkflowId()); - task2.setTaskDefName("task2"); - - Task task3 = new Task(); - task3.setScheduledTime(2L); - task3.setSeq(3); - task3.setTaskId("t3"); - task3.setReferenceTaskName("t3"); - task3.setWorkflowInstanceId(workflow.getWorkflowId()); - task3.setTaskDefName("task3"); - - tasks.add(task); - tasks.add(task2); - tasks.add(task3); - - workflow.setTasks(tasks); - - workflow.setUpdatedBy("junit_tester"); - workflow.setUpdateTime(800L); - workflow.setVersion(3); - //workflow.setWorkflowId("wf0001"); - workflow.setWorkflowType("Junit Workflow"); - - String workflowId = executionDAO.createWorkflow(workflow); - List created = executionDAO.createTasks(tasks); - assertEquals(tasks.size(), created.size()); - - Workflow workflowWithTasks = executionDAO.getWorkflow(workflow.getWorkflowId(), true); - assertEquals(workflowWithTasks.getWorkflowId(), workflowId); - assertTrue(!workflowWithTasks.getTasks().isEmpty()); - - assertEquals(workflow.getWorkflowId(), workflowId); - Workflow found = executionDAO.getWorkflow(workflowId, false); - assertTrue(found.getTasks().isEmpty()); - - workflow.getTasks().clear(); - assertTrue(EqualsBuilder.reflectionEquals(workflow, found)); - - workflow.getInput().put("updated", true); - executionDAO.updateWorkflow(workflow); - found 
= executionDAO.getWorkflow(workflowId); - assertNotNull(found); - assertTrue(found.getInput().containsKey("updated")); - assertEquals(true, found.getInput().get("updated")); - - List running = executionDAO.getRunningWorkflowIds(workflow.getWorkflowType()); - assertNotNull(running); - assertTrue(running.isEmpty()); - - workflow.setStatus(WorkflowStatus.RUNNING); - executionDAO.updateWorkflow(workflow); - - running = executionDAO.getRunningWorkflowIds(workflow.getWorkflowType()); - assertNotNull(running); - assertEquals(1, running.size()); - assertEquals(workflow.getWorkflowId(), running.get(0)); - - List pending = executionDAO.getPendingWorkflowsByType(workflow.getWorkflowType()); - assertNotNull(pending); - assertEquals(1, pending.size()); - assertEquals(3, pending.get(0).getTasks().size()); - pending.get(0).getTasks().clear(); - assertTrue(EqualsBuilder.reflectionEquals(workflow, pending.get(0))); - - workflow.setStatus(WorkflowStatus.COMPLETED); - executionDAO.updateWorkflow(workflow); - running = executionDAO.getRunningWorkflowIds(workflow.getWorkflowType()); - assertNotNull(running); - assertTrue(running.isEmpty()); - - List bytime = executionDAO.getWorkflowsByType(workflow.getWorkflowType(), System.currentTimeMillis(), System.currentTimeMillis()+100); - assertNotNull(bytime); - assertTrue(bytime.isEmpty()); - - bytime = executionDAO.getWorkflowsByType(workflow.getWorkflowType(), workflow.getCreateTime() - 10, workflow.getCreateTime() + 10); - assertNotNull(bytime); - assertEquals(1, bytime.size()); - - String workflowName = "pending_count_test"; - String idBase = workflow.getWorkflowId(); - for(int i = 0; i < 10; i++) { - workflow.setWorkflowId("x" + i + idBase); - workflow.setCorrelationId("corr001"); - workflow.setStatus(WorkflowStatus.RUNNING); - workflow.setWorkflowType(workflowName); - executionDAO.createWorkflow(workflow); - } - - /* - List bycorrelationId = executionDAO.getWorkflowsByCorrelationId("corr001"); - assertNotNull(bycorrelationId); - assertEquals(10, bycorrelationId.size()); - */ - long count = executionDAO.getPendingWorkflowCount(workflowName); - assertEquals(10, count); - - for(int i = 0; i < 10; i++) { - executionDAO.removeFromPendingWorkflow(workflowName, "x" + i + idBase); - } - count = executionDAO.getPendingWorkflowCount(workflowName); - assertEquals(0, count); - } - - @Test - @SuppressWarnings("unchecked") - public void testCorrelateTaskToWorkflowInDS() { - String workflowId = "workflowId"; - String taskId = "taskId1"; - String taskDefName = "task1"; - - TaskDef def = new TaskDef(); - def.setName("task1"); - def.setConcurrentExecLimit(1); - metadataDAO.createTaskDef(def); - - Task task = new Task(); - task.setTaskId(taskId); - task.setWorkflowInstanceId(workflowId); - task.setReferenceTaskName("ref_name"); - task.setTaskDefName(taskDefName); - task.setTaskType(taskDefName); - task.setStatus(Status.IN_PROGRESS); - List tasks = executionDAO.createTasks(Collections.singletonList(task)); - assertNotNull(tasks); - assertEquals(1, tasks.size()); - - executionDAO.correlateTaskToWorkflowInDS(taskId, workflowId); - tasks = executionDAO.getTasksForWorkflow(workflowId); - assertNotNull(tasks); - assertEquals(workflowId, tasks.get(0).getWorkflowInstanceId()); - assertEquals(taskId, tasks.get(0).getTaskId()); - } + private static ObjectMapper objectMapper = new JsonMapperProvider().get(); + + @SuppressWarnings("unchecked") + @Before + public void init() { + Configuration config = new TestConfiguration(); + JedisCommands jedisMock = new JedisMock(); + DynoProxy dynoClient 
= new DynoProxy(jedisMock); + + executionDAO = new RedisExecutionDAO(dynoClient, objectMapper, mock(IndexDAO.class), config); + + // Ignore indexing in Redis tests. + doNothing().when(indexDAO).indexTask(any(Task.class)); + } + + @Test + @SuppressWarnings("unchecked") + public void testCorrelateTaskToWorkflowInDS() throws Exception { + String workflowId = "workflowId"; + String taskId = "taskId1"; + String taskDefName = "task1"; + + TaskDef def = new TaskDef(); + def.setName("task1"); + def.setConcurrentExecLimit(1); + + Task task = new Task(); + task.setTaskId(taskId); + task.setWorkflowInstanceId(workflowId); + task.setReferenceTaskName("ref_name"); + task.setTaskDefName(taskDefName); + task.setTaskType(taskDefName); + task.setStatus(Status.IN_PROGRESS); + List tasks = executionDAO.createTasks(Collections.singletonList(task)); + assertNotNull(tasks); + assertEquals(1, tasks.size()); + + executionDAO.correlateTaskToWorkflowInDS(taskId, workflowId); + tasks = executionDAO.getTasksForWorkflow(workflowId); + assertNotNull(tasks); + assertEquals(workflowId, tasks.get(0).getWorkflowInstanceId()); + assertEquals(taskId, tasks.get(0).getTaskId()); + } @Test public void testExceedsRateLimitWhenNoRateLimitSet() { Task task =new Task(); assertFalse(executionDAO.exceedsRateLimitPerFrequency(task)); } - @Test public void testExceedsRateLimitWithinLimit() { Task task =new Task(); task.setRateLimitFrequencyInSeconds(60); task.setRateLimitPerFrequency(20); - assertFalse(executionDAO.exceedsRateLimitPerFrequency(task)); - } - @Test public void testExceedsRateLimitOutOfLimit() { Task task =new Task(); task.setRateLimitFrequencyInSeconds(60); task.setRateLimitPerFrequency(1); - assertFalse(executionDAO.exceedsRateLimitPerFrequency(task)); assertTrue(executionDAO.exceedsRateLimitPerFrequency(task)); - } + @Override + protected ExecutionDAO getExecutionDAO() { + return executionDAO; + } + } diff --git a/redis-persistence/src/test/java/com/netflix/conductor/dao/dynomite/RedisMetadataDAOTest.java b/redis-persistence/src/test/java/com/netflix/conductor/dao/dynomite/RedisMetadataDAOTest.java index 12346b00a7..b8d70b26cb 100644 --- a/redis-persistence/src/test/java/com/netflix/conductor/dao/dynomite/RedisMetadataDAOTest.java +++ b/redis-persistence/src/test/java/com/netflix/conductor/dao/dynomite/RedisMetadataDAOTest.java @@ -14,12 +14,10 @@ * limitations under the License. 
*/ /** - * + * */ package com.netflix.conductor.dao.dynomite; -import com.fasterxml.jackson.annotation.JsonInclude.Include; -import com.fasterxml.jackson.databind.DeserializationFeature; import com.fasterxml.jackson.databind.ObjectMapper; import com.netflix.conductor.common.metadata.events.EventHandler; import com.netflix.conductor.common.metadata.events.EventHandler.Action; @@ -29,10 +27,12 @@ import com.netflix.conductor.common.metadata.tasks.TaskDef.RetryLogic; import com.netflix.conductor.common.metadata.tasks.TaskDef.TimeoutPolicy; import com.netflix.conductor.common.metadata.workflow.WorkflowDef; +import com.netflix.conductor.common.utils.JsonMapperProvider; import com.netflix.conductor.config.TestConfiguration; import com.netflix.conductor.core.config.Configuration; import com.netflix.conductor.core.execution.ApplicationException; import com.netflix.conductor.dao.redis.JedisMock; +import com.netflix.conductor.dyno.DynoProxy; import org.apache.commons.lang.builder.EqualsBuilder; import org.junit.Before; import org.junit.Test; @@ -40,13 +40,14 @@ import java.util.Arrays; import java.util.List; +import java.util.Optional; import java.util.Set; import java.util.UUID; import java.util.stream.Collectors; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; /** @@ -55,226 +56,220 @@ */ public class RedisMetadataDAOTest { - private RedisMetadataDAO dao; - - private static ObjectMapper om = new ObjectMapper(); - - static { - om.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); - om.configure(DeserializationFeature.FAIL_ON_IGNORED_PROPERTIES, false); - om.configure(DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES, false); - om.setSerializationInclusion(Include.NON_NULL); - om.setSerializationInclusion(Include.NON_EMPTY); - } - - @Before - public void init() { - Configuration config = new TestConfiguration(); - JedisCommands jedisMock = new JedisMock(); - DynoProxy dynoClient = new DynoProxy(jedisMock); - - dao = new RedisMetadataDAO(dynoClient, om, config); - } - - @Test(expected=ApplicationException.class) - public void testDup() throws Exception { - WorkflowDef def = new WorkflowDef(); - def.setName("testDup"); - def.setVersion(1); - - dao.create(def); - dao.create(def); - } - - @Test - public void testWorkflowDefOperations() throws Exception { - - WorkflowDef def = new WorkflowDef(); - def.setName("test"); - def.setVersion(1); - def.setDescription("description"); - def.setCreatedBy("unit_test"); - def.setCreateTime(1L); - def.setOwnerApp("ownerApp"); - def.setUpdatedBy("unit_test2"); - def.setUpdateTime(2L); - - dao.create(def); - - List all = dao.getAll(); - assertNotNull(all); - assertEquals(1, all.size()); - assertEquals("test", all.get(0).getName()); - assertEquals(1, all.get(0).getVersion()); - - WorkflowDef found = dao.get("test", 1); - assertTrue(EqualsBuilder.reflectionEquals(def, found)); - - def.setVersion(2); - dao.create(def); - - all = dao.getAll(); - assertNotNull(all); - assertEquals(2, all.size()); - assertEquals("test", all.get(0).getName()); - assertEquals(1, all.get(0).getVersion()); - - found = dao.getLatest(def.getName()); - assertEquals(def.getName(), found.getName()); - assertEquals(def.getVersion(), found.getVersion()); - assertEquals(2, found.getVersion()); - - all = dao.getAllVersions(def.getName()); - assertNotNull(all); - assertEquals(2, all.size()); - assertEquals("test", 
all.get(0).getName()); - assertEquals("test", all.get(1).getName()); - assertEquals(1, all.get(0).getVersion()); - assertEquals(2, all.get(1).getVersion()); - - def.setDescription("updated"); - dao.update(def); - found = dao.get(def.getName(), def.getVersion()); - assertEquals(def.getDescription(), found.getDescription()); - - List allnames = dao.findAll(); - assertNotNull(allnames); - assertEquals(1, allnames.size()); - assertEquals(def.getName(), allnames.get(0)); - - dao.removeWorkflowDef("test", 1); - WorkflowDef deleted = dao.get("test", 1); - assertNull(deleted); - dao.removeWorkflowDef("test", 2); - WorkflowDef latestDef = dao.getLatest("test"); - assertNull(latestDef); - - WorkflowDef[] workflowDefsArray = new WorkflowDef[3]; - for(int i=1; i <=3; i++) { - workflowDefsArray[i-1] = new WorkflowDef(); - workflowDefsArray[i-1].setName("test"); - workflowDefsArray[i-1].setVersion(i); - workflowDefsArray[i-1].setDescription("description"); - workflowDefsArray[i-1].setCreatedBy("unit_test"); - workflowDefsArray[i-1].setCreateTime(1L); - workflowDefsArray[i-1].setOwnerApp("ownerApp"); - workflowDefsArray[i-1].setUpdatedBy("unit_test2"); - workflowDefsArray[i-1].setUpdateTime(2L); - dao.create( workflowDefsArray[i-1]); - } - dao.removeWorkflowDef("test", 1); - dao.removeWorkflowDef("test", 2); - WorkflowDef workflow = dao.getLatest("test"); - assertEquals(workflow.getVersion(), 3); - } - - @Test(expected=ApplicationException.class) - public void removeInvalidWorkflowDef() throws Exception { - WorkflowDef def = new WorkflowDef(); - dao.removeWorkflowDef("hello", 1); - } - - @Test - public void testTaskDefOperations() throws Exception { - - TaskDef def = new TaskDef("taskA"); - def.setDescription("description"); - def.setCreatedBy("unit_test"); - def.setCreateTime(1L); - def.setInputKeys(Arrays.asList("a","b","c")); - def.setOutputKeys(Arrays.asList("01","o2")); - def.setOwnerApp("ownerApp"); - def.setRetryCount(3); - def.setRetryDelaySeconds(100); - def.setRetryLogic(RetryLogic.FIXED); - def.setTimeoutPolicy(TimeoutPolicy.ALERT_ONLY); - def.setUpdatedBy("unit_test2"); - def.setUpdateTime(2L); - - dao.createTaskDef(def); - - TaskDef found = dao.getTaskDef(def.getName()); - assertEquals(found, def); - - def.setDescription("updated description"); - dao.updateTaskDef(def); - found = dao.getTaskDef(def.getName()); - assertEquals(found, def); - assertEquals("updated description", found.getDescription()); - - for(int i = 0; i < 9; i++) { - TaskDef tdf = new TaskDef("taskA" + i); - dao.createTaskDef(tdf); - } - - List all = dao.getAllTaskDefs(); - assertNotNull(all); - assertEquals(10, all.size()); - Set allnames = all.stream().map(TaskDef::getName).collect(Collectors.toSet()); - assertEquals(10, allnames.size()); - List sorted = allnames.stream().sorted().collect(Collectors.toList()); - assertEquals(def.getName(), sorted.get(0)); - - for(int i = 0; i < 9; i++) { - assertEquals(def.getName() + i, sorted.get(i+1)); - } - - for(int i = 0; i < 9; i++) { - dao.removeTaskDef(def.getName() + i); - } - all = dao.getAllTaskDefs(); - assertNotNull(all); - assertEquals(1, all.size()); - assertEquals(def.getName(), all.get(0).getName()); - } - - @Test(expected=ApplicationException.class) - public void testRemoveTaskDef() throws Exception { - dao.removeTaskDef("test" + UUID.randomUUID().toString()); - } - - @Test - public void testEventHandlers() { - String event1 = "SQS::arn:account090:sqstest1"; - String event2 = "SQS::arn:account090:sqstest2"; - - EventHandler eh = new EventHandler(); - 
eh.setName(UUID.randomUUID().toString()); - eh.setActive(false); - Action action = new Action(); - action.setAction(Type.start_workflow); - action.setStart_workflow(new StartWorkflow()); - action.getStart_workflow().setName("workflow_x"); - eh.getActions().add(action); - eh.setEvent(event1); - - dao.addEventHandler(eh); - List all = dao.getEventHandlers(); - assertNotNull(all); - assertEquals(1, all.size()); - assertEquals(eh.getName(), all.get(0).getName()); - assertEquals(eh.getEvent(), all.get(0).getEvent()); - - List byEvents = dao.getEventHandlersForEvent(event1, true); - assertNotNull(byEvents); - assertEquals(0, byEvents.size()); //event is marked as in-active - - eh.setActive(true); - eh.setEvent(event2); - dao.updateEventHandler(eh); - - all = dao.getEventHandlers(); - assertNotNull(all); - assertEquals(1, all.size()); - - byEvents = dao.getEventHandlersForEvent(event1, true); - assertNotNull(byEvents); - assertEquals(0, byEvents.size()); - - byEvents = dao.getEventHandlersForEvent(event2, true); - assertNotNull(byEvents); - assertEquals(1, byEvents.size()); - - } + private RedisMetadataDAO dao; + + private static ObjectMapper om = new JsonMapperProvider().get(); + + @Before + public void init() { + Configuration config = new TestConfiguration(); + JedisCommands jedisMock = new JedisMock(); + DynoProxy dynoClient = new DynoProxy(jedisMock); + + dao = new RedisMetadataDAO(dynoClient, om, config); + } + + @Test(expected = ApplicationException.class) + public void testDup() throws Exception { + WorkflowDef def = new WorkflowDef(); + def.setName("testDup"); + def.setVersion(1); + + dao.create(def); + dao.create(def); + } + + @Test + public void testWorkflowDefOperations() throws Exception { + + WorkflowDef def = new WorkflowDef(); + def.setName("test"); + def.setVersion(1); + def.setDescription("description"); + def.setCreatedBy("unit_test"); + def.setCreateTime(1L); + def.setOwnerApp("ownerApp"); + def.setUpdatedBy("unit_test2"); + def.setUpdateTime(2L); + + dao.create(def); + + List all = dao.getAll(); + assertNotNull(all); + assertEquals(1, all.size()); + assertEquals("test", all.get(0).getName()); + assertEquals(1, all.get(0).getVersion()); + + WorkflowDef found = dao.get("test", 1).get(); + assertTrue(EqualsBuilder.reflectionEquals(def, found)); + + def.setVersion(2); + dao.create(def); + + all = dao.getAll(); + assertNotNull(all); + assertEquals(2, all.size()); + assertEquals("test", all.get(0).getName()); + assertEquals(1, all.get(0).getVersion()); + + found = dao.getLatest(def.getName()).get(); + assertEquals(def.getName(), found.getName()); + assertEquals(def.getVersion(), found.getVersion()); + assertEquals(2, found.getVersion()); + + all = dao.getAllVersions(def.getName()); + assertNotNull(all); + assertEquals(2, all.size()); + assertEquals("test", all.get(0).getName()); + assertEquals("test", all.get(1).getName()); + assertEquals(1, all.get(0).getVersion()); + assertEquals(2, all.get(1).getVersion()); + + def.setDescription("updated"); + dao.update(def); + found = dao.get(def.getName(), def.getVersion()).get(); + assertEquals(def.getDescription(), found.getDescription()); + + List allnames = dao.findAll(); + assertNotNull(allnames); + assertEquals(1, allnames.size()); + assertEquals(def.getName(), allnames.get(0)); + + dao.removeWorkflowDef("test", 1); + Optional deleted = dao.get("test", 1); + assertFalse(deleted.isPresent()); + dao.removeWorkflowDef("test", 2); + Optional latestDef = dao.getLatest("test"); + assertFalse(latestDef.isPresent()); + + WorkflowDef[] 
workflowDefsArray = new WorkflowDef[3]; + for(int i=1; i <=3; i++) { + workflowDefsArray[i-1] = new WorkflowDef(); + workflowDefsArray[i-1].setName("test"); + workflowDefsArray[i-1].setVersion(i); + workflowDefsArray[i-1].setDescription("description"); + workflowDefsArray[i-1].setCreatedBy("unit_test"); + workflowDefsArray[i-1].setCreateTime(1L); + workflowDefsArray[i-1].setOwnerApp("ownerApp"); + workflowDefsArray[i-1].setUpdatedBy("unit_test2"); + workflowDefsArray[i-1].setUpdateTime(2L); + dao.create( workflowDefsArray[i-1]); + } + dao.removeWorkflowDef("test", 1); + dao.removeWorkflowDef("test", 2); + WorkflowDef workflow = dao.getLatest("test").get(); + assertEquals(workflow.getVersion(), 3); + } + + @Test(expected = ApplicationException.class) + public void removeInvalidWorkflowDef() throws Exception { + WorkflowDef def = new WorkflowDef(); + dao.removeWorkflowDef("hello", 1); + } + + @Test + public void testTaskDefOperations() throws Exception { + + TaskDef def = new TaskDef("taskA"); + def.setDescription("description"); + def.setCreatedBy("unit_test"); + def.setCreateTime(1L); + def.setInputKeys(Arrays.asList("a","b","c")); + def.setOutputKeys(Arrays.asList("01","o2")); + def.setOwnerApp("ownerApp"); + def.setRetryCount(3); + def.setRetryDelaySeconds(100); + def.setRetryLogic(RetryLogic.FIXED); + def.setTimeoutPolicy(TimeoutPolicy.ALERT_ONLY); + def.setUpdatedBy("unit_test2"); + def.setUpdateTime(2L); + def.setRateLimitPerFrequency(50); + def.setRateLimitFrequencyInSeconds(1); + + dao.createTaskDef(def); + + TaskDef found = dao.getTaskDef(def.getName()); + assertTrue(EqualsBuilder.reflectionEquals(def, found)); + + def.setDescription("updated description"); + dao.updateTaskDef(def); + found = dao.getTaskDef(def.getName()); + assertTrue(EqualsBuilder.reflectionEquals(def, found)); + assertEquals("updated description", found.getDescription()); + + for(int i = 0; i < 9; i++) { + TaskDef tdf = new TaskDef("taskA" + i); + dao.createTaskDef(tdf); + } + + List all = dao.getAllTaskDefs(); + assertNotNull(all); + assertEquals(10, all.size()); + Set allnames = all.stream().map(TaskDef::getName).collect(Collectors.toSet()); + assertEquals(10, allnames.size()); + List sorted = allnames.stream().sorted().collect(Collectors.toList()); + assertEquals(def.getName(), sorted.get(0)); + + for(int i = 0; i < 9; i++) { + assertEquals(def.getName() + i, sorted.get(i+1)); + } + + for(int i = 0; i < 9; i++) { + dao.removeTaskDef(def.getName() + i); + } + all = dao.getAllTaskDefs(); + assertNotNull(all); + assertEquals(1, all.size()); + assertEquals(def.getName(), all.get(0).getName()); + } + + @Test(expected = ApplicationException.class) + public void testRemoveTaskDef() throws Exception { + dao.removeTaskDef("test" + UUID.randomUUID().toString()); + } + + @Test + public void testEventHandlers() { + String event1 = "SQS::arn:account090:sqstest1"; + String event2 = "SQS::arn:account090:sqstest2"; + + EventHandler eh = new EventHandler(); + eh.setName(UUID.randomUUID().toString()); + eh.setActive(false); + Action action = new Action(); + action.setAction(Type.START_WORKFLOW); + action.setStartWorkflow(new StartWorkflow()); + action.getStartWorkflow().setName("workflow_x"); + eh.getActions().add(action); + eh.setEvent(event1); + + dao.addEventHandler(eh); + List all = dao.getEventHandlers(); + assertNotNull(all); + assertEquals(1, all.size()); + assertEquals(eh.getName(), all.get(0).getName()); + assertEquals(eh.getEvent(), all.get(0).getEvent()); + + List byEvents = dao.getEventHandlersForEvent(event1, 
true); + assertNotNull(byEvents); + assertEquals(0, byEvents.size()); //event is marked as in-active + + eh.setActive(true); + eh.setEvent(event2); + dao.updateEventHandler(eh); + + all = dao.getEventHandlers(); + assertNotNull(all); + assertEquals(1, all.size()); + + byEvents = dao.getEventHandlersForEvent(event1, true); + assertNotNull(byEvents); + assertEquals(0, byEvents.size()); + + byEvents = dao.getEventHandlersForEvent(event2, true); + assertNotNull(byEvents); + assertEquals(1, byEvents.size()); + + } } diff --git a/server/README.md b/server/README.md index 699f39c9db..6b4610f085 100644 --- a/server/README.md +++ b/server/README.md @@ -9,7 +9,6 @@ At the minimum, provide these options through VM or Config file: `workflow.elasticsearch.url` `workflow.elasticsearch.index.name` -`workflow.elasticsearch.version` (Defaults to 2.X; Provided implementations for major versions 2.X and 5.X) ### Database persistence model Possible values are memory, redis, redis_cluster and dynomite. diff --git a/server/build.gradle b/server/build.gradle index bc63c9036c..c27dbf71c8 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -5,12 +5,12 @@ buildscript { } plugins { - id 'com.github.johnrengelman.shadow' version '2.0.4' + id 'com.github.johnrengelman.shadow' version '2.0.4' } configurations.all { resolutionStrategy { - force 'com.fasterxml.jackson.core:jackson-core:2.7.5' + force 'com.fasterxml.jackson.core:jackson-core:2.7.5' } } @@ -18,46 +18,45 @@ apply plugin: 'war' apply plugin: "org.akhikhl.gretty" dependencies { - - //Conductor - compile project(':conductor-core') - compile project(':conductor-jersey') - compile project(':conductor-redis-persistence') - compile project(':conductor-mysql-persistence') - compile project(':conductor-contribs') - compile project(':conductor-es2-persistence') + + //Conductor + compile project(':conductor-core') + compile project(':conductor-jersey') + compile project(':conductor-redis-persistence') + compile project(':conductor-mysql-persistence') + compile project(':conductor-contribs') compile project(':conductor-es5-persistence') - - //Jetty - compile "org.eclipse.jetty:jetty-server:${revJetteyServer}" - compile "org.eclipse.jetty:jetty-servlet:${revJettyServlet}" - - //Guice - compile "com.sun.jersey.contribs:jersey-guice:${revJerseyGuice}" - compile "com.google.inject:guice:${revGuice}" - compile "com.google.inject.extensions:guice-servlet:${revGuiceServlet}" - - //Swagger - compile "io.swagger:swagger-jersey-jaxrs:${revSwagger}" - - //In memory - compile "org.rarefiedredis.redis:redis-java:${revRarefiedRedis}" + compile project(':conductor-grpc-server') + + compile "com.netflix.runtime:health-guice:${revHealth}" + + //Jetty + compile "org.eclipse.jetty:jetty-server:${revJetteyServer}" + compile "org.eclipse.jetty:jetty-servlet:${revJettyServlet}" + + //Guice + compile "com.sun.jersey.contribs:jersey-guice:${revJerseyGuice}" + compile "com.google.inject:guice:${revGuice}" + compile "com.google.inject.extensions:guice-servlet:${revGuiceServlet}" + + //Swagger + compile "io.swagger:swagger-jersey-jaxrs:${revSwagger}" } shadowJar { - mergeServiceFiles() - configurations = [project.configurations.compile] - manifest { - attributes 'Description': 'Self contained Conductor server jar' - attributes 'Main-Class' : 'com.netflix.conductor.server.Main' - } + mergeServiceFiles() + configurations = [project.configurations.compile] + manifest { + attributes 'Description': 'Self contained Conductor server jar' + attributes 'Main-Class': 
'com.netflix.conductor.bootstrap.Main' + } } publishing { - publications { - nebula(MavenPublication) { - artifact shadowJar + publications { + nebula(MavenPublication) { + artifact shadowJar + } } - } } gretty { @@ -77,11 +76,10 @@ configurations.grettyRunnerTomcat8 { build.dependsOn('shadowJar') task server(type: JavaExec) { - systemProperty 'workflow.elasticsearch.url', 'localhost:9300' + systemProperty 'workflow.elasticsearch.url', 'localhost:9300' // Switch between Elasticsearch versions 2 & 5 with major version number. - systemProperty 'workflow.elasticsearch.version', '2' - systemProperty 'loadSample', 'true' - systemProperties System.properties - main = 'com.netflix.conductor.server.Main' - classpath = sourceSets.test.runtimeClasspath + systemProperty 'loadSample', 'true' + systemProperties System.properties + main = 'com.netflix.conductor.server.Main' + classpath = sourceSets.test.runtimeClasspath } diff --git a/server/src/main/java/com/netflix/conductor/bootstrap/BootstrapModule.java b/server/src/main/java/com/netflix/conductor/bootstrap/BootstrapModule.java new file mode 100644 index 0000000000..d68d77c8e3 --- /dev/null +++ b/server/src/main/java/com/netflix/conductor/bootstrap/BootstrapModule.java @@ -0,0 +1,15 @@ +package com.netflix.conductor.bootstrap; + +import com.google.inject.AbstractModule; + +import com.netflix.conductor.common.utils.JsonMapperProvider; +import com.netflix.conductor.core.config.Configuration; +import com.netflix.conductor.core.config.SystemPropertiesConfiguration; + +public class BootstrapModule extends AbstractModule { + @Override + protected void configure() { + bind(Configuration.class).to(SystemPropertiesConfiguration.class); + bind(ModulesProvider.class); + } +} diff --git a/server/src/main/java/com/netflix/conductor/bootstrap/Main.java b/server/src/main/java/com/netflix/conductor/bootstrap/Main.java new file mode 100644 index 0000000000..ddd17f3089 --- /dev/null +++ b/server/src/main/java/com/netflix/conductor/bootstrap/Main.java @@ -0,0 +1,113 @@ +/** + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +/** + * + */ +package com.netflix.conductor.bootstrap; + +import com.google.inject.Guice; +import com.google.inject.Injector; + +import com.netflix.conductor.dao.IndexDAO; +import com.netflix.conductor.elasticsearch.EmbeddedElasticSearch; +import com.netflix.conductor.elasticsearch.EmbeddedElasticSearchProvider; +import com.netflix.conductor.grpc.server.GRPCServerProvider; +import com.netflix.conductor.jetty.server.JettyServerProvider; + +import org.apache.log4j.PropertyConfigurator; + +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.util.Optional; +import java.util.Properties; + +/** + * @author Viren Entry point for the server + */ +public class Main { + + private static final int EMBEDDED_ES_INIT_TIME = 5000; + + public static void main(String[] args) throws Exception { + + loadConfigFile(args.length > 0 ? 
args[0] : System.getenv("CONDUCTOR_CONFIG_FILE")); + + if (args.length == 2) { + System.out.println("Using log4j config " + args[1]); + PropertyConfigurator.configure(new FileInputStream(new File(args[1]))); + } + + Injector bootstrapInjector = Guice.createInjector(new BootstrapModule()); + ModulesProvider modulesProvider = bootstrapInjector.getInstance(ModulesProvider.class); + Injector serverInjector = Guice.createInjector(modulesProvider.get()); + + Optional embeddedSearchInstance = serverInjector.getInstance(EmbeddedElasticSearchProvider.class).get(); + if (embeddedSearchInstance.isPresent()) { + try { + embeddedSearchInstance.get().start(); + /* + * Elasticsearch embedded instance does not notify when it is up and ready to accept incoming requests. + * A possible solution for reading and writing into the index is to wait a specific amount of time. + */ + Thread.sleep(EMBEDDED_ES_INIT_TIME); + } catch (Exception ioe) { + ioe.printStackTrace(System.err); + System.exit(3); + } + } + + try { + serverInjector.getInstance(IndexDAO.class).setup(); + } catch (Exception e){ + e.printStackTrace(System.err); + System.exit(3); + } + + + System.out.println("\n\n\n"); + System.out.println(" _ _ "); + System.out.println(" ___ ___ _ __ __| |_ _ ___| |_ ___ _ __ "); + System.out.println(" / __/ _ \\| '_ \\ / _` | | | |/ __| __/ _ \\| '__|"); + System.out.println("| (_| (_) | | | | (_| | |_| | (__| || (_) | | "); + System.out.println(" \\___\\___/|_| |_|\\__,_|\\__,_|\\___|\\__\\___/|_| "); + System.out.println("\n\n\n"); + + serverInjector.getInstance(GRPCServerProvider.class).get().ifPresent(server -> { + try { + server.start(); + } catch (IOException ioe) { + ioe.printStackTrace(System.err); + System.exit(3); + } + }); + + serverInjector.getInstance(JettyServerProvider.class).get().ifPresent(server -> { + try { + server.start(); + } catch (Exception ioe) { + ioe.printStackTrace(System.err); + System.exit(3); + } + }); + + } + + private static void loadConfigFile(String propertyFile) throws IOException { + if (propertyFile == null) return; + System.out.println("Using config file" + propertyFile); + Properties props = new Properties(System.getProperties()); + props.load(new FileInputStream(propertyFile)); + System.setProperties(props); + } +} diff --git a/server/src/main/java/com/netflix/conductor/bootstrap/ModulesProvider.java b/server/src/main/java/com/netflix/conductor/bootstrap/ModulesProvider.java new file mode 100644 index 0000000000..482f86fb0d --- /dev/null +++ b/server/src/main/java/com/netflix/conductor/bootstrap/ModulesProvider.java @@ -0,0 +1,124 @@ +package com.netflix.conductor.bootstrap; + +import com.google.inject.AbstractModule; +import com.google.inject.ProvisionException; +import com.netflix.conductor.common.utils.ExternalPayloadStorage; +import com.netflix.conductor.core.config.Configuration; +import com.netflix.conductor.core.utils.DummyPayloadStorage; +import com.netflix.conductor.core.utils.S3PayloadStorage; +import com.netflix.conductor.dao.RedisWorkflowModule; +import com.netflix.conductor.elasticsearch.es5.ElasticSearchV5Module; +import com.netflix.conductor.mysql.MySQLWorkflowModule; +import com.netflix.conductor.server.DynomiteClusterModule; +import com.netflix.conductor.server.JerseyModule; +import com.netflix.conductor.server.LocalRedisModule; +import com.netflix.conductor.server.RedisClusterModule; +import com.netflix.conductor.server.ServerModule; +import com.netflix.conductor.server.SwaggerModule; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import 
javax.inject.Inject; +import javax.inject.Provider; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +// TODO Investigate whether this should really be a ThrowingProvider. +public class ModulesProvider implements Provider<List<AbstractModule>> { + private static final Logger logger = LoggerFactory.getLogger(ModulesProvider.class); + + private final Configuration configuration; + + enum ExternalPayloadStorageType { + S3 + } + + @Inject + public ModulesProvider(Configuration configuration) { + this.configuration = configuration; + } + + @Override + public List<AbstractModule> get() { + List<AbstractModule> modulesToLoad = new ArrayList<>(); + + modulesToLoad.addAll(selectModulesToLoad()); + modulesToLoad.addAll(configuration.getAdditionalModules()); + + return modulesToLoad; + } + + private List<AbstractModule> selectModulesToLoad() { + Configuration.DB database = null; + List<AbstractModule> modules = new ArrayList<>(); + + try { + database = configuration.getDB(); + } catch (IllegalArgumentException ie) { + final String message = "Invalid db name: " + configuration.getDBString() + + ", supported values are: " + Arrays.toString(Configuration.DB.values()); + logger.error(message); + throw new ProvisionException(message, ie); + } + + switch (database) { + case REDIS: + case DYNOMITE: + modules.add(new DynomiteClusterModule()); + modules.add(new RedisWorkflowModule()); + logger.info("Starting conductor server using dynomite/redis cluster."); + break; + + case MYSQL: + modules.add(new MySQLWorkflowModule()); + logger.info("Starting conductor server using MySQL data store", database); + break; + case MEMORY: + modules.add(new LocalRedisModule()); + modules.add(new RedisWorkflowModule()); + logger.info("Starting conductor server using in memory data store"); + break; + case REDIS_CLUSTER: + modules.add(new RedisClusterModule()); + modules.add(new RedisWorkflowModule()); + logger.info("Starting conductor server using redis_cluster."); + break; + } + + modules.add(new ElasticSearchV5Module()); + + if (configuration.getJerseyEnabled()) { + modules.add(new JerseyModule()); + modules.add(new SwaggerModule()); + } + + ExternalPayloadStorageType externalPayloadStorageType = null; + String externalPayloadStorageString = configuration.getProperty("workflow.external.payload.storage", ""); + try { + externalPayloadStorageType = ExternalPayloadStorageType.valueOf(externalPayloadStorageString); + } catch(IllegalArgumentException e) { + logger.info("External payload storage is not configured, provided: {}, supported values are: {}", externalPayloadStorageString, Arrays.toString(ExternalPayloadStorageType.values()), e); + } + + if (externalPayloadStorageType == ExternalPayloadStorageType.S3) { + modules.add(new AbstractModule() { + @Override + protected void configure() { + bind(ExternalPayloadStorage.class).to(S3PayloadStorage.class); + } + }); + } else { + modules.add(new AbstractModule() { + @Override + protected void configure() { + bind(ExternalPayloadStorage.class).to(DummyPayloadStorage.class); + } + }); + } + + modules.add(new ServerModule()); + + return modules; + } +} diff --git a/server/src/main/java/com/netflix/conductor/jetty/server/JettyModule.java b/server/src/main/java/com/netflix/conductor/jetty/server/JettyModule.java new file mode 100644 index 0000000000..e20a625e99 --- /dev/null +++ b/server/src/main/java/com/netflix/conductor/jetty/server/JettyModule.java @@ -0,0 +1,11 @@ +package com.netflix.conductor.jetty.server; + +import com.google.inject.AbstractModule; + +public class JettyModule extends AbstractModule { + @Override + protected void
configure() { + bind(JettyServerConfiguration.class).to(JettyServerSystemConfiguration.class); + bind(JettyServerProvider.class); + } +} diff --git a/server/src/main/java/com/netflix/conductor/jetty/server/JettyServer.java b/server/src/main/java/com/netflix/conductor/jetty/server/JettyServer.java new file mode 100644 index 0000000000..2f026e3611 --- /dev/null +++ b/server/src/main/java/com/netflix/conductor/jetty/server/JettyServer.java @@ -0,0 +1,146 @@ +/** + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +/** + * + */ +package com.netflix.conductor.jetty.server; + +import com.google.common.collect.ImmutableMap; +import com.google.inject.servlet.GuiceFilter; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.conductor.bootstrap.Main; +import com.netflix.conductor.common.metadata.tasks.TaskDef; +import com.netflix.conductor.service.Lifecycle; +import com.sun.jersey.api.client.Client; + +import org.eclipse.jetty.server.Server; +import org.eclipse.jetty.servlet.ServletContextHandler; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.InputStream; +import java.util.EnumSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +import javax.servlet.DispatcherType; +import javax.ws.rs.core.MediaType; + +/** + * @author Viren + */ +public class JettyServer implements Lifecycle { + + private static Logger logger = LoggerFactory.getLogger(JettyServer.class); + + private final int port; + private final boolean join; + + private Server server; + + + public JettyServer(int port, boolean join) { + this.port = port; + this.join = join; + } + + + @Override + public synchronized void start() throws Exception { + + if (server != null) { + throw new IllegalStateException("Server is already running"); + } + + this.server = new Server(port); + + ServletContextHandler context = new ServletContextHandler(); + context.addFilter(GuiceFilter.class, "/*", EnumSet.allOf(DispatcherType.class)); + context.setWelcomeFiles(new String[]{"index.html"}); + + server.setHandler(context); + + server.start(); + System.out.println("Started server on http://localhost:" + port + "/"); + try { + boolean create = Boolean.getBoolean("loadSample"); + if (create) { + System.out.println("Creating kitchensink workflow"); + createKitchenSink(port); + } + } catch (Exception e) { + logger.error(e.getMessage(), e); + } + + if (join) { + server.join(); + } + + } + + public synchronized void stop() throws Exception { + if (server == null) { + throw new IllegalStateException("Server is not running. 
call #start() method to start the server"); + } + server.stop(); + server = null; + } + + + private static void createKitchenSink(int port) throws Exception { + Client client = Client.create(); + ObjectMapper objectMapper = new ObjectMapper(); + + + List taskDefs = new LinkedList<>(); + for (int i = 0; i < 40; i++) { + taskDefs.add(new TaskDef("task_" + i, "task_" + i, 1, 0)); + } + taskDefs.add(new TaskDef("search_elasticsearch", "search_elasticsearch", 1, 0)); + + client.resource("http://localhost:" + port + "/api/metadata/taskdefs").type(MediaType.APPLICATION_JSON).post(objectMapper.writeValueAsString(taskDefs)); + + /* + * Kitchensink example (stored workflow with stored tasks) + */ + InputStream stream = Main.class.getResourceAsStream("/kitchensink.json"); + client.resource("http://localhost:" + port + "/api/metadata/workflow").type(MediaType.APPLICATION_JSON).post(stream); + + stream = Main.class.getResourceAsStream("/sub_flow_1.json"); + client.resource("http://localhost:" + port + "/api/metadata/workflow").type(MediaType.APPLICATION_JSON).post(stream); + + Map payload = ImmutableMap.of("input", + ImmutableMap.of("task2Name", "task_5")); + String payloadStr = objectMapper.writeValueAsString(payload); + client.resource("http://localhost:" + port + "/api/workflow/kitchensink").type(MediaType.APPLICATION_JSON).post(payloadStr); + + logger.info("Kitchen sink workflow is created!"); + + /* + * Kitchensink example with ephemeral workflow and stored tasks + */ + InputStream ephemeralInputStream = Main.class.getResourceAsStream("/kitchenSink-ephemeralWorkflowWithStoredTasks.json"); + client.resource("http://localhost:" + port + "/api/workflow/").type(MediaType.APPLICATION_JSON).post(ephemeralInputStream); + logger.info("Ephemeral Kitchen sink workflow with stored tasks is created!"); + + /* + * Kitchensink example with ephemeral workflow and ephemeral tasks + */ + ephemeralInputStream = Main.class.getResourceAsStream("/kitchenSink-ephemeralWorkflowWithEphemeralTasks.json"); + client.resource("http://localhost:" + port + "/api/workflow/").type(MediaType.APPLICATION_JSON).post(ephemeralInputStream); + logger.info("Ephemeral Kitchen sink workflow with ephemeral tasks is created!"); + + } +} diff --git a/server/src/main/java/com/netflix/conductor/jetty/server/JettyServerConfiguration.java b/server/src/main/java/com/netflix/conductor/jetty/server/JettyServerConfiguration.java new file mode 100644 index 0000000000..9b3a1e9919 --- /dev/null +++ b/server/src/main/java/com/netflix/conductor/jetty/server/JettyServerConfiguration.java @@ -0,0 +1,26 @@ +package com.netflix.conductor.jetty.server; + +import com.netflix.conductor.core.config.Configuration; + +public interface JettyServerConfiguration extends Configuration { + String ENABLED_PROPERTY_NAME = "conductor.jetty.server.enabled"; + boolean ENABLED_DEFAULT_VALUE = true; + + String PORT_PROPERTY_NAME = "conductor.jetty.server.port"; + int PORT_DEFAULT_VALUE = 8080; + + String JOIN_PROPERTY_NAME = "conductor.jetty.server.join"; + boolean JOIN_DEFAULT_VALUE = true; + + default boolean isEnabled(){ + return getBooleanProperty(ENABLED_PROPERTY_NAME, ENABLED_DEFAULT_VALUE); + } + + default int getPort() { + return getIntProperty(PORT_PROPERTY_NAME, PORT_DEFAULT_VALUE); + } + + default boolean isJoin(){ + return getBooleanProperty(JOIN_PROPERTY_NAME, JOIN_DEFAULT_VALUE); + } +} diff --git a/server/src/main/java/com/netflix/conductor/jetty/server/JettyServerProvider.java b/server/src/main/java/com/netflix/conductor/jetty/server/JettyServerProvider.java 
new file mode 100644 index 0000000000..ed1ccc75e8 --- /dev/null +++ b/server/src/main/java/com/netflix/conductor/jetty/server/JettyServerProvider.java @@ -0,0 +1,26 @@ +package com.netflix.conductor.jetty.server; + +import java.util.Optional; + +import javax.inject.Inject; +import javax.inject.Provider; + +public class JettyServerProvider implements Provider<Optional<JettyServer>> { + private final JettyServerConfiguration configuration; + + @Inject + public JettyServerProvider(JettyServerConfiguration configuration) { + this.configuration = configuration; + } + + @Override + public Optional<JettyServer> get() { + return configuration.isEnabled() ? + Optional.of( + new JettyServer( + configuration.getPort(), + configuration.isJoin() + )) + : Optional.empty(); + } +} diff --git a/server/src/main/java/com/netflix/conductor/jetty/server/JettyServerSystemConfiguration.java b/server/src/main/java/com/netflix/conductor/jetty/server/JettyServerSystemConfiguration.java new file mode 100644 index 0000000000..869c850c3c --- /dev/null +++ b/server/src/main/java/com/netflix/conductor/jetty/server/JettyServerSystemConfiguration.java @@ -0,0 +1,6 @@ +package com.netflix.conductor.jetty.server; + +import com.netflix.conductor.core.config.SystemPropertiesConfiguration; + +public class JettyServerSystemConfiguration extends SystemPropertiesConfiguration implements JettyServerConfiguration { +} diff --git a/server/src/main/java/com/netflix/conductor/server/ConductorConfig.java b/server/src/main/java/com/netflix/conductor/server/ConductorConfig.java deleted file mode 100644 index 674361c567..0000000000 --- a/server/src/main/java/com/netflix/conductor/server/ConductorConfig.java +++ /dev/null @@ -1,204 +0,0 @@ -/** - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ -/** - * - */ -package com.netflix.conductor.server; - -import com.google.inject.AbstractModule; -import com.netflix.conductor.core.config.Configuration; -import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Properties; - -/** - * @author Viren - * - */ -public class ConductorConfig implements Configuration { - - private static Logger logger = LoggerFactory.getLogger(ConductorConfig.class); - - @Override - public int getSweepFrequency() { - return getIntProperty("decider.sweep.frequency.seconds", 30); - } - - @Override - public boolean disableSweep() { - String disable = getProperty("decider.sweep.disable", "false"); - return Boolean.getBoolean(disable); - } - - @Override - public boolean disableAsyncWorkers() { - String disable = getProperty("conductor.disable.async.workers", "false"); - return Boolean.getBoolean(disable); - } - - @Override - public String getServerId() { - try { - return InetAddress.getLocalHost().getHostName(); - } catch (UnknownHostException e) { - return "unknown"; - } - } - - @Override - public String getEnvironment() { - return getProperty("environment", "test"); - } - - @Override - public String getStack() { - return getProperty("STACK", "test"); - } - - @Override - public String getAppId() { - return getProperty("APP_ID", "conductor"); - } - - @Override - public String getRegion() { - return getProperty("EC2_REGION", "us-east-1"); - } - - @Override - public String getAvailabilityZone() { - return getProperty("EC2_AVAILABILITY_ZONE", "us-east-1c"); - } - - @Override - public Long getWorkflowInputPayloadSizeThresholdKB() { - return getLongProperty("conductor.workflow.input.payload.threshold.kb", 5120L); - } - - @Override - public Long getMaxWorkflowInputPayloadSizeThresholdKB() { - return getLongProperty("conductor.max.workflow.input.payload.threshold.kb", 10240L); - } - - @Override - public Long getWorkflowOutputPayloadSizeThresholdKB() { - return getLongProperty("conductor.workflow.output.payload.threshold.kb", 5120L); - } - - @Override - public Long getMaxWorkflowOutputPayloadSizeThresholdKB() { - return getLongProperty("conductor.max.workflow.output.payload.threshold.kb", 10240L); - } - - @Override - public Long getTaskInputPayloadSizeThresholdKB() { - return getLongProperty("conductor.task.input.payload.threshold.kb", 3072L); - } - - @Override - public Long getMaxTaskInputPayloadSizeThresholdKB() { - return getLongProperty("conductor.max.task.input.payload.threshold.kb", 10240L); - } - - @Override - public Long getTaskOutputPayloadSizeThresholdKB() { - return getLongProperty("conductor.task.output.payload.threshold.kb", 3072L); - } - - @Override - public Long getMaxTaskOutputPayloadSizeThresholdKB() { - return getLongProperty("conductor.max.task.output.payload.threshold.kb", 10240L); - } - - @Override - public int getIntProperty(String key, int defaultValue) { - String val = getProperty(key, Integer.toString(defaultValue)); - try{ - defaultValue = Integer.parseInt(val); - } catch(NumberFormatException e){ - logger.error("Error parsing the Int value for Key:{} , returning a default value: {}", key, defaultValue); - } - return defaultValue; - } - - @Override - public long getLongProperty(String key, long defaultValue) { - String val = getProperty(key, Long.toString(defaultValue)); - try { - 
defaultValue = Long.parseLong(val); - } catch (NumberFormatException e) { - logger.error("Error parsing the Long value for Key:{} , returning a default value: {}", key, defaultValue); - } - return defaultValue; - } - - @Override - public String getProperty(String key, String defaultValue) { - - String val = null; - try{ - val = System.getenv(key.replace('.','_')); - if (val == null || val.isEmpty()) { - val = Optional.ofNullable(System.getProperty(key)).orElse(defaultValue); - } - }catch(Exception e){ - logger.error("Error reading property: {}", key, e); - } - return val; - } - - @Override - public Map getAll() { - Map map = new HashMap<>(); - Properties props = System.getProperties(); - props.forEach((key, value) -> map.put(key.toString(), value)); - return map; - } - - @Override - public List getAdditionalModules() { - - String additionalModuleClasses = getProperty("conductor.additional.modules", null); - if(!StringUtils.isEmpty(additionalModuleClasses)) { - try { - List modules = new LinkedList<>(); - String[] classes = additionalModuleClasses.split(","); - for(String clazz : classes) { - Object moduleObj = Class.forName(clazz).newInstance(); - if(moduleObj instanceof AbstractModule) { - AbstractModule abstractModule = (AbstractModule)moduleObj; - modules.add(abstractModule); - } else { - logger.error(clazz + " does not implement " + AbstractModule.class.getName() + ", skipping..."); - } - } - return modules; - }catch(Exception e) { - logger.warn("Error loading additional modules", e); - } - } - return null; - } -} diff --git a/server/src/main/java/com/netflix/conductor/server/ConductorServer.java b/server/src/main/java/com/netflix/conductor/server/ConductorServer.java deleted file mode 100644 index 48af63d348..0000000000 --- a/server/src/main/java/com/netflix/conductor/server/ConductorServer.java +++ /dev/null @@ -1,286 +0,0 @@ -/** - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -/** - * - */ -package com.netflix.conductor.server; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.inject.Guice; -import com.google.inject.servlet.GuiceFilter; -import com.netflix.conductor.common.metadata.tasks.TaskDef; -import com.netflix.conductor.dao.es.EmbeddedElasticSearch; -import com.netflix.conductor.dao.es5.EmbeddedElasticSearchV5; -import com.netflix.conductor.redis.utils.JedisMock; -import com.netflix.dyno.connectionpool.Host; -import com.netflix.dyno.connectionpool.Host.Status; -import com.netflix.dyno.connectionpool.HostSupplier; -import com.netflix.dyno.connectionpool.TokenMapSupplier; -import com.netflix.dyno.connectionpool.impl.ConnectionPoolConfigurationImpl; -import com.netflix.dyno.connectionpool.impl.lb.HostToken; -import com.netflix.dyno.jedis.DynoJedisClient; -import com.sun.jersey.api.client.Client; -import org.apache.commons.pool2.impl.GenericObjectPoolConfig; -import org.eclipse.jetty.server.Server; -import org.eclipse.jetty.servlet.DefaultServlet; -import org.eclipse.jetty.servlet.ServletContextHandler; -import org.eclipse.jetty.servlet.ServletHolder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import redis.clients.jedis.HostAndPort; -import redis.clients.jedis.JedisCluster; -import redis.clients.jedis.JedisCommands; - -import javax.servlet.DispatcherType; -import javax.ws.rs.core.MediaType; -import java.io.InputStream; -import java.util.Arrays; -import java.util.EnumSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Set; - -/** - * @author Viren - * - */ -public class ConductorServer { - - private static Logger logger = LoggerFactory.getLogger(ConductorServer.class); - - enum DB { - redis, dynomite, memory, redis_cluster, mysql - } - - enum ExternalPayloadStorageType { - S3 - } - - private ServerModule serverModule; - - private Server server; - - private ConductorConfig conductorConfig; - - private DB database; - - private ExternalPayloadStorageType externalPayloadStorageType; - - public ConductorServer(ConductorConfig conductorConfig) { - this.conductorConfig = conductorConfig; - String dynoClusterName = conductorConfig.getProperty("workflow.dynomite.cluster.name", ""); - - List dynoHosts = new LinkedList<>(); - String dbstring = conductorConfig.getProperty("db", "memory"); - try { - database = DB.valueOf(dbstring); - }catch(IllegalArgumentException ie) { - logger.error("Invalid db name: " + dbstring + ", supported values are: " + Arrays.toString(DB.values())); - System.exit(1); - } - - if(!(database.equals(DB.memory) || database.equals(DB.mysql))) { - String hosts = conductorConfig.getProperty("workflow.dynomite.cluster.hosts", null); - if(hosts == null) { - System.err.println("Missing dynomite/redis hosts. Ensure 'workflow.dynomite.cluster.hosts' has been set in the supplied configuration."); - logger.error("Missing dynomite/redis hosts. 
Ensure 'workflow.dynomite.cluster.hosts' has been set in the supplied configuration."); - System.exit(1); - } - String[] hostConfigs = hosts.split(";"); - - for(String hostConfig : hostConfigs) { - String[] hostConfigValues = hostConfig.split(":"); - String host = hostConfigValues[0]; - int port = Integer.parseInt(hostConfigValues[1]); - String rack = hostConfigValues[2]; - Host dynoHost = new Host(host, port, rack, Status.Up); - dynoHosts.add(dynoHost); - } - }else { - //Create a single shard host supplier - Host dynoHost = new Host("localhost", 0, conductorConfig.getAvailabilityZone(), Status.Up); - dynoHosts.add(dynoHost); - } - - String externalPayloadStorageString = conductorConfig.getProperty("workflow.external.payload.storage", ""); - try { - externalPayloadStorageType = ExternalPayloadStorageType.valueOf(externalPayloadStorageString); - } catch(IllegalArgumentException e) { - logger.info("External payload storage is not configured, provided: {}, supported values are: {}", externalPayloadStorageString, Arrays.toString(ExternalPayloadStorageType.values()), e); - } - - init(dynoClusterName, dynoHosts); - } - - private void init(String dynoClusterName, List dynoHosts) { - HostSupplier hostSupplier = () -> dynoHosts; - - JedisCommands jedis = null; - - switch(database) { - case redis: - case dynomite: - ConnectionPoolConfigurationImpl connectionPoolConfiguration = new ConnectionPoolConfigurationImpl(dynoClusterName) - .withTokenSupplier(getTokenMapSupplier(dynoHosts)) - .setLocalRack(conductorConfig.getAvailabilityZone()) - .setLocalDataCenter(conductorConfig.getRegion()) - .setSocketTimeout(0) - .setConnectTimeout(0) - .setMaxConnsPerHost(conductorConfig.getIntProperty("workflow.dynomite.connection.maxConnsPerHost", 10)); - - jedis = new DynoJedisClient.Builder() - .withHostSupplier(hostSupplier) - .withApplicationName(conductorConfig.getAppId()) - .withDynomiteClusterName(dynoClusterName) - .withCPConfig(connectionPoolConfiguration) - .build(); - - logger.info("Starting conductor server using dynomite/redis cluster " + dynoClusterName); - - break; - - case mysql: - logger.info("Starting conductor server using MySQL data store", database); - break; - case memory: - jedis = new JedisMock(); - try { - if (conductorConfig.getProperty("workflow.elasticsearch.version", "2").equals("5")){ - EmbeddedElasticSearchV5.start(); - } - else { - // Use ES2 as default. - EmbeddedElasticSearch.start(); - } - if(System.getProperty("workflow.elasticsearch.url") == null) { - System.setProperty("workflow.elasticsearch.url", "localhost:9300"); - } - if(System.getProperty("workflow.elasticsearch.index.name") == null) { - System.setProperty("workflow.elasticsearch.index.name", "conductor"); - } - } catch (Exception e) { - logger.error("Error starting embedded elasticsearch. 
Search functionality will be impacted: " + e.getMessage(), e); - } - logger.info("Starting conductor server using in memory data store"); - break; - - case redis_cluster: - Host host = dynoHosts.get(0); - GenericObjectPoolConfig poolConfig = new GenericObjectPoolConfig(); - poolConfig.setMinIdle(5); - poolConfig.setMaxTotal(1000); - jedis = new JedisCluster(new HostAndPort(host.getHostName(), host.getPort()), poolConfig); - logger.info("Starting conductor server using redis_cluster " + dynoClusterName); - break; - } - - this.serverModule = new ServerModule(jedis, hostSupplier, conductorConfig, database, externalPayloadStorageType); - } - - private TokenMapSupplier getTokenMapSupplier(List dynoHosts) { - return new TokenMapSupplier() { - - HostToken token = new HostToken(1L, dynoHosts.get(0)); - - @Override - public List getTokens(Set activeHosts) { - return Arrays.asList(token); - } - - @Override - public HostToken getTokenForHost(Host host, Set activeHosts) { - return token; - } - }; - } - - public ServerModule getGuiceModule() { - return serverModule; - } - - public synchronized void start(int port, boolean join) throws Exception { - - if(server != null) { - throw new IllegalStateException("Server is already running"); - } - - Guice.createInjector(serverModule); - - //Swagger - String resourceBasePath = Main.class.getResource("/swagger-ui").toExternalForm(); - this.server = new Server(port); - - ServletContextHandler context = new ServletContextHandler(); - context.addFilter(GuiceFilter.class, "/*", EnumSet.allOf(DispatcherType.class)); - context.setResourceBase(resourceBasePath); - context.setWelcomeFiles(new String[] { "index.html" }); - - server.setHandler(context); - - - DefaultServlet staticServlet = new DefaultServlet(); - context.addServlet(new ServletHolder(staticServlet), "/*"); - - server.start(); - System.out.println("Started server on http://localhost:" + port + "/"); - try { - boolean create = Boolean.getBoolean("loadSample"); - if(create) { - System.out.println("Creating kitchensink workflow"); - createKitchenSink(port); - } - }catch(Exception e) { - logger.error(e.getMessage(), e); - } - - if(join) { - server.join(); - } - - } - - public synchronized void stop() throws Exception { - if(server == null) { - throw new IllegalStateException("Server is not running. 
call #start() method to start the server"); - } - server.stop(); - server = null; - } - - private static void createKitchenSink(int port) throws Exception { - - List taskDefs = new LinkedList<>(); - for(int i = 0; i < 40; i++) { - taskDefs.add(new TaskDef("task_" + i, "task_" + i, 1, 0)); - } - taskDefs.add(new TaskDef("search_elasticsearch", "search_elasticsearch", 1, 0)); - - Client client = Client.create(); - ObjectMapper om = new ObjectMapper(); - client.resource("http://localhost:" + port + "/api/metadata/taskdefs").type(MediaType.APPLICATION_JSON).post(om.writeValueAsString(taskDefs)); - - InputStream stream = Main.class.getResourceAsStream("/kitchensink.json"); - client.resource("http://localhost:" + port + "/api/metadata/workflow").type(MediaType.APPLICATION_JSON).post(stream); - - stream = Main.class.getResourceAsStream("/sub_flow_1.json"); - client.resource("http://localhost:" + port + "/api/metadata/workflow").type(MediaType.APPLICATION_JSON).post(stream); - - String input = "{\"task2Name\":\"task_5\"}"; - client.resource("http://localhost:" + port + "/api/workflow/kitchensink").type(MediaType.APPLICATION_JSON).post(input); - - logger.info("Kitchen sink workflows are created!"); - } -} diff --git a/server/src/main/java/com/netflix/conductor/server/DynomiteClusterModule.java b/server/src/main/java/com/netflix/conductor/server/DynomiteClusterModule.java new file mode 100644 index 0000000000..56ac5c5f2d --- /dev/null +++ b/server/src/main/java/com/netflix/conductor/server/DynomiteClusterModule.java @@ -0,0 +1,34 @@ +package com.netflix.conductor.server; + +import com.google.inject.AbstractModule; +import com.google.inject.name.Names; + +import com.netflix.conductor.dyno.DynoShardSupplierProvider; +import com.netflix.conductor.dyno.DynomiteConfiguration; +import com.netflix.conductor.dyno.RedisQueuesProvider; +import com.netflix.conductor.dyno.SystemPropertiesDynomiteConfiguration; +import com.netflix.conductor.jedis.ConfigurationHostSupplierProvider; +import com.netflix.conductor.jedis.DynomiteJedisProvider; +import com.netflix.conductor.jedis.TokenMapSupplierProvider; +import com.netflix.dyno.connectionpool.HostSupplier; +import com.netflix.dyno.connectionpool.TokenMapSupplier; +import com.netflix.dyno.queues.ShardSupplier; + +import redis.clients.jedis.JedisCommands; + +public class DynomiteClusterModule extends AbstractModule { + + @Override + protected void configure() { + + bind(DynomiteConfiguration.class).to(SystemPropertiesDynomiteConfiguration.class); + bind(JedisCommands.class).toProvider(DynomiteJedisProvider.class).asEagerSingleton(); + bind(JedisCommands.class) + .annotatedWith(Names.named(RedisQueuesProvider.READ_CLIENT_INJECTION_NAME)) + .toProvider(DynomiteJedisProvider.class) + .asEagerSingleton(); + bind(HostSupplier.class).toProvider(ConfigurationHostSupplierProvider.class); + bind(TokenMapSupplier.class).toProvider(TokenMapSupplierProvider.class); + bind(ShardSupplier.class).toProvider(DynoShardSupplierProvider.class); + } +} diff --git a/server/src/main/java/com/netflix/conductor/server/ExecutorServiceProvider.java b/server/src/main/java/com/netflix/conductor/server/ExecutorServiceProvider.java new file mode 100644 index 0000000000..869d7a5aad --- /dev/null +++ b/server/src/main/java/com/netflix/conductor/server/ExecutorServiceProvider.java @@ -0,0 +1,37 @@ +package com.netflix.conductor.server; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; + +import com.netflix.conductor.core.config.Configuration; + +import 
java.util.concurrent.ExecutorService; +import java.util.concurrent.ThreadFactory; + +import javax.inject.Inject; +import javax.inject.Provider; + +public class ExecutorServiceProvider implements Provider { + private static final int MAX_THREADS = 50; + + private final Configuration configuration; + private final ExecutorService executorService; + + @Inject + public ExecutorServiceProvider(Configuration configuration) { + this.configuration = configuration; + // TODO Use configuration to set max threads. + this.executorService = java.util.concurrent.Executors.newFixedThreadPool(MAX_THREADS, buildThreadFactory()); + } + + @Override + public ExecutorService get() { + return executorService; + } + + private ThreadFactory buildThreadFactory() { + return new ThreadFactoryBuilder() + .setNameFormat("conductor-worker-%d") + .setDaemon(true) + .build(); + } +} diff --git a/server/src/main/java/com/netflix/conductor/server/JerseyModule.java b/server/src/main/java/com/netflix/conductor/server/JerseyModule.java index 3b711c82fc..caff6e85db 100644 --- a/server/src/main/java/com/netflix/conductor/server/JerseyModule.java +++ b/server/src/main/java/com/netflix/conductor/server/JerseyModule.java @@ -28,8 +28,6 @@ import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletResponse; -import com.fasterxml.jackson.annotation.JsonInclude.Include; -import com.fasterxml.jackson.databind.DeserializationFeature; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.jaxrs.json.JacksonJsonProvider; import com.google.inject.Provides; @@ -44,11 +42,10 @@ * */ public final class JerseyModule extends JerseyServletModule { + @Override protected void configureServlets() { - - filter("/*").through(apiOriginFilter()); Map jerseyParams = new HashMap<>(); @@ -59,19 +56,7 @@ protected void configureServlets() { serve("/api/*").with(GuiceContainer.class, jerseyParams); } - @Provides - @Singleton - public ObjectMapper objectMapper() { - final ObjectMapper objectMapper = new ObjectMapper(); - objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); - objectMapper.configure(DeserializationFeature.FAIL_ON_IGNORED_PROPERTIES, false); - objectMapper.configure(DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES, false); - objectMapper.setSerializationInclusion(Include.NON_NULL); - objectMapper.setSerializationInclusion(Include.NON_EMPTY); - return objectMapper; - } - - @Provides + @Provides @Singleton JacksonJsonProvider jacksonJsonProvider(ObjectMapper mapper) { return new JacksonJsonProvider(mapper); diff --git a/server/src/main/java/com/netflix/conductor/server/LocalRedisModule.java b/server/src/main/java/com/netflix/conductor/server/LocalRedisModule.java new file mode 100644 index 0000000000..ee4d7cf83d --- /dev/null +++ b/server/src/main/java/com/netflix/conductor/server/LocalRedisModule.java @@ -0,0 +1,29 @@ +package com.netflix.conductor.server; + +import com.google.inject.AbstractModule; +import com.google.inject.name.Names; + +import com.netflix.conductor.dyno.DynoShardSupplierProvider; +import com.netflix.conductor.dyno.DynomiteConfiguration; +import com.netflix.conductor.dyno.RedisQueuesProvider; +import com.netflix.conductor.dyno.SystemPropertiesDynomiteConfiguration; +import com.netflix.conductor.jedis.InMemoryJedisProvider; +import com.netflix.conductor.jedis.LocalHostSupplierProvider; +import com.netflix.dyno.connectionpool.HostSupplier; +import com.netflix.dyno.queues.ShardSupplier; + +import redis.clients.jedis.JedisCommands; + +public class 
LocalRedisModule extends AbstractModule { + @Override + protected void configure() { + + bind(DynomiteConfiguration.class).to(SystemPropertiesDynomiteConfiguration.class); + bind(JedisCommands.class).toProvider(InMemoryJedisProvider.class); + bind(JedisCommands.class) + .annotatedWith(Names.named(RedisQueuesProvider.READ_CLIENT_INJECTION_NAME)) + .toProvider(InMemoryJedisProvider.class); + bind(HostSupplier.class).toProvider(LocalHostSupplierProvider.class); + bind(ShardSupplier.class).toProvider(DynoShardSupplierProvider.class); + } +} diff --git a/server/src/main/java/com/netflix/conductor/server/Main.java b/server/src/main/java/com/netflix/conductor/server/Main.java deleted file mode 100644 index 9e7b90214f..0000000000 --- a/server/src/main/java/com/netflix/conductor/server/Main.java +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Copyright 2017 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * - */ -package com.netflix.conductor.server; - -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.util.Properties; - -import org.apache.log4j.PropertyConfigurator; - -/** - * @author Viren - * Entry point for the server - */ -public class Main { - - public static void main(String[] args) throws Exception { - - loadConfigFile(args.length > 0 ? 
args[0] : System.getenv("CONDUCTOR_CONFIG_FILE")); - - if(args.length == 2) { - System.out.println("Using log4j config " + args[1]); - PropertyConfigurator.configure(new FileInputStream(new File(args[1]))); - } - - ConductorConfig config = new ConductorConfig(); - ConductorServer server = new ConductorServer(config); - - System.out.println("\n\n\n"); - System.out.println(" _ _ "); - System.out.println(" ___ ___ _ __ __| |_ _ ___| |_ ___ _ __ "); - System.out.println(" / __/ _ \\| '_ \\ / _` | | | |/ __| __/ _ \\| '__|"); - System.out.println("| (_| (_) | | | | (_| | |_| | (__| || (_) | | "); - System.out.println(" \\___\\___/|_| |_|\\__,_|\\__,_|\\___|\\__\\___/|_| "); - System.out.println("\n\n\n"); - - server.start(config.getIntProperty("port", 8080), true); - - } - - private static void loadConfigFile(String propertyFile) throws IOException { - if (propertyFile == null) return; - System.out.println("Using config file" + propertyFile); - Properties props = new Properties(System.getProperties()); - props.load(new FileInputStream(propertyFile)); - System.setProperties(props); - } -} diff --git a/server/src/main/java/com/netflix/conductor/server/RedisClusterModule.java b/server/src/main/java/com/netflix/conductor/server/RedisClusterModule.java new file mode 100644 index 0000000000..fc57fa2bfe --- /dev/null +++ b/server/src/main/java/com/netflix/conductor/server/RedisClusterModule.java @@ -0,0 +1,20 @@ +package com.netflix.conductor.server; + +import com.google.inject.AbstractModule; + +import com.netflix.conductor.dyno.DynomiteConfiguration; +import com.netflix.conductor.dyno.SystemPropertiesDynomiteConfiguration; +import com.netflix.conductor.jedis.ConfigurationHostSupplierProvider; +import com.netflix.conductor.jedis.RedisClusterJedisProvider; +import com.netflix.dyno.connectionpool.HostSupplier; + +import redis.clients.jedis.JedisCommands; + +public class RedisClusterModule extends AbstractModule { + @Override + protected void configure(){ + bind(HostSupplier.class).toProvider(ConfigurationHostSupplierProvider.class); + bind(DynomiteConfiguration.class).to(SystemPropertiesDynomiteConfiguration.class); + bind(JedisCommands.class).toProvider(RedisClusterJedisProvider.class); + } +} diff --git a/server/src/main/java/com/netflix/conductor/server/ServerModule.java b/server/src/main/java/com/netflix/conductor/server/ServerModule.java index 8ec863c645..955dd8dd30 100644 --- a/server/src/main/java/com/netflix/conductor/server/ServerModule.java +++ b/server/src/main/java/com/netflix/conductor/server/ServerModule.java @@ -1,132 +1,50 @@ /** * Copyright 2016 Netflix, Inc. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. */ /** - * + * */ package com.netflix.conductor.server; +import com.fasterxml.jackson.databind.ObjectMapper; import com.google.inject.AbstractModule; -import com.google.inject.Provides; -import com.netflix.conductor.common.utils.ExternalPayloadStorage; -import com.netflix.conductor.contribs.http.HttpTask; -import com.netflix.conductor.contribs.http.RestClientManager; -import com.netflix.conductor.contribs.json.JsonJqTransform; +import com.google.inject.Scopes; +import com.netflix.archaius.guice.ArchaiusModule; +import com.netflix.conductor.common.utils.JsonMapperProvider; import com.netflix.conductor.core.config.Configuration; import com.netflix.conductor.core.config.CoreModule; -import com.netflix.conductor.core.utils.DummyPayloadStorage; -import com.netflix.conductor.core.utils.S3PayloadStorage; -import com.netflix.conductor.dao.RedisWorkflowModule; -import com.netflix.conductor.dao.es.index.ElasticSearchModule; -import com.netflix.conductor.dao.es5.index.ElasticSearchModuleV5; -import com.netflix.conductor.dao.mysql.MySQLWorkflowModule; -import com.netflix.dyno.connectionpool.HostSupplier; -import redis.clients.jedis.JedisCommands; +import com.netflix.conductor.dyno.SystemPropertiesDynomiteConfiguration; +import com.netflix.conductor.grpc.server.GRPCModule; +import com.netflix.conductor.jetty.server.JettyModule; +import com.netflix.runtime.health.guice.HealthModule; -import java.util.List; import java.util.concurrent.ExecutorService; -import java.util.concurrent.atomic.AtomicInteger; - -import static com.netflix.conductor.server.ConductorServer.ExternalPayloadStorageType.S3; /** * @author Viren * */ public class ServerModule extends AbstractModule { - - private int maxThreads = 50; - - private ExecutorService es; - - private JedisCommands dynoConn; - - private HostSupplier hostSupplier; - - private String region; - - private String localRack; - - private ConductorConfig conductorConfig; - - private ConductorServer.DB db; - - private ConductorServer.ExternalPayloadStorageType externalPayloadStorageType; - - public ServerModule(JedisCommands jedis, HostSupplier hostSupplier, ConductorConfig conductorConfig, ConductorServer.DB db, ConductorServer.ExternalPayloadStorageType externalPayloadStorageType) { - this.dynoConn = jedis; - this.hostSupplier = hostSupplier; - this.conductorConfig = conductorConfig; - this.region = conductorConfig.getRegion(); - this.localRack = conductorConfig.getAvailabilityZone(); - this.db = db; - this.externalPayloadStorageType = externalPayloadStorageType; - } - - @Override - protected void configure() { - - configureExecutorService(); - - bind(Configuration.class).toInstance(conductorConfig); - - if (db == ConductorServer.DB.mysql) { - install(new MySQLWorkflowModule()); - } else { - install(new RedisWorkflowModule(conductorConfig, dynoConn, hostSupplier)); - } - - if (conductorConfig.getProperty("workflow.elasticsearch.version", "2").equals("5")){ - install(new ElasticSearchModuleV5()); - } - else { - // Use ES2 as default. 
- install(new ElasticSearchModule()); - } - - install(new CoreModule()); - install(new JerseyModule()); - - new HttpTask(new RestClientManager(), conductorConfig); - new JsonJqTransform(); - - List additionalModules = conductorConfig.getAdditionalModules(); - if(additionalModules != null) { - for(AbstractModule additionalModule : additionalModules) { - install(additionalModule); - } - } - if (externalPayloadStorageType == S3) { - bind(ExternalPayloadStorage.class).to(S3PayloadStorage.class); - } else { - bind(ExternalPayloadStorage.class).to(DummyPayloadStorage.class); - } - } - - @Provides - public ExecutorService getExecutorService(){ - return this.es; - } - - private void configureExecutorService(){ - AtomicInteger count = new AtomicInteger(0); - this.es = java.util.concurrent.Executors.newFixedThreadPool(maxThreads, runnable -> { - Thread conductorWorkerThread = new Thread(runnable); - conductorWorkerThread.setName("conductor-worker-" + count.getAndIncrement()); - return conductorWorkerThread; - }); - } + @Override + protected void configure() { + install(new CoreModule()); + install(new ArchaiusModule()); + install(new HealthModule()); + install(new JettyModule()); + install(new GRPCModule()); + + bind(ObjectMapper.class).toProvider(JsonMapperProvider.class); + bind(Configuration.class).to(SystemPropertiesDynomiteConfiguration.class); + bind(ExecutorService.class).toProvider(ExecutorServiceProvider.class).in(Scopes.SINGLETON); + } } diff --git a/server/src/main/java/com/netflix/conductor/server/ServletContextListner.java b/server/src/main/java/com/netflix/conductor/server/ServletContextListner.java index 6cd437704d..8ba68821e8 100644 --- a/server/src/main/java/com/netflix/conductor/server/ServletContextListner.java +++ b/server/src/main/java/com/netflix/conductor/server/ServletContextListner.java @@ -1,36 +1,32 @@ /** * Copyright 2017 Netflix, Inc. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
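The slimmed-down ServerModule above delegates thread-pool creation to an ExecutorServiceProvider that is not shown in this hunk. A minimal sketch of what such a provider might look like follows; it simply carries over the behaviour of the removed configureExecutorService() method (a fixed pool with named worker threads), and the pool size is an assumption taken from the old default rather than from the new class.

import javax.inject.Provider;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical provider mirroring the removed configureExecutorService() logic.
// The real ExecutorServiceProvider in the codebase may differ.
public class ExecutorServiceProviderSketch implements Provider<ExecutorService> {

    // Assumption: same default the removed code used.
    private static final int MAX_THREADS = 50;

    @Override
    public ExecutorService get() {
        AtomicInteger count = new AtomicInteger(0);
        return Executors.newFixedThreadPool(MAX_THREADS, runnable -> {
            Thread thread = new Thread(runnable);
            thread.setName("conductor-worker-" + count.getAndIncrement());
            return thread;
        });
    }
}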
*/ /** - * + * */ package com.netflix.conductor.server; -import java.io.FileInputStream; -import java.util.HashMap; -import java.util.Map; -import java.util.Optional; -import java.util.Properties; - -import org.apache.log4j.PropertyConfigurator; -import org.eclipse.jetty.servlet.DefaultServlet; - import com.google.inject.Guice; import com.google.inject.Injector; import com.google.inject.servlet.GuiceServletContextListener; -import com.google.inject.servlet.ServletModule; + +import com.netflix.conductor.bootstrap.ModulesProvider; +import com.netflix.conductor.core.config.SystemPropertiesConfiguration; + +import org.apache.log4j.PropertyConfigurator; + +import java.io.FileInputStream; +import java.util.Optional; +import java.util.Properties; /** * @author Viren @@ -38,59 +34,39 @@ */ public class ServletContextListner extends GuiceServletContextListener { - @Override - protected Injector getInjector() { - - loadProperties(); - - ConductorConfig config = new ConductorConfig(); - ConductorServer server = new ConductorServer(config); - - return Guice.createInjector(server.getGuiceModule(), getSwagger()); - } - - private ServletModule getSwagger() { - - String resourceBasePath = ServletContextListner.class.getResource("/swagger-ui").toExternalForm(); - DefaultServlet ds = new DefaultServlet(); - - ServletModule sm = new ServletModule() { - @Override - protected void configureServlets() { - Map params = new HashMap<>(); - params.put("resourceBase", resourceBasePath); - params.put("redirectWelcome", "true"); - serve("/*").with(ds, params); - } - }; - - return sm; - - } - - private void loadProperties() { - try { - - String key = "conductor_properties"; - String propertyFile = Optional.ofNullable(System.getProperty(key)).orElse(System.getenv(key)); - if(propertyFile != null) { - System.out.println("Using " + propertyFile); - FileInputStream propFile = new FileInputStream(propertyFile); - Properties props = new Properties(System.getProperties()); - props.load(propFile); - System.setProperties(props); - } - - key = "log4j_properties"; - String log4jConfig = Optional.ofNullable(System.getProperty(key)).orElse(System.getenv(key)); - if(log4jConfig != null) { - PropertyConfigurator.configure(new FileInputStream(log4jConfig)); - } - - } catch (Exception e) { - System.err.println("Error loading properties " + e.getMessage()); - e.printStackTrace(); - } - } - + @Override + protected Injector getInjector() { + + loadProperties(); + + SystemPropertiesConfiguration config = new SystemPropertiesConfiguration(); + + return Guice.createInjector(new ModulesProvider(config).get()); + } + + private void loadProperties() { + try { + + String key = "conductor_properties"; + String propertyFile = Optional.ofNullable(System.getProperty(key)).orElse(System.getenv(key)); + if (propertyFile != null) { + System.out.println("Using " + propertyFile); + FileInputStream propFile = new FileInputStream(propertyFile); + Properties props = new Properties(System.getProperties()); + props.load(propFile); + System.setProperties(props); + } + + key = "log4j_properties"; + String log4jConfig = Optional.ofNullable(System.getProperty(key)).orElse(System.getenv(key)); + if (log4jConfig != null) { + PropertyConfigurator.configure(new FileInputStream(log4jConfig)); + } + + } catch (Exception e) { + System.err.println("Error loading properties " + e.getMessage()); + e.printStackTrace(); + } + } + } diff --git a/server/src/main/java/com/netflix/conductor/server/SwaggerModule.java 
b/server/src/main/java/com/netflix/conductor/server/SwaggerModule.java new file mode 100644 index 0000000000..812d679b03 --- /dev/null +++ b/server/src/main/java/com/netflix/conductor/server/SwaggerModule.java @@ -0,0 +1,25 @@ +package com.netflix.conductor.server; + +import com.google.inject.Scopes; +import com.google.inject.servlet.ServletModule; + +import org.eclipse.jetty.servlet.DefaultServlet; + +import java.util.HashMap; +import java.util.Map; + +public class SwaggerModule extends ServletModule { + + @Override + protected void configureServlets() { + bind(DefaultServlet.class).in(Scopes.SINGLETON); + Map params = new HashMap<>(); + params.put("resourceBase", getResourceBasePath()); + params.put("redirectWelcome", "true"); + serve("/*").with(DefaultServlet.class, params); + } + + private String getResourceBasePath() { + return SwaggerModule.class.getResource("/swagger-ui").toExternalForm(); + } +} diff --git a/server/src/main/resources/kitchenSink-ephemeralWorkflowWithEphemeralTasks.json b/server/src/main/resources/kitchenSink-ephemeralWorkflowWithEphemeralTasks.json new file mode 100644 index 0000000000..6901a577e9 --- /dev/null +++ b/server/src/main/resources/kitchenSink-ephemeralWorkflowWithEphemeralTasks.json @@ -0,0 +1,261 @@ +{ + "name": "kitchenSink-ephemeralWorkflowWithEphemeralTasks", + "workflowDef": { + "name": "ephemeralKitchenSinkEphemeralTasks", + "description": "Kitchensink ephemeral workflow with ephemeral tasks", + "version": 1, + "tasks": [ + { + "name": "task_10001", + "taskReferenceName": "task_10001", + "inputParameters": { + "mod": "${workflow.input.mod}", + "oddEven": "${workflow.input.oddEven}", + "env": { + "taskId": "${CPEWF_TASK_ID}", + "workflowId": "${HOSTNAME}" + } + }, + "type": "SIMPLE", + "taskDefinition": { + "ownerApp": null, + "createTime": null, + "updateTime": null, + "createdBy": null, + "updatedBy": null, + "name": "task_10001", + "description": "task_10001", + "retryCount": 1, + "timeoutSeconds": 0, + "inputKeys": [], + "outputKeys": [], + "timeoutPolicy": "TIME_OUT_WF", + "retryLogic": "FIXED", + "retryDelaySeconds": 60, + "responseTimeoutSeconds": 3600, + "concurrentExecLimit": null, + "inputTemplate": {} + } + }, + { + "name": "event_task", + "taskReferenceName": "event_0", + "inputParameters": { + "mod": "${workflow.input.mod}", + "oddEven": "${workflow.input.oddEven}" + }, + "type": "EVENT", + "sink": "conductor" + }, + { + "name": "dyntask", + "taskReferenceName": "task_2", + "inputParameters": { + "taskToExecute": "${workflow.input.task2Name}" + }, + "type": "DYNAMIC", + "dynamicTaskNameParam": "taskToExecute" + }, + { + "name": "oddEvenDecision", + "taskReferenceName": "oddEvenDecision", + "inputParameters": { + "oddEven": "${task_2.output.oddEven}" + }, + "type": "DECISION", + "caseValueParam": "oddEven", + "decisionCases": { + "0": [ + { + "name": "task_10004", + "taskReferenceName": "task_10004", + "inputParameters": { + "mod": "${task_2.output.mod}", + "oddEven": "${task_2.output.oddEven}" + }, + "type": "SIMPLE", + "taskDefinition": { + "ownerApp": null, + "createTime": null, + "updateTime": null, + "createdBy": null, + "updatedBy": null, + "name": "task_10004", + "description": "task_10004", + "retryCount": 1, + "timeoutSeconds": 0, + "inputKeys": [], + "outputKeys": [], + "timeoutPolicy": "TIME_OUT_WF", + "retryLogic": "FIXED", + "retryDelaySeconds": 60, + "responseTimeoutSeconds": 3600, + "concurrentExecLimit": null, + "inputTemplate": {} + } + }, + { + "name": "dynamic_fanout", + "taskReferenceName": "fanout1", + 
"inputParameters": { + "dynamicTasks": "${task_10004.output.dynamicTasks}", + "input": "${task_10004.output.inputs}" + }, + "type": "FORK_JOIN_DYNAMIC", + "dynamicForkTasksParam": "dynamicTasks", + "dynamicForkTasksInputParamName": "input" + }, + { + "name": "dynamic_join", + "taskReferenceName": "join1", + "type": "JOIN" + } + ], + "1": [ + { + "name": "fork_join", + "taskReferenceName": "forkx", + "type": "FORK_JOIN", + "forkTasks": [ + [ + { + "name": "task_100010", + "taskReferenceName": "task_100010", + "type": "SIMPLE", + "taskDefinition": { + "ownerApp": null, + "createTime": null, + "updateTime": null, + "createdBy": null, + "updatedBy": null, + "name": "task_100010", + "description": "task_100010", + "retryCount": 1, + "timeoutSeconds": 0, + "inputKeys": [], + "outputKeys": [], + "timeoutPolicy": "TIME_OUT_WF", + "retryLogic": "FIXED", + "retryDelaySeconds": 60, + "responseTimeoutSeconds": 3600, + "concurrentExecLimit": null, + "inputTemplate": {} + } + }, + { + "name": "sub_workflow_x", + "taskReferenceName": "wf3", + "inputParameters": { + "mod": "${task_10001.output.mod}", + "oddEven": "${task_10001.output.oddEven}" + }, + "type": "SUB_WORKFLOW", + "subWorkflowParam": { + "name": "sub_flow_1", + "version": 1 + } + } + ], + [ + { + "name": "task_100011", + "taskReferenceName": "task_100011", + "type": "SIMPLE", + "taskDefinition": { + "ownerApp": null, + "createTime": null, + "updateTime": null, + "createdBy": null, + "updatedBy": null, + "name": "task_100011", + "description": "task_100011", + "retryCount": 1, + "timeoutSeconds": 0, + "inputKeys": [], + "outputKeys": [], + "timeoutPolicy": "TIME_OUT_WF", + "retryLogic": "FIXED", + "retryDelaySeconds": 60, + "responseTimeoutSeconds": 3600, + "concurrentExecLimit": null, + "inputTemplate": {} + } + }, + { + "name": "sub_workflow_x", + "taskReferenceName": "wf4", + "inputParameters": { + "mod": "${task_10001.output.mod}", + "oddEven": "${task_10001.output.oddEven}" + }, + "type": "SUB_WORKFLOW", + "subWorkflowParam": { + "name": "sub_flow_1", + "version": 1 + } + } + ] + ] + }, + { + "name": "join", + "taskReferenceName": "join2", + "type": "JOIN", + "joinOn": [ + "wf3", + "wf4" + ] + } + ] + } + }, + { + "name": "search_elasticsearch", + "taskReferenceName": "get_es_1", + "inputParameters": { + "http_request": { + "uri": "http://localhost:9200/conductor/_search?size=10", + "method": "GET" + } + }, + "type": "HTTP" + }, + { + "name": "task_100030", + "taskReferenceName": "task_100030", + "inputParameters": { + "statuses": "${get_es_1.output..status}", + "workflowIds": "${get_es_1.output..workflowId}" + }, + "type": "SIMPLE", + "taskDefinition": { + "ownerApp": null, + "createTime": null, + "updateTime": null, + "createdBy": null, + "updatedBy": null, + "name": "task_100030", + "description": "task_100030", + "retryCount": 1, + "timeoutSeconds": 0, + "inputKeys": [], + "outputKeys": [], + "timeoutPolicy": "TIME_OUT_WF", + "retryLogic": "FIXED", + "retryDelaySeconds": 60, + "responseTimeoutSeconds": 3600, + "concurrentExecLimit": null, + "inputTemplate": {} + } + } + ], + "outputParameters": { + "statues": "${get_es_1.output..status}", + "workflowIds": "${get_es_1.output..workflowId}" + }, + "schemaVersion": 2 + }, + "input": { + "task2Name": "task_10005" + } +} diff --git a/server/src/main/resources/kitchenSink-ephemeralWorkflowWithStoredTasks.json b/server/src/main/resources/kitchenSink-ephemeralWorkflowWithStoredTasks.json new file mode 100644 index 0000000000..d47081d519 --- /dev/null +++ 
b/server/src/main/resources/kitchenSink-ephemeralWorkflowWithStoredTasks.json @@ -0,0 +1,166 @@ +{ + "name": "kitchenSink-ephemeralWorkflowWithStoredTasks", + "workflowDef": { + "name": "ephemeralKitchenSinkStoredTasks", + "description": "kitchensink workflow definition", + "version": 1, + "tasks": [ + { + "name": "task_1", + "taskReferenceName": "task_1", + "inputParameters": { + "mod": "${workflow.input.mod}", + "oddEven": "${workflow.input.oddEven}", + "env": { + "taskId": "${CPEWF_TASK_ID}", + "workflowId": "${HOSTNAME}" + } + }, + "type": "SIMPLE" + }, + { + "name": "event_task", + "taskReferenceName": "event_0", + "inputParameters": { + "mod": "${workflow.input.mod}", + "oddEven": "${workflow.input.oddEven}" + }, + "type": "EVENT", + "sink": "conductor" + }, + { + "name": "dyntask", + "taskReferenceName": "task_2", + "inputParameters": { + "taskToExecute": "${workflow.input.task2Name}" + }, + "type": "DYNAMIC", + "dynamicTaskNameParam": "taskToExecute" + }, + { + "name": "oddEvenDecision", + "taskReferenceName": "oddEvenDecision", + "inputParameters": { + "oddEven": "${task_2.output.oddEven}" + }, + "type": "DECISION", + "caseValueParam": "oddEven", + "decisionCases": { + "0": [ + { + "name": "task_4", + "taskReferenceName": "task_4", + "inputParameters": { + "mod": "${task_2.output.mod}", + "oddEven": "${task_2.output.oddEven}" + }, + "type": "SIMPLE" + }, + { + "name": "dynamic_fanout", + "taskReferenceName": "fanout1", + "inputParameters": { + "dynamicTasks": "${task_4.output.dynamicTasks}", + "input": "${task_4.output.inputs}" + }, + "type": "FORK_JOIN_DYNAMIC", + "dynamicForkTasksParam": "dynamicTasks", + "dynamicForkTasksInputParamName": "input" + }, + { + "name": "dynamic_join", + "taskReferenceName": "join1", + "type": "JOIN" + } + ], + "1": [ + { + "name": "fork_join", + "taskReferenceName": "forkx", + "type": "FORK_JOIN", + "forkTasks": [ + [ + { + "name": "task_10", + "taskReferenceName": "task_10", + "type": "SIMPLE" + }, + { + "name": "sub_workflow_x", + "taskReferenceName": "wf3", + "inputParameters": { + "mod": "${task_1.output.mod}", + "oddEven": "${task_1.output.oddEven}" + }, + "type": "SUB_WORKFLOW", + "subWorkflowParam": { + "name": "sub_flow_1", + "version": 1 + } + } + ], + [ + { + "name": "task_11", + "taskReferenceName": "task_11", + "type": "SIMPLE" + }, + { + "name": "sub_workflow_x", + "taskReferenceName": "wf4", + "inputParameters": { + "mod": "${task_1.output.mod}", + "oddEven": "${task_1.output.oddEven}" + }, + "type": "SUB_WORKFLOW", + "subWorkflowParam": { + "name": "sub_flow_1", + "version": 1 + } + } + ] + ] + }, + { + "name": "join", + "taskReferenceName": "join2", + "type": "JOIN", + "joinOn": [ + "wf3", + "wf4" + ] + } + ] + } + }, + { + "name": "search_elasticsearch", + "taskReferenceName": "get_es_1", + "inputParameters": { + "http_request": { + "uri": "http://localhost:9200/conductor/_search?size=10", + "method": "GET" + } + }, + "type": "HTTP" + }, + { + "name": "task_30", + "taskReferenceName": "task_30", + "inputParameters": { + "statuses": "${get_es_1.output..status}", + "workflowIds": "${get_es_1.output..workflowId}" + }, + "type": "SIMPLE" + } + ], + "outputParameters": { + "statues": "${get_es_1.output..status}", + "workflowIds": "${get_es_1.output..workflowId}" + }, + "schemaVersion": 2 + }, + "input": { + "task2Name": "task_5" + } +} diff --git a/settings.gradle b/settings.gradle index 42fc5d9bbb..1897436005 100644 --- a/settings.gradle +++ b/settings.gradle @@ -1,3 +1,7 @@ rootProject.name='conductor' -include 'common', 'core', 
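Both kitchenSink JSON payloads above are start-workflow requests that carry their workflow definition inline, and the first one additionally embeds task definitions inside its workflow tasks. The sketch below builds the same kind of ephemeral definition programmatically, using only classes and setters that appear elsewhere in this change; the names are placeholders, and the request and client plumbing around the definition is intentionally omitted.

import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.workflow.TaskType;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;

// Programmatic counterpart of the JSON payloads above: an inline ("ephemeral")
// workflow definition whose task also carries an inline task definition.
public class EphemeralDefinitionSketch {

    public static WorkflowDef buildEphemeralDefinition() {
        TaskDef taskDef = new TaskDef();
        taskDef.setName("ephemeral_task_def");   // placeholder name
        taskDef.setRetryCount(1);
        taskDef.setTimeoutSeconds(0);

        WorkflowTask task = new WorkflowTask();
        task.setName("ephemeral_task_def");
        task.setTaskReferenceName("ephemeral_task_ref"); // placeholder reference
        task.setWorkflowTaskType(TaskType.SIMPLE);
        task.setTaskDefinition(taskDef); // inline definition makes the task ephemeral

        WorkflowDef workflowDef = new WorkflowDef();
        workflowDef.setName("ephemeralKitchenSinkSketch"); // placeholder name
        workflowDef.setSchemaVersion(2);
        workflowDef.getTasks().add(task);
        return workflowDef;
    }
}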
'redis-persistence','es2-persistence', 'es5-persistence','mysql-persistence','jersey', 'client', 'test-harness', 'ui', 'contribs', 'server' + +include 'client','common','contribs','core', 'es5-persistence','jersey' +include 'mysql-persistence', 'redis-persistence','server','test-harness','ui' +include 'grpc', 'grpc-server', 'grpc-client' + rootProject.children.each {it.name="conductor-${it.name}"} diff --git a/test-harness/build.gradle b/test-harness/build.gradle index 5bcc968280..d2bb313705 100644 --- a/test-harness/build.gradle +++ b/test-harness/build.gradle @@ -1,26 +1,33 @@ configurations.all { resolutionStrategy { - force 'com.fasterxml.jackson.core:jackson-core:2.7.5' + force 'com.fasterxml.jackson.core:jackson-core:2.7.5' } } dependencies { - testCompile project(':conductor-core') - testCompile project(':conductor-jersey') - testCompile project(':conductor-redis-persistence').sourceSets.test.output - testCompile project(':conductor-client') - testCompile project(':conductor-server') - testCompile "org.elasticsearch:elasticsearch:${revElasticSearch5}" - testCompile "org.eclipse.jetty:jetty-server:${revJetteyServer}" - testCompile "org.eclipse.jetty:jetty-servlet:${revJettyServlet}" - testCompile "org.rarefiedredis.redis:redis-java:${revRarefiedRedis}" - testCompile "com.sun.jersey.contribs:jersey-guice:${revJerseyGuice}" + testCompile project(':conductor-core') + testCompile project(':conductor-jersey') + testCompile project(':conductor-redis-persistence').sourceSets.test.output + testCompile project(':conductor-client') + testCompile project(':conductor-server') + testCompile project(':conductor-grpc-client') + testCompile project(':conductor-grpc-server') + testCompile "org.elasticsearch:elasticsearch:${revElasticSearch5}" + testCompile "org.eclipse.jetty:jetty-server:${revJetteyServer}" + testCompile "org.eclipse.jetty:jetty-servlet:${revJettyServlet}" + testCompile "org.rarefiedredis.redis:redis-java:${revRarefiedRedis}" + testCompile "com.sun.jersey.contribs:jersey-guice:${revJerseyGuice}" - testCompile "com.google.inject.extensions:guice-servlet:${revGuiceServlet}" - testCompile "io.swagger:swagger-jersey-jaxrs:${revSwaggerJersey}" + testCompile "com.google.inject.extensions:guice-servlet:${revGuiceServlet}" + testCompile "io.swagger:swagger-jersey-jaxrs:${revSwaggerJersey}" +} + +test { + // Because tests in the module bind to ports they shouldn't be executed in parallel. 
+// maxParallelForks = 1 } task server(type: JavaExec) { - main = 'com.netflix.conductor.demo.Main' - classpath = sourceSets.test.runtimeClasspath -} \ No newline at end of file + main = 'com.netflix.conductor.demo.Main' + classpath = sourceSets.test.runtimeClasspath +} diff --git a/test-harness/src/test/java/com/netflix/conductor/tests/integration/AbstractEndToEndTest.java b/test-harness/src/test/java/com/netflix/conductor/tests/integration/AbstractEndToEndTest.java new file mode 100644 index 0000000000..42e00bae44 --- /dev/null +++ b/test-harness/src/test/java/com/netflix/conductor/tests/integration/AbstractEndToEndTest.java @@ -0,0 +1,159 @@ +package com.netflix.conductor.tests.integration; + +import com.netflix.conductor.common.metadata.tasks.TaskDef; +import com.netflix.conductor.common.metadata.workflow.TaskType; +import com.netflix.conductor.common.metadata.workflow.WorkflowDef; +import com.netflix.conductor.common.metadata.workflow.WorkflowTask; +import com.netflix.conductor.common.run.Workflow; +import org.junit.Test; + +import java.util.Arrays; +import java.util.LinkedList; +import java.util.List; +import java.util.Optional; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +public abstract class AbstractEndToEndTest { + + private static final String TASK_DEFINITION_PREFIX = "task_"; + private static final String DEFAULT_DESCRIPTION = "description"; + // Represents null value deserialized from the redis in memory db + private static final String DEFAULT_NULL_VALUE = "null"; + + @Test + public void testEphemeralWorkflowsWithStoredTasks() { + String workflowExecutionName = "testEphemeralWorkflow"; + + createAndRegisterTaskDefinitions("storedTaskDef", 5); + WorkflowDef workflowDefinition = createWorkflowDefinition(workflowExecutionName); + WorkflowTask workflowTask1 = createWorkflowTask("storedTaskDef1"); + WorkflowTask workflowTask2 = createWorkflowTask("storedTaskDef2"); + workflowDefinition.getTasks().addAll(Arrays.asList(workflowTask1, workflowTask2)); + + String workflowId = startWorkflow(workflowExecutionName, workflowDefinition); + assertNotNull(workflowId); + + Workflow workflow = getWorkflow(workflowId, true); + WorkflowDef ephemeralWorkflow = workflow.getWorkflowDefinition(); + assertNotNull(ephemeralWorkflow); + assertEquals(workflowDefinition, ephemeralWorkflow); + } + + @Test + public void testEphemeralWorkflowsWithEphemeralTasks() { + String workflowExecutionName = "ephemeralWorkflowWithEphemeralTasks"; + + WorkflowDef workflowDefinition = createWorkflowDefinition(workflowExecutionName); + WorkflowTask workflowTask1 = createWorkflowTask("ephemeralTask1"); + TaskDef taskDefinition1 = createTaskDefinition("ephemeralTaskDef1"); + workflowTask1.setTaskDefinition(taskDefinition1); + WorkflowTask workflowTask2 = createWorkflowTask("ephemeralTask2"); + TaskDef taskDefinition2 = createTaskDefinition("ephemeralTaskDef2"); + workflowTask2.setTaskDefinition(taskDefinition2); + workflowDefinition.getTasks().addAll(Arrays.asList(workflowTask1, workflowTask2)); + + String workflowId = startWorkflow(workflowExecutionName, workflowDefinition); + assertNotNull(workflowId); + + Workflow workflow = getWorkflow(workflowId, true); + WorkflowDef ephemeralWorkflow = workflow.getWorkflowDefinition(); + assertNotNull(ephemeralWorkflow); + assertEquals(workflowDefinition, ephemeralWorkflow); + + List ephemeralTasks = ephemeralWorkflow.getTasks(); + assertEquals(2, ephemeralTasks.size()); + for (WorkflowTask ephemeralTask : ephemeralTasks) { + 
assertNotNull(ephemeralTask.getTaskDefinition()); + } + } + + @Test + public void testEphemeralWorkflowsWithEphemeralAndStoredTasks() { + createAndRegisterTaskDefinitions("storedTask", 1); + + WorkflowDef workflowDefinition = createWorkflowDefinition("testEphemeralWorkflowsWithEphemeralAndStoredTasks"); + + WorkflowTask workflowTask1 = createWorkflowTask("ephemeralTask1"); + TaskDef taskDefinition1 = createTaskDefinition("ephemeralTaskDef1"); + workflowTask1.setTaskDefinition(taskDefinition1); + + WorkflowTask workflowTask2 = createWorkflowTask("storedTask0"); + + workflowDefinition.getTasks().add(workflowTask1); + workflowDefinition.getTasks().add(workflowTask2); + + String workflowExecutionName = "ephemeralWorkflowWithEphemeralAndStoredTasks"; + + String workflowId = startWorkflow(workflowExecutionName, workflowDefinition); + assertNotNull(workflowId); + + Workflow workflow = getWorkflow(workflowId, true); + WorkflowDef ephemeralWorkflow = workflow.getWorkflowDefinition(); + assertNotNull(ephemeralWorkflow); + assertEquals(workflowDefinition, ephemeralWorkflow); + + TaskDef storedTaskDefinition = getTaskDefinition("storedTask0"); + List tasks = ephemeralWorkflow.getTasks(); + assertEquals(2, tasks.size()); + assertEquals(workflowTask1, tasks.get(0)); + TaskDef currentStoredTaskDefinition = tasks.get(1).getTaskDefinition(); + assertNotNull(currentStoredTaskDefinition); + assertEquals(storedTaskDefinition, currentStoredTaskDefinition); + + } + + protected WorkflowTask createWorkflowTask(String name) { + WorkflowTask workflowTask = new WorkflowTask(); + workflowTask.setName(name); + workflowTask.setWorkflowTaskType(TaskType.SIMPLE); + workflowTask.setTaskReferenceName(name); + workflowTask.setDescription(getDefaultDescription(name)); + workflowTask.setDynamicTaskNameParam(DEFAULT_NULL_VALUE); + workflowTask.setCaseValueParam(DEFAULT_NULL_VALUE); + workflowTask.setCaseExpression(DEFAULT_NULL_VALUE); + workflowTask.setDynamicForkTasksParam(DEFAULT_NULL_VALUE); + workflowTask.setDynamicForkTasksInputParamName(DEFAULT_NULL_VALUE); + workflowTask.setSink(DEFAULT_NULL_VALUE); + return workflowTask; + } + + protected TaskDef createTaskDefinition(String name) { + TaskDef taskDefinition = new TaskDef(); + taskDefinition.setName(name); + return taskDefinition; + } + + protected WorkflowDef createWorkflowDefinition(String workflowName) { + WorkflowDef workflowDefinition = new WorkflowDef(); + workflowDefinition.setName(workflowName); + workflowDefinition.setDescription(getDefaultDescription(workflowName)); + workflowDefinition.setFailureWorkflow(DEFAULT_NULL_VALUE); + return workflowDefinition; + } + + protected List createAndRegisterTaskDefinitions(String prefixTaskDefinition, int numberOfTaskDefinitions) { + String prefix = Optional.ofNullable(prefixTaskDefinition).orElse(TASK_DEFINITION_PREFIX); + List definitions = new LinkedList<>(); + for (int i = 0; i < numberOfTaskDefinitions; i++) { + TaskDef def = new TaskDef(prefix + i, "task " + i + DEFAULT_DESCRIPTION); + def.setTimeoutPolicy(TaskDef.TimeoutPolicy.RETRY); + definitions.add(def); + } + this.registerTaskDefinitions(definitions); + return definitions; + } + + private String getDefaultDescription(String nameResource) { + return nameResource + " " + DEFAULT_DESCRIPTION; + } + + protected abstract String startWorkflow(String workflowExecutionName, WorkflowDef workflowDefinition); + + protected abstract Workflow getWorkflow(String workflowId, boolean includeTasks); + + protected abstract TaskDef getTaskDefinition(String taskName); + + protected 
abstract void registerTaskDefinitions(List taskDefinitionList); +} diff --git a/test-harness/src/test/java/com/netflix/conductor/tests/integration/AbstractWorkflowServiceTest.java b/test-harness/src/test/java/com/netflix/conductor/tests/integration/AbstractWorkflowServiceTest.java new file mode 100644 index 0000000000..ac61fdf22a --- /dev/null +++ b/test-harness/src/test/java/com/netflix/conductor/tests/integration/AbstractWorkflowServiceTest.java @@ -0,0 +1,4463 @@ +/** + * Copyright 2016 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * + */ +package com.netflix.conductor.tests.integration; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.util.concurrent.Uninterruptibles; +import com.netflix.conductor.common.metadata.tasks.PollData; +import com.netflix.conductor.common.metadata.tasks.Task; +import com.netflix.conductor.common.metadata.tasks.Task.Status; +import com.netflix.conductor.common.metadata.tasks.TaskDef; +import com.netflix.conductor.common.metadata.tasks.TaskDef.RetryLogic; +import com.netflix.conductor.common.metadata.tasks.TaskDef.TimeoutPolicy; +import com.netflix.conductor.common.metadata.tasks.TaskResult; +import com.netflix.conductor.common.metadata.workflow.DynamicForkJoinTaskList; +import com.netflix.conductor.common.metadata.workflow.RerunWorkflowRequest; +import com.netflix.conductor.common.metadata.workflow.SubWorkflowParams; +import com.netflix.conductor.common.metadata.workflow.TaskType; +import com.netflix.conductor.common.metadata.workflow.WorkflowDef; +import com.netflix.conductor.common.metadata.workflow.WorkflowTask; +import com.netflix.conductor.common.run.Workflow; +import com.netflix.conductor.common.run.Workflow.WorkflowStatus; +import com.netflix.conductor.core.WorkflowContext; +import com.netflix.conductor.core.execution.ApplicationException; +import com.netflix.conductor.core.execution.SystemTaskType; +import com.netflix.conductor.core.execution.WorkflowExecutor; +import com.netflix.conductor.core.execution.WorkflowSweeper; +import com.netflix.conductor.core.execution.tasks.SubWorkflow; +import com.netflix.conductor.core.metadata.MetadataMapperService; +import com.netflix.conductor.dao.QueueDAO; +import com.netflix.conductor.service.ExecutionService; +import com.netflix.conductor.service.MetadataService; +import org.apache.commons.lang.StringUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.FixMethodOrder; +import org.junit.Ignore; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.runners.MethodSorters; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.inject.Inject; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +import static com.netflix.conductor.common.metadata.tasks.Task.Status.COMPLETED; +import static com.netflix.conductor.common.metadata.tasks.Task.Status.FAILED; +import static com.netflix.conductor.common.metadata.tasks.Task.Status.IN_PROGRESS; +import static com.netflix.conductor.common.metadata.tasks.Task.Status.SCHEDULED; +import static com.netflix.conductor.common.metadata.tasks.Task.Status.TIMED_OUT; +import static com.netflix.conductor.common.run.Workflow.WorkflowStatus.RUNNING; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static 
org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +@FixMethodOrder(MethodSorters.NAME_ASCENDING) +public abstract class AbstractWorkflowServiceTest { + + private static final Logger logger = LoggerFactory.getLogger(AbstractWorkflowServiceTest.class); + + private static final String COND_TASK_WF = "ConditionalTaskWF"; + + private static final String FORK_JOIN_NESTED_WF = "FanInOutNestedTest"; + + private static final String FORK_JOIN_WF = "FanInOutTest"; + + private static final String DYNAMIC_FORK_JOIN_WF = "DynamicFanInOutTest"; + + private static final String DYNAMIC_FORK_JOIN_WF_LEGACY = "DynamicFanInOutTestLegacy"; + + private static final int RETRY_COUNT = 1; + private static final String JUNIT_TEST_WF_NON_RESTARTABLE = "junit_test_wf_non_restartable"; + private static final String WF_WITH_SUB_WF = "WorkflowWithSubWorkflow"; + + @Rule + public final ExpectedException expectedException = ExpectedException.none(); + + @Inject + protected ExecutionService workflowExecutionService; + + @Inject + protected SubWorkflow subworkflow; + + @Inject + protected MetadataService metadataService; + + @Inject + protected WorkflowSweeper workflowSweeper; + + @Inject + protected QueueDAO queueDAO; + + @Inject + protected WorkflowExecutor workflowExecutor; + + @Inject + protected MetadataMapperService metadataMapperService; + + private static boolean registered; + + private static List taskDefs; + + protected static final String LINEAR_WORKFLOW_T1_T2 = "junit_test_wf"; + + private static final String LINEAR_WORKFLOW_T1_T2_SW = "junit_test_wf_sw"; + + private static final String LONG_RUNNING = "longRunningWf"; + + private static final String TEST_WORKFLOW_NAME_3 = "junit_test_wf3"; + + @Before + public void init() { + System.setProperty("EC2_REGION", "us-east-1"); + System.setProperty("EC2_AVAILABILITY_ZONE", "us-east-1c"); + if (registered) { + return; + } + + + WorkflowContext.set(new WorkflowContext("junit_app")); + for (int i = 0; i < 21; i++) { + + String name = "junit_task_" + i; + if (notFoundSafeGetTaskDef(name) != null) { + continue; + } + + TaskDef task = new TaskDef(); + task.setName(name); + task.setTimeoutSeconds(120); + task.setRetryCount(RETRY_COUNT); + metadataService.registerTaskDef(Collections.singletonList(task)); + } + + for (int i = 0; i < 5; i++) { + + String name = "junit_task_0_RT_" + i; + if (notFoundSafeGetTaskDef(name) != null) { + continue; + } + + TaskDef task = new TaskDef(); + task.setName(name); + task.setTimeoutSeconds(120); + task.setRetryCount(0); + metadataService.registerTaskDef(Collections.singletonList(task)); + } + + TaskDef task = new TaskDef(); + task.setName("short_time_out"); + task.setTimeoutSeconds(5); + task.setRetryCount(RETRY_COUNT); + metadataService.registerTaskDef(Collections.singletonList(task)); + + WorkflowDef def = new WorkflowDef(); + def.setName(LINEAR_WORKFLOW_T1_T2); + def.setDescription(def.getName()); + def.setVersion(1); + def.setInputParameters(Arrays.asList("param1", "param2")); + Map outputParameters = new HashMap<>(); + outputParameters.put("o1", "${workflow.input.param1}"); + outputParameters.put("o2", "${t2.output.uuid}"); + outputParameters.put("o3", "${t1.output.op}"); + def.setOutputParameters(outputParameters); + def.setFailureWorkflow("$workflow.input.failureWfName"); + def.setSchemaVersion(2); + LinkedList wftasks = new LinkedList<>(); + + WorkflowTask wft1 = new WorkflowTask(); + wft1.setName("junit_task_1"); + Map ip1 = new HashMap<>(); + ip1.put("p1", 
"${workflow.input.param1}"); + ip1.put("p2", "${workflow.input.param2}"); + wft1.setInputParameters(ip1); + wft1.setTaskReferenceName("t1"); + + WorkflowTask wft2 = new WorkflowTask(); + wft2.setName("junit_task_2"); + Map ip2 = new HashMap<>(); + ip2.put("tp1", "${workflow.input.param1}"); + ip2.put("tp2", "${t1.output.op}"); + wft2.setInputParameters(ip2); + wft2.setTaskReferenceName("t2"); + + wftasks.add(wft1); + wftasks.add(wft2); + def.setTasks(wftasks); + + WorkflowTask wft3 = new WorkflowTask(); + wft3.setName("junit_task_3"); + Map ip3 = new HashMap<>(); + ip3.put("tp1", "${workflow.input.param1}"); + ip3.put("tp2", "${t1.output.op}"); + wft3.setInputParameters(ip3); + wft3.setTaskReferenceName("t3"); + + WorkflowDef def2 = new WorkflowDef(); + def2.setName(TEST_WORKFLOW_NAME_3); + def2.setDescription(def2.getName()); + def2.setVersion(1); + def2.setInputParameters(Arrays.asList("param1", "param2")); + LinkedList wftasks2 = new LinkedList<>(); + + wftasks2.add(wft1); + wftasks2.add(wft2); + wftasks2.add(wft3); + def2.setSchemaVersion(2); + def2.setTasks(wftasks2); + + try { + + WorkflowDef[] wdsf = new WorkflowDef[]{def, def2}; + for (WorkflowDef wd : wdsf) { + metadataService.updateWorkflowDef(wd); + } + createForkJoinWorkflow(); + def.setName(LONG_RUNNING); + metadataService.updateWorkflowDef(def); + } catch (Exception e) { + } + + taskDefs = metadataService.getTaskDefs(); + + registered = true; + } + + private TaskDef notFoundSafeGetTaskDef(String name) { + try { + return metadataService.getTaskDef(name); + } catch (ApplicationException e) { + if (e.getCode() == ApplicationException.Code.NOT_FOUND) { + return null; + } else { + throw e; + } + } + } + + @Test + public void testWorkflowWithNoTasks() { + + WorkflowDef empty = new WorkflowDef(); + empty.setName("empty_workflow"); + empty.setSchemaVersion(2); + metadataService.registerWorkflowDef(empty); + + String id = startOrLoadWorkflowExecution(empty.getName(), 1, "testWorkflowWithNoTasks", new HashMap<>(), null, null); + assertNotNull(id); + Workflow workflow = workflowExecutionService.getExecutionStatus(id, true); + assertNotNull(workflow); + assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); + assertEquals(0, workflow.getTasks().size()); + } + + @Test + public void testTaskDefTemplate() throws Exception { + + System.setProperty("STACK2", "test_stack"); + TaskDef templatedTask = new TaskDef(); + templatedTask.setName("templated_task"); + Map httpRequest = new HashMap<>(); + httpRequest.put("method", "GET"); + httpRequest.put("vipStack", "${STACK2}"); + httpRequest.put("uri", "/get/something"); + Map body = new HashMap<>(); + body.put("inputPaths", Arrays.asList("${workflow.input.path1}", "${workflow.input.path2}")); + body.put("requestDetails", "${workflow.input.requestDetails}"); + body.put("outputPath", "${workflow.input.outputPath}"); + httpRequest.put("body", body); + templatedTask.getInputTemplate().put("http_request", httpRequest); + metadataService.registerTaskDef(Arrays.asList(templatedTask)); + + WorkflowDef templateWf = new WorkflowDef(); + templateWf.setName("template_workflow"); + WorkflowTask wft = new WorkflowTask(); + wft.setName(templatedTask.getName()); + wft.setWorkflowTaskType(TaskType.SIMPLE); + wft.setTaskReferenceName("t0"); + templateWf.getTasks().add(wft); + templateWf.setSchemaVersion(2); + metadataService.registerWorkflowDef(templateWf); + + Map requestDetails = new HashMap<>(); + requestDetails.put("key1", "value1"); + requestDetails.put("key2", 42); + + Map input = new HashMap<>(); + 
input.put("path1", "file://path1"); + input.put("path2", "file://path2"); + input.put("outputPath", "s3://bucket/outputPath"); + input.put("requestDetails", requestDetails); + + String id = startOrLoadWorkflowExecution(templateWf.getName(), 1, "testTaskDefTemplate", input, null, null); + assertNotNull(id); + Workflow workflow = workflowExecutionService.getExecutionStatus(id, true); + assertNotNull(workflow); + assertTrue(workflow.getReasonForIncompletion(), !workflow.getStatus().isTerminal()); + assertEquals(1, workflow.getTasks().size()); + Task task = workflow.getTasks().get(0); + Map taskInput = task.getInputData(); + assertNotNull(taskInput); + assertTrue(taskInput.containsKey("http_request")); + assertTrue(taskInput.get("http_request") instanceof Map); + + ObjectMapper om = new ObjectMapper(); + + //Use the commented sysout to get the string value + //System.out.println(om.writeValueAsString(om.writeValueAsString(taskInput))); + String expected = "{\"http_request\":{\"method\":\"GET\",\"vipStack\":\"test_stack\",\"body\":{\"requestDetails\":{\"key1\":\"value1\",\"key2\":42},\"outputPath\":\"s3://bucket/outputPath\",\"inputPaths\":[\"file://path1\",\"file://path2\"]},\"uri\":\"/get/something\"}}"; + assertEquals(expected, om.writeValueAsString(taskInput)); + } + + + @Test + public void testWorkflowSchemaVersion() { + WorkflowDef ver2 = new WorkflowDef(); + ver2.setSchemaVersion(2); + ver2.setName("Test_schema_version2"); + ver2.setVersion(1); + + WorkflowDef ver1 = new WorkflowDef(); + ver1.setName("Test_schema_version1"); + ver1.setVersion(1); + + metadataService.updateWorkflowDef(ver1); + metadataService.updateWorkflowDef(ver2); + + WorkflowDef found = metadataService.getWorkflowDef(ver2.getName(), 1); + assertEquals(2, found.getSchemaVersion()); + + WorkflowDef found1 = metadataService.getWorkflowDef(ver1.getName(), 1); + assertEquals(1, found1.getSchemaVersion()); + + } + + @Test + public void testForkJoin() throws Exception { + try { + createForkJoinWorkflow(); + } catch (Exception e) { + } + String taskName = "junit_task_1"; + TaskDef taskDef = notFoundSafeGetTaskDef(taskName); + taskDef.setRetryCount(0); + taskDef.setTimeoutSeconds(0); + metadataService.updateTaskDef(taskDef); + + taskName = "junit_task_2"; + taskDef = notFoundSafeGetTaskDef(taskName); + taskDef.setRetryCount(0); + taskDef.setTimeoutSeconds(0); + metadataService.updateTaskDef(taskDef); + + taskName = "junit_task_3"; + taskDef = notFoundSafeGetTaskDef(taskName); + taskDef.setRetryCount(0); + taskDef.setTimeoutSeconds(0); + metadataService.updateTaskDef(taskDef); + + taskName = "junit_task_4"; + taskDef = notFoundSafeGetTaskDef(taskName); + taskDef.setRetryCount(0); + taskDef.setTimeoutSeconds(0); + metadataService.updateTaskDef(taskDef); + + Map input = new HashMap<>(); + String workflowId = startOrLoadWorkflowExecution(FORK_JOIN_WF, 1, "fanouttest", input, null, null); + System.out.println("testForkJoin.wfid=" + workflowId); + printTaskStatuses(workflowId, "initiated"); + + Task task1 = workflowExecutionService.poll("junit_task_1", "test"); + assertNotNull(task1); + assertTrue(workflowExecutionService.ackTaskReceived(task1.getTaskId())); + + Task task2 = workflowExecutionService.poll("junit_task_2", "test"); + assertNotNull(task2); + assertTrue(workflowExecutionService.ackTaskReceived(task2.getTaskId())); + + Task task3 = workflowExecutionService.poll("junit_task_3", "test"); + assertNull(task3); + + task1.setStatus(COMPLETED); + workflowExecutionService.updateTask(task1); + + Workflow workflow = 
workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals("Found " + workflow.getTasks(), RUNNING, workflow.getStatus()); + printTaskStatuses(workflow, "T1 completed"); + + task3 = workflowExecutionService.poll("junit_task_3", "test"); + assertNotNull(task3); + + task2.setStatus(COMPLETED); + task3.setStatus(COMPLETED); + + ExecutorService executorService = Executors.newFixedThreadPool(2); + Future future1 = executorService.submit(() -> { + try { + workflowExecutionService.updateTask(task2); + } catch (Exception e) { + throw new RuntimeException(e); + } + + }); + future1.get(); + + final Task _t3 = task3; + Future future2 = executorService.submit(() -> { + try { + workflowExecutionService.updateTask(_t3); + } catch (Exception e) { + throw new RuntimeException(e); + } + + }); + future2.get(); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + printTaskStatuses(workflow, "T2 T3 completed"); + assertEquals("Found " + workflow.getTasks(), RUNNING, workflow.getStatus()); + assertEquals("Found " + workflow.getTasks().stream().map(Task::getTaskType).collect(Collectors.toList()), 6, workflow.getTasks().size()); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals("Found " + workflow.getTasks(), RUNNING, workflow.getStatus()); + assertTrue("Found " + workflow.getTasks().stream().map(t -> t.getReferenceTaskName() + "." + t.getStatus()).collect(Collectors.toList()), workflow.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t4"))); + + Task t4 = workflowExecutionService.poll("junit_task_4", "test"); + assertNotNull(t4); + t4.setStatus(COMPLETED); + workflowExecutionService.updateTask(t4); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals("Found " + workflow.getTasks(), WorkflowStatus.COMPLETED, workflow.getStatus()); + printTaskStatuses(workflow, "All completed"); + } + + @Test + public void testForkJoinNested() { + + createForkJoinNestedWorkflow(); + + Map input = new HashMap<>(); + input.put("case", "a"); //This should execute t16 and t19 + String wfid = startOrLoadWorkflowExecution("forkJoinNested", FORK_JOIN_NESTED_WF, 1, "fork_join_nested_test", input, null, null); + System.out.println("testForkJoinNested.wfid=" + wfid); + + Workflow wf = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(wf); + assertEquals(RUNNING, wf.getStatus()); + + assertTrue(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t11"))); + assertTrue(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t12"))); + assertTrue(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t13"))); + assertTrue(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("fork1"))); + assertTrue(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("fork2"))); + assertFalse(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t16"))); + assertFalse(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t1"))); + assertFalse(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t2"))); + + + Task t1 = workflowExecutionService.poll("junit_task_11", "test"); + assertTrue(workflowExecutionService.ackTaskReceived(t1.getTaskId())); + + Task t2 = workflowExecutionService.poll("junit_task_12", "test"); + 
assertTrue(workflowExecutionService.ackTaskReceived(t2.getTaskId())); + + Task t3 = workflowExecutionService.poll("junit_task_13", "test"); + assertTrue(workflowExecutionService.ackTaskReceived(t3.getTaskId())); + + assertNotNull(t1); + assertNotNull(t2); + assertNotNull(t3); + + t1.setStatus(COMPLETED); + t2.setStatus(COMPLETED); + t3.setStatus(COMPLETED); + + workflowExecutionService.updateTask(t1); + workflowExecutionService.updateTask(t2); + workflowExecutionService.updateTask(t3); + Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); + + wf = workflowExecutionService.getExecutionStatus(wfid, true); + + assertTrue(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t16"))); + assertTrue(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t14"))); + + String[] tasks = new String[]{"junit_task_14", "junit_task_16"}; + for (String tt : tasks) { + Task polled = workflowExecutionService.poll(tt, "test"); + assertNotNull("poll resulted empty for task: " + tt, polled); + polled.setStatus(COMPLETED); + workflowExecutionService.updateTask(polled); + } + + wf = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(wf); + assertEquals(RUNNING, wf.getStatus()); + + assertTrue(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t19"))); + assertFalse(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t15"))); //Not there yet + assertFalse(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t20"))); //Not there yet + + Task task19 = workflowExecutionService.poll("junit_task_19", "test"); + assertNotNull(task19); + task19.setStatus(COMPLETED); + workflowExecutionService.updateTask(task19); + + Task task20 = workflowExecutionService.poll("junit_task_20", "test"); + assertNotNull(task20); + task20.setStatus(COMPLETED); + workflowExecutionService.updateTask(task20); + Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); + wf = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(wf); + assertEquals(RUNNING, wf.getStatus()); + + Set pendingTasks = wf.getTasks().stream().filter(t -> !t.getStatus().isTerminal()).map(t -> t.getReferenceTaskName()).collect(Collectors.toSet()); + assertTrue("Found only this: " + pendingTasks, wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("join1"))); + + pendingTasks = wf.getTasks().stream().filter(t -> !t.getStatus().isTerminal()).map(t -> t.getReferenceTaskName()).collect(Collectors.toSet()); + assertTrue("Found only this: " + pendingTasks, wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t15"))); + + Task task15 = workflowExecutionService.poll("junit_task_15", "test"); + assertNotNull(task15); + task15.setStatus(COMPLETED); + workflowExecutionService.updateTask(task15); + + wf = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(wf); + assertEquals(WorkflowStatus.COMPLETED, wf.getStatus()); + + } + + @Test + public void testForkJoinNestedWithSubWorkflow() { + + createForkJoinNestedWorkflowWithSubworkflow(); + + Map input = new HashMap<>(); + input.put("case", "a"); //This should execute t16 and t19 + String wfid = startOrLoadWorkflowExecution(FORK_JOIN_NESTED_WF, 1, "fork_join_nested_test", input, null, null); + System.out.println("testForkJoinNested.wfid=" + wfid); + + Workflow wf = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(wf); + assertEquals(RUNNING, wf.getStatus()); + + assertTrue(wf.getTasks().stream().anyMatch(t -> 
t.getReferenceTaskName().equals("t11"))); + assertTrue(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t12"))); + assertTrue(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t13"))); + assertTrue(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("sw1"))); + assertTrue(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("fork1"))); + assertTrue(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("fork2"))); + assertFalse(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t16"))); + assertFalse(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t1"))); + assertFalse(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t2"))); + + + Task t1 = workflowExecutionService.poll("junit_task_11", "test"); + assertTrue(workflowExecutionService.ackTaskReceived(t1.getTaskId())); + + Task t2 = workflowExecutionService.poll("junit_task_12", "test"); + assertTrue(workflowExecutionService.ackTaskReceived(t2.getTaskId())); + + Task t3 = workflowExecutionService.poll("junit_task_13", "test"); + assertTrue(workflowExecutionService.ackTaskReceived(t3.getTaskId())); + + assertNotNull(t1); + assertNotNull(t2); + assertNotNull(t3); + + t1.setStatus(COMPLETED); + t2.setStatus(COMPLETED); + t3.setStatus(COMPLETED); + + workflowExecutionService.updateTask(t1); + workflowExecutionService.updateTask(t2); + workflowExecutionService.updateTask(t3); + Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); + + wf = workflowExecutionService.getExecutionStatus(wfid, true); + + assertTrue(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t16"))); + assertTrue(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t14"))); + + String[] tasks = new String[]{"junit_task_1", "junit_task_2", "junit_task_14", "junit_task_16"}; + for (String tt : tasks) { + Task polled = workflowExecutionService.poll(tt, "test"); + assertNotNull("poll resulted empty for task: " + tt, polled); + polled.setStatus(COMPLETED); + workflowExecutionService.updateTask(polled); + } + + wf = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(wf); + assertEquals(RUNNING, wf.getStatus()); + + assertTrue(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t19"))); + assertFalse(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t15"))); //Not there yet + assertFalse(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t20"))); //Not there yet + + Task task19 = workflowExecutionService.poll("junit_task_19", "test"); + assertNotNull(task19); + task19.setStatus(COMPLETED); + workflowExecutionService.updateTask(task19); + + Task task20 = workflowExecutionService.poll("junit_task_20", "test"); + assertNotNull(task20); + task20.setStatus(COMPLETED); + workflowExecutionService.updateTask(task20); + Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); + wf = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(wf); + assertEquals(RUNNING, wf.getStatus()); + + Set pendingTasks = wf.getTasks().stream().filter(t -> !t.getStatus().isTerminal()).map(t -> t.getReferenceTaskName()).collect(Collectors.toSet()); + assertTrue("Found only this: " + pendingTasks, wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("join1"))); + + pendingTasks = wf.getTasks().stream().filter(t -> !t.getStatus().isTerminal()).map(t -> t.getReferenceTaskName()).collect(Collectors.toSet()); + 
assertTrue("Found only this: " + pendingTasks, wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t15"))); + Task task15 = workflowExecutionService.poll("junit_task_15", "test"); + assertNotNull(task15); + task15.setStatus(COMPLETED); + workflowExecutionService.updateTask(task15); + + wf = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(wf); + assertEquals(WorkflowStatus.COMPLETED, wf.getStatus()); + + } + + @Test + public void testForkJoinFailure() { + + try { + createForkJoinWorkflow(); + } catch (Exception e) { + } + + String taskName = "junit_task_2"; + TaskDef taskDef = notFoundSafeGetTaskDef(taskName); + int retryCount = taskDef.getRetryCount(); + taskDef.setRetryCount(0); + metadataService.updateTaskDef(taskDef); + + + Map input = new HashMap(); + String wfid = startOrLoadWorkflowExecution(FORK_JOIN_WF, 1, "fanouttest", input, null, null); + System.out.println("testForkJoinFailure.wfid=" + wfid); + + Task t1 = workflowExecutionService.poll("junit_task_2", "test"); + assertNotNull(t1); + assertTrue(workflowExecutionService.ackTaskReceived(t1.getTaskId())); + + Task t2 = workflowExecutionService.poll("junit_task_1", "test"); + assertTrue(workflowExecutionService.ackTaskReceived(t2.getTaskId())); + + Task t3 = workflowExecutionService.poll("junit_task_3", "test"); + assertNull(t3); + + assertNotNull(t1); + assertNotNull(t2); + t1.setStatus(FAILED); + t2.setStatus(COMPLETED); + + workflowExecutionService.updateTask(t2); + Workflow wf = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(wf); + assertEquals("Found " + wf.getTasks(), RUNNING, wf.getStatus()); + + t3 = workflowExecutionService.poll("junit_task_3", "test"); + assertNotNull(t3); + + + workflowExecutionService.updateTask(t1); + wf = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(wf); + assertEquals("Found " + wf.getTasks(), WorkflowStatus.FAILED, wf.getStatus()); + + + taskDef = notFoundSafeGetTaskDef(taskName); + taskDef.setRetryCount(retryCount); + metadataService.updateTaskDef(taskDef); + } + + @SuppressWarnings("unchecked") + @Test + public void testDynamicForkJoinLegacy() { + + try { + createDynamicForkJoinWorkflowDefsLegacy(); + } catch (Exception e) { + } + + Map input = new HashMap(); + String wfid = startOrLoadWorkflowExecution(DYNAMIC_FORK_JOIN_WF_LEGACY, 1, "dynfanouttest1", input, null, null); + System.out.println("testDynamicForkJoinLegacy.wfid=" + wfid); + + Task t1 = workflowExecutionService.poll("junit_task_1", "test"); + //assertTrue(ess.ackTaskRecieved(t1.getTaskId(), "test")); + + DynamicForkJoinTaskList dynamicForkJoinTasks = new DynamicForkJoinTaskList(); + + input = new HashMap(); + input.put("k1", "v1"); + dynamicForkJoinTasks.add("junit_task_2", null, "xdt1", input); + + HashMap input2 = new HashMap(); + input2.put("k2", "v2"); + dynamicForkJoinTasks.add("junit_task_3", null, "xdt2", input2); + + t1.getOutputData().put("dynamicTasks", dynamicForkJoinTasks); + t1.setStatus(COMPLETED); + + workflowExecutionService.updateTask(t1); + + Task t2 = workflowExecutionService.poll("junit_task_2", "test"); + assertTrue(workflowExecutionService.ackTaskReceived(t2.getTaskId())); + assertEquals("xdt1", t2.getReferenceTaskName()); + assertTrue(t2.getInputData().containsKey("k1")); + assertEquals("v1", t2.getInputData().get("k1")); + Map output = new HashMap(); + output.put("ok1", "ov1"); + t2.setOutputData(output); + t2.setStatus(COMPLETED); + workflowExecutionService.updateTask(t2); + + Task t3 = 
workflowExecutionService.poll("junit_task_3", "test"); + assertTrue(workflowExecutionService.ackTaskReceived(t3.getTaskId())); + assertEquals("xdt2", t3.getReferenceTaskName()); + assertTrue(t3.getInputData().containsKey("k2")); + assertEquals("v2", t3.getInputData().get("k2")); + + output = new HashMap<>(); + output.put("ok1", "ov1"); + t3.setOutputData(output); + t3.setStatus(COMPLETED); + workflowExecutionService.updateTask(t3); + + Workflow wf = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(wf); + assertEquals(WorkflowStatus.COMPLETED, wf.getStatus()); + + // Check the output + Task joinTask = wf.getTaskByRefName("dynamicfanouttask_join"); + assertEquals("Found:" + joinTask.getOutputData(), 2, joinTask.getOutputData().keySet().size()); + Set joinTaskOutput = joinTask.getOutputData().keySet(); + System.out.println("joinTaskOutput=" + joinTaskOutput); + for (String key : joinTask.getOutputData().keySet()) { + assertTrue(key.equals("xdt1") || key.equals("xdt2")); + assertEquals("ov1", ((Map) joinTask.getOutputData().get(key)).get("ok1")); + } + } + + @SuppressWarnings("unchecked") + @Test + public void testDynamicForkJoin() { + + createDynamicForkJoinWorkflowDefs(); + + String taskName = "junit_task_2"; + TaskDef taskDef = notFoundSafeGetTaskDef(taskName); + int retryCount = taskDef.getRetryCount(); + taskDef.setRetryCount(2); + taskDef.setRetryDelaySeconds(0); + taskDef.setRetryLogic(RetryLogic.FIXED); + metadataService.updateTaskDef(taskDef); + + Map workflowInput = new HashMap<>(); + String workflowId = startOrLoadWorkflowExecution(DYNAMIC_FORK_JOIN_WF, 1, "dynfanouttest1", workflowInput, null, null); + System.out.println("testDynamicForkJoin.wfid=" + workflowId); + Workflow workflow = workflowExecutor.getWorkflow(workflowId, true); + assertNotNull(workflow); + assertEquals(workflow.getReasonForIncompletion(), RUNNING, workflow.getStatus()); + assertEquals(1, workflow.getTasks().size()); + + Task task1 = workflowExecutionService.poll("junit_task_1", "test"); + assertNotNull(task1); + assertTrue(workflowExecutionService.ackTaskReceived(task1.getTaskId())); + assertEquals("dt1", task1.getReferenceTaskName()); + + Map inputParams2 = new HashMap<>(); + inputParams2.put("k1", "v1"); + WorkflowTask workflowTask2 = new WorkflowTask(); + workflowTask2.setName("junit_task_2"); + workflowTask2.setTaskReferenceName("xdt1"); + + Map inputParams3 = new HashMap<>(); + inputParams3.put("k2", "v2"); + WorkflowTask workflowTask3 = new WorkflowTask(); + workflowTask3.setName("junit_task_3"); + workflowTask3.setTaskReferenceName("xdt2"); + + HashMap dynamicTasksInput = new HashMap<>(); + dynamicTasksInput.put("xdt1", inputParams2); + dynamicTasksInput.put("xdt2", inputParams3); + task1.getOutputData().put("dynamicTasks", Arrays.asList(workflowTask2, workflowTask3)); + task1.getOutputData().put("dynamicTasksInput", dynamicTasksInput); + task1.setStatus(COMPLETED); + + workflowExecutionService.updateTask(task1); + workflow = workflowExecutor.getWorkflow(workflowId, true); + assertNotNull(workflow); + assertEquals("Found " + workflow.getTasks().stream().map(Task::getTaskType).collect(Collectors.toList()), 5, workflow.getTasks().size()); + + Task task2 = workflowExecutionService.poll("junit_task_2", "test"); + assertTrue(workflowExecutionService.ackTaskReceived(task2.getTaskId())); + assertEquals("xdt1", task2.getReferenceTaskName()); + assertTrue(task2.getInputData().containsKey("k1")); + assertEquals("v1", task2.getInputData().get("k1")); + Map output = new HashMap<>(); + 
output.put("ok1", "ov1"); + task2.setOutputData(output); + task2.setStatus(FAILED); + workflowExecutionService.updateTask(task2); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(workflow.getReasonForIncompletion(), RUNNING, workflow.getStatus()); + assertEquals(2, workflow.getTasks().stream().filter(t -> t.getTaskType().equals("junit_task_2")).count()); + assertTrue(workflow.getTasks().stream().filter(t -> t.getTaskType().equals("junit_task_2")).allMatch(t -> t.getWorkflowTask() != null)); + assertEquals("Found " + workflow.getTasks().stream().map(Task::getTaskType).collect(Collectors.toList()), 6, workflow.getTasks().size()); + + task2 = workflowExecutionService.poll("junit_task_2", "test"); + assertTrue(workflowExecutionService.ackTaskReceived(task2.getTaskId())); + assertEquals("xdt1", task2.getReferenceTaskName()); + assertTrue(task2.getInputData().containsKey("k1")); + assertEquals("v1", task2.getInputData().get("k1")); + task2.setOutputData(output); + task2.setStatus(COMPLETED); + workflowExecutionService.updateTask(task2); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals("Found " + workflow.getTasks().stream().map(Task::getTaskType).collect(Collectors.toList()), 6, workflow.getTasks().size()); + + Task task3 = workflowExecutionService.poll("junit_task_3", "test"); + assertTrue(workflowExecutionService.ackTaskReceived(task3.getTaskId())); + assertEquals("xdt2", task3.getReferenceTaskName()); + assertTrue(task3.getInputData().containsKey("k2")); + assertEquals("v2", task3.getInputData().get("k2")); + output = new HashMap<>(); + output.put("ok1", "ov1"); + task3.setOutputData(output); + task3.setStatus(COMPLETED); + workflowExecutionService.updateTask(task3); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(workflow.getReasonForIncompletion(), RUNNING, workflow.getStatus()); + assertEquals("Found " + workflow.getTasks().stream().map(Task::getTaskType).collect(Collectors.toList()), 7, workflow.getTasks().size()); + + Task task4 = workflowExecutionService.poll("junit_task_4", "test"); + assertTrue(workflowExecutionService.ackTaskReceived(task4.getTaskId())); + assertEquals("task4", task4.getReferenceTaskName()); + task4.setStatus(COMPLETED); + workflowExecutionService.updateTask(task4); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(workflow.getReasonForIncompletion(), WorkflowStatus.COMPLETED, workflow.getStatus()); + assertEquals("Found " + workflow.getTasks().stream().map(Task::getTaskType).collect(Collectors.toList()), 7, workflow.getTasks().size()); + + // Check the output + Task joinTask = workflow.getTaskByRefName("dynamicfanouttask_join"); + assertEquals("Found:" + joinTask.getOutputData(), 2, joinTask.getOutputData().keySet().size()); + Set joinTaskOutput = joinTask.getOutputData().keySet(); + System.out.println("joinTaskOutput=" + joinTaskOutput); + for (String key : joinTask.getOutputData().keySet()) { + assertTrue(key.equals("xdt1") || key.equals("xdt2")); + assertEquals("ov1", ((Map) joinTask.getOutputData().get(key)).get("ok1")); + } + + // reset the task def + taskDef = notFoundSafeGetTaskDef(taskName); + taskDef.setRetryCount(retryCount); + taskDef.setRetryDelaySeconds(1); + metadataService.updateTaskDef(taskDef); + } + + private void createForkJoinWorkflow() { + + WorkflowDef workflowDef = 
new WorkflowDef(); + workflowDef.setName(FORK_JOIN_WF); + workflowDef.setDescription(workflowDef.getName()); + workflowDef.setVersion(1); + workflowDef.setInputParameters(Arrays.asList("param1", "param2")); + + WorkflowTask fanoutTask = new WorkflowTask(); + fanoutTask.setType(TaskType.FORK_JOIN.name()); + fanoutTask.setTaskReferenceName("fanouttask"); + + WorkflowTask workflowTask1 = new WorkflowTask(); + workflowTask1.setName("junit_task_1"); + Map inputParams1 = new HashMap<>(); + inputParams1.put("p1", "workflow.input.param1"); + inputParams1.put("p2", "workflow.input.param2"); + workflowTask1.setInputParameters(inputParams1); + workflowTask1.setTaskReferenceName("t1"); + + WorkflowTask workflowTask3 = new WorkflowTask(); + workflowTask3.setName("junit_task_3"); + workflowTask3.setInputParameters(inputParams1); + workflowTask3.setTaskReferenceName("t3"); + + WorkflowTask workflowTask2 = new WorkflowTask(); + workflowTask2.setName("junit_task_2"); + Map inputParams2 = new HashMap<>(); + inputParams2.put("tp1", "workflow.input.param1"); + workflowTask2.setInputParameters(inputParams2); + workflowTask2.setTaskReferenceName("t2"); + + WorkflowTask workflowTask4 = new WorkflowTask(); + workflowTask4.setName("junit_task_4"); + workflowTask4.setInputParameters(inputParams2); + workflowTask4.setTaskReferenceName("t4"); + + fanoutTask.getForkTasks().add(Arrays.asList(workflowTask1, workflowTask3)); + fanoutTask.getForkTasks().add(Collections.singletonList(workflowTask2)); + + workflowDef.getTasks().add(fanoutTask); + + WorkflowTask joinTask = new WorkflowTask(); + joinTask.setType(TaskType.JOIN.name()); + joinTask.setTaskReferenceName("fanouttask_join"); + joinTask.setJoinOn(Arrays.asList("t3", "t2")); + + workflowDef.getTasks().add(joinTask); + workflowDef.getTasks().add(workflowTask4); + metadataService.updateWorkflowDef(workflowDef); + } + + + private void createForkJoinWorkflowWithZeroRetry() { + + WorkflowDef def = new WorkflowDef(); + def.setName(FORK_JOIN_WF + "_2"); + def.setDescription(def.getName()); + def.setVersion(1); + def.setInputParameters(Arrays.asList("param1", "param2")); + + WorkflowTask fanout = new WorkflowTask(); + fanout.setType(TaskType.FORK_JOIN.name()); + fanout.setTaskReferenceName("fanouttask"); + + WorkflowTask wft1 = new WorkflowTask(); + wft1.setName("junit_task_0_RT_1"); + Map ip1 = new HashMap<>(); + ip1.put("p1", "workflow.input.param1"); + ip1.put("p2", "workflow.input.param2"); + wft1.setInputParameters(ip1); + wft1.setTaskReferenceName("t1"); + + WorkflowTask wft3 = new WorkflowTask(); + wft3.setName("junit_task_0_RT_3"); + wft3.setInputParameters(ip1); + wft3.setTaskReferenceName("t3"); + + WorkflowTask wft2 = new WorkflowTask(); + wft2.setName("junit_task_0_RT_2"); + Map ip2 = new HashMap<>(); + ip2.put("tp1", "workflow.input.param1"); + wft2.setInputParameters(ip2); + wft2.setTaskReferenceName("t2"); + + WorkflowTask wft4 = new WorkflowTask(); + wft4.setName("junit_task_0_RT_4"); + wft4.setInputParameters(ip2); + wft4.setTaskReferenceName("t4"); + + fanout.getForkTasks().add(Arrays.asList(wft1, wft3)); + fanout.getForkTasks().add(Arrays.asList(wft2)); + + def.getTasks().add(fanout); + + WorkflowTask join = new WorkflowTask(); + join.setType(TaskType.JOIN.name()); + join.setTaskReferenceName("fanouttask_join"); + join.setJoinOn(Arrays.asList("t3", "t2")); + + def.getTasks().add(join); + def.getTasks().add(wft4); + metadataService.updateWorkflowDef(def); + + } + + private void createForkJoinNestedWorkflow() { + + WorkflowDef def = new WorkflowDef(); + 
def.setName(FORK_JOIN_NESTED_WF); + def.setDescription(def.getName()); + def.setVersion(1); + def.setInputParameters(Arrays.asList("param1", "param2")); + + Map ip1 = new HashMap<>(); + ip1.put("p1", "workflow.input.param1"); + ip1.put("p2", "workflow.input.param2"); + ip1.put("case", "workflow.input.case"); + + WorkflowTask[] tasks = new WorkflowTask[21]; + + for (int i = 10; i < 21; i++) { + WorkflowTask wft = new WorkflowTask(); + wft.setName("junit_task_" + i); + wft.setInputParameters(ip1); + wft.setTaskReferenceName("t" + i); + tasks[i] = wft; + } + + WorkflowTask d1 = new WorkflowTask(); + d1.setType(TaskType.DECISION.name()); + d1.setName("Decision"); + d1.setTaskReferenceName("d1"); + d1.setInputParameters(ip1); + d1.setDefaultCase(Arrays.asList(tasks[18], tasks[20])); + d1.setCaseValueParam("case"); + Map> decisionCases = new HashMap<>(); + decisionCases.put("a", Arrays.asList(tasks[16], tasks[19], tasks[20])); + decisionCases.put("b", Arrays.asList(tasks[17], tasks[20])); + d1.setDecisionCases(decisionCases); + + WorkflowTask fork2 = new WorkflowTask(); + fork2.setType(TaskType.FORK_JOIN.name()); + fork2.setName("fork2"); + fork2.setTaskReferenceName("fork2"); + fork2.getForkTasks().add(Arrays.asList(tasks[12], tasks[14])); + fork2.getForkTasks().add(Arrays.asList(tasks[13], d1)); + + WorkflowTask join2 = new WorkflowTask(); + join2.setType(TaskType.JOIN.name()); + join2.setTaskReferenceName("join2"); + join2.setJoinOn(Arrays.asList("t14", "t20")); + + WorkflowTask fork1 = new WorkflowTask(); + fork1.setType(TaskType.FORK_JOIN.name()); + fork1.setTaskReferenceName("fork1"); + fork1.getForkTasks().add(Arrays.asList(tasks[11])); + fork1.getForkTasks().add(Arrays.asList(fork2, join2)); + + WorkflowTask join1 = new WorkflowTask(); + join1.setType(TaskType.JOIN.name()); + join1.setTaskReferenceName("join1"); + join1.setJoinOn(Arrays.asList("t11", "join2")); + + def.getTasks().add(fork1); + def.getTasks().add(join1); + def.getTasks().add(tasks[15]); + + metadataService.updateWorkflowDef(def); + } + + private void createForkJoinNestedWorkflowWithSubworkflow() { + + WorkflowDef def = new WorkflowDef(); + def.setName(FORK_JOIN_NESTED_WF); + def.setDescription(def.getName()); + def.setVersion(1); + def.setInputParameters(Arrays.asList("param1", "param2")); + + Map ip1 = new HashMap<>(); + ip1.put("p1", "workflow.input.param1"); + ip1.put("p2", "workflow.input.param2"); + ip1.put("case", "workflow.input.case"); + + WorkflowTask[] tasks = new WorkflowTask[21]; + + for (int i = 10; i < 21; i++) { + WorkflowTask wft = new WorkflowTask(); + wft.setName("junit_task_" + i); + wft.setInputParameters(ip1); + wft.setTaskReferenceName("t" + i); + tasks[i] = wft; + } + + WorkflowTask d1 = new WorkflowTask(); + d1.setType(TaskType.DECISION.name()); + d1.setName("Decision"); + d1.setTaskReferenceName("d1"); + d1.setInputParameters(ip1); + d1.setDefaultCase(Arrays.asList(tasks[18], tasks[20])); + d1.setCaseValueParam("case"); + Map> decisionCases = new HashMap<>(); + decisionCases.put("a", Arrays.asList(tasks[16], tasks[19], tasks[20])); + decisionCases.put("b", Arrays.asList(tasks[17], tasks[20])); + d1.setDecisionCases(decisionCases); + + WorkflowTask subWorkflow = new WorkflowTask(); + subWorkflow.setType(TaskType.SUB_WORKFLOW.name()); + SubWorkflowParams sw = new SubWorkflowParams(); + sw.setName(LINEAR_WORKFLOW_T1_T2); + subWorkflow.setSubWorkflowParam(sw); + subWorkflow.setTaskReferenceName("sw1"); + + WorkflowTask fork2 = new WorkflowTask(); + fork2.setType(TaskType.FORK_JOIN.name()); + 
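+ // fork2 is the inner fork of the nested layout; the outer fork1 (below) adds a third branch that runs LINEAR_WORKFLOW_T1_T2 as sub-workflow "sw1", so join1 waits on "t11", "join2" and "sw1".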
fork2.setName("fork2"); + fork2.setTaskReferenceName("fork2"); + fork2.getForkTasks().add(Arrays.asList(tasks[12], tasks[14])); + fork2.getForkTasks().add(Arrays.asList(tasks[13], d1)); + + WorkflowTask join2 = new WorkflowTask(); + join2.setType(TaskType.JOIN.name()); + join2.setTaskReferenceName("join2"); + join2.setJoinOn(Arrays.asList("t14", "t20")); + + WorkflowTask fork1 = new WorkflowTask(); + fork1.setType(TaskType.FORK_JOIN.name()); + fork1.setTaskReferenceName("fork1"); + fork1.getForkTasks().add(Arrays.asList(tasks[11])); + fork1.getForkTasks().add(Arrays.asList(fork2, join2)); + fork1.getForkTasks().add(Arrays.asList(subWorkflow)); + + + WorkflowTask join1 = new WorkflowTask(); + join1.setType(TaskType.JOIN.name()); + join1.setTaskReferenceName("join1"); + join1.setJoinOn(Arrays.asList("t11", "join2", "sw1")); + + def.getTasks().add(fork1); + def.getTasks().add(join1); + def.getTasks().add(tasks[15]); + + metadataService.updateWorkflowDef(def); + + + } + + private void createDynamicForkJoinWorkflowDefs() { + + WorkflowDef def = new WorkflowDef(); + def.setName(DYNAMIC_FORK_JOIN_WF); + def.setDescription(def.getName()); + def.setVersion(1); + def.setInputParameters(Arrays.asList("param1", "param2")); + + WorkflowTask workflowTask1 = new WorkflowTask(); + workflowTask1.setName("junit_task_1"); + Map ip1 = new HashMap<>(); + ip1.put("p1", "workflow.input.param1"); + ip1.put("p2", "workflow.input.param2"); + workflowTask1.setInputParameters(ip1); + workflowTask1.setTaskReferenceName("dt1"); + + WorkflowTask fanout = new WorkflowTask(); + fanout.setType(TaskType.FORK_JOIN_DYNAMIC.name()); + fanout.setTaskReferenceName("dynamicfanouttask"); + fanout.setDynamicForkTasksParam("dynamicTasks"); + fanout.setDynamicForkTasksInputParamName("dynamicTasksInput"); + fanout.getInputParameters().put("dynamicTasks", "dt1.output.dynamicTasks"); + fanout.getInputParameters().put("dynamicTasksInput", "dt1.output.dynamicTasksInput"); + + WorkflowTask join = new WorkflowTask(); + join.setType(TaskType.JOIN.name()); + join.setTaskReferenceName("dynamicfanouttask_join"); + + WorkflowTask workflowTask4 = new WorkflowTask(); + workflowTask4.setName("junit_task_4"); + workflowTask4.setTaskReferenceName("task4"); + + def.getTasks().add(workflowTask1); + def.getTasks().add(fanout); + def.getTasks().add(join); + def.getTasks().add(workflowTask4); + + metadataMapperService.populateTaskDefinitions(def); + + metadataService.updateWorkflowDef(def); + } + + @SuppressWarnings("deprecation") + private void createDynamicForkJoinWorkflowDefsLegacy() { + + WorkflowDef def = new WorkflowDef(); + def.setName(DYNAMIC_FORK_JOIN_WF_LEGACY); + def.setDescription(def.getName()); + def.setVersion(1); + def.setInputParameters(Arrays.asList("param1", "param2")); + + WorkflowTask wft1 = new WorkflowTask(); + wft1.setName("junit_task_1"); + Map ip1 = new HashMap<>(); + ip1.put("p1", "workflow.input.param1"); + ip1.put("p2", "workflow.input.param2"); + wft1.setInputParameters(ip1); + wft1.setTaskReferenceName("dt1"); + + WorkflowTask fanout = new WorkflowTask(); + fanout.setType(TaskType.FORK_JOIN_DYNAMIC.name()); + fanout.setTaskReferenceName("dynamicfanouttask"); + fanout.setDynamicForkJoinTasksParam("dynamicTasks"); + fanout.getInputParameters().put("dynamicTasks", "dt1.output.dynamicTasks"); + fanout.getInputParameters().put("dynamicTasksInput", "dt1.output.dynamicTasksInput"); + + WorkflowTask join = new WorkflowTask(); + join.setType(TaskType.JOIN.name()); + join.setTaskReferenceName("dynamicfanouttask_join"); + + 
def.getTasks().add(wft1); + def.getTasks().add(fanout); + def.getTasks().add(join); + + metadataMapperService.populateTaskDefinitions(def); + + metadataService.updateWorkflowDef(def); + + } + + private void createConditionalWF() { + + WorkflowTask wft1 = new WorkflowTask(); + wft1.setName("junit_task_1"); + Map ip1 = new HashMap<>(); + ip1.put("p1", "workflow.input.param1"); + ip1.put("p2", "workflow.input.param2"); + wft1.setInputParameters(ip1); + wft1.setTaskReferenceName("t1"); + + WorkflowTask wft2 = new WorkflowTask(); + wft2.setName("junit_task_2"); + Map ip2 = new HashMap<>(); + ip2.put("tp1", "workflow.input.param1"); + wft2.setInputParameters(ip2); + wft2.setTaskReferenceName("t2"); + + WorkflowTask wft3 = new WorkflowTask(); + wft3.setName("junit_task_3"); + Map ip3 = new HashMap<>(); + ip2.put("tp3", "workflow.input.param2"); + wft3.setInputParameters(ip3); + wft3.setTaskReferenceName("t3"); + + WorkflowDef def2 = new WorkflowDef(); + def2.setName(COND_TASK_WF); + def2.setDescription(COND_TASK_WF); + def2.setInputParameters(Arrays.asList("param1", "param2")); + + WorkflowTask c2 = new WorkflowTask(); + c2.setType(TaskType.DECISION.name()); + c2.setCaseValueParam("case"); + c2.setName("conditional2"); + c2.setTaskReferenceName("conditional2"); + Map> dc = new HashMap<>(); + dc.put("one", Arrays.asList(wft1, wft3)); + dc.put("two", Arrays.asList(wft2)); + c2.setDecisionCases(dc); + c2.getInputParameters().put("case", "workflow.input.param2"); + + + WorkflowTask condition = new WorkflowTask(); + condition.setType(TaskType.DECISION.name()); + condition.setCaseValueParam("case"); + condition.setName("conditional"); + condition.setTaskReferenceName("conditional"); + Map> decisionCases = new HashMap<>(); + decisionCases.put("nested", Arrays.asList(c2)); + decisionCases.put("three", Arrays.asList(wft3)); + condition.setDecisionCases(decisionCases); + condition.getInputParameters().put("case", "workflow.input.param1"); + condition.getDefaultCase().add(wft2); + def2.getTasks().add(condition); + + WorkflowTask notifyTask = new WorkflowTask(); + notifyTask.setName("junit_task_4"); + notifyTask.setTaskReferenceName("junit_task_4"); + + WorkflowTask finalTask = new WorkflowTask(); + finalTask.setName("finalcondition"); + finalTask.setTaskReferenceName("tf"); + finalTask.setType(TaskType.DECISION.name()); + finalTask.setCaseValueParam("finalCase"); + Map fi = new HashMap<>(); + fi.put("finalCase", "workflow.input.finalCase"); + finalTask.setInputParameters(fi); + finalTask.getDecisionCases().put("notify", Arrays.asList(notifyTask)); + + def2.getTasks().add(finalTask); + metadataService.updateWorkflowDef(def2); + + } + + + @Test + public void testDefDAO() { + List taskDefs = metadataService.getTaskDefs(); + assertNotNull(taskDefs); + assertTrue(!taskDefs.isEmpty()); + } + + @Test + public void testSimpleWorkflowFailureWithTerminalError() { + + clearWorkflows(); + + TaskDef taskDef = notFoundSafeGetTaskDef("junit_task_1"); + taskDef.setRetryCount(1); + metadataService.updateTaskDef(taskDef); + + WorkflowDef found = metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); + assertNotNull(found); + Map outputParameters = found.getOutputParameters(); + outputParameters.put("validationErrors", "${t1.output.ErrorMessage}"); + metadataService.updateWorkflowDef(found); + + String correlationId = "unit_test_1"; + Map input = new HashMap<>(); + String inputParam1 = "p1 value"; + input.put("param1", inputParam1); + input.put("param2", "p2 value"); + String workflowInstanceId = 
startOrLoadWorkflowExecution("simpleWorkflowFailureWithTerminalError", LINEAR_WORKFLOW_T1_T2, 1, correlationId, input, null, null); + logger.info("testSimpleWorkflow.wfid= {}", workflowInstanceId); + assertNotNull(workflowInstanceId); + + Workflow es = workflowExecutionService.getExecutionStatus(workflowInstanceId, true); + assertNotNull(es); + assertEquals(es.getReasonForIncompletion(), RUNNING, es.getStatus()); + + es = workflowExecutionService.getExecutionStatus(workflowInstanceId, true); + assertNotNull(es); + assertEquals(RUNNING, es.getStatus()); + assertEquals(1, es.getTasks().size()); //The very first task is the one that should be scheduled. + + boolean failed = false; + try { + workflowExecutor.rewind(workflowInstanceId); + } catch (ApplicationException ae) { + failed = true; + } + assertTrue(failed); + + // Polling for the first task should return the same task as before + Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); + assertNotNull(task); + assertEquals("junit_task_1", task.getTaskType()); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + assertEquals(workflowInstanceId, task.getWorkflowInstanceId()); + + TaskResult taskResult = new TaskResult(task); + taskResult.setReasonForIncompletion("NON TRANSIENT ERROR OCCURRED: An integration point required to complete the task is down"); + taskResult.setStatus(TaskResult.Status.FAILED_WITH_TERMINAL_ERROR); + taskResult.addOutputData("TERMINAL_ERROR", "Integration endpoint down: FOOBAR"); + taskResult.addOutputData("ErrorMessage", "There was a terminal error"); + + workflowExecutionService.updateTask(taskResult); + workflowExecutor.decide(workflowInstanceId); + + es = workflowExecutionService.getExecutionStatus(workflowInstanceId, true); + TaskDef junit_task_1 = notFoundSafeGetTaskDef("junit_task_1"); + Task t1 = es.getTaskByRefName("t1"); + assertNotNull(es); + assertEquals(WorkflowStatus.FAILED, es.getStatus()); + assertEquals("NON TRANSIENT ERROR OCCURRED: An integration point required to complete the task is down", es.getReasonForIncompletion()); + assertEquals(1, junit_task_1.getRetryCount()); //Configured retries at the task definition level + assertEquals(0, t1.getRetryCount()); //Actual retries done on the task + assertEquals(true, es.getOutput().containsKey("o1")); + assertEquals("p1 value", es.getOutput().get("o1")); + assertEquals(es.getOutput().get("validationErrors").toString(), "There was a terminal error"); + + outputParameters.remove("validationErrors"); + metadataService.updateWorkflowDef(found); + } + + @Test + public void testSimpleWorkflow() { + + clearWorkflows(); + + metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); + + String correlationId = "unit_test_1"; + Map input = new HashMap<>(); + String inputParam1 = "p1 value"; + input.put("param1", inputParam1); + input.put("param2", "p2 value"); + String workflowInstanceId = startOrLoadWorkflowExecution(LINEAR_WORKFLOW_T1_T2, 1, correlationId, input, null, null); + logger.info("testSimpleWorkflow.wfid= {}", workflowInstanceId); + assertNotNull(workflowInstanceId); + + Workflow workflow = workflowExecutionService.getExecutionStatus(workflowInstanceId, true); + assertNotNull(workflow); + assertEquals(workflow.getReasonForIncompletion(), RUNNING, workflow.getStatus()); + + workflow = workflowExecutionService.getExecutionStatus(workflowInstanceId, true); + assertNotNull(workflow); + assertEquals(RUNNING, workflow.getStatus()); + assertEquals(1, workflow.getTasks().size()); //The very first task is the one 
that should be scheduled. + + boolean failed = false; + try { + workflowExecutor.rewind(workflowInstanceId); + } catch (ApplicationException ae) { + failed = true; + } + assertTrue(failed); + + // Polling for the first task + Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); + assertNotNull(task); + assertEquals("junit_task_1", task.getTaskType()); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + assertEquals(workflowInstanceId, task.getWorkflowInstanceId()); + + workflowExecutor.decide(workflowInstanceId); + + String task1Op = "task1.Done"; + List tasks = workflowExecutionService.getTasks(task.getTaskType(), null, 1); + assertNotNull(tasks); + assertEquals(1, tasks.size()); + task = tasks.get(0); + assertEquals(workflowInstanceId, task.getWorkflowInstanceId()); + task.getOutputData().put("op", task1Op); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + workflow = workflowExecutionService.getExecutionStatus(workflowInstanceId, false); + assertNotNull(workflow); + assertNotNull(workflow.getOutput()); + + task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); + assertNotNull(task); + assertEquals("junit_task_2", task.getTaskType()); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + String task2Input = (String) task.getInputData().get("tp2"); + assertNotNull("Found=" + task.getInputData(), task2Input); + assertEquals(task1Op, task2Input); + + task2Input = (String) task.getInputData().get("tp1"); + assertNotNull(task2Input); + assertEquals(inputParam1, task2Input); + + task.setStatus(COMPLETED); + task.setReasonForIncompletion("unit test failure"); + workflowExecutionService.updateTask(task); + + workflow = workflowExecutionService.getExecutionStatus(workflowInstanceId, true); + assertNotNull(workflow); + assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); + tasks = workflow.getTasks(); + assertNotNull(tasks); + assertEquals(2, tasks.size()); + + assertTrue("Found " + workflow.getOutput().toString(), workflow.getOutput().containsKey("o3")); + assertEquals("task1.Done", workflow.getOutput().get("o3")); + } + + @Test + public void testSimpleWorkflowWithResponseTimeout() throws Exception { + + createWFWithResponseTimeout(); + + String correlationId = "unit_test_1"; + Map workflowInput = new HashMap(); + String inputParam1 = "p1 value"; + workflowInput.put("param1", inputParam1); + workflowInput.put("param2", "p2 value"); + String workflowId = startOrLoadWorkflowExecution("RTOWF", 1, correlationId, workflowInput, null, null); + logger.debug("testSimpleWorkflowWithResponseTimeout.wfid=" + workflowId); + assertNotNull(workflowId); + + Workflow workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(RUNNING, workflow.getStatus()); + assertEquals(1, workflow.getTasks().size()); //The very first task is the one that should be scheduled. 
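+ // Flow exercised below: task_rt is polled but never updated within its response timeout, so after the sleep decide() should mark the first attempt TIMED_OUT and put a fresh SCHEDULED copy back on the task_rt queue; the exact timeout value comes from createWFWithResponseTimeout() and is presumably shorter than the 10s sleep.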
+ assertEquals(1, queueDAO.getSize("task_rt")); + + // Polling for the first task should return the first task + Task task = workflowExecutionService.poll("task_rt", "task1.junit.worker.testTimeout"); + assertNotNull(task); + assertEquals("task_rt", task.getTaskType()); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + assertEquals(workflowId, task.getWorkflowInstanceId()); + + // As the task_rt is out of the queue, the next poll should not get it + Task nullTask = workflowExecutionService.poll("task_rt", "task1.junit.worker.testTimeout"); + assertNull(nullTask); + + Thread.sleep(10000); + workflowExecutor.decide(workflowId); + assertEquals(1, queueDAO.getSize("task_rt")); + + // The first task would be timed_out and a new task will be scheduled + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(RUNNING, workflow.getStatus()); + assertEquals(2, workflow.getTasks().size()); + assertTrue(workflow.getTasks().stream().allMatch(t -> t.getReferenceTaskName().equals("task_rt_t1"))); + assertEquals(TIMED_OUT, workflow.getTasks().get(0).getStatus()); + assertEquals(SCHEDULED, workflow.getTasks().get(1).getStatus()); + + // Polling now should get the same task back because it should have been put back in the queue + Task taskAgain = workflowExecutionService.poll("task_rt", "task1.junit.worker"); + assertNotNull(taskAgain); + + // update task with callback after seconds greater than the response timeout + taskAgain.setStatus(IN_PROGRESS); + taskAgain.setCallbackAfterSeconds(20); + workflowExecutionService.updateTask(taskAgain); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(WorkflowStatus.RUNNING, workflow.getStatus()); + assertEquals(2, workflow.getTasks().size()); + assertEquals(IN_PROGRESS, workflow.getTasks().get(1).getStatus()); + + // wait for callback after seconds which is longer than response timeout seconds and then call decide + Thread.sleep(20000); + workflowExecutor.decide(workflowId); + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + + // Poll for task again + taskAgain = workflowExecutionService.poll("task_rt", "task1.junit.worker"); + assertNotNull(taskAgain); + + taskAgain.getOutputData().put("op", "task1.Done"); + taskAgain.setStatus(COMPLETED); + workflowExecutionService.updateTask(taskAgain); + + // poll for next task + task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker.testTimeout"); + assertNotNull(task); + assertEquals("junit_task_2", task.getTaskType()); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + + task.setStatus(COMPLETED); + task.setReasonForIncompletion("unit test failure"); + workflowExecutionService.updateTask(task); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); + } + + @Test + public void testWorkflowRerunWithSubWorkflows() { + // Execute a workflow with sub-workflow + String workflowId = this.runWorkflowWithSubworkflow(); + // Check it completed + Workflow workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); + assertEquals(2, workflow.getTasks().size()); + + // Now lets pickup the first task in the sub workflow and rerun it from there + String subWorkflowId = null; + 
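+ // Locate the SUB_WORKFLOW task in the parent to get the spawned sub-workflow's id, then pick its junit_task_1 instance as the rerun point.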
for (Task task : workflow.getTasks()) { + if (task.getTaskType().equalsIgnoreCase(SubWorkflow.NAME)) { + subWorkflowId = task.getOutputData().get(SubWorkflow.SUB_WORKFLOW_ID).toString(); + } + } + assertNotNull(subWorkflowId); + Workflow subWorkflow = workflowExecutionService.getExecutionStatus(subWorkflowId, true); + Task subWorkflowTask1 = null; + for (Task task : subWorkflow.getTasks()) { + if (task.getTaskDefName().equalsIgnoreCase("junit_task_1")) { + subWorkflowTask1 = task; + } + } + assertNotNull(subWorkflowTask1); + + RerunWorkflowRequest rerunWorkflowRequest = new RerunWorkflowRequest(); + rerunWorkflowRequest.setReRunFromTaskId(subWorkflowTask1.getTaskId()); + + Map newInput = new HashMap<>(); + newInput.put("p1", "1"); + newInput.put("p2", "2"); + rerunWorkflowRequest.setTaskInput(newInput); + + String correlationId = "unit_test_sw_new"; + Map input = new HashMap<>(); + input.put("param1", "New p1 value"); + input.put("param2", "New p2 value"); + rerunWorkflowRequest.setCorrelationId(correlationId); + rerunWorkflowRequest.setWorkflowInput(input); + + rerunWorkflowRequest.setReRunFromWorkflowId(workflowId); + rerunWorkflowRequest.setReRunFromTaskId(subWorkflowTask1.getTaskId()); + // Rerun + workflowExecutor.rerun(rerunWorkflowRequest); + + // The main WF and the sub WF should be in RUNNING state + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(RUNNING, workflow.getStatus()); + assertEquals(2, workflow.getTasks().size()); + assertEquals(correlationId, workflow.getCorrelationId()); + assertEquals("New p1 value", workflow.getInput().get("param1")); + assertEquals("New p2 value", workflow.getInput().get("param2")); + + subWorkflow = workflowExecutionService.getExecutionStatus(subWorkflowId, true); + assertNotNull(subWorkflow); + assertEquals(RUNNING, subWorkflow.getStatus()); + // Since we are re running from the sub workflow task, there + // should be only 1 task that is SCHEDULED + assertEquals(1, subWorkflow.getTasks().size()); + assertEquals(SCHEDULED, subWorkflow.getTasks().get(0).getStatus()); + + // Now execute the task + Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); + assertNotNull(task); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + assertEquals(task.getInputData().get("p1").toString(), "1"); + assertEquals(task.getInputData().get("p2").toString(), "2"); + task.getOutputData().put("op", "junit_task_1.done"); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + subWorkflow = workflowExecutionService.getExecutionStatus(subWorkflowId, true); + assertNotNull(subWorkflow); + assertEquals(RUNNING, subWorkflow.getStatus()); + assertEquals(2, subWorkflow.getTasks().size()); + + // Poll for second task of the sub workflow and execute it + task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); + assertNotNull(task); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + task.getOutputData().put("op", "junit_task_2.done"); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + // Now the sub workflow and the main workflow must have finished + subWorkflow = workflowExecutionService.getExecutionStatus(subWorkflowId, true); + assertNotNull(subWorkflow); + assertEquals(WorkflowStatus.COMPLETED, subWorkflow.getStatus()); + assertEquals(2, subWorkflow.getTasks().size()); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + 
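+ // The parent keeps its original two tasks; only the sub-workflow re-executed from the selected task onwards.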
assertNotNull(workflow); + assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); + assertEquals(2, workflow.getTasks().size()); + } + + @Test + public void testSimpleWorkflowWithTaskSpecificDomain() { + + long startTimeTimestamp = System.currentTimeMillis(); + + clearWorkflows(); + createWorkflowDefForDomain(); + + metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2_SW, 1); + + String correlationId = "unit_test_sw"; + Map input = new HashMap<>(); + String inputParam1 = "p1 value"; + input.put("param1", inputParam1); + input.put("param2", "p2 value"); + Map taskToDomain = new HashMap<>(); + taskToDomain.put("junit_task_3", "domain1"); + taskToDomain.put("junit_task_2", "domain1"); + + // Poll before so that a polling for this task is "active" + Task task = workflowExecutionService.poll("junit_task_3", "task1.junit.worker", "domain1"); + assertNull(task); + task = workflowExecutionService.poll("junit_task_2", "task1.junit.worker", "domain1"); + assertNull(task); + + String workflowId = startOrLoadWorkflowExecution("simpleWorkflowWithTaskSpecificDomain", LINEAR_WORKFLOW_T1_T2_SW, 1, correlationId, input, null, taskToDomain); + assertNotNull(workflowId); + + Workflow workflow = workflowExecutor.getWorkflow(workflowId, false); + assertNotNull(workflow); + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(workflow.getReasonForIncompletion(), RUNNING, workflow.getStatus()); + assertEquals(RUNNING, workflow.getStatus()); + assertEquals(1, workflow.getTasks().size()); //The very first task is the one that should be scheduled. + + // Check Size + Map sizes = workflowExecutionService.getTaskQueueSizes(Arrays.asList("domain1:junit_task_3", "junit_task_3")); + assertEquals(sizes.get("domain1:junit_task_3").intValue(), 1); + assertEquals(sizes.get("junit_task_3").intValue(), 0); + + // Polling for the first task + task = workflowExecutionService.poll("junit_task_3", "task1.junit.worker"); + assertNull(task); + task = workflowExecutionService.poll("junit_task_3", "task1.junit.worker", "domain1"); + assertNotNull(task); + assertEquals("junit_task_3", task.getTaskType()); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + assertEquals(workflowId, task.getWorkflowInstanceId()); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + + List tasks = workflowExecutionService.getTasks(task.getTaskType(), null, 10); + assertNotNull(tasks); + assertEquals(1, tasks.size()); + task = tasks.get(0); + assertEquals(workflowId, task.getWorkflowInstanceId()); + + String task1Op = "task1.Done"; + task.getOutputData().put("op", task1Op); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, false); + assertNotNull(workflow); + assertEquals(RUNNING, workflow.getStatus()); + + task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); + assertNotNull(task); + assertEquals("junit_task_1", task.getTaskType()); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, false); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + assertNotNull(workflow.getTaskToDomain()); + assertEquals(workflow.getTaskToDomain().size(), 2); + + task.setStatus(COMPLETED); + task.setReasonForIncompletion("unit test failure"); + workflowExecutionService.updateTask(task); + + task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker", 
"domain1"); + assertNotNull(task); + assertEquals("junit_task_2", task.getTaskType()); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + + task.setStatus(COMPLETED); + task.setReasonForIncompletion("unit test failure"); + workflowExecutionService.updateTask(task); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); + tasks = workflow.getTasks(); + assertNotNull(tasks); + assertEquals(2, tasks.size()); + assertTrue("Found " + workflow.getOutput().toString(), workflow.getOutput().containsKey("o3")); + assertEquals("task1.Done", workflow.getOutput().get("o3")); + + Predicate pollDataWithinTestTimes = pollData -> pollData.getLastPollTime() != 0 && pollData.getLastPollTime() > startTimeTimestamp; + + List pollData = workflowExecutionService.getPollData("junit_task_3").stream() + .filter(pollDataWithinTestTimes) + .collect(Collectors.toList()); + assertEquals(2, pollData.size()); + for (PollData pd : pollData) { + assertEquals(pd.getQueueName(), "junit_task_3"); + assertEquals(pd.getWorkerId(), "task1.junit.worker"); + assertTrue(pd.getLastPollTime() != 0); + if (pd.getDomain() != null) { + assertEquals(pd.getDomain(), "domain1"); + } + } + + List pdList = workflowExecutionService.getAllPollData().stream() + .filter(pollDataWithinTestTimes) + .collect(Collectors.toList()); + int count = 0; + for (PollData pd : pdList) { + if (pd.getQueueName().equals("junit_task_3")) { + count++; + } + } + assertEquals(2, count); + } + + @Test + public void testSimpleWorkflowWithAllTaskInOneDomain() { + + clearWorkflows(); + createWorkflowDefForDomain(); + + metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2_SW, 1); + + String correlationId = "unit_test_sw"; + Map input = new HashMap(); + String inputParam1 = "p1 value"; + input.put("param1", inputParam1); + input.put("param2", "p2 value"); + Map taskToDomain = new HashMap(); + taskToDomain.put("*", "domain11,, domain12"); + + // Poll before so that a polling for this task is "active" + Task task = workflowExecutionService.poll("junit_task_3", "task1.junit.worker", "domain11"); + assertNull(task); + task = workflowExecutionService.poll("junit_task_2", "task1.junit.worker", "domain12"); + assertNull(task); + + String workflowId = startOrLoadWorkflowExecution("simpleWorkflowWithTasksInOneDomain", LINEAR_WORKFLOW_T1_T2_SW, 1, correlationId, input, null, taskToDomain); + assertNotNull(workflowId); + + Workflow workflow = workflowExecutor.getWorkflow(workflowId, false); + assertNotNull(workflow); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(workflow.getReasonForIncompletion(), RUNNING, workflow.getStatus()); + assertEquals(RUNNING, workflow.getStatus()); + assertEquals(1, workflow.getTasks().size()); //The very first task is the one that should be scheduled. 
+ + // Check Size + Map sizes = workflowExecutionService.getTaskQueueSizes(Arrays.asList("domain11:junit_task_3", "junit_task_3")); + assertEquals(sizes.get("domain11:junit_task_3").intValue(), 1); + assertEquals(sizes.get("junit_task_3").intValue(), 0); + + // Polling for the first task + task = workflowExecutionService.poll("junit_task_3", "task1.junit.worker"); + assertNull(task); + task = workflowExecutionService.poll("junit_task_3", "task1.junit.worker", "domain11"); + assertNotNull(task); + assertEquals("junit_task_3", task.getTaskType()); + assertEquals("domain11", task.getDomain()); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + assertEquals(workflowId, task.getWorkflowInstanceId()); + + List tasks = workflowExecutionService.getTasks(task.getTaskType(), null, 1); + assertNotNull(tasks); + assertEquals(1, tasks.size()); + task = tasks.get(0); + + String task1Op = "task1.Done"; + assertEquals(workflowId, task.getWorkflowInstanceId()); + task.getOutputData().put("op", task1Op); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertEquals(workflowId, task.getWorkflowInstanceId()); + assertNotNull(workflow); + assertEquals(RUNNING, workflow.getStatus()); + assertEquals(2, workflow.getTasks().size()); + + task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); + assertNotNull(task); + assertEquals("junit_task_1", task.getTaskType()); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, false); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + assertNotNull(workflow.getTaskToDomain()); + assertEquals(workflow.getTaskToDomain().size(), 1); + + task.setStatus(COMPLETED); + task.setReasonForIncompletion("unit test failure"); + workflowExecutionService.updateTask(task); + + + task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker", "domain11"); + assertNull(task); + task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker", "domain12"); + assertNotNull(task); + assertEquals("junit_task_2", task.getTaskType()); + assertEquals("domain12", task.getDomain()); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + + task.setStatus(COMPLETED); + task.setReasonForIncompletion("unit test failure"); + workflowExecutionService.updateTask(task); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); + tasks = workflow.getTasks(); + assertNotNull(tasks); + assertEquals(2, tasks.size()); + assertTrue("Found " + workflow.getOutput().toString(), workflow.getOutput().containsKey("o3")); + assertEquals("task1.Done", workflow.getOutput().get("o3")); + } + + @After + public void clearWorkflows() { + List workflows = metadataService.getWorkflowDefs().stream() + .map(WorkflowDef::getName) + .collect(Collectors.toList()); + for (String wfName : workflows) { + List running = workflowExecutionService.getRunningWorkflows(wfName); + for (String wfid : running) { + workflowExecutor.terminateWorkflow(wfid, "cleanup"); + } + } + queueDAO.queuesDetail().keySet().forEach(queueDAO::flush); + } + + @Test + public void testLongRunning() { + + clearWorkflows(); + + metadataService.getWorkflowDef(LONG_RUNNING, 1); + + String correlationId = "unit_test_1"; + Map input = new HashMap(); + String inputParam1 = "p1 value"; + input.put("param1", inputParam1); + 
input.put("param2", "p2 value"); + + String workflowId = startOrLoadWorkflowExecution(LONG_RUNNING, 1, correlationId, input, null, null); + System.out.println("testLongRunning.wfid=" + workflowId); + assertNotNull(workflowId); + + Workflow workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(RUNNING, workflow.getStatus()); + + // Check the queue + assertEquals(Integer.valueOf(1), workflowExecutionService.getTaskQueueSizes(Collections.singletonList("junit_task_1")).get("junit_task_1")); + + Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); + assertNotNull(task); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + + String param1 = (String) task.getInputData().get("p1"); + String param2 = (String) task.getInputData().get("p2"); + + assertNotNull(param1); + assertNotNull(param2); + assertEquals("p1 value", param1); + assertEquals("p2 value", param2); + + String task1Output = "task1.In.Progress"; + task.getOutputData().put("op", task1Output); + task.setStatus(IN_PROGRESS); + task.setCallbackAfterSeconds(5); + workflowExecutionService.updateTask(task); + String taskId = task.getTaskId(); + + // Check the queue + assertEquals(Integer.valueOf(1), workflowExecutionService.getTaskQueueSizes(Arrays.asList("junit_task_1")).get("junit_task_1")); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(RUNNING, workflow.getStatus()); + + // Polling for next task should not return anything + Task task2 = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); + assertNull(task2); + + task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); + assertNull(task); + + Uninterruptibles.sleepUninterruptibly(5, TimeUnit.SECONDS); + // Polling for the first task should return the same task as before + task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); + assertNotNull(task); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + assertEquals(task.getTaskId(), taskId); + + task1Output = "task1.Done"; + List tasks = workflowExecutionService.getTasks(task.getTaskType(), null, 1); + assertNotNull(tasks); + assertEquals(1, tasks.size()); + assertEquals(workflowId, task.getWorkflowInstanceId()); + task = tasks.get(0); + task.getOutputData().put("op", task1Output); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); + assertNotNull(task); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + String task2Input = (String) task.getInputData().get("tp2"); + assertNotNull(task2Input); + assertEquals(task1Output, task2Input); + + task2Input = (String) task.getInputData().get("tp1"); + assertNotNull(task2Input); + assertEquals(inputParam1, task2Input); + + task.setStatus(COMPLETED); + task.setReasonForIncompletion("unit test failure"); + workflowExecutionService.updateTask(task); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); + tasks = workflow.getTasks(); + assertNotNull(tasks); + assertEquals(2, tasks.size()); + } + + @Test + public void testResetWorkflowInProgressTasks() { + + clearWorkflows(); + + metadataService.getWorkflowDef(LONG_RUNNING, 1); + + String correlationId = "unit_test_1"; + Map input = new HashMap(); + String 
inputParam1 = "p1 value"; + input.put("param1", inputParam1); + input.put("param2", "p2 value"); + String wfid = startOrLoadWorkflowExecution(LONG_RUNNING, 1, correlationId, input, null, null); + System.out.println("testLongRunning.wfid=" + wfid); + assertNotNull(wfid); + + Workflow es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(RUNNING, es.getStatus()); + + + es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(RUNNING, es.getStatus()); + + // Check the queue + assertEquals(Integer.valueOf(1), workflowExecutionService.getTaskQueueSizes(Arrays.asList("junit_task_1")).get("junit_task_1")); + /// + + Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); + assertNotNull(task); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + + String param1 = (String) task.getInputData().get("p1"); + String param2 = (String) task.getInputData().get("p2"); + + assertNotNull(param1); + assertNotNull(param2); + assertEquals("p1 value", param1); + assertEquals("p2 value", param2); + + + String task1Op = "task1.In.Progress"; + task.getOutputData().put("op", task1Op); + task.setStatus(IN_PROGRESS); + task.setCallbackAfterSeconds(3600); + workflowExecutionService.updateTask(task); + String taskId = task.getTaskId(); + + // Check the queue + assertEquals(Integer.valueOf(1), workflowExecutionService.getTaskQueueSizes(Arrays.asList("junit_task_1")).get("junit_task_1")); + /// + + + es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(RUNNING, es.getStatus()); + + // Polling for next task should not return anything + Task task2 = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); + assertNull(task2); + + task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); + assertNull(task); + + //Uninterruptibles.sleepUninterruptibly(5, TimeUnit.SECONDS); + // Reset + workflowExecutor.resetCallbacksForInProgressTasks(wfid); + + + // Now Polling for the first task should return the same task as before + task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); + assertNotNull(task); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + assertEquals(task.getTaskId(), taskId); + assertEquals(task.getCallbackAfterSeconds(), 0); + + task1Op = "task1.Done"; + List tasks = workflowExecutionService.getTasks(task.getTaskType(), null, 1); + assertNotNull(tasks); + assertEquals(1, tasks.size()); + assertEquals(wfid, task.getWorkflowInstanceId()); + task = tasks.get(0); + task.getOutputData().put("op", task1Op); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); + assertNotNull(task); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + String task2Input = (String) task.getInputData().get("tp2"); + assertNotNull(task2Input); + assertEquals(task1Op, task2Input); + + task2Input = (String) task.getInputData().get("tp1"); + assertNotNull(task2Input); + assertEquals(inputParam1, task2Input); + + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + + es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(WorkflowStatus.COMPLETED, es.getStatus()); + tasks = es.getTasks(); + assertNotNull(tasks); + assertEquals(2, tasks.size()); + + + } + + + @Test + public void 
testConcurrentWorkflowExecutions() { + + int count = 3; + + metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); + + String correlationId = "unit_test_concurrrent"; + Map input = new HashMap(); + String inputParam1 = "p1 value"; + input.put("param1", inputParam1); + input.put("param2", "p2 value"); + String[] wfids = new String[count]; + + for (int i = 0; i < count; i++) { + String wfid = startOrLoadWorkflowExecution("concurrentWorkflowExecutions", LINEAR_WORKFLOW_T1_T2, 1, correlationId, input, null, null); + System.out.println("testConcurrentWorkflowExecutions.wfid=" + wfid); + assertNotNull(wfid); + + List ids = workflowExecutionService.getRunningWorkflows(LINEAR_WORKFLOW_T1_T2); + assertNotNull(ids); + assertTrue("found no ids: " + ids, ids.size() > 0); //if there are concurrent tests running, this would be more than 1 + boolean foundId = false; + for (String id : ids) { + if (id.equals(wfid)) { + foundId = true; + } + } + assertTrue(foundId); + wfids[i] = wfid; + } + + + String task1Op = ""; + for (int i = 0; i < count; i++) { + + Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); + assertNotNull(task); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + String param1 = (String) task.getInputData().get("p1"); + String param2 = (String) task.getInputData().get("p2"); + + assertNotNull(param1); + assertNotNull(param2); + assertEquals("p1 value", param1); + assertEquals("p2 value", param2); + + task1Op = "task1.output->" + param1 + "." + param2; + task.getOutputData().put("op", task1Op); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + } + + for (int i = 0; i < count; i++) { + Task task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); + assertNotNull(task); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + String task2Input = (String) task.getInputData().get("tp2"); + assertNotNull(task2Input); + assertEquals(task1Op, task2Input); + + task2Input = (String) task.getInputData().get("tp1"); + assertNotNull(task2Input); + assertEquals(inputParam1, task2Input); + + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + } + + List wfs = workflowExecutionService.getWorkflowInstances(LINEAR_WORKFLOW_T1_T2, correlationId, false, false); + wfs.forEach(wf -> { + assertEquals(WorkflowStatus.COMPLETED, wf.getStatus()); + }); + + + } + + @Test + public void testCaseStatements() { + createConditionalWF(); + + String correlationId = "testCaseStatements: " + System.currentTimeMillis(); + Map input = new HashMap(); + String wfid; + String[] sequence; + + + //default case + input.put("param1", "xxx"); + input.put("param2", "two"); + wfid = startOrLoadWorkflowExecution(COND_TASK_WF, 1, correlationId, input, null, null); + System.out.println("testCaseStatements.wfid=" + wfid); + assertNotNull(wfid); + Workflow es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(RUNNING, es.getStatus()); + Task task = workflowExecutionService.poll("junit_task_2", "junit"); + assertNotNull(task); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(WorkflowStatus.COMPLETED, es.getStatus()); + assertEquals(3, es.getTasks().size()); + + /// + + + //nested - one + input.put("param1", "nested"); + input.put("param2", "one"); + wfid = startOrLoadWorkflowExecution(COND_TASK_WF + 2, COND_TASK_WF, 1, correlationId, 
input, null, null); + System.out.println("testCaseStatements.wfid=" + wfid); + assertNotNull(wfid); + es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(RUNNING, es.getStatus()); + sequence = new String[]{"junit_task_1", "junit_task_3"}; + + validate(wfid, sequence, new String[]{SystemTaskType.DECISION.name(), SystemTaskType.DECISION.name(), "junit_task_1", "junit_task_3", SystemTaskType.DECISION.name()}, 5); + // + + //nested - two + input.put("param1", "nested"); + input.put("param2", "two"); + wfid = startOrLoadWorkflowExecution(COND_TASK_WF + 3, COND_TASK_WF, 1, correlationId, input, null, null); + System.out.println("testCaseStatements.wfid=" + wfid); + assertNotNull(wfid); + sequence = new String[]{"junit_task_2"}; + validate(wfid, sequence, new String[]{SystemTaskType.DECISION.name(), SystemTaskType.DECISION.name(), "junit_task_2", SystemTaskType.DECISION.name()}, 4); + // + + //three + input.put("param1", "three"); + input.put("param2", "two"); + input.put("finalCase", "notify"); + wfid = startOrLoadWorkflowExecution(COND_TASK_WF + 4, COND_TASK_WF, 1, correlationId, input, null, null); + System.out.println("testCaseStatements.wfid=" + wfid); + assertNotNull(wfid); + sequence = new String[]{"junit_task_3", "junit_task_4"}; + validate(wfid, sequence, new String[]{SystemTaskType.DECISION.name(), "junit_task_3", SystemTaskType.DECISION.name(), "junit_task_4"}, 3); + // + + } + + private void validate(String wfid, String[] sequence, String[] executedTasks, int expectedTotalTasks) { + for (int i = 0; i < sequence.length; i++) { + String t = sequence[i]; + Task task = getTask(t); + if (task == null) { + System.out.println("Missing task for " + t + ", below are the workflow tasks completed..."); + Workflow workflow = workflowExecutionService.getExecutionStatus(wfid, true); + for (Task x : workflow.getTasks()) { + System.out.println(x.getTaskType() + "/" + x.getReferenceTaskName()); + } + } + assertNotNull("No task for " + t, task); + assertEquals(wfid, task.getWorkflowInstanceId()); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + Workflow workflow = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(workflow); + assertTrue(!workflow.getTasks().isEmpty()); + if (i < sequence.length - 1) { + assertEquals(RUNNING, workflow.getStatus()); + } else { + workflow = workflowExecutionService.getExecutionStatus(wfid, true); + List workflowTasks = workflow.getTasks(); + assertEquals(workflowTasks.toString(), executedTasks.length, workflowTasks.size()); + for (int k = 0; k < executedTasks.length; k++) { + assertEquals("Tasks: " + workflowTasks.toString() + "\n", executedTasks[k], workflowTasks.get(k).getTaskType()); + } + + assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); + } + } + } + + + private Task getTask(String taskType) { + Task task; + int count = 2; + do { + task = workflowExecutionService.poll(taskType, "junit"); + if (task == null) { + count--; + } + if (count < 0) { + break; + } + + } while (task == null); + if (task != null) { + workflowExecutionService.ackTaskReceived(task.getTaskId()); + } + return task; + } + + @Test + public void testRetries() { + + String taskName = "junit_task_2"; + TaskDef taskDef = notFoundSafeGetTaskDef(taskName); + taskDef.setRetryCount(2); + taskDef.setRetryDelaySeconds(1); + metadataService.updateTaskDef(taskDef); + + metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); + + String correlationId = "unit_test_1"; + Map input = new HashMap(); + 
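+ // junit_task_2 is configured above with retryCount=2 and a 1s retry delay; the completed workflow should therefore contain junit_task_1 plus two junit_task_2 attempts, with the retry linked to the failed attempt via retriedTaskId (asserted at the end of this test).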
String inputParam1 = "p1 value"; + input.put("param1", inputParam1); + input.put("param2", "p2 value"); + String wfid = startOrLoadWorkflowExecution(LINEAR_WORKFLOW_T1_T2, 1, correlationId, input, null, null); + System.out.println("testRetries.wfid=" + wfid); + assertNotNull(wfid); + + List ids = workflowExecutionService.getRunningWorkflows(LINEAR_WORKFLOW_T1_T2); + assertNotNull(ids); + assertTrue("found no ids: " + ids, ids.size() > 0); //if there are concurrent tests running, this would be more than 1 + boolean foundId = false; + for (String id : ids) { + if (id.equals(wfid)) { + foundId = true; + } + } + assertTrue(foundId); + + Workflow es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(RUNNING, es.getStatus()); + + Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); + assertNotNull(task); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + + String param1 = (String) task.getInputData().get("p1"); + String param2 = (String) task.getInputData().get("p2"); + + assertNotNull(param1); + assertNotNull(param2); + assertEquals("p1 value", param1); + assertEquals("p2 value", param2); + + String task1Op = "task1.output->" + param1 + "." + param2; + task.getOutputData().put("op", task1Op); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + //fail the task twice and then succeed + verify(inputParam1, wfid, task1Op, true); + Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS); + verify(inputParam1, wfid, task1Op, false); + + es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(WorkflowStatus.COMPLETED, es.getStatus()); + assertEquals(3, es.getTasks().size()); //task 1, and 2 of the task 2 + + assertEquals("junit_task_1", es.getTasks().get(0).getTaskType()); + assertEquals("junit_task_2", es.getTasks().get(1).getTaskType()); + assertEquals("junit_task_2", es.getTasks().get(2).getTaskType()); + assertEquals(COMPLETED, es.getTasks().get(0).getStatus()); + assertEquals(FAILED, es.getTasks().get(1).getStatus()); + assertEquals(COMPLETED, es.getTasks().get(2).getStatus()); + assertEquals(es.getTasks().get(1).getTaskId(), es.getTasks().get(2).getRetriedTaskId()); + + + } + + @Test + public void testSuccess() { + + metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); + + String correlationId = "unit_test_1" + UUID.randomUUID().toString(); + Map input = new HashMap(); + String inputParam1 = "p1 value"; + input.put("param1", inputParam1); + input.put("param2", "p2 value"); + String wfid = startOrLoadWorkflowExecution(LINEAR_WORKFLOW_T1_T2, 1, correlationId, input, null, null); + assertNotNull(wfid); + + List ids = workflowExecutionService.getRunningWorkflows(LINEAR_WORKFLOW_T1_T2); + assertNotNull(ids); + assertTrue("found no ids: " + ids, ids.size() > 0); //if there are concurrent tests running, this would be more than 1 + boolean foundId = false; + for (String id : ids) { + if (id.equals(wfid)) { + foundId = true; + } + } + assertTrue(foundId); + + /* + * @correlationId + List byCorrelationId = ess.getWorkflowInstances(LINEAR_WORKFLOW_T1_T2, correlationId, false, false); + assertNotNull(byCorrelationId); + assertTrue(!byCorrelationId.isEmpty()); + assertEquals(1, byCorrelationId.size()); + */ + + Workflow es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(RUNNING, es.getStatus()); + // The first task would be marked as scheduled + assertEquals(1, es.getTasks().size()); + 
assertEquals(SCHEDULED, es.getTasks().get(0).getStatus()); + + // decideNow should be idempotent if re-run on the same state! + es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(RUNNING, es.getStatus()); + assertEquals(1, es.getTasks().size()); + Task t = es.getTasks().get(0); + assertEquals(SCHEDULED, t.getStatus()); + + Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + + assertNotNull(task); + assertEquals(t.getTaskId(), task.getTaskId()); + es = workflowExecutionService.getExecutionStatus(wfid, true); + t = es.getTasks().get(0); + assertEquals(IN_PROGRESS, t.getStatus()); + String taskId = t.getTaskId(); + + String param1 = (String) task.getInputData().get("p1"); + String param2 = (String) task.getInputData().get("p2"); + + assertNotNull(param1); + assertNotNull(param2); + assertEquals("p1 value", param1); + assertEquals("p2 value", param2); + + String task1Op = "task1.output->" + param1 + "." + param2; + task.getOutputData().put("op", task1Op); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + // If we get the full workflow here then, last task should be completed and the next task should be scheduled + es = workflowExecutionService.getExecutionStatus(wfid, true); + es.getTasks().forEach(wfTask -> { + if (wfTask.getTaskId().equals(taskId)) { + assertEquals(COMPLETED, wfTask.getStatus()); + } else { + assertEquals(SCHEDULED, wfTask.getStatus()); + } + }); + + task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + assertNotNull(task); + String task2Input = (String) task.getInputData().get("tp2"); + assertNotNull(task2Input); + assertEquals(task1Op, task2Input); + + task2Input = (String) task.getInputData().get("tp1"); + assertNotNull(task2Input); + assertEquals(inputParam1, task2Input); + + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(WorkflowStatus.COMPLETED, es.getStatus()); + // Check the tasks, at this time there should be 2 task + assertEquals(es.getTasks().size(), 2); + es.getTasks().forEach(wfTask -> { + assertEquals(wfTask.getStatus(), COMPLETED); + }); + + System.out.println("Total tasks=" + es.getTasks().size()); + assertTrue(es.getTasks().size() < 10); + + + } + + @Test + public void testDeciderUpdate() { + + metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); + + String correlationId = "unit_test_1" + UUID.randomUUID().toString(); + Map input = new HashMap(); + String inputParam1 = "p1 value"; + input.put("param1", inputParam1); + input.put("param2", "p2 value"); + String wfid = startOrLoadWorkflowExecution(LINEAR_WORKFLOW_T1_T2, 1, correlationId, input, null, null); + assertNotNull(wfid); + + Workflow workflow = workflowExecutor.getWorkflow(wfid, false); + long updated1 = workflow.getUpdateTime(); + Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS); + workflowExecutor.decide(wfid); + workflow = workflowExecutor.getWorkflow(wfid, false); + long updated2 = workflow.getUpdateTime(); + assertEquals(updated1, updated2); + + Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS); + workflowExecutor.terminateWorkflow(wfid, "done"); + workflow = workflowExecutor.getWorkflow(wfid, false); + updated2 = workflow.getUpdateTime(); + 
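// The decide call above did not change the update time; terminating the workflow is expected to advance it. + 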
assertTrue("updated1[" + updated1 + "] >? updated2[" + updated2 + "]", updated2 > updated1); + + } + + @Test + @Ignore + //Ignore for now, will improve this in the future + public void testFailurePoints() { + + metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); + + String correlationId = "unit_test_1" + UUID.randomUUID().toString(); + Map input = new HashMap(); + String inputParam1 = "p1 value"; + input.put("param1", inputParam1); + input.put("param2", "p2 value"); + String wfid = startOrLoadWorkflowExecution(LINEAR_WORKFLOW_T1_T2, 1, correlationId, input, null, null); + assertNotNull(wfid); + + Workflow es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(RUNNING, es.getStatus()); + // The first task would be marked as scheduled + assertEquals(1, es.getTasks().size()); + assertEquals(SCHEDULED, es.getTasks().get(0).getStatus()); + + Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + String taskId = task.getTaskId(); + + String task1Op = "task1.output"; + task.getOutputData().put("op", task1Op); + task.setStatus(COMPLETED); + try { + workflowExecutionService.updateTask(task); + } catch (Exception e) { + workflowExecutionService.updateTask(task); + } + + // If we get the full workflow here then, last task should be completed and the next task should be scheduled + es = workflowExecutionService.getExecutionStatus(wfid, true); + es.getTasks().forEach(wfTask -> { + if (wfTask.getTaskId().equals(taskId)) { + assertEquals(COMPLETED, wfTask.getStatus()); + } else { + assertEquals(SCHEDULED, wfTask.getStatus()); + } + }); + + task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + assertNotNull(task); + String task2Input = (String) task.getInputData().get("tp2"); + assertNotNull(task2Input); + assertEquals(task1Op, task2Input); + + task2Input = (String) task.getInputData().get("tp1"); + assertNotNull(task2Input); + assertEquals(inputParam1, task2Input); + + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(WorkflowStatus.COMPLETED, es.getStatus()); + // Check the tasks, at this time there should be 2 task + assertEquals(es.getTasks().size(), 2); + es.getTasks().forEach(wfTask -> { + assertEquals(wfTask.getStatus(), COMPLETED); + }); + + System.out.println("Total tasks=" + es.getTasks().size()); + assertTrue(es.getTasks().size() < 10); + + + } + + @Test + public void testDeciderMix() throws Exception { + + ExecutorService executors = Executors.newFixedThreadPool(3); + + metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); + + String correlationId = "unit_test_1" + UUID.randomUUID().toString(); + Map input = new HashMap(); + String inputParam1 = "p1 value"; + input.put("param1", inputParam1); + input.put("param2", "p2 value"); + String wfid = startOrLoadWorkflowExecution(LINEAR_WORKFLOW_T1_T2, 1, correlationId, input, null, null); + assertNotNull(wfid); + + List ids = workflowExecutionService.getRunningWorkflows(LINEAR_WORKFLOW_T1_T2); + assertNotNull(ids); + assertTrue("found no ids: " + ids, ids.size() > 0); //if there are concurrent tests running, this would be more than 1 + boolean foundId = false; + for (String id : ids) { + if (id.equals(wfid)) { + foundId = true; + } + } + assertTrue(foundId); + + Workflow es = 
workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(RUNNING, es.getStatus()); + // The first task would be marked as scheduled + assertEquals(1, es.getTasks().size()); + assertEquals(SCHEDULED, es.getTasks().get(0).getStatus()); + + List<Future<?>> futures = new LinkedList<>(); + for (int i = 0; i < 10; i++) { + futures.add(executors.submit(() -> { + workflowExecutor.decide(wfid); + return null; + })); + } + for (Future<?> future : futures) { + future.get(); + } + futures.clear(); + + es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(RUNNING, es.getStatus()); + // The first task would be marked as scheduled + assertEquals(1, es.getTasks().size()); + assertEquals(SCHEDULED, es.getTasks().get(0).getStatus()); + + + // decideNow should be idempotent if re-run on the same state! + workflowExecutor.decide(wfid); + es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(RUNNING, es.getStatus()); + assertEquals(1, es.getTasks().size()); + Task t = es.getTasks().get(0); + assertEquals(SCHEDULED, t.getStatus()); + + Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + + assertNotNull(task); + assertEquals(t.getTaskId(), task.getTaskId()); + es = workflowExecutionService.getExecutionStatus(wfid, true); + t = es.getTasks().get(0); + assertEquals(IN_PROGRESS, t.getStatus()); + String taskId = t.getTaskId(); + + String param1 = (String) task.getInputData().get("p1"); + String param2 = (String) task.getInputData().get("p2"); + + assertNotNull(param1); + assertNotNull(param2); + assertEquals("p1 value", param1); + assertEquals("p2 value", param2); + + String task1Op = "task1.output->" + param1 + "." + param2; + task.getOutputData().put("op", task1Op); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + // If we get the full workflow here then, last task should be completed and the next task should be scheduled + es = workflowExecutionService.getExecutionStatus(wfid, true); + es.getTasks().forEach(wfTask -> { + if (wfTask.getTaskId().equals(taskId)) { + assertEquals(COMPLETED, wfTask.getStatus()); + } else { + assertEquals(SCHEDULED, wfTask.getStatus()); + } + }); + + //Run sweep 10 times!
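+ // Running decide() concurrently on the same workflow must not schedule duplicate copies of the next task; the assertions below expect the workflow to still be RUNNING with exactly two tasks afterwards.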
+ for (int i = 0; i < 10; i++) { + futures.add(executors.submit(() -> { + long s = System.currentTimeMillis(); + workflowExecutor.decide(wfid); + System.out.println("Took " + (System.currentTimeMillis() - s) + " ms to run decider"); + return null; + })); + } + for (Future future : futures) { + future.get(); + } + futures.clear(); + + es = workflowExecutionService.getExecutionStatus(wfid, true); + assertEquals(RUNNING, es.getStatus()); + assertEquals(2, es.getTasks().size()); + + System.out.println("Workflow tasks=" + es.getTasks()); + + task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + assertNotNull(task); + String task2Input = (String) task.getInputData().get("tp2"); + assertNotNull(task2Input); + assertEquals(task1Op, task2Input); + + task2Input = (String) task.getInputData().get("tp1"); + assertNotNull(task2Input); + assertEquals(inputParam1, task2Input); + + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(WorkflowStatus.COMPLETED, es.getStatus()); + // Check the tasks, at this time there should be 2 task + assertEquals(es.getTasks().size(), 2); + es.getTasks().forEach(wfTask -> { + assertEquals(wfTask.getStatus(), COMPLETED); + }); + + System.out.println("Total tasks=" + es.getTasks().size()); + assertTrue(es.getTasks().size() < 10); + } + + @Test + public void testFailures() { + metadataService.getWorkflowDef(FORK_JOIN_WF, 1); + + String taskName = "junit_task_1"; + TaskDef taskDef = notFoundSafeGetTaskDef(taskName); + taskDef.setRetryCount(0); + metadataService.updateTaskDef(taskDef); + + WorkflowDef found = metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); + assertNotNull(found.getFailureWorkflow()); + assertFalse(StringUtils.isBlank(found.getFailureWorkflow())); + + String correlationId = "unit_test_1" + UUID.randomUUID().toString(); + Map input = new HashMap(); + String inputParam1 = "p1 value"; + input.put("param1", inputParam1); + input.put("param2", "p2 value"); + input.put("failureWfName", "FanInOutTest"); + String wfid = startOrLoadWorkflowExecution(LINEAR_WORKFLOW_T1_T2, 1, correlationId, input, null, null); + assertNotNull(wfid); + + Task task = getTask("junit_task_1"); + assertNotNull(task); + task.setStatus(FAILED); + workflowExecutionService.updateTask(task); + + // If we get the full workflow here then, last task should be completed and the next task should be scheduled + Workflow es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(WorkflowStatus.FAILED, es.getStatus()); + + taskDef.setRetryCount(RETRY_COUNT); + metadataService.updateTaskDef(taskDef); + + } + + @Test + public void testRetryWithForkJoin() throws Exception { + String workflowId = this.runAFailedForkJoinWF(); + workflowExecutor.retry(workflowId); + + Workflow workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(workflow.getStatus(), RUNNING); + + printTaskStatuses(workflow, "After retry called"); + + Task t2 = workflowExecutionService.poll("junit_task_0_RT_2", "test"); + assertNotNull(t2); + assertTrue(workflowExecutionService.ackTaskReceived(t2.getTaskId())); + + Task t3 = workflowExecutionService.poll("junit_task_0_RT_3", "test"); + assertNotNull(t3); + assertTrue(workflowExecutionService.ackTaskReceived(t3.getTaskId())); + + t2.setStatus(COMPLETED); + 
t3.setStatus(COMPLETED); + + ExecutorService es = Executors.newFixedThreadPool(2); + Future future1 = es.submit(() -> { + try { + workflowExecutionService.updateTask(t2); + } catch (Exception e) { + throw new RuntimeException(e); + } + + }); + final Task _t3 = t3; + Future future2 = es.submit(() -> { + try { + workflowExecutionService.updateTask(_t3); + } catch (Exception e) { + throw new RuntimeException(e); + } + + }); + future1.get(); + future2.get(); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + + printTaskStatuses(workflow, "T2, T3 complete"); + workflowExecutor.decide(workflowId); + + Task t4 = workflowExecutionService.poll("junit_task_0_RT_4", "test"); + assertNotNull(t4); + t4.setStatus(COMPLETED); + workflowExecutionService.updateTask(t4); + + printTaskStatuses(workflowId, "After complete"); + } + + @Test + public void testRetry() { + String taskName = "junit_task_1"; + TaskDef taskDef = notFoundSafeGetTaskDef(taskName); + int retryCount = taskDef.getRetryCount(); + taskDef.setRetryCount(1); + int retryDelay = taskDef.getRetryDelaySeconds(); + taskDef.setRetryDelaySeconds(0); + metadataService.updateTaskDef(taskDef); + + WorkflowDef workflowDef = metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); + assertNotNull(workflowDef.getFailureWorkflow()); + assertFalse(StringUtils.isBlank(workflowDef.getFailureWorkflow())); + + String correlationId = "unit_test_1" + UUID.randomUUID().toString(); + Map input = new HashMap<>(); + input.put("param1", "p1 value"); + input.put("param2", "p2 value"); + String workflowId = startOrLoadWorkflowExecution("retry", LINEAR_WORKFLOW_T1_T2, 1, correlationId, input, null, null); + assertNotNull(workflowId); + printTaskStatuses(workflowId, "initial"); + + Task task = getTask("junit_task_1"); + assertNotNull(task); + task.setStatus(FAILED); + workflowExecutionService.updateTask(task); + + Workflow workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(RUNNING, workflow.getStatus()); + + task = getTask("junit_task_1"); + assertNotNull(task); + task.setStatus(FAILED); + workflowExecutionService.updateTask(task); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(WorkflowStatus.FAILED, workflow.getStatus()); + + printTaskStatuses(workflowId, "before retry"); + + workflowExecutor.retry(workflowId); + + printTaskStatuses(workflowId, "after retry"); + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(RUNNING, workflow.getStatus()); + + task = getTask("junit_task_1"); + assertNotNull(task); + assertEquals(workflowId, task.getWorkflowInstanceId()); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(RUNNING, workflow.getStatus()); + + task = getTask("junit_task_2"); + assertNotNull(task); + assertEquals(workflowId, task.getWorkflowInstanceId()); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); + + assertEquals(3, workflow.getTasks().stream().filter(t -> t.getTaskType().equals("junit_task_1")).count()); + + taskDef.setRetryCount(retryCount); + 
taskDef.setRetryDelaySeconds(retryDelay); + metadataService.updateTaskDef(taskDef); + + printTaskStatuses(workflowId, "final"); + + } + + @Test + public void testNonRestartartableWorkflows() { + String taskName = "junit_task_1"; + TaskDef taskDef = notFoundSafeGetTaskDef(taskName); + taskDef.setRetryCount(0); + metadataService.updateTaskDef(taskDef); + + WorkflowDef found = metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); + found.setName(JUNIT_TEST_WF_NON_RESTARTABLE); + found.setRestartable(false); + metadataService.updateWorkflowDef(found); + + assertNotNull(found); + assertNotNull(found.getFailureWorkflow()); + assertFalse(StringUtils.isBlank(found.getFailureWorkflow())); + + String correlationId = "unit_test_1" + UUID.randomUUID().toString(); + Map input = new HashMap<>(); + String inputParam1 = "p1 value"; + input.put("param1", inputParam1); + input.put("param2", "p2 value"); + String workflowId = startOrLoadWorkflowExecution(JUNIT_TEST_WF_NON_RESTARTABLE, 1, correlationId, input, null, null); + assertNotNull(workflowId); + + Task task = getTask("junit_task_1"); + task.setStatus(FAILED); + workflowExecutionService.updateTask(task); + + Workflow workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(WorkflowStatus.FAILED, workflow.getStatus()); + + workflowExecutor.rewind(workflow.getWorkflowId()); + + // Polling for the first task should return the same task as before + task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); + assertNotNull(task); + assertEquals("junit_task_1", task.getTaskType()); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + assertEquals(workflowId, task.getWorkflowInstanceId()); + + List tasks = workflowExecutionService.getTasks(task.getTaskType(), null, 1); + assertNotNull(tasks); + assertEquals(1, tasks.size()); + task = tasks.get(0); + assertEquals(workflowId, task.getWorkflowInstanceId()); + + String task1Op = "task1.Done"; + task.getOutputData().put("op", task1Op); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, false); + assertNotNull(workflow); + assertNotNull(workflow.getOutput()); + + task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); + assertNotNull(task); + assertEquals("junit_task_2", task.getTaskType()); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + + String task2Input = (String) task.getInputData().get("tp2"); + assertNotNull("Found=" + task.getInputData(), task2Input); + assertEquals(task1Op, task2Input); + + task2Input = (String) task.getInputData().get("tp1"); + assertNotNull(task2Input); + assertEquals(inputParam1, task2Input); + + task.setStatus(COMPLETED); + task.setReasonForIncompletion("unit test failure"); + workflowExecutionService.updateTask(task); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); + tasks = workflow.getTasks(); + assertNotNull(tasks); + assertEquals(2, tasks.size()); + assertTrue("Found " + workflow.getOutput().toString(), workflow.getOutput().containsKey("o3")); + assertEquals("task1.Done", workflow.getOutput().get("o3")); + + expectedException.expect(ApplicationException.class); + expectedException.expectMessage(String.format("is an instance of WorkflowDef: %s and version: %d and is non restartable", JUNIT_TEST_WF_NON_RESTARTABLE, 1)); + 
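// A second rewind of the now-completed non-restartable workflow is expected to be rejected with the message registered above. + 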
workflowExecutor.rewind(workflow.getWorkflowId()); + } + + + @Test + public void testRestart() { + String taskName = "junit_task_1"; + TaskDef taskDef = notFoundSafeGetTaskDef(taskName); + taskDef.setRetryCount(0); + metadataService.updateTaskDef(taskDef); + + WorkflowDef found = metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); + assertNotNull(found.getFailureWorkflow()); + assertFalse(StringUtils.isBlank(found.getFailureWorkflow())); + + String correlationId = "unit_test_1" + UUID.randomUUID().toString(); + Map input = new HashMap(); + String inputParam1 = "p1 value"; + input.put("param1", inputParam1); + input.put("param2", "p2 value"); + String wfid = startOrLoadWorkflowExecution(LINEAR_WORKFLOW_T1_T2, 1, correlationId, input, null, null); + assertNotNull(wfid); + + Task task = getTask("junit_task_1"); + task.setStatus(FAILED); + workflowExecutionService.updateTask(task); + + // If we get the full workflow here then, last task should be completed and the next task should be scheduled + Workflow es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(WorkflowStatus.FAILED, es.getStatus()); + + workflowExecutor.rewind(es.getWorkflowId()); + es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(RUNNING, es.getStatus()); + + task = getTask("junit_task_1"); + assertNotNull(task); + assertEquals(wfid, task.getWorkflowInstanceId()); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(RUNNING, es.getStatus()); + + task = getTask("junit_task_2"); + assertNotNull(task); + assertEquals(wfid, task.getWorkflowInstanceId()); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(WorkflowStatus.COMPLETED, es.getStatus()); + } + + + @Test + public void testTimeout() throws Exception { + + String taskName = "junit_task_1"; + TaskDef taskDef = notFoundSafeGetTaskDef(taskName); + taskDef.setRetryCount(1); + taskDef.setTimeoutSeconds(1); + taskDef.setRetryDelaySeconds(0); + taskDef.setTimeoutPolicy(TimeoutPolicy.RETRY); + metadataService.updateTaskDef(taskDef); + + WorkflowDef found = metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); + assertNotNull(found.getFailureWorkflow()); + assertFalse(StringUtils.isBlank(found.getFailureWorkflow())); + + String correlationId = "unit_test_1" + UUID.randomUUID().toString(); + Map input = new HashMap(); + String inputParam1 = "p1 value"; + input.put("param1", inputParam1); + input.put("param2", "p2 value"); + input.put("failureWfName", "FanInOutTest"); + String wfid = startOrLoadWorkflowExecution("timeout", LINEAR_WORKFLOW_T1_T2, 1, correlationId, input, null, null); + assertNotNull(wfid); + + //Ensure that we have a workflow queued up for evaluation here... 
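+ // The decider queue is expected to hold exactly one entry for this workflow while it is being evaluated.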
+ long size = queueDAO.getSize(WorkflowExecutor.DECIDER_QUEUE); + assertEquals(1, size); + + // If we get the full workflow here then, last task should be completed and the next task should be scheduled + Workflow es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(RUNNING, es.getStatus()); + assertEquals("found: " + es.getTasks().stream().map(Task::toString).collect(Collectors.toList()), 1, es.getTasks().size()); + + Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); + assertNotNull(task); + assertEquals(wfid, task.getWorkflowInstanceId()); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + + + //Ensure that we have a workflow queued up for evaluation here... + size = queueDAO.getSize(WorkflowExecutor.DECIDER_QUEUE); + assertEquals(1, size); + + + Uninterruptibles.sleepUninterruptibly(3, TimeUnit.SECONDS); + workflowSweeper.sweep(Arrays.asList(wfid), workflowExecutor); + es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals("found: " + es.getTasks().stream().map(Task::toString).collect(Collectors.toList()), 2, es.getTasks().size()); + + Task task1 = es.getTasks().get(0); + assertEquals(TIMED_OUT, task1.getStatus()); + Task task2 = es.getTasks().get(1); + assertEquals(SCHEDULED, task2.getStatus()); + + task = workflowExecutionService.poll(task2.getTaskDefName(), "task1.junit.worker"); + assertNotNull(task); + assertEquals(wfid, task.getWorkflowInstanceId()); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + + Uninterruptibles.sleepUninterruptibly(3, TimeUnit.SECONDS); + workflowExecutor.decide(wfid); + + es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(2, es.getTasks().size()); + + assertEquals(TIMED_OUT, es.getTasks().get(0).getStatus()); + assertEquals(TIMED_OUT, es.getTasks().get(1).getStatus()); + assertEquals(WorkflowStatus.TIMED_OUT, es.getStatus()); + + assertEquals(1, queueDAO.getSize(WorkflowExecutor.DECIDER_QUEUE)); + + taskDef.setTimeoutSeconds(0); + taskDef.setRetryCount(RETRY_COUNT); + metadataService.updateTaskDef(taskDef); + } + + @Test + public void testReruns() { + + metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); + + String correlationId = "unit_test_1" + UUID.randomUUID().toString(); + Map<String, Object> input = new HashMap<>(); + String inputParam1 = "p1 value"; + input.put("param1", inputParam1); + input.put("param2", "p2 value"); + String wfid = startOrLoadWorkflowExecution(LINEAR_WORKFLOW_T1_T2, 1, correlationId, input, null, null); + assertNotNull(wfid); + + Workflow es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(RUNNING, es.getStatus()); + // Check the tasks, at this time there should be 1 task + assertEquals(es.getTasks().size(), 1); + Task t = es.getTasks().get(0); + assertEquals(SCHEDULED, t.getStatus()); + + Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); + assertNotNull(task); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + assertEquals(t.getTaskId(), task.getTaskId()); + + String param1 = (String) task.getInputData().get("p1"); + String param2 = (String) task.getInputData().get("p2"); + + assertNotNull(param1); + assertNotNull(param2); + assertEquals("p1 value", param1); + assertEquals("p2 value", param2); + + String task1Op = "task1.output->" + param1 + "." 
+ param2; + task.getOutputData().put("op", task1Op); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + // If we get the full workflow here then, last task should be completed and the next task should be scheduled + es = workflowExecutionService.getExecutionStatus(wfid, true); + es.getTasks().forEach(wfTask -> { + if (wfTask.getTaskId().equals(t.getTaskId())) { + assertEquals(wfTask.getStatus(), COMPLETED); + } else { + assertEquals(wfTask.getStatus(), SCHEDULED); + } + }); + + task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); + assertNotNull(task); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + String task2Input = (String) task.getInputData().get("tp2"); + assertNotNull(task2Input); + assertEquals(task1Op, task2Input); + + task2Input = (String) task.getInputData().get("tp1"); + assertNotNull(task2Input); + assertEquals(inputParam1, task2Input); + + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(WorkflowStatus.COMPLETED, es.getStatus()); + + // Now let's rerun the workflow from the second task + RerunWorkflowRequest request = new RerunWorkflowRequest(); + request.setReRunFromWorkflowId(wfid); + request.setReRunFromTaskId(es.getTasks().get(1).getTaskId()); + + String reRunwfid = workflowExecutor.rerun(request); + + Workflow esRR = workflowExecutionService.getExecutionStatus(reRunwfid, true); + assertNotNull(esRR); + assertEquals(esRR.getReasonForIncompletion(), RUNNING, esRR.getStatus()); + // Check the tasks, at this time there should be 2 tasks + // the first one is carried over as completed and the second one is scheduled + assertEquals(esRR.getTasks().toString(), 2, esRR.getTasks().size()); + assertEquals(COMPLETED, esRR.getTasks().get(0).getStatus()); + Task tRR = esRR.getTasks().get(1); + assertEquals(esRR.getTasks().toString(), SCHEDULED, tRR.getStatus()); + assertEquals(tRR.getTaskType(), "junit_task_2"); + + task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); + assertNotNull(task); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + task2Input = (String) task.getInputData().get("tp2"); + assertNotNull(task2Input); + assertEquals(task1Op, task2Input); + + task2Input = (String) task.getInputData().get("tp1"); + assertNotNull(task2Input); + assertEquals(inputParam1, task2Input); + + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + es = workflowExecutionService.getExecutionStatus(reRunwfid, true); + assertNotNull(es); + assertEquals(WorkflowStatus.COMPLETED, es.getStatus()); + + ////////////////////// + // Now rerun the entire workflow + RerunWorkflowRequest request1 = new RerunWorkflowRequest(); + request1.setReRunFromWorkflowId(wfid); + + String reRunwfid1 = workflowExecutor.rerun(request1); + + es = workflowExecutionService.getExecutionStatus(reRunwfid1, true); + assertNotNull(es); + assertEquals(RUNNING, es.getStatus()); + // Check the tasks, at this time there should be 1 task + assertEquals(es.getTasks().size(), 1); + assertEquals(SCHEDULED, es.getTasks().get(0).getStatus()); + + task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); + assertNotNull(task); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + + task.getOutputData().put("op", task1Op); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + task = 
workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); + assertNotNull(task); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(WorkflowStatus.COMPLETED, es.getStatus()); + } + + @Test + public void testTaskSkipping() { + + String taskName = "junit_task_1"; + TaskDef taskDef = notFoundSafeGetTaskDef(taskName); + taskDef.setRetryCount(0); + taskDef.setTimeoutSeconds(0); + metadataService.updateTaskDef(taskDef); + + + metadataService.getWorkflowDef(TEST_WORKFLOW_NAME_3, 1); + + String correlationId = "unit_test_1" + UUID.randomUUID().toString(); + Map input = new HashMap(); + String inputParam1 = "p1 value"; + input.put("param1", inputParam1); + input.put("param2", "p2 value"); + String wfid = startOrLoadWorkflowExecution(TEST_WORKFLOW_NAME_3, 1, correlationId, input, null, null); + assertNotNull(wfid); + + // Now Skip the second task + workflowExecutor.skipTaskFromWorkflow(wfid, "t2", null); + + Workflow es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(RUNNING, es.getStatus()); + // Check the tasks, at this time there should be 3 task + assertEquals(2, es.getTasks().size()); + assertEquals(SCHEDULED, es.getTasks().get(0).getStatus()); + assertEquals(Status.SKIPPED, es.getTasks().get(1).getStatus()); + + Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); + assertNotNull(task); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + + assertEquals("t1", task.getReferenceTaskName()); + + String param1 = (String) task.getInputData().get("p1"); + String param2 = (String) task.getInputData().get("p2"); + + assertNotNull(param1); + assertNotNull(param2); + assertEquals("p1 value", param1); + assertEquals("p2 value", param2); + + String task1Op = "task1.output->" + param1 + "." 
+ param2; + task.getOutputData().put("op", task1Op); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + // If we get the full workflow here then, last task should be completed and the next task should be scheduled + es = workflowExecutionService.getExecutionStatus(wfid, true); + es.getTasks().forEach(wfTask -> { + if (wfTask.getReferenceTaskName().equals("t1")) { + assertEquals(COMPLETED, wfTask.getStatus()); + } else if (wfTask.getReferenceTaskName().equals("t2")) { + assertEquals(Status.SKIPPED, wfTask.getStatus()); + } else { + assertEquals(SCHEDULED, wfTask.getStatus()); + } + }); + + task = workflowExecutionService.poll("junit_task_3", "task3.junit.worker"); + assertNotNull(task); + assertEquals(IN_PROGRESS, task.getStatus()); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(WorkflowStatus.COMPLETED, es.getStatus()); + } + + @Test + public void testPauseResume() { + + metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); + + String correlationId = "unit_test_1" + System.nanoTime(); + Map input = new HashMap(); + String inputParam1 = "p1 value"; + input.put("param1", inputParam1); + input.put("param2", "p2 value"); + String wfid = startOrLoadWorkflowExecution(LINEAR_WORKFLOW_T1_T2, 1, correlationId, input, null, null); + + assertNotNull(wfid); + + List ids = workflowExecutionService.getRunningWorkflows(LINEAR_WORKFLOW_T1_T2); + assertNotNull(ids); + assertTrue("found no ids: " + ids, ids.size() > 0); //if there are concurrent tests running, this would be more than 1 + boolean foundId = false; + for (String id : ids) { + if (id.equals(wfid)) { + foundId = true; + } + } + assertTrue(foundId); + + Workflow es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(RUNNING, es.getStatus()); + + es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(RUNNING, es.getStatus()); + Task t = es.getTasks().get(0); + assertEquals(SCHEDULED, t.getStatus()); + + // PAUSE + workflowExecutor.pauseWorkflow(wfid); + + // The workflow is paused but the scheduled task should be pollable + + Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); + assertNotNull(task); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + assertEquals(t.getTaskId(), task.getTaskId()); + + String param1 = (String) task.getInputData().get("p1"); + String param2 = (String) task.getInputData().get("p2"); + + assertNotNull(param1); + assertNotNull(param2); + assertEquals("p1 value", param1); + assertEquals("p2 value", param2); + + String task1Op = "task1.output->" + param1 + "." 
+ param2; + task.getOutputData().put("op", task1Op); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + // This decide should not schedule the next task + //ds.decideNow(wfid, task); + + // If we get the full workflow here then, last task should be completed and the rest (including PAUSE task) should be scheduled + es = workflowExecutionService.getExecutionStatus(wfid, true); + es.getTasks().forEach(wfTask -> { + if (wfTask.getTaskId().equals(t.getTaskId())) { + assertEquals(wfTask.getStatus(), COMPLETED); + } + }); + + // This should return null as workflow is paused + task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); + assertNull("Found: " + task, task); + + // Even if decide is run again the next task will not be scheduled as the workflow is still paused-- + workflowExecutor.decide(wfid); + + task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); + assertTrue(task == null); + + // RESUME + workflowExecutor.resumeWorkflow(wfid); + + // Now polling should get the second task + task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); + assertNotNull(task); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + + + String task2Input = (String) task.getInputData().get("tp2"); + assertNotNull(task2Input); + assertEquals(task1Op, task2Input); + + Task byRefName = workflowExecutionService.getPendingTaskForWorkflow("t2", wfid); + assertNotNull(byRefName); + assertEquals(task.getTaskId(), byRefName.getTaskId()); + + task2Input = (String) task.getInputData().get("tp1"); + assertNotNull(task2Input); + assertEquals(inputParam1, task2Input); + + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + es = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(es); + assertEquals(WorkflowStatus.COMPLETED, es.getStatus()); + } + + @Test + public void testSubWorkflow() { + + createSubWorkflow(); + metadataService.getWorkflowDef(WF_WITH_SUB_WF, 1); + Map input = new HashMap<>(); + input.put("param1", "param 1 value"); + input.put("param3", "param 2 value"); + input.put("wfName", LINEAR_WORKFLOW_T1_T2); + String wfId = startOrLoadWorkflowExecution(WF_WITH_SUB_WF, 1, "test", input, null, null); + assertNotNull(wfId); + + Workflow es = workflowExecutionService.getExecutionStatus(wfId, true); + assertNotNull(es); + + Task task = workflowExecutionService.poll("junit_task_5", "test"); + assertNotNull(task); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); + + es = workflowExecutionService.getExecutionStatus(wfId, true); + assertNotNull(es); + assertNotNull(es.getTasks()); + + task = es.getTasks().stream().filter(t -> t.getTaskType().equals(TaskType.SUB_WORKFLOW.name())).findAny().get(); + assertNotNull(task); + assertNotNull(task.getOutputData()); + assertNotNull("Output: " + task.getOutputData().toString() + ", status: " + task.getStatus(), task.getOutputData().get("subWorkflowId")); + String subWorkflowId = task.getOutputData().get("subWorkflowId").toString(); + + es = workflowExecutionService.getExecutionStatus(subWorkflowId, true); + assertNotNull(es); + assertNotNull(es.getTasks()); + assertEquals(wfId, es.getParentWorkflowId()); + assertEquals(RUNNING, es.getStatus()); + + task = workflowExecutionService.poll("junit_task_1", "test"); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + task = workflowExecutionService.poll("junit_task_2", 
"test"); + assertEquals(subWorkflowId, task.getWorkflowInstanceId()); + String uuid = UUID.randomUUID().toString(); + task.getOutputData().put("uuid", uuid); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + es = workflowExecutionService.getExecutionStatus(subWorkflowId, true); + assertNotNull(es); + assertEquals(WorkflowStatus.COMPLETED, es.getStatus()); + assertNotNull(es.getOutput()); + assertTrue(es.getOutput().containsKey("o1")); + assertTrue(es.getOutput().containsKey("o2")); + assertEquals("sub workflow input param1", es.getOutput().get("o1")); + assertEquals(uuid, es.getOutput().get("o2")); + + task = workflowExecutionService.poll("junit_task_6", "test"); + assertNotNull(task); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + es = workflowExecutionService.getExecutionStatus(wfId, true); + assertNotNull(es); + assertEquals(WorkflowStatus.COMPLETED, es.getStatus()); + } + + @Test + public void testSubWorkflowFailure() { + + TaskDef taskDef = notFoundSafeGetTaskDef("junit_task_1"); + assertNotNull(taskDef); + taskDef.setRetryCount(0); + taskDef.setTimeoutSeconds(2); + metadataService.updateTaskDef(taskDef); + + + createSubWorkflow(); + metadataService.getWorkflowDef(WF_WITH_SUB_WF, 1); + + Map input = new HashMap<>(); + input.put("param1", "param 1 value"); + input.put("param3", "param 2 value"); + input.put("wfName", LINEAR_WORKFLOW_T1_T2); + String wfId = startOrLoadWorkflowExecution(WF_WITH_SUB_WF, 1, "test", input, null, null); + assertNotNull(wfId); + + Workflow es = workflowExecutionService.getExecutionStatus(wfId, true); + assertNotNull(es); + + Task task = workflowExecutionService.poll("junit_task_5", "test"); + assertNotNull(task); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); + + es = workflowExecutionService.getExecutionStatus(wfId, true); + assertNotNull(es); + assertNotNull(es.getTasks()); + task = es.getTasks().stream().filter(t -> t.getTaskType().equals(TaskType.SUB_WORKFLOW.name())).findAny().get(); + assertNotNull(task); + assertNotNull(task.getOutputData()); + assertNotNull(task.getOutputData().get("subWorkflowId")); + String subWorkflowId = task.getOutputData().get("subWorkflowId").toString(); + + es = workflowExecutionService.getExecutionStatus(subWorkflowId, true); + assertNotNull(es); + assertNotNull(es.getTasks()); + + assertEquals(wfId, es.getParentWorkflowId()); + assertEquals(RUNNING, es.getStatus()); + + task = workflowExecutionService.poll("junit_task_1", "test"); + assertNotNull(task); + task.setStatus(FAILED); + workflowExecutionService.updateTask(task); + + es = workflowExecutionService.getExecutionStatus(subWorkflowId, true); + assertNotNull(es); + assertEquals(WorkflowStatus.FAILED, es.getStatus()); + workflowExecutor.executeSystemTask(subworkflow, es.getParentWorkflowTaskId(), 1); + es = workflowExecutionService.getExecutionStatus(wfId, true); + assertEquals(WorkflowStatus.FAILED, es.getStatus()); + + taskDef.setTimeoutSeconds(0); + taskDef.setRetryCount(RETRY_COUNT); + metadataService.updateTaskDef(taskDef); + } + + @Test + public void testSubWorkflowFailureInverse() { + + TaskDef taskDef = notFoundSafeGetTaskDef("junit_task_1"); + assertNotNull(taskDef); + taskDef.setRetryCount(0); + taskDef.setTimeoutSeconds(2); + metadataService.updateTaskDef(taskDef); + + + createSubWorkflow(); + + WorkflowDef found = metadataService.getWorkflowDef(WF_WITH_SUB_WF, 1); + assertNotNull(found); + Map input = new 
HashMap<>(); + input.put("param1", "param 1 value"); + input.put("param3", "param 2 value"); + input.put("wfName", LINEAR_WORKFLOW_T1_T2); + String wfId = startOrLoadWorkflowExecution(WF_WITH_SUB_WF, 1, "test", input, null, null); + assertNotNull(wfId); + + Workflow es = workflowExecutionService.getExecutionStatus(wfId, true); + assertNotNull(es); + + Task task = workflowExecutionService.poll("junit_task_5", "test"); + assertNotNull(task); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); + + es = workflowExecutionService.getExecutionStatus(wfId, true); + assertNotNull(es); + assertNotNull(es.getTasks()); + task = es.getTasks().stream().filter(t -> t.getTaskType().equals(TaskType.SUB_WORKFLOW.name())).findAny().get(); + assertNotNull(task); + assertNotNull(task.getOutputData()); + assertNotNull(task.getOutputData().get("subWorkflowId")); + String subWorkflowId = task.getOutputData().get("subWorkflowId").toString(); + + es = workflowExecutionService.getExecutionStatus(subWorkflowId, true); + assertNotNull(es); + assertNotNull(es.getTasks()); + assertEquals(wfId, es.getParentWorkflowId()); + assertEquals(RUNNING, es.getStatus()); + + workflowExecutor.terminateWorkflow(wfId, "fail"); + es = workflowExecutionService.getExecutionStatus(wfId, true); + assertEquals(WorkflowStatus.TERMINATED, es.getStatus()); + + es = workflowExecutionService.getExecutionStatus(subWorkflowId, true); + assertEquals(WorkflowStatus.TERMINATED, es.getStatus()); + + } + + @Test + public void testSubWorkflowRetry() { + String taskName = "junit_task_1"; + TaskDef taskDef = notFoundSafeGetTaskDef(taskName); + int retryCount = notFoundSafeGetTaskDef(taskName).getRetryCount(); + taskDef.setRetryCount(0); + metadataService.updateTaskDef(taskDef); + + // create a workflow with sub-workflow + createSubWorkflow(); + WorkflowDef found = metadataService.getWorkflowDef(WF_WITH_SUB_WF, 1); + + // start the workflow + Map workflowInputParams = new HashMap<>(); + workflowInputParams.put("param1", "param 1"); + workflowInputParams.put("param3", "param 2"); + workflowInputParams.put("wfName", LINEAR_WORKFLOW_T1_T2); + String workflowId = startOrLoadWorkflowExecution(WF_WITH_SUB_WF, 1, "test", workflowInputParams, null, null); + assertNotNull(workflowId); + + Workflow workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + + // poll and complete first task + Task task = workflowExecutionService.poll("junit_task_5", "test"); + assertNotNull(task); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertNotNull(workflow.getTasks()); + assertEquals(2, workflow.getTasks().size()); + + task = workflow.getTasks().stream().filter(t -> t.getTaskType().equals(TaskType.SUB_WORKFLOW.name())).findAny().orElse(null); + assertNotNull(task); + assertNotNull(task.getOutputData()); + assertNotNull("Output: " + task.getOutputData().toString() + ", status: " + task.getStatus(), task.getOutputData().get("subWorkflowId")); + String subWorkflowId = task.getOutputData().get("subWorkflowId").toString(); + + workflow = workflowExecutionService.getExecutionStatus(subWorkflowId, true); + assertNotNull(workflow); + assertNotNull(workflow.getTasks()); + assertEquals(workflowId, workflow.getParentWorkflowId()); + assertEquals(RUNNING, workflow.getStatus()); + + // poll and fail the first task in 
sub-workflow + task = workflowExecutionService.poll("junit_task_1", "test"); + task.setStatus(FAILED); + workflowExecutionService.updateTask(task); + + workflow = workflowExecutionService.getExecutionStatus(subWorkflowId, true); + assertNotNull(workflow); + assertEquals(WorkflowStatus.FAILED, workflow.getStatus()); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(WorkflowStatus.FAILED, workflow.getStatus()); + + // Retry the failed sub workflow + workflowExecutor.retry(subWorkflowId); + task = workflowExecutionService.poll("junit_task_1", "test"); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + workflow = workflowExecutionService.getExecutionStatus(subWorkflowId, true); + assertNotNull(workflow); + assertEquals(RUNNING, workflow.getStatus()); + + task = workflowExecutionService.poll("junit_task_2", "test"); + assertEquals(subWorkflowId, task.getWorkflowInstanceId()); + String uuid = UUID.randomUUID().toString(); + task.getOutputData().put("uuid", uuid); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + workflow = workflowExecutionService.getExecutionStatus(subWorkflowId, true); + assertNotNull(workflow); + assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); + assertNotNull(workflow.getOutput()); + assertTrue(workflow.getOutput().containsKey("o1")); + assertTrue(workflow.getOutput().containsKey("o2")); + assertEquals("sub workflow input param1", workflow.getOutput().get("o1")); + assertEquals(uuid, workflow.getOutput().get("o2")); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(RUNNING, workflow.getStatus()); + + task = workflowExecutionService.poll("junit_task_6", "test"); + assertNotNull(task); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); + + // reset retry count + taskDef = notFoundSafeGetTaskDef(taskName); + taskDef.setRetryCount(retryCount); + metadataService.updateTaskDef(taskDef); + } + + + @Test + public void testWait() { + + WorkflowDef workflowDef = new WorkflowDef(); + workflowDef.setName("test_wait"); + workflowDef.setSchemaVersion(2); + + WorkflowTask waitWorkflowTask = new WorkflowTask(); + waitWorkflowTask.setWorkflowTaskType(TaskType.WAIT); + waitWorkflowTask.setName("wait"); + waitWorkflowTask.setTaskReferenceName("wait0"); + + WorkflowTask workflowTask = new WorkflowTask(); + workflowTask.setName("junit_task_1"); + workflowTask.setTaskReferenceName("t1"); + + workflowDef.getTasks().add(waitWorkflowTask); + workflowDef.getTasks().add(workflowTask); + metadataService.registerWorkflowDef(workflowDef); + + String workflowId = startOrLoadWorkflowExecution(workflowDef.getName(), workflowDef.getVersion(), "", new HashMap<>(), null, null); + Workflow workflow = workflowExecutor.getWorkflow(workflowId, true); + assertNotNull(workflow); + assertEquals(1, workflow.getTasks().size()); + assertEquals(RUNNING, workflow.getStatus()); + + Task waitTask = workflow.getTasks().get(0); + assertEquals(TaskType.WAIT.name(), waitTask.getTaskType()); + waitTask.setStatus(COMPLETED); + workflowExecutor.updateTask(new TaskResult(waitTask)); + + Task task = workflowExecutionService.poll("junit_task_1", "test"); + assertNotNull(task); + task.setStatus(Status.COMPLETED); + 
workflowExecutionService.updateTask(task); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals("tasks:" + workflow.getTasks(), WorkflowStatus.COMPLETED, workflow.getStatus()); + } + + @Test + public void testEventWorkflow() { + + TaskDef taskDef = new TaskDef(); + taskDef.setName("eventX"); + taskDef.setTimeoutSeconds(1); + + metadataService.registerTaskDef(Collections.singletonList(taskDef)); + + WorkflowDef workflowDef = new WorkflowDef(); + workflowDef.setName("test_event"); + workflowDef.setSchemaVersion(2); + + WorkflowTask eventWorkflowTask = new WorkflowTask(); + eventWorkflowTask.setWorkflowTaskType(TaskType.EVENT); + eventWorkflowTask.setName("eventX"); + eventWorkflowTask.setTaskReferenceName("wait0"); + eventWorkflowTask.setSink("conductor"); + + WorkflowTask workflowTask = new WorkflowTask(); + workflowTask.setName("junit_task_1"); + workflowTask.setTaskReferenceName("t1"); + + workflowDef.getTasks().add(eventWorkflowTask); + workflowDef.getTasks().add(workflowTask); + metadataService.registerWorkflowDef(workflowDef); + + String workflowId = startOrLoadWorkflowExecution(workflowDef.getName(), workflowDef.getVersion(), "", new HashMap<>(), null, null); + Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); + Workflow workflow = workflowExecutor.getWorkflow(workflowId, true); + assertNotNull(workflow); + + Task eventTask = workflow.getTasks().get(0); + assertEquals(TaskType.EVENT.name(), eventTask.getTaskType()); + assertEquals(COMPLETED, eventTask.getStatus()); + assertTrue(!eventTask.getOutputData().isEmpty()); + assertNotNull(eventTask.getOutputData().get("event_produced")); + + Task task = workflowExecutionService.poll("junit_task_1", "test"); + assertNotNull(task); + task.setStatus(Status.COMPLETED); + workflowExecutionService.updateTask(task); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals("tasks:" + workflow.getTasks(), WorkflowStatus.COMPLETED, workflow.getStatus()); + } + + @Test + public void testTaskWithCallbackAfterSecondsInWorkflow() { + WorkflowDef workflowDef = metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); + assertNotNull(workflowDef); + + String workflowId = startOrLoadWorkflowExecution(workflowDef.getName(), workflowDef.getVersion(), "", new HashMap<>(), null, null); + Workflow workflow = workflowExecutor.getWorkflow(workflowId, true); + assertNotNull(workflow); + + Task task = workflowExecutionService.poll("junit_task_1", "test"); + assertNotNull(task); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + + String taskId = task.getTaskId(); + task.setStatus(IN_PROGRESS); + task.setCallbackAfterSeconds(5L); + workflowExecutionService.updateTask(task); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(1, workflow.getTasks().size()); + + // task should not be available + task = workflowExecutionService.poll("junit_task_1", "test"); + assertNull(task); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(1, workflow.getTasks().size()); + + Uninterruptibles.sleepUninterruptibly(5, TimeUnit.SECONDS); + + task = workflowExecutionService.poll("junit_task_1", "test"); + assertNotNull(task); + assertEquals(taskId, task.getTaskId()); + + task.setStatus(Status.COMPLETED); + workflowExecutionService.updateTask(task); + + workflow = 
workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(2, workflow.getTasks().size()); + + task = workflowExecutionService.poll("junit_task_2", "test"); + assertNotNull(task); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + + taskId = task.getTaskId(); + task.setStatus(IN_PROGRESS); + task.setCallbackAfterSeconds(5L); + workflowExecutionService.updateTask(task); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(2, workflow.getTasks().size()); + + // task should not be available + task = workflowExecutionService.poll("junit_task_1", "test"); + assertNull(task); + + Uninterruptibles.sleepUninterruptibly(5, TimeUnit.SECONDS); + + task = workflowExecutionService.poll("junit_task_2", "test"); + assertNotNull(task); + assertEquals(taskId, task.getTaskId()); + + task.setStatus(Status.COMPLETED); + workflowExecutionService.updateTask(task); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(2, workflow.getTasks().size()); + assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); + } + + @Test + public void testWorkflowUsingExternalPayloadStorage() { + WorkflowDef found = metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); + assertNotNull(found); + + Map outputParameters = found.getOutputParameters(); + outputParameters.put("workflow_output", "${t1.output.op}"); + metadataService.updateWorkflowDef(found); + + String workflowInputPath = "workflow/input"; + String correlationId = "wf_external_storage"; + String workflowId = workflowExecutor.startWorkflow(LINEAR_WORKFLOW_T1_T2, 1, correlationId, null, workflowInputPath, null, null); + assertNotNull(workflowId); + + Workflow workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertTrue("The workflow input should not be persisted", workflow.getInput().isEmpty()); + assertEquals(workflowInputPath, workflow.getExternalInputPayloadStoragePath()); + assertEquals(workflow.getReasonForIncompletion(), WorkflowStatus.RUNNING, workflow.getStatus()); + assertEquals(1, workflow.getTasks().size()); + + // Polling for the first task + Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); + assertNotNull(task); + assertEquals("junit_task_1", task.getTaskType()); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + assertEquals(workflowId, task.getWorkflowInstanceId()); + + // update first task with COMPLETED + String taskOutputPath = "task/output"; + task.setOutputData(null); + task.setExternalOutputPayloadStoragePath(taskOutputPath); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertTrue("The workflow input should not be persisted", workflow.getInput().isEmpty()); + assertEquals(workflowInputPath, workflow.getExternalInputPayloadStoragePath()); + assertEquals(workflow.getReasonForIncompletion(), WorkflowStatus.RUNNING, workflow.getStatus()); + assertEquals(2, workflow.getTasks().size()); + assertTrue("The first task output should not be persisted", workflow.getTasks().get(0).getOutputData().isEmpty()); + assertTrue("The second task input should not be persisted", workflow.getTasks().get(1).getInputData().isEmpty()); + assertEquals(taskOutputPath, 
workflow.getTasks().get(0).getExternalOutputPayloadStoragePath()); + assertEquals("task/input", workflow.getTasks().get(1).getExternalInputPayloadStoragePath()); + + // Polling for the second task + task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); + assertNotNull(task); + assertEquals("junit_task_2", task.getTaskType()); + assertTrue(task.getInputData().isEmpty()); + assertNotNull(task.getExternalInputPayloadStoragePath()); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + assertEquals(workflowId, task.getWorkflowInstanceId()); + + // update second task with COMPLETED + task.getOutputData().put("op", "success_task2"); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertTrue("The workflow input should not be persisted", workflow.getInput().isEmpty()); + assertEquals(workflowInputPath, workflow.getExternalInputPayloadStoragePath()); + assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); + assertEquals(2, workflow.getTasks().size()); + assertTrue("The first task output should not be persisted", workflow.getTasks().get(0).getOutputData().isEmpty()); + assertTrue("The second task input should not be persisted", workflow.getTasks().get(1).getInputData().isEmpty()); + assertEquals(taskOutputPath, workflow.getTasks().get(0).getExternalOutputPayloadStoragePath()); + assertEquals("task/input", workflow.getTasks().get(1).getExternalInputPayloadStoragePath()); + assertTrue(workflow.getOutput().isEmpty()); + assertNotNull(workflow.getExternalOutputPayloadStoragePath()); + assertEquals("workflow/output", workflow.getExternalOutputPayloadStoragePath()); + } + + @Test + public void testRetryWorkflowUsingExternalPayloadStorage() { + WorkflowDef found = metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); + assertNotNull(found); + + Map outputParameters = found.getOutputParameters(); + outputParameters.put("workflow_output", "${t1.output.op}"); + metadataService.updateWorkflowDef(found); + + String taskName = "junit_task_2"; + TaskDef taskDef = metadataService.getTaskDef(taskName); + taskDef.setRetryCount(2); + taskDef.setRetryDelaySeconds(0); + metadataService.updateTaskDef(taskDef); + + String workflowInputPath = "workflow/input"; + String correlationId = "wf_external_storage"; + String workflowId = workflowExecutor.startWorkflow(LINEAR_WORKFLOW_T1_T2, 1, correlationId, null, workflowInputPath, null, null); + assertNotNull(workflowId); + + Workflow workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertTrue("The workflow input should not be persisted", workflow.getInput().isEmpty()); + assertEquals(workflowInputPath, workflow.getExternalInputPayloadStoragePath()); + assertEquals(workflow.getReasonForIncompletion(), WorkflowStatus.RUNNING, workflow.getStatus()); + assertEquals(1, workflow.getTasks().size()); + + // Polling for the first task + Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); + assertNotNull(task); + assertEquals("junit_task_1", task.getTaskType()); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + assertEquals(workflowId, task.getWorkflowInstanceId()); + + // update first task with COMPLETED + String taskOutputPath = "task/output"; + task.setOutputData(null); + task.setExternalOutputPayloadStoragePath(taskOutputPath); + task.setStatus(COMPLETED); + 
workflowExecutionService.updateTask(task); + + // Polling for the second task + task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); + assertNotNull(task); + assertEquals("junit_task_2", task.getTaskType()); + assertTrue(task.getInputData().isEmpty()); + assertNotNull(task.getExternalInputPayloadStoragePath()); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + assertEquals(workflowId, task.getWorkflowInstanceId()); + + // update second task with FAILED + task.getOutputData().put("op", "failed_task2"); + task.setStatus(FAILED); + workflowExecutionService.updateTask(task); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertTrue("The workflow input should not be persisted", workflow.getInput().isEmpty()); + assertEquals(workflowInputPath, workflow.getExternalInputPayloadStoragePath()); + assertEquals(WorkflowStatus.RUNNING, workflow.getStatus()); + + // Polling again for the second task + task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); + assertNotNull(task); + assertEquals("junit_task_2", task.getTaskType()); + assertTrue(task.getInputData().isEmpty()); + assertNotNull(task.getExternalInputPayloadStoragePath()); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + assertEquals(workflowId, task.getWorkflowInstanceId()); + + // update second task with COMPLETED + task.getOutputData().put("op", "success_task2"); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertTrue("The workflow input should not be persisted", workflow.getInput().isEmpty()); + assertEquals(workflowInputPath, workflow.getExternalInputPayloadStoragePath()); + assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); + assertEquals(3, workflow.getTasks().size()); + assertTrue("The first task output should not be persisted", workflow.getTasks().get(0).getOutputData().isEmpty()); + assertTrue("The second task input should not be persisted", workflow.getTasks().get(1).getInputData().isEmpty()); + assertTrue("The second task input should not be persisted", workflow.getTasks().get(2).getInputData().isEmpty()); + assertEquals(taskOutputPath, workflow.getTasks().get(0).getExternalOutputPayloadStoragePath()); + assertEquals("task/input", workflow.getTasks().get(1).getExternalInputPayloadStoragePath()); + assertEquals("task/input", workflow.getTasks().get(2).getExternalInputPayloadStoragePath()); + assertTrue(workflow.getOutput().isEmpty()); + assertNotNull(workflow.getExternalOutputPayloadStoragePath()); + assertEquals("workflow/output", workflow.getExternalOutputPayloadStoragePath()); + } + + //@Test + public void testRateLimiting() { + + TaskDef td = new TaskDef(); + td.setName("eventX1"); + td.setTimeoutSeconds(1); + td.setConcurrentExecLimit(1); + + metadataService.registerTaskDef(Arrays.asList(td)); + + WorkflowDef def = new WorkflowDef(); + def.setName("test_rate_limit"); + def.setSchemaVersion(2); + + WorkflowTask event = new WorkflowTask(); + event.setType("USER_TASK"); + event.setName("eventX1"); + event.setTaskReferenceName("event0"); + event.setSink("conductor"); + + def.getTasks().add(event); + metadataService.registerWorkflowDef(def); + + Executors.newSingleThreadScheduledExecutor().scheduleWithFixedDelay(() -> { + queueDAO.processUnacks("USER_TASK"); + }, 2, 2, TimeUnit.SECONDS); + + String[] ids = new String[100]; + 
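// Start workflows concurrently from multiple threads; eventX1's concurrentExecLimit of 1 should rate-limit the USER_TASK executions +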
ExecutorService es = Executors.newFixedThreadPool(10); + for (int i = 0; i < 10; i++) { + final int index = i; + es.submit(() -> { + try { + String id = startOrLoadWorkflowExecution(def.getName(), def.getVersion(), "", new HashMap<>(), null, null); + ids[index] = id; + } catch (Exception e) { + e.printStackTrace(); + } + + }); + } + Uninterruptibles.sleepUninterruptibly(20, TimeUnit.SECONDS); + for (int i = 0; i < 10; i++) { + String id = ids[i]; + Workflow workflow = workflowExecutor.getWorkflow(id, true); + assertNotNull(workflow); + assertEquals(1, workflow.getTasks().size()); + + Task eventTask = workflow.getTasks().get(0); + assertEquals(COMPLETED, eventTask.getStatus()); + assertEquals("tasks:" + workflow.getTasks(), WorkflowStatus.COMPLETED, workflow.getStatus()); + assertTrue(!eventTask.getOutputData().isEmpty()); + assertNotNull(eventTask.getOutputData().get("event_produced")); + } + } + + private void createSubWorkflow() { + + WorkflowTask wft1 = new WorkflowTask(); + wft1.setName("junit_task_5"); + Map ip1 = new HashMap<>(); + ip1.put("p1", "${workflow.input.param1}"); + ip1.put("p2", "${workflow.input.param2}"); + wft1.setInputParameters(ip1); + wft1.setTaskReferenceName("a1"); + + WorkflowTask wft2 = new WorkflowTask(); + wft2.setName("subWorkflowTask"); + wft2.setType(TaskType.SUB_WORKFLOW.name()); + SubWorkflowParams swp = new SubWorkflowParams(); + swp.setName(LINEAR_WORKFLOW_T1_T2); + wft2.setSubWorkflowParam(swp); + Map ip2 = new HashMap<>(); + ip2.put("test", "test value"); + ip2.put("param1", "sub workflow input param1"); + wft2.setInputParameters(ip2); + wft2.setTaskReferenceName("a2"); + + WorkflowTask wft3 = new WorkflowTask(); + wft3.setName("junit_task_6"); + Map ip3 = new HashMap<>(); + ip3.put("p1", "${workflow.input.param1}"); + ip3.put("p2", "${workflow.input.param2}"); + wft3.setInputParameters(ip3); + wft3.setTaskReferenceName("a3"); + + WorkflowDef main = new WorkflowDef(); + main.setSchemaVersion(2); + main.setInputParameters(Arrays.asList("param1", "param2")); + main.setName(WF_WITH_SUB_WF); + main.getTasks().addAll(Arrays.asList(wft1, wft2, wft3)); + + metadataService.updateWorkflowDef(Collections.singletonList(main)); + + } + + private void verify(String inputParam1, String wfid, String task1Op, boolean fail) { + Task task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); + assertNotNull(task); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + + String task2Input = (String) task.getInputData().get("tp2"); + assertNotNull(task2Input); + assertEquals(task1Op, task2Input); + task2Input = (String) task.getInputData().get("tp1"); + assertNotNull(task2Input); + assertEquals(inputParam1, task2Input); + if (fail) { + task.setStatus(FAILED); + task.setReasonForIncompletion("failure...0"); + } else { + task.setStatus(COMPLETED); + } + + workflowExecutionService.updateTask(task); + + Workflow es = workflowExecutionService.getExecutionStatus(wfid, false); + assertNotNull(es); + if (fail) { + assertEquals(RUNNING, es.getStatus()); + } else { + assertEquals(WorkflowStatus.COMPLETED, es.getStatus()); + } + } + + @Before + public void flushAllTaskQueues() { + queueDAO.queuesDetail().keySet().forEach(queueName -> { + queueDAO.flush(queueName); + }); + + if (taskDefs == null) { + return; + } + for (TaskDef td : taskDefs) { + queueDAO.flush(td.getName()); + } + } + + private void createWorkflowDefForDomain() { + WorkflowDef defSW = new WorkflowDef(); + defSW.setName(LINEAR_WORKFLOW_T1_T2_SW); + 
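// Two-step definition: junit_task_3 (ref t1) followed by a SUB_WORKFLOW task (ref sw1) that runs LINEAR_WORKFLOW_T1_T2 +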
defSW.setDescription(defSW.getName()); + defSW.setVersion(1); + defSW.setInputParameters(Arrays.asList("param1", "param2")); + Map outputParameters = new HashMap<>(); + outputParameters.put("o1", "${workflow.input.param1}"); + outputParameters.put("o2", "${t2.output.uuid}"); + outputParameters.put("o3", "${t1.output.op}"); + defSW.setOutputParameters(outputParameters); + defSW.setFailureWorkflow("$workflow.input.failureWfName"); + defSW.setSchemaVersion(2); + LinkedList wftasks = new LinkedList<>(); + + WorkflowTask wft1 = new WorkflowTask(); + wft1.setName("junit_task_3"); + Map ip1 = new HashMap<>(); + ip1.put("p1", "${workflow.input.param1}"); + ip1.put("p2", "${workflow.input.param2}"); + wft1.setInputParameters(ip1); + wft1.setTaskReferenceName("t1"); + + WorkflowTask subWorkflow = new WorkflowTask(); + subWorkflow.setType(TaskType.SUB_WORKFLOW.name()); + SubWorkflowParams sw = new SubWorkflowParams(); + sw.setName(LINEAR_WORKFLOW_T1_T2); + subWorkflow.setSubWorkflowParam(sw); + subWorkflow.setTaskReferenceName("sw1"); + + wftasks.add(wft1); + wftasks.add(subWorkflow); + defSW.setTasks(wftasks); + + try { + metadataService.updateWorkflowDef(defSW); + } catch (Exception e) { + } + } + + private void createWFWithResponseTimeout() { + TaskDef task = new TaskDef(); + task.setName("task_rt"); + task.setTimeoutSeconds(120); + task.setRetryCount(RETRY_COUNT); + task.setRetryDelaySeconds(0); + task.setResponseTimeoutSeconds(10); + metadataService.registerTaskDef(Collections.singletonList(task)); + + WorkflowDef def = new WorkflowDef(); + def.setName("RTOWF"); + def.setDescription(def.getName()); + def.setVersion(1); + def.setInputParameters(Arrays.asList("param1", "param2")); + Map outputParameters = new HashMap<>(); + outputParameters.put("o1", "${workflow.input.param1}"); + outputParameters.put("o2", "${t2.output.uuid}"); + outputParameters.put("o3", "${t1.output.op}"); + def.setOutputParameters(outputParameters); + def.setFailureWorkflow("$workflow.input.failureWfName"); + def.setSchemaVersion(2); + LinkedList wftasks = new LinkedList<>(); + + WorkflowTask wft1 = new WorkflowTask(); + wft1.setName("task_rt"); + Map ip1 = new HashMap<>(); + ip1.put("p1", "${workflow.input.param1}"); + ip1.put("p2", "${workflow.input.param2}"); + wft1.setInputParameters(ip1); + wft1.setTaskReferenceName("task_rt_t1"); + + WorkflowTask wft2 = new WorkflowTask(); + wft2.setName("junit_task_2"); + Map ip2 = new HashMap<>(); + ip2.put("tp1", "${workflow.input.param1}"); + ip2.put("tp2", "${t1.output.op}"); + wft2.setInputParameters(ip2); + wft2.setTaskReferenceName("t2"); + + wftasks.add(wft1); + wftasks.add(wft2); + def.setTasks(wftasks); + + metadataService.updateWorkflowDef(def); + } + + private String runWorkflowWithSubworkflow() { + clearWorkflows(); + createWorkflowDefForDomain(); + + metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2_SW, 1); + + String correlationId = "unit_test_sw"; + Map input = new HashMap<>(); + String inputParam1 = "p1 value"; + input.put("param1", inputParam1); + input.put("param2", "p2 value"); + + String workflowId = startOrLoadWorkflowExecution(LINEAR_WORKFLOW_T1_T2_SW, 1, correlationId, input, null, null); + System.out.println("testSimpleWorkflow.wfid=" + workflowId); + assertNotNull(workflowId); + + Workflow workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(RUNNING, workflow.getStatus()); + assertEquals(1, workflow.getTasks().size()); //The very first task is the one that should be scheduled. 
+ + // Poll for first task and execute it + Task task = workflowExecutionService.poll("junit_task_3", "task3.junit.worker"); + assertNotNull(task); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + task.getOutputData().put("op", "junit_task_3.done"); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(RUNNING, workflow.getStatus()); + assertEquals(2, workflow.getTasks().size()); + + // Get the sub workflow id + String subWorkflowId = null; + for (Task t : workflow.getTasks()) { + if (t.getTaskType().equalsIgnoreCase("SUB_WORKFLOW")) { + subWorkflowId = t.getOutputData().get("subWorkflowId").toString(); + } + } + assertNotNull(subWorkflowId); + + Workflow subWorkflow = workflowExecutionService.getExecutionStatus(subWorkflowId, true); + assertNotNull(subWorkflow); + assertEquals(RUNNING, subWorkflow.getStatus()); + assertEquals(1, subWorkflow.getTasks().size()); + + // Now the Sub workflow is triggered + // Poll for first task of the sub workflow and execute it + task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); + assertNotNull(task); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + task.getOutputData().put("op", "junit_task_1.done"); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + subWorkflow = workflowExecutionService.getExecutionStatus(subWorkflowId, true); + assertNotNull(subWorkflow); + assertEquals(RUNNING, subWorkflow.getStatus()); + assertEquals(2, subWorkflow.getTasks().size()); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(RUNNING, workflow.getStatus()); + assertEquals(2, workflow.getTasks().size()); + + // Poll for second task of the sub workflow and execute it + task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); + assertNotNull(task); + assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); + task.getOutputData().put("op", "junit_task_2.done"); + task.setStatus(COMPLETED); + workflowExecutionService.updateTask(task); + + // Now the sub workflow and the main workflow must have finished + subWorkflow = workflowExecutionService.getExecutionStatus(subWorkflowId, true); + assertNotNull(subWorkflow); + assertEquals(WorkflowStatus.COMPLETED, subWorkflow.getStatus()); + assertEquals(2, subWorkflow.getTasks().size()); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); + assertEquals(2, workflow.getTasks().size()); + + return workflowId; + } + + private String runAFailedForkJoinWF() throws Exception { + try { + this.createForkJoinWorkflowWithZeroRetry(); + } catch (Exception e) { + } + + Map input = new HashMap<>(); + String workflowId = startOrLoadWorkflowExecution(FORK_JOIN_WF + "_2", 1, "fanouttest", input, null, null); + System.out.println("testForkJoin.wfid=" + workflowId); + Task t1 = workflowExecutionService.poll("junit_task_0_RT_1", "test"); + assertTrue(workflowExecutionService.ackTaskReceived(t1.getTaskId())); + + Task t2 = workflowExecutionService.poll("junit_task_0_RT_2", "test"); + assertTrue(workflowExecutionService.ackTaskReceived(t2.getTaskId())); + assertNotNull(t1); + assertNotNull(t2); + + t1.setStatus(COMPLETED); + workflowExecutionService.updateTask(t1); + + Workflow workflow = 
workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals("Found " + workflow.getTasks(), RUNNING, workflow.getStatus()); + printTaskStatuses(workflow, "Initial"); + + t2.setStatus(FAILED); + + ExecutorService executorService = Executors.newFixedThreadPool(2); + Future future1 = executorService.submit(() -> { + try { + workflowExecutionService.updateTask(t2); + } catch (Exception e) { + throw new RuntimeException(e); + } + + }); + future1.get(); + + workflow = workflowExecutionService.getExecutionStatus(workflowId, true); + assertNotNull(workflow); + assertEquals(WorkflowStatus.FAILED, workflow.getStatus()); + + return workflowId; + } + + private void printTaskStatuses(String wfid, String message) { + Workflow wf = workflowExecutionService.getExecutionStatus(wfid, true); + assertNotNull(wf); + printTaskStatuses(wf, message); + } + + private String startOrLoadWorkflowExecution(String workflowName, int version, String correlationId, Map input, String event, Map taskToDomain) { + return startOrLoadWorkflowExecution(workflowName, workflowName, version, correlationId, input, event, taskToDomain); + } + + abstract String startOrLoadWorkflowExecution(String snapshotResourceName, String workflowName, int version, String correlationId, Map input, String event, Map taskToDomain); + + private boolean printWFTaskDetails = false; + + private void printTaskStatuses(Workflow wf, String message) { + if (printWFTaskDetails) { + System.out.println(message + " >>> Workflow status " + wf.getStatus().name()); + wf.getTasks().forEach(t -> { + System.out.println("Task " + String.format("%-15s", t.getTaskType()) + "\t" + String.format("%-15s", t.getReferenceTaskName()) + "\t" + String.format("%-15s", t.getWorkflowTask().getType()) + "\t" + t.getSeq() + "\t" + t.getStatus() + "\t" + t.getTaskId()); + }); + System.out.println(); + } + } +} diff --git a/test-harness/src/test/java/com/netflix/conductor/tests/integration/End2EndGrpcTests.java b/test-harness/src/test/java/com/netflix/conductor/tests/integration/End2EndGrpcTests.java new file mode 100644 index 0000000000..cddd44c0d9 --- /dev/null +++ b/test-harness/src/test/java/com/netflix/conductor/tests/integration/End2EndGrpcTests.java @@ -0,0 +1,239 @@ +/** + * Copyright 2016 Netflix, Inc. + *

    + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +/** + * + */ +package com.netflix.conductor.tests.integration; + +import com.google.inject.Guice; +import com.google.inject.Injector; +import com.netflix.conductor.bootstrap.BootstrapModule; +import com.netflix.conductor.bootstrap.ModulesProvider; +import com.netflix.conductor.client.grpc.MetadataClient; +import com.netflix.conductor.client.grpc.TaskClient; +import com.netflix.conductor.client.grpc.WorkflowClient; +import com.netflix.conductor.common.metadata.tasks.Task; +import com.netflix.conductor.common.metadata.tasks.Task.Status; +import com.netflix.conductor.common.metadata.tasks.TaskDef; +import com.netflix.conductor.common.metadata.tasks.TaskDef.TimeoutPolicy; +import com.netflix.conductor.common.metadata.tasks.TaskResult; +import com.netflix.conductor.common.metadata.workflow.StartWorkflowRequest; +import com.netflix.conductor.common.metadata.workflow.WorkflowDef; +import com.netflix.conductor.common.metadata.workflow.WorkflowTask; +import com.netflix.conductor.common.run.SearchResult; +import com.netflix.conductor.common.run.Workflow; +import com.netflix.conductor.common.run.Workflow.WorkflowStatus; +import com.netflix.conductor.common.run.WorkflowSummary; +import com.netflix.conductor.elasticsearch.ElasticSearchConfiguration; +import com.netflix.conductor.elasticsearch.EmbeddedElasticSearch; +import com.netflix.conductor.elasticsearch.EmbeddedElasticSearchProvider; +import com.netflix.conductor.grpc.server.GRPCServer; +import com.netflix.conductor.grpc.server.GRPCServerConfiguration; +import com.netflix.conductor.grpc.server.GRPCServerProvider; +import com.netflix.conductor.tests.utils.TestEnvironment; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.util.LinkedList; +import java.util.List; +import java.util.Optional; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +/** + * @author Viren + */ +public class End2EndGrpcTests extends AbstractEndToEndTest { + private static TaskClient taskClient; + private static WorkflowClient workflowClient; + private static MetadataClient metadataClient; + private static EmbeddedElasticSearch search; + + @BeforeClass + public static void setup() throws Exception { + TestEnvironment.setup(); + System.setProperty(GRPCServerConfiguration.ENABLED_PROPERTY_NAME, "true"); + System.setProperty(ElasticSearchConfiguration.EMBEDDED_PORT_PROPERTY_NAME, "9202"); + System.setProperty(ElasticSearchConfiguration.ELASTIC_SEARCH_URL_PROPERTY_NAME, "localhost:9302"); + + Injector bootInjector = Guice.createInjector(new BootstrapModule()); + Injector serverInjector = Guice.createInjector(bootInjector.getInstance(ModulesProvider.class).get()); + + search = serverInjector.getInstance(EmbeddedElasticSearchProvider.class).get().get(); + search.start(); + + Optional server = serverInjector.getInstance(GRPCServerProvider.class).get(); + assertTrue("failed to instantiate GRPCServer", server.isPresent()); + server.get().start(); + + taskClient = new TaskClient("localhost", 8090); + workflowClient = new WorkflowClient("localhost", 8090); + metadataClient = new MetadataClient("localhost", 8090); + } + + 
@AfterClass + public static void teardown() throws Exception { + TestEnvironment.teardown(); + search.stop(); + } + + @Test + public void testAll() throws Exception { + assertNotNull(taskClient); + List defs = new LinkedList<>(); + for (int i = 0; i < 5; i++) { + TaskDef def = new TaskDef("t" + i, "task " + i); + def.setTimeoutPolicy(TimeoutPolicy.RETRY); + defs.add(def); + } + metadataClient.registerTaskDefs(defs); + + for (int i = 0; i < 5; i++) { + final String taskName = "t" + i; + TaskDef def = metadataClient.getTaskDef(taskName); + assertNotNull(def); + assertEquals(taskName, def.getName()); + } + + WorkflowDef def = createWorkflowDefinition("test"); + WorkflowTask t0 = createWorkflowTask("t0"); + WorkflowTask t1 = createWorkflowTask("t1"); + + + def.getTasks().add(t0); + def.getTasks().add(t1); + + metadataClient.registerWorkflowDef(def); + WorkflowDef found = metadataClient.getWorkflowDef(def.getName(), null); + assertNotNull(found); + assertEquals(def, found); + + String correlationId = "test_corr_id"; + StartWorkflowRequest startWf = new StartWorkflowRequest(); + startWf.setName(def.getName()); + startWf.setCorrelationId(correlationId); + + String workflowId = workflowClient.startWorkflow(startWf); + assertNotNull(workflowId); + System.out.println("Started workflow id=" + workflowId); + + Workflow wf = workflowClient.getWorkflow(workflowId, false); + assertEquals(0, wf.getTasks().size()); + assertEquals(workflowId, wf.getWorkflowId()); + + wf = workflowClient.getWorkflow(workflowId, true); + assertNotNull(wf); + assertEquals(WorkflowStatus.RUNNING, wf.getStatus()); + assertEquals(1, wf.getTasks().size()); + assertEquals(t0.getTaskReferenceName(), wf.getTasks().get(0).getReferenceTaskName()); + assertEquals(workflowId, wf.getWorkflowId()); + + List runningIds = workflowClient.getRunningWorkflow(def.getName(), def.getVersion()); + assertNotNull(runningIds); + assertEquals(1, runningIds.size()); + assertEquals(workflowId, runningIds.get(0)); + + List polled = taskClient.batchPollTasksByTaskType("non existing task", "test", 1, 100); + assertNotNull(polled); + assertEquals(0, polled.size()); + + polled = taskClient.batchPollTasksByTaskType(t0.getName(), "test", 1, 100); + assertNotNull(polled); + assertEquals(1, polled.size()); + assertEquals(t0.getName(), polled.get(0).getTaskDefName()); + Task task = polled.get(0); + + Boolean acked = taskClient.ack(task.getTaskId(), "test"); + assertNotNull(acked); + assertTrue(acked); + + task.getOutputData().put("key1", "value1"); + task.setStatus(Status.COMPLETED); + taskClient.updateTask(new TaskResult(task)); + + polled = taskClient.batchPollTasksByTaskType(t0.getName(), "test", 1, 100); + assertNotNull(polled); + assertTrue(polled.toString(), polled.isEmpty()); + + wf = workflowClient.getWorkflow(workflowId, true); + assertNotNull(wf); + assertEquals(WorkflowStatus.RUNNING, wf.getStatus()); + assertEquals(2, wf.getTasks().size()); + assertEquals(t0.getTaskReferenceName(), wf.getTasks().get(0).getReferenceTaskName()); + assertEquals(t1.getTaskReferenceName(), wf.getTasks().get(1).getReferenceTaskName()); + assertEquals(Status.COMPLETED, wf.getTasks().get(0).getStatus()); + assertEquals(Status.SCHEDULED, wf.getTasks().get(1).getStatus()); + + Task taskById = taskClient.getTaskDetails(task.getTaskId()); + assertNotNull(taskById); + assertEquals(task.getTaskId(), taskById.getTaskId()); + + + List getTasks = taskClient.getPendingTasksByType(t0.getName(), null, 1); + assertNotNull(getTasks); + assertEquals(0, getTasks.size()); //getTasks only gives 
pending tasks + + + getTasks = taskClient.getPendingTasksByType(t1.getName(), null, 1); + assertNotNull(getTasks); + assertEquals(1, getTasks.size()); + + + Task pending = taskClient.getPendingTaskForWorkflow(workflowId, t1.getTaskReferenceName()); + assertNotNull(pending); + assertEquals(t1.getTaskReferenceName(), pending.getReferenceTaskName()); + assertEquals(workflowId, pending.getWorkflowInstanceId()); + + Thread.sleep(1000); + SearchResult searchResult = workflowClient.search("workflowType='" + def.getName() + "'"); + assertNotNull(searchResult); + assertEquals(1, searchResult.getTotalHits()); + + workflowClient.terminateWorkflow(workflowId, "terminate reason"); + wf = workflowClient.getWorkflow(workflowId, true); + assertNotNull(wf); + assertEquals(WorkflowStatus.TERMINATED, wf.getStatus()); + + workflowClient.restart(workflowId); + wf = workflowClient.getWorkflow(workflowId, true); + assertNotNull(wf); + assertEquals(WorkflowStatus.RUNNING, wf.getStatus()); + assertEquals(1, wf.getTasks().size()); + } + + @Override + protected String startWorkflow(String workflowExecutionName, WorkflowDef workflowDefinition) { + StartWorkflowRequest workflowRequest = new StartWorkflowRequest() + .withName(workflowExecutionName) + .withWorkflowDef(workflowDefinition); + return workflowClient.startWorkflow(workflowRequest); + } + + @Override + protected Workflow getWorkflow(String workflowId, boolean includeTasks) { + return workflowClient.getWorkflow(workflowId, includeTasks); + } + + @Override + protected TaskDef getTaskDefinition(String taskName) { + return metadataClient.getTaskDef(taskName); + } + + @Override + protected void registerTaskDefinitions(List taskDefinitionList) { + metadataClient.registerTaskDefs(taskDefinitionList); + } +} diff --git a/test-harness/src/test/java/com/netflix/conductor/tests/integration/End2EndTests.java b/test-harness/src/test/java/com/netflix/conductor/tests/integration/End2EndTests.java index 76e5c5f7b7..9cd897cb4e 100644 --- a/test-harness/src/test/java/com/netflix/conductor/tests/integration/End2EndTests.java +++ b/test-harness/src/test/java/com/netflix/conductor/tests/integration/End2EndTests.java @@ -1,23 +1,24 @@ /** * Copyright 2016 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + *

    + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. */ /** * */ package com.netflix.conductor.tests.integration; +import com.google.inject.Guice; +import com.google.inject.Injector; +import com.netflix.conductor.bootstrap.BootstrapModule; +import com.netflix.conductor.bootstrap.ModulesProvider; import com.netflix.conductor.client.exceptions.ConductorClientException; import com.netflix.conductor.client.http.MetadataClient; import com.netflix.conductor.client.http.TaskClient; @@ -25,277 +26,300 @@ import com.netflix.conductor.common.metadata.tasks.Task; import com.netflix.conductor.common.metadata.tasks.Task.Status; import com.netflix.conductor.common.metadata.tasks.TaskDef; -import com.netflix.conductor.common.metadata.tasks.TaskDef.TimeoutPolicy; import com.netflix.conductor.common.metadata.tasks.TaskResult; import com.netflix.conductor.common.metadata.workflow.StartWorkflowRequest; +import com.netflix.conductor.common.metadata.workflow.TaskType; import com.netflix.conductor.common.metadata.workflow.WorkflowDef; import com.netflix.conductor.common.metadata.workflow.WorkflowTask; -import com.netflix.conductor.common.metadata.workflow.WorkflowTask.Type; import com.netflix.conductor.common.run.SearchResult; import com.netflix.conductor.common.run.Workflow; import com.netflix.conductor.common.run.Workflow.WorkflowStatus; import com.netflix.conductor.common.run.WorkflowSummary; -import com.netflix.conductor.server.ConductorConfig; -import com.netflix.conductor.server.ConductorServer; +import com.netflix.conductor.elasticsearch.ElasticSearchConfiguration; +import com.netflix.conductor.elasticsearch.EmbeddedElasticSearch; +import com.netflix.conductor.elasticsearch.EmbeddedElasticSearchProvider; +import com.netflix.conductor.jetty.server.JettyServer; +import com.netflix.conductor.tests.utils.TestEnvironment; +import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; import java.util.ArrayList; import java.util.HashMap; -import java.util.LinkedList; import java.util.List; +import java.util.stream.Collectors; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; - /** * @author Viren - * */ -public class End2EndTests { - - static { - System.setProperty("EC2_REGION", "us-east-1"); - System.setProperty("EC2_AVAILABILITY_ZONE", "us-east-1c"); - System.setProperty("workflow.elasticsearch.url", "localhost:9300"); - System.setProperty("workflow.elasticsearch.index.name", "conductor"); - System.setProperty("workflow.namespace.prefix", "integration-test"); - System.setProperty("db", "memory"); - System.setProperty("workflow.elasticsearch.version", "5"); - } - - private static TaskClient taskClient; - - private static WorkflowClient workflowClient; - - private static MetadataClient metadataClient; - - @BeforeClass - public static void setup() throws Exception { - - ConductorServer server = new ConductorServer(new ConductorConfig()); - server.start(8080, false); - - taskClient = new TaskClient(); - taskClient.setRootURI("http://localhost:8080/api/"); - - workflowClient = new WorkflowClient(); - workflowClient.setRootURI("http://localhost:8080/api/"); - - metadataClient = new MetadataClient(); - 
metadataClient.setRootURI("http://localhost:8080/api/"); - } - - @Test - public void testAll() throws Exception { - assertNotNull(taskClient); - List defs = new LinkedList<>(); - for(int i = 0; i < 5; i++) { - TaskDef def = new TaskDef("t" + i, "task " + i); - def.setTimeoutPolicy(TimeoutPolicy.RETRY); - defs.add(def); - } - metadataClient.registerTaskDefs(defs); - TaskDef found = metadataClient.getTaskDef(defs.get(0).getName()); - assertNotNull(found); - assertEquals(defs.get(0), found); - - WorkflowDef def = new WorkflowDef(); - def.setName("test"); - WorkflowTask t0 = new WorkflowTask(); - t0.setName("t0"); - t0.setWorkflowTaskType(Type.SIMPLE); - t0.setTaskReferenceName("t0"); - - WorkflowTask t1 = new WorkflowTask(); - t1.setName("t1"); - t1.setWorkflowTaskType(Type.SIMPLE); - t1.setTaskReferenceName("t1"); - - def.getTasks().add(t0); - def.getTasks().add(t1); - - metadataClient.registerWorkflowDef(def); - WorkflowDef foundd = metadataClient.getWorkflowDef(def.getName(), null); - assertNotNull(foundd); - assertEquals(def.getName(), foundd.getName()); - assertEquals(def.getVersion(), foundd.getVersion()); - - String correlationId = "test_corr_id"; - StartWorkflowRequest startWorkflowRequest = new StartWorkflowRequest() +public class End2EndTests extends AbstractEndToEndTest { + + private static TaskClient taskClient; + private static WorkflowClient workflowClient; + private static EmbeddedElasticSearch search; + private static MetadataClient metadataClient; + + private static final int SERVER_PORT = 8080; + + @BeforeClass + public static void setup() throws Exception { + TestEnvironment.setup(); + System.setProperty(ElasticSearchConfiguration.EMBEDDED_PORT_PROPERTY_NAME, "9201"); + System.setProperty(ElasticSearchConfiguration.ELASTIC_SEARCH_URL_PROPERTY_NAME, "localhost:9301"); + + Injector bootInjector = Guice.createInjector(new BootstrapModule()); + Injector serverInjector = Guice.createInjector(bootInjector.getInstance(ModulesProvider.class).get()); + + search = serverInjector.getInstance(EmbeddedElasticSearchProvider.class).get().get(); + search.start(); + + JettyServer server = new JettyServer(SERVER_PORT, false); + server.start(); + + taskClient = new TaskClient(); + taskClient.setRootURI("http://localhost:8080/api/"); + + workflowClient = new WorkflowClient(); + workflowClient.setRootURI("http://localhost:8080/api/"); + + metadataClient = new MetadataClient(); + metadataClient.setRootURI("http://localhost:8080/api/"); + } + + @AfterClass + public static void teardown() throws Exception { + TestEnvironment.teardown(); + search.stop(); + } + + @Override + protected String startWorkflow(String workflowExecutionName, WorkflowDef workflowDefinition) { + StartWorkflowRequest workflowRequest = new StartWorkflowRequest() + .withName(workflowExecutionName) + .withWorkflowDef(workflowDefinition); + + return workflowClient.startWorkflow(workflowRequest); + } + + @Override + protected Workflow getWorkflow(String workflowId, boolean includeTasks) { + return workflowClient.getWorkflow(workflowId, includeTasks); + } + + @Override + protected TaskDef getTaskDefinition(String taskName) { + return metadataClient.getTaskDef(taskName); + } + + @Override + protected void registerTaskDefinitions(List taskDefinitionList) { + metadataClient.registerTaskDefs(taskDefinitionList); + } + + @Test + public void testAll() throws Exception { + List definitions = createAndRegisterTaskDefinitions("t", 5); + + List found = taskClient.getTaskDef().stream() + .filter(taskDefinition -> 
taskDefinition.getName().startsWith("t")) + .collect(Collectors.toList()); + assertNotNull(found); + assertEquals(definitions.size(), found.size()); + + WorkflowDef def = new WorkflowDef(); + def.setName("test"); + WorkflowTask t0 = new WorkflowTask(); + t0.setName("t0"); + t0.setWorkflowTaskType(TaskType.SIMPLE); + t0.setTaskReferenceName("t0"); + + WorkflowTask t1 = new WorkflowTask(); + t1.setName("t1"); + t1.setWorkflowTaskType(TaskType.SIMPLE); + t1.setTaskReferenceName("t1"); + + + def.getTasks().add(t0); + def.getTasks().add(t1); + + metadataClient.registerWorkflowDef(def); + WorkflowDef workflowDefinitionFromSystem = metadataClient.getWorkflowDef(def.getName(), null); + assertNotNull(workflowDefinitionFromSystem); + assertEquals(def, workflowDefinitionFromSystem); + + String correlationId = "test_corr_id"; + StartWorkflowRequest startWorkflowRequest = new StartWorkflowRequest() .withName(def.getName()) .withCorrelationId(correlationId) .withInput(new HashMap<>()); - String workflowId = workflowClient.startWorkflow(startWorkflowRequest); - assertNotNull(workflowId); - System.out.println(workflowId); - - Workflow wf = workflowClient.getWorkflow(workflowId, false); - assertEquals(0, wf.getTasks().size()); - assertEquals(workflowId, wf.getWorkflowId()); - - List workflowList = workflowClient.getWorkflows(def.getName(), correlationId, false, false); - assertEquals(1, workflowList.size()); - assertEquals(workflowId, workflowList.get(0).getWorkflowId()); - - wf = workflowClient.getWorkflow(workflowId, true); - assertNotNull(wf); - assertEquals(WorkflowStatus.RUNNING, wf.getStatus()); - assertEquals(1, wf.getTasks().size()); - assertEquals(t0.getTaskReferenceName(), wf.getTasks().get(0).getReferenceTaskName()); - assertEquals(workflowId, wf.getWorkflowId()); - - List runningIds = workflowClient.getRunningWorkflow(def.getName(), def.getVersion()); - assertNotNull(runningIds); - assertEquals(1, runningIds.size()); - assertEquals(workflowId, runningIds.get(0)); - - List polled = taskClient.batchPollTasksByTaskType("non existing task", "test", 1, 100); - assertNotNull(polled); - assertEquals(0, polled.size()); - - polled = taskClient.batchPollTasksByTaskType(t0.getName(), "test", 1, 100); - assertNotNull(polled); - assertEquals(1, polled.size()); - assertEquals(t0.getName(), polled.get(0).getTaskDefName()); - Task task = polled.get(0); - - Boolean acked = taskClient.ack(task.getTaskId(), "test"); - assertNotNull(acked); - assertTrue(acked); - - task.getOutputData().put("key1", "value1"); - task.setStatus(Status.COMPLETED); - taskClient.updateTask(new TaskResult(task), task.getTaskType()); - - polled = taskClient.batchPollTasksByTaskType(t0.getName(), "test", 1, 100); - assertNotNull(polled); - assertTrue(polled.toString(), polled.isEmpty()); - - wf = workflowClient.getWorkflow(workflowId, true); - assertNotNull(wf); - assertEquals(WorkflowStatus.RUNNING, wf.getStatus()); - assertEquals(2, wf.getTasks().size()); - assertEquals(t0.getTaskReferenceName(), wf.getTasks().get(0).getReferenceTaskName()); - assertEquals(t1.getTaskReferenceName(), wf.getTasks().get(1).getReferenceTaskName()); - assertEquals(Task.Status.COMPLETED, wf.getTasks().get(0).getStatus()); - assertEquals(Task.Status.SCHEDULED, wf.getTasks().get(1).getStatus()); - - Task taskById = taskClient.getTaskDetails(task.getTaskId()); - assertNotNull(taskById); - assertEquals(task.getTaskId(), taskById.getTaskId()); - - List getTasks = taskClient.getPendingTasksByType(t0.getName(), null, 1); - assertNotNull(getTasks); - assertEquals(0, 
getTasks.size()); //getTasks only gives pending tasks - getTasks = taskClient.getPendingTasksByType(t1.getName(), null, 1); - assertNotNull(getTasks); - assertEquals(1, getTasks.size()); - - Task pending = taskClient.getPendingTaskForWorkflow(workflowId, t1.getTaskReferenceName()); - assertNotNull(pending); - assertEquals(t1.getTaskReferenceName(), pending.getReferenceTaskName()); - assertEquals(workflowId, pending.getWorkflowInstanceId()); - - Thread.sleep(1000); - SearchResult searchResult = workflowClient.search("workflowType='" + def.getName() + "'"); - assertNotNull(searchResult); - assertEquals(1, searchResult.getTotalHits()); - - workflowClient.terminateWorkflow(workflowId, "terminate reason"); - wf = workflowClient.getWorkflow(workflowId, true); - assertNotNull(wf); - assertEquals(WorkflowStatus.TERMINATED, wf.getStatus()); - - workflowClient.restart(workflowId); - wf = workflowClient.getWorkflow(workflowId, true); - assertNotNull(wf); - assertEquals(WorkflowStatus.RUNNING, wf.getStatus()); - assertEquals(1, wf.getTasks().size()); - } - - @Test - public void testMetadataWorkflowDefinition() { - WorkflowDef def = new WorkflowDef(); - def.setName("testWorkflowDel"); - def.setVersion(1); - WorkflowTask t0 = new WorkflowTask(); - t0.setName("t0"); - t0.setWorkflowTaskType(Type.SIMPLE); - t0.setTaskReferenceName("t0"); - - WorkflowTask t1 = new WorkflowTask(); - t1.setName("t1"); - t1.setWorkflowTaskType(Type.SIMPLE); - t1.setTaskReferenceName("t1"); - - def.getTasks().add(t0); - def.getTasks().add(t1); - - metadataClient.registerWorkflowDef(def); - metadataClient.unregisterWorkflowDef("testWorkflowDel", 1); - - try { - metadataClient.getWorkflowDef("testWorkflowDel", 1); - } catch (ConductorClientException e) { - int statusCode = e.getStatus(); - String errorMessage = e.getMessage(); - boolean retryable = e.isRetryable(); - assertEquals(404, statusCode); - assertEquals("No such workflow found by name: testWorkflowDel, version: 1", errorMessage); - assertFalse(retryable); - } - } - - @Test - public void testInvalidResource() { + String workflowId = workflowClient.startWorkflow(startWorkflowRequest); + assertNotNull(workflowId); + + Workflow wf = workflowClient.getWorkflow(workflowId, false); + assertEquals(0, wf.getTasks().size()); + assertEquals(workflowId, wf.getWorkflowId()); + + List workflowList = workflowClient.getWorkflows(def.getName(), correlationId, false, false); + assertEquals(1, workflowList.size()); + assertEquals(workflowId, workflowList.get(0).getWorkflowId()); + wf = workflowClient.getWorkflow(workflowId, true); + + assertNotNull(wf); + assertEquals(WorkflowStatus.RUNNING, wf.getStatus()); + assertEquals(1, wf.getTasks().size()); + assertEquals(t0.getTaskReferenceName(), wf.getTasks().get(0).getReferenceTaskName()); + assertEquals(workflowId, wf.getWorkflowId()); + + List runningIds = workflowClient.getRunningWorkflow(def.getName(), def.getVersion()); + assertNotNull(runningIds); + assertEquals(1, runningIds.size()); + assertEquals(workflowId, runningIds.get(0)); + + List polled = taskClient.batchPollTasksByTaskType("non existing task", "test", 1, 100); + assertNotNull(polled); + assertEquals(0, polled.size()); + + polled = taskClient.batchPollTasksByTaskType(t0.getName(), "test", 1, 100); + assertNotNull(polled); + assertEquals(1, polled.size()); + assertEquals(t0.getName(), polled.get(0).getTaskDefName()); + Task task = polled.get(0); + + Boolean acked = taskClient.ack(task.getTaskId(), "test"); + assertNotNull(acked); + assertTrue(acked); + + 
task.getOutputData().put("key1", "value1"); + task.setStatus(Status.COMPLETED); + taskClient.updateTask(new TaskResult(task), task.getTaskType()); + + polled = taskClient.batchPollTasksByTaskType(t0.getName(), "test", 1, 100); + assertNotNull(polled); + assertTrue(polled.toString(), polled.isEmpty()); + + wf = workflowClient.getWorkflow(workflowId, true); + assertNotNull(wf); + assertEquals(WorkflowStatus.RUNNING, wf.getStatus()); + assertEquals(2, wf.getTasks().size()); + assertEquals(t0.getTaskReferenceName(), wf.getTasks().get(0).getReferenceTaskName()); + assertEquals(t1.getTaskReferenceName(), wf.getTasks().get(1).getReferenceTaskName()); + assertEquals(Task.Status.COMPLETED, wf.getTasks().get(0).getStatus()); + assertEquals(Task.Status.SCHEDULED, wf.getTasks().get(1).getStatus()); + + Task taskById = taskClient.getTaskDetails(task.getTaskId()); + assertNotNull(taskById); + assertEquals(task.getTaskId(), taskById.getTaskId()); + + + List getTasks = taskClient.getPendingTasksByType(t0.getName(), null, 1); + assertNotNull(getTasks); + assertEquals(0, getTasks.size()); //getTasks only gives pending tasks + + + getTasks = taskClient.getPendingTasksByType(t1.getName(), null, 1); + assertNotNull(getTasks); + assertEquals(1, getTasks.size()); + + + Task pending = taskClient.getPendingTaskForWorkflow(workflowId, t1.getTaskReferenceName()); + assertNotNull(pending); + assertEquals(t1.getTaskReferenceName(), pending.getReferenceTaskName()); + assertEquals(workflowId, pending.getWorkflowInstanceId()); + + Thread.sleep(1000); + SearchResult searchResult = workflowClient.search("workflowType='" + def.getName() + "'"); + assertNotNull(searchResult); + assertEquals(1, searchResult.getTotalHits()); + + workflowClient.terminateWorkflow(workflowId, "terminate reason"); + wf = workflowClient.getWorkflow(workflowId, true); + assertNotNull(wf); + assertEquals(WorkflowStatus.TERMINATED, wf.getStatus()); + + workflowClient.restart(workflowId); + wf = workflowClient.getWorkflow(workflowId, true); + assertNotNull(wf); + assertEquals(WorkflowStatus.RUNNING, wf.getStatus()); + assertEquals(1, wf.getTasks().size()); + } + + @Test + public void testMetadataWorkflowDefinition() { + String workflowDefName = "testWorkflowDefMetadata"; + WorkflowDef def = new WorkflowDef(); + def.setName(workflowDefName); + def.setVersion(1); + WorkflowTask t0 = new WorkflowTask(); + t0.setName("t0"); + t0.setWorkflowTaskType(TaskType.SIMPLE); + t0.setTaskReferenceName("t0"); + WorkflowTask t1 = new WorkflowTask(); + t1.setName("t1"); + t1.setWorkflowTaskType(TaskType.SIMPLE); + t1.setTaskReferenceName("t1"); + def.getTasks().add(t0); + def.getTasks().add(t1); + metadataClient.registerWorkflowDef(def); + try { + metadataClient.getWorkflowDef(workflowDefName, 1); + } catch (ConductorClientException e) { + int statusCode = e.getStatus(); + String errorMessage = e.getMessage(); + boolean retryable = e.isRetryable(); + assertEquals(404, statusCode); + assertEquals("No such workflow found by name: testWorkflowDefMetadata, version: 1", errorMessage); + assertFalse(retryable); + } + metadataClient.unregisterWorkflowDef(workflowDefName, 1); + } + + @Test + public void testInvalidResource() { MetadataClient metadataClient = new MetadataClient(); metadataClient.setRootURI("http://localhost:8080/api/invalid"); - WorkflowDef def = new WorkflowDef(); def.setName("testWorkflowDel"); def.setVersion(1); - try { metadataClient.registerWorkflowDef(def); } catch (ConductorClientException e) { int statusCode = e.getStatus(); - boolean retryable = 
e.isRetryable(); + boolean retryable = e.isRetryable(); assertEquals(404, statusCode); - assertFalse(retryable); + assertFalse(retryable); } - } - - @Test - public void testUpdateWorkflow() { - WorkflowDef def = new WorkflowDef(); - def.setName("testWorkflowDel"); - def.setVersion(1); - metadataClient.registerWorkflowDef(def); - def.setVersion(2); - List workflowList = new ArrayList<>(); - workflowList.add(def); - metadataClient.updateWorkflowDefs(workflowList); - WorkflowDef def1 = metadataClient.getWorkflowDef(def.getName(), 2); - assertNotNull(def1); - - try{ - metadataClient.getTaskDef("test"); - } catch (ConductorClientException e) { - int statuCode = e.getStatus(); - assertEquals(404, statuCode); - assertEquals("No such taskType found by name: test", e.getMessage()); - assertFalse(e.isRetryable()); - } - } - - - - @Test - public void testStartWorkflow() { + } + + @Test + public void testUpdateWorkflow() { + WorkflowDef def = new WorkflowDef(); + def.setName("testWorkflowDel"); + def.setVersion(1); + metadataClient.registerWorkflowDef(def); + def.setVersion(2); + List workflowList = new ArrayList<>(); + workflowList.add(def); + metadataClient.updateWorkflowDefs(workflowList); + WorkflowDef def1 = metadataClient.getWorkflowDef(def.getName(), 2); + assertNotNull(def1); + try { + metadataClient.getTaskDef("test"); + } catch (ConductorClientException e) { + int statuCode = e.getStatus(); + assertEquals(404, statuCode); + assertEquals("No such taskType found by name: test", e.getMessage()); + assertFalse(e.isRetryable()); + } + } + + + @Test + public void testStartWorkflow() { StartWorkflowRequest startWorkflowRequest = new StartWorkflowRequest(); try{ workflowClient.startWorkflow(startWorkflowRequest); @@ -303,12 +327,11 @@ public void testStartWorkflow() { assertEquals("Workflow name cannot be null or empty", e.getMessage()); } } - @Test public void testUpdateTask() { - TaskResult taskResult = new TaskResult(); - try{ - taskClient.updateTask(taskResult, "taskTest"); + TaskResult taskResult = new TaskResult(); + try{ + taskClient.updateTask(taskResult, "taskTest"); } catch (ConductorClientException e){ int statuCode = e.getStatus(); assertEquals(400, statuCode); @@ -316,18 +339,16 @@ public void testUpdateTask() { assertFalse(e.isRetryable()); } } - @Test public void testGetWorfklowNotFound() { - try{ - workflowClient.getWorkflow("w123", true); + try{ + workflowClient.getWorkflow("w123", true); } catch (ConductorClientException e) { - assertEquals(404, e.getStatus()); + assertEquals(404, e.getStatus()); assertEquals("No such workflow found by id: w123", e.getMessage()); assertFalse(e.isRetryable()); } } - @Test public void testEmptyCreateWorkflowDef() { try{ @@ -339,7 +360,6 @@ public void testEmptyCreateWorkflowDef() { assertFalse(e.isRetryable()); } } - @Test public void testUpdateWorkflowDef() { try{ @@ -353,18 +373,17 @@ public void testUpdateWorkflowDef() { assertFalse(e.isRetryable()); } } - - @Test(expected = Test.None.class /* no exception expected */) + @Test public void testGetTaskInProgress() { - taskClient.getPendingTaskForWorkflow("test", "t1"); + taskClient.getPendingTaskForWorkflow("test", "t1"); } @Test public void testRemoveTaskFromTaskQueue() { - try { - taskClient.removeTaskFromQueue("test", "fakeQueue"); + try { + taskClient.removeTaskFromQueue("test", "fakeQueue"); } catch (ConductorClientException e) { - assertEquals(404, e.getStatus()); + assertEquals(404, e.getStatus()); } } @@ -378,8 +397,8 @@ public void testTaskByTaskId() { } } - @Test(expected = Test.None.class 
/* no exception expected */) + @Test public void testListworkflowsByCorrelationId() { - workflowClient.getWorkflows("test", "test12", false, false); + workflowClient.getWorkflows("test", "test12", false, false); } } diff --git a/test-harness/src/test/java/com/netflix/conductor/tests/integration/WorkflowLegacyMigrationTest.java b/test-harness/src/test/java/com/netflix/conductor/tests/integration/WorkflowLegacyMigrationTest.java new file mode 100644 index 0000000000..ff28faa7bd --- /dev/null +++ b/test-harness/src/test/java/com/netflix/conductor/tests/integration/WorkflowLegacyMigrationTest.java @@ -0,0 +1,121 @@ +/** + * Copyright 2016 Netflix, Inc. + *

    + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * + */ +package com.netflix.conductor.tests.integration; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.io.Resources; +import com.netflix.conductor.common.run.Workflow; +import com.netflix.conductor.core.config.Configuration; +import com.netflix.conductor.core.execution.WorkflowExecutor; +import com.netflix.conductor.core.utils.IDGenerator; +import com.netflix.conductor.dao.ExecutionDAO; +import com.netflix.conductor.tests.utils.TestRunner; +import org.apache.commons.io.Charsets; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.runner.RunWith; + +import javax.inject.Inject; +import java.util.Map; + +import static org.junit.Assert.fail; + +@RunWith(TestRunner.class) +public class WorkflowLegacyMigrationTest extends AbstractWorkflowServiceTest { + + private static final String WORKFLOW_SCENARIOS_PATH_PREFIX = "/integration/scenarios/legacy/"; + private static final String WORKFLOW_SCENARIO_EXTENSION = ".json"; + private static final String WORKFLOW_INSTANCE_ID_PLACEHOLDER = "WORKFLOW_INSTANCE_ID"; + + @Inject + private ExecutionDAO executionDAO; + + @Inject + private ObjectMapper objectMapper; + + @Inject + private Configuration configuration; + + @Override + public String startOrLoadWorkflowExecution(String snapshotResourceName, String workflowName, + int version, String correlationId, Map input, + String event, Map taskToDomain) { + Workflow workflow = null; + try { + workflow = loadWorkflowSnapshot(getWorkflowResourcePath(snapshotResourceName)); + } catch (Exception e) { + fail("Error loading workflow scenario " + snapshotResourceName); + } + + final String workflowId = workflow.getWorkflowId(); + + workflow.setCorrelationId(correlationId); + workflow.setInput(input); + workflow.setEvent(event); + workflow.setTaskToDomain(taskToDomain); + workflow.setVersion(version); + + workflow.getTasks().forEach(task -> { + task.setTaskId(IDGenerator.generate()); + task.setWorkflowInstanceId(workflowId); + task.setCorrelationId(correlationId); + }); + + executionDAO.createTasks(workflow.getTasks()); + executionDAO.createWorkflow(workflow); + + /* + * Apart from loading a workflow snapshot, + * in order to represent a workflow on the system, we need to populate the + * respective queues related to tasks in progress or decisions. 
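+     * The decider queue entry is what the workflow sweeper polls in order to pick up and re-evaluate the loaded workflow. 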
+ */ + workflow.getTasks().forEach(task -> { + workflowExecutor.addTaskToQueue(task); + queueDAO.push(WorkflowExecutor.DECIDER_QUEUE, workflowId, configuration.getSweepFrequency()); + }); + + return workflow.getWorkflowId(); + } + + private Workflow loadWorkflowSnapshot(String resourcePath) throws Exception { + + String content = Resources.toString(WorkflowLegacyMigrationTest.class.getResource(resourcePath), Charsets.UTF_8); + String workflowId = IDGenerator.generate(); + content = content.replace(WORKFLOW_INSTANCE_ID_PLACEHOLDER, workflowId); + + Workflow workflow = objectMapper.readValue(content, Workflow.class); + workflow.setWorkflowId(workflowId); + + return workflow; + } + + private String getWorkflowResourcePath(String workflowName) { + return WORKFLOW_SCENARIOS_PATH_PREFIX + workflowName + WORKFLOW_SCENARIO_EXTENSION; + } + + @Ignore + @Test + @Override + /* + * This scenario cannot be recreated loading a workflow snapshot. + * ForkJoins are also tested on testForkJoin() + */ + public void testForkJoinNestedWithSubWorkflow() { + } +} diff --git a/test-harness/src/test/java/com/netflix/conductor/tests/integration/WorkflowServiceTest.java b/test-harness/src/test/java/com/netflix/conductor/tests/integration/WorkflowServiceTest.java index 35ddd5e5ca..fdc4eee01e 100644 --- a/test-harness/src/test/java/com/netflix/conductor/tests/integration/WorkflowServiceTest.java +++ b/test-harness/src/test/java/com/netflix/conductor/tests/integration/WorkflowServiceTest.java @@ -18,4276 +18,16 @@ */ package com.netflix.conductor.tests.integration; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.util.concurrent.Uninterruptibles; -import com.netflix.conductor.common.metadata.tasks.PollData; -import com.netflix.conductor.common.metadata.tasks.Task; -import com.netflix.conductor.common.metadata.tasks.Task.Status; -import com.netflix.conductor.common.metadata.tasks.TaskDef; -import com.netflix.conductor.common.metadata.tasks.TaskDef.RetryLogic; -import com.netflix.conductor.common.metadata.tasks.TaskDef.TimeoutPolicy; -import com.netflix.conductor.common.metadata.tasks.TaskResult; -import com.netflix.conductor.common.metadata.workflow.DynamicForkJoinTaskList; -import com.netflix.conductor.common.metadata.workflow.RerunWorkflowRequest; -import com.netflix.conductor.common.metadata.workflow.SubWorkflowParams; -import com.netflix.conductor.common.metadata.workflow.WorkflowDef; -import com.netflix.conductor.common.metadata.workflow.WorkflowTask; -import com.netflix.conductor.common.metadata.workflow.WorkflowTask.Type; -import com.netflix.conductor.common.run.Workflow; -import com.netflix.conductor.common.run.Workflow.WorkflowStatus; -import com.netflix.conductor.core.WorkflowContext; -import com.netflix.conductor.core.execution.ApplicationException; -import com.netflix.conductor.core.execution.SystemTaskType; -import com.netflix.conductor.core.execution.WorkflowExecutor; -import com.netflix.conductor.core.execution.WorkflowSweeper; -import com.netflix.conductor.core.execution.tasks.SubWorkflow; -import com.netflix.conductor.dao.QueueDAO; -import com.netflix.conductor.service.ExecutionService; -import com.netflix.conductor.service.MetadataService; import com.netflix.conductor.tests.utils.TestRunner; -import org.apache.commons.lang.StringUtils; -import org.junit.After; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; -import org.slf4j.Logger; -import 
org.slf4j.LoggerFactory;
-import javax.inject.Inject;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
 import java.util.Map;
-import java.util.Set;
-import java.util.UUID;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-import static com.netflix.conductor.common.metadata.tasks.Task.Status.COMPLETED;
-import static com.netflix.conductor.common.metadata.tasks.Task.Status.FAILED;
-import static com.netflix.conductor.common.metadata.tasks.Task.Status.IN_PROGRESS;
-import static com.netflix.conductor.common.metadata.tasks.Task.Status.SCHEDULED;
-import static com.netflix.conductor.common.metadata.tasks.Task.Status.TIMED_OUT;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-/**
- * @author Viren
- */
 @RunWith(TestRunner.class)
-public class WorkflowServiceTest {
-
-
-    private static final Logger logger = LoggerFactory.getLogger(WorkflowServiceTest.class);
-
-    private static final String COND_TASK_WF = "ConditionalTaskWF";
-
-    private static final String FORK_JOIN_NESTED_WF = "FanInOutNestedTest";
-
-    private static final String FORK_JOIN_WF = "FanInOutTest";
-
-    private static final String DYNAMIC_FORK_JOIN_WF = "DynamicFanInOutTest";
-
-    private static final String DYNAMIC_FORK_JOIN_WF_LEGACY = "DynamicFanInOutTestLegacy";
-
-    private static final int RETRY_COUNT = 1;
-    private static final String JUNIT_TEST_WF_NON_RESTARTABLE = "junit_test_wf_non_restartable";
-    private static final String WF_WITH_SUB_WF = "WorkflowWithSubWorkflow";
-
-    @Rule
-    public final ExpectedException expectedException = ExpectedException.none();
-
-    @Inject
-    private ExecutionService workflowExecutionService;
-
-    @Inject
-    private SubWorkflow subworkflow;
-
-    @Inject
-    private MetadataService metadataService;
-
-    @Inject
-    private WorkflowSweeper workflowSweeper;
-
-    @Inject
-    private QueueDAO queueDAO;
-
-    @Inject
-    private WorkflowExecutor workflowExecutor;
-
-    private static boolean registered;
-
-    private static List taskDefs;
-
-    private static final String LINEAR_WORKFLOW_T1_T2 = "junit_test_wf";
-
-    private static final String LINEAR_WORKFLOW_T1_T2_SW = "junit_test_wf_sw";
-
-    private static final String LONG_RUNNING = "longRunningWf";
-
-    private static final String TEST_WORKFLOW_NAME_3 = "junit_test_wf3";
-
-    @Before
-    public void init() {
-        System.setProperty("EC2_REGION", "us-east-1");
-        System.setProperty("EC2_AVAILABILITY_ZONE", "us-east-1c");
-        if (registered) {
-            return;
-        }
-
-        WorkflowContext.set(new WorkflowContext("junit_app"));
-        for (int i = 0; i < 21; i++) {
-
-            String name = "junit_task_" + i;
-            try {
-                metadataService.getTaskDef(name);
-            } catch (ApplicationException e) {
-                if (e.getHttpStatusCode() == 404) {
-                    TaskDef task = new TaskDef();
-                    task.setName(name);
-                    task.setTimeoutSeconds(120);
-                    task.setRetryCount(RETRY_COUNT);
-                    metadataService.registerTaskDef(Collections.singletonList(task));
-                }
-            }
-        }
-
-        for (int i = 0; i < 5; i++) {
-
-            String name = "junit_task_0_RT_" + i;
-            try {
-                metadataService.getTaskDef(name);
-            } catch (ApplicationException e) {
-                if (e.getHttpStatusCode() == 404) {
-                    TaskDef task = new TaskDef();
-                    task.setName(name);
-                    task.setTimeoutSeconds(120);
-                    task.setRetryCount(0);
-                    metadataService.registerTaskDef(Collections.singletonList(task));
-                }
-            }
-        }
-
-        TaskDef task = new TaskDef();
-        task.setName("short_time_out");
-        task.setTimeoutSeconds(5);
-        task.setRetryCount(RETRY_COUNT);
-        metadataService.registerTaskDef(Collections.singletonList(task));
-
-        WorkflowDef def = new WorkflowDef();
-        def.setName(LINEAR_WORKFLOW_T1_T2);
-        def.setDescription(def.getName());
-        def.setVersion(1);
-        def.setInputParameters(Arrays.asList("param1", "param2"));
-        Map outputParameters = new HashMap<>();
-        outputParameters.put("o1", "${workflow.input.param1}");
-        outputParameters.put("o2", "${t2.output.uuid}");
-        outputParameters.put("o3", "${t1.output.op}");
-        def.setOutputParameters(outputParameters);
-        def.setFailureWorkflow("$workflow.input.failureWfName");
-        def.setSchemaVersion(2);
-        LinkedList wftasks = new LinkedList<>();
-
-        WorkflowTask wft1 = new WorkflowTask();
-        wft1.setName("junit_task_1");
-        Map ip1 = new HashMap<>();
-        ip1.put("p1", "${workflow.input.param1}");
-        ip1.put("p2", "${workflow.input.param2}");
-        wft1.setInputParameters(ip1);
-        wft1.setTaskReferenceName("t1");
-
-        WorkflowTask wft2 = new WorkflowTask();
-        wft2.setName("junit_task_2");
-        Map ip2 = new HashMap<>();
-        ip2.put("tp1", "${workflow.input.param1}");
-        ip2.put("tp2", "${t1.output.op}");
-        wft2.setInputParameters(ip2);
-        wft2.setTaskReferenceName("t2");
-
-        wftasks.add(wft1);
-        wftasks.add(wft2);
-        def.setTasks(wftasks);
-
-        WorkflowTask wft3 = new WorkflowTask();
-        wft3.setName("junit_task_3");
-        Map ip3 = new HashMap<>();
-        ip3.put("tp1", "${workflow.input.param1}");
-        ip3.put("tp2", "${t1.output.op}");
-        wft3.setInputParameters(ip3);
-        wft3.setTaskReferenceName("t3");
-
-        WorkflowDef def2 = new WorkflowDef();
-        def2.setName(TEST_WORKFLOW_NAME_3);
-        def2.setDescription(def2.getName());
-        def2.setVersion(1);
-        def2.setInputParameters(Arrays.asList("param1", "param2"));
-        LinkedList wftasks2 = new LinkedList<>();
-
-        wftasks2.add(wft1);
-        wftasks2.add(wft2);
-        wftasks2.add(wft3);
-        def2.setSchemaVersion(2);
-        def2.setTasks(wftasks2);
-
-        WorkflowDef[] wdsf = new WorkflowDef[]{def, def2};
-        for (WorkflowDef wd : wdsf) {
-            metadataService.updateWorkflowDef(wd);
-        }
-        createForkJoinWorkflow();
-        def.setName(LONG_RUNNING);
-        metadataService.updateWorkflowDef(def);
-
-        taskDefs = metadataService.getTaskDefs();
-        registered = true;
-    }
-
-    @Test
-    public void testWorkflowWithNoTasks() {
-
-        WorkflowDef empty = new WorkflowDef();
-        empty.setName("empty_workflow");
-        empty.setSchemaVersion(2);
-        metadataService.registerWorkflowDef(empty);
-
-        String id = workflowExecutor.startWorkflow(empty.getName(), 1, "testWorkflowWithNoTasks", new HashMap<>());
-        assertNotNull(id);
-        Workflow workflow = workflowExecutionService.getExecutionStatus(id, true);
-        assertNotNull(workflow);
-        assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus());
-        assertEquals(0, workflow.getTasks().size());
-    }
-
-    @Test
-    public void testTaskDefTemplate() throws Exception {
-
-        System.setProperty("STACK2", "test_stack");
-        TaskDef templatedTask = new TaskDef();
-        templatedTask.setName("templated_task");
-        Map httpRequest = new HashMap<>();
-        httpRequest.put("method", "GET");
-        httpRequest.put("vipStack", "${STACK2}");
-        httpRequest.put("uri", "/get/something");
-        Map body = new HashMap<>();
-        body.put("inputPaths", Arrays.asList("${workflow.input.path1}", "${workflow.input.path2}"));
-        body.put("requestDetails", "${workflow.input.requestDetails}");
-        body.put("outputPath", "${workflow.input.outputPath}");
-
httpRequest.put("body", body); - templatedTask.getInputTemplate().put("http_request", httpRequest); - metadataService.registerTaskDef(Collections.singletonList(templatedTask)); - - WorkflowDef templateWf = new WorkflowDef(); - templateWf.setName("template_workflow"); - WorkflowTask wft = new WorkflowTask(); - wft.setName(templatedTask.getName()); - wft.setWorkflowTaskType(Type.SIMPLE); - wft.setTaskReferenceName("t0"); - templateWf.getTasks().add(wft); - templateWf.setSchemaVersion(2); - metadataService.registerWorkflowDef(templateWf); - - Map requestDetails = new HashMap<>(); - requestDetails.put("key1", "value1"); - requestDetails.put("key2", 42); - - Map input = new HashMap<>(); - input.put("path1", "file://path1"); - input.put("path2", "file://path2"); - input.put("outputPath", "s3://bucket/outputPath"); - input.put("requestDetails", requestDetails); - - String id = workflowExecutor.startWorkflow(templateWf.getName(), 1, "testTaskDefTemplate", input); - assertNotNull(id); - Workflow workflow = workflowExecutionService.getExecutionStatus(id, true); - assertNotNull(workflow); - assertTrue(workflow.getReasonForIncompletion(), !workflow.getStatus().isTerminal()); - assertEquals(1, workflow.getTasks().size()); - Task task = workflow.getTasks().get(0); - Map taskInput = task.getInputData(); - assertNotNull(taskInput); - assertTrue(taskInput.containsKey("http_request")); - assertTrue(taskInput.get("http_request") instanceof Map); - - ObjectMapper objectMapper = new ObjectMapper(); - - //Use the commented sysout to get the string value - //System.out.println(om.writeValueAsString(om.writeValueAsString(taskInput))); - String expected = "{\"http_request\":{\"method\":\"GET\",\"vipStack\":\"test_stack\",\"body\":{\"requestDetails\":{\"key1\":\"value1\",\"key2\":42},\"outputPath\":\"s3://bucket/outputPath\",\"inputPaths\":[\"file://path1\",\"file://path2\"]},\"uri\":\"/get/something\"}}"; - assertEquals(expected, objectMapper.writeValueAsString(taskInput)); - } - - - @Test - public void testWorkflowSchemaVersion() { - WorkflowDef ver2 = new WorkflowDef(); - ver2.setSchemaVersion(2); - ver2.setName("Test_schema_version2"); - ver2.setVersion(1); - - WorkflowDef ver1 = new WorkflowDef(); - ver1.setName("Test_schema_version1"); - ver1.setVersion(1); - - metadataService.updateWorkflowDef(ver1); - metadataService.updateWorkflowDef(ver2); - - WorkflowDef found = metadataService.getWorkflowDef(ver2.getName(), 1); - assertNotNull(found); - assertEquals(2, found.getSchemaVersion()); - - WorkflowDef found1 = metadataService.getWorkflowDef(ver1.getName(), 1); - assertNotNull(found1); - assertEquals(1, found1.getSchemaVersion()); - - } - - @Test - public void testForkJoin() throws Exception { - try { - createForkJoinWorkflow(); - } catch (Exception e) { - } - String taskName = "junit_task_1"; - TaskDef taskDef = metadataService.getTaskDef(taskName); - taskDef.setRetryCount(0); - taskDef.setTimeoutSeconds(0); - metadataService.updateTaskDef(taskDef); - - taskName = "junit_task_2"; - taskDef = metadataService.getTaskDef(taskName); - taskDef.setRetryCount(0); - taskDef.setTimeoutSeconds(0); - metadataService.updateTaskDef(taskDef); - - taskName = "junit_task_3"; - taskDef = metadataService.getTaskDef(taskName); - taskDef.setRetryCount(0); - taskDef.setTimeoutSeconds(0); - metadataService.updateTaskDef(taskDef); - - taskName = "junit_task_4"; - taskDef = metadataService.getTaskDef(taskName); - taskDef.setRetryCount(0); - taskDef.setTimeoutSeconds(0); - metadataService.updateTaskDef(taskDef); - - Map input = new 
HashMap<>(); - String workflowId = workflowExecutor.startWorkflow(FORK_JOIN_WF, 1, "fanouttest", input); - System.out.println("testForkJoin.wfid=" + workflowId); - printTaskStatuses(workflowId, "initiated"); - - Task task1 = workflowExecutionService.poll("junit_task_1", "test"); - assertNotNull(task1); - assertTrue(workflowExecutionService.ackTaskReceived(task1.getTaskId())); - - Task task2 = workflowExecutionService.poll("junit_task_2", "test"); - assertNotNull(task2); - assertTrue(workflowExecutionService.ackTaskReceived(task2.getTaskId())); - - Task task3 = workflowExecutionService.poll("junit_task_3", "test"); - assertNull(task3); - - task1.setStatus(COMPLETED); - workflowExecutionService.updateTask(task1); - - Workflow workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals("Found " + workflow.getTasks(), WorkflowStatus.RUNNING, workflow.getStatus()); - printTaskStatuses(workflow, "T1 completed"); - - task3 = workflowExecutionService.poll("junit_task_3", "test"); - assertNotNull(task3); - - task2.setStatus(COMPLETED); - task3.setStatus(COMPLETED); - - ExecutorService executorService = Executors.newFixedThreadPool(2); - Future future1 = executorService.submit(() -> { - try { - workflowExecutionService.updateTask(task2); - } catch (Exception e) { - throw new RuntimeException(e); - } - - }); - future1.get(); - - final Task _t3 = task3; - Future future2 = executorService.submit(() -> { - try { - workflowExecutionService.updateTask(_t3); - } catch (Exception e) { - throw new RuntimeException(e); - } - - }); - future2.get(); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - printTaskStatuses(workflow, "T2 T3 completed"); - assertEquals("Found " + workflow.getTasks(), WorkflowStatus.RUNNING, workflow.getStatus()); - assertEquals("Found " + workflow.getTasks().stream().map(Task::getTaskType).collect(Collectors.toList()), 6, workflow.getTasks().size()); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals("Found " + workflow.getTasks(), WorkflowStatus.RUNNING, workflow.getStatus()); - assertTrue("Found " + workflow.getTasks().stream().map(t -> t.getReferenceTaskName() + "." 
+ t.getStatus()).collect(Collectors.toList()), workflow.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t4"))); - - Task t4 = workflowExecutionService.poll("junit_task_4", "test"); - assertNotNull(t4); - t4.setStatus(COMPLETED); - workflowExecutionService.updateTask(t4); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals("Found " + workflow.getTasks(), WorkflowStatus.COMPLETED, workflow.getStatus()); - printTaskStatuses(workflow, "All completed"); - } - - @Test - public void testForkJoinNested() { - - createForkJoinNestedWorkflow(); - - Map input = new HashMap<>(); - input.put("case", "a"); //This should execute t16 and t19 - String wfid = workflowExecutor.startWorkflow(FORK_JOIN_NESTED_WF, 1, "fork_join_nested_test", input); - System.out.println("testForkJoinNested.wfid=" + wfid); - - Workflow wf = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(wf); - assertEquals(WorkflowStatus.RUNNING, wf.getStatus()); - - assertTrue(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t11"))); - assertTrue(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t12"))); - assertTrue(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t13"))); - assertTrue(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("sw1"))); - assertTrue(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("fork1"))); - assertTrue(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("fork2"))); - - assertFalse(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t16"))); - assertFalse(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t1"))); - assertFalse(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t2"))); - - - Task t1 = workflowExecutionService.poll("junit_task_11", "test"); - assertTrue(workflowExecutionService.ackTaskReceived(t1.getTaskId())); - - Task t2 = workflowExecutionService.poll("junit_task_12", "test"); - assertTrue(workflowExecutionService.ackTaskReceived(t2.getTaskId())); - - Task t3 = workflowExecutionService.poll("junit_task_13", "test"); - assertTrue(workflowExecutionService.ackTaskReceived(t3.getTaskId())); - - assertNotNull(t1); - assertNotNull(t2); - assertNotNull(t3); - - t1.setStatus(COMPLETED); - t2.setStatus(COMPLETED); - t3.setStatus(COMPLETED); - - workflowExecutionService.updateTask(t1); - workflowExecutionService.updateTask(t2); - workflowExecutionService.updateTask(t3); - Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); - - wf = workflowExecutionService.getExecutionStatus(wfid, true); - - assertTrue(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t16"))); - assertTrue(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t14"))); - - String[] tasks = new String[]{"junit_task_1", "junit_task_2", "junit_task_14", "junit_task_16"}; - for (String tt : tasks) { - Task polled = workflowExecutionService.poll(tt, "test"); - assertNotNull("poll resulted empty for task: " + tt, polled); - polled.setStatus(COMPLETED); - workflowExecutionService.updateTask(polled); - } - - wf = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(wf); - assertEquals(WorkflowStatus.RUNNING, wf.getStatus()); - - assertTrue(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t19"))); - assertFalse(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t15"))); 
//Not there yet - assertFalse(wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t20"))); //Not there yet - - Task task19 = workflowExecutionService.poll("junit_task_19", "test"); - assertNotNull(task19); - task19.setStatus(COMPLETED); - workflowExecutionService.updateTask(task19); - - Task task20 = workflowExecutionService.poll("junit_task_20", "test"); - assertNotNull(task20); - task20.setStatus(COMPLETED); - workflowExecutionService.updateTask(task20); - Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); - wf = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(wf); - assertEquals(WorkflowStatus.RUNNING, wf.getStatus()); - - Set pendingTasks = wf.getTasks().stream().filter(t -> !t.getStatus().isTerminal()).map(t -> t.getReferenceTaskName()).collect(Collectors.toSet()); - assertTrue("Found only this: " + pendingTasks, wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("join1"))); - - pendingTasks = wf.getTasks().stream().filter(t -> !t.getStatus().isTerminal()).map(t -> t.getReferenceTaskName()).collect(Collectors.toSet()); - assertTrue("Found only this: " + pendingTasks, wf.getTasks().stream().anyMatch(t -> t.getReferenceTaskName().equals("t15"))); - Task task15 = workflowExecutionService.poll("junit_task_15", "test"); - assertNotNull(task15); - task15.setStatus(COMPLETED); - workflowExecutionService.updateTask(task15); - - wf = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(wf); - assertEquals(WorkflowStatus.COMPLETED, wf.getStatus()); - - } - - @Test - public void testForkJoinFailure() { - - try { - createForkJoinWorkflow(); - } catch (Exception e) { - } - - String taskName = "junit_task_2"; - TaskDef taskDef = metadataService.getTaskDef(taskName); - int retryCount = taskDef.getRetryCount(); - taskDef.setRetryCount(0); - metadataService.updateTaskDef(taskDef); - - - Map input = new HashMap(); - String wfid = workflowExecutor.startWorkflow(FORK_JOIN_WF, 1, "fanouttest", input); - System.out.println("testForkJoinFailure.wfid=" + wfid); - - Task t1 = workflowExecutionService.poll("junit_task_2", "test"); - assertNotNull(t1); - assertTrue(workflowExecutionService.ackTaskReceived(t1.getTaskId())); - - Task t2 = workflowExecutionService.poll("junit_task_1", "test"); - assertTrue(workflowExecutionService.ackTaskReceived(t2.getTaskId())); - - Task t3 = workflowExecutionService.poll("junit_task_3", "test"); - assertNull(t3); - - assertNotNull(t1); - assertNotNull(t2); - t1.setStatus(FAILED); - t2.setStatus(COMPLETED); - - workflowExecutionService.updateTask(t2); - Workflow wf = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(wf); - assertEquals("Found " + wf.getTasks(), WorkflowStatus.RUNNING, wf.getStatus()); - - t3 = workflowExecutionService.poll("junit_task_3", "test"); - assertNotNull(t3); - - - workflowExecutionService.updateTask(t1); - wf = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(wf); - assertEquals("Found " + wf.getTasks(), WorkflowStatus.FAILED, wf.getStatus()); - - - taskDef = metadataService.getTaskDef(taskName); - taskDef.setRetryCount(retryCount); - metadataService.updateTaskDef(taskDef); - } - - @SuppressWarnings("unchecked") - @Test - public void testDynamicForkJoinLegacy() { - - try { - createDynamicForkJoinWorkflowDefsLegacy(); - } catch (Exception e) { - } - - Map input = new HashMap(); - String wfid = workflowExecutor.startWorkflow(DYNAMIC_FORK_JOIN_WF_LEGACY, 1, "dynfanouttest1", input); - 
System.out.println("testDynamicForkJoinLegacy.wfid=" + wfid); - - Task t1 = workflowExecutionService.poll("junit_task_1", "test"); - //assertTrue(ess.ackTaskRecieved(t1.getTaskId(), "test")); - - DynamicForkJoinTaskList dtasks = new DynamicForkJoinTaskList(); - - input = new HashMap(); - input.put("k1", "v1"); - dtasks.add("junit_task_2", null, "xdt1", input); - - HashMap input2 = new HashMap(); - input2.put("k2", "v2"); - dtasks.add("junit_task_3", null, "xdt2", input2); - - t1.getOutputData().put("dynamicTasks", dtasks); - t1.setStatus(COMPLETED); - - workflowExecutionService.updateTask(t1); - - Task t2 = workflowExecutionService.poll("junit_task_2", "test"); - assertTrue(workflowExecutionService.ackTaskReceived(t2.getTaskId())); - assertEquals("xdt1", t2.getReferenceTaskName()); - assertTrue(t2.getInputData().containsKey("k1")); - assertEquals("v1", t2.getInputData().get("k1")); - Map output = new HashMap(); - output.put("ok1", "ov1"); - t2.setOutputData(output); - t2.setStatus(COMPLETED); - workflowExecutionService.updateTask(t2); - - Task t3 = workflowExecutionService.poll("junit_task_3", "test"); - assertTrue(workflowExecutionService.ackTaskReceived(t3.getTaskId())); - assertEquals("xdt2", t3.getReferenceTaskName()); - assertTrue(t3.getInputData().containsKey("k2")); - assertEquals("v2", t3.getInputData().get("k2")); - - output = new HashMap<>(); - output.put("ok1", "ov1"); - t3.setOutputData(output); - t3.setStatus(COMPLETED); - workflowExecutionService.updateTask(t3); - - Workflow wf = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(wf); - assertEquals(WorkflowStatus.COMPLETED, wf.getStatus()); - - // Check the output - Task joinTask = wf.getTaskByRefName("dynamicfanouttask_join"); - assertEquals("Found:" + joinTask.getOutputData(), 2, joinTask.getOutputData().keySet().size()); - Set joinTaskOutput = joinTask.getOutputData().keySet(); - System.out.println("joinTaskOutput=" + joinTaskOutput); - for (String key : joinTask.getOutputData().keySet()) { - assertTrue(key.equals("xdt1") || key.equals("xdt2")); - assertEquals("ov1", ((Map) joinTask.getOutputData().get(key)).get("ok1")); - } - } - - @SuppressWarnings("unchecked") - @Test - public void testDynamicForkJoin() { - - createDynamicForkJoinWorkflowDefs(); - - String taskName = "junit_task_2"; - TaskDef taskDef = metadataService.getTaskDef(taskName); - int retryCount = taskDef.getRetryCount(); - taskDef.setRetryCount(2); - taskDef.setRetryDelaySeconds(0); - taskDef.setRetryLogic(RetryLogic.FIXED); - metadataService.updateTaskDef(taskDef); - - Map workflowInput = new HashMap<>(); - String workflowId = workflowExecutor.startWorkflow(DYNAMIC_FORK_JOIN_WF, 1, "dynfanouttest1", workflowInput); - System.out.println("testDynamicForkJoin.wfid=" + workflowId); - Workflow workflow = workflowExecutor.getWorkflow(workflowId, true); - assertNotNull(workflow); - assertEquals(workflow.getReasonForIncompletion(), WorkflowStatus.RUNNING, workflow.getStatus()); - assertEquals(1, workflow.getTasks().size()); - - Task task1 = workflowExecutionService.poll("junit_task_1", "test"); - assertNotNull(task1); - assertTrue(workflowExecutionService.ackTaskReceived(task1.getTaskId())); - assertEquals("dt1", task1.getReferenceTaskName()); - - Map inputParams2 = new HashMap<>(); - inputParams2.put("k1", "v1"); - WorkflowTask workflowTask2 = new WorkflowTask(); - workflowTask2.setName("junit_task_2"); - workflowTask2.setTaskReferenceName("xdt1"); - - Map inputParams3 = new HashMap<>(); - inputParams3.put("k2", "v2"); - WorkflowTask 
workflowTask3 = new WorkflowTask(); - workflowTask3.setName("junit_task_3"); - workflowTask3.setTaskReferenceName("xdt2"); - - HashMap dynamicTasksInput = new HashMap<>(); - dynamicTasksInput.put("xdt1", inputParams2); - dynamicTasksInput.put("xdt2", inputParams3); - task1.getOutputData().put("dynamicTasks", Arrays.asList(workflowTask2, workflowTask3)); - task1.getOutputData().put("dynamicTasksInput", dynamicTasksInput); - task1.setStatus(COMPLETED); - - workflowExecutionService.updateTask(task1); - workflow = workflowExecutor.getWorkflow(workflowId, true); - assertNotNull(workflow); - assertEquals("Found " + workflow.getTasks().stream().map(Task::getTaskType).collect(Collectors.toList()), 5, workflow.getTasks().size()); - - Task task2 = workflowExecutionService.poll("junit_task_2", "test"); - assertTrue(workflowExecutionService.ackTaskReceived(task2.getTaskId())); - assertEquals("xdt1", task2.getReferenceTaskName()); - assertTrue(task2.getInputData().containsKey("k1")); - assertEquals("v1", task2.getInputData().get("k1")); - Map output = new HashMap<>(); - output.put("ok1", "ov1"); - task2.setOutputData(output); - task2.setStatus(FAILED); - workflowExecutionService.updateTask(task2); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(workflow.getReasonForIncompletion(), WorkflowStatus.RUNNING, workflow.getStatus()); - assertEquals(2, workflow.getTasks().stream().filter(t -> t.getTaskType().equals("junit_task_2")).count()); - assertTrue(workflow.getTasks().stream().filter(t -> t.getTaskType().equals("junit_task_2")).allMatch(t -> t.getWorkflowTask() != null)); - assertEquals("Found " + workflow.getTasks().stream().map(Task::getTaskType).collect(Collectors.toList()), 6, workflow.getTasks().size()); - - task2 = workflowExecutionService.poll("junit_task_2", "test"); - assertTrue(workflowExecutionService.ackTaskReceived(task2.getTaskId())); - assertEquals("xdt1", task2.getReferenceTaskName()); - assertTrue(task2.getInputData().containsKey("k1")); - assertEquals("v1", task2.getInputData().get("k1")); - task2.setOutputData(output); - task2.setStatus(COMPLETED); - workflowExecutionService.updateTask(task2); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals("Found " + workflow.getTasks().stream().map(Task::getTaskType).collect(Collectors.toList()), 6, workflow.getTasks().size()); - - Task task3 = workflowExecutionService.poll("junit_task_3", "test"); - assertTrue(workflowExecutionService.ackTaskReceived(task3.getTaskId())); - assertEquals("xdt2", task3.getReferenceTaskName()); - assertTrue(task3.getInputData().containsKey("k2")); - assertEquals("v2", task3.getInputData().get("k2")); - output = new HashMap<>(); - output.put("ok1", "ov1"); - task3.setOutputData(output); - task3.setStatus(COMPLETED); - workflowExecutionService.updateTask(task3); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(workflow.getReasonForIncompletion(), WorkflowStatus.RUNNING, workflow.getStatus()); - assertEquals("Found " + workflow.getTasks().stream().map(Task::getTaskType).collect(Collectors.toList()), 7, workflow.getTasks().size()); - - Task task4 = workflowExecutionService.poll("junit_task_4", "test"); - assertTrue(workflowExecutionService.ackTaskReceived(task4.getTaskId())); - assertEquals("task4", task4.getReferenceTaskName()); - task4.setStatus(COMPLETED); - workflowExecutionService.updateTask(task4); - - 
workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(workflow.getReasonForIncompletion(), WorkflowStatus.COMPLETED, workflow.getStatus()); - assertEquals("Found " + workflow.getTasks().stream().map(Task::getTaskType).collect(Collectors.toList()), 7, workflow.getTasks().size()); - - // Check the output - Task joinTask = workflow.getTaskByRefName("dynamicfanouttask_join"); - assertEquals("Found:" + joinTask.getOutputData(), 2, joinTask.getOutputData().keySet().size()); - Set joinTaskOutput = joinTask.getOutputData().keySet(); - System.out.println("joinTaskOutput=" + joinTaskOutput); - for (String key : joinTask.getOutputData().keySet()) { - assertTrue(key.equals("xdt1") || key.equals("xdt2")); - assertEquals("ov1", ((Map) joinTask.getOutputData().get(key)).get("ok1")); - } - - // reset the task def - taskDef = metadataService.getTaskDef(taskName); - taskDef.setRetryCount(retryCount); - taskDef.setRetryDelaySeconds(1); - metadataService.updateTaskDef(taskDef); - } - - private void createForkJoinWorkflow() { - - WorkflowDef workflowDef = new WorkflowDef(); - workflowDef.setName(FORK_JOIN_WF); - workflowDef.setDescription(workflowDef.getName()); - workflowDef.setVersion(1); - workflowDef.setInputParameters(Arrays.asList("param1", "param2")); - - WorkflowTask fanoutTask = new WorkflowTask(); - fanoutTask.setType(Type.FORK_JOIN.name()); - fanoutTask.setTaskReferenceName("fanouttask"); - - WorkflowTask workflowTask1 = new WorkflowTask(); - workflowTask1.setName("junit_task_1"); - Map inputParams1 = new HashMap<>(); - inputParams1.put("p1", "workflow.input.param1"); - inputParams1.put("p2", "workflow.input.param2"); - workflowTask1.setInputParameters(inputParams1); - workflowTask1.setTaskReferenceName("t1"); - - WorkflowTask workflowTask3 = new WorkflowTask(); - workflowTask3.setName("junit_task_3"); - workflowTask3.setInputParameters(inputParams1); - workflowTask3.setTaskReferenceName("t3"); - - WorkflowTask workflowTask2 = new WorkflowTask(); - workflowTask2.setName("junit_task_2"); - Map inputParams2 = new HashMap<>(); - inputParams2.put("tp1", "workflow.input.param1"); - workflowTask2.setInputParameters(inputParams2); - workflowTask2.setTaskReferenceName("t2"); - - WorkflowTask workflowTask4 = new WorkflowTask(); - workflowTask4.setName("junit_task_4"); - workflowTask4.setInputParameters(inputParams2); - workflowTask4.setTaskReferenceName("t4"); - - fanoutTask.getForkTasks().add(Arrays.asList(workflowTask1, workflowTask3)); - fanoutTask.getForkTasks().add(Collections.singletonList(workflowTask2)); - - workflowDef.getTasks().add(fanoutTask); - - WorkflowTask joinTask = new WorkflowTask(); - joinTask.setType(Type.JOIN.name()); - joinTask.setTaskReferenceName("fanouttask_join"); - joinTask.setJoinOn(Arrays.asList("t3", "t2")); - - workflowDef.getTasks().add(joinTask); - workflowDef.getTasks().add(workflowTask4); - metadataService.updateWorkflowDef(workflowDef); - } - - - private void createForkJoinWorkflowWithZeroRetry() { - - WorkflowDef def = new WorkflowDef(); - def.setName(FORK_JOIN_WF + "_2"); - def.setDescription(def.getName()); - def.setVersion(1); - def.setInputParameters(Arrays.asList("param1", "param2")); - - WorkflowTask fanout = new WorkflowTask(); - fanout.setType(Type.FORK_JOIN.name()); - fanout.setTaskReferenceName("fanouttask"); - - WorkflowTask wft1 = new WorkflowTask(); - wft1.setName("junit_task_0_RT_1"); - Map ip1 = new HashMap<>(); - ip1.put("p1", "workflow.input.param1"); - ip1.put("p2", "workflow.input.param2"); 
- wft1.setInputParameters(ip1); - wft1.setTaskReferenceName("t1"); - - WorkflowTask wft3 = new WorkflowTask(); - wft3.setName("junit_task_0_RT_3"); - wft3.setInputParameters(ip1); - wft3.setTaskReferenceName("t3"); - - WorkflowTask wft2 = new WorkflowTask(); - wft2.setName("junit_task_0_RT_2"); - Map ip2 = new HashMap<>(); - ip2.put("tp1", "workflow.input.param1"); - wft2.setInputParameters(ip2); - wft2.setTaskReferenceName("t2"); - - WorkflowTask wft4 = new WorkflowTask(); - wft4.setName("junit_task_0_RT_4"); - wft4.setInputParameters(ip2); - wft4.setTaskReferenceName("t4"); - - fanout.getForkTasks().add(Arrays.asList(wft1, wft3)); - fanout.getForkTasks().add(Arrays.asList(wft2)); - - def.getTasks().add(fanout); - - WorkflowTask join = new WorkflowTask(); - join.setType(Type.JOIN.name()); - join.setTaskReferenceName("fanouttask_join"); - join.setJoinOn(Arrays.asList("t3", "t2")); - - def.getTasks().add(join); - def.getTasks().add(wft4); - metadataService.updateWorkflowDef(def); - - } - - private void createForkJoinNestedWorkflow() { - - WorkflowDef def = new WorkflowDef(); - def.setName(FORK_JOIN_NESTED_WF); - def.setDescription(def.getName()); - def.setVersion(1); - def.setInputParameters(Arrays.asList("param1", "param2")); - - Map ip1 = new HashMap<>(); - ip1.put("p1", "workflow.input.param1"); - ip1.put("p2", "workflow.input.param2"); - ip1.put("case", "workflow.input.case"); - - WorkflowTask[] tasks = new WorkflowTask[21]; - - for (int i = 10; i < 21; i++) { - WorkflowTask wft = new WorkflowTask(); - wft.setName("junit_task_" + i); - wft.setInputParameters(ip1); - wft.setTaskReferenceName("t" + i); - tasks[i] = wft; - } - - WorkflowTask d1 = new WorkflowTask(); - d1.setType(Type.DECISION.name()); - d1.setName("Decision"); - d1.setTaskReferenceName("d1"); - d1.setInputParameters(ip1); - d1.setDefaultCase(Arrays.asList(tasks[18], tasks[20])); - d1.setCaseValueParam("case"); - Map> decisionCases = new HashMap<>(); - decisionCases.put("a", Arrays.asList(tasks[16], tasks[19], tasks[20])); - decisionCases.put("b", Arrays.asList(tasks[17], tasks[20])); - d1.setDecisionCases(decisionCases); - - WorkflowTask subWorkflow = new WorkflowTask(); - subWorkflow.setType(Type.SUB_WORKFLOW.name()); - SubWorkflowParams sw = new SubWorkflowParams(); - sw.setName(LINEAR_WORKFLOW_T1_T2); - subWorkflow.setSubWorkflowParam(sw); - subWorkflow.setTaskReferenceName("sw1"); - - WorkflowTask fork2 = new WorkflowTask(); - fork2.setType(Type.FORK_JOIN.name()); - fork2.setName("fork2"); - fork2.setTaskReferenceName("fork2"); - fork2.getForkTasks().add(Arrays.asList(tasks[12], tasks[14])); - fork2.getForkTasks().add(Arrays.asList(tasks[13], d1)); - - WorkflowTask join2 = new WorkflowTask(); - join2.setType(Type.JOIN.name()); - join2.setTaskReferenceName("join2"); - join2.setJoinOn(Arrays.asList("t14", "t20")); - - WorkflowTask fork1 = new WorkflowTask(); - fork1.setType(Type.FORK_JOIN.name()); - fork1.setTaskReferenceName("fork1"); - fork1.getForkTasks().add(Arrays.asList(tasks[11])); - fork1.getForkTasks().add(Arrays.asList(fork2, join2)); - fork1.getForkTasks().add(Arrays.asList(subWorkflow)); - - - WorkflowTask join1 = new WorkflowTask(); - join1.setType(Type.JOIN.name()); - join1.setTaskReferenceName("join1"); - join1.setJoinOn(Arrays.asList("t11", "join2", "sw1")); - - def.getTasks().add(fork1); - def.getTasks().add(join1); - def.getTasks().add(tasks[15]); - - metadataService.updateWorkflowDef(def); - - - } - - private void createDynamicForkJoinWorkflowDefs() { - - WorkflowDef def = new WorkflowDef(); - 
def.setName(DYNAMIC_FORK_JOIN_WF); - def.setDescription(def.getName()); - def.setVersion(1); - def.setInputParameters(Arrays.asList("param1", "param2")); - - WorkflowTask workflowTask1 = new WorkflowTask(); - workflowTask1.setName("junit_task_1"); - Map ip1 = new HashMap<>(); - ip1.put("p1", "workflow.input.param1"); - ip1.put("p2", "workflow.input.param2"); - workflowTask1.setInputParameters(ip1); - workflowTask1.setTaskReferenceName("dt1"); - - WorkflowTask fanout = new WorkflowTask(); - fanout.setType(Type.FORK_JOIN_DYNAMIC.name()); - fanout.setTaskReferenceName("dynamicfanouttask"); - fanout.setDynamicForkTasksParam("dynamicTasks"); - fanout.setDynamicForkTasksInputParamName("dynamicTasksInput"); - fanout.getInputParameters().put("dynamicTasks", "dt1.output.dynamicTasks"); - fanout.getInputParameters().put("dynamicTasksInput", "dt1.output.dynamicTasksInput"); - - WorkflowTask join = new WorkflowTask(); - join.setType(Type.JOIN.name()); - join.setTaskReferenceName("dynamicfanouttask_join"); - - WorkflowTask workflowTask4 = new WorkflowTask(); - workflowTask4.setName("junit_task_4"); - workflowTask4.setTaskReferenceName("task4"); - - def.getTasks().add(workflowTask1); - def.getTasks().add(fanout); - def.getTasks().add(join); - def.getTasks().add(workflowTask4); - - metadataService.updateWorkflowDef(def); - } - - @SuppressWarnings("deprecation") - private void createDynamicForkJoinWorkflowDefsLegacy() { - - WorkflowDef def = new WorkflowDef(); - def.setName(DYNAMIC_FORK_JOIN_WF_LEGACY); - def.setDescription(def.getName()); - def.setVersion(1); - def.setInputParameters(Arrays.asList("param1", "param2")); - - WorkflowTask wft1 = new WorkflowTask(); - wft1.setName("junit_task_1"); - Map ip1 = new HashMap<>(); - ip1.put("p1", "workflow.input.param1"); - ip1.put("p2", "workflow.input.param2"); - wft1.setInputParameters(ip1); - wft1.setTaskReferenceName("dt1"); - - WorkflowTask fanout = new WorkflowTask(); - fanout.setType(Type.FORK_JOIN_DYNAMIC.name()); - fanout.setTaskReferenceName("dynamicfanouttask"); - fanout.setDynamicForkJoinTasksParam("dynamicTasks"); - fanout.getInputParameters().put("dynamicTasks", "dt1.output.dynamicTasks"); - fanout.getInputParameters().put("dynamicTasksInput", "dt1.output.dynamicTasksInput"); - - WorkflowTask join = new WorkflowTask(); - join.setType(Type.JOIN.name()); - join.setTaskReferenceName("dynamicfanouttask_join"); - - def.getTasks().add(wft1); - def.getTasks().add(fanout); - def.getTasks().add(join); - - metadataService.updateWorkflowDef(def); - - } - - private void createConditionalWF() { - - WorkflowTask wft1 = new WorkflowTask(); - wft1.setName("junit_task_1"); - Map ip1 = new HashMap<>(); - ip1.put("p1", "workflow.input.param1"); - ip1.put("p2", "workflow.input.param2"); - wft1.setInputParameters(ip1); - wft1.setTaskReferenceName("t1"); - - WorkflowTask wft2 = new WorkflowTask(); - wft2.setName("junit_task_2"); - Map ip2 = new HashMap<>(); - ip2.put("tp1", "workflow.input.param1"); - wft2.setInputParameters(ip2); - wft2.setTaskReferenceName("t2"); - - WorkflowTask wft3 = new WorkflowTask(); - wft3.setName("junit_task_3"); - Map ip3 = new HashMap<>(); - ip2.put("tp3", "workflow.input.param2"); - wft3.setInputParameters(ip3); - wft3.setTaskReferenceName("t3"); - - WorkflowDef def2 = new WorkflowDef(); - def2.setName(COND_TASK_WF); - def2.setDescription(COND_TASK_WF); - def2.setInputParameters(Arrays.asList("param1", "param2")); - - WorkflowTask c2 = new WorkflowTask(); - c2.setType(Type.DECISION.name()); - c2.setCaseValueParam("case"); - 
c2.setName("conditional2"); - c2.setTaskReferenceName("conditional2"); - Map> dc = new HashMap<>(); - dc.put("one", Arrays.asList(wft1, wft3)); - dc.put("two", Arrays.asList(wft2)); - c2.setDecisionCases(dc); - c2.getInputParameters().put("case", "workflow.input.param2"); - - - WorkflowTask condition = new WorkflowTask(); - condition.setType(Type.DECISION.name()); - condition.setCaseValueParam("case"); - condition.setName("conditional"); - condition.setTaskReferenceName("conditional"); - Map> decisionCases = new HashMap<>(); - decisionCases.put("nested", Arrays.asList(c2)); - decisionCases.put("three", Arrays.asList(wft3)); - condition.setDecisionCases(decisionCases); - condition.getInputParameters().put("case", "workflow.input.param1"); - condition.getDefaultCase().add(wft2); - def2.getTasks().add(condition); - - WorkflowTask notifyTask = new WorkflowTask(); - notifyTask.setName("junit_task_4"); - notifyTask.setTaskReferenceName("junit_task_4"); - - WorkflowTask finalTask = new WorkflowTask(); - finalTask.setName("finalcondition"); - finalTask.setTaskReferenceName("tf"); - finalTask.setType(Type.DECISION.name()); - finalTask.setCaseValueParam("finalCase"); - Map fi = new HashMap<>(); - fi.put("finalCase", "workflow.input.finalCase"); - finalTask.setInputParameters(fi); - finalTask.getDecisionCases().put("notify", Arrays.asList(notifyTask)); - - def2.getTasks().add(finalTask); - metadataService.updateWorkflowDef(def2); - - } - - - @Test - public void testDefDAO() { - List taskDefs = metadataService.getTaskDefs(); - assertNotNull(taskDefs); - assertTrue(!taskDefs.isEmpty()); - } - - @Test - public void testSimpleWorkflowFailureWithTerminalError() { - - clearWorkflows(); - - TaskDef taskDef = metadataService.getTaskDef("junit_task_1"); - taskDef.setRetryCount(1); - metadataService.updateTaskDef(taskDef); - - WorkflowDef found = metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); - assertNotNull(found); - Map outputParameters = found.getOutputParameters(); - outputParameters.put("validationErrors", "${t1.output.ErrorMessage}"); - metadataService.updateWorkflowDef(found); - - String correlationId = "unit_test_1"; - Map input = new HashMap<>(); - String inputParam1 = "p1 value"; - input.put("param1", inputParam1); - input.put("param2", "p2 value"); - String workflowInstanceId = workflowExecutor.startWorkflow(LINEAR_WORKFLOW_T1_T2, 1, correlationId, input); - logger.info("testSimpleWorkflow.wfid= {}", workflowInstanceId); - assertNotNull(workflowInstanceId); - - Workflow es = workflowExecutionService.getExecutionStatus(workflowInstanceId, true); - assertNotNull(es); - assertEquals(es.getReasonForIncompletion(), WorkflowStatus.RUNNING, es.getStatus()); - - es = workflowExecutionService.getExecutionStatus(workflowInstanceId, true); - assertNotNull(es); - assertEquals(WorkflowStatus.RUNNING, es.getStatus()); - assertEquals(1, es.getTasks().size()); //The very first task is the one that should be scheduled. 
- - boolean failed = false; - try { - workflowExecutor.rewind(workflowInstanceId); - } catch (ApplicationException ae) { - failed = true; - } - assertTrue(failed); - - // Polling for the first task should return the same task as before - Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); - assertNotNull(task); - assertEquals("junit_task_1", task.getTaskType()); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - assertEquals(workflowInstanceId, task.getWorkflowInstanceId()); - - TaskResult taskResult = new TaskResult(task); - taskResult.setReasonForIncompletion("NON TRANSIENT ERROR OCCURRED: An integration point required to complete the task is down"); - taskResult.setStatus(TaskResult.Status.FAILED_WITH_TERMINAL_ERROR); - taskResult.addOutputData("TERMINAL_ERROR", "Integration endpoint down: FOOBAR"); - taskResult.addOutputData("ErrorMessage", "There was a terminal error"); - - workflowExecutionService.updateTask(taskResult); - workflowExecutor.decide(workflowInstanceId); - - es = workflowExecutionService.getExecutionStatus(workflowInstanceId, true); - TaskDef junit_task_1 = metadataService.getTaskDef("junit_task_1"); - Task t1 = es.getTaskByRefName("t1"); - assertNotNull(es); - assertEquals(WorkflowStatus.FAILED, es.getStatus()); - assertEquals("NON TRANSIENT ERROR OCCURRED: An integration point required to complete the task is down", es.getReasonForIncompletion()); - assertEquals(1, junit_task_1.getRetryCount()); //Configured retries at the task definition level - assertEquals(0, t1.getRetryCount()); //Actual retries done on the task - assertTrue(es.getOutput().containsKey("o1")); - assertEquals("p1 value", es.getOutput().get("o1")); - assertEquals(es.getOutput().get("validationErrors").toString(), "There was a terminal error"); - - outputParameters.remove("validationErrors"); - metadataService.updateWorkflowDef(found); - - } - - - @Test - public void testSimpleWorkflow() { - - clearWorkflows(); - - WorkflowDef found = metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); - assertNotNull(found); - - String correlationId = "unit_test_1"; - Map input = new HashMap<>(); - String inputParam1 = "p1 value"; - input.put("param1", inputParam1); - input.put("param2", "p2 value"); - String workflowInstanceId = workflowExecutor.startWorkflow(LINEAR_WORKFLOW_T1_T2, 1, correlationId, input); - logger.info("testSimpleWorkflow.wfid= {}", workflowInstanceId); - assertNotNull(workflowInstanceId); - - Workflow workflow = workflowExecutionService.getExecutionStatus(workflowInstanceId, true); - assertNotNull(workflow); - assertEquals(workflow.getReasonForIncompletion(), WorkflowStatus.RUNNING, workflow.getStatus()); - assertEquals(1, workflow.getTasks().size()); //The very first task is the one that should be scheduled. 
- - boolean failed = false; - try { - workflowExecutor.rewind(workflowInstanceId); - } catch (ApplicationException ae) { - failed = true; - } - assertTrue(failed); - - // Polling for the first task should return the same task as before - Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); - assertNotNull(task); - assertEquals("junit_task_1", task.getTaskType()); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - assertEquals(workflowInstanceId, task.getWorkflowInstanceId()); - - workflowExecutor.decide(workflowInstanceId); - - String task1Op = "task1.Done"; - List tasks = workflowExecutionService.getTasks(task.getTaskType(), null, 1); - assertNotNull(tasks); - assertEquals(1, tasks.size()); - task = tasks.get(0); - - workflow = workflowExecutionService.getExecutionStatus(task.getWorkflowInstanceId(), false); - System.out.println("task workflow = " + workflow.getWorkflowType() + "," + workflow.getInput()); - assertEquals(workflowInstanceId, task.getWorkflowInstanceId()); - task.getOutputData().put("op", task1Op); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - workflow = workflowExecutionService.getExecutionStatus(workflowInstanceId, false); - assertNotNull(workflow); - assertNotNull(workflow.getOutput()); - - task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); - assertNotNull(task); - assertEquals("junit_task_2", task.getTaskType()); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - String task2Input = (String) task.getInputData().get("tp2"); - assertNotNull("Found=" + task.getInputData(), task2Input); - assertEquals(task1Op, task2Input); - - task2Input = (String) task.getInputData().get("tp1"); - assertNotNull(task2Input); - assertEquals(inputParam1, task2Input); - - task.setStatus(COMPLETED); - task.setReasonForIncompletion("unit test failure"); - workflowExecutionService.updateTask(task); - - workflow = workflowExecutionService.getExecutionStatus(workflowInstanceId, true); - assertNotNull(workflow); - assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); - tasks = workflow.getTasks(); - assertNotNull(tasks); - assertEquals(2, tasks.size()); - - assertTrue("Found " + workflow.getOutput().toString(), workflow.getOutput().containsKey("o3")); - assertEquals("task1.Done", workflow.getOutput().get("o3")); - } - - @Test - public void testSimpleWorkflowWithResponseTimeout() throws Exception { - - createWFWithResponseTimeout(); - - String correlationId = "unit_test_1"; - Map workflowInput = new HashMap(); - String inputParam1 = "p1 value"; - workflowInput.put("param1", inputParam1); - workflowInput.put("param2", "p2 value"); - String workflowId = workflowExecutor.startWorkflow("RTOWF", 1, correlationId, workflowInput); - logger.debug("testSimpleWorkflowWithResponseTimeout.wfid={}", workflowId); - assertNotNull(workflowId); - - Workflow workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(WorkflowStatus.RUNNING, workflow.getStatus()); - assertEquals(1, workflow.getTasks().size()); //The very first task is the one that should be scheduled. 
- assertEquals(1, queueDAO.getSize("task_rt")); - - // Polling for the first task should return the first task - Task task = workflowExecutionService.poll("task_rt", "task1.junit.worker.testTimeout"); - assertNotNull(task); - assertEquals("task_rt", task.getTaskType()); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - assertEquals(workflowId, task.getWorkflowInstanceId()); - - // As the task_rt is out of the queue, the next poll should not get it - Task nullTask = workflowExecutionService.poll("task_rt", "task1.junit.worker.testTimeout"); - assertNull(nullTask); - - Thread.sleep(10000); - workflowExecutor.decide(workflowId); - assertEquals(1, queueDAO.getSize("task_rt")); - - // The first task would be timed_out and a new task will be scheduled - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(WorkflowStatus.RUNNING, workflow.getStatus()); - assertEquals(2, workflow.getTasks().size()); - assertTrue(workflow.getTasks().stream().allMatch(t -> t.getReferenceTaskName().equals("task_rt_t1"))); - assertEquals(TIMED_OUT, workflow.getTasks().get(0).getStatus()); - assertEquals(SCHEDULED, workflow.getTasks().get(1).getStatus()); - - // Polling now should get the seco task back because it is now scheduled - Task taskAgain = workflowExecutionService.poll("task_rt", "task1.junit.worker"); - assertNotNull(taskAgain); - - // update task with callback after seconds greater than the response timeout - taskAgain.setStatus(IN_PROGRESS); - taskAgain.setCallbackAfterSeconds(20); - workflowExecutionService.updateTask(taskAgain); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(WorkflowStatus.RUNNING, workflow.getStatus()); - assertEquals(2, workflow.getTasks().size()); - assertEquals(IN_PROGRESS, workflow.getTasks().get(1).getStatus()); - - // wait for callback after seconds which is longer than response timeout seconds and then call decide - Thread.sleep(20000); - workflowExecutor.decide(workflowId); - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - - // Poll for task again - taskAgain = workflowExecutionService.poll("task_rt", "task1.junit.worker"); - assertNotNull(taskAgain); - - // set task to completed - taskAgain.getOutputData().put("op", "task1.Done"); - taskAgain.setStatus(COMPLETED); - workflowExecutionService.updateTask(taskAgain); - - // poll for next task - task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker.testTimeout"); - assertNotNull(task); - assertEquals("junit_task_2", task.getTaskType()); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - - // set task to completed - task.setStatus(COMPLETED); - task.setReasonForIncompletion("unit test failure"); - workflowExecutionService.updateTask(task); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); - } - - @Test - public void testWorkflowRerunWithSubWorkflows() { - // Execute a workflow with sub-workflow - String workflowId = this.runWorkflowWithSubworkflow(); - // Check it completed - Workflow workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); - assertEquals(2, workflow.getTasks().size()); - - // Now lets pickup the first task in the sub workflow and rerun 
it from there - String subWorkflowId = null; - for (Task task : workflow.getTasks()) { - if (task.getTaskType().equalsIgnoreCase(SubWorkflow.NAME)) { - subWorkflowId = task.getOutputData().get(SubWorkflow.SUB_WORKFLOW_ID).toString(); - } - } - assertNotNull(subWorkflowId); - Workflow subWorkflow = workflowExecutionService.getExecutionStatus(subWorkflowId, true); - Task subWorkflowTask1 = null; - for (Task task : subWorkflow.getTasks()) { - if (task.getTaskDefName().equalsIgnoreCase("junit_task_1")) { - subWorkflowTask1 = task; - } - } - assertNotNull(subWorkflowTask1); - - RerunWorkflowRequest rerunWorkflowRequest = new RerunWorkflowRequest(); - rerunWorkflowRequest.setReRunFromTaskId(subWorkflowTask1.getTaskId()); - - Map newInput = new HashMap<>(); - newInput.put("p1", "1"); - newInput.put("p2", "2"); - rerunWorkflowRequest.setTaskInput(newInput); - - String correlationId = "unit_test_sw_new"; - Map input = new HashMap<>(); - input.put("param1", "New p1 value"); - input.put("param2", "New p2 value"); - rerunWorkflowRequest.setCorrelationId(correlationId); - rerunWorkflowRequest.setWorkflowInput(input); - - rerunWorkflowRequest.setReRunFromWorkflowId(workflowId); - rerunWorkflowRequest.setReRunFromTaskId(subWorkflowTask1.getTaskId()); - // Rerun - workflowExecutor.rerun(rerunWorkflowRequest); - - // The main WF and the sub WF should be in RUNNING state - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(WorkflowStatus.RUNNING, workflow.getStatus()); - assertEquals(2, workflow.getTasks().size()); - assertEquals(correlationId, workflow.getCorrelationId()); - assertEquals("New p1 value", workflow.getInput().get("param1")); - assertEquals("New p2 value", workflow.getInput().get("param2")); - - subWorkflow = workflowExecutionService.getExecutionStatus(subWorkflowId, true); - assertNotNull(subWorkflow); - assertEquals(WorkflowStatus.RUNNING, subWorkflow.getStatus()); - // Since we are re running from the sub workflow task, there - // should be only 1 task that is SCHEDULED - assertEquals(1, subWorkflow.getTasks().size()); - assertEquals(SCHEDULED, subWorkflow.getTasks().get(0).getStatus()); - - // Now execute the task - Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); - assertNotNull(task); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - assertEquals(task.getInputData().get("p1").toString(), "1"); - assertEquals(task.getInputData().get("p2").toString(), "2"); - task.getOutputData().put("op", "junit_task_1.done"); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - subWorkflow = workflowExecutionService.getExecutionStatus(subWorkflowId, true); - assertNotNull(subWorkflow); - assertEquals(WorkflowStatus.RUNNING, subWorkflow.getStatus()); - assertEquals(2, subWorkflow.getTasks().size()); - - // Poll for second task of the sub workflow and execute it - task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); - assertNotNull(task); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - task.getOutputData().put("op", "junit_task_2.done"); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - // Now the sub workflow and the main workflow must have finished - subWorkflow = workflowExecutionService.getExecutionStatus(subWorkflowId, true); - assertNotNull(subWorkflow); - assertEquals(WorkflowStatus.COMPLETED, subWorkflow.getStatus()); - assertEquals(2, subWorkflow.getTasks().size()); - - 
workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); - assertEquals(2, workflow.getTasks().size()); - } - - @Test - public void testSimpleWorkflowWithTaskSpecificDomain() { - - clearWorkflows(); - createWorkflowDefForDomain(); - - WorkflowDef found = metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2_SW, 1); - assertNotNull(found); - - String correlationId = "unit_test_sw"; - Map input = new HashMap<>(); - String inputParam1 = "p1 value"; - input.put("param1", inputParam1); - input.put("param2", "p2 value"); - Map taskToDomain = new HashMap<>(); - taskToDomain.put("junit_task_3", "domain1"); - taskToDomain.put("junit_task_2", "domain1"); - - // Poll before so that a polling for this task is "active" - Task task = workflowExecutionService.poll("junit_task_3", "task1.junit.worker", "domain1"); - assertNull(task); - task = workflowExecutionService.poll("junit_task_2", "task1.junit.worker", "domain1"); - assertNull(task); - - String workflowId = workflowExecutor.startWorkflow(LINEAR_WORKFLOW_T1_T2_SW, 1, correlationId, input, null, null, taskToDomain); - //System.out.println("testSimpleWorkflow.wfid=" + workflowId); - assertNotNull(workflowId); - - Workflow workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(workflow.getReasonForIncompletion(), WorkflowStatus.RUNNING, workflow.getStatus()); - assertEquals(WorkflowStatus.RUNNING, workflow.getStatus()); - assertEquals(1, workflow.getTasks().size()); //The very first task is the one that should be scheduled. - - // Check Size - Map sizes = workflowExecutionService.getTaskQueueSizes(Arrays.asList("domain1:junit_task_3", "junit_task_3")); - assertEquals(sizes.get("domain1:junit_task_3").intValue(), 1); - assertEquals(sizes.get("junit_task_3").intValue(), 0); - - // Polling for the first task should return the same task as before - task = workflowExecutionService.poll("junit_task_3", "task1.junit.worker"); - assertNull(task); - task = workflowExecutionService.poll("junit_task_3", "task1.junit.worker", "domain1"); - assertNotNull(task); - assertEquals("junit_task_3", task.getTaskType()); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - assertEquals(workflowId, task.getWorkflowInstanceId()); - - List tasks = workflowExecutionService.getTasks(task.getTaskType(), null, 1); - assertNotNull(tasks); - assertEquals(1, tasks.size()); - task = tasks.get(0); - assertEquals(workflowId, task.getWorkflowInstanceId()); - - String task1Op = "task1.Done"; - task.getOutputData().put("op", task1Op); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(WorkflowStatus.RUNNING, workflow.getStatus()); - assertEquals(2, workflow.getTasks().size()); - - task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); - assertNotNull(task); - assertEquals("junit_task_1", task.getTaskType()); - - workflow = workflowExecutionService.getExecutionStatus(task.getWorkflowInstanceId(), false); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - assertNotNull(workflow.getTaskToDomain()); - assertEquals(workflow.getTaskToDomain().size(), 2); - - task.setStatus(COMPLETED); - task.setReasonForIncompletion("unit test failure"); - workflowExecutionService.updateTask(task); - - task = 
workflowExecutionService.poll("junit_task_2", "task2.junit.worker", "domain1"); - assertNotNull(task); - assertEquals("junit_task_2", task.getTaskType()); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - - task.setStatus(COMPLETED); - task.setReasonForIncompletion("unit test failure"); - workflowExecutionService.updateTask(task); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); - tasks = workflow.getTasks(); - assertNotNull(tasks); - assertEquals(2, tasks.size()); - assertTrue("Found " + workflow.getOutput().toString(), workflow.getOutput().containsKey("o3")); - assertEquals("task1.Done", workflow.getOutput().get("o3")); - - List pollData = workflowExecutionService.getPollData("junit_task_3"); - assertEquals(2, pollData.size()); - for (PollData pd : pollData) { - assertEquals(pd.getQueueName(), "junit_task_3"); - assertEquals(pd.getWorkerId(), "task1.junit.worker"); - assertTrue(pd.getLastPollTime() != 0); - if (pd.getDomain() != null) { - assertEquals(pd.getDomain(), "domain1"); - } - } - - List pdList = workflowExecutionService.getAllPollData(); - int count = 0; - for (PollData pd : pdList) { - if (pd.getQueueName().equals("junit_task_3")) { - count++; - } - } - assertEquals(2, count); - } - - @Test - public void testSimpleWorkflowWithAllTaskInOneDomain() { - - clearWorkflows(); - createWorkflowDefForDomain(); - - WorkflowDef found = metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2_SW, 1); - assertNotNull(found); - - String correlationId = "unit_test_sw"; - Map input = new HashMap(); - String inputParam1 = "p1 value"; - input.put("param1", inputParam1); - input.put("param2", "p2 value"); - Map taskToDomain = new HashMap(); - taskToDomain.put("*", "domain11,, domain12"); - - // Poll before so that a polling for this task is "active" - Task task = workflowExecutionService.poll("junit_task_3", "task1.junit.worker", "domain11"); - assertNull(task); - task = workflowExecutionService.poll("junit_task_2", "task1.junit.worker", "domain12"); - assertNull(task); - - String workflowId = workflowExecutor.startWorkflow(LINEAR_WORKFLOW_T1_T2_SW, 1, correlationId, input, null, null, taskToDomain); - //System.out.println("testSimpleWorkflow.wfid=" + workflowId); - assertNotNull(workflowId); - - Workflow workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(workflow.getReasonForIncompletion(), WorkflowStatus.RUNNING, workflow.getStatus()); - assertEquals(1, workflow.getTasks().size()); //The very first task is the one that should be scheduled. 
- - // Check Size - Map sizes = workflowExecutionService.getTaskQueueSizes(Arrays.asList("domain11:junit_task_3", "junit_task_3")); - assertEquals(sizes.get("domain11:junit_task_3").intValue(), 1); - assertEquals(sizes.get("junit_task_3").intValue(), 0); - - // Polling for the first task should return the same task as before - task = workflowExecutionService.poll("junit_task_3", "task1.junit.worker"); - assertNull(task); - task = workflowExecutionService.poll("junit_task_3", "task1.junit.worker", "domain11"); - assertNotNull(task); - assertEquals("junit_task_3", task.getTaskType()); - assertEquals("domain11", task.getDomain()); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - assertEquals(workflowId, task.getWorkflowInstanceId()); - - List tasks = workflowExecutionService.getTasks(task.getTaskType(), null, 1); - assertNotNull(tasks); - assertEquals(1, tasks.size()); - task = tasks.get(0); - - String task1Op = "task1.Done"; - assertEquals(workflowId, task.getWorkflowInstanceId()); - task.getOutputData().put("op", task1Op); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(2, workflow.getTasks().size()); - - task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); - assertNotNull(task); - assertEquals("junit_task_1", task.getTaskType()); - - workflow = workflowExecutionService.getExecutionStatus(task.getWorkflowInstanceId(), false); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - assertNotNull(workflow.getTaskToDomain()); - assertEquals(workflow.getTaskToDomain().size(), 1); - - task.setStatus(COMPLETED); - task.setReasonForIncompletion("unit test failure"); - workflowExecutionService.updateTask(task); - - task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker", "domain11"); - assertNull(task); - task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker", "domain12"); - assertNotNull(task); - assertEquals("junit_task_2", task.getTaskType()); - assertEquals("domain12", task.getDomain()); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - - task.setStatus(COMPLETED); - task.setReasonForIncompletion("unit test failure"); - workflowExecutionService.updateTask(task); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); - - tasks = workflow.getTasks(); - assertNotNull(tasks); - assertEquals(2, tasks.size()); - assertTrue("Found " + workflow.getOutput().toString(), workflow.getOutput().containsKey("o3")); - assertEquals("task1.Done", workflow.getOutput().get("o3")); - } - - @After - public void clearWorkflows() { - List workflows = metadataService.getWorkflowDefs().stream() - .map(WorkflowDef::getName) - .collect(Collectors.toList()); - for (String wfName : workflows) { - List running = workflowExecutionService.getRunningWorkflows(wfName); - for (String wfid : running) { - workflowExecutor.terminateWorkflow(wfid, "cleanup"); - } - } - queueDAO.queuesDetail().keySet().forEach(queueDAO::flush); - } - - @Test - public void testLongRunning() { - - clearWorkflows(); - - WorkflowDef found = metadataService.getWorkflowDef(LONG_RUNNING, 1); - assertNotNull(found); - - String correlationId = "unit_test_1"; - Map input = new HashMap(); - String inputParam1 = "p1 value"; - input.put("param1", inputParam1); - 
input.put("param2", "p2 value"); - String workflowId = workflowExecutor.startWorkflow(LONG_RUNNING, 1, correlationId, input); - logger.debug("testLongRunning.wfid={}", workflowId); - assertNotNull(workflowId); - - Workflow workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(WorkflowStatus.RUNNING, workflow.getStatus()); - - // Check the queue - assertEquals(Integer.valueOf(1), workflowExecutionService.getTaskQueueSizes(Collections.singletonList("junit_task_1")).get("junit_task_1")); - - Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); - assertNotNull(task); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - - String param1 = (String) task.getInputData().get("p1"); - String param2 = (String) task.getInputData().get("p2"); - assertNotNull(param1); - assertNotNull(param2); - assertEquals("p1 value", param1); - assertEquals("p2 value", param2); - - String task1Output = "task1.In.Progress"; - task.getOutputData().put("op", task1Output); - task.setStatus(Status.IN_PROGRESS); - task.setCallbackAfterSeconds(5); - workflowExecutionService.updateTask(task); - String taskId = task.getTaskId(); - - // Check the queue - assertEquals(Integer.valueOf(1), workflowExecutionService.getTaskQueueSizes(Collections.singletonList("junit_task_1")).get("junit_task_1")); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(WorkflowStatus.RUNNING, workflow.getStatus()); - - // Polling for next task should not return anything - Task task2 = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); - assertNull(task2); - - task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); - assertNull(task); - - Uninterruptibles.sleepUninterruptibly(5, TimeUnit.SECONDS); - // Polling for the first task should return the same task as before - task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); - assertNotNull(task); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - assertEquals(task.getTaskId(), taskId); - - task1Output = "task1.Done"; - List tasks = workflowExecutionService.getTasks(task.getTaskType(), null, 1); - assertNotNull(tasks); - assertEquals(1, tasks.size()); - assertEquals(workflowId, task.getWorkflowInstanceId()); - - task = tasks.get(0); - task.getOutputData().put("op", task1Output); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); - assertNotNull(task); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - String task2Input = (String) task.getInputData().get("tp2"); - assertNotNull(task2Input); - assertEquals(task1Output, task2Input); - - task2Input = (String) task.getInputData().get("tp1"); - assertNotNull(task2Input); - assertEquals(inputParam1, task2Input); - - task.setStatus(COMPLETED); - task.setReasonForIncompletion("unit test failure"); - workflowExecutionService.updateTask(task); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); - - tasks = workflow.getTasks(); - assertNotNull(tasks); - assertEquals(2, tasks.size()); - } - - @Test - public void testResetWorkflowInProgressTasks() { - - clearWorkflows(); - - WorkflowDef found = metadataService.getWorkflowDef(LONG_RUNNING, 1); - assertNotNull(found); - - 
String correlationId = "unit_test_1"; - Map input = new HashMap(); - String inputParam1 = "p1 value"; - input.put("param1", inputParam1); - input.put("param2", "p2 value"); - String wfid = workflowExecutor.startWorkflow(LONG_RUNNING, 1, correlationId, input); - System.out.println("testLongRunning.wfid=" + wfid); - assertNotNull(wfid); - - Workflow es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals(WorkflowStatus.RUNNING, es.getStatus()); - - - es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals(WorkflowStatus.RUNNING, es.getStatus()); - - // Check the queue - assertEquals(Integer.valueOf(1), workflowExecutionService.getTaskQueueSizes(Arrays.asList("junit_task_1")).get("junit_task_1")); - /// - - Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); - assertNotNull(task); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - - String param1 = (String) task.getInputData().get("p1"); - String param2 = (String) task.getInputData().get("p2"); - - assertNotNull(param1); - assertNotNull(param2); - assertEquals("p1 value", param1); - assertEquals("p2 value", param2); - - - String task1Op = "task1.In.Progress"; - task.getOutputData().put("op", task1Op); - task.setStatus(Status.IN_PROGRESS); - task.setCallbackAfterSeconds(3600); - workflowExecutionService.updateTask(task); - String taskId = task.getTaskId(); - - // Check the queue - assertEquals(Integer.valueOf(1), workflowExecutionService.getTaskQueueSizes(Arrays.asList("junit_task_1")).get("junit_task_1")); - /// - - - es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals(WorkflowStatus.RUNNING, es.getStatus()); - - // Polling for next task should not return anything - Task task2 = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); - assertNull(task2); - - task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); - assertNull(task); - - //Uninterruptibles.sleepUninterruptibly(5, TimeUnit.SECONDS); - // Reset - workflowExecutor.resetCallbacksForInProgressTasks(wfid); - - - // Now Polling for the first task should return the same task as before - task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); - assertNotNull(task); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - assertEquals(task.getTaskId(), taskId); - assertEquals(task.getCallbackAfterSeconds(), 0); - - task1Op = "task1.Done"; - List tasks = workflowExecutionService.getTasks(task.getTaskType(), null, 1); - assertNotNull(tasks); - assertEquals(1, tasks.size()); - assertEquals(wfid, task.getWorkflowInstanceId()); - task = tasks.get(0); - task.getOutputData().put("op", task1Op); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); - assertNotNull(task); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - String task2Input = (String) task.getInputData().get("tp2"); - assertNotNull(task2Input); - assertEquals(task1Op, task2Input); - - task2Input = (String) task.getInputData().get("tp1"); - assertNotNull(task2Input); - assertEquals(inputParam1, task2Input); - - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - - es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals(WorkflowStatus.COMPLETED, es.getStatus()); - tasks = es.getTasks(); - 
assertNotNull(tasks); - assertEquals(2, tasks.size()); - - - } - - - @Test - public void testConcurrentWorkflowExecutions() { - - int count = 3; - - WorkflowDef found = metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); - assertNotNull(found); - - String correlationId = "unit_test_concurrrent"; - Map input = new HashMap(); - String inputParam1 = "p1 value"; - input.put("param1", inputParam1); - input.put("param2", "p2 value"); - String[] wfids = new String[count]; - - for (int i = 0; i < count; i++) { - String wfid = workflowExecutor.startWorkflow(LINEAR_WORKFLOW_T1_T2, 1, correlationId, input); - System.out.println("testConcurrentWorkflowExecutions.wfid=" + wfid); - assertNotNull(wfid); - - List ids = workflowExecutionService.getRunningWorkflows(LINEAR_WORKFLOW_T1_T2); - assertNotNull(ids); - assertTrue("found no ids: " + ids, ids.size() > 0); //if there are concurrent tests running, this would be more than 1 - boolean foundId = false; - for (String id : ids) { - if (id.equals(wfid)) { - foundId = true; - } - } - assertTrue(foundId); - wfids[i] = wfid; - } - - - String task1Op = ""; - for (int i = 0; i < count; i++) { - - Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); - assertNotNull(task); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - String param1 = (String) task.getInputData().get("p1"); - String param2 = (String) task.getInputData().get("p2"); - - assertNotNull(param1); - assertNotNull(param2); - assertEquals("p1 value", param1); - assertEquals("p2 value", param2); - - task1Op = "task1.output->" + param1 + "." + param2; - task.getOutputData().put("op", task1Op); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - } - - for (int i = 0; i < count; i++) { - Task task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); - assertNotNull(task); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - String task2Input = (String) task.getInputData().get("tp2"); - assertNotNull(task2Input); - assertEquals(task1Op, task2Input); - - task2Input = (String) task.getInputData().get("tp1"); - assertNotNull(task2Input); - assertEquals(inputParam1, task2Input); - - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - } - - List wfs = workflowExecutionService.getWorkflowInstances(LINEAR_WORKFLOW_T1_T2, correlationId, false, false); - wfs.forEach(wf -> { - assertEquals(WorkflowStatus.COMPLETED, wf.getStatus()); - }); - - - } - - @Test - public void testCaseStatements() { - createConditionalWF(); - - String correlationId = "testCaseStatements: " + System.currentTimeMillis(); - Map input = new HashMap(); - String wfid; - String[] sequence; - - - //default case - input.put("param1", "xxx"); - input.put("param2", "two"); - wfid = workflowExecutor.startWorkflow(COND_TASK_WF, 1, correlationId, input); - System.out.println("testCaseStatements.wfid=" + wfid); - assertNotNull(wfid); - Workflow es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals(WorkflowStatus.RUNNING, es.getStatus()); - Task task = workflowExecutionService.poll("junit_task_2", "junit"); - assertNotNull(task); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals(WorkflowStatus.COMPLETED, es.getStatus()); - assertEquals(3, es.getTasks().size()); - - /// - - - //nested - one - input.put("param1", "nested"); - input.put("param2", 
"one"); - wfid = workflowExecutor.startWorkflow(COND_TASK_WF, 1, correlationId, input); - System.out.println("testCaseStatements.wfid=" + wfid); - assertNotNull(wfid); - es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals(WorkflowStatus.RUNNING, es.getStatus()); - sequence = new String[]{"junit_task_1", "junit_task_3"}; - - validate(wfid, sequence, new String[]{SystemTaskType.DECISION.name(), SystemTaskType.DECISION.name(), "junit_task_1", "junit_task_3", SystemTaskType.DECISION.name()}, 5); - // - - //nested - two - input.put("param1", "nested"); - input.put("param2", "two"); - wfid = workflowExecutor.startWorkflow(COND_TASK_WF, 1, correlationId, input); - System.out.println("testCaseStatements.wfid=" + wfid); - assertNotNull(wfid); - sequence = new String[]{"junit_task_2"}; - validate(wfid, sequence, new String[]{SystemTaskType.DECISION.name(), SystemTaskType.DECISION.name(), "junit_task_2", SystemTaskType.DECISION.name()}, 4); - // - - //three - input.put("param1", "three"); - input.put("param2", "two"); - input.put("finalCase", "notify"); - wfid = workflowExecutor.startWorkflow(COND_TASK_WF, 1, correlationId, input); - System.out.println("testCaseStatements.wfid=" + wfid); - assertNotNull(wfid); - sequence = new String[]{"junit_task_3", "junit_task_4"}; - validate(wfid, sequence, new String[]{SystemTaskType.DECISION.name(), "junit_task_3", SystemTaskType.DECISION.name(), "junit_task_4"}, 3); - // - - } - - private void validate(String wfid, String[] sequence, String[] executedTasks, int expectedTotalTasks) { - for (int i = 0; i < sequence.length; i++) { - String t = sequence[i]; - Task task = getTask(t); - if (task == null) { - System.out.println("Missing task for " + t + ", below are the workflow tasks completed..."); - Workflow workflow = workflowExecutionService.getExecutionStatus(wfid, true); - for (Task x : workflow.getTasks()) { - System.out.println(x.getTaskType() + "/" + x.getReferenceTaskName()); - } - } - assertNotNull("No task for " + t, task); - assertEquals(wfid, task.getWorkflowInstanceId()); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - Workflow workflow = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(workflow); - assertTrue(!workflow.getTasks().isEmpty()); - if (i < sequence.length - 1) { - assertEquals(WorkflowStatus.RUNNING, workflow.getStatus()); - } else { - workflow = workflowExecutionService.getExecutionStatus(wfid, true); - List workflowTasks = workflow.getTasks(); - assertEquals(workflowTasks.toString(), executedTasks.length, workflowTasks.size()); - for (int k = 0; k < executedTasks.length; k++) { - assertEquals("Tasks: " + workflowTasks.toString() + "\n", executedTasks[k], workflowTasks.get(k).getTaskType()); - } - - assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); - } - } - } - - - private Task getTask(String taskType) { - Task task; - int count = 2; - do { - task = workflowExecutionService.poll(taskType, "junit"); - if (task == null) { - count--; - } - if (count < 0) { - break; - } - - } while (task == null); - if (task != null) { - workflowExecutionService.ackTaskReceived(task.getTaskId()); - } - return task; - } - - @Test - public void testRetries() { - - String taskName = "junit_task_2"; - TaskDef taskDef = metadataService.getTaskDef(taskName); - taskDef.setRetryCount(2); - taskDef.setRetryDelaySeconds(1); - metadataService.updateTaskDef(taskDef); - - WorkflowDef found = metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); - 
assertNotNull(found); - - String correlationId = "unit_test_1"; - Map input = new HashMap(); - String inputParam1 = "p1 value"; - input.put("param1", inputParam1); - input.put("param2", "p2 value"); - String wfid = workflowExecutor.startWorkflow(LINEAR_WORKFLOW_T1_T2, 1, correlationId, input); - System.out.println("testRetries.wfid=" + wfid); - assertNotNull(wfid); - - List ids = workflowExecutionService.getRunningWorkflows(LINEAR_WORKFLOW_T1_T2); - assertNotNull(ids); - assertTrue("found no ids: " + ids, ids.size() > 0); //if there are concurrent tests running, this would be more than 1 - boolean foundId = false; - for (String id : ids) { - if (id.equals(wfid)) { - foundId = true; - } - } - assertTrue(foundId); - - Workflow es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals(WorkflowStatus.RUNNING, es.getStatus()); - - Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); - assertNotNull(task); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - - String param1 = (String) task.getInputData().get("p1"); - String param2 = (String) task.getInputData().get("p2"); - - assertNotNull(param1); - assertNotNull(param2); - assertEquals("p1 value", param1); - assertEquals("p2 value", param2); - - String task1Op = "task1.output->" + param1 + "." + param2; - task.getOutputData().put("op", task1Op); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - //fail the task twice and then succeed - verify(inputParam1, wfid, task1Op, true); - Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS); - verify(inputParam1, wfid, task1Op, false); - - es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals(WorkflowStatus.COMPLETED, es.getStatus()); - assertEquals(3, es.getTasks().size()); //task 1, and 2 of the task 2 - - assertEquals("junit_task_1", es.getTasks().get(0).getTaskType()); - assertEquals("junit_task_2", es.getTasks().get(1).getTaskType()); - assertEquals("junit_task_2", es.getTasks().get(2).getTaskType()); - assertEquals(COMPLETED, es.getTasks().get(0).getStatus()); - assertEquals(FAILED, es.getTasks().get(1).getStatus()); - assertEquals(COMPLETED, es.getTasks().get(2).getStatus()); - assertEquals(es.getTasks().get(1).getTaskId(), es.getTasks().get(2).getRetriedTaskId()); - - - } - - @Test - public void testSuccess() { - - WorkflowDef found = metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); - assertNotNull(found); - - String correlationId = "unit_test_1" + UUID.randomUUID().toString(); - Map input = new HashMap(); - String inputParam1 = "p1 value"; - input.put("param1", inputParam1); - input.put("param2", "p2 value"); - String wfid = workflowExecutor.startWorkflow(LINEAR_WORKFLOW_T1_T2, 1, correlationId, input); - assertNotNull(wfid); - - List ids = workflowExecutionService.getRunningWorkflows(LINEAR_WORKFLOW_T1_T2); - assertNotNull(ids); - assertTrue("found no ids: " + ids, ids.size() > 0); //if there are concurrent tests running, this would be more than 1 - boolean foundId = false; - for (String id : ids) { - if (id.equals(wfid)) { - foundId = true; - } - } - assertTrue(foundId); - - /* - * @correlationId - List byCorrelationId = ess.getWorkflowInstances(LINEAR_WORKFLOW_T1_T2, correlationId, false, false); - assertNotNull(byCorrelationId); - assertTrue(!byCorrelationId.isEmpty()); - assertEquals(1, byCorrelationId.size()); - */ - - Workflow es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - 
assertEquals(WorkflowStatus.RUNNING, es.getStatus()); - // The first task would be marked as scheduled - assertEquals(1, es.getTasks().size()); - assertEquals(SCHEDULED, es.getTasks().get(0).getStatus()); - - // decideNow should be idempotent if re-run on the same state! - es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals(WorkflowStatus.RUNNING, es.getStatus()); - assertEquals(1, es.getTasks().size()); - Task t = es.getTasks().get(0); - assertEquals(SCHEDULED, t.getStatus()); - - Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - - assertNotNull(task); - assertEquals(t.getTaskId(), task.getTaskId()); - es = workflowExecutionService.getExecutionStatus(wfid, true); - t = es.getTasks().get(0); - assertEquals(Status.IN_PROGRESS, t.getStatus()); - String taskId = t.getTaskId(); - - String param1 = (String) task.getInputData().get("p1"); - String param2 = (String) task.getInputData().get("p2"); - - assertNotNull(param1); - assertNotNull(param2); - assertEquals("p1 value", param1); - assertEquals("p2 value", param2); - - String task1Op = "task1.output->" + param1 + "." + param2; - task.getOutputData().put("op", task1Op); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - // If we get the full workflow here then, last task should be completed and the next task should be scheduled - es = workflowExecutionService.getExecutionStatus(wfid, true); - es.getTasks().forEach(wfTask -> { - if (wfTask.getTaskId().equals(taskId)) { - assertEquals(COMPLETED, wfTask.getStatus()); - } else { - assertEquals(SCHEDULED, wfTask.getStatus()); - } - }); - - task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - assertNotNull(task); - String task2Input = (String) task.getInputData().get("tp2"); - assertNotNull(task2Input); - assertEquals(task1Op, task2Input); - - task2Input = (String) task.getInputData().get("tp1"); - assertNotNull(task2Input); - assertEquals(inputParam1, task2Input); - - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals(WorkflowStatus.COMPLETED, es.getStatus()); - // Check the tasks, at this time there should be 2 task - assertEquals(es.getTasks().size(), 2); - es.getTasks().forEach(wfTask -> { - assertEquals(wfTask.getStatus(), COMPLETED); - }); - - System.out.println("Total tasks=" + es.getTasks().size()); - assertTrue(es.getTasks().size() < 10); - - - } - - @Test - public void testDeciderUpdate() { - - WorkflowDef found = metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); - assertNotNull(found); - - String correlationId = "unit_test_1" + UUID.randomUUID().toString(); - Map input = new HashMap(); - String inputParam1 = "p1 value"; - input.put("param1", inputParam1); - input.put("param2", "p2 value"); - String wfid = workflowExecutor.startWorkflow(LINEAR_WORKFLOW_T1_T2, 1, correlationId, input); - assertNotNull(wfid); - - Workflow workflow = workflowExecutor.getWorkflow(wfid, false); - long updated1 = workflow.getUpdateTime(); - Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS); - workflowExecutor.decide(wfid); - workflow = workflowExecutor.getWorkflow(wfid, false); - long updated2 = workflow.getUpdateTime(); - assertEquals(updated1, updated2); - - 
Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS); - workflowExecutor.terminateWorkflow(wfid, "done"); - workflow = workflowExecutor.getWorkflow(wfid, false); - updated2 = workflow.getUpdateTime(); - assertTrue("updated1[" + updated1 + "] >? updated2[" + updated2 + "]", updated2 > updated1); - - } - - @Test - @Ignore - //Ignore for now, will improve this in the future - public void testFailurePoints() { - - WorkflowDef found = metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); - assertNotNull(found); - - String correlationId = "unit_test_1" + UUID.randomUUID().toString(); - Map input = new HashMap(); - String inputParam1 = "p1 value"; - input.put("param1", inputParam1); - input.put("param2", "p2 value"); - String wfid = workflowExecutor.startWorkflow(LINEAR_WORKFLOW_T1_T2, 1, correlationId, input); - assertNotNull(wfid); - - Workflow es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals(WorkflowStatus.RUNNING, es.getStatus()); - // The first task would be marked as scheduled - assertEquals(1, es.getTasks().size()); - assertEquals(SCHEDULED, es.getTasks().get(0).getStatus()); - - Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - String taskId = task.getTaskId(); - - String task1Op = "task1.output"; - task.getOutputData().put("op", task1Op); - task.setStatus(COMPLETED); - try { - workflowExecutionService.updateTask(task); - } catch (Exception e) { - workflowExecutionService.updateTask(task); - } - - // If we get the full workflow here then, last task should be completed and the next task should be scheduled - es = workflowExecutionService.getExecutionStatus(wfid, true); - es.getTasks().forEach(wfTask -> { - if (wfTask.getTaskId().equals(taskId)) { - assertEquals(COMPLETED, wfTask.getStatus()); - } else { - assertEquals(SCHEDULED, wfTask.getStatus()); - } - }); - - task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - assertNotNull(task); - String task2Input = (String) task.getInputData().get("tp2"); - assertNotNull(task2Input); - assertEquals(task1Op, task2Input); - - task2Input = (String) task.getInputData().get("tp1"); - assertNotNull(task2Input); - assertEquals(inputParam1, task2Input); - - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals(WorkflowStatus.COMPLETED, es.getStatus()); - // Check the tasks, at this time there should be 2 task - assertEquals(es.getTasks().size(), 2); - es.getTasks().forEach(wfTask -> { - assertEquals(wfTask.getStatus(), COMPLETED); - }); - - System.out.println("Total tasks=" + es.getTasks().size()); - assertTrue(es.getTasks().size() < 10); - - - } - - @Test - public void testDeciderMix() throws Exception { - - ExecutorService executors = Executors.newFixedThreadPool(3); - - WorkflowDef found = metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); - assertNotNull(found); - - String correlationId = "unit_test_1" + UUID.randomUUID().toString(); - Map input = new HashMap(); - String inputParam1 = "p1 value"; - input.put("param1", inputParam1); - input.put("param2", "p2 value"); - String wfid = workflowExecutor.startWorkflow(LINEAR_WORKFLOW_T1_T2, 1, correlationId, input); - assertNotNull(wfid); - - List ids = 
workflowExecutionService.getRunningWorkflows(LINEAR_WORKFLOW_T1_T2); - assertNotNull(ids); - assertTrue("found no ids: " + ids, ids.size() > 0); //if there are concurrent tests running, this would be more than 1 - boolean foundId = false; - for (String id : ids) { - if (id.equals(wfid)) { - foundId = true; - } - } - assertTrue(foundId); - - Workflow es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals(WorkflowStatus.RUNNING, es.getStatus()); - // The first task would be marked as scheduled - assertEquals(1, es.getTasks().size()); - assertEquals(SCHEDULED, es.getTasks().get(0).getStatus()); - - List> futures = new LinkedList<>(); - for (int i = 0; i < 10; i++) { - futures.add(executors.submit(() -> { - workflowExecutor.decide(wfid); - return null; - })); - } - for (Future future : futures) { - future.get(); - } - futures.clear(); - - es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals(WorkflowStatus.RUNNING, es.getStatus()); - // The first task would be marked as scheduled - assertEquals(1, es.getTasks().size()); - assertEquals(SCHEDULED, es.getTasks().get(0).getStatus()); - - - // decideNow should be idempotent if re-run on the same state! - workflowExecutor.decide(wfid); - es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals(WorkflowStatus.RUNNING, es.getStatus()); - assertEquals(1, es.getTasks().size()); - Task t = es.getTasks().get(0); - assertEquals(SCHEDULED, t.getStatus()); - - Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - - assertNotNull(task); - assertEquals(t.getTaskId(), task.getTaskId()); - es = workflowExecutionService.getExecutionStatus(wfid, true); - t = es.getTasks().get(0); - assertEquals(Status.IN_PROGRESS, t.getStatus()); - String taskId = t.getTaskId(); - - String param1 = (String) task.getInputData().get("p1"); - String param2 = (String) task.getInputData().get("p2"); - - assertNotNull(param1); - assertNotNull(param2); - assertEquals("p1 value", param1); - assertEquals("p2 value", param2); - - String task1Op = "task1.output->" + param1 + "." + param2; - task.getOutputData().put("op", task1Op); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - // If we get the full workflow here then, last task should be completed and the next task should be scheduled - es = workflowExecutionService.getExecutionStatus(wfid, true); - es.getTasks().forEach(wfTask -> { - if (wfTask.getTaskId().equals(taskId)) { - assertEquals(COMPLETED, wfTask.getStatus()); - } else { - assertEquals(SCHEDULED, wfTask.getStatus()); - } - }); - - //Run sweep 10 times! 
- for (int i = 0; i < 10; i++) { - futures.add(executors.submit(() -> { - long s = System.currentTimeMillis(); - workflowExecutor.decide(wfid); - System.out.println("Took " + (System.currentTimeMillis() - s) + " ms to run decider"); - return null; - })); - } - for (Future future : futures) { - future.get(); - } - futures.clear(); - - es = workflowExecutionService.getExecutionStatus(wfid, true); - assertEquals(WorkflowStatus.RUNNING, es.getStatus()); - assertEquals(2, es.getTasks().size()); - - System.out.println("Workflow tasks=" + es.getTasks()); - - task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - assertNotNull(task); - String task2Input = (String) task.getInputData().get("tp2"); - assertNotNull(task2Input); - assertEquals(task1Op, task2Input); - - task2Input = (String) task.getInputData().get("tp1"); - assertNotNull(task2Input); - assertEquals(inputParam1, task2Input); - - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals(WorkflowStatus.COMPLETED, es.getStatus()); - // Check the tasks, at this time there should be 2 task - assertEquals(es.getTasks().size(), 2); - es.getTasks().forEach(wfTask -> { - assertEquals(wfTask.getStatus(), COMPLETED); - }); - - System.out.println("Total tasks=" + es.getTasks().size()); - assertTrue(es.getTasks().size() < 10); - } - - @Test - public void testFailures() { - WorkflowDef errorWorkflow = metadataService.getWorkflowDef(FORK_JOIN_WF, 1); - assertNotNull("Error workflow is not defined", errorWorkflow); - - String taskName = "junit_task_1"; - TaskDef taskDef = metadataService.getTaskDef(taskName); - taskDef.setRetryCount(0); - metadataService.updateTaskDef(taskDef); - - WorkflowDef found = metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); - assertNotNull(found); - assertNotNull(found.getFailureWorkflow()); - assertFalse(StringUtils.isBlank(found.getFailureWorkflow())); - - String correlationId = "unit_test_1" + UUID.randomUUID().toString(); - Map input = new HashMap(); - String inputParam1 = "p1 value"; - input.put("param1", inputParam1); - input.put("param2", "p2 value"); - input.put("failureWfName", "FanInOutTest"); - String wfid = workflowExecutor.startWorkflow(LINEAR_WORKFLOW_T1_T2, 1, correlationId, input); - assertNotNull(wfid); - - Task task = getTask("junit_task_1"); - assertNotNull(task); - task.setStatus(FAILED); - workflowExecutionService.updateTask(task); - - // If we get the full workflow here then, last task should be completed and the next task should be scheduled - Workflow es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals(WorkflowStatus.FAILED, es.getStatus()); - - taskDef.setRetryCount(RETRY_COUNT); - metadataService.updateTaskDef(taskDef); - - } - - @Test - public void testRetryWithForkJoin() throws Exception { - String workflowId = this.runAFailedForkJoinWF(); - workflowExecutor.retry(workflowId); - - Workflow workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(workflow.getStatus(), WorkflowStatus.RUNNING); - - printTaskStatuses(workflow, "After retry called"); - - Task t2 = workflowExecutionService.poll("junit_task_0_RT_2", "test"); - assertNotNull(t2); - assertTrue(workflowExecutionService.ackTaskReceived(t2.getTaskId())); - - Task t3 = workflowExecutionService.poll("junit_task_0_RT_3", 
"test"); - assertNotNull(t3); - assertTrue(workflowExecutionService.ackTaskReceived(t3.getTaskId())); - - t2.setStatus(COMPLETED); - t3.setStatus(COMPLETED); - - ExecutorService es = Executors.newFixedThreadPool(2); - Future future1 = es.submit(() -> { - try { - workflowExecutionService.updateTask(t2); - } catch (Exception e) { - throw new RuntimeException(e); - } - - }); - final Task _t3 = t3; - Future future2 = es.submit(() -> { - try { - workflowExecutionService.updateTask(_t3); - } catch (Exception e) { - throw new RuntimeException(e); - } - - }); - future1.get(); - future2.get(); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - - printTaskStatuses(workflow, "T2, T3 complete"); - workflowExecutor.decide(workflowId); - - Task t4 = workflowExecutionService.poll("junit_task_0_RT_4", "test"); - assertNotNull(t4); - t4.setStatus(COMPLETED); - workflowExecutionService.updateTask(t4); - - printTaskStatuses(workflowId, "After complete"); - } - - @Test - public void testRetry() { - String taskName = "junit_task_1"; - TaskDef taskDef = metadataService.getTaskDef(taskName); - int retryCount = taskDef.getRetryCount(); - taskDef.setRetryCount(1); - int retryDelay = taskDef.getRetryDelaySeconds(); - taskDef.setRetryDelaySeconds(0); - metadataService.updateTaskDef(taskDef); - - WorkflowDef workflowDef = metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); - assertNotNull(workflowDef); - assertNotNull(workflowDef.getFailureWorkflow()); - assertFalse(StringUtils.isBlank(workflowDef.getFailureWorkflow())); - - String correlationId = "unit_test_1" + UUID.randomUUID().toString(); - Map input = new HashMap<>(); - input.put("param1", "p1 value"); - input.put("param2", "p2 value"); - String workflowId = workflowExecutor.startWorkflow(LINEAR_WORKFLOW_T1_T2, 1, correlationId, input); - assertNotNull(workflowId); - printTaskStatuses(workflowId, "initial"); - - Task task = getTask("junit_task_1"); - assertNotNull(task); - task.setStatus(FAILED); - workflowExecutionService.updateTask(task); - - Workflow workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(WorkflowStatus.RUNNING, workflow.getStatus()); - - task = getTask("junit_task_1"); - assertNotNull(task); - task.setStatus(FAILED); - workflowExecutionService.updateTask(task); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(WorkflowStatus.FAILED, workflow.getStatus()); - - printTaskStatuses(workflowId, "before retry"); - - workflowExecutor.retry(workflowId); - - printTaskStatuses(workflowId, "after retry"); - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(WorkflowStatus.RUNNING, workflow.getStatus()); - - task = getTask("junit_task_1"); - assertNotNull(task); - assertEquals(workflowId, task.getWorkflowInstanceId()); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(WorkflowStatus.RUNNING, workflow.getStatus()); - - task = getTask("junit_task_2"); - assertNotNull(task); - assertEquals(workflowId, task.getWorkflowInstanceId()); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(WorkflowStatus.COMPLETED, 
workflow.getStatus()); - - assertEquals(3, workflow.getTasks().stream().filter(t -> t.getTaskType().equals("junit_task_1")).count()); - - taskDef.setRetryCount(retryCount); - taskDef.setRetryDelaySeconds(retryDelay); - metadataService.updateTaskDef(taskDef); - - printTaskStatuses(workflowId, "final"); - - } - - @Test - public void testNonRestartartableWorkflows() { - String taskName = "junit_task_1"; - TaskDef taskDef = metadataService.getTaskDef(taskName); - taskDef.setRetryCount(0); - metadataService.updateTaskDef(taskDef); - - WorkflowDef found = metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); - found.setName(JUNIT_TEST_WF_NON_RESTARTABLE); - found.setRestartable(false); - metadataService.updateWorkflowDef(found); - - assertNotNull(found); - assertNotNull(found.getFailureWorkflow()); - assertFalse(StringUtils.isBlank(found.getFailureWorkflow())); - - String correlationId = "unit_test_1" + UUID.randomUUID().toString(); - Map input = new HashMap<>(); - String inputParam1 = "p1 value"; - input.put("param1", inputParam1); - input.put("param2", "p2 value"); - String workflowId = workflowExecutor.startWorkflow(JUNIT_TEST_WF_NON_RESTARTABLE, 1, correlationId, input); - assertNotNull(workflowId); - - Task task = getTask("junit_task_1"); - task.setStatus(FAILED); - workflowExecutionService.updateTask(task); - - // If we get the full workflow here then, last task should be completed and the next task should be scheduled - Workflow workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(WorkflowStatus.FAILED, workflow.getStatus()); - - workflowExecutor.rewind(workflow.getWorkflowId()); - - // Polling for the first task should return the same task as before - task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); - assertNotNull(task); - assertEquals("junit_task_1", task.getTaskType()); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - assertEquals(workflowId, task.getWorkflowInstanceId()); - - List tasks = workflowExecutionService.getTasks(task.getTaskType(), null, 1); - assertNotNull(tasks); - assertEquals(1, tasks.size()); - - task = tasks.get(0); - assertEquals(workflowId, task.getWorkflowInstanceId()); - - String task1Op = "task1.Done"; - task.getOutputData().put("op", task1Op); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertNotNull(workflow.getOutput()); - assertEquals(2, workflow.getTasks().size()); - - task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); - assertNotNull(task); - assertEquals("junit_task_2", task.getTaskType()); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - - String task2Input = (String) task.getInputData().get("tp2"); - assertNotNull("Found=" + task.getInputData(), task2Input); - assertEquals(task1Op, task2Input); - - task2Input = (String) task.getInputData().get("tp1"); - assertNotNull(task2Input); - assertEquals(inputParam1, task2Input); - - task.setStatus(COMPLETED); - task.setReasonForIncompletion("unit test failure"); - workflowExecutionService.updateTask(task); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); - - tasks = workflow.getTasks(); - assertNotNull(tasks); - assertEquals(2, tasks.size()); - assertTrue("Found " + 
workflow.getOutput().toString(), workflow.getOutput().containsKey("o3")); - assertEquals("task1.Done", workflow.getOutput().get("o3")); - - expectedException.expect(ApplicationException.class); - expectedException.expectMessage(String.format("is an instance of WorkflowDef: %s and version: %d and is non restartable", JUNIT_TEST_WF_NON_RESTARTABLE, 1)); - workflowExecutor.rewind(workflow.getWorkflowId()); - } - - - @Test - public void testRestart() { - String taskName = "junit_task_1"; - TaskDef taskDef = metadataService.getTaskDef(taskName); - taskDef.setRetryCount(0); - metadataService.updateTaskDef(taskDef); - - WorkflowDef found = metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); - assertNotNull(found); - assertNotNull(found.getFailureWorkflow()); - assertFalse(StringUtils.isBlank(found.getFailureWorkflow())); - - String correlationId = "unit_test_1" + UUID.randomUUID().toString(); - Map input = new HashMap(); - String inputParam1 = "p1 value"; - input.put("param1", inputParam1); - input.put("param2", "p2 value"); - String wfid = workflowExecutor.startWorkflow(LINEAR_WORKFLOW_T1_T2, 1, correlationId, input); - assertNotNull(wfid); - - Task task = getTask("junit_task_1"); - task.setStatus(FAILED); - workflowExecutionService.updateTask(task); - - // If we get the full workflow here then, last task should be completed and the next task should be scheduled - Workflow es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals(WorkflowStatus.FAILED, es.getStatus()); - - workflowExecutor.rewind(es.getWorkflowId()); - es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals(WorkflowStatus.RUNNING, es.getStatus()); - - task = getTask("junit_task_1"); - assertNotNull(task); - assertEquals(wfid, task.getWorkflowInstanceId()); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals(WorkflowStatus.RUNNING, es.getStatus()); - - task = getTask("junit_task_2"); - assertNotNull(task); - assertEquals(wfid, task.getWorkflowInstanceId()); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals(WorkflowStatus.COMPLETED, es.getStatus()); - - - } - - - @Test - public void testTimeout() throws Exception { - - String taskName = "junit_task_1"; - TaskDef taskDef = metadataService.getTaskDef(taskName); - taskDef.setRetryCount(1); - taskDef.setTimeoutSeconds(1); - taskDef.setRetryDelaySeconds(0); - taskDef.setTimeoutPolicy(TimeoutPolicy.RETRY); - metadataService.updateTaskDef(taskDef); - - WorkflowDef found = metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); - assertNotNull(found); - assertNotNull(found.getFailureWorkflow()); - assertFalse(StringUtils.isBlank(found.getFailureWorkflow())); - - String correlationId = "unit_test_1" + UUID.randomUUID().toString(); - Map input = new HashMap(); - String inputParam1 = "p1 value"; - input.put("param1", inputParam1); - input.put("param2", "p2 value"); - input.put("failureWfName", "FanInOutTest"); - String wfid = workflowExecutor.startWorkflow(LINEAR_WORKFLOW_T1_T2, 1, correlationId, input); - assertNotNull(wfid); - - //Ensure that we have a workflow queued up for evaluation here... 
- long size = queueDAO.getSize(WorkflowExecutor.deciderQueue); - assertEquals(1, size); - - // If we get the full workflow here then, last task should be completed and the next task should be scheduled - Workflow es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals(WorkflowStatus.RUNNING, es.getStatus()); - assertEquals("fond: " + es.getTasks().stream().map(Task::toString).collect(Collectors.toList()), 1, es.getTasks().size()); - - Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); - assertNotNull(task); - assertEquals(wfid, task.getWorkflowInstanceId()); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - - - //Ensure that we have a workflow queued up for evaluation here... - size = queueDAO.getSize(WorkflowExecutor.deciderQueue); - assertEquals(1, size); - - - Uninterruptibles.sleepUninterruptibly(3, TimeUnit.SECONDS); - workflowSweeper.sweep(Arrays.asList(wfid), workflowExecutor); - es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals("fond: " + es.getTasks().stream().map(Task::toString).collect(Collectors.toList()), 2, es.getTasks().size()); - - Task task1 = es.getTasks().get(0); - assertEquals(Status.TIMED_OUT, task1.getStatus()); - Task task2 = es.getTasks().get(1); - assertEquals(SCHEDULED, task2.getStatus()); - - task = workflowExecutionService.poll(task2.getTaskDefName(), "task1.junit.worker"); - assertNotNull(task); - assertEquals(wfid, task.getWorkflowInstanceId()); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - - Uninterruptibles.sleepUninterruptibly(3, TimeUnit.SECONDS); - workflowExecutor.decide(wfid); - - es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals(2, es.getTasks().size()); - - assertEquals(Status.TIMED_OUT, es.getTasks().get(0).getStatus()); - assertEquals(Status.TIMED_OUT, es.getTasks().get(1).getStatus()); - assertEquals(WorkflowStatus.TIMED_OUT, es.getStatus()); - - assertEquals(1, queueDAO.getSize(WorkflowExecutor.deciderQueue)); - - taskDef.setTimeoutSeconds(0); - taskDef.setRetryCount(RETRY_COUNT); - metadataService.updateTaskDef(taskDef); - - } - - @Test - public void testReruns() { - - WorkflowDef found = metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); - assertNotNull(found); - - String correlationId = "unit_test_1" + UUID.randomUUID().toString(); - Map input = new HashMap(); - String inputParam1 = "p1 value"; - input.put("param1", inputParam1); - input.put("param2", "p2 value"); - String wfid = workflowExecutor.startWorkflow(LINEAR_WORKFLOW_T1_T2, 1, correlationId, input); - assertNotNull(wfid); - - Workflow es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals(WorkflowStatus.RUNNING, es.getStatus()); - // Check the tasks, at this time there should be 1 task - assertEquals(es.getTasks().size(), 1); - Task t = es.getTasks().get(0); - assertEquals(SCHEDULED, t.getStatus()); - - Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); - assertNotNull(task); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - assertEquals(t.getTaskId(), task.getTaskId()); - - String param1 = (String) task.getInputData().get("p1"); - String param2 = (String) task.getInputData().get("p2"); - - assertNotNull(param1); - assertNotNull(param2); - assertEquals("p1 value", param1); - assertEquals("p2 value", param2); - - String task1Op = "task1.output->" + param1 + "." 
+ param2; - task.getOutputData().put("op", task1Op); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - // If we get the full workflow here then, last task should be completed and the next task should be scheduled - es = workflowExecutionService.getExecutionStatus(wfid, true); - es.getTasks().forEach(wfTask -> { - if (wfTask.getTaskId().equals(t.getTaskId())) { - assertEquals(wfTask.getStatus(), COMPLETED); - } else { - assertEquals(wfTask.getStatus(), SCHEDULED); - } - }); - - task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); - assertNotNull(task); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - String task2Input = (String) task.getInputData().get("tp2"); - assertNotNull(task2Input); - assertEquals(task1Op, task2Input); - - task2Input = (String) task.getInputData().get("tp1"); - assertNotNull(task2Input); - assertEquals(inputParam1, task2Input); - - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals(WorkflowStatus.COMPLETED, es.getStatus()); - - // Now rerun lets rerun the workflow from the second task - RerunWorkflowRequest request = new RerunWorkflowRequest(); - request.setReRunFromWorkflowId(wfid); - request.setReRunFromTaskId(es.getTasks().get(1).getTaskId()); - - String reRunwfid = workflowExecutor.rerun(request); - - Workflow esRR = workflowExecutionService.getExecutionStatus(reRunwfid, true); - assertNotNull(esRR); - assertEquals(esRR.getReasonForIncompletion(), WorkflowStatus.RUNNING, esRR.getStatus()); - // Check the tasks, at this time there should be 2 tasks - // first one is skipped and the second one is scheduled - assertEquals(esRR.getTasks().toString(), 2, esRR.getTasks().size()); - assertEquals(COMPLETED, esRR.getTasks().get(0).getStatus()); - Task tRR = esRR.getTasks().get(1); - assertEquals(esRR.getTasks().toString(), SCHEDULED, tRR.getStatus()); - assertEquals(tRR.getTaskType(), "junit_task_2"); - - task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); - assertNotNull(task); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - task2Input = (String) task.getInputData().get("tp2"); - assertNotNull(task2Input); - assertEquals(task1Op, task2Input); - - task2Input = (String) task.getInputData().get("tp1"); - assertNotNull(task2Input); - assertEquals(inputParam1, task2Input); - - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - es = workflowExecutionService.getExecutionStatus(reRunwfid, true); - assertNotNull(es); - assertEquals(WorkflowStatus.COMPLETED, es.getStatus()); - - ////////////////////// - // Now rerun the entire workflow - RerunWorkflowRequest request1 = new RerunWorkflowRequest(); - request1.setReRunFromWorkflowId(wfid); - - String reRunwfid1 = workflowExecutor.rerun(request1); - - es = workflowExecutionService.getExecutionStatus(reRunwfid1, true); - assertNotNull(es); - assertEquals(WorkflowStatus.RUNNING, es.getStatus()); - // Check the tasks, at this time there should be 1 task - assertEquals(es.getTasks().size(), 1); - assertEquals(SCHEDULED, es.getTasks().get(0).getStatus()); - - task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); - assertNotNull(task); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - - task.getOutputData().put("op", task1Op); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - task = 
workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); - assertNotNull(task); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals(WorkflowStatus.COMPLETED, es.getStatus()); - - - } - - - @Test - public void testTaskSkipping() { - - String taskName = "junit_task_1"; - TaskDef taskDef = metadataService.getTaskDef(taskName); - taskDef.setRetryCount(0); - taskDef.setTimeoutSeconds(0); - metadataService.updateTaskDef(taskDef); - - - WorkflowDef found = metadataService.getWorkflowDef(TEST_WORKFLOW_NAME_3, 1); - assertNotNull(found); - - String correlationId = "unit_test_1" + UUID.randomUUID().toString(); - Map input = new HashMap(); - String inputParam1 = "p1 value"; - input.put("param1", inputParam1); - input.put("param2", "p2 value"); - String wfid = workflowExecutor.startWorkflow(TEST_WORKFLOW_NAME_3, 1, correlationId, input); - assertNotNull(wfid); - - // Now Skip the second task - workflowExecutor.skipTaskFromWorkflow(wfid, "t2", null); - - Workflow es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals(WorkflowStatus.RUNNING, es.getStatus()); - // Check the tasks, at this time there should be 3 task - assertEquals(2, es.getTasks().size()); - assertEquals(SCHEDULED, es.getTasks().get(0).getStatus()); - assertEquals(Task.Status.SKIPPED, es.getTasks().get(1).getStatus()); - - Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); - assertNotNull(task); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - - assertEquals("t1", task.getReferenceTaskName()); - - String param1 = (String) task.getInputData().get("p1"); - String param2 = (String) task.getInputData().get("p2"); - - assertNotNull(param1); - assertNotNull(param2); - assertEquals("p1 value", param1); - assertEquals("p2 value", param2); - - String task1Op = "task1.output->" + param1 + "." 
+ param2; - task.getOutputData().put("op", task1Op); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - // If we get the full workflow here then, last task should be completed and the next task should be scheduled - es = workflowExecutionService.getExecutionStatus(wfid, true); - es.getTasks().forEach(wfTask -> { - if (wfTask.getReferenceTaskName().equals("t1")) { - assertEquals(COMPLETED, wfTask.getStatus()); - } else if (wfTask.getReferenceTaskName().equals("t2")) { - assertEquals(Status.SKIPPED, wfTask.getStatus()); - } else { - assertEquals(SCHEDULED, wfTask.getStatus()); - } - }); - - task = workflowExecutionService.poll("junit_task_3", "task3.junit.worker"); - assertNotNull(task); - assertEquals(Status.IN_PROGRESS, task.getStatus()); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals(WorkflowStatus.COMPLETED, es.getStatus()); - - - } - - @Test - public void testPauseResume() { - - WorkflowDef found = metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); - assertNotNull(found); - - String correlationId = "unit_test_1" + System.nanoTime(); - Map input = new HashMap(); - String inputParam1 = "p1 value"; - input.put("param1", inputParam1); - input.put("param2", "p2 value"); - String wfid = workflowExecutor.startWorkflow(LINEAR_WORKFLOW_T1_T2, 1, correlationId, input); - assertNotNull(wfid); - - List ids = workflowExecutionService.getRunningWorkflows(LINEAR_WORKFLOW_T1_T2); - assertNotNull(ids); - assertTrue("found no ids: " + ids, ids.size() > 0); //if there are concurrent tests running, this would be more than 1 - boolean foundId = false; - for (String id : ids) { - if (id.equals(wfid)) { - foundId = true; - } - } - assertTrue(foundId); - - Workflow es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals(WorkflowStatus.RUNNING, es.getStatus()); - - es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals(WorkflowStatus.RUNNING, es.getStatus()); - Task t = es.getTasks().get(0); - assertEquals(SCHEDULED, t.getStatus()); - - // PAUSE - workflowExecutor.pauseWorkflow(wfid); - - // The workflow is paused but the scheduled task should be pollable - - Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); - assertNotNull(task); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - assertEquals(t.getTaskId(), task.getTaskId()); - - String param1 = (String) task.getInputData().get("p1"); - String param2 = (String) task.getInputData().get("p2"); - - assertNotNull(param1); - assertNotNull(param2); - assertEquals("p1 value", param1); - assertEquals("p2 value", param2); - - String task1Op = "task1.output->" + param1 + "." 
+ param2; - task.getOutputData().put("op", task1Op); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - // This decide should not schedule the next task - //ds.decideNow(wfid, task); - - // If we get the full workflow here then, last task should be completed and the rest (including PAUSE task) should be scheduled - es = workflowExecutionService.getExecutionStatus(wfid, true); - es.getTasks().forEach(wfTask -> { - if (wfTask.getTaskId().equals(t.getTaskId())) { - assertEquals(wfTask.getStatus(), COMPLETED); - } - }); - - // This should return null as workflow is paused - task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); - assertNull("Found: " + task, task); - - // Even if decide is run again the next task will not be scheduled as the workflow is still paused-- - workflowExecutor.decide(wfid); - - task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); - assertTrue(task == null); - - // RESUME - workflowExecutor.resumeWorkflow(wfid); - - // Now polling should get the second task - task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); - assertNotNull(task); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - - - String task2Input = (String) task.getInputData().get("tp2"); - assertNotNull(task2Input); - assertEquals(task1Op, task2Input); - - Task byRefName = workflowExecutionService.getPendingTaskForWorkflow("t2", wfid); - assertNotNull(byRefName); - assertEquals(task.getTaskId(), byRefName.getTaskId()); - - task2Input = (String) task.getInputData().get("tp1"); - assertNotNull(task2Input); - assertEquals(inputParam1, task2Input); - - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - es = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(es); - assertEquals(WorkflowStatus.COMPLETED, es.getStatus()); - - } - - @Test - public void testSubWorkflow() { - - createSubWorkflow(); - WorkflowDef found = metadataService.getWorkflowDef(WF_WITH_SUB_WF, 1); - assertNotNull(found); - Map input = new HashMap<>(); - input.put("param1", "param 1 value"); - input.put("param3", "param 2 value"); - input.put("wfName", LINEAR_WORKFLOW_T1_T2); - String wfId = workflowExecutor.startWorkflow(WF_WITH_SUB_WF, 1, "test", input); - assertNotNull(wfId); - - Workflow es = workflowExecutionService.getExecutionStatus(wfId, true); - assertNotNull(es); - - Task task = workflowExecutionService.poll("junit_task_5", "test"); - assertNotNull(task); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); - - es = workflowExecutionService.getExecutionStatus(wfId, true); - assertNotNull(es); - assertNotNull(es.getTasks()); - - task = es.getTasks().stream().filter(t -> t.getTaskType().equals(Type.SUB_WORKFLOW.name())).findAny().get(); - assertNotNull(task); - assertNotNull(task.getOutputData()); - assertNotNull("Output: " + task.getOutputData().toString() + ", status: " + task.getStatus(), task.getOutputData().get(SubWorkflow.SUB_WORKFLOW_ID)); - String subWorkflowId = task.getOutputData().get(SubWorkflow.SUB_WORKFLOW_ID).toString(); - - es = workflowExecutionService.getExecutionStatus(subWorkflowId, true); - assertNotNull(es); - assertNotNull(es.getTasks()); - assertEquals(wfId, es.getParentWorkflowId()); - assertEquals(WorkflowStatus.RUNNING, es.getStatus()); - - task = workflowExecutionService.poll("junit_task_1", "test"); - task.setStatus(COMPLETED); - 
workflowExecutionService.updateTask(task); - - task = workflowExecutionService.poll("junit_task_2", "test"); - assertEquals(subWorkflowId, task.getWorkflowInstanceId()); - String uuid = UUID.randomUUID().toString(); - task.getOutputData().put("uuid", uuid); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - es = workflowExecutionService.getExecutionStatus(subWorkflowId, true); - assertNotNull(es); - assertEquals(WorkflowStatus.COMPLETED, es.getStatus()); - assertNotNull(es.getOutput()); - assertTrue(es.getOutput().containsKey("o1")); - assertTrue(es.getOutput().containsKey("o2")); - assertEquals("sub workflow input param1", es.getOutput().get("o1")); - assertEquals(uuid, es.getOutput().get("o2")); - - task = workflowExecutionService.poll("junit_task_6", "test"); - assertNotNull(task); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - es = workflowExecutionService.getExecutionStatus(wfId, true); - assertNotNull(es); - assertEquals(WorkflowStatus.COMPLETED, es.getStatus()); - } - - @Test - public void testSubWorkflowFailure() { - - TaskDef taskDef = metadataService.getTaskDef("junit_task_1"); - assertNotNull(taskDef); - taskDef.setRetryCount(0); - taskDef.setTimeoutSeconds(2); - metadataService.updateTaskDef(taskDef); - - - createSubWorkflow(); - WorkflowDef found = metadataService.getWorkflowDef(WF_WITH_SUB_WF, 1); - assertNotNull(found); - - Map input = new HashMap<>(); - input.put("param1", "param 1 value"); - input.put("param3", "param 2 value"); - input.put("wfName", LINEAR_WORKFLOW_T1_T2); - String wfId = workflowExecutor.startWorkflow(WF_WITH_SUB_WF, 1, "test", input); - assertNotNull(wfId); - - Workflow es = workflowExecutionService.getExecutionStatus(wfId, true); - assertNotNull(es); - - Task task = workflowExecutionService.poll("junit_task_5", "test"); - assertNotNull(task); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); - - es = workflowExecutionService.getExecutionStatus(wfId, true); - assertNotNull(es); - assertNotNull(es.getTasks()); - task = es.getTasks().stream().filter(t -> t.getTaskType().equals(Type.SUB_WORKFLOW.name())).findAny().get(); - assertNotNull(task); - assertNotNull(task.getOutputData()); - assertNotNull(task.getOutputData().get(SubWorkflow.SUB_WORKFLOW_ID)); - String subWorkflowId = task.getOutputData().get(SubWorkflow.SUB_WORKFLOW_ID).toString(); - - es = workflowExecutionService.getExecutionStatus(subWorkflowId, true); - assertNotNull(es); - assertNotNull(es.getTasks()); - - assertEquals(wfId, es.getParentWorkflowId()); - assertEquals(WorkflowStatus.RUNNING, es.getStatus()); - - task = workflowExecutionService.poll("junit_task_1", "test"); - assertNotNull(task); - task.setStatus(FAILED); - workflowExecutionService.updateTask(task); - - es = workflowExecutionService.getExecutionStatus(subWorkflowId, true); - assertNotNull(es); - assertEquals(WorkflowStatus.FAILED, es.getStatus()); - workflowExecutor.executeSystemTask(subworkflow, es.getParentWorkflowTaskId(), 1); - es = workflowExecutionService.getExecutionStatus(wfId, true); - assertEquals(WorkflowStatus.FAILED, es.getStatus()); - - taskDef.setTimeoutSeconds(0); - taskDef.setRetryCount(RETRY_COUNT); - metadataService.updateTaskDef(taskDef); - - } - - @Test - public void testSubWorkflowFailureInverse() { - - TaskDef taskDef = metadataService.getTaskDef("junit_task_1"); - assertNotNull(taskDef); - taskDef.setRetryCount(0); - taskDef.setTimeoutSeconds(2); - 
metadataService.updateTaskDef(taskDef); - - - createSubWorkflow(); - - WorkflowDef found = metadataService.getWorkflowDef(WF_WITH_SUB_WF, 1); - assertNotNull(found); - Map input = new HashMap<>(); - input.put("param1", "param 1 value"); - input.put("param3", "param 2 value"); - input.put("wfName", LINEAR_WORKFLOW_T1_T2); - String wfId = workflowExecutor.startWorkflow(WF_WITH_SUB_WF, 1, "test", input); - assertNotNull(wfId); - - Workflow es = workflowExecutionService.getExecutionStatus(wfId, true); - assertNotNull(es); - - Task task = workflowExecutionService.poll("junit_task_5", "test"); - assertNotNull(task); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); - - es = workflowExecutionService.getExecutionStatus(wfId, true); - assertNotNull(es); - assertNotNull(es.getTasks()); - task = es.getTasks().stream().filter(t -> t.getTaskType().equals(Type.SUB_WORKFLOW.name())).findAny().get(); - assertNotNull(task); - assertNotNull(task.getOutputData()); - assertNotNull(task.getOutputData().get(SubWorkflow.SUB_WORKFLOW_ID)); - String subWorkflowId = task.getOutputData().get(SubWorkflow.SUB_WORKFLOW_ID).toString(); - - es = workflowExecutionService.getExecutionStatus(subWorkflowId, true); - assertNotNull(es); - assertNotNull(es.getTasks()); - assertEquals(wfId, es.getParentWorkflowId()); - assertEquals(WorkflowStatus.RUNNING, es.getStatus()); - - workflowExecutor.terminateWorkflow(wfId, "fail"); - es = workflowExecutionService.getExecutionStatus(wfId, true); - assertEquals(WorkflowStatus.TERMINATED, es.getStatus()); - - es = workflowExecutionService.getExecutionStatus(subWorkflowId, true); - assertEquals(WorkflowStatus.TERMINATED, es.getStatus()); - - } - - @Test - public void testSubWorkflowRetry() { - String taskName = "junit_task_1"; - TaskDef taskDef = metadataService.getTaskDef(taskName); - int retryCount = metadataService.getTaskDef(taskName).getRetryCount(); - taskDef.setRetryCount(0); - metadataService.updateTaskDef(taskDef); - - // create a workflow with sub-workflow - createSubWorkflow(); - WorkflowDef found = metadataService.getWorkflowDef(WF_WITH_SUB_WF, 1); - assertNotNull(found); - - // start the workflow - Map workflowInputParams = new HashMap<>(); - workflowInputParams.put("param1", "param 1"); - workflowInputParams.put("param3", "param 2"); - workflowInputParams.put("wfName", LINEAR_WORKFLOW_T1_T2); - String workflowId = workflowExecutor.startWorkflow(WF_WITH_SUB_WF, 1, "test", workflowInputParams); - assertNotNull(workflowId); - - Workflow workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - - // poll and complete first task - Task task = workflowExecutionService.poll("junit_task_5", "test"); - assertNotNull(task); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertNotNull(workflow.getTasks()); - assertEquals(2, workflow.getTasks().size()); - - task = workflow.getTasks().stream().filter(t -> t.getTaskType().equals(Type.SUB_WORKFLOW.name())).findAny().orElse(null); - assertNotNull(task); - assertNotNull(task.getOutputData()); - assertNotNull("Output: " + task.getOutputData().toString() + ", status: " + task.getStatus(), task.getOutputData().get(SubWorkflow.SUB_WORKFLOW_ID)); - String subWorkflowId = task.getOutputData().get(SubWorkflow.SUB_WORKFLOW_ID).toString(); - - workflow = 
workflowExecutionService.getExecutionStatus(subWorkflowId, true); - assertNotNull(workflow); - assertNotNull(workflow.getTasks()); - assertEquals(workflowId, workflow.getParentWorkflowId()); - assertEquals(WorkflowStatus.RUNNING, workflow.getStatus()); - - // poll and fail the first task in sub-workflow - task = workflowExecutionService.poll("junit_task_1", "test"); - task.setStatus(FAILED); - workflowExecutionService.updateTask(task); - - workflow = workflowExecutionService.getExecutionStatus(subWorkflowId, true); - assertNotNull(workflow); - assertEquals(WorkflowStatus.FAILED, workflow.getStatus()); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(WorkflowStatus.FAILED, workflow.getStatus()); - - // Retry the failed sub workflow - workflowExecutor.retry(subWorkflowId); - task = workflowExecutionService.poll("junit_task_1", "test"); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - workflow = workflowExecutionService.getExecutionStatus(subWorkflowId, true); - assertNotNull(workflow); - assertEquals(WorkflowStatus.RUNNING, workflow.getStatus()); - - task = workflowExecutionService.poll("junit_task_2", "test"); - assertEquals(subWorkflowId, task.getWorkflowInstanceId()); - String uuid = UUID.randomUUID().toString(); - task.getOutputData().put("uuid", uuid); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - workflow = workflowExecutionService.getExecutionStatus(subWorkflowId, true); - assertNotNull(workflow); - assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); - assertNotNull(workflow.getOutput()); - assertTrue(workflow.getOutput().containsKey("o1")); - assertTrue(workflow.getOutput().containsKey("o2")); - assertEquals("sub workflow input param1", workflow.getOutput().get("o1")); - assertEquals(uuid, workflow.getOutput().get("o2")); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(WorkflowStatus.RUNNING, workflow.getStatus()); - - task = workflowExecutionService.poll("junit_task_6", "test"); - assertNotNull(task); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); - - // reset retry count - taskDef = metadataService.getTaskDef(taskName); - taskDef.setRetryCount(retryCount); - metadataService.updateTaskDef(taskDef); - } - - - @Test - public void testWait() { - - WorkflowDef workflowDef = new WorkflowDef(); - workflowDef.setName("test_wait"); - workflowDef.setSchemaVersion(2); - - WorkflowTask waitWorkflowTask = new WorkflowTask(); - waitWorkflowTask.setWorkflowTaskType(Type.WAIT); - waitWorkflowTask.setName("wait"); - waitWorkflowTask.setTaskReferenceName("wait0"); - - WorkflowTask workflowTask = new WorkflowTask(); - workflowTask.setName("junit_task_1"); - workflowTask.setTaskReferenceName("t1"); - - workflowDef.getTasks().add(waitWorkflowTask); - workflowDef.getTasks().add(workflowTask); - metadataService.registerWorkflowDef(workflowDef); - - String workflowId = workflowExecutor.startWorkflow(workflowDef.getName(), workflowDef.getVersion(), "", new HashMap<>()); - Workflow workflow = workflowExecutor.getWorkflow(workflowId, true); - assertNotNull(workflow); - assertEquals(1, workflow.getTasks().size()); - assertEquals(WorkflowStatus.RUNNING, workflow.getStatus()); - - Task waitTask = 
workflow.getTasks().get(0); - assertEquals(WorkflowTask.Type.WAIT.name(), waitTask.getTaskType()); - waitTask.setStatus(COMPLETED); - workflowExecutor.updateTask(new TaskResult(waitTask)); - - Task task = workflowExecutionService.poll("junit_task_1", "test"); - assertNotNull(task); - task.setStatus(Status.COMPLETED); - workflowExecutionService.updateTask(task); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals("tasks:" + workflow.getTasks(), WorkflowStatus.COMPLETED, workflow.getStatus()); - } - - @Test - public void testEventWorkflow() { - - TaskDef taskDef = new TaskDef(); - taskDef.setName("eventX"); - taskDef.setTimeoutSeconds(1); - - metadataService.registerTaskDef(Collections.singletonList(taskDef)); - - WorkflowDef workflowDef = new WorkflowDef(); - workflowDef.setName("test_event"); - workflowDef.setSchemaVersion(2); - - WorkflowTask eventWorkflowTask = new WorkflowTask(); - eventWorkflowTask.setWorkflowTaskType(Type.EVENT); - eventWorkflowTask.setName("eventX"); - eventWorkflowTask.setTaskReferenceName("wait0"); - eventWorkflowTask.setSink("conductor"); - - WorkflowTask workflowTask = new WorkflowTask(); - workflowTask.setName("junit_task_1"); - workflowTask.setTaskReferenceName("t1"); - - workflowDef.getTasks().add(eventWorkflowTask); - workflowDef.getTasks().add(workflowTask); - metadataService.registerWorkflowDef(workflowDef); - - String workflowId = workflowExecutor.startWorkflow(workflowDef.getName(), workflowDef.getVersion(), "", new HashMap<>()); - Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); - Workflow workflow = workflowExecutor.getWorkflow(workflowId, true); - assertNotNull(workflow); - - Task eventTask = workflow.getTasks().get(0); - assertEquals(Type.EVENT.name(), eventTask.getTaskType()); - assertEquals(COMPLETED, eventTask.getStatus()); - assertTrue(!eventTask.getOutputData().isEmpty()); - assertNotNull(eventTask.getOutputData().get("event_produced")); - - Task task = workflowExecutionService.poll("junit_task_1", "test"); - assertNotNull(task); - task.setStatus(Status.COMPLETED); - workflowExecutionService.updateTask(task); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals("tasks:" + workflow.getTasks(), WorkflowStatus.COMPLETED, workflow.getStatus()); - } - - @Test - public void testTaskWithCallbackAfterSecondsInWorkflow() { - WorkflowDef workflowDef = metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); - assertNotNull(workflowDef); - - String workflowId = workflowExecutor.startWorkflow(workflowDef.getName(), workflowDef.getVersion(), "", new HashMap<>()); - Workflow workflow = workflowExecutor.getWorkflow(workflowId, true); - assertNotNull(workflow); - - Task task = workflowExecutionService.poll("junit_task_1", "test"); - assertNotNull(task); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - - String taskId = task.getTaskId(); - task.setStatus(Status.IN_PROGRESS); - task.setCallbackAfterSeconds(5L); - workflowExecutionService.updateTask(task); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(1, workflow.getTasks().size()); - - // task should not be available - task = workflowExecutionService.poll("junit_task_1", "test"); - assertNull(task); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(1, workflow.getTasks().size()); - - 
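// --- Illustrative sketch, not part of this diff ---
// The removed testTaskWithCallbackAfterSecondsInWorkflow above exercises Conductor's
// callbackAfterSeconds mechanism: a worker keeps a task IN_PROGRESS and sets a callback
// delay, so pollers do not see the task again until the delay elapses. Task and TaskResult
// are the same classes the tests use; the helper name below is hypothetical.
import com.netflix.conductor.common.metadata.tasks.Task;
import com.netflix.conductor.common.metadata.tasks.TaskResult;

class CallbackAfterSecondsSketch {
    // Build an update that re-queues the task roughly 'delaySeconds' later instead of completing it.
    static TaskResult requeueLater(Task task, long delaySeconds) {
        TaskResult result = new TaskResult(task);
        result.setStatus(TaskResult.Status.IN_PROGRESS); // keep the task open
        result.setCallbackAfterSeconds(delaySeconds);    // hide it from pollers until the delay passes
        return result;                                   // callers hand this to the task update endpoint
    }
}
// --- end sketch ---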
Uninterruptibles.sleepUninterruptibly(5, TimeUnit.SECONDS); - - task = workflowExecutionService.poll("junit_task_1", "test"); - assertNotNull(task); - assertEquals(taskId, task.getTaskId()); - - task.setStatus(Status.COMPLETED); - workflowExecutionService.updateTask(task); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(2, workflow.getTasks().size()); - - task = workflowExecutionService.poll("junit_task_2", "test"); - assertNotNull(task); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - - taskId = task.getTaskId(); - task.setStatus(Status.IN_PROGRESS); - task.setCallbackAfterSeconds(5L); - workflowExecutionService.updateTask(task); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(2, workflow.getTasks().size()); - - // task should not be available - task = workflowExecutionService.poll("junit_task_1", "test"); - assertNull(task); - - Uninterruptibles.sleepUninterruptibly(5, TimeUnit.SECONDS); - - task = workflowExecutionService.poll("junit_task_2", "test"); - assertNotNull(task); - assertEquals(taskId, task.getTaskId()); - - task.setStatus(Status.COMPLETED); - workflowExecutionService.updateTask(task); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(2, workflow.getTasks().size()); - assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); - } - - //@Test - public void testRateLimiting() { - - TaskDef td = new TaskDef(); - td.setName("eventX1"); - td.setTimeoutSeconds(1); - td.setConcurrentExecLimit(1); - - metadataService.registerTaskDef(Arrays.asList(td)); - - WorkflowDef def = new WorkflowDef(); - def.setName("test_rate_limit"); - def.setSchemaVersion(2); - - WorkflowTask event = new WorkflowTask(); - event.setType("USER_TASK"); - event.setName("eventX1"); - event.setTaskReferenceName("event0"); - event.setSink("conductor"); - - def.getTasks().add(event); - metadataService.registerWorkflowDef(def); - - Executors.newSingleThreadScheduledExecutor().scheduleWithFixedDelay(() -> { - queueDAO.processUnacks("USER_TASK"); - }, 2, 2, TimeUnit.SECONDS); - - String[] ids = new String[100]; - ExecutorService es = Executors.newFixedThreadPool(10); - for (int i = 0; i < 10; i++) { - final int index = i; - es.submit(() -> { - try { - String id = workflowExecutor.startWorkflow(def.getName(), def.getVersion(), "", new HashMap<>()); - ids[index] = id; - } catch (Exception e) { - e.printStackTrace(); - } - - }); - } - Uninterruptibles.sleepUninterruptibly(20, TimeUnit.SECONDS); - for (int i = 0; i < 10; i++) { - String id = ids[i]; - Workflow workflow = workflowExecutor.getWorkflow(id, true); - assertNotNull(workflow); - assertEquals(1, workflow.getTasks().size()); - - Task eventTask = workflow.getTasks().get(0); - assertEquals(COMPLETED, eventTask.getStatus()); - assertEquals("tasks:" + workflow.getTasks(), WorkflowStatus.COMPLETED, workflow.getStatus()); - assertTrue(!eventTask.getOutputData().isEmpty()); - assertNotNull(eventTask.getOutputData().get("event_produced")); - } - } - - @Test - public void testWorkflowUsingExternalPayloadStorage() { - WorkflowDef found = metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); - assertNotNull(found); - Map outputParameters = found.getOutputParameters(); - outputParameters.put("workflow_output", "${t1.output.op}"); - metadataService.updateWorkflowDef(found); - - String workflowInputPath = "workflow/input"; - String 
correlationId = "wf_external_storage"; - String workflowId = workflowExecutor.startWorkflow(LINEAR_WORKFLOW_T1_T2, 1, correlationId, null, workflowInputPath, null, null); - assertNotNull(workflowId); - - Workflow workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertNull("The workflow input should not be persisted", workflow.getInput()); - assertEquals(workflowInputPath, workflow.getExternalInputPayloadStoragePath()); - assertEquals(workflow.getReasonForIncompletion(), WorkflowStatus.RUNNING, workflow.getStatus()); - assertEquals(1, workflow.getTasks().size()); - - // Polling for the first task - Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); - assertNotNull(task); - assertEquals("junit_task_1", task.getTaskType()); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - assertEquals(workflowId, task.getWorkflowInstanceId()); - - // update first task with COMPLETED - String taskOutputPath = "task/output"; - task.setOutputData(null); - task.setExternalOutputPayloadStoragePath(taskOutputPath); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertNull("The workflow input should not be persisted", workflow.getInput()); - assertEquals(workflowInputPath, workflow.getExternalInputPayloadStoragePath()); - assertEquals(workflow.getReasonForIncompletion(), WorkflowStatus.RUNNING, workflow.getStatus()); - assertEquals(2, workflow.getTasks().size()); - assertNull("The first task output should not be persisted", workflow.getTasks().get(0).getOutputData()); - assertNull("The second task input should not be persisted", workflow.getTasks().get(1).getInputData()); - assertEquals(taskOutputPath, workflow.getTasks().get(0).getExternalOutputPayloadStoragePath()); - assertEquals("task/input", workflow.getTasks().get(1).getExternalInputPayloadStoragePath()); - - // Polling for the second task - task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); - assertNotNull(task); - assertEquals("junit_task_2", task.getTaskType()); - assertNull(task.getInputData()); - assertNotNull(task.getExternalInputPayloadStoragePath()); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - assertEquals(workflowId, task.getWorkflowInstanceId()); - - // update second task with COMPLETED - task.getOutputData().put("op", "success_task2"); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertNull("The workflow input should not be persisted", workflow.getInput()); - assertEquals(workflowInputPath, workflow.getExternalInputPayloadStoragePath()); - assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); - assertEquals(2, workflow.getTasks().size()); - assertNull("The first task output should not be persisted", workflow.getTasks().get(0).getOutputData()); - assertNull("The second task input should not be persisted", workflow.getTasks().get(1).getInputData()); - assertEquals(taskOutputPath, workflow.getTasks().get(0).getExternalOutputPayloadStoragePath()); - assertEquals("task/input", workflow.getTasks().get(1).getExternalInputPayloadStoragePath()); - assertNull(workflow.getOutput()); - assertNotNull(workflow.getExternalOutputPayloadStoragePath()); - assertEquals("workflow/output", 
workflow.getExternalOutputPayloadStoragePath()); - } - - @Test - public void testRetryWorkflowUsingExternalPayloadStorage() { - WorkflowDef found = metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2, 1); - assertNotNull(found); - Map outputParameters = found.getOutputParameters(); - outputParameters.put("workflow_output", "${t1.output.op}"); - metadataService.updateWorkflowDef(found); - - String taskName = "junit_task_2"; - TaskDef taskDef = metadataService.getTaskDef(taskName); - taskDef.setRetryCount(2); - taskDef.setRetryDelaySeconds(0); - metadataService.updateTaskDef(taskDef); - - String workflowInputPath = "workflow/input"; - String correlationId = "wf_external_storage"; - String workflowId = workflowExecutor.startWorkflow(LINEAR_WORKFLOW_T1_T2, 1, correlationId, null, workflowInputPath, null, null); - assertNotNull(workflowId); - - Workflow workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertNull("The workflow input should not be persisted", workflow.getInput()); - assertEquals(workflowInputPath, workflow.getExternalInputPayloadStoragePath()); - assertEquals(workflow.getReasonForIncompletion(), WorkflowStatus.RUNNING, workflow.getStatus()); - assertEquals(1, workflow.getTasks().size()); - - // Polling for the first task - Task task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); - assertNotNull(task); - assertEquals("junit_task_1", task.getTaskType()); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - assertEquals(workflowId, task.getWorkflowInstanceId()); - - // update first task with COMPLETED - String taskOutputPath = "task/output"; - task.setOutputData(null); - task.setExternalOutputPayloadStoragePath(taskOutputPath); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - // Polling for the second task - task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); - assertNotNull(task); - assertEquals("junit_task_2", task.getTaskType()); - assertNull(task.getInputData()); - assertNotNull(task.getExternalInputPayloadStoragePath()); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - assertEquals(workflowId, task.getWorkflowInstanceId()); - - // update second task with FAILED - task.getOutputData().put("op", "failed_task2"); - task.setStatus(FAILED); - workflowExecutionService.updateTask(task); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertNull("The workflow input should not be persisted", workflow.getInput()); - assertEquals(workflowInputPath, workflow.getExternalInputPayloadStoragePath()); - assertEquals(WorkflowStatus.RUNNING, workflow.getStatus()); - - // Polling again for the second task - task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); - assertNotNull(task); - assertEquals("junit_task_2", task.getTaskType()); - assertNull(task.getInputData()); - assertNotNull(task.getExternalInputPayloadStoragePath()); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - assertEquals(workflowId, task.getWorkflowInstanceId()); - - // update second task with COMPLETED - task.getOutputData().put("op", "success_task2"); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertNull("The workflow input should not be persisted", workflow.getInput()); - assertEquals(workflowInputPath, 
workflow.getExternalInputPayloadStoragePath()); - assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); - assertEquals(3, workflow.getTasks().size()); - assertNull("The first task output should not be persisted", workflow.getTasks().get(0).getOutputData()); - assertNull("The second task input should not be persisted", workflow.getTasks().get(1).getInputData()); - assertNull("The second task input should not be persisted", workflow.getTasks().get(2).getInputData()); - assertEquals(taskOutputPath, workflow.getTasks().get(0).getExternalOutputPayloadStoragePath()); - assertEquals("task/input", workflow.getTasks().get(1).getExternalInputPayloadStoragePath()); - assertEquals("task/input", workflow.getTasks().get(2).getExternalInputPayloadStoragePath()); - assertNull(workflow.getOutput()); - assertNotNull(workflow.getExternalOutputPayloadStoragePath()); - assertEquals("workflow/output", workflow.getExternalOutputPayloadStoragePath()); - } - - private void createSubWorkflow() { - - WorkflowTask wft1 = new WorkflowTask(); - wft1.setName("junit_task_5"); - Map ip1 = new HashMap<>(); - ip1.put("p1", "${workflow.input.param1}"); - ip1.put("p2", "${workflow.input.param2}"); - wft1.setInputParameters(ip1); - wft1.setTaskReferenceName("a1"); - - WorkflowTask wft2 = new WorkflowTask(); - wft2.setName("subWorkflowTask"); - wft2.setType(Type.SUB_WORKFLOW.name()); - SubWorkflowParams swp = new SubWorkflowParams(); - swp.setName(LINEAR_WORKFLOW_T1_T2); - wft2.setSubWorkflowParam(swp); - Map ip2 = new HashMap<>(); - ip2.put("test", "test value"); - ip2.put("param1", "sub workflow input param1"); - wft2.setInputParameters(ip2); - wft2.setTaskReferenceName("a2"); - - WorkflowTask wft3 = new WorkflowTask(); - wft3.setName("junit_task_6"); - Map ip3 = new HashMap<>(); - ip3.put("p1", "${workflow.input.param1}"); - ip3.put("p2", "${workflow.input.param2}"); - wft3.setInputParameters(ip3); - wft3.setTaskReferenceName("a3"); - - WorkflowDef main = new WorkflowDef(); - main.setSchemaVersion(2); - main.setInputParameters(Arrays.asList("param1", "param2")); - main.setName(WF_WITH_SUB_WF); - main.getTasks().addAll(Arrays.asList(wft1, wft2, wft3)); - - metadataService.updateWorkflowDef(Collections.singletonList(main)); - - } - - private void verify(String inputParam1, String wfid, String task1Op, boolean fail) { - Task task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); - assertNotNull(task); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - - String task2Input = (String) task.getInputData().get("tp2"); - assertNotNull(task2Input); - assertEquals(task1Op, task2Input); - task2Input = (String) task.getInputData().get("tp1"); - assertNotNull(task2Input); - assertEquals(inputParam1, task2Input); - if (fail) { - task.setStatus(FAILED); - task.setReasonForIncompletion("failure...0"); - } else { - task.setStatus(COMPLETED); - } - - workflowExecutionService.updateTask(task); - - Workflow es = workflowExecutionService.getExecutionStatus(wfid, false); - assertNotNull(es); - if (fail) { - assertEquals(WorkflowStatus.RUNNING, es.getStatus()); - } else { - assertEquals(WorkflowStatus.COMPLETED, es.getStatus()); - } - } - - @Before - public void flushAllTaskQueues() { - queueDAO.queuesDetail().keySet().forEach(queueName -> { - queueDAO.flush(queueName); - }); - - if (taskDefs == null) { - return; - } - for (TaskDef td : taskDefs) { - queueDAO.flush(td.getName()); - } - } - - private void createWorkflowDefForDomain() { - WorkflowDef defSW = new WorkflowDef(); - 
defSW.setName(LINEAR_WORKFLOW_T1_T2_SW); - defSW.setDescription(defSW.getName()); - defSW.setVersion(1); - defSW.setInputParameters(Arrays.asList("param1", "param2")); - Map outputParameters = new HashMap<>(); - outputParameters.put("o1", "${workflow.input.param1}"); - outputParameters.put("o2", "${t2.output.uuid}"); - outputParameters.put("o3", "${t1.output.op}"); - defSW.setOutputParameters(outputParameters); - defSW.setFailureWorkflow("$workflow.input.failureWfName"); - defSW.setSchemaVersion(2); - LinkedList wftasks = new LinkedList<>(); - - WorkflowTask wft1 = new WorkflowTask(); - wft1.setName("junit_task_3"); - Map ip1 = new HashMap<>(); - ip1.put("p1", "${workflow.input.param1}"); - ip1.put("p2", "${workflow.input.param2}"); - wft1.setInputParameters(ip1); - wft1.setTaskReferenceName("t1"); - - WorkflowTask subWorkflow = new WorkflowTask(); - subWorkflow.setType(Type.SUB_WORKFLOW.name()); - SubWorkflowParams sw = new SubWorkflowParams(); - sw.setName(LINEAR_WORKFLOW_T1_T2); - subWorkflow.setSubWorkflowParam(sw); - subWorkflow.setTaskReferenceName("sw1"); - - wftasks.add(wft1); - wftasks.add(subWorkflow); - defSW.setTasks(wftasks); - - try { - metadataService.updateWorkflowDef(defSW); - } catch (Exception e) { - } - } - - private void createWFWithResponseTimeout() { - TaskDef task = new TaskDef(); - task.setName("task_rt"); - task.setTimeoutSeconds(120); - task.setRetryCount(RETRY_COUNT); - task.setRetryDelaySeconds(0); - task.setResponseTimeoutSeconds(10); - metadataService.registerTaskDef(Collections.singletonList(task)); - - WorkflowDef def = new WorkflowDef(); - def.setName("RTOWF"); - def.setDescription(def.getName()); - def.setVersion(1); - def.setInputParameters(Arrays.asList("param1", "param2")); - Map outputParameters = new HashMap<>(); - outputParameters.put("o1", "${workflow.input.param1}"); - outputParameters.put("o2", "${t2.output.uuid}"); - outputParameters.put("o3", "${t1.output.op}"); - def.setOutputParameters(outputParameters); - def.setFailureWorkflow("$workflow.input.failureWfName"); - def.setSchemaVersion(2); - LinkedList wftasks = new LinkedList<>(); - - WorkflowTask wft1 = new WorkflowTask(); - wft1.setName("task_rt"); - Map ip1 = new HashMap<>(); - ip1.put("p1", "${workflow.input.param1}"); - ip1.put("p2", "${workflow.input.param2}"); - wft1.setInputParameters(ip1); - wft1.setTaskReferenceName("task_rt_t1"); - - WorkflowTask wft2 = new WorkflowTask(); - wft2.setName("junit_task_2"); - Map ip2 = new HashMap<>(); - ip2.put("tp1", "${workflow.input.param1}"); - ip2.put("tp2", "${t1.output.op}"); - wft2.setInputParameters(ip2); - wft2.setTaskReferenceName("t2"); - - wftasks.add(wft1); - wftasks.add(wft2); - def.setTasks(wftasks); - - metadataService.updateWorkflowDef(def); - } - - private String runWorkflowWithSubworkflow() { - clearWorkflows(); - createWorkflowDefForDomain(); - - WorkflowDef workflowDef = metadataService.getWorkflowDef(LINEAR_WORKFLOW_T1_T2_SW, 1); - assertNotNull(workflowDef); - - String correlationId = "unit_test_sw"; - Map input = new HashMap<>(); - String inputParam1 = "p1 value"; - input.put("param1", inputParam1); - input.put("param2", "p2 value"); - - String workflowId = workflowExecutor.startWorkflow(LINEAR_WORKFLOW_T1_T2_SW, 1, correlationId, input, null); - System.out.println("testSimpleWorkflow.wfid=" + workflowId); - assertNotNull(workflowId); - - Workflow workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(WorkflowStatus.RUNNING, workflow.getStatus()); - assertEquals(1, 
workflow.getTasks().size()); //The very first task is the one that should be scheduled. - - // Poll for first task and execute it - Task task = workflowExecutionService.poll("junit_task_3", "task3.junit.worker"); - assertNotNull(task); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - task.getOutputData().put("op", "junit_task_3.done"); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(WorkflowStatus.RUNNING, workflow.getStatus()); - assertEquals(2, workflow.getTasks().size()); - - // Get the sub workflow id - String subWorkflowId = null; - for (Task t : workflow.getTasks()) { - if (t.getTaskType().equalsIgnoreCase(SubWorkflow.NAME)) { - subWorkflowId = t.getOutputData().get(SubWorkflow.SUB_WORKFLOW_ID).toString(); - } - } - assertNotNull(subWorkflowId); - - Workflow subWorkflow = workflowExecutionService.getExecutionStatus(subWorkflowId, true); - assertNotNull(subWorkflow); - assertEquals(WorkflowStatus.RUNNING, subWorkflow.getStatus()); - assertEquals(1, subWorkflow.getTasks().size()); - - // Now the Sub workflow is triggered - // Poll for first task of the sub workflow and execute it - task = workflowExecutionService.poll("junit_task_1", "task1.junit.worker"); - assertNotNull(task); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - task.getOutputData().put("op", "junit_task_1.done"); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - subWorkflow = workflowExecutionService.getExecutionStatus(subWorkflowId, true); - assertNotNull(subWorkflow); - assertEquals(WorkflowStatus.RUNNING, subWorkflow.getStatus()); - assertEquals(2, subWorkflow.getTasks().size()); - - // Poll for second task of the sub workflow and execute it - task = workflowExecutionService.poll("junit_task_2", "task2.junit.worker"); - assertNotNull(task); - assertTrue(workflowExecutionService.ackTaskReceived(task.getTaskId())); - task.getOutputData().put("op", "junit_task_2.done"); - task.setStatus(COMPLETED); - workflowExecutionService.updateTask(task); - - // Now the sub workflow and the main workflow must have finished - subWorkflow = workflowExecutionService.getExecutionStatus(subWorkflowId, true); - assertNotNull(subWorkflow); - assertEquals(WorkflowStatus.COMPLETED, subWorkflow.getStatus()); - assertEquals(2, subWorkflow.getTasks().size()); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(WorkflowStatus.COMPLETED, workflow.getStatus()); - assertEquals(2, workflow.getTasks().size()); - - return workflowId; - } - - private String runAFailedForkJoinWF() throws Exception { - try { - this.createForkJoinWorkflowWithZeroRetry(); - } catch (Exception e) { - } - - Map input = new HashMap<>(); - String workflowId = workflowExecutor.startWorkflow(FORK_JOIN_WF + "_2", 1, "fanouttest", input); - System.out.println("testForkJoin.wfid=" + workflowId); - Task t1 = workflowExecutionService.poll("junit_task_0_RT_1", "test"); - assertTrue(workflowExecutionService.ackTaskReceived(t1.getTaskId())); - - Task t2 = workflowExecutionService.poll("junit_task_0_RT_2", "test"); - assertTrue(workflowExecutionService.ackTaskReceived(t2.getTaskId())); - assertNotNull(t1); - assertNotNull(t2); - - t1.setStatus(COMPLETED); - workflowExecutionService.updateTask(t1); - - Workflow workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - 
assertNotNull(workflow); - assertEquals("Found " + workflow.getTasks(), WorkflowStatus.RUNNING, workflow.getStatus()); - printTaskStatuses(workflow, "Initial"); - - t2.setStatus(FAILED); - - ExecutorService executorService = Executors.newFixedThreadPool(2); - Future future1 = executorService.submit(() -> { - try { - workflowExecutionService.updateTask(t2); - } catch (Exception e) { - throw new RuntimeException(e); - } - - }); - future1.get(); - - workflow = workflowExecutionService.getExecutionStatus(workflowId, true); - assertNotNull(workflow); - assertEquals(WorkflowStatus.FAILED, workflow.getStatus()); - - return workflowId; - } - - private void printTaskStatuses(String wfid, String message) { - Workflow wf = workflowExecutionService.getExecutionStatus(wfid, true); - assertNotNull(wf); - printTaskStatuses(wf, message); - } - - private boolean printWFTaskDetails = false; +public class WorkflowServiceTest extends AbstractWorkflowServiceTest { - private void printTaskStatuses(Workflow wf, String message) { - if (printWFTaskDetails) { - System.out.println(message + " >>> Workflow status " + wf.getStatus().name()); - wf.getTasks().forEach(t -> { - System.out.println("Task " + String.format("%-15s", t.getTaskType()) + "\t" + String.format("%-15s", t.getReferenceTaskName()) + "\t" + String.format("%-15s", t.getWorkflowTask().getType()) + "\t" + t.getSeq() + "\t" + t.getStatus() + "\t" + t.getTaskId()); - }); - System.out.println(); - } + @Override + String startOrLoadWorkflowExecution(String snapshotResourceName, String workflowName, int version, String correlationId, Map input, String event, Map taskToDomain) { + return workflowExecutor.startWorkflow(workflowName, version, correlationId, input, null, event, taskToDomain); } } diff --git a/test-harness/src/test/java/com/netflix/conductor/tests/utils/MockConfiguration.java b/test-harness/src/test/java/com/netflix/conductor/tests/utils/MockConfiguration.java index 354f5ad2d2..7aac34a073 100644 --- a/test-harness/src/test/java/com/netflix/conductor/tests/utils/MockConfiguration.java +++ b/test-harness/src/test/java/com/netflix/conductor/tests/utils/MockConfiguration.java @@ -16,10 +16,13 @@ package com.netflix.conductor.tests.utils; +import com.google.inject.AbstractModule; import com.netflix.conductor.core.config.Configuration; import java.net.InetAddress; import java.net.UnknownHostException; +import java.util.Collections; +import java.util.List; import java.util.Map; public class MockConfiguration implements Configuration { @@ -132,4 +135,19 @@ public Map getAll() { public long getLongProperty(String name, long defaultValue) { return 1000000L; } + + @Override + public boolean getBooleanProperty(String name, boolean defaultValue) { + return defaultValue; + } + + @Override + public boolean getBoolProperty(String name, boolean defaultValue) { + return defaultValue; + } + + @Override + public List getAdditionalModules() { + return Collections.emptyList(); + } } diff --git a/test-harness/src/test/java/com/netflix/conductor/tests/utils/MockIndexDAO.java b/test-harness/src/test/java/com/netflix/conductor/tests/utils/MockIndexDAO.java index 6e94b9f2f4..c854b666a1 100644 --- a/test-harness/src/test/java/com/netflix/conductor/tests/utils/MockIndexDAO.java +++ b/test-harness/src/test/java/com/netflix/conductor/tests/utils/MockIndexDAO.java @@ -36,6 +36,10 @@ */ public class MockIndexDAO implements IndexDAO { + @Override + public void setup() { + } + @Override public void indexWorkflow(Workflow workflow) { } diff --git 
a/test-harness/src/test/java/com/netflix/conductor/tests/utils/TestEnvironment.java b/test-harness/src/test/java/com/netflix/conductor/tests/utils/TestEnvironment.java
new file mode 100644
index 0000000000..daf4d16130
--- /dev/null
+++ b/test-harness/src/test/java/com/netflix/conductor/tests/utils/TestEnvironment.java
@@ -0,0 +1,21 @@
+package com.netflix.conductor.tests.utils;
+
+public class TestEnvironment {
+    private TestEnvironment() {}
+
+    private static void setupSystemProperties() {
+        System.setProperty("EC2_REGION", "us-east-1");
+        System.setProperty("EC2_AVAILABILITY_ZONE", "us-east-1c");
+        System.setProperty("workflow.elasticsearch.index.name", "conductor");
+        System.setProperty("workflow.namespace.prefix", "integration-test");
+        System.setProperty("db", "memory");
+    }
+
+    public static void setup() {
+        setupSystemProperties();
+    }
+
+    public static void teardown() {
+        System.setProperties(null);
+    }
+}
diff --git a/test-harness/src/test/java/com/netflix/conductor/tests/utils/TestModule.java b/test-harness/src/test/java/com/netflix/conductor/tests/utils/TestModule.java
index 83643cfbf1..9abed623b3
--- a/test-harness/src/test/java/com/netflix/conductor/tests/utils/TestModule.java
+++ b/test-harness/src/test/java/com/netflix/conductor/tests/utils/TestModule.java
@@ -1,37 +1,36 @@
 /**
  * Copyright 2016 Netflix, Inc.
  *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
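// --- Illustrative sketch, not part of this diff ---
// Hypothetical usage of the TestEnvironment helper added above: only TestEnvironment.setup()
// and teardown() come from this change; the test class name and JUnit 4 wiring are assumptions
// made for illustration.
package com.netflix.conductor.tests.utils;

import org.junit.AfterClass;
import org.junit.BeforeClass;

public class TestEnvironmentUsageSketch {
    @BeforeClass
    public static void setupEnvironment() {
        TestEnvironment.setup();    // seeds the EC2_*, workflow.* and db=memory system properties
    }

    @AfterClass
    public static void tearDownEnvironment() {
        TestEnvironment.teardown(); // clears the system properties after the suite
    }
}
// --- end sketch ---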
*/ /** * */ package com.netflix.conductor.tests.utils; +import com.fasterxml.jackson.databind.ObjectMapper; import com.google.inject.AbstractModule; import com.google.inject.Provides; import com.netflix.conductor.common.utils.ExternalPayloadStorage; +import com.netflix.conductor.common.utils.JsonMapperProvider; import com.netflix.conductor.core.config.Configuration; import com.netflix.conductor.core.config.CoreModule; import com.netflix.conductor.dao.ExecutionDAO; import com.netflix.conductor.dao.IndexDAO; import com.netflix.conductor.dao.MetadataDAO; import com.netflix.conductor.dao.QueueDAO; -import com.netflix.conductor.dao.dynomite.DynoProxy; import com.netflix.conductor.dao.dynomite.RedisExecutionDAO; import com.netflix.conductor.dao.dynomite.RedisMetadataDAO; import com.netflix.conductor.dao.dynomite.queue.DynoQueueDAO; -import com.netflix.conductor.redis.utils.JedisMock; +import com.netflix.conductor.jedis.InMemoryJedisProvider; +import com.netflix.conductor.jedis.JedisMock; import com.netflix.dyno.queues.ShardSupplier; import redis.clients.jedis.JedisCommands; @@ -46,60 +45,59 @@ * */ public class TestModule extends AbstractModule { + private int maxThreads = 50; - private int maxThreads = 50; + private ExecutorService executorService; - private ExecutorService executorService; + @Override + protected void configure() { - @Override - protected void configure() { + System.setProperty("workflow.system.task.worker.callback.seconds", "0"); + System.setProperty("workflow.system.task.worker.queue.size", "10000"); + System.setProperty("workflow.system.task.worker.thread.count", "10"); - System.setProperty("workflow.system.task.worker.callback.seconds", "0"); - System.setProperty("workflow.system.task.worker.queue.size", "10000"); - System.setProperty("workflow.system.task.worker.thread.count", "10"); + configureExecutorService(); - configureExecutorService(); + MockConfiguration config = new MockConfiguration(); + bind(Configuration.class).toInstance(config); + JedisCommands jedisMock = new JedisMock(); - MockConfiguration config = new MockConfiguration(); - bind(Configuration.class).toInstance(config); - JedisCommands jedisMock = new JedisMock(); + DynoQueueDAO queueDao = new DynoQueueDAO(jedisMock, jedisMock, new ShardSupplier() { - DynoQueueDAO queueDao = new DynoQueueDAO(jedisMock, jedisMock, new ShardSupplier() { + @Override + public Set getQueueShards() { + return new HashSet<>(Collections.singletonList("a")); + } - @Override - public Set getQueueShards() { - return new HashSet<>(Collections.singletonList("a")); - } + @Override + public String getCurrentShard() { + return "a"; + } + }, config); - @Override - public String getCurrentShard() { - return "a"; - } - }, config); + bind(MetadataDAO.class).to(RedisMetadataDAO.class); + bind(ExecutionDAO.class).to(RedisExecutionDAO.class); + bind(DynoQueueDAO.class).toInstance(queueDao); + bind(QueueDAO.class).to(DynoQueueDAO.class); + bind(IndexDAO.class).to(MockIndexDAO.class); + bind(JedisCommands.class).toProvider(InMemoryJedisProvider.class); + install(new CoreModule()); + bind(UserTask.class).asEagerSingleton(); + bind(ObjectMapper.class).toProvider(JsonMapperProvider.class); + bind(ExternalPayloadStorage.class).to(MockExternalPayloadStorage.class); + } - bind(MetadataDAO.class).to(RedisMetadataDAO.class); - bind(ExecutionDAO.class).to(RedisExecutionDAO.class); - bind(DynoQueueDAO.class).toInstance(queueDao); - bind(QueueDAO.class).to(DynoQueueDAO.class); - bind(IndexDAO.class).to(MockIndexDAO.class); - DynoProxy proxy = new 
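// --- Illustrative sketch, not part of this diff ---
// A hedged example of how the Guice bindings that TestModule declares can be materialized in a
// test. Guice.createInjector is the standard Guice entry point; the Configuration and QueueDAO
// lookups mirror bindings shown in the TestModule diff, while the class name and main method
// here are assumptions for illustration.
package com.netflix.conductor.tests.utils;

import com.google.inject.Guice;
import com.google.inject.Injector;
import com.netflix.conductor.core.config.Configuration;
import com.netflix.conductor.dao.QueueDAO;

class TestModuleBootstrapSketch {
    public static void main(String[] args) {
        Injector injector = Guice.createInjector(new TestModule());
        Configuration config = injector.getInstance(Configuration.class); // bound to MockConfiguration
        QueueDAO queueDao = injector.getInstance(QueueDAO.class);         // bound to the in-memory DynoQueueDAO
        System.out.println(config.getClass().getSimpleName() + " / " + queueDao.getClass().getSimpleName());
    }
}
// --- end sketch ---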
DynoProxy(jedisMock); - bind(DynoProxy.class).toInstance(proxy); - install(new CoreModule()); - bind(UserTask.class).asEagerSingleton(); - bind(ExternalPayloadStorage.class).to(MockExternalPayloadStorage.class); - } + @Provides + public ExecutorService getExecutorService() { + return this.executorService; + } - @Provides - public ExecutorService getExecutorService(){ - return this.executorService; - } - - private void configureExecutorService(){ - AtomicInteger count = new AtomicInteger(0); - this.executorService = java.util.concurrent.Executors.newFixedThreadPool(maxThreads, runnable -> { + private void configureExecutorService() { + AtomicInteger count = new AtomicInteger(0); + this.executorService = java.util.concurrent.Executors.newFixedThreadPool(maxThreads, runnable -> { Thread workflowWorkerThread = new Thread(runnable); workflowWorkerThread.setName(String.format("workflow-worker-%d", count.getAndIncrement())); return workflowWorkerThread; }); - } + } } diff --git a/test-harness/src/test/resources/integration/scenarios/legacy/ConditionalTaskWF.json b/test-harness/src/test/resources/integration/scenarios/legacy/ConditionalTaskWF.json new file mode 100644 index 0000000000..e05646d326 --- /dev/null +++ b/test-harness/src/test/resources/integration/scenarios/legacy/ConditionalTaskWF.json @@ -0,0 +1,178 @@ +{ + "ownerApp": "junit_app", + "createTime": 1534990536238, + "updateTime": 1534990536390, + "status": "RUNNING", + "endTime": 0, + "workflowId": "WORKFLOW_INSTANCE_ID", + "tasks": [ + { + "taskType": "DECISION", + "status": "COMPLETED", + "inputData": { + "hasChildren": "true", + "case": "xxx" + }, + "referenceTaskName": "conditional", + "retryCount": 0, + "seq": 1, + "correlationId": "testCaseStatements: 1534990536230", + "pollCount": 0, + "taskDefName": "DECISION", + "scheduledTime": 1534990536269, + "startTime": 1534990536275, + "endTime": 1534990536389, + "updateTime": 1534990536389, + "startDelayInSeconds": 0, + "retried": false, + "executed": true, + "callbackFromWorker": true, + "responseTimeoutSeconds": 0, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "ConditionalTaskWF", + "taskId": "8f65b5f2-da29-417f-a211-c86a9031d63a", + "callbackAfterSeconds": 0, + "outputData": { + "caseOutput": [ + "xxx" + ] + }, + "workflowTask": { + "name": "conditional", + "taskReferenceName": "conditional", + "inputParameters": { + "case": "workflow.input.param1" + }, + "type": "DECISION", + "caseValueParam": "case", + "decisionCases": { + "nested": [ + { + "name": "conditional2", + "taskReferenceName": "conditional2", + "inputParameters": { + "case": "workflow.input.param2" + }, + "type": "DECISION", + "caseValueParam": "case", + "decisionCases": { + "one": [ + { + "name": "junit_task_1", + "taskReferenceName": "t1", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + { + "name": "junit_task_3", + "taskReferenceName": "t3", + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ], + "two": [ + { + "name": "junit_task_2", + "taskReferenceName": "t2", + "inputParameters": { + "tp1": "workflow.input.param1", + "tp3": "workflow.input.param2" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ] + }, + "startDelay": 0, + "optional": false + } + ], + "three": [ + { + "name": "junit_task_3", + "taskReferenceName": "t3", + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ] + }, + "defaultCase": [ + { + "name": "junit_task_2", + 
"taskReferenceName": "t2", + "inputParameters": { + "tp1": "workflow.input.param1", + "tp3": "workflow.input.param2" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ], + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "COMPLETED", + "queueWaitTime": 6 + }, + { + "taskType": "junit_task_2", + "status": "SCHEDULED", + "inputData": { + "tp1": "xxx", + "tp3": "two" + }, + "referenceTaskName": "t2", + "retryCount": 0, + "seq": 2, + "correlationId": "testCaseStatements: 1534990536230", + "pollCount": 0, + "taskDefName": "junit_task_2", + "scheduledTime": 1534990536275, + "startTime": 0, + "endTime": 0, + "updateTime": 1534990536275, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "ConditionalTaskWF", + "taskId": "5c00a173-d931-4f68-b3ba-c182265bc3db", + "callbackAfterSeconds": 0, + "workflowTask": { + "name": "junit_task_2", + "taskReferenceName": "t2", + "inputParameters": { + "tp1": "workflow.input.param1", + "tp3": "workflow.input.param2" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "SCHEDULED", + "queueWaitTime": 0 + } + ], + "input": { + "param1": "xxx", + "param2": "two" + }, + "workflowType": "ConditionalTaskWF", + "version": 1, + "correlationId": "testCaseStatements: 1534990536230", + "schemaVersion": 1, + "startTime": 1534990536238 +} diff --git a/test-harness/src/test/resources/integration/scenarios/legacy/ConditionalTaskWF2.json b/test-harness/src/test/resources/integration/scenarios/legacy/ConditionalTaskWF2.json new file mode 100644 index 0000000000..ced7ddab93 --- /dev/null +++ b/test-harness/src/test/resources/integration/scenarios/legacy/ConditionalTaskWF2.json @@ -0,0 +1,259 @@ +{ + "ownerApp": "junit_app", + "createTime": 1534999254315, + "updateTime": 1534999254327, + "status": "RUNNING", + "endTime": 0, + "workflowId": "WORKFLOW_INSTANCE_ID", + "tasks": [ + { + "taskType": "DECISION", + "status": "COMPLETED", + "inputData": { + "hasChildren": "true", + "case": "nested" + }, + "referenceTaskName": "conditional", + "retryCount": 0, + "seq": 1, + "correlationId": "testCaseStatements: 1534999254108", + "pollCount": 0, + "taskDefName": "DECISION", + "scheduledTime": 1534999254316, + "startTime": 1534999254317, + "endTime": 1534999254326, + "updateTime": 1534999254326, + "startDelayInSeconds": 0, + "retried": false, + "executed": true, + "callbackFromWorker": true, + "responseTimeoutSeconds": 0, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "ConditionalTaskWF", + "taskId": "455c0446-edfb-492b-946d-89ec2fa34fe6", + "callbackAfterSeconds": 0, + "outputData": { + "caseOutput": [ + "nested" + ] + }, + "workflowTask": { + "name": "conditional", + "taskReferenceName": "conditional", + "inputParameters": { + "case": "workflow.input.param1" + }, + "type": "DECISION", + "caseValueParam": "case", + "decisionCases": { + "nested": [ + { + "name": "conditional2", + "taskReferenceName": "conditional2", + "inputParameters": { + "case": "workflow.input.param2" + }, + "type": "DECISION", + "caseValueParam": "case", + "decisionCases": { + "one": [ + { + "name": "junit_task_1", + "taskReferenceName": "t1", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + { + "name": "junit_task_3", + 
"taskReferenceName": "t3", + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ], + "two": [ + { + "name": "junit_task_2", + "taskReferenceName": "t2", + "inputParameters": { + "tp1": "workflow.input.param1", + "tp3": "workflow.input.param2" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ] + }, + "startDelay": 0, + "optional": false + } + ], + "three": [ + { + "name": "junit_task_3", + "taskReferenceName": "t3", + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ] + }, + "defaultCase": [ + { + "name": "junit_task_2", + "taskReferenceName": "t2", + "inputParameters": { + "tp1": "workflow.input.param1", + "tp3": "workflow.input.param2" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ], + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "COMPLETED", + "queueWaitTime": 1 + }, + { + "taskType": "DECISION", + "status": "COMPLETED", + "inputData": { + "hasChildren": "true", + "case": "one" + }, + "referenceTaskName": "conditional2", + "retryCount": 0, + "seq": 2, + "correlationId": "testCaseStatements: 1534999254108", + "pollCount": 0, + "taskDefName": "DECISION", + "scheduledTime": 1534999254316, + "startTime": 1534999254318, + "endTime": 1534999254327, + "updateTime": 1534999254327, + "startDelayInSeconds": 0, + "retried": false, + "executed": true, + "callbackFromWorker": true, + "responseTimeoutSeconds": 0, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "ConditionalTaskWF", + "taskId": "d8547bbd-39a2-415a-81bf-9783f563bdbe", + "callbackAfterSeconds": 0, + "outputData": { + "caseOutput": [ + "one" + ] + }, + "workflowTask": { + "name": "conditional2", + "taskReferenceName": "conditional2", + "inputParameters": { + "case": "workflow.input.param2" + }, + "type": "DECISION", + "caseValueParam": "case", + "decisionCases": { + "one": [ + { + "name": "junit_task_1", + "taskReferenceName": "t1", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + { + "name": "junit_task_3", + "taskReferenceName": "t3", + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ], + "two": [ + { + "name": "junit_task_2", + "taskReferenceName": "t2", + "inputParameters": { + "tp1": "workflow.input.param1", + "tp3": "workflow.input.param2" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ] + }, + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "COMPLETED", + "queueWaitTime": 2 + }, + { + "taskType": "junit_task_1", + "status": "SCHEDULED", + "inputData": { + "p1": "nested", + "p2": "one" + }, + "referenceTaskName": "t1", + "retryCount": 0, + "seq": 3, + "correlationId": "testCaseStatements: 1534999254108", + "pollCount": 0, + "taskDefName": "junit_task_1", + "scheduledTime": 1534999254317, + "startTime": 0, + "endTime": 0, + "updateTime": 1534999254317, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "ConditionalTaskWF", + "taskId": "64423d29-e6e4-4a69-a5f3-a64c62f552b0", + "callbackAfterSeconds": 0, + "workflowTask": { + "name": "junit_task_1", + "taskReferenceName": "t1", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": 
"SCHEDULED", + "queueWaitTime": 0 + } + ], + "input": { + "param1": "nested", + "param2": "one" + }, + "workflowType": "ConditionalTaskWF", + "version": 1, + "correlationId": "testCaseStatements: 1534999254108", + "schemaVersion": 1, + "startTime": 1534999254315 +} diff --git a/test-harness/src/test/resources/integration/scenarios/legacy/ConditionalTaskWF3.json b/test-harness/src/test/resources/integration/scenarios/legacy/ConditionalTaskWF3.json new file mode 100644 index 0000000000..c636c9aac9 --- /dev/null +++ b/test-harness/src/test/resources/integration/scenarios/legacy/ConditionalTaskWF3.json @@ -0,0 +1,259 @@ +{ + "ownerApp": "junit_app", + "createTime": 1534999254353, + "updateTime": 1534999254358, + "status": "RUNNING", + "endTime": 0, + "workflowId": "WORKFLOW_INSTANCE_ID", + "tasks": [ + { + "taskType": "DECISION", + "status": "COMPLETED", + "inputData": { + "hasChildren": "true", + "case": "nested" + }, + "referenceTaskName": "conditional", + "retryCount": 0, + "seq": 1, + "correlationId": "testCaseStatements: 1534999254108", + "pollCount": 0, + "taskDefName": "DECISION", + "scheduledTime": 1534999254354, + "startTime": 1534999254355, + "endTime": 1534999254357, + "updateTime": 1534999254357, + "startDelayInSeconds": 0, + "retried": false, + "executed": true, + "callbackFromWorker": true, + "responseTimeoutSeconds": 0, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "ConditionalTaskWF", + "taskId": "5264732a-968b-4c93-a4b6-c6ad086dfd02", + "callbackAfterSeconds": 0, + "outputData": { + "caseOutput": [ + "nested" + ] + }, + "workflowTask": { + "name": "conditional", + "taskReferenceName": "conditional", + "inputParameters": { + "case": "workflow.input.param1" + }, + "type": "DECISION", + "caseValueParam": "case", + "decisionCases": { + "nested": [ + { + "name": "conditional2", + "taskReferenceName": "conditional2", + "inputParameters": { + "case": "workflow.input.param2" + }, + "type": "DECISION", + "caseValueParam": "case", + "decisionCases": { + "one": [ + { + "name": "junit_task_1", + "taskReferenceName": "t1", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + { + "name": "junit_task_3", + "taskReferenceName": "t3", + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ], + "two": [ + { + "name": "junit_task_2", + "taskReferenceName": "t2", + "inputParameters": { + "tp1": "workflow.input.param1", + "tp3": "workflow.input.param2" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ] + }, + "startDelay": 0, + "optional": false + } + ], + "three": [ + { + "name": "junit_task_3", + "taskReferenceName": "t3", + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ] + }, + "defaultCase": [ + { + "name": "junit_task_2", + "taskReferenceName": "t2", + "inputParameters": { + "tp1": "workflow.input.param1", + "tp3": "workflow.input.param2" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ], + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "COMPLETED", + "queueWaitTime": 1 + }, + { + "taskType": "DECISION", + "status": "COMPLETED", + "inputData": { + "hasChildren": "true", + "case": "two" + }, + "referenceTaskName": "conditional2", + "retryCount": 0, + "seq": 2, + "correlationId": "testCaseStatements: 1534999254108", + "pollCount": 0, + "taskDefName": "DECISION", + "scheduledTime": 1534999254355, + "startTime": 1534999254355, + "endTime": 1534999254358, + 
"updateTime": 1534999254358, + "startDelayInSeconds": 0, + "retried": false, + "executed": true, + "callbackFromWorker": true, + "responseTimeoutSeconds": 0, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "ConditionalTaskWF", + "taskId": "c42112d0-409b-4651-950e-4068e40d2f61", + "callbackAfterSeconds": 0, + "outputData": { + "caseOutput": [ + "two" + ] + }, + "workflowTask": { + "name": "conditional2", + "taskReferenceName": "conditional2", + "inputParameters": { + "case": "workflow.input.param2" + }, + "type": "DECISION", + "caseValueParam": "case", + "decisionCases": { + "one": [ + { + "name": "junit_task_1", + "taskReferenceName": "t1", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + { + "name": "junit_task_3", + "taskReferenceName": "t3", + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ], + "two": [ + { + "name": "junit_task_2", + "taskReferenceName": "t2", + "inputParameters": { + "tp1": "workflow.input.param1", + "tp3": "workflow.input.param2" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ] + }, + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "COMPLETED", + "queueWaitTime": 0 + }, + { + "taskType": "junit_task_2", + "status": "SCHEDULED", + "inputData": { + "tp1": "nested", + "tp3": "two" + }, + "referenceTaskName": "t2", + "retryCount": 0, + "seq": 3, + "correlationId": "testCaseStatements: 1534999254108", + "pollCount": 0, + "taskDefName": "junit_task_2", + "scheduledTime": 1534999254355, + "startTime": 0, + "endTime": 0, + "updateTime": 1534999254355, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "ConditionalTaskWF", + "taskId": "0df27bb5-0961-4ac5-8af8-28c3e49c3368", + "callbackAfterSeconds": 0, + "workflowTask": { + "name": "junit_task_2", + "taskReferenceName": "t2", + "inputParameters": { + "tp1": "workflow.input.param1", + "tp3": "workflow.input.param2" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "SCHEDULED", + "queueWaitTime": 0 + } + ], + "input": { + "param1": "nested", + "param2": "two" + }, + "workflowType": "ConditionalTaskWF", + "version": 1, + "correlationId": "testCaseStatements: 1534999254108", + "schemaVersion": 1, + "startTime": 1534999254353 +} diff --git a/test-harness/src/test/resources/integration/scenarios/legacy/ConditionalTaskWF4.json b/test-harness/src/test/resources/integration/scenarios/legacy/ConditionalTaskWF4.json new file mode 100644 index 0000000000..b51ce9d085 --- /dev/null +++ b/test-harness/src/test/resources/integration/scenarios/legacy/ConditionalTaskWF4.json @@ -0,0 +1,171 @@ +{ + "ownerApp": "junit_app", + "createTime": 1534999661844, + "updateTime": 1534999661847, + "status": "RUNNING", + "endTime": 0, + "workflowId": "WORKFLOW_INSTANCE_ID", + "tasks": [ + { + "taskType": "DECISION", + "status": "COMPLETED", + "inputData": { + "hasChildren": "true", + "case": "three" + }, + "referenceTaskName": "conditional", + "retryCount": 0, + "seq": 1, + "correlationId": "testCaseStatements: 1534999661592", + "pollCount": 0, + "taskDefName": "DECISION", + "scheduledTime": 1534999661845, + "startTime": 1534999661845, + "endTime": 1534999661847, + "updateTime": 1534999661847, + "startDelayInSeconds": 0, + "retried": false, + 
"executed": true, + "callbackFromWorker": true, + "responseTimeoutSeconds": 0, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "ConditionalTaskWF", + "taskId": "3a6fc7c8-07f2-4aba-86c8-3e09f0ea0cc6", + "callbackAfterSeconds": 0, + "outputData": { + "caseOutput": [ + "three" + ] + }, + "workflowTask": { + "name": "conditional", + "taskReferenceName": "conditional", + "inputParameters": { + "case": "workflow.input.param1" + }, + "type": "DECISION", + "caseValueParam": "case", + "decisionCases": { + "nested": [ + { + "name": "conditional2", + "taskReferenceName": "conditional2", + "inputParameters": { + "case": "workflow.input.param2" + }, + "type": "DECISION", + "caseValueParam": "case", + "decisionCases": { + "one": [ + { + "name": "junit_task_1", + "taskReferenceName": "t1", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + { + "name": "junit_task_3", + "taskReferenceName": "t3", + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ], + "two": [ + { + "name": "junit_task_2", + "taskReferenceName": "t2", + "inputParameters": { + "tp1": "workflow.input.param1", + "tp3": "workflow.input.param2" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ] + }, + "startDelay": 0, + "optional": false + } + ], + "three": [ + { + "name": "junit_task_3", + "taskReferenceName": "t3", + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ] + }, + "defaultCase": [ + { + "name": "junit_task_2", + "taskReferenceName": "t2", + "inputParameters": { + "tp1": "workflow.input.param1", + "tp3": "workflow.input.param2" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ], + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "COMPLETED", + "queueWaitTime": 0 + }, + { + "taskType": "junit_task_3", + "status": "SCHEDULED", + "referenceTaskName": "t3", + "retryCount": 0, + "seq": 2, + "correlationId": "testCaseStatements: 1534999661592", + "pollCount": 0, + "taskDefName": "junit_task_3", + "scheduledTime": 1534999661845, + "startTime": 0, + "endTime": 0, + "updateTime": 1534999661845, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "ConditionalTaskWF", + "taskId": "0d9eaabb-23a2-4942-a130-65448f40d34d", + "callbackAfterSeconds": 0, + "workflowTask": { + "name": "junit_task_3", + "taskReferenceName": "t3", + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "SCHEDULED", + "queueWaitTime": 0 + } + ], + "input": { + "finalCase": "notify", + "param1": "three", + "param2": "two" + }, + "workflowType": "ConditionalTaskWF", + "version": 1, + "correlationId": "testCaseStatements: 1534999661592", + "schemaVersion": 1, + "startTime": 1534999661844 +} diff --git a/test-harness/src/test/resources/integration/scenarios/legacy/DynamicFanInOutTest.json b/test-harness/src/test/resources/integration/scenarios/legacy/DynamicFanInOutTest.json new file mode 100644 index 0000000000..3d507dc716 --- /dev/null +++ b/test-harness/src/test/resources/integration/scenarios/legacy/DynamicFanInOutTest.json @@ -0,0 +1,56 @@ +{ + "ownerApp": "junit_app", + "createTime": 1534990883111, + "updateTime": 1534990883246, + "status": "RUNNING", + "endTime": 0, + "workflowId": "WORKFLOW_INSTANCE_ID", + "tasks": [ + { + "taskType": 
"junit_task_1", + "status": "SCHEDULED", + "inputData": { + "p1": null, + "p2": null + }, + "referenceTaskName": "dt1", + "retryCount": 0, + "seq": 1, + "correlationId": "dynfanouttest1", + "pollCount": 0, + "taskDefName": "junit_task_1", + "scheduledTime": 1534990883143, + "startTime": 0, + "endTime": 0, + "updateTime": 1534990883143, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "DynamicFanInOutTest", + "taskId": "d2c21507-a553-465b-82ce-1eedc86f3d1d", + "callbackAfterSeconds": 0, + "workflowTask": { + "name": "junit_task_1", + "taskReferenceName": "dt1", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "SCHEDULED", + "queueWaitTime": 0 + } + ], + "workflowType": "DynamicFanInOutTest", + "version": 1, + "correlationId": "dynfanouttest1", + "schemaVersion": 1, + "startTime": 1534990883111 +} diff --git a/test-harness/src/test/resources/integration/scenarios/legacy/DynamicFanInOutTestLegacy.json b/test-harness/src/test/resources/integration/scenarios/legacy/DynamicFanInOutTestLegacy.json new file mode 100644 index 0000000000..18ded7557d --- /dev/null +++ b/test-harness/src/test/resources/integration/scenarios/legacy/DynamicFanInOutTestLegacy.json @@ -0,0 +1,56 @@ +{ + "ownerApp": "junit_app", + "createTime": 1534985656537, + "updateTime": 1534985656670, + "status": "RUNNING", + "endTime": 0, + "workflowId": "WORKFLOW_INSTANCE_ID", + "tasks": [ + { + "taskType": "junit_task_1", + "status": "SCHEDULED", + "inputData": { + "p1": null, + "p2": null + }, + "referenceTaskName": "dt1", + "retryCount": 0, + "seq": 1, + "correlationId": "dynfanouttest1", + "pollCount": 0, + "taskDefName": "junit_task_1", + "scheduledTime": 1534985656568, + "startTime": 0, + "endTime": 0, + "updateTime": 1534985656568, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "DynamicFanInOutTestLegacy", + "taskId": "e04365be-93c3-4e36-ad05-664e0e6a27c3", + "callbackAfterSeconds": 0, + "workflowTask": { + "name": "junit_task_1", + "taskReferenceName": "dt1", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "SCHEDULED", + "queueWaitTime": 0 + } + ], + "workflowType": "DynamicFanInOutTestLegacy", + "version": 1, + "correlationId": "dynfanouttest1", + "schemaVersion": 1, + "startTime": 1534985656537 +} diff --git a/test-harness/src/test/resources/integration/scenarios/legacy/FanInOutNestedTest.json b/test-harness/src/test/resources/integration/scenarios/legacy/FanInOutNestedTest.json new file mode 100644 index 0000000000..f06396a4dc --- /dev/null +++ b/test-harness/src/test/resources/integration/scenarios/legacy/FanInOutNestedTest.json @@ -0,0 +1,685 @@ +{ + "ownerApp": "junit_app", + "createTime": 1534984145445, + "updateTime": 1534984145695, + "status": "RUNNING", + "endTime": 0, + "workflowId": "WORKFLOW_INSTANCE_ID", + "tasks": [ + { + "taskType": "FORK", + "status": "COMPLETED", + "referenceTaskName": "fork1", + "retryCount": 0, + "seq": 1, + "correlationId": "fork_join_nested_test", + "pollCount": 0, + "taskDefName": "FORK", + 
"scheduledTime": 1534984145532, + "startTime": 1534984145540, + "endTime": 1534984145687, + "updateTime": 1534984145687, + "startDelayInSeconds": 0, + "retried": false, + "executed": true, + "callbackFromWorker": true, + "responseTimeoutSeconds": 0, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "FanInOutNestedTest", + "taskId": "8e4c840b-f7ab-48e6-a882-a0fa5611223b", + "callbackAfterSeconds": 0, + "workflowTask": { + "taskReferenceName": "fork1", + "type": "FORK_JOIN", + "forkTasks": [ + [ + { + "name": "junit_task_11", + "taskReferenceName": "t11", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ], + [ + { + "name": "fork2", + "taskReferenceName": "fork2", + "type": "FORK_JOIN", + "forkTasks": [ + [ + { + "name": "junit_task_12", + "taskReferenceName": "t12", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + { + "name": "junit_task_14", + "taskReferenceName": "t14", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ], + [ + { + "name": "junit_task_13", + "taskReferenceName": "t13", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + { + "name": "Decision", + "taskReferenceName": "d1", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "DECISION", + "caseValueParam": "case", + "decisionCases": { + "a": [ + { + "name": "junit_task_16", + "taskReferenceName": "t16", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + { + "name": "junit_task_19", + "taskReferenceName": "t19", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + { + "name": "junit_task_20", + "taskReferenceName": "t20", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ], + "b": [ + { + "name": "junit_task_17", + "taskReferenceName": "t17", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + { + "name": "junit_task_20", + "taskReferenceName": "t20", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ] + }, + "defaultCase": [ + { + "name": "junit_task_18", + "taskReferenceName": "t18", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + { + "name": "junit_task_20", + "taskReferenceName": "t20", + "inputParameters": { + "p1": 
"workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ], + "startDelay": 0, + "optional": false + } + ] + ], + "startDelay": 0, + "optional": false + }, + { + "taskReferenceName": "join2", + "type": "JOIN", + "startDelay": 0, + "joinOn": [ + "t14", + "t20" + ], + "optional": false + } + ], + [ + { + "taskReferenceName": "sw1", + "type": "SUB_WORKFLOW", + "startDelay": 0, + "subWorkflowParam": { + "name": "junit_test_wf" + }, + "optional": false + } + ] + ], + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "COMPLETED", + "queueWaitTime": 8 + }, + { + "taskType": "junit_task_11", + "status": "SCHEDULED", + "inputData": { + "p1": null, + "p2": null, + "case": "a" + }, + "referenceTaskName": "t11", + "retryCount": 0, + "seq": 2, + "correlationId": "fork_join_nested_test", + "pollCount": 0, + "taskDefName": "junit_task_11", + "scheduledTime": 1534984145537, + "startTime": 0, + "endTime": 0, + "updateTime": 1534984145537, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "FanInOutNestedTest", + "taskId": "dbce7428-23c8-4b02-a7e4-cf2f1629c44f", + "callbackAfterSeconds": 0, + "workflowTask": { + "name": "junit_task_11", + "taskReferenceName": "t11", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "SCHEDULED", + "queueWaitTime": 0 + }, + { + "taskType": "FORK", + "status": "COMPLETED", + "referenceTaskName": "fork2", + "retryCount": 0, + "seq": 3, + "correlationId": "fork_join_nested_test", + "pollCount": 0, + "taskDefName": "FORK", + "scheduledTime": 1534984145538, + "startTime": 1534984145541, + "endTime": 1534984145688, + "updateTime": 1534984145688, + "startDelayInSeconds": 0, + "retried": false, + "executed": true, + "callbackFromWorker": true, + "responseTimeoutSeconds": 0, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "FanInOutNestedTest", + "taskId": "49aea6d0-0073-404a-b8a5-f49ea5d51b63", + "callbackAfterSeconds": 0, + "workflowTask": { + "name": "fork2", + "taskReferenceName": "fork2", + "type": "FORK_JOIN", + "forkTasks": [ + [ + { + "name": "junit_task_12", + "taskReferenceName": "t12", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + { + "name": "junit_task_14", + "taskReferenceName": "t14", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ], + [ + { + "name": "junit_task_13", + "taskReferenceName": "t13", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + { + "name": "Decision", + "taskReferenceName": "d1", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "DECISION", + "caseValueParam": "case", + "decisionCases": { + "a": [ + { + "name": "junit_task_16", + "taskReferenceName": "t16", + 
"inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + { + "name": "junit_task_19", + "taskReferenceName": "t19", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + { + "name": "junit_task_20", + "taskReferenceName": "t20", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ], + "b": [ + { + "name": "junit_task_17", + "taskReferenceName": "t17", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + { + "name": "junit_task_20", + "taskReferenceName": "t20", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ] + }, + "defaultCase": [ + { + "name": "junit_task_18", + "taskReferenceName": "t18", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + { + "name": "junit_task_20", + "taskReferenceName": "t20", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ], + "startDelay": 0, + "optional": false + } + ] + ], + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "COMPLETED", + "queueWaitTime": 3 + }, + { + "taskType": "junit_task_12", + "status": "SCHEDULED", + "inputData": { + "p1": null, + "p2": null, + "case": "a" + }, + "referenceTaskName": "t12", + "retryCount": 0, + "seq": 4, + "correlationId": "fork_join_nested_test", + "pollCount": 0, + "taskDefName": "junit_task_12", + "scheduledTime": 1534984145538, + "startTime": 0, + "endTime": 0, + "updateTime": 1534984145538, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "FanInOutNestedTest", + "taskId": "cdef48f5-39d5-4341-85f2-b72984f1dd46", + "callbackAfterSeconds": 0, + "workflowTask": { + "name": "junit_task_12", + "taskReferenceName": "t12", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "SCHEDULED", + "queueWaitTime": 0 + }, + { + "taskType": "junit_task_13", + "status": "SCHEDULED", + "inputData": { + "p1": null, + "p2": null, + "case": "a" + }, + "referenceTaskName": "t13", + "retryCount": 0, + "seq": 5, + "correlationId": "fork_join_nested_test", + "pollCount": 0, + "taskDefName": "junit_task_13", + "scheduledTime": 1534984145539, + "startTime": 0, + "endTime": 0, + "updateTime": 1534984145539, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "FanInOutNestedTest", + 
"taskId": "f304e4ec-effd-46e5-a2f3-854afe2b4fc6", + "callbackAfterSeconds": 0, + "workflowTask": { + "name": "junit_task_13", + "taskReferenceName": "t13", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "SCHEDULED", + "queueWaitTime": 0 + }, + { + "taskType": "SUB_WORKFLOW", + "status": "IN_PROGRESS", + "inputData": { + "workflowInput": {}, + "subWorkflowId": "a1fd729d-ad38-4159-a788-4bd07f60e911", + "subWorkflowName": "junit_test_wf", + "subWorkflowVersion": 1 + }, + "referenceTaskName": "sw1", + "retryCount": 0, + "seq": 6, + "correlationId": "fork_join_nested_test", + "pollCount": 0, + "taskDefName": "SUB_WORKFLOW", + "scheduledTime": 1534984145539, + "startTime": 1534984145541, + "endTime": 1534984145527, + "updateTime": 1534984145669, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 0, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "FanInOutNestedTest", + "taskId": "bc62166d-1fb2-4cc9-9d94-52da4d88bd19", + "callbackAfterSeconds": 0, + "outputData": { + "subWorkflowId": "a1fd729d-ad38-4159-a788-4bd07f60e911" + }, + "workflowTask": { + "taskReferenceName": "sw1", + "type": "SUB_WORKFLOW", + "startDelay": 0, + "subWorkflowParam": { + "name": "junit_test_wf" + }, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "IN_PROGRESS", + "queueWaitTime": 2 + }, + { + "taskType": "JOIN", + "status": "IN_PROGRESS", + "inputData": { + "joinOn": [ + "t11", + "join2", + "sw1" + ] + }, + "referenceTaskName": "join1", + "retryCount": 0, + "seq": 7, + "correlationId": "fork_join_nested_test", + "pollCount": 0, + "taskDefName": "JOIN", + "scheduledTime": 1534984145686, + "startTime": 1534984145687, + "endTime": 1534984145685, + "updateTime": 1534984145687, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 0, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "FanInOutNestedTest", + "taskId": "3dccf1fe-4653-4e04-b7a5-f21a2a723da8", + "callbackAfterSeconds": 0, + "outputData": { + "t11": {} + }, + "workflowTask": { + "taskReferenceName": "join1", + "type": "JOIN", + "startDelay": 0, + "joinOn": [ + "t11", + "join2", + "sw1" + ], + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "IN_PROGRESS", + "queueWaitTime": 1 + }, + { + "taskType": "JOIN", + "status": "IN_PROGRESS", + "inputData": { + "joinOn": [ + "t14", + "t20" + ] + }, + "referenceTaskName": "join2", + "retryCount": 0, + "seq": 8, + "correlationId": "fork_join_nested_test", + "pollCount": 0, + "taskDefName": "JOIN", + "scheduledTime": 1534984145687, + "startTime": 1534984145687, + "endTime": 1534984145686, + "updateTime": 1534984145687, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 0, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "FanInOutNestedTest", + "taskId": "b371077b-b382-4e19-80b8-6d6540752793", + "callbackAfterSeconds": 0, + "workflowTask": { + "taskReferenceName": "join2", + "type": "JOIN", + "startDelay": 0, + "joinOn": [ + "t14", + "t20" + ], + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "IN_PROGRESS", + "queueWaitTime": 0 + } + ], + "input": { + "case": "a" + }, + "workflowType": "FanInOutNestedTest", + "version": 
1, + "correlationId": "fork_join_nested_test", + "schemaVersion": 1, + "startTime": 1534984145445 +} diff --git a/test-harness/src/test/resources/integration/scenarios/legacy/FanInOutTest.json b/test-harness/src/test/resources/integration/scenarios/legacy/FanInOutTest.json new file mode 100644 index 0000000000..952f375daf --- /dev/null +++ b/test-harness/src/test/resources/integration/scenarios/legacy/FanInOutTest.json @@ -0,0 +1,207 @@ +{ + "ownerApp": "junit_app", + "createTime": 1534983505157, + "updateTime": 1534983505166, + "status": "RUNNING", + "endTime": 0, + "workflowId": "WORKFLOW_INSTANCE_ID", + "tasks": [ + { + "taskType": "FORK", + "status": "COMPLETED", + "referenceTaskName": "fanouttask", + "retryCount": 0, + "seq": 1, + "correlationId": "fanouttest", + "pollCount": 0, + "taskDefName": "FORK", + "scheduledTime": 1534983505159, + "startTime": 1534983505161, + "endTime": 1534983505164, + "updateTime": 1534983505164, + "startDelayInSeconds": 0, + "retried": false, + "executed": true, + "callbackFromWorker": true, + "responseTimeoutSeconds": 0, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "FanInOutTest", + "taskId": "aa40aee3-4e9d-4cbc-8298-37d39883fec5", + "callbackAfterSeconds": 0, + "workflowTask": { + "taskReferenceName": "fanouttask", + "type": "FORK_JOIN", + "forkTasks": [ + [ + { + "name": "junit_task_1", + "taskReferenceName": "t1", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + { + "name": "junit_task_3", + "taskReferenceName": "t3", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ], + [ + { + "name": "junit_task_2", + "taskReferenceName": "t2", + "inputParameters": { + "tp1": "workflow.input.param1" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ] + ], + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "COMPLETED", + "queueWaitTime": 2 + }, + { + "taskType": "junit_task_1", + "status": "SCHEDULED", + "inputData": { + "p1": null, + "p2": null + }, + "referenceTaskName": "t1", + "retryCount": 0, + "seq": 2, + "correlationId": "fanouttest", + "pollCount": 0, + "taskDefName": "junit_task_1", + "scheduledTime": 1534983505160, + "startTime": 0, + "endTime": 0, + "updateTime": 1534983505160, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "FanInOutTest", + "taskId": "feb6c6c2-0974-4e78-b4df-df4389c46aea", + "callbackAfterSeconds": 0, + "workflowTask": { + "name": "junit_task_1", + "taskReferenceName": "t1", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "SCHEDULED", + "queueWaitTime": 0 + }, + { + "taskType": "junit_task_2", + "status": "SCHEDULED", + "inputData": { + "tp1": null + }, + "referenceTaskName": "t2", + "retryCount": 0, + "seq": 3, + "correlationId": "fanouttest", + "pollCount": 0, + "taskDefName": "junit_task_2", + "scheduledTime": 1534983505160, + "startTime": 0, + "endTime": 0, + "updateTime": 1534983505160, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": 
"WORKFLOW_INSTANCE_ID", + "workflowType": "FanInOutTest", + "taskId": "36ee8037-e378-4649-92ca-3655c4e2ba75", + "callbackAfterSeconds": 0, + "workflowTask": { + "name": "junit_task_2", + "taskReferenceName": "t2", + "inputParameters": { + "tp1": "workflow.input.param1" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "SCHEDULED", + "queueWaitTime": 0 + }, + { + "taskType": "JOIN", + "status": "IN_PROGRESS", + "inputData": { + "joinOn": [ + "t3", + "t2" + ] + }, + "referenceTaskName": "fanouttask_join", + "retryCount": 0, + "seq": 4, + "correlationId": "fanouttest", + "pollCount": 0, + "taskDefName": "JOIN", + "scheduledTime": 1534983505164, + "startTime": 1534983505164, + "endTime": 1534983505163, + "updateTime": 1534983505164, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 0, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "FanInOutTest", + "taskId": "e681e9ce-b902-4931-9759-3ec5df88ddd0", + "callbackAfterSeconds": 0, + "workflowTask": { + "taskReferenceName": "fanouttask_join", + "type": "JOIN", + "startDelay": 0, + "joinOn": [ + "t3", + "t2" + ], + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "IN_PROGRESS", + "queueWaitTime": 0 + } + ], + "workflowType": "FanInOutTest", + "version": 1, + "correlationId": "fanouttest", + "schemaVersion": 1, + "startTime": 1534983505157 +} diff --git a/test-harness/src/test/resources/integration/scenarios/legacy/FanInOutTest_2.json b/test-harness/src/test/resources/integration/scenarios/legacy/FanInOutTest_2.json new file mode 100644 index 0000000000..992fb60820 --- /dev/null +++ b/test-harness/src/test/resources/integration/scenarios/legacy/FanInOutTest_2.json @@ -0,0 +1,207 @@ +{ + "ownerApp": "junit_app", + "createTime": 1534990432158, + "updateTime": 1534990432327, + "status": "RUNNING", + "endTime": 0, + "workflowId": "WORKFLOW_INSTANCE_ID", + "tasks": [ + { + "taskType": "FORK", + "status": "COMPLETED", + "referenceTaskName": "fanouttask", + "retryCount": 0, + "seq": 1, + "correlationId": "fanouttest", + "pollCount": 0, + "taskDefName": "FORK", + "scheduledTime": 1534990432193, + "startTime": 1534990432201, + "endTime": 1534990432325, + "updateTime": 1534990432325, + "startDelayInSeconds": 0, + "retried": false, + "executed": true, + "callbackFromWorker": true, + "responseTimeoutSeconds": 0, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "FanInOutTest_2", + "taskId": "1a7aa928-4159-447a-b9a8-2722a1da1031", + "callbackAfterSeconds": 0, + "workflowTask": { + "taskReferenceName": "fanouttask", + "type": "FORK_JOIN", + "forkTasks": [ + [ + { + "name": "junit_task_0_RT_1", + "taskReferenceName": "t1", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + { + "name": "junit_task_0_RT_3", + "taskReferenceName": "t3", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ], + [ + { + "name": "junit_task_0_RT_2", + "taskReferenceName": "t2", + "inputParameters": { + "tp1": "workflow.input.param1" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ] + ], + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "COMPLETED", + "queueWaitTime": 8 + }, + { + "taskType": "junit_task_0_RT_1", + "status": 
"SCHEDULED", + "inputData": { + "p1": null, + "p2": null + }, + "referenceTaskName": "t1", + "retryCount": 0, + "seq": 2, + "correlationId": "fanouttest", + "pollCount": 0, + "taskDefName": "junit_task_0_RT_1", + "scheduledTime": 1534990432200, + "startTime": 0, + "endTime": 0, + "updateTime": 1534990432200, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "FanInOutTest_2", + "taskId": "7933d420-53df-450f-bf38-950c90835b0e", + "callbackAfterSeconds": 0, + "workflowTask": { + "name": "junit_task_0_RT_1", + "taskReferenceName": "t1", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "SCHEDULED", + "queueWaitTime": 0 + }, + { + "taskType": "junit_task_0_RT_2", + "status": "SCHEDULED", + "inputData": { + "tp1": null + }, + "referenceTaskName": "t2", + "retryCount": 0, + "seq": 3, + "correlationId": "fanouttest", + "pollCount": 0, + "taskDefName": "junit_task_0_RT_2", + "scheduledTime": 1534990432200, + "startTime": 0, + "endTime": 0, + "updateTime": 1534990432200, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "FanInOutTest_2", + "taskId": "9028683d-6604-49a8-b803-e1d9f924c474", + "callbackAfterSeconds": 0, + "workflowTask": { + "name": "junit_task_0_RT_2", + "taskReferenceName": "t2", + "inputParameters": { + "tp1": "workflow.input.param1" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "SCHEDULED", + "queueWaitTime": 0 + }, + { + "taskType": "JOIN", + "status": "IN_PROGRESS", + "inputData": { + "joinOn": [ + "t3", + "t2" + ] + }, + "referenceTaskName": "fanouttask_join", + "retryCount": 0, + "seq": 4, + "correlationId": "fanouttest", + "pollCount": 0, + "taskDefName": "JOIN", + "scheduledTime": 1534990432324, + "startTime": 1534990432325, + "endTime": 1534990432323, + "updateTime": 1534990432325, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 0, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "FanInOutTest_2", + "taskId": "94140a15-875a-4f39-9573-3e2f6960f557", + "callbackAfterSeconds": 0, + "workflowTask": { + "taskReferenceName": "fanouttask_join", + "type": "JOIN", + "startDelay": 0, + "joinOn": [ + "t3", + "t2" + ], + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "IN_PROGRESS", + "queueWaitTime": 1 + } + ], + "workflowType": "FanInOutTest_2", + "version": 1, + "correlationId": "fanouttest", + "schemaVersion": 1, + "startTime": 1534990432158 +} diff --git a/test-harness/src/test/resources/integration/scenarios/legacy/RTOWF.json b/test-harness/src/test/resources/integration/scenarios/legacy/RTOWF.json new file mode 100644 index 0000000000..0c7e5e1082 --- /dev/null +++ b/test-harness/src/test/resources/integration/scenarios/legacy/RTOWF.json @@ -0,0 +1,60 @@ +{ + "ownerApp": "junit_app", + "createTime": 1534986389370, + "updateTime": 1534986389594, + "status": "RUNNING", + "endTime": 0, + "workflowId": "WORKFLOW_INSTANCE_ID", + "tasks": [ + { + "taskType": "task_rt", + "status": "SCHEDULED", + "inputData": { + "p1": "p1 value", + "p2": "p2 value" + }, + 
"referenceTaskName": "task_rt_t1", + "retryCount": 0, + "seq": 1, + "correlationId": "unit_test_1", + "pollCount": 0, + "taskDefName": "task_rt", + "scheduledTime": 1534986389470, + "startTime": 0, + "endTime": 0, + "updateTime": 1534986389470, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 10, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "RTOWF", + "taskId": "225c5461-b21b-4934-8b09-65c24a3daeda", + "callbackAfterSeconds": 0, + "workflowTask": { + "name": "task_rt", + "taskReferenceName": "task_rt_t1", + "inputParameters": { + "p1": "${workflow.input.param1}", + "p2": "${workflow.input.param2}" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "SCHEDULED", + "queueWaitTime": 0 + } + ], + "input": { + "param1": "p1 value", + "param2": "p2 value" + }, + "workflowType": "RTOWF", + "version": 1, + "correlationId": "unit_test_1", + "schemaVersion": 2, + "startTime": 1534986389370 +} diff --git a/test-harness/src/test/resources/integration/scenarios/legacy/WorkflowWithSubWorkflow.json b/test-harness/src/test/resources/integration/scenarios/legacy/WorkflowWithSubWorkflow.json new file mode 100644 index 0000000000..09af345b62 --- /dev/null +++ b/test-harness/src/test/resources/integration/scenarios/legacy/WorkflowWithSubWorkflow.json @@ -0,0 +1,61 @@ +{ + "ownerApp": "junit_app", + "createTime": 1534989930597, + "updateTime": 1534989930779, + "status": "RUNNING", + "endTime": 0, + "workflowId": "WORKFLOW_INSTANCE_ID", + "tasks": [ + { + "taskType": "junit_task_5", + "status": "SCHEDULED", + "inputData": { + "p1": "param 1 value", + "p2": null + }, + "referenceTaskName": "a1", + "retryCount": 0, + "seq": 1, + "correlationId": "test", + "pollCount": 0, + "taskDefName": "junit_task_5", + "scheduledTime": 1534989930669, + "startTime": 0, + "endTime": 0, + "updateTime": 1534989930669, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "WorkflowWithSubWorkflow", + "taskId": "9d704f3e-3814-4e60-8592-a4e2aec81e50", + "callbackAfterSeconds": 0, + "workflowTask": { + "name": "junit_task_5", + "taskReferenceName": "a1", + "inputParameters": { + "p1": "${workflow.input.param1}", + "p2": "${workflow.input.param2}" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "SCHEDULED", + "queueWaitTime": 0 + } + ], + "input": { + "wfName": "junit_test_wf", + "param3": "param 2 value", + "param1": "param 1 value" + }, + "workflowType": "WorkflowWithSubWorkflow", + "version": 1, + "correlationId": "test", + "schemaVersion": 2, + "startTime": 1534989930597 +} diff --git a/test-harness/src/test/resources/integration/scenarios/legacy/concurrentWorkflowExecutions.json b/test-harness/src/test/resources/integration/scenarios/legacy/concurrentWorkflowExecutions.json new file mode 100644 index 0000000000..bf14efed98 --- /dev/null +++ b/test-harness/src/test/resources/integration/scenarios/legacy/concurrentWorkflowExecutions.json @@ -0,0 +1,60 @@ +{ + "ownerApp": "junit_app", + "createTime": 1535058398369, + "updateTime": 1535058398372, + "status": "RUNNING", + "endTime": 0, + "workflowId": "WORKFLOW_INSTANCE_ID", + "tasks": [ + { + "taskType": "junit_task_1", + "status": "SCHEDULED", + "inputData": { + "p1": "p1 value", + "p2": "p2 value" + }, + 
"referenceTaskName": "t1", + "retryCount": 0, + "seq": 1, + "correlationId": "unit_test_concurrrent", + "pollCount": 0, + "taskDefName": "junit_task_1", + "scheduledTime": 1535058398371, + "startTime": 0, + "endTime": 0, + "updateTime": 1535058398371, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "junit_test_wf", + "taskId": "39f2ccac-e449-4eab-87ba-cb85ba817a3c", + "callbackAfterSeconds": 0, + "workflowTask": { + "name": "junit_task_1", + "taskReferenceName": "t1", + "inputParameters": { + "p1": "${workflow.input.param1}", + "p2": "${workflow.input.param2}" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "SCHEDULED", + "queueWaitTime": 0 + } + ], + "input": { + "param1": "p1 value", + "param2": "p2 value" + }, + "workflowType": "junit_test_wf", + "version": 1, + "correlationId": "unit_test_concurrrent", + "schemaVersion": 2, + "startTime": 1535058398369 +} diff --git a/test-harness/src/test/resources/integration/scenarios/legacy/empty_workflow.json b/test-harness/src/test/resources/integration/scenarios/legacy/empty_workflow.json new file mode 100644 index 0000000000..ab11ffa4b1 --- /dev/null +++ b/test-harness/src/test/resources/integration/scenarios/legacy/empty_workflow.json @@ -0,0 +1,14 @@ +{ + "ownerApp": "junit_app", + "createTime": 1534983504822, + "updateTime": 1534983504841, + "status": "COMPLETED", + "endTime": 1534983504841, + "workflowId": "WORKFLOW_INSTANCE_ID", + "workflowType": "empty_workflow", + "version": 1, + "correlationId": "testWorkflowWithNoTasks", + "reasonForIncompletion": "No tasks found to be executed", + "schemaVersion": 2, + "startTime": 1534983504822 +} diff --git a/test-harness/src/test/resources/integration/scenarios/legacy/forkJoinNested.json b/test-harness/src/test/resources/integration/scenarios/legacy/forkJoinNested.json new file mode 100644 index 0000000000..4248674eaf --- /dev/null +++ b/test-harness/src/test/resources/integration/scenarios/legacy/forkJoinNested.json @@ -0,0 +1,628 @@ +{ + "ownerApp": "junit_app", + "createTime": 1535070652209, + "updateTime": 1535070652424, + "status": "RUNNING", + "endTime": 0, + "workflowId": "WORKFLOW_INSTANCE_ID", + "tasks": [ + { + "taskType": "FORK", + "status": "COMPLETED", + "referenceTaskName": "fork1", + "retryCount": 0, + "seq": 1, + "correlationId": "fork_join_nested_test", + "pollCount": 0, + "taskDefName": "FORK", + "scheduledTime": 1535070652273, + "startTime": 1535070652282, + "endTime": 1535070652416, + "updateTime": 1535070652416, + "startDelayInSeconds": 0, + "retried": false, + "executed": true, + "callbackFromWorker": true, + "responseTimeoutSeconds": 0, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "FanInOutNestedTest", + "taskId": "7154b7b6-cf9e-45b4-a81f-f9faa349845c", + "callbackAfterSeconds": 0, + "workflowTask": { + "taskReferenceName": "fork1", + "type": "FORK_JOIN", + "forkTasks": [ + [ + { + "name": "junit_task_11", + "taskReferenceName": "t11", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ], + [ + { + "name": "fork2", + "taskReferenceName": "fork2", + "type": "FORK_JOIN", + "forkTasks": [ + [ + { + "name": "junit_task_12", + "taskReferenceName": "t12", + "inputParameters": { + "p1": "workflow.input.param1", + 
"p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + { + "name": "junit_task_14", + "taskReferenceName": "t14", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ], + [ + { + "name": "junit_task_13", + "taskReferenceName": "t13", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + { + "name": "Decision", + "taskReferenceName": "d1", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "DECISION", + "caseValueParam": "case", + "decisionCases": { + "a": [ + { + "name": "junit_task_16", + "taskReferenceName": "t16", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + { + "name": "junit_task_19", + "taskReferenceName": "t19", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + { + "name": "junit_task_20", + "taskReferenceName": "t20", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ], + "b": [ + { + "name": "junit_task_17", + "taskReferenceName": "t17", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + { + "name": "junit_task_20", + "taskReferenceName": "t20", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ] + }, + "defaultCase": [ + { + "name": "junit_task_18", + "taskReferenceName": "t18", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + { + "name": "junit_task_20", + "taskReferenceName": "t20", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ], + "startDelay": 0, + "optional": false + } + ] + ], + "startDelay": 0, + "optional": false + }, + { + "taskReferenceName": "join2", + "type": "JOIN", + "startDelay": 0, + "joinOn": [ + "t14", + "t20" + ], + "optional": false + } + ] + ], + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "COMPLETED", + "queueWaitTime": 9 + }, + { + "taskType": "junit_task_11", + "status": "SCHEDULED", + "inputData": { + "p1": null, + "p2": null, + "case": "a" + }, + "referenceTaskName": "t11", + "retryCount": 0, + "seq": 2, + "correlationId": "fork_join_nested_test", + "pollCount": 0, + "taskDefName": "junit_task_11", + "scheduledTime": 1535070652279, + "startTime": 0, + "endTime": 0, + "updateTime": 1535070652279, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + 
"responseTimeoutSeconds": 3600, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "FanInOutNestedTest", + "taskId": "f56e4de5-1331-478b-bee5-055703cd47c8", + "callbackAfterSeconds": 0, + "workflowTask": { + "name": "junit_task_11", + "taskReferenceName": "t11", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "SCHEDULED", + "queueWaitTime": 0 + }, + { + "taskType": "FORK", + "status": "COMPLETED", + "referenceTaskName": "fork2", + "retryCount": 0, + "seq": 3, + "correlationId": "fork_join_nested_test", + "pollCount": 0, + "taskDefName": "FORK", + "scheduledTime": 1535070652280, + "startTime": 1535070652283, + "endTime": 1535070652417, + "updateTime": 1535070652417, + "startDelayInSeconds": 0, + "retried": false, + "executed": true, + "callbackFromWorker": true, + "responseTimeoutSeconds": 0, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "FanInOutNestedTest", + "taskId": "dada5cfc-e8ab-4b14-97d6-5115af8fdb4c", + "callbackAfterSeconds": 0, + "workflowTask": { + "name": "fork2", + "taskReferenceName": "fork2", + "type": "FORK_JOIN", + "forkTasks": [ + [ + { + "name": "junit_task_12", + "taskReferenceName": "t12", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + { + "name": "junit_task_14", + "taskReferenceName": "t14", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ], + [ + { + "name": "junit_task_13", + "taskReferenceName": "t13", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + { + "name": "Decision", + "taskReferenceName": "d1", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "DECISION", + "caseValueParam": "case", + "decisionCases": { + "a": [ + { + "name": "junit_task_16", + "taskReferenceName": "t16", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + { + "name": "junit_task_19", + "taskReferenceName": "t19", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + { + "name": "junit_task_20", + "taskReferenceName": "t20", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ], + "b": [ + { + "name": "junit_task_17", + "taskReferenceName": "t17", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + { + "name": "junit_task_20", + "taskReferenceName": "t20", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + 
"optional": false + } + ] + }, + "defaultCase": [ + { + "name": "junit_task_18", + "taskReferenceName": "t18", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + { + "name": "junit_task_20", + "taskReferenceName": "t20", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + } + ], + "startDelay": 0, + "optional": false + } + ] + ], + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "COMPLETED", + "queueWaitTime": 3 + }, + { + "taskType": "junit_task_12", + "status": "SCHEDULED", + "inputData": { + "p1": null, + "p2": null, + "case": "a" + }, + "referenceTaskName": "t12", + "retryCount": 0, + "seq": 4, + "correlationId": "fork_join_nested_test", + "pollCount": 0, + "taskDefName": "junit_task_12", + "scheduledTime": 1535070652280, + "startTime": 0, + "endTime": 0, + "updateTime": 1535070652280, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "FanInOutNestedTest", + "taskId": "72262faf-8ce6-4d2d-9bff-d7c4a549db54", + "callbackAfterSeconds": 0, + "workflowTask": { + "name": "junit_task_12", + "taskReferenceName": "t12", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "SCHEDULED", + "queueWaitTime": 0 + }, + { + "taskType": "junit_task_13", + "status": "SCHEDULED", + "inputData": { + "p1": null, + "p2": null, + "case": "a" + }, + "referenceTaskName": "t13", + "retryCount": 0, + "seq": 5, + "correlationId": "fork_join_nested_test", + "pollCount": 0, + "taskDefName": "junit_task_13", + "scheduledTime": 1535070652280, + "startTime": 0, + "endTime": 0, + "updateTime": 1535070652281, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "FanInOutNestedTest", + "taskId": "53163269-212d-4670-9586-8cd9dfd096db", + "callbackAfterSeconds": 0, + "workflowTask": { + "name": "junit_task_13", + "taskReferenceName": "t13", + "inputParameters": { + "p1": "workflow.input.param1", + "p2": "workflow.input.param2", + "case": "workflow.input.case" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "SCHEDULED", + "queueWaitTime": 0 + }, + { + "taskType": "JOIN", + "status": "IN_PROGRESS", + "inputData": { + "joinOn": [ + "t11", + "join2" + ] + }, + "referenceTaskName": "join1", + "retryCount": 0, + "seq": 7, + "correlationId": "fork_join_nested_test", + "pollCount": 0, + "taskDefName": "JOIN", + "scheduledTime": 1535070652415, + "startTime": 1535070652416, + "endTime": 1535070652415, + "updateTime": 1535070652416, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 0, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "FanInOutNestedTest", + "taskId": "d39443cc-d531-4612-9eb0-f1b4c2c1aa29", + "callbackAfterSeconds": 0, + "outputData": { + "t11": {} + }, + "workflowTask": { + 
"taskReferenceName": "join1", + "type": "JOIN", + "startDelay": 0, + "joinOn": [ + "t11", + "join2" + ], + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "IN_PROGRESS", + "queueWaitTime": 1 + }, + { + "taskType": "JOIN", + "status": "IN_PROGRESS", + "inputData": { + "joinOn": [ + "t14", + "t20" + ] + }, + "referenceTaskName": "join2", + "retryCount": 0, + "seq": 8, + "correlationId": "fork_join_nested_test", + "pollCount": 0, + "taskDefName": "JOIN", + "scheduledTime": 1535070652416, + "startTime": 1535070652416, + "endTime": 1535070652415, + "updateTime": 1535070652416, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 0, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "FanInOutNestedTest", + "taskId": "7696c0e4-5112-497f-a61f-6a24317a4d8a", + "callbackAfterSeconds": 0, + "workflowTask": { + "taskReferenceName": "join2", + "type": "JOIN", + "startDelay": 0, + "joinOn": [ + "t14", + "t20" + ], + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "IN_PROGRESS", + "queueWaitTime": 0 + } + ], + "input": { + "case": "a" + }, + "workflowType": "FanInOutNestedTest", + "version": 1, + "correlationId": "fork_join_nested_test", + "schemaVersion": 1, + "startTime": 1535070652209 +} diff --git a/test-harness/src/test/resources/integration/scenarios/legacy/junit_test_wf.json b/test-harness/src/test/resources/integration/scenarios/legacy/junit_test_wf.json new file mode 100644 index 0000000000..87390f515b --- /dev/null +++ b/test-harness/src/test/resources/integration/scenarios/legacy/junit_test_wf.json @@ -0,0 +1,91 @@ +{ + "ownerApp": "junit_app", + "createTime": 1534901322631, + "updateTime": 1534901322800, + "createdBy": null, + "updatedBy": null, + "status": "RUNNING", + "endTime": 0, + "workflowId": "WORKFLOW_INSTANCE_ID", + "parentWorkflowId": null, + "parentWorkflowTaskId": null, + "tasks": [ + { + "taskType": "junit_task_1", + "status": "SCHEDULED", + "inputData": { + "p1": "p1 value", + "p2": "p2 value" + }, + "referenceTaskName": "t1", + "retryCount": 0, + "seq": 1, + "correlationId": "unit_test_126654816802294", + "pollCount": 0, + "taskDefName": "junit_task_1", + "scheduledTime": 1534901322699, + "startTime": 0, + "endTime": 0, + "updateTime": 1534901322699, + "startDelayInSeconds": 0, + "retriedTaskId": null, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "junit_test_wf", + "taskId": "7456b2cb-a9dc-4a35-8b52-4c9e914d3073", + "reasonForIncompletion": null, + "callbackAfterSeconds": 0, + "workerId": null, + "outputData": {}, + "workflowTask": { + "name": "junit_task_1", + "taskReferenceName": "t1", + "description": null, + "inputParameters": { + "p1": "${workflow.input.param1}", + "p2": "${workflow.input.param2}" + }, + "type": "SIMPLE", + "dynamicTaskNameParam": null, + "caseValueParam": null, + "caseExpression": null, + "decisionCases": {}, + "dynamicForkJoinTasksParam": null, + "dynamicForkTasksParam": null, + "dynamicForkTasksInputParamName": null, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "subWorkflowParam": null, + "joinOn": [], + "sink": null, + "optional": false, + "rateLimited": null + }, + "domain": null, + "inputMessage": null, + "outputMessage": null, + "rateLimitPerSecond": 0, + "taskStatus": "SCHEDULED", + "queueWaitTime": 0 + } + ], + "input": { + "param1": "p1 value", + "param2": "p2 value" + }, + "output": 
{}, + "workflowType": "junit_test_wf", + "version": 1, + "correlationId": "unit_test_126654816802294", + "reRunFromWorkflowId": null, + "reasonForIncompletion": null, + "schemaVersion": 2, + "event": null, + "taskToDomain": {}, + "failedReferenceTaskNames": [], + "startTime": 1534901322631 +} \ No newline at end of file diff --git a/test-harness/src/test/resources/integration/scenarios/legacy/junit_test_wf3.json b/test-harness/src/test/resources/integration/scenarios/legacy/junit_test_wf3.json new file mode 100644 index 0000000000..2d1f2cdca0 --- /dev/null +++ b/test-harness/src/test/resources/integration/scenarios/legacy/junit_test_wf3.json @@ -0,0 +1,60 @@ +{ + "ownerApp": "junit_app", + "createTime": 1534990090639, + "updateTime": 1534990090810, + "status": "RUNNING", + "endTime": 0, + "workflowId": "WORKFLOW_INSTANCE_ID", + "tasks": [ + { + "taskType": "junit_task_1", + "status": "SCHEDULED", + "inputData": { + "p1": "p1 value", + "p2": "p2 value" + }, + "referenceTaskName": "t1", + "retryCount": 0, + "seq": 1, + "correlationId": "unit_test_122addfc0-354a-4e99-b756-e34d72ea1f97", + "pollCount": 0, + "taskDefName": "junit_task_1", + "scheduledTime": 1534990090709, + "startTime": 0, + "endTime": 0, + "updateTime": 1534990090709, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "junit_test_wf3", + "taskId": "7f53def1-835a-4aa5-ae9a-7b6052e657b1", + "callbackAfterSeconds": 0, + "workflowTask": { + "name": "junit_task_1", + "taskReferenceName": "t1", + "inputParameters": { + "p1": "${workflow.input.param1}", + "p2": "${workflow.input.param2}" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "SCHEDULED", + "queueWaitTime": 0 + } + ], + "input": { + "param1": "p1 value", + "param2": "p2 value" + }, + "workflowType": "junit_test_wf3", + "version": 1, + "correlationId": "unit_test_122addfc0-354a-4e99-b756-e34d72ea1f97", + "schemaVersion": 2, + "startTime": 1534990090639 +} diff --git a/test-harness/src/test/resources/integration/scenarios/legacy/junit_test_wf_non_restartable.json b/test-harness/src/test/resources/integration/scenarios/legacy/junit_test_wf_non_restartable.json new file mode 100644 index 0000000000..48d70f9c2b --- /dev/null +++ b/test-harness/src/test/resources/integration/scenarios/legacy/junit_test_wf_non_restartable.json @@ -0,0 +1,60 @@ +{ + "ownerApp": "junit_app", + "createTime": 1534990253433, + "updateTime": 1534990253667, + "status": "RUNNING", + "endTime": 0, + "workflowId": "WORKFLOW_INSTANCE_ID", + "tasks": [ + { + "taskType": "junit_task_1", + "status": "SCHEDULED", + "inputData": { + "p1": "p1 value", + "p2": "p2 value" + }, + "referenceTaskName": "t1", + "retryCount": 0, + "seq": 1, + "correlationId": "unit_test_180b692c7-691a-4edb-a0a6-8b65cc76f9a0", + "pollCount": 0, + "taskDefName": "junit_task_1", + "scheduledTime": 1534990253540, + "startTime": 0, + "endTime": 0, + "updateTime": 1534990253540, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "junit_test_wf_non_restartable", + "taskId": "37fc37f7-729c-4b22-9b8c-a23d68149ed2", + "callbackAfterSeconds": 0, + "workflowTask": { + "name": "junit_task_1", + "taskReferenceName": "t1", + "inputParameters": { + "p1": "${workflow.input.param1}", + "p2": 
"${workflow.input.param2}" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "SCHEDULED", + "queueWaitTime": 0 + } + ], + "input": { + "param1": "p1 value", + "param2": "p2 value" + }, + "workflowType": "junit_test_wf_non_restartable", + "version": 1, + "correlationId": "unit_test_180b692c7-691a-4edb-a0a6-8b65cc76f9a0", + "schemaVersion": 2, + "startTime": 1534990253433 +} diff --git a/test-harness/src/test/resources/integration/scenarios/legacy/junit_test_wf_sw.json b/test-harness/src/test/resources/integration/scenarios/legacy/junit_test_wf_sw.json new file mode 100644 index 0000000000..cc2627adde --- /dev/null +++ b/test-harness/src/test/resources/integration/scenarios/legacy/junit_test_wf_sw.json @@ -0,0 +1,60 @@ +{ + "ownerApp": "junit_app", + "createTime": 1534986608565, + "updateTime": 1534986608739, + "status": "RUNNING", + "endTime": 0, + "workflowId": "WORKFLOW_INSTANCE_ID", + "tasks": [ + { + "taskType": "junit_task_3", + "status": "SCHEDULED", + "inputData": { + "p1": "p1 value", + "p2": "p2 value" + }, + "referenceTaskName": "t1", + "retryCount": 0, + "seq": 1, + "correlationId": "unit_test_sw", + "pollCount": 0, + "taskDefName": "junit_task_3", + "scheduledTime": 1534986608637, + "startTime": 0, + "endTime": 0, + "updateTime": 1534986608637, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "junit_test_wf_sw", + "taskId": "6ecc0e58-adb3-408b-b815-0762eb572488", + "callbackAfterSeconds": 0, + "workflowTask": { + "name": "junit_task_3", + "taskReferenceName": "t1", + "inputParameters": { + "p1": "${workflow.input.param1}", + "p2": "${workflow.input.param2}" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "SCHEDULED", + "queueWaitTime": 0 + } + ], + "input": { + "param1": "p1 value", + "param2": "p2 value" + }, + "workflowType": "junit_test_wf_sw", + "version": 1, + "correlationId": "unit_test_sw", + "schemaVersion": 2, + "startTime": 1534986608565 +} diff --git a/test-harness/src/test/resources/integration/scenarios/legacy/longRunningWf.json b/test-harness/src/test/resources/integration/scenarios/legacy/longRunningWf.json new file mode 100644 index 0000000000..08a65eb266 --- /dev/null +++ b/test-harness/src/test/resources/integration/scenarios/legacy/longRunningWf.json @@ -0,0 +1,60 @@ +{ + "ownerApp": "junit_app", + "createTime": 1534990739242, + "updateTime": 1534990739413, + "status": "RUNNING", + "endTime": 0, + "workflowId": "WORKFLOW_INSTANCE_ID", + "tasks": [ + { + "taskType": "junit_task_1", + "status": "SCHEDULED", + "inputData": { + "p1": "p1 value", + "p2": "p2 value" + }, + "referenceTaskName": "t1", + "retryCount": 0, + "seq": 1, + "correlationId": "unit_test_1", + "pollCount": 0, + "taskDefName": "junit_task_1", + "scheduledTime": 1534990739311, + "startTime": 0, + "endTime": 0, + "updateTime": 1534990739311, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "longRunningWf", + "taskId": "0fefb562-dbef-4cec-bec8-a5e72be5cfef", + "callbackAfterSeconds": 0, + "workflowTask": { + "name": "junit_task_1", + "taskReferenceName": "t1", + "inputParameters": { + "p1": "${workflow.input.param1}", + "p2": "${workflow.input.param2}" + }, + "type": "SIMPLE", + 
"startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "SCHEDULED", + "queueWaitTime": 0 + } + ], + "input": { + "param1": "p1 value", + "param2": "p2 value" + }, + "workflowType": "longRunningWf", + "version": 1, + "correlationId": "unit_test_1", + "schemaVersion": 2, + "startTime": 1534990739242 +} diff --git a/test-harness/src/test/resources/integration/scenarios/legacy/retry.json b/test-harness/src/test/resources/integration/scenarios/legacy/retry.json new file mode 100644 index 0000000000..34f01eab2f --- /dev/null +++ b/test-harness/src/test/resources/integration/scenarios/legacy/retry.json @@ -0,0 +1,60 @@ +{ + "ownerApp": "junit_app", + "createTime": 1535063868376, + "updateTime": 1535063868556, + "status": "RUNNING", + "endTime": 0, + "workflowId": "045ec0b9-5f79-446b-9832-82918ec047aa", + "tasks": [ + { + "taskType": "junit_task_1", + "status": "SCHEDULED", + "inputData": { + "p1": "p1 value", + "p2": "p2 value" + }, + "referenceTaskName": "t1", + "retryCount": 0, + "seq": 1, + "correlationId": "unit_test_1d2488fa6-a508-49d5-9a88-82335a7d43fa", + "pollCount": 0, + "taskDefName": "junit_task_1", + "scheduledTime": 1535063868452, + "startTime": 0, + "endTime": 0, + "updateTime": 1535063868452, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": "045ec0b9-5f79-446b-9832-82918ec047aa", + "workflowType": "junit_test_wf", + "taskId": "80362582-3e22-4b5d-8635-6cc9c2824adf", + "callbackAfterSeconds": 0, + "workflowTask": { + "name": "junit_task_1", + "taskReferenceName": "t1", + "inputParameters": { + "p1": "${workflow.input.param1}", + "p2": "${workflow.input.param2}" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "SCHEDULED", + "queueWaitTime": 0 + } + ], + "input": { + "param1": "p1 value", + "param2": "p2 value" + }, + "workflowType": "junit_test_wf", + "version": 1, + "correlationId": "unit_test_1d2488fa6-a508-49d5-9a88-82335a7d43fa", + "schemaVersion": 2, + "startTime": 1535063868376 +} diff --git a/test-harness/src/test/resources/integration/scenarios/legacy/simpleWorkflowFailureWithTerminalError.json b/test-harness/src/test/resources/integration/scenarios/legacy/simpleWorkflowFailureWithTerminalError.json new file mode 100644 index 0000000000..b88a711691 --- /dev/null +++ b/test-harness/src/test/resources/integration/scenarios/legacy/simpleWorkflowFailureWithTerminalError.json @@ -0,0 +1,60 @@ +{ + "ownerApp": "junit_app", + "createTime": 1535068496317, + "updateTime": 1535068496495, + "status": "RUNNING", + "endTime": 0, + "workflowId": "WORKFLOW_INSTANCE_ID", + "tasks": [ + { + "taskType": "junit_task_1", + "status": "SCHEDULED", + "inputData": { + "p1": "p1 value", + "p2": "p2 value" + }, + "referenceTaskName": "t1", + "retryCount": 0, + "seq": 1, + "correlationId": "unit_test_1", + "pollCount": 0, + "taskDefName": "junit_task_1", + "scheduledTime": 1535068496389, + "startTime": 0, + "endTime": 0, + "updateTime": 1535068496389, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "junit_test_wf", + "taskId": "94c952ad-10c8-4b04-a236-586f57f44a0a", + "callbackAfterSeconds": 0, + "workflowTask": { + "name": "junit_task_1", + "taskReferenceName": "t1", + "inputParameters": { + "p1": "${workflow.input.param1}", + "p2": 
"${workflow.input.param2}" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "queueWaitTime": 0, + "taskStatus": "SCHEDULED" + } + ], + "input": { + "param1": "p1 value", + "param2": "p2 value" + }, + "workflowType": "junit_test_wf", + "version": 1, + "correlationId": "unit_test_1", + "schemaVersion": 2, + "startTime": 1535068496317 +} diff --git a/test-harness/src/test/resources/integration/scenarios/legacy/simpleWorkflowWithTaskSpecificDomain.json b/test-harness/src/test/resources/integration/scenarios/legacy/simpleWorkflowWithTaskSpecificDomain.json new file mode 100644 index 0000000000..d0ccd59237 --- /dev/null +++ b/test-harness/src/test/resources/integration/scenarios/legacy/simpleWorkflowWithTaskSpecificDomain.json @@ -0,0 +1,65 @@ +{ + "ownerApp": "junit_app", + "createTime": 1535067958883, + "updateTime": 1535067959010, + "status": "RUNNING", + "endTime": 0, + "workflowId": "WORKFLOW_INSTANCE_ID", + "tasks": [ + { + "taskType": "junit_task_3", + "status": "SCHEDULED", + "inputData": { + "p1": "p1 value", + "p2": "p2 value" + }, + "referenceTaskName": "t1", + "retryCount": 0, + "seq": 1, + "correlationId": "unit_test_sw", + "pollCount": 0, + "taskDefName": "junit_task_3", + "scheduledTime": 1535067959000, + "startTime": 0, + "endTime": 0, + "updateTime": 1535067959000, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "junit_test_wf_sw", + "taskId": "3ebec7c7-ba78-4983-85d4-3081886cd7f7", + "callbackAfterSeconds": 0, + "workflowTask": { + "name": "junit_task_3", + "taskReferenceName": "t1", + "inputParameters": { + "p1": "${workflow.input.param1}", + "p2": "${workflow.input.param2}" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + "domain": "domain1", + "rateLimitPerSecond": 0, + "taskStatus": "SCHEDULED", + "queueWaitTime": 0 + } + ], + "input": { + "param1": "p1 value", + "param2": "p2 value" + }, + "workflowType": "junit_test_wf_sw", + "version": 1, + "correlationId": "unit_test_sw", + "schemaVersion": 2, + "taskToDomain": { + "junit_task_3": "domain1", + "junit_task_2": "domain1" + }, + "startTime": 1535067958883 +} diff --git a/test-harness/src/test/resources/integration/scenarios/legacy/simpleWorkflowWithTasksInOneDomain.json b/test-harness/src/test/resources/integration/scenarios/legacy/simpleWorkflowWithTasksInOneDomain.json new file mode 100644 index 0000000000..26180163f6 --- /dev/null +++ b/test-harness/src/test/resources/integration/scenarios/legacy/simpleWorkflowWithTasksInOneDomain.json @@ -0,0 +1,64 @@ +{ + "ownerApp": "junit_app", + "createTime": 1535067060792, + "updateTime": 1535067060906, + "status": "RUNNING", + "endTime": 0, + "workflowId": "WORKFLOW_INSTANCE_ID", + "tasks": [ + { + "taskType": "junit_task_3", + "status": "SCHEDULED", + "inputData": { + "p1": "p1 value", + "p2": "p2 value" + }, + "referenceTaskName": "t1", + "retryCount": 0, + "seq": 1, + "correlationId": "unit_test_sw", + "pollCount": 0, + "taskDefName": "junit_task_3", + "scheduledTime": 1535067060896, + "startTime": 0, + "endTime": 0, + "updateTime": 1535067060896, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "junit_test_wf_sw", + "taskId": "06e596e1-dc12-4bd0-937c-dfde77bacbcf", + "callbackAfterSeconds": 0, + "workflowTask": 
{ + "name": "junit_task_3", + "taskReferenceName": "t1", + "inputParameters": { + "p1": "${workflow.input.param1}", + "p2": "${workflow.input.param2}" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + "domain": "domain11", + "rateLimitPerSecond": 0, + "taskStatus": "SCHEDULED", + "queueWaitTime": 0 + } + ], + "input": { + "param1": "p1 value", + "param2": "p2 value" + }, + "workflowType": "junit_test_wf_sw", + "version": 1, + "correlationId": "unit_test_sw", + "schemaVersion": 2, + "taskToDomain": { + "*": "domain11,, domain12" + }, + "startTime": 1535067060792 +} diff --git a/test-harness/src/test/resources/integration/scenarios/legacy/template_workflow.json b/test-harness/src/test/resources/integration/scenarios/legacy/template_workflow.json new file mode 100644 index 0000000000..0a06e7bd0f --- /dev/null +++ b/test-harness/src/test/resources/integration/scenarios/legacy/template_workflow.json @@ -0,0 +1,75 @@ +{ + "ownerApp": "junit_app", + "createTime": 1534983505050, + "updateTime": 1534983505131, + "status": "RUNNING", + "endTime": 0, + "workflowId": "WORKFLOW_INSTANCE_ID", + "tasks": [ + { + "taskType": "templated_task", + "status": "SCHEDULED", + "inputData": { + "http_request": { + "method": "GET", + "vipStack": "test_stack", + "body": { + "requestDetails": { + "key1": "value1", + "key2": 42 + }, + "outputPath": "s3://bucket/outputPath", + "inputPaths": [ + "file://path1", + "file://path2" + ] + }, + "uri": "/get/something" + } + }, + "referenceTaskName": "t0", + "retryCount": 0, + "seq": 1, + "correlationId": "testTaskDefTemplate", + "pollCount": 0, + "taskDefName": "templated_task", + "scheduledTime": 1534983505121, + "startTime": 0, + "endTime": 0, + "updateTime": 1534983505121, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "template_workflow", + "taskId": "9dea4567-0240-4eab-bde8-99f4535ea3fc", + "callbackAfterSeconds": 0, + "workflowTask": { + "name": "templated_task", + "taskReferenceName": "t0", + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "SCHEDULED", + "queueWaitTime": 0 + } + ], + "input": { + "path1": "file://path1", + "path2": "file://path2", + "requestDetails": { + "key1": "value1", + "key2": 42 + }, + "outputPath": "s3://bucket/outputPath" + }, + "workflowType": "template_workflow", + "version": 1, + "correlationId": "testTaskDefTemplate", + "schemaVersion": 2, + "startTime": 1534983505050 +} diff --git a/test-harness/src/test/resources/integration/scenarios/legacy/test_event.json b/test-harness/src/test/resources/integration/scenarios/legacy/test_event.json new file mode 100644 index 0000000000..26b0730e7f --- /dev/null +++ b/test-harness/src/test/resources/integration/scenarios/legacy/test_event.json @@ -0,0 +1,101 @@ +{ + "ownerApp": "junit_app", + "createTime": 1534988786979, + "updateTime": 1534988787169, + "status": "RUNNING", + "endTime": 0, + "workflowId": "WORKFLOW_INSTANCE_ID", + "tasks": [ + { + "taskType": "EVENT", + "status": "COMPLETED", + "inputData": { + "sink": "conductor" + }, + "referenceTaskName": "wait0", + "retryCount": 0, + "seq": 1, + "pollCount": 0, + "taskDefName": "eventX", + "scheduledTime": 1534988787043, + "startTime": 1534988787051, + "endTime": 1534988787169, + "updateTime": 1534988787169, + "startDelayInSeconds": 0, + "retried": false, + "executed": true, + "callbackFromWorker": true, + 
"responseTimeoutSeconds": 0, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "test_event", + "taskId": "4f39b091-03c5-469c-970f-1c2addec1571", + "callbackAfterSeconds": 0, + "outputData": { + "event_produced": "conductor:test_event:wait0", + "sink": "conductor", + "workflowType": "test_event", + "correlationId": null, + "workflowVersion": 1, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID" + }, + "workflowTask": { + "name": "eventX", + "taskReferenceName": "wait0", + "inputParameters": { + "sink": "conductor" + }, + "type": "EVENT", + "startDelay": 0, + "sink": "conductor", + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "COMPLETED", + "queueWaitTime": 8 + }, + { + "taskType": "junit_task_1", + "status": "SCHEDULED", + "referenceTaskName": "t1", + "retryCount": 0, + "seq": 2, + "pollCount": 0, + "taskDefName": "junit_task_1", + "scheduledTime": 1534988787163, + "startTime": 0, + "endTime": 0, + "updateTime": 1534988787163, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "test_event", + "taskId": "2d482701-9cae-4576-ab2c-91e096b5ed1c", + "callbackAfterSeconds": 0, + "workflowTask": { + "name": "junit_task_1", + "taskReferenceName": "t1", + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "SCHEDULED", + "queueWaitTime": 0 + } + ], + "output": { + "event_produced": "conductor:test_event:wait0", + "sink": "conductor", + "workflowType": "test_event", + "correlationId": null, + "workflowVersion": 1, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID" + }, + "workflowType": "test_event", + "version": 1, + "schemaVersion": 2, + "startTime": 1534988786979 +} diff --git a/test-harness/src/test/resources/integration/scenarios/legacy/test_wait.json b/test-harness/src/test/resources/integration/scenarios/legacy/test_wait.json new file mode 100644 index 0000000000..9a74441969 --- /dev/null +++ b/test-harness/src/test/resources/integration/scenarios/legacy/test_wait.json @@ -0,0 +1,46 @@ +{ + "ownerApp": "junit_app", + "createTime": 1534988907130, + "updateTime": 1534988907297, + "status": "RUNNING", + "endTime": 0, + "workflowId": "WORKFLOW_INSTANCE_ID", + "tasks": [ + { + "taskType": "WAIT", + "status": "IN_PROGRESS", + "referenceTaskName": "wait0", + "retryCount": 0, + "seq": 1, + "pollCount": 0, + "taskDefName": "wait", + "scheduledTime": 1534988907192, + "startTime": 1534988907200, + "endTime": 1534988907186, + "updateTime": 1534988907200, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 0, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "test_wait", + "taskId": "22052711-1039-4888-935a-5d0efa8b5bbf", + "callbackAfterSeconds": 0, + "workflowTask": { + "name": "wait", + "taskReferenceName": "wait0", + "type": "WAIT", + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "IN_PROGRESS", + "queueWaitTime": 8 + } + ], + "workflowType": "test_wait", + "version": 1, + "schemaVersion": 2, + "startTime": 1534988907130 +} diff --git a/test-harness/src/test/resources/integration/scenarios/legacy/timeout.json b/test-harness/src/test/resources/integration/scenarios/legacy/timeout.json new file mode 100644 index 0000000000..66298f0c9b --- /dev/null +++ b/test-harness/src/test/resources/integration/scenarios/legacy/timeout.json @@ -0,0 +1,61 @@ +{ + 
"ownerApp": "junit_app", + "createTime": 1535068128051, + "updateTime": 1535068128223, + "status": "RUNNING", + "endTime": 0, + "workflowId": "WORKFLOW_INSTANCE_ID", + "tasks": [ + { + "taskType": "junit_task_1", + "status": "SCHEDULED", + "inputData": { + "p1": "p1 value", + "p2": "p2 value" + }, + "referenceTaskName": "t1", + "retryCount": 0, + "seq": 1, + "correlationId": "unit_test_175105d0e-55a2-4bb0-bbf7-06e119de34fe", + "pollCount": 0, + "taskDefName": "junit_task_1", + "scheduledTime": 1535068128120, + "startTime": 0, + "endTime": 0, + "updateTime": 1535068128120, + "startDelayInSeconds": 0, + "retried": false, + "executed": false, + "callbackFromWorker": true, + "responseTimeoutSeconds": 3600, + "workflowInstanceId": "WORKFLOW_INSTANCE_ID", + "workflowType": "junit_test_wf", + "taskId": "9b9c93d2-fe29-4a75-8771-d8e71f3cde84", + "callbackAfterSeconds": 0, + "workflowTask": { + "name": "junit_task_1", + "taskReferenceName": "t1", + "inputParameters": { + "p1": "${workflow.input.param1}", + "p2": "${workflow.input.param2}" + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false + }, + "rateLimitPerSecond": 0, + "taskStatus": "SCHEDULED", + "queueWaitTime": 0 + } + ], + "input": { + "failureWfName": "FanInOutTest", + "param1": "p1 value", + "param2": "p2 value" + }, + "workflowType": "junit_test_wf", + "version": 1, + "correlationId": "unit_test_175105d0e-55a2-4bb0-bbf7-06e119de34fe", + "schemaVersion": 2, + "startTime": 1535068128051 +} diff --git a/ui/src/api/wfe.js b/ui/src/api/wfe.js index e3c67a381d..e449aead20 100644 --- a/ui/src/api/wfe.js +++ b/ui/src/api/wfe.js @@ -8,6 +8,7 @@ import transform from 'lodash/transform'; import identity from 'lodash/identity'; const router = new Router(); + const baseURL = process.env.WF_SERVER; const baseURL2 = baseURL + 'workflow/'; const baseURLMeta = baseURL + 'metadata/'; @@ -35,6 +36,7 @@ router.get('/', async (req, res, next) => { } let query = req.query.q; + const url = baseURL2 + 'search?size=100&sort=startTime:DESC&freeText=' + @@ -78,6 +80,7 @@ router.get('/search-by-task/:taskId', async (req, res, next) => { const url = baseURL2 + 'search-by-tasks?size=100&sort=startTime:DESC&freeText=' + freeText.join(' AND ') + '&start=' + start; const result = await http.get(url, req.token); + const hits = result.results; res.status(200).send({ result: { hits: hits, totalHits: result.totalHits } }); } catch (err) { @@ -86,12 +89,17 @@ router.get('/search-by-task/:taskId', async (req, res, next) => { }); router.get('/id/:workflowId', async (req, res, next) => { + try { const result = await http.get(baseURL2 + req.params.workflowId + '?includeTasks=true', req.token); - const meta = await http.get( - baseURLMeta + 'workflow/' + result.workflowType + '?version=' + result.version, - req.token - ); + + let meta = result.workflowDefinition; + if (!meta) { + meta = await http.get( + baseURLMeta + 'workflow/' + result.workflowType + '?version=' + result.version, + req.token + ); + } const subs = filter(identity)( map(task => { @@ -234,8 +242,8 @@ router.post('/retry/:workflowId', async (req, res, next) => { router.post('/pause/:workflowId', async (req, res, next) => { try { - const result = await http.put(baseURL2 + req.params.workflowId + '/pause', {}, req.token); - res.status(200).send({ result: req.params.workflowId }); + const result = await http.put(baseURL2 + req.params.workflowId + '/pause'); + res.status(200).send({result: req.params.workflowId }); } catch (err) { next(err); } diff --git a/versionsOfDependencies.gradle 
b/versionsOfDependencies.gradle index c964ac0c98..acf4694af5 100644 --- a/versionsOfDependencies.gradle +++ b/versionsOfDependencies.gradle @@ -9,38 +9,43 @@ ext { revDynoCore = '1.6.4' revDynoJedis = '1.6.4' revDynoQueues = '2.0.0-rc3' - revElasticSearch2 = '2.4.6' revElasticSearch5 = '5.6.8' revElasticSearch5Client = '5.6.8' revEurekaClient = '1.8.7' revFlywayCore ='4.0.3' + revGrpc = '1.14.+' revGuavaRetrying = '2.0.0' revGuice = '4.1.0' revGuiceMultiBindings = '4.1.0' revGuiceServlet = '4.1.0' + revHealth = '1.1.+' revHikariCP = '3.2.0' revJsonPath = '2.2.0' revJaxrsJackson = '2.7.5' + revJavaxInject = '1' revJacksonCore = '2.7.5' revJacksonDatabind = '2.7.5' revJedis = '2.9.0' revJerseyBundle = '1.19.1' revJerseyClient = '1.19.4' revJerseyGuice = '1.19.4' - revJUnit = '4.10' + revJUnit = '4.12' revJsr311Api = '1.1.1' revJq = '0.0.8' revLog4jApi = '2.9.1' revLog4jCore = '2.9.1' revMariaDB4j = '2.2.3' revRxJava = '1.2.2' - revMockito = '1.10.0' + revMockito = '1.10.19' revMySqlConnector = '8.0.11' revNatsStreaming = '0.5.0' revJetteyServer = '9.3.9.v20160517' revJettyServlet = '9.3.9.v20160517' revOauthClient = '1.19.4' revOauthSignature = '1.19.4' + revProtoBuf = '3.5.1' + revProtogenAnnotations = '1.0.0' + revProtogenCodegen = '1.2.0' revRarefiedRedis = '0.0.17' revServo = '0.12.17' revServletApi = '3.1.0'