/* Copyright 2023 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package errors

type RetriableError struct {
	err error
}

func (e *RetriableError) Error() string {
	if e.err != nil {
		return "retriable error occurred: " + e.err.Error()
	}
	return "retriable error occurred"
}

func (e *RetriableError) Unwrap() error {
	if e == nil {
		return nil
	}
	return e.err
}

// NewRetriable returns a RetriableError wrapping an existing context error.
func NewRetriable(err error) *RetriableError {
	return &RetriableError{
		err: err,
	}
}
[mikeee/dapr] pkg/runtime/errors/retriable.go (Go, MIT, 1,033 bytes)
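A short illustrative sketch (not part of the repository) of how a caller might detect a RetriableError with the standard errors package; the process function and its failure are hypothetical.

package main

import (
	"errors"
	"fmt"

	runtimeerrors "github.com/dapr/dapr/pkg/runtime/errors"
)

func process() error {
	// Wrap a transient failure so callers know it is safe to retry.
	return runtimeerrors.NewRetriable(errors.New("connection reset"))
}

func main() {
	err := process()
	var retriable *runtimeerrors.RetriableError
	if errors.As(err, &retriable) {
		// Unwrap exposes the underlying cause to errors.Is/errors.As.
		fmt.Println("retrying after:", errors.Unwrap(err))
	}
}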
/* Copyright 2021 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package errors

import "testing"

func TestRetriableError(t *testing.T) {
	var _ error = new(RetriableError)
}
[mikeee/dapr] pkg/runtime/errors/retriable_test.go (Go, MIT, 672 bytes)
/* Copyright 2023 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package differ

import (
	"reflect"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	componentsapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	subapi "github.com/dapr/dapr/pkg/apis/subscriptions/v2alpha1"
	"github.com/dapr/dapr/pkg/components/secretstores"
	"github.com/dapr/dapr/pkg/runtime/meta"
	"github.com/dapr/dapr/pkg/runtime/processor/wfbackend"
)

var wfengineComp = wfbackend.ComponentDefinition()

// Resource is a generic type constraint.
type Resource interface {
	componentsapi.Component | subapi.Subscription

	meta.Resource
}

type Result[T Resource] struct {
	Deleted []T
	Updated []T
	Created []T
}

type LocalRemoteResources[T Resource] struct {
	Local  []T
	Remote []T
}

// Diff returns the difference between the local and remote resources of the
// given kind.
func Diff[T Resource](resources *LocalRemoteResources[T]) *Result[T] {
	if resources == nil || (len(resources.Local) == 0 && len(resources.Remote) == 0) {
		return nil
	}

	// missing are the resources which exist remotely but which don't exist
	// locally or have changed.
	missing := detectDiff(resources.Local, resources.Remote, nil)

	// deleted are the resources which exist locally but which don't exist
	// remotely or have changed.
	deleted := detectDiff(resources.Remote, resources.Local, func(r T) bool {
		if comp, ok := any(r).(componentsapi.Component); ok {
			// Ignore the built-in Kubernetes secret store and workflow engine.
			if comp.Name == secretstores.BuiltinKubernetesSecretStore && comp.Spec.Type == "secretstores.kubernetes" {
				return true
			}
			if comp.Name == wfengineComp.Name && comp.Spec.Type == wfengineComp.Spec.Type {
				return true
			}
		}
		return false
	})

	var result Result[T]
	for i := range deleted {
		if _, ok := missing[deleted[i].GetName()]; !ok {
			result.Deleted = append(result.Deleted, deleted[i])
		}
	}

	for i := range missing {
		if _, ok := deleted[missing[i].GetName()]; ok {
			result.Updated = append(result.Updated, missing[i])
		} else {
			result.Created = append(result.Created, missing[i])
		}
	}

	return &result
}

// detectDiff returns a map from resource name to resource for every resource
// in target which does not exist in base (or has changed).
// If skipTarget is not nil, it is called on each target resource; when it
// returns true, that resource is skipped and never checked against base.
func detectDiff[T Resource](base, target []T, skipTarget func(T) bool) map[string]T {
	notExist := make(map[string]T)
	for i := range target {
		if skipTarget != nil && skipTarget(target[i]) {
			continue
		}

		found := false
		for _, tt := range base {
			if AreSame(target[i], tt) {
				found = true
				break
			}
		}
		if !found {
			notExist[target[i].GetName()] = target[i]
		}
	}

	return notExist
}

// AreSame returns true if the resources have the same functional spec.
func AreSame[T Resource](r1, r2 T) bool {
	return reflect.DeepEqual(toComparableObj(r1), toComparableObj(r2))
}

// toComparableObj returns the object with the values stripped out which
// should not be compared, as they don't change the spec of the resource.
func toComparableObj[T Resource](r T) metav1.Object {
	return r.EmptyMetaDeepCopy()
}
[mikeee/dapr] pkg/runtime/hotreload/differ/differ.go (Go, MIT, 3,841 bytes)
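An illustrative sketch (not from the repository) of driving Diff with hypothetical local and remote slices of Components; the reconcile function is invented for the example.

package main

import (
	"fmt"

	compapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	"github.com/dapr/dapr/pkg/runtime/hotreload/differ"
)

func reconcile(local, remote []compapi.Component) {
	result := differ.Diff(&differ.LocalRemoteResources[compapi.Component]{
		Local:  local,  // resources currently loaded in the runtime
		Remote: remote, // resources read from the source of truth
	})
	if result == nil {
		return // nothing given, so nothing to reconcile
	}
	fmt.Printf("deleted=%d updated=%d created=%d\n",
		len(result.Deleted), len(result.Updated), len(result.Created))
}

func main() {
	reconcile(nil, []compapi.Component{{}})
}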
/* Copyright 2023 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

//nolint:makezero
package differ

import (
	"testing"

	fuzz "github.com/google/gofuzz"
	"github.com/stretchr/testify/assert"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	componentsapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
)

func Test_toComparableObj(t *testing.T) {
	t.Parallel()

	const numCases = 500

	components := make([]componentsapi.Component, numCases)
	fz := fuzz.New()
	for i := 0; i < numCases; i++ {
		fz.Fuzz(&components[i])
	}

	for i := 0; i < numCases; i++ {
		t.Run("Component", func(t *testing.T) {
			compWithoutObject := components[i].DeepCopy()
			compWithoutObject.ObjectMeta = metav1.ObjectMeta{
				Name: components[i].Name,
			}
			compWithoutObject.TypeMeta = metav1.TypeMeta{
				Kind:       "Component",
				APIVersion: "dapr.io/v1alpha1",
			}
			assert.Equal(t, compWithoutObject, toComparableObj[componentsapi.Component](components[i]))
		})
	}
}

func Test_AreSame(t *testing.T) {
	t.Parallel()

	const numCases = 250

	components := make([]componentsapi.Component, numCases)
	componentsDiff := make([]componentsapi.Component, numCases)
	fz := fuzz.New()
	for i := 0; i < numCases; i++ {
		fz.Fuzz(&components[i])
		fz.Fuzz(&componentsDiff[i])
	}

	for i := 0; i < numCases; i++ {
		t.Run("Exact same resource should always return true", func(t *testing.T) {
			t.Run("Component", func(t *testing.T) {
				comp1 := components[i]
				comp2 := comp1.DeepCopy()
				assert.True(t, AreSame[componentsapi.Component](comp1, *comp2))
			})
		})

		t.Run("Same resource but with different Object&Type meta (same name) should return true", func(t *testing.T) {
			t.Run("Component", func(t *testing.T) {
				comp1 := components[i]
				comp2 := comp1.DeepCopy()
				fz.Fuzz(&comp2.ObjectMeta)
				fz.Fuzz(&comp2.TypeMeta)
				comp2.Name = comp1.Name
				assert.True(t, AreSame[componentsapi.Component](comp1, *comp2))
			})
		})

		t.Run("Different resources should return false", func(t *testing.T) {
			t.Run("Component", func(t *testing.T) {
				comp1 := components[i]
				comp2 := componentsDiff[i]
				assert.False(t, AreSame[componentsapi.Component](comp1, comp2))
			})
		})
	}
}

func Test_detectDiff(t *testing.T) {
	t.Parallel()

	const numCases = 100

	components := make([]componentsapi.Component, numCases)
	componentsDiff := make([]componentsapi.Component, numCases)
	fz := fuzz.New()
	for i := 0; i < numCases; i++ {
		fz.Fuzz(&components[i])
		fz.Fuzz(&componentsDiff[i])
	}

	t.Run("If resources are the same then expect a map of the same resources returned", func(t *testing.T) {
		t.Parallel()
		t.Run("Component", func(t *testing.T) {
			expSameComponents := make(map[string]componentsapi.Component)
			assert.Equal(t, expSameComponents, detectDiff[componentsapi.Component](components, components, nil))
		})
	})

	t.Run("If resources are the same with a check returning false then expect a map of the same resources returned", func(t *testing.T) {
		t.Parallel()
		t.Run("Component", func(t *testing.T) {
			expSameComponents := make(map[string]componentsapi.Component)
			assert.Equal(t, expSameComponents, detectDiff[componentsapi.Component](components, components, func(componentsapi.Component) bool { return false }))
		})
	})

	t.Run("If resources are the same with a check returning true then expect a map of the same resources returned", func(t *testing.T) {
		t.Parallel()
		t.Run("Component", func(t *testing.T) {
			expSameComponents := make(map[string]componentsapi.Component)
			assert.Equal(t, expSameComponents, detectDiff[componentsapi.Component](components, components, func(componentsapi.Component) bool { return true }))
		})
	})

	t.Run("Should return the different resources which don't exist in the target", func(t *testing.T) {
		t.Parallel()
		t.Run("Component", func(t *testing.T) {
			expDiffComponents := make(map[string]componentsapi.Component)
			for i := 0; i < numCases; i++ {
				expDiffComponents[componentsDiff[i].Name] = componentsDiff[i]
			}
			assert.Equal(t, expDiffComponents, detectDiff[componentsapi.Component](components, append(components, componentsDiff...), nil))
			assert.Equal(t, expDiffComponents, detectDiff[componentsapi.Component](components, append(componentsDiff, components...), nil))
		})
	})

	t.Run("Should not return resources if they exist in base, but not the target", func(t *testing.T) {
		t.Parallel()
		t.Run("Component", func(t *testing.T) {
			expDiffComponents := make(map[string]componentsapi.Component)
			assert.Equal(t, expDiffComponents, detectDiff[componentsapi.Component](append(components, componentsDiff...), components, nil))
			assert.Equal(t, expDiffComponents, detectDiff[componentsapi.Component](append(componentsDiff, components...), components, nil))
		})
	})

	t.Run("Should not return resources if they exist in the target and not base, but are skipped on check", func(t *testing.T) {
		t.Parallel()
		t.Run("Component", func(t *testing.T) {
			t.Parallel()
			expDiffComponents := make(map[string]componentsapi.Component)
			var j int
			assert.Equal(t, expDiffComponents, detectDiff[componentsapi.Component](components, append(components, componentsDiff...), func(c componentsapi.Component) bool {
				if j < len(components) {
					assert.Equal(t, components[j], c)
				} else {
					assert.Equal(t, componentsDiff[j-len(components)], c)
				}
				j++
				return true
			}))

			j = 0
			assert.Equal(t, expDiffComponents, detectDiff[componentsapi.Component](components, append(componentsDiff, components...), func(c componentsapi.Component) bool {
				if j < len(components) {
					assert.Equal(t, componentsDiff[j], c)
				} else {
					assert.Equal(t, components[j-len(components)], c)
				}
				j++
				return true
			}))
		})
	})
}

func Test_Diff(t *testing.T) {
	t.Parallel()

	const numCases = 100

	components := make([]componentsapi.Component, numCases)
	componentsDiff1 := make([]componentsapi.Component, numCases)
	componentsDiff2 := make([]componentsapi.Component, numCases)

	takenNames := make(map[string]bool)
	forCh := func(name string) bool {
		ok := len(name) == 0 || takenNames[name]
		takenNames[name] = true
		return ok
	}

	fz := fuzz.New()
	for i := 0; i < numCases; i++ {
		for forCh(components[i].Name) {
			fz.Fuzz(&components[i])
		}
		for forCh(componentsDiff1[i].Name) {
			fz.Fuzz(&componentsDiff1[i])
		}
		for forCh(componentsDiff2[i].Name) {
			fz.Fuzz(&componentsDiff2[i])
		}
	}

	t.Run("if no resources given, return nil", func(t *testing.T) {
		t.Parallel()
		t.Run("Component", func(t *testing.T) {
			assert.Nil(t, Diff[componentsapi.Component](nil))
			assert.Nil(t, Diff[componentsapi.Component](&LocalRemoteResources[componentsapi.Component]{
				Local:  nil,
				Remote: nil,
			}))
		})
	})

	t.Run("if no remote, expect all local to be deleted", func(t *testing.T) {
		t.Parallel()
		t.Run("Component", func(t *testing.T) {
			resp := Diff[componentsapi.Component](&LocalRemoteResources[componentsapi.Component]{
				Local:  components,
				Remote: nil,
			})
			assert.ElementsMatch(t, components, resp.Deleted)
			assert.ElementsMatch(t, nil, resp.Updated)
			assert.ElementsMatch(t, nil, resp.Created)
		})
	})

	t.Run("if no local, expect all remote to be created", func(t *testing.T) {
		t.Parallel()
		t.Run("Component", func(t *testing.T) {
			resp := Diff[componentsapi.Component](&LocalRemoteResources[componentsapi.Component]{
				Local:  nil,
				Remote: components,
			})
			assert.ElementsMatch(t, nil, resp.Deleted)
			assert.ElementsMatch(t, nil, resp.Updated)
			assert.ElementsMatch(t, components, resp.Created)
		})
	})

	t.Run("if local and remote completely different, expect both created and deleted", func(t *testing.T) {
		t.Parallel()
		t.Run("Component", func(t *testing.T) {
			resp := Diff[componentsapi.Component](&LocalRemoteResources[componentsapi.Component]{
				Local:  componentsDiff1,
				Remote: components,
			})
			assert.ElementsMatch(t, componentsDiff1, resp.Deleted)
			assert.ElementsMatch(t, nil, resp.Updated)
			assert.ElementsMatch(t, components, resp.Created)
		})
	})

	t.Run("if local and remote share some resources, they should be omitted from the result", func(t *testing.T) {
		t.Parallel()
		t.Run("Component", func(t *testing.T) {
			resp := Diff[componentsapi.Component](&LocalRemoteResources[componentsapi.Component]{
				Local:  append(componentsDiff2, componentsDiff1...),
				Remote: append(components, componentsDiff2...),
			})
			assert.ElementsMatch(t, componentsDiff1, resp.Deleted)
			assert.ElementsMatch(t, nil, resp.Updated)
			assert.ElementsMatch(t, components, resp.Created)
		})
	})

	t.Run("should not mark components as deleted if they are in the reserved skipped set", func(t *testing.T) {
		t.Parallel()
		t.Run("Component", func(t *testing.T) {
			resp := Diff[componentsapi.Component](&LocalRemoteResources[componentsapi.Component]{
				Local: append(append(componentsDiff2, componentsDiff1...), []componentsapi.Component{
					{
						ObjectMeta: metav1.ObjectMeta{Name: "kubernetes"},
						Spec:       componentsapi.ComponentSpec{Type: "secretstores.kubernetes"},
					},
					{
						ObjectMeta: metav1.ObjectMeta{Name: "dapr"},
						Spec:       componentsapi.ComponentSpec{Type: "workflow.dapr"},
					},
				}...),
				Remote: append(components, componentsDiff2...),
			})
			assert.ElementsMatch(t, componentsDiff1, resp.Deleted)
			assert.ElementsMatch(t, nil, resp.Updated)
			assert.ElementsMatch(t, components, resp.Created)
		})
	})

	t.Run("if local and remote share the same names, then should be updated with remote", func(t *testing.T) {
		t.Parallel()
		t.Run("Component", func(t *testing.T) {
			remote := make([]componentsapi.Component, len(components))
			for i := 0; i < len(components); i++ {
				comp := componentsDiff1[i].DeepCopy()
				comp.Name = components[i].Name
				remote[i] = *comp
			}

			resp := Diff[componentsapi.Component](&LocalRemoteResources[componentsapi.Component]{
				Local:  components,
				Remote: remote,
			})
			assert.ElementsMatch(t, nil, resp.Deleted)
			assert.ElementsMatch(t, remote, resp.Updated)
			assert.ElementsMatch(t, nil, resp.Created)
		})
	})

	t.Run("has deleted, updated, created, with reserved skipped resources", func(t *testing.T) {
		t.Parallel()
		t.Run("Component", func(t *testing.T) {
			remote := make([]componentsapi.Component, len(components)/2)
			for i := 0; i < len(components)/2; i++ {
				comp := componentsDiff1[i].DeepCopy()
				comp.Name = components[i].Name
				remote[i] = *comp
			}

			resp := Diff[componentsapi.Component](&LocalRemoteResources[componentsapi.Component]{
				Local: append(components, []componentsapi.Component{
					{
						ObjectMeta: metav1.ObjectMeta{Name: "kubernetes"},
						Spec:       componentsapi.ComponentSpec{Type: "secretstores.kubernetes"},
					},
					{
						ObjectMeta: metav1.ObjectMeta{Name: "dapr"},
						Spec:       componentsapi.ComponentSpec{Type: "workflow.dapr"},
					},
				}...),
				Remote: append(remote, componentsDiff2...),
			})
			assert.ElementsMatch(t, components[len(components)/2:], resp.Deleted)
			assert.ElementsMatch(t, remote, resp.Updated)
			assert.ElementsMatch(t, componentsDiff2, resp.Created)
		})
	})

	t.Run("a component which changes spec type should be in updated", func(t *testing.T) {
		t.Parallel()
		t.Run("Component", func(t *testing.T) {
			resp := Diff[componentsapi.Component](&LocalRemoteResources[componentsapi.Component]{
				Local: []componentsapi.Component{{
					ObjectMeta: metav1.ObjectMeta{Name: "foo"},
					Spec:       componentsapi.ComponentSpec{Type: "secretstores.in-memory"},
				}},
				Remote: []componentsapi.Component{{
					ObjectMeta: metav1.ObjectMeta{Name: "foo"},
					Spec:       componentsapi.ComponentSpec{Type: "secretstores.sqlite"},
				}},
			})
			assert.ElementsMatch(t, nil, resp.Deleted)
			assert.ElementsMatch(t, []componentsapi.Component{{
				ObjectMeta: metav1.ObjectMeta{Name: "foo"},
				Spec:       componentsapi.ComponentSpec{Type: "secretstores.sqlite"},
			}}, resp.Updated)
			assert.ElementsMatch(t, nil, resp.Created)
		})
	})
}
[mikeee/dapr] pkg/runtime/hotreload/differ/differ_test.go (Go, MIT, 12,600 bytes)
/* Copyright 2023 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package hotreload

import (
	"context"

	compapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	subapi "github.com/dapr/dapr/pkg/apis/subscriptions/v2alpha1"
	"github.com/dapr/dapr/pkg/config"
	operatorv1 "github.com/dapr/dapr/pkg/proto/operator/v1"
	"github.com/dapr/dapr/pkg/runtime/authorizer"
	"github.com/dapr/dapr/pkg/runtime/compstore"
	"github.com/dapr/dapr/pkg/runtime/hotreload/loader"
	"github.com/dapr/dapr/pkg/runtime/hotreload/loader/disk"
	"github.com/dapr/dapr/pkg/runtime/hotreload/loader/operator"
	"github.com/dapr/dapr/pkg/runtime/hotreload/reconciler"
	"github.com/dapr/dapr/pkg/runtime/processor"
	"github.com/dapr/kit/concurrency"
	"github.com/dapr/kit/logger"
)

var log = logger.NewLogger("dapr.runtime.hotreload")

type OptionsReloaderDisk struct {
	Config         *config.Configuration
	AppID          string
	Dirs           []string
	ComponentStore *compstore.ComponentStore
	Authorizer     *authorizer.Authorizer
	Processor      *processor.Processor
}

type OptionsReloaderOperator struct {
	PodName        string
	Namespace      string
	Client         operatorv1.OperatorClient
	Config         *config.Configuration
	ComponentStore *compstore.ComponentStore
	Authorizer     *authorizer.Authorizer
	Processor      *processor.Processor
}

type Reloader struct {
	isEnabled               bool
	loader                  loader.Interface
	componentsReconciler    *reconciler.Reconciler[compapi.Component]
	subscriptionsReconciler *reconciler.Reconciler[subapi.Subscription]
}

func NewDisk(opts OptionsReloaderDisk) (*Reloader, error) {
	loader, err := disk.New(disk.Options{
		AppID:          opts.AppID,
		Dirs:           opts.Dirs,
		ComponentStore: opts.ComponentStore,
	})
	if err != nil {
		return nil, err
	}

	return &Reloader{
		isEnabled: opts.Config.IsFeatureEnabled(config.HotReload),
		loader:    loader,
		componentsReconciler: reconciler.NewComponents(reconciler.Options[compapi.Component]{
			Loader:     loader,
			CompStore:  opts.ComponentStore,
			Processor:  opts.Processor,
			Authorizer: opts.Authorizer,
		}),
		subscriptionsReconciler: reconciler.NewSubscriptions(reconciler.Options[subapi.Subscription]{
			Loader:    loader,
			CompStore: opts.ComponentStore,
			Processor: opts.Processor,
		}),
	}, nil
}

func NewOperator(opts OptionsReloaderOperator) *Reloader {
	loader := operator.New(operator.Options{
		PodName:        opts.PodName,
		Namespace:      opts.Namespace,
		ComponentStore: opts.ComponentStore,
		OperatorClient: opts.Client,
	})

	return &Reloader{
		isEnabled: opts.Config.IsFeatureEnabled(config.HotReload),
		loader:    loader,
		componentsReconciler: reconciler.NewComponents(reconciler.Options[compapi.Component]{
			Loader:     loader,
			CompStore:  opts.ComponentStore,
			Processor:  opts.Processor,
			Authorizer: opts.Authorizer,
		}),
		subscriptionsReconciler: reconciler.NewSubscriptions(reconciler.Options[subapi.Subscription]{
			Loader:    loader,
			CompStore: opts.ComponentStore,
			Processor: opts.Processor,
		}),
	}
}

func (r *Reloader) Run(ctx context.Context) error {
	if !r.isEnabled {
		log.Debug("Hot reloading disabled")
		<-ctx.Done()
		return nil
	}

	log.Info("Hot reloading enabled. Daprd will reload 'Component' and 'Subscription' resources on change.")

	return concurrency.NewRunnerManager(
		r.loader.Run,
		r.componentsReconciler.Run,
		r.subscriptionsReconciler.Run,
	).Run(ctx)
}
[mikeee/dapr] pkg/runtime/hotreload/hotreload.go (Go, MIT, 3,958 bytes)
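A minimal sketch (not part of the repository) of wiring the disk-based reloader into a caller; it assumes the config, component store, authorizer, and processor were constructed elsewhere, and the app ID and directory are hypothetical.

package example

import (
	"context"

	"github.com/dapr/dapr/pkg/config"
	"github.com/dapr/dapr/pkg/runtime/authorizer"
	"github.com/dapr/dapr/pkg/runtime/compstore"
	"github.com/dapr/dapr/pkg/runtime/hotreload"
	"github.com/dapr/dapr/pkg/runtime/processor"
)

func runHotReload(ctx context.Context, cfg *config.Configuration, store *compstore.ComponentStore, auth *authorizer.Authorizer, proc *processor.Processor) error {
	reloader, err := hotreload.NewDisk(hotreload.OptionsReloaderDisk{
		Config:         cfg,
		AppID:          "my-app",                 // hypothetical app ID
		Dirs:           []string{"./components"}, // hypothetical resources directory
		ComponentStore: store,
		Authorizer:     auth,
		Processor:      proc,
	})
	if err != nil {
		return err
	}
	// Run blocks until ctx is cancelled; when the HotReload feature flag is
	// disabled it only blocks, reloading nothing.
	return reloader.Run(ctx)
}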
/* Copyright 2023 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package disk

import (
	"context"
	"fmt"
	"strings"
	"time"

	compapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	subapi "github.com/dapr/dapr/pkg/apis/subscriptions/v2alpha1"
	loaderdisk "github.com/dapr/dapr/pkg/internal/loader/disk"
	"github.com/dapr/dapr/pkg/runtime/compstore"
	"github.com/dapr/dapr/pkg/runtime/hotreload/loader"
	"github.com/dapr/dapr/pkg/runtime/hotreload/loader/store"
	"github.com/dapr/kit/concurrency"
	"github.com/dapr/kit/events/batcher"
	"github.com/dapr/kit/fswatcher"
	"github.com/dapr/kit/logger"
	"github.com/dapr/kit/ptr"
)

var log = logger.NewLogger("dapr.runtime.hotreload.loader.disk")

type Options struct {
	AppID          string
	Dirs           []string
	ComponentStore *compstore.ComponentStore
}

type disk struct {
	components    *resource[compapi.Component]
	subscriptions *resource[subapi.Subscription]
	fs            *fswatcher.FSWatcher
	batcher       *batcher.Batcher[int, struct{}]
}

func New(opts Options) (loader.Interface, error) {
	log.Infof("Watching directories: [%s]", strings.Join(opts.Dirs, ", "))

	fs, err := fswatcher.New(fswatcher.Options{
		Targets:  opts.Dirs,
		Interval: ptr.Of(time.Millisecond * 200),
	})
	if err != nil {
		return nil, fmt.Errorf("failed to create watcher: %w", err)
	}

	batcher := batcher.New[int, struct{}](0)

	return &disk{
		fs: fs,
		components: newResource[compapi.Component](
			resourceOptions[compapi.Component]{
				loader: loaderdisk.NewComponents(loaderdisk.Options{
					AppID: opts.AppID,
					Paths: opts.Dirs,
				}),
				store:   store.NewComponents(opts.ComponentStore),
				batcher: batcher,
			},
		),
		subscriptions: newResource[subapi.Subscription](
			resourceOptions[subapi.Subscription]{
				loader: loaderdisk.NewSubscriptions(loaderdisk.Options{
					AppID: opts.AppID,
					Paths: opts.Dirs,
				}),
				store:   store.NewSubscriptions(opts.ComponentStore),
				batcher: batcher,
			},
		),
		batcher: batcher,
	}, nil
}

func (d *disk) Run(ctx context.Context) error {
	eventCh := make(chan struct{})

	return concurrency.NewRunnerManager(
		d.components.run,
		d.subscriptions.run,
		func(ctx context.Context) error {
			return d.fs.Run(ctx, eventCh)
		},
		func(ctx context.Context) error {
			defer d.batcher.Close()

			var i int
			for {
				select {
				case <-ctx.Done():
					return nil
				case <-eventCh:
					// Use a separate index for every batch to prevent deduplication of
					// separate file updates happening at the same time.
					i++
					d.batcher.Batch(i, struct{}{})
				}
			}
		},
	).Run(ctx)
}

func (d *disk) Components() loader.Loader[compapi.Component] {
	return d.components
}

func (d *disk) Subscriptions() loader.Loader[subapi.Subscription] {
	return d.subscriptions
}
[mikeee/dapr] pkg/runtime/hotreload/loader/disk/disk.go (Go, MIT, 3,289 bytes)
/* Copyright 2023 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package disk

import (
	"context"
	"fmt"
	"sync"
	"sync/atomic"

	internalloader "github.com/dapr/dapr/pkg/internal/loader"
	operatorpb "github.com/dapr/dapr/pkg/proto/operator/v1"
	"github.com/dapr/dapr/pkg/runtime/hotreload/differ"
	"github.com/dapr/dapr/pkg/runtime/hotreload/loader"
	"github.com/dapr/dapr/pkg/runtime/hotreload/loader/store"
	"github.com/dapr/kit/events/batcher"
)

// resource is a generic implementation of a disk resource loader. resource
// will watch and load resources from disk.
type resource[T differ.Resource] struct {
	sourceBatcher *batcher.Batcher[int, struct{}]
	streamBatcher *batcher.Batcher[int, struct{}]
	store         store.Store[T]
	diskLoader    internalloader.Loader[T]

	lock          sync.RWMutex
	currentResult *differ.Result[T]

	wg      sync.WaitGroup
	running chan struct{}
	closeCh chan struct{}
	closed  atomic.Bool
}

type resourceOptions[T differ.Resource] struct {
	loader  internalloader.Loader[T]
	store   store.Store[T]
	batcher *batcher.Batcher[int, struct{}]
}

func newResource[T differ.Resource](opts resourceOptions[T]) *resource[T] {
	return &resource[T]{
		sourceBatcher: opts.batcher,
		store:         opts.store,
		diskLoader:    opts.loader,
		streamBatcher: batcher.New[int, struct{}](0),
		running:       make(chan struct{}),
		closeCh:       make(chan struct{}),
	}
}

// List returns the current list of resources loaded from disk.
func (r *resource[T]) List(ctx context.Context) (*differ.LocalRemoteResources[T], error) {
	remotes, err := r.diskLoader.Load(ctx)
	if err != nil {
		return nil, err
	}

	return &differ.LocalRemoteResources[T]{
		Local:  r.store.List(),
		Remote: remotes,
	}, nil
}

// Stream returns a channel of events that will be sent when a resource is
// created, updated, or deleted.
func (r *resource[T]) Stream(ctx context.Context) (*loader.StreamConn[T], error) {
	conn := &loader.StreamConn[T]{
		EventCh:     make(chan *loader.Event[T]),
		ReconcileCh: make(chan struct{}),
	}

	batchCh := make(chan struct{})
	r.streamBatcher.Subscribe(ctx, batchCh)

	r.wg.Add(1)
	go func() {
		defer r.wg.Done()
		for {
			select {
			case <-ctx.Done():
				return
			case <-r.closeCh:
				return
			case <-batchCh:
				r.triggerDiff(ctx, conn)
			}
		}
	}()

	return conn, nil
}

func (r *resource[T]) triggerDiff(ctx context.Context, conn *loader.StreamConn[T]) {
	r.lock.RLock()
	defer r.lock.RUnlock()

	// Each group is a list of resources which have been created, updated, or
	// deleted. It is critical that we send the events in the order of deleted,
	// updated, and created. This ensures we close before initializing a
	// resource with the same name.
	for _, group := range []struct {
		resources []T
		eventType operatorpb.ResourceEventType
	}{
		{r.currentResult.Deleted, operatorpb.ResourceEventType_DELETED},
		{r.currentResult.Updated, operatorpb.ResourceEventType_UPDATED},
		{r.currentResult.Created, operatorpb.ResourceEventType_CREATED},
	} {
		for _, resource := range group.resources {
			select {
			case conn.EventCh <- &loader.Event[T]{
				Resource: resource,
				Type:     group.eventType,
			}:
			case <-r.closeCh:
				return
			case <-ctx.Done():
				return
			}
		}
	}
}

func (r *resource[T]) run(ctx context.Context) error {
	defer func() {
		if r.closed.CompareAndSwap(false, true) {
			close(r.closeCh)
		}
		r.streamBatcher.Close()
		r.wg.Wait()
	}()

	updateCh := make(chan struct{})
	r.sourceBatcher.Subscribe(ctx, updateCh)

	close(r.running)

	var i int
	for {
		select {
		case <-ctx.Done():
			return nil
		case <-r.closeCh:
			return nil
		case <-updateCh:
		}

		// List the resources which exist locally (those loaded already), and
		// those which reside in a resource file on disk.
		resources, err := r.List(ctx)
		if err != nil {
			return fmt.Errorf("failed to load resources from disk: %s", err)
		}

		// Reconcile the differences between what we have loaded locally, and
		// what exists on disk.
		result := differ.Diff(resources)
		r.lock.Lock()
		r.currentResult = result
		r.lock.Unlock()

		if result == nil {
			continue
		}

		// Use a separate index for every batch to prevent deduplication of
		// separate file updates happening at the same time.
		i++
		r.streamBatcher.Batch(i, struct{}{})
	}
}
[mikeee/dapr] pkg/runtime/hotreload/loader/disk/resource.go (Go, MIT, 4,801 bytes)
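A sketch (not from the repository) of what a StreamConn consumer might look like. Events for a batch arrive strictly in deleted, updated, created order, so a consumer can close an old resource before a replacement with the same name is initialized; the handler bodies below are placeholders.

package example

import (
	"context"

	operatorpb "github.com/dapr/dapr/pkg/proto/operator/v1"
	"github.com/dapr/dapr/pkg/runtime/hotreload/differ"
	"github.com/dapr/dapr/pkg/runtime/hotreload/loader"
)

func consume[T differ.Resource](ctx context.Context, conn *loader.StreamConn[T]) {
	for {
		select {
		case <-ctx.Done():
			return
		case ev := <-conn.EventCh:
			switch ev.Type {
			case operatorpb.ResourceEventType_DELETED:
				// Close the running resource.
			case operatorpb.ResourceEventType_UPDATED:
				// Close, then re-initialize with the new spec.
			case operatorpb.ResourceEventType_CREATED:
				// Initialize the new resource.
			}
		case <-conn.ReconcileCh:
			// The source requested a full reconcile (e.g. after a reconnect).
		}
	}
}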
/* Copyright 2023 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package disk

import (
	"context"
	"os"
	"path/filepath"
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	commonapi "github.com/dapr/dapr/pkg/apis/common"
	componentsapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	loaderdisk "github.com/dapr/dapr/pkg/internal/loader/disk"
	operatorpb "github.com/dapr/dapr/pkg/proto/operator/v1"
	"github.com/dapr/dapr/pkg/runtime/compstore"
	"github.com/dapr/dapr/pkg/runtime/hotreload/loader"
	loadercompstore "github.com/dapr/dapr/pkg/runtime/hotreload/loader/store"
	"github.com/dapr/kit/events/batcher"
)

const (
	comp1 = `apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: comp1
spec:
  type: state.in-memory
  version: v1
`
	comp2 = `apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: comp2
spec:
  type: state.in-memory
  version: v1
`
	comp3 = `apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: comp3
spec:
  type: state.in-memory
  version: v1
`
)

func Test_Disk(t *testing.T) {
	t.Parallel()

	dir := t.TempDir()
	store := compstore.New()

	d, err := New(Options{
		Dirs:           []string{dir},
		ComponentStore: store,
	})
	require.NoError(t, err)

	errCh := make(chan error)
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		errCh <- d.Run(ctx)
	}()

	t.Cleanup(func() {
		cancel()
		require.NoError(t, <-errCh)
	})

	assert.Empty(t, store.ListComponents())

	conn, err := d.Components().Stream(context.Background())
	require.NoError(t, err)

	err = os.WriteFile(filepath.Join(dir, "f.yaml"), []byte(strings.Join([]string{comp1, comp2, comp3}, "\n---\n")), 0o600)
	require.NoError(t, err)

	var events []*loader.Event[componentsapi.Component]
	for i := 0; i < 3; i++ {
		select {
		case event := <-conn.EventCh:
			events = append(events, event)
		case <-time.After(time.Second * 3):
			assert.Fail(t, "expected to receive event")
		}
	}

	assert.ElementsMatch(t, []*loader.Event[componentsapi.Component]{
		{
			Type: operatorpb.ResourceEventType_CREATED,
			Resource: componentsapi.Component{
				ObjectMeta: metav1.ObjectMeta{Name: "comp1"},
				TypeMeta:   metav1.TypeMeta{APIVersion: "dapr.io/v1alpha1", Kind: "Component"},
				Spec:       componentsapi.ComponentSpec{Type: "state.in-memory", Version: "v1"},
			},
		},
		{
			Type: operatorpb.ResourceEventType_CREATED,
			Resource: componentsapi.Component{
				ObjectMeta: metav1.ObjectMeta{Name: "comp2"},
				TypeMeta:   metav1.TypeMeta{APIVersion: "dapr.io/v1alpha1", Kind: "Component"},
				Spec:       componentsapi.ComponentSpec{Type: "state.in-memory", Version: "v1"},
			},
		},
		{
			Type: operatorpb.ResourceEventType_CREATED,
			Resource: componentsapi.Component{
				ObjectMeta: metav1.ObjectMeta{Name: "comp3"},
				TypeMeta:   metav1.TypeMeta{APIVersion: "dapr.io/v1alpha1", Kind: "Component"},
				Spec:       componentsapi.ComponentSpec{Type: "state.in-memory", Version: "v1"},
			},
		},
	}, events)
}

func Test_Stream(t *testing.T) {
	t.Parallel()

	t.Run("if store empty and event happens, should send create event with all components", func(t *testing.T) {
		t.Parallel()

		dir := t.TempDir()
		err := os.WriteFile(filepath.Join(dir, "f.yaml"), []byte(strings.Join([]string{comp1, comp2, comp3}, "\n---\n")), 0o600)
		require.NoError(t, err)

		batcher := batcher.New[int, struct{}](0)
		store := compstore.New()
		r := newResource[componentsapi.Component](resourceOptions[componentsapi.Component]{
			store:   loadercompstore.NewComponents(store),
			batcher: batcher,
			loader: loaderdisk.NewComponents(loaderdisk.Options{
				Paths: []string{dir},
			}),
		})

		errCh := make(chan error)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(func() {
			cancel()
			require.NoError(t, <-errCh)
		})
		go func() {
			errCh <- r.run(ctx)
		}()

		select {
		case <-r.running:
		case <-time.After(time.Second * 3):
			assert.Fail(t, "expected to be running")
		}

		batcher.Batch(0, struct{}{})

		conn, err := r.Stream(context.Background())
		require.NoError(t, err)

		var events []*loader.Event[componentsapi.Component]
		for i := 0; i < 3; i++ {
			select {
			case event := <-conn.EventCh:
				events = append(events, event)
			case <-time.After(time.Second * 3):
				assert.Fail(t, "expected to receive event")
			}
		}

		assert.ElementsMatch(t, []*loader.Event[componentsapi.Component]{
			{
				Type: operatorpb.ResourceEventType_CREATED,
				Resource: componentsapi.Component{
					ObjectMeta: metav1.ObjectMeta{Name: "comp1"},
					TypeMeta:   metav1.TypeMeta{APIVersion: "dapr.io/v1alpha1", Kind: "Component"},
					Spec:       componentsapi.ComponentSpec{Type: "state.in-memory", Version: "v1"},
				},
			},
			{
				Type: operatorpb.ResourceEventType_CREATED,
				Resource: componentsapi.Component{
					ObjectMeta: metav1.ObjectMeta{Name: "comp2"},
					TypeMeta:   metav1.TypeMeta{APIVersion: "dapr.io/v1alpha1", Kind: "Component"},
					Spec:       componentsapi.ComponentSpec{Type: "state.in-memory", Version: "v1"},
				},
			},
			{
				Type: operatorpb.ResourceEventType_CREATED,
				Resource: componentsapi.Component{
					ObjectMeta: metav1.ObjectMeta{Name: "comp3"},
					TypeMeta:   metav1.TypeMeta{APIVersion: "dapr.io/v1alpha1", Kind: "Component"},
					Spec:       componentsapi.ComponentSpec{Type: "state.in-memory", Version: "v1"},
				},
			},
		}, events)
	})

	t.Run("if store has a component and event happens, should send create event with new components", func(t *testing.T) {
		t.Parallel()

		dir := t.TempDir()
		err := os.WriteFile(filepath.Join(dir, "f.yaml"), []byte(strings.Join([]string{comp1, comp2, comp3}, "\n---\n")), 0o600)
		require.NoError(t, err)

		batcher := batcher.New[int, struct{}](0)
		store := compstore.New()
		require.NoError(t, store.AddPendingComponentForCommit(componentsapi.Component{
			ObjectMeta: metav1.ObjectMeta{Name: "comp1"},
			TypeMeta:   metav1.TypeMeta{APIVersion: "dapr.io/v1alpha1", Kind: "Component"},
			Spec:       componentsapi.ComponentSpec{Type: "state.in-memory", Version: "v1"},
		}))
		require.NoError(t, store.CommitPendingComponent())

		r := newResource[componentsapi.Component](resourceOptions[componentsapi.Component]{
			store:   loadercompstore.NewComponents(store),
			batcher: batcher,
			loader: loaderdisk.NewComponents(loaderdisk.Options{
				Paths: []string{dir},
			}),
		})

		errCh := make(chan error)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(func() {
			cancel()
			require.NoError(t, <-errCh)
		})
		go func() {
			errCh <- r.run(ctx)
		}()

		select {
		case <-r.running:
		case <-time.After(time.Second * 3):
			assert.Fail(t, "expected to be running")
		}

		batcher.Batch(0, struct{}{})

		conn, err := r.Stream(context.Background())
		require.NoError(t, err)

		var events []*loader.Event[componentsapi.Component]
		for i := 0; i < 2; i++ {
			select {
			case event := <-conn.EventCh:
				events = append(events, event)
			case <-time.After(time.Second * 3):
				assert.Fail(t, "expected to receive event")
			}
		}

		assert.ElementsMatch(t, []*loader.Event[componentsapi.Component]{
			{
				Type: operatorpb.ResourceEventType_CREATED,
				Resource: componentsapi.Component{
					ObjectMeta: metav1.ObjectMeta{Name: "comp2"},
					TypeMeta:   metav1.TypeMeta{APIVersion: "dapr.io/v1alpha1", Kind: "Component"},
					Spec:       componentsapi.ComponentSpec{Type: "state.in-memory", Version: "v1"},
				},
			},
			{
				Type: operatorpb.ResourceEventType_CREATED,
				Resource: componentsapi.Component{
					ObjectMeta: metav1.ObjectMeta{Name: "comp3"},
					TypeMeta:   metav1.TypeMeta{APIVersion: "dapr.io/v1alpha1", Kind: "Component"},
					Spec:       componentsapi.ComponentSpec{Type: "state.in-memory", Version: "v1"},
				},
			},
		}, events)
	})

	t.Run("if store has a component and event happens, should send create/update/delete events for components", func(t *testing.T) {
		t.Parallel()

		dir := t.TempDir()
		err := os.WriteFile(filepath.Join(dir, "f.yaml"), []byte(strings.Join([]string{comp2, comp3}, "\n---\n")), 0o600)
		require.NoError(t, err)

		batcher := batcher.New[int, struct{}](0)
		store := compstore.New()
		require.NoError(t, store.AddPendingComponentForCommit(componentsapi.Component{
			ObjectMeta: metav1.ObjectMeta{Name: "comp1"},
			TypeMeta:   metav1.TypeMeta{APIVersion: "dapr.io/v1alpha1", Kind: "Component"},
			Spec:       componentsapi.ComponentSpec{Type: "state.in-memory", Version: "v1"},
		}))
		require.NoError(t, store.CommitPendingComponent())
		require.NoError(t, store.AddPendingComponentForCommit(componentsapi.Component{
			ObjectMeta: metav1.ObjectMeta{Name: "comp2"},
			TypeMeta:   metav1.TypeMeta{APIVersion: "dapr.io/v1alpha1", Kind: "Component"},
			Spec: componentsapi.ComponentSpec{
				Type:     "state.in-memory",
				Version:  "v1",
				Metadata: []commonapi.NameValuePair{{Name: "foo", EnvRef: "bar"}},
			},
		}))
		require.NoError(t, store.CommitPendingComponent())

		r := newResource[componentsapi.Component](resourceOptions[componentsapi.Component]{
			store:   loadercompstore.NewComponents(store),
			batcher: batcher,
			loader: loaderdisk.NewComponents(loaderdisk.Options{
				Paths: []string{dir},
			}),
		})

		errCh := make(chan error)
		ctx, cancel := context.WithCancel(context.Background())
		t.Cleanup(func() {
			cancel()
			require.NoError(t, <-errCh)
		})
		go func() {
			errCh <- r.run(ctx)
		}()

		select {
		case <-r.running:
		case <-time.After(time.Second * 3):
			assert.Fail(t, "expected to be running")
		}

		batcher.Batch(0, struct{}{})

		conn, err := r.Stream(context.Background())
		require.NoError(t, err)

		var events []*loader.Event[componentsapi.Component]
		for i := 0; i < 3; i++ {
			select {
			case event := <-conn.EventCh:
				events = append(events, event)
			case <-time.After(time.Second * 3):
				assert.Fail(t, "expected to receive event")
			}
		}

		assert.ElementsMatch(t, []*loader.Event[componentsapi.Component]{
			{
				Type: operatorpb.ResourceEventType_DELETED,
				Resource: componentsapi.Component{
					ObjectMeta: metav1.ObjectMeta{Name: "comp1"},
					TypeMeta:   metav1.TypeMeta{APIVersion: "dapr.io/v1alpha1", Kind: "Component"},
					Spec:       componentsapi.ComponentSpec{Type: "state.in-memory", Version: "v1"},
				},
			},
			{
				Type: operatorpb.ResourceEventType_UPDATED,
				Resource: componentsapi.Component{
					ObjectMeta: metav1.ObjectMeta{Name: "comp2"},
					TypeMeta:   metav1.TypeMeta{APIVersion: "dapr.io/v1alpha1", Kind: "Component"},
					Spec:       componentsapi.ComponentSpec{Type: "state.in-memory", Version: "v1"},
				},
			},
			{
				Type: operatorpb.ResourceEventType_CREATED,
				Resource: componentsapi.Component{
					ObjectMeta: metav1.ObjectMeta{Name: "comp3"},
					TypeMeta:   metav1.TypeMeta{APIVersion: "dapr.io/v1alpha1", Kind: "Component"},
					Spec:       componentsapi.ComponentSpec{Type: "state.in-memory", Version: "v1"},
				},
			},
		}, events)
	})
}
[mikeee/dapr] pkg/runtime/hotreload/loader/disk/resource_test.go (Go, MIT, 11,552 bytes)
/* Copyright 2023 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package fake

import (
	"context"

	compapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	subapi "github.com/dapr/dapr/pkg/apis/subscriptions/v2alpha1"
	"github.com/dapr/dapr/pkg/runtime/hotreload/differ"
	"github.com/dapr/dapr/pkg/runtime/hotreload/loader"
)

type FakeT struct {
	runFn         func(context.Context) error
	components    *Fake[compapi.Component]
	subscriptions *Fake[subapi.Subscription]
	startFn       func(context.Context) error
}

func New() *FakeT {
	return &FakeT{
		runFn: func(ctx context.Context) error {
			<-ctx.Done()
			return nil
		},
		components:    NewFake[compapi.Component](),
		subscriptions: NewFake[subapi.Subscription](),
		startFn: func(ctx context.Context) error {
			<-ctx.Done()
			return nil
		},
	}
}

func (f *FakeT) Run(ctx context.Context) error {
	return f.runFn(ctx)
}

func (f *FakeT) Components() loader.Loader[compapi.Component] {
	return f.components
}

func (f *FakeT) Subscriptions() loader.Loader[subapi.Subscription] {
	return f.subscriptions
}

func (f *FakeT) WithComponents(fake *Fake[compapi.Component]) *FakeT {
	f.components = fake
	return f
}

func (f *FakeT) WithRun(fn func(context.Context) error) *FakeT {
	f.runFn = fn
	return f
}

type Fake[T differ.Resource] struct {
	listFn   func(context.Context) (*differ.LocalRemoteResources[T], error)
	streamFn func(context.Context) (*loader.StreamConn[T], error)
}

func NewFake[T differ.Resource]() *Fake[T] {
	return &Fake[T]{
		listFn: func(context.Context) (*differ.LocalRemoteResources[T], error) {
			return nil, nil
		},
		streamFn: func(context.Context) (*loader.StreamConn[T], error) {
			return &loader.StreamConn[T]{
				EventCh:     make(chan *loader.Event[T]),
				ReconcileCh: make(chan struct{}),
			}, nil
		},
	}
}

func (f *Fake[T]) WithList(fn func(context.Context) (*differ.LocalRemoteResources[T], error)) *Fake[T] {
	f.listFn = fn
	return f
}

func (f *Fake[T]) WithStream(fn func(context.Context) (*loader.StreamConn[T], error)) *Fake[T] {
	f.streamFn = fn
	return f
}

func (f *Fake[T]) List(ctx context.Context) (*differ.LocalRemoteResources[T], error) {
	return f.listFn(ctx)
}

func (f *Fake[T]) Stream(ctx context.Context) (*loader.StreamConn[T], error) {
	return f.streamFn(ctx)
}
[mikeee/dapr] pkg/runtime/hotreload/loader/fake/fake.go (Go, MIT, 2,791 bytes)
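A sketch of plugging the fakes into a unit test; the test name and canned list result are hypothetical, not from the repository.

package example

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	compapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	"github.com/dapr/dapr/pkg/runtime/hotreload/differ"
	"github.com/dapr/dapr/pkg/runtime/hotreload/loader/fake"
)

func Test_withFakeLoader(t *testing.T) {
	ldr := fake.New().WithComponents(
		fake.NewFake[compapi.Component]().WithList(
			func(context.Context) (*differ.LocalRemoteResources[compapi.Component], error) {
				return &differ.LocalRemoteResources[compapi.Component]{
					Remote: []compapi.Component{{}}, // one hypothetical remote component
				}, nil
			},
		),
	)

	resources, err := ldr.Components().List(context.Background())
	require.NoError(t, err)
	assert.Len(t, resources.Remote, 1)
}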
/* Copyright 2023 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package fake

import (
	"testing"

	componentsapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	"github.com/dapr/dapr/pkg/runtime/hotreload/loader"
)

func Test_Fake(t *testing.T) {
	var _ loader.Interface = New()
	var _ loader.Loader[componentsapi.Component] = NewFake[componentsapi.Component]()
}
[mikeee/dapr] pkg/runtime/hotreload/loader/fake/fake_test.go (Go, MIT, 867 bytes)
/* Copyright 2023 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package loader

import (
	"context"

	compapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	subapi "github.com/dapr/dapr/pkg/apis/subscriptions/v2alpha1"
	operatorv1pb "github.com/dapr/dapr/pkg/proto/operator/v1"
	"github.com/dapr/dapr/pkg/runtime/hotreload/differ"
)

// Interface is an interface for loading and watching for changes to
// components from a source.
type Interface interface {
	Run(context.Context) error
	Components() Loader[compapi.Component]
	Subscriptions() Loader[subapi.Subscription]
}

type StreamConn[T differ.Resource] struct {
	EventCh     chan *Event[T]
	ReconcileCh chan struct{}
}

// Loader is an interface for loading and watching for changes to a resource
// from a source.
type Loader[T differ.Resource] interface {
	List(context.Context) (*differ.LocalRemoteResources[T], error)
	Stream(context.Context) (*StreamConn[T], error)
}

// Event is a component event.
type Event[T differ.Resource] struct {
	Type     operatorv1pb.ResourceEventType
	Resource T
}
[mikeee/dapr] pkg/runtime/hotreload/loader/loader.go (Go, MIT, 1,554 bytes)
/* Copyright 2023 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package operator

import (
	"context"
	"encoding/json"
	"fmt"

	componentsapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	operatorpb "github.com/dapr/dapr/pkg/proto/operator/v1"
	"github.com/dapr/dapr/pkg/runtime/hotreload/loader"
)

type components struct {
	operatorpb.Operator_ComponentUpdateClient
}

// The Go linter does not yet understand that these functions are being used
// by the generic operator.
//
//nolint:unused
func (c *components) list(ctx context.Context, opclient operatorpb.OperatorClient, ns, podName string) ([][]byte, error) {
	resp, err := opclient.ListComponents(ctx, &operatorpb.ListComponentsRequest{
		Namespace: ns,
		PodName:   podName,
	})
	if err != nil {
		return nil, err
	}
	return resp.GetComponents(), nil
}

//nolint:unused
func (c *components) close() error {
	if c.Operator_ComponentUpdateClient != nil {
		return c.Operator_ComponentUpdateClient.CloseSend()
	}
	return nil
}

//nolint:unused
func (c *components) recv() (*loader.Event[componentsapi.Component], error) {
	event, err := c.Operator_ComponentUpdateClient.Recv()
	if err != nil {
		return nil, err
	}

	var component componentsapi.Component
	if err := json.Unmarshal(event.GetComponent(), &component); err != nil {
		return nil, fmt.Errorf("failed to deserialize component: %w", err)
	}

	return &loader.Event[componentsapi.Component]{
		Resource: component,
		Type:     event.GetType(),
	}, nil
}

//nolint:unused
func (c *components) establish(ctx context.Context, opclient operatorpb.OperatorClient, ns, podName string) error {
	stream, err := opclient.ComponentUpdate(ctx, &operatorpb.ComponentUpdateRequest{
		Namespace: ns,
		PodName:   podName,
	})
	if err != nil {
		return err
	}

	c.Operator_ComponentUpdateClient = stream
	return nil
}
[mikeee/dapr] pkg/runtime/hotreload/loader/operator/components.go (Go, MIT, 2,325 bytes)
/* Copyright 2023 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package operator

import (
	"context"
	"errors"
	"sync/atomic"

	componentsapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	subapi "github.com/dapr/dapr/pkg/apis/subscriptions/v2alpha1"
	operatorpb "github.com/dapr/dapr/pkg/proto/operator/v1"
	"github.com/dapr/dapr/pkg/runtime/compstore"
	"github.com/dapr/dapr/pkg/runtime/hotreload/loader"
	loadercompstore "github.com/dapr/dapr/pkg/runtime/hotreload/loader/store"
	"github.com/dapr/kit/logger"
)

var log = logger.NewLogger("dapr.runtime.hotreload.loader.operator")

type Options struct {
	PodName        string
	Namespace      string
	ComponentStore *compstore.ComponentStore
	OperatorClient operatorpb.OperatorClient
}

type operator struct {
	components    *resource[componentsapi.Component]
	subscriptions *resource[subapi.Subscription]
	running       atomic.Bool
}

func New(opts Options) loader.Interface {
	return &operator{
		components:    newResource[componentsapi.Component](opts, loadercompstore.NewComponents(opts.ComponentStore), new(components)),
		subscriptions: newResource[subapi.Subscription](opts, loadercompstore.NewSubscriptions(opts.ComponentStore), new(subscriptions)),
	}
}

func (o *operator) Run(ctx context.Context) error {
	if !o.running.CompareAndSwap(false, true) {
		return errors.New("already running")
	}

	<-ctx.Done()

	return errors.Join(o.components.close(), o.subscriptions.close())
}

func (o *operator) Components() loader.Loader[componentsapi.Component] {
	return o.components
}

func (o *operator) Subscriptions() loader.Loader[subapi.Subscription] {
	return o.subscriptions
}
[mikeee/dapr] pkg/runtime/hotreload/loader/operator/operator.go (Go, MIT, 2,136 bytes)
/* Copyright 2023 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package operator

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"sync"
	"sync/atomic"

	"github.com/cenkalti/backoff/v4"

	operatorpb "github.com/dapr/dapr/pkg/proto/operator/v1"
	"github.com/dapr/dapr/pkg/runtime/hotreload/differ"
	"github.com/dapr/dapr/pkg/runtime/hotreload/loader"
	"github.com/dapr/dapr/pkg/runtime/hotreload/loader/store"
)

// resource is a generic implementation of an operator resource loader.
// resource will watch and load resources from the operator service.
type resource[T differ.Resource] struct {
	opClient  operatorpb.OperatorClient
	podName   string
	namespace string

	streamer streamer[T]
	store    store.Store[T]

	wg      sync.WaitGroup
	closeCh chan struct{}
	closed  atomic.Bool
}

// streamer is a generic interface for streaming resources from the operator.
// We need a generic interface because the gRPC methods used for streaming
// differ between resources.
type streamer[T differ.Resource] interface {
	list(ctx context.Context, opclient operatorpb.OperatorClient, ns, podName string) ([][]byte, error)
	close() error
	recv() (*loader.Event[T], error)
	establish(context.Context, operatorpb.OperatorClient, string, string) error
}

func newResource[T differ.Resource](opts Options, store store.Store[T], streamer streamer[T]) *resource[T] {
	return &resource[T]{
		opClient:  opts.OperatorClient,
		podName:   opts.PodName,
		namespace: opts.Namespace,
		streamer:  streamer,
		store:     store,
		closeCh:   make(chan struct{}),
	}
}

func (r *resource[T]) List(ctx context.Context) (*differ.LocalRemoteResources[T], error) {
	resp, err := backoff.RetryWithData(func() ([][]byte, error) {
		return r.streamer.list(ctx, r.opClient, r.namespace, r.podName)
	}, backoff.WithContext(backoff.NewExponentialBackOff(), ctx))
	if err != nil {
		return nil, err
	}

	remotes := make([]T, len(resp))
	for i, c := range resp {
		var obj T
		if err := json.Unmarshal(c, &obj); err != nil {
			return nil, fmt.Errorf("error deserializing object: %s", err)
		}
		remotes[i] = obj
	}

	return &differ.LocalRemoteResources[T]{
		Local:  r.store.List(),
		Remote: remotes,
	}, nil
}

func (r *resource[T]) Stream(ctx context.Context) (*loader.StreamConn[T], error) {
	if r.closed.Load() {
		return nil, errors.New("stream is closed")
	}

	if err := r.streamer.establish(ctx, r.opClient, r.namespace, r.podName); err != nil {
		return nil, err
	}

	log.Debugf("stream established with operator")

	conn := &loader.StreamConn[T]{
		EventCh:     make(chan *loader.Event[T]),
		ReconcileCh: make(chan struct{}),
	}

	ctx, cancel := context.WithCancel(ctx)
	r.wg.Add(2)
	go func() {
		defer r.wg.Done()
		select {
		case <-r.closeCh:
		case <-ctx.Done():
		}
		cancel()
	}()

	go func() {
		defer r.wg.Done()
		r.stream(ctx, conn)
	}()

	return conn, nil
}

func (r *resource[T]) stream(ctx context.Context, conn *loader.StreamConn[T]) {
	for {
		for {
			event, err := r.streamer.recv()
			if err != nil {
				r.streamer.close()
				// Retry on stream error.
				log.Errorf("Error from operator stream: %s", err)
				break
			}

			select {
			case conn.EventCh <- event:
			case <-ctx.Done():
				return
			}
		}

		if ctx.Err() != nil {
			return
		}

		if err := backoff.Retry(func() error {
			berr := r.streamer.establish(ctx, r.opClient, r.namespace, r.podName)
			if berr != nil {
				log.Errorf("Failed to establish stream: %s", berr)
			}
			return berr
		}, backoff.WithContext(backoff.NewExponentialBackOff(), ctx)); err != nil {
			log.Errorf("Stream retry failed: %s", err)
			return
		}

		log.Info("Reconnected to operator")

		select {
		case <-ctx.Done():
			return
		case conn.ReconcileCh <- struct{}{}:
		}
	}
}

func (r *resource[T]) close() error {
	defer r.wg.Wait()
	if r.closed.CompareAndSwap(false, true) {
		close(r.closeCh)
	}

	return r.streamer.close()
}
[mikeee/dapr] pkg/runtime/hotreload/loader/operator/resource.go (Go, MIT, 4,379 bytes)
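The reconnect logic above leans on cenkalti/backoff/v4, which the file already imports; here is a standalone sketch of the same retry pattern with a hypothetical establish function standing in for the operator stream setup.

package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	ctx := context.Background()

	attempts := 0
	establish := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure")
		}
		return nil
	}

	// Retry with exponential backoff, giving up once ctx is cancelled.
	err := backoff.Retry(establish, backoff.WithContext(backoff.NewExponentialBackOff(), ctx))
	fmt.Println(err, "after", attempts, "attempts")
}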
/* Copyright 2023 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */

package operator

import (
	"context"
	"errors"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	componentsapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	operatorpb "github.com/dapr/dapr/pkg/proto/operator/v1"
	"github.com/dapr/dapr/pkg/runtime/compstore"
	"github.com/dapr/dapr/pkg/runtime/hotreload/differ"
	"github.com/dapr/dapr/pkg/runtime/hotreload/loader"
	loadercompstore "github.com/dapr/dapr/pkg/runtime/hotreload/loader/store"
)

func Test_generic(t *testing.T) {
	t.Run("Stream should return error on stream when already closed", func(t *testing.T) {
		streamer := newFakeStreamer()
		r := newResource[componentsapi.Component](
			Options{},
			loadercompstore.NewComponents(compstore.New()),
			streamer,
		)
		require.NoError(t, r.close())
		ch, err := r.Stream(context.Background())
		assert.Nil(t, ch)
		require.ErrorContains(t, err, "stream is closed")
	})

	t.Run("Stream should return error on stream and context cancelled", func(t *testing.T) {
		streamer := newFakeStreamer()
		r := newResource[componentsapi.Component](
			Options{},
			loadercompstore.NewComponents(compstore.New()),
			streamer,
		)
		ctx, cancel := context.WithCancel(context.Background())
		cancel()
		streamer.establishFn = func(context.Context, operatorpb.OperatorClient, string, string) error {
			return errors.New("test error")
		}
		ch, err := r.Stream(ctx)
		assert.Nil(t, ch)
		require.ErrorContains(t, err, "test error")
	})

	t.Run("Should send event to Stream channel on Recv", func(t *testing.T) {
		streamer := newFakeStreamer()
		r := newResource[componentsapi.Component](
			Options{},
			loadercompstore.NewComponents(compstore.New()),
			streamer,
		)

		recCh := make(chan *loader.Event[componentsapi.Component], 1)
		streamer.recvFn = func() (*loader.Event[componentsapi.Component], error) {
			return <-recCh, nil
		}

		ch, err := r.Stream(context.Background())
		assert.NotNil(t, ch)
		require.NoError(t, err)

		for i := 0; i < 5; i++ {
			comp := new(loader.Event[componentsapi.Component])
			select {
			case recCh <- comp:
			case <-time.After(time.Second):
				t.Error("expected to be able to send on receive")
			}

			select {
			case got := <-ch.EventCh:
				assert.Same(t, comp, got)
			case <-time.After(time.Second):
				t.Error("expected to get event from on receive")
			}
		}

		close(recCh)
		require.NoError(t, r.close())
	})

	t.Run("Should attempt to re-establish after the stream fails", func(t *testing.T) {
		streamer := newFakeStreamer()
		r := newResource[componentsapi.Component](
			Options{},
			loadercompstore.NewComponents(compstore.New()),
			streamer,
		)

		var calls int
		retried := make(chan struct{})
		streamer.establishFn = func(context.Context, operatorpb.OperatorClient, string, string) error {
			defer func() { calls++ }()

			if calls == 3 {
				close(retried)
			}

			if calls == 0 || calls > 2 {
				return nil
			}

			return errors.New("test error")
		}
		streamer.recvFn = func() (*loader.Event[componentsapi.Component], error) {
			return nil, errors.New("recv error")
		}

		conn, err := r.Stream(context.Background())
		require.NoError(t, err)

		select {
		case <-retried:
		case <-time.After(time.Second * 3):
			t.Error("expected generic to retry establishing stream after failure")
		}

		select {
		case <-conn.ReconcileCh:
		case <-time.After(time.Second * 3):
			t.Error("expected reconcile channel to be sent")
		}

		require.NoError(t, r.close())
		assert.GreaterOrEqual(t, calls, 3)
	})

	t.Run("close waits for streamer to close", func(t *testing.T) {
		streamer := newFakeStreamer()
		r := newResource[componentsapi.Component](
			Options{},
			loadercompstore.NewComponents(compstore.New()),
			streamer,
		)

		closeCh := make(chan error)
		streamer.closeFn = func() error {
			closeCh <- errors.New("streamer error")
			return errors.New("return error")
		}

		go func() {
			closeCh <- r.close()
		}()

		select {
		case err := <-closeCh:
			require.ErrorContains(t, err, "streamer error")
		case <-time.After(time.Second * 3):
			t.Error("streamer did not close in time")
		}

		select {
		case err := <-closeCh:
			require.ErrorContains(t, err, "return error")
		case <-time.After(time.Second * 3):
			t.Error("generic did not close in time")
		}
	})
}

type fakeStreamer[T differ.Resource] struct {
	listFn      func(context.Context, operatorpb.OperatorClient, string, string) ([][]byte, error)
	closeFn     func() error
	recvFn      func() (*loader.Event[T], error)
	establishFn func(context.Context, operatorpb.OperatorClient, string, string) error
}

func newFakeStreamer() *fakeStreamer[componentsapi.Component] {
	return &fakeStreamer[componentsapi.Component]{
		listFn: func(context.Context, operatorpb.OperatorClient, string, string) ([][]byte, error) {
			return nil, nil
		},
		closeFn: func() error { return nil },
		recvFn: func() (*loader.Event[componentsapi.Component], error) {
			return nil, nil
		},
		establishFn: func(context.Context, operatorpb.OperatorClient, string, string) error {
			return nil
		},
	}
}

//nolint:unused
func (f *fakeStreamer[T]) list(ctx context.Context, opclient operatorpb.OperatorClient, ns, podName string) ([][]byte, error) {
	return f.listFn(ctx, opclient, ns, podName)
}

//nolint:unused
func (f *fakeStreamer[T]) close() error {
	return f.closeFn()
}

//nolint:unused
func (f *fakeStreamer[T]) recv() (*loader.Event[T], error) {
	return f.recvFn()
}

//nolint:unused
func (f *fakeStreamer[T]) establish(ctx context.Context, opclient operatorpb.OperatorClient, ns, podName string) error {
	return f.establishFn(ctx, opclient, ns, podName)
}
mikeee/dapr
pkg/runtime/hotreload/loader/operator/resource_test.go
GO
mit
6,207
/*
Copyright 2024 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package operator

import (
	"context"
	"encoding/json"
	"fmt"

	subapi "github.com/dapr/dapr/pkg/apis/subscriptions/v2alpha1"
	operatorpb "github.com/dapr/dapr/pkg/proto/operator/v1"
	"github.com/dapr/dapr/pkg/runtime/hotreload/loader"
)

type subscriptions struct {
	operatorpb.Operator_SubscriptionUpdateClient
}

// The go linter does not yet understand that these functions are being used
// by the generic operator.
//
//nolint:unused
func (s *subscriptions) list(ctx context.Context, opclient operatorpb.OperatorClient, ns, podName string) ([][]byte, error) {
	resp, err := opclient.ListSubscriptionsV2(ctx, &operatorpb.ListSubscriptionsRequest{
		Namespace: ns,
		PodName:   podName,
	})
	if err != nil {
		return nil, err
	}

	return resp.GetSubscriptions(), nil
}

//nolint:unused
func (s *subscriptions) close() error {
	if s.Operator_SubscriptionUpdateClient != nil {
		return s.Operator_SubscriptionUpdateClient.CloseSend()
	}

	return nil
}

//nolint:unused
func (s *subscriptions) recv() (*loader.Event[subapi.Subscription], error) {
	event, err := s.Operator_SubscriptionUpdateClient.Recv()
	if err != nil {
		return nil, err
	}

	var subscription subapi.Subscription
	if err := json.Unmarshal(event.GetSubscription(), &subscription); err != nil {
		return nil, fmt.Errorf("failed to deserialize subscription: %w", err)
	}

	return &loader.Event[subapi.Subscription]{
		Resource: subscription,
		Type:     event.GetType(),
	}, nil
}

//nolint:unused
func (s *subscriptions) establish(ctx context.Context, opclient operatorpb.OperatorClient, ns, podName string) error {
	stream, err := opclient.SubscriptionUpdate(ctx, &operatorpb.SubscriptionUpdateRequest{
		Namespace: ns,
		PodName:   podName,
	})
	if err != nil {
		return err
	}

	s.Operator_SubscriptionUpdateClient = stream
	return nil
}
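
// Lifecycle note (an illustrative sketch inferred from the generic resource
// loader, not an exact transcript of it): establish opens the update stream,
// recv is called in a loop to surface events, and close ends the stream.
// Variable names below are hypothetical.
//
//	if err := s.establish(ctx, client, ns, pod); err != nil {
//		// retry with backoff
//	}
//	for {
//		ev, err := s.recv()
//		if err != nil {
//			break // re-establish the stream
//		}
//		_ = ev // forward to the reconciler
//	}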
mikeee/dapr
pkg/runtime/hotreload/loader/operator/subscriptions.go
GO
mit
2,371
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package store

import (
	compapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	"github.com/dapr/dapr/pkg/runtime/compstore"
)

type components struct {
	compStore *compstore.ComponentStore
}

func NewComponents(compStore *compstore.ComponentStore) Store[compapi.Component] {
	return &components{
		compStore: compStore,
	}
}

func (c *components) List() []compapi.Component {
	return c.compStore.ListComponents()
}
mikeee/dapr
pkg/runtime/hotreload/loader/store/components.go
GO
mit
983
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package store

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	componentsapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	"github.com/dapr/dapr/pkg/runtime/compstore"
)

func Test_component(t *testing.T) {
	var store Store[componentsapi.Component]
	compStore := compstore.New()
	store = NewComponents(compStore)

	comp1, comp2 := componentsapi.Component{
		ObjectMeta: metav1.ObjectMeta{Name: "1"},
	}, componentsapi.Component{
		ObjectMeta: metav1.ObjectMeta{Name: "2"},
	}

	require.NoError(t, compStore.AddPendingComponentForCommit(comp1))
	require.NoError(t, compStore.CommitPendingComponent())
	require.NoError(t, compStore.AddPendingComponentForCommit(comp2))
	require.NoError(t, compStore.CommitPendingComponent())

	assert.ElementsMatch(t, []componentsapi.Component{comp1, comp2}, store.List())

	compStore.DeleteComponent("1")
	assert.ElementsMatch(t, []componentsapi.Component{comp2}, store.List())

	compStore.DeleteComponent("2")
	assert.ElementsMatch(t, []componentsapi.Component{}, store.List())
}
mikeee/dapr
pkg/runtime/hotreload/loader/store/components_test.go
GO
mit
1,678
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package store

import (
	"github.com/dapr/dapr/pkg/runtime/hotreload/differ"
)

type Store[T differ.Resource] interface {
	List() []T
}
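
// Example (an illustrative sketch only; staticStore is a hypothetical name
// and not part of this package): any type that can enumerate its resources
// satisfies Store, which makes test fakes trivial to write.
//
//	type staticStore[T differ.Resource] struct{ items []T }
//
//	func (s staticStore[T]) List() []T { return s.items }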
mikeee/dapr
pkg/runtime/hotreload/loader/store/store.go
GO
mit
697
/*
Copyright 2024 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package store

import (
	subapi "github.com/dapr/dapr/pkg/apis/subscriptions/v2alpha1"
	"github.com/dapr/dapr/pkg/runtime/compstore"
)

type subscriptions struct {
	compStore *compstore.ComponentStore
}

func NewSubscriptions(compStore *compstore.ComponentStore) Store[subapi.Subscription] {
	return &subscriptions{
		compStore: compStore,
	}
}

func (s *subscriptions) List() []subapi.Subscription {
	return s.compStore.ListDeclarativeSubscriptions()
}
mikeee/dapr
pkg/runtime/hotreload/loader/store/subscriptions.go
GO
mit
1,015
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package reconciler

import (
	"context"
	"strings"

	compapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	"github.com/dapr/dapr/pkg/runtime/authorizer"
	"github.com/dapr/dapr/pkg/runtime/compstore"
	"github.com/dapr/dapr/pkg/runtime/hotreload/differ"
	"github.com/dapr/dapr/pkg/runtime/hotreload/loader"
	"github.com/dapr/dapr/pkg/runtime/processor"
	"github.com/dapr/dapr/pkg/runtime/processor/state"
)

type components struct {
	store *compstore.ComponentStore
	proc  *processor.Processor
	auth  *authorizer.Authorizer

	loader.Loader[compapi.Component]
}

// The go linter does not yet understand that these functions are being used
// by the generic reconciler.
//
//nolint:unused
func (c *components) update(ctx context.Context, comp compapi.Component) {
	if !c.verify(comp) {
		return
	}

	oldComp, exists := c.store.GetComponent(comp.Name)
	_, _ = c.proc.Secret().ProcessResource(ctx, comp)

	if exists {
		if differ.AreSame(oldComp, comp) {
			log.Debugf("Component update skipped: no changes detected: %s", comp.LogName())
			return
		}

		log.Infof("Closing existing Component to reload: %s", oldComp.LogName())

		// TODO: change close to accept pointer
		if err := c.proc.Close(oldComp); err != nil {
			log.Errorf("error closing old component: %s", err)
			return
		}
	}

	if !c.auth.IsObjectAuthorized(comp) {
		log.Warnf("Received unauthorized component update, ignored: %s", comp.LogName())
		return
	}

	log.Infof("Adding Component for processing: %s", comp.LogName())
	if c.proc.AddPendingComponent(ctx, comp) {
		log.Infof("Component updated: %s", comp.LogName())
		c.proc.WaitForEmptyComponentQueue()
	}
}

//nolint:unused
func (c *components) delete(_ context.Context, comp compapi.Component) {
	if !c.verify(comp) {
		return
	}

	if err := c.proc.Close(comp); err != nil {
		log.Errorf("error closing deleted component: %s", err)
	}
}

//nolint:unused
func (c *components) verify(vcomp compapi.Component) bool {
	toverify := []compapi.Component{vcomp}
	if comp, ok := c.store.GetComponent(vcomp.Name); ok {
		toverify = append(toverify, comp)
	}

	// Reject the component if it has the same name as the actor state store.
	if _, name, ok := c.store.GetStateStoreActor(); ok && name == vcomp.Name {
		log.Errorf("Aborting hot-reload of a state store component that is used as an actor state store: %s", vcomp.LogName())
		return false
	}

	// Reject the component if it is marked as the actor state store.
	for _, comp := range toverify {
		if strings.HasPrefix(comp.Spec.Type, "state.") {
			for _, meta := range comp.Spec.Metadata {
				if strings.EqualFold(meta.Name, state.PropertyKeyActorStateStore) {
					log.Errorf("Aborting hot-reload of a state store component that is used as an actor state store: %s", comp.LogName())
					return false
				}
			}
		}
	}

	for backendName := range c.store.ListWorkflowBackends() {
		if backendName == vcomp.Name {
			log.Errorf("Aborting hot-reload of a workflowbackend component, which is not supported: %s", vcomp.LogName())
			return false
		}
	}

	if strings.HasPrefix(vcomp.Spec.Type, "workflowbackend.") {
		log.Errorf("Aborting hot-reload of a workflowbackend component, which is not supported: %s", vcomp.LogName())
		return false
	}

	return true
}
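
// Example (illustrative only; the manifest below is hypothetical): verify
// rejects any state store flagged as the actor state store, so a component
// like this is never hot-reloaded:
//
//	spec:
//	  type: state.redis
//	  metadata:
//	  - name: actorStateStore
//	    value: "true"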
mikeee/dapr
pkg/runtime/hotreload/reconciler/components.go
GO
mit
3,796
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package reconciler

import (
	"context"
	"fmt"
	"sync"
	"time"

	"k8s.io/utils/clock"

	compapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	subapi "github.com/dapr/dapr/pkg/apis/subscriptions/v2alpha1"
	operatorpb "github.com/dapr/dapr/pkg/proto/operator/v1"
	"github.com/dapr/dapr/pkg/runtime/authorizer"
	"github.com/dapr/dapr/pkg/runtime/compstore"
	"github.com/dapr/dapr/pkg/runtime/hotreload/differ"
	"github.com/dapr/dapr/pkg/runtime/hotreload/loader"
	"github.com/dapr/dapr/pkg/runtime/processor"
	"github.com/dapr/kit/logger"
)

var log = logger.NewLogger("dapr.runtime.hotreload.reconciler")

type Options[T differ.Resource] struct {
	Loader     loader.Interface
	CompStore  *compstore.ComponentStore
	Processor  *processor.Processor
	Authorizer *authorizer.Authorizer
}

type Reconciler[T differ.Resource] struct {
	kind    string
	manager manager[T]

	clock clock.WithTicker
}

type manager[T differ.Resource] interface {
	loader.Loader[T]
	update(context.Context, T)
	delete(context.Context, T)
}

func NewComponents(opts Options[compapi.Component]) *Reconciler[compapi.Component] {
	return &Reconciler[compapi.Component]{
		clock: clock.RealClock{},
		kind:  compapi.Kind,
		manager: &components{
			Loader: opts.Loader.Components(),
			store:  opts.CompStore,
			proc:   opts.Processor,
			auth:   opts.Authorizer,
		},
	}
}

func NewSubscriptions(opts Options[subapi.Subscription]) *Reconciler[subapi.Subscription] {
	return &Reconciler[subapi.Subscription]{
		clock: clock.RealClock{},
		kind:  subapi.Kind,
		manager: &subscriptions{
			Loader: opts.Loader.Subscriptions(),
			store:  opts.CompStore,
			proc:   opts.Processor,
		},
	}
}

func (r *Reconciler[T]) Run(ctx context.Context) error {
	conn, err := r.manager.Stream(ctx)
	if err != nil {
		return fmt.Errorf("error running component stream: %w", err)
	}

	return r.watchForEvents(ctx, conn)
}

func (r *Reconciler[T]) watchForEvents(ctx context.Context, conn *loader.StreamConn[T]) error {
	log.Infof("Starting to watch %s updates", r.kind)

	ticker := r.clock.NewTicker(time.Second * 60)
	defer ticker.Stop()

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	for {
		select {
		case <-ctx.Done():
			return nil
		case <-ticker.C():
			log.Debugf("Running scheduled %s reconcile", r.kind)
			resources, err := r.manager.List(ctx)
			if err != nil {
				log.Errorf("Error listing %s: %s", r.kind, err)
				continue
			}

			r.reconcile(ctx, differ.Diff(resources))
		case <-conn.ReconcileCh:
			log.Debugf("Reconciling all %s", r.kind)
			resources, err := r.manager.List(ctx)
			if err != nil {
				log.Errorf("Error listing %s: %s", r.kind, err)
				continue
			}

			r.reconcile(ctx, differ.Diff(resources))
		case event := <-conn.EventCh:
			r.handleEvent(ctx, event)
		}
	}
}

func (r *Reconciler[T]) reconcile(ctx context.Context, result *differ.Result[T]) {
	if result == nil {
		return
	}

	var wg sync.WaitGroup
	for _, group := range []struct {
		resources []T
		eventType operatorpb.ResourceEventType
	}{
		{result.Deleted, operatorpb.ResourceEventType_DELETED},
		{result.Updated, operatorpb.ResourceEventType_UPDATED},
		{result.Created, operatorpb.ResourceEventType_CREATED},
	} {
		wg.Add(len(group.resources))
		for _, resource := range group.resources {
			go func(resource T, eventType operatorpb.ResourceEventType) {
				defer wg.Done()
				r.handleEvent(ctx, &loader.Event[T]{
					Type:     eventType,
					Resource: resource,
				})
			}(resource, group.eventType)
		}
		wg.Wait()
	}
}

func (r *Reconciler[T]) handleEvent(ctx context.Context, event *loader.Event[T]) {
	log.Debugf("Received %s event %s: %s", event.Resource.Kind(), event.Type, event.Resource.LogName())

	switch event.Type {
	case operatorpb.ResourceEventType_CREATED:
		log.Infof("Received %s creation: %s", r.kind, event.Resource.LogName())
		r.manager.update(ctx, event.Resource)
	case operatorpb.ResourceEventType_UPDATED:
		log.Infof("Received %s update: %s", r.kind, event.Resource.LogName())
		r.manager.update(ctx, event.Resource)
	case operatorpb.ResourceEventType_DELETED:
		log.Infof("Received %s deletion, closing: %s", r.kind, event.Resource.LogName())
		r.manager.delete(ctx, event.Resource)
	}
}
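
// Usage sketch (illustrative; in the runtime these dependencies are wired by
// the hot-reload bootstrap, and the variable names below are hypothetical):
//
//	r := NewComponents(Options[compapi.Component]{
//		Loader:     ldr,   // a loader.Interface implementation
//		CompStore:  store, // *compstore.ComponentStore
//		Processor:  proc,
//		Authorizer: auth,
//	})
//	// Run blocks, reconciling on stream events, reconcile signals, and a
//	// 60-second ticker, until ctx is cancelled.
//	err := r.Run(ctx)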
mikeee/dapr
pkg/runtime/hotreload/reconciler/reconciler.go
GO
mit
4,758
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package reconciler

import (
	"context"
	"sync/atomic"
	"testing"
	"time"

	fuzz "github.com/google/gofuzz"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clocktesting "k8s.io/utils/clock/testing"

	componentsapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	"github.com/dapr/dapr/pkg/proto/operator/v1"
	"github.com/dapr/dapr/pkg/runtime/compstore"
	"github.com/dapr/dapr/pkg/runtime/hotreload/differ"
	"github.com/dapr/dapr/pkg/runtime/hotreload/loader"
	"github.com/dapr/dapr/pkg/runtime/hotreload/loader/fake"
)

func Test_Run(t *testing.T) {
	t.Run("should reconcile when ticker reaches 60 seconds", func(t *testing.T) {
		compLoader := fake.NewFake[componentsapi.Component]()
		loader := fake.New().WithComponents(compLoader)

		var listCalled atomic.Int32
		compLoader.WithList(func(context.Context) (*differ.LocalRemoteResources[componentsapi.Component], error) {
			listCalled.Add(1)
			return nil, nil
		})

		r := NewComponents(Options[componentsapi.Component]{
			Loader:    loader,
			CompStore: compstore.New(),
		})
		fakeClock := clocktesting.NewFakeClock(time.Now())
		r.clock = fakeClock

		errCh := make(chan error)
		ctx, cancel := context.WithCancel(context.Background())
		go func() {
			errCh <- r.Run(ctx)
		}()

		assert.Eventually(t, fakeClock.HasWaiters, time.Second*3, time.Millisecond*100)
		assert.Equal(t, int32(0), listCalled.Load())

		fakeClock.Step(time.Second * 60)

		assert.Eventually(t, func() bool {
			return listCalled.Load() == 1
		}, time.Second*3, time.Millisecond*100)

		cancel()

		select {
		case err := <-errCh:
			require.NoError(t, err)
		case <-time.After(time.Second * 3):
			t.Error("reconciler did not return in time")
		}
	})

	t.Run("should handle create, update and delete events from the stream", func(t *testing.T) {
		compLoader := fake.NewFake[componentsapi.Component]()

		compCh := make(chan *loader.Event[componentsapi.Component])
		compLoader.WithStream(func(context.Context) (*loader.StreamConn[componentsapi.Component], error) {
			return &loader.StreamConn[componentsapi.Component]{
				EventCh:     compCh,
				ReconcileCh: make(chan struct{}),
			}, nil
		})

		r := NewComponents(Options[componentsapi.Component]{
			Loader:    fake.New().WithComponents(compLoader),
			CompStore: compstore.New(),
		})
		fakeClock := clocktesting.NewFakeClock(time.Now())
		r.clock = fakeClock

		mngr := newFakeManager()
		mngr.Loader = compLoader
		updateCh := make(chan componentsapi.Component)
		deleteCh := make(chan componentsapi.Component)
		mngr.deleteFn = func(_ context.Context, c componentsapi.Component) {
			deleteCh <- c
		}
		mngr.updateFn = func(_ context.Context, c componentsapi.Component) {
			updateCh <- c
		}

		r.manager = mngr

		errCh := make(chan error)
		ctx, cancel := context.WithCancel(context.Background())
		go func() {
			errCh <- r.Run(ctx)
		}()

		comp1 := componentsapi.Component{ObjectMeta: metav1.ObjectMeta{Name: "foo"}}

		select {
		case compCh <- &loader.Event[componentsapi.Component]{
			Type:     operator.ResourceEventType_CREATED,
			Resource: comp1,
		}:
		case <-time.After(time.Second * 3):
			t.Error("reconciler did not return in time")
		}

		select {
		case e := <-updateCh:
			assert.Equal(t, comp1, e)
		case <-time.After(time.Second * 3):
			t.Error("did not get event in time")
		}

		select {
		case compCh <- &loader.Event[componentsapi.Component]{
			Type:     operator.ResourceEventType_UPDATED,
			Resource: comp1,
		}:
		case <-time.After(time.Second * 3):
			t.Error("reconciler did not return in time")
		}

		select {
		case e := <-updateCh:
			assert.Equal(t, comp1, e)
		case <-time.After(time.Second * 3):
			t.Error("did not get event in time")
		}

		select {
		case compCh <- &loader.Event[componentsapi.Component]{
			Type:     operator.ResourceEventType_DELETED,
			Resource: comp1,
		}:
		case <-time.After(time.Second * 3):
			t.Error("reconciler did not return in time")
		}

		select {
		case e := <-deleteCh:
			assert.Equal(t, comp1, e)
		case <-time.After(time.Second * 3):
			t.Error("did not get event in time")
		}

		cancel()

		select {
		case err := <-errCh:
			require.NoError(t, err)
		case <-time.After(time.Second * 3):
			t.Error("reconciler did not return in time")
		}
	})

	t.Run("should reconcile when receive event from reconcile channel", func(t *testing.T) {
		compLoader := fake.NewFake[componentsapi.Component]()

		recCh := make(chan struct{})
		compLoader.WithStream(func(context.Context) (*loader.StreamConn[componentsapi.Component], error) {
			return &loader.StreamConn[componentsapi.Component]{
				EventCh:     make(chan *loader.Event[componentsapi.Component]),
				ReconcileCh: recCh,
			}, nil
		})

		r := NewComponents(Options[componentsapi.Component]{
			Loader:    fake.New().WithComponents(compLoader),
			CompStore: compstore.New(),
		})
		fakeClock := clocktesting.NewFakeClock(time.Now())
		r.clock = fakeClock

		mngr := newFakeManager()
		mngr.Loader = compLoader
		updateCh := make(chan componentsapi.Component)
		deleteCh := make(chan componentsapi.Component)
		mngr.deleteFn = func(_ context.Context, c componentsapi.Component) {
			deleteCh <- c
		}
		mngr.updateFn = func(_ context.Context, c componentsapi.Component) {
			updateCh <- c
		}

		r.manager = mngr

		errCh := make(chan error)
		ctx, cancel := context.WithCancel(context.Background())
		go func() {
			errCh <- r.Run(ctx)
		}()

		comp1 := componentsapi.Component{ObjectMeta: metav1.ObjectMeta{Name: "foo"}}

		compLoader.WithList(func(context.Context) (*differ.LocalRemoteResources[componentsapi.Component], error) {
			return &differ.LocalRemoteResources[componentsapi.Component]{
				Local:  []componentsapi.Component{},
				Remote: []componentsapi.Component{comp1},
			}, nil
		})

		select {
		case recCh <- struct{}{}:
		case <-time.After(time.Second * 3):
			t.Error("reconciler did not return in time")
		}

		select {
		case e := <-updateCh:
			assert.Equal(t, comp1, e)
		case <-time.After(time.Second * 3):
			t.Error("did not get event in time")
		}

		comp2 := comp1.DeepCopy()
		comp2.Spec = componentsapi.ComponentSpec{Version: "123"}
		compLoader.WithList(func(context.Context) (*differ.LocalRemoteResources[componentsapi.Component], error) {
			return &differ.LocalRemoteResources[componentsapi.Component]{
				Local:  []componentsapi.Component{comp1},
				Remote: []componentsapi.Component{*comp2},
			}, nil
		})

		select {
		case recCh <- struct{}{}:
		case <-time.After(time.Second * 3):
			t.Error("reconciler did not return in time")
		}

		select {
		case e := <-updateCh:
			assert.Equal(t, *comp2, e)
		case <-time.After(time.Second * 3):
			t.Error("did not get event in time")
		}

		compLoader.WithList(func(context.Context) (*differ.LocalRemoteResources[componentsapi.Component], error) {
			return &differ.LocalRemoteResources[componentsapi.Component]{
				Local:  []componentsapi.Component{*comp2},
				Remote: []componentsapi.Component{},
			}, nil
		})

		select {
		case recCh <- struct{}{}:
		case <-time.After(time.Second * 3):
			t.Error("reconciler did not return in time")
		}

		select {
		case e := <-deleteCh:
			assert.Equal(t, *comp2, e)
		case <-time.After(time.Second * 3):
			t.Error("did not get event in time")
		}

		cancel()

		select {
		case err := <-errCh:
			require.NoError(t, err)
		case <-time.After(time.Second * 3):
			t.Error("reconciler did not return in time")
		}
	})
}

func Test_reconcile(t *testing.T) {
	const caseNum = 100

	deleted := make([]componentsapi.Component, caseNum)
	updated := make([]componentsapi.Component, caseNum)
	created := make([]componentsapi.Component, caseNum)

	fz := fuzz.New()
	for i := 0; i < caseNum; i++ {
		fz.Fuzz(&deleted[i])
		fz.Fuzz(&updated[i])
		fz.Fuzz(&created[i])
	}

	eventCh := make(chan componentsapi.Component)
	mngr := newFakeManager()
	mngr.deleteFn = func(_ context.Context, c componentsapi.Component) {
		eventCh <- c
	}
	mngr.updateFn = func(_ context.Context, c componentsapi.Component) {
		eventCh <- c
	}

	r := &Reconciler[componentsapi.Component]{
		manager: mngr,
	}

	t.Run("events should be sent in the correct grouped order", func(t *testing.T) {
		recDone := make(chan struct{})
		go func() {
			defer close(recDone)
			r.reconcile(context.Background(), &differ.Result[componentsapi.Component]{
				Deleted: deleted,
				Updated: updated,
				Created: created,
			})
		}()

		var got []componentsapi.Component
		for i := 0; i < caseNum; i++ {
			select {
			case e := <-eventCh:
				got = append(got, e)
			case <-time.After(time.Second * 3):
				t.Error("did not get event in time")
			}
		}
		assert.ElementsMatch(t, deleted, got)

		got = []componentsapi.Component{}
		for i := 0; i < caseNum; i++ {
			select {
			case e := <-eventCh:
				got = append(got, e)
			case <-time.After(time.Second * 3):
				t.Error("did not get event in time")
			}
		}
		assert.ElementsMatch(t, updated, got)

		got = []componentsapi.Component{}
		for i := 0; i < caseNum; i++ {
			select {
			case e := <-eventCh:
				got = append(got, e)
			case <-time.After(time.Second * 3):
				t.Error("did not get event in time")
			}
		}
		assert.ElementsMatch(t, created, got)

		select {
		case <-recDone:
		case <-time.After(time.Second * 3):
			t.Error("did not get reconcile return in time")
		}
	})
}

func Test_handleEvent(t *testing.T) {
	mngr := newFakeManager()

	updateCalled, deleteCalled := 0, 0
	comp1 := componentsapi.Component{ObjectMeta: metav1.ObjectMeta{Name: "foo"}}

	mngr.deleteFn = func(_ context.Context, c componentsapi.Component) {
		assert.Equal(t, comp1, c)
		deleteCalled++
	}
	mngr.updateFn = func(_ context.Context, c componentsapi.Component) {
		assert.Equal(t, comp1, c)
		updateCalled++
	}

	r := &Reconciler[componentsapi.Component]{manager: mngr}

	assert.Equal(t, 0, updateCalled)
	assert.Equal(t, 0, deleteCalled)

	r.handleEvent(context.Background(), &loader.Event[componentsapi.Component]{
		Type:     operator.ResourceEventType_CREATED,
		Resource: comp1,
	})
	assert.Equal(t, 1, updateCalled)
	assert.Equal(t, 0, deleteCalled)

	r.handleEvent(context.Background(), &loader.Event[componentsapi.Component]{
		Type:     operator.ResourceEventType_UPDATED,
		Resource: comp1,
	})
	assert.Equal(t, 2, updateCalled)
	assert.Equal(t, 0, deleteCalled)

	r.handleEvent(context.Background(), &loader.Event[componentsapi.Component]{
		Type:     operator.ResourceEventType_DELETED,
		Resource: comp1,
	})
	assert.Equal(t, 2, updateCalled)
	assert.Equal(t, 1, deleteCalled)
}

type fakeManager struct {
	loader.Loader[componentsapi.Component]
	updateFn func(context.Context, componentsapi.Component)
	deleteFn func(context.Context, componentsapi.Component)
}

func newFakeManager() *fakeManager {
	return &fakeManager{
		updateFn: func(context.Context, componentsapi.Component) {},
		deleteFn: func(context.Context, componentsapi.Component) {},
	}
}

//nolint:unused
func (f *fakeManager) update(ctx context.Context, comp componentsapi.Component) {
	f.updateFn(ctx, comp)
}

//nolint:unused
func (f *fakeManager) delete(ctx context.Context, comp componentsapi.Component) {
	f.deleteFn(ctx, comp)
}
mikeee/dapr
pkg/runtime/hotreload/reconciler/reconciler_test.go
GO
mit
11,706
/*
Copyright 2024 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package reconciler

import (
	"context"

	subapi "github.com/dapr/dapr/pkg/apis/subscriptions/v2alpha1"
	"github.com/dapr/dapr/pkg/runtime/compstore"
	"github.com/dapr/dapr/pkg/runtime/hotreload/loader"
	"github.com/dapr/dapr/pkg/runtime/processor"
)

type subscriptions struct {
	store *compstore.ComponentStore
	proc  *processor.Processor

	loader.Loader[subapi.Subscription]
}

// The go linter does not yet understand that these functions are being used
// by the generic reconciler.
//
//nolint:unused
func (s *subscriptions) update(ctx context.Context, sub subapi.Subscription) {
	oldSub, exists := s.store.GetDeclarativeSubscription(sub.Name)
	if exists {
		log.Infof("Closing existing Subscription to reload: %s", oldSub.Name)
		if err := s.proc.CloseSubscription(ctx, oldSub.Comp); err != nil {
			log.Errorf("Failed to close existing Subscription: %s", err)
			return
		}
	}

	log.Infof("Adding Subscription for processing: %s", sub.Name)
	if s.proc.AddPendingSubscription(ctx, sub) {
		log.Infof("Subscription updated: %s", sub.Name)
	}
}

//nolint:unused
func (s *subscriptions) delete(ctx context.Context, sub subapi.Subscription) {
	if err := s.proc.CloseSubscription(ctx, &sub); err != nil {
		log.Errorf("Failed to close Subscription: %s", err)
	}
}
mikeee/dapr
pkg/runtime/hotreload/reconciler/subscriptions.go
GO
mit
1,836
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package meta

import (
	"fmt"
	"strings"

	"github.com/google/uuid"

	"github.com/dapr/components-contrib/metadata"
	"github.com/dapr/dapr/pkg/apis/common"
	compapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	"github.com/dapr/dapr/pkg/components"
	"github.com/dapr/dapr/pkg/modes"
)

const WasmStrictSandboxMetadataKey = "strictSandbox"

type Options struct {
	ID            string
	PodName       string
	Namespace     string
	StrictSandbox bool
	Mode          modes.DaprMode
}

type Meta struct {
	id            string
	podName       string
	namespace     string
	strictSandbox bool
	mode          modes.DaprMode
}

func New(options Options) *Meta {
	return &Meta{
		podName:       options.PodName,
		namespace:     options.Namespace,
		strictSandbox: options.StrictSandbox,
		id:            options.ID,
		mode:          options.Mode,
	}
}

func (m *Meta) ToBaseMetadata(comp compapi.Component) (metadata.Base, error) {
	// Add global wasm strict sandbox config to the wasm component metadata
	if components.IsWasmComponentType(comp.Spec.Type) {
		m.AddWasmStrictSandbox(&comp)
	}

	props, err := m.convertItemsToProps(comp.Spec.Metadata)
	if err != nil {
		return metadata.Base{}, err
	}

	return metadata.Base{
		Properties: props,
		Name:       comp.Name,
	}, nil
}

func (m *Meta) convertItemsToProps(items []common.NameValuePair) (map[string]string, error) {
	properties := map[string]string{}
	for _, c := range items {
		val := c.Value.String()
		for strings.Contains(val, "{uuid}") {
			u, err := uuid.NewRandom()
			if err != nil {
				return nil, fmt.Errorf("failed to generate UUID: %w", err)
			}
			val = strings.Replace(val, "{uuid}", u.String(), 1)
		}

		if strings.Contains(val, "{podName}") {
			if m.podName == "" {
				return nil, fmt.Errorf("failed to parse metadata: property %s refers to {podName} but podName is not set", c.Name)
			}

			val = strings.ReplaceAll(val, "{podName}", m.podName)
		}

		val = strings.ReplaceAll(val, "{namespace}", m.namespace+"."+m.id)
		val = strings.ReplaceAll(val, "{appID}", m.id)
		properties[c.Name] = val
	}
	return properties, nil
}

func (m *Meta) AuthSecretStoreOrDefault(resource Resource) string {
	secretStore := resource.GetSecretStore()
	if secretStore == "" {
		switch m.mode {
		case modes.KubernetesMode:
			return "kubernetes"
		}
	}
	return secretStore
}

func ContainsNamespace(items []common.NameValuePair) bool {
	for _, c := range items {
		val := c.Value.String()
		if strings.Contains(val, "{namespace}") {
			return true
		}
	}
	return false
}

// AddWasmStrictSandbox adds global wasm strict sandbox configuration to component metadata.
// When strict sandbox is enabled, WASM components always run in strict mode regardless of their configuration.
// When strict sandbox is disabled or unset, keep the original component configuration.
func (m *Meta) AddWasmStrictSandbox(comp *compapi.Component) {
	// If the global strict sandbox is disabled (or unset), it is not enforced.
	if !m.strictSandbox {
		return
	}

	// If the metadata already contains the strict sandbox key, update the
	// value to the global strict sandbox config.
	for i, c := range comp.Spec.Metadata {
		if strings.EqualFold(c.Name, WasmStrictSandboxMetadataKey) {
			comp.Spec.Metadata[i].SetValue([]byte("true"))
			return
		}
	}

	// If the metadata does not contain the strict sandbox key, add it.
	sandbox := common.NameValuePair{
		Name: WasmStrictSandboxMetadataKey,
	}
	sandbox.SetValue([]byte("true"))
	comp.Spec.Metadata = append(comp.Spec.Metadata, sandbox)
}
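
// Placeholder expansion example (illustrative; the IDs and values below are
// hypothetical): for a Meta built with ID "myapp" and namespace "prod", a
// metadata value "{namespace}-{appID}-{uuid}" expands to roughly
// "prod.myapp-myapp-<random-uuid>", since {namespace} is replaced with
// "<namespace>.<appID>" and each {uuid} occurrence gets a fresh random UUID.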
mikeee/dapr
pkg/runtime/meta/meta.go
GO
mit
4,084
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package meta

import (
	"strconv"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"

	"github.com/dapr/dapr/pkg/apis/common"
	compapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	"github.com/dapr/dapr/pkg/components"
	"github.com/dapr/dapr/pkg/modes"
)

func TestMetadataItemsToPropertiesConversion(t *testing.T) {
	t.Run("string", func(t *testing.T) {
		meta := New(Options{Mode: modes.StandaloneMode})
		items := []common.NameValuePair{
			{
				Name: "a",
				Value: common.DynamicValue{
					JSON: v1.JSON{Raw: []byte("b")},
				},
			},
		}
		m, err := meta.convertItemsToProps(items)
		require.NoError(t, err)
		assert.Len(t, m, 1)
		assert.Equal(t, "b", m["a"])
	})

	t.Run("int", func(t *testing.T) {
		meta := New(Options{Mode: modes.StandaloneMode})
		items := []common.NameValuePair{
			{
				Name: "a",
				Value: common.DynamicValue{
					JSON: v1.JSON{Raw: []byte(strconv.Itoa(6))},
				},
			},
		}
		m, err := meta.convertItemsToProps(items)
		require.NoError(t, err)
		assert.Len(t, m, 1)
		assert.Equal(t, "6", m["a"])
	})

	t.Run("bool", func(t *testing.T) {
		meta := New(Options{Mode: modes.StandaloneMode})
		items := []common.NameValuePair{
			{
				Name: "a",
				Value: common.DynamicValue{
					JSON: v1.JSON{Raw: []byte("true")},
				},
			},
		}
		m, err := meta.convertItemsToProps(items)
		require.NoError(t, err)
		assert.Len(t, m, 1)
		assert.Equal(t, "true", m["a"])
	})

	t.Run("float", func(t *testing.T) {
		meta := New(Options{Mode: modes.StandaloneMode})
		items := []common.NameValuePair{
			{
				Name: "a",
				Value: common.DynamicValue{
					JSON: v1.JSON{Raw: []byte("5.5")},
				},
			},
		}
		m, err := meta.convertItemsToProps(items)
		require.NoError(t, err)
		assert.Len(t, m, 1)
		assert.Equal(t, "5.5", m["a"])
	})

	t.Run("JSON string", func(t *testing.T) {
		meta := New(Options{Mode: modes.StandaloneMode})
		items := []common.NameValuePair{
			{
				Name: "a",
				Value: common.DynamicValue{
					JSON: v1.JSON{Raw: []byte(`"hello there"`)},
				},
			},
		}
		m, err := meta.convertItemsToProps(items)
		require.NoError(t, err)
		assert.Len(t, m, 1)
		assert.Equal(t, "hello there", m["a"])
	})
}

func TestMetadataContainsNamespace(t *testing.T) {
	t.Run("namespace field present", func(t *testing.T) {
		r := ContainsNamespace(
			[]common.NameValuePair{
				{
					Value: common.DynamicValue{
						JSON: v1.JSON{Raw: []byte("{namespace}")},
					},
				},
			},
		)
		assert.True(t, r)
	})

	t.Run("namespace field not present", func(t *testing.T) {
		r := ContainsNamespace(
			[]common.NameValuePair{
				{},
			},
		)
		assert.False(t, r)
	})
}

func TestMetadataOverrideWasmStrictSandbox(t *testing.T) {
	t.Run("original set to false override to true", func(t *testing.T) {
		meta := New(Options{Mode: modes.StandaloneMode, StrictSandbox: true})

		// component with WasmStrictSandbox set to false
		items := []common.NameValuePair{
			{
				Name: WasmStrictSandboxMetadataKey,
				Value: common.DynamicValue{
					JSON: v1.JSON{Raw: []byte(`false`)},
				},
			},
		}
		com := compapi.Component{
			Spec: compapi.ComponentSpec{
				Metadata: items,
			},
		}

		// override WasmStrictSandbox to true
		meta.AddWasmStrictSandbox(&com)

		// check that WasmStrictSandbox is set to true
		base, err := meta.ToBaseMetadata(com)
		require.NoError(t, err)
		assert.Equal(t, "true", base.Properties[WasmStrictSandboxMetadataKey])
	})

	t.Run("global strict sandbox config not set", func(t *testing.T) {
		meta := New(Options{Mode: modes.StandaloneMode})

		// component with WasmStrictSandbox set to true
		items := []common.NameValuePair{
			{
				Name: WasmStrictSandboxMetadataKey,
				Value: common.DynamicValue{
					JSON: v1.JSON{Raw: []byte(`true`)},
				},
			},
		}
		com := compapi.Component{
			Spec: compapi.ComponentSpec{
				Metadata: items,
			},
		}

		// set global strictSandbox config
		meta.AddWasmStrictSandbox(&com)

		// check that WasmStrictSandbox is set to true
		base, err := meta.ToBaseMetadata(com)
		require.NoError(t, err)
		assert.Equal(t, "true", base.Properties[WasmStrictSandboxMetadataKey])
	})

	t.Run("auto set strict sandbox to wasm components", func(t *testing.T) {
		// register test wasm component
		components.RegisterWasmComponentType(components.CategoryMiddleware, "test")

		meta := New(Options{Mode: modes.StandaloneMode, StrictSandbox: true})

		// component with WasmStrictSandbox set to false
		items := []common.NameValuePair{
			{
				Name: WasmStrictSandboxMetadataKey,
				Value: common.DynamicValue{
					JSON: v1.JSON{Raw: []byte(`false`)},
				},
			},
		}
		com := compapi.Component{
			Spec: compapi.ComponentSpec{
				Type:     "middleware.test",
				Metadata: items,
			},
		}

		// component that is not registered as a wasm component
		noneWasmComp := compapi.Component{
			Spec: compapi.ComponentSpec{
				Type:     "middleware.none",
				Metadata: []common.NameValuePair{},
			},
		}

		wasm, err := meta.ToBaseMetadata(com)
		noneWasm, err2 := meta.ToBaseMetadata(noneWasmComp)
		require.NoError(t, err)
		require.NoError(t, err2)

		assert.Equal(t, "true", wasm.Properties[WasmStrictSandboxMetadataKey])
		assert.Equal(t, "", noneWasm.Properties[WasmStrictSandboxMetadataKey])
	})
}
mikeee/dapr
pkg/runtime/meta/meta_test.go
GO
mit
5,878
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package meta

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/dapr/dapr/pkg/apis/common"
)

// Resource interface that applies to both Component and HTTPEndpoint
// resources.
type Resource interface {
	Kind() string
	APIVersion() string
	GetName() string
	GetNamespace() string
	LogName() string
	GetSecretStore() string
	GetScopes() []string
	NameValuePairs() []common.NameValuePair
	ClientObject() client.Object

	// Returns a deep copy of the resource, with the object meta set only with
	// Name and Namespace.
	EmptyMetaDeepCopy() metav1.Object
}
mikeee/dapr
pkg/runtime/meta/resource.go
GO
mit
1,181
//go:build unit
// +build unit

/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package mock

import (
	"context"

	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/components-contrib/secretstores"
)

type SecretStore struct {
	secretstores.SecretStore
	CloseErr error
}

func (s *SecretStore) GetSecret(ctx context.Context, req secretstores.GetSecretRequest) (secretstores.GetSecretResponse, error) {
	return secretstores.GetSecretResponse{
		Data: map[string]string{
			"key1":   "value1",
			"_value": "_value_data",
			"name1":  "value1",
		},
	}, nil
}

func (s *SecretStore) Init(ctx context.Context, metadata secretstores.Metadata) error {
	return nil
}

func (s *SecretStore) Close() error {
	return s.CloseErr
}

var TestInputBindingData = []byte("fakedata")

type Binding struct {
	ReadErrorCh chan bool
	Data        string
	Metadata    map[string]string
	CloseErr    error
}

func (b *Binding) Init(ctx context.Context, metadata bindings.Metadata) error {
	return nil
}

func (b *Binding) Read(ctx context.Context, handler bindings.Handler) error {
	b.Data = string(TestInputBindingData)
	metadata := map[string]string{}
	if b.Metadata != nil {
		metadata = b.Metadata
	}

	resp := &bindings.ReadResponse{
		Metadata: metadata,
		Data:     []byte(b.Data),
	}

	if b.ReadErrorCh != nil {
		go func() {
			_, err := handler(ctx, resp)
			b.ReadErrorCh <- (err != nil)
		}()
		return nil
	}

	_, err := handler(ctx, resp)
	return err
}

func (b *Binding) Operations() []bindings.OperationKind {
	return []bindings.OperationKind{bindings.CreateOperation, bindings.ListOperation}
}

func (b *Binding) Invoke(ctx context.Context, req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
	return nil, nil
}

func (b *Binding) Close() error {
	return b.CloseErr
}

type MockKubernetesStateStore struct {
	Callback func(context.Context) error
	CloseFn  func() error
}

func (m *MockKubernetesStateStore) Init(ctx context.Context, metadata secretstores.Metadata) error {
	if m.Callback != nil {
		return m.Callback(ctx)
	}
	return nil
}

func (m *MockKubernetesStateStore) GetSecret(ctx context.Context, req secretstores.GetSecretRequest) (secretstores.GetSecretResponse, error) {
	return secretstores.GetSecretResponse{
		Data: map[string]string{
			"key1":   "value1",
			"_value": "_value_data",
			"name1":  "value1",
		},
	}, nil
}

func (m *MockKubernetesStateStore) BulkGetSecret(ctx context.Context, req secretstores.BulkGetSecretRequest) (secretstores.BulkGetSecretResponse, error) {
	response := map[string]map[string]string{}
	response["k8s-secret"] = map[string]string{
		"key1":   "value1",
		"_value": "_value_data",
		"name1":  "value1",
	}

	return secretstores.BulkGetSecretResponse{
		Data: response,
	}, nil
}

func (m *MockKubernetesStateStore) Close() error {
	if m.CloseFn != nil {
		return m.CloseFn()
	}
	return nil
}

func (m *MockKubernetesStateStore) Features() []secretstores.Feature {
	return []secretstores.Feature{}
}

func NewMockKubernetesStore() secretstores.SecretStore {
	return &MockKubernetesStateStore{}
}

func NewMockKubernetesStoreWithInitCallback(cb func(context.Context) error) secretstores.SecretStore {
	return &MockKubernetesStateStore{Callback: cb}
}

func NewMockKubernetesStoreWithClose(closeFn func() error) secretstores.SecretStore {
	return &MockKubernetesStateStore{CloseFn: closeFn}
}
mikeee/dapr
pkg/runtime/mock/mock.go
GO
mit
3,877
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package binding

import (
	"context"
	"errors"
	"fmt"
	"io"
	"strings"
	"sync"

	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/dapr/pkg/api/grpc/manager"
	"github.com/dapr/dapr/pkg/apis/common"
	compapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	compbindings "github.com/dapr/dapr/pkg/components/bindings"
	"github.com/dapr/dapr/pkg/config"
	diag "github.com/dapr/dapr/pkg/diagnostics"
	"github.com/dapr/dapr/pkg/resiliency"
	"github.com/dapr/dapr/pkg/runtime/channels"
	"github.com/dapr/dapr/pkg/runtime/compstore"
	rterrors "github.com/dapr/dapr/pkg/runtime/errors"
	"github.com/dapr/dapr/pkg/runtime/meta"
	"github.com/dapr/kit/logger"
)

const (
	ComponentDirection  = "direction"
	ComponentTypeInput  = "input"
	ComponentTypeOutput = "output"

	// output bindings concurrency.
	ConcurrencyParallel   = "parallel"
	ConcurrencySequential = "sequential"
)

var log = logger.NewLogger("dapr.runtime.processor.binding")

type Options struct {
	IsHTTP bool

	Registry       *compbindings.Registry
	ComponentStore *compstore.ComponentStore
	Meta           *meta.Meta
	Resiliency     resiliency.Provider
	GRPC           *manager.Manager
	TracingSpec    *config.TracingSpec
	Channels       *channels.Channels
}

type binding struct {
	isHTTP bool

	registry    *compbindings.Registry
	resiliency  resiliency.Provider
	compStore   *compstore.ComponentStore
	meta        *meta.Meta
	channels    *channels.Channels
	tracingSpec *config.TracingSpec
	grpc        *manager.Manager

	lock                 sync.Mutex
	readingBindings      bool
	stopForever          bool
	subscribeBindingList []string

	inputCancels map[string]context.CancelFunc
	wg           sync.WaitGroup
}

func New(opts Options) *binding {
	return &binding{
		registry:     opts.Registry,
		compStore:    opts.ComponentStore,
		meta:         opts.Meta,
		isHTTP:       opts.IsHTTP,
		resiliency:   opts.Resiliency,
		tracingSpec:  opts.TracingSpec,
		grpc:         opts.GRPC,
		channels:     opts.Channels,
		inputCancels: make(map[string]context.CancelFunc),
	}
}

func (b *binding) Init(ctx context.Context, comp compapi.Component) error {
	b.lock.Lock()
	defer b.lock.Unlock()

	var found bool

	if b.registry.HasInputBinding(comp.Spec.Type, comp.Spec.Version) {
		if err := b.initInputBinding(ctx, comp); err != nil {
			return err
		}
		found = true
	}

	if b.registry.HasOutputBinding(comp.Spec.Type, comp.Spec.Version) {
		if err := b.initOutputBinding(ctx, comp); err != nil {
			return err
		}
		found = true
	}

	if !found {
		diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "creation", comp.ObjectMeta.Name)
		return fmt.Errorf("couldn't find binding %s", comp.LogName())
	}

	return nil
}

func (b *binding) Close(comp compapi.Component) error {
	b.lock.Lock()
	defer b.lock.Unlock()

	var errs []error

	inbinding, ok := b.compStore.GetInputBinding(comp.Name)
	if ok {
		defer b.compStore.DeleteInputBinding(comp.Name)
		if cancel := b.inputCancels[comp.Name]; cancel != nil {
			cancel()
		}
		delete(b.inputCancels, comp.Name)
		if err := inbinding.Close(); err != nil {
			errs = append(errs, err)
		}
	}

	outbinding, ok := b.compStore.GetOutputBinding(comp.Name)
	if ok {
		defer b.compStore.DeleteOutputBinding(comp.Name)
		if err := b.closeOutputBinding(outbinding); err != nil {
			errs = append(errs, err)
		}
	}

	return errors.Join(errs...)
}

func (b *binding) closeOutputBinding(binding bindings.OutputBinding) error {
	closer, ok := binding.(io.Closer)
	if ok && closer != nil {
		return closer.Close()
	}
	return nil
}

func (b *binding) initInputBinding(ctx context.Context, comp compapi.Component) error {
	if !b.isBindingOfDirection(ComponentTypeInput, comp.Spec.Metadata) {
		return nil
	}

	fName := comp.LogName()
	binding, err := b.registry.CreateInputBinding(comp.Spec.Type, comp.Spec.Version, fName)
	if err != nil {
		diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "creation", comp.ObjectMeta.Name)
		return rterrors.NewInit(rterrors.CreateComponentFailure, fName, err)
	}

	meta, err := b.meta.ToBaseMetadata(comp)
	if err != nil {
		diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "init", comp.ObjectMeta.Name)
		return rterrors.NewInit(rterrors.CreateComponentFailure, fName, err)
	}

	err = binding.Init(ctx, bindings.Metadata{Base: meta})
	if err != nil {
		diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "init", comp.ObjectMeta.Name)
		return rterrors.NewInit(rterrors.InitComponentFailure, fName, err)
	}

	log.Infof("successful init for input binding (%s)", comp.LogName())

	b.compStore.AddInputBindingRoute(comp.Name, comp.Name)
	for _, item := range comp.Spec.Metadata {
		if item.Name == "route" {
			b.compStore.AddInputBindingRoute(comp.ObjectMeta.Name, item.Value.String())
			break
		}
	}

	b.compStore.AddInputBinding(comp.Name, binding)
	diag.DefaultMonitoring.ComponentInitialized(comp.Spec.Type)

	if b.readingBindings {
		return b.startInputBinding(comp, binding)
	}

	return nil
}

func (b *binding) initOutputBinding(ctx context.Context, comp compapi.Component) error {
	if !b.isBindingOfDirection(ComponentTypeOutput, comp.Spec.Metadata) {
		return nil
	}

	fName := comp.LogName()
	binding, err := b.registry.CreateOutputBinding(comp.Spec.Type, comp.Spec.Version, fName)
	if err != nil {
		diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "creation", comp.ObjectMeta.Name)
		return rterrors.NewInit(rterrors.CreateComponentFailure, fName, err)
	}

	if binding != nil {
		meta, err := b.meta.ToBaseMetadata(comp)
		if err != nil {
			diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "init", comp.ObjectMeta.Name)
			return rterrors.NewInit(rterrors.InitComponentFailure, fName, err)
		}

		err = binding.Init(ctx, bindings.Metadata{Base: meta})
		if err != nil {
			diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "init", comp.ObjectMeta.Name)
			return rterrors.NewInit(rterrors.InitComponentFailure, fName, err)
		}
		log.Infof("successful init for output binding (%s)", comp.LogName())
		b.compStore.AddOutputBinding(comp.ObjectMeta.Name, binding)
		diag.DefaultMonitoring.ComponentInitialized(comp.Spec.Type)
	}

	return nil
}

func (b *binding) isBindingOfDirection(direction string, metadata []common.NameValuePair) bool {
	directionFound := false

	for _, m := range metadata {
		if strings.EqualFold(m.Name, ComponentDirection) {
			directionFound = true

			directions := strings.Split(m.Value.String(), ",")
			for _, d := range directions {
				if strings.TrimSpace(strings.ToLower(d)) == direction {
					return true
				}
			}
		}
	}

	return !directionFound
}
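
// Direction semantics (a summary of isBindingOfDirection; the values are
// illustrative): a "direction" metadata entry of "input", "output", or
// "input, output" restricts which sides of the binding are initialized. If no
// "direction" entry is present, the component is treated as both input and
// output, and both registries are consulted.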
mikeee/dapr
pkg/runtime/processor/binding/binding.go
GO
mit
7,161
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package binding

import (
	"testing"

	"github.com/stretchr/testify/assert"
	v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"

	"github.com/dapr/dapr/pkg/apis/common"
)

func TestIsBindingOfDirection(t *testing.T) {
	t.Run("no direction in metadata for input binding", func(t *testing.T) {
		m := []common.NameValuePair{}
		r := (new(binding)).isBindingOfDirection("input", m)
		assert.True(t, r)
	})

	t.Run("no direction in metadata for output binding", func(t *testing.T) {
		m := []common.NameValuePair{}
		r := (new(binding)).isBindingOfDirection("output", m)
		assert.True(t, r)
	})

	t.Run("input direction in metadata", func(t *testing.T) {
		m := []common.NameValuePair{
			{
				Name: "direction",
				Value: common.DynamicValue{
					JSON: v1.JSON{
						Raw: []byte("input"),
					},
				},
			},
		}
		r := (new(binding)).isBindingOfDirection("input", m)
		f := (new(binding)).isBindingOfDirection("output", m)

		assert.True(t, r)
		assert.False(t, f)
	})

	t.Run("output direction in metadata", func(t *testing.T) {
		m := []common.NameValuePair{
			{
				Name: "direction",
				Value: common.DynamicValue{
					JSON: v1.JSON{
						Raw: []byte("output"),
					},
				},
			},
		}
		r := (new(binding)).isBindingOfDirection("output", m)
		f := (new(binding)).isBindingOfDirection("input", m)

		assert.True(t, r)
		assert.False(t, f)
	})

	t.Run("input and output direction in metadata", func(t *testing.T) {
		m := []common.NameValuePair{
			{
				Name: "direction",
				Value: common.DynamicValue{
					JSON: v1.JSON{
						Raw: []byte("input, output"),
					},
				},
			},
		}
		r := (new(binding)).isBindingOfDirection("output", m)
		f := (new(binding)).isBindingOfDirection("input", m)

		assert.True(t, r)
		assert.True(t, f)
	})

	t.Run("invalid direction for input binding", func(t *testing.T) {
		m := []common.NameValuePair{
			{
				Name: "direction",
				Value: common.DynamicValue{
					JSON: v1.JSON{
						Raw: []byte("aaa"),
					},
				},
			},
		}
		f := (new(binding)).isBindingOfDirection("input", m)
		assert.False(t, f)
	})

	t.Run("invalid direction for output binding", func(t *testing.T) {
		m := []common.NameValuePair{
			{
				Name: "direction",
				Value: common.DynamicValue{
					JSON: v1.JSON{
						Raw: []byte("aaa"),
					},
				},
			},
		}
		f := (new(binding)).isBindingOfDirection("output", m)
		assert.False(t, f)
	})
}
mikeee/dapr
pkg/runtime/processor/binding/binding_test.go
GO
mit
2,952
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package binding_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/dapr/pkg/api/grpc/manager"
	compapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	"github.com/dapr/dapr/pkg/config"
	"github.com/dapr/dapr/pkg/modes"
	"github.com/dapr/dapr/pkg/runtime/compstore"
	"github.com/dapr/dapr/pkg/runtime/meta"
	"github.com/dapr/dapr/pkg/runtime/processor"
	"github.com/dapr/dapr/pkg/runtime/registry"
	"github.com/dapr/dapr/pkg/security/fake"
	daprt "github.com/dapr/dapr/pkg/testing"
	"github.com/dapr/kit/logger"
)

func TestInitBindings(t *testing.T) {
	t.Run("single input binding", func(t *testing.T) {
		reg := registry.New(registry.NewOptions())
		reg.Bindings().RegisterInputBinding(
			func(_ logger.Logger) bindings.InputBinding {
				return &daprt.MockBinding{}
			},
			"testInputBinding",
		)

		proc := processor.New(processor.Options{
			Registry:       reg,
			ComponentStore: compstore.New(),
			GlobalConfig:   new(config.Configuration),
			Meta:           meta.New(meta.Options{}),
			GRPC:           manager.NewManager(nil, modes.StandaloneMode, &manager.AppChannelConfig{Port: 0}),
			Security:       fake.New(),
		})

		c := compapi.Component{}
		c.ObjectMeta.Name = "testInputBinding"
		c.Spec.Type = "bindings.testInputBinding"
		err := proc.Init(context.TODO(), c)
		require.NoError(t, err)
	})

	t.Run("single output binding", func(t *testing.T) {
		reg := registry.New(registry.NewOptions())
		reg.Bindings().RegisterOutputBinding(
			func(_ logger.Logger) bindings.OutputBinding {
				return &daprt.MockBinding{}
			},
			"testOutputBinding",
		)

		proc := processor.New(processor.Options{
			Registry:       reg,
			ComponentStore: compstore.New(),
			GlobalConfig:   new(config.Configuration),
			Meta:           meta.New(meta.Options{}),
			Security:       fake.New(),
		})

		c := compapi.Component{}
		c.ObjectMeta.Name = "testOutputBinding"
		c.Spec.Type = "bindings.testOutputBinding"
		err := proc.Init(context.TODO(), c)
		require.NoError(t, err)
	})

	t.Run("one input binding, one output binding", func(t *testing.T) {
		reg := registry.New(registry.NewOptions())
		reg.Bindings().RegisterInputBinding(
			func(_ logger.Logger) bindings.InputBinding {
				return &daprt.MockBinding{}
			},
			"testinput",
		)
		reg.Bindings().RegisterOutputBinding(
			func(_ logger.Logger) bindings.OutputBinding {
				return &daprt.MockBinding{}
			},
			"testoutput",
		)

		proc := processor.New(processor.Options{
			Registry:       reg,
			ComponentStore: compstore.New(),
			GlobalConfig:   new(config.Configuration),
			Meta:           meta.New(meta.Options{}),
			GRPC:           manager.NewManager(nil, modes.StandaloneMode, &manager.AppChannelConfig{Port: 0}),
			Security:       fake.New(),
		})

		input := compapi.Component{}
		input.ObjectMeta.Name = "testinput"
		input.Spec.Type = "bindings.testinput"
		err := proc.Init(context.TODO(), input)
		require.NoError(t, err)

		output := compapi.Component{}
		output.ObjectMeta.Name = "testoutput"
		output.Spec.Type = "bindings.testoutput"
		err = proc.Init(context.TODO(), output)
		require.NoError(t, err)
	})

	t.Run("one non-existent binding", func(t *testing.T) {
		reg := registry.New(registry.NewOptions())
		// No binding is registered; initializing a binding that does not
		// exist must fail.
		proc := processor.New(processor.Options{
			Registry:       reg,
			ComponentStore: compstore.New(),
			GlobalConfig:   new(config.Configuration),
			Meta:           meta.New(meta.Options{}),
			GRPC:           manager.NewManager(nil, modes.StandaloneMode, &manager.AppChannelConfig{Port: 0}),
			Security:       fake.New(),
		})

		c := compapi.Component{}
		c.ObjectMeta.Name = "testNotExistBinding"
		c.Spec.Type = "bindings.testNotExistBinding"
		err := proc.Init(context.TODO(), c)
		require.Error(t, err)
	})
}
mikeee/dapr
pkg/runtime/processor/binding/init_test.go
GO
mit
4,425
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package binding

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"strings"
	"time"

	"go.opentelemetry.io/otel/trace"
	md "google.golang.org/grpc/metadata"
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/types/known/emptypb"

	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/components-contrib/state"
	componentsV1alpha1 "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	"github.com/dapr/dapr/pkg/components"
	stateLoader "github.com/dapr/dapr/pkg/components/state"
	diag "github.com/dapr/dapr/pkg/diagnostics"
	diagUtils "github.com/dapr/dapr/pkg/diagnostics/utils"
	invokev1 "github.com/dapr/dapr/pkg/messaging/v1"
	runtimev1pb "github.com/dapr/dapr/pkg/proto/runtime/v1"
	"github.com/dapr/dapr/pkg/resiliency"
)

func (b *binding) StartReadingFromBindings(ctx context.Context) error {
	b.lock.Lock()
	defer b.lock.Unlock()

	if b.stopForever {
		return nil
	}

	b.readingBindings = true

	if b.channels.AppChannel() == nil {
		return errors.New("app channel not initialized")
	}

	// Clean any previous state
	for _, cancel := range b.inputCancels {
		cancel()
	}
	b.inputCancels = make(map[string]context.CancelFunc)

	comps := b.compStore.ListComponents()
	bindings := make(map[string]componentsV1alpha1.Component)
	for i, c := range comps {
		if strings.HasPrefix(c.Spec.Type, string(components.CategoryBindings)) {
			bindings[c.ObjectMeta.Name] = comps[i]
		}
	}

	for name, bind := range b.compStore.ListInputBindings() {
		if err := b.startInputBinding(bindings[name], bind); err != nil {
			return err
		}
	}
	return nil
}

func (b *binding) startInputBinding(comp componentsV1alpha1.Component, binding bindings.InputBinding) error {
	var isSubscribed bool

	meta, err := b.meta.ToBaseMetadata(comp)
	if err != nil {
		return err
	}

	m := meta.Properties

	ctx, cancel := context.WithCancel(context.Background())
	if isBindingOfExplicitDirection(ComponentTypeInput, m) {
		isSubscribed = true
	} else {
		var err error
		isSubscribed, err = b.isAppSubscribedToBinding(ctx, comp.Name)
		if err != nil {
			cancel()
			return err
		}
	}

	if !isSubscribed {
		log.Infof("app has not subscribed to binding %s.", comp.Name)
		cancel()
		return nil
	}

	if err := b.readFromBinding(ctx, comp.Name, binding); err != nil {
		log.Errorf("error reading from input binding %s: %s", comp.Name, err)
		cancel()
		return nil
	}

	b.inputCancels[comp.Name] = cancel
	return nil
}

func (b *binding) StopReadingFromBindings(forever bool) {
	b.lock.Lock()
	defer b.lock.Unlock()

	defer b.wg.Wait()

	if forever {
		b.stopForever = true
	}

	b.readingBindings = false

	for _, cancel := range b.inputCancels {
		cancel()
	}
	b.inputCancels = make(map[string]context.CancelFunc)
}

func (b *binding) sendBatchOutputBindingsParallel(ctx context.Context, to []string, data []byte) {
	b.wg.Add(len(to))
	for _, dst := range to {
		go func(name string) {
			defer b.wg.Done()

			_, err := b.SendToOutputBinding(ctx, name, &bindings.InvokeRequest{
				Data:      data,
				Operation: bindings.CreateOperation,
			})
			if err != nil {
				log.Error(err)
			}
		}(dst)
	}
}

func (b *binding) sendBatchOutputBindingsSequential(ctx context.Context, to []string, data []byte) error {
	for _, dst := range to {
		_, err := b.SendToOutputBinding(ctx, dst, &bindings.InvokeRequest{
			Data:      data,
			Operation: bindings.CreateOperation,
		})
		if err != nil {
			return err
		}
	}
	return nil
}

func (b *binding) SendToOutputBinding(ctx context.Context, name string, req *bindings.InvokeRequest) (*bindings.InvokeResponse, error) {
	if req.Operation == "" {
		return nil, errors.New("operation field is missing from request")
	}

	if binding, ok := b.compStore.GetOutputBinding(name); ok {
		ops := binding.Operations()
		for _, o := range ops {
			if o == req.Operation {
				policyRunner := resiliency.NewRunner[*bindings.InvokeResponse](ctx,
					b.resiliency.ComponentOutboundPolicy(name, resiliency.Binding),
				)
				return policyRunner(func(ctx context.Context) (*bindings.InvokeResponse, error) {
					return binding.Invoke(ctx, req)
				})
			}
		}
		supported := make([]string, 0, len(ops))
		for _, o := range ops {
			supported = append(supported, string(o))
		}
		return nil, fmt.Errorf("binding %s does not support operation %s. supported operations:%s", name, req.Operation, strings.Join(supported, " "))
	}
	return nil, fmt.Errorf("couldn't find output binding %s", name)
}

func (b *binding) onAppResponse(ctx context.Context, response *bindings.AppResponse) error {
	if len(response.State) > 0 {
		b.wg.Add(1)
		go func(reqs []state.SetRequest) {
			defer b.wg.Done()

			store, ok := b.compStore.GetStateStore(response.StoreName)
			if !ok {
				return
			}

			err := stateLoader.PerformBulkStoreOperation(ctx, reqs,
				b.resiliency.ComponentOutboundPolicy(response.StoreName, resiliency.Statestore),
				state.BulkStoreOpts{},
				store.Set,
				store.BulkSet,
			)
			if err != nil {
				log.Errorf("error saving state from app response: %v", err)
			}
		}(response.State)
	}

	if len(response.To) > 0 {
		data, err := json.Marshal(&response.Data)
		if err != nil {
			return err
		}

		if response.Concurrency == ConcurrencyParallel {
			b.sendBatchOutputBindingsParallel(ctx, response.To, data)
		} else {
			return b.sendBatchOutputBindingsSequential(ctx, response.To, data)
		}
	}

	return nil
}

func (b *binding) sendBindingEventToApp(ctx context.Context, bindingName string, data []byte, metadata map[string]string) ([]byte, error) {
	var response bindings.AppResponse
	spanName := "bindings/" + bindingName
	spanContext := trace.SpanContext{}

	// Check the grpc-trace-bin with fallback to traceparent.
	validTraceparent := false
	if val, ok := metadata[diag.GRPCTraceContextKey]; ok {
		if sc, ok := diagUtils.SpanContextFromBinary([]byte(val)); ok {
			spanContext = sc
		}
	} else if val, ok := metadata[diag.TraceparentHeader]; ok {
		if sc, ok := diag.SpanContextFromW3CString(val); ok {
			spanContext = sc
			validTraceparent = true
			// Only parse the tracestate if we've successfully parsed the traceparent.
			if val, ok := metadata[diag.TracestateHeader]; ok {
				ts := diag.TraceStateFromW3CString(val)
				spanContext.WithTraceState(*ts)
			}
		}
	}

	// span is nil if tracing is disabled (sampling rate is 0)
	ctx, span := diag.StartInternalCallbackSpan(ctx, spanName, spanContext, b.tracingSpec)

	var appResponseBody []byte
	path, _ := b.compStore.GetInputBindingRoute(bindingName)
	if path == "" {
		path = bindingName
	}

	if !b.isHTTP {
		if span != nil {
			ctx = diag.SpanContextToGRPCMetadata(ctx, span.SpanContext())
		}

		// Add workaround to fallback on checking traceparent header.
		// As grpc-trace-bin is not yet there in OpenTelemetry unlike OpenCensus, tracking issue https://github.com/open-telemetry/opentelemetry-specification/issues/639
		// and grpc-dotnet client adheres to OpenTelemetry Spec which only supports http based traceparent header in gRPC path.
		// TODO: Remove this workaround fix once grpc-dotnet supports grpc-trace-bin header. Tracking issue https://github.com/dapr/dapr/issues/1827.
		if validTraceparent && span != nil {
			spanContextHeaders := make(map[string]string, 2)
			diag.SpanContextToHTTPHeaders(span.SpanContext(), func(key string, val string) {
				spanContextHeaders[key] = val
			})
			for key, val := range spanContextHeaders {
				ctx = md.AppendToOutgoingContext(ctx, key, val)
			}
		}

		conn, err := b.grpc.GetAppClient()
		if err != nil {
			return nil, fmt.Errorf("error while getting app client: %w", err)
		}
		client := runtimev1pb.NewAppCallbackClient(conn)
		req := &runtimev1pb.BindingEventRequest{
			Name:     bindingName,
			Data:     data,
			Metadata: metadata,
		}
		start := time.Now()

		policyRunner := resiliency.NewRunner[*runtimev1pb.BindingEventResponse](ctx,
			b.resiliency.ComponentInboundPolicy(bindingName, resiliency.Binding),
		)
		resp, err := policyRunner(func(ctx context.Context) (*runtimev1pb.BindingEventResponse, error) {
			return client.OnBindingEvent(ctx, req)
		})

		if span != nil {
			m := diag.ConstructInputBindingSpanAttributes(
				bindingName,
				"/dapr.proto.runtime.v1.AppCallback/OnBindingEvent")
			diag.AddAttributesToSpan(span, m)
			diag.UpdateSpanStatusFromGRPCError(span, err)
			span.End()
		}
		if diag.DefaultGRPCMonitoring.IsEnabled() {
			diag.DefaultGRPCMonitoring.ServerRequestSent(ctx,
				"/dapr.proto.runtime.v1.AppCallback/OnBindingEvent",
				status.Code(err).String(),
				int64(len(req.GetData())), int64(len(resp.GetData())),
				start)
		}

		if err != nil {
			return nil, fmt.Errorf("error invoking app: %w", err)
		}

		if resp != nil {
			if resp.GetConcurrency() == runtimev1pb.BindingEventResponse_PARALLEL { //nolint:nosnakecase
				response.Concurrency = ConcurrencyParallel
			} else {
				response.Concurrency = ConcurrencySequential
			}

			response.To = resp.GetTo()

			if resp.GetData() != nil {
				appResponseBody = resp.GetData()

				var d interface{}
				err := json.Unmarshal(resp.GetData(), &d)
				if err == nil {
					response.Data = d
				}
			}
		}
	} else {
		policyDef := b.resiliency.ComponentInboundPolicy(bindingName, resiliency.Binding)

		reqMetadata := make(map[string][]string, len(metadata))
		for k, v := range metadata {
			reqMetadata[k] = []string{v}
		}
		req := invokev1.NewInvokeMethodRequest(path).
			WithHTTPExtension(http.MethodPost, "").
			WithRawDataBytes(data).
			WithContentType(invokev1.JSONContentType).
			WithMetadata(reqMetadata)
		if policyDef != nil {
			req.WithReplay(policyDef.HasRetries())
		}
		defer req.Close()

		respErr := errors.New("error sending binding event to application")
		policyRunner := resiliency.NewRunnerWithOptions(ctx, policyDef,
			resiliency.RunnerOpts[*invokev1.InvokeMethodResponse]{
				Disposer: resiliency.DisposerCloser[*invokev1.InvokeMethodResponse],
			},
		)
		resp, err := policyRunner(func(ctx context.Context) (*invokev1.InvokeMethodResponse, error) {
			rResp, rErr := b.channels.AppChannel().InvokeMethod(ctx, req, "")
			if rErr != nil {
				return rResp, rErr
			}
			if rResp != nil && rResp.Status().GetCode() != http.StatusOK {
				return rResp, fmt.Errorf("%w, status %d", respErr, rResp.Status().GetCode())
			}
			return rResp, nil
		})
		if err != nil && !errors.Is(err, respErr) {
			return nil, fmt.Errorf("error invoking app: %w", err)
		}

		if resp == nil {
			return nil, errors.New("error invoking app: response object is nil")
		}
		defer resp.Close()

		if span != nil {
			m := diag.ConstructInputBindingSpanAttributes(
				bindingName,
				http.MethodPost+" /"+bindingName,
			)
			diag.AddAttributesToSpan(span, m)
			diag.UpdateSpanStatusFromHTTPStatus(span, int(resp.Status().GetCode()))
			span.End()
		}

		appResponseBody, err = resp.RawDataFull()

		// TODO: report metrics for HTTP here, as is done for gRPC above.
		if code := resp.Status().GetCode(); code < 200 || code > 299 {
			return nil, fmt.Errorf("fails to send binding event to http app channel, status code: %d body: %s", code, string(appResponseBody))
		}

		if err != nil {
			return nil, fmt.Errorf("failed to read response body: %w", err)
		}
	}

	if len(response.State) > 0 || len(response.To) > 0 {
		if err := b.onAppResponse(ctx, &response); err != nil {
			log.Errorf("error executing app response: %s", err)
		}
	}

	return appResponseBody, nil
}

func (b *binding) readFromBinding(readCtx context.Context, name string, binding bindings.InputBinding) error {
	return binding.Read(readCtx, func(ctx context.Context, resp *bindings.ReadResponse) ([]byte, error) {
		if resp == nil {
			return nil, nil
		}

		start := time.Now()
		b, err := b.sendBindingEventToApp(ctx, name, resp.Data, resp.Metadata)
		elapsed := diag.ElapsedSince(start)

		diag.DefaultComponentMonitoring.InputBindingEvent(context.Background(), name, err == nil, elapsed)

		if err != nil {
			log.Debugf("error from app consumer for binding [%s]: %s", name, err)
			return nil, err
		}
		return b, nil
	})
}

func (b *binding) getSubscribedBindingsGRPC(ctx context.Context) ([]string, error) {
	conn, err := b.grpc.GetAppClient()
	if err != nil {
		return nil, fmt.Errorf("error while getting app client: %w", err)
	}
	client := runtimev1pb.NewAppCallbackClient(conn)
	resp, err := client.ListInputBindings(ctx, &emptypb.Empty{})

	bindings := []string{}
	if err == nil && resp != nil {
		bindings = resp.GetBindings()
	}
	return bindings, nil
}

func (b *binding) isAppSubscribedToBinding(ctx context.Context, binding string) (bool, error) {
	// if gRPC, looks for the binding in the list of bindings returned from the app
	if !b.isHTTP {
		if b.subscribeBindingList == nil {
			list, err := b.getSubscribedBindingsGRPC(ctx)
			if err != nil {
				return false, err
			}
			b.subscribeBindingList = list
		}
		for _, b := range b.subscribeBindingList {
			if b == binding {
				return true, nil
			}
		}
	} else {
		// if HTTP, check if there's an endpoint listening for that binding
		path, _ := b.compStore.GetInputBindingRoute(binding)
		req := invokev1.NewInvokeMethodRequest(path).
			WithHTTPExtension(http.MethodOptions, "").
			WithContentType(invokev1.JSONContentType)
		defer req.Close()

		resp, err := b.channels.AppChannel().InvokeMethod(ctx, req, "")
		if err != nil {
			return false, fmt.Errorf("could not invoke OPTIONS method on input binding subscription endpoint %q: %v", path, err)
		}
		defer resp.Close()
		code := resp.Status().GetCode()

		return code/100 == 2 || code == http.StatusMethodNotAllowed, nil
	}
	return false, nil
}

func isBindingOfExplicitDirection(direction string, metadata map[string]string) bool {
	for k, v := range metadata {
		if strings.EqualFold(k, ComponentDirection) {
			directions := strings.Split(v, ",")
			for _, d := range directions {
				if strings.TrimSpace(strings.ToLower(d)) == direction {
					return true
				}
			}
		}
	}
	return false
}
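// A minimal usage sketch of the direction check above (the metadata values
// here are hypothetical, chosen only for illustration). The "direction" entry
// is split on commas and matched case-insensitively after trimming, so a
// component can declare both directions in one value:
//
//	m := map[string]string{"direction": "Input, OUTPUT"}
//	isBindingOfExplicitDirection("input", m)  // true
//	isBindingOfExplicitDirection("output", m) // true
//	isBindingOfExplicitDirection("input", nil) // false: with no explicit
//	// direction, the runtime falls back to asking the app instead
//	// (OPTIONS probe for HTTP, ListInputBindings for gRPC).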
mikeee/dapr
pkg/runtime/processor/binding/send.go
GO
mit
14,445
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package binding

import (
	"context"
	"crypto/x509"
	"io"
	"net/http"
	"testing"
	"time"

	"github.com/phayes/freeport"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/dapr/components-contrib/bindings"
	"github.com/dapr/dapr/pkg/api/grpc/manager"
	commonapi "github.com/dapr/dapr/pkg/apis/common"
	componentsV1alpha1 "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	channelt "github.com/dapr/dapr/pkg/channel/testing"
	invokev1 "github.com/dapr/dapr/pkg/messaging/v1"
	"github.com/dapr/dapr/pkg/modes"
	"github.com/dapr/dapr/pkg/resiliency"
	"github.com/dapr/dapr/pkg/runtime/channels"
	"github.com/dapr/dapr/pkg/runtime/compstore"
	"github.com/dapr/dapr/pkg/runtime/meta"
	rtmock "github.com/dapr/dapr/pkg/runtime/mock"
	"github.com/dapr/dapr/pkg/runtime/registry"
	"github.com/dapr/dapr/pkg/security"
	daprt "github.com/dapr/dapr/pkg/testing"
	testinggrpc "github.com/dapr/dapr/pkg/testing/grpc"
	"github.com/dapr/kit/logger"
)

func TestIsBindingOfExplicitDirection(t *testing.T) {
	t.Run("no direction in metadata input binding", func(t *testing.T) {
		m := map[string]string{}
		r := isBindingOfExplicitDirection("input", m)
		assert.False(t, r)
	})

	t.Run("no direction in metadata output binding", func(t *testing.T) {
		m := map[string]string{}
		r := isBindingOfExplicitDirection("output", m)
		assert.False(t, r)
	})

	t.Run("direction is input binding", func(t *testing.T) {
		m := map[string]string{
			"direction": "input",
		}
		r := isBindingOfExplicitDirection("input", m)
		assert.True(t, r)
	})

	t.Run("direction is output binding", func(t *testing.T) {
		m := map[string]string{
			"direction": "output",
		}
		r := isBindingOfExplicitDirection("output", m)
		assert.True(t, r)
	})

	t.Run("direction is not output binding", func(t *testing.T) {
		m := map[string]string{
			"direction": "input",
		}
		r := isBindingOfExplicitDirection("output", m)
		assert.False(t, r)
	})

	t.Run("direction is not input binding", func(t *testing.T) {
		m := map[string]string{
			"direction": "output",
		}
		r := isBindingOfExplicitDirection("input", m)
		assert.False(t, r)
	})

	t.Run("direction is both input and output binding", func(t *testing.T) {
		m := map[string]string{
			"direction": "output, input",
		}
		r := isBindingOfExplicitDirection("input", m)
		assert.True(t, r)

		r2 := isBindingOfExplicitDirection("output", m)
		assert.True(t, r2)
	})
}

func TestStartReadingFromBindings(t *testing.T) {
	t.Run("OPTIONS request when direction is not specified", func(t *testing.T) {
		mockAppChannel := new(channelt.MockAppChannel)
		b := New(Options{
			IsHTTP:         true,
			Resiliency:     resiliency.New(log),
			ComponentStore: compstore.New(),
			Meta:           meta.New(meta.Options{}),
		})
		b.channels = new(channels.Channels).WithAppChannel(mockAppChannel)
		mockAppChannel.On("InvokeMethod", mock.Anything, mock.Anything).Return(invokev1.NewInvokeMethodResponse(200, "OK", nil), nil)

		m := &rtmock.Binding{}
		b.compStore.AddInputBinding("test", m)
		err := b.StartReadingFromBindings(context.Background())
		require.NoError(t, err)
		assert.True(t, mockAppChannel.AssertCalled(t, "InvokeMethod", mock.Anything, mock.Anything))
	})

	t.Run("No OPTIONS request when direction is specified", func(t *testing.T) {
		mockAppChannel := new(channelt.MockAppChannel)
		b := New(Options{
			IsHTTP:         true,
			Resiliency:     resiliency.New(log),
			ComponentStore: compstore.New(),
			Meta:           meta.New(meta.Options{}),
		})
		b.channels = new(channels.Channels).WithAppChannel(mockAppChannel)
		mockAppChannel.On("InvokeMethod", mock.Anything, mock.Anything).Return(invokev1.NewInvokeMethodResponse(200, "OK", nil), nil)

		m := &rtmock.Binding{
			Metadata: map[string]string{
				"direction": "input",
			},
		}
		b.compStore.AddInputBinding("test", m)
		require.NoError(t, b.compStore.AddPendingComponentForCommit(componentsV1alpha1.Component{
			ObjectMeta: metav1.ObjectMeta{
				Name: "test",
			},
			Spec: componentsV1alpha1.ComponentSpec{
				Type: "bindings.test",
				Metadata: []commonapi.NameValuePair{
					{
						Name: "direction",
						Value: commonapi.DynamicValue{
							JSON: v1.JSON{Raw: []byte("input")},
						},
					},
				},
			},
		}))
		require.NoError(t, b.compStore.CommitPendingComponent())

		err := b.StartReadingFromBindings(context.Background())
		require.NoError(t, err)
		assert.True(t, mockAppChannel.AssertCalled(t, "InvokeMethod", mock.Anything, mock.Anything))
	})
}

func TestGetSubscribedBindingsGRPC(t *testing.T) {
	secP, err := security.New(context.Background(), security.Options{
		TrustAnchors:            []byte("test"),
		AppID:                   "test",
		ControlPlaneTrustDomain: "test.example.com",
		ControlPlaneNamespace:   "default",
		MTLSEnabled:             false,
		OverrideCertRequestFn: func(context.Context, []byte) ([]*x509.Certificate, error) {
			return []*x509.Certificate{nil}, nil
		},
	})
	require.NoError(t, err)
	go secP.Run(context.Background())
	sec, err := secP.Handler(context.Background())
	require.NoError(t, err)

	testCases := []struct {
		name             string
		expectedResponse []string
		responseError    error
		responseFromApp  []string
	}{
		{
			name:             "get list of subscriber bindings success",
			expectedResponse: []string{"binding1", "binding2"},
			responseFromApp:  []string{"binding1", "binding2"},
		},
		{
			name:             "get list of subscriber bindings error from app",
			expectedResponse: []string{},
			responseError:    assert.AnError,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			port, _ := freeport.GetFreePort()
			b := New(Options{
				IsHTTP:         false,
				Resiliency:     resiliency.New(log),
				ComponentStore: compstore.New(),
				Meta:           meta.New(meta.Options{}),
				GRPC:           manager.NewManager(sec, modes.StandaloneMode, &manager.AppChannelConfig{Port: port}),
			})

			// create mock application server first
			grpcServer := testinggrpc.StartTestAppCallbackGRPCServer(t, port, &channelt.MockServer{
				Error:    tc.responseError,
				Bindings: tc.responseFromApp,
			})
			defer grpcServer.Stop()

			// act
			resp, _ := b.getSubscribedBindingsGRPC(context.Background())

			// assert
			assert.Equal(t, tc.expectedResponse, resp, "expected response to match")
		})
	}
}

func TestReadInputBindings(t *testing.T) {
	const testInputBindingName = "inputbinding"
	const testInputBindingMethod = "inputbinding"

	t.Run("app acknowledge, no retry", func(t *testing.T) {
		mockAppChannel := new(channelt.MockAppChannel)
		b := New(Options{
			IsHTTP:         true,
			Resiliency:     resiliency.New(log),
			ComponentStore: compstore.New(),
			Meta:           meta.New(meta.Options{}),
		})
		b.channels = new(channels.Channels).WithAppChannel(mockAppChannel)

		fakeBindingResp := invokev1.NewInvokeMethodResponse(200, "OK", nil)
		defer fakeBindingResp.Close()

		fakeReq := invokev1.NewInvokeMethodRequest(testInputBindingMethod).
			WithHTTPExtension(http.MethodPost, "").
			WithRawDataBytes(rtmock.TestInputBindingData).
			WithContentType("application/json").
			WithMetadata(map[string][]string{})
		defer fakeReq.Close()

		// User app subscribes to one topic via the HTTP app channel.
		fakeResp := invokev1.NewInvokeMethodResponse(200, "OK", nil).
			WithRawDataString("OK").
			WithContentType("application/json")
		defer fakeResp.Close()

		mockAppChannel.On("InvokeMethod", mock.MatchedBy(daprt.MatchContextInterface), matchDaprRequestMethod(testInputBindingMethod)).Return(fakeBindingResp, nil)
		mockAppChannel.On("InvokeMethod", mock.MatchedBy(daprt.MatchContextInterface), fakeReq).Return(fakeResp, nil)

		b.compStore.AddInputBindingRoute(testInputBindingName, testInputBindingName)

		mockBinding := rtmock.Binding{}
		ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
		ch := make(chan bool, 1)
		mockBinding.ReadErrorCh = ch
		b.readFromBinding(ctx, testInputBindingName, &mockBinding)
		cancel()

		assert.False(t, <-ch)
	})

	t.Run("app returns error", func(t *testing.T) {
		mockAppChannel := new(channelt.MockAppChannel)
		b := New(Options{
			IsHTTP:         true,
			Resiliency:     resiliency.New(log),
			ComponentStore: compstore.New(),
			Meta:           meta.New(meta.Options{}),
		})
		b.channels = new(channels.Channels).WithAppChannel(mockAppChannel)

		fakeBindingReq := invokev1.NewInvokeMethodRequest(testInputBindingMethod).
			WithHTTPExtension(http.MethodOptions, "").
			WithContentType(invokev1.JSONContentType)
		defer fakeBindingReq.Close()

		fakeBindingResp := invokev1.NewInvokeMethodResponse(200, "OK", nil)
		defer fakeBindingResp.Close()

		fakeReq := invokev1.NewInvokeMethodRequest(testInputBindingMethod).
			WithHTTPExtension(http.MethodPost, "").
			WithRawDataBytes(rtmock.TestInputBindingData).
			WithContentType("application/json").
			WithMetadata(map[string][]string{})
		defer fakeReq.Close()

		// User app subscribes to one topic via the HTTP app channel.
		fakeResp := invokev1.NewInvokeMethodResponse(500, "Internal Error", nil).
			WithRawDataString("Internal Error").
			WithContentType("application/json")
		defer fakeResp.Close()

		mockAppChannel.On("InvokeMethod", mock.MatchedBy(daprt.MatchContextInterface), fakeBindingReq).Return(fakeBindingResp, nil)
		mockAppChannel.On("InvokeMethod", mock.MatchedBy(daprt.MatchContextInterface), fakeReq).Return(fakeResp, nil)

		b.compStore.AddInputBindingRoute(testInputBindingName, testInputBindingName)

		mockBinding := rtmock.Binding{}
		ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
		ch := make(chan bool, 1)
		mockBinding.ReadErrorCh = ch
		b.readFromBinding(ctx, testInputBindingName, &mockBinding)
		cancel()

		assert.True(t, <-ch)
	})

	t.Run("binding has data and metadata", func(t *testing.T) {
		mockAppChannel := new(channelt.MockAppChannel)
		b := New(Options{
			IsHTTP:         true,
			Resiliency:     resiliency.New(log),
			ComponentStore: compstore.New(),
			Meta:           meta.New(meta.Options{}),
		})
		b.channels = new(channels.Channels).WithAppChannel(mockAppChannel)

		fakeBindingReq := invokev1.NewInvokeMethodRequest(testInputBindingMethod).
			WithHTTPExtension(http.MethodOptions, "").
			WithContentType(invokev1.JSONContentType)
		defer fakeBindingReq.Close()

		fakeBindingResp := invokev1.NewInvokeMethodResponse(200, "OK", nil)
		defer fakeBindingResp.Close()

		fakeReq := invokev1.NewInvokeMethodRequest(testInputBindingMethod).
			WithHTTPExtension(http.MethodPost, "").
			WithRawDataBytes(rtmock.TestInputBindingData).
			WithContentType("application/json").
			WithMetadata(map[string][]string{"bindings": {"input"}})
		defer fakeReq.Close()

		// User app subscribes to one topic via the HTTP app channel.
		fakeResp := invokev1.NewInvokeMethodResponse(200, "OK", nil).
			WithRawDataString("OK").
			WithContentType("application/json")
		defer fakeResp.Close()

		mockAppChannel.On("InvokeMethod", mock.MatchedBy(daprt.MatchContextInterface), matchDaprRequestMethod(testInputBindingMethod)).Return(fakeBindingResp, nil)
		mockAppChannel.On("InvokeMethod", mock.MatchedBy(daprt.MatchContextInterface), fakeReq).Return(fakeResp, nil)

		b.compStore.AddInputBindingRoute(testInputBindingName, testInputBindingName)

		mockBinding := rtmock.Binding{Metadata: map[string]string{"bindings": "input"}}
		ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
		ch := make(chan bool, 1)
		mockBinding.ReadErrorCh = ch
		b.readFromBinding(ctx, testInputBindingName, &mockBinding)
		cancel()

		assert.Equal(t, string(rtmock.TestInputBindingData), mockBinding.Data)
	})

	t.Run("start and stop reading", func(t *testing.T) {
		mockAppChannel := new(channelt.MockAppChannel)
		b := New(Options{
			IsHTTP:         true,
			Resiliency:     resiliency.New(log),
			ComponentStore: compstore.New(),
			Meta:           meta.New(meta.Options{}),
		})
		b.channels = new(channels.Channels).WithAppChannel(mockAppChannel)

		closeCh := make(chan struct{})
		defer close(closeCh)

		mockBinding := &daprt.MockBinding{}
		mockBinding.SetOnReadCloseCh(closeCh)
		mockBinding.On("Read", mock.MatchedBy(daprt.MatchContextInterface), mock.Anything).Return(nil).Once()

		ctx, cancel := context.WithCancel(context.Background())
		b.readFromBinding(ctx, testInputBindingName, mockBinding)
		time.Sleep(80 * time.Millisecond)
		cancel()

		select {
		case <-closeCh:
			// All good
		case <-time.After(time.Second):
			t.Fatal("timeout while waiting for binding to stop reading")
		}

		mockBinding.AssertNumberOfCalls(t, "Read", 1)
	})
}

func TestInvokeOutputBindings(t *testing.T) {
	t.Run("output binding missing operation", func(t *testing.T) {
		mockAppChannel := new(channelt.MockAppChannel)
		b := New(Options{
			IsHTTP:         true,
			Resiliency:     resiliency.New(log),
			ComponentStore: compstore.New(),
			Meta:           meta.New(meta.Options{}),
		})
		b.channels = new(channels.Channels).WithAppChannel(mockAppChannel)

		_, err := b.SendToOutputBinding(context.Background(), "mockBinding", &bindings.InvokeRequest{
			Data: []byte(""),
		})
		require.Error(t, err)
		assert.Equal(t, "operation field is missing from request", err.Error())
	})

	t.Run("output binding valid operation", func(t *testing.T) {
		mockAppChannel := new(channelt.MockAppChannel)
		b := New(Options{
			IsHTTP:         true,
			Resiliency:     resiliency.New(log),
			ComponentStore: compstore.New(),
			Meta:           meta.New(meta.Options{}),
		})
		b.channels = new(channels.Channels).WithAppChannel(mockAppChannel)
		b.compStore.AddOutputBinding("mockBinding", &rtmock.Binding{})

		_, err := b.SendToOutputBinding(context.Background(), "mockBinding", &bindings.InvokeRequest{
			Data:      []byte(""),
			Operation: bindings.CreateOperation,
		})
		require.NoError(t, err)
	})

	t.Run("output binding invalid operation", func(t *testing.T) {
		mockAppChannel := new(channelt.MockAppChannel)
		b := New(Options{
			IsHTTP:         true,
			Resiliency:     resiliency.New(log),
			ComponentStore: compstore.New(),
			Meta:           meta.New(meta.Options{}),
		})
		b.channels = new(channels.Channels).WithAppChannel(mockAppChannel)
		b.compStore.AddOutputBinding("mockBinding", &rtmock.Binding{})

		_, err := b.SendToOutputBinding(context.Background(), "mockBinding", &bindings.InvokeRequest{
			Data:      []byte(""),
			Operation: bindings.GetOperation,
		})
		require.Error(t, err)
		assert.Equal(t, "binding mockBinding does not support operation get. supported operations:create list", err.Error())
	})
}

func TestBindingTracingHttp(t *testing.T) {
	b := New(Options{
		IsHTTP:         true,
		Resiliency:     resiliency.New(log),
		ComponentStore: compstore.New(),
		Meta:           meta.New(meta.Options{}),
	})

	t.Run("traceparent passed through with response status code 200", func(t *testing.T) {
		mockAppChannel := new(channelt.MockAppChannel)
		mockAppChannel.On("InvokeMethod", mock.Anything, mock.Anything).Return(invokev1.NewInvokeMethodResponse(200, "OK", nil), nil)
		b.channels = new(channels.Channels).WithAppChannel(mockAppChannel)

		_, err := b.sendBindingEventToApp(context.Background(), "mockBinding", []byte(""), map[string]string{"traceparent": "00-d97eeaf10b4d00dc6ba794f3a41c5268-09462d216dd14deb-01"})
		require.NoError(t, err)
		mockAppChannel.AssertCalled(t, "InvokeMethod", mock.Anything, mock.Anything)
		assert.Len(t, mockAppChannel.Calls, 1)
		req := mockAppChannel.Calls[0].Arguments.Get(1).(*invokev1.InvokeMethodRequest)
		assert.Contains(t, req.Metadata(), "traceparent")
		assert.Contains(t, req.Metadata()["traceparent"].GetValues(), "00-d97eeaf10b4d00dc6ba794f3a41c5268-09462d216dd14deb-01")
	})

	t.Run("traceparent passed through with response status code 204", func(t *testing.T) {
		mockAppChannel := new(channelt.MockAppChannel)
		mockAppChannel.On("InvokeMethod", mock.Anything, mock.Anything).Return(invokev1.NewInvokeMethodResponse(204, "OK", nil), nil)
		b.channels = new(channels.Channels).WithAppChannel(mockAppChannel)

		_, err := b.sendBindingEventToApp(context.Background(), "mockBinding", []byte(""), map[string]string{"traceparent": "00-d97eeaf10b4d00dc6ba794f3a41c5268-09462d216dd14deb-01"})
		require.NoError(t, err)
		mockAppChannel.AssertCalled(t, "InvokeMethod", mock.Anything, mock.Anything)
		assert.Len(t, mockAppChannel.Calls, 1)
		req := mockAppChannel.Calls[0].Arguments.Get(1).(*invokev1.InvokeMethodRequest)
		assert.Contains(t, req.Metadata(), "traceparent")
		assert.Contains(t, req.Metadata()["traceparent"].GetValues(), "00-d97eeaf10b4d00dc6ba794f3a41c5268-09462d216dd14deb-01")
	})

	t.Run("bad traceparent does not fail request", func(t *testing.T) {
		mockAppChannel := new(channelt.MockAppChannel)
		mockAppChannel.On("InvokeMethod", mock.Anything, mock.Anything).Return(invokev1.NewInvokeMethodResponse(200, "OK", nil), nil)
		b.channels = new(channels.Channels).WithAppChannel(mockAppChannel)

		_, err := b.sendBindingEventToApp(context.Background(), "mockBinding", []byte(""), map[string]string{"traceparent": "I am not a traceparent"})
		require.NoError(t, err)
		mockAppChannel.AssertCalled(t, "InvokeMethod", mock.Anything, mock.Anything)
		assert.Len(t, mockAppChannel.Calls, 1)
	})
}

func TestBindingResiliency(t *testing.T) {
	b := New(Options{
		Resiliency:     resiliency.FromConfigurations(logger.NewLogger("test"), daprt.TestResiliency),
		Registry:       registry.New(registry.NewOptions()).Bindings(),
		ComponentStore: compstore.New(),
		Meta:           meta.New(meta.Options{}),
	})

	failingChannel := daprt.FailingAppChannel{
		Failure: daprt.NewFailure(
			map[string]int{
				"inputFailingKey": 1,
			},
			map[string]time.Duration{
				"inputTimeoutKey": time.Second * 10,
			},
			map[string]int{},
		),
		KeyFunc: func(req *invokev1.InvokeMethodRequest) string {
			r, _ := io.ReadAll(req.RawData())
			return string(r)
		},
	}

	b.channels = new(channels.Channels).WithAppChannel(&failingChannel)
	b.isHTTP = true

	failingBinding := daprt.FailingBinding{
		Failure: daprt.NewFailure(
			map[string]int{
				"outputFailingKey": 1,
			},
			map[string]time.Duration{
				"outputTimeoutKey": time.Second * 10,
			},
			map[string]int{},
		),
	}

	b.registry.RegisterOutputBinding(
		func(_ logger.Logger) bindings.OutputBinding {
			return &failingBinding
		},
		"failingoutput",
	)

	output := componentsV1alpha1.Component{}
	output.ObjectMeta.Name = "failOutput"
	output.Spec.Type = "bindings.failingoutput"
	err := b.Init(context.TODO(), output)
	require.NoError(t, err)

	t.Run("output binding retries on failure with resiliency", func(t *testing.T) {
		req := &bindings.InvokeRequest{
			Data:      []byte("outputFailingKey"),
			Operation: "create",
		}
		_, err := b.SendToOutputBinding(context.Background(), "failOutput", req)

		require.NoError(t, err)
		assert.Equal(t, 2, failingBinding.Failure.CallCount("outputFailingKey"))
	})

	t.Run("output binding times out with resiliency", func(t *testing.T) {
		req := &bindings.InvokeRequest{
			Data:      []byte("outputTimeoutKey"),
			Operation: "create",
		}
		start := time.Now()
		_, err := b.SendToOutputBinding(context.Background(), "failOutput", req)
		end := time.Now()

		require.Error(t, err)
		assert.Equal(t, 2, failingBinding.Failure.CallCount("outputTimeoutKey"))
		assert.Less(t, end.Sub(start), time.Second*10)
	})

	t.Run("input binding retries on failure with resiliency", func(t *testing.T) {
		_, err := b.sendBindingEventToApp(context.Background(), "failingInputBinding", []byte("inputFailingKey"), map[string]string{})

		require.NoError(t, err)
		assert.Equal(t, 2, failingChannel.Failure.CallCount("inputFailingKey"))
	})

	t.Run("input binding times out with resiliency", func(t *testing.T) {
		start := time.Now()
		_, err := b.sendBindingEventToApp(context.Background(), "failingInputBinding", []byte("inputTimeoutKey"), map[string]string{})
		end := time.Now()

		require.Error(t, err)
		assert.Equal(t, 2, failingChannel.Failure.CallCount("inputTimeoutKey"))
		assert.Less(t, end.Sub(start), time.Second*10)
	})
}

func matchDaprRequestMethod(method string) any {
	return mock.MatchedBy(func(req *invokev1.InvokeMethodRequest) bool {
		if req == nil || req.Message() == nil || req.Message().GetMethod() != method {
			return false
		}
		return true
	})
}
mikeee/dapr
pkg/runtime/processor/binding/send_test.go
GO
mit
21,045
/*
Copyright 2024 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package processor

import (
	"context"
	"errors"
	"fmt"
	"strings"
	"time"

	componentsapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	"github.com/dapr/dapr/pkg/components"
	diag "github.com/dapr/dapr/pkg/diagnostics"
	rterrors "github.com/dapr/dapr/pkg/runtime/errors"
)

// Init initializes a component of a category.
func (p *Processor) Init(ctx context.Context, comp componentsapi.Component) error {
	p.lock.Lock()
	defer p.lock.Unlock()

	m, err := p.managerFromComp(comp)
	if err != nil {
		return err
	}

	if err := p.compStore.AddPendingComponentForCommit(comp); err != nil {
		return err
	}

	if err := m.Init(p.security.WithSVIDContext(ctx), comp); err != nil {
		return errors.Join(err, p.compStore.DropPendingComponent())
	}

	return p.compStore.CommitPendingComponent()
}

// Close closes the component.
func (p *Processor) Close(comp componentsapi.Component) error {
	p.lock.Lock()
	defer p.lock.Unlock()

	m, err := p.managerFromComp(comp)
	if err != nil {
		return err
	}

	if err := m.Close(comp); err != nil {
		return err
	}

	p.compStore.DeleteComponent(comp.Name)

	return nil
}

func (p *Processor) AddPendingComponent(ctx context.Context, comp componentsapi.Component) bool {
	p.chlock.RLock()
	defer p.chlock.RUnlock()

	if p.shutdown.Load() {
		return false
	}

	p.pendingComponentsWaiting.Add(1)
	select {
	case <-ctx.Done():
		p.pendingComponentsWaiting.Done()
		return false
	case <-p.closedCh:
		p.pendingComponentsWaiting.Done()
		return false
	case p.pendingComponents <- comp:
		return true
	}
}

func (p *Processor) processComponents(ctx context.Context) error {
	process := func(comp componentsapi.Component) error {
		if comp.Name == "" {
			return nil
		}

		err := p.processComponentAndDependents(ctx, comp)
		if err != nil {
			err = fmt.Errorf("process component %s error: %s", comp.Name, err)
			if !comp.Spec.IgnoreErrors {
				log.Warnf("Error processing component, daprd will exit gracefully")
				return err
			}
			log.Error(err)
		}
		return nil
	}

	for comp := range p.pendingComponents {
		err := process(comp)
		p.pendingComponentsWaiting.Done()
		if err != nil {
			return err
		}
	}

	return nil
}

// WaitForEmptyComponentQueue waits for the component queue to be empty.
func (p *Processor) WaitForEmptyComponentQueue() {
	p.pendingComponentsWaiting.Wait()
}

func (p *Processor) processComponentAndDependents(ctx context.Context, comp componentsapi.Component) error {
	log.Debug("Loading component: " + comp.LogName())
	res := p.preprocessOneComponent(ctx, &comp)
	if res.unreadyDependency != "" {
		p.pendingComponentDependents[res.unreadyDependency] = append(p.pendingComponentDependents[res.unreadyDependency], comp)
		return nil
	}

	compCategory := p.category(comp)
	if compCategory == "" {
		// The type doesn't match any known category, so return an error.
		return fmt.Errorf("incorrect type %s", comp.Spec.Type)
	}

	timeout, err := time.ParseDuration(comp.Spec.InitTimeout)
	if err != nil {
		timeout = defaultComponentInitTimeout
	}

	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	err = p.Init(ctx, comp)
	// If the context deadline was exceeded, surface it as an init timeout error.
	if errors.Is(ctx.Err(), context.DeadlineExceeded) {
		err = fmt.Errorf("init timeout for component %s exceeded after %s", comp.LogName(), timeout.String())
	}
	if err != nil {
		log.Errorf("Failed to init component %s: %s", comp.LogName(), err)
		diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "init", comp.ObjectMeta.Name)
		return rterrors.NewInit(rterrors.InitComponentFailure, comp.LogName(), err)
	}

	log.Info("Component loaded: " + comp.LogName())
	diag.DefaultMonitoring.ComponentLoaded()

	dependency := componentDependency(compCategory, comp.Name)
	if deps, ok := p.pendingComponentDependents[dependency]; ok {
		delete(p.pendingComponentDependents, dependency)
		for _, dependent := range deps {
			if err := p.processComponentAndDependents(ctx, dependent); err != nil {
				return err
			}
		}
	}

	return nil
}

type componentPreprocessRes struct {
	unreadyDependency string
}

func (p *Processor) preprocessOneComponent(ctx context.Context, comp *componentsapi.Component) componentPreprocessRes {
	_, unreadySecretsStore := p.secret.ProcessResource(ctx, comp)
	if unreadySecretsStore != "" {
		return componentPreprocessRes{
			unreadyDependency: componentDependency(components.CategorySecretStore, unreadySecretsStore),
		}
	}
	return componentPreprocessRes{}
}

func (p *Processor) category(comp componentsapi.Component) components.Category {
	for category := range p.managers {
		if strings.HasPrefix(comp.Spec.Type, string(category)+".") {
			return category
		}
	}
	return ""
}

func componentDependency(compCategory components.Category, name string) string {
	return string(compCategory) + ":" + name
}
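// A minimal sketch of the dependency keys used above (the store name here is
// hypothetical, chosen only for illustration): a component blocked on an
// unready secret store named "mysecrets" is parked in
// pendingComponentDependents under the key built by componentDependency, and
// is re-processed once that store finishes initializing:
//
//	componentDependency(components.CategorySecretStore, "mysecrets")
//	// -> "secretstores:mysecrets"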
mikeee/dapr
pkg/runtime/processor/components.go
GO
mit
5,348
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package configuration

import (
	"context"
	"io"
	"sync"

	contribconfig "github.com/dapr/components-contrib/configuration"
	compapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	compconfig "github.com/dapr/dapr/pkg/components/configuration"
	diag "github.com/dapr/dapr/pkg/diagnostics"
	"github.com/dapr/dapr/pkg/runtime/compstore"
	rterrors "github.com/dapr/dapr/pkg/runtime/errors"
	"github.com/dapr/dapr/pkg/runtime/meta"
)

type Options struct {
	Registry       *compconfig.Registry
	ComponentStore *compstore.ComponentStore
	Meta           *meta.Meta
}

type configuration struct {
	registry  *compconfig.Registry
	compStore *compstore.ComponentStore
	meta      *meta.Meta
	lock      sync.Mutex
}

func New(opts Options) *configuration {
	return &configuration{
		registry:  opts.Registry,
		compStore: opts.ComponentStore,
		meta:      opts.Meta,
	}
}

func (c *configuration) Init(ctx context.Context, comp compapi.Component) error {
	c.lock.Lock()
	defer c.lock.Unlock()

	fName := comp.LogName()
	config, err := c.registry.Create(comp.Spec.Type, comp.Spec.Version, fName)
	if err != nil {
		diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "creation", comp.ObjectMeta.Name)
		return rterrors.NewInit(rterrors.CreateComponentFailure, fName, err)
	}

	if config != nil {
		meta, err := c.meta.ToBaseMetadata(comp)
		if err != nil {
			diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "init", comp.ObjectMeta.Name)
			return rterrors.NewInit(rterrors.InitComponentFailure, fName, err)
		}

		err = config.Init(ctx, contribconfig.Metadata{Base: meta})
		if err != nil {
			diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "init", comp.ObjectMeta.Name)
			return rterrors.NewInit(rterrors.InitComponentFailure, fName, err)
		}

		c.compStore.AddConfiguration(comp.ObjectMeta.Name, config)
		diag.DefaultMonitoring.ComponentInitialized(comp.Spec.Type)
	}

	return nil
}

func (c *configuration) Close(comp compapi.Component) error {
	c.lock.Lock()
	defer c.lock.Unlock()

	conf, ok := c.compStore.GetConfiguration(comp.ObjectMeta.Name)
	if !ok {
		return nil
	}

	defer c.compStore.DeleteConfiguration(comp.ObjectMeta.Name)

	closer, ok := conf.(io.Closer)
	if ok && closer != nil {
		if err := closer.Close(); err != nil {
			return err
		}
	}

	return nil
}
mikeee/dapr
pkg/runtime/processor/configuration/configuration.go
GO
mit
2,860
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package crypto

import (
	"context"
	"io"
	"sync"

	contribcrypto "github.com/dapr/components-contrib/crypto"
	compapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	compcrypto "github.com/dapr/dapr/pkg/components/crypto"
	diag "github.com/dapr/dapr/pkg/diagnostics"
	"github.com/dapr/dapr/pkg/runtime/compstore"
	rterrors "github.com/dapr/dapr/pkg/runtime/errors"
	"github.com/dapr/dapr/pkg/runtime/meta"
)

type Options struct {
	Registry       *compcrypto.Registry
	ComponentStore *compstore.ComponentStore
	Meta           *meta.Meta
}

type crypto struct {
	registry  *compcrypto.Registry
	compStore *compstore.ComponentStore
	meta      *meta.Meta
	lock      sync.Mutex
}

func New(opts Options) *crypto {
	return &crypto{
		registry:  opts.Registry,
		compStore: opts.ComponentStore,
		meta:      opts.Meta,
	}
}

func (c *crypto) Init(ctx context.Context, comp compapi.Component) error {
	c.lock.Lock()
	defer c.lock.Unlock()

	fName := comp.LogName()
	component, err := c.registry.Create(comp.Spec.Type, comp.Spec.Version, fName)
	if err != nil {
		diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "creation", comp.ObjectMeta.Name)
		return rterrors.NewInit(rterrors.CreateComponentFailure, fName, err)
	}

	meta, err := c.meta.ToBaseMetadata(comp)
	if err != nil {
		diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "init", comp.ObjectMeta.Name)
		return rterrors.NewInit(rterrors.InitComponentFailure, fName, err)
	}

	err = component.Init(ctx, contribcrypto.Metadata{Base: meta})
	if err != nil {
		diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "init", comp.ObjectMeta.Name)
		return rterrors.NewInit(rterrors.InitComponentFailure, fName, err)
	}

	c.compStore.AddCryptoProvider(comp.ObjectMeta.Name, component)
	diag.DefaultMonitoring.ComponentInitialized(comp.Spec.Type)
	return nil
}

func (c *crypto) Close(comp compapi.Component) error {
	c.lock.Lock()
	defer c.lock.Unlock()

	defer c.compStore.DeleteCryptoProvider(comp.ObjectMeta.Name)

	crypto, ok := c.compStore.GetCryptoProvider(comp.ObjectMeta.Name)
	if !ok {
		return nil
	}

	closer, ok := crypto.(io.Closer)
	if ok && closer != nil {
		if err := closer.Close(); err != nil {
			return err
		}
	}

	return nil
}
mikeee/dapr
pkg/runtime/processor/crypto/crypto.go
GO
mit
2,785
/*
Copyright 2024 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package processor

import (
	"context"

	apiextapi "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"

	commonapi "github.com/dapr/dapr/pkg/apis/common"
	httpendpointsapi "github.com/dapr/dapr/pkg/apis/httpEndpoint/v1alpha1"
	"github.com/dapr/dapr/pkg/internal/apis"
)

func (p *Processor) AddPendingEndpoint(ctx context.Context, endpoint httpendpointsapi.HTTPEndpoint) bool {
	p.chlock.RLock()
	defer p.chlock.RUnlock()

	if p.shutdown.Load() {
		return false
	}

	select {
	case <-ctx.Done():
		return false
	case <-p.closedCh:
		return false
	case p.pendingHTTPEndpoints <- endpoint:
		return true
	}
}

func (p *Processor) processHTTPEndpoints(ctx context.Context) error {
	for endpoint := range p.pendingHTTPEndpoints {
		if endpoint.Name == "" {
			continue
		}
		p.processHTTPEndpointSecrets(ctx, &endpoint)
		p.compStore.AddHTTPEndpoint(endpoint)
	}

	return nil
}

func (p *Processor) processHTTPEndpointSecrets(ctx context.Context, endpoint *httpendpointsapi.HTTPEndpoint) {
	_, _ = p.secret.ProcessResource(ctx, endpoint)

	tlsResource := apis.GenericNameValueResource{
		Name:        endpoint.ObjectMeta.Name,
		Namespace:   endpoint.ObjectMeta.Namespace,
		SecretStore: endpoint.Auth.SecretStore,
		Pairs:       []commonapi.NameValuePair{},
	}

	root, clientCert, clientKey := "root", "clientCert", "clientKey"

	ca := commonapi.NameValuePair{
		Name: root,
	}
	if endpoint.HasTLSRootCA() {
		ca.Value = *endpoint.Spec.ClientTLS.RootCA.Value
	}
	if endpoint.HasTLSRootCASecret() {
		ca.SecretKeyRef = *endpoint.Spec.ClientTLS.RootCA.SecretKeyRef
	}
	tlsResource.Pairs = append(tlsResource.Pairs, ca)

	cCert := commonapi.NameValuePair{
		Name: clientCert,
	}
	if endpoint.HasTLSClientCert() {
		cCert.Value = *endpoint.Spec.ClientTLS.Certificate.Value
	}
	if endpoint.HasTLSClientCertSecret() {
		cCert.SecretKeyRef = *endpoint.Spec.ClientTLS.Certificate.SecretKeyRef
	}
	tlsResource.Pairs = append(tlsResource.Pairs, cCert)

	cKey := commonapi.NameValuePair{
		Name: clientKey,
	}
	if endpoint.HasTLSPrivateKey() {
		cKey.Value = *endpoint.Spec.ClientTLS.PrivateKey.Value
	}
	if endpoint.HasTLSPrivateKeySecret() {
		cKey.SecretKeyRef = *endpoint.Spec.ClientTLS.PrivateKey.SecretKeyRef
	}
	tlsResource.Pairs = append(tlsResource.Pairs, cKey)

	updated, _ := p.secret.ProcessResource(ctx, tlsResource)
	if updated {
		for _, np := range tlsResource.Pairs {
			dv := &commonapi.DynamicValue{
				JSON: apiextapi.JSON{
					Raw: np.Value.Raw,
				},
			}
			switch np.Name {
			case root:
				endpoint.Spec.ClientTLS.RootCA.Value = dv
			case clientCert:
				endpoint.Spec.ClientTLS.Certificate.Value = dv
			case clientKey:
				endpoint.Spec.ClientTLS.PrivateKey.Value = dv
			}
		}
	}
}
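// A minimal sketch (with hypothetical secret names) of how a client-TLS field
// travels through the generic resource above: a pair that carries only a
// secretKeyRef has its Value populated by the secret processor, and that raw
// JSON is what gets written back into the endpoint spec afterwards:
//
//	pair := commonapi.NameValuePair{Name: "clientCert"}
//	pair.SecretKeyRef = commonapi.SecretKeyRef{Name: "my-tls-secret", Key: "tls.crt"}
//	// After ProcessResource resolves the ref, pair.Value.Raw holds the
//	// certificate bytes used to set endpoint.Spec.ClientTLS.Certificate.Value.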
mikeee/dapr
pkg/runtime/processor/httpendpoints.go
GO
mit
3,276
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package lock

import (
	"context"
	"fmt"
	"io"

	contriblock "github.com/dapr/components-contrib/lock"
	compapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	complock "github.com/dapr/dapr/pkg/components/lock"
	diag "github.com/dapr/dapr/pkg/diagnostics"
	"github.com/dapr/dapr/pkg/runtime/compstore"
	rterrors "github.com/dapr/dapr/pkg/runtime/errors"
	"github.com/dapr/dapr/pkg/runtime/meta"
)

type Options struct {
	Registry       *complock.Registry
	ComponentStore *compstore.ComponentStore
	Meta           *meta.Meta
}

type lock struct {
	registry  *complock.Registry
	compStore *compstore.ComponentStore
	meta      *meta.Meta
}

func New(opts Options) *lock {
	return &lock{
		registry:  opts.Registry,
		compStore: opts.ComponentStore,
		meta:      opts.Meta,
	}
}

func (l *lock) Init(ctx context.Context, comp compapi.Component) error {
	// create the component
	fName := comp.LogName()
	store, err := l.registry.Create(comp.Spec.Type, comp.Spec.Version, fName)
	if err != nil {
		diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "creation", comp.ObjectMeta.Name)
		return rterrors.NewInit(rterrors.CreateComponentFailure, fName, err)
	}

	if store == nil {
		return nil
	}

	// initialization
	meta, err := l.meta.ToBaseMetadata(comp)
	if err != nil {
		diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "init", comp.ObjectMeta.Name)
		return rterrors.NewInit(rterrors.InitComponentFailure, fName, err)
	}
	props := meta.Properties

	err = store.InitLockStore(ctx, contriblock.Metadata{Base: meta})
	if err != nil {
		diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "init", comp.ObjectMeta.Name)
		return rterrors.NewInit(rterrors.InitComponentFailure, fName, err)
	}

	// save lock related configuration
	l.compStore.AddLock(comp.ObjectMeta.Name, store)
	err = complock.SaveLockConfiguration(comp.ObjectMeta.Name, props)
	if err != nil {
		diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "init", comp.ObjectMeta.Name)
		wrapError := fmt.Errorf("failed to save lock keyprefix: %s", err)
		return rterrors.NewInit(rterrors.InitComponentFailure, fName, wrapError)
	}

	diag.DefaultMonitoring.ComponentInitialized(comp.Spec.Type)
	return nil
}

func (l *lock) Close(comp compapi.Component) error {
	lock, ok := l.compStore.GetLock(comp.ObjectMeta.Name)
	if !ok {
		return nil
	}

	defer l.compStore.DeleteLock(comp.ObjectMeta.Name)

	closer, ok := lock.(io.Closer)
	if ok && closer != nil {
		if err := closer.Close(); err != nil {
			return err
		}
	}

	return nil
}
mikeee/dapr
pkg/runtime/processor/lock/lock.go
GO
mit
3,087
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package processor

import (
	"context"
	"fmt"

	"github.com/microsoft/durabletask-go/backend"

	"github.com/dapr/components-contrib/bindings"
	componentsapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	"github.com/dapr/dapr/pkg/runtime/meta"
)

// manager implements the life cycle events of a component category.
type manager interface {
	Init(context.Context, componentsapi.Component) error
	Close(componentsapi.Component) error
}

type StateManager interface {
	ActorStateStoreName() (string, bool)
	manager
}

type SecretManager interface {
	ProcessResource(context.Context, meta.Resource) (bool, string)
	manager
}

type SubscribeManager interface {
	InitProgramaticSubscriptions(context.Context) error
	StartAppSubscriptions() error
	StopAppSubscriptions()
	StopAllSubscriptionsForever()
	ReloadDeclaredAppSubscription(name, pubsubName string) error
	StartStreamerSubscription(key string) error
	StopStreamerSubscription(pubsubName, key string)
	ReloadPubSub(string) error
	StopPubSub(string)
}

type BindingManager interface {
	SendToOutputBinding(context.Context, string, *bindings.InvokeRequest) (*bindings.InvokeResponse, error)
	StartReadingFromBindings(context.Context) error
	StopReadingFromBindings(forever bool)
	manager
}

type WorkflowBackendManager interface {
	Backend() (backend.Backend, bool)
}

func (p *Processor) managerFromComp(comp componentsapi.Component) (manager, error) {
	category := p.category(comp)
	m, ok := p.managers[category]
	if !ok {
		return nil, fmt.Errorf("unknown component category: %q", category)
	}
	return m, nil
}

func (p *Processor) State() StateManager {
	return p.state
}

func (p *Processor) Secret() SecretManager {
	return p.secret
}

func (p *Processor) Binding() BindingManager {
	return p.binding
}

func (p *Processor) WorkflowBackend() WorkflowBackendManager {
	return p.workflowBackend
}

func (p *Processor) Subscriber() SubscribeManager {
	return p.subscriber
}
mikeee/dapr
pkg/runtime/processor/manager.go
GO
mit
2,495
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package middleware

import (
	"context"
	"fmt"

	contribmiddle "github.com/dapr/components-contrib/middleware"
	compapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	compmiddlehttp "github.com/dapr/dapr/pkg/components/middleware/http"
	"github.com/dapr/dapr/pkg/middleware/http"
	rterrors "github.com/dapr/dapr/pkg/runtime/errors"
	"github.com/dapr/dapr/pkg/runtime/meta"
)

type Options struct {
	// Metadata is the metadata helper.
	Meta *meta.Meta

	// RegistryHTTP is the HTTP middleware registry.
	RegistryHTTP *compmiddlehttp.Registry

	// HTTP is the HTTP middleware pipeline.
	HTTP *http.HTTP
}

// middleware is a component that implements the middleware interface.
type middleware struct {
	meta         *meta.Meta
	registryHTTP *compmiddlehttp.Registry
	http         *http.HTTP
}

func New(opts Options) *middleware {
	return &middleware{
		meta:         opts.Meta,
		registryHTTP: opts.RegistryHTTP,
		http:         opts.HTTP,
	}
}

func (m *middleware) Init(_ context.Context, comp compapi.Component) error {
	meta, err := m.meta.ToBaseMetadata(comp)
	if err != nil {
		return err
	}

	middle, err := m.registryHTTP.Create(comp.Spec.Type, comp.Spec.Version,
		contribmiddle.Metadata{Base: meta}, comp.LogName())
	if err != nil {
		return rterrors.NewInit(rterrors.CreateComponentFailure, comp.LogName(),
			fmt.Errorf("process component %s error: %w", comp.Name, err),
		)
	}

	m.http.Add(http.Spec{
		Component:      comp,
		Implementation: middle,
	})

	return nil
}

func (m *middleware) Close(comp compapi.Component) error {
	m.http.Remove(comp.Name)
	return nil
}
mikeee/dapr
pkg/runtime/processor/middleware/middleware.go
GO
mit
2,148
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package middleware

import (
	"context"
	nethttp "net/http"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	contribmiddleware "github.com/dapr/components-contrib/middleware"
	"github.com/dapr/dapr/pkg/apis/common"
	compapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	compmiddlehttp "github.com/dapr/dapr/pkg/components/middleware/http"
	"github.com/dapr/dapr/pkg/config"
	daprmiddleware "github.com/dapr/dapr/pkg/middleware"
	"github.com/dapr/dapr/pkg/middleware/http"
	"github.com/dapr/dapr/pkg/runtime/meta"
	"github.com/dapr/dapr/pkg/runtime/registry"
	"github.com/dapr/kit/logger"
)

func TestInit(t *testing.T) {
	t.Run("error when component type doesn't exist", func(t *testing.T) {
		m := New(Options{
			RegistryHTTP: registry.New(
				registry.NewOptions().WithHTTPMiddlewares(compmiddlehttp.NewRegistry()),
			).HTTPMiddlewares(),
			Meta: meta.New(meta.Options{}),
			HTTP: http.New(),
		})

		err := m.Init(context.Background(), compapi.Component{
			ObjectMeta: metav1.ObjectMeta{
				Name: "test",
			},
			Spec: compapi.ComponentSpec{
				Type:    "middleware.http.uppercase",
				Version: "v1",
			},
		})
		require.Error(t, err)
		require.ErrorContains(t, err, "CREATE_COMPONENT_FAILURE")
	})

	t.Run("no error when http middleware component type exists", func(t *testing.T) {
		reg := registry.New(
			registry.NewOptions().WithHTTPMiddlewares(compmiddlehttp.NewRegistry()),
		).HTTPMiddlewares()
		m := New(Options{
			RegistryHTTP: reg,
			Meta:         meta.New(meta.Options{}),
			HTTP:         http.New(),
		})

		reg.RegisterComponent(func(logger.Logger) compmiddlehttp.FactoryMethod {
			return func(meta contribmiddleware.Metadata) (daprmiddleware.HTTP, error) {
				assert.Equal(t, "test", meta.Name)
				assert.Equal(t, map[string]string{"routes": `{"/foo":"/v1.0/invoke/nowhere/method/bar"}`}, meta.Properties)
				return nil, nil
			}
		}, "mock")

		err := m.Init(context.Background(), compapi.Component{
			ObjectMeta: metav1.ObjectMeta{
				Name: "test",
			},
			Spec: compapi.ComponentSpec{
				Type:    "middleware.http.mock",
				Version: "v1",
				Metadata: []common.NameValuePair{{Name: "routes", Value: common.DynamicValue{
					JSON: apiextv1.JSON{Raw: []byte(`{"/foo":"/v1.0/invoke/nowhere/method/bar"}`)},
				}}},
			},
		})
		require.NoError(t, err)
	})

	t.Run("different version should error", func(t *testing.T) {
		reg := registry.New(
			registry.NewOptions().WithHTTPMiddlewares(compmiddlehttp.NewRegistry()),
		).HTTPMiddlewares()
		m := New(Options{
			RegistryHTTP: reg,
			Meta:         meta.New(meta.Options{}),
			HTTP:         http.New(),
		})

		reg.RegisterComponent(func(logger.Logger) compmiddlehttp.FactoryMethod {
			return func(meta contribmiddleware.Metadata) (daprmiddleware.HTTP, error) {
				return nil, nil
			}
		}, "mock")

		err := m.Init(context.Background(), compapi.Component{
			ObjectMeta: metav1.ObjectMeta{
				Name: "test",
			},
			Spec: compapi.ComponentSpec{
				Type:    "middleware.http.mock",
				Version: "v2",
			},
		})
		require.Error(t, err)
	})

	t.Run("different type should error", func(t *testing.T) {
		reg := registry.New(
			registry.NewOptions().WithHTTPMiddlewares(compmiddlehttp.NewRegistry()),
		).HTTPMiddlewares()
		m := New(Options{
			RegistryHTTP: reg,
			Meta:         meta.New(meta.Options{}),
			HTTP:         http.New(),
		})

		reg.RegisterComponent(func(logger.Logger) compmiddlehttp.FactoryMethod {
			return func(meta contribmiddleware.Metadata) (daprmiddleware.HTTP, error) {
				return nil, nil
			}
		}, "notmock")

		err := m.Init(context.Background(), compapi.Component{
			ObjectMeta: metav1.ObjectMeta{
				Name: "test",
			},
			Spec: compapi.ComponentSpec{
				Type:    "middleware.http.mock",
				Version: "v1",
			},
		})
		require.Error(t, err)
	})

	t.Run("middleware should be added to HTTP middleware manager", func(t *testing.T) {
		reg := registry.New(
			registry.NewOptions().WithHTTPMiddlewares(compmiddlehttp.NewRegistry()),
		).HTTPMiddlewares()
		mngr := http.New()
		pipeline := mngr.BuildPipelineFromSpec("test", &config.PipelineSpec{
			Handlers: []config.HandlerSpec{
				{
					Name:    "test",
					Type:    "middleware.http.mock",
					Version: "v1",
				},
			},
		})
		m := New(Options{
			RegistryHTTP: reg,
			Meta:         meta.New(meta.Options{}),
			HTTP:         mngr,
		})

		var rootCalled int
		handler := pipeline(nethttp.HandlerFunc(func(nethttp.ResponseWriter, *nethttp.Request) {
			rootCalled++
		}))

		var middlewareCalled int
		reg.RegisterComponent(func(logger.Logger) compmiddlehttp.FactoryMethod {
			return func(meta contribmiddleware.Metadata) (daprmiddleware.HTTP, error) {
				return func(next nethttp.Handler) nethttp.Handler {
					middlewareCalled++
					return next
				}, nil
			}
		}, "mock")

		assert.Equal(t, 0, rootCalled)
		handler.ServeHTTP(nil, nil)
		assert.Equal(t, 1, rootCalled)
		assert.Equal(t, 0, middlewareCalled)

		err := m.Init(context.Background(), compapi.Component{
			ObjectMeta: metav1.ObjectMeta{
				Name: "test",
			},
			Spec: compapi.ComponentSpec{
				Type:    "middleware.http.mock",
				Version: "v1",
			},
		})
		require.NoError(t, err)

		handler.ServeHTTP(nil, nil)
		assert.Equal(t, 2, rootCalled)
		assert.Equal(t, 1, middlewareCalled)
	})

	t.Run("middleware should be removed from HTTP middleware manager when closed", func(t *testing.T) {
		reg := registry.New(
			registry.NewOptions().WithHTTPMiddlewares(compmiddlehttp.NewRegistry()),
		).HTTPMiddlewares()
		mngr := http.New()
		pipeline := mngr.BuildPipelineFromSpec("test", &config.PipelineSpec{
			Handlers: []config.HandlerSpec{
				{Name: "test1", Type: "middleware.http.mock", Version: "v1"},
				{Name: "test2", Type: "middleware.http.mock", Version: "v1"},
			},
		})
		m := New(Options{
			RegistryHTTP: reg,
			Meta:         meta.New(meta.Options{}),
			HTTP:         mngr,
		})

		var rootCalled int
		handler := pipeline(nethttp.HandlerFunc(func(nethttp.ResponseWriter, *nethttp.Request) {
			rootCalled++
		}))

		var middlewareCalled int
		reg.RegisterComponent(func(logger.Logger) compmiddlehttp.FactoryMethod {
			return func(meta contribmiddleware.Metadata) (daprmiddleware.HTTP, error) {
				return func(next nethttp.Handler) nethttp.Handler {
					return nethttp.HandlerFunc(func(w nethttp.ResponseWriter, r *nethttp.Request) {
						middlewareCalled++
						next.ServeHTTP(w, r)
					})
				}, nil
			}
		}, "mock")

		require.NoError(t, m.Init(context.Background(), compapi.Component{
			ObjectMeta: metav1.ObjectMeta{Name: "test1"},
			Spec:       compapi.ComponentSpec{Type: "middleware.http.mock", Version: "v1"},
		}))
		require.NoError(t, m.Init(context.Background(), compapi.Component{
			ObjectMeta: metav1.ObjectMeta{Name: "test2"},
			Spec:       compapi.ComponentSpec{Type: "middleware.http.mock", Version: "v1"},
		}))

		assert.Equal(t, 0, rootCalled)
		assert.Equal(t, 0, middlewareCalled)
		handler.ServeHTTP(nil, nil)
		assert.Equal(t, 1, rootCalled)
		assert.Equal(t, 2, middlewareCalled)
		handler.ServeHTTP(nil, nil)
		assert.Equal(t, 2, rootCalled)
		assert.Equal(t, 4, middlewareCalled)

		m.Close(compapi.Component{ObjectMeta: metav1.ObjectMeta{Name: "test1"}})
		handler.ServeHTTP(nil, nil)
		assert.Equal(t, 3, rootCalled)
		assert.Equal(t, 5, middlewareCalled)

		m.Close(compapi.Component{ObjectMeta: metav1.ObjectMeta{Name: "test2"}})
		handler.ServeHTTP(nil, nil)
		assert.Equal(t, 4, rootCalled)
		assert.Equal(t, 5, middlewareCalled)
	})
}
mikeee/dapr
pkg/runtime/processor/middleware/middleware_test.go
GO
mit
8,195
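The tests above all register factories of the same shape. As a hedged illustration (a sketch, not code from the repo), here is a minimal standalone HTTP middleware factory matching the compmiddlehttp.FactoryMethod signature those tests exercise; the "X-Example" header and the Echo name are hypothetical.

package example

import (
	nethttp "net/http"

	contribmiddleware "github.com/dapr/components-contrib/middleware"
	daprmiddleware "github.com/dapr/dapr/pkg/middleware"
	"github.com/dapr/kit/logger"
)

// NewEcho returns a factory with the same shape the tests register via
// reg.RegisterComponent: logger -> FactoryMethod -> (middleware, error).
// The "X-Example" response header is purely illustrative.
func NewEcho(_ logger.Logger) func(contribmiddleware.Metadata) (daprmiddleware.HTTP, error) {
	return func(meta contribmiddleware.Metadata) (daprmiddleware.HTTP, error) {
		return func(next nethttp.Handler) nethttp.Handler {
			return nethttp.HandlerFunc(func(w nethttp.ResponseWriter, r *nethttp.Request) {
				w.Header().Set("X-Example", meta.Name) // hypothetical header
				next.ServeHTTP(w, r)
			})
		}, nil
	}
}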
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package processor

import (
	"context"
	"errors"
	"sync"
	"sync/atomic"
	"time"

	grpcmanager "github.com/dapr/dapr/pkg/api/grpc/manager"
	componentsapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	httpendpointsapi "github.com/dapr/dapr/pkg/apis/httpEndpoint/v1alpha1"
	"github.com/dapr/dapr/pkg/components"
	"github.com/dapr/dapr/pkg/config"
	"github.com/dapr/dapr/pkg/middleware/http"
	"github.com/dapr/dapr/pkg/modes"
	"github.com/dapr/dapr/pkg/outbox"
	operatorv1 "github.com/dapr/dapr/pkg/proto/operator/v1"
	"github.com/dapr/dapr/pkg/resiliency"
	"github.com/dapr/dapr/pkg/runtime/channels"
	"github.com/dapr/dapr/pkg/runtime/compstore"
	"github.com/dapr/dapr/pkg/runtime/meta"
	"github.com/dapr/dapr/pkg/runtime/processor/binding"
	"github.com/dapr/dapr/pkg/runtime/processor/configuration"
	"github.com/dapr/dapr/pkg/runtime/processor/crypto"
	"github.com/dapr/dapr/pkg/runtime/processor/lock"
	"github.com/dapr/dapr/pkg/runtime/processor/middleware"
	"github.com/dapr/dapr/pkg/runtime/processor/pubsub"
	"github.com/dapr/dapr/pkg/runtime/processor/secret"
	"github.com/dapr/dapr/pkg/runtime/processor/state"
	"github.com/dapr/dapr/pkg/runtime/processor/subscriber"
	"github.com/dapr/dapr/pkg/runtime/processor/wfbackend"
	rtpubsub "github.com/dapr/dapr/pkg/runtime/pubsub"
	"github.com/dapr/dapr/pkg/runtime/registry"
	"github.com/dapr/dapr/pkg/security"
	"github.com/dapr/kit/concurrency"
	"github.com/dapr/kit/logger"
)

const (
	defaultComponentInitTimeout = time.Second * 5
)

var log = logger.NewLogger("dapr.runtime.processor")

type Options struct {
	// ID is the ID of this Dapr instance.
	ID string

	// Namespace is the namespace of this Dapr instance.
	Namespace string

	// Mode is the mode of this Dapr instance.
	Mode modes.DaprMode

	// PodName is the name of the pod.
	PodName string

	// ActorsEnabled indicates whether the placement service is enabled in this Dapr cluster.
	ActorsEnabled bool

	// IsHTTP indicates whether the connection to the application is using the
	// HTTP protocol.
	IsHTTP bool

	// Registry is the all-component registry.
	Registry *registry.Registry

	// ComponentStore is the component store.
	ComponentStore *compstore.ComponentStore

	// Meta is the metadata helper.
	Meta *meta.Meta

	// GlobalConfig is the global configuration.
	GlobalConfig *config.Configuration

	Resiliency resiliency.Provider

	GRPC *grpcmanager.Manager

	Channels *channels.Channels

	OperatorClient operatorv1.OperatorClient

	MiddlewareHTTP *http.HTTP

	Security security.Handler

	Outbox outbox.Outbox

	Adapter rtpubsub.Adapter

	AdapterStreamer rtpubsub.AdapterStreamer
}

// Processor manages the lifecycle of all component categories.
type Processor struct {
	appID           string
	compStore       *compstore.ComponentStore
	managers        map[components.Category]manager
	state           StateManager
	secret          SecretManager
	binding         BindingManager
	workflowBackend WorkflowBackendManager
	security        security.Handler
	subscriber      *subscriber.Subscriber

	pendingHTTPEndpoints       chan httpendpointsapi.HTTPEndpoint
	pendingComponents          chan componentsapi.Component
	pendingComponentsWaiting   sync.WaitGroup
	pendingComponentDependents map[string][]componentsapi.Component
	subErrCh                   chan error

	lock     sync.RWMutex
	chlock   sync.RWMutex
	running  atomic.Bool
	shutdown atomic.Bool
	closedCh chan struct{}
}

func New(opts Options) *Processor {
	subscriber := subscriber.New(subscriber.Options{
		AppID:           opts.ID,
		Namespace:       opts.Namespace,
		Resiliency:      opts.Resiliency,
		TracingSpec:     opts.GlobalConfig.Spec.TracingSpec,
		IsHTTP:          opts.IsHTTP,
		Channels:        opts.Channels,
		GRPC:            opts.GRPC,
		CompStore:       opts.ComponentStore,
		Adapter:         opts.Adapter,
		AdapterStreamer: opts.AdapterStreamer,
	})

	state := state.New(state.Options{
		ActorsEnabled:  opts.ActorsEnabled,
		Registry:       opts.Registry.StateStores(),
		ComponentStore: opts.ComponentStore,
		Meta:           opts.Meta,
		Outbox:         opts.Outbox,
	})

	secret := secret.New(secret.Options{
		Registry:       opts.Registry.SecretStores(),
		ComponentStore: opts.ComponentStore,
		Meta:           opts.Meta,
		OperatorClient: opts.OperatorClient,
	})

	binding := binding.New(binding.Options{
		Registry:       opts.Registry.Bindings(),
		ComponentStore: opts.ComponentStore,
		Meta:           opts.Meta,
		IsHTTP:         opts.IsHTTP,
		Resiliency:     opts.Resiliency,
		GRPC:           opts.GRPC,
		TracingSpec:    opts.GlobalConfig.Spec.TracingSpec,
		Channels:       opts.Channels,
	})

	wfbe := wfbackend.New(wfbackend.Options{
		AppID:          opts.ID,
		Registry:       opts.Registry.WorkflowBackends(),
		ComponentStore: opts.ComponentStore,
		Meta:           opts.Meta,
	})

	return &Processor{
		appID:                      opts.ID,
		pendingHTTPEndpoints:       make(chan httpendpointsapi.HTTPEndpoint),
		pendingComponents:          make(chan componentsapi.Component),
		pendingComponentDependents: make(map[string][]componentsapi.Component),
		subErrCh:                   make(chan error),
		closedCh:                   make(chan struct{}),
		compStore:                  opts.ComponentStore,
		state:                      state,
		binding:                    binding,
		secret:                     secret,
		workflowBackend:            wfbe,
		security:                   opts.Security,
		subscriber:                 subscriber,
		managers: map[components.Category]manager{
			components.CategoryBindings: binding,
			components.CategoryConfiguration: configuration.New(configuration.Options{
				Registry:       opts.Registry.Configurations(),
				ComponentStore: opts.ComponentStore,
				Meta:           opts.Meta,
			}),
			components.CategoryCryptoProvider: crypto.New(crypto.Options{
				Registry:       opts.Registry.Crypto(),
				ComponentStore: opts.ComponentStore,
				Meta:           opts.Meta,
			}),
			components.CategoryLock: lock.New(lock.Options{
				Registry:       opts.Registry.Locks(),
				ComponentStore: opts.ComponentStore,
				Meta:           opts.Meta,
			}),
			components.CategoryPubSub: pubsub.New(pubsub.Options{
				AppID:          opts.ID,
				Registry:       opts.Registry.PubSubs(),
				Meta:           opts.Meta,
				ComponentStore: opts.ComponentStore,
				Subscriber:     subscriber,
			}),
			components.CategorySecretStore:     secret,
			components.CategoryStateStore:      state,
			components.CategoryWorkflowBackend: wfbe,
			components.CategoryMiddleware: middleware.New(middleware.Options{
				Meta:         opts.Meta,
				RegistryHTTP: opts.Registry.HTTPMiddlewares(),
				HTTP:         opts.MiddlewareHTTP,
			}),
		},
	}
}

func (p *Processor) Process(ctx context.Context) error {
	if !p.running.CompareAndSwap(false, true) {
		return errors.New("processor is already running")
	}

	return concurrency.NewRunnerManager(
		p.processComponents,
		p.processHTTPEndpoints,
		p.processSubscriptions,
		p.subscriber.Run,
		func(ctx context.Context) error {
			<-ctx.Done()
			close(p.closedCh)
			p.chlock.Lock()
			defer p.chlock.Unlock()
			p.shutdown.Store(true)
			close(p.pendingComponents)
			close(p.pendingHTTPEndpoints)
			return nil
		},
	).Run(ctx)
}
mikeee/dapr
pkg/runtime/processor/processor.go
GO
mit
7,827
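As a rough usage sketch (assembled from the constructors the test files in this dump already use, with an illustrative app ID; not an excerpt from the repo), wiring and running a Processor looks roughly like this:

package example

import (
	"context"

	"github.com/dapr/dapr/pkg/config"
	"github.com/dapr/dapr/pkg/modes"
	"github.com/dapr/dapr/pkg/resiliency"
	"github.com/dapr/dapr/pkg/runtime/channels"
	"github.com/dapr/dapr/pkg/runtime/compstore"
	"github.com/dapr/dapr/pkg/runtime/meta"
	"github.com/dapr/dapr/pkg/runtime/processor"
	"github.com/dapr/dapr/pkg/runtime/registry"
	"github.com/dapr/dapr/pkg/security/fake"
	"github.com/dapr/kit/logger"
)

func run(ctx context.Context) error {
	// Minimal wiring, mirroring newTestProcWithID in processor_test.go.
	proc := processor.New(processor.Options{
		ID:             "myapp", // illustrative app ID
		Namespace:      "default",
		Mode:           modes.StandaloneMode,
		Registry:       registry.New(registry.NewOptions()),
		ComponentStore: compstore.New(),
		Meta:           meta.New(meta.Options{ID: "myapp", Mode: modes.StandaloneMode}),
		GlobalConfig:   new(config.Configuration),
		Resiliency:     resiliency.New(logger.NewLogger("example")),
		Channels:       new(channels.Channels), // wire real channels in a full runtime
		Security:       fake.New(),
	})

	// Process blocks until ctx is done; a second call errors because the
	// processor guards against being started twice.
	return proc.Process(ctx)
}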
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package processor

import (
	"context"
	"strings"
	"testing"
	"time"

	"github.com/google/uuid"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/dapr/components-contrib/pubsub"
	"github.com/dapr/components-contrib/secretstores"
	commonapi "github.com/dapr/dapr/pkg/apis/common"
	componentsapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	"github.com/dapr/dapr/pkg/components"
	"github.com/dapr/dapr/pkg/config"
	"github.com/dapr/dapr/pkg/modes"
	"github.com/dapr/dapr/pkg/resiliency"
	"github.com/dapr/dapr/pkg/runtime/channels"
	"github.com/dapr/dapr/pkg/runtime/compstore"
	"github.com/dapr/dapr/pkg/runtime/meta"
	rtmock "github.com/dapr/dapr/pkg/runtime/mock"
	"github.com/dapr/dapr/pkg/runtime/registry"
	"github.com/dapr/dapr/pkg/security/fake"
	daprt "github.com/dapr/dapr/pkg/testing"
	"github.com/dapr/kit/logger"
)

func newTestProcWithID(id string) (*Processor, *registry.Registry) {
	reg := registry.New(registry.NewOptions())
	return New(Options{
		ID:             id,
		Namespace:      "test",
		Registry:       reg,
		ComponentStore: compstore.New(),
		Meta: meta.New(meta.Options{
			ID:        id,
			PodName:   "testPodName",
			Namespace: "test",
			Mode:      modes.StandaloneMode,
		}),
		Resiliency:     resiliency.New(log),
		Mode:           modes.StandaloneMode,
		PodName:        "testPodName",
		OperatorClient: nil,
		GRPC:           nil,
		Channels:       new(channels.Channels),
		GlobalConfig:   new(config.Configuration),
		Security:       fake.New(),
	}), reg
}

func newTestProc() (*Processor, *registry.Registry) {
	return newTestProcWithID("id")
}

func TestProcessComponentsAndDependents(t *testing.T) {
	proc, _ := newTestProc()

	incorrectComponentType := componentsapi.Component{
		ObjectMeta: metav1.ObjectMeta{
			Name: "testpubsub",
		},
		Spec: componentsapi.ComponentSpec{
			Type:     "pubsubs.mockPubSub",
			Version:  "v1",
			Metadata: daprt.GetFakeMetadataItems(),
		},
	}

	t.Run("test incorrect type", func(t *testing.T) {
		err := proc.processComponentAndDependents(context.Background(), incorrectComponentType)
		require.Error(t, err, "expected an error")
		assert.Equal(t, "incorrect type pubsubs.mockPubSub", err.Error(), "expected error strings to match")
	})
}

func TestInitSecretStores(t *testing.T) {
	t.Run("init with store", func(t *testing.T) {
		proc, reg := newTestProc()
		m := rtmock.NewMockKubernetesStore()
		reg.SecretStores().RegisterComponent(
			func(_ logger.Logger) secretstores.SecretStore {
				return m
			},
			"kubernetesMock",
		)

		err := proc.processComponentAndDependents(context.Background(), componentsapi.Component{
			ObjectMeta: metav1.ObjectMeta{
				Name: "kubernetesMock",
			},
			Spec: componentsapi.ComponentSpec{
				Type:    "secretstores.kubernetesMock",
				Version: "v1",
			},
		})
		require.NoError(t, err)
	})

	t.Run("secret store is registered", func(t *testing.T) {
		proc, reg := newTestProc()
		m := rtmock.NewMockKubernetesStore()
		reg.SecretStores().RegisterComponent(
			func(_ logger.Logger) secretstores.SecretStore {
				return m
			},
			"kubernetesMock",
		)

		err := proc.processComponentAndDependents(context.Background(), componentsapi.Component{
			ObjectMeta: metav1.ObjectMeta{
				Name: "kubernetesMock",
			},
			Spec: componentsapi.ComponentSpec{
				Type:    "secretstores.kubernetesMock",
				Version: "v1",
			},
		})
		require.NoError(t, err)

		store, ok := proc.compStore.GetSecretStore("kubernetesMock")
		assert.True(t, ok)
		assert.NotNil(t, store)
	})

	t.Run("get secret store", func(t *testing.T) {
		proc, reg := newTestProc()
		m := rtmock.NewMockKubernetesStore()
		reg.SecretStores().RegisterComponent(
			func(_ logger.Logger) secretstores.SecretStore {
				return m
			},
			"kubernetesMock",
		)

		proc.processComponentAndDependents(context.Background(), componentsapi.Component{
			ObjectMeta: metav1.ObjectMeta{
				Name: "kubernetesMock",
			},
			Spec: componentsapi.ComponentSpec{
				Type:    "secretstores.kubernetesMock",
				Version: "v1",
			},
		})

		s, ok := proc.compStore.GetSecretStore("kubernetesMock")
		assert.True(t, ok)
		assert.NotNil(t, s)
	})
}

func TestExtractComponentCategory(t *testing.T) {
	compCategoryTests := []struct {
		specType string
		category string
	}{
		{"pubsub.redis", "pubsub"},
		{"pubsubs.redis", ""},
		{"secretstores.azure.keyvault", "secretstores"},
		{"secretstore.azure.keyvault", ""},
		{"state.redis", "state"},
		{"states.redis", ""},
		{"bindings.kafka", "bindings"},
		{"binding.kafka", ""},
		{"this.is.invalid.category", ""},
	}

	p := New(Options{
		Registry:     registry.New(registry.NewOptions()),
		GlobalConfig: new(config.Configuration),
	})

	for _, tt := range compCategoryTests {
		t.Run(tt.specType, func(t *testing.T) {
			fakeComp := componentsapi.Component{
				Spec: componentsapi.ComponentSpec{
					Type:    tt.specType,
					Version: "v1",
				},
			}
			assert.Equal(t, string(p.category(fakeComp)), tt.category)
		})
	}
}

func TestMetadataUUID(t *testing.T) {
	pubsubComponent := componentsapi.Component{
		ObjectMeta: metav1.ObjectMeta{
			Name: "testpubsub",
		},
		Spec: componentsapi.ComponentSpec{
			Type:     "pubsub.mockPubSub",
			Version:  "v1",
			Metadata: daprt.GetFakeMetadataItems(),
		},
	}

	pubsubComponent.Spec.Metadata = append(
		pubsubComponent.Spec.Metadata,
		commonapi.NameValuePair{
			Name: "consumerID",
			Value: commonapi.DynamicValue{
				JSON: v1.JSON{
					Raw: []byte("{uuid}"),
				},
			},
		}, commonapi.NameValuePair{
			Name: "twoUUIDs",
			Value: commonapi.DynamicValue{
				JSON: v1.JSON{
					Raw: []byte("{uuid} {uuid}"),
				},
			},
		})

	proc, reg := newTestProc()

	mockPubSub := new(daprt.MockPubSub)
	reg.PubSubs().RegisterComponent(
		func(_ logger.Logger) pubsub.PubSub {
			return mockPubSub
		},
		"mockPubSub",
	)

	mockPubSub.On("Init", mock.Anything).Return(nil).Run(func(args mock.Arguments) {
		metadata := args.Get(0).(pubsub.Metadata)
		consumerID := metadata.Properties["consumerID"]
		var uuid0, uuid1, uuid2 uuid.UUID
		uuid0, err := uuid.Parse(consumerID)
		require.NoError(t, err)

		twoUUIDs := metadata.Properties["twoUUIDs"]
		uuids := strings.Split(twoUUIDs, " ")
		assert.Len(t, uuids, 2)
		uuid1, err = uuid.Parse(uuids[0])
		require.NoError(t, err)
		uuid2, err = uuid.Parse(uuids[1])
		require.NoError(t, err)

		assert.NotEqual(t, uuid0, uuid1)
		assert.NotEqual(t, uuid0, uuid2)
		assert.NotEqual(t, uuid1, uuid2)
	})

	err := proc.processComponentAndDependents(context.Background(), pubsubComponent)
	require.NoError(t, err)
}

func TestMetadataPodName(t *testing.T) {
	t.Setenv("POD_NAME", "testPodName")

	pubsubComponent := componentsapi.Component{
		ObjectMeta: metav1.ObjectMeta{
			Name: "testpubsub",
		},
		Spec: componentsapi.ComponentSpec{
			Type:     "pubsub.mockPubSub",
			Version:  "v1",
			Metadata: daprt.GetFakeMetadataItems(),
		},
	}

	pubsubComponent.Spec.Metadata = append(
		pubsubComponent.Spec.Metadata,
		commonapi.NameValuePair{
			Name: "consumerID",
			Value: commonapi.DynamicValue{
				JSON: v1.JSON{
					Raw: []byte("{podName}"),
				},
			},
		})

	proc, reg := newTestProc()

	mockPubSub := new(daprt.MockPubSub)
	reg.PubSubs().RegisterComponent(
		func(_ logger.Logger) pubsub.PubSub {
			return mockPubSub
		},
		"mockPubSub",
	)

	mockPubSub.On("Init", mock.Anything).Return(nil).Run(func(args mock.Arguments) {
		metadata := args.Get(0).(pubsub.Metadata)
		consumerID := metadata.Properties["consumerID"]
		assert.Equal(t, "testPodName", consumerID)
	})

	err := proc.processComponentAndDependents(context.Background(), pubsubComponent)
	require.NoError(t, err)
}

func TestMetadataNamespace(t *testing.T) {
	t.Setenv("NAMESPACE", "test")

	pubsubComponent := componentsapi.Component{
		ObjectMeta: metav1.ObjectMeta{
			Name: "testpubsub",
		},
		Spec: componentsapi.ComponentSpec{
			Type:     "pubsub.mockPubSub",
			Version:  "v1",
			Metadata: daprt.GetFakeMetadataItems(),
		},
	}

	pubsubComponent.Spec.Metadata = append(
		pubsubComponent.Spec.Metadata,
		commonapi.NameValuePair{
			Name: "consumerID",
			Value: commonapi.DynamicValue{
				JSON: v1.JSON{
					Raw: []byte("{namespace}"),
				},
			},
		})

	proc, reg := newTestProcWithID("app1")

	mockPubSub := new(daprt.MockPubSub)
	reg.PubSubs().RegisterComponent(
		func(_ logger.Logger) pubsub.PubSub {
			return mockPubSub
		},
		"mockPubSub",
	)

	mockPubSub.On("Init", mock.Anything).Return(nil).Run(func(args mock.Arguments) {
		metadata := args.Get(0).(pubsub.Metadata)
		consumerID := metadata.Properties["consumerID"]
		assert.Equal(t, "test.app1", consumerID)
	})

	err := proc.processComponentAndDependents(context.Background(), pubsubComponent)
	require.NoError(t, err)
}

func TestMetadataClientID(t *testing.T) {
	pubsubComponent := componentsapi.Component{
		ObjectMeta: metav1.ObjectMeta{
			Name: "testpubsub",
		},
		Spec: componentsapi.ComponentSpec{
			Type:     "pubsub.mockPubSub",
			Version:  "v1",
			Metadata: daprt.GetFakeMetadataItems(),
		},
	}

	// ClientID should be namespace.AppID for Kubernetes
	t.Run("Kubernetes Mode AppID", func(t *testing.T) {
		t.Setenv("NAMESPACE", "test")

		pubsubComponent.Spec.Metadata = append(
			pubsubComponent.Spec.Metadata,
			commonapi.NameValuePair{
				Name: "clientID",
				Value: commonapi.DynamicValue{
					JSON: v1.JSON{
						Raw: []byte("{namespace}"),
					},
				},
			})

		proc, reg := newTestProcWithID("myApp")

		mockPubSub := new(daprt.MockPubSub)
		reg.PubSubs().RegisterComponent(
			func(_ logger.Logger) pubsub.PubSub {
				return mockPubSub
			},
			"mockPubSub",
		)

		var k8sClientID string
		clientIDChan := make(chan string, 1)
		mockPubSub.On("Init", mock.Anything).Return(nil).Run(func(args mock.Arguments) {
			metadata := args.Get(0).(pubsub.Metadata)
			k8sClientID = metadata.Properties["clientID"]
			clientIDChan <- k8sClientID
		})

		err := proc.processComponentAndDependents(context.Background(), pubsubComponent)
		require.NoError(t, err)

		select {
		case clientID := <-clientIDChan:
			assert.Equal(t, "test.myApp", clientID)
		case <-time.After(20 * time.Second):
			t.Error("Timed out waiting for clientID for Kubernetes Mode test")
		}
	})

	// ClientID should be AppID for Self-Hosted
	t.Run("Standalone Mode AppID", func(t *testing.T) {
		pubsubComponent.Spec.Metadata = append(
			pubsubComponent.Spec.Metadata,
			commonapi.NameValuePair{
				Name: "clientID",
				Value: commonapi.DynamicValue{
					JSON: v1.JSON{
						Raw: []byte("{appID} {appID}"),
					},
				},
			})

		proc, reg := newTestProcWithID(daprt.TestRuntimeConfigID)

		mockPubSub := new(daprt.MockPubSub)
		reg.PubSubs().RegisterComponent(
			func(_ logger.Logger) pubsub.PubSub {
				return mockPubSub
			},
			"mockPubSub",
		)

		var standAloneClientID string
		clientIDChan := make(chan string, 1)
		mockPubSub.On("Init", mock.Anything).Return(nil).Run(func(args mock.Arguments) {
			metadata := args.Get(0).(pubsub.Metadata)
			standAloneClientID = metadata.Properties["clientID"]
			clientIDChan <- standAloneClientID
		})

		err := proc.processComponentAndDependents(context.Background(), pubsubComponent)
		require.NoError(t, err)

		appIds := strings.Split(standAloneClientID, " ")
		assert.Len(t, appIds, 2)
		for _, appID := range appIds {
			assert.Equal(t, daprt.TestRuntimeConfigID, appID)
		}

		select {
		case clientID := <-clientIDChan:
			assert.Equal(t, standAloneClientID, clientID)
		case <-time.After(20 * time.Second):
			t.Error("Timed out waiting for clientID for Standalone Mode test")
		}
	})
}

func TestProcessNoWorkflow(t *testing.T) {
	proc, _ := newTestProc()
	_, ok := proc.managers[components.CategoryWorkflow]
	require.False(t, ok, "workflow cannot be registered as user facing component")
}
mikeee/dapr
pkg/runtime/processor/processor_test.go
GO
mit
12,475
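Summarizing the templating behavior the metadata tests above pin down (a sketch, not repo code; the helper name is ours), a component author can rely on these tokens being resolved at component init time:

package example

import (
	v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"

	commonapi "github.com/dapr/dapr/pkg/apis/common"
)

// templatedConsumerID shows the metadata templating verified above.
// Tokens resolved during component init, per those tests:
//   {uuid}      -> a fresh UUID for each occurrence
//   {podName}   -> the POD_NAME env var
//   {appID}     -> the app ID
//   {namespace} -> "<namespace>.<appID>" (e.g. "test.app1")
func templatedConsumerID() commonapi.NameValuePair {
	return commonapi.NameValuePair{
		Name:  "consumerID",
		Value: commonapi.DynamicValue{JSON: v1.JSON{Raw: []byte("{appID}")}},
	}
}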
/*
Copyright 2024 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pubsub

import (
	"context"
	"strings"
	"sync"

	contribpubsub "github.com/dapr/components-contrib/pubsub"
	compapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	comppubsub "github.com/dapr/dapr/pkg/components/pubsub"
	diag "github.com/dapr/dapr/pkg/diagnostics"
	"github.com/dapr/dapr/pkg/runtime/compstore"
	rterrors "github.com/dapr/dapr/pkg/runtime/errors"
	"github.com/dapr/dapr/pkg/runtime/meta"
	"github.com/dapr/dapr/pkg/runtime/processor/subscriber"
	rtpubsub "github.com/dapr/dapr/pkg/runtime/pubsub"
	"github.com/dapr/dapr/pkg/scopes"
)

type Options struct {
	AppID          string
	Registry       *comppubsub.Registry
	Meta           *meta.Meta
	ComponentStore *compstore.ComponentStore
	Subscriber     *subscriber.Subscriber
}

type pubsub struct {
	appID      string
	registry   *comppubsub.Registry
	meta       *meta.Meta
	compStore  *compstore.ComponentStore
	subscriber *subscriber.Subscriber

	lock sync.RWMutex
}

func New(opts Options) *pubsub {
	return &pubsub{
		appID:      opts.AppID,
		registry:   opts.Registry,
		meta:       opts.Meta,
		compStore:  opts.ComponentStore,
		subscriber: opts.Subscriber,
	}
}

func (p *pubsub) Init(ctx context.Context, comp compapi.Component) error {
	p.lock.Lock()
	defer p.lock.Unlock()

	fName := comp.LogName()
	pubSub, err := p.registry.Create(comp.Spec.Type, comp.Spec.Version, fName)
	if err != nil {
		diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "creation", comp.ObjectMeta.Name)
		return rterrors.NewInit(rterrors.CreateComponentFailure, fName, err)
	}

	baseMetadata, err := p.meta.ToBaseMetadata(comp)
	if err != nil {
		diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "init", comp.ObjectMeta.Name)
		return rterrors.NewInit(rterrors.InitComponentFailure, fName, err)
	}

	properties := baseMetadata.Properties
	consumerID := strings.TrimSpace(properties["consumerID"])
	if consumerID == "" {
		consumerID = p.appID
	}
	properties["consumerID"] = consumerID

	err = pubSub.Init(ctx, contribpubsub.Metadata{Base: baseMetadata})
	if err != nil {
		diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "init", comp.ObjectMeta.Name)
		return rterrors.NewInit(rterrors.InitComponentFailure, fName, err)
	}

	pubsubName := comp.ObjectMeta.Name

	pubsubItem := &rtpubsub.PubsubItem{
		Component:           pubSub,
		ScopedSubscriptions: scopes.GetScopedTopics(scopes.SubscriptionScopes, p.appID, properties),
		ScopedPublishings:   scopes.GetScopedTopics(scopes.PublishingScopes, p.appID, properties),
		AllowedTopics:       scopes.GetAllowedTopics(properties),
		ProtectedTopics:     scopes.GetProtectedTopics(properties),
		NamespaceScoped:     meta.ContainsNamespace(comp.Spec.Metadata),
	}

	p.compStore.AddPubSub(pubsubName, pubsubItem)

	if err := p.subscriber.ReloadPubSub(pubsubName); err != nil {
		p.compStore.DeletePubSub(pubsubName)
		diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "init", comp.ObjectMeta.Name)
		return rterrors.NewInit(rterrors.InitComponentFailure, fName, err)
	}

	diag.DefaultMonitoring.ComponentInitialized(comp.Spec.Type)

	return nil
}

func (p *pubsub) Close(comp compapi.Component) error {
	p.lock.Lock()
	defer p.lock.Unlock()

	p.subscriber.StopPubSub(comp.Name)

	ps, ok := p.compStore.GetPubSub(comp.Name)
	if !ok {
		return nil
	}
	defer p.compStore.DeletePubSub(comp.Name)

	if err := ps.Component.Close(); err != nil {
		return err
	}

	return nil
}
mikeee/dapr
pkg/runtime/processor/pubsub/pubsub.go
GO
mit
3,979
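The consumerID handling in Init above boils down to a single defaulting rule; a small sketch restating it (the helper name is ours, not the repo's):

package example

import "strings"

// effectiveConsumerID mirrors the defaulting in pubsub.Init: a blank or
// whitespace-only consumerID falls back to the app ID.
func effectiveConsumerID(properties map[string]string, appID string) string {
	if id := strings.TrimSpace(properties["consumerID"]); id != "" {
		return id
	}
	return appID
}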
/*
Copyright 2024 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pubsub

import (
	"context"
	"encoding/json"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/dapr/components-contrib/metadata"
	contribpubsub "github.com/dapr/components-contrib/pubsub"
	commonapi "github.com/dapr/dapr/pkg/apis/common"
	componentsV1alpha1 "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	channelt "github.com/dapr/dapr/pkg/channel/testing"
	invokev1 "github.com/dapr/dapr/pkg/messaging/v1"
	"github.com/dapr/dapr/pkg/resiliency"
	"github.com/dapr/dapr/pkg/runtime/channels"
	"github.com/dapr/dapr/pkg/runtime/compstore"
	"github.com/dapr/dapr/pkg/runtime/meta"
	"github.com/dapr/dapr/pkg/runtime/processor/subscriber"
	runtimePubsub "github.com/dapr/dapr/pkg/runtime/pubsub"
	"github.com/dapr/dapr/pkg/runtime/registry"
	daprt "github.com/dapr/dapr/pkg/testing"
	"github.com/dapr/kit/logger"
)

const (
	TestPubsubName       = "testpubsub"
	TestSecondPubsubName = "testpubsub2"
	TestRuntimeConfigID  = "consumer0"
)

func TestInitPubSub(t *testing.T) {
	pubsubComponents := []componentsV1alpha1.Component{
		{
			ObjectMeta: metav1.ObjectMeta{
				Name: TestPubsubName,
			},
			Spec: componentsV1alpha1.ComponentSpec{
				Type:     "pubsub.mockPubSub",
				Version:  "v1",
				Metadata: daprt.GetFakeMetadataItems(),
			},
		}, {
			ObjectMeta: metav1.ObjectMeta{
				Name: TestSecondPubsubName,
			},
			Spec: componentsV1alpha1.ComponentSpec{
				Type:     "pubsub.mockPubSub2",
				Version:  "v1",
				Metadata: daprt.GetFakeMetadataItems(),
			},
		},
	}

	initMockPubSubForRuntime := func() (*pubsub, *channelt.MockAppChannel, *daprt.MockPubSub, *daprt.MockPubSub) {
		mockPubSub := new(daprt.MockPubSub)
		mockPubSub2 := new(daprt.MockPubSub)

		registry := registry.New(registry.NewOptions())
		registry.PubSubs().RegisterComponent(func(_ logger.Logger) contribpubsub.PubSub {
			return mockPubSub
		}, "mockPubSub")
		registry.PubSubs().RegisterComponent(func(_ logger.Logger) contribpubsub.PubSub {
			return mockPubSub2
		}, "mockPubSub2")

		expectedMetadata := contribpubsub.Metadata{
			Base: metadata.Base{
				Name:       TestPubsubName,
				Properties: daprt.GetFakeProperties(),
			},
		}

		mockPubSub.On("Init", expectedMetadata).Return(nil)
		mockPubSub.On(
			"Subscribe",
			mock.AnythingOfType("pubsub.SubscribeRequest"),
			mock.AnythingOfType("pubsub.Handler")).Return(nil)

		expectedSecondPubsubMetadata := contribpubsub.Metadata{
			Base: metadata.Base{
				Name:       TestSecondPubsubName,
				Properties: daprt.GetFakeProperties(),
			},
		}

		mockPubSub2.On("Init", expectedSecondPubsubMetadata).Return(nil)
		mockPubSub2.On(
			"Subscribe",
			mock.AnythingOfType("pubsub.SubscribeRequest"),
			mock.AnythingOfType("pubsub.Handler")).Return(nil)

		mockAppChannel := new(channelt.MockAppChannel)
		channels := new(channels.Channels).WithAppChannel(mockAppChannel)
		compStore := compstore.New()
		resiliency := resiliency.New(logger.NewLogger("test"))

		ps := New(Options{
			Subscriber: subscriber.New(subscriber.Options{
				Channels:   channels,
				Resiliency: resiliency,
				CompStore:  compStore,
				IsHTTP:     true,
			}),
			Registry:       registry.PubSubs(),
			Meta:           meta.New(meta.Options{}),
			ComponentStore: compStore,
		})

		return ps, mockAppChannel, mockPubSub, mockPubSub2
	}

	t.Run("subscribe 2 topics", func(t *testing.T) {
		ps, mockAppChannel, mockPubSub, mockPubSub2 := initMockPubSubForRuntime()

		// User App subscribes 2 topics via http app channel
		subs := getSubscriptionsJSONString(
			[]string{"topic0", "topic1"}, // first pubsub
			[]string{"topic0"},           // second pubsub
		)
		fakeResp := invokev1.NewInvokeMethodResponse(200, "OK", nil).
			WithRawDataString(subs).
			WithContentType("application/json")
		defer fakeResp.Close()

		mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), matchDaprRequestMethod("dapr/subscribe")).Return(fakeResp, nil)

		for _, comp := range pubsubComponents {
			err := ps.Init(context.Background(), comp)
			require.NoError(t, err)
		}

		require.NoError(t, ps.subscriber.StartAppSubscriptions())

		// assert
		mockPubSub.AssertNumberOfCalls(t, "Init", 1)
		mockPubSub2.AssertNumberOfCalls(t, "Init", 1)

		mockPubSub.AssertNumberOfCalls(t, "Subscribe", 2)
		mockPubSub2.AssertNumberOfCalls(t, "Subscribe", 1)
		mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1)
	})

	t.Run("if not subscribing yet should not call Subscribe", func(t *testing.T) {
		ps, mockAppChannel, mockPubSub, mockPubSub2 := initMockPubSubForRuntime()

		// User App subscribes 2 topics via http app channel
		subs := getSubscriptionsJSONString(
			[]string{"topic0", "topic1"}, // first pubsub
			[]string{"topic0"},           // second pubsub
		)
		fakeResp := invokev1.NewInvokeMethodResponse(200, "OK", nil).
			WithRawDataString(subs).
			WithContentType("application/json")
		defer fakeResp.Close()

		mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), matchDaprRequestMethod("dapr/subscribe")).Return(fakeResp, nil)

		// act
		for _, comp := range pubsubComponents {
			err := ps.Init(context.Background(), comp)
			require.NoError(t, err)
		}

		// assert
		mockPubSub.AssertNumberOfCalls(t, "Init", 1)
		mockPubSub2.AssertNumberOfCalls(t, "Init", 1)

		mockPubSub.AssertNumberOfCalls(t, "Subscribe", 0)
		mockPubSub2.AssertNumberOfCalls(t, "Subscribe", 0)
		mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 0)
	})

	t.Run("if start subscribing then not subscribing should not call Subscribe", func(t *testing.T) {
		ps, mockAppChannel, mockPubSub, mockPubSub2 := initMockPubSubForRuntime()

		// User App subscribes 2 topics via http app channel
		subs := getSubscriptionsJSONString(
			[]string{"topic0", "topic1"}, // first pubsub
			[]string{"topic0"},           // second pubsub
		)
		fakeResp := invokev1.NewInvokeMethodResponse(200, "OK", nil).
			WithRawDataString(subs).
			WithContentType("application/json")
		defer fakeResp.Close()

		mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), matchDaprRequestMethod("dapr/subscribe")).Return(fakeResp, nil)

		require.NoError(t, ps.subscriber.StartAppSubscriptions())
		ps.subscriber.StopAppSubscriptions()

		// act
		for _, comp := range pubsubComponents {
			err := ps.Init(context.Background(), comp)
			require.NoError(t, err)
		}

		// assert
		mockPubSub.AssertNumberOfCalls(t, "Init", 1)
		mockPubSub2.AssertNumberOfCalls(t, "Init", 1)

		mockPubSub.AssertNumberOfCalls(t, "Subscribe", 0)
		mockPubSub2.AssertNumberOfCalls(t, "Subscribe", 0)
		mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 0)
	})

	t.Run("if start subscription then init, expect Subscribe", func(t *testing.T) {
		ps, mockAppChannel, mockPubSub, mockPubSub2 := initMockPubSubForRuntime()

		// User App subscribes 2 topics via http app channel
		subs := getSubscriptionsJSONString(
			[]string{"topic0", "topic1"}, // first pubsub
			[]string{"topic0"},           // second pubsub
		)
		fakeResp := invokev1.NewInvokeMethodResponse(200, "OK", nil).
			WithRawDataString(subs).
			WithContentType("application/json")
		defer fakeResp.Close()

		mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), matchDaprRequestMethod("dapr/subscribe")).Return(fakeResp, nil)

		require.NoError(t, ps.subscriber.StartAppSubscriptions())

		// act
		for _, comp := range pubsubComponents {
			err := ps.Init(context.Background(), comp)
			require.NoError(t, err)
		}

		// assert
		mockPubSub.AssertNumberOfCalls(t, "Init", 1)
		mockPubSub2.AssertNumberOfCalls(t, "Init", 1)

		mockPubSub.AssertNumberOfCalls(t, "Subscribe", 2)
		mockPubSub2.AssertNumberOfCalls(t, "Subscribe", 1)
		mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1)
	})

	t.Run("if not subscribing yet should not call Subscribe", func(t *testing.T) {
		ps, mockAppChannel, mockPubSub, mockPubSub2 := initMockPubSubForRuntime()

		// User App subscribes 2 topics via http app channel
		subs := getSubscriptionsJSONString(
			[]string{"topic0", "topic1"}, // first pubsub
			[]string{"topic0"},           // second pubsub
		)
		fakeResp := invokev1.NewInvokeMethodResponse(200, "OK", nil).
			WithRawDataString(subs).
			WithContentType("application/json")
		defer fakeResp.Close()

		mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), matchDaprRequestMethod("dapr/subscribe")).Return(fakeResp, nil)

		// act
		for _, comp := range pubsubComponents {
			require.NoError(t, ps.Init(context.Background(), comp))
		}

		// assert
		mockPubSub.AssertNumberOfCalls(t, "Init", 1)
		mockPubSub2.AssertNumberOfCalls(t, "Init", 1)

		mockPubSub.AssertNumberOfCalls(t, "Subscribe", 0)
		mockPubSub2.AssertNumberOfCalls(t, "Subscribe", 0)
		mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 0)
	})

	t.Run("if start subscribing then not subscribing should not call Subscribe", func(t *testing.T) {
		ps, mockAppChannel, mockPubSub, mockPubSub2 := initMockPubSubForRuntime()

		// User App subscribes 2 topics via http app channel
		subs := getSubscriptionsJSONString(
			[]string{"topic0", "topic1"}, // first pubsub
			[]string{"topic0"},           // second pubsub
		)
		fakeResp := invokev1.NewInvokeMethodResponse(200, "OK", nil).
			WithRawDataString(subs).
			WithContentType("application/json")
		defer fakeResp.Close()

		mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), matchDaprRequestMethod("dapr/subscribe")).Return(fakeResp, nil)

		require.NoError(t, ps.subscriber.StartAppSubscriptions())
		ps.subscriber.StopAppSubscriptions()

		// act
		for _, comp := range pubsubComponents {
			err := ps.Init(context.Background(), comp)
			require.NoError(t, err)
		}

		// assert
		mockPubSub.AssertNumberOfCalls(t, "Init", 1)
		mockPubSub2.AssertNumberOfCalls(t, "Init", 1)

		mockPubSub.AssertNumberOfCalls(t, "Subscribe", 0)
		mockPubSub2.AssertNumberOfCalls(t, "Subscribe", 0)
		mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 0)
	})

	t.Run("if start subscription then init, expect Subscribe", func(t *testing.T) {
		ps, mockAppChannel, mockPubSub, mockPubSub2 := initMockPubSubForRuntime()

		// User App subscribes 2 topics via http app channel
		subs := getSubscriptionsJSONString(
			[]string{"topic0", "topic1"}, // first pubsub
			[]string{"topic0"},           // second pubsub
		)
		fakeResp := invokev1.NewInvokeMethodResponse(200, "OK", nil).
			WithRawDataString(subs).
			WithContentType("application/json")
		defer fakeResp.Close()

		mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), matchDaprRequestMethod("dapr/subscribe")).Return(fakeResp, nil)

		require.NoError(t, ps.subscriber.StartAppSubscriptions())

		// act
		for _, comp := range pubsubComponents {
			err := ps.Init(context.Background(), comp)
			require.NoError(t, err)
		}

		// assert
		mockPubSub.AssertNumberOfCalls(t, "Init", 1)
		mockPubSub2.AssertNumberOfCalls(t, "Init", 1)

		mockPubSub.AssertNumberOfCalls(t, "Subscribe", 2)
		mockPubSub2.AssertNumberOfCalls(t, "Subscribe", 1)
		mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1)
	})

	t.Run("subscribe to topic with custom route", func(t *testing.T) {
		ps, mockAppChannel, mockPubSub, _ := initMockPubSubForRuntime()

		// User App subscribes to a topic via http app channel
		sub := getSubscriptionCustom("topic0", "customroute/topic0")
		fakeResp := invokev1.NewInvokeMethodResponse(200, "OK", nil).
			WithRawDataString(sub).
			WithContentType("application/json")
		defer fakeResp.Close()

		mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), matchDaprRequestMethod("dapr/subscribe")).Return(fakeResp, nil)

		// act
		for _, comp := range pubsubComponents {
			err := ps.Init(context.Background(), comp)
			require.NoError(t, err)
		}

		require.NoError(t, ps.subscriber.StartAppSubscriptions())

		// assert
		mockPubSub.AssertNumberOfCalls(t, "Init", 1)
		mockPubSub.AssertNumberOfCalls(t, "Subscribe", 1)
		mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1)
	})

	t.Run("subscribe 0 topics unless user app provides topic list", func(t *testing.T) {
		ps, mockAppChannel, mockPubSub, _ := initMockPubSubForRuntime()

		fakeResp := invokev1.NewInvokeMethodResponse(404, "Not Found", nil)
		defer fakeResp.Close()

		mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), matchDaprRequestMethod("dapr/subscribe")).Return(fakeResp, nil)

		// act
		for _, comp := range pubsubComponents {
			err := ps.Init(context.Background(), comp)
			require.NoError(t, err)
		}

		require.NoError(t, ps.subscriber.StartAppSubscriptions())

		// assert
		mockPubSub.AssertNumberOfCalls(t, "Init", 1)
		mockPubSub.AssertNumberOfCalls(t, "Subscribe", 0)
		mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1)
	})
}

func TestConsumerID(t *testing.T) {
	metadata := []commonapi.NameValuePair{
		{
			Name: "host",
			Value: commonapi.DynamicValue{
				JSON: v1.JSON{
					Raw: []byte("localhost"),
				},
			},
		},
		{
			Name: "password",
			Value: commonapi.DynamicValue{
				JSON: v1.JSON{
					Raw: []byte("fakePassword"),
				},
			},
		},
	}
	pubsubComponent := componentsV1alpha1.Component{
		ObjectMeta: metav1.ObjectMeta{
			Name: TestPubsubName,
		},
		Spec: componentsV1alpha1.ComponentSpec{
			Type:     "pubsub.mockPubSub",
			Version:  "v1",
			Metadata: metadata,
		},
	}

	mockPubSub := new(daprt.MockPubSub)

	mockPubSub.On("Init", mock.Anything).Return(nil).Run(func(args mock.Arguments) {
		metadata := args.Get(0).(contribpubsub.Metadata)
		consumerID := metadata.Properties["consumerID"]
		assert.Equal(t, TestRuntimeConfigID, consumerID)
	})

	mockAppChannel := new(channelt.MockAppChannel)
	channels := new(channels.Channels).WithAppChannel(mockAppChannel)
	compStore := compstore.New()

	registry := registry.New(registry.NewOptions())
	registry.PubSubs().RegisterComponent(func(_ logger.Logger) contribpubsub.PubSub {
		return mockPubSub
	}, "mockPubSub")

	ps := New(Options{
		Subscriber: subscriber.New(subscriber.Options{
			Channels:   channels,
			Resiliency: resiliency.New(logger.NewLogger("test")),
			CompStore:  compStore,
			IsHTTP:     true,
		}),
		Registry:       registry.PubSubs(),
		Meta:           meta.New(meta.Options{}),
		AppID:          TestRuntimeConfigID,
		ComponentStore: compStore,
	})

	err := ps.Init(context.Background(), pubsubComponent)
	require.NoError(t, err)
}

// helper to populate subscription array for 2 pubsubs.
// 'topics' are the topics for the first pubsub.
// 'topics2' are the topics for the second pubsub.
func getSubscriptionsJSONString(topics []string, topics2 []string) string {
	s := []runtimePubsub.SubscriptionJSON{}
	for _, t := range topics {
		s = append(s, runtimePubsub.SubscriptionJSON{
			PubsubName: TestPubsubName,
			Topic:      t,
			Routes: runtimePubsub.RoutesJSON{
				Default: t,
			},
		})
	}

	for _, t := range topics2 {
		s = append(s, runtimePubsub.SubscriptionJSON{
			PubsubName: TestSecondPubsubName,
			Topic:      t,
			Routes: runtimePubsub.RoutesJSON{
				Default: t,
			},
		})
	}
	b, _ := json.Marshal(&s)

	return string(b)
}

func getSubscriptionCustom(topic, path string) string {
	s := []runtimePubsub.SubscriptionJSON{
		{
			PubsubName: TestPubsubName,
			Topic:      topic,
			Routes: runtimePubsub.RoutesJSON{
				Default: path,
			},
		},
	}
	b, _ := json.Marshal(&s)
	return string(b)
}

func matchContextInterface(v any) bool {
	_, ok := v.(context.Context)
	return ok
}

func matchDaprRequestMethod(method string) any {
	return mock.MatchedBy(func(req *invokev1.InvokeMethodRequest) bool {
		if req == nil || req.Message() == nil || req.Message().GetMethod() != method {
			return false
		}
		return true
	})
}
mikeee/dapr
pkg/runtime/processor/pubsub/pubsub_test.go
GO
mit
16,388
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package secret

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"io"
	"os"
	"strings"
	"sync"

	"github.com/dapr/components-contrib/secretstores"
	compapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	compsecret "github.com/dapr/dapr/pkg/components/secretstores"
	diag "github.com/dapr/dapr/pkg/diagnostics"
	operatorv1pb "github.com/dapr/dapr/pkg/proto/operator/v1"
	"github.com/dapr/dapr/pkg/runtime/compstore"
	rterrors "github.com/dapr/dapr/pkg/runtime/errors"
	"github.com/dapr/dapr/pkg/runtime/meta"
	"github.com/dapr/dapr/pkg/security/consts"
	"github.com/dapr/kit/logger"
)

var log = logger.NewLogger("dapr.runtime.processor.secret")

type Options struct {
	Registry       *compsecret.Registry
	ComponentStore *compstore.ComponentStore
	Meta           *meta.Meta
	OperatorClient operatorv1pb.OperatorClient
}

type secret struct {
	registry       *compsecret.Registry
	compStore      *compstore.ComponentStore
	meta           *meta.Meta
	operatorClient operatorv1pb.OperatorClient

	lock sync.Mutex
}

func New(opts Options) *secret {
	return &secret{
		registry:       opts.Registry,
		compStore:      opts.ComponentStore,
		meta:           opts.Meta,
		operatorClient: opts.OperatorClient,
	}
}

func (s *secret) Init(ctx context.Context, comp compapi.Component) error {
	s.lock.Lock()
	defer s.lock.Unlock()

	fName := comp.LogName()
	secretStore, err := s.registry.Create(comp.Spec.Type, comp.Spec.Version, fName)
	if err != nil {
		diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "creation", comp.ObjectMeta.Name)
		return rterrors.NewInit(rterrors.CreateComponentFailure, fName, err)
	}

	meta, err := s.meta.ToBaseMetadata(comp)
	if err != nil {
		diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "init", comp.ObjectMeta.Name)
		return rterrors.NewInit(rterrors.InitComponentFailure, fName, err)
	}

	err = secretStore.Init(ctx, secretstores.Metadata{Base: meta})
	if err != nil {
		diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "init", comp.ObjectMeta.Name)
		return rterrors.NewInit(rterrors.InitComponentFailure, fName, err)
	}

	s.compStore.AddSecretStore(comp.ObjectMeta.Name, secretStore)
	diag.DefaultMonitoring.ComponentInitialized(comp.Spec.Type)

	return nil
}

func (s *secret) Close(comp compapi.Component) error {
	s.lock.Lock()
	defer s.lock.Unlock()

	sec, ok := s.compStore.GetSecretStore(comp.Name)
	if !ok {
		return nil
	}

	defer s.compStore.DeleteSecretStore(comp.Name)

	closer, ok := sec.(io.Closer)
	if ok && closer != nil {
		if err := closer.Close(); err != nil {
			return err
		}
	}

	return nil
}

// ProcessResource returns whether the component or HTTP endpoint was updated
// with the secrets applied. If the resource references a secret store that
// hasn't been loaded yet, it returns the name of that secret store component
// as the second return value.
func (s *secret) ProcessResource(ctx context.Context, resource meta.Resource) (updated bool, secretStoreName string) {
	cache := map[string]secretstores.GetSecretResponse{}

	secretStoreName = s.meta.AuthSecretStoreOrDefault(resource)

	metadata := resource.NameValuePairs()
	for i, m := range metadata {
		// If there's an env var and no value, use that
		if !m.HasValue() && m.EnvRef != "" {
			if isEnvVarAllowed(m.EnvRef) {
				metadata[i].SetValue([]byte(os.Getenv(m.EnvRef)))
			} else {
				log.Warnf("%s %s references an env variable that isn't allowed: %s", resource.Kind(), resource.GetName(), m.EnvRef)
			}
			metadata[i].EnvRef = ""
			updated = true
			continue
		}

		if m.SecretKeyRef.Name == "" {
			continue
		}

		// If running in Kubernetes and have an operator client, do not fetch secrets from the Kubernetes secret store as they will be populated by the operator.
		// Instead, base64 decode the secret values into their real self.
		if s.operatorClient != nil && secretStoreName == compsecret.BuiltinKubernetesSecretStore {
			var jsonVal string
			err := json.Unmarshal(m.Value.Raw, &jsonVal)
			if err != nil {
				log.Errorf("Error decoding secret: %v", err)
				continue
			}

			dec, err := base64.StdEncoding.DecodeString(jsonVal)
			if err != nil {
				log.Errorf("Error decoding secret: %v", err)
				continue
			}

			metadata[i].SetValue(dec)
			updated = true
			continue
		}

		secretStore, ok := s.compStore.GetSecretStore(secretStoreName)
		if !ok {
			log.Warnf("%s %s references a secret store that isn't loaded: %s", resource.Kind(), resource.GetName(), secretStoreName)
			return updated, secretStoreName
		}

		resp, ok := cache[m.SecretKeyRef.Name]
		if !ok {
			r, err := secretStore.GetSecret(ctx, secretstores.GetSecretRequest{
				Name: m.SecretKeyRef.Name,
				Metadata: map[string]string{
					"namespace": resource.GetNamespace(),
				},
			})
			if err != nil {
				log.Errorf("Error getting secret: %v", err)
				continue
			}
			resp = r
		}

		// Use the SecretKeyRef.Name key if SecretKeyRef.Key is not given
		secretKeyName := m.SecretKeyRef.Key
		if secretKeyName == "" {
			secretKeyName = m.SecretKeyRef.Name
		}

		val, ok := resp.Data[secretKeyName]
		if ok && val != "" {
			metadata[i].SetValue([]byte(val))
			updated = true
		}

		cache[m.SecretKeyRef.Name] = resp
	}

	return updated, ""
}

func isEnvVarAllowed(key string) bool {
	// First, apply a denylist that blocks access to sensitive env vars
	key = strings.ToUpper(key)
	switch {
	case key == "":
		return false
	case key == "APP_API_TOKEN":
		return false
	case strings.HasPrefix(key, "DAPR_"):
		return false
	case strings.Contains(key, " "):
		return false
	}

	// If we have a `DAPR_ENV_KEYS` env var (which is added by the Dapr Injector in Kubernetes mode), use that as allowlist too
	allowlist := os.Getenv(consts.EnvKeysEnvVar)
	if allowlist == "" {
		return true
	}

	// Need to check for the full var, so there must be a space after OR it must be the end of the string, and there must be a space before OR it must be at the beginning of the string
	idx := strings.Index(allowlist, key)
	if idx >= 0 &&
		(idx+len(key) == len(allowlist) || allowlist[idx+len(key)] == ' ') &&
		(idx == 0 || allowlist[idx-1] == ' ') {
		return true
	}

	return false
}
mikeee/dapr
pkg/runtime/processor/secret/secret.go
GO
mit
6,671
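The env-var gate above applies a denylist first and then, only when DAPR_ENV_KEYS is set, a whole-word allowlist. A sketch of an equivalent check (a standalone reimplementation for illustration, not the repo's function, which reads the allowlist env var name from the consts package):

package example

import "strings"

// allowed restates isEnvVarAllowed from secret.go: deny empty keys,
// APP_API_TOKEN, DAPR_*-prefixed keys, and keys containing spaces; then,
// if an allowlist is given, require a whole-word match within it.
// strings.Fields gives the same whole-word semantics as the index-based
// boundary check in the original.
func allowed(key, allowlist string) bool {
	key = strings.ToUpper(key)
	switch {
	case key == "",
		key == "APP_API_TOKEN",
		strings.HasPrefix(key, "DAPR_"),
		strings.Contains(key, " "):
		return false
	}
	if allowlist == "" {
		return true
	}
	for _, entry := range strings.Fields(allowlist) {
		if entry == key {
			return true
		}
	}
	return false
}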
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package secret

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/dapr/components-contrib/secretstores"
	commonapi "github.com/dapr/dapr/pkg/apis/common"
	componentsapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	compsecret "github.com/dapr/dapr/pkg/components/secretstores"
	"github.com/dapr/dapr/pkg/modes"
	"github.com/dapr/dapr/pkg/runtime/compstore"
	"github.com/dapr/dapr/pkg/runtime/meta"
	"github.com/dapr/dapr/pkg/runtime/mock"
	"github.com/dapr/dapr/pkg/runtime/registry"
	"github.com/dapr/dapr/pkg/security/consts"
	"github.com/dapr/kit/logger"
)

func TestProcessResourceSecrets(t *testing.T) {
	createMockBinding := func() *componentsapi.Component {
		return &componentsapi.Component{
			ObjectMeta: metav1.ObjectMeta{
				Name: "mockBinding",
			},
			Spec: componentsapi.ComponentSpec{
				Type:     "bindings.mock",
				Version:  "v1",
				Metadata: []commonapi.NameValuePair{},
			},
		}
	}

	t.Run("Standalone Mode", func(t *testing.T) {
		mockBinding := createMockBinding()
		mockBinding.Spec.Metadata = append(mockBinding.Spec.Metadata, commonapi.NameValuePair{
			Name: "a",
			SecretKeyRef: commonapi.SecretKeyRef{
				Key:  "key1",
				Name: "name1",
			},
		})
		mockBinding.Auth.SecretStore = compsecret.BuiltinKubernetesSecretStore

		sec := New(Options{
			Registry:       registry.New(registry.NewOptions()).SecretStores(),
			ComponentStore: compstore.New(),
			Meta: meta.New(meta.Options{
				ID:   "test",
				Mode: modes.StandaloneMode,
			}),
		})

		m := mock.NewMockKubernetesStore()
		sec.registry.RegisterComponent(
			func(_ logger.Logger) secretstores.SecretStore {
				return m
			},
			compsecret.BuiltinKubernetesSecretStore,
		)

		// add Kubernetes component manually
		require.NoError(t, sec.Init(context.Background(), componentsapi.Component{
			ObjectMeta: metav1.ObjectMeta{
				Name: compsecret.BuiltinKubernetesSecretStore,
			},
			Spec: componentsapi.ComponentSpec{
				Type:    "secretstores.kubernetes",
				Version: "v1",
			},
		}))

		updated, unready := sec.ProcessResource(context.Background(), mockBinding)
		assert.True(t, updated)
		assert.Equal(t, "value1", mockBinding.Spec.Metadata[0].Value.String())
		assert.Empty(t, unready)
	})

	t.Run("Look up name only", func(t *testing.T) {
		mockBinding := createMockBinding()
		mockBinding.Spec.Metadata = append(mockBinding.Spec.Metadata, commonapi.NameValuePair{
			Name: "a",
			SecretKeyRef: commonapi.SecretKeyRef{
				Name: "name1",
			},
		})
		mockBinding.Auth.SecretStore = "mock"

		sec := New(Options{
			Registry:       registry.New(registry.NewOptions()).SecretStores(),
			ComponentStore: compstore.New(),
			Meta: meta.New(meta.Options{
				ID:   "test",
				Mode: modes.KubernetesMode,
			}),
		})

		sec.registry.RegisterComponent(
			func(_ logger.Logger) secretstores.SecretStore {
				return &mock.SecretStore{}
			},
			"mock",
		)

		// initSecretStore appends Kubernetes component even if kubernetes component is not added
		err := sec.Init(context.Background(), componentsapi.Component{
			ObjectMeta: metav1.ObjectMeta{
				Name: "mock",
			},
			Spec: componentsapi.ComponentSpec{
				Type:    "secretstores.mock",
				Version: "v1",
			},
		})
		require.NoError(t, err)

		updated, unready := sec.ProcessResource(context.Background(), mockBinding)
		assert.True(t, updated)
		assert.Equal(t, "value1", mockBinding.Spec.Metadata[0].Value.String())
		assert.Empty(t, unready)
	})

	t.Run("Secret from env", func(t *testing.T) {
		t.Setenv("MY_ENV_VAR", "ciao mondo")

		mockBinding := createMockBinding()
		mockBinding.Spec.Metadata = append(mockBinding.Spec.Metadata, commonapi.NameValuePair{
			Name:   "a",
			EnvRef: "MY_ENV_VAR",
		})

		sec := New(Options{
			Registry:       registry.New(registry.NewOptions()).SecretStores(),
			ComponentStore: compstore.New(),
			Meta: meta.New(meta.Options{
				ID:   "test",
				Mode: modes.StandaloneMode,
			}),
		})

		updated, unready := sec.ProcessResource(context.Background(), mockBinding)
		assert.True(t, updated)
		assert.Equal(t, "ciao mondo", mockBinding.Spec.Metadata[0].Value.String())
		assert.Empty(t, unready)
	})

	t.Run("Disallowed env var", func(t *testing.T) {
		t.Setenv("APP_API_TOKEN", "test")
		t.Setenv("DAPR_KEY", "test")

		mockBinding := createMockBinding()
		mockBinding.Spec.Metadata = append(mockBinding.Spec.Metadata,
			commonapi.NameValuePair{
				Name:   "a",
				EnvRef: "DAPR_KEY",
			},
			commonapi.NameValuePair{
				Name:   "b",
				EnvRef: "APP_API_TOKEN",
			},
		)

		sec := New(Options{
			Registry:       registry.New(registry.NewOptions()).SecretStores(),
			ComponentStore: compstore.New(),
			Meta: meta.New(meta.Options{
				ID:   "test",
				Mode: modes.StandaloneMode,
			}),
		})

		updated, unready := sec.ProcessResource(context.Background(), mockBinding)
		assert.True(t, updated)
		assert.Equal(t, "", mockBinding.Spec.Metadata[0].Value.String())
		assert.Equal(t, "", mockBinding.Spec.Metadata[1].Value.String())
		assert.Empty(t, unready)
	})
}

func TestIsEnvVarAllowed(t *testing.T) {
	t.Run("no allowlist", func(t *testing.T) {
		tests := []struct {
			name string
			key  string
			want bool
		}{
			{name: "empty string is not allowed", key: "", want: false},
			{name: "key is allowed", key: "FOO", want: true},
			{name: "keys starting with DAPR_ are denied", key: "DAPR_TEST", want: false},
			{name: "APP_API_TOKEN is denied", key: "APP_API_TOKEN", want: false},
			{name: "keys with a space are denied", key: "FOO BAR", want: false},
			{name: "case insensitive app_api_token", key: "app_api_token", want: false},
			{name: "case insensitive dapr_foo", key: "dapr_foo", want: false},
		}
		for _, tt := range tests {
			t.Run(tt.name, func(t *testing.T) {
				if got := isEnvVarAllowed(tt.key); got != tt.want {
					t.Errorf("isEnvVarAllowed(%q) = %v, want %v", tt.key, got, tt.want)
				}
			})
		}
	})

	t.Run("with allowlist", func(t *testing.T) {
		t.Setenv(consts.EnvKeysEnvVar, "FOO BAR TEST")

		tests := []struct {
			name string
			key  string
			want bool
		}{
			{name: "FOO is allowed", key: "FOO", want: true},
			{name: "BAR is allowed", key: "BAR", want: true},
			{name: "TEST is allowed", key: "TEST", want: true},
			{name: "FO is not allowed", key: "FO", want: false},
			{name: "EST is not allowed", key: "EST", want: false},
			{name: "BA is not allowed", key: "BA", want: false},
			{name: "AR is not allowed", key: "AR", want: false},
			{name: "keys starting with DAPR_ are denied", key: "DAPR_TEST", want: false},
			{name: "APP_API_TOKEN is denied", key: "APP_API_TOKEN", want: false},
			{name: "keys with a space are denied", key: "FOO BAR", want: false},
			{name: "case insensitive allowlist", key: "foo", want: true},
		}
		for _, tt := range tests {
			t.Run(tt.name, func(t *testing.T) {
				if got := isEnvVarAllowed(tt.key); got != tt.want {
					t.Errorf("isEnvVarAllowed(%q) = %v, want %v", tt.key, got, tt.want)
				}
			})
		}
	})
}
mikeee/dapr
pkg/runtime/processor/secret/secret_test.go
GO
mit
7,612
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package state

import (
	"context"
	"fmt"
	"io"
	"strings"
	"sync"

	contribstate "github.com/dapr/components-contrib/state"
	compapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	compstate "github.com/dapr/dapr/pkg/components/state"
	diag "github.com/dapr/dapr/pkg/diagnostics"
	"github.com/dapr/dapr/pkg/encryption"
	"github.com/dapr/dapr/pkg/outbox"
	"github.com/dapr/dapr/pkg/runtime/compstore"
	rterrors "github.com/dapr/dapr/pkg/runtime/errors"
	"github.com/dapr/dapr/pkg/runtime/meta"
	"github.com/dapr/kit/logger"
	"github.com/dapr/kit/utils"
)

const (
	PropertyKeyActorStateStore = "actorstatestore"
)

var log = logger.NewLogger("dapr.runtime.processor.state")

type Options struct {
	Registry       *compstate.Registry
	ComponentStore *compstore.ComponentStore
	Meta           *meta.Meta
	ActorsEnabled  bool
	Outbox         outbox.Outbox
}

type state struct {
	registry  *compstate.Registry
	compStore *compstore.ComponentStore
	meta      *meta.Meta
	lock      sync.RWMutex

	actorStateStoreName *string
	actorsEnabled       bool
	outbox              outbox.Outbox
}

func New(opts Options) *state {
	return &state{
		registry:      opts.Registry,
		compStore:     opts.ComponentStore,
		meta:          opts.Meta,
		actorsEnabled: opts.ActorsEnabled,
		outbox:        opts.Outbox,
	}
}

func (s *state) Init(ctx context.Context, comp compapi.Component) error {
	s.lock.Lock()
	defer s.lock.Unlock()

	fName := comp.LogName()
	store, err := s.registry.Create(comp.Spec.Type, comp.Spec.Version, fName)
	if err != nil {
		diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "creation", comp.ObjectMeta.Name)
		return rterrors.NewInit(rterrors.CreateComponentFailure, fName, err)
	}

	if store == nil {
		return nil
	}

	secretStoreName := s.meta.AuthSecretStoreOrDefault(&comp)

	secretStore, _ := s.compStore.GetSecretStore(secretStoreName)
	encKeys, err := encryption.ComponentEncryptionKey(comp, secretStore)
	if err != nil {
		diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "creation", comp.ObjectMeta.Name)
		return rterrors.NewInit(rterrors.CreateComponentFailure, fName, err)
	}

	if encKeys.Primary.Key != "" {
		ok := encryption.AddEncryptedStateStore(comp.ObjectMeta.Name, encKeys)
		if ok {
			log.Infof("Automatic encryption enabled for state store %s", comp.ObjectMeta.Name)
			log.Info("WARNING: Automatic state store encryption should never be used to store more than 4 billion items in the state store (including updates). Storing more items than that can cause the private key to be exposed.")
		}
	}

	meta, err := s.meta.ToBaseMetadata(comp)
	if err != nil {
		diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "init", comp.ObjectMeta.Name)
		return rterrors.NewInit(rterrors.InitComponentFailure, fName, err)
	}

	props := meta.Properties
	err = store.Init(ctx, contribstate.Metadata{Base: meta})
	if err != nil {
		diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "init", comp.ObjectMeta.Name)
		return rterrors.NewInit(rterrors.InitComponentFailure, fName, err)
	}

	// when placement address list is not empty, set specified actor store.
	if s.actorsEnabled {
		// set specified actor store if "actorStateStore" is true in the spec.
		actorStoreSpecified := false
		for k, v := range props { //nolint:gocritic
			if strings.ToLower(k) == PropertyKeyActorStateStore {
				actorStoreSpecified = utils.IsTruthy(v)
				break
			}
		}

		if actorStoreSpecified {
			if s.actorStateStoreName == nil {
				log.Info("Using '" + comp.ObjectMeta.Name + "' as actor state store")
				s.actorStateStoreName = &comp.ObjectMeta.Name
			} else if *s.actorStateStoreName != comp.ObjectMeta.Name {
				return fmt.Errorf("detected duplicate actor state store: %s and %s", *s.actorStateStoreName, comp.ObjectMeta.Name)
			}
			s.compStore.AddStateStoreActor(comp.ObjectMeta.Name, store)
		}
	}

	s.compStore.AddStateStore(comp.ObjectMeta.Name, store)
	err = compstate.SaveStateConfiguration(comp.ObjectMeta.Name, props)
	if err != nil {
		diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "init", comp.ObjectMeta.Name)
		wrapError := fmt.Errorf("failed to save lock keyprefix: %s", err.Error())
		return rterrors.NewInit(rterrors.InitComponentFailure, fName, wrapError)
	}

	s.outbox.AddOrUpdateOutbox(comp)

	diag.DefaultMonitoring.ComponentInitialized(comp.Spec.Type)

	return nil
}

func (s *state) Close(comp compapi.Component) error {
	s.lock.Lock()
	defer s.lock.Unlock()

	ss, ok := s.compStore.GetStateStore(comp.Name)
	if !ok {
		return nil
	}

	defer s.compStore.DeleteStateStore(comp.Name)

	closer, ok := ss.(io.Closer)
	if ok && closer != nil {
		if err := closer.Close(); err != nil {
			return err
		}
	}

	return nil
}

func (s *state) ActorStateStoreName() (string, bool) {
	s.lock.RLock()
	defer s.lock.RUnlock()

	if s.actorStateStoreName == nil {
		return "", false
	}
	return *s.actorStateStoreName, true
}
mikeee/dapr
pkg/runtime/processor/state/state.go
GO
mit
5,461
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package state_test

import (
	"context"
	"crypto/rand"
	"encoding/hex"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/dapr/components-contrib/metadata"
	contribstate "github.com/dapr/components-contrib/state"
	"github.com/dapr/dapr/pkg/apis/common"
	compapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	stateLoader "github.com/dapr/dapr/pkg/components/state"
	"github.com/dapr/dapr/pkg/config"
	"github.com/dapr/dapr/pkg/encryption"
	"github.com/dapr/dapr/pkg/modes"
	outboxfake "github.com/dapr/dapr/pkg/outbox/fake"
	"github.com/dapr/dapr/pkg/runtime/compstore"
	rterrors "github.com/dapr/dapr/pkg/runtime/errors"
	"github.com/dapr/dapr/pkg/runtime/meta"
	"github.com/dapr/dapr/pkg/runtime/mock"
	"github.com/dapr/dapr/pkg/runtime/processor"
	"github.com/dapr/dapr/pkg/runtime/registry"
	"github.com/dapr/dapr/pkg/security/fake"
	daprt "github.com/dapr/dapr/pkg/testing"
	"github.com/dapr/kit/logger"
)

func TestInitState(t *testing.T) {
	reg := registry.New(registry.NewOptions().WithStateStores(stateLoader.NewRegistry()))
	compStore := compstore.New()
	proc := processor.New(processor.Options{
		Registry:       reg,
		ComponentStore: compStore,
		GlobalConfig:   new(config.Configuration),
		Meta:           meta.New(meta.Options{Mode: modes.StandaloneMode}),
		Security:       fake.New(),
		Outbox:         outboxfake.New(),
	})

	bytes := make([]byte, 32)
	rand.Read(bytes)
	primaryKey := hex.EncodeToString(bytes)

	mockStateComponent := func(name string) compapi.Component {
		return compapi.Component{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
			},
			Spec: compapi.ComponentSpec{
				Type:    "state.mockState",
				Version: "v1",
				Metadata: []common.NameValuePair{
					{
						Name: "actorstatestore",
						Value: common.DynamicValue{
							JSON: apiextv1.JSON{Raw: []byte("true")},
						},
					},
					{
						Name: "primaryEncryptionKey",
						Value: common.DynamicValue{
							JSON: apiextv1.JSON{Raw: []byte(primaryKey)},
						},
					},
				},
			},
			Auth: compapi.Auth{
				SecretStore: "mockSecretStore",
			},
		}
	}

	t.Run("test init state store", func(t *testing.T) {
		// setup
		initMockStateStoreForRegistry(reg, "noerror", primaryKey, nil)

		// act
		err := proc.Init(context.TODO(), mockStateComponent("noerror"))

		// assert
		require.NoError(t, err, "expected no error")
	})

	t.Run("test init state store error", func(t *testing.T) {
		// setup
		initMockStateStoreForRegistry(reg, "error", primaryKey, assert.AnError)

		// act
		err := proc.Init(context.TODO(), mockStateComponent("error"))

		// assert
		require.Error(t, err, "expected error")
		assert.Equal(t, err.Error(), rterrors.NewInit(rterrors.InitComponentFailure, "error (state.mockState/v1)", assert.AnError).Error(), "expected error strings to match")
	})

	t.Run("test init state store, encryption not enabled", func(t *testing.T) {
		// setup
		initMockStateStoreForRegistry(reg, "noencryption", primaryKey, nil)

		// act
		err := proc.Init(context.TODO(), mockStateComponent("noencryption"))
		ok := encryption.EncryptedStateStore("noencryption")

		// assert
		require.NoError(t, err)
		assert.False(t, ok)
	})

	t.Run("test init state store, encryption enabled", func(t *testing.T) {
		// setup
		initMockStateStoreForRegistry(reg, "encryption", primaryKey, nil)
		compStore.AddSecretStore("mockSecretStore", &mock.SecretStore{})

		err := proc.Init(context.TODO(), mockStateComponent("encryption"))
		ok := encryption.EncryptedStateStore("encryption")

		// assert
		require.NoError(t, err)
		assert.True(t, ok)
	})
}

func initMockStateStoreForRegistry(reg *registry.Registry, name, encryptKey string, e error) *daprt.MockStateStore {
	mockStateStore := new(daprt.MockStateStore)

	reg.StateStores().RegisterComponent(
		func(_ logger.Logger) contribstate.Store {
			return mockStateStore
		},
		"mockState",
	)

	expectedMetadata := contribstate.Metadata{Base: metadata.Base{
		Name: name,
		Properties: map[string]string{
			"actorstatestore":      "true",
			"primaryEncryptionKey": encryptKey,
		},
	}}
	expectedMetadataUppercase := contribstate.Metadata{Base: metadata.Base{
		Name: name,
		Properties: map[string]string{
			"ACTORSTATESTORE":      "true",
			"primaryEncryptionKey": encryptKey,
		},
	}}
	mockStateStore.On("Init", expectedMetadata).Return(e)
	mockStateStore.On("Init", expectedMetadataUppercase).Return(e)

	return mockStateStore
}
mikeee/dapr
pkg/runtime/processor/state/state_test.go
GO
mit
5,129
/*
Copyright 2024 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package subscriber

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"sync/atomic"

	"google.golang.org/grpc"

	"github.com/dapr/dapr/pkg/api/grpc/manager"
	"github.com/dapr/dapr/pkg/config"
	runtimev1pb "github.com/dapr/dapr/pkg/proto/runtime/v1"
	"github.com/dapr/dapr/pkg/resiliency"
	"github.com/dapr/dapr/pkg/runtime/channels"
	"github.com/dapr/dapr/pkg/runtime/compstore"
	rtpubsub "github.com/dapr/dapr/pkg/runtime/pubsub"
	"github.com/dapr/dapr/pkg/runtime/subscription"
	"github.com/dapr/kit/logger"
)

type Options struct {
	AppID           string
	Namespace       string
	Resiliency      resiliency.Provider
	TracingSpec     *config.TracingSpec
	IsHTTP          bool
	Channels        *channels.Channels
	GRPC            *manager.Manager
	CompStore       *compstore.ComponentStore
	Adapter         rtpubsub.Adapter
	AdapterStreamer rtpubsub.AdapterStreamer
}

type Subscriber struct {
	appID           string
	namespace       string
	resiliency      resiliency.Provider
	tracingSpec     *config.TracingSpec
	isHTTP          bool
	channels        *channels.Channels
	grpc            *manager.Manager
	compStore       *compstore.ComponentStore
	adapter         rtpubsub.Adapter
	adapterStreamer rtpubsub.AdapterStreamer

	appSubs      map[string][]*namedSubscription
	streamSubs   map[string][]*namedSubscription
	appSubActive bool
	hasInitProg  bool
	lock         sync.RWMutex
	running      atomic.Bool
	closed       bool
}

type namedSubscription struct {
	name *string
	*subscription.Subscription
}

var log = logger.NewLogger("dapr.runtime.processor.subscription")

func New(opts Options) *Subscriber {
	return &Subscriber{
		appID:           opts.AppID,
		namespace:       opts.Namespace,
		resiliency:      opts.Resiliency,
		tracingSpec:     opts.TracingSpec,
		isHTTP:          opts.IsHTTP,
		channels:        opts.Channels,
		grpc:            opts.GRPC,
		compStore:       opts.CompStore,
		adapter:         opts.Adapter,
		adapterStreamer: opts.AdapterStreamer,
		appSubs:         make(map[string][]*namedSubscription),
		streamSubs:      make(map[string][]*namedSubscription),
	}
}

func (s *Subscriber) Run(ctx context.Context) error {
	if !s.running.CompareAndSwap(false, true) {
		return errors.New("subscriber is already running")
	}

	<-ctx.Done()
	s.StopAllSubscriptionsForever()

	return nil
}

func (s *Subscriber) ReloadPubSub(name string) error {
	s.lock.Lock()
	defer s.lock.Unlock()

	if s.closed {
		return nil
	}

	ps, _ := s.compStore.GetPubSub(name)

	var errs []error
	if err := s.reloadPubSubStream(name, ps); err != nil {
		errs = append(errs, fmt.Errorf("failed to reload pubsub for subscription streams %s: %s", name, err))
	}

	if err := s.reloadPubSubApp(name, ps); err != nil {
		errs = append(errs, fmt.Errorf("failed to reload pubsub for subscription apps %s: %s", name, err))
	}

	return errors.Join(errs...)
}

func (s *Subscriber) StartStreamerSubscription(key string) error {
	s.lock.Lock()
	defer s.lock.Unlock()

	if s.closed {
		return nil
	}

	sub, ok := s.compStore.GetStreamSubscription(key)
	if !ok {
		return nil
	}

	pubsub, ok := s.compStore.GetPubSub(sub.PubsubName)
	if !ok {
		return nil
	}

	ss, err := s.startSubscription(pubsub, sub, true)
	if err != nil {
		return fmt.Errorf("failed to create subscription for %s: %s", sub.PubsubName, err)
	}

	s.streamSubs[sub.PubsubName] = append(s.streamSubs[sub.PubsubName], &namedSubscription{
		name:         &key,
		Subscription: ss,
	})

	return nil
}

func (s *Subscriber) StopStreamerSubscription(pubsubName, key string) {
	s.lock.Lock()
	defer s.lock.Unlock()

	if s.closed {
		return
	}

	for i, sub := range s.streamSubs[pubsubName] {
		if sub.name != nil && *sub.name == key {
			sub.Stop()
			s.streamSubs[pubsubName] = append(s.streamSubs[pubsubName][:i], s.streamSubs[pubsubName][i+1:]...)
			return
		}
	}
}

func (s *Subscriber) ReloadDeclaredAppSubscription(name, pubsubName string) error {
	s.lock.Lock()
	defer s.lock.Unlock()

	if s.closed {
		return nil
	}

	for i, appsub := range s.appSubs[pubsubName] {
		if appsub.name != nil && name == *appsub.name {
			appsub.Stop()
			s.appSubs[pubsubName] = append(s.appSubs[pubsubName][:i], s.appSubs[pubsubName][i+1:]...)
			break
		}
	}

	ps, ok := s.compStore.GetPubSub(pubsubName)
	if !ok {
		return nil
	}

	sub, ok := s.compStore.GetDeclarativeSubscription(name)
	if !ok {
		return nil
	}

	if !rtpubsub.IsOperationAllowed(sub.Topic, ps, ps.ScopedSubscriptions) {
		return nil
	}

	ss, err := s.startSubscription(ps, sub.NamedSubscription, false)
	if err != nil {
		return fmt.Errorf("failed to create subscription for %s: %s", sub.PubsubName, err)
	}

	s.appSubs[sub.PubsubName] = append(s.appSubs[sub.PubsubName], &namedSubscription{
		name:         &name,
		Subscription: ss,
	})

	return nil
}

func (s *Subscriber) StopPubSub(name string) {
	s.lock.Lock()
	defer s.lock.Unlock()

	for _, sub := range s.appSubs[name] {
		sub.Stop()
	}
	for _, sub := range s.streamSubs[name] {
		sub.Stop()
	}

	s.appSubs[name] = nil
	s.streamSubs[name] = nil
}

func (s *Subscriber) StartAppSubscriptions() error {
	s.lock.Lock()
	defer s.lock.Unlock()

	if s.appSubActive || s.closed {
		return nil
	}

	if err := s.initProgramaticSubscriptions(context.TODO()); err != nil {
		return err
	}

	s.appSubActive = true

	for _, subs := range s.appSubs {
		for _, sub := range subs {
			sub.Stop()
		}
	}

	s.appSubs = make(map[string][]*namedSubscription)

	var errs []error
	for name, ps := range s.compStore.ListPubSubs() {
		ps := ps
		for _, sub := range s.compStore.ListSubscriptionsAppByPubSub(name) {
			ss, err := s.startSubscription(ps, sub, false)
			if err != nil {
				errs = append(errs, err)
				continue
			}

			s.appSubs[name] = append(s.appSubs[name], &namedSubscription{
				name:         sub.Name,
				Subscription: ss,
			})
		}
	}

	return errors.Join(errs...)
}

func (s *Subscriber) StopAppSubscriptions() {
	s.lock.Lock()
	defer s.lock.Unlock()

	if !s.appSubActive {
		return
	}

	s.appSubActive = false

	for _, psub := range s.appSubs {
		for _, sub := range psub {
			sub.Stop()
		}
	}

	s.appSubs = make(map[string][]*namedSubscription)
}

func (s *Subscriber) StopAllSubscriptionsForever() {
	s.lock.Lock()
	defer s.lock.Unlock()

	s.closed = true

	for _, psubs := range s.appSubs {
		for _, sub := range psubs {
			sub.Stop()
		}
	}
	for _, psubs := range s.streamSubs {
		for _, sub := range psubs {
			sub.Stop()
		}
	}

	s.appSubs = make(map[string][]*namedSubscription)
	s.streamSubs = make(map[string][]*namedSubscription)
}

func (s *Subscriber) InitProgramaticSubscriptions(ctx context.Context) error {
	s.lock.Lock()
	defer s.lock.Unlock()
	return s.initProgramaticSubscriptions(ctx)
}

func (s *Subscriber) reloadPubSubStream(name string, pubsub *rtpubsub.PubsubItem) error {
	for _, sub := range s.streamSubs[name] {
		sub.Stop()
	}
	s.streamSubs[name] = nil

	if s.closed || pubsub == nil {
		return nil
	}

	subs := make([]*namedSubscription, 0, len(s.compStore.ListSubscriptionsStreamByPubSub(name)))
	var errs []error
	for _, sub := range s.compStore.ListSubscriptionsStreamByPubSub(name) {
		ss, err := s.startSubscription(pubsub, sub, true)
		if err != nil {
			errs = append(errs, fmt.Errorf("failed to create subscription for %s: %s", name, err))
			continue
		}

		subs = append(subs, &namedSubscription{
			name:         sub.Name,
			Subscription: ss,
		})
	}
	s.streamSubs[name] = subs

	return errors.Join(errs...)
}

func (s *Subscriber) reloadPubSubApp(name string, pubsub *rtpubsub.PubsubItem) error {
	for _, sub := range s.appSubs[name] {
		sub.Stop()
	}
	s.appSubs[name] = nil

	if !s.appSubActive || s.closed || pubsub == nil {
		return nil
	}

	if err := s.initProgramaticSubscriptions(context.TODO()); err != nil {
		return err
	}

	var errs []error
	subs := make([]*namedSubscription, 0, len(s.compStore.ListSubscriptionsAppByPubSub(name)))
	for _, sub := range s.compStore.ListSubscriptionsAppByPubSub(name) {
		ss, err := s.startSubscription(pubsub, sub, false)
		if err != nil {
			errs = append(errs, fmt.Errorf("failed to create subscription for %s: %s", name, err))
			continue
		}

		subs = append(subs, &namedSubscription{
			name:         sub.Name,
			Subscription: ss,
		})
	}
	s.appSubs[name] = subs

	return errors.Join(errs...)
}

func (s *Subscriber) initProgramaticSubscriptions(ctx context.Context) error {
	if s.hasInitProg {
		return nil
	}

	// If no pubsubs registered, return early.
	if len(s.compStore.ListPubSubs()) == 0 {
		return nil
	}

	appChannel := s.channels.AppChannel()
	if appChannel == nil {
		log.Warn("app channel not initialized, make sure -app-port is specified if pubsub subscription is required")
		return nil
	}

	s.hasInitProg = true

	var (
		subscriptions []rtpubsub.Subscription
		err           error
	)

	// handle app subscriptions
	if s.isHTTP {
		subscriptions, err = rtpubsub.GetSubscriptionsHTTP(ctx, appChannel, log, s.resiliency)
	} else {
		var conn grpc.ClientConnInterface
		conn, err = s.grpc.GetAppClient()
		if err != nil {
			return fmt.Errorf("error while getting app client: %w", err)
		}
		client := runtimev1pb.NewAppCallbackClient(conn)
		subscriptions, err = rtpubsub.GetSubscriptionsGRPC(ctx, client, log, s.resiliency)
	}
	if err != nil {
		return err
	}

	subbedTopics := make(map[string][]string)
	for _, sub := range subscriptions {
		subbedTopics[sub.PubsubName] = append(subbedTopics[sub.PubsubName], sub.Topic)
	}
	for pubsubName, topics := range subbedTopics {
		log.Infof("app is subscribed to the following topics: [%s] through pubsub=%s", topics, pubsubName)
	}

	s.compStore.SetProgramaticSubscriptions(subscriptions...)

	return nil
}

func (s *Subscriber) startSubscription(pubsub *rtpubsub.PubsubItem, comp *compstore.NamedSubscription, isStreamer bool) (*subscription.Subscription, error) {
	var streamer rtpubsub.AdapterStreamer
	if isStreamer {
		streamer = s.adapterStreamer
	}
	return subscription.New(subscription.Options{
		AppID:           s.appID,
		Namespace:       s.namespace,
		PubSubName:      comp.PubsubName,
		Topic:           comp.Topic,
		IsHTTP:          s.isHTTP,
		PubSub:          pubsub,
		Resiliency:      s.resiliency,
		TraceSpec:       s.tracingSpec,
		Route:           comp.Subscription,
		Channels:        s.channels,
		GRPC:            s.grpc,
		Adapter:         s.adapter,
		AdapterStreamer: streamer,
	})
}
mikeee/dapr
pkg/runtime/processor/subscriber/subscriber.go
GO
mit
10,908
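As an aside, not part of the repository: StopStreamerSubscription and ReloadDeclaredAppSubscription remove a subscription from a slice with Go's append-splice idiom; a minimal standalone sketch of the same idiom (the generic helper name removeAt is invented for illustration).

package main

import "fmt"

// removeAt deletes the element at index i while preserving order — the
// same append-splice pattern the two methods above apply to their
// namedSubscription slices.
func removeAt[T any](s []T, i int) []T {
	return append(s[:i], s[i+1:]...)
}

func main() {
	subs := []string{"sub1", "sub2", "sub3"}
	fmt.Println(removeAt(subs, 1)) // [sub1 sub3]
}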
/*
Copyright 2024 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package subscriber

import (
	"context"
	"encoding/json"
	"slices"
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	contribpubsub "github.com/dapr/components-contrib/pubsub"
	subapi "github.com/dapr/dapr/pkg/apis/subscriptions/v2alpha1"
	channelt "github.com/dapr/dapr/pkg/channel/testing"
	invokev1 "github.com/dapr/dapr/pkg/messaging/v1"
	"github.com/dapr/dapr/pkg/resiliency"
	"github.com/dapr/dapr/pkg/runtime/channels"
	"github.com/dapr/dapr/pkg/runtime/compstore"
	rtpubsub "github.com/dapr/dapr/pkg/runtime/pubsub"
	daprt "github.com/dapr/dapr/pkg/testing"
	"github.com/dapr/kit/logger"
)

const (
	TestRuntimeConfigID = "consumer0"
	TestPubsubName      = "testpubsub"
)

func TestSubscriptionLifecycle(t *testing.T) {
	mockPubSub1 := new(daprt.InMemoryPubsub)
	mockPubSub2 := new(daprt.InMemoryPubsub)
	mockPubSub3 := new(daprt.InMemoryPubsub)

	mockPubSub1.On("Init", mock.Anything).Return(nil)
	mockPubSub2.On("Init", mock.Anything).Return(nil)
	mockPubSub3.On("Init", mock.Anything).Return(nil)
	mockPubSub1.On("Subscribe", mock.AnythingOfType("pubsub.SubscribeRequest"), mock.AnythingOfType("pubsub.Handler")).Return(nil)
	mockPubSub2.On("Subscribe", mock.AnythingOfType("pubsub.SubscribeRequest"), mock.AnythingOfType("pubsub.Handler")).Return(nil)
	mockPubSub3.On("Subscribe", mock.AnythingOfType("pubsub.SubscribeRequest"), mock.AnythingOfType("pubsub.Handler")).Return(nil)
	mockPubSub1.On("unsubscribed", "topic1").Return(nil)
	mockPubSub2.On("unsubscribed", "topic2").Return(nil)
	mockPubSub3.On("unsubscribed", "topic3").Return(nil)

	require.NoError(t, mockPubSub1.Init(context.Background(), contribpubsub.Metadata{}))
	require.NoError(t, mockPubSub2.Init(context.Background(), contribpubsub.Metadata{}))
	require.NoError(t, mockPubSub3.Init(context.Background(), contribpubsub.Metadata{}))

	compStore := compstore.New()
	compStore.AddPubSub("mockPubSub1", &rtpubsub.PubsubItem{
		Component: mockPubSub1,
	})
	compStore.AddPubSub("mockPubSub2", &rtpubsub.PubsubItem{
		Component: mockPubSub2,
	})
	compStore.AddPubSub("mockPubSub3", &rtpubsub.PubsubItem{
		Component: mockPubSub3,
	})

	compStore.SetProgramaticSubscriptions(
		rtpubsub.Subscription{
			PubsubName: "mockPubSub1",
			Topic:      "topic1",
			Rules:      []*rtpubsub.Rule{{Path: "/"}},
		},
		rtpubsub.Subscription{
			PubsubName: "mockPubSub2",
			Topic:      "topic2",
			Rules:      []*rtpubsub.Rule{{Path: "/"}},
		},
		rtpubsub.Subscription{
			PubsubName: "mockPubSub3",
			Topic:      "topic3",
			Rules:      []*rtpubsub.Rule{{Path: "/"}},
		},
	)

	compStore.AddStreamSubscription(&subapi.Subscription{
		ObjectMeta: metav1.ObjectMeta{Name: "sub1||"},
		Spec: subapi.SubscriptionSpec{
			Pubsubname: "mockPubSub1",
			Topic:      "topic4",
			Routes:     subapi.Routes{Default: "/"},
		},
	})
	compStore.AddStreamSubscription(&subapi.Subscription{
		ObjectMeta: metav1.ObjectMeta{Name: "sub2||"},
		Spec: subapi.SubscriptionSpec{
			Pubsubname: "mockPubSub2",
			Topic:      "topic5",
			Routes:     subapi.Routes{Default: "/"},
		},
	})
	compStore.AddStreamSubscription(&subapi.Subscription{
		ObjectMeta: metav1.ObjectMeta{Name: "sub3||"},
		Spec: subapi.SubscriptionSpec{
			Pubsubname: "mockPubSub3",
			Topic:      "topic6",
			Routes:     subapi.Routes{Default: "/"},
		},
	})

	subs := New(Options{
		CompStore:  compStore,
		IsHTTP:     true,
		Resiliency: resiliency.New(logger.NewLogger("test")),
		Namespace:  "ns1",
		AppID:      TestRuntimeConfigID,
		Channels:   new(channels.Channels).WithAppChannel(new(channelt.MockAppChannel)),
	})
	subs.hasInitProg = true

	gotTopics := make([][]string, 3)
	changeCalled := make([]atomic.Int32, 3)
	mockPubSub1.SetOnSubscribedTopicsChanged(func(topics []string) {
		gotTopics[0] = topics
		changeCalled[0].Add(1)
	})
	mockPubSub2.SetOnSubscribedTopicsChanged(func(topics []string) {
		gotTopics[1] = topics
		changeCalled[1].Add(1)
	})
	mockPubSub3.SetOnSubscribedTopicsChanged(func(topics []string) {
		gotTopics[2] = topics
		changeCalled[2].Add(1)
	})

	require.NoError(t, subs.StartAppSubscriptions())
	assert.Equal(t, []string{"topic1"}, gotTopics[0])
	assert.Equal(t, []string{"topic2"}, gotTopics[1])
	assert.Equal(t, []string{"topic3"}, gotTopics[2])
	mockPubSub1.AssertNumberOfCalls(t, "Subscribe", 1)
	mockPubSub2.AssertNumberOfCalls(t, "Subscribe", 1)
	mockPubSub3.AssertNumberOfCalls(t, "Subscribe", 1)

	subs.StopAppSubscriptions()
	assert.Eventually(t, func() bool {
		return changeCalled[0].Load() == 2 &&
			changeCalled[1].Load() == 2 &&
			changeCalled[2].Load() == 2
	}, time.Second, 10*time.Millisecond)
	mockPubSub1.AssertNumberOfCalls(t, "unsubscribed", 1)
	mockPubSub2.AssertNumberOfCalls(t, "unsubscribed", 1)
	mockPubSub3.AssertNumberOfCalls(t, "unsubscribed", 1)

	require.NoError(t, subs.StartAppSubscriptions())
	assert.Equal(t, []string{"topic1"}, gotTopics[0])
	assert.Equal(t, []string{"topic2"}, gotTopics[1])
	assert.Equal(t, []string{"topic3"}, gotTopics[2])
	mockPubSub1.AssertNumberOfCalls(t, "Subscribe", 2)
	mockPubSub2.AssertNumberOfCalls(t, "Subscribe", 2)
	mockPubSub3.AssertNumberOfCalls(t, "Subscribe", 2)

	subs.StopAppSubscriptions()
	assert.Eventually(t, func() bool {
		return changeCalled[0].Load() == 4 &&
			changeCalled[1].Load() == 4 &&
			changeCalled[2].Load() == 4
	}, time.Second, 10*time.Millisecond)
	mockPubSub1.AssertNumberOfCalls(t, "unsubscribed", 2)
	mockPubSub2.AssertNumberOfCalls(t, "unsubscribed", 2)
	mockPubSub3.AssertNumberOfCalls(t, "unsubscribed", 2)

	require.NoError(t, subs.StartAppSubscriptions())
	mockPubSub1.AssertNumberOfCalls(t, "Subscribe", 3)
	mockPubSub2.AssertNumberOfCalls(t, "Subscribe", 3)
	mockPubSub3.AssertNumberOfCalls(t, "Subscribe", 3)

	subs.StopAllSubscriptionsForever()
	assert.Eventually(t, func() bool {
		return changeCalled[0].Load() == 6 &&
			changeCalled[1].Load() == 6 &&
			changeCalled[2].Load() == 6
	}, time.Second, 10*time.Millisecond)
	mockPubSub1.AssertNumberOfCalls(t, "unsubscribed", 3)
	mockPubSub2.AssertNumberOfCalls(t, "unsubscribed", 3)
	mockPubSub3.AssertNumberOfCalls(t, "unsubscribed", 3)

	require.NoError(t, subs.StartAppSubscriptions())
	require.NoError(t, subs.StartAppSubscriptions())
	require.NoError(t, subs.StartAppSubscriptions())
	mockPubSub1.AssertNumberOfCalls(t, "Subscribe", 3)
	mockPubSub2.AssertNumberOfCalls(t, "Subscribe", 3)
	mockPubSub3.AssertNumberOfCalls(t, "Subscribe", 3)
}

func Test_initProgramaticSubscriptions(t *testing.T) {
	t.Run("get topic routes but no pubsubs are registered", func(t *testing.T) {
		compStore := compstore.New()
		subs := New(Options{
			CompStore:  compStore,
			IsHTTP:     true,
			Resiliency: resiliency.New(logger.NewLogger("test")),
			Namespace:  "ns1",
			AppID:      TestRuntimeConfigID,
			Channels:   new(channels.Channels),
		})
		require.NoError(t, subs.initProgramaticSubscriptions(context.Background()))
		assert.Empty(t, compStore.ListProgramaticSubscriptions())
	})

	t.Run("get topic routes but app channel is nil", func(t *testing.T) {
		compStore := compstore.New()
		compStore.AddPubSub(TestPubsubName, new(rtpubsub.PubsubItem))
		subs := New(Options{
			CompStore:  compStore,
			IsHTTP:     true,
			Resiliency: resiliency.New(logger.NewLogger("test")),
			Namespace:  "ns1",
			AppID:      TestRuntimeConfigID,
			Channels:   new(channels.Channels),
		})
		require.NoError(t, subs.initProgramaticSubscriptions(context.Background()))
		assert.Empty(t, compStore.ListProgramaticSubscriptions())
	})

	t.Run("load programmatic subscriptions. Multiple calls invokes once", func(t *testing.T) {
		mockAppChannel := new(channelt.MockAppChannel)
		compStore := compstore.New()
		compStore.AddPubSub(TestPubsubName, new(rtpubsub.PubsubItem))
		subs := New(Options{
			CompStore:  compStore,
			IsHTTP:     true,
			Resiliency: resiliency.New(logger.NewLogger("test")),
			Namespace:  "ns1",
			AppID:      TestRuntimeConfigID,
			Channels:   new(channels.Channels).WithAppChannel(mockAppChannel),
		})

		b, err := json.Marshal([]rtpubsub.SubscriptionJSON{
			{
				PubsubName: TestPubsubName,
				Topic:      "topic1",
				Routes: rtpubsub.RoutesJSON{
					Default: "/",
				},
			},
		})
		require.NoError(t, err)
		fakeResp := invokev1.NewInvokeMethodResponse(200, "OK", nil).
			WithRawDataBytes(b).
			WithContentType("application/json")
		defer fakeResp.Close()
		mockAppChannel.On("InvokeMethod", mock.AnythingOfType("context.backgroundCtx"), mock.AnythingOfType("*v1.InvokeMethodRequest")).Return(fakeResp, nil)

		require.NoError(t, subs.initProgramaticSubscriptions(context.Background()))
		require.NoError(t, subs.initProgramaticSubscriptions(context.Background()))
		require.NoError(t, subs.initProgramaticSubscriptions(context.Background()))
		assert.Len(t, compStore.ListProgramaticSubscriptions(), 1)
	})
}

func TestReloadPubSub(t *testing.T) {
	mockPubSub1 := new(daprt.InMemoryPubsub)
	mockPubSub2 := new(daprt.InMemoryPubsub)
	mockPubSub3 := new(daprt.InMemoryPubsub)

	mockPubSub1.On("Init", mock.Anything).Return(nil)
	mockPubSub2.On("Init", mock.Anything).Return(nil)
	mockPubSub3.On("Init", mock.Anything).Return(nil)
	mockPubSub1.On("Subscribe", mock.AnythingOfType("pubsub.SubscribeRequest"), mock.AnythingOfType("pubsub.Handler")).Return(nil)
	mockPubSub2.On("Subscribe", mock.AnythingOfType("pubsub.SubscribeRequest"), mock.AnythingOfType("pubsub.Handler")).Return(nil)
	mockPubSub3.On("Subscribe", mock.AnythingOfType("pubsub.SubscribeRequest"), mock.AnythingOfType("pubsub.Handler")).Return(nil)
	mockPubSub1.On("unsubscribed", "topic1").Return(nil)
	mockPubSub2.On("unsubscribed", "topic2").Return(nil)
	mockPubSub3.On("unsubscribed", "topic3").Return(nil)
	mockPubSub1.On("unsubscribed", "topic4").Return(nil)
	mockPubSub2.On("unsubscribed", "topic5").Return(nil)
	mockPubSub3.On("unsubscribed", "topic6").Return(nil)

	require.NoError(t, mockPubSub1.Init(context.Background(), contribpubsub.Metadata{}))
	require.NoError(t, mockPubSub2.Init(context.Background(), contribpubsub.Metadata{}))
	require.NoError(t, mockPubSub3.Init(context.Background(), contribpubsub.Metadata{}))

	compStore := compstore.New()
	compStore.AddPubSub("mockPubSub1", &rtpubsub.PubsubItem{
		Component: mockPubSub1,
	})
	compStore.AddPubSub("mockPubSub2", &rtpubsub.PubsubItem{
		Component: mockPubSub2,
	})
	compStore.AddPubSub("mockPubSub3", &rtpubsub.PubsubItem{
		Component: mockPubSub3,
	})

	compStore.SetProgramaticSubscriptions(
		rtpubsub.Subscription{
			PubsubName: "mockPubSub1",
			Topic:      "topic1",
			Rules:      []*rtpubsub.Rule{{Path: "/"}},
		},
		rtpubsub.Subscription{
			PubsubName: "mockPubSub2",
			Topic:      "topic2",
			Rules:      []*rtpubsub.Rule{{Path: "/"}},
		},
		rtpubsub.Subscription{
			PubsubName: "mockPubSub3",
			Topic:      "topic3",
			Rules:      []*rtpubsub.Rule{{Path: "/"}},
		},
	)

	subs := New(Options{
		CompStore:  compStore,
		IsHTTP:     true,
		Resiliency: resiliency.New(logger.NewLogger("test")),
		Namespace:  "ns1",
		AppID:      TestRuntimeConfigID,
		Channels:   new(channels.Channels).WithAppChannel(new(channelt.MockAppChannel)),
	})
	subs.hasInitProg = true

	gotTopics := make([][]string, 3)
	changeCalled := make([]atomic.Int32, 3)
	mockPubSub1.SetOnSubscribedTopicsChanged(func(topics []string) {
		gotTopics[0] = append(gotTopics[0], topics...)
		slices.Sort(gotTopics[0])
		gotTopics[0] = slices.Compact(gotTopics[0])
		changeCalled[0].Add(1)
	})
	mockPubSub2.SetOnSubscribedTopicsChanged(func(topics []string) {
		gotTopics[1] = append(gotTopics[1], topics...)
		slices.Sort(gotTopics[1])
		gotTopics[1] = slices.Compact(gotTopics[1])
		changeCalled[1].Add(1)
	})
	mockPubSub3.SetOnSubscribedTopicsChanged(func(topics []string) {
		gotTopics[2] = append(gotTopics[2], topics...)
		slices.Sort(gotTopics[2])
		gotTopics[2] = slices.Compact(gotTopics[2])
		changeCalled[2].Add(1)
	})

	require.NoError(t, subs.StartAppSubscriptions())
	assert.Equal(t, []string{"topic1"}, gotTopics[0])
	assert.Equal(t, []string{"topic2"}, gotTopics[1])
	assert.Equal(t, []string{"topic3"}, gotTopics[2])
	mockPubSub1.AssertNumberOfCalls(t, "Subscribe", 1)
	mockPubSub2.AssertNumberOfCalls(t, "Subscribe", 1)
	mockPubSub3.AssertNumberOfCalls(t, "Subscribe", 1)

	compStore.AddStreamSubscription(&subapi.Subscription{
		ObjectMeta: metav1.ObjectMeta{Name: "sub1||"},
		Spec: subapi.SubscriptionSpec{
			Pubsubname: "mockPubSub1",
			Topic:      "topic4",
			Routes:     subapi.Routes{Default: "/"},
		},
	})
	compStore.AddStreamSubscription(&subapi.Subscription{
		ObjectMeta: metav1.ObjectMeta{Name: "sub2||"},
		Spec: subapi.SubscriptionSpec{
			Pubsubname: "mockPubSub2",
			Topic:      "topic5",
			Routes:     subapi.Routes{Default: "/"},
		},
	})
	compStore.AddStreamSubscription(&subapi.Subscription{
		ObjectMeta: metav1.ObjectMeta{Name: "sub3||"},
		Spec: subapi.SubscriptionSpec{
			Pubsubname: "mockPubSub3",
			Topic:      "topic6",
			Routes:     subapi.Routes{Default: "/"},
		},
	})

	subs.ReloadPubSub("mockPubSub1")
	assert.Eventually(t, func() bool {
		return changeCalled[0].Load() == 4
	}, time.Second, 10*time.Millisecond)
	assert.Equal(t, []string{"topic1", "topic4"}, gotTopics[0])
	assert.Equal(t, []string{"topic2"}, gotTopics[1])
	assert.Equal(t, []string{"topic3"}, gotTopics[2])
	mockPubSub1.AssertNumberOfCalls(t, "Subscribe", 3)
	mockPubSub2.AssertNumberOfCalls(t, "Subscribe", 1)
	mockPubSub3.AssertNumberOfCalls(t, "Subscribe", 1)
	mockPubSub1.AssertNumberOfCalls(t, "unsubscribed", 1)
	mockPubSub2.AssertNumberOfCalls(t, "unsubscribed", 0)
	mockPubSub3.AssertNumberOfCalls(t, "unsubscribed", 0)

	subs.ReloadPubSub("mockPubSub2")
	assert.Eventually(t, func() bool {
		return changeCalled[1].Load() == 4
	}, time.Second, 10*time.Millisecond)
	assert.Equal(t, []string{"topic1", "topic4"}, gotTopics[0])
	assert.Equal(t, []string{"topic2", "topic5"}, gotTopics[1])
	assert.Equal(t, []string{"topic3"}, gotTopics[2])
	mockPubSub1.AssertNumberOfCalls(t, "Subscribe", 3)
	mockPubSub2.AssertNumberOfCalls(t, "Subscribe", 3)
	mockPubSub3.AssertNumberOfCalls(t, "Subscribe", 1)
	mockPubSub1.AssertNumberOfCalls(t, "unsubscribed", 1)
	mockPubSub2.AssertNumberOfCalls(t, "unsubscribed", 1)
	mockPubSub3.AssertNumberOfCalls(t, "unsubscribed", 0)

	subs.ReloadPubSub("mockPubSub3")
	assert.Eventually(t, func() bool {
		return changeCalled[2].Load() == 4
	}, time.Second, 10*time.Millisecond)
	assert.Equal(t, []string{"topic1", "topic4"}, gotTopics[0])
	assert.Equal(t, []string{"topic2", "topic5"}, gotTopics[1])
	assert.Equal(t, []string{"topic3", "topic6"}, gotTopics[2])
	mockPubSub1.AssertNumberOfCalls(t, "Subscribe", 3)
	mockPubSub2.AssertNumberOfCalls(t, "Subscribe", 3)
	mockPubSub3.AssertNumberOfCalls(t, "Subscribe", 3)
	mockPubSub1.AssertNumberOfCalls(t, "unsubscribed", 1)
	mockPubSub2.AssertNumberOfCalls(t, "unsubscribed", 1)
	mockPubSub3.AssertNumberOfCalls(t, "unsubscribed", 1)

	subs.StopPubSub("mockPubSub1")
	assert.Eventually(t, func() bool {
		return changeCalled[0].Load() == 6
	}, time.Second, 10*time.Millisecond)
	mockPubSub1.AssertNumberOfCalls(t, "Subscribe", 3)
	mockPubSub2.AssertNumberOfCalls(t, "Subscribe", 3)
	mockPubSub3.AssertNumberOfCalls(t, "Subscribe", 3)
	mockPubSub1.AssertNumberOfCalls(t, "unsubscribed", 3)
	mockPubSub2.AssertNumberOfCalls(t, "unsubscribed", 1)
	mockPubSub3.AssertNumberOfCalls(t, "unsubscribed", 1)

	subs.StopPubSub("mockPubSub2")
	assert.Eventually(t, func() bool {
		return changeCalled[1].Load() == 6
	}, time.Second, 10*time.Millisecond)
	mockPubSub1.AssertNumberOfCalls(t, "Subscribe", 3)
	mockPubSub2.AssertNumberOfCalls(t, "Subscribe", 3)
	mockPubSub3.AssertNumberOfCalls(t, "Subscribe", 3)
	mockPubSub1.AssertNumberOfCalls(t, "unsubscribed", 3)
	mockPubSub2.AssertNumberOfCalls(t, "unsubscribed", 3)
	mockPubSub3.AssertNumberOfCalls(t, "unsubscribed", 1)

	subs.StopPubSub("mockPubSub3")
	assert.Eventually(t, func() bool {
		return changeCalled[2].Load() == 6
	}, time.Second, 10*time.Millisecond)
	mockPubSub1.AssertNumberOfCalls(t, "Subscribe", 3)
	mockPubSub2.AssertNumberOfCalls(t, "Subscribe", 3)
	mockPubSub3.AssertNumberOfCalls(t, "Subscribe", 3)
	mockPubSub1.AssertNumberOfCalls(t, "unsubscribed", 3)
	mockPubSub2.AssertNumberOfCalls(t, "unsubscribed", 3)
	mockPubSub3.AssertNumberOfCalls(t, "unsubscribed", 3)
}
mikeee/dapr
pkg/runtime/processor/subscriber/subscriber_test.go
GO
mit
16,904
/*
Copyright 2024 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package processor

import (
	"context"

	subapi "github.com/dapr/dapr/pkg/apis/subscriptions/v2alpha1"
	rtpubsub "github.com/dapr/dapr/pkg/runtime/pubsub"
	"github.com/dapr/dapr/utils"
)

func (p *Processor) AddPendingSubscription(ctx context.Context, subscriptions ...subapi.Subscription) bool {
	p.lock.Lock()
	defer p.lock.Unlock()

	if p.shutdown.Load() {
		return false
	}

	scopedSubs := p.scopeFilterSubscriptions(subscriptions)
	if len(scopedSubs) == 0 {
		return true
	}

	for i := range scopedSubs {
		comp := scopedSubs[i]
		sub := rtpubsub.Subscription{
			PubsubName:      comp.Spec.Pubsubname,
			Topic:           comp.Spec.Topic,
			DeadLetterTopic: comp.Spec.DeadLetterTopic,
			Metadata:        comp.Spec.Metadata,
			Scopes:          comp.Scopes,
			BulkSubscribe: &rtpubsub.BulkSubscribe{
				Enabled:            comp.Spec.BulkSubscribe.Enabled,
				MaxMessagesCount:   comp.Spec.BulkSubscribe.MaxMessagesCount,
				MaxAwaitDurationMs: comp.Spec.BulkSubscribe.MaxAwaitDurationMs,
			},
		}

		for _, rule := range comp.Spec.Routes.Rules {
			erule, err := rtpubsub.CreateRoutingRule(rule.Match, rule.Path)
			if err != nil {
				p.errorSubscriptions(ctx, err)
				return false
			}
			sub.Rules = append(sub.Rules, erule)
		}

		if len(comp.Spec.Routes.Default) > 0 {
			sub.Rules = append(sub.Rules, &rtpubsub.Rule{
				Path: comp.Spec.Routes.Default,
			})
		}

		p.compStore.AddDeclarativeSubscription(&comp, sub)
		if err := p.subscriber.ReloadDeclaredAppSubscription(comp.Name, comp.Spec.Pubsubname); err != nil {
			p.compStore.DeleteDeclarativeSubscription(comp.Name)
			p.errorSubscriptions(ctx, err)
			return false
		}
	}

	return true
}

func (p *Processor) scopeFilterSubscriptions(subs []subapi.Subscription) []subapi.Subscription {
	scopedSubs := make([]subapi.Subscription, 0, len(subs))
	for _, sub := range subs {
		if len(sub.Scopes) > 0 && !utils.Contains[string](sub.Scopes, p.appID) {
			continue
		}
		scopedSubs = append(scopedSubs, sub)
	}
	return scopedSubs
}

func (p *Processor) CloseSubscription(ctx context.Context, sub *subapi.Subscription) error {
	p.lock.Lock()
	defer p.lock.Unlock()

	if _, ok := p.compStore.GetDeclarativeSubscription(sub.Name); !ok {
		return nil
	}

	p.compStore.DeleteDeclarativeSubscription(sub.Name)
	if err := p.subscriber.ReloadDeclaredAppSubscription(sub.Name, sub.Spec.Pubsubname); err != nil {
		return err
	}

	return nil
}

func (p *Processor) processSubscriptions(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return nil
	case <-p.closedCh:
		return nil
	case err := <-p.subErrCh:
		return err
	}
}

func (p *Processor) errorSubscriptions(ctx context.Context, err error) {
	select {
	case p.subErrCh <- err:
	case <-ctx.Done():
	case <-p.closedCh:
	}
}
mikeee/dapr
pkg/runtime/processor/subscriptions.go
GO
mit
3,307
/*
Copyright 2024 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package processor

import (
	"testing"

	"github.com/stretchr/testify/require"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	subapi "github.com/dapr/dapr/pkg/apis/subscriptions/v2alpha1"
)

func Test_scopeFilterSubscriptions(t *testing.T) {
	tests := map[string]struct {
		input []subapi.Subscription
		exp   []subapi.Subscription
	}{
		"nil subs": {
			input: nil,
			exp:   []subapi.Subscription{},
		},
		"no subs": {
			input: []subapi.Subscription{},
			exp:   []subapi.Subscription{},
		},
		"single no scope": {
			input: []subapi.Subscription{
				{ObjectMeta: metav1.ObjectMeta{Name: "sub1"}, Scopes: []string{}},
			},
			exp: []subapi.Subscription{
				{ObjectMeta: metav1.ObjectMeta{Name: "sub1"}, Scopes: []string{}},
			},
		},
		"multiple no scope": {
			input: []subapi.Subscription{
				{ObjectMeta: metav1.ObjectMeta{Name: "sub1"}, Scopes: []string{}},
				{ObjectMeta: metav1.ObjectMeta{Name: "sub2"}, Scopes: []string{}},
				{ObjectMeta: metav1.ObjectMeta{Name: "sub3"}, Scopes: []string{}},
			},
			exp: []subapi.Subscription{
				{ObjectMeta: metav1.ObjectMeta{Name: "sub1"}, Scopes: []string{}},
				{ObjectMeta: metav1.ObjectMeta{Name: "sub2"}, Scopes: []string{}},
				{ObjectMeta: metav1.ObjectMeta{Name: "sub3"}, Scopes: []string{}},
			},
		},
		"single scoped": {
			input: []subapi.Subscription{
				{ObjectMeta: metav1.ObjectMeta{Name: "sub1"}, Scopes: []string{"id-1", "id-2"}},
			},
			exp: []subapi.Subscription{
				{ObjectMeta: metav1.ObjectMeta{Name: "sub1"}, Scopes: []string{"id-1", "id-2"}},
			},
		},
		"multiple scoped": {
			input: []subapi.Subscription{
				{ObjectMeta: metav1.ObjectMeta{Name: "sub1"}, Scopes: []string{"id-1", "id-2"}},
				{ObjectMeta: metav1.ObjectMeta{Name: "sub2"}, Scopes: []string{"id-1"}},
				{ObjectMeta: metav1.ObjectMeta{Name: "sub3"}, Scopes: []string{}},
			},
			exp: []subapi.Subscription{
				{ObjectMeta: metav1.ObjectMeta{Name: "sub1"}, Scopes: []string{"id-1", "id-2"}},
				{ObjectMeta: metav1.ObjectMeta{Name: "sub2"}, Scopes: []string{"id-1"}},
				{ObjectMeta: metav1.ObjectMeta{Name: "sub3"}, Scopes: []string{}},
			},
		},
		"single out of scope": {
			input: []subapi.Subscription{
				{ObjectMeta: metav1.ObjectMeta{Name: "sub1"}, Scopes: []string{"id-2"}},
			},
			exp: []subapi.Subscription{},
		},
		"multiple out of scope": {
			input: []subapi.Subscription{
				{ObjectMeta: metav1.ObjectMeta{Name: "sub1"}, Scopes: []string{"id-3", "id-2"}},
				{ObjectMeta: metav1.ObjectMeta{Name: "sub2"}, Scopes: []string{"id-2"}},
				{ObjectMeta: metav1.ObjectMeta{Name: "sub3"}, Scopes: []string{"id-3"}},
			},
			exp: []subapi.Subscription{},
		},
		"multiple some scoped": {
			input: []subapi.Subscription{
				{ObjectMeta: metav1.ObjectMeta{Name: "sub1"}, Scopes: []string{"id-3", "id-2"}},
				{ObjectMeta: metav1.ObjectMeta{Name: "sub2"}, Scopes: []string{"id-1"}},
				{ObjectMeta: metav1.ObjectMeta{Name: "sub3"}, Scopes: []string{"id-3", "id-1"}},
				{ObjectMeta: metav1.ObjectMeta{Name: "sub4"}, Scopes: []string{}},
				{ObjectMeta: metav1.ObjectMeta{Name: "sub5"}, Scopes: []string{"id-3", "id-4"}},
			},
			exp: []subapi.Subscription{
				{ObjectMeta: metav1.ObjectMeta{Name: "sub2"}, Scopes: []string{"id-1"}},
				{ObjectMeta: metav1.ObjectMeta{Name: "sub3"}, Scopes: []string{"id-3", "id-1"}},
				{ObjectMeta: metav1.ObjectMeta{Name: "sub4"}, Scopes: []string{}},
			},
		},
	}

	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			got := (&Processor{appID: "id-1"}).scopeFilterSubscriptions(test.input)
			require.Equal(t, test.exp, got)
		})
	}
}
mikeee/dapr
pkg/runtime/processor/subscriptions_test.go
GO
mit
4,731
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package wfbackend

import (
	"context"
	"errors"
	"sync"
	"time"

	"github.com/microsoft/durabletask-go/backend"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	compapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	wfbeComp "github.com/dapr/dapr/pkg/components/wfbackend"
	diag "github.com/dapr/dapr/pkg/diagnostics"
	"github.com/dapr/dapr/pkg/runtime/compstore"
	rterrors "github.com/dapr/dapr/pkg/runtime/errors"
	"github.com/dapr/dapr/pkg/runtime/meta"
	"github.com/dapr/kit/logger"
)

var log = logger.NewLogger("dapr.runtime.processor.workflowbackend")

type Options struct {
	AppID          string
	Registry       *wfbeComp.Registry
	ComponentStore *compstore.ComponentStore
	Meta           *meta.Meta
}

type workflowBackend struct {
	registry  *wfbeComp.Registry
	compStore *compstore.ComponentStore
	meta      *meta.Meta
	lock      sync.Mutex
	backend   backend.Backend
	appID     string
}

func New(opts Options) *workflowBackend {
	return &workflowBackend{
		registry:  opts.Registry,
		compStore: opts.ComponentStore,
		meta:      opts.Meta,
		appID:     opts.AppID,
	}
}

func (w *workflowBackend) Init(ctx context.Context, comp compapi.Component) error {
	w.lock.Lock()
	defer w.lock.Unlock()

	if w.backend != nil {
		// Can only have 1 workflow backend component
		return errors.New("cannot create more than one workflow backend component")
	}

	// Create the component
	fName := comp.LogName()
	beFactory, err := w.registry.Create(comp.Spec.Type, comp.Spec.Version, fName)
	if err != nil {
		log.Errorf("Error creating workflow backend component (%s): %v", fName, err)
		diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "init", comp.ObjectMeta.Name)
		return err
	}

	if beFactory == nil {
		return nil
	}

	// Initialization
	baseMetadata, err := w.meta.ToBaseMetadata(comp)
	if err != nil {
		diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "init", comp.ObjectMeta.Name)
		return rterrors.NewInit(rterrors.InitComponentFailure, fName, err)
	}

	be, err := beFactory(wfbeComp.Metadata{
		AppID: w.appID,
		Base:  baseMetadata,
	})
	if err != nil {
		diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "init", comp.ObjectMeta.Name)
		return rterrors.NewInit(rterrors.InitComponentFailure, fName, err)
	}

	log.Infof("Using %s as workflow backend", comp.Spec.Type)
	diag.DefaultMonitoring.ComponentInitialized(comp.Spec.Type)
	w.backend = be
	w.compStore.AddWorkflowBackend(comp.Name, be)

	return nil
}

func (w *workflowBackend) Close(comp compapi.Component) error {
	w.lock.Lock()
	defer w.lock.Unlock()

	backend, ok := w.compStore.GetWorkflowBackend(comp.Name)
	if !ok {
		return nil
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
	defer cancel()
	defer w.compStore.DeleteWorkflowBackend(comp.Name)
	w.backend = nil

	return backend.Stop(ctx)
}

func (w *workflowBackend) Backend() (backend.Backend, bool) {
	w.lock.Lock()
	defer w.lock.Unlock()

	if w.backend == nil {
		return nil, false
	}

	return w.backend, true
}

func ComponentDefinition() compapi.Component {
	return compapi.Component{
		TypeMeta:   metav1.TypeMeta{Kind: "Component"},
		ObjectMeta: metav1.ObjectMeta{Name: "dapr"},
		Spec: compapi.ComponentSpec{
			Type:    "workflow.dapr",
			Version: "v1",
		},
	}
}
mikeee/dapr
pkg/runtime/processor/wfbackend/wfbackend.go
GO
mit
3,820
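Not part of the repository: a self-contained sketch of the mutex-guarded "first Init wins" rule the file above enforces for its single workflow backend; the singleSlot type is invented for illustration.

package main

import (
	"errors"
	"fmt"
	"sync"
)

// singleSlot mirrors workflowBackend's rule that at most one workflow
// backend component may be initialized; later attempts return an error.
type singleSlot struct {
	mu  sync.Mutex
	val *string
}

func (s *singleSlot) set(v string) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.val != nil {
		return errors.New("cannot create more than one workflow backend component")
	}
	s.val = &v
	return nil
}

func main() {
	var s singleSlot
	fmt.Println(s.set("backend-a")) // <nil>
	fmt.Println(s.set("backend-b")) // error: cannot create more than one ...
}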
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package workflow

import (
	"context"
	"sync"

	"github.com/dapr/components-contrib/workflows"
	compapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	compworkflow "github.com/dapr/dapr/pkg/components/workflows"
	diag "github.com/dapr/dapr/pkg/diagnostics"
	"github.com/dapr/dapr/pkg/runtime/compstore"
	rterrors "github.com/dapr/dapr/pkg/runtime/errors"
	"github.com/dapr/dapr/pkg/runtime/meta"
	"github.com/dapr/kit/logger"
)

var log = logger.NewLogger("dapr.runtime.processor.workflow")

type Options struct {
	Registry       *compworkflow.Registry
	ComponentStore *compstore.ComponentStore
	Meta           *meta.Meta
}

type workflow struct {
	registry  *compworkflow.Registry
	compStore *compstore.ComponentStore
	meta      *meta.Meta
	lock      sync.Mutex
}

func New(opts Options) *workflow {
	return &workflow{
		registry:  opts.Registry,
		compStore: opts.ComponentStore,
		meta:      opts.Meta,
	}
}

func (w *workflow) Init(ctx context.Context, comp compapi.Component) error {
	w.lock.Lock()
	defer w.lock.Unlock()

	// create the component
	fName := comp.LogName()
	workflowComp, err := w.registry.Create(comp.Spec.Type, comp.Spec.Version, fName)
	if err != nil {
		log.Warnf("error creating workflow component (%s): %s", comp.LogName(), err)
		diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "init", comp.ObjectMeta.Name)
		return err
	}

	if workflowComp == nil {
		return nil
	}

	// initialization
	baseMetadata, err := w.meta.ToBaseMetadata(comp)
	if err != nil {
		diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "init", comp.ObjectMeta.Name)
		return rterrors.NewInit(rterrors.InitComponentFailure, fName, err)
	}

	err = workflowComp.Init(workflows.Metadata{Base: baseMetadata})
	if err != nil {
		diag.DefaultMonitoring.ComponentInitFailed(comp.Spec.Type, "init", comp.ObjectMeta.Name)
		return rterrors.NewInit(rterrors.InitComponentFailure, fName, err)
	}

	// save workflow related configuration
	w.compStore.AddWorkflow(comp.ObjectMeta.Name, workflowComp)
	diag.DefaultMonitoring.ComponentInitialized(comp.Spec.Type)

	return nil
}

func (w *workflow) Close(comp compapi.Component) error {
	w.lock.Lock()
	defer w.lock.Unlock()

	// We don't "Close" a workflow here because that has no meaning today since
	// Dapr doesn't support third-party workflows. Internal workflows are based
	// on the actor subsystem so there is nothing to close.
	w.compStore.DeleteWorkflow(comp.Name)

	return nil
}
mikeee/dapr
pkg/runtime/processor/workflow/workflow.go
GO
mit
3,008
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pubsub

import (
	"context"

	contribPubsub "github.com/dapr/components-contrib/pubsub"
	rtv1pb "github.com/dapr/dapr/pkg/proto/runtime/v1"
)

// PubsubItem is a pubsub component with its scoped subscriptions and
// publishings.
type PubsubItem struct {
	Component           contribPubsub.PubSub
	ScopedSubscriptions []string
	ScopedPublishings   []string
	AllowedTopics       []string
	ProtectedTopics     []string
	NamespaceScoped     bool
}

// Adapter is the interface for message buses.
type Adapter interface {
	Publish(context.Context, *contribPubsub.PublishRequest) error
	BulkPublish(context.Context, *contribPubsub.BulkPublishRequest) (contribPubsub.BulkPublishResponse, error)
}

type AdapterStreamer interface {
	Subscribe(rtv1pb.Dapr_SubscribeTopicEventsAlpha1Server, *rtv1pb.SubscribeTopicEventsInitialRequestAlpha1) error
	Publish(context.Context, *SubscribedMessage) error
	StreamerKey(pubsub, topic string) string
}

func IsOperationAllowed(topic string, pubSub *PubsubItem, scopedTopics []string) bool {
	var inAllowedTopics, inProtectedTopics bool

	// first check if allowedTopics contain it
	if len(pubSub.AllowedTopics) > 0 {
		for _, t := range pubSub.AllowedTopics {
			if t == topic {
				inAllowedTopics = true
				break
			}
		}
		if !inAllowedTopics {
			return false
		}
	}

	// check if topic is protected
	if len(pubSub.ProtectedTopics) > 0 {
		for _, t := range pubSub.ProtectedTopics {
			if t == topic {
				inProtectedTopics = true
				break
			}
		}
	}

	// if topic is protected then a scope must be applied
	if !inProtectedTopics && len(scopedTopics) == 0 {
		return true
	}

	// check if a granular scope has been applied
	allowedScope := false
	for _, t := range scopedTopics {
		if t == topic {
			allowedScope = true
			break
		}
	}

	return allowedScope
}
mikeee/dapr
pkg/runtime/pubsub/adapter.go
GO
mit
2,368
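A hedged usage sketch of the gating rules above — the allowed list is a hard filter, protected topics always require a matching scope, and scopes otherwise only bite when present. The topic names are made up; only the call to IsOperationAllowed reflects the real API.

package main

import (
	"fmt"

	rtpubsub "github.com/dapr/dapr/pkg/runtime/pubsub"
)

func main() {
	ps := &rtpubsub.PubsubItem{
		AllowedTopics:   []string{"orders", "audit"},
		ProtectedTopics: []string{"audit"},
	}

	// "orders" is allowed and unprotected; with no scopes it passes.
	fmt.Println(rtpubsub.IsOperationAllowed("orders", ps, nil)) // true
	// "audit" is protected, so it needs an explicit scope.
	fmt.Println(rtpubsub.IsOperationAllowed("audit", ps, nil))               // false
	fmt.Println(rtpubsub.IsOperationAllowed("audit", ps, []string{"audit"})) // true
	// "payments" is not in the allowed list at all.
	fmt.Println(rtpubsub.IsOperationAllowed("payments", ps, nil)) // false
}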
/*
Copyright 2024 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pubsub

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestIsOperationAllowed(t *testing.T) {
	t.Run("test protected topics, no scopes, operation not allowed", func(t *testing.T) {
		a := IsOperationAllowed("topic1", &PubsubItem{ProtectedTopics: []string{"topic1"}}, nil)
		assert.False(t, a)
	})

	t.Run("test allowed topics, no scopes, operation allowed", func(t *testing.T) {
		a := IsOperationAllowed("topic1", &PubsubItem{AllowedTopics: []string{"topic1"}}, nil)
		assert.True(t, a)
	})

	t.Run("test allowed topics, no scopes, operation not allowed", func(t *testing.T) {
		a := IsOperationAllowed("topic2", &PubsubItem{AllowedTopics: []string{"topic1"}}, nil)
		assert.False(t, a)
	})

	t.Run("test other protected topics, no allowed topics, no scopes, operation allowed", func(t *testing.T) {
		a := IsOperationAllowed("topic2", &PubsubItem{ProtectedTopics: []string{"topic1"}}, nil)
		assert.True(t, a)
	})

	t.Run("test allowed topics, with scopes, operation allowed", func(t *testing.T) {
		a := IsOperationAllowed("topic1", &PubsubItem{AllowedTopics: []string{"topic1"}, ScopedPublishings: []string{"topic1"}}, nil)
		assert.True(t, a)
	})

	t.Run("test protected topics, with scopes, operation allowed", func(t *testing.T) {
		a := IsOperationAllowed("topic1", &PubsubItem{ProtectedTopics: []string{"topic1"}, ScopedPublishings: []string{"topic1"}}, []string{"topic1"})
		assert.True(t, a)
	})

	t.Run("topic in allowed topics, not in existing publishing scopes, operation not allowed", func(t *testing.T) {
		a := IsOperationAllowed("topic1", &PubsubItem{AllowedTopics: []string{"topic1"}, ScopedPublishings: []string{"topic2"}}, []string{"topic2"})
		assert.False(t, a)
	})

	t.Run("topic in protected topics, not in existing publishing scopes, operation not allowed", func(t *testing.T) {
		a := IsOperationAllowed("topic1", &PubsubItem{ProtectedTopics: []string{"topic1"}, ScopedPublishings: []string{"topic2"}}, nil)
		assert.False(t, a)
	})

	t.Run("topic in allowed topics, not in publishing scopes, operation allowed", func(t *testing.T) {
		a := IsOperationAllowed("topic1", &PubsubItem{AllowedTopics: []string{"topic1"}, ScopedPublishings: []string{}}, nil)
		assert.True(t, a)
	})

	t.Run("topic in protected topics, not in publishing scopes, operation not allowed", func(t *testing.T) {
		a := IsOperationAllowed("topic1", &PubsubItem{ProtectedTopics: []string{"topic1"}, ScopedPublishings: []string{}}, nil)
		assert.False(t, a)
	})

	t.Run("topics A and B in allowed topics, A in publishing scopes, operation allowed for A only", func(t *testing.T) {
		pubsub := &PubsubItem{AllowedTopics: []string{"A", "B"}, ScopedPublishings: []string{"A"}}
		a := IsOperationAllowed("A", pubsub, []string{"A"})
		assert.True(t, a)
		b := IsOperationAllowed("B", pubsub, []string{"A"})
		assert.False(t, b)
	})

	t.Run("topics A and B in protected topics, A in publishing scopes, operation allowed for A only", func(t *testing.T) {
		pubSub := &PubsubItem{ProtectedTopics: []string{"A", "B"}, ScopedPublishings: []string{"A"}}
		a := IsOperationAllowed("A", pubSub, []string{"A"})
		assert.True(t, a)
		b := IsOperationAllowed("B", pubSub, []string{"A"})
		assert.False(t, b)
	})
}
mikeee/dapr
pkg/runtime/pubsub/adapter_test.go
GO
mit
3,784
/*
Copyright 2022 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pubsub

import (
	"context"
	"sync/atomic"

	contribPubsub "github.com/dapr/components-contrib/pubsub"
	"github.com/dapr/dapr/pkg/resiliency"
	"github.com/dapr/dapr/utils"
)

func ApplyBulkPublishResiliency(ctx context.Context, req *contribPubsub.BulkPublishRequest,
	policyDef *resiliency.PolicyDefinition, bulkPublisher contribPubsub.BulkPublisher,
) (contribPubsub.BulkPublishResponse, error) {
	// Contains the latest request entries to be sent to the component
	var requestEntries atomic.Pointer[[]contribPubsub.BulkMessageEntry]
	requestEntries.Store(&req.Entries)
	policyRunner := resiliency.NewRunnerWithOptions(ctx, policyDef,
		resiliency.RunnerOpts[contribPubsub.BulkPublishResponse]{
			Accumulator: func(res contribPubsub.BulkPublishResponse) {
				if len(res.FailedEntries) == 0 {
					return
				}
				// requestEntries can be modified here as Accumulator is executed synchronously
				failedEntryIds := extractEntryIds(res.FailedEntries)
				filteredEntries := utils.Filter(*requestEntries.Load(), func(item contribPubsub.BulkMessageEntry) bool {
					_, ok := failedEntryIds[item.EntryId]
					return ok
				})
				requestEntries.Store(&filteredEntries)
			},
		})
	res, err := policyRunner(func(ctx context.Context) (contribPubsub.BulkPublishResponse, error) {
		newEntries := *requestEntries.Load()
		newReq := &contribPubsub.BulkPublishRequest{
			PubsubName: req.PubsubName,
			Topic:      req.Topic,
			Entries:    newEntries,
			Metadata:   req.Metadata,
		}
		return bulkPublisher.BulkPublish(ctx, newReq)
	})
	// If the final error is a timeout, an open circuit breaker, or circuit breaker
	// "too many requests", return the current request entries as failed.
	if err != nil && (len(res.FailedEntries) == 0 ||
		resiliency.IsTimeoutExeceeded(err) || resiliency.IsCircuitBreakerError(err)) {
		return contribPubsub.NewBulkPublishResponse(*requestEntries.Load(), err), err
	}
	// Otherwise, retries have been exhausted; return the final response received
	// from the bulk publisher.
	return res, err
}

func extractEntryIds(failedEntries []contribPubsub.BulkPublishResponseFailedEntry) map[string]struct{} {
	entryIds := make(map[string]struct{}, len(failedEntries))
	for _, failedEntry := range failedEntries {
		entryIds[failedEntry.EntryId] = struct{}{}
	}
	return entryIds
}
mikeee/dapr
pkg/runtime/pubsub/bulkpublish_resiliency.go
GO
mit
2,820
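Not part of the repository: a self-contained sketch of what the Accumulator above does between attempts — narrowing the next retry to only the entries whose IDs failed in the previous response. The helper name keepFailed is invented for illustration.

package main

import "fmt"

// keepFailed returns only the entry IDs that appear in the failed set,
// mirroring how the Accumulator shrinks requestEntries before a retry.
func keepFailed(entries []string, failed map[string]struct{}) []string {
	out := make([]string, 0, len(entries))
	for _, id := range entries {
		if _, ok := failed[id]; ok {
			out = append(out, id)
		}
	}
	return out
}

func main() {
	failed := map[string]struct{}{"0": {}, "2": {}}
	fmt.Println(keepFailed([]string{"0", "1", "2", "3"}, failed)) // [0 2]
}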
/*
Copyright 2022 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pubsub

import (
	"context"
	"strconv"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	contribPubsub "github.com/dapr/components-contrib/pubsub"
	resiliencyV1alpha "github.com/dapr/dapr/pkg/apis/resiliency/v1alpha1"
	"github.com/dapr/dapr/pkg/resiliency"
	"github.com/dapr/dapr/pkg/resiliency/breaker"
	"github.com/dapr/kit/logger"
	"github.com/dapr/kit/ptr"
)

var (
	testLogger = logger.NewLogger("dapr.test")
	zero       = contribPubsub.BulkPublishResponse{}
)

type mockBulkPublisher struct {
	t                 *testing.T
	rwLock            sync.RWMutex
	entryIDRetryTimes map[string]int
	failEvenOnes      bool
	failAllEvents     bool
	applyTimeout      bool
	timeoutSleep      time.Duration
	failCount         int
}

// Pass in failCount to fail the first n times
// Pass in failEvenOnes to fail all events with even entryId
// Pass in failAllEvents to fail all events
func NewMockBulkPublisher(t *testing.T, failCount int, failEvenOnes bool, failAllEvents bool) *mockBulkPublisher {
	return &mockBulkPublisher{
		t:                 t,
		rwLock:            sync.RWMutex{},
		entryIDRetryTimes: map[string]int{},
		failCount:         failCount,
		failEvenOnes:      failEvenOnes,
		failAllEvents:     failAllEvents,
	}
}

func (m *mockBulkPublisher) BulkPublish(ctx context.Context, req *contribPubsub.BulkPublishRequest) (contribPubsub.BulkPublishResponse, error) {
	m.rwLock.Lock()
	defer m.rwLock.Unlock()

	if req == nil {
		return zero, assert.AnError
	}

	if m.applyTimeout {
		time.Sleep(m.timeoutSleep)
		// return some error
		res := contribPubsub.NewBulkPublishResponse(req.Entries, assert.AnError)
		res.FailedEntries = res.FailedEntries[1:]
		return res, assert.AnError
	}

	res := contribPubsub.BulkPublishResponse{
		FailedEntries: make([]contribPubsub.BulkPublishResponseFailedEntry, 0, len(req.Entries)),
	}

	for _, entry := range req.Entries {
		// count the entryId retry times
		if _, ok := m.entryIDRetryTimes[entry.EntryId]; ok {
			m.entryIDRetryTimes[entry.EntryId]++
		} else {
			m.entryIDRetryTimes[entry.EntryId] = 1
		}
		// assert the data and metadata are correct
		assert.Equal(m.t, map[string]string{
			"key" + entry.EntryId: "value" + entry.EntryId,
		}, entry.Metadata)
		assert.Equal(m.t, entry.Event, []byte("data "+entry.EntryId))
	}

	// fail events based on the input count
	if m.failCount > 0 {
		m.failCount--
		for _, entry := range req.Entries {
			k, _ := strconv.ParseInt(entry.EntryId, 10, 32)
			if m.failAllEvents || (k%2 == 0 && m.failEvenOnes) {
				res.FailedEntries = append(res.FailedEntries,
					contribPubsub.BulkPublishResponseFailedEntry{
						EntryId: entry.EntryId,
						Error:   assert.AnError,
					})
			}
		}
		return res, assert.AnError
	}
	return res, nil
}

func TestApplyBulkPublishResiliency(t *testing.T) {
	ctx := context.Background()
	pubsubName := "test-pubsub"
	bulkMessageEntries := []contribPubsub.BulkMessageEntry{
		{
			EntryId: "0",
			Metadata: map[string]string{
				"key0": "value0",
			},
			Event: []byte("data 0"),
		},
		{
			EntryId: "1",
			Metadata: map[string]string{
				"key1": "value1",
			},
			Event: []byte("data 1"),
		},
		{
			EntryId: "2",
			Metadata: map[string]string{
				"key2": "value2",
			},
			Event: []byte("data 2"),
		},
		{
			EntryId: "3",
			Metadata: map[string]string{
				"key3": "value3",
			},
			Event: []byte("data 3"),
		},
		{
			EntryId: "4",
			Metadata: map[string]string{
				"key4": "value4",
			},
			Event: []byte("data 4"),
		},
		{
			EntryId: "5",
			Metadata: map[string]string{
				"key5": "value5",
			},
			Event: []byte("data 5"),
		},
	}

	// Create test retry and timeout configurations
	shortRetry := resiliencyV1alpha.Retry{
		Policy:   "constant",
		Duration: "2s",
	}
	longRetry := resiliencyV1alpha.Retry{
		Policy:   "constant",
		Duration: "10s",
	}
	longTimeout := "10s"

	// Create Mock request
	req := &contribPubsub.BulkPublishRequest{
		Entries:    bulkMessageEntries, // note the underlying array is shared across tests
		Topic:      "test-topic",
		PubsubName: pubsubName,
	}

	t.Run("fail all events with retries", func(t *testing.T) {
		// Setup
		// fail all events once
		bulkPublisher := NewMockBulkPublisher(t, 1, true, true)
		// set short retry with 3 retries max
		shortRetry.MaxRetries = ptr.Of(3)
		// timeout will not be triggered here
		policyProvider := createResPolicyProvider(resiliencyV1alpha.CircuitBreaker{}, longTimeout, shortRetry)
		policyDef := policyProvider.ComponentOutboundPolicy(pubsubName, resiliency.Pubsub)

		// Act
		res, err := ApplyBulkPublishResiliency(ctx, req, policyDef, bulkPublisher)

		// Assert
		// expecting no final error, the events will pass in the second try
		require.NoError(t, err)
		assert.Empty(t, res.FailedEntries)
		assert.Len(t, bulkPublisher.entryIDRetryTimes, 6)
		t.Logf("event ID try count map %v\n\n", bulkPublisher.entryIDRetryTimes)
		assertRetryCount(t, map[string]int{
			"0": 2,
			"2": 2,
			"4": 2,
			"1": 2,
			"3": 2,
			"5": 2,
		}, bulkPublisher.entryIDRetryTimes)
	})

	t.Run("fail all events exhaust retries", func(t *testing.T) {
		// Setup
		// fail all events and exhaust retries
		// mock bulk publisher set to fail all events 3 times
		bulkPublisher := NewMockBulkPublisher(t, 3, true, true)
		// set short retry with 2 retries max
		shortRetry.MaxRetries = ptr.Of(2)
		// timeout will not be triggered here
		policyProvider := createResPolicyProvider(resiliencyV1alpha.CircuitBreaker{}, longTimeout, shortRetry)
		policyDef := policyProvider.ComponentOutboundPolicy(pubsubName, resiliency.Pubsub)

		// Act
		res, err := ApplyBulkPublishResiliency(ctx, req, policyDef, bulkPublisher)

		// Assert
		// Expect final error from the bulk publisher
		require.Error(t, err)
		assert.Equal(t, assert.AnError, err)
		assert.Len(t, res.FailedEntries, 6)
		assert.Len(t, bulkPublisher.entryIDRetryTimes, 6)
		t.Logf("event ID try count map %v\n\n", bulkPublisher.entryIDRetryTimes)
		// It is 3 here because the first try is before resiliency kicks in and then it is retried twice
		// which exhausts retries
		assertRetryCount(t, map[string]int{
			"0": 3,
			"2": 3,
			"4": 3,
			"1": 3,
			"3": 3,
			"5": 3,
		}, bulkPublisher.entryIDRetryTimes)
	})

	t.Run("partial failures with retries", func(t *testing.T) {
		// Setup
		// fail events with even Entry ID once, simulate partial failure
		bulkPublisher := NewMockBulkPublisher(t, 1, true, false)
		// set short retry with 3 retries max
		shortRetry.MaxRetries = ptr.Of(3)
		// timeout will not be triggered here
		policyProvider := createResPolicyProvider(resiliencyV1alpha.CircuitBreaker{}, longTimeout, shortRetry)
		policyDef := policyProvider.ComponentOutboundPolicy(pubsubName, resiliency.Pubsub)

		// Act
		res, err := ApplyBulkPublishResiliency(ctx, req, policyDef, bulkPublisher)

		// Assert
		// expecting no final error, all the events will pass in the second try
		require.NoError(t, err)
		assert.Empty(t, res.FailedEntries)
		assert.Len(t, bulkPublisher.entryIDRetryTimes, 6)
		t.Logf("event ID try count map %v\n\n", bulkPublisher.entryIDRetryTimes)
		// expecting only the even-ID'ed events to be retried
		assertRetryCount(t, map[string]int{
			"0": 2,
			"2": 2,
			"4": 2,
			"1": 1,
			"3": 1,
			"5": 1,
		}, bulkPublisher.entryIDRetryTimes)
	})

	t.Run("no failures", func(t *testing.T) {
		// Setup
		// no failures
		bulkPublisher := NewMockBulkPublisher(t, 0, false, false)
		// set short retry with 3 retries max
		shortRetry.MaxRetries = ptr.Of(3)
		// timeout will not be triggered here
		policyProvider := createResPolicyProvider(resiliencyV1alpha.CircuitBreaker{}, longTimeout, shortRetry)
		policyDef := policyProvider.ComponentOutboundPolicy(pubsubName, resiliency.Pubsub)

		// Act
		res, err := ApplyBulkPublishResiliency(ctx, req, policyDef, bulkPublisher)

		// Assert
		// expecting no final error, all the events will pass in a single try
		require.NoError(t, err)
		assert.Empty(t, res.FailedEntries)
		assert.Len(t, bulkPublisher.entryIDRetryTimes, 6)
		t.Logf("event ID try count map %v\n\n", bulkPublisher.entryIDRetryTimes)
		assertRetryCount(t, map[string]int{
			"0": 1,
			"2": 1,
			"4": 1,
			"1": 1,
			"3": 1,
			"5": 1,
		}, bulkPublisher.entryIDRetryTimes)
	})

	// Partial failures are not possible on timeouts, the whole bulk request will fail
	t.Run("fail all events on timeout", func(t *testing.T) {
		// Setup
		// fail all events due to timeout
		shortTimeout := "1s"
		bulkPublisher := NewMockBulkPublisher(t, 0, false, false)
		bulkPublisher.applyTimeout = true
		bulkPublisher.timeoutSleep = 5 * time.Second
		// set short retry with 0 retries max
		shortRetry.MaxRetries = ptr.Of(0)
		// timeout will be triggered here
		// no retries
		policyProvider := createResPolicyProvider(resiliencyV1alpha.CircuitBreaker{}, shortTimeout, shortRetry)
		policyDef := policyProvider.ComponentOutboundPolicy(pubsubName, resiliency.Pubsub)

		// Act
		res, err := ApplyBulkPublishResiliency(ctx, req, policyDef, bulkPublisher)

		// Assert
		require.Error(t, err)
		require.ErrorIs(t, err, context.DeadlineExceeded)
		assert.Len(t, res.FailedEntries, 6)
		// Not asserting the number of call times, since the count may or may not be
		// updated (component called) in the actual code. In this test, it is not updated.
	})

	t.Run("fail all events with circuitBreaker exhaust retries", func(t *testing.T) {
		// Setup
		// fail all events at least 10 times in a row
		// this will simulate circuitBreaker being triggered
		bulkPublisher := NewMockBulkPublisher(t, 10, true, true)
		// set a circuit breaker with 1 consecutive failure
		cb := resiliencyV1alpha.CircuitBreaker{
			Trip:        "consecutiveFailures > 1", // circuitBreaker will open after 1 failure, after the retries
			MaxRequests: 1,                         // only 1 request will be allowed when circuitBreaker is half-open
			Timeout:     "30s",                     // half-open after 30s. So in test this will not be triggered
		}
		// set short retry with 3 retries max
		shortRetry.MaxRetries = ptr.Of(3)
		// timeout will not be triggered here
		policyProvider := createResPolicyProvider(cb, longTimeout, shortRetry)
		policyDef := policyProvider.ComponentOutboundPolicy(pubsubName, resiliency.Pubsub)

		// Act
		// Make the request twice to make sure circuitBreaker is exhausted
		res, err := ApplyBulkPublishResiliency(ctx, req, policyDef, bulkPublisher)

		// Assert
		require.Error(t, err)
		assert.Equal(t, breaker.ErrOpenState, err)
		assert.Len(t, res.FailedEntries, 6)
		assert.Len(t, bulkPublisher.entryIDRetryTimes, 6)
		t.Logf("event ID try count map %v\n\n", bulkPublisher.entryIDRetryTimes)
		// It is 2 here because the first failure is before the resiliency policy starts,
		// and after the second failure — because the circuitBreaker is configured to trip
		// after a single failure — no other requests pass to the bulk publisher.
expectedCBRetryCount := map[string]int{ "0": 2, "2": 2, "4": 2, "1": 2, "3": 2, "5": 2, } assertRetryCount(t, expectedCBRetryCount, bulkPublisher.entryIDRetryTimes) // Act // Here the circuitBreaker is open and it will short the request, so the bulkPublisher will not be called res, err = ApplyBulkPublishResiliency(ctx, req, policyDef, bulkPublisher) // Assert require.Error(t, err) assert.Equal(t, breaker.ErrOpenState, err) assert.Len(t, res.FailedEntries, 6) assert.Len(t, bulkPublisher.entryIDRetryTimes, 6) t.Logf("event ID try count map %v\n\n", bulkPublisher.entryIDRetryTimes) // Same retry count, bulkPublisher is not called as CB is open assertRetryCount(t, expectedCBRetryCount, bulkPublisher.entryIDRetryTimes) }) t.Run("partial failures with circuitBreaker and exhaust retries", func(t *testing.T) { // Setup // fail events with even Ids at least 10 times in a row, simulate partial failures // this will also simulate circuitBreaker being triggered bulkPublisher := NewMockBulkPublisher(t, 10, true, false) // set a circuit breaker with 1 consecutive failure cb := resiliencyV1alpha.CircuitBreaker{ Trip: "consecutiveFailures > 1", // circuitBreaker will open after 1 failure, after the retries MaxRequests: 1, // only 1 request will be allowed when circuitBreaker is half-open Timeout: "30s", // half-open after 30s. So in test this will not be triggered } // set short retry with 3 retries max shortRetry.MaxRetries = ptr.Of(3) // timeout will not be triggered here policyProvider := createResPolicyProvider(cb, longTimeout, shortRetry) policyDef := policyProvider.ComponentOutboundPolicy(pubsubName, resiliency.Pubsub) // Act // Make the request twice to make sure circuitBreaker is exhausted res, err := ApplyBulkPublishResiliency(ctx, req, policyDef, bulkPublisher) // Assert require.Error(t, err) assert.Equal(t, breaker.ErrOpenState, err) assert.Len(t, res.FailedEntries, 3) assert.Len(t, bulkPublisher.entryIDRetryTimes, 6) t.Logf("event ID try count map %v\n\n", bulkPublisher.entryIDRetryTimes) // It is 2 here because the first failure is before resiliency policy starts // and after the second failure because the circuitBreaker is configured to trip after a single failure // no other requests pass to the bulk publisher. 
expectedCBRetryCount := map[string]int{ "0": 2, "2": 2, "4": 2, "1": 1, "3": 1, "5": 1, } assertRetryCount(t, expectedCBRetryCount, bulkPublisher.entryIDRetryTimes) // Act // Here the circuitBreaker is open and it will short the request, so the bulkPublisher will not be called res, err = ApplyBulkPublishResiliency(ctx, req, policyDef, bulkPublisher) // Assert require.Error(t, err) assert.Equal(t, breaker.ErrOpenState, err) assert.Len(t, res.FailedEntries, 6) assert.Len(t, bulkPublisher.entryIDRetryTimes, 6) t.Logf("event ID try count map %v\n\n", bulkPublisher.entryIDRetryTimes) // Same retry count, bulkPublisher is not called as CB is open assertRetryCount(t, expectedCBRetryCount, bulkPublisher.entryIDRetryTimes) }) t.Run("pass partial failure with CB with short half-open timeout", func(t *testing.T) { // Setup // fail events with even Ids 2 times in a row, simulate partial failures // this will also simulate circuitBreaker being triggered bulkPublisher := NewMockBulkPublisher(t, 2, true, false) // set a circuit breaker with 1 consecutive failure // short half-open timeout to make sure it is triggered cb := resiliencyV1alpha.CircuitBreaker{ Trip: "consecutiveFailures > 1", // circuitBreaker will open after 1 failure, after the retries MaxRequests: 1, // only 1 request will be allowed when circuitBreaker is half-open Timeout: "1ms", // half-open after 1ms. So in test this be triggered } // set short retry with 3 retries max shortRetry.MaxRetries = ptr.Of(3) // timeout will not be triggered here policyProvider := createResPolicyProvider(cb, longTimeout, shortRetry) policyDef := policyProvider.ComponentOutboundPolicy(pubsubName, resiliency.Pubsub) // Act // Make the request twice to make sure circuitBreaker is exhausted res, err := ApplyBulkPublishResiliency(ctx, req, policyDef, bulkPublisher) // Assert require.NoError(t, err) assert.Empty(t, res.FailedEntries) assert.Len(t, bulkPublisher.entryIDRetryTimes, 6) t.Logf("event ID try count map %v\n\n", bulkPublisher.entryIDRetryTimes) // It is 3 here because the first failure is before resiliency policy starts // and after the second failure because the circuitBreaker is configured to trip after a single failure // Additionally, once more retry is made and circuitBreaker is half-open expectedCBRetryCount := map[string]int{ "0": 3, "2": 3, "4": 3, "1": 1, "3": 1, "5": 1, } assertRetryCount(t, expectedCBRetryCount, bulkPublisher.entryIDRetryTimes) }) t.Run("pass partial failure with CB exhaust retries then act with short half-open timeout", func(t *testing.T) { // Setup // fail events with even Ids 2 times in a row, simulate partial failures // this will also simulate circuitBreaker being triggered bulkPublisher := NewMockBulkPublisher(t, 2, true, false) // set a circuit breaker with 1 consecutive failure // short half-open timeout to make sure it is triggered cb := resiliencyV1alpha.CircuitBreaker{ Trip: "consecutiveFailures > 1", // circuitBreaker will open after 1 failure, after the retries MaxRequests: 1, // only 1 request will be allowed when circuitBreaker is half-open Timeout: "4s", // half-open after 4s. 
So in test this be triggered } // set short retry with 3 retries max shortRetry.MaxRetries = ptr.Of(3) // timeout will not be triggered here policyProvider := createResPolicyProvider(cb, longTimeout, shortRetry) policyDef := policyProvider.ComponentOutboundPolicy(pubsubName, resiliency.Pubsub) // Act // Make the request twice to make sure circuitBreaker is exhausted res, err := ApplyBulkPublishResiliency(ctx, req, policyDef, bulkPublisher) // Assert require.Error(t, err) assert.Equal(t, breaker.ErrOpenState, err) assert.Len(t, res.FailedEntries, 3) assert.Len(t, bulkPublisher.entryIDRetryTimes, 6) t.Logf("event ID try count map %v\n\n", bulkPublisher.entryIDRetryTimes) // It is 2 here because the first failure is before resiliency policy starts // and after the second failure because the circuitBreaker is configured to trip after a single failure // no other requests pass to the bulk publisher. expectedCBRetryCount := map[string]int{ "0": 2, "2": 2, "4": 2, "1": 1, "3": 1, "5": 1, } assertRetryCount(t, expectedCBRetryCount, bulkPublisher.entryIDRetryTimes) // Sleep enough time so that CB switches to half-open state time.Sleep(5 * time.Second) // Act // mock bulk publisher will fail the request only twice, // the circuitBreaker will be half-open now and then after request served will be closed res, err = ApplyBulkPublishResiliency(ctx, req, policyDef, bulkPublisher) // Assert require.NoError(t, err) assert.Empty(t, res.FailedEntries) assert.Len(t, bulkPublisher.entryIDRetryTimes, 6) t.Logf("event ID try count map %v\n\n", bulkPublisher.entryIDRetryTimes) // Increase retry count for all event IDs, bulkPublisher is called as CB is half-open expectedCBRetryCount = map[string]int{ "0": 3, "2": 3, "4": 3, "1": 2, "3": 2, "5": 2, } assertRetryCount(t, expectedCBRetryCount, bulkPublisher.entryIDRetryTimes) }) t.Run("fail all events with short timeout CB and short retries", func(t *testing.T) { // Setup // fail events with even Ids at least 10 times in a row, simulate partial failures // this will also simulate circuitBreaker being triggered // timeout will be triggered here bulkPublisher := NewMockBulkPublisher(t, 10, true, false) shortTimeout := "1s" bulkPublisher.applyTimeout = true bulkPublisher.timeoutSleep = 5 * time.Second // set a circuit breaker with 1 consecutive failure cb := resiliencyV1alpha.CircuitBreaker{ Trip: "consecutiveFailures > 1", // circuitBreaker will open after 1 failure, after the retries MaxRequests: 1, // only 1 request will be allowed when circuitBreaker is half-open Timeout: "30s", // half-open after 30s. So in test this will not be triggered } // set short retry with 2 retries max shortRetry.MaxRetries = ptr.Of(2) // timeout will be triggered here policyProvider := createResPolicyProvider(cb, shortTimeout, shortRetry) policyDef := policyProvider.ComponentOutboundPolicy(pubsubName, resiliency.Pubsub) // Act // Make the request twice to make sure circuitBreaker is exhausted res, err := ApplyBulkPublishResiliency(ctx, req, policyDef, bulkPublisher) // Assert require.Error(t, err) assert.Equal(t, breaker.ErrOpenState, err) assert.Len(t, res.FailedEntries, 6) // all events fail on timeout // not asserting the number of called times since it may or may not be updated(component called) in actually code. // In test code, it is not updated. 
// Act // Here the circuitBreaker is open and it will short the request, so the bulkPublisher will not be called res, err = ApplyBulkPublishResiliency(ctx, req, policyDef, bulkPublisher) // Assert require.Error(t, err) assert.Equal(t, breaker.ErrOpenState, err) assert.Len(t, res.FailedEntries, 6) // Not aaserting the number of called times since it may or may not be updated(component called) in actually code. // In above case, it is not updated. }) t.Run("fail events with circuitBreaker, short timeout and long retries", func(t *testing.T) { // Setup // fail events with even Ids at least 10 times in a row, simulate partial failures // this will also simulate circuitBreaker being triggered // timeout will be triggered here // retries will take longer than the timeout // the background goroutines on timeout will complete bulkPublisher := NewMockBulkPublisher(t, 10, true, false) // short timeout shortTimeout := "1s" bulkPublisher.applyTimeout = true bulkPublisher.timeoutSleep = 5 * time.Second // retry time period twice that of timeout sleep and 10 times that of the timeout longRetry.MaxRetries = ptr.Of(2) // set a circuit breaker with 1 consecutive failure cb := resiliencyV1alpha.CircuitBreaker{ Trip: "consecutiveFailures > 1", // circuitBreaker will open after 1 failure, after the retries MaxRequests: 1, // only 1 request will be allowed when circuitBreaker is half-open Timeout: "30s", // half-open after 30s. So in test this will not be triggered } // timeout will be triggered here policyProvider := createResPolicyProvider(cb, shortTimeout, longRetry) policyDef := policyProvider.ComponentOutboundPolicy(pubsubName, resiliency.Pubsub) // Act // Make the request twice to make sure circuitBreaker is exhausted res, err := ApplyBulkPublishResiliency(ctx, req, policyDef, bulkPublisher) // Assert require.Error(t, err) assert.Equal(t, breaker.ErrOpenState, err) assert.Len(t, res.FailedEntries, 6) // all events fail on timeout // not asserting the number of called times since it may or may not be updated(component called) in actually code. // In test code, it is not updated. // Act // Here the circuitBreaker is open and it will short the request, so the bulkPublisher will not be called res, err = ApplyBulkPublishResiliency(ctx, req, policyDef, bulkPublisher) // Assert require.Error(t, err) assert.Equal(t, breaker.ErrOpenState, err) assert.Len(t, res.FailedEntries, 6) // Not aaserting the number of called times since it may or may not be updated(component called) in actually code. // In above case, it is not updated. 
}) } func createResPolicyProvider(ciruitBreaker resiliencyV1alpha.CircuitBreaker, timeout string, retry resiliencyV1alpha.Retry) *resiliency.Resiliency { r := &resiliencyV1alpha.Resiliency{ Spec: resiliencyV1alpha.ResiliencySpec{ Policies: resiliencyV1alpha.Policies{ Timeouts: map[string]string{ "pubsubTimeout": timeout, }, CircuitBreakers: map[string]resiliencyV1alpha.CircuitBreaker{ "pubsubCircuitBreaker": ciruitBreaker, }, Retries: map[string]resiliencyV1alpha.Retry{ "pubsubRetry": retry, }, }, Targets: resiliencyV1alpha.Targets{ Components: map[string]resiliencyV1alpha.ComponentPolicyNames{ "test-pubsub": { Outbound: resiliencyV1alpha.PolicyNames{ Timeout: "pubsubTimeout", CircuitBreaker: "pubsubCircuitBreaker", Retry: "pubsubRetry", }, }, }, }, }, } return resiliency.FromConfigurations(testLogger, r) } func assertRetryCount(t *testing.T, expectedIDRetryCountMap map[string]int, actualRetryCountMap map[string]int) { for k, v := range expectedIDRetryCountMap { assert.Equal(t, v, actualRetryCountMap[k], "expected retry/try count to match") } }
mikeee/dapr
pkg/runtime/pubsub/bulkpublish_resiliency_test.go
GO
mit
24,739
/*
Copyright 2022 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pubsub

import (
	"github.com/google/uuid"

	"github.com/dapr/kit/logger"

	contribPubsub "github.com/dapr/components-contrib/pubsub"
)

const (
	Metadata = "metadata"
	Entries  = "entries"
)

var bulkPSLogger = logger.NewLogger("bulk.subscribe")

type BulkSubscribeMessageItem struct {
	EntryId     string            `json:"entryId"` //nolint:stylecheck
	Event       interface{}       `json:"event"`
	Metadata    map[string]string `json:"metadata"`
	ContentType string            `json:"contentType,omitempty"`
}

type BulkSubscribeEnvelope struct {
	ID        string
	Entries   []BulkSubscribeMessageItem
	Metadata  map[string]string
	Topic     string
	Pubsub    string
	EventType string
}

func NewBulkSubscribeEnvelope(req *BulkSubscribeEnvelope) map[string]interface{} {
	id := req.ID
	if id == "" {
		reqID, err := uuid.NewRandom()
		if err != nil {
			bulkPSLogger.Warn("Unable to generate uuid for bulk subscribe request")
		} else {
			id = reqID.String()
		}
	}

	eventType := req.EventType
	if eventType == "" {
		eventType = contribPubsub.DefaultBulkEventType
	}

	bulkSubEnvelope := map[string]interface{}{
		contribPubsub.IDField:     id,
		contribPubsub.TypeField:   eventType,
		contribPubsub.TopicField:  req.Topic,
		contribPubsub.PubsubField: req.Pubsub,
		Metadata:                  req.Metadata,
		Entries:                   req.Entries,
	}

	return bulkSubEnvelope
}
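
// exampleBulkSubscribeEnvelope is a hedged usage sketch, not part of the
// upstream API: it shows how a caller might assemble a BulkSubscribeEnvelope
// (the topic, pubsub and entry values here are hypothetical) and turn it into
// the map-based envelope delivered to the app. Leaving ID empty makes
// NewBulkSubscribeEnvelope generate a UUID.
func exampleBulkSubscribeEnvelope() map[string]interface{} {
	env := &BulkSubscribeEnvelope{
		Topic:  "orders",
		Pubsub: "my-pubsub",
		Entries: []BulkSubscribeMessageItem{
			{EntryId: "1", Event: "hello", ContentType: "text/plain"},
		},
	}
	return NewBulkSubscribeEnvelope(env)
}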
mikeee/dapr
pkg/runtime/pubsub/bulksubscribe_events.go
GO
mit
1,957
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pubsub

import (
	"github.com/mitchellh/mapstructure"

	contribContenttype "github.com/dapr/components-contrib/contenttype"
	contribPubsub "github.com/dapr/components-contrib/pubsub"
)

// CloudEvent is a request object to create a Dapr-compliant cloudevent.
// The cloud event properties can be overwritten manually via metadata entries
// with the "cloudevent." prefix.
type CloudEvent struct {
	ID              string `mapstructure:"cloudevent.id"`
	Data            []byte `mapstructure:"-"` // cannot be overridden
	Topic           string `mapstructure:"-"` // cannot be overridden
	Pubsub          string `mapstructure:"-"` // cannot be overridden
	DataContentType string `mapstructure:"-"` // cannot be overridden
	TraceID         string `mapstructure:"cloudevent.traceid"`
	TraceState      string `mapstructure:"cloudevent.tracestate"`
	Source          string `mapstructure:"cloudevent.source"`
	Type            string `mapstructure:"cloudevent.type"`
	TraceParent     string `mapstructure:"cloudevent.traceparent"`
}

// NewCloudEvent encapsulates the creation of a Dapr cloudevent from an existing cloudevent or a raw payload.
func NewCloudEvent(req *CloudEvent, metadata map[string]string) (map[string]interface{}, error) {
	if contribContenttype.IsCloudEventContentType(req.DataContentType) {
		return contribPubsub.FromCloudEvent(req.Data, req.Topic, req.Pubsub, req.TraceID, req.TraceState)
	}

	// Certain metadata entries with the "cloudevent." prefix are considered overrides to the cloudevent envelope.
	// Any error here is ignored, as the original cloud event envelope is still valid.
	_ = mapstructure.WeakDecode(metadata, req) // allows ignoring of case

	// The final cloud event envelope contains both "traceid" and "traceparent" set to the same value (req.TraceID).
	// Eventually "traceid" will be deprecated, as it was superseded by "traceparent".
	// Currently "traceparent" is not set by the pubsub component and can only be set by the user via a metadata override.
	// Therefore, if an override is set for "traceparent" we use it; otherwise we use the original or overridden "traceid" value.
	if req.TraceParent != "" {
		req.TraceID = req.TraceParent
	}
	return contribPubsub.NewCloudEventsEnvelope(req.ID, req.Source, req.Type, "", req.Topic, req.Pubsub,
		req.DataContentType, req.Data, req.TraceID, req.TraceState), nil
}
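
// exampleCloudEventOverride is a hedged sketch, not part of the upstream
// file: it shows how "cloudevent."-prefixed metadata overrides envelope
// fields, while Data, Topic, Pubsub and DataContentType cannot be overridden.
// The topic, pubsub and override values are hypothetical.
func exampleCloudEventOverride() (map[string]interface{}, error) {
	return NewCloudEvent(&CloudEvent{
		Data:   []byte("hello"),
		Topic:  "orders",
		Pubsub: "my-pubsub",
	}, map[string]string{
		"cloudevent.source": "checkout-service", // overrides the envelope source
		"cloudevent.id":     "order-123",        // overrides the generated id
	})
}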
mikeee/dapr
pkg/runtime/pubsub/cloudevents.go
GO
mit
2,905
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pubsub

import (
	"encoding/json"
	"testing"

	"github.com/google/uuid"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestNewCloudEvent(t *testing.T) {
	t.Run("raw payload", func(t *testing.T) {
		ce, err := NewCloudEvent(&CloudEvent{
			ID:              "",
			Source:          "a",
			Topic:           "b",
			Data:            []byte("hello"),
			Pubsub:          "c",
			DataContentType: "",
			TraceID:         "d",
			Type:            "custom-type",
		}, map[string]string{})
		require.NoError(t, err)
		assert.NotEmpty(t, ce["id"])                 // validates that the ID is generated
		assert.True(t, validUUID(ce["id"].(string))) // validates that the ID is a UUID
		assert.Equal(t, "a", ce["source"].(string))
		assert.Equal(t, "b", ce["topic"].(string))
		assert.Equal(t, "hello", ce["data"].(string))
		assert.Equal(t, "text/plain", ce["datacontenttype"].(string))
		assert.Equal(t, "d", ce["traceid"].(string))
		assert.Equal(t, "custom-type", ce["type"].(string))
	})

	t.Run("raw payload no data", func(t *testing.T) {
		ce, err := NewCloudEvent(&CloudEvent{
			ID:              "testid",
			Source:          "", // defaults to "Dapr"
			Topic:           "b",
			Pubsub:          "c",
			DataContentType: "", // defaults to "text/plain"
			TraceID:         "d",
			Type:            "", // defaults to "com.dapr.event.sent"
		}, map[string]string{})
		require.NoError(t, err)
		assert.Equal(t, "testid", ce["id"].(string))
		assert.Equal(t, "Dapr", ce["source"].(string))
		assert.Equal(t, "b", ce["topic"].(string))
		assert.Empty(t, ce["data"])
		assert.Equal(t, "text/plain", ce["datacontenttype"].(string))
		assert.Equal(t, "d", ce["traceid"].(string))
		assert.Equal(t, "com.dapr.event.sent", ce["type"].(string))
	})

	t.Run("cloud event metadata override", func(t *testing.T) {
		ce, err := NewCloudEvent(&CloudEvent{
			Topic:           "originaltopic",
			Pubsub:          "originalpubsub",
			DataContentType: "originaldatacontenttype",
			Data:            []byte("originaldata"),
		}, map[string]string{
			// these properties should not actually override anything
			"cloudevent.topic":           "overridetopic",
			"cloudevent.pubsub":          "overridepubsub",
			"cloudevent.data":            "overridedata",
			"cloudevent.datacontenttype": "overridedatacontenttype",
			// these properties should override
			"cloudevent.source":      "overridesource",
			"cloudevent.id":          "overrideid",
			"cloudevent.type":        "overridetype",
			"cloudevent.traceparent": "overridetraceparent",
			"cloudevent.tracestate":  "overridetracestate",
		})
		require.NoError(t, err)
		assert.Equal(t, "originalpubsub", ce["pubsubname"].(string))
		assert.Equal(t, "originaltopic", ce["topic"].(string))
		assert.Equal(t, "originaldata", ce["data"].(string))
		assert.Equal(t, "originaldatacontenttype", ce["datacontenttype"].(string))
		assert.Equal(t, "overridetraceparent", ce["traceid"].(string))
		assert.Equal(t, "overridetracestate", ce["tracestate"].(string))
		assert.Equal(t, "overridetype", ce["type"].(string))
		assert.Equal(t, "overridesource", ce["source"].(string))
		assert.Equal(t, "overrideid", ce["id"].(string))
		assert.Equal(t, "overridetraceparent", ce["traceparent"].(string))
	})

	t.Run("custom cloudevent", func(t *testing.T) {
		m := map[string]interface{}{
			"specversion":     "1.0",
			"id":              "event",
			"datacontenttype": "text/plain",
			"data":            "world",
		}
		b, _ := json.Marshal(m)
		ce, err := NewCloudEvent(&CloudEvent{
			Data:            b,
			DataContentType: "application/cloudevents+json",
			Topic:           "topic1",
			TraceID:         "trace1",
			Pubsub:          "pubsub",
		}, map[string]string{})
		require.NoError(t, err)
		assert.Equal(t, "event", ce["id"].(string))
		assert.Equal(t, "world", ce["data"].(string))
		assert.Equal(t, "text/plain", ce["datacontenttype"].(string))
		assert.Equal(t, "topic1", ce["topic"].(string))
		assert.Equal(t, "trace1", ce["traceid"].(string))
		assert.Equal(t, "pubsub", ce["pubsubname"].(string))
	})
}

func validUUID(u string) bool {
	_, err := uuid.Parse(u)
	return err == nil
}
mikeee/dapr
pkg/runtime/pubsub/cloudevents_test.go
GO
mit
4,802
/*
Copyright 2022 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pubsub

import (
	"context"
	"errors"

	"golang.org/x/sync/errgroup"

	contribPubsub "github.com/dapr/components-contrib/pubsub"
)

const (
	defaultBulkPublishMaxConcurrency int = 100
)

var ErrBulkPublishFailure = errors.New("bulk publish failed")

// defaultBulkPublisher is the default implementation of BulkPublisher.
// It is used when the component does not implement BulkPublisher.
type defaultBulkPublisher struct {
	p contribPubsub.PubSub
}

// NewDefaultBulkPublisher returns a new defaultBulkPublisher from a PubSub.
func NewDefaultBulkPublisher(p contribPubsub.PubSub) contribPubsub.BulkPublisher {
	return &defaultBulkPublisher{
		p: p,
	}
}

// BulkPublish publishes a list of messages as parallel Publish requests to the topic in the incoming request.
// There is no guarantee that messages sent to the broker are in the same order as specified in the request.
func (p *defaultBulkPublisher) BulkPublish(ctx context.Context, req *contribPubsub.BulkPublishRequest) (contribPubsub.BulkPublishResponse, error) {
	failedEntries := make([]contribPubsub.BulkPublishResponseFailedEntry, 0, len(req.Entries))

	var eg errgroup.Group
	eg.SetLimit(defaultBulkPublishMaxConcurrency)

	failedEntryChan := make(chan contribPubsub.BulkPublishResponseFailedEntry, len(req.Entries))
	for i := range req.Entries {
		entry := req.Entries[i]
		eg.Go(func() error {
			failedEntry := p.bulkPublishSingleEntry(ctx, req.PubsubName, req.Topic, entry)
			if failedEntry != nil {
				failedEntryChan <- *failedEntry
				return failedEntry.Error
			}
			return nil
		})
	}

	err := eg.Wait()
	close(failedEntryChan)

	for entry := range failedEntryChan {
		failedEntries = append(failedEntries, entry)
	}

	return contribPubsub.BulkPublishResponse{FailedEntries: failedEntries}, err
}

// bulkPublishSingleEntry sends a single message to the broker as a Publish request.
func (p *defaultBulkPublisher) bulkPublishSingleEntry(ctx context.Context, pubsubName, topic string, entry contribPubsub.BulkMessageEntry) *contribPubsub.BulkPublishResponseFailedEntry {
	pr := contribPubsub.PublishRequest{
		Data:        entry.Event,
		PubsubName:  pubsubName,
		Topic:       topic,
		Metadata:    entry.Metadata,
		ContentType: &entry.ContentType,
	}
	if err := p.p.Publish(ctx, &pr); err != nil {
		return &contribPubsub.BulkPublishResponseFailedEntry{
			EntryId: entry.EntryId,
			Error:   err,
		}
	}
	return nil
}
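
// exampleDefaultBulkPublish is a hedged sketch, not part of the upstream
// file: given any contribPubsub.PubSub, it shows how the default bulk
// publisher fans a BulkPublishRequest out as individual Publish calls.
// The pubsub name, topic and entry values are hypothetical.
func exampleDefaultBulkPublish(ctx context.Context, ps contribPubsub.PubSub) error {
	bp := NewDefaultBulkPublisher(ps)
	res, err := bp.BulkPublish(ctx, &contribPubsub.BulkPublishRequest{
		PubsubName: "my-pubsub",
		Topic:      "orders",
		Entries: []contribPubsub.BulkMessageEntry{
			{EntryId: "1", Event: []byte("hello"), ContentType: "text/plain"},
		},
	})
	// Entries listed in res.FailedEntries failed; all others were published.
	_ = res
	return err
}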
mikeee/dapr
pkg/runtime/pubsub/default_bulkpub.go
GO
mit
2,966
/*
Copyright 2022 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pubsub

import (
	"context"
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	contribPubsub "github.com/dapr/components-contrib/pubsub"
	daprt "github.com/dapr/dapr/pkg/testing"
)

func TestBulkPublish_DefaultBulkPublisher(t *testing.T) {
	req := &contribPubsub.BulkPublishRequest{
		Entries: []contribPubsub.BulkMessageEntry{
			{
				EntryId:     "78a48b5c-ff5a-4275-9bef-4a3bb8eefc3b",
				Event:       []byte("event1"),
				ContentType: "text/plain",
				Metadata:    map[string]string{},
			},
			{
				EntryId:     "d64669e2-fab6-4452-a933-8de44e26ca02",
				Event:       []byte("event2"),
				ContentType: "text/plain",
				Metadata:    map[string]string{},
			},
			{
				EntryId:     "b3b4b2e1-2b9b-4b9b-9b9b-9b9b9b9b9b9b",
				Event:       []byte("event3"),
				ContentType: "text/plain",
				Metadata:    map[string]string{},
			},
		},
		PubsubName: "pubsub",
		Topic:      "topic",
		Metadata:   map[string]string{},
	}

	tcs := []struct {
		name          string
		publishErrors []error
		nErrors       int
	}{
		{
			name:          "default bulk publish without publish errors",
			publishErrors: []error{nil, nil, nil},
			nErrors:       0,
		},
		{
			name:          "default bulk publish with all publish errors",
			publishErrors: []error{errors.New("publish error"), errors.New("publish error"), errors.New("publish error")},
			nErrors:       3,
		},
		{
			name:          "default bulk publish with partial publish errors",
			publishErrors: []error{nil, nil, errors.New("publish error")},
			nErrors:       1,
		},
	}

	for _, tc := range tcs {
		t.Run(tc.name, func(t *testing.T) {
			// Create publish requests for each message in the bulk request.
			var pubReqs []*contribPubsub.PublishRequest
			for _, entry := range req.Entries {
				contentType := entry.ContentType
				pubReqs = append(pubReqs, &contribPubsub.PublishRequest{
					Data:        entry.Event,
					ContentType: &contentType,
					Metadata:    entry.Metadata,
					PubsubName:  req.PubsubName,
					Topic:       req.Topic,
				})
			}

			// Set up the mock pubsub to return the publish errors.
			mockPubSub := &daprt.MockPubSub{Mock: mock.Mock{}}
			for i, e := range tc.publishErrors {
				mockPubSub.Mock.On("Publish", pubReqs[i]).Return(e)
			}

			bulkPublisher := NewDefaultBulkPublisher(mockPubSub)
			res, err := bulkPublisher.BulkPublish(context.Background(), req)

			// Check whether the bulk publish method returns an error.
			if tc.nErrors > 0 {
				require.Error(t, err)
				// The response should contain an entry for each failed message in the bulk request.
				assert.Len(t, res.FailedEntries, tc.nErrors)
			} else {
				require.NoError(t, err)
				assert.Empty(t, res.FailedEntries)
			}

			var pubInvocationArgs []*contribPubsub.PublishRequest

			// Assert that all Publish requests have the correct topic and pubsub name.
			for _, call := range mockPubSub.Calls {
				assert.Equal(t, "Publish", call.Method)

				pubReq, ok := call.Arguments.Get(0).(*contribPubsub.PublishRequest)
				assert.True(t, ok)
				assert.Equal(t, req.PubsubName, pubReq.PubsubName)
				assert.Equal(t, req.Topic, pubReq.Topic)

				pubInvocationArgs = append(pubInvocationArgs, pubReq)
			}

			// Assert that a Publish request exists for every message that was in the bulk publish request.
			for _, pubReq := range pubReqs {
				assert.Contains(t, pubInvocationArgs, pubReq)
			}
		})
	}
}
mikeee/dapr
pkg/runtime/pubsub/default_bulkpub_test.go
GO
mit
4,057
/*
Copyright 2022 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pubsub

import (
	"context"
	"time"

	"github.com/google/uuid"
	"golang.org/x/exp/maps"

	contribPubsub "github.com/dapr/components-contrib/pubsub"
	"github.com/dapr/dapr/utils"
)

const (
	defaultMaxMessagesCount   int = 100
	defaultMaxAwaitDurationMs int = 1 * 1000
)

// msgWithCallback is a wrapper around a message that includes a callback function
// that is called when the message is processed.
type msgWithCallback struct {
	msg contribPubsub.BulkMessageEntry
	cb  func(error)
}

// defaultBulkSubscriber is the default implementation of BulkSubscriber.
// It is used when the component does not implement BulkSubscriber.
type defaultBulkSubscriber struct {
	p contribPubsub.PubSub
}

// NewDefaultBulkSubscriber returns a new defaultBulkSubscriber from a PubSub.
func NewDefaultBulkSubscriber(p contribPubsub.PubSub) *defaultBulkSubscriber {
	return &defaultBulkSubscriber{
		p: p,
	}
}

// BulkSubscribe subscribes to a topic using a BulkHandler.
// Dapr buffers messages in memory and calls the handler with a list of messages
// when the buffer is full or the max await duration is reached.
func (p *defaultBulkSubscriber) BulkSubscribe(ctx context.Context, req contribPubsub.SubscribeRequest, handler contribPubsub.BulkHandler) error {
	cfg := contribPubsub.BulkSubscribeConfig{
		MaxMessagesCount:   utils.GetIntValOrDefault(req.BulkSubscribeConfig.MaxMessagesCount, defaultMaxMessagesCount),
		MaxAwaitDurationMs: utils.GetIntValOrDefault(req.BulkSubscribeConfig.MaxAwaitDurationMs, defaultMaxAwaitDurationMs),
	}

	msgCbChan := make(chan msgWithCallback, cfg.MaxMessagesCount)
	go processBulkMessages(ctx, req.Topic, msgCbChan, cfg, handler)

	// Subscribe to the topic and listen for messages.
	return p.p.Subscribe(ctx, req, func(ctx context.Context, msg *contribPubsub.NewMessage) error {
		entryId, err := uuid.NewRandom() //nolint:stylecheck
		if err != nil {
			return err
		}

		bulkMsgEntry := contribPubsub.BulkMessageEntry{
			EntryId:  entryId.String(),
			Event:    msg.Data,
			Metadata: msg.Metadata,
		}
		if msg.ContentType != nil {
			bulkMsgEntry.ContentType = *msg.ContentType
		}

		done := make(chan struct{})
		msgCbChan <- msgWithCallback{
			msg: bulkMsgEntry,
			cb: func(ierr error) {
				err = ierr
				close(done)
			},
		}

		// Wait for the message to be processed.
		<-done
		return err
	})
}

// processBulkMessages reads messages from msgCbChan and publishes them to a BulkHandler.
// It buffers messages in memory and publishes them in bulk.
func processBulkMessages(ctx context.Context, topic string, msgCbChan <-chan msgWithCallback, cfg contribPubsub.BulkSubscribeConfig, handler contribPubsub.BulkHandler) {
	messages := make([]contribPubsub.BulkMessageEntry, cfg.MaxMessagesCount)
	msgCbMap := make(map[string]func(error), cfg.MaxMessagesCount)

	ticker := time.NewTicker(time.Duration(cfg.MaxAwaitDurationMs) * time.Millisecond)
	defer ticker.Stop()

	n := 0
	for {
		select {
		case <-ctx.Done():
			flushMessages(ctx, topic, messages[:n], msgCbMap, handler)
			return
		case msgCb := <-msgCbChan:
			messages[n] = msgCb.msg
			n++
			msgCbMap[msgCb.msg.EntryId] = msgCb.cb
			if n >= cfg.MaxMessagesCount {
				flushMessages(ctx, topic, messages[:n], msgCbMap, handler)
				n = 0
				maps.Clear(msgCbMap)
			}
		case <-ticker.C:
			flushMessages(ctx, topic, messages[:n], msgCbMap, handler)
			n = 0
			maps.Clear(msgCbMap)
		}
	}
}

// flushMessages writes messages to a BulkHandler and clears the messages slice.
func flushMessages(ctx context.Context, topic string, messages []contribPubsub.BulkMessageEntry, msgCbMap map[string]func(error), handler contribPubsub.BulkHandler) {
	if len(messages) == 0 {
		return
	}

	responses, err := handler(ctx, &contribPubsub.BulkMessage{
		Topic:    topic,
		Metadata: map[string]string{},
		Entries:  messages,
	})
	if err != nil {
		if responses != nil {
			// invoke the callback for each message
			for _, r := range responses {
				if cb, ok := msgCbMap[r.EntryId]; ok {
					cb(r.Error)
				}
			}
		} else {
			// all messages failed
			for _, cb := range msgCbMap {
				cb(err)
			}
		}
	} else {
		// no error has occurred
		for _, cb := range msgCbMap {
			cb(nil)
		}
	}
}
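
// exampleDefaultBulkSubscribe is a hedged sketch, not part of the upstream
// file: it shows how a caller might drive the default bulk subscriber with
// explicit buffering limits. The topic and limits are hypothetical; a nil
// error from the handler acks every entry in the flushed batch.
func exampleDefaultBulkSubscribe(ctx context.Context, ps contribPubsub.PubSub) error {
	bs := NewDefaultBulkSubscriber(ps)
	req := contribPubsub.SubscribeRequest{
		Topic: "orders",
		BulkSubscribeConfig: contribPubsub.BulkSubscribeConfig{
			MaxMessagesCount:   10,   // flush after 10 buffered messages...
			MaxAwaitDurationMs: 1000, // ...or after 1s, whichever comes first
		},
	}
	return bs.BulkSubscribe(ctx, req, func(ctx context.Context, msg *contribPubsub.BulkMessage) ([]contribPubsub.BulkSubscribeResponseEntry, error) {
		// Process msg.Entries here; returning nil acks the whole batch.
		return nil, nil
	})
}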
mikeee/dapr
pkg/runtime/pubsub/default_bulksub.go
GO
mit
4,747
/*
Copyright 2022 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pubsub

import (
	"context"
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	contribPubsub "github.com/dapr/components-contrib/pubsub"
)

func TestFlushMessages(t *testing.T) {
	emptyMessages := []contribPubsub.BulkMessageEntry{}
	sampleMessages := []contribPubsub.BulkMessageEntry{
		{EntryId: "1"},
		{EntryId: "2"},
	}
	sampleMsgCbMap := map[string]func(error){
		"1": func(err error) {},
		"2": func(err error) {},
	}

	t.Run("flushMessages should call handler with messages", func(t *testing.T) {
		tests := []struct {
			name                   string
			messages               []contribPubsub.BulkMessageEntry
			msgCbMap               map[string]func(error)
			expectedHandlerInvoked bool
		}{
			{
				name:                   "handler should not be invoked when messages is empty",
				messages:               emptyMessages,
				msgCbMap:               sampleMsgCbMap,
				expectedHandlerInvoked: false,
			},
			{
				name:                   "handler should be invoked with all messages when messages is not empty",
				messages:               sampleMessages,
				msgCbMap:               sampleMsgCbMap,
				expectedHandlerInvoked: true,
			},
		}

		for _, tc := range tests {
			t.Run(tc.name, func(t *testing.T) {
				handlerInvoked := false
				handler := func(ctx context.Context, msg *contribPubsub.BulkMessage) (
					[]contribPubsub.BulkSubscribeResponseEntry, error,
				) {
					handlerInvoked = true
					assert.Equal(t, len(tc.messages), len(msg.Entries))
					for _, entry := range msg.Entries {
						assert.Contains(t, tc.messages, entry)
					}
					return nil, nil
				}

				flushMessages(context.Background(), "topic", tc.messages, tc.msgCbMap, handler)
				assert.Equal(t, tc.expectedHandlerInvoked, handlerInvoked)
			})
		}
	})

	t.Run("flushMessages should invoke callbacks based on handler response", func(t *testing.T) {
		messages := []contribPubsub.BulkMessageEntry{
			{EntryId: "1"},
			{EntryId: "2"},
			{EntryId: "3"},
		}

		tests := []struct {
			name             string
			handlerResponses []contribPubsub.BulkSubscribeResponseEntry
			handlerErr       error
			entryIdErrMap    map[string]struct{} //nolint:stylecheck
		}{
			{
				"all callbacks should be invoked with nil error when handler returns nil error",
				[]contribPubsub.BulkSubscribeResponseEntry{
					{EntryId: "1"},
					{EntryId: "2"},
				},
				nil,
				map[string]struct{}{},
			},
			{
				"all callbacks should be invoked with error when handler returns error and responses is nil",
				nil,
				errors.New("handler error"),
				map[string]struct{}{
					"1": {},
					"2": {},
					"3": {},
				},
			},
			{
				"failed messages' callbacks should be invoked with error when handler returns error and responses is not nil",
				[]contribPubsub.BulkSubscribeResponseEntry{
					{EntryId: "1", Error: errors.New("failed message")},
					{EntryId: "2"},
					{EntryId: "3", Error: errors.New("failed message")},
				},
				errors.New("handler error"),
				map[string]struct{}{
					"1": {},
					"3": {},
				},
			},
		}

		for _, tc := range tests {
			t.Run(tc.name, func(t *testing.T) {
				handler := func(ctx context.Context, msg *contribPubsub.BulkMessage) (
					[]contribPubsub.BulkSubscribeResponseEntry, error,
				) {
					return tc.handlerResponses, tc.handlerErr
				}

				invokedCallbacks := make(map[string]error)
				msgCbMap := map[string]func(error){
					"1": func(err error) { invokedCallbacks["1"] = err },
					"2": func(err error) { invokedCallbacks["2"] = err },
					"3": func(err error) { invokedCallbacks["3"] = err },
				}

				flushMessages(context.Background(), "topic", messages, msgCbMap, handler)

				for id, err := range invokedCallbacks {
					if _, ok := tc.entryIdErrMap[id]; ok {
						require.Error(t, err)
					} else {
						require.NoError(t, err)
					}
				}
			})
		}
	})
}
mikeee/dapr
pkg/runtime/pubsub/default_bulksub_test.go
GO
mit
4,461
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pubsub

import (
	"errors"
	"fmt"

	"github.com/dapr/dapr/pkg/messages"
)

// TODO: remove this and use apierrors.PubSubMsgDropped
var ErrMessageDropped = errors.New("pubsub message dropped")

// NotFoundError is returned by the runtime when the pubsub does not exist.
type NotFoundError struct {
	PubsubName string
}

func (e NotFoundError) Error() string {
	return fmt.Sprintf("pubsub '%s' not found", e.PubsubName)
}

// NotAllowedError is returned by the runtime when publishing is forbidden.
type NotAllowedError struct {
	Topic string
	ID    string
}

func (e NotAllowedError) Error() string {
	return fmt.Sprintf(messages.ErrPubsubForbidden, e.Topic, e.ID)
}
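
// exampleClassifyPublishError is a hedged sketch, not part of the upstream
// file: it shows how a caller might branch on the typed pubsub errors above
// using errors.Is and errors.As. The returned labels are hypothetical.
func exampleClassifyPublishError(err error) string {
	var nfe NotFoundError
	var nae NotAllowedError
	switch {
	case errors.Is(err, ErrMessageDropped):
		return "message dropped"
	case errors.As(err, &nfe):
		return "pubsub not found: " + nfe.PubsubName
	case errors.As(err, &nae):
		return "publishing forbidden on topic: " + nae.Topic
	default:
		return "other error"
	}
}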
mikeee/dapr
pkg/runtime/pubsub/errors.go
GO
mit
1,249
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pubsub

import (
	"context"
	"encoding/json"
	"fmt"
	"strings"
	"sync"
	"time"

	"github.com/cenkalti/backoff/v4"
	"github.com/google/uuid"

	contribPubsub "github.com/dapr/components-contrib/pubsub"
	"github.com/dapr/components-contrib/state"
	"github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	"github.com/dapr/dapr/pkg/outbox"
	"github.com/dapr/kit/logger"
	"github.com/dapr/kit/utils"
)

const (
	outboxPublishPubsubKey           = "outboxPublishPubsub"
	outboxPublishTopicKey            = "outboxPublishTopic"
	outboxPubsubKey                  = "outboxPubsub"
	outboxDiscardWhenMissingStateKey = "outboxDiscardWhenMissingState"

	outboxStatePrefix     = "outbox"
	defaultStateScanDelay = time.Second * 1
)

var outboxLogger = logger.NewLogger("dapr.outbox")

type outboxConfig struct {
	publishPubSub                 string
	publishTopic                  string
	outboxPubsub                  string
	outboxDiscardWhenMissingState bool
}

type outboxImpl struct {
	cloudEventExtractorFn func(map[string]any, string) string
	getPubsubFn           func(string) (contribPubsub.PubSub, bool)
	getStateFn            func(string) (state.Store, bool)
	publisher             Adapter
	outboxStores          map[string]outboxConfig
	lock                  sync.RWMutex
	namespace             string
}

type OptionsOutbox struct {
	Publisher             Adapter
	GetPubsubFn           func(string) (contribPubsub.PubSub, bool)
	GetStateFn            func(string) (state.Store, bool)
	CloudEventExtractorFn func(map[string]any, string) string
	Namespace             string
}

// NewOutbox returns an instance of an Outbox.
func NewOutbox(opts OptionsOutbox) outbox.Outbox {
	return &outboxImpl{
		cloudEventExtractorFn: opts.CloudEventExtractorFn,
		getPubsubFn:           opts.GetPubsubFn,
		getStateFn:            opts.GetStateFn,
		publisher:             opts.Publisher,
		outboxStores:          make(map[string]outboxConfig),
		namespace:             opts.Namespace,
	}
}

// AddOrUpdateOutbox examines a state store for outbox properties and saves them for later use in outbox operations.
func (o *outboxImpl) AddOrUpdateOutbox(stateStore v1alpha1.Component) {
	var publishPubSub, publishTopicKey, outboxPubsub string
	var outboxDiscardWhenMissingState bool

	for _, v := range stateStore.Spec.Metadata {
		switch v.Name {
		case outboxPublishPubsubKey:
			publishPubSub = v.Value.String()
		case outboxPublishTopicKey:
			publishTopicKey = v.Value.String()
		case outboxPubsubKey:
			outboxPubsub = v.Value.String()
		case outboxDiscardWhenMissingStateKey:
			outboxDiscardWhenMissingState = utils.IsTruthy(v.Value.String())
		}
	}

	if publishPubSub != "" && publishTopicKey != "" {
		o.lock.Lock()
		defer o.lock.Unlock()

		if outboxPubsub == "" {
			outboxPubsub = publishPubSub
		}

		o.outboxStores[stateStore.Name] = outboxConfig{
			publishPubSub:                 publishPubSub,
			publishTopic:                  publishTopicKey,
			outboxPubsub:                  outboxPubsub,
			outboxDiscardWhenMissingState: outboxDiscardWhenMissingState,
		}
	}
}

// Enabled returns a bool indicating whether the given state store has the outbox configured.
func (o *outboxImpl) Enabled(stateStore string) bool {
	o.lock.RLock()
	defer o.lock.RUnlock()

	_, ok := o.outboxStores[stateStore]
	return ok
}

func transaction() (state.TransactionalStateOperation, error) {
	uid, err := uuid.NewRandom()
	if err != nil {
		return nil, err
	}

	return state.SetRequest{
		Key:   outboxStatePrefix + "-" + uid.String(),
		Value: "0",
	}, nil
}

// PublishInternal publishes the state to an internal topic for outbox processing and returns the updated list of transactions.
func (o *outboxImpl) PublishInternal(ctx context.Context, stateStore string, operations []state.TransactionalStateOperation, source, traceID, traceState string) ([]state.TransactionalStateOperation, error) {
	o.lock.RLock()
	c, ok := o.outboxStores[stateStore]
	o.lock.RUnlock()

	if !ok {
		return nil, fmt.Errorf("error publishing internal outbox message: could not find outbox configuration on state store %s", stateStore)
	}

	projections := map[string]state.SetRequest{}
	for i, op := range operations {
		sr, ok := op.(state.SetRequest)
		if ok {
			for k, v := range sr.Metadata {
				if k == "outbox.projection" && utils.IsTruthy(v) {
					projections[sr.Key] = sr
					operations = append(operations[:i], operations[i+1:]...)
				}
			}
		}
	}

	for _, op := range operations {
		sr, ok := op.(state.SetRequest)
		if ok {
			tr, err := transaction()
			if err != nil {
				return nil, err
			}

			var payload any
			var contentType string

			if proj, ok := projections[sr.Key]; ok {
				payload = proj.Value
				if proj.ContentType != nil {
					contentType = *proj.ContentType
				}
			} else {
				payload = sr.Value
				if sr.ContentType != nil {
					contentType = *sr.ContentType
				}
			}

			var ceData []byte
			bt, ok := payload.([]byte)
			if ok {
				ceData = bt
			} else if contentType != "" && strings.EqualFold(contentType, "application/json") {
				b, sErr := json.Marshal(payload)
				if sErr != nil {
					return nil, sErr
				}
				ceData = b
			} else {
				ceData = []byte(fmt.Sprintf("%v", payload))
			}

			var dataContentType string
			if contentType != "" {
				dataContentType = contentType
			}

			ce := contribPubsub.NewCloudEventsEnvelope(tr.GetKey(), source, "", "", "", c.outboxPubsub, dataContentType, ceData, "", traceState)
			ce[contribPubsub.TraceIDField] = traceID

			for k, v := range op.GetMetadata() {
				if k == contribPubsub.DataField || k == contribPubsub.IDField {
					continue
				}
				ce[k] = v
			}

			data, err := json.Marshal(ce)
			if err != nil {
				return nil, err
			}

			err = o.publisher.Publish(ctx, &contribPubsub.PublishRequest{
				PubsubName: c.outboxPubsub,
				Data:       data,
				Topic:      outboxTopic(source, c.publishTopic, o.namespace),
			})
			if err != nil {
				return nil, err
			}

			operations = append(operations, tr)
		}
	}

	return operations, nil
}

func outboxTopic(appID, topic, namespace string) string {
	return namespace + appID + topic + "outbox"
}

func (o *outboxImpl) SubscribeToInternalTopics(ctx context.Context, appID string) error {
	o.lock.RLock()
	defer o.lock.RUnlock()

	for stateStore, c := range o.outboxStores {
		outboxPubsub, ok := o.getPubsubFn(c.outboxPubsub)
		if !ok {
			outboxLogger.Warnf("could not subscribe to internal outbox topic: outbox pubsub %s not loaded", c.outboxPubsub)
			continue
		}

		outboxPubsub.Subscribe(ctx, contribPubsub.SubscribeRequest{
			Topic: outboxTopic(appID, c.publishTopic, o.namespace),
		}, func(ctx context.Context, msg *contribPubsub.NewMessage) error {
			var cloudEvent map[string]interface{}

			err := json.Unmarshal(msg.Data, &cloudEvent)
			if err != nil {
				return err
			}

			stateKey := o.cloudEventExtractorFn(cloudEvent, contribPubsub.IDField)

			store, ok := o.getStateFn(stateStore)
			if !ok {
				return fmt.Errorf("cannot get outbox state: state store %s not found", stateStore)
			}

			time.Sleep(defaultStateScanDelay)

			bo := &backoff.ExponentialBackOff{
				InitialInterval:     time.Millisecond * 500,
				MaxInterval:         time.Second * 3,
				MaxElapsedTime:      time.Second * 10,
				Multiplier:          3,
				Clock:               backoff.SystemClock,
				RandomizationFactor: 0.1,
			}

			err = backoff.Retry(func() error {
				resp, sErr := store.Get(ctx, &state.GetRequest{
					Key: stateKey,
				})
				if sErr != nil {
					return sErr
				}

				if resp != nil && len(resp.Data) > 0 {
					return nil
				}

				return fmt.Errorf("cannot publish outbox message to topic %s with pubsub %s: outbox state not found", c.publishTopic, c.publishPubSub)
			}, bo)
			if err != nil {
				if c.outboxDiscardWhenMissingState {
					outboxLogger.Errorf("failed to publish outbox topic to pubsub %s: %s, discarding message", c.publishPubSub, err)
					//lint:ignore nilerr dropping message
					return nil
				}

				outboxLogger.Errorf("failed to publish outbox topic to pubsub %s: %s, rejecting for later processing", c.publishPubSub, err)
				return err
			}

			cloudEvent[contribPubsub.TopicField] = c.publishTopic
			cloudEvent[contribPubsub.PubsubField] = c.publishPubSub

			b, err := json.Marshal(cloudEvent)
			if err != nil {
				return err
			}

			contentType := cloudEvent[contribPubsub.DataContentTypeField].(string)
			err = o.publisher.Publish(ctx, &contribPubsub.PublishRequest{
				PubsubName:  c.publishPubSub,
				Data:        b,
				Topic:       c.publishTopic,
				ContentType: &contentType,
			})
			if err != nil {
				return err
			}

			err = backoff.Retry(func() error {
				err = store.Delete(ctx, &state.DeleteRequest{
					Key: stateKey,
				})
				if err != nil {
					return err
				}
				return nil
			}, bo)

			return err
		})
	}

	return nil
}
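
// A hedged configuration sketch, not part of the upstream file: a state store
// opts in to the outbox by carrying these metadata entries in its Component
// spec (the names match the constants above; the values are hypothetical):
//
//	metadata:
//	- name: outboxPublishPubsub           # pubsub used for the user-visible publish
//	  value: order-pubsub
//	- name: outboxPublishTopic            # topic the app ultimately receives
//	  value: orders
//	- name: outboxPubsub                  # optional internal pubsub, defaults to outboxPublishPubsub
//	  value: order-pubsub
//	- name: outboxDiscardWhenMissingState # drop messages whose state was never written
//	  value: "false"
//
// Once AddOrUpdateOutbox sees such a component, Enabled with that store name reports true.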
mikeee/dapr
pkg/runtime/pubsub/outbox.go
GO
mit
9,423
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pubsub

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	contribPubsub "github.com/dapr/components-contrib/pubsub"
	"github.com/dapr/components-contrib/state"
	"github.com/dapr/dapr/pkg/apis/common"
	"github.com/dapr/dapr/pkg/apis/components/v1alpha1"
	"github.com/dapr/dapr/pkg/outbox"
	"github.com/dapr/dapr/pkg/runtime/pubsub/publisher/fake"
	"github.com/dapr/kit/ptr"
)

func newTestOutbox(publishFn func(context.Context, *contribPubsub.PublishRequest) error) outbox.Outbox {
	p := fake.New()
	if publishFn != nil {
		p.WithPublishFn(publishFn)
	}
	return NewOutbox(OptionsOutbox{
		Publisher:             p,
		CloudEventExtractorFn: extractCloudEventProperty,
	})
}

func TestNewOutbox(t *testing.T) {
	o := newTestOutbox(nil)
	assert.NotNil(t, o)
}

func TestEnabled(t *testing.T) {
	t.Run("required config", func(t *testing.T) {
		o := newTestOutbox(nil).(*outboxImpl)
		o.AddOrUpdateOutbox(v1alpha1.Component{
			ObjectMeta: metav1.ObjectMeta{Name: "test"},
			Spec: v1alpha1.ComponentSpec{
				Metadata: []common.NameValuePair{
					{Name: outboxPublishPubsubKey, Value: common.DynamicValue{JSON: v1.JSON{Raw: []byte("a")}}},
					{Name: outboxPublishTopicKey, Value: common.DynamicValue{JSON: v1.JSON{Raw: []byte("1")}}},
				},
			},
		})

		assert.True(t, o.Enabled("test"))
		assert.False(t, o.Enabled("test1"))
	})

	t.Run("missing pubsub config", func(t *testing.T) {
		o := newTestOutbox(nil).(*outboxImpl)
		o.AddOrUpdateOutbox(v1alpha1.Component{
			ObjectMeta: metav1.ObjectMeta{Name: "test"},
			Spec: v1alpha1.ComponentSpec{
				Metadata: []common.NameValuePair{
					{Name: outboxPublishTopicKey, Value: common.DynamicValue{JSON: v1.JSON{Raw: []byte("1")}}},
				},
			},
		})

		assert.False(t, o.Enabled("test"))
		assert.False(t, o.Enabled("test1"))
	})

	t.Run("missing topic config", func(t *testing.T) {
		o := newTestOutbox(nil).(*outboxImpl)
		o.AddOrUpdateOutbox(v1alpha1.Component{
			ObjectMeta: metav1.ObjectMeta{Name: "test"},
			Spec: v1alpha1.ComponentSpec{
				Metadata: []common.NameValuePair{
					{Name: outboxPublishPubsubKey, Value: common.DynamicValue{JSON: v1.JSON{Raw: []byte("a")}}},
				},
			},
		})

		assert.False(t, o.Enabled("test"))
		assert.False(t, o.Enabled("test1"))
	})
}

func TestAddOrUpdateOutbox(t *testing.T) {
	t.Run("config values correct", func(t *testing.T) {
		o := newTestOutbox(nil).(*outboxImpl)
		o.AddOrUpdateOutbox(v1alpha1.Component{
			ObjectMeta: metav1.ObjectMeta{Name: "test"},
			Spec: v1alpha1.ComponentSpec{
				Metadata: []common.NameValuePair{
					{Name: outboxPublishPubsubKey, Value: common.DynamicValue{JSON: v1.JSON{Raw: []byte("a")}}},
					{Name: outboxPublishTopicKey, Value: common.DynamicValue{JSON: v1.JSON{Raw: []byte("1")}}},
					{Name: outboxPubsubKey, Value: common.DynamicValue{JSON: v1.JSON{Raw: []byte("2")}}},
				},
			},
		})

		c := o.outboxStores["test"]
		assert.Equal(t, "2", c.outboxPubsub)
		assert.Equal(t, "a", c.publishPubSub)
		assert.Equal(t, "1", c.publishTopic)
	})

	t.Run("config default values correct", func(t *testing.T) {
		o := newTestOutbox(nil).(*outboxImpl)
		o.AddOrUpdateOutbox(v1alpha1.Component{
			ObjectMeta: metav1.ObjectMeta{Name: "test"},
			Spec: v1alpha1.ComponentSpec{
				Metadata: []common.NameValuePair{
					{Name: outboxPublishPubsubKey, Value: common.DynamicValue{JSON: v1.JSON{Raw: []byte("a")}}},
					{Name: outboxPublishTopicKey, Value: common.DynamicValue{JSON: v1.JSON{Raw: []byte("1")}}},
				},
			},
		})

		c := o.outboxStores["test"]
		assert.Equal(t, "a", c.outboxPubsub)
		assert.Equal(t, "a", c.publishPubSub)
		assert.Equal(t, "1", c.publishTopic)
	})
}

func TestPublishInternal(t *testing.T) {
	t.Run("valid operation, correct default parameters", func(t *testing.T) {
		o := newTestOutbox(func(ctx context.Context, pr *contribPubsub.PublishRequest) error {
			var cloudEvent map[string]interface{}
			err := json.Unmarshal(pr.Data, &cloudEvent)
			require.NoError(t, err)

			assert.Equal(t, "test", cloudEvent["data"])
			assert.Equal(t, "a", pr.PubsubName)
			assert.Equal(t, "testapp1outbox", pr.Topic)
			assert.Equal(t, "testapp", cloudEvent["source"])
			assert.Equal(t, "text/plain", cloudEvent["datacontenttype"])
			assert.Equal(t, "a", cloudEvent["pubsubname"])

			return nil
		}).(*outboxImpl)

		o.AddOrUpdateOutbox(v1alpha1.Component{
			ObjectMeta: metav1.ObjectMeta{Name: "test"},
			Spec: v1alpha1.ComponentSpec{
				Metadata: []common.NameValuePair{
					{Name: outboxPublishPubsubKey, Value: common.DynamicValue{JSON: v1.JSON{Raw: []byte("a")}}},
					{Name: outboxPublishTopicKey, Value: common.DynamicValue{JSON: v1.JSON{Raw: []byte("1")}}},
				},
			},
		})

		_, err := o.PublishInternal(context.Background(), "test", []state.TransactionalStateOperation{
			state.SetRequest{
				Key:   "key",
				Value: "test",
			},
		}, "testapp", "", "")
		require.NoError(t, err)
	})

	t.Run("valid operation, correct overridden parameters", func(t *testing.T) {
		o := newTestOutbox(func(ctx context.Context, pr *contribPubsub.PublishRequest) error {
			var cloudEvent map[string]interface{}
			err := json.Unmarshal(pr.Data, &cloudEvent)
			require.NoError(t, err)

			assert.Equal(t, "test", cloudEvent["data"])
			assert.Equal(t, "a", pr.PubsubName)
			assert.Equal(t, "testapp1outbox", pr.Topic)
			assert.Equal(t, "testsource", cloudEvent["source"])
			assert.Equal(t, "text/plain", cloudEvent["datacontenttype"])
			assert.Equal(t, "a", cloudEvent["pubsubname"])

			return nil
		}).(*outboxImpl)

		o.AddOrUpdateOutbox(v1alpha1.Component{
			ObjectMeta: metav1.ObjectMeta{Name: "test"},
			Spec: v1alpha1.ComponentSpec{
				Metadata: []common.NameValuePair{
					{Name: outboxPublishPubsubKey, Value: common.DynamicValue{JSON: v1.JSON{Raw: []byte("a")}}},
					{Name: outboxPublishTopicKey, Value: common.DynamicValue{JSON: v1.JSON{Raw: []byte("1")}}},
				},
			},
		})

		_, err := o.PublishInternal(context.Background(), "test", []state.TransactionalStateOperation{
			state.SetRequest{
				Key:      "key",
				Value:    "test",
				Metadata: map[string]string{"source": "testsource"},
			},
		}, "testapp", "", "")
		require.NoError(t, err)
	})

	t.Run("valid operation, no datacontenttype", func(t *testing.T) {
		o := newTestOutbox(func(ctx context.Context, pr *contribPubsub.PublishRequest) error {
			var cloudEvent map[string]interface{}
			err := json.Unmarshal(pr.Data, &cloudEvent)
			require.NoError(t, err)

			assert.Equal(t, "test", cloudEvent["data"])
			assert.Equal(t, "a", pr.PubsubName)
			assert.Equal(t, "testapp1outbox", pr.Topic)
			assert.Equal(t, "testapp", cloudEvent["source"])
			assert.Equal(t, "text/plain", cloudEvent["datacontenttype"])
			assert.Equal(t, "a", cloudEvent["pubsubname"])

			return nil
		}).(*outboxImpl)

		o.AddOrUpdateOutbox(v1alpha1.Component{
			ObjectMeta: metav1.ObjectMeta{Name: "test"},
			Spec: v1alpha1.ComponentSpec{
				Metadata: []common.NameValuePair{
					{Name: outboxPublishPubsubKey, Value: common.DynamicValue{JSON: v1.JSON{Raw: []byte("a")}}},
					{Name: outboxPublishTopicKey, Value: common.DynamicValue{JSON: v1.JSON{Raw: []byte("1")}}},
				},
			},
		})

		contentType := ""
		_, err := o.PublishInternal(context.TODO(), "test", []state.TransactionalStateOperation{
			state.SetRequest{
				Key:         "key",
				Value:       "test",
				ContentType: &contentType,
			},
		}, "testapp", "", "")
		require.NoError(t, err)
	})

	type customData struct {
		Name string `json:"name"`
	}

	t.Run("valid operation, application/json datacontenttype", func(t *testing.T) {
		o := newTestOutbox(func(ctx context.Context, pr *contribPubsub.PublishRequest) error {
			var cloudEvent map[string]interface{}
			err := json.Unmarshal(pr.Data, &cloudEvent)
			require.NoError(t, err)

			data := cloudEvent["data"]
			j := customData{}
			err = json.Unmarshal([]byte(data.(string)), &j)
			require.NoError(t, err)

			assert.Equal(t, "test", j.Name)
			assert.Equal(t, "a", pr.PubsubName)
			assert.Equal(t, "testapp1outbox", pr.Topic)
			assert.Equal(t, "testapp", cloudEvent["source"])
			assert.Equal(t, "application/json", cloudEvent["datacontenttype"])
			assert.Equal(t, "a", cloudEvent["pubsubname"])

			return nil
		}).(*outboxImpl)

		o.AddOrUpdateOutbox(v1alpha1.Component{
			ObjectMeta: metav1.ObjectMeta{Name: "test"},
			Spec: v1alpha1.ComponentSpec{
				Metadata: []common.NameValuePair{
					{Name: outboxPublishPubsubKey, Value: common.DynamicValue{JSON: v1.JSON{Raw: []byte("a")}}},
					{Name: outboxPublishTopicKey, Value: common.DynamicValue{JSON: v1.JSON{Raw: []byte("1")}}},
				},
			},
		})

		j := customData{Name: "test"}
		b, err := json.Marshal(&j)
		require.NoError(t, err)

		contentType := "application/json"
		_, err = o.PublishInternal(context.TODO(), "test", []state.TransactionalStateOperation{
			state.SetRequest{
				Key:         "key",
				Value:       string(b),
				ContentType: &contentType,
			},
		}, "testapp", "", "")
		require.NoError(t, err)
	})

	t.Run("missing state store", func(t *testing.T) {
		o := newTestOutbox(nil).(*outboxImpl)
		_, err := o.PublishInternal(context.TODO(), "test", []state.TransactionalStateOperation{
			state.SetRequest{
				Key:   "key",
				Value: "test",
			},
		}, "testapp", "", "")
		require.Error(t, err)
	})

	t.Run("no op when no transactions", func(t *testing.T) {
		o := newTestOutbox(func(ctx context.Context, pr *contribPubsub.PublishRequest) error {
			assert.Fail(t, "unexpected message received")
			return nil
		}).(*outboxImpl)

		o.AddOrUpdateOutbox(v1alpha1.Component{
			ObjectMeta: metav1.ObjectMeta{Name: "test"},
			Spec: v1alpha1.ComponentSpec{
				Metadata: []common.NameValuePair{
					{Name: outboxPublishPubsubKey, Value: common.DynamicValue{JSON: v1.JSON{Raw: []byte("a")}}},
					{Name: outboxPublishTopicKey, Value: common.DynamicValue{JSON: v1.JSON{Raw: []byte("1")}}},
				},
			},
		})

		_, err := o.PublishInternal(context.TODO(), "test", []state.TransactionalStateOperation{}, "testapp", "", "")
		require.NoError(t, err)
	})

	t.Run("error when pubsub fails", func(t *testing.T) {
		o := newTestOutbox(func(ctx context.Context, pr *contribPubsub.PublishRequest) error {
			return errors.New("")
		}).(*outboxImpl)

		o.AddOrUpdateOutbox(v1alpha1.Component{
			ObjectMeta: metav1.ObjectMeta{Name: "test"},
			Spec: v1alpha1.ComponentSpec{
				Metadata: []common.NameValuePair{
					{Name: outboxPublishPubsubKey, Value: common.DynamicValue{JSON: v1.JSON{Raw: []byte("a")}}},
					{Name: outboxPublishTopicKey, Value: common.DynamicValue{JSON: v1.JSON{Raw: []byte("1")}}},
				},
			},
		})

		_, err := o.PublishInternal(context.TODO(), "test", []state.TransactionalStateOperation{
			state.SetRequest{
				Key:   "1",
				Value: "hello",
			},
		}, "testapp", "", "")
		require.Error(t, err)
	})
}

func TestSubscribeToInternalTopics(t *testing.T) {
	t.Run("correct configuration with trace, custom field and nonoverridable fields", func(t *testing.T) {
		const outboxTopic = "test1outbox"

		psMock := &outboxPubsubMock{
			expectedOutboxTopic: outboxTopic,
			t:                   t,
		}
		stateMock := &outboxStateMock{
			receivedKey: make(chan string, 1),
		}

		internalCalledCh := make(chan struct{})
		externalCalledCh := make(chan struct{})

		var closed bool
		o := newTestOutbox(func(ctx context.Context, pr *contribPubsub.PublishRequest) error {
			if pr.Topic == outboxTopic {
				close(internalCalledCh)
			} else if pr.Topic == "1" {
				if !closed {
					close(externalCalledCh)
					closed = true
				}
			}

			ce := map[string]string{}
			json.Unmarshal(pr.Data, &ce)

			traceID := ce[contribPubsub.TraceIDField]
			traceState := ce[contribPubsub.TraceStateField]
			customField := ce["outbox.cloudevent.customfield"]
			data := ce[contribPubsub.DataField]
			id := ce[contribPubsub.IDField]

			assert.Equal(t, "00-ecdf5aaa79bff09b62b201442c0f3061-d2597ed7bfd029e4-01", traceID)
			assert.Equal(t, "00-ecdf5aaa79bff09b62b201442c0f3061-d2597ed7bfd029e4-01", traceState)
			assert.Equal(t, "a", customField)
			assert.Equal(t, "hello", data)
			assert.Contains(t, id, "outbox-")

			return psMock.Publish(ctx, pr)
		}).(*outboxImpl)
		o.cloudEventExtractorFn = extractCloudEventProperty
		o.getPubsubFn = func(s string) (contribPubsub.PubSub, bool) {
			return psMock, true
		}
		o.getStateFn = func(s string) (state.Store, bool) {
			return stateMock, true
		}

		stateScan := "1s"

		o.AddOrUpdateOutbox(v1alpha1.Component{
			ObjectMeta: metav1.ObjectMeta{Name: "test"},
			Spec: v1alpha1.ComponentSpec{
				Metadata: []common.NameValuePair{
					{Name: outboxPublishPubsubKey, Value: common.DynamicValue{JSON: v1.JSON{Raw: []byte("a")}}},
					{Name: outboxPublishTopicKey, Value: common.DynamicValue{JSON: v1.JSON{Raw: []byte("1")}}},
				},
			},
		})

		const appID = "test"
		err := o.SubscribeToInternalTopics(context.Background(), appID)
		require.NoError(t, err)

		errCh := make(chan error, 1)
		go func() {
			trs, pErr := o.PublishInternal(context.Background(), "test", []state.TransactionalStateOperation{
				state.SetRequest{
					Key:      "1",
					Value:    "hello",
					Metadata: map[string]string{"outbox.cloudevent.customfield": "a", "data": "a", "id": "b"},
				},
			}, appID, "00-ecdf5aaa79bff09b62b201442c0f3061-d2597ed7bfd029e4-01", "00-ecdf5aaa79bff09b62b201442c0f3061-d2597ed7bfd029e4-01")
			trs = append(trs[:0], trs[0+1:]...)
if pErr != nil { errCh <- pErr return } if len(trs) != 1 { errCh <- fmt.Errorf("expected trs to have len(1), but got: %d", len(trs)) return } errCh <- nil stateMock.expectedKey.Store(ptr.Of(trs[0].GetKey())) }() d, err := time.ParseDuration(stateScan) require.NoError(t, err) start := time.Now() doneCh := make(chan error, 2) timeout := time.After(5 * time.Second) go func() { select { case <-internalCalledCh: doneCh <- nil case <-timeout: doneCh <- errors.New("timeout waiting for internalCalledCh") } }() go func() { select { case <-externalCalledCh: doneCh <- nil case <-timeout: doneCh <- errors.New("timeout waiting for externalCalledCh") } }() for i := 0; i < 2; i++ { require.NoError(t, <-doneCh) } require.GreaterOrEqual(t, time.Since(start), d) // Publishing should not have errored require.NoError(t, <-errCh) expected := stateMock.expectedKey.Load() require.NotNil(t, expected) assert.Equal(t, *expected, <-stateMock.receivedKey) }) t.Run("state store not present", func(t *testing.T) { const outboxTopic = "test1outbox" psMock := &outboxPubsubMock{ expectedOutboxTopic: outboxTopic, t: t, } o := newTestOutbox(func(ctx context.Context, pr *contribPubsub.PublishRequest) error { return psMock.Publish(ctx, pr) }).(*outboxImpl) o.getPubsubFn = func(s string) (contribPubsub.PubSub, bool) { return psMock, true } o.getStateFn = func(s string) (state.Store, bool) { return nil, false } const appID = "test" err := o.SubscribeToInternalTopics(context.Background(), appID) require.NoError(t, err) trs, pErr := o.PublishInternal(context.Background(), "test", []state.TransactionalStateOperation{ state.SetRequest{ Key: "1", Value: "hello", }, }, appID, "", "") require.Error(t, pErr) assert.Empty(t, trs) }) t.Run("outbox state not present", func(t *testing.T) { const outboxTopic = "test1outbox" psMock := &outboxPubsubMock{ expectedOutboxTopic: outboxTopic, t: t, } stateMock := &outboxStateMock{} internalCalledCh := make(chan struct{}) externalCalledCh := make(chan struct{}) o := newTestOutbox(func(ctx context.Context, pr *contribPubsub.PublishRequest) error { if pr.Topic == outboxTopic { close(internalCalledCh) } else if pr.Topic == "1" { close(externalCalledCh) } return psMock.Publish(ctx, pr) }).(*outboxImpl) o.getPubsubFn = func(s string) (contribPubsub.PubSub, bool) { return psMock, true } o.getStateFn = func(s string) (state.Store, bool) { return stateMock, true } const stateScan = "1s" o.AddOrUpdateOutbox(v1alpha1.Component{ ObjectMeta: metav1.ObjectMeta{ Name: "test", }, Spec: v1alpha1.ComponentSpec{ Metadata: []common.NameValuePair{ { Name: outboxPublishPubsubKey, Value: common.DynamicValue{ JSON: v1.JSON{ Raw: []byte("a"), }, }, }, { Name: outboxPublishTopicKey, Value: common.DynamicValue{ JSON: v1.JSON{ Raw: []byte("1"), }, }, }, }, }, }) const appID = "test" err := o.SubscribeToInternalTopics(context.Background(), appID) require.NoError(t, err) errCh := make(chan error, 1) go func() { trs, pErr := o.PublishInternal(context.Background(), "test", []state.TransactionalStateOperation{ state.SetRequest{ Key: "1", Value: "hello", }, }, appID, "", "") trs = append(trs[:0], trs[0+1:]...) 
if pErr != nil { errCh <- pErr return } if len(trs) != 1 { errCh <- fmt.Errorf("expected trs to have len(1), but got: %d", len(trs)) return } errCh <- nil }() d, err := time.ParseDuration(stateScan) require.NoError(t, err) start := time.Now() doneCh := make(chan error, 2) timeout := time.After(2 * time.Second) go func() { select { case <-internalCalledCh: doneCh <- nil case <-timeout: doneCh <- errors.New("timeout waiting for internalCalledCh") } }() go func() { // Here we expect no signal select { case <-externalCalledCh: doneCh <- errors.New("received unexpected signal on externalCalledCh") case <-timeout: doneCh <- nil } }() for i := 0; i < 2; i++ { require.NoError(t, <-doneCh) } require.GreaterOrEqual(t, time.Since(start), d) // Publishing should not have errored require.NoError(t, <-errCh) }) t.Run("outbox state not present with discard", func(t *testing.T) { const outboxTopic = "test1outbox" psMock := &outboxPubsubMock{ expectedOutboxTopic: outboxTopic, t: t, validateNoError: true, } stateMock := &outboxStateMock{ returnEmptyOnGet: true, } internalCalledCh := make(chan struct{}) externalCalledCh := make(chan struct{}) o := newTestOutbox(func(ctx context.Context, pr *contribPubsub.PublishRequest) error { if pr.Topic == outboxTopic { close(internalCalledCh) } else if pr.Topic == "1" { close(externalCalledCh) } return psMock.Publish(ctx, pr) }).(*outboxImpl) o.getPubsubFn = func(s string) (contribPubsub.PubSub, bool) { return psMock, true } o.getStateFn = func(s string) (state.Store, bool) { return stateMock, true } const stateScan = "1s" o.AddOrUpdateOutbox(v1alpha1.Component{ ObjectMeta: metav1.ObjectMeta{ Name: "test", }, Spec: v1alpha1.ComponentSpec{ Metadata: []common.NameValuePair{ { Name: outboxPublishPubsubKey, Value: common.DynamicValue{ JSON: v1.JSON{ Raw: []byte("a"), }, }, }, { Name: outboxPublishTopicKey, Value: common.DynamicValue{ JSON: v1.JSON{ Raw: []byte("1"), }, }, }, { Name: outboxDiscardWhenMissingStateKey, Value: common.DynamicValue{ JSON: v1.JSON{ Raw: []byte("true"), }, }, }, }, }, }) const appID = "test" err := o.SubscribeToInternalTopics(context.Background(), appID) require.NoError(t, err) errCh := make(chan error, 1) go func() { trs, pErr := o.PublishInternal(context.Background(), "test", []state.TransactionalStateOperation{ state.SetRequest{ Key: "1", Value: "hello", }, }, appID, "", "") trs = append(trs[:0], trs[0+1:]...) 
if pErr != nil { errCh <- pErr return } if len(trs) != 1 { errCh <- fmt.Errorf("expected trs to have len(1), but got: %d", len(trs)) return } errCh <- nil }() d, err := time.ParseDuration(stateScan) require.NoError(t, err) start := time.Now() doneCh := make(chan error, 2) // account for max retry time timeout := time.After(11 * time.Second) go func() { select { case <-internalCalledCh: doneCh <- nil case <-timeout: doneCh <- errors.New("timeout waiting for internalCalledCh") } }() go func() { // Here we expect no signal select { case <-externalCalledCh: doneCh <- errors.New("received unexpected signal on externalCalledCh") case <-timeout: doneCh <- nil } }() for i := 0; i < 2; i++ { require.NoError(t, <-doneCh) } require.GreaterOrEqual(t, time.Since(start), d) // Publishing should not have errored require.NoError(t, <-errCh) }) } type outboxPubsubMock struct { expectedOutboxTopic string t *testing.T handler contribPubsub.Handler validateNoError bool } func (o *outboxPubsubMock) Init(ctx context.Context, metadata contribPubsub.Metadata) error { return nil } func (o *outboxPubsubMock) Features() []contribPubsub.Feature { return nil } func (o *outboxPubsubMock) Publish(ctx context.Context, req *contribPubsub.PublishRequest) error { go func() { err := o.handler(context.Background(), &contribPubsub.NewMessage{ Data: req.Data, Topic: req.Topic, }) if o.validateNoError { require.NoError(o.t, err) return } }() return nil } func (o *outboxPubsubMock) Subscribe(ctx context.Context, req contribPubsub.SubscribeRequest, handler contribPubsub.Handler) error { if req.Topic != o.expectedOutboxTopic { assert.Fail(o.t, fmt.Sprintf("expected outbox topic %s, got %s", o.expectedOutboxTopic, req.Topic)) } o.handler = handler return nil } func (o *outboxPubsubMock) Close() error { return nil } type outboxStateMock struct { expectedKey atomic.Pointer[string] receivedKey chan string returnEmptyOnGet bool } func (o *outboxStateMock) Init(ctx context.Context, metadata state.Metadata) error { return nil } func (o *outboxStateMock) Features() []state.Feature { return nil } func (o *outboxStateMock) Delete(ctx context.Context, req *state.DeleteRequest) error { return nil } func (o *outboxStateMock) Get(ctx context.Context, req *state.GetRequest) (*state.GetResponse, error) { if o.returnEmptyOnGet { return &state.GetResponse{}, nil } if o.receivedKey != nil { o.receivedKey <- req.Key } expected := o.expectedKey.Load() if expected != nil && *expected != "" && *expected == req.Key { return &state.GetResponse{ Data: []byte("0"), }, nil } return nil, nil } func TestOutboxTopic(t *testing.T) { t.Run("not namespaced", func(t *testing.T) { o := newTestOutbox(nil).(*outboxImpl) topic := outboxTopic("a", "b", o.namespace) assert.Equal(t, "aboutbox", topic) }) t.Run("namespaced", func(t *testing.T) { o := newTestOutbox(nil).(*outboxImpl) o.namespace = "default" topic := outboxTopic("a", "b", o.namespace) assert.Equal(t, "defaultaboutbox", topic) }) } func (o *outboxStateMock) Set(ctx context.Context, req *state.SetRequest) error { return nil } func (o *outboxStateMock) BulkGet(ctx context.Context, req []state.GetRequest, opts state.BulkGetOpts) ([]state.BulkGetResponse, error) { return nil, nil } func (o *outboxStateMock) BulkSet(ctx context.Context, req []state.SetRequest, opts state.BulkStoreOpts) error { return nil } func (o *outboxStateMock) BulkDelete(ctx context.Context, req []state.DeleteRequest, opts state.BulkStoreOpts) error { return nil } func extractCloudEventProperty(cloudEvent map[string]any, property string) 
string { if cloudEvent == nil { return "" } iValue, ok := cloudEvent[property] if ok { if value, ok := iValue.(string); ok { return value } } return "" }
mikeee/dapr
pkg/runtime/pubsub/outbox_test.go
GO
mit
25,931
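The TestOutboxTopic cases in the file above pin down the internal outbox topic name as the concatenation of namespace (empty when not namespaced), app ID, topic, and the literal suffix "outbox". A standalone sketch of that convention; outboxTopicName is a hypothetical helper mirroring the unexported outboxTopic function the tests exercise:

package main

import "fmt"

// outboxTopicName mirrors the naming asserted in TestOutboxTopic:
// namespace (possibly empty) + app ID + topic + "outbox".
func outboxTopicName(appID, topic, namespace string) string {
	return namespace + appID + topic + "outbox"
}

func main() {
	fmt.Println(outboxTopicName("a", "b", ""))        // "aboutbox"
	fmt.Println(outboxTopicName("a", "b", "default")) // "defaultaboutbox"
}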
/*
Copyright 2024 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fake

import (
	"context"

	contribPubsub "github.com/dapr/components-contrib/pubsub"
)

// Fake is a fake publisher
type Fake struct {
	publishFn     func(context.Context, *contribPubsub.PublishRequest) error
	bulkPublishFn func(context.Context, *contribPubsub.BulkPublishRequest) (contribPubsub.BulkPublishResponse, error)
}

func New() *Fake {
	return &Fake{
		publishFn: func(context.Context, *contribPubsub.PublishRequest) error { return nil },
		bulkPublishFn: func(context.Context, *contribPubsub.BulkPublishRequest) (contribPubsub.BulkPublishResponse, error) {
			return contribPubsub.BulkPublishResponse{}, nil
		},
	}
}

func (f *Fake) WithPublishFn(fn func(context.Context, *contribPubsub.PublishRequest) error) *Fake {
	f.publishFn = fn
	return f
}

func (f *Fake) WithBulkPublishFn(fn func(context.Context, *contribPubsub.BulkPublishRequest) (contribPubsub.BulkPublishResponse, error)) *Fake {
	f.bulkPublishFn = fn
	return f
}

func (f *Fake) Publish(ctx context.Context, req *contribPubsub.PublishRequest) error {
	return f.publishFn(ctx, req)
}

func (f *Fake) BulkPublish(ctx context.Context, req *contribPubsub.BulkPublishRequest) (contribPubsub.BulkPublishResponse, error) {
	return f.bulkPublishFn(ctx, req)
}
mikeee/dapr

pkg/runtime/pubsub/publisher/fake/fake.go
GO
mit
1,804
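A minimal sketch of how the fake publisher above might be wired into a test; the topic name and injected error are illustrative, not taken from the repository:

package fake_test

import (
	"context"
	"errors"
	"testing"

	contribPubsub "github.com/dapr/components-contrib/pubsub"

	"github.com/dapr/dapr/pkg/runtime/pubsub/publisher/fake"
)

// TestFakePublishFn checks that a publish function injected via WithPublishFn
// is invoked with the request and that its error is propagated to the caller.
func TestFakePublishFn(t *testing.T) {
	wantErr := errors.New("boom")
	f := fake.New().WithPublishFn(func(ctx context.Context, req *contribPubsub.PublishRequest) error {
		if req.Topic != "orders" {
			t.Fatalf("unexpected topic: %s", req.Topic)
		}
		return wantErr
	})

	err := f.Publish(context.Background(), &contribPubsub.PublishRequest{
		PubsubName: "pubsub",
		Topic:      "orders",
	})
	if !errors.Is(err, wantErr) {
		t.Fatalf("expected injected error, got %v", err)
	}
}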
/*
Copyright 2024 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fake

import (
	"testing"

	"github.com/dapr/dapr/pkg/runtime/pubsub"
)

func Test_Fake(t *testing.T) {
	var _ pubsub.Adapter = New()
}
mikeee/dapr
pkg/runtime/pubsub/publisher/fake/fake_test.go
GO
mit
705
/*
Copyright 2024 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package publisher

import (
	"context"

	contribpubsub "github.com/dapr/components-contrib/pubsub"

	"github.com/dapr/dapr/pkg/resiliency"
	rtpubsub "github.com/dapr/dapr/pkg/runtime/pubsub"
	"github.com/dapr/kit/logger"
)

type GetPubSubFn func(name string) (*rtpubsub.PubsubItem, bool)

type Options struct {
	AppID       string
	Namespace   string
	Resiliency  resiliency.Provider
	GetPubSubFn GetPubSubFn
}

type publisher struct {
	appID       string
	namespace   string
	resiliency  resiliency.Provider
	getpubsubFn GetPubSubFn
}

var log = logger.NewLogger("dapr.runtime.pubsub.publisher")

func New(opts Options) rtpubsub.Adapter {
	return &publisher{
		appID:       opts.AppID,
		namespace:   opts.Namespace,
		resiliency:  opts.Resiliency,
		getpubsubFn: opts.GetPubSubFn,
	}
}

// Publish is an adapter method for the runtime to pre-validate publish requests
// and then forward them to the Pub/Sub component.
// This method is used by the HTTP and gRPC APIs.
func (p *publisher) Publish(ctx context.Context, req *contribpubsub.PublishRequest) error {
	pubsub, ok := p.getpubsubFn(req.PubsubName)
	if !ok {
		return rtpubsub.NotFoundError{PubsubName: req.PubsubName}
	}

	if allowed := rtpubsub.IsOperationAllowed(req.Topic, pubsub, pubsub.ScopedPublishings); !allowed {
		return rtpubsub.NotAllowedError{Topic: req.Topic, ID: p.appID}
	}

	if pubsub.NamespaceScoped {
		req.Topic = p.namespace + req.Topic
	}

	policyRunner := resiliency.NewRunner[any](ctx,
		p.resiliency.ComponentOutboundPolicy(req.PubsubName, resiliency.Pubsub),
	)
	_, err := policyRunner(func(ctx context.Context) (any, error) {
		return nil, pubsub.Component.Publish(ctx, req)
	})
	return err
}

func (p *publisher) BulkPublish(ctx context.Context, req *contribpubsub.BulkPublishRequest) (contribpubsub.BulkPublishResponse, error) {
	pubsub, ok := p.getpubsubFn(req.PubsubName)
	if !ok {
		return contribpubsub.BulkPublishResponse{}, rtpubsub.NotFoundError{PubsubName: req.PubsubName}
	}

	if allowed := rtpubsub.IsOperationAllowed(req.Topic, pubsub, pubsub.ScopedPublishings); !allowed {
		return contribpubsub.BulkPublishResponse{}, rtpubsub.NotAllowedError{Topic: req.Topic, ID: p.appID}
	}

	policyDef := p.resiliency.ComponentOutboundPolicy(req.PubsubName, resiliency.Pubsub)

	if contribpubsub.FeatureBulkPublish.IsPresent(pubsub.Component.Features()) {
		return rtpubsub.ApplyBulkPublishResiliency(ctx, req, policyDef, pubsub.Component.(contribpubsub.BulkPublisher))
	}

	log.Debugf("pubsub %s does not implement the BulkPublish API; falling back to publishing messages individually", req.PubsubName)
	defaultBulkPublisher := rtpubsub.NewDefaultBulkPublisher(pubsub.Component)
	return rtpubsub.ApplyBulkPublishResiliency(ctx, req, policyDef, defaultBulkPublisher)
}
mikeee/dapr
pkg/runtime/pubsub/publisher/publisher.go
GO
mit
3,319
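A short usage sketch of the adapter above, assuming the compstore-backed lookup that the tests below also use; with no component registered under the requested name, Publish surfaces a not-found error. Names like "myapp" and "orders" are illustrative:

package main

import (
	"context"
	"fmt"

	contribpubsub "github.com/dapr/components-contrib/pubsub"

	"github.com/dapr/dapr/pkg/resiliency"
	"github.com/dapr/dapr/pkg/runtime/compstore"
	"github.com/dapr/dapr/pkg/runtime/pubsub/publisher"
	"github.com/dapr/kit/logger"
)

func main() {
	store := compstore.New()
	ps := publisher.New(publisher.Options{
		AppID:       "myapp", // illustrative app ID
		Resiliency:  resiliency.New(logger.NewLogger("example")),
		GetPubSubFn: store.GetPubSub,
	})

	// No "orders" component was registered, so this returns an error.
	err := ps.Publish(context.Background(), &contribpubsub.PublishRequest{
		PubsubName: "orders",
		Topic:      "topic0",
	})
	fmt.Println(err)
}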
/* Copyright 2024 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package publisher import ( "context" "sync/atomic" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" contribpubsub "github.com/dapr/components-contrib/pubsub" "github.com/dapr/dapr/pkg/resiliency" "github.com/dapr/dapr/pkg/runtime/compstore" rtpubsub "github.com/dapr/dapr/pkg/runtime/pubsub" daprt "github.com/dapr/dapr/pkg/testing" "github.com/dapr/kit/logger" ) const ( TestPubsubName = "testpubsub" TestSecondPubsubName = "testpubsub2" ) func TestPublish(t *testing.T) { t.Run("test bulk publish, topic allowed", func(t *testing.T) { compStore := compstore.New() compStore.AddPubSub(TestPubsubName, &rtpubsub.PubsubItem{Component: &mockPublishPubSub{}}) ps := New(Options{ GetPubSubFn: compStore.GetPubSub, Resiliency: resiliency.New(logger.NewLogger("test")), }) md := make(map[string]string, 2) md["key"] = "v3" res, err := ps.BulkPublish(context.Background(), &contribpubsub.BulkPublishRequest{ PubsubName: TestPubsubName, Topic: "topic0", Metadata: md, Entries: []contribpubsub.BulkMessageEntry{ { EntryId: "1", Event: []byte("test"), Metadata: md, ContentType: "text/plain", }, }, }) require.NoError(t, err) assert.Empty(t, res.FailedEntries) compStore.AddPubSub(TestSecondPubsubName, &rtpubsub.PubsubItem{Component: &mockPublishPubSub{}}) res, err = ps.BulkPublish(context.Background(), &contribpubsub.BulkPublishRequest{ PubsubName: TestSecondPubsubName, Topic: "topic1", Entries: []contribpubsub.BulkMessageEntry{ { EntryId: "1", Event: []byte("test"), ContentType: "text/plain", }, { EntryId: "2", Event: []byte("test 2"), ContentType: "text/plain", }, }, }) require.NoError(t, err) assert.Empty(t, res.FailedEntries) }) t.Run("test bulk publish, topic protected, with scopes, publish succeeds", func(t *testing.T) { compStore := compstore.New() compStore.AddPubSub(TestPubsubName, &rtpubsub.PubsubItem{ Component: &mockPublishPubSub{}, ProtectedTopics: []string{"topic0"}, ScopedPublishings: []string{"topic0"}, }) ps := New(Options{ GetPubSubFn: compStore.GetPubSub, Resiliency: resiliency.New(logger.NewLogger("test")), }) md := make(map[string]string, 2) md["key"] = "v3" res, err := ps.BulkPublish(context.Background(), &contribpubsub.BulkPublishRequest{ PubsubName: TestPubsubName, Topic: "topic0", Metadata: md, Entries: []contribpubsub.BulkMessageEntry{ { EntryId: "1", Event: []byte("test"), Metadata: md, ContentType: "text/plain", }, }, }) require.NoError(t, err) assert.Empty(t, res.FailedEntries) compStore.AddPubSub(TestSecondPubsubName, &rtpubsub.PubsubItem{ Component: &mockPublishPubSub{}, ProtectedTopics: []string{"topic1"}, ScopedPublishings: []string{"topic1"}, }) res, err = ps.BulkPublish(context.Background(), &contribpubsub.BulkPublishRequest{ PubsubName: TestSecondPubsubName, Topic: "topic1", Entries: []contribpubsub.BulkMessageEntry{ { EntryId: "1", Event: []byte("test"), ContentType: "text/plain", }, { EntryId: "2", Event: []byte("test 2"), ContentType: "text/plain", }, }, }) require.NoError(t, err) 
assert.Empty(t, res.FailedEntries) }) t.Run("test bulk publish, topic not allowed", func(t *testing.T) { compStore := compstore.New() compStore.AddPubSub(TestPubsubName, &rtpubsub.PubsubItem{ Component: &mockPublishPubSub{}, AllowedTopics: []string{"topic1"}, }) ps := New(Options{ GetPubSubFn: compStore.GetPubSub, Resiliency: resiliency.New(logger.NewLogger("test")), }) md := make(map[string]string, 2) md["key"] = "v3" res, err := ps.BulkPublish(context.Background(), &contribpubsub.BulkPublishRequest{ PubsubName: TestPubsubName, Topic: "topic5", Metadata: md, Entries: []contribpubsub.BulkMessageEntry{ { EntryId: "1", Event: []byte("test"), Metadata: md, ContentType: "text/plain", }, }, }) require.Error(t, err) assert.Empty(t, res) compStore.AddPubSub(TestSecondPubsubName, &rtpubsub.PubsubItem{ Component: &mockPublishPubSub{}, AllowedTopics: []string{"topic1"}, }) res, err = ps.BulkPublish(context.Background(), &contribpubsub.BulkPublishRequest{ PubsubName: TestSecondPubsubName, Topic: "topic5", Metadata: md, Entries: []contribpubsub.BulkMessageEntry{ { EntryId: "1", Event: []byte("test"), Metadata: md, ContentType: "text/plain", }, }, }) require.Error(t, err) assert.Empty(t, res) }) t.Run("test bulk publish, topic protected, no scopes, publish fails", func(t *testing.T) { compStore := compstore.New() compStore.AddPubSub(TestPubsubName, &rtpubsub.PubsubItem{ Component: &mockPublishPubSub{}, ProtectedTopics: []string{"topic1"}, }) ps := New(Options{ Resiliency: resiliency.New(logger.NewLogger("test")), GetPubSubFn: compStore.GetPubSub, }) md := make(map[string]string, 2) md["key"] = "v3" res, err := ps.BulkPublish(context.Background(), &contribpubsub.BulkPublishRequest{ PubsubName: TestPubsubName, Topic: "topic1", Metadata: md, Entries: []contribpubsub.BulkMessageEntry{ { EntryId: "1", Event: []byte("test"), Metadata: md, ContentType: "text/plain", }, }, }) require.Error(t, err) assert.Empty(t, res) compStore.AddPubSub(TestSecondPubsubName, &rtpubsub.PubsubItem{ Component: &mockPublishPubSub{}, ProtectedTopics: []string{"topic1"}, }) res, err = ps.BulkPublish(context.Background(), &contribpubsub.BulkPublishRequest{ PubsubName: TestSecondPubsubName, Topic: "topic1", Metadata: md, Entries: []contribpubsub.BulkMessageEntry{ { EntryId: "1", Event: []byte("test"), Metadata: md, ContentType: "text/plain", }, }, }) require.Error(t, err) assert.Empty(t, res) }) t.Run("test publish, topic allowed", func(t *testing.T) { compStore := compstore.New() compStore.AddPubSub(TestPubsubName, &rtpubsub.PubsubItem{ Component: &mockPublishPubSub{}, }) md := make(map[string]string, 2) md["key"] = "v3" ps := New(Options{ Resiliency: resiliency.New(logger.NewLogger("test")), GetPubSubFn: compStore.GetPubSub, }) err := ps.Publish(context.Background(), &contribpubsub.PublishRequest{ PubsubName: TestPubsubName, Topic: "topic0", Metadata: md, }) require.NoError(t, err) compStore.AddPubSub(TestSecondPubsubName, &rtpubsub.PubsubItem{ Component: &mockPublishPubSub{}, }) err = ps.Publish(context.Background(), &contribpubsub.PublishRequest{ PubsubName: TestSecondPubsubName, Topic: "topic1", }) require.NoError(t, err) }) t.Run("test publish, topic protected, with scopes, publish succeeds", func(t *testing.T) { compStore := compstore.New() compStore.AddPubSub(TestPubsubName, &rtpubsub.PubsubItem{ Component: &mockPublishPubSub{}, ProtectedTopics: []string{"topic0"}, ScopedPublishings: []string{"topic0"}, }) ps := New(Options{ Resiliency: resiliency.New(logger.NewLogger("test")), GetPubSubFn: compStore.GetPubSub, }) md := 
make(map[string]string, 2) md["key"] = "v3" err := ps.Publish(context.Background(), &contribpubsub.PublishRequest{ PubsubName: TestPubsubName, Topic: "topic0", Metadata: md, }) require.NoError(t, err) compStore.AddPubSub(TestSecondPubsubName, &rtpubsub.PubsubItem{ Component: &mockPublishPubSub{}, ProtectedTopics: []string{"topic1"}, ScopedPublishings: []string{"topic1"}, }) err = ps.Publish(context.Background(), &contribpubsub.PublishRequest{ PubsubName: TestSecondPubsubName, Topic: "topic1", }) require.NoError(t, err) }) t.Run("test publish, topic not allowed", func(t *testing.T) { compStore := compstore.New() compStore.AddPubSub(TestPubsubName, &rtpubsub.PubsubItem{ Component: &mockPublishPubSub{}, AllowedTopics: []string{"topic1"}, }) ps := New(Options{ Resiliency: resiliency.New(logger.NewLogger("test")), GetPubSubFn: compStore.GetPubSub, }) compStore.AddPubSub(TestPubsubName, &rtpubsub.PubsubItem{ Component: &mockPublishPubSub{}, AllowedTopics: []string{"topic1"}, }) err := ps.Publish(context.Background(), &contribpubsub.PublishRequest{ PubsubName: TestPubsubName, Topic: "topic5", }) require.Error(t, err) compStore.AddPubSub(TestSecondPubsubName, &rtpubsub.PubsubItem{ Component: &mockPublishPubSub{}, AllowedTopics: []string{"topic1"}, }) err = ps.Publish(context.Background(), &contribpubsub.PublishRequest{ PubsubName: TestSecondPubsubName, Topic: "topic5", }) require.Error(t, err) }) t.Run("test publish, topic protected, no scopes, publish fails", func(t *testing.T) { compStore := compstore.New() compStore.AddPubSub(TestPubsubName, &rtpubsub.PubsubItem{ Component: &mockPublishPubSub{}, ProtectedTopics: []string{"topic1"}, }) ps := New(Options{ Resiliency: resiliency.New(logger.NewLogger("test")), GetPubSubFn: compStore.GetPubSub, }) compStore.AddPubSub(TestPubsubName, &rtpubsub.PubsubItem{ Component: &mockPublishPubSub{}, ProtectedTopics: []string{"topic1"}, }) err := ps.Publish(context.Background(), &contribpubsub.PublishRequest{ PubsubName: TestPubsubName, Topic: "topic1", }) require.Error(t, err) compStore.AddPubSub(TestSecondPubsubName, &rtpubsub.PubsubItem{ Component: &mockPublishPubSub{}, ProtectedTopics: []string{"topic1"}, }) err = ps.Publish(context.Background(), &contribpubsub.PublishRequest{ PubsubName: TestSecondPubsubName, Topic: "topic1", }) require.Error(t, err) }) } func TestNamespacedPublisher(t *testing.T) { compStore := compstore.New() compStore.AddPubSub(TestPubsubName, &rtpubsub.PubsubItem{ Component: &mockPublishPubSub{}, NamespaceScoped: true, }) ps := New(Options{ Resiliency: resiliency.New(logger.NewLogger("test")), GetPubSubFn: compStore.GetPubSub, Namespace: "ns1", }) err := ps.Publish(context.Background(), &contribpubsub.PublishRequest{ PubsubName: TestPubsubName, Topic: "topic0", }) require.NoError(t, err) pubSub, ok := compStore.GetPubSub(TestPubsubName) require.True(t, ok) assert.Equal(t, "ns1topic0", pubSub.Component.(*mockPublishPubSub).PublishedRequest.Load().Topic) } type mockPublishPubSub struct { PublishedRequest atomic.Pointer[contribpubsub.PublishRequest] } // Init is a mock initialization method. func (m *mockPublishPubSub) Init(ctx context.Context, metadata contribpubsub.Metadata) error { return nil } // Publish is a mock publish method. func (m *mockPublishPubSub) Publish(ctx context.Context, req *contribpubsub.PublishRequest) error { m.PublishedRequest.Store(req) return nil } // BulkPublish is a mock bulk publish method returning a success all the time. 
func (m *mockPublishPubSub) BulkPublish(req *contribpubsub.BulkPublishRequest) (contribpubsub.BulkPublishResponse, error) { return contribpubsub.BulkPublishResponse{}, nil } func (m *mockPublishPubSub) BulkSubscribe(ctx context.Context, req contribpubsub.SubscribeRequest, handler contribpubsub.BulkHandler) (contribpubsub.BulkSubscribeResponse, error) { return contribpubsub.BulkSubscribeResponse{}, nil } // Subscribe is a mock subscribe method. func (m *mockPublishPubSub) Subscribe(_ context.Context, req contribpubsub.SubscribeRequest, handler contribpubsub.Handler) error { return nil } func (m *mockPublishPubSub) Close() error { return nil } func (m *mockPublishPubSub) Features() []contribpubsub.Feature { return nil } func TestPubsubWithResiliency(t *testing.T) { t.Run("pubsub publish retries with resiliency", func(t *testing.T) { failingPubsub := daprt.FailingPubsub{ Failure: daprt.NewFailure( map[string]int{ "failingTopic": 1, }, map[string]time.Duration{ "timeoutTopic": time.Second * 10, }, map[string]int{}, ), } compStore := compstore.New() compStore.AddPubSub("failPubsub", &rtpubsub.PubsubItem{Component: &failingPubsub}) ps := New(Options{ GetPubSubFn: compStore.GetPubSub, Resiliency: resiliency.FromConfigurations(logger.NewLogger("test"), daprt.TestResiliency), }) req := &contribpubsub.PublishRequest{ PubsubName: "failPubsub", Topic: "failingTopic", } err := ps.Publish(context.Background(), req) require.NoError(t, err) assert.Equal(t, 2, failingPubsub.Failure.CallCount("failingTopic")) }) t.Run("pubsub publish times out with resiliency", func(t *testing.T) { failingPubsub := daprt.FailingPubsub{ Failure: daprt.NewFailure( map[string]int{ "failingTopic": 1, }, map[string]time.Duration{ "timeoutTopic": time.Second * 10, }, map[string]int{}, ), } compStore := compstore.New() compStore.AddPubSub("failPubsub", &rtpubsub.PubsubItem{Component: &failingPubsub}) ps := New(Options{ GetPubSubFn: compStore.GetPubSub, Resiliency: resiliency.FromConfigurations(logger.NewLogger("test"), daprt.TestResiliency), }) req := &contribpubsub.PublishRequest{ PubsubName: "failPubsub", Topic: "timeoutTopic", } start := time.Now() err := ps.Publish(context.Background(), req) end := time.Now() require.Error(t, err) assert.Equal(t, 2, failingPubsub.Failure.CallCount("timeoutTopic")) assert.Less(t, end.Sub(start), time.Second*10) }) }
mikeee/dapr
pkg/runtime/pubsub/publisher/publisher_test.go
GO
mit
14,364
/*
Copyright 2024 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pubsub

import (
	rtv1pb "github.com/dapr/dapr/pkg/proto/runtime/v1"
)

type Streamer interface {
	Subscribe(rtv1pb.Dapr_SubscribeTopicEventsAlpha1Server) error
}
mikeee/dapr
pkg/runtime/pubsub/streamer.go
GO
mit
732
/*
Copyright 2024 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package streamer

import (
	"context"
	"sync"

	rtv1pb "github.com/dapr/dapr/pkg/proto/runtime/v1"
)

type conn struct {
	lock             sync.RWMutex
	streamLock       sync.Mutex
	stream           rtv1pb.Dapr_SubscribeTopicEventsAlpha1Server
	publishResponses map[string]chan *rtv1pb.SubscribeTopicEventsResponseAlpha1
}

func (c *conn) registerPublishResponse(id string) (chan *rtv1pb.SubscribeTopicEventsResponseAlpha1, func()) {
	ch := make(chan *rtv1pb.SubscribeTopicEventsResponseAlpha1)
	c.lock.Lock()
	c.publishResponses[id] = ch
	c.lock.Unlock()
	return ch, func() {
		c.lock.Lock()
		delete(c.publishResponses, id)
		c.lock.Unlock()
	}
}

func (c *conn) notifyPublishResponse(ctx context.Context, resp *rtv1pb.SubscribeTopicEventsResponseAlpha1) {
	c.lock.RLock()
	ch, ok := c.publishResponses[resp.GetId()]
	c.lock.RUnlock()

	if !ok {
		log.Errorf("no client stream expecting publish response for id %q", resp.GetId())
		return
	}

	select {
	case <-ctx.Done():
	case ch <- resp:
	}
}
mikeee/dapr
pkg/runtime/pubsub/streamer/conn.go
GO
mit
1,559
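The conn type above is an instance of a common correlate-by-ID pattern: a sender registers a response channel under a message ID before writing to the stream, and the receive loop routes each incoming response to its waiting channel. A self-contained sketch of the same idea, using hypothetical generic names rather than the repository's types:

package main

import (
	"context"
	"fmt"
	"sync"
)

// correlator mirrors the conn pattern: register a channel under an ID
// before sending, then have the receive loop notify the matching waiter.
type correlator[T any] struct {
	mu      sync.RWMutex
	pending map[string]chan T
}

func newCorrelator[T any]() *correlator[T] {
	return &correlator[T]{pending: make(map[string]chan T)}
}

// register returns the response channel and a cleanup func the caller
// must invoke (typically via defer) to avoid leaking the map entry.
func (c *correlator[T]) register(id string) (<-chan T, func()) {
	ch := make(chan T)
	c.mu.Lock()
	c.pending[id] = ch
	c.mu.Unlock()
	return ch, func() {
		c.mu.Lock()
		delete(c.pending, id)
		c.mu.Unlock()
	}
}

// notify delivers a response to the waiter for id, giving up if the
// context is cancelled or no waiter is registered.
func (c *correlator[T]) notify(ctx context.Context, id string, v T) {
	c.mu.RLock()
	ch, ok := c.pending[id]
	c.mu.RUnlock()
	if !ok {
		return
	}
	select {
	case <-ctx.Done():
	case ch <- v:
	}
}

func main() {
	c := newCorrelator[string]()
	ch, done := c.register("msg-1")
	defer done()

	go c.notify(context.Background(), "msg-1", "ok")
	fmt.Println(<-ch) // prints "ok"
}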
/* Copyright 2024 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package streamer import ( "context" "errors" "fmt" "io" "strings" "sync" "time" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" contribpubsub "github.com/dapr/components-contrib/pubsub" "github.com/dapr/dapr/pkg/config" diag "github.com/dapr/dapr/pkg/diagnostics" rtv1pb "github.com/dapr/dapr/pkg/proto/runtime/v1" rterrors "github.com/dapr/dapr/pkg/runtime/errors" rtpubsub "github.com/dapr/dapr/pkg/runtime/pubsub" "github.com/dapr/kit/logger" ) type Options struct { TracingSpec *config.TracingSpec } type streamer struct { tracingSpec *config.TracingSpec subscribers map[string]*conn lock sync.RWMutex } var log = logger.NewLogger("dapr.runtime.pubsub.streamer") func New(opts Options) rtpubsub.AdapterStreamer { return &streamer{ tracingSpec: opts.TracingSpec, subscribers: make(map[string]*conn), } } func (s *streamer) Subscribe(stream rtv1pb.Dapr_SubscribeTopicEventsAlpha1Server, req *rtv1pb.SubscribeTopicEventsInitialRequestAlpha1) error { s.lock.Lock() key := s.StreamerKey(req.GetPubsubName(), req.GetTopic()) if _, ok := s.subscribers[key]; ok { s.lock.Unlock() return fmt.Errorf("already subscribed to pubsub %q topic %q", req.GetPubsubName(), req.GetTopic()) } conn := &conn{ stream: stream, publishResponses: make(map[string]chan *rtv1pb.SubscribeTopicEventsResponseAlpha1), } s.subscribers[key] = conn log.Infof("Subscribing to pubsub '%s' topic '%s'", req.GetPubsubName(), req.GetTopic()) s.lock.Unlock() defer func() { s.lock.Lock() delete(s.subscribers, key) s.lock.Unlock() }() var wg sync.WaitGroup defer wg.Wait() for { resp, err := stream.Recv() s, ok := status.FromError(err) if (ok && s.Code() == codes.Canceled) || errors.Is(err, context.Canceled) || errors.Is(err, io.EOF) { log.Infof("Unsubscribed from pubsub '%s' topic '%s'", req.GetPubsubName(), req.GetTopic()) return err } if err != nil { log.Errorf("error receiving message from client stream: %s", err) return err } eventResp := resp.GetEventResponse() if eventResp == nil { return errors.New("duplicate initial request received") } wg.Add(1) go func() { defer wg.Done() conn.notifyPublishResponse(stream.Context(), eventResp) }() } } func (s *streamer) Publish(ctx context.Context, msg *rtpubsub.SubscribedMessage) error { s.lock.RLock() key := s.StreamerKey(msg.PubSub, msg.Topic) conn, ok := s.subscribers[key] s.lock.RUnlock() if !ok { return fmt.Errorf("no streamer subscribed to pubsub %q topic %q", msg.PubSub, msg.Topic) } envelope, span, err := rtpubsub.GRPCEnvelopeFromSubscriptionMessage(ctx, msg, log, s.tracingSpec) if err != nil { return err } ch, defFn := conn.registerPublishResponse(envelope.GetId()) defer defFn() start := time.Now() conn.streamLock.Lock() err = conn.stream.Send(envelope) conn.streamLock.Unlock() elapsed := diag.ElapsedSince(start) if span != nil { m := diag.ConstructSubscriptionSpanAttributes(envelope.GetTopic()) diag.AddAttributesToSpan(span, m) diag.UpdateSpanStatusFromGRPCError(span, err) span.End() } if err != nil { err = fmt.Errorf("error returned 
from app while processing pub/sub event %v: %w", msg.CloudEvent[contribpubsub.IDField], rterrors.NewRetriable(err)) log.Debug(err) diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, msg.PubSub, strings.ToLower(string(contribpubsub.Retry)), "", msg.Topic, elapsed) return err } var resp *rtv1pb.SubscribeTopicEventsResponseAlpha1 select { case <-ctx.Done(): return ctx.Err() case resp = <-ch: } switch resp.GetStatus().GetStatus() { case rtv1pb.TopicEventResponse_SUCCESS: //nolint:nosnakecase // on uninitialized status, this is the case it defaults to as an uninitialized status defaults to 0 which is // success from protobuf definition diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, msg.PubSub, strings.ToLower(string(contribpubsub.Success)), "", msg.Topic, elapsed) return nil case rtv1pb.TopicEventResponse_RETRY: //nolint:nosnakecase diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, msg.PubSub, strings.ToLower(string(contribpubsub.Retry)), "", msg.Topic, elapsed) // TODO: add retry error info return fmt.Errorf("RETRY status returned from app while processing pub/sub event %v: %w", msg.CloudEvent[contribpubsub.IDField], rterrors.NewRetriable(nil)) case rtv1pb.TopicEventResponse_DROP: //nolint:nosnakecase log.Warnf("DROP status returned from app while processing pub/sub event %v", msg.CloudEvent[contribpubsub.IDField]) diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, msg.PubSub, strings.ToLower(string(contribpubsub.Drop)), "", msg.Topic, elapsed) return rtpubsub.ErrMessageDropped default: // Consider unknown status field as error and retry diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, msg.PubSub, strings.ToLower(string(contribpubsub.Retry)), "", msg.Topic, elapsed) return fmt.Errorf("unknown status returned from app while processing pub/sub event %v, status: %v, err: %w", msg.CloudEvent[contribpubsub.IDField], resp.GetStatus(), rterrors.NewRetriable(nil)) } } func (s *streamer) StreamerKey(pubsub, topic string) string { return "___" + pubsub + "||" + topic }
mikeee/dapr
pkg/runtime/pubsub/streamer/streamer.go
GO
mit
5,802
package pubsub

import "fmt"

type Subscription struct {
	PubsubName      string            `json:"pubsubname"`
	Topic           string            `json:"topic"`
	DeadLetterTopic string            `json:"deadLetterTopic"`
	Metadata        map[string]string `json:"metadata"`
	Rules           []*Rule           `json:"rules,omitempty"`
	Scopes          []string          `json:"scopes"`
	BulkSubscribe   *BulkSubscribe    `json:"bulkSubscribe"`
}

type BulkSubscribe struct {
	Enabled            bool  `json:"enabled"`
	MaxMessagesCount   int32 `json:"maxMessagesCount,omitempty"`
	MaxAwaitDurationMs int32 `json:"maxAwaitDurationMs,omitempty"`
}

type Rule struct {
	Match Expr   `json:"match"`
	Path  string `json:"path"`
}

type Expr interface {
	fmt.Stringer
	Eval(variables map[string]interface{}) (interface{}, error)
}
mikeee/dapr
pkg/runtime/pubsub/subscription.go
GO
mit
826
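A sketch of how the Rule and Expr types above compose into ordered routing: rules are evaluated in order, and a rule with a nil Match acts as a catch-all default (the behavior the subscription parsing below relies on). constExpr is a hypothetical Expr implementation for illustration only:

package main

import (
	"fmt"

	rtpubsub "github.com/dapr/dapr/pkg/runtime/pubsub"
)

// constExpr is a hypothetical Expr that evaluates to a fixed boolean;
// real rules carry match expressions compiled by CreateRoutingRule.
type constExpr bool

func (c constExpr) String() string { return fmt.Sprintf("%v", bool(c)) }

func (c constExpr) Eval(map[string]interface{}) (interface{}, error) {
	return bool(c), nil
}

func main() {
	rules := []*rtpubsub.Rule{
		{Match: constExpr(false), Path: "/orders-v2"},
		{Path: "/orders"}, // nil Match: always selected if nothing earlier matched
	}
	for _, r := range rules {
		if r.Match == nil {
			fmt.Println("route to", r.Path)
			return
		}
		v, _ := r.Match.Eval(nil)
		if b, ok := v.(bool); ok && b {
			fmt.Println("route to", r.Path)
			return
		}
	}
}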
package pubsub import ( "context" "encoding/base64" "encoding/json" "errors" "fmt" "net/http" "strings" "go.opentelemetry.io/otel/trace" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/types/known/emptypb" "google.golang.org/protobuf/types/known/structpb" "k8s.io/apimachinery/pkg/util/sets" "github.com/dapr/components-contrib/contenttype" contribpubsub "github.com/dapr/components-contrib/pubsub" "github.com/dapr/dapr/pkg/channel" "github.com/dapr/dapr/pkg/config" diag "github.com/dapr/dapr/pkg/diagnostics" "github.com/dapr/dapr/pkg/expr" invokev1 "github.com/dapr/dapr/pkg/messaging/v1" runtimev1pb "github.com/dapr/dapr/pkg/proto/runtime/v1" "github.com/dapr/dapr/pkg/resiliency" rterrors "github.com/dapr/dapr/pkg/runtime/errors" "github.com/dapr/kit/logger" ) const ( getTopicsError = "error getting topic list from app: %s" deserializeTopicsError = "error getting topics from app: %s" noSubscriptionsError = "user app did not subscribe to any topic" subscriptionKind = "Subscription" APIVersionV1alpha1 = "dapr.io/v1alpha1" APIVersionV2alpha1 = "dapr.io/v2alpha1" MetadataKeyPubSub = "pubsubName" ) var ( // errUnexpectedEnvelopeData denotes that an unexpected data type was // encountered when processing a cloud event's data property. errUnexpectedEnvelopeData = errors.New("unexpected data type encountered in envelope") cloudEventDuplicateKeys = sets.NewString( contribpubsub.IDField, contribpubsub.SourceField, contribpubsub.DataContentTypeField, contribpubsub.TypeField, contribpubsub.SpecVersionField, contribpubsub.DataField, contribpubsub.DataBase64Field, ) ) type ( SubscriptionJSON struct { PubsubName string `json:"pubsubname"` Topic string `json:"topic"` DeadLetterTopic string `json:"deadLetterTopic"` Metadata map[string]string `json:"metadata,omitempty"` Route string `json:"route"` // Single route from v1alpha1 Routes RoutesJSON `json:"routes"` // Multiple routes from v2alpha1 BulkSubscribe BulkSubscribeJSON `json:"bulkSubscribe,omitempty"` } RoutesJSON struct { Rules []*RuleJSON `json:"rules,omitempty"` Default string `json:"default,omitempty"` } BulkSubscribeJSON struct { Enabled bool `json:"enabled"` MaxMessagesCount int32 `json:"maxMessagesCount,omitempty"` MaxAwaitDurationMs int32 `json:"maxAwaitDurationMs,omitempty"` } RuleJSON struct { Match string `json:"match"` Path string `json:"path"` } SubscribedMessage struct { CloudEvent map[string]interface{} Data []byte Topic string Metadata map[string]string Path string PubSub string } ) func GetSubscriptionsHTTP(ctx context.Context, channel channel.AppChannel, log logger.Logger, r resiliency.Provider) ([]Subscription, error) { req := invokev1.NewInvokeMethodRequest("dapr/subscribe"). WithHTTPExtension(http.MethodGet, ""). 
WithContentType(invokev1.JSONContentType) defer req.Close() policyDef := r.BuiltInPolicy(resiliency.BuiltInInitializationRetries) if policyDef != nil && policyDef.HasRetries() { req.WithReplay(true) } policyRunner := resiliency.NewRunnerWithOptions(ctx, policyDef, resiliency.RunnerOpts[*invokev1.InvokeMethodResponse]{ Disposer: resiliency.DisposerCloser[*invokev1.InvokeMethodResponse], }, ) resp, err := policyRunner(func(ctx context.Context) (*invokev1.InvokeMethodResponse, error) { return channel.InvokeMethod(ctx, req, "") }) if err != nil { return nil, err } defer resp.Close() var ( subscriptions []Subscription subscriptionItems []SubscriptionJSON ) switch resp.Status().GetCode() { case http.StatusOK: err = json.NewDecoder(resp.RawData()).Decode(&subscriptionItems) if err != nil { err = fmt.Errorf(deserializeTopicsError, err) log.Error(err) return nil, err } subscriptions = make([]Subscription, len(subscriptionItems)) for i, si := range subscriptionItems { // Look for single route field and append it as a route struct. // This preserves backward compatibility. rules := make([]*Rule, len(si.Routes.Rules)+1) n := 0 for _, r := range si.Routes.Rules { rule, err := CreateRoutingRule(r.Match, r.Path) if err != nil { return nil, err } rules[n] = rule n++ } // If a default path is set, add a rule with a nil `Match`, // which is treated as `true` and always selected if // no previous rules match. if si.Routes.Default != "" { rules[n] = &Rule{ Path: si.Routes.Default, } n++ } else if si.Route != "" { rules[n] = &Rule{ Path: si.Route, } n++ } bulkSubscribe := &BulkSubscribe{ Enabled: si.BulkSubscribe.Enabled, MaxMessagesCount: si.BulkSubscribe.MaxMessagesCount, MaxAwaitDurationMs: si.BulkSubscribe.MaxAwaitDurationMs, } subscriptions[i] = Subscription{ PubsubName: si.PubsubName, Topic: si.Topic, Metadata: si.Metadata, DeadLetterTopic: si.DeadLetterTopic, Rules: rules[:n], BulkSubscribe: bulkSubscribe, } } case http.StatusNotFound: log.Debug(noSubscriptionsError) default: // Unexpected response: both GRPC and HTTP have to log the same level. log.Errorf("app returned http status code %v from subscription endpoint", resp.Status().GetCode()) } log.Debugf("app responded with subscriptions %v", subscriptions) return filterSubscriptions(subscriptions, log), nil } func filterSubscriptions(subscriptions []Subscription, log logger.Logger) []Subscription { i := 0 for _, s := range subscriptions { if len(s.Rules) == 0 { log.Warnf("topic %s has an empty routes. removing from subscriptions list", s.Topic) continue } subscriptions[i] = s i++ } return subscriptions[:i] } func GetSubscriptionsGRPC(ctx context.Context, channel runtimev1pb.AppCallbackClient, log logger.Logger, r resiliency.Provider) ([]Subscription, error) { policyRunner := resiliency.NewRunner[*runtimev1pb.ListTopicSubscriptionsResponse](ctx, r.BuiltInPolicy(resiliency.BuiltInInitializationRetries), ) resp, err := policyRunner(func(ctx context.Context) (*runtimev1pb.ListTopicSubscriptionsResponse, error) { rResp, rErr := channel.ListTopicSubscriptions(ctx, &emptypb.Empty{}) if rErr != nil { s, ok := status.FromError(rErr) if ok && s != nil { if s.Code() == codes.Unimplemented { log.Infof("pubsub subscriptions: gRPC app does not implement ListTopicSubscriptions") return new(runtimev1pb.ListTopicSubscriptionsResponse), nil } } } return rResp, rErr }) if err != nil { // Unexpected response: both GRPC and HTTP have to log the same level. 
log.Errorf(getTopicsError, err) return nil, err } var subscriptions []Subscription if len(resp.GetSubscriptions()) == 0 { log.Debug(noSubscriptionsError) } else { subscriptions = make([]Subscription, len(resp.GetSubscriptions())) for i, s := range resp.GetSubscriptions() { rules, err := parseRoutingRulesGRPC(s.GetRoutes()) if err != nil { return nil, err } var bulkSubscribe *BulkSubscribe if s.GetBulkSubscribe() != nil { bulkSubscribe = &BulkSubscribe{ Enabled: s.GetBulkSubscribe().GetEnabled(), MaxMessagesCount: s.GetBulkSubscribe().GetMaxMessagesCount(), MaxAwaitDurationMs: s.GetBulkSubscribe().GetMaxAwaitDurationMs(), } } subscriptions[i] = Subscription{ PubsubName: s.GetPubsubName(), Topic: s.GetTopic(), Metadata: s.GetMetadata(), DeadLetterTopic: s.GetDeadLetterTopic(), Rules: rules, BulkSubscribe: bulkSubscribe, } } } return subscriptions, nil } func parseRoutingRulesGRPC(routes *runtimev1pb.TopicRoutes) ([]*Rule, error) { if routes == nil { return []*Rule{{ Path: "", }}, nil } r := make([]*Rule, 0, len(routes.GetRules())+1) for _, rule := range routes.GetRules() { rr, err := CreateRoutingRule(rule.GetMatch(), rule.GetPath()) if err != nil { return nil, err } r = append(r, rr) } // If a default path is set, add a rule with a nil `Match`, // which is treated as `true` and always selected if // no previous rules match. if routes.GetDefault() != "" { r = append(r, &Rule{ Path: routes.GetDefault(), }) } // gRPC automatically specifies a default route // if none are returned. if len(r) == 0 { r = append(r, &Rule{ Path: "", }) } return r, nil } func CreateRoutingRule(match, path string) (*Rule, error) { var e *expr.Expr matchTrimmed := strings.TrimSpace(match) if matchTrimmed != "" { e = &expr.Expr{} if err := e.DecodeString(matchTrimmed); err != nil { return nil, err } } return &Rule{ Match: e, Path: path, }, nil } func GRPCEnvelopeFromSubscriptionMessage(ctx context.Context, msg *SubscribedMessage, log logger.Logger, tracingSpec *config.TracingSpec) (*runtimev1pb.TopicEventRequest, trace.Span, error) { cloudEvent := msg.CloudEvent envelope := &runtimev1pb.TopicEventRequest{ Id: ExtractCloudEventProperty(cloudEvent, contribpubsub.IDField), Source: ExtractCloudEventProperty(cloudEvent, contribpubsub.SourceField), DataContentType: ExtractCloudEventProperty(cloudEvent, contribpubsub.DataContentTypeField), Type: ExtractCloudEventProperty(cloudEvent, contribpubsub.TypeField), SpecVersion: ExtractCloudEventProperty(cloudEvent, contribpubsub.SpecVersionField), Topic: msg.Topic, PubsubName: msg.Metadata[MetadataKeyPubSub], Path: msg.Path, } if data, ok := cloudEvent[contribpubsub.DataBase64Field]; ok && data != nil { if dataAsString, ok := data.(string); ok { decoded, decodeErr := base64.StdEncoding.DecodeString(dataAsString) if decodeErr != nil { log.Debugf("unable to base64 decode cloudEvent field data_base64: %s", decodeErr) diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, msg.PubSub, strings.ToLower(string(contribpubsub.Retry)), "", msg.Topic, 0) return nil, nil, fmt.Errorf("error returned from app while processing pub/sub event: %w", rterrors.NewRetriable(decodeErr)) } envelope.Data = decoded } else { diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, msg.PubSub, strings.ToLower(string(contribpubsub.Retry)), "", msg.Topic, 0) return nil, nil, fmt.Errorf("error returned from app while processing pub/sub event: %w", rterrors.NewRetriable(errUnexpectedEnvelopeData)) } } else if data, ok := cloudEvent[contribpubsub.DataField]; ok && data != nil { envelope.Data = nil if 
contenttype.IsStringContentType(envelope.GetDataContentType()) { switch v := data.(type) { case string: envelope.Data = []byte(v) case []byte: envelope.Data = v default: diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, msg.PubSub, strings.ToLower(string(contribpubsub.Retry)), "", msg.Topic, 0) return nil, nil, fmt.Errorf("error returned from app while processing pub/sub event: %w", rterrors.NewRetriable(errUnexpectedEnvelopeData)) } } else if contenttype.IsJSONContentType(envelope.GetDataContentType()) || contenttype.IsCloudEventContentType(envelope.GetDataContentType()) { envelope.Data, _ = json.Marshal(data) } } var span trace.Span iTraceID := cloudEvent[contribpubsub.TraceParentField] if iTraceID == nil { iTraceID = cloudEvent[contribpubsub.TraceIDField] } if iTraceID != nil { if traceID, ok := iTraceID.(string); ok { sc, _ := diag.SpanContextFromW3CString(traceID) spanName := fmt.Sprintf("pubsub/%s", msg.Topic) // no ops if trace is off ctx, span = diag.StartInternalCallbackSpan(ctx, spanName, sc, tracingSpec) // span is nil if tracing is disabled (sampling rate is 0) if span != nil { ctx = diag.SpanContextToGRPCMetadata(ctx, span.SpanContext()) } } else { log.Warnf("ignored non-string traceid value: %v", iTraceID) } } extensions, extensionsErr := ExtractCloudEventExtensions(cloudEvent) if extensionsErr != nil { diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, msg.PubSub, strings.ToLower(string(contribpubsub.Retry)), "", msg.Topic, 0) return nil, nil, extensionsErr } envelope.Extensions = extensions return envelope, span, nil } func ExtractCloudEventProperty(cloudEvent map[string]any, property string) string { if cloudEvent == nil { return "" } iValue, ok := cloudEvent[property] if ok { if value, ok := iValue.(string); ok { return value } } return "" } func ExtractCloudEventExtensions(cloudEvent map[string]any) (*structpb.Struct, error) { // Assemble Cloud Event Extensions: // Create copy of the cloud event with duplicated data removed extensions := make(map[string]any) for key, value := range cloudEvent { if !cloudEventDuplicateKeys.Has(key) { extensions[key] = value } } extensionsStruct := structpb.Struct{} extensionBytes, jsonMarshalErr := json.Marshal(extensions) if jsonMarshalErr != nil { return &extensionsStruct, fmt.Errorf("error processing internal cloud event data: unable to marshal cloudEvent extensions: %s", jsonMarshalErr) } protoUnmarshalErr := protojson.Unmarshal(extensionBytes, &extensionsStruct) if protoUnmarshalErr != nil { return &extensionsStruct, fmt.Errorf("error processing internal cloud event data: unable to unmarshal cloudEvent extensions to proto struct: %s", protoUnmarshalErr) } return &extensionsStruct, nil } func FetchEntry(rawPayload bool, entry *contribpubsub.BulkMessageEntry, cloudEvent map[string]interface{}) (*runtimev1pb.TopicEventBulkRequestEntry, error) { if rawPayload { return &runtimev1pb.TopicEventBulkRequestEntry{ EntryId: entry.EntryId, Event: &runtimev1pb.TopicEventBulkRequestEntry_Bytes{Bytes: entry.Event}, //nolint:nosnakecase ContentType: entry.ContentType, Metadata: entry.Metadata, }, nil } else { eventLocal, err := extractCloudEvent(cloudEvent) if err != nil { return nil, err } return &runtimev1pb.TopicEventBulkRequestEntry{ EntryId: entry.EntryId, Event: &eventLocal, ContentType: entry.ContentType, Metadata: entry.Metadata, }, nil } } func extractCloudEvent(event map[string]interface{}) (runtimev1pb.TopicEventBulkRequestEntry_CloudEvent, error) { //nolint:nosnakecase envelope := &runtimev1pb.TopicEventCERequest{ Id: 
ExtractCloudEventProperty(event, contribpubsub.IDField), Source: ExtractCloudEventProperty(event, contribpubsub.SourceField), DataContentType: ExtractCloudEventProperty(event, contribpubsub.DataContentTypeField), Type: ExtractCloudEventProperty(event, contribpubsub.TypeField), SpecVersion: ExtractCloudEventProperty(event, contribpubsub.SpecVersionField), } if data, ok := event[contribpubsub.DataField]; ok && data != nil { envelope.Data = nil if contenttype.IsStringContentType(envelope.GetDataContentType()) { switch v := data.(type) { case string: envelope.Data = []byte(v) case []byte: envelope.Data = v default: return runtimev1pb.TopicEventBulkRequestEntry_CloudEvent{}, errUnexpectedEnvelopeData //nolint:nosnakecase } } else if contenttype.IsJSONContentType(envelope.GetDataContentType()) || contenttype.IsCloudEventContentType(envelope.GetDataContentType()) { envelope.Data, _ = json.Marshal(data) } } extensions, extensionsErr := ExtractCloudEventExtensions(event) if extensionsErr != nil { return runtimev1pb.TopicEventBulkRequestEntry_CloudEvent{}, extensionsErr } envelope.Extensions = extensions return runtimev1pb.TopicEventBulkRequestEntry_CloudEvent{CloudEvent: envelope}, nil //nolint:nosnakecase }
mikeee/dapr
pkg/runtime/pubsub/subscriptions.go
GO
mit
15,813
package pubsub import ( "context" "encoding/json" "errors" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/emptypb" subscriptionsapiV2alpha1 "github.com/dapr/dapr/pkg/apis/subscriptions/v2alpha1" "github.com/dapr/dapr/pkg/channel" invokev1 "github.com/dapr/dapr/pkg/messaging/v1" runtimev1pb "github.com/dapr/dapr/pkg/proto/runtime/v1" "github.com/dapr/dapr/pkg/resiliency" "github.com/dapr/kit/logger" ) var log = logger.NewLogger("dapr.test") func TestFilterSubscriptions(t *testing.T) { subs := []Subscription{ { Topic: "topic0", Rules: []*Rule{ { Path: "topic0", }, }, }, { Topic: "topic1", }, { Topic: "topic1", Rules: []*Rule{ { Path: "custom/topic1", }, }, }, } subs = filterSubscriptions(subs, log) if assert.Len(t, subs, 2) { assert.Equal(t, "topic0", subs[0].Topic) assert.Equal(t, "topic1", subs[1].Topic) if assert.Len(t, subs[1].Rules, 1) { assert.Equal(t, "custom/topic1", subs[1].Rules[0].Path) } } } type mockUnstableHTTPSubscriptions struct { channel.AppChannel callCount int alwaysError bool successThreshold int } func (m *mockUnstableHTTPSubscriptions) InvokeMethod(ctx context.Context, req *invokev1.InvokeMethodRequest, appID string) (*invokev1.InvokeMethodResponse, error) { if m.alwaysError { return nil, errors.New("error") } m.callCount++ if m.callCount < m.successThreshold { return nil, errors.New("connection refused") } subs := []SubscriptionJSON{ { PubsubName: "pubsub", Topic: "topic1", Metadata: map[string]string{ "testName": "testValue", }, Routes: RoutesJSON{ Rules: []*RuleJSON{ { Match: `event.type == "myevent.v3"`, Path: "myroute.v3", }, { Match: `event.type == "myevent.v2"`, Path: "myroute.v2", }, }, Default: "myroute", }, }, } responseBytes, _ := json.Marshal(subs) response := invokev1.NewInvokeMethodResponse(200, "OK", nil). WithRawDataBytes(responseBytes). WithContentType("application/json") return response, nil } type mockHTTPSubscriptions struct { channel.AppChannel } func (m *mockHTTPSubscriptions) InvokeMethod(ctx context.Context, req *invokev1.InvokeMethodRequest, appID string) (*invokev1.InvokeMethodResponse, error) { subs := []SubscriptionJSON{ { PubsubName: "pubsub", Topic: "topic1", Metadata: map[string]string{ "testName": "testValue", }, Routes: RoutesJSON{ Rules: []*RuleJSON{ { Match: `event.type == "myevent.v3"`, Path: "myroute.v3", }, { Match: `event.type == "myevent.v2"`, Path: "myroute.v2", }, }, Default: "myroute", }, }, } responseBytes, _ := json.Marshal(subs) response := invokev1.NewInvokeMethodResponse(200, "OK", nil). WithRawDataBytes(responseBytes). 
WithContentType("application/json") return response, nil } func TestHTTPSubscriptions(t *testing.T) { t.Run("topics received, no errors", func(t *testing.T) { m := mockHTTPSubscriptions{} subs, err := GetSubscriptionsHTTP(context.TODO(), &m, log, resiliency.FromConfigurations(log)) require.NoError(t, err) if assert.Len(t, subs, 1) { assert.Equal(t, "topic1", subs[0].Topic) if assert.Len(t, subs[0].Rules, 3) { assert.Equal(t, "myroute.v3", subs[0].Rules[0].Path) assert.Equal(t, "myroute.v2", subs[0].Rules[1].Path) assert.Equal(t, "myroute", subs[0].Rules[2].Path) } assert.Equal(t, "pubsub", subs[0].PubsubName) assert.Equal(t, "testValue", subs[0].Metadata["testName"]) } }) t.Run("error from app, success after retries", func(t *testing.T) { m := mockUnstableHTTPSubscriptions{ successThreshold: 3, } subs, err := GetSubscriptionsHTTP(context.TODO(), &m, log, resiliency.FromConfigurations(log)) assert.Equal(t, m.successThreshold, m.callCount) require.NoError(t, err) if assert.Len(t, subs, 1) { assert.Equal(t, "topic1", subs[0].Topic) if assert.Len(t, subs[0].Rules, 3) { assert.Equal(t, "myroute.v3", subs[0].Rules[0].Path) assert.Equal(t, "myroute.v2", subs[0].Rules[1].Path) assert.Equal(t, "myroute", subs[0].Rules[2].Path) } assert.Equal(t, "pubsub", subs[0].PubsubName) assert.Equal(t, "testValue", subs[0].Metadata["testName"]) } }) t.Run("error from app, retries exhausted", func(t *testing.T) { m := mockUnstableHTTPSubscriptions{ alwaysError: true, } _, err := GetSubscriptionsHTTP(context.TODO(), &m, log, resiliency.FromConfigurations(log)) require.Error(t, err) }) t.Run("error from app, success after retries with resiliency", func(t *testing.T) { m := mockUnstableHTTPSubscriptions{ successThreshold: 3, } subs, err := GetSubscriptionsHTTP(context.TODO(), &m, log, resiliency.FromConfigurations(log)) assert.Equal(t, m.successThreshold, m.callCount) require.NoError(t, err) if assert.Len(t, subs, 1) { assert.Equal(t, "topic1", subs[0].Topic) if assert.Len(t, subs[0].Rules, 3) { assert.Equal(t, "myroute.v3", subs[0].Rules[0].Path) assert.Equal(t, "myroute.v2", subs[0].Rules[1].Path) assert.Equal(t, "myroute", subs[0].Rules[2].Path) } assert.Equal(t, "pubsub", subs[0].PubsubName) assert.Equal(t, "testValue", subs[0].Metadata["testName"]) } }) t.Run("error from app, retries exhausted with resiliency", func(t *testing.T) { m := mockUnstableHTTPSubscriptions{ alwaysError: true, } _, err := GetSubscriptionsHTTP(context.TODO(), &m, log, resiliency.FromConfigurations(log)) require.Error(t, err) }) } type mockUnstableGRPCSubscriptions struct { runtimev1pb.AppCallbackClient callCount int successThreshold int unimplemented bool } func (m *mockUnstableGRPCSubscriptions) ListTopicSubscriptions(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*runtimev1pb.ListTopicSubscriptionsResponse, error) { m.callCount++ if m.unimplemented { return nil, status.Error(codes.Unimplemented, "Unimplemented method") } if m.callCount < m.successThreshold { return nil, errors.New("connection refused") } return &runtimev1pb.ListTopicSubscriptionsResponse{ Subscriptions: []*runtimev1pb.TopicSubscription{ { PubsubName: "pubsub", Topic: "topic1", Metadata: map[string]string{ "testName": "testValue", }, Routes: &runtimev1pb.TopicRoutes{ Rules: []*runtimev1pb.TopicRule{ { Match: `event.type == "myevent.v3"`, Path: "myroute.v3", }, { Match: `event.type == "myevent.v2"`, Path: "myroute.v2", }, }, Default: "myroute", }, }, }, }, nil } type mockGRPCSubscriptions struct { runtimev1pb.AppCallbackClient } func (m 
*mockGRPCSubscriptions) ListTopicSubscriptions(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*runtimev1pb.ListTopicSubscriptionsResponse, error) { return &runtimev1pb.ListTopicSubscriptionsResponse{ Subscriptions: []*runtimev1pb.TopicSubscription{ { PubsubName: "pubsub", Topic: "topic1", Metadata: map[string]string{ "testName": "testValue", }, Routes: &runtimev1pb.TopicRoutes{ Rules: []*runtimev1pb.TopicRule{ { Match: `event.type == "myevent.v3"`, Path: "myroute.v3", }, { Match: `event.type == "myevent.v2"`, Path: "myroute.v2", }, }, Default: "myroute", }, }, }, }, nil } func TestGRPCSubscriptions(t *testing.T) { t.Run("topics received, no errors", func(t *testing.T) { m := mockGRPCSubscriptions{} subs, err := GetSubscriptionsGRPC(context.TODO(), &m, log, resiliency.FromConfigurations(log)) require.NoError(t, err) if assert.Len(t, subs, 1) { assert.Equal(t, "topic1", subs[0].Topic) if assert.Len(t, subs[0].Rules, 3) { assert.Equal(t, "myroute.v3", subs[0].Rules[0].Path) assert.Equal(t, "myroute.v2", subs[0].Rules[1].Path) assert.Equal(t, "myroute", subs[0].Rules[2].Path) } assert.Equal(t, "pubsub", subs[0].PubsubName) assert.Equal(t, "testValue", subs[0].Metadata["testName"]) } }) t.Run("error from app, success after retries", func(t *testing.T) { m := mockUnstableGRPCSubscriptions{ successThreshold: 3, } subs, err := GetSubscriptionsGRPC(context.TODO(), &m, log, resiliency.FromConfigurations(log)) assert.Equal(t, m.successThreshold, m.callCount) require.NoError(t, err) if assert.Len(t, subs, 1) { assert.Equal(t, "topic1", subs[0].Topic) if assert.Len(t, subs[0].Rules, 3) { assert.Equal(t, "myroute.v3", subs[0].Rules[0].Path) assert.Equal(t, "myroute.v2", subs[0].Rules[1].Path) assert.Equal(t, "myroute", subs[0].Rules[2].Path) } assert.Equal(t, "pubsub", subs[0].PubsubName) assert.Equal(t, "testValue", subs[0].Metadata["testName"]) } }) t.Run("server is running, app returns unimplemented error, no retries", func(t *testing.T) { m := mockUnstableGRPCSubscriptions{ successThreshold: 3, unimplemented: true, } subs, err := GetSubscriptionsGRPC(context.TODO(), &m, log, resiliency.FromConfigurations(log)) // not implemented error is not retried and is returned as "zero" subscriptions require.NoError(t, err) assert.Equal(t, 1, m.callCount) assert.Empty(t, subs) }) t.Run("error from app, success after retries with resiliency", func(t *testing.T) { m := mockUnstableGRPCSubscriptions{ successThreshold: 3, } subs, err := GetSubscriptionsGRPC(context.TODO(), &m, log, resiliency.FromConfigurations(log)) assert.Equal(t, m.successThreshold, m.callCount) require.NoError(t, err) if assert.Len(t, subs, 1) { assert.Equal(t, "topic1", subs[0].Topic) if assert.Len(t, subs[0].Rules, 3) { assert.Equal(t, "myroute.v3", subs[0].Rules[0].Path) assert.Equal(t, "myroute.v2", subs[0].Rules[1].Path) assert.Equal(t, "myroute", subs[0].Rules[2].Path) } assert.Equal(t, "pubsub", subs[0].PubsubName) assert.Equal(t, "testValue", subs[0].Metadata["testName"]) } }) t.Run("server is running, app returns unimplemented error, no retries with resiliency", func(t *testing.T) { m := mockUnstableGRPCSubscriptions{ successThreshold: 3, unimplemented: true, } subs, err := GetSubscriptionsGRPC(context.TODO(), &m, log, resiliency.FromConfigurations(log)) // not implemented error is not retried and is returned as "zero" subscriptions require.NoError(t, err) assert.Equal(t, 1, m.callCount) assert.Empty(t, subs) }) } func TestGetRuleMatchString(t *testing.T) { cases := []subscriptionsapiV2alpha1.Rule{ { Match: 
`event.type == "myevent.v3"`, Path: "myroute.v3", }, { Match: `event.type == "myevent.v2"`, Path: "myroute.v2", }, { Match: "", Path: "myroute.v1", }, } for _, v := range cases { rule, err := CreateRoutingRule(v.Match, v.Path) require.NoError(t, err) assert.Equal(t, v.Match, rule.Match.String()) } }
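// Minimal additional sketch (a hypothetical test mirroring TestGetRuleMatchString
// above): an empty match expression is how a default route is expressed, so the
// rule should still be created and keep its path.
func TestCreateRoutingRuleDefaultRoute(t *testing.T) {
	rule, err := CreateRoutingRule("", "myroute.default")
	require.NoError(t, err)
	assert.Equal(t, "", rule.Match.String())
	assert.Equal(t, "myroute.default", rule.Path)
}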
mikeee/dapr
pkg/runtime/pubsub/subscriptions_test.go
GO
mit
11,077
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package registry

import (
	"github.com/dapr/dapr/pkg/components/bindings"
	"github.com/dapr/dapr/pkg/components/configuration"
	"github.com/dapr/dapr/pkg/components/crypto"
	"github.com/dapr/dapr/pkg/components/lock"
	"github.com/dapr/dapr/pkg/components/middleware/http"
	"github.com/dapr/dapr/pkg/components/nameresolution"
	"github.com/dapr/dapr/pkg/components/pubsub"
	"github.com/dapr/dapr/pkg/components/secretstores"
	"github.com/dapr/dapr/pkg/components/state"
	wfbe "github.com/dapr/dapr/pkg/components/wfbackend"
	"github.com/dapr/dapr/pkg/components/workflows"
)

// Options contains the options used to configure the registries.
type Options struct {
	secret             *secretstores.Registry
	state              *state.Registry
	config             *configuration.Registry
	lock               *lock.Registry
	pubsub             *pubsub.Registry
	nameResolution     *nameresolution.Registry
	binding            *bindings.Registry
	httpMiddleware     *http.Registry
	workflow           *workflows.Registry
	workflowBackend    *wfbe.Registry
	crypto             *crypto.Registry
	componentsCallback ComponentsCallback
}

func NewOptions() *Options {
	return &Options{
		secret:         secretstores.DefaultRegistry,
		state:          state.DefaultRegistry,
		config:         configuration.DefaultRegistry,
		lock:           lock.DefaultRegistry,
		pubsub:         pubsub.DefaultRegistry,
		nameResolution: nameresolution.DefaultRegistry,
		binding:        bindings.DefaultRegistry,
		httpMiddleware: http.DefaultRegistry,
		crypto:         crypto.DefaultRegistry,
	}
}

// WithSecretStores adds secret store components to the runtime.
func (o *Options) WithSecretStores(registry *secretstores.Registry) *Options {
	o.secret = registry
	return o
}

// WithStateStores adds state store components to the runtime.
func (o *Options) WithStateStores(registry *state.Registry) *Options {
	o.state = registry
	return o
}

// WithConfigurations adds configuration store components to the runtime.
func (o *Options) WithConfigurations(registry *configuration.Registry) *Options {
	o.config = registry
	return o
}

// WithLocks adds lock store components to the runtime.
func (o *Options) WithLocks(registry *lock.Registry) *Options {
	o.lock = registry
	return o
}

// WithPubSubs adds pubsub components to the runtime.
func (o *Options) WithPubSubs(registry *pubsub.Registry) *Options {
	o.pubsub = registry
	return o
}

// WithNameResolutions adds name resolution components to the runtime.
func (o *Options) WithNameResolutions(registry *nameresolution.Registry) *Options {
	o.nameResolution = registry
	return o
}

// WithBindings adds binding components to the runtime.
func (o *Options) WithBindings(registry *bindings.Registry) *Options {
	o.binding = registry
	return o
}

// WithHTTPMiddlewares adds HTTP middleware components to the runtime.
func (o *Options) WithHTTPMiddlewares(registry *http.Registry) *Options {
	o.httpMiddleware = registry
	return o
}

// WithWorkflows adds workflow components to the runtime.
func (o *Options) WithWorkflows(registry *workflows.Registry) *Options {
	o.workflow = registry
	return o
}

// WithWorkflowBackends adds workflow backend components to the runtime.
func (o *Options) WithWorkflowBackends(registry *wfbe.Registry) *Options {
	o.workflowBackend = registry
	return o
}

// WithCryptoProviders adds crypto components to the runtime.
func (o *Options) WithCryptoProviders(registry *crypto.Registry) *Options {
	o.crypto = registry
	return o
}

// WithComponentsCallback sets the components callback for applications that embed Dapr.
func (o *Options) WithComponentsCallback(componentsCallback ComponentsCallback) *Options {
	o.componentsCallback = componentsCallback
	return o
}
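// Usage sketch (illustrative; the embedding application and callback body are
// hypothetical): options are built fluently and handed to New, with the
// DefaultRegistry values filling in anything not overridden.
//
//	opts := NewOptions().
//		WithStateStores(state.DefaultRegistry).
//		WithComponentsCallback(func(reg ComponentRegistry) error {
//			// Inspect reg.DirectMessaging or reg.CompStore here.
//			return nil
//		})
//	registry := New(opts)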
mikeee/dapr
pkg/runtime/registry/options.go
GO
mit
4,268
/* Copyright 2021 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package registry import ( "github.com/dapr/dapr/pkg/components/bindings" "github.com/dapr/dapr/pkg/components/configuration" "github.com/dapr/dapr/pkg/components/crypto" "github.com/dapr/dapr/pkg/components/lock" "github.com/dapr/dapr/pkg/components/middleware/http" "github.com/dapr/dapr/pkg/components/nameresolution" "github.com/dapr/dapr/pkg/components/pubsub" "github.com/dapr/dapr/pkg/components/secretstores" "github.com/dapr/dapr/pkg/components/state" wfbe "github.com/dapr/dapr/pkg/components/wfbackend" "github.com/dapr/dapr/pkg/components/workflows" messagingv1 "github.com/dapr/dapr/pkg/messaging/v1" "github.com/dapr/dapr/pkg/runtime/compstore" ) type ComponentsCallback func(components ComponentRegistry) error type ComponentRegistry struct { DirectMessaging messagingv1.DirectMessaging CompStore *compstore.ComponentStore } // Registry is a collection of component registries. type Registry struct { secret *secretstores.Registry state *state.Registry config *configuration.Registry lock *lock.Registry pubsub *pubsub.Registry nameResolution *nameresolution.Registry binding *bindings.Registry httpMiddleware *http.Registry workflow *workflows.Registry workflowBackend *wfbe.Registry crypto *crypto.Registry componentCb ComponentsCallback } func New(opts *Options) *Registry { return &Registry{ secret: opts.secret, state: opts.state, config: opts.config, lock: opts.lock, pubsub: opts.pubsub, nameResolution: opts.nameResolution, binding: opts.binding, httpMiddleware: opts.httpMiddleware, workflow: opts.workflow, workflowBackend: opts.workflowBackend, crypto: opts.crypto, componentCb: opts.componentsCallback, } } func (r *Registry) SecretStores() *secretstores.Registry { return r.secret } func (r *Registry) StateStores() *state.Registry { return r.state } func (r *Registry) Configurations() *configuration.Registry { return r.config } func (r *Registry) Locks() *lock.Registry { return r.lock } func (r *Registry) PubSubs() *pubsub.Registry { return r.pubsub } func (r *Registry) NameResolutions() *nameresolution.Registry { return r.nameResolution } func (r *Registry) Bindings() *bindings.Registry { return r.binding } func (r *Registry) HTTPMiddlewares() *http.Registry { return r.httpMiddleware } func (r *Registry) Workflows() *workflows.Registry { return r.workflow } func (r *Registry) WorkflowBackends() *wfbe.Registry { return r.workflowBackend } func (r *Registry) Crypto() *crypto.Registry { return r.crypto } func (r *Registry) ComponentsCallback() ComponentsCallback { return r.componentCb }
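// Usage sketch (illustrative): consumers pull the sub-registry they need from
// a built Registry rather than holding on to Options. If a components callback
// was set, the runtime invokes it later with the live DirectMessaging and
// ComponentStore (see appHealthReadyInit in runtime.go).
//
//	reg := New(NewOptions())
//	pubsubRegistry := reg.PubSubs() // *pubsub.Registry; pubsub.DefaultRegistry unless overridden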
mikeee/dapr
pkg/runtime/registry/registry.go
GO
mit
3,333
/* Copyright 2021 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package runtime import ( "context" "crypto/tls" "errors" "fmt" "io" "net" "os" "reflect" "runtime" "strconv" "strings" "sync" "time" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" otlptracegrpc "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" otlptracehttp "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" "go.opentelemetry.io/otel/exporters/zipkin" "go.opentelemetry.io/otel/sdk/resource" sdktrace "go.opentelemetry.io/otel/sdk/trace" semconv "go.opentelemetry.io/otel/semconv/v1.10.0" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/clock" nr "github.com/dapr/components-contrib/nameresolution" "github.com/dapr/components-contrib/state" "github.com/dapr/dapr/pkg/actors" "github.com/dapr/dapr/pkg/api/grpc" "github.com/dapr/dapr/pkg/api/grpc/manager" "github.com/dapr/dapr/pkg/api/http" "github.com/dapr/dapr/pkg/api/universal" compapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1" endpointapi "github.com/dapr/dapr/pkg/apis/httpEndpoint/v1alpha1" subapi "github.com/dapr/dapr/pkg/apis/subscriptions/v2alpha1" "github.com/dapr/dapr/pkg/apphealth" "github.com/dapr/dapr/pkg/components" "github.com/dapr/dapr/pkg/components/pluggable" secretstoresLoader "github.com/dapr/dapr/pkg/components/secretstores" "github.com/dapr/dapr/pkg/config" "github.com/dapr/dapr/pkg/config/protocol" diag "github.com/dapr/dapr/pkg/diagnostics" diagUtils "github.com/dapr/dapr/pkg/diagnostics/utils" "github.com/dapr/dapr/pkg/internal/loader" "github.com/dapr/dapr/pkg/internal/loader/disk" "github.com/dapr/dapr/pkg/internal/loader/kubernetes" "github.com/dapr/dapr/pkg/messaging" invokev1 "github.com/dapr/dapr/pkg/messaging/v1" middlewarehttp "github.com/dapr/dapr/pkg/middleware/http" "github.com/dapr/dapr/pkg/modes" "github.com/dapr/dapr/pkg/operator/client" "github.com/dapr/dapr/pkg/outbox" operatorv1pb "github.com/dapr/dapr/pkg/proto/operator/v1" "github.com/dapr/dapr/pkg/resiliency" "github.com/dapr/dapr/pkg/runtime/authorizer" "github.com/dapr/dapr/pkg/runtime/channels" "github.com/dapr/dapr/pkg/runtime/compstore" rterrors "github.com/dapr/dapr/pkg/runtime/errors" "github.com/dapr/dapr/pkg/runtime/hotreload" "github.com/dapr/dapr/pkg/runtime/meta" "github.com/dapr/dapr/pkg/runtime/processor" "github.com/dapr/dapr/pkg/runtime/processor/wfbackend" "github.com/dapr/dapr/pkg/runtime/processor/workflow" "github.com/dapr/dapr/pkg/runtime/pubsub" "github.com/dapr/dapr/pkg/runtime/pubsub/publisher" "github.com/dapr/dapr/pkg/runtime/pubsub/streamer" "github.com/dapr/dapr/pkg/runtime/registry" "github.com/dapr/dapr/pkg/runtime/wfengine" "github.com/dapr/dapr/pkg/security" "github.com/dapr/dapr/utils" "github.com/dapr/kit/concurrency" "github.com/dapr/kit/logger" ) var log = logger.NewLogger("dapr.runtime") // DaprRuntime holds all the core components of the runtime. 
type DaprRuntime struct { runtimeConfig *internalConfig globalConfig *config.Configuration accessControlList *config.AccessControlList grpc *manager.Manager channels *channels.Channels appConfig config.ApplicationConfig directMessaging invokev1.DirectMessaging actor actors.ActorRuntime nameResolver nr.Resolver hostAddress string actorStateStoreLock sync.RWMutex namespace string podName string daprUniversal *universal.Universal daprHTTPAPI http.API daprGRPCAPI grpc.API operatorClient operatorv1pb.OperatorClient isAppHealthy chan struct{} appHealth *apphealth.AppHealth appHealthReady func(context.Context) error // Invoked the first time the app health becomes ready appHealthLock sync.Mutex httpMiddleware *middlewarehttp.HTTP compStore *compstore.ComponentStore pubsubAdapter pubsub.Adapter pubsubAdapterStreamer pubsub.AdapterStreamer outbox outbox.Outbox meta *meta.Meta processor *processor.Processor authz *authorizer.Authorizer sec security.Handler runnerCloser *concurrency.RunnerCloserManager clock clock.Clock reloader *hotreload.Reloader // Used for testing. initComplete chan struct{} proxy messaging.Proxy resiliency resiliency.Provider tracerProvider *sdktrace.TracerProvider workflowEngine *wfengine.WorkflowEngine wg sync.WaitGroup } // newDaprRuntime returns a new runtime with the given runtime config and global config. func newDaprRuntime(ctx context.Context, sec security.Handler, runtimeConfig *internalConfig, globalConfig *config.Configuration, accessControlList *config.AccessControlList, resiliencyProvider resiliency.Provider, ) (*DaprRuntime, error) { compStore := compstore.New() namespace := security.CurrentNamespace() podName := getPodName() meta := meta.New(meta.Options{ ID: runtimeConfig.id, PodName: podName, Namespace: namespace, StrictSandbox: globalConfig.Spec.WasmSpec.GetStrictSandbox(), Mode: runtimeConfig.mode, }) operatorClient, err := getOperatorClient(ctx, sec, runtimeConfig) if err != nil { return nil, err } grpc := createGRPCManager(sec, runtimeConfig, globalConfig) authz := authorizer.New(authorizer.Options{ ID: runtimeConfig.id, GlobalConfig: globalConfig, }) httpMiddleware := middlewarehttp.New() httpMiddlewareApp := httpMiddleware.BuildPipelineFromSpec("app", globalConfig.Spec.AppHTTPPipelineSpec) channels := channels.New(channels.Options{ Registry: runtimeConfig.registry, ComponentStore: compStore, Meta: meta, AppConnectionConfig: runtimeConfig.appConnectionConfig, GlobalConfig: globalConfig, MaxRequestBodySize: runtimeConfig.maxRequestBodySize, ReadBufferSize: runtimeConfig.readBufferSize, GRPC: grpc, AppMiddleware: httpMiddlewareApp, }) pubsubAdapter := publisher.New(publisher.Options{ AppID: runtimeConfig.id, Namespace: namespace, Resiliency: resiliencyProvider, GetPubSubFn: compStore.GetPubSub, }) pubsubAdapterStreamer := streamer.New(streamer.Options{ TracingSpec: globalConfig.Spec.TracingSpec, }) outbox := pubsub.NewOutbox(pubsub.OptionsOutbox{ Publisher: pubsubAdapter, GetPubsubFn: compStore.GetPubSubComponent, GetStateFn: compStore.GetStateStore, CloudEventExtractorFn: pubsub.ExtractCloudEventProperty, Namespace: namespace, }) processor := processor.New(processor.Options{ ID: runtimeConfig.id, Namespace: namespace, IsHTTP: runtimeConfig.appConnectionConfig.Protocol.IsHTTP(), ActorsEnabled: len(runtimeConfig.actorsService) > 0, Registry: runtimeConfig.registry, ComponentStore: compStore, Meta: meta, GlobalConfig: globalConfig, Resiliency: resiliencyProvider, Mode: runtimeConfig.mode, PodName: podName, OperatorClient: operatorClient, GRPC: grpc, Channels: 
channels, MiddlewareHTTP: httpMiddleware, Security: sec, Outbox: outbox, Adapter: pubsubAdapter, AdapterStreamer: pubsubAdapterStreamer, }) var reloader *hotreload.Reloader switch runtimeConfig.mode { case modes.KubernetesMode: reloader = hotreload.NewOperator(hotreload.OptionsReloaderOperator{ PodName: podName, Namespace: namespace, Client: operatorClient, Config: globalConfig, ComponentStore: compStore, Authorizer: authz, Processor: processor, }) case modes.StandaloneMode: reloader, err = hotreload.NewDisk(hotreload.OptionsReloaderDisk{ Config: globalConfig, Dirs: runtimeConfig.standalone.ResourcesPath, ComponentStore: compStore, Authorizer: authz, Processor: processor, AppID: runtimeConfig.id, }) if err != nil { return nil, err } default: return nil, fmt.Errorf("invalid mode: %s", runtimeConfig.mode) } rt := &DaprRuntime{ runtimeConfig: runtimeConfig, globalConfig: globalConfig, accessControlList: accessControlList, grpc: grpc, tracerProvider: nil, resiliency: resiliencyProvider, appHealthReady: nil, compStore: compStore, pubsubAdapter: pubsubAdapter, pubsubAdapterStreamer: pubsubAdapterStreamer, outbox: outbox, meta: meta, operatorClient: operatorClient, channels: channels, sec: sec, processor: processor, authz: authz, reloader: reloader, namespace: namespace, podName: podName, initComplete: make(chan struct{}), isAppHealthy: make(chan struct{}), clock: new(clock.RealClock), httpMiddleware: httpMiddleware, } close(rt.isAppHealthy) var gracePeriod *time.Duration if duration := runtimeConfig.gracefulShutdownDuration; duration > 0 { gracePeriod = &duration } rt.runnerCloser = concurrency.NewRunnerCloserManager(gracePeriod, rt.runtimeConfig.metricsExporter.Run, rt.processor.Process, rt.reloader.Run, func(ctx context.Context) error { start := time.Now() log.Infof("%s mode configured", rt.runtimeConfig.mode) log.Infof("app id: %s", rt.runtimeConfig.id) if err := rt.initRuntime(ctx); err != nil { return err } d := time.Since(start).Milliseconds() log.Infof("dapr initialized. Status: Running. Init Elapsed %vms", d) if rt.daprHTTPAPI != nil { // Setting the status only when runtime is initialized. rt.daprHTTPAPI.MarkStatusAsReady() } close(rt.initComplete) <-ctx.Done() return nil }, ) if err := rt.runnerCloser.AddCloser( func() error { log.Info("Dapr is shutting down") comps := rt.compStore.ListComponents() errCh := make(chan error) for _, comp := range comps { go func(comp compapi.Component) { log.Infof("Shutting down component %s", comp.LogName()) errCh <- rt.processor.Close(comp) }(comp) } errs := make([]error, len(comps)+1) for i := range comps { errs[i] = <-errCh } rt.wg.Wait() log.Info("Dapr runtime stopped") errs[len(comps)] = rt.cleanSockets() return errors.Join(errs...) }, rt.stopWorkflow, rt.stopActor, rt.stopTrace, rt.grpc, ); err != nil { return nil, err } return rt, nil } // Run performs initialization of the runtime with the runtime and global configurations. func (a *DaprRuntime) Run(parentCtx context.Context) error { ctx := parentCtx if a.runtimeConfig.blockShutdownDuration != nil { // Override context with Background. Runner context will be cancelled when // blocking graceful shutdown returns. ctx = context.Background() a.runnerCloser.Add(func(ctx context.Context) error { select { case <-parentCtx.Done(): case <-ctx.Done(): // Return nil as another routine has returned, not due to an interrupt. 
return nil
			}

			log.Infof("Blocking graceful shutdown for %s or until app reports unhealthy...", *a.runtimeConfig.blockShutdownDuration)

			// Stop reading from subscriptions and input bindings forever while
			// blocking graceful shutdown. This will prevent incoming messages from
			// being processed, but allow outgoing APIs to be processed.
			a.processor.Subscriber().StopAllSubscriptionsForever()
			a.processor.Binding().StopReadingFromBindings(true)

			select {
			case <-a.clock.After(*a.runtimeConfig.blockShutdownDuration):
				log.Info("Block shutdown period expired, entering shutdown...")
			case <-a.isAppHealthy:
				log.Info("App reported unhealthy, entering shutdown...")
			}
			return nil
		})
	}
	return a.runnerCloser.Run(ctx)
}

func getPodName() string {
	return os.Getenv("POD_NAME")
}

func getOperatorClient(ctx context.Context, sec security.Handler, cfg *internalConfig) (operatorv1pb.OperatorClient, error) {
	// Get the operator client only if we're running in Kubernetes and if we need it
	if cfg.mode != modes.KubernetesMode {
		return nil, nil
	}

	client, _, err := client.GetOperatorClient(ctx, cfg.kubernetes.ControlPlaneAddress, sec)
	if err != nil {
		return nil, fmt.Errorf("error creating operator client: %w", err)
	}

	return client, nil
}

// setupTracing sets up the trace exporters. Technically we don't need to pass
// `hostAddress` in, but we do so here to explicitly call out the dependency on
// having `hostAddress` computed.
func (a *DaprRuntime) setupTracing(ctx context.Context, hostAddress string, tpStore tracerProviderStore) error {
	tracingSpec := a.globalConfig.GetTracingSpec()

	// Register the stdout trace exporter if the user wants to debug requests or log as Info level.
	if tracingSpec.Stdout {
		tpStore.RegisterExporter(diagUtils.NewStdOutExporter())
	}

	// Register the Zipkin trace exporter if ZipkinSpec is specified
	if tracingSpec.Zipkin != nil && tracingSpec.Zipkin.EndpointAddress != "" {
		zipkinExporter, err := zipkin.New(tracingSpec.Zipkin.EndpointAddress)
		if err != nil {
			return err
		}
		tpStore.RegisterExporter(zipkinExporter)
	}

	// Register the OTel trace exporter if OtelSpec is specified
	if tracingSpec.Otel != nil && tracingSpec.Otel.EndpointAddress != "" && tracingSpec.Otel.Protocol != "" {
		endpoint := tracingSpec.Otel.EndpointAddress
		protocol := tracingSpec.Otel.Protocol
		if protocol != "http" && protocol != "grpc" {
			return fmt.Errorf("invalid protocol %v provided for Otel endpoint", protocol)
		}

		var client otlptrace.Client
		if protocol == "http" {
			clientOptions := []otlptracehttp.Option{otlptracehttp.WithEndpoint(endpoint)}
			if !tracingSpec.Otel.GetIsSecure() {
				clientOptions = append(clientOptions, otlptracehttp.WithInsecure())
			}
			client = otlptracehttp.NewClient(clientOptions...)
		} else {
			clientOptions := []otlptracegrpc.Option{otlptracegrpc.WithEndpoint(endpoint)}
			if !tracingSpec.Otel.GetIsSecure() {
				clientOptions = append(clientOptions, otlptracegrpc.WithInsecure())
			}
			client = otlptracegrpc.NewClient(clientOptions...)
} otelExporter, err := otlptrace.New(ctx, client) if err != nil { return err } tpStore.RegisterExporter(otelExporter) } if !tpStore.HasExporter() && tracingSpec.SamplingRate != "" { tpStore.RegisterExporter(diagUtils.NewNullExporter()) } // Register a resource r := resource.NewWithAttributes( semconv.SchemaURL, semconv.ServiceNameKey.String(getOtelServiceName(a.runtimeConfig.id)), ) tpStore.RegisterResource(r) // Register a trace sampler based on Sampling settings daprTraceSampler := diag.NewDaprTraceSampler(tracingSpec.SamplingRate) log.Infof("Dapr trace sampler initialized: %s", daprTraceSampler.Description()) tpStore.RegisterSampler(daprTraceSampler) a.tracerProvider = tpStore.RegisterTracerProvider() return nil } func getOtelServiceName(fallback string) string { if value := os.Getenv("OTEL_SERVICE_NAME"); value != "" { return value } return fallback } func (a *DaprRuntime) initRuntime(ctx context.Context) error { var err error if a.hostAddress, err = utils.GetHostAddress(); err != nil { return fmt.Errorf("failed to determine host address: %w", err) } if err = a.setupTracing(ctx, a.hostAddress, newOpentelemetryTracerProviderStore()); err != nil { return fmt.Errorf("failed to setup tracing: %w", err) } // Register and initialize name resolution for service discovery. err = a.initNameResolution(ctx) if err != nil { log.Errorf(err.Error()) } // Start proxy a.initProxy() a.initDirectMessaging(a.nameResolver) a.initPluggableComponents(ctx) a.appendBuiltinSecretStore(ctx) err = a.loadComponents(ctx) if err != nil { return fmt.Errorf("failed to load components: %s", err) } a.flushOutstandingComponents(ctx) // Creating workflow engine after components are loaded wfe := wfengine.NewWorkflowEngine(a.runtimeConfig.id, a.globalConfig.GetWorkflowSpec(), a.processor.WorkflowBackend()) wfe.ConfigureGrpcExecutor() a.workflowEngine = wfe err = a.loadHTTPEndpoints(ctx) if err != nil { log.Warnf("failed to load HTTP endpoints: %s", err) } a.flushOutstandingHTTPEndpoints(ctx) err = a.loadDeclarativeSubscriptions(ctx) if err != nil { return fmt.Errorf("failed to load declarative subscriptions: %s", err) } if err = a.channels.Refresh(); err != nil { log.Warnf("failed to open %s channel to app: %s", string(a.runtimeConfig.appConnectionConfig.Protocol), err) } // Setup allow/deny list for secrets a.populateSecretsConfiguration() // Create and start the external gRPC server a.daprUniversal = universal.New(universal.Options{ AppID: a.runtimeConfig.id, Logger: logger.NewLogger("dapr.api"), CompStore: a.compStore, Resiliency: a.resiliency, Actors: a.actor, GetComponentsCapabilitiesFn: a.getComponentsCapabilitesMap, ShutdownFn: a.ShutdownWithWait, AppConnectionConfig: a.runtimeConfig.appConnectionConfig, GlobalConfig: a.globalConfig, WorkflowEngine: wfe, }) // Create and start internal and external gRPC servers a.daprGRPCAPI = grpc.NewAPI(grpc.APIOpts{ Universal: a.daprUniversal, Logger: logger.NewLogger("dapr.grpc.api"), Channels: a.channels, PubSubAdapter: a.pubsubAdapter, PubSubAdapterStreamer: a.pubsubAdapterStreamer, Outbox: a.outbox, DirectMessaging: a.directMessaging, SendToOutputBindingFn: a.processor.Binding().SendToOutputBinding, TracingSpec: a.globalConfig.GetTracingSpec(), AccessControlList: a.accessControlList, Processor: a.processor, }) if err = a.runnerCloser.AddCloser(a.daprGRPCAPI); err != nil { return err } err = a.startGRPCAPIServer(a.daprGRPCAPI, a.runtimeConfig.apiGRPCPort) if err != nil { return fmt.Errorf("failed to start API gRPC server: %w", err) } if a.runtimeConfig.unixDomainSocket != "" 
{ log.Info("API gRPC server is running on a Unix Domain Socket") } else { log.Infof("API gRPC server is running on port %v", a.runtimeConfig.apiGRPCPort) } // Start HTTP Server err = a.startHTTPServer() if err != nil { return fmt.Errorf("failed to start HTTP server: %w", err) } if a.runtimeConfig.unixDomainSocket != "" { log.Info("HTTP server is running on a Unix Domain Socket") } else { log.Infof("HTTP server is running on port %v", a.runtimeConfig.httpPort) } log.Infof("The request body size parameter is: %v bytes", a.runtimeConfig.maxRequestBodySize) // Start internal gRPC server (used for sidecar-to-sidecar communication) err = a.startGRPCInternalServer(a.daprGRPCAPI) if err != nil { return fmt.Errorf("failed to start internal gRPC server: %w", err) } log.Infof("Internal gRPC server is running on %s:%d", a.runtimeConfig.internalGRPCListenAddress, a.runtimeConfig.internalGRPCPort) a.initDirectMessaging(a.nameResolver) if a.daprHTTPAPI != nil { a.daprHTTPAPI.MarkStatusAsOutboundReady() } if err := a.blockUntilAppIsReady(ctx); err != nil { return err } if err := a.processor.Subscriber().InitProgramaticSubscriptions(ctx); err != nil { return fmt.Errorf("failed to init programmatic subscriptions: %s", err) } if a.runtimeConfig.appConnectionConfig.MaxConcurrency > 0 { log.Infof("app max concurrency set to %v", a.runtimeConfig.appConnectionConfig.MaxConcurrency) } a.appHealthReady = a.appHealthReadyInit if a.runtimeConfig.appConnectionConfig.HealthCheck != nil && a.channels.AppChannel() != nil { // We can't just pass "a.channels.HealthProbe" because appChannel may be re-created a.appHealth = apphealth.New(*a.runtimeConfig.appConnectionConfig.HealthCheck, func(ctx context.Context) (bool, error) { return a.channels.AppChannel().HealthProbe(ctx) }) if err := a.runnerCloser.AddCloser(a.appHealth); err != nil { return err } a.appHealth.OnHealthChange(a.appHealthChanged) if err := a.appHealth.StartProbes(ctx); err != nil { return err } // Set the appHealth object in the channel so it's aware of the app's health status a.channels.AppChannel().SetAppHealth(a.appHealth) // Enqueue a probe right away // This will also start the input components once the app is healthy a.appHealth.Enqueue() } else { // If there's no health check, mark the app as healthy right away so subscriptions can start a.appHealthChanged(ctx, apphealth.AppStatusHealthy) } return nil } // appHealthReadyInit completes the initialization phase and is invoked after the app is healthy func (a *DaprRuntime) appHealthReadyInit(ctx context.Context) (err error) { // Load app configuration (for actors) and init actors a.loadAppConfiguration(ctx) if a.runtimeConfig.ActorsEnabled() { err = a.initActors(ctx) if err != nil { log.Warn(err) } else { a.daprUniversal.SetActorRuntime(a.actor) } } // Initialize workflow engine if err = a.initWorkflowEngine(ctx); err != nil { return err } // We set actors as initialized whether we have an actors runtime or not a.daprUniversal.SetActorsInitDone() if cb := a.runtimeConfig.registry.ComponentsCallback(); cb != nil { if err = cb(registry.ComponentRegistry{ DirectMessaging: a.directMessaging, CompStore: a.compStore, }); err != nil { return fmt.Errorf("failed to register components with callback: %w", err) } } return nil } func (a *DaprRuntime) initWorkflowEngine(ctx context.Context) error { wfComponentFactory := wfengine.BuiltinWorkflowFactory(a.workflowEngine) // If actors are not enabled, still invoke SetActorRuntime on the workflow engine with `nil` to unblock startup if abe, ok := 
a.workflowEngine.Backend.(interface {
		SetActorRuntime(ctx context.Context, actorRuntime actors.ActorRuntime)
	}); ok {
		log.Info("Configuring workflow engine with actors backend")
		var actorRuntime actors.ActorRuntime
		if a.runtimeConfig.ActorsEnabled() {
			actorRuntime = a.actor
		}
		abe.SetActorRuntime(ctx, actorRuntime)
	}

	reg := a.runtimeConfig.registry.Workflows()
	if reg == nil {
		log.Info("No workflow registry available, not registering Dapr workflow component.")
		return nil
	}

	log.Info("Registering component for dapr workflow engine...")
	reg.RegisterComponent(wfComponentFactory, "dapr")
	wfe := workflow.New(workflow.Options{
		Registry:       a.runtimeConfig.registry.Workflows(),
		ComponentStore: a.compStore,
		Meta:           a.meta,
	})
	if err := wfe.Init(ctx, wfbackend.ComponentDefinition()); err != nil {
		return fmt.Errorf("failed to initialize Dapr workflow component: %w", err)
	}

	log.Info("Workflow engine initialized.")
	return a.runnerCloser.AddCloser(func() error {
		return wfe.Close(wfbackend.ComponentDefinition())
	})
}

// initPluggableComponents discovers pluggable components and initializes them
// with their respective registries.
func (a *DaprRuntime) initPluggableComponents(ctx context.Context) {
	if runtime.GOOS == "windows" {
		log.Debugf("the current OS does not support pluggable components feature, skipping initialization")
		return
	}
	if err := pluggable.Discover(ctx); err != nil {
		log.Errorf("could not initialize pluggable components: %v", err)
	}
}

// appHealthChanged sets the status of the app to healthy or unhealthy.
// It is the callback for apphealth, invoked when the detected status changes.
func (a *DaprRuntime) appHealthChanged(ctx context.Context, status uint8) {
	a.appHealthLock.Lock()
	defer a.appHealthLock.Unlock()

	switch status {
	case apphealth.AppStatusHealthy:
		select {
		case <-a.isAppHealthy:
			a.isAppHealthy = make(chan struct{})
		default:
		}

		// First time the app becomes healthy, complete the init process
		if a.appHealthReady != nil {
			if err := a.appHealthReady(ctx); err != nil {
				log.Warnf("Failed to complete app init: %s", err)
			}
			a.appHealthReady = nil
		}

		// Start subscribing to topics and reading from input bindings
		if err := a.processor.Subscriber().StartAppSubscriptions(); err != nil {
			log.Warnf("failed to subscribe to topics: %s", err)
		}
		err := a.processor.Binding().StartReadingFromBindings(ctx)
		if err != nil {
			log.Warnf("failed to read from bindings: %s", err)
		}

		// Start subscribing to outbox topics
		if err := a.outbox.SubscribeToInternalTopics(ctx, a.runtimeConfig.id); err != nil {
			log.Warnf("failed to subscribe to outbox topics: %s", err)
		}
	case apphealth.AppStatusUnhealthy:
		select {
		case <-a.isAppHealthy:
		default:
			close(a.isAppHealthy)
		}

		// Stop topic subscriptions and input bindings
		a.processor.Subscriber().StopAppSubscriptions()
		a.processor.Binding().StopReadingFromBindings(false)
	}
}

func (a *DaprRuntime) populateSecretsConfiguration() {
	// Populate in a map for easy lookup by store name.
	if a.globalConfig.Spec.Secrets == nil {
		return
	}

	for _, scope := range a.globalConfig.Spec.Secrets.Scopes {
		a.compStore.AddSecretsConfiguration(scope.StoreName, scope)
	}
}

func (a *DaprRuntime) initDirectMessaging(resolver nr.Resolver) {
	a.directMessaging = messaging.NewDirectMessaging(messaging.NewDirectMessagingOpts{
		AppID:              a.runtimeConfig.id,
		Namespace:          a.namespace,
		Port:               a.runtimeConfig.internalGRPCPort,
		Mode:               a.runtimeConfig.mode,
		Channels:           a.channels,
		ClientConnFn:       a.grpc.GetGRPCConnection,
		Resolver:           resolver,
		MaxRequestBodySize: a.runtimeConfig.maxRequestBodySize,
		Proxy:              a.proxy,
		ReadBufferSize:     a.runtimeConfig.readBufferSize,
		Resiliency:         a.resiliency,
		CompStore:          a.compStore,
	})
	a.runnerCloser.AddCloser(a.directMessaging)
}

func (a *DaprRuntime) initProxy() {
	a.proxy = messaging.NewProxy(messaging.ProxyOpts{
		AppClientFn:        a.grpc.GetAppClient,
		ConnectionFactory:  a.grpc.GetGRPCConnection,
		AppID:              a.runtimeConfig.id,
		ACL:                a.accessControlList,
		Resiliency:         a.resiliency,
		MaxRequestBodySize: a.runtimeConfig.maxRequestBodySize,
	})
}

func (a *DaprRuntime) startHTTPServer() error {
	getMetricSpec := a.globalConfig.GetMetricsSpec()
	a.daprHTTPAPI = http.NewAPI(http.APIOpts{
		Universal:             a.daprUniversal,
		Channels:              a.channels,
		DirectMessaging:       a.directMessaging,
		PubSubAdapter:         a.pubsubAdapter,
		Outbox:                a.outbox,
		SendToOutputBindingFn: a.processor.Binding().SendToOutputBinding,
		TracingSpec:           a.globalConfig.GetTracingSpec(),
		MetricSpec:            &getMetricSpec,
		MaxRequestBodySize:    int64(a.runtimeConfig.maxRequestBodySize),
	})

	serverConf := http.ServerConfig{
		AppID:                   a.runtimeConfig.id,
		HostAddress:             a.hostAddress,
		Port:                    a.runtimeConfig.httpPort,
		APIListenAddresses:      a.runtimeConfig.apiListenAddresses,
		PublicPort:              a.runtimeConfig.publicPort,
		PublicListenAddress:     a.runtimeConfig.publicListenAddress,
		ProfilePort:             a.runtimeConfig.profilePort,
		AllowedOrigins:          a.runtimeConfig.allowedOrigins,
		EnableProfiling:         a.runtimeConfig.enableProfiling,
		MaxRequestBodySize:      a.runtimeConfig.maxRequestBodySize,
		UnixDomainSocket:        a.runtimeConfig.unixDomainSocket,
		ReadBufferSize:          a.runtimeConfig.readBufferSize,
		EnableAPILogging:        *a.runtimeConfig.enableAPILogging,
		APILoggingObfuscateURLs: a.globalConfig.GetAPILoggingSpec().ObfuscateURLs,
		APILogHealthChecks:      !a.globalConfig.GetAPILoggingSpec().OmitHealthChecks,
	}

	server := http.NewServer(http.NewServerOpts{
		API:         a.daprHTTPAPI,
		Config:      serverConf,
		TracingSpec: a.globalConfig.GetTracingSpec(),
		MetricSpec:  a.globalConfig.GetMetricsSpec(),
		Middleware:  a.httpMiddleware.BuildPipelineFromSpec("server", a.globalConfig.Spec.HTTPPipelineSpec),
		APISpec:     a.globalConfig.GetAPISpec(),
	})
	if err := server.StartNonBlocking(); err != nil {
		return err
	}
	if err := a.runnerCloser.AddCloser(server); err != nil {
		return err
	}
	if err := a.runnerCloser.AddCloser(a.processor.Subscriber().StopAllSubscriptionsForever); err != nil {
		return err
	}
	if err := a.runnerCloser.AddCloser(func() {
		a.processor.Binding().StopReadingFromBindings(true)
	}); err != nil {
		return err
	}

	return nil
}

func (a *DaprRuntime) startGRPCInternalServer(api grpc.API) error {
	// Since the internal gRPC server is encrypted & authenticated, it is safe to listen on *
	serverConf := a.getNewServerConfig([]string{a.runtimeConfig.internalGRPCListenAddress}, a.runtimeConfig.internalGRPCPort)
	server := grpc.NewInternalServer(api, serverConf, a.globalConfig.GetTracingSpec(), a.globalConfig.GetMetricsSpec(), a.sec, a.proxy)
	if err := server.StartNonBlocking(); err != nil {
		return err
	}
	if err := a.runnerCloser.AddCloser(server); err != nil {
		return err
	}

	return nil
} func (a *DaprRuntime) startGRPCAPIServer(api grpc.API, port int) error { serverConf := a.getNewServerConfig(a.runtimeConfig.apiListenAddresses, port) server := grpc.NewAPIServer(api, serverConf, a.globalConfig.GetTracingSpec(), a.globalConfig.GetMetricsSpec(), a.globalConfig.GetAPISpec(), a.proxy, a.workflowEngine) if err := server.StartNonBlocking(); err != nil { return err } if err := a.runnerCloser.AddCloser(server); err != nil { return err } return nil } func (a *DaprRuntime) getNewServerConfig(apiListenAddresses []string, port int) grpc.ServerConfig { // Use the trust domain value from the access control policy spec to generate the cert // If no access control policy has been specified, use a default value trustDomain := config.DefaultTrustDomain if a.accessControlList != nil { trustDomain = a.accessControlList.TrustDomain } return grpc.ServerConfig{ AppID: a.runtimeConfig.id, HostAddress: a.hostAddress, Port: port, APIListenAddresses: apiListenAddresses, NameSpace: a.namespace, TrustDomain: trustDomain, MaxRequestBodySize: a.runtimeConfig.maxRequestBodySize, UnixDomainSocket: a.runtimeConfig.unixDomainSocket, ReadBufferSize: a.runtimeConfig.readBufferSize, EnableAPILogging: *a.runtimeConfig.enableAPILogging, } } func (a *DaprRuntime) initNameResolution(ctx context.Context) (err error) { var ( resolver nr.Resolver resolverMetadata nr.Metadata resolverName string resolverVersion string ) if a.globalConfig.Spec.NameResolutionSpec != nil { resolverName = a.globalConfig.Spec.NameResolutionSpec.Component resolverVersion = a.globalConfig.Spec.NameResolutionSpec.Version } if resolverName == "" { switch a.runtimeConfig.mode { case modes.KubernetesMode: resolverName = "kubernetes" case modes.StandaloneMode: resolverName = "mdns" default: fName := utils.ComponentLogName("nr", resolverName, resolverVersion) return rterrors.NewInit(rterrors.InitComponentFailure, fName, fmt.Errorf("unable to determine name resolver for %s mode", string(a.runtimeConfig.mode))) } } if resolverVersion == "" { resolverVersion = components.FirstStableVersion } fName := utils.ComponentLogName("nr", resolverName, resolverVersion) resolver, err = a.runtimeConfig.registry.NameResolutions().Create(resolverName, resolverVersion, fName) if err != nil { diag.DefaultMonitoring.ComponentInitFailed("nameResolution", "creation", resolverName) return rterrors.NewInit(rterrors.CreateComponentFailure, fName, err) } resolverMetadata.Name = resolverName if a.globalConfig.Spec.NameResolutionSpec != nil { resolverMetadata.Configuration = a.globalConfig.Spec.NameResolutionSpec.Configuration } // Override host address if the internal gRPC listen address is localhost. 
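	// The resolver should be given an address the internal gRPC server is
	// actually reachable on; when the server listens only on loopback, that is
	// the loopback address rather than the routable host address.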
hostAddress := a.hostAddress if utils.Contains( []string{"127.0.0.1", "localhost", "[::1]"}, a.runtimeConfig.internalGRPCListenAddress, ) { hostAddress = a.runtimeConfig.internalGRPCListenAddress } resolverMetadata.Instance = nr.Instance{ DaprHTTPPort: a.runtimeConfig.httpPort, DaprInternalPort: a.runtimeConfig.internalGRPCPort, AppPort: a.runtimeConfig.appConnectionConfig.Port, Address: hostAddress, AppID: a.runtimeConfig.id, Namespace: a.namespace, } err = resolver.Init(ctx, resolverMetadata) if err != nil { diag.DefaultMonitoring.ComponentInitFailed("nameResolution", "init", resolverName) return rterrors.NewInit(rterrors.InitComponentFailure, fName, err) } a.nameResolver = resolver if nrCloser, ok := resolver.(io.Closer); ok { err = a.runnerCloser.AddCloser(nrCloser) if err != nil { return err } } log.Infof("Initialized name resolution to %s", resolverName) return nil } func (a *DaprRuntime) initActors(ctx context.Context) error { err := actors.ValidateHostEnvironment(a.runtimeConfig.mTLSEnabled, a.runtimeConfig.mode, a.namespace) if err != nil { return rterrors.NewInit(rterrors.InitFailure, "actors", err) } a.actorStateStoreLock.Lock() defer a.actorStateStoreLock.Unlock() actorStateStoreName, ok := a.processor.State().ActorStateStoreName() if !ok { log.Info("actors: state store is not configured - this is okay for clients but services with hosted actors will fail to initialize!") } // Override host address if the internal gRPC listen address is localhost. hostAddress := a.hostAddress if utils.Contains( []string{"127.0.0.1", "localhost", "[::1]"}, a.runtimeConfig.internalGRPCListenAddress, ) { hostAddress = a.runtimeConfig.internalGRPCListenAddress } actorConfig := actors.NewConfig(actors.ConfigOpts{ HostAddress: hostAddress, AppID: a.runtimeConfig.id, ActorsService: a.runtimeConfig.actorsService, RemindersService: a.runtimeConfig.remindersService, Port: a.runtimeConfig.internalGRPCPort, Namespace: a.namespace, AppConfig: a.appConfig, HealthHTTPClient: a.channels.AppHTTPClient(), HealthEndpoint: a.channels.AppHTTPEndpoint(), AppChannelAddress: a.runtimeConfig.appConnectionConfig.ChannelAddress, PodName: getPodName(), }) act, err := actors.NewActors(actors.ActorsOpts{ AppChannel: a.channels.AppChannel(), GRPCConnectionFn: a.grpc.GetGRPCConnection, Config: actorConfig, TracingSpec: a.globalConfig.GetTracingSpec(), Resiliency: a.resiliency, StateStoreName: actorStateStoreName, CompStore: a.compStore, // TODO: @joshvanl Remove in Dapr 1.12 when ActorStateTTL is finalized. 
StateTTLEnabled: a.globalConfig.IsFeatureEnabled(config.ActorStateTTL), Security: a.sec, }) if err != nil { return rterrors.NewInit(rterrors.InitFailure, "actors", err) } err = act.Init(ctx) if err != nil { return rterrors.NewInit(rterrors.InitFailure, "actors", err) } a.actor = act return nil } func (a *DaprRuntime) loadComponents(ctx context.Context) error { var loader loader.Loader[compapi.Component] switch a.runtimeConfig.mode { case modes.KubernetesMode: loader = kubernetes.NewComponents(kubernetes.Options{ Config: a.runtimeConfig.kubernetes, Client: a.operatorClient, Namespace: a.namespace, PodName: a.podName, }) case modes.StandaloneMode: loader = disk.NewComponents(disk.Options{ AppID: a.runtimeConfig.id, Paths: a.runtimeConfig.standalone.ResourcesPath, }) default: return nil } log.Info("Loading components…") comps, err := loader.Load(ctx) if err != nil { return err } authorizedComps := a.authz.GetAuthorizedObjects(comps, a.authz.IsObjectAuthorized).([]compapi.Component) // Iterate through the list twice // First, we look for secret stores and load those, then all other components // Sure, we could sort the list of authorizedComps... but this is simpler and most certainly faster for _, comp := range authorizedComps { if strings.HasPrefix(comp.Spec.Type, string(components.CategorySecretStore)+".") { log.Debug("Found component: " + comp.LogName()) if !a.processor.AddPendingComponent(ctx, comp) { return nil } } } for _, comp := range authorizedComps { if !strings.HasPrefix(comp.Spec.Type, string(components.CategorySecretStore)+".") { log.Debug("Found component: " + comp.LogName()) if !a.processor.AddPendingComponent(ctx, comp) { return nil } } } return nil } func (a *DaprRuntime) loadDeclarativeSubscriptions(ctx context.Context) error { var loader loader.Loader[subapi.Subscription] switch a.runtimeConfig.mode { case modes.KubernetesMode: loader = kubernetes.NewSubscriptions(kubernetes.Options{ Client: a.operatorClient, Namespace: a.namespace, PodName: a.podName, }) case modes.StandaloneMode: loader = disk.NewSubscriptions(disk.Options{ AppID: a.runtimeConfig.id, Paths: a.runtimeConfig.standalone.ResourcesPath, }) default: return nil } log.Info("Loading Declarative Subscriptions…") subs, err := loader.Load(ctx) if err != nil { return err } for _, s := range subs { log.Infof("Found Subscription: %s", s.Name) } a.processor.AddPendingSubscription(ctx, subs...) return nil } func (a *DaprRuntime) flushOutstandingHTTPEndpoints(ctx context.Context) { log.Info("Waiting for all outstanding http endpoints to be processed…") // We flush by sending a no-op http endpoint. Since the processHTTPEndpoints goroutine only reads one http endpoint at a time, // We know that once the no-op http endpoint is read from the channel, all previous http endpoints will have been fully processed. a.processor.AddPendingEndpoint(ctx, endpointapi.HTTPEndpoint{}) log.Info("All outstanding http endpoints processed") } func (a *DaprRuntime) flushOutstandingComponents(ctx context.Context) { log.Info("Waiting for all outstanding components to be processed…") // We flush by sending a no-op component. Since the processComponents goroutine only reads one component at a time, // We know that once the no-op component is read from the channel, all previous components will have been fully processed. 
a.processor.AddPendingComponent(ctx, compapi.Component{}) log.Info("All outstanding components processed") } func (a *DaprRuntime) loadHTTPEndpoints(ctx context.Context) error { var loader loader.Loader[endpointapi.HTTPEndpoint] switch a.runtimeConfig.mode { case modes.KubernetesMode: loader = kubernetes.NewHTTPEndpoints(kubernetes.Options{ Config: a.runtimeConfig.kubernetes, Client: a.operatorClient, Namespace: a.namespace, PodName: a.podName, }) case modes.StandaloneMode: loader = disk.NewHTTPEndpoints(disk.Options{ AppID: a.runtimeConfig.id, Paths: a.runtimeConfig.standalone.ResourcesPath, }) default: return nil } log.Info("Loading endpoints…") endpoints, err := loader.Load(ctx) if err != nil { return err } authorizedHTTPEndpoints := a.authz.GetAuthorizedObjects(endpoints, a.authz.IsObjectAuthorized).([]endpointapi.HTTPEndpoint) for _, e := range authorizedHTTPEndpoints { log.Infof("Found http endpoint: %s", e.Name) if !a.processor.AddPendingEndpoint(ctx, e) { return nil } } return nil } func (a *DaprRuntime) stopActor() error { if a.actor != nil { log.Info("Shutting down actor") return a.actor.Close() } return nil } func (a *DaprRuntime) stopWorkflow(ctx context.Context) error { if a.workflowEngine != nil { log.Info("Shutting down workflow engine") return a.workflowEngine.Close(ctx) } return nil } // ShutdownWithWait will gracefully stop runtime and wait outstanding operations. func (a *DaprRuntime) ShutdownWithWait() { a.runnerCloser.Close() } func (a *DaprRuntime) WaitUntilShutdown() { a.runnerCloser.WaitUntilShutdown() } func (a *DaprRuntime) cleanSockets() error { var errs []error if a.runtimeConfig.unixDomainSocket != "" { for _, s := range []string{"http", "grpc"} { err := os.Remove(fmt.Sprintf("%s/dapr-%s-%s.socket", a.runtimeConfig.unixDomainSocket, a.runtimeConfig.id, s)) if os.IsNotExist(err) { continue } if err != nil { errs = append(errs, fmt.Errorf("error removing socket file: %w", err)) } } } return errors.Join(errs...) } func (a *DaprRuntime) blockUntilAppIsReady(ctx context.Context) error { if a.runtimeConfig.appConnectionConfig.Port <= 0 { return nil } log.Infof("application protocol: %s. waiting on port %v. 
This will block until the app is listening on that port.", string(a.runtimeConfig.appConnectionConfig.Protocol), a.runtimeConfig.appConnectionConfig.Port)

	dialAddr := a.runtimeConfig.appConnectionConfig.ChannelAddress + ":" + strconv.Itoa(a.runtimeConfig.appConnectionConfig.Port)

	for {
		var (
			conn net.Conn
			err  error
		)
		dialer := &net.Dialer{
			Timeout: 500 * time.Millisecond,
		}
		if a.runtimeConfig.appConnectionConfig.Protocol.HasTLS() {
			conn, err = tls.DialWithDialer(dialer, "tcp", dialAddr, &tls.Config{
				InsecureSkipVerify: true, //nolint:gosec
			})
		} else {
			conn, err = dialer.DialContext(ctx, "tcp", dialAddr)
		}
		if err == nil && conn != nil {
			conn.Close()
			break
		}

		select {
		// Return if the context is canceled.
		case <-ctx.Done():
			return ctx.Err()
		// Wait 100ms between attempts; this prevents overwhelming the OS with open connections.
		case <-a.clock.After(time.Millisecond * 100):
		}
	}

	log.Infof("Application discovered on port %v", a.runtimeConfig.appConnectionConfig.Port)

	return nil
}

func (a *DaprRuntime) loadAppConfiguration(ctx context.Context) {
	if a.channels.AppChannel() == nil {
		return
	}

	appConfig, err := a.channels.AppChannel().GetAppConfig(ctx, a.runtimeConfig.id)
	if err != nil {
		return
	}

	if appConfig != nil {
		a.appConfig = *appConfig
		log.Info("Application configuration loaded")
	}
}

func (a *DaprRuntime) appendBuiltinSecretStore(ctx context.Context) {
	if a.runtimeConfig.disableBuiltinK8sSecretStore {
		return
	}

	switch a.runtimeConfig.mode {
	case modes.KubernetesMode:
		// Preload the built-in Kubernetes secret store.
		a.processor.AddPendingComponent(ctx, compapi.Component{
			ObjectMeta: metav1.ObjectMeta{
				Name: secretstoresLoader.BuiltinKubernetesSecretStore,
			},
			Spec: compapi.ComponentSpec{
				Type:    "secretstores.kubernetes",
				Version: components.FirstStableVersion,
			},
		})
	}
}

func (a *DaprRuntime) getComponentsCapabilitesMap() map[string][]string {
	capabilities := make(map[string][]string)
	for key, store := range a.compStore.ListStateStores() {
		features := store.Features()
		stateStoreCapabilities := featureTypeToString(features)
		if state.FeatureETag.IsPresent(features) && state.FeatureTransactional.IsPresent(features) {
			stateStoreCapabilities = append(stateStoreCapabilities, "ACTOR")
		}
		capabilities[key] = stateStoreCapabilities
	}
	for key, pubSubItem := range a.compStore.ListPubSubs() {
		features := pubSubItem.Component.Features()
		capabilities[key] = featureTypeToString(features)
	}
	for key := range a.compStore.ListInputBindings() {
		capabilities[key] = []string{"INPUT_BINDING"}
	}
	for key := range a.compStore.ListOutputBindings() {
		if val, found := capabilities[key]; found {
			capabilities[key] = append(val, "OUTPUT_BINDING")
		} else {
			capabilities[key] = []string{"OUTPUT_BINDING"}
		}
	}
	for key, store := range a.compStore.ListSecretStores() {
		features := store.Features()
		capabilities[key] = featureTypeToString(features)
	}
	return capabilities
}

// featureTypeToString converts component features from FeatureType to strings.
func featureTypeToString(features interface{}) []string {
	featureStr := make([]string, 0)
	switch reflect.TypeOf(features).Kind() {
	case reflect.Slice:
		val := reflect.ValueOf(features)
		for i := 0; i < val.Len(); i++ {
			featureStr = append(featureStr, val.Index(i).String())
		}
	}
	return featureStr
}

func createGRPCManager(sec security.Handler, runtimeConfig *internalConfig, globalConfig *config.Configuration) *manager.Manager {
	grpcAppChannelConfig := &manager.AppChannelConfig{}
	if globalConfig != nil {
		grpcAppChannelConfig.TracingSpec = globalConfig.GetTracingSpec()
	}
	if runtimeConfig != nil {
		grpcAppChannelConfig.Port = runtimeConfig.appConnectionConfig.Port
		grpcAppChannelConfig.MaxConcurrency = runtimeConfig.appConnectionConfig.MaxConcurrency
		grpcAppChannelConfig.EnableTLS = (runtimeConfig.appConnectionConfig.Protocol == protocol.GRPCSProtocol)
		grpcAppChannelConfig.MaxRequestBodySize = runtimeConfig.maxRequestBodySize
		grpcAppChannelConfig.ReadBufferSize = runtimeConfig.readBufferSize
		grpcAppChannelConfig.BaseAddress = runtimeConfig.appConnectionConfig.ChannelAddress
	}

	m := manager.NewManager(sec, runtimeConfig.mode, grpcAppChannelConfig)
	m.StartCollector()
	return m
}

func (a *DaprRuntime) stopTrace(ctx context.Context) error {
	if a.tracerProvider == nil {
		return nil
	}

	// Flush and shut down the tracing provider.
	if err := a.tracerProvider.ForceFlush(ctx); err != nil && !errors.Is(err, context.Canceled) {
		log.Warnf("Error flushing tracing provider: %v", err)
	}

	if err := a.tracerProvider.Shutdown(ctx); err != nil && !errors.Is(err, context.Canceled) {
		return fmt.Errorf("error shutting down tracing provider: %w", err)
	}

	a.tracerProvider = nil
	return nil
}
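
// Illustrative sketch (not part of the original source): the flush-then-shutdown
// order used by stopTrace above is the general teardown pattern for any
// OpenTelemetry SDK TracerProvider — flushing first ensures buffered spans are
// exported before the exporters are torn down. The function name and the `tp`
// parameter below are hypothetical:
//
//	func shutdownTracerProvider(ctx context.Context, tp *sdktrace.TracerProvider) error {
//		// Export any spans still buffered in the span processors...
//		if err := tp.ForceFlush(ctx); err != nil {
//			log.Warnf("Error flushing tracing provider: %v", err)
//		}
//		// ...then stop the processors and release exporter resources.
//		return tp.Shutdown(ctx)
//	}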
mikeee/dapr
pkg/runtime/runtime.go
GO
mit
45,865
/* Copyright 2021 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ //nolint:nosnakecase package runtime import ( "context" "crypto/rand" "crypto/x509" "encoding/hex" "encoding/json" "errors" "fmt" "io" "net" "net/http" "net/http/httptest" "net/url" "os" "path/filepath" "reflect" "strconv" "strings" "sync/atomic" "testing" "time" "github.com/golang/mock/gomock" "github.com/phayes/freeport" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" "go.opentelemetry.io/otel/exporters/zipkin" sdktrace "go.opentelemetry.io/otel/sdk/trace" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/metadata" v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clocktesting "k8s.io/utils/clock/testing" "github.com/dapr/components-contrib/bindings" "github.com/dapr/components-contrib/lock" mdata "github.com/dapr/components-contrib/metadata" "github.com/dapr/components-contrib/nameresolution" "github.com/dapr/components-contrib/pubsub" "github.com/dapr/components-contrib/secretstores" "github.com/dapr/components-contrib/state" commonapi "github.com/dapr/dapr/pkg/apis/common" componentsV1alpha1 "github.com/dapr/dapr/pkg/apis/components/v1alpha1" "github.com/dapr/dapr/pkg/apphealth" channelt "github.com/dapr/dapr/pkg/channel/testing" bindingsLoader "github.com/dapr/dapr/pkg/components/bindings" configurationLoader "github.com/dapr/dapr/pkg/components/configuration" lockLoader "github.com/dapr/dapr/pkg/components/lock" httpMiddlewareLoader "github.com/dapr/dapr/pkg/components/middleware/http" nrLoader "github.com/dapr/dapr/pkg/components/nameresolution" pubsubLoader "github.com/dapr/dapr/pkg/components/pubsub" secretstoresLoader "github.com/dapr/dapr/pkg/components/secretstores" "github.com/dapr/dapr/pkg/config/protocol" "github.com/dapr/dapr/pkg/metrics" "github.com/dapr/dapr/pkg/security" pb "github.com/dapr/dapr/pkg/api/grpc/proxy/testservice" stateLoader "github.com/dapr/dapr/pkg/components/state" "github.com/dapr/dapr/pkg/config" modeconfig "github.com/dapr/dapr/pkg/config/modes" "github.com/dapr/dapr/pkg/cors" diagUtils "github.com/dapr/dapr/pkg/diagnostics/utils" invokev1 "github.com/dapr/dapr/pkg/messaging/v1" "github.com/dapr/dapr/pkg/modes" "github.com/dapr/dapr/pkg/resiliency" "github.com/dapr/dapr/pkg/runtime/authorizer" rterrors "github.com/dapr/dapr/pkg/runtime/errors" rtmock "github.com/dapr/dapr/pkg/runtime/mock" "github.com/dapr/dapr/pkg/runtime/processor" runtimePubsub "github.com/dapr/dapr/pkg/runtime/pubsub" "github.com/dapr/dapr/pkg/runtime/registry" daprt "github.com/dapr/dapr/pkg/testing" "github.com/dapr/kit/logger" "github.com/dapr/kit/ptr" ) const ( TestPubsubName = "testpubsub" TestSecondPubsubName = "testpubsub2" TestLockName = "testlock" resourcesDir = "./components" maxGRPCServerUptime = 200 * time.Millisecond ) func TestNewRuntime(t *testing.T) { // act r, err := newDaprRuntime(context.Background(), 
nil, &internalConfig{ mode: modes.StandaloneMode, metricsExporter: metrics.NewExporter(log, metrics.DefaultMetricNamespace), registry: registry.New(registry.NewOptions()), }, &config.Configuration{}, &config.AccessControlList{}, resiliency.New(logger.NewLogger("test"))) // assert require.NoError(t, err) assert.NotNil(t, r, "runtime must be initiated") } func TestDoProcessComponent(t *testing.T) { rt, err := NewTestDaprRuntime(t, modes.StandaloneMode) require.NoError(t, err) defer stopRuntime(t, rt) pubsubComponent := componentsV1alpha1.Component{ ObjectMeta: metav1.ObjectMeta{ Name: TestPubsubName, }, Spec: componentsV1alpha1.ComponentSpec{ Type: "pubsub.mockPubSub", Version: "v1", Metadata: daprt.GetFakeMetadataItems(), }, } lockComponent := componentsV1alpha1.Component{ ObjectMeta: metav1.ObjectMeta{ Name: TestLockName, }, Spec: componentsV1alpha1.ComponentSpec{ Type: "lock.mockLock", Version: "v1", }, } t.Run("test error on lock init", func(t *testing.T) { // setup ctrl := gomock.NewController(t) mockLockStore := daprt.NewMockStore(ctrl) mockLockStore.EXPECT().InitLockStore(context.Background(), gomock.Any()).Return(assert.AnError) rt.runtimeConfig.registry.Locks().RegisterComponent( func(_ logger.Logger) lock.Store { return mockLockStore }, "mockLock", ) // act err := rt.processor.Init(context.Background(), lockComponent) // assert require.Error(t, err, "expected an error") assert.Equal(t, err.Error(), rterrors.NewInit(rterrors.InitComponentFailure, "testlock (lock.mockLock/v1)", assert.AnError).Error(), "expected error strings to match") }) t.Run("test error when lock version invalid", func(t *testing.T) { // setup ctrl := gomock.NewController(t) mockLockStore := daprt.NewMockStore(ctrl) rt.runtimeConfig.registry.Locks().RegisterComponent( func(_ logger.Logger) lock.Store { return mockLockStore }, "mockLock", ) lockComponentV3 := lockComponent lockComponentV3.Spec.Version = "v3" // act err := rt.processor.Init(context.Background(), lockComponentV3) // assert require.Error(t, err, "expected an error") assert.Equal(t, err.Error(), rterrors.NewInit(rterrors.CreateComponentFailure, "testlock (lock.mockLock/v3)", fmt.Errorf("couldn't find lock store lock.mockLock/v3")).Error()) }) t.Run("test error when lock prefix strategy invalid", func(t *testing.T) { // setup ctrl := gomock.NewController(t) mockLockStore := daprt.NewMockStore(ctrl) mockLockStore.EXPECT().InitLockStore(context.Background(), gomock.Any()).Return(nil) rt.runtimeConfig.registry.Locks().RegisterComponent( func(_ logger.Logger) lock.Store { return mockLockStore }, "mockLock", ) lockComponentWithWrongStrategy := lockComponent lockComponentWithWrongStrategy.Spec.Metadata = []commonapi.NameValuePair{ { Name: "keyPrefix", Value: commonapi.DynamicValue{ JSON: v1.JSON{Raw: []byte("||")}, }, }, } // act err := rt.processor.Init(context.Background(), lockComponentWithWrongStrategy) // assert require.Error(t, err) }) t.Run("lock init successfully and set right strategy", func(t *testing.T) { // setup ctrl := gomock.NewController(t) mockLockStore := daprt.NewMockStore(ctrl) mockLockStore.EXPECT().InitLockStore(context.Background(), gomock.Any()).Return(nil) rt.runtimeConfig.registry.Locks().RegisterComponent( func(_ logger.Logger) lock.Store { return mockLockStore }, "mockLock", ) // act err := rt.processor.Init(context.Background(), lockComponent) // assert require.NoError(t, err, "unexpected error") // get modified key key, err := lockLoader.GetModifiedLockKey("test", "mockLock", "appid-1") require.NoError(t, err, "unexpected 
error") assert.Equal(t, "lock||appid-1||test", key) }) t.Run("test error on pubsub init", func(t *testing.T) { // setup mockPubSub := new(daprt.MockPubSub) rt.runtimeConfig.registry.PubSubs().RegisterComponent( func(_ logger.Logger) pubsub.PubSub { return mockPubSub }, "mockPubSub", ) expectedMetadata := pubsub.Metadata{ Base: mdata.Base{ Name: TestPubsubName, Properties: daprt.GetFakeProperties(), }, } mockPubSub.On("Init", expectedMetadata).Return(assert.AnError) // act err := rt.processor.Init(context.Background(), pubsubComponent) // assert require.Error(t, err, "expected an error") assert.Equal(t, err.Error(), rterrors.NewInit(rterrors.InitComponentFailure, "testpubsub (pubsub.mockPubSub/v1)", assert.AnError).Error(), "expected error strings to match") }) t.Run("test invalid category component", func(t *testing.T) { // act err := rt.processor.Init(context.Background(), componentsV1alpha1.Component{ Spec: componentsV1alpha1.ComponentSpec{ Type: "invalid", }, }) // assert require.Error(t, err, "error expected") }) } // Test that flushOutstandingComponents waits for components. func TestFlushOutstandingComponent(t *testing.T) { t.Run("We can call flushOustandingComponents more than once", func(t *testing.T) { rt, err := NewTestDaprRuntime(t, modes.StandaloneMode) require.NoError(t, err) defer stopRuntime(t, rt) wasCalled := false m := rtmock.NewMockKubernetesStoreWithInitCallback(func(context.Context) error { time.Sleep(100 * time.Millisecond) wasCalled = true return nil }) rt.runtimeConfig.registry.SecretStores().RegisterComponent( func(_ logger.Logger) secretstores.SecretStore { return m }, "kubernetesMock", ) go rt.processor.Process(context.Background()) rt.processor.AddPendingComponent(context.Background(), componentsV1alpha1.Component{ ObjectMeta: metav1.ObjectMeta{ Name: "kubernetesMock", }, Spec: componentsV1alpha1.ComponentSpec{ Type: "secretstores.kubernetesMock", Version: "v1", }, }) rt.flushOutstandingComponents(context.Background()) assert.True(t, wasCalled) // Make sure that the goroutine was restarted and can flush a second time wasCalled = false rt.runtimeConfig.registry.SecretStores().RegisterComponent( func(_ logger.Logger) secretstores.SecretStore { return m }, "kubernetesMock2", ) rt.processor.AddPendingComponent(context.Background(), componentsV1alpha1.Component{ ObjectMeta: metav1.ObjectMeta{ Name: "kubernetesMock2", }, Spec: componentsV1alpha1.ComponentSpec{ Type: "secretstores.kubernetesMock", Version: "v1", }, }) rt.flushOutstandingComponents(context.Background()) assert.True(t, wasCalled) }) t.Run("flushOutstandingComponents blocks for components with outstanding dependanices", func(t *testing.T) { rt, err := NewTestDaprRuntime(t, modes.StandaloneMode) require.NoError(t, err) defer stopRuntime(t, rt) wasCalled := false wasCalledChild := false wasCalledGrandChild := false m := rtmock.NewMockKubernetesStoreWithInitCallback(func(context.Context) error { time.Sleep(100 * time.Millisecond) wasCalled = true return nil }) mc := rtmock.NewMockKubernetesStoreWithInitCallback(func(context.Context) error { time.Sleep(100 * time.Millisecond) wasCalledChild = true return nil }) mgc := rtmock.NewMockKubernetesStoreWithInitCallback(func(context.Context) error { time.Sleep(100 * time.Millisecond) wasCalledGrandChild = true return nil }) rt.runtimeConfig.registry.SecretStores().RegisterComponent( func(_ logger.Logger) secretstores.SecretStore { return m }, "kubernetesMock", ) rt.runtimeConfig.registry.SecretStores().RegisterComponent( func(_ logger.Logger) 
secretstores.SecretStore { return mc }, "kubernetesMockChild", ) rt.runtimeConfig.registry.SecretStores().RegisterComponent( func(_ logger.Logger) secretstores.SecretStore { return mgc }, "kubernetesMockGrandChild", ) go rt.processor.Process(context.Background()) rt.processor.AddPendingComponent(context.Background(), componentsV1alpha1.Component{ ObjectMeta: metav1.ObjectMeta{ Name: "kubernetesMockGrandChild", }, Spec: componentsV1alpha1.ComponentSpec{ Type: "secretstores.kubernetesMockGrandChild", Version: "v1", Metadata: []commonapi.NameValuePair{ { Name: "a", SecretKeyRef: commonapi.SecretKeyRef{ Key: "key1", Name: "name1", }, }, }, }, Auth: componentsV1alpha1.Auth{ SecretStore: "kubernetesMockChild", }, }) rt.processor.AddPendingComponent(context.Background(), componentsV1alpha1.Component{ ObjectMeta: metav1.ObjectMeta{ Name: "kubernetesMockChild", }, Spec: componentsV1alpha1.ComponentSpec{ Type: "secretstores.kubernetesMockChild", Version: "v1", Metadata: []commonapi.NameValuePair{ { Name: "a", SecretKeyRef: commonapi.SecretKeyRef{ Key: "key1", Name: "name1", }, }, }, }, Auth: componentsV1alpha1.Auth{ SecretStore: "kubernetesMock", }, }) rt.processor.AddPendingComponent(context.Background(), componentsV1alpha1.Component{ ObjectMeta: metav1.ObjectMeta{ Name: "kubernetesMock", }, Spec: componentsV1alpha1.ComponentSpec{ Type: "secretstores.kubernetesMock", Version: "v1", }, }) rt.flushOutstandingComponents(context.Background()) assert.True(t, wasCalled) assert.True(t, wasCalledChild) assert.True(t, wasCalledGrandChild) }) } func TestInitNameResolution(t *testing.T) { initMockResolverForRuntime := func(rt *DaprRuntime, resolverName string, e error) *daprt.MockResolver { mockResolver := new(daprt.MockResolver) rt.runtimeConfig.registry.NameResolutions().RegisterComponent( func(_ logger.Logger) nameresolution.Resolver { return mockResolver }, resolverName, ) expectedMetadata := nameresolution.Metadata{ Base: mdata.Base{ Name: resolverName, }, Instance: nameresolution.Instance{ DaprHTTPPort: rt.runtimeConfig.httpPort, DaprInternalPort: rt.runtimeConfig.internalGRPCPort, AppPort: rt.runtimeConfig.appConnectionConfig.Port, Address: rt.hostAddress, AppID: rt.runtimeConfig.id, Namespace: "default", }, } mockResolver.On("Init", expectedMetadata).Return(e) return mockResolver } t.Run("error on unknown resolver", func(t *testing.T) { // given rt, err := NewTestDaprRuntime(t, modes.StandaloneMode) require.NoError(t, err) // target resolver rt.globalConfig.Spec.NameResolutionSpec = &config.NameResolutionSpec{ Component: "targetResolver", } // registered resolver initMockResolverForRuntime(rt, "anotherResolver", nil) // act err = rt.initNameResolution(context.Background()) // assert require.Error(t, err) }) t.Run("test init nameresolution", func(t *testing.T) { // given rt, err := NewTestDaprRuntime(t, modes.StandaloneMode) require.NoError(t, err) // target resolver rt.globalConfig.Spec.NameResolutionSpec = &config.NameResolutionSpec{ Component: "someResolver", } // registered resolver initMockResolverForRuntime(rt, "someResolver", nil) // act err = rt.initNameResolution(context.Background()) // assert require.NoError(t, err, "expected no error") }) t.Run("test init nameresolution default in StandaloneMode", func(t *testing.T) { // given rt, err := NewTestDaprRuntime(t, modes.StandaloneMode) require.NoError(t, err) // target resolver rt.globalConfig.Spec.NameResolutionSpec = &config.NameResolutionSpec{} // registered resolver initMockResolverForRuntime(rt, "mdns", nil) // act err = 
rt.initNameResolution(context.Background()) // assert require.NoError(t, err, "expected no error") }) t.Run("test init nameresolution nil in StandaloneMode", func(t *testing.T) { // given rt, err := NewTestDaprRuntime(t, modes.StandaloneMode) require.NoError(t, err) // target resolver rt.globalConfig.Spec.NameResolutionSpec = nil // registered resolver initMockResolverForRuntime(rt, "mdns", nil) // act err = rt.initNameResolution(context.Background()) // assert require.NoError(t, err, "expected no error") }) t.Run("test init nameresolution default in KubernetesMode", func(t *testing.T) { // given rt, err := NewTestDaprRuntime(t, modes.KubernetesMode) require.NoError(t, err) // target resolver rt.globalConfig.Spec.NameResolutionSpec = &config.NameResolutionSpec{} // registered resolver initMockResolverForRuntime(rt, "kubernetes", nil) // act err = rt.initNameResolution(context.Background()) // assert require.NoError(t, err, "expected no error") }) t.Run("test init nameresolution nil in KubernetesMode", func(t *testing.T) { // given rt, err := NewTestDaprRuntime(t, modes.KubernetesMode) require.NoError(t, err) // target resolver rt.globalConfig.Spec.NameResolutionSpec = nil // registered resolver initMockResolverForRuntime(rt, "kubernetes", nil) // act err = rt.initNameResolution(context.Background()) // assert require.NoError(t, err, "expected no error") }) } func TestSetupTracing(t *testing.T) { testcases := []struct { name string tracingConfig config.TracingSpec hostAddress string expectedExporters []sdktrace.SpanExporter expectedErr string }{{ name: "no trace exporter", tracingConfig: config.TracingSpec{}, }, { name: "sampling rate 1 without trace exporter", tracingConfig: config.TracingSpec{ SamplingRate: "1", }, expectedExporters: []sdktrace.SpanExporter{&diagUtils.NullExporter{}}, }, { name: "bad host address, failing zipkin", tracingConfig: config.TracingSpec{ Zipkin: &config.ZipkinSpec{ EndpointAddress: "localhost", }, }, expectedErr: "invalid collector URL \"localhost\": no scheme or host", }, { name: "zipkin trace exporter", tracingConfig: config.TracingSpec{ Zipkin: &config.ZipkinSpec{ EndpointAddress: "http://foo.bar", }, }, expectedExporters: []sdktrace.SpanExporter{&zipkin.Exporter{}}, }, { name: "otel trace http exporter", tracingConfig: config.TracingSpec{ Otel: &config.OtelSpec{ EndpointAddress: "foo.bar", IsSecure: ptr.Of(false), Protocol: "http", }, }, expectedExporters: []sdktrace.SpanExporter{&otlptrace.Exporter{}}, }, { name: "invalid otel trace exporter protocol", tracingConfig: config.TracingSpec{ Otel: &config.OtelSpec{ EndpointAddress: "foo.bar", IsSecure: ptr.Of(false), Protocol: "tcp", }, }, expectedErr: "invalid protocol tcp provided for Otel endpoint", }, { name: "stdout trace exporter", tracingConfig: config.TracingSpec{ Stdout: true, }, expectedExporters: []sdktrace.SpanExporter{&diagUtils.StdoutExporter{}}, }, { name: "all trace exporters", tracingConfig: config.TracingSpec{ Otel: &config.OtelSpec{ EndpointAddress: "http://foo.bar", IsSecure: ptr.Of(false), Protocol: "http", }, Zipkin: &config.ZipkinSpec{ EndpointAddress: "http://foo.bar", }, Stdout: true, }, expectedExporters: []sdktrace.SpanExporter{&diagUtils.StdoutExporter{}, &zipkin.Exporter{}, &otlptrace.Exporter{}}, }} for i, tc := range testcases { t.Run(tc.name, func(t *testing.T) { rt, err := NewTestDaprRuntime(t, modes.StandaloneMode) require.NoError(t, err) defer stopRuntime(t, rt) rt.globalConfig.Spec.TracingSpec = &testcases[i].tracingConfig if tc.hostAddress != "" { rt.hostAddress = 
tc.hostAddress } // Setup tracing with the fake tracer provider store to confirm // the right exporter was registered. tpStore := newFakeTracerProviderStore() if err := rt.setupTracing(context.Background(), rt.hostAddress, tpStore); tc.expectedErr != "" { assert.Contains(t, err.Error(), tc.expectedErr) } else { require.NoError(t, err) } if len(tc.expectedExporters) > 0 { assert.True(t, tpStore.HasExporter()) } for i, exporter := range tpStore.exporters { // Exporter types don't expose internals, so we can only validate that // the right type of exporter was registered. assert.Equal(t, reflect.TypeOf(tc.expectedExporters[i]), reflect.TypeOf(exporter)) } // Setup tracing with the OpenTelemetry trace provider store. // We have no way to validate the result, but we can at least // confirm that nothing blows up. if tc.expectedErr == "" { rt.setupTracing(context.Background(), rt.hostAddress, newOpentelemetryTracerProviderStore()) } }) } } func TestPopulateSecretsConfiguration(t *testing.T) { t.Run("secret store configuration is populated", func(t *testing.T) { // setup rt, err := NewTestDaprRuntime(t, modes.StandaloneMode) require.NoError(t, err) defer stopRuntime(t, rt) rt.globalConfig.Spec.Secrets = &config.SecretsSpec{ Scopes: []config.SecretsScope{ { StoreName: "testMock", DefaultAccess: "allow", }, }, } // act rt.populateSecretsConfiguration() // verify secConf, ok := rt.compStore.GetSecretsConfiguration("testMock") require.True(t, ok, "Expected testMock secret store configuration to be populated") assert.Equal(t, config.AllowAccess, secConf.DefaultAccess, "Expected default access as allow") assert.Empty(t, secConf.DeniedSecrets, "Expected testMock deniedSecrets to not be populated") assert.NotContains(t, secConf.AllowedSecrets, "Expected testMock allowedSecrets to not be populated") }) } // Test InitSecretStore if secretstore.* refers to Kubernetes secret store. 
func TestInitSecretStoresInKubernetesMode(t *testing.T) { t.Run("built-in secret store is added", func(t *testing.T) { rt, _ := NewTestDaprRuntime(t, modes.KubernetesMode) m := rtmock.NewMockKubernetesStore() rt.runtimeConfig.registry.SecretStores().RegisterComponent( func(_ logger.Logger) secretstores.SecretStore { return m }, secretstoresLoader.BuiltinKubernetesSecretStore, ) assertBuiltInSecretStore(t, rt) }) t.Run("disable built-in secret store flag", func(t *testing.T) { rt, _ := NewTestDaprRuntime(t, modes.KubernetesMode) defer stopRuntime(t, rt) rt.runtimeConfig.disableBuiltinK8sSecretStore = true testOk := make(chan struct{}) defer close(testOk) go func() { // If the test fails, this call blocks forever, eventually causing a timeout rt.appendBuiltinSecretStore(context.Background()) testOk <- struct{}{} }() select { case <-testOk: return case <-time.After(5 * time.Second): t.Fatalf("test failed") } }) t.Run("built-in secret store bypasses authorizers", func(t *testing.T) { rt, _ := NewTestDaprRuntime(t, modes.KubernetesMode) rt.authz = rt.authz.WithComponentAuthorizers([]authorizer.ComponentAuthorizer{ func(component componentsV1alpha1.Component) bool { return false }, }) m := rtmock.NewMockKubernetesStore() rt.runtimeConfig.registry.SecretStores().RegisterComponent( func(_ logger.Logger) secretstores.SecretStore { return m }, secretstoresLoader.BuiltinKubernetesSecretStore, ) assertBuiltInSecretStore(t, rt) }) } func assertBuiltInSecretStore(t *testing.T, rt *DaprRuntime) { go rt.processor.Process(context.Background()) rt.appendBuiltinSecretStore(context.Background()) assert.Eventually(t, func() bool { _, ok := rt.compStore.GetComponent(secretstoresLoader.BuiltinKubernetesSecretStore) return ok }, time.Second*2, time.Millisecond*100) require.NoError(t, rt.runnerCloser.Close()) } func NewTestDaprRuntime(t *testing.T, mode modes.DaprMode) (*DaprRuntime, error) { return NewTestDaprRuntimeWithProtocol(t, mode, string(protocol.HTTPProtocol), 1024) } func NewTestDaprRuntimeWithID(t *testing.T, mode modes.DaprMode, id string) (*DaprRuntime, error) { testRuntimeConfig := NewTestDaprRuntimeConfig(t, modes.StandaloneMode, string(protocol.HTTPProtocol), 1024) testRuntimeConfig.id = id rt, err := newDaprRuntime(context.Background(), testSecurity(t), testRuntimeConfig, &config.Configuration{}, &config.AccessControlList{}, resiliency.New(logger.NewLogger("test"))) if err != nil { return nil, err } rt.runtimeConfig.mode = mode rt.channels.Refresh() return rt, nil } func NewTestDaprRuntimeWithProtocol(t *testing.T, mode modes.DaprMode, protocol string, appPort int) (*DaprRuntime, error) { testRuntimeConfig := NewTestDaprRuntimeConfig(t, modes.StandaloneMode, protocol, appPort) rt, err := newDaprRuntime(context.Background(), testSecurity(t), testRuntimeConfig, &config.Configuration{}, &config.AccessControlList{}, resiliency.New(logger.NewLogger("test"))) if err != nil { return nil, err } rt.runtimeConfig.mode = mode rt.channels.Refresh() return rt, nil } func NewTestDaprRuntimeConfig(t *testing.T, mode modes.DaprMode, appProtocol string, appPort int) *internalConfig { return &internalConfig{ id: daprt.TestRuntimeConfigID, actorsService: "placement:10.10.10.12", kubernetes: modeconfig.KubernetesConfig{ ControlPlaneAddress: "10.10.10.11", }, allowedOrigins: cors.DefaultAllowedOrigins, appConnectionConfig: config.AppConnectionConfig{ Protocol: protocol.Protocol(appProtocol), Port: appPort, MaxConcurrency: -1, ChannelAddress: "127.0.0.1", }, mode: mode, httpPort: DefaultDaprHTTPPort, internalGRPCPort: 
0, apiGRPCPort: DefaultDaprAPIGRPCPort, apiListenAddresses: []string{DefaultAPIListenAddress}, publicPort: nil, profilePort: DefaultProfilePort, enableProfiling: false, mTLSEnabled: false, sentryServiceAddress: "", maxRequestBodySize: 4 << 20, readBufferSize: 4 << 10, unixDomainSocket: "", gracefulShutdownDuration: time.Second, enableAPILogging: ptr.Of(true), disableBuiltinK8sSecretStore: false, metricsExporter: metrics.NewExporter(log, metrics.DefaultMetricNamespace), registry: registry.New(registry.NewOptions(). WithStateStores(stateLoader.NewRegistry()). WithSecretStores(secretstoresLoader.NewRegistry()). WithNameResolutions(nrLoader.NewRegistry()). WithBindings(bindingsLoader.NewRegistry()). WithPubSubs(pubsubLoader.NewRegistry()). WithHTTPMiddlewares(httpMiddlewareLoader.NewRegistry()). WithConfigurations(configurationLoader.NewRegistry()). WithLocks(lockLoader.NewRegistry())), } } func TestGracefulShutdown(t *testing.T) { r, err := NewTestDaprRuntime(t, modes.StandaloneMode) require.NoError(t, err) assert.Equal(t, time.Second, r.runtimeConfig.gracefulShutdownDuration) } func TestPodName(t *testing.T) { t.Run("empty podName", func(t *testing.T) { assert.Empty(t, getPodName()) }) t.Run("non-empty podName", func(t *testing.T) { t.Setenv("POD_NAME", "testPodName") assert.Equal(t, "testPodName", getPodName()) }) } func TestInitActors(t *testing.T) { t.Run("missing namespace on kubernetes", func(t *testing.T) { r, err := NewTestDaprRuntime(t, modes.KubernetesMode) require.NoError(t, err) defer stopRuntime(t, r) r.namespace = "" r.runtimeConfig.mTLSEnabled = true err = r.initActors(context.TODO()) require.Error(t, err) }) t.Run("actors hosted = true", func(t *testing.T) { r, err := NewTestDaprRuntime(t, modes.KubernetesMode) require.NoError(t, err) defer stopRuntime(t, r) r.appConfig = config.ApplicationConfig{ Entities: []string{"actor1"}, } hosted := len(r.appConfig.Entities) > 0 assert.True(t, hosted) }) t.Run("actors hosted = false", func(t *testing.T) { r, err := NewTestDaprRuntime(t, modes.KubernetesMode) require.NoError(t, err) defer stopRuntime(t, r) hosted := len(r.appConfig.Entities) > 0 assert.False(t, hosted) }) t.Run("placement enable = false", func(t *testing.T) { r, err := newDaprRuntime(context.Background(), testSecurity(t), &internalConfig{ metricsExporter: metrics.NewExporter(log, metrics.DefaultMetricNamespace), mode: modes.StandaloneMode, registry: registry.New(registry.NewOptions()), }, &config.Configuration{}, &config.AccessControlList{}, resiliency.New(logger.NewLogger("test"))) require.NoError(t, err) defer stopRuntime(t, r) r.channels.Refresh() err = r.initActors(context.TODO()) require.Error(t, err) }) t.Run("the state stores can still be initialized normally", func(t *testing.T) { r, err := newDaprRuntime(context.Background(), testSecurity(t), &internalConfig{ metricsExporter: metrics.NewExporter(log, metrics.DefaultMetricNamespace), mode: modes.StandaloneMode, registry: registry.New(registry.NewOptions()), }, &config.Configuration{}, &config.AccessControlList{}, resiliency.New(logger.NewLogger("test"))) require.NoError(t, err) defer stopRuntime(t, r) r.channels.Refresh() assert.Nil(t, r.actor) assert.NotNil(t, r.compStore.ListStateStores()) assert.Equal(t, 0, r.compStore.StateStoresLen()) }) t.Run("the actor store can not be initialized normally", func(t *testing.T) { r, err := newDaprRuntime(context.Background(), testSecurity(t), &internalConfig{ metricsExporter: metrics.NewExporter(log, metrics.DefaultMetricNamespace), mode: modes.StandaloneMode, registry: 
registry.New(registry.NewOptions()), }, &config.Configuration{}, &config.AccessControlList{}, resiliency.New(logger.NewLogger("test"))) require.NoError(t, err) defer stopRuntime(t, r) r.channels.Refresh() name, ok := r.processor.State().ActorStateStoreName() assert.False(t, ok) assert.Equal(t, "", name) err = r.initActors(context.TODO()) require.Error(t, err) }) } func TestActorReentrancyConfig(t *testing.T) { fullConfig := `{ "entities":["actorType1", "actorType2"], "actorIdleTimeout": "1h", "drainOngoingCallTimeout": "30s", "drainRebalancedActors": true, "reentrancy": { "enabled": true, "maxStackDepth": 64 } }` limit := 64 minimumConfig := `{ "entities":["actorType1", "actorType2"], "actorIdleTimeout": "1h", "drainOngoingCallTimeout": "30s", "drainRebalancedActors": true, "reentrancy": { "enabled": true } }` emptyConfig := `{ "entities":["actorType1", "actorType2"], "actorIdleTimeout": "1h", "drainOngoingCallTimeout": "30s", "drainRebalancedActors": true }` testcases := []struct { Name string Config []byte ExpectedReentrancy bool ExpectedLimit *int }{ { Name: "Test full configuration", Config: []byte(fullConfig), ExpectedReentrancy: true, ExpectedLimit: &limit, }, { Name: "Test minimum configuration", Config: []byte(minimumConfig), ExpectedReentrancy: true, ExpectedLimit: nil, }, { Name: "Test minimum configuration", Config: []byte(emptyConfig), ExpectedReentrancy: false, ExpectedLimit: nil, }, } for _, tc := range testcases { t.Run(tc.Name, func(t *testing.T) { r, err := NewTestDaprRuntime(t, modes.StandaloneMode) require.NoError(t, err) mockAppChannel := new(channelt.MockAppChannel) r.channels.WithAppChannel(mockAppChannel) r.runtimeConfig.appConnectionConfig.Protocol = protocol.HTTPProtocol configResp := config.ApplicationConfig{} json.Unmarshal(tc.Config, &configResp) mockAppChannel.On("GetAppConfig").Return(&configResp, nil) r.loadAppConfiguration(context.Background()) assert.NotNil(t, r.appConfig) assert.Equal(t, tc.ExpectedReentrancy, r.appConfig.Reentrancy.Enabled) assert.Equal(t, tc.ExpectedLimit, r.appConfig.Reentrancy.MaxStackDepth) }) } } type mockPubSub struct { pubsub.PubSub closeErr error } func (p *mockPubSub) Init(ctx context.Context, metadata pubsub.Metadata) error { return nil } func (p *mockPubSub) Close() error { return p.closeErr } type mockStateStore struct { state.Store closeErr error } func (s *mockStateStore) Init(ctx context.Context, metadata state.Metadata) error { return nil } func (s *mockStateStore) Close() error { return s.closeErr } func TestCloseWithErrors(t *testing.T) { rt, err := NewTestDaprRuntime(t, modes.StandaloneMode) require.NoError(t, err) testErr := errors.New("mock close error") rt.runtimeConfig.registry.Bindings().RegisterOutputBinding( func(_ logger.Logger) bindings.OutputBinding { return &rtmock.Binding{CloseErr: testErr} }, "output", ) rt.runtimeConfig.registry.PubSubs().RegisterComponent( func(_ logger.Logger) pubsub.PubSub { return &mockPubSub{closeErr: testErr} }, "pubsub", ) rt.runtimeConfig.registry.StateStores().RegisterComponent( func(_ logger.Logger) state.Store { return &mockStateStore{closeErr: testErr} }, "statestore", ) rt.runtimeConfig.registry.SecretStores().RegisterComponent( func(_ logger.Logger) secretstores.SecretStore { return &rtmock.SecretStore{CloseErr: testErr} }, "secretstore", ) mockOutputBindingComponent := componentsV1alpha1.Component{ ObjectMeta: metav1.ObjectMeta{ Name: "binding", }, Spec: componentsV1alpha1.ComponentSpec{ Type: "bindings.output", Version: "v1", Metadata: []commonapi.NameValuePair{ { Name: 
"output", Value: commonapi.DynamicValue{ JSON: v1.JSON{}, }, }, }, }, } mockPubSubComponent := componentsV1alpha1.Component{ ObjectMeta: metav1.ObjectMeta{ Name: "pubsub", }, Spec: componentsV1alpha1.ComponentSpec{ Type: "pubsub.pubsub", Version: "v1", Metadata: []commonapi.NameValuePair{ { Name: "pubsub", Value: commonapi.DynamicValue{ JSON: v1.JSON{}, }, }, }, }, } mockStateComponent := componentsV1alpha1.Component{ ObjectMeta: metav1.ObjectMeta{ Name: "state", }, Spec: componentsV1alpha1.ComponentSpec{ Type: "state.statestore", Version: "v1", Metadata: []commonapi.NameValuePair{ { Name: "statestore", Value: commonapi.DynamicValue{ JSON: v1.JSON{}, }, }, }, }, } mockSecretsComponent := componentsV1alpha1.Component{ ObjectMeta: metav1.ObjectMeta{ Name: "secret", }, Spec: componentsV1alpha1.ComponentSpec{ Type: "secretstores.secretstore", Version: "v1", Metadata: []commonapi.NameValuePair{ { Name: "secretstore", Value: commonapi.DynamicValue{ JSON: v1.JSON{}, }, }, }, }, } errCh := make(chan error) go func() { errCh <- rt.Run(context.Background()) }() rt.processor.AddPendingComponent(context.Background(), mockOutputBindingComponent) rt.processor.AddPendingComponent(context.Background(), mockPubSubComponent) rt.processor.AddPendingComponent(context.Background(), mockStateComponent) rt.processor.AddPendingComponent(context.Background(), mockSecretsComponent) err = rt.runnerCloser.Close() require.Error(t, err) assert.Len(t, strings.Split(err.Error(), "\n"), 4) select { case rErr := <-errCh: assert.Equal(t, err, rErr) case <-time.After(5 * time.Second): t.Fatal("timed out waiting for runtime to stop") } } func stopRuntime(t *testing.T, rt *DaprRuntime) { require.NoError(t, rt.runnerCloser.Close()) } func TestComponentsCallback(t *testing.T) { srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { fmt.Fprint(w, "OK") })) defer srv.Close() u, err := url.Parse(srv.URL) require.NoError(t, err) port, _ := strconv.Atoi(u.Port()) c := make(chan struct{}) var callbackInvoked atomic.Bool cfg := NewTestDaprRuntimeConfig(t, modes.StandaloneMode, "http", port) rt, err := newDaprRuntime(context.Background(), testSecurity(t), cfg, &config.Configuration{}, &config.AccessControlList{}, resiliency.New(logger.NewLogger("test"))) require.NoError(t, err) rt.runtimeConfig.registry = registry.New(registry.NewOptions().WithComponentsCallback(func(components registry.ComponentRegistry) error { callbackInvoked.Store(true) close(c) return nil })) errCh := make(chan error) ctx, cancel := context.WithCancel(context.Background()) go func() { errCh <- rt.Run(ctx) }() select { case <-c: case <-time.After(10 * time.Second): t.Fatal("timed out waiting for component callback") } assert.True(t, callbackInvoked.Load(), "component callback was not invoked") cancel() select { case err := <-errCh: require.NoError(t, err) case <-time.After(10 * time.Second): t.Fatal("timed out waiting for runtime to stop") } } func TestGRPCProxy(t *testing.T) { // setup gRPC server serverPort, _ := freeport.GetFreePort() teardown, err := runGRPCApp(serverPort) require.NoError(t, err) defer teardown() // setup proxy rt, err := NewTestDaprRuntimeWithProtocol(t, modes.StandaloneMode, "grpc", serverPort) require.NoError(t, err) internalPort, _ := freeport.GetFreePort() rt.runtimeConfig.internalGRPCPort = internalPort rt.runtimeConfig.registry.NameResolutions().RegisterComponent( func(_ logger.Logger) nameresolution.Resolver { mockResolver := new(daprt.MockResolver) // proxy to server anytime mockResolver.On("Init", 
mock.Anything).Return(nil) mockResolver.On("ResolveID", mock.Anything).Return(fmt.Sprintf("localhost:%d", serverPort), nil) return mockResolver }, "mdns", // for standalone mode ) ctx, cancel := context.WithCancel(context.Background()) errCh := make(chan error) go func() { errCh <- rt.Run(ctx) }() t.Cleanup(func() { cancel() select { case err := <-errCh: require.NoError(t, err) case <-time.After(5 * time.Second): t.Fatal("timed out waiting for runtime to stop") } }) req := &pb.PingRequest{Value: "foo"} t.Run("proxy single streaming request", func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() stream, err := pingStreamClient(ctx, internalPort) require.NoError(t, err) require.NoError(t, stream.Send(req), "sending to PingStream must not fail") resp, err := stream.Recv() require.NoError(t, err) require.NotNil(t, resp, "resp must not be nil") require.NoError(t, stream.CloseSend(), "no error on close send") }) t.Run("proxy concurrent streaming requests", func(t *testing.T) { ctx1, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() stream1, err := pingStreamClient(ctx1, internalPort) require.NoError(t, err) ctx2, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() stream2, err := pingStreamClient(ctx2, internalPort) require.NoError(t, err) require.NoError(t, stream1.Send(req), "sending to PingStream must not fail") resp, err := stream1.Recv() require.NoError(t, err) require.NotNil(t, resp, "resp must not be nil") require.NoError(t, stream2.Send(req), "sending to PingStream must not fail") resp, err = stream2.Recv() require.NoError(t, err) require.NotNil(t, resp, "resp must not be nil") require.NoError(t, stream1.CloseSend(), "no error on close send") require.NoError(t, stream2.CloseSend(), "no error on close send") }) } func TestShutdownWithWait(t *testing.T) { t.Run("calling ShutdownWithWait should wait until runtime has stopped", func(t *testing.T) { rt, err := NewTestDaprRuntime(t, modes.StandaloneMode) require.NoError(t, err) closeSecretClose := make(chan struct{}) closeSecretCalled := make(chan struct{}) m := rtmock.NewMockKubernetesStoreWithClose(func() error { close(closeSecretCalled) <-closeSecretClose return nil }) rt.runtimeConfig.registry.SecretStores().RegisterComponent( func(_ logger.Logger) secretstores.SecretStore { return m }, "kubernetesMock", ) dir := t.TempDir() rt.runtimeConfig.standalone.ResourcesPath = []string{dir} require.NoError(t, os.WriteFile(filepath.Join(dir, "kubernetesMock.yaml"), []byte(` apiVersion: dapr.io/v1alpha1 kind: Component metadata: name: kubernetesMock spec: type: secretstores.kubernetesMock version: v1 `), 0o600)) // Use a background context since this is not closed by the test. 
ctx := context.Background() errCh := make(chan error) go func() { errCh <- rt.Run(ctx) }() assert.Eventually(t, func() bool { return len(rt.compStore.ListComponents()) > 0 }, 5*time.Second, 100*time.Millisecond, "timed out waiting for component store to be populated with mock secret") shutdownCh := make(chan struct{}) go func() { rt.ShutdownWithWait() close(shutdownCh) }() select { case <-closeSecretCalled: case <-time.After(5 * time.Second): t.Fatal("timed out waiting for secret store to be closed") } select { case <-errCh: t.Fatal("runtime stopped before ShutdownWithWait returned") default: } select { case <-shutdownCh: t.Fatal("ShutdownWithWait returned before runtime stopped") default: close(closeSecretClose) } select { case <-shutdownCh: case <-time.After(5 * time.Second): t.Fatal("timed out waiting for ShutdownWithWait to return") } select { case err := <-errCh: require.NoError(t, err) case <-time.After(5 * time.Second): t.Error("timed out waiting for runtime to stop") } }) t.Run("if secret times out after init, error should return from runtime and ShutdownWithWait should return", func(t *testing.T) { rt, err := NewTestDaprRuntime(t, modes.StandaloneMode) require.NoError(t, err) initSecretContextClosed := make(chan struct{}) closeSecretInit := make(chan struct{}) m := rtmock.NewMockKubernetesStoreWithInitCallback(func(ctx context.Context) error { <-ctx.Done() close(initSecretContextClosed) <-closeSecretInit return nil }) rt.runtimeConfig.registry.SecretStores().RegisterComponent( func(_ logger.Logger) secretstores.SecretStore { return m }, "kubernetesMock", ) dir := t.TempDir() rt.runtimeConfig.standalone.ResourcesPath = []string{dir} require.NoError(t, os.WriteFile(filepath.Join(dir, "kubernetesMock.yaml"), []byte(` apiVersion: dapr.io/v1alpha1 kind: Component metadata: name: kubernetesMock spec: type: secretstores.kubernetesMock version: v1 initTimeout: 1ms `), 0o600)) // Use a background context since this is not closed by the test. ctx := context.Background() errCh := make(chan error) go func() { errCh <- rt.Run(ctx) }() select { case <-initSecretContextClosed: case <-time.After(5 * time.Second): t.Fatal("timed out waiting for secret store to return inited because of timeout") } select { case <-errCh: t.Fatal("runtime returned stopped before secret Close() returned") default: } shutdownCh := make(chan struct{}) go func() { rt.ShutdownWithWait() close(shutdownCh) }() close(closeSecretInit) select { case <-shutdownCh: case <-time.After(5 * time.Second): t.Fatal("timed out waiting for ShutdownWithWait to return") } select { case err := <-errCh: require.Error(t, err) case <-time.After(5 * time.Second): t.Error("timed out waiting for runtime to stop") } select { case <-shutdownCh: case <-time.After(5 * time.Second): t.Error("timed out waiting for runtime to be marked as stopped") } }) t.Run("if secret init fails then the runtime should not error when the error should be ignored. 
Should wait for shutdown signal", func(t *testing.T) { rt, err := NewTestDaprRuntime(t, modes.StandaloneMode) require.NoError(t, err) secretInited := make(chan struct{}) m := rtmock.NewMockKubernetesStoreWithInitCallback(func(ctx context.Context) error { close(secretInited) return errors.New("this is an error") }) secretClosed := make(chan struct{}) m.(*rtmock.MockKubernetesStateStore).CloseFn = func() error { close(secretClosed) return nil } rt.runtimeConfig.registry.SecretStores().RegisterComponent( func(_ logger.Logger) secretstores.SecretStore { return m }, "kubernetesMock", ) dir := t.TempDir() rt.runtimeConfig.standalone.ResourcesPath = []string{dir} require.NoError(t, os.WriteFile(filepath.Join(dir, "kubernetesMock.yaml"), []byte(` apiVersion: dapr.io/v1alpha1 kind: Component metadata: name: kubernetesMock spec: type: secretstores.kubernetesMock version: v1 ignoreErrors: true `), 0o600)) // Use a background context since this is not closed by the test. ctx := context.Background() errCh := make(chan error) go func() { errCh <- rt.Run(ctx) }() select { case <-secretInited: case <-time.After(3 * time.Second): t.Fatal("timed out waiting for secret store to be inited") } shutdownCh := make(chan struct{}) go func() { rt.ShutdownWithWait() close(shutdownCh) }() select { case err := <-errCh: require.NoError(t, err) case <-time.After(5 * time.Second): t.Error("timed out waiting for runtime to stop") } select { case <-shutdownCh: case <-time.After(5 * time.Second): t.Fatal("timed out waiting for ShutdownWithWait to return") } select { case <-secretClosed: t.Fatal("secret store closed should not be called when init failed") default: } }) t.Run("if secret init fails then the runtime should error when the error should NOT be ignored. Shouldn't wait for shutdown signal", func(t *testing.T) { rt, err := NewTestDaprRuntime(t, modes.StandaloneMode) require.NoError(t, err) m := rtmock.NewMockKubernetesStoreWithInitCallback(func(ctx context.Context) error { return errors.New("this is an error") }) secretClosed := make(chan struct{}) m.(*rtmock.MockKubernetesStateStore).CloseFn = func() error { close(secretClosed) return nil } rt.runtimeConfig.registry.SecretStores().RegisterComponent( func(_ logger.Logger) secretstores.SecretStore { return m }, "kubernetesMock", ) dir := t.TempDir() rt.runtimeConfig.standalone.ResourcesPath = []string{dir} require.NoError(t, os.WriteFile(filepath.Join(dir, "kubernetesMock.yaml"), []byte(` apiVersion: dapr.io/v1alpha1 kind: Component metadata: name: kubernetesMock spec: type: secretstores.kubernetesMock version: v1 `), 0o600)) // Use a background context since this is not closed by the test. ctx := context.Background() errCh := make(chan error) go func() { errCh <- rt.Run(ctx) }() select { case err := <-errCh: require.ErrorContains(t, err, "this is an error") case <-time.After(5 * time.Second): t.Error("timed out waiting for runtime to error") } select { case <-secretClosed: t.Fatal("secret store should not be closed when init failed") default: } // ShutdownWithWait() can still be called even if the runtime errored, it // will just return immediately. 
shutdownCh := make(chan struct{}) go func() { rt.ShutdownWithWait() close(shutdownCh) }() select { case <-shutdownCh: case <-time.After(5 * time.Second): t.Fatal("timed out waiting for ShutdownWithWait to return") } }) t.Run("runtime should fatal if closing components does not happen in time", func(t *testing.T) { rt, err := NewTestDaprRuntime(t, modes.StandaloneMode) require.NoError(t, err) m := rtmock.NewMockKubernetesStoreWithClose(func() error { <-time.After(5 * time.Second) return nil }) rt.runtimeConfig.gracefulShutdownDuration = time.Millisecond * 10 fatalShutdownCalled := make(chan struct{}) rt.runnerCloser.WithFatalShutdown(func() { close(fatalShutdownCalled) }) rt.runtimeConfig.registry.SecretStores().RegisterComponent( func(_ logger.Logger) secretstores.SecretStore { return m }, "kubernetesMock", ) dir := t.TempDir() rt.runtimeConfig.standalone.ResourcesPath = []string{dir} require.NoError(t, os.WriteFile(filepath.Join(dir, "kubernetesMock.yaml"), []byte(` apiVersion: dapr.io/v1alpha1 kind: Component metadata: name: kubernetesMock spec: type: secretstores.kubernetesMock version: v1 `), 0o600)) // Use a background context since this is not closed by the test. ctx := context.Background() errCh := make(chan error) go func() { errCh <- rt.Run(ctx) }() assert.Eventually(t, func() bool { return len(rt.compStore.ListSecretStores()) > 0 }, 5*time.Second, 100*time.Millisecond, "secret store not init in time") go rt.ShutdownWithWait() select { case <-fatalShutdownCalled: case <-time.After(5 * time.Second): t.Fatal("timed out waiting for fatal shutdown to return") } }) } func TestGetComponentsCapabilitiesMap(t *testing.T) { rt, err := NewTestDaprRuntime(t, modes.StandaloneMode) require.NoError(t, err) defer stopRuntime(t, rt) mockStateStore := new(daprt.MockStateStore) rt.runtimeConfig.registry.StateStores().RegisterComponent( func(_ logger.Logger) state.Store { return mockStateStore }, "mockState", ) mockStateStore.On("Init", mock.Anything).Return(nil) cStateStore := componentsV1alpha1.Component{} cStateStore.ObjectMeta.Name = "testStateStoreName" cStateStore.Spec.Type = "state.mockState" mockPubSub := new(daprt.MockPubSub) rt.runtimeConfig.registry.PubSubs().RegisterComponent( func(_ logger.Logger) pubsub.PubSub { return mockPubSub }, "mockPubSub", ) mockPubSub.On("Init", mock.Anything).Return(nil) mockPubSub.On("Features").Return([]pubsub.Feature{pubsub.FeatureMessageTTL, pubsub.FeatureSubscribeWildcards}) cPubSub := componentsV1alpha1.Component{} cPubSub.ObjectMeta.Name = "mockPubSub" cPubSub.Spec.Type = "pubsub.mockPubSub" rt.runtimeConfig.registry.Bindings().RegisterInputBinding( func(_ logger.Logger) bindings.InputBinding { return &daprt.MockBinding{} }, "testInputBinding", ) cin := componentsV1alpha1.Component{} cin.ObjectMeta.Name = "testInputBinding" cin.Spec.Type = "bindings.testInputBinding" rt.runtimeConfig.registry.Bindings().RegisterOutputBinding( func(_ logger.Logger) bindings.OutputBinding { return &daprt.MockBinding{} }, "testOutputBinding", ) cout := componentsV1alpha1.Component{} cout.ObjectMeta.Name = "testOutputBinding" cout.Spec.Type = "bindings.testOutputBinding" mockSecretStoreName := "mockSecretStore" mockSecretStore := new(daprt.FakeSecretStore) rt.runtimeConfig.registry.SecretStores().RegisterComponent( func(_ logger.Logger) secretstores.SecretStore { return mockSecretStore }, mockSecretStoreName, ) cSecretStore := componentsV1alpha1.Component{} cSecretStore.ObjectMeta.Name = mockSecretStoreName cSecretStore.Spec.Type = "secretstores.mockSecretStore" 
require.NoError(t, rt.processor.Init(context.Background(), cin))
	require.NoError(t, rt.processor.Init(context.Background(), cout))
	require.NoError(t, rt.processor.Init(context.Background(), cPubSub))
	require.NoError(t, rt.processor.Init(context.Background(), cStateStore))
	require.NoError(t, rt.processor.Init(context.Background(), cSecretStore))

	capabilities := rt.getComponentsCapabilitesMap()
	assert.Len(t, capabilities, 5, "All 5 registered components are present in capabilities (stateStore pubSub input output secretStore)")
	assert.Len(t, capabilities["mockPubSub"], 2, "mockPubSub has 2 features because we mocked it so")
	assert.Len(t, capabilities["testInputBinding"], 1, "Input bindings always have INPUT_BINDING added to their capabilities")
	assert.Len(t, capabilities["testOutputBinding"], 1, "Output bindings always have OUTPUT_BINDING added to their capabilities")
	assert.Len(t, capabilities[mockSecretStoreName], 1, "mockSecretStore has a single feature and it should be present")
}

func runGRPCApp(port int) (func(), error) {
	serverListener, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", port))
	if err != nil {
		return func() {}, err
	}

	server := grpc.NewServer()
	pb.RegisterTestServiceServer(server, &pingStreamService{})
	go func() {
		server.Serve(serverListener)
	}()
	teardown := func() {
		server.Stop()
	}

	return teardown, nil
}

func pingStreamClient(ctx context.Context, port int) (pb.TestService_PingStreamClient, error) {
	clientConn, err := grpc.DialContext(
		ctx,
		fmt.Sprintf("localhost:%d", port),
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithBlock(),
	)
	if err != nil {
		return nil, err
	}

	testClient := pb.NewTestServiceClient(clientConn)

	ctx = metadata.AppendToOutgoingContext(ctx, "dapr-app-id", "dummy")
	return testClient.PingStream(ctx)
}

type pingStreamService struct {
	pb.TestServiceServer
}

func (s *pingStreamService) PingStream(stream pb.TestService_PingStreamServer) error {
	counter := int32(0)
	for {
		ping, err := stream.Recv()
		if err == io.EOF {
			break
		} else if err != nil {
			return err
		}
		pong := &pb.PingResponse{Value: ping.GetValue(), Counter: counter}
		if err := stream.Send(pong); err != nil {
			return err
		}
		counter++
	}
	return nil
}

func matchDaprRequestMethod(method string) any {
	return mock.MatchedBy(func(req *invokev1.InvokeMethodRequest) bool {
		if req == nil || req.Message() == nil || req.Message().GetMethod() != method {
			return false
		}
		return true
	})
}

func TestGracefulShutdownBindings(t *testing.T) {
	rt, err := NewTestDaprRuntime(t, modes.StandaloneMode)
	require.NoError(t, err)

	ctx, cancel := context.WithCancel(context.Background())
	errCh := make(chan error)
	go func() {
		errCh <- rt.Run(ctx)
	}()

	rt.runtimeConfig.gracefulShutdownDuration = 3 * time.Second
	rt.runtimeConfig.registry.Bindings().RegisterInputBinding(
		func(_ logger.Logger) bindings.InputBinding { return &daprt.MockBinding{} },
		"testInputBinding",
	)
	cin := componentsV1alpha1.Component{}
	cin.ObjectMeta.Name = "testInputBinding"
	cin.Spec.Type = "bindings.testInputBinding"

	rt.runtimeConfig.registry.Bindings().RegisterOutputBinding(
		func(_ logger.Logger) bindings.OutputBinding { return &daprt.MockBinding{} },
		"testOutputBinding",
	)
	cout := componentsV1alpha1.Component{}
	cout.ObjectMeta.Name = "testOutputBinding"
	cout.Spec.Type = "bindings.testOutputBinding"

	require.NoError(t, rt.processor.Init(context.Background(), cin))
	require.NoError(t, rt.processor.Init(context.Background(), cout))
	assert.Len(t, rt.compStore.ListInputBindings(), 1)
	assert.Len(t, rt.compStore.ListOutputBindings(), 1)

	cancel()
	select {
case <-time.After(rt.runtimeConfig.gracefulShutdownDuration + 2*time.Second): assert.Fail(t, "input bindings shutdown timed out") case err := <-errCh: require.NoError(t, err) } } func TestBlockShutdownBindings(t *testing.T) { t.Run("block timeout", func(t *testing.T) { rt, err := NewTestDaprRuntime(t, modes.StandaloneMode) require.NoError(t, err) fakeClock := clocktesting.NewFakeClock(time.Now()) rt.clock = fakeClock rt.appHealthChanged(context.Background(), apphealth.AppStatusHealthy) rt.runtimeConfig.blockShutdownDuration = ptr.Of(time.Millisecond * 100) rt.runtimeConfig.gracefulShutdownDuration = 3 * time.Second ctx, cancel := context.WithCancel(context.Background()) errCh := make(chan error) go func() { errCh <- rt.Run(ctx) }() cancel() select { case <-time.After(time.Second): case <-errCh: assert.Fail(t, "expected not to return until block timeout is reached") } fakeClock.Step(time.Millisecond * 200) select { case <-time.After(rt.runtimeConfig.gracefulShutdownDuration + 2*time.Second): assert.Fail(t, "input bindings shutdown timed out") case err := <-errCh: require.NoError(t, err) } }) t.Run("block app unhealthy", func(t *testing.T) { rt, err := NewTestDaprRuntime(t, modes.StandaloneMode) require.NoError(t, err) fakeClock := clocktesting.NewFakeClock(time.Now()) rt.clock = fakeClock rt.appHealthChanged(context.Background(), apphealth.AppStatusHealthy) rt.runtimeConfig.blockShutdownDuration = ptr.Of(time.Millisecond * 100) rt.runtimeConfig.gracefulShutdownDuration = 3 * time.Second ctx, cancel := context.WithCancel(context.Background()) errCh := make(chan error) go func() { errCh <- rt.Run(ctx) }() cancel() select { case <-time.After(time.Second): case <-errCh: assert.Fail(t, "expected not to return until block timeout is reached") } rt.appHealthChanged(context.Background(), apphealth.AppStatusUnhealthy) select { case <-time.After(rt.runtimeConfig.gracefulShutdownDuration + 2*time.Second): assert.Fail(t, "input bindings shutdown timed out") case err := <-errCh: require.NoError(t, err) } }) } func TestGracefulShutdownPubSub(t *testing.T) { rt, err := NewTestDaprRuntime(t, modes.StandaloneMode) require.NoError(t, err) mockPubSub := new(daprt.MockPubSub) rt.runtimeConfig.registry.PubSubs().RegisterComponent( func(_ logger.Logger) pubsub.PubSub { return mockPubSub }, "mockPubSub", ) rt.runtimeConfig.gracefulShutdownDuration = 5 * time.Second mockPubSub.On("Init", mock.Anything).Return(nil) mockPubSub.On("Subscribe", mock.AnythingOfType("pubsub.SubscribeRequest"), mock.AnythingOfType("pubsub.Handler")).Return(nil) mockPubSub.On("Close").Return(nil) cPubSub := componentsV1alpha1.Component{} cPubSub.ObjectMeta.Name = "mockPubSub" cPubSub.Spec.Type = "pubsub.mockPubSub" subscriptionItems := []runtimePubsub.SubscriptionJSON{ {PubsubName: "mockPubSub", Topic: "topic0", Route: "shutdown"}, } sub, _ := json.Marshal(subscriptionItems) fakeResp := invokev1.NewInvokeMethodResponse(200, "OK", nil) fakeResp.WithRawDataBytes(sub). WithContentType("application/json") defer fakeResp.Close() mockAppChannel := new(channelt.MockAppChannel) rt.channels.WithAppChannel(mockAppChannel) mockAppChannel.On("InvokeMethod", mock.MatchedBy(daprt.MatchContextInterface), matchDaprRequestMethod("dapr/subscribe")).Return(fakeResp, nil) // Create new processor with mocked app channel. 
rt.processor = processor.New(processor.Options{ ID: rt.runtimeConfig.id, IsHTTP: rt.runtimeConfig.appConnectionConfig.Protocol.IsHTTP(), ActorsEnabled: len(rt.runtimeConfig.actorsService) > 0, Registry: rt.runtimeConfig.registry, ComponentStore: rt.compStore, Meta: rt.meta, GlobalConfig: rt.globalConfig, Resiliency: rt.resiliency, Mode: rt.runtimeConfig.mode, Channels: rt.channels, GRPC: rt.grpc, Security: rt.sec, }) require.NoError(t, rt.processor.Init(context.Background(), cPubSub)) ctx, cancel := context.WithCancel(context.Background()) errCh := make(chan error) go func() { errCh <- rt.Run(ctx) }() rt.appHealthChanged(context.Background(), apphealth.AppStatusHealthy) mockPubSub.AssertCalled(t, "Init", mock.Anything) mockPubSub.AssertCalled(t, "Subscribe", mock.AnythingOfType("pubsub.SubscribeRequest"), mock.AnythingOfType("pubsub.Handler")) cancel() select { case <-time.After(rt.runtimeConfig.gracefulShutdownDuration + 2*time.Second): assert.Fail(t, "pubsub shutdown timed out") case err := <-errCh: require.NoError(t, err) } } func TestGracefulShutdownActors(t *testing.T) { rt, err := NewTestDaprRuntime(t, modes.StandaloneMode) require.NoError(t, err) rt.runtimeConfig.gracefulShutdownDuration = 5 * time.Second bytes := make([]byte, 32) rand.Read(bytes) encryptKey := hex.EncodeToString(bytes) mockStateComponent := componentsV1alpha1.Component{ ObjectMeta: metav1.ObjectMeta{ Name: TestPubsubName, }, Spec: componentsV1alpha1.ComponentSpec{ Type: "state.mockState", Version: "v1", Metadata: []commonapi.NameValuePair{ { Name: "ACTORSTATESTORE", Value: commonapi.DynamicValue{ JSON: v1.JSON{Raw: []byte("true")}, }, }, { Name: "primaryEncryptionKey", Value: commonapi.DynamicValue{ JSON: v1.JSON{Raw: []byte(encryptKey)}, }, }, }, }, Auth: componentsV1alpha1.Auth{ SecretStore: "mockSecretStore", }, } // setup initMockStateStoreForRuntime(rt, encryptKey, nil) rt.namespace = "test" rt.runtimeConfig.appConnectionConfig.Port = -1 ctx, cancel := context.WithCancel(context.Background()) errCh := make(chan error) go func() { errCh <- rt.Run(ctx) }() select { case <-rt.initComplete: case <-time.After(time.Second * 5): t.Fatal("runtime did not init in time") } // act err = rt.processor.Init(context.Background(), mockStateComponent) // assert require.NoError(t, err, "expected no error") require.NoError(t, rt.initActors(context.TODO())) cancel() select { case <-time.After(rt.runtimeConfig.gracefulShutdownDuration + 2*time.Second): assert.Fail(t, "actors shutdown timed out") case err := <-errCh: require.NoError(t, err) } var activeActCount int32 runtimeStatus := rt.actor.GetRuntimeStatus(context.Background()) for _, v := range runtimeStatus.GetActiveActors() { activeActCount += v.GetCount() } assert.Equal(t, int32(0), activeActCount) } func initMockStateStoreForRuntime(rt *DaprRuntime, encryptKey string, e error) *daprt.MockStateStore { mockStateStore := new(daprt.MockStateStore) rt.runtimeConfig.registry.StateStores().RegisterComponent( func(_ logger.Logger) state.Store { return mockStateStore }, "mockState", ) expectedMetadata := state.Metadata{Base: mdata.Base{ Name: TestPubsubName, Properties: map[string]string{ "actorstatestore": "true", "primaryEncryptionKey": encryptKey, }, }} expectedMetadataUppercase := state.Metadata{Base: mdata.Base{ Name: TestPubsubName, Properties: map[string]string{ "ACTORSTATESTORE": "true", "primaryEncryptionKey": encryptKey, }, }} mockStateStore.On("Init", expectedMetadata).Return(e) mockStateStore.On("Init", expectedMetadataUppercase).Return(e) return mockStateStore } func 
TestTraceShutdown(t *testing.T) {
	rt, err := NewTestDaprRuntime(t, modes.StandaloneMode)
	require.NoError(t, err)
	rt.runtimeConfig.gracefulShutdownDuration = 5 * time.Second
	rt.globalConfig.Spec.TracingSpec = &config.TracingSpec{
		Otel: &config.OtelSpec{
			EndpointAddress: "foo.bar",
			IsSecure:        ptr.Of(false),
			Protocol:        "http",
		},
	}
	rt.hostAddress = "localhost:3000"
	tpStore := newOpentelemetryTracerProviderStore()
	require.NoError(t, rt.setupTracing(context.Background(), rt.hostAddress, tpStore))
	assert.NotNil(t, rt.tracerProvider)

	errCh := make(chan error)
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		errCh <- rt.Run(ctx)
	}()

	cancel()
	select {
	case <-time.After(rt.runtimeConfig.gracefulShutdownDuration + 2*time.Second):
		assert.Fail(t, "tracing shutdown timed out")
	case err := <-errCh:
		require.NoError(t, err)
	}

	assert.Nil(t, rt.tracerProvider)
}

func testSecurity(t *testing.T) security.Handler {
	secP, err := security.New(context.Background(), security.Options{
		TrustAnchors:            []byte("test"),
		AppID:                   "test",
		ControlPlaneTrustDomain: "test.example.com",
		ControlPlaneNamespace:   "default",
		MTLSEnabled:             false,
		OverrideCertRequestFn: func(context.Context, []byte) ([]*x509.Certificate, error) {
			return []*x509.Certificate{nil}, nil
		},
	})
	require.NoError(t, err)
	go secP.Run(context.Background())
	sec, err := secP.Handler(context.Background())
	require.NoError(t, err)

	return sec
}

func TestGetOtelServiceName(t *testing.T) {
	// t.Setenv restores the original value of OTEL_SERVICE_NAME when each subtest ends.
	tests := []struct {
		env      string // The value of the OTEL_SERVICE_NAME variable
		fallback string // The fallback value
		expected string // The expected value
	}{
		{"", "my-app", "my-app"},                 // Case 1: No environment variable, use fallback
		{"service-abc", "my-app", "service-abc"}, // Case 2: Environment variable set, use it
	}

	for _, tc := range tests {
		t.Run(tc.env, func(t *testing.T) {
			// Set the environment variable to the test case value
			t.Setenv("OTEL_SERVICE_NAME", tc.env)

			// Call the function and check the result
			got := getOtelServiceName(tc.fallback)
			if got != tc.expected {
				// Report an error if the result doesn't match
				t.Errorf("getOtelServiceName(%q) = %q; expected %q", tc.fallback, got, tc.expected)
			}
		})
	}
}
mikeee/dapr
pkg/runtime/runtime_test.go
GO
mit
63,641
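The graceful-shutdown tests above all follow one idiom: start the runtime in a goroutine that reports into an error channel, cancel the context, then race that channel against a deadline slightly longer than the configured grace period. A minimal, self-contained sketch of the idiom follows; the `run` function here is a hypothetical stand-in for `rt.Run`, not the real runtime.

package main

import (
	"context"
	"fmt"
	"time"
)

// run stands in for rt.Run: it blocks until the context is cancelled,
// then simulates cleanup that completes within the grace period.
func run(ctx context.Context, gracePeriod time.Duration) error {
	<-ctx.Done()
	time.Sleep(gracePeriod / 2) // simulated component shutdown
	return nil
}

func main() {
	const gracePeriod = 3 * time.Second

	ctx, cancel := context.WithCancel(context.Background())
	errCh := make(chan error, 1)
	go func() { errCh <- run(ctx, gracePeriod) }()

	cancel() // trigger shutdown

	// Allow the grace period plus some slack, exactly as the tests do.
	select {
	case <-time.After(gracePeriod + 2*time.Second):
		fmt.Println("shutdown timed out")
	case err := <-errCh:
		fmt.Println("shutdown returned:", err)
	}
}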
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package subscription

import (
	"context"

	"golang.org/x/exp/maps"

	contribpubsub "github.com/dapr/components-contrib/pubsub"
	"github.com/dapr/dapr/pkg/resiliency"
	"github.com/dapr/dapr/utils"
)

type bulkSubscribeResiliencyRes struct {
	entries  []contribpubsub.BulkSubscribeResponseEntry
	envelope map[string]interface{}
}

// applyBulkSubscribeResiliency applies resiliency support to bulk subscribe. After each
// attempt it filters out the messages that were successfully processed and retries only
// the ones that failed.
func (s *Subscription) applyBulkSubscribeResiliency(ctx context.Context, bulkSubCallData *bulkSubscribeCallData,
	psm bulkSubscribedMessage, deadLetterTopic string, path string, policyDef *resiliency.PolicyDefinition,
	rawPayload bool, envelope map[string]interface{},
) (*[]contribpubsub.BulkSubscribeResponseEntry, error) {
	bscData := *bulkSubCallData
	policyRunner := resiliency.NewRunnerWithOptions(
		ctx, policyDef, resiliency.RunnerOpts[*bulkSubscribeResiliencyRes]{
			Accumulator: func(bsrr *bulkSubscribeResiliencyRes) {
				for _, v := range bsrr.entries {
					// add to main bulkResponses
					if index, ok := (*bscData.entryIdIndexMap)[v.EntryId]; ok {
						(*bscData.bulkResponses)[index].EntryId = v.EntryId
						(*bscData.bulkResponses)[index].Error = v.Error
					}
				}
				filteredPubSubMsgs := utils.Filter(psm.pubSubMessages, func(ps message) bool {
					if index, ok := (*bscData.entryIdIndexMap)[ps.entry.EntryId]; ok {
						return (*bscData.bulkResponses)[index].Error != nil
					}
					return false
				})
				psm.pubSubMessages = filteredPubSubMsgs
				psm.length = len(filteredPubSubMsgs)
			},
		})
	_, err := policyRunner(func(ctx context.Context) (*bulkSubscribeResiliencyRes, error) {
		var pErr error
		bsrr := &bulkSubscribeResiliencyRes{
			entries:  make([]contribpubsub.BulkSubscribeResponseEntry, 0, len(psm.pubSubMessages)),
			envelope: maps.Clone(envelope),
		}
		if s.isHTTP {
			pErr = s.publishBulkMessageHTTP(ctx, &bscData, &psm, bsrr, deadLetterTopic)
		} else {
			pErr = s.publishBulkMessageGRPC(ctx, &bscData, &psm, &bsrr.entries, rawPayload, deadLetterTopic)
		}
		return bsrr, pErr
	})
	// Set the error on any entry that has not been touched yet; the only case where
	// this seems possible is a timeout.
	for eId, ind := range *bscData.entryIdIndexMap { //nolint:stylecheck
		if (*bscData.bulkResponses)[ind].EntryId == "" {
			(*bscData.bulkResponses)[ind].EntryId = eId
			(*bscData.bulkResponses)[ind].Error = err
		}
	}
	return bscData.bulkResponses, err
}
mikeee/dapr
pkg/runtime/subscription/bulkresiliency.go
GO
mit
3,095
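The Accumulator above narrows each retry to the entries that failed on the previous attempt. The sketch below reproduces just that narrowing loop with plain slices and maps, minus the resiliency policy machinery; `entryResult`, `retryFailed`, and `attempt` are hypothetical names standing in for the per-entry response, the runner-plus-accumulator, and the app invocation.

package main

import (
	"errors"
	"fmt"
)

// entryResult mirrors the shape of a per-entry bulk response:
// an entry ID plus an error that is nil on success.
type entryResult struct {
	ID  string
	Err error
}

// retryFailed keeps invoking attempt, shrinking the batch to the entries
// that failed, until everything succeeds or the try budget runs out.
func retryFailed(ids []string, tries int, attempt func([]string) []entryResult) map[string]error {
	final := make(map[string]error, len(ids))
	pending := ids
	for ; tries > 0 && len(pending) > 0; tries-- {
		var next []string
		for _, r := range attempt(pending) {
			final[r.ID] = r.Err // record the latest outcome for this entry
			if r.Err != nil {
				next = append(next, r.ID) // only failed entries are retried
			}
		}
		pending = next
	}
	return final
}

func main() {
	attempts := 0
	results := retryFailed([]string{"a", "b", "c"}, 3, func(batch []string) []entryResult {
		attempts++
		out := make([]entryResult, 0, len(batch))
		for _, id := range batch {
			if id == "b" && attempts < 2 { // "b" fails on the first attempt only
				out = append(out, entryResult{id, errors.New("retry")})
			} else {
				out = append(out, entryResult{id, nil})
			}
		}
		return out
	})
	fmt.Println(results) // all entries nil after two attempts; only "b" was retried
}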
/* Copyright 2023 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package subscription import ( "context" "encoding/json" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" contribpubsub "github.com/dapr/components-contrib/pubsub" inmemory "github.com/dapr/components-contrib/pubsub/in-memory" resiliencyV1alpha "github.com/dapr/dapr/pkg/apis/resiliency/v1alpha1" channelt "github.com/dapr/dapr/pkg/channel/testing" invokev1 "github.com/dapr/dapr/pkg/messaging/v1" "github.com/dapr/dapr/pkg/resiliency" "github.com/dapr/dapr/pkg/resiliency/breaker" "github.com/dapr/dapr/pkg/runtime/channels" runtimePubsub "github.com/dapr/dapr/pkg/runtime/pubsub" "github.com/dapr/kit/logger" "github.com/dapr/kit/ptr" ) const ( pubsubName = "pubsubName" ) var testLogger = logger.NewLogger("dapr.runtime.test") type input struct { pbsm bulkSubscribedMessage bscData bulkSubscribeCallData envelope map[string]interface{} } type testSettings struct { entryIdRetryTimes map[string]int //nolint:stylecheck failEvenOnes bool failAllEntries bool failCount int } func getBulkMessageEntriesForResiliency(len int) []contribpubsub.BulkMessageEntry { bulkEntries := make([]contribpubsub.BulkMessageEntry, 10) bulkEntries[0] = contribpubsub.BulkMessageEntry{EntryId: "1111111a", Event: []byte(order1)} bulkEntries[1] = contribpubsub.BulkMessageEntry{EntryId: "2222222b", Event: []byte(order2)} bulkEntries[2] = contribpubsub.BulkMessageEntry{EntryId: "333333c", Event: []byte(order3)} bulkEntries[3] = contribpubsub.BulkMessageEntry{EntryId: "4444444d", Event: []byte(order4)} bulkEntries[4] = contribpubsub.BulkMessageEntry{EntryId: "5555555e", Event: []byte(order5)} bulkEntries[5] = contribpubsub.BulkMessageEntry{EntryId: "66666666f", Event: []byte(order6)} bulkEntries[6] = contribpubsub.BulkMessageEntry{EntryId: "7777777g", Event: []byte(order7)} bulkEntries[7] = contribpubsub.BulkMessageEntry{EntryId: "8888888h", Event: []byte(order8)} bulkEntries[8] = contribpubsub.BulkMessageEntry{EntryId: "9999999i", Event: []byte(order9)} bulkEntries[9] = contribpubsub.BulkMessageEntry{EntryId: "10101010j", Event: []byte(order10)} return bulkEntries[:len] } var shortRetry = resiliencyV1alpha.Retry{ Policy: "constant", Duration: "1s", } var longRetry = resiliencyV1alpha.Retry{ Policy: "constant", Duration: "5s", } var ( longTimeout = "10s" shortTimeout = "1s" ) var orders []string = []string{order1, order2, order3, order4, order5, order6, order7, order8, order9, order10} func getPubSubMessages() []message { pubSubMessages := make([]message, 10) bulkEntries := getBulkMessageEntriesForResiliency(10) i := 0 for _, ord := range orders { var cloudEvent map[string]interface{} err := json.Unmarshal([]byte(ord), &cloudEvent) if err == nil { pubSubMessages[i].cloudEvent = cloudEvent } pubSubMessages[i].entry = &bulkEntries[i] rawData := runtimePubsub.BulkSubscribeMessageItem{ EntryId: bulkEntries[i].EntryId, Event: cloudEvent, } pubSubMessages[i].rawData = &rawData i++ } return pubSubMessages } func 
createResPolicyProvider(circuitBreaker resiliencyV1alpha.CircuitBreaker, timeout string,
	retry resiliencyV1alpha.Retry,
) *resiliency.Resiliency {
	r := &resiliencyV1alpha.Resiliency{
		Spec: resiliencyV1alpha.ResiliencySpec{
			Policies: resiliencyV1alpha.Policies{
				Timeouts: map[string]string{
					"pubsubTimeout": timeout,
				},
				CircuitBreakers: map[string]resiliencyV1alpha.CircuitBreaker{
					"pubsubCircuitBreaker": circuitBreaker,
				},
				Retries: map[string]resiliencyV1alpha.Retry{
					"pubsubRetry": retry,
				},
			},
			Targets: resiliencyV1alpha.Targets{
				Components: map[string]resiliencyV1alpha.ComponentPolicyNames{
					"pubsubName": {
						Inbound: resiliencyV1alpha.PolicyNames{
							Timeout:        "pubsubTimeout",
							CircuitBreaker: "pubsubCircuitBreaker",
							Retry:          "pubsubRetry",
						},
					},
				},
			},
		},
	}
	return resiliency.FromConfigurations(testLogger, r)
}

func getResponse(req *invokev1.InvokeMethodRequest, ts *testSettings) *invokev1.InvokeMethodResponse {
	var data map[string]any
	v, _ := req.RawDataFull()
	e := json.Unmarshal(v, &data)
	appResponses := []contribpubsub.AppBulkResponseEntry{}
	if e == nil {
		entries, _ := data["entries"].([]any)
		for j := 1; j <= len(entries); j++ {
			entryId, _ := entries[j-1].(map[string]any)["entryId"].(string) //nolint:stylecheck
			abre := contribpubsub.AppBulkResponseEntry{
				EntryId: entryId,
			}
			if ts.failCount > 0 && (ts.failAllEntries || (ts.failEvenOnes && j%2 == 0)) {
				testLogger.Infof("ts.failCount: %d", ts.failCount)
				abre.Status = "RETRY"
			} else {
				abre.Status = "SUCCESS"
			}
			appResponses = append(appResponses, abre)
			if _, ok := ts.entryIdRetryTimes[entryId]; ok {
				ts.entryIdRetryTimes[entryId]++
			} else {
				ts.entryIdRetryTimes[entryId] = 1
			}
		}
		ts.failCount--
	}
	re := contribpubsub.AppBulkResponse{
		AppResponses: appResponses,
	}
	v, _ = json.Marshal(re)
	respInvoke := invokev1.NewInvokeMethodResponse(200, "OK", nil).
		WithRawDataBytes(v).
WithContentType("application/json") return respInvoke } func getInput() input { in := input{} testBulkSubscribePubsub := "bulkSubscribePubSub" msgArr := getBulkMessageEntriesForResiliency(10) psMessages := getPubSubMessages() in.pbsm = bulkSubscribedMessage{ pubSubMessages: psMessages, topic: "topic0", pubsub: testBulkSubscribePubsub, path: orders1, length: len(psMessages), } bulkResponses := make([]contribpubsub.BulkSubscribeResponseEntry, 10) in.bscData.bulkResponses = &bulkResponses entryIdIndexMap := make(map[string]int) //nolint:stylecheck in.bscData.entryIdIndexMap = &entryIdIndexMap for i, entry := range msgArr { (*in.bscData.entryIdIndexMap)[entry.EntryId] = i } in.envelope = runtimePubsub.NewBulkSubscribeEnvelope(&runtimePubsub.BulkSubscribeEnvelope{ ID: "", Topic: "topic0", Pubsub: testBulkSubscribePubsub, }) bulkSubDiag := newBulkSubIngressDiagnostics() in.bscData.bulkSubDiag = &bulkSubDiag in.bscData.topic = "topic0" in.bscData.psName = testBulkSubscribePubsub return in } func TestBulkSubscribeResiliency(t *testing.T) { t.Run("verify Responses when few entries fail even after retries", func(t *testing.T) { mockAppChannel := new(channelt.MockAppChannel) mockAppChannel.Init() ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) comp := inmemory.New(log) require.NoError(t, comp.Init(ctx, contribpubsub.Metadata{})) ps, err := New(Options{ IsHTTP: true, Resiliency: resiliency.New(logger.NewLogger("test")), Channels: new(channels.Channels).WithAppChannel(mockAppChannel), PubSub: &runtimePubsub.PubsubItem{Component: comp}, }) require.NoError(t, err) ts := testSettings{ entryIdRetryTimes: map[string]int{}, failCount: 4, failEvenOnes: true, failAllEntries: false, } mockee := mockAppChannel.On( "InvokeMethod", mock.MatchedBy(matchContextInterface), mock.MatchedBy(func(req *invokev1.InvokeMethodRequest) bool { return req.Message().GetMethod() == orders1 }), ) // After(3 * time.Second) mockee.RunFn = func(args mock.Arguments) { respInvoke1 := getResponse(args.Get(1).(*invokev1.InvokeMethodRequest), &ts) mockee.ReturnArguments = mock.Arguments{respInvoke1, nil} } shortRetry.MaxRetries = ptr.Of(2) policyProvider := createResPolicyProvider(resiliencyV1alpha.CircuitBreaker{}, longTimeout, shortRetry) policyDef := policyProvider.ComponentInboundPolicy(pubsubName, resiliency.Pubsub) in := getInput() b, e := ps.applyBulkSubscribeResiliency(context.TODO(), &in.bscData, in.pbsm, "dlq", orders1, policyDef, true, in.envelope) mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 3) assert.Len(t, *b, 10) expectedResponse := BulkResponseExpectation{ Responses: []BulkResponseEntryExpectation{ {EntryId: "1111111a", IsError: false}, {EntryId: "2222222b", IsError: false}, {EntryId: "333333c", IsError: false}, {EntryId: "4444444d", IsError: false}, {EntryId: "5555555e", IsError: false}, {EntryId: "66666666f", IsError: false}, {EntryId: "7777777g", IsError: false}, {EntryId: "8888888h", IsError: true}, {EntryId: "9999999i", IsError: false}, {EntryId: "10101010j", IsError: false}, }, } assertRetryCount(t, map[string]int{ "1111111a": 1, "2222222b": 2, "333333c": 1, "4444444d": 3, "5555555e": 1, "66666666f": 2, "7777777g": 1, "8888888h": 3, "9999999i": 1, "10101010j": 2, }, ts.entryIdRetryTimes) require.Error(t, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) }) t.Run("verify Responses when ALL entries fail even after retries", func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) comp := inmemory.New(log) require.NoError(t, 
comp.Init(ctx, contribpubsub.Metadata{})) mockAppChannel := new(channelt.MockAppChannel) mockAppChannel.Init() ps, err := New(Options{ Resiliency: resiliency.New(logger.NewLogger("test")), IsHTTP: true, Channels: new(channels.Channels).WithAppChannel(mockAppChannel), PubSub: &runtimePubsub.PubsubItem{Component: comp}, }) require.NoError(t, err) ts := testSettings{ entryIdRetryTimes: map[string]int{}, failCount: 4, failEvenOnes: true, failAllEntries: true, } mockee := mockAppChannel.On( "InvokeMethod", mock.MatchedBy(matchContextInterface), mock.MatchedBy(func(req *invokev1.InvokeMethodRequest) bool { return req.Message().GetMethod() == orders1 }), ) // After(3 * time.Second) mockee.RunFn = func(args mock.Arguments) { respInvoke1 := getResponse(args.Get(1).(*invokev1.InvokeMethodRequest), &ts) mockee.ReturnArguments = mock.Arguments{respInvoke1, nil} } shortRetry.MaxRetries = ptr.Of(2) policyProvider := createResPolicyProvider(resiliencyV1alpha.CircuitBreaker{}, longTimeout, shortRetry) policyDef := policyProvider.ComponentInboundPolicy(pubsubName, resiliency.Pubsub) in := getInput() b, e := ps.applyBulkSubscribeResiliency(context.TODO(), &in.bscData, in.pbsm, "dlq", orders1, policyDef, true, in.envelope) mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 3) assert.Len(t, *b, 10) expectedResponse := BulkResponseExpectation{ Responses: []BulkResponseEntryExpectation{ {EntryId: "1111111a", IsError: true}, {EntryId: "2222222b", IsError: true}, {EntryId: "333333c", IsError: true}, {EntryId: "4444444d", IsError: true}, {EntryId: "5555555e", IsError: true}, {EntryId: "66666666f", IsError: true}, {EntryId: "7777777g", IsError: true}, {EntryId: "8888888h", IsError: true}, {EntryId: "9999999i", IsError: true}, {EntryId: "10101010j", IsError: true}, }, } assertRetryCount(t, map[string]int{ "1111111a": 3, "2222222b": 3, "333333c": 3, "4444444d": 3, "5555555e": 3, "66666666f": 3, "7777777g": 3, "8888888h": 3, "9999999i": 3, "10101010j": 3, }, ts.entryIdRetryTimes) require.Error(t, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) }) t.Run("pass ALL entries in second attempt", func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) comp := inmemory.New(log) require.NoError(t, comp.Init(ctx, contribpubsub.Metadata{})) mockAppChannel := new(channelt.MockAppChannel) mockAppChannel.Init() ps, err := New(Options{ Resiliency: resiliency.New(logger.NewLogger("test")), IsHTTP: true, Channels: new(channels.Channels).WithAppChannel(mockAppChannel), PubSub: &runtimePubsub.PubsubItem{Component: comp}, }) require.NoError(t, err) ts := testSettings{ entryIdRetryTimes: map[string]int{}, failCount: 1, failEvenOnes: false, failAllEntries: true, } mockee := mockAppChannel.On( "InvokeMethod", mock.MatchedBy(matchContextInterface), mock.MatchedBy(func(req *invokev1.InvokeMethodRequest) bool { return req.Message().GetMethod() == orders1 }), ) // After(3 * time.Second) mockee.RunFn = func(args mock.Arguments) { respInvoke1 := getResponse(args.Get(1).(*invokev1.InvokeMethodRequest), &ts) mockee.ReturnArguments = mock.Arguments{respInvoke1, nil} } shortRetry.MaxRetries = ptr.Of(2) policyProvider := createResPolicyProvider(resiliencyV1alpha.CircuitBreaker{}, longTimeout, shortRetry) policyDef := policyProvider.ComponentInboundPolicy(pubsubName, resiliency.Pubsub) in := getInput() b, e := ps.applyBulkSubscribeResiliency(context.TODO(), &in.bscData, in.pbsm, "dlq", orders1, policyDef, true, in.envelope) mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 2) 
assert.Len(t, *b, 10) expectedResponse := BulkResponseExpectation{ Responses: []BulkResponseEntryExpectation{ {EntryId: "1111111a", IsError: false}, {EntryId: "2222222b", IsError: false}, {EntryId: "333333c", IsError: false}, {EntryId: "4444444d", IsError: false}, {EntryId: "5555555e", IsError: false}, {EntryId: "66666666f", IsError: false}, {EntryId: "7777777g", IsError: false}, {EntryId: "8888888h", IsError: false}, {EntryId: "9999999i", IsError: false}, {EntryId: "10101010j", IsError: false}, }, } assertRetryCount(t, map[string]int{ "1111111a": 2, "2222222b": 2, "333333c": 2, "4444444d": 2, "5555555e": 2, "66666666f": 2, "7777777g": 2, "8888888h": 2, "9999999i": 2, "10101010j": 2, }, ts.entryIdRetryTimes) require.NoError(t, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) }) t.Run("pass ALL entries in first attempt", func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) comp := inmemory.New(log) require.NoError(t, comp.Init(ctx, contribpubsub.Metadata{})) mockAppChannel := new(channelt.MockAppChannel) mockAppChannel.Init() ps, err := New(Options{ Channels: new(channels.Channels).WithAppChannel(mockAppChannel), PubSub: &runtimePubsub.PubsubItem{Component: comp}, IsHTTP: true, Resiliency: resiliency.New(logger.NewLogger("test")), }) require.NoError(t, err) ts := testSettings{ entryIdRetryTimes: map[string]int{}, failCount: 0, failEvenOnes: false, failAllEntries: false, } mockee := mockAppChannel.On( "InvokeMethod", mock.MatchedBy(matchContextInterface), mock.MatchedBy(func(req *invokev1.InvokeMethodRequest) bool { return req.Message().GetMethod() == orders1 }), ) // After(3 * time.Second) mockee.RunFn = func(args mock.Arguments) { respInvoke1 := getResponse(args.Get(1).(*invokev1.InvokeMethodRequest), &ts) mockee.ReturnArguments = mock.Arguments{respInvoke1, nil} } shortRetry.MaxRetries = ptr.Of(2) policyProvider := createResPolicyProvider(resiliencyV1alpha.CircuitBreaker{}, longTimeout, shortRetry) policyDef := policyProvider.ComponentInboundPolicy(pubsubName, resiliency.Pubsub) in := getInput() b, e := ps.applyBulkSubscribeResiliency(context.TODO(), &in.bscData, in.pbsm, "dlq", orders1, policyDef, true, in.envelope) mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1) assert.Len(t, *b, 10) expectedResponse := BulkResponseExpectation{ Responses: []BulkResponseEntryExpectation{ {EntryId: "1111111a", IsError: false}, {EntryId: "2222222b", IsError: false}, {EntryId: "333333c", IsError: false}, {EntryId: "4444444d", IsError: false}, {EntryId: "5555555e", IsError: false}, {EntryId: "66666666f", IsError: false}, {EntryId: "7777777g", IsError: false}, {EntryId: "8888888h", IsError: false}, {EntryId: "9999999i", IsError: false}, {EntryId: "10101010j", IsError: false}, }, } assertRetryCount(t, map[string]int{ "1111111a": 1, "2222222b": 1, "333333c": 1, "4444444d": 1, "5555555e": 1, "66666666f": 1, "7777777g": 1, "8888888h": 1, "9999999i": 1, "10101010j": 1, }, ts.entryIdRetryTimes) require.NoError(t, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) }) t.Run("fail ALL entries due to timeout", func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) comp := inmemory.New(log) require.NoError(t, comp.Init(ctx, contribpubsub.Metadata{})) mockAppChannel := new(channelt.MockAppChannel) mockAppChannel.Init() ps, err := New(Options{ Channels: new(channels.Channels).WithAppChannel(mockAppChannel), Resiliency: resiliency.New(logger.NewLogger("test")), PubSub: 
&runtimePubsub.PubsubItem{Component: comp}, IsHTTP: true, }) require.NoError(t, err) ts := testSettings{ entryIdRetryTimes: map[string]int{}, failCount: 0, failEvenOnes: false, failAllEntries: false, } mockee := mockAppChannel. On( "InvokeMethod", mock.MatchedBy(matchContextInterface), mock.MatchedBy(func(req *invokev1.InvokeMethodRequest) bool { return req.Message().GetMethod() == orders1 }), ). After(3 * time.Second) mockee.RunFn = func(args mock.Arguments) { respInvoke1 := getResponse(args.Get(1).(*invokev1.InvokeMethodRequest), &ts) mockee.ReturnArguments = mock.Arguments{respInvoke1, nil} } shortRetry.MaxRetries = ptr.Of(2) policyProvider := createResPolicyProvider(resiliencyV1alpha.CircuitBreaker{}, shortTimeout, shortRetry) policyDef := policyProvider.ComponentInboundPolicy(pubsubName, resiliency.Pubsub) in := getInput() b, e := ps.applyBulkSubscribeResiliency(context.TODO(), &in.bscData, in.pbsm, "dlq", orders1, policyDef, true, in.envelope) assert.Len(t, *b, 10) expectedResponse := BulkResponseExpectation{ Responses: []BulkResponseEntryExpectation{ {EntryId: "1111111a", IsError: true}, {EntryId: "2222222b", IsError: true}, {EntryId: "333333c", IsError: true}, {EntryId: "4444444d", IsError: true}, {EntryId: "5555555e", IsError: true}, {EntryId: "66666666f", IsError: true}, {EntryId: "7777777g", IsError: true}, {EntryId: "8888888h", IsError: true}, {EntryId: "9999999i", IsError: true}, {EntryId: "10101010j", IsError: true}, }, } require.Error(t, e) require.ErrorIs(t, e, context.DeadlineExceeded) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) }) t.Run("verify Responses when ALL entries fail with Circuitbreaker and exhaust retries", func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) comp := inmemory.New(log) require.NoError(t, comp.Init(ctx, contribpubsub.Metadata{})) mockAppChannel := new(channelt.MockAppChannel) mockAppChannel.Init() ps, err := New(Options{ Resiliency: resiliency.New(logger.NewLogger("test")), Channels: new(channels.Channels).WithAppChannel(mockAppChannel), PubSub: &runtimePubsub.PubsubItem{Component: comp}, IsHTTP: true, }) require.NoError(t, err) ts := testSettings{ entryIdRetryTimes: map[string]int{}, failCount: 10, failEvenOnes: true, failAllEntries: true, } mockee := mockAppChannel.On( "InvokeMethod", mock.MatchedBy(matchContextInterface), mock.MatchedBy(func(req *invokev1.InvokeMethodRequest) bool { return req.Message().GetMethod() == orders1 }), ) mockee.RunFn = func(args mock.Arguments) { respInvoke1 := getResponse(args.Get(1).(*invokev1.InvokeMethodRequest), &ts) mockee.ReturnArguments = mock.Arguments{respInvoke1, nil} } // set a circuit breaker with 1 consecutive failure cb := resiliencyV1alpha.CircuitBreaker{ Trip: "consecutiveFailures > 1", // circuitBreaker will open after 1 failure, after the retries MaxRequests: 1, // only 1 request will be allowed when circuitBreaker is half-open Timeout: "30s", // half-open after 30s. 
So in test this will not be triggered } shortRetry.MaxRetries = ptr.Of(3) policyProvider := createResPolicyProvider(cb, longTimeout, shortRetry) policyDef := policyProvider.ComponentInboundPolicy(pubsubName, resiliency.Pubsub) in := getInput() b, e := ps.applyBulkSubscribeResiliency(context.TODO(), &in.bscData, in.pbsm, "dlq", orders1, policyDef, true, in.envelope) expectedResponse := BulkResponseExpectation{ Responses: []BulkResponseEntryExpectation{ {EntryId: "1111111a", IsError: true}, {EntryId: "2222222b", IsError: true}, {EntryId: "333333c", IsError: true}, {EntryId: "4444444d", IsError: true}, {EntryId: "5555555e", IsError: true}, {EntryId: "66666666f", IsError: true}, {EntryId: "7777777g", IsError: true}, {EntryId: "8888888h", IsError: true}, {EntryId: "9999999i", IsError: true}, {EntryId: "10101010j", IsError: true}, }, } expectedCBRetryCount := map[string]int{ "1111111a": 2, "2222222b": 2, "333333c": 2, "4444444d": 2, "5555555e": 2, "66666666f": 2, "7777777g": 2, "8888888h": 2, "9999999i": 2, "10101010j": 2, } mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 2) assert.Len(t, *b, 10) assertRetryCount(t, expectedCBRetryCount, ts.entryIdRetryTimes) require.Error(t, e) assert.Equal(t, breaker.ErrOpenState, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) b, e = ps.applyBulkSubscribeResiliency(context.TODO(), &in.bscData, in.pbsm, "dlq", orders1, policyDef, true, in.envelope) mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 2) assert.Len(t, *b, 10) assertRetryCount(t, expectedCBRetryCount, ts.entryIdRetryTimes) require.Error(t, e) assert.Equal(t, breaker.ErrOpenState, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) }) t.Run("verify Responses when Partial entries fail with Circuitbreaker and exhaust retries", func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) comp := inmemory.New(log) require.NoError(t, comp.Init(ctx, contribpubsub.Metadata{})) mockAppChannel := new(channelt.MockAppChannel) mockAppChannel.Init() ps, err := New(Options{ PubSub: &runtimePubsub.PubsubItem{Component: comp}, Resiliency: resiliency.New(logger.NewLogger("test")), Channels: new(channels.Channels).WithAppChannel(mockAppChannel), IsHTTP: true, }) require.NoError(t, err) ts := testSettings{ entryIdRetryTimes: map[string]int{}, failCount: 10, failEvenOnes: true, failAllEntries: false, } mockee := mockAppChannel.On( "InvokeMethod", mock.MatchedBy(matchContextInterface), mock.MatchedBy(func(req *invokev1.InvokeMethodRequest) bool { return req.Message().GetMethod() == orders1 }), ) mockee.RunFn = func(args mock.Arguments) { respInvoke1 := getResponse(args.Get(1).(*invokev1.InvokeMethodRequest), &ts) mockee.ReturnArguments = mock.Arguments{respInvoke1, nil} } // set a circuit breaker with 1 consecutive failure cb := resiliencyV1alpha.CircuitBreaker{ Trip: "consecutiveFailures > 1", // circuitBreaker will open after 1 failure, after the retries MaxRequests: 1, // only 1 request will be allowed when circuitBreaker is half-open Timeout: "30s", // half-open after 30s. 
So in test this will not be triggered } shortRetry.MaxRetries = ptr.Of(5) policyProvider := createResPolicyProvider(cb, longTimeout, shortRetry) policyDef := policyProvider.ComponentInboundPolicy(pubsubName, resiliency.Pubsub) in := getInput() b, e := ps.applyBulkSubscribeResiliency(context.TODO(), &in.bscData, in.pbsm, "dlq", orders1, policyDef, true, in.envelope) expectedResponse := BulkResponseExpectation{ Responses: []BulkResponseEntryExpectation{ {EntryId: "1111111a", IsError: false}, {EntryId: "2222222b", IsError: false}, {EntryId: "333333c", IsError: false}, {EntryId: "4444444d", IsError: true}, {EntryId: "5555555e", IsError: false}, {EntryId: "66666666f", IsError: false}, {EntryId: "7777777g", IsError: false}, {EntryId: "8888888h", IsError: true}, {EntryId: "9999999i", IsError: false}, {EntryId: "10101010j", IsError: false}, }, } expectedCBRetryCount := map[string]int{ "1111111a": 1, "2222222b": 2, "333333c": 1, "4444444d": 2, "5555555e": 1, "66666666f": 2, "7777777g": 1, "8888888h": 2, "9999999i": 1, "10101010j": 2, } mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 2) assert.Len(t, *b, 10) assertRetryCount(t, expectedCBRetryCount, ts.entryIdRetryTimes) require.Error(t, e) assert.Equal(t, breaker.ErrOpenState, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) b, e = ps.applyBulkSubscribeResiliency(context.TODO(), &in.bscData, in.pbsm, "dlq", orders1, policyDef, true, in.envelope) mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 2) assert.Len(t, *b, 10) assertRetryCount(t, expectedCBRetryCount, ts.entryIdRetryTimes) require.Error(t, e) assert.Equal(t, breaker.ErrOpenState, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) }) t.Run("verify Responses when Partial entries Pass with Circuitbreaker half open timeout", func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) comp := inmemory.New(log) require.NoError(t, comp.Init(ctx, contribpubsub.Metadata{})) mockAppChannel := new(channelt.MockAppChannel) mockAppChannel.Init() ps, err := New(Options{ PubSub: &runtimePubsub.PubsubItem{Component: comp}, Resiliency: resiliency.New(logger.NewLogger("test")), Channels: new(channels.Channels).WithAppChannel(mockAppChannel), IsHTTP: true, }) require.NoError(t, err) ts := testSettings{ entryIdRetryTimes: map[string]int{}, failCount: 2, failEvenOnes: true, failAllEntries: false, } mockee := mockAppChannel.On( "InvokeMethod", mock.MatchedBy(matchContextInterface), mock.MatchedBy(func(req *invokev1.InvokeMethodRequest) bool { return req.Message().GetMethod() == orders1 }), ) mockee.RunFn = func(args mock.Arguments) { respInvoke1 := getResponse(args.Get(1).(*invokev1.InvokeMethodRequest), &ts) mockee.ReturnArguments = mock.Arguments{respInvoke1, nil} } // set a circuit breaker with 1 consecutive failure cb := resiliencyV1alpha.CircuitBreaker{ Trip: "consecutiveFailures > 1", // circuitBreaker will open after 1 failure, after the retries MaxRequests: 1, // only 1 request will be allowed when circuitBreaker is half-open Timeout: "1ms", // half-open after 1ms. 
So in test this will be triggered } shortRetry.MaxRetries = ptr.Of(3) policyProvider := createResPolicyProvider(cb, longTimeout, shortRetry) policyDef := policyProvider.ComponentInboundPolicy(pubsubName, resiliency.Pubsub) in := getInput() b, e := ps.applyBulkSubscribeResiliency(context.TODO(), &in.bscData, in.pbsm, "dlq", orders1, policyDef, true, in.envelope) expectedResponse := BulkResponseExpectation{ Responses: []BulkResponseEntryExpectation{ {EntryId: "1111111a", IsError: false}, {EntryId: "2222222b", IsError: false}, {EntryId: "333333c", IsError: false}, {EntryId: "4444444d", IsError: false}, {EntryId: "5555555e", IsError: false}, {EntryId: "66666666f", IsError: false}, {EntryId: "7777777g", IsError: false}, {EntryId: "8888888h", IsError: false}, {EntryId: "9999999i", IsError: false}, {EntryId: "10101010j", IsError: false}, }, } expectedCBRetryCount := map[string]int{ "1111111a": 1, "2222222b": 2, "333333c": 1, "4444444d": 3, "5555555e": 1, "66666666f": 2, "7777777g": 1, "8888888h": 3, "9999999i": 1, "10101010j": 2, } mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 3) assert.Len(t, *b, 10) assertRetryCount(t, expectedCBRetryCount, ts.entryIdRetryTimes) require.NoError(t, e) // assert.Equal(t, breaker.ErrOpenState, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) }) t.Run("Partial success with CB and exhaust retries, then act with short half open timeout", func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) comp := inmemory.New(log) require.NoError(t, comp.Init(ctx, contribpubsub.Metadata{})) mockAppChannel := new(channelt.MockAppChannel) mockAppChannel.Init() ps, err := New(Options{ PubSub: &runtimePubsub.PubsubItem{Component: comp}, Resiliency: resiliency.New(logger.NewLogger("test")), Channels: new(channels.Channels).WithAppChannel(mockAppChannel), IsHTTP: true, }) require.NoError(t, err) ts := testSettings{ entryIdRetryTimes: map[string]int{}, failCount: 2, failEvenOnes: true, failAllEntries: false, } mockee := mockAppChannel.On( "InvokeMethod", mock.MatchedBy(matchContextInterface), mock.MatchedBy(func(req *invokev1.InvokeMethodRequest) bool { return req.Message().GetMethod() == orders1 }), ) mockee.RunFn = func(args mock.Arguments) { respInvoke1 := getResponse(args.Get(1).(*invokev1.InvokeMethodRequest), &ts) mockee.ReturnArguments = mock.Arguments{respInvoke1, nil} } // set a circuit breaker with 1 consecutive failure cb := resiliencyV1alpha.CircuitBreaker{ Trip: "consecutiveFailures > 1", // circuitBreaker will open after 1 failure, after the retries MaxRequests: 1, // only 1 request will be allowed when circuitBreaker is half-open Timeout: "4s", // half-open after 4s. 
So in test this will be triggered } shortRetry.MaxRetries = ptr.Of(3) policyProvider := createResPolicyProvider(cb, longTimeout, shortRetry) policyDef := policyProvider.ComponentInboundPolicy(pubsubName, resiliency.Pubsub) in := getInput() b, e := ps.applyBulkSubscribeResiliency(context.TODO(), &in.bscData, in.pbsm, "dlq", orders1, policyDef, true, in.envelope) expectedResponse := BulkResponseExpectation{ Responses: []BulkResponseEntryExpectation{ {EntryId: "1111111a", IsError: false}, {EntryId: "2222222b", IsError: false}, {EntryId: "333333c", IsError: false}, {EntryId: "4444444d", IsError: true}, {EntryId: "5555555e", IsError: false}, {EntryId: "66666666f", IsError: false}, {EntryId: "7777777g", IsError: false}, {EntryId: "8888888h", IsError: true}, {EntryId: "9999999i", IsError: false}, {EntryId: "10101010j", IsError: false}, }, } expectedCBRetryCount := map[string]int{ "1111111a": 1, "2222222b": 2, "333333c": 1, "4444444d": 2, "5555555e": 1, "66666666f": 2, "7777777g": 1, "8888888h": 2, "9999999i": 1, "10101010j": 2, } mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 2) assert.Len(t, *b, 10) assertRetryCount(t, expectedCBRetryCount, ts.entryIdRetryTimes) require.Error(t, e) // assert.Equal(t, breaker.ErrOpenState, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) time.Sleep(5 * time.Second) b, e = ps.applyBulkSubscribeResiliency(context.TODO(), &in.bscData, in.pbsm, "dlq", orders1, policyDef, true, in.envelope) expectedResponse = BulkResponseExpectation{ Responses: []BulkResponseEntryExpectation{ {EntryId: "1111111a", IsError: false}, {EntryId: "2222222b", IsError: false}, {EntryId: "333333c", IsError: false}, {EntryId: "4444444d", IsError: false}, {EntryId: "5555555e", IsError: false}, {EntryId: "66666666f", IsError: false}, {EntryId: "7777777g", IsError: false}, {EntryId: "8888888h", IsError: false}, {EntryId: "9999999i", IsError: false}, {EntryId: "10101010j", IsError: false}, }, } expectedCBRetryCount = map[string]int{ "1111111a": 2, "2222222b": 3, "333333c": 2, "4444444d": 3, "5555555e": 2, "66666666f": 3, "7777777g": 2, "8888888h": 3, "9999999i": 2, "10101010j": 3, } mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 3) assert.Len(t, *b, 10) assertRetryCount(t, expectedCBRetryCount, ts.entryIdRetryTimes) require.NoError(t, e) // assert.Equal(t, breaker.ErrOpenState, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) }) t.Run("Fail all events with timeout and then Open CB - short retries", func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) comp := inmemory.New(log) require.NoError(t, comp.Init(ctx, contribpubsub.Metadata{})) mockAppChannel := new(channelt.MockAppChannel) mockAppChannel.Init() ps, err := New(Options{ PubSub: &runtimePubsub.PubsubItem{Component: comp}, Resiliency: resiliency.New(logger.NewLogger("test")), Channels: new(channels.Channels).WithAppChannel(mockAppChannel), IsHTTP: true, }) require.NoError(t, err) ts := testSettings{ entryIdRetryTimes: map[string]int{}, failCount: 10, failEvenOnes: true, failAllEntries: false, } mockee := mockAppChannel. On( "InvokeMethod", mock.MatchedBy(matchContextInterface), mock.MatchedBy(func(req *invokev1.InvokeMethodRequest) bool { return req.Message().GetMethod() == orders1 }), ). 
After(3 * time.Second) mockee.RunFn = func(args mock.Arguments) { respInvoke1 := getResponse(args.Get(1).(*invokev1.InvokeMethodRequest), &ts) mockee.ReturnArguments = mock.Arguments{respInvoke1, nil} } // set a circuit breaker with 1 consecutive failure cb := resiliencyV1alpha.CircuitBreaker{ Trip: "consecutiveFailures > 1", // circuitBreaker will open after 1 failure, after the retries MaxRequests: 1, // only 1 request will be allowed when circuitBreaker is half-open Timeout: "30s", // half-open after 30s. So in test this will NOT be triggered } shortRetry.MaxRetries = ptr.Of(2) policyProvider := createResPolicyProvider(cb, shortTimeout, shortRetry) policyDef := policyProvider.ComponentInboundPolicy(pubsubName, resiliency.Pubsub) in := getInput() b, e := ps.applyBulkSubscribeResiliency(context.Background(), &in.bscData, in.pbsm, "dlq", orders1, policyDef, true, in.envelope) expectedResponse := BulkResponseExpectation{ Responses: []BulkResponseEntryExpectation{ {EntryId: "1111111a", IsError: true}, {EntryId: "2222222b", IsError: true}, {EntryId: "333333c", IsError: true}, {EntryId: "4444444d", IsError: true}, {EntryId: "5555555e", IsError: true}, {EntryId: "66666666f", IsError: true}, {EntryId: "7777777g", IsError: true}, {EntryId: "8888888h", IsError: true}, {EntryId: "9999999i", IsError: true}, {EntryId: "10101010j", IsError: true}, }, } assert.Len(t, *b, 10) require.Error(t, e) assert.Equal(t, breaker.ErrOpenState, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) b, e = ps.applyBulkSubscribeResiliency(context.TODO(), &in.bscData, in.pbsm, "dlq", orders1, policyDef, true, in.envelope) assert.Len(t, *b, 10) require.Error(t, e) assert.Equal(t, breaker.ErrOpenState, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) }) } func TestBulkSubscribeResiliencyStateConversionsFromHalfOpen(t *testing.T) { t.Run("verify Responses when Circuitbreaker half open state changes happen", func(t *testing.T) { mockAppChannel := new(channelt.MockAppChannel) mockAppChannel.Init() ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) comp := inmemory.New(log) require.NoError(t, comp.Init(ctx, contribpubsub.Metadata{})) ps, err := New(Options{ Channels: new(channels.Channels).WithAppChannel(mockAppChannel), PubSub: &runtimePubsub.PubsubItem{Component: comp}, Resiliency: resiliency.New(logger.NewLogger("test")), IsHTTP: true, }) require.NoError(t, err) ts := testSettings{ entryIdRetryTimes: map[string]int{}, failCount: 3, failEvenOnes: true, failAllEntries: false, } mockee := mockAppChannel.On( "InvokeMethod", mock.MatchedBy(matchContextInterface), mock.MatchedBy(func(req *invokev1.InvokeMethodRequest) bool { return req.Message().GetMethod() == orders1 }), ) mockee.RunFn = func(args mock.Arguments) { respInvoke1 := getResponse(args.Get(1).(*invokev1.InvokeMethodRequest), &ts) mockee.ReturnArguments = mock.Arguments{respInvoke1, nil} } // set a circuit breaker with 1 consecutive failure cb := resiliencyV1alpha.CircuitBreaker{ Trip: "consecutiveFailures > 1", // circuitBreaker will open after 1 failure, after the retries MaxRequests: 1, // only 1 request will be allowed when circuitBreaker is half-open Timeout: "4s", // half-open after 4s. 
So in test this will be triggered } shortRetry.MaxRetries = ptr.Of(20) policyProvider := createResPolicyProvider(cb, longTimeout, shortRetry) policyDef := policyProvider.ComponentInboundPolicy(pubsubName, resiliency.Pubsub) in := getInput() b, e := ps.applyBulkSubscribeResiliency(context.TODO(), &in.bscData, in.pbsm, "dlq", orders1, policyDef, true, in.envelope) expectedResponse := BulkResponseExpectation{ Responses: []BulkResponseEntryExpectation{ {EntryId: "1111111a", IsError: false}, {EntryId: "2222222b", IsError: false}, {EntryId: "333333c", IsError: false}, {EntryId: "4444444d", IsError: true}, {EntryId: "5555555e", IsError: false}, {EntryId: "66666666f", IsError: false}, {EntryId: "7777777g", IsError: false}, {EntryId: "8888888h", IsError: true}, {EntryId: "9999999i", IsError: false}, {EntryId: "10101010j", IsError: false}, }, } expectedCBRetryCount := map[string]int{ "1111111a": 1, "2222222b": 2, "333333c": 1, "4444444d": 2, "5555555e": 1, "66666666f": 2, "7777777g": 1, "8888888h": 2, "9999999i": 1, "10101010j": 2, } // 2 invoke calls should be made here, as the circuit breaker becomes open mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 2) assert.Len(t, *b, 10) assertRetryCount(t, expectedCBRetryCount, ts.entryIdRetryTimes) require.Error(t, e) assert.Equal(t, breaker.ErrOpenState, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) time.Sleep(5 * time.Second) // after this time, circuit breaker should be half-open b, e = ps.applyBulkSubscribeResiliency(context.TODO(), &in.bscData, in.pbsm, "dlq", orders1, policyDef, true, in.envelope) expectedResponse = BulkResponseExpectation{ Responses: []BulkResponseEntryExpectation{ {EntryId: "1111111a", IsError: false}, {EntryId: "2222222b", IsError: true}, {EntryId: "333333c", IsError: false}, {EntryId: "4444444d", IsError: true}, {EntryId: "5555555e", IsError: false}, {EntryId: "66666666f", IsError: true}, {EntryId: "7777777g", IsError: false}, {EntryId: "8888888h", IsError: true}, {EntryId: "9999999i", IsError: false}, {EntryId: "10101010j", IsError: true}, }, } expectedCBRetryCount = map[string]int{ "1111111a": 2, "2222222b": 3, "333333c": 2, "4444444d": 3, "5555555e": 2, "66666666f": 3, "7777777g": 2, "8888888h": 3, "9999999i": 2, "10101010j": 3, } // as this operation is partial failure case and circuit breaker is half-open, this failure // would mark state as open mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 3) assert.Len(t, *b, 10) assertRetryCount(t, expectedCBRetryCount, ts.entryIdRetryTimes) require.Error(t, e) assert.Equal(t, breaker.ErrOpenState, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) // circuit breaker is open, so no call should go through b, e = ps.applyBulkSubscribeResiliency(context.TODO(), &in.bscData, in.pbsm, "dlq", orders1, policyDef, true, in.envelope) mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 3) assert.Len(t, *b, 10) assertRetryCount(t, expectedCBRetryCount, ts.entryIdRetryTimes) require.Error(t, e) assert.Equal(t, breaker.ErrOpenState, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) time.Sleep(5 * time.Second) // after this time, circuit breaker should be half-open b, e = ps.applyBulkSubscribeResiliency(context.TODO(), &in.bscData, in.pbsm, "dlq", orders1, policyDef, true, in.envelope) expectedResponse = BulkResponseExpectation{ Responses: []BulkResponseEntryExpectation{ {EntryId: "1111111a", IsError: false}, {EntryId: "2222222b", IsError: false}, {EntryId: "333333c", IsError: false}, {EntryId: "4444444d", IsError: false}, 
{EntryId: "5555555e", IsError: false}, {EntryId: "66666666f", IsError: false}, {EntryId: "7777777g", IsError: false}, {EntryId: "8888888h", IsError: false}, {EntryId: "9999999i", IsError: false}, {EntryId: "10101010j", IsError: false}, }, } expectedCBRetryCount = map[string]int{ "1111111a": 3, "2222222b": 4, "333333c": 3, "4444444d": 4, "5555555e": 3, "66666666f": 4, "7777777g": 3, "8888888h": 4, "9999999i": 3, "10101010j": 4, } // As this operation succeeds with all entries passed, circuit breaker should be closed // as successCount becomes equal or greater than maxRequests mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 4) assert.Len(t, *b, 10) assertRetryCount(t, expectedCBRetryCount, ts.entryIdRetryTimes) require.NoError(t, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) }) } func TestBulkSubscribeResiliencyWithLongRetries(t *testing.T) { t.Run("Fail all events with timeout and then Open CB - long retries", func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) comp := inmemory.New(log) require.NoError(t, comp.Init(ctx, contribpubsub.Metadata{})) mockAppChannel := new(channelt.MockAppChannel) mockAppChannel.Init() ps, err := New(Options{ IsHTTP: true, Channels: new(channels.Channels).WithAppChannel(mockAppChannel), PubSub: &runtimePubsub.PubsubItem{Component: comp}, Resiliency: resiliency.New(logger.NewLogger("test")), }) require.NoError(t, err) ts := testSettings{ entryIdRetryTimes: map[string]int{}, failCount: 10, failEvenOnes: true, failAllEntries: false, } mockee := mockAppChannel. On( "InvokeMethod", mock.MatchedBy(matchContextInterface), mock.MatchedBy(func(req *invokev1.InvokeMethodRequest) bool { return req.Message().GetMethod() == orders1 }), ). After(3 * time.Second) mockee.RunFn = func(args mock.Arguments) { respInvoke1 := getResponse(args.Get(1).(*invokev1.InvokeMethodRequest), &ts) mockee.ReturnArguments = mock.Arguments{respInvoke1, nil} } // set a circuit breaker with 1 consecutive failure cb := resiliencyV1alpha.CircuitBreaker{ Trip: "consecutiveFailures > 1", // circuitBreaker will open after 1 failure, after the retries MaxRequests: 1, // only 1 request will be allowed when circuitBreaker is half-open Timeout: "30s", // half-open after 30s. 
So in test this will NOT be triggered } shortRetry.MaxRetries = ptr.Of(7) policyProvider := createResPolicyProvider(cb, shortTimeout, longRetry) policyDef := policyProvider.ComponentInboundPolicy(pubsubName, resiliency.Pubsub) in := getInput() b, e := ps.applyBulkSubscribeResiliency(context.TODO(), &in.bscData, in.pbsm, "dlq", orders1, policyDef, true, in.envelope) expectedResponse := BulkResponseExpectation{ Responses: []BulkResponseEntryExpectation{ {EntryId: "1111111a", IsError: true}, {EntryId: "2222222b", IsError: true}, {EntryId: "333333c", IsError: true}, {EntryId: "4444444d", IsError: true}, {EntryId: "5555555e", IsError: true}, {EntryId: "66666666f", IsError: true}, {EntryId: "7777777g", IsError: true}, {EntryId: "8888888h", IsError: true}, {EntryId: "9999999i", IsError: true}, {EntryId: "10101010j", IsError: true}, }, } assert.Len(t, *b, 10) require.Error(t, e) assert.Equal(t, breaker.ErrOpenState, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) b, e = ps.applyBulkSubscribeResiliency(context.TODO(), &in.bscData, in.pbsm, "dlq", orders1, policyDef, true, in.envelope) assert.Len(t, *b, 10) require.Error(t, e) assert.Equal(t, breaker.ErrOpenState, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) }) } func assertRetryCount(t *testing.T, expectedIDRetryCountMap map[string]int, actualRetryCountMap map[string]int) { for k, v := range expectedIDRetryCountMap { assert.Equal(t, v, actualRetryCountMap[k], "expected retry/try count to match") } }
mikeee/dapr
pkg/runtime/subscription/bulkresiliency_test.go
GO
mit
45,653
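The circuit-breaker tests above depend on three knobs: the Trip expression (`consecutiveFailures > 1`), MaxRequests admitted while half-open, and the Timeout before an open breaker turns half-open. A toy, single-goroutine breaker showing just those transitions is sketched below under those assumptions; the real implementation lives in pkg/resiliency/breaker, is concurrency-safe, and evaluates Trip as an expression, so this is illustrative only.

package main

import (
	"errors"
	"fmt"
	"time"
)

var errOpen = errors.New("circuit breaker is open")

type toyBreaker struct {
	consecutiveFailures int
	open                bool
	openedAt            time.Time
	timeout             time.Duration // how long the breaker stays open before a half-open probe
}

// do runs fn unless the breaker is open and its timeout has not elapsed;
// a success closes the breaker, a failure past the trip threshold opens it.
func (b *toyBreaker) do(fn func() error) error {
	if b.open {
		if time.Since(b.openedAt) < b.timeout {
			return errOpen // fail fast, no call made
		}
		b.open = false // half-open: admit one probe request
	}
	if err := fn(); err != nil {
		b.consecutiveFailures++
		if b.consecutiveFailures > 1 { // the tests' Trip expression
			b.open = true
			b.openedAt = time.Now()
		}
		return err
	}
	b.consecutiveFailures = 0
	return nil
}

func main() {
	b := &toyBreaker{timeout: 50 * time.Millisecond}
	fail := func() error { return errors.New("boom") }
	fmt.Println(b.do(fail))                        // boom (1st failure)
	fmt.Println(b.do(fail))                        // boom (2nd failure trips the breaker open)
	fmt.Println(b.do(fail))                        // fails fast with errOpen, fn not called
	time.Sleep(60 * time.Millisecond)              // wait past the open timeout
	fmt.Println(b.do(func() error { return nil })) // half-open probe succeeds, breaker closes
}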
/* Copyright 2024 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package subscription import ( "context" "encoding/base64" "encoding/json" "errors" "fmt" nethttp "net/http" "strings" "time" "github.com/google/uuid" "go.opentelemetry.io/otel/trace" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "github.com/dapr/components-contrib/contenttype" "github.com/dapr/components-contrib/metadata" contribpubsub "github.com/dapr/components-contrib/pubsub" diag "github.com/dapr/dapr/pkg/diagnostics" invokev1 "github.com/dapr/dapr/pkg/messaging/v1" runtimev1pb "github.com/dapr/dapr/pkg/proto/runtime/v1" "github.com/dapr/dapr/pkg/resiliency" rtpubsub "github.com/dapr/dapr/pkg/runtime/pubsub" ) // message contains all the essential information related to a particular entry. // This need to be maintained as a separate struct, as we need to filter out messages and // their related info doing retries of resiliency support. type message struct { cloudEvent map[string]interface{} rawData *rtpubsub.BulkSubscribeMessageItem entry *contribpubsub.BulkMessageEntry } // bulkSubscribedMessage contains all the essential information related to // a bulk subscribe message. type bulkSubscribedMessage struct { pubSubMessages []message topic string metadata map[string]string pubsub string path string length int } // bulkSubIngressDiagnostics holds diagnostics information for bulk subscribe // ingress. type bulkSubIngressDiagnostics struct { statusWiseDiag map[string]int64 elapsed float64 retryReported bool } // bulkSubscribeCallData holds data for a bulk subscribe call. type bulkSubscribeCallData struct { bulkResponses *[]contribpubsub.BulkSubscribeResponseEntry bulkSubDiag *bulkSubIngressDiagnostics entryIdIndexMap *map[string]int //nolint:stylecheck psName string topic string } // bulkSubscribeTopic subscribes to a topic for bulk messages and invokes subscriber app endpoint(s). // Major steps inside a bulk handler: // 1. Deserialize pubsub metadata and determine if rawPayload or not // 1.A. If any error occurs, send to DLQ if configured, else send back error for all messages // 2. Iterate through each message and validate entryId is NOT blank // 2.A. If it is a raw payload: // 2.A.i. Get route path, if processable // 2.A.ii. Check route path is non-blank if protocol used is HTTP; generate base64 encoding of event data // and set contentType, if provided, else set to "application/octet-stream" // 2.A.iii. Finally, form a child message to be sent to app and add it to the list of messages, // to be sent to app (this list of messages is registered against correct path in an internal map) // 2.B. If it is NOT a raw payload (it is considered a cloud event): // 2.B.i. Unmarshal it into a map[string]interface{} // 2.B.ii. If any error while unmarshalling, register error for this message // 2.B.iii. Check if message expired // 2.B.iv. Get route path, if processable // 2.B.v. Check route path is non-blank if protocol used is HTTP, form a child message to be sent to app and add it to the list of messages, // 3. 
Iterate through map prepared for path vs list of messages to be sent on this path // 3.A. Prepare envelope for the list of messages to be sent to app on this path // 3.B. Send the envelope to app by invoking http/grpc endpoint // 4. Check if any error has occurred so far in processing for any of the message and invoke DLQ, if configured. // 5. Send back responses array to broker interface. func (s *Subscription) bulkSubscribeTopic(ctx context.Context, policyDef *resiliency.PolicyDefinition) error { subscribeTopic := s.topic psName := s.pubsubName route := s.route topic := s.topic namespacedConsumer := s.pubsub.NamespaceScoped if namespacedConsumer { subscribeTopic = s.namespace + s.topic } req := contribpubsub.SubscribeRequest{ Topic: subscribeTopic, Metadata: s.route.Metadata, BulkSubscribeConfig: contribpubsub.BulkSubscribeConfig{ MaxMessagesCount: int(s.route.BulkSubscribe.MaxMessagesCount), MaxAwaitDurationMs: int(s.route.BulkSubscribe.MaxAwaitDurationMs), }, } bulkHandler := func(ctx context.Context, msg *contribpubsub.BulkMessage) ([]contribpubsub.BulkSubscribeResponseEntry, error) { if msg.Metadata == nil { msg.Metadata = make(map[string]string, 1) } msg.Metadata[rtpubsub.MetadataKeyPubSub] = s.pubsubName bulkSubDiag := newBulkSubIngressDiagnostics() bulkResponses := make([]contribpubsub.BulkSubscribeResponseEntry, len(msg.Entries)) routePathBulkMessageMap := make(map[string]bulkSubscribedMessage) entryIdIndexMap := make(map[string]int, len(msg.Entries)) //nolint:stylecheck bulkSubCallData := bulkSubscribeCallData{ bulkResponses: &bulkResponses, bulkSubDiag: &bulkSubDiag, entryIdIndexMap: &entryIdIndexMap, psName: psName, topic: topic, } rawPayload, err := metadata.IsRawPayload(route.Metadata) if err != nil { log.Errorf("error deserializing pubsub metadata: %s", err) if dlqErr := s.sendBulkToDLQIfConfigured(ctx, &bulkSubCallData, msg, true, route); dlqErr != nil { populateAllBulkResponsesWithError(msg, &bulkResponses, err) reportBulkSubDiagnostics(ctx, topic, &bulkSubDiag) return bulkResponses, err } reportBulkSubDiagnostics(ctx, topic, &bulkSubDiag) return nil, nil } hasAnyError := false for i, message := range msg.Entries { if entryIdErr := validateEntryId(message.EntryId, i); entryIdErr != nil { //nolint:stylecheck bulkResponses[i].Error = entryIdErr hasAnyError = true continue } entryIdIndexMap[message.EntryId] = i if rawPayload { rPath, routeErr := s.getRouteIfProcessable(ctx, &bulkSubCallData, route, &(msg.Entries[i]), i, string(message.Event)) if routeErr != nil { hasAnyError = true continue } // For grpc, we can still send the entry even if path is blank, App can take a decision if rPath == "" && s.isHTTP { continue } dataB64 := base64.StdEncoding.EncodeToString(message.Event) if message.ContentType == "" { message.ContentType = "application/octet-stream" } populateBulkSubcribedMessage(&(msg.Entries[i]), dataB64, &routePathBulkMessageMap, rPath, i, msg, false, psName, message.ContentType, namespacedConsumer, s.namespace) } else { var cloudEvent map[string]interface{} err = json.Unmarshal(message.Event, &cloudEvent) if err != nil { log.Errorf("error deserializing one of the messages in bulk cloud event in pubsub %s and topic %s: %s", psName, topic, err) bulkResponses[i].Error = err bulkResponses[i].EntryId = message.EntryId hasAnyError = true continue } if contribpubsub.HasExpired(cloudEvent) { log.Warnf("dropping expired pub/sub event %v as of %v", cloudEvent[contribpubsub.IDField], cloudEvent[contribpubsub.ExpirationField]) 
bulkSubDiag.statusWiseDiag[string(contribpubsub.Drop)]++ if route.DeadLetterTopic != "" { _ = s.sendToDeadLetter(ctx, psName, &contribpubsub.NewMessage{ Data: message.Event, Topic: topic, Metadata: message.Metadata, ContentType: &msg.Entries[i].ContentType, }, route.DeadLetterTopic) } bulkResponses[i].EntryId = message.EntryId bulkResponses[i].Error = nil continue } rPath, routeErr := s.getRouteIfProcessable(ctx, &bulkSubCallData, route, &(msg.Entries[i]), i, cloudEvent) if routeErr != nil { hasAnyError = true continue } // For grpc, we can still send the entry even if path is blank, App can take a decision if rPath == "" && s.isHTTP { continue } if message.ContentType == "" { message.ContentType = contenttype.CloudEventContentType } populateBulkSubcribedMessage(&(msg.Entries[i]), cloudEvent, &routePathBulkMessageMap, rPath, i, msg, true, psName, message.ContentType, namespacedConsumer, s.namespace) } } var overallInvokeErr error for path, psm := range routePathBulkMessageMap { invokeErr := s.createEnvelopeAndInvokeSubscriber(ctx, &bulkSubCallData, psm, msg, route, path, policyDef, rawPayload) if invokeErr != nil { hasAnyError = true err = invokeErr overallInvokeErr = invokeErr } } if errors.Is(overallInvokeErr, context.Canceled) { reportBulkSubDiagnostics(ctx, topic, &bulkSubDiag) return bulkResponses, overallInvokeErr } if hasAnyError { // Sending msg to dead letter queue. // If no DLQ is configured, return error for backwards compatibility (component-level retry). bulkSubDiag.retryReported = true if dlqErr := s.sendBulkToDLQIfConfigured(ctx, &bulkSubCallData, msg, false, route); dlqErr != nil { reportBulkSubDiagnostics(ctx, topic, &bulkSubDiag) return bulkResponses, err } reportBulkSubDiagnostics(ctx, topic, &bulkSubDiag) return nil, nil } reportBulkSubDiagnostics(ctx, topic, &bulkSubDiag) return bulkResponses, err } if bulkSubscriber, ok := s.pubsub.Component.(contribpubsub.BulkSubscriber); ok { return bulkSubscriber.BulkSubscribe(ctx, req, bulkHandler) } return rtpubsub.NewDefaultBulkSubscriber(s.pubsub.Component).BulkSubscribe(ctx, req, bulkHandler) } // sendBulkToDLQIfConfigured sends the message to the dead letter queue if configured. func (s *Subscription) sendBulkToDLQIfConfigured(ctx context.Context, bulkSubCallData *bulkSubscribeCallData, msg *contribpubsub.BulkMessage, sendAllEntries bool, route rtpubsub.Subscription, ) error { bscData := *bulkSubCallData if route.DeadLetterTopic != "" { if dlqErr := s.sendBulkToDeadLetter(ctx, bulkSubCallData, msg, route.DeadLetterTopic, sendAllEntries); dlqErr == nil { // dlq has been configured and whole bulk of messages is successfully sent to dlq. return nil } } if !bscData.bulkSubDiag.retryReported { bscData.bulkSubDiag.statusWiseDiag[string(contribpubsub.Retry)] += int64(len(msg.Entries)) } return errors.New("failed to send to DLQ as DLQ was not configured") } // getRouteIfProcessable returns the route path if the message is processable. 
func (s *Subscription) getRouteIfProcessable(ctx context.Context, bulkSubCallData *bulkSubscribeCallData, route rtpubsub.Subscription, message *contribpubsub.BulkMessageEntry,
	i int, matchElem interface{},
) (string, error) {
	bscData := *bulkSubCallData
	rPath, shouldProcess, routeErr := findMatchingRoute(route.Rules, matchElem)
	if routeErr != nil {
		log.Errorf("Error finding matching route for event in bulk subscribe %s and topic %s for entry id %s: %s", bscData.psName, bscData.topic, message.EntryId, routeErr)
		setBulkResponseEntry(bscData.bulkResponses, i, message.EntryId, routeErr)
		return "", routeErr
	}
	if !shouldProcess {
		// The event does not match any route specified so ignore it.
		log.Warnf("No matching route for event in pubsub %s and topic %s; skipping", bscData.psName, bscData.topic)
		bscData.bulkSubDiag.statusWiseDiag[string(contribpubsub.Drop)]++
		if route.DeadLetterTopic != "" {
			_ = s.sendToDeadLetter(ctx, bscData.psName, &contribpubsub.NewMessage{
				Data:        message.Event,
				Topic:       bscData.topic,
				Metadata:    message.Metadata,
				ContentType: &message.ContentType,
			}, route.DeadLetterTopic)
		}
		setBulkResponseEntry(bscData.bulkResponses, i, message.EntryId, nil)
		return "", nil
	}
	return rPath, nil
}

// createEnvelopeAndInvokeSubscriber creates the envelope and invokes the subscriber.
func (s *Subscription) createEnvelopeAndInvokeSubscriber(ctx context.Context, bulkSubCallData *bulkSubscribeCallData, psm bulkSubscribedMessage,
	msg *contribpubsub.BulkMessage, route rtpubsub.Subscription, path string, policyDef *resiliency.PolicyDefinition, rawPayload bool,
) error {
	bscData := *bulkSubCallData
	var id string
	idObj, err := uuid.NewRandom()
	if err == nil {
		id = idObj.String()
	}
	psm.pubSubMessages = psm.pubSubMessages[:psm.length]
	psm.path = path
	envelope := rtpubsub.NewBulkSubscribeEnvelope(&rtpubsub.BulkSubscribeEnvelope{
		ID:       id,
		Topic:    bscData.topic,
		Pubsub:   bscData.psName,
		Metadata: msg.Metadata,
	})
	_, e := s.applyBulkSubscribeResiliency(ctx, bulkSubCallData, psm, route.DeadLetterTopic, path, policyDef, rawPayload, envelope)
	return e
}

// publishBulkMessageHTTP publishes bulk message to a subscriber using HTTP and takes care of corresponding responses.
func (s *Subscription) publishBulkMessageHTTP(ctx context.Context, bulkSubCallData *bulkSubscribeCallData, psm *bulkSubscribedMessage,
	bsrr *bulkSubscribeResiliencyRes, deadLetterTopic string,
) error {
	bscData := *bulkSubCallData
	rawMsgEntries := make([]*rtpubsub.BulkSubscribeMessageItem, len(psm.pubSubMessages))
	entryRespReceived := make(map[string]bool, len(psm.pubSubMessages))
	for i, pubSubMsg := range psm.pubSubMessages {
		rawMsgEntries[i] = pubSubMsg.rawData
	}

	bsrr.envelope[rtpubsub.Entries] = rawMsgEntries
	da, marshalErr := json.Marshal(&bsrr.envelope)
	if marshalErr != nil {
		log.Errorf("Error serializing bulk cloud event in pubsub %s and topic %s: %s", psm.pubsub, psm.topic, marshalErr)
		if deadLetterTopic != "" {
			entries := make([]contribpubsub.BulkMessageEntry, len(psm.pubSubMessages))
			for i, pubsubMsg := range psm.pubSubMessages {
				entries[i] = *pubsubMsg.entry
			}
			bulkMsg := contribpubsub.BulkMessage{
				Entries:  entries,
				Topic:    psm.topic,
				Metadata: psm.metadata,
			}
			if dlqErr := s.sendBulkToDeadLetter(ctx, bulkSubCallData, &bulkMsg, deadLetterTopic, true); dlqErr == nil {
				// DLQ has been configured and the message was successfully sent to it.
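				// Acknowledge every entry as handled so the component does not
				// redeliver messages that were already routed to the dead letter topic.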
				for _, item := range rawMsgEntries {
					addBulkResponseEntry(&bsrr.entries, item.EntryId, nil)
				}
				return nil
			}
		}
		bscData.bulkSubDiag.statusWiseDiag[string(contribpubsub.Retry)] += int64(len(rawMsgEntries))
		for _, item := range rawMsgEntries {
			addBulkResponseEntry(&bsrr.entries, item.EntryId, marshalErr)
		}
		return marshalErr
	}

	spans := make([]trace.Span, len(rawMsgEntries))

	req := invokev1.NewInvokeMethodRequest(psm.path).
		WithHTTPExtension(nethttp.MethodPost, "").
		WithRawDataBytes(da).
		WithContentType(contenttype.JSONContentType).
		WithCustomHTTPMetadata(psm.metadata)
	defer req.Close()

	n := 0
	for _, pubsubMsg := range psm.pubSubMessages {
		cloudEvent := pubsubMsg.cloudEvent
		iTraceID := cloudEvent[contribpubsub.TraceParentField]
		if iTraceID == nil {
			iTraceID = cloudEvent[contribpubsub.TraceIDField]
		}
		if iTraceID != nil {
			traceID := iTraceID.(string)
			sc, _ := diag.SpanContextFromW3CString(traceID)
			var span trace.Span
			ctx, span = diag.StartInternalCallbackSpan(ctx, "pubsub/"+psm.topic, sc, s.tracingSpec)
			if span != nil {
				spans[n] = span
				n++
			}
		}
	}
	spans = spans[:n]
	defer endSpans(spans)
	start := time.Now()
	resp, err := s.channels.AppChannel().InvokeMethod(ctx, req, "")
	elapsed := diag.ElapsedSince(start)
	if err != nil {
		bscData.bulkSubDiag.statusWiseDiag[string(contribpubsub.Retry)] += int64(len(rawMsgEntries))
		bscData.bulkSubDiag.elapsed = elapsed
		populateBulkSubscribeResponsesWithError(psm, &bsrr.entries, err)
		return fmt.Errorf("error from app channel while sending pub/sub event to app: %w", err)
	}
	defer resp.Close()

	statusCode := int(resp.Status().GetCode())

	for _, span := range spans {
		m := diag.ConstructSubscriptionSpanAttributes(psm.topic)
		diag.AddAttributesToSpan(span, m)
		diag.UpdateSpanStatusFromHTTPStatus(span, statusCode)
	}

	if (statusCode >= 200) && (statusCode <= 299) {
		// Any 2xx is considered a success.
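		// The app is expected to reply with a per-entry status document that is decoded
		// into contribpubsub.AppBulkResponse. An illustrative body (assuming the JSON
		// tags defined by components-contrib) looks roughly like:
		//   {"statuses": [{"entryId": "1111111a", "status": "SUCCESS"},
		//                 {"entryId": "2222222b", "status": "RETRY"}]}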
		var appBulkResponse contribpubsub.AppBulkResponse
		err = json.NewDecoder(resp.RawData()).Decode(&appBulkResponse)
		if err != nil {
			bscData.bulkSubDiag.statusWiseDiag[string(contribpubsub.Retry)] += int64(len(rawMsgEntries))
			bscData.bulkSubDiag.elapsed = elapsed
			populateBulkSubscribeResponsesWithError(psm, &bsrr.entries, err)
			return fmt.Errorf("failed unmarshalling app response for bulk subscribe: %w", err)
		}

		var hasAnyError bool
		for _, response := range appBulkResponse.AppResponses {
			if _, ok := (*bscData.entryIdIndexMap)[response.EntryId]; ok {
				switch response.Status {
				case "":
					// When the status code is 2xx, treat an empty status field, or no status received for an item, as RETRY.
					fallthrough
				case contribpubsub.Retry:
					bscData.bulkSubDiag.statusWiseDiag[string(contribpubsub.Retry)]++
					entryRespReceived[response.EntryId] = true
					addBulkResponseEntry(&bsrr.entries, response.EntryId,
						fmt.Errorf("RETRY required while processing bulk subscribe event for entry id: %v", response.EntryId))
					hasAnyError = true
				case contribpubsub.Success:
					bscData.bulkSubDiag.statusWiseDiag[string(contribpubsub.Success)]++
					entryRespReceived[response.EntryId] = true
					addBulkResponseEntry(&bsrr.entries, response.EntryId, nil)
				case contribpubsub.Drop:
					bscData.bulkSubDiag.statusWiseDiag[string(contribpubsub.Drop)]++
					entryRespReceived[response.EntryId] = true
					log.Warnf("DROP status returned from app while processing pub/sub event %v", response.EntryId)
					addBulkResponseEntry(&bsrr.entries, response.EntryId, nil)
					if deadLetterTopic != "" {
						msg := psm.pubSubMessages[(*bscData.entryIdIndexMap)[response.EntryId]]
						_ = s.sendToDeadLetter(ctx, bscData.psName, &contribpubsub.NewMessage{
							Data:        msg.entry.Event,
							Topic:       bscData.topic,
							Metadata:    msg.entry.Metadata,
							ContentType: &msg.entry.ContentType,
						}, deadLetterTopic)
					}
				default:
					// Consider an unknown status field as an error and retry.
					bscData.bulkSubDiag.statusWiseDiag[string(contribpubsub.Retry)]++
					entryRespReceived[response.EntryId] = true
					addBulkResponseEntry(&bsrr.entries, response.EntryId,
						fmt.Errorf("unknown status returned from app while processing bulk subscribe event %v: %v", response.EntryId, response.Status))
					hasAnyError = true
				}
			} else {
				log.Warnf("Invalid entry id received from app while processing pub/sub event %v", response.EntryId)
				continue
			}
		}
		for _, item := range rawMsgEntries {
			if !entryRespReceived[item.EntryId] {
				addBulkResponseEntry(&bsrr.entries, item.EntryId,
					fmt.Errorf("Response not received, RETRY required while processing bulk subscribe event for entry id: %v", item.EntryId), //nolint:stylecheck
				)
				hasAnyError = true
				bscData.bulkSubDiag.statusWiseDiag[string(contribpubsub.Retry)]++
			}
		}
		bscData.bulkSubDiag.elapsed = elapsed
		if hasAnyError {
			//nolint:stylecheck
			return errors.New("Few message(s) have failed during bulk subscribe operation")
		}
		return nil
	}

	if statusCode == nethttp.StatusNotFound {
		// These are errors that are not retriable; for now it is just 404, but more status codes can be added.
		// When adding/removing an error here, check if it also applies to gRPC, since there is a mapping between HTTP and gRPC errors:
		// https://cloud.google.com/apis/design/errors#handling_errors
		log.Errorf("Non-retriable error returned from app while processing bulk pub/sub event. status code returned: %v", statusCode)
		bscData.bulkSubDiag.statusWiseDiag[string(contribpubsub.Drop)] += int64(len(rawMsgEntries))
		bscData.bulkSubDiag.elapsed = elapsed
		populateBulkSubscribeResponsesWithError(psm, &bsrr.entries, nil)
		return nil
	}

	// Every error from now on is a retriable error.
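	// This covers 429s, 5xx responses, and any other unexpected status code: the whole
	// batch is reported as RETRY so that the component redelivers it.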
	retriableErrorStr := fmt.Sprintf("Retriable error returned from app while processing bulk pub/sub event, topic: %v. status code returned: %v", psm.topic, statusCode)
	retriableError := errors.New(retriableErrorStr)
	log.Warn(retriableErrorStr)
	bscData.bulkSubDiag.statusWiseDiag[string(contribpubsub.Retry)] += int64(len(rawMsgEntries))
	bscData.bulkSubDiag.elapsed = elapsed
	populateBulkSubscribeResponsesWithError(psm, &bsrr.entries, retriableError)
	return retriableError
}

// publishBulkMessageGRPC publishes bulk message to a subscriber using gRPC and takes care of corresponding responses.
func (s *Subscription) publishBulkMessageGRPC(ctx context.Context, bulkSubCallData *bulkSubscribeCallData, psm *bulkSubscribedMessage,
	bulkResponses *[]contribpubsub.BulkSubscribeResponseEntry, rawPayload bool, deadLetterTopic string,
) error {
	bscData := *bulkSubCallData
	items := make([]*runtimev1pb.TopicEventBulkRequestEntry, len(psm.pubSubMessages))
	entryRespReceived := make(map[string]bool, len(psm.pubSubMessages))
	for i, pubSubMsg := range psm.pubSubMessages {
		entry := pubSubMsg.entry
		item, err := rtpubsub.FetchEntry(rawPayload, entry, psm.pubSubMessages[i].cloudEvent)
		if err != nil {
			bscData.bulkSubDiag.statusWiseDiag[string(contribpubsub.Retry)]++
			addBulkResponseEntry(bulkResponses, entry.EntryId, err)
			continue
		}
		items[i] = item
	}

	uuidObj, err := uuid.NewRandom()
	if err != nil {
		return fmt.Errorf("failed to generate UUID: %w", err)
	}
	envelope := &runtimev1pb.TopicEventBulkRequest{
		Id:         uuidObj.String(),
		Entries:    items,
		Metadata:   psm.metadata,
		Topic:      psm.topic,
		PubsubName: psm.pubsub,
		Type:       contribpubsub.DefaultBulkEventType,
		Path:       psm.path,
	}

	spans := make([]trace.Span, len(psm.pubSubMessages))
	n := 0
	for _, pubSubMsg := range psm.pubSubMessages {
		cloudEvent := pubSubMsg.cloudEvent
		iTraceID := cloudEvent[contribpubsub.TraceParentField]
		if iTraceID == nil {
			iTraceID = cloudEvent[contribpubsub.TraceIDField]
		}
		if iTraceID != nil {
			if traceID, ok := iTraceID.(string); ok {
				sc, _ := diag.SpanContextFromW3CString(traceID) // no-op if tracing is off
				var span trace.Span
				ctx, span = diag.StartInternalCallbackSpan(ctx, "pubsub/"+psm.topic, sc, s.tracingSpec)
				if span != nil {
					ctx = diag.SpanContextToGRPCMetadata(ctx, span.SpanContext())
					spans[n] = span
					n++
				}
			} else {
				log.Warnf("ignored non-string traceid value: %v", iTraceID)
			}
		}
	}
	spans = spans[:n]
	defer endSpans(spans)
	ctx = invokev1.WithCustomGRPCMetadata(ctx, psm.metadata)

	conn, err := s.grpc.GetAppClient()
	if err != nil {
		return fmt.Errorf("error while getting app client: %w", err)
	}
	clientV1 := runtimev1pb.NewAppCallbackAlphaClient(conn)

	start := time.Now()
	res, err := clientV1.OnBulkTopicEventAlpha1(ctx, envelope)
	elapsed := diag.ElapsedSince(start)

	for _, span := range spans {
		m := diag.ConstructSubscriptionSpanAttributes(envelope.GetTopic())
		diag.AddAttributesToSpan(span, m)
		diag.UpdateSpanStatusFromGRPCError(span, err)
	}

	if err != nil {
		errStatus, hasErrStatus := status.FromError(err)
		if hasErrStatus && (errStatus.Code() == codes.Unimplemented) {
			// DROP
			log.Warnf("non-retriable error returned from app while processing bulk pub/sub event: %s", err)
			bscData.bulkSubDiag.statusWiseDiag[string(contribpubsub.Drop)] += int64(len(psm.pubSubMessages))
			bscData.bulkSubDiag.elapsed = elapsed
			populateBulkSubscribeResponsesWithError(psm, bulkResponses, nil)
			return nil
		}
		err = fmt.Errorf("error returned from app while processing bulk pub/sub event: %w", err)
		log.Debug(err)
		bscData.bulkSubDiag.statusWiseDiag[string(contribpubsub.Retry)] += int64(len(psm.pubSubMessages))
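		// Unlike codes.Unimplemented above, any other gRPC error is treated as
		// retriable for the entire batch.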
		bscData.bulkSubDiag.elapsed = elapsed
		populateBulkSubscribeResponsesWithError(psm, bulkResponses, err)
		// On error from the application, return the error so the event is redelivered.
		return err
	}

	hasAnyError := false
	for _, response := range res.GetStatuses() {
		entryID := response.GetEntryId()
		if _, ok := (*bscData.entryIdIndexMap)[entryID]; ok {
			switch response.GetStatus() {
			case runtimev1pb.TopicEventResponse_SUCCESS: //nolint:nosnakecase
				// An uninitialized status also lands here: the zero value of the
				// protobuf enum is SUCCESS.
				bscData.bulkSubDiag.statusWiseDiag[string(contribpubsub.Success)]++
				entryRespReceived[entryID] = true
				addBulkResponseEntry(bulkResponses, entryID, nil)
			case runtimev1pb.TopicEventResponse_RETRY: //nolint:nosnakecase
				bscData.bulkSubDiag.statusWiseDiag[string(contribpubsub.Retry)]++
				entryRespReceived[entryID] = true
				addBulkResponseEntry(bulkResponses, entryID,
					fmt.Errorf("RETRY status returned from app while processing pub/sub event for entry id: %v", entryID))
				hasAnyError = true
			case runtimev1pb.TopicEventResponse_DROP: //nolint:nosnakecase
				log.Warnf("DROP status returned from app while processing pub/sub event for entry id: %v", entryID)
				bscData.bulkSubDiag.statusWiseDiag[string(contribpubsub.Drop)]++
				entryRespReceived[entryID] = true
				addBulkResponseEntry(bulkResponses, entryID, nil)
				if deadLetterTopic != "" {
					msg := psm.pubSubMessages[(*bscData.entryIdIndexMap)[entryID]]
					_ = s.sendToDeadLetter(ctx, bscData.psName, &contribpubsub.NewMessage{
						Data:        msg.entry.Event,
						Topic:       bscData.topic,
						Metadata:    msg.entry.Metadata,
						ContentType: &msg.entry.ContentType,
					}, deadLetterTopic)
				}
			default:
				// Consider an unknown status field as an error and retry.
				bscData.bulkSubDiag.statusWiseDiag[string(contribpubsub.Retry)]++
				entryRespReceived[entryID] = true
				addBulkResponseEntry(bulkResponses, entryID,
					fmt.Errorf("unknown status returned from app while processing pub/sub event for entry id %v: %v", entryID, response.GetStatus()))
				hasAnyError = true
			}
		} else {
			log.Warnf("Invalid entry id received from app while processing pub/sub event %v", entryID)
			continue
		}
	}
	for _, item := range psm.pubSubMessages {
		if !entryRespReceived[item.entry.EntryId] {
			addBulkResponseEntry(bulkResponses, item.entry.EntryId,
				fmt.Errorf("Response not received, RETRY required while processing bulk subscribe event for entry id: %v", item.entry.EntryId), //nolint:stylecheck
			)
			hasAnyError = true
			bscData.bulkSubDiag.statusWiseDiag[string(contribpubsub.Retry)]++
		}
	}
	bscData.bulkSubDiag.elapsed = elapsed
	if hasAnyError {
		//nolint:stylecheck
		return errors.New("Few message(s) have failed during bulk subscribe operation")
	}
	return nil
}

func endSpans(spans []trace.Span) {
	for _, span := range spans {
		if span != nil {
			span.End()
		}
	}
}

// sendBulkToDeadLetter sends the bulk message to the dead letter topic.
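// Unless sendAllEntries is true, only the entries whose bulk responses carry an error
// are forwarded; successfully processed entries are filtered out first.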
func (s *Subscription) sendBulkToDeadLetter(ctx context.Context, bulkSubCallData *bulkSubscribeCallData, msg *contribpubsub.BulkMessage,
	deadLetterTopic string, sendAllEntries bool,
) error {
	bscData := *bulkSubCallData
	data := make([]contribpubsub.BulkMessageEntry, len(msg.Entries))

	if sendAllEntries {
		data = msg.Entries
	} else {
		n := 0
		for _, message := range msg.Entries {
			entryIndex := (*bscData.entryIdIndexMap)[message.EntryId]
			if (*bscData.bulkResponses)[entryIndex].Error != nil {
				data[n] = message
				n++
			}
		}
		data = data[:n]
	}
	bscData.bulkSubDiag.statusWiseDiag[string(contribpubsub.Drop)] += int64(len(data))
	if bscData.bulkSubDiag.retryReported {
		bscData.bulkSubDiag.statusWiseDiag[string(contribpubsub.Retry)] -= int64(len(data))
	}
	req := &contribpubsub.BulkPublishRequest{
		Entries:    data,
		PubsubName: bscData.psName,
		Topic:      deadLetterTopic,
		Metadata:   msg.Metadata,
	}

	_, err := s.adapter.BulkPublish(ctx, req)
	if err != nil {
		log.Errorf("error sending message to dead letter, origin topic: %s dead letter topic %s err: %v", msg.Topic, deadLetterTopic, err)
	}
	return err
}

func validateEntryId(entryId string, i int) error { //nolint:stylecheck
	if entryId == "" {
		log.Warn("Invalid blank entry id received while processing bulk pub/sub event, won't be able to process it")
		//nolint:stylecheck
		return errors.New("Blank entryId supplied - won't be able to process it")
	}
	return nil
}

func populateBulkSubscribedMessage(msgE *contribpubsub.BulkMessageEntry, event interface{},
	routePathBulkMessageMap *map[string]bulkSubscribedMessage, rPath string, i int, msg *contribpubsub.BulkMessage,
	isCloudEvent bool, psName string, contentType string, namespacedConsumer bool, namespace string,
) {
	childMessage := rtpubsub.BulkSubscribeMessageItem{
		Event:       event,
		Metadata:    msgE.Metadata,
		EntryId:     msgE.EntryId,
		ContentType: contentType,
	}
	var cloudEvent map[string]interface{}
	mapTypeEvent, ok := event.(map[string]interface{})
	if ok {
		cloudEvent = mapTypeEvent
	}
	if val, ok := (*routePathBulkMessageMap)[rPath]; ok {
		if isCloudEvent {
			val.pubSubMessages[val.length].cloudEvent = mapTypeEvent
		}
		val.pubSubMessages[val.length].rawData = &childMessage
		val.pubSubMessages[val.length].entry = &msg.Entries[i]
		val.length++
		(*routePathBulkMessageMap)[rPath] = val
	} else {
		pubSubMessages := make([]message, len(msg.Entries))
		pubSubMessages[0].rawData = &childMessage
		pubSubMessages[0].entry = &msg.Entries[i]
		if isCloudEvent {
			pubSubMessages[0].cloudEvent = cloudEvent
		}
		msgTopic := msg.Topic
		if namespacedConsumer {
			msgTopic = strings.Replace(msgTopic, namespace, "", 1)
		}
		psm := bulkSubscribedMessage{
			pubSubMessages: pubSubMessages,
			topic:          msgTopic,
			metadata:       msg.Metadata,
			pubsub:         psName,
			length:         1,
		}
		(*routePathBulkMessageMap)[rPath] = psm
	}
}

func populateBulkSubscribeResponsesWithError(psm *bulkSubscribedMessage,
	bulkResponses *[]contribpubsub.BulkSubscribeResponseEntry, err error,
) {
	for _, message := range psm.pubSubMessages {
		addBulkResponseEntry(bulkResponses, message.entry.EntryId, err)
	}
}

func populateAllBulkResponsesWithError(bulkMsg *contribpubsub.BulkMessage,
	bulkResponses *[]contribpubsub.BulkSubscribeResponseEntry, err error,
) {
	for i, item := range bulkMsg.Entries {
		if (*bulkResponses)[i].EntryId == "" {
			setBulkResponseEntry(bulkResponses, i, item.EntryId, err)
		}
	}
}

func setBulkResponseEntry(bulkResponses *[]contribpubsub.BulkSubscribeResponseEntry, i int, entryId string, err error) { //nolint:stylecheck
	(*bulkResponses)[i].EntryId = entryId
	(*bulkResponses)[i].Error = err
}

func addBulkResponseEntry(bulkResponses *[]contribpubsub.BulkSubscribeResponseEntry, entryId string, err error) { //nolint:stylecheck
	resp := contribpubsub.BulkSubscribeResponseEntry{
		EntryId: entryId,
		Error:   err,
	}
	*bulkResponses = append(*bulkResponses, resp)
}

func newBulkSubIngressDiagnostics() bulkSubIngressDiagnostics {
	statusWiseCountDiag := make(map[string]int64, 3)
	statusWiseCountDiag[string(contribpubsub.Success)] = 0
	statusWiseCountDiag[string(contribpubsub.Drop)] = 0
	statusWiseCountDiag[string(contribpubsub.Retry)] = 0
	bulkSubDiag := bulkSubIngressDiagnostics{
		statusWiseDiag: statusWiseCountDiag,
		elapsed:        0,
		retryReported:  false,
	}
	return bulkSubDiag
}

func reportBulkSubDiagnostics(ctx context.Context, topic string, bulkSubDiag *bulkSubIngressDiagnostics) {
	if bulkSubDiag == nil {
		return
	}
	diag.DefaultComponentMonitoring.BulkPubsubIngressEvent(ctx, rtpubsub.MetadataKeyPubSub, topic, bulkSubDiag.elapsed)
	for status, count := range bulkSubDiag.statusWiseDiag {
		diag.DefaultComponentMonitoring.BulkPubsubIngressEventEntries(ctx, rtpubsub.MetadataKeyPubSub, topic, status, count)
	}
}
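// Illustrative only (not part of the runtime): an HTTP bulk subscriber replying with
// per-entry statuses could look roughly like the sketch below, assuming an envelope
// whose JSON body carries an "entries" array with an "entryId" per item, and with
// net/http and encoding/json imported:
//
//	http.HandleFunc("/orders", func(w http.ResponseWriter, r *http.Request) {
//		var body struct {
//			Entries []struct {
//				EntryId string `json:"entryId"`
//			} `json:"entries"`
//		}
//		_ = json.NewDecoder(r.Body).Decode(&body)
//		// Acknowledge every entry; a real app would return RETRY or DROP per entry as needed.
//		statuses := make([]map[string]string, 0, len(body.Entries))
//		for _, e := range body.Entries {
//			statuses = append(statuses, map[string]string{"entryId": e.EntryId, "status": "SUCCESS"})
//		}
//		_ = json.NewEncoder(w).Encode(map[string]interface{}{"statuses": statuses})
//	})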
mikeee/dapr
pkg/runtime/subscription/bulksubscription.go
GO
mit
31,805
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package subscription

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"net"
	"strings"
	"testing"
	"time"

	"github.com/phayes/freeport"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	contribpubsub "github.com/dapr/components-contrib/pubsub"
	"github.com/dapr/dapr/pkg/api/grpc/manager"
	channelt "github.com/dapr/dapr/pkg/channel/testing"
	"github.com/dapr/dapr/pkg/config"
	invokev1 "github.com/dapr/dapr/pkg/messaging/v1"
	"github.com/dapr/dapr/pkg/modes"
	runtimev1pb "github.com/dapr/dapr/pkg/proto/runtime/v1"
	"github.com/dapr/dapr/pkg/resiliency"
	"github.com/dapr/dapr/pkg/runtime/channels"
	"github.com/dapr/dapr/pkg/runtime/compstore"
	runtimePubsub "github.com/dapr/dapr/pkg/runtime/pubsub"
	publisherfake "github.com/dapr/dapr/pkg/runtime/pubsub/publisher/fake"
	"github.com/dapr/dapr/pkg/runtime/registry"
	daprt "github.com/dapr/dapr/pkg/testing"
	testinggrpc "github.com/dapr/dapr/pkg/testing/grpc"
	"github.com/dapr/kit/logger"
)

const (
	TestRuntimeConfigID = "consumer0"

	eventKey = `"event":`

	data1  string = `{"orderId":"1"}`
	data2  string = `{"orderId":"2"}`
	data3  string = `{"orderId":"3"}`
	data4  string = `{"orderId":"4"}`
	data5  string = `{"orderId":"5"}`
	data6  string = `{"orderId":"6"}`
	data7  string = `{"orderId":"7"}`
	data8  string = `{"orderId":"8"}`
	data9  string = ``
	data10 string = `{"orderId":"10"}`

	ext1Key   string = "ext1Key"
	ext1Value string = "ext1Value"
	ext2Key   string = "ext2Key"
	ext2Value string = "ext2Value"

	//nolint:goconst
	order1     string = `{"data":` + data1 + `,"datacontenttype":"application/json","` + ext1Key + `":"` + ext1Value + `","id":"9b6767c3-04b5-4871-96ae-c6bde0d5e16d","pubsubname":"orderpubsub","source":"checkout","specversion":"1.0","topic":"orders","traceid":"00-e61de949bb4de415a7af49fc86675648-ffb64972bb907224-01","traceparent":"00-e61de949bb4de415a7af49fc86675648-ffb64972bb907224-01","tracestate":"","type":"type1"}`
	order2     string = `{"data":` + data2 + `,"datacontenttype":"application/json","` + ext2Key + `":"` + ext2Value + `","id":"993f4e4a-05e5-4772-94a4-e899b1af0131","pubsubname":"orderpubsub","source":"checkout","specversion":"1.0","topic":"orders","traceid":"00-1343b02c3af4f9b352d4cb83d6c8cb81-82a64f8c4433e2c4-01","traceparent":"00-1343b02c3af4f9b352d4cb83d6c8cb81-82a64f8c4433e2c4-01","tracestate":"","type":"type2"}`
	order3     string = `{"data":` + data3 + `,"datacontenttype":"application/json","` + ext1Key + `":"` + ext1Value + `","id":"6767010u-04b5-4871-96ae-c6bde0d5e16d","pubsubname":"orderpubsub","source":"checkout","specversion":"1.0","topic":"orders","traceid":"00-e61de949bb4de415a7af49fc86675648-ffb64972bb907224-01","traceparent":"00-e61de949bb4de415a7af49fc86675648-ffb64972bb907224-01","tracestate":"","type":"type1"}`
	order4     string = `{"data":` + data4 + `,"datacontenttype":"application/json","` + ext2Key + `":"` + ext2Value + `","id":"91011121-05e5-4772-94a4-e899b1af0131","pubsubname":"orderpubsub","source":"checkout","specversion":"1.0","topic":"orders","traceid":"00-1343b02c3af4f9b352d4cb83d6c8cb81-82a64f8c4433e2c4-01","traceparent":"00-1343b02c3af4f9b352d4cb83d6c8cb81-82a64f8c4433e2c4-01","tracestate":"","type":"type2"}`
	order5     string = `{"data":` + data5 + `,"datacontenttype":"application/json","` + ext1Key + `":"` + ext1Value + `","id":"718271cd-04b5-4871-96ae-c6bde0d5e16d","pubsubname":"orderpubsub","source":"checkout","specversion":"1.0","topic":"orders","traceid":"00-e61de949bb4de415a7af49fc86675648-ffb64972bb907224-01","traceparent":"00-e61de949bb4de415a7af49fc86675648-ffb64972bb907224-01","tracestate":"","type":"type1"}`
	order6     string = `{"data":` + data6 + `,"datacontenttype":"application/json","` + ext2Key + `":"` + ext2Value + `","id":"7uw2233d-05e5-4772-94a4-e899b1af0131","pubsubname":"orderpubsub","source":"checkout","specversion":"1.0","topic":"orders","traceid":"00-1343b02c3af4f9b352d4cb83d6c8cb81-82a64f8c4433e2c4-01","traceparent":"00-1343b02c3af4f9b352d4cb83d6c8cb81-82a64f8c4433e2c4-01","tracestate":"","type":"type2"}`
	order7     string = `{"data":` + data7 + `,"datacontenttype":"application/json","` + ext1Key + `":"` + ext1Value + `","id":"78sqs98s-04b5-4871-96ae-c6bde0d5e16d","pubsubname":"orderpubsub","source":"checkout","specversion":"1.0","topic":"orders","traceid":"00-e61de949bb4de415a7af49fc86675648-ffb64972bb907224-01","traceparent":"00-e61de949bb4de415a7af49fc86675648-ffb64972bb907224-01","tracestate":"","type":"type1"}`
	order8     string = `{"data":` + data8 + `,"datacontenttype":"application/json","` + ext1Key + `":"` + ext1Value + `","id":"45122j82-05e5-4772-94a4-e899b1af0131","pubsubname":"orderpubsub","source":"checkout","specversion":"1.0","topic":"orders","traceid":"00-1343b02c3af4f9b352d4cb83d6c8cb81-82a64f8c4433e2c4-01","traceparent":"00-1343b02c3af4f9b352d4cb83d6c8cb81-82a64f8c4433e2c4-01","tracestate":"","type":"type1"}`
	order9     string = `{"` + ext1Key + `":"` + ext1Value + `","orderId":"9","type":"type1"}`
	order10    string = `{"data":` + data10 + `,"datacontenttype":"application/json","` + ext2Key + `":"` + ext2Value + `","id":"ded2rd44-05e5-4772-94a4-e899b1af0131","pubsubname":"orderpubsub","source":"checkout","specversion":"1.0","topic":"orders","traceid":"00-1343b02c3af4f9b352d4cb83d6c8cb81-82a64f8c4433e2c4-01","traceparent":"00-1343b02c3af4f9b352d4cb83d6c8cb81-82a64f8c4433e2c4-01","tracestate":"","type":"type2"}`
	wrongOrder string = `{"data":` + data2 + `,"datacontenttype":"application/xml;wwwwwww","` + ext2Key + `":"` + ext2Value + `","id":"993f4e4a-05e5-4772-94a4-e899b1af0131","pubsubname":"orderpubsub","source":"checkout","specversion":"1.0","topic":"orders","traceid":"00-1343b02c3af4f9b352d4cb83d6c8cb81-82a64f8c4433e2c4-01","traceparent":"00-1343b02c3af4f9b352d4cb83d6c8cb81-82a64f8c4433e2c4-01","tracestate":"","type":"type2"}`

	orders1 string = "orders1"
)

func getBulkMessageEntries(count int) []contribpubsub.BulkMessageEntry {
	bulkEntries := make([]contribpubsub.BulkMessageEntry, 10)

	bulkEntries[0] = contribpubsub.BulkMessageEntry{EntryId: "1111111a", Event: []byte(order1)}
	bulkEntries[1] = contribpubsub.BulkMessageEntry{EntryId: "2222222b", Event: []byte(order2)}
	bulkEntries[2] = contribpubsub.BulkMessageEntry{EntryId: "333333c", Event: []byte(order3)}
	bulkEntries[3] = contribpubsub.BulkMessageEntry{EntryId: "4444444d", Event: []byte(order4)}
	bulkEntries[4] = contribpubsub.BulkMessageEntry{EntryId: "5555555e", Event: []byte(order5)}
	bulkEntries[5] = contribpubsub.BulkMessageEntry{EntryId: "66666666f", Event: []byte(order6)}
	bulkEntries[6] = contribpubsub.BulkMessageEntry{EntryId: "7777777g", Event: []byte(order7)}
	bulkEntries[7] = contribpubsub.BulkMessageEntry{EntryId: "8888888h", Event: []byte(order8)}
	bulkEntries[8] = contribpubsub.BulkMessageEntry{EntryId: "9999999i", Event: []byte(order9)}
	bulkEntries[9] = contribpubsub.BulkMessageEntry{EntryId: "10101010j", Event: []byte(order10)}

	return bulkEntries[:count]
}

func getBulkMessageEntriesWithWrongData() []contribpubsub.BulkMessageEntry {
	bulkEntries := make([]contribpubsub.BulkMessageEntry, 1)
	bulkEntries[0] = contribpubsub.BulkMessageEntry{EntryId: "1", Event: []byte(wrongOrder)}
	return bulkEntries
}

type ExpectedExtension struct {
	extKey   string
	extValue string
}

func getExpectedBulkRequests() map[string][]string {
	mapPathEntries := map[string][]string{
		"type1": {data1, data3, data5, data7, data8, data9},
		"type2": {data2, data4, data6, data10},
	}
	return mapPathEntries
}

func getExpectedExtension() map[string]ExpectedExtension {
	return map[string]ExpectedExtension{
		"type1": {ext1Key, ext1Value},
		"type2": {ext2Key, ext2Value},
	}
}

func TestBulkSubscribe(t *testing.T) {
	const testBulkSubscribePubsub = "bulkSubscribePubSub"

	t.Run("bulk Subscribe Message for raw payload", func(t *testing.T) {
		comp := &mockSubscribePubSub{}
		require.NoError(t, comp.Init(context.Background(), contribpubsub.Metadata{}))

		resp := contribpubsub.AppBulkResponse{AppResponses: []contribpubsub.AppBulkResponseEntry{{
			EntryId: "0",
			Status:  contribpubsub.Success,
		}}}
		respB, _ := json.Marshal(resp)
		fakeResp := invokev1.NewInvokeMethodResponse(200, "OK", nil).
			WithRawDataBytes(respB).
			WithContentType("application/json")
		defer fakeResp.Close()

		mockAppChannel := new(channelt.MockAppChannel)
		mockAppChannel.Init()
		mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), mock.Anything).Return(fakeResp, nil)

		ps, err := New(Options{
			Resiliency: resiliency.New(log),
			IsHTTP:     true,
			Channels:   new(channels.Channels).WithAppChannel(mockAppChannel),
			PubSub:     &runtimePubsub.PubsubItem{Component: comp},
			AppID:      TestRuntimeConfigID,
			PubSubName: testBulkSubscribePubsub,
			Topic:      "topic0",
			Route: runtimePubsub.Subscription{
				Rules:    []*runtimePubsub.Rule{{Path: "orders"}},
				Metadata: map[string]string{"rawPayload": "true"},
				BulkSubscribe: &runtimePubsub.BulkSubscribe{
					Enabled: true,
				},
			},
		})
		require.NoError(t, err)
		t.Cleanup(ps.Stop)

		err = comp.Publish(context.TODO(), &contribpubsub.PublishRequest{
			PubsubName: testBulkSubscribePubsub,
			Topic:      "topic0",
			Data:       []byte(`{"orderId":"1"}`),
		})
		require.NoError(t, err)
		pubsubIns := comp
		assert.Equal(t, 1, pubsubIns.bulkPubCount["topic0"])
		assert.True(t, pubsubIns.isBulkSubscribe)
		reqs := mockAppChannel.GetInvokedRequest()
		mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1)
		assert.Contains(t, string(reqs["orders"]), `event":"eyJvcmRlcklkIjoiMSJ9"`)
	})

	t.Run("bulk Subscribe Message for cloud event", func(t *testing.T) {
		comp := &mockSubscribePubSub{}
		require.NoError(t, comp.Init(context.Background(), contribpubsub.Metadata{}))

		resp := contribpubsub.AppBulkResponse{AppResponses: []contribpubsub.AppBulkResponseEntry{{
			EntryId: "0",
			Status:  contribpubsub.Success,
		}}}
		respB, _ := json.Marshal(resp)
		fakeResp := invokev1.NewInvokeMethodResponse(200, "OK", nil).
			WithRawDataBytes(respB).
			WithContentType("application/json")
		defer fakeResp.Close()

		mockAppChannel := new(channelt.MockAppChannel)
		mockAppChannel.Init()
		mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), mock.Anything).Return(fakeResp, nil)

		ps, err := New(Options{
			Resiliency: resiliency.New(log),
			IsHTTP:     true,
			Channels:   new(channels.Channels).WithAppChannel(mockAppChannel),
			PubSub:     &runtimePubsub.PubsubItem{Component: comp},
			AppID:      TestRuntimeConfigID,
			PubSubName: testBulkSubscribePubsub,
			Topic:      "topic0",
			Route: runtimePubsub.Subscription{
				Rules: []*runtimePubsub.Rule{{Path: "orders"}},
				BulkSubscribe: &runtimePubsub.BulkSubscribe{
					Enabled: true,
				},
			},
		})
		require.NoError(t, err)
		t.Cleanup(ps.Stop)

		order := `{"data":{"orderId":1},"datacontenttype":"application/json","id":"8b540b03-04b5-4871-96ae-c6bde0d5e16d","pubsubname":"orderpubsub","source":"checkout","specversion":"1.0","topic":"orders","traceid":"00-e61de949bb4de415a7af49fc86675648-ffb64972bb907224-01","traceparent":"00-e61de949bb4de415a7af49fc86675648-ffb64972bb907224-01","tracestate":"","type":"com.dapr.event.sent"}`

		err = comp.Publish(context.TODO(), &contribpubsub.PublishRequest{
			PubsubName: testBulkSubscribePubsub,
			Topic:      "topic0",
			Data:       []byte(order),
		})
		require.NoError(t, err)
		pubsubIns := comp
		assert.Equal(t, 1, pubsubIns.bulkPubCount["topic0"])
		assert.True(t, pubsubIns.isBulkSubscribe)
		reqs := mockAppChannel.GetInvokedRequest()
		mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1)
		assert.Contains(t, string(reqs["orders"]), eventKey+order)
	})

	t.Run("bulk Subscribe multiple Messages at once for cloud events", func(t *testing.T) {
		comp := &mockSubscribePubSub{
			features: []contribpubsub.Feature{contribpubsub.FeatureBulkPublish},
		}
		require.NoError(t, comp.Init(context.Background(), contribpubsub.Metadata{}))

		resp := contribpubsub.AppBulkResponse{AppResponses: []contribpubsub.AppBulkResponseEntry{
			{EntryId: "1111111a", Status: contribpubsub.Success},
			{EntryId: "2222222b", Status: contribpubsub.Success},
		}}
		respB, _ := json.Marshal(resp)
		fakeResp := invokev1.NewInvokeMethodResponse(200, "OK", nil).
			WithRawDataBytes(respB).
			WithContentType("application/json")
		defer fakeResp.Close()

		mockAppChannel := new(channelt.MockAppChannel)
		mockAppChannel.Init()
		mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), mock.Anything).Return(fakeResp, nil)

		ps, err := New(Options{
			Resiliency: resiliency.New(log),
			IsHTTP:     true,
			Channels:   new(channels.Channels).WithAppChannel(mockAppChannel),
			PubSub:     &runtimePubsub.PubsubItem{Component: comp},
			AppID:      TestRuntimeConfigID,
			PubSubName: testBulkSubscribePubsub,
			Topic:      "topic0",
			Route: runtimePubsub.Subscription{
				Rules: []*runtimePubsub.Rule{{Path: "orders"}},
				BulkSubscribe: &runtimePubsub.BulkSubscribe{
					Enabled: true,
				},
			},
		})
		require.NoError(t, err)
		t.Cleanup(ps.Stop)

		msgArr := getBulkMessageEntries(2)
		comp.BulkPublish(context.TODO(), &contribpubsub.BulkPublishRequest{
			PubsubName: testBulkSubscribePubsub,
			Topic:      "topic0",
			Entries:    msgArr,
		})
		assert.Len(t, comp.GetBulkResponse().Statuses, 2)
		require.NoError(t, assertItemExistsOnce(comp.GetBulkResponse().Statuses, "1111111a", "2222222b"))

		assert.Equal(t, 1, comp.bulkPubCount["topic0"])
		assert.True(t, comp.isBulkSubscribe)
		reqs := mockAppChannel.GetInvokedRequest()
		mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1)
		assert.Contains(t, string(reqs["orders"]), eventKey+order1)
		assert.Contains(t, string(reqs["orders"]), eventKey+order2)

		fakeResp2 := invokev1.NewInvokeMethodResponse(404, "OK", nil)
		defer fakeResp2.Close()
		mockAppChannel1 := new(channelt.MockAppChannel)
		mockAppChannel1.Init()
		ps.channels.WithAppChannel(mockAppChannel1)
		mockAppChannel1.On("InvokeMethod", mock.MatchedBy(matchContextInterface), mock.Anything).Return(fakeResp2, nil)

		msgArr = getBulkMessageEntries(3)
		comp.BulkPublish(context.TODO(), &contribpubsub.BulkPublishRequest{
			PubsubName: testBulkSubscribePubsub,
			Topic:      "topic0",
			Entries:    msgArr,
		})
		assert.Len(t, comp.GetBulkResponse().Statuses, 3)
		require.NoError(t, comp.GetBulkResponse().Error)
		require.NoError(t, assertItemExistsOnce(comp.GetBulkResponse().Statuses, "1111111a", "2222222b", "333333c"))

		assert.Equal(t, 2, comp.bulkPubCount["topic0"])
		assert.True(t, comp.isBulkSubscribe)
		reqs = mockAppChannel1.GetInvokedRequest()
		mockAppChannel1.AssertNumberOfCalls(t, "InvokeMethod", 1)
		assert.Contains(t, string(reqs["orders"]), eventKey+order1)
		assert.Contains(t, string(reqs["orders"]), eventKey+order2)
		assert.Contains(t, string(reqs["orders"]), eventKey+order3)

		fakeResp3 := invokev1.NewInvokeMethodResponse(400, "OK", nil)
		defer fakeResp3.Close()
		mockAppChannel2 := new(channelt.MockAppChannel)
		mockAppChannel2.Init()
		ps.channels.WithAppChannel(mockAppChannel2)
		mockAppChannel2.On("InvokeMethod", mock.MatchedBy(matchContextInterface), mock.Anything).Return(fakeResp3, nil)

		msgArr = getBulkMessageEntries(4)
		comp.BulkPublish(context.TODO(), &contribpubsub.BulkPublishRequest{
			PubsubName: testBulkSubscribePubsub,
			Topic:      "topic0",
			Entries:    msgArr,
		})
		assert.Len(t, comp.GetBulkResponse().Statuses, 4)
		require.Error(t, comp.GetBulkResponse().Error)
		require.NoError(t, assertItemExistsOnce(comp.GetBulkResponse().Statuses, "1111111a", "2222222b", "333333c", "4444444d"))

		assert.Equal(t, 3, comp.bulkPubCount["topic0"])
		assert.True(t, comp.isBulkSubscribe)
		reqs = mockAppChannel2.GetInvokedRequest()
		mockAppChannel2.AssertNumberOfCalls(t, "InvokeMethod", 1)
		assert.Contains(t, string(reqs["orders"]), eventKey+order1)
		assert.Contains(t, string(reqs["orders"]), eventKey+order2)
		assert.Contains(t, string(reqs["orders"]), eventKey+order3)
		assert.Contains(t, string(reqs["orders"]), eventKey+order4)

		mockAppChannel3 := new(channelt.MockAppChannel)
		mockAppChannel3.Init()
		ps.channels.WithAppChannel(mockAppChannel3)
		mockAppChannel3.On("InvokeMethod", mock.MatchedBy(matchContextInterface), mock.Anything).Return(nil, errors.New("Mock error"))

		msgArr = getBulkMessageEntries(1)
		comp.BulkPublish(context.TODO(), &contribpubsub.BulkPublishRequest{
			PubsubName: testBulkSubscribePubsub,
			Topic:      "topic0",
			Entries:    msgArr,
		})
		assert.Len(t, comp.GetBulkResponse().Statuses, 1)
		require.Error(t, comp.GetBulkResponse().Error)
		require.NoError(t, assertItemExistsOnce(comp.GetBulkResponse().Statuses, "1111111a"))

		assert.Equal(t, 4, comp.bulkPubCount["topic0"])
		assert.True(t, comp.isBulkSubscribe)
		reqs = mockAppChannel3.GetInvokedRequest()
		mockAppChannel3.AssertNumberOfCalls(t, "InvokeMethod", 1)
		assert.Contains(t, string(reqs["orders"]), eventKey+order1)
	})

	t.Run("bulk Subscribe events on different paths", func(t *testing.T) {
		comp := &mockSubscribePubSub{
			features: []contribpubsub.Feature{contribpubsub.FeatureBulkPublish},
		}
		require.NoError(t, comp.Init(context.Background(), contribpubsub.Metadata{}))

		resp := contribpubsub.AppBulkResponse{AppResponses: []contribpubsub.AppBulkResponseEntry{
			{EntryId: "1111111a", Status: contribpubsub.Success},
			{EntryId: "2222222b", Status: contribpubsub.Success},
		}}
		respB, _ := json.Marshal(resp)
		fakeResp := invokev1.NewInvokeMethodResponse(200, "OK", nil).
			WithRawDataBytes(respB).
			WithContentType("application/json")
		defer fakeResp.Close()

		mockAppChannel := new(channelt.MockAppChannel)
		mockAppChannel.Init()
		mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), mock.Anything).Return(fakeResp, nil)

		rule1, err := runtimePubsub.CreateRoutingRule(`event.type == "type1"`, "orders1")
		require.NoError(t, err)
		rule2, err := runtimePubsub.CreateRoutingRule(`event.type == "type2"`, "orders2")
		require.NoError(t, err)

		ps, err := New(Options{
			Resiliency: resiliency.New(log),
			IsHTTP:     true,
			Channels:   new(channels.Channels).WithAppChannel(mockAppChannel),
			PubSub:     &runtimePubsub.PubsubItem{Component: comp},
			AppID:      TestRuntimeConfigID,
			PubSubName: testBulkSubscribePubsub,
			Topic:      "topic0",
			Route: runtimePubsub.Subscription{
				Rules: []*runtimePubsub.Rule{rule1, rule2},
				BulkSubscribe: &runtimePubsub.BulkSubscribe{
					Enabled: true,
				},
			},
		})
		require.NoError(t, err)
		t.Cleanup(ps.Stop)

		msgArr := getBulkMessageEntries(2)
		_, err = comp.BulkPublish(context.TODO(), &contribpubsub.BulkPublishRequest{
			PubsubName: testBulkSubscribePubsub,
			Topic:      "topic0",
			Entries:    msgArr,
		})
		require.NoError(t, err)

		assert.Equal(t, 1, comp.bulkPubCount["topic0"])
		assert.True(t, comp.isBulkSubscribe)
		reqs := mockAppChannel.GetInvokedRequest()
		mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 2)
		assert.Contains(t, string(reqs["orders1"]), eventKey+order1)
		assert.NotContains(t, string(reqs["orders1"]), eventKey+order2)
		assert.Contains(t, string(reqs["orders2"]), eventKey+order2)
		assert.NotContains(t, string(reqs["orders2"]), eventKey+order1)
	})

	t.Run("verify Responses when bulk Subscribe events on different paths", func(t *testing.T) {
		comp := &mockSubscribePubSub{
			features: []contribpubsub.Feature{contribpubsub.FeatureBulkPublish},
		}
		require.NoError(t, comp.Init(context.Background(), contribpubsub.Metadata{}))

		mockAppChannel := new(channelt.MockAppChannel)
		mockAppChannel.Init()

		responseItemsOrders1 := contribpubsub.AppBulkResponse{
			AppResponses: []contribpubsub.AppBulkResponseEntry{
				{EntryId: "1111111a", Status: "SUCCESS"},
				{EntryId: "333333c", Status: "RETRY"},
				{EntryId: "5555555e", Status: "DROP"},
				{EntryId: "7777777g", Status: "RETRY"},
				{EntryId: "8888888h", Status: "SUCCESS"},
				{EntryId: "9999999i", Status: "SUCCESS"},
			},
		}
		resp1, _ := json.Marshal(responseItemsOrders1)
		respInvoke1 := invokev1.NewInvokeMethodResponse(200, "OK", nil).
			WithRawDataBytes(resp1).
			WithContentType("application/json")
		defer respInvoke1.Close()

		responseItemsOrders2 := contribpubsub.AppBulkResponse{
			AppResponses: []contribpubsub.AppBulkResponseEntry{
				{EntryId: "2222222b", Status: "SUCCESS"},
				{EntryId: "4444444d", Status: "DROP"},
				{EntryId: "66666666f", Status: "DROP"},
				{EntryId: "10101010j", Status: "SUCCESS"},
			},
		}
		resp2, _ := json.Marshal(responseItemsOrders2)
		respInvoke2 := invokev1.NewInvokeMethodResponse(200, "OK", nil).
			WithRawDataBytes(resp2).
			WithContentType("application/json")
		defer respInvoke2.Close()

		mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), matchDaprRequestMethod("orders1")).Return(respInvoke1, nil)
		mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), matchDaprRequestMethod("orders2")).Return(respInvoke2, nil)

		rule1, err := runtimePubsub.CreateRoutingRule(`event.type == "type1"`, orders1)
		require.NoError(t, err)
		rule2, err := runtimePubsub.CreateRoutingRule(`event.type == "type2"`, "orders2")
		require.NoError(t, err)

		ps, err := New(Options{
			Resiliency: resiliency.New(log),
			IsHTTP:     true,
			Channels:   new(channels.Channels).WithAppChannel(mockAppChannel),
			PubSub:     &runtimePubsub.PubsubItem{Component: comp},
			AppID:      TestRuntimeConfigID,
			PubSubName: testBulkSubscribePubsub,
			Topic:      "topic0",
			Route: runtimePubsub.Subscription{
				Rules: []*runtimePubsub.Rule{rule1, rule2},
				BulkSubscribe: &runtimePubsub.BulkSubscribe{
					Enabled: true,
				},
			},
		})
		require.NoError(t, err)
		t.Cleanup(ps.Stop)

		msgArr := getBulkMessageEntries(10)
		_, err = comp.BulkPublish(context.TODO(), &contribpubsub.BulkPublishRequest{
			PubsubName: testBulkSubscribePubsub,
			Topic:      "topic0",
			Entries:    msgArr,
		})
		require.NoError(t, err)

		assert.Equal(t, 1, comp.bulkPubCount["topic0"])
		assert.True(t, comp.isBulkSubscribe)
		reqs := mockAppChannel.GetInvokedRequest()
		mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 2)
		assert.True(t, verifyIfEventContainsStrings(reqs["orders1"], eventKey+order1, eventKey+order3,
			eventKey+order5, eventKey+order7, eventKey+order8, eventKey+order9))
		assert.True(t, verifyIfEventNotContainsStrings(reqs["orders1"], eventKey+order2, eventKey+order4,
			eventKey+order6, eventKey+order10))
		assert.True(t, verifyIfEventContainsStrings(reqs["orders2"], eventKey+order2, eventKey+order4,
			eventKey+order6, eventKey+order10))
		assert.True(t, verifyIfEventNotContainsStrings(reqs["orders2"], eventKey+order1, eventKey+order3,
			eventKey+order5, eventKey+order7, eventKey+order8, eventKey+order9))

		expectedResponse := BulkResponseExpectation{
			Responses: []BulkResponseEntryExpectation{
				{EntryId: "1111111a", IsError: false},
				{EntryId: "2222222b", IsError: false},
				{EntryId: "333333c", IsError: true},
				{EntryId: "4444444d", IsError: false},
				{EntryId: "5555555e", IsError: false},
				{EntryId: "66666666f", IsError: false},
				{EntryId: "7777777g", IsError: true},
				{EntryId: "8888888h", IsError: false},
				{EntryId: "9999999i", IsError: false},
				{EntryId: "10101010j", IsError: false},
			},
		}

		assert.True(t, verifyBulkSubscribeResponses(expectedResponse, comp.bulkReponse.Statuses))
	})

	t.Run("verify Responses when entryId supplied blank while sending messages", func(t *testing.T) {
		comp := &mockSubscribePubSub{
			features: []contribpubsub.Feature{contribpubsub.FeatureBulkPublish},
		}
		require.NoError(t, comp.Init(context.Background(), contribpubsub.Metadata{}))

		mockAppChannel := new(channelt.MockAppChannel)
		mockAppChannel.Init()

		ps, err := New(Options{
			Resiliency: resiliency.New(log),
			IsHTTP:     true,
			Channels:   new(channels.Channels).WithAppChannel(mockAppChannel),
			PubSub:     &runtimePubsub.PubsubItem{Component: comp},
			AppID:      TestRuntimeConfigID,
			PubSubName: testBulkSubscribePubsub,
			Topic:      "topic0",
			Route: runtimePubsub.Subscription{
				Rules: []*runtimePubsub.Rule{{Path: "orders"}},
				BulkSubscribe: &runtimePubsub.BulkSubscribe{
					Enabled: true,
				},
			},
		})
		require.NoError(t, err)
		t.Cleanup(ps.Stop)

		msgArr := getBulkMessageEntries(4)
		msgArr[0].EntryId = ""
		msgArr[2].EntryId = ""

		responseItemsOrders1 := contribpubsub.AppBulkResponse{
			AppResponses: []contribpubsub.AppBulkResponseEntry{
				{EntryId: "2222222b", Status: "SUCCESS"},
				{EntryId: "4444444d", Status: "SUCCESS"},
			},
		}
		resp1, _ := json.Marshal(responseItemsOrders1)
		respInvoke1 := invokev1.NewInvokeMethodResponse(200, "OK", nil).
			WithRawDataBytes(resp1).
			WithContentType("application/json")
		defer respInvoke1.Close()

		mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), matchDaprRequestMethod("orders")).Return(respInvoke1, nil)

		_, err = comp.BulkPublish(context.TODO(), &contribpubsub.BulkPublishRequest{
			PubsubName: testBulkSubscribePubsub,
			Topic:      "topic0",
			Entries:    msgArr,
		})
		require.NoError(t, err)

		assert.Equal(t, 1, comp.bulkPubCount["topic0"])
		assert.True(t, comp.isBulkSubscribe)
		reqs := mockAppChannel.GetInvokedRequest()
		mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1)
		assert.True(t, verifyIfEventContainsStrings(reqs["orders"], eventKey+order2, eventKey+order4))
		assert.True(t, verifyIfEventNotContainsStrings(reqs["orders"], eventKey+order1, eventKey+order3))

		expectedResponse := BulkResponseExpectation{
			Responses: []BulkResponseEntryExpectation{
				{EntryId: "", IsError: true},
				{EntryId: "2222222b", IsError: false},
				{EntryId: "", IsError: true},
				{EntryId: "4444444d", IsError: false},
			},
		}

		assert.True(t, verifyBulkSubscribeResponses(expectedResponse, comp.bulkReponse.Statuses))
	})

	t.Run("verify bulk Subscribe Responses when App sends back out of order entryIds", func(t *testing.T) {
		comp := &mockSubscribePubSub{
			features: []contribpubsub.Feature{contribpubsub.FeatureBulkPublish},
		}
		require.NoError(t, comp.Init(context.Background(), contribpubsub.Metadata{}))

		mockAppChannel := new(channelt.MockAppChannel)
		mockAppChannel.Init()

		ps, err := New(Options{
			Resiliency: resiliency.New(log),
			IsHTTP:     true,
			Channels:   new(channels.Channels).WithAppChannel(mockAppChannel),
			PubSub:     &runtimePubsub.PubsubItem{Component: comp},
			AppID:      TestRuntimeConfigID,
			PubSubName: testBulkSubscribePubsub,
			Topic:      "topic0",
			Route: runtimePubsub.Subscription{
				Rules: []*runtimePubsub.Rule{{Path: "orders"}},
				BulkSubscribe: &runtimePubsub.BulkSubscribe{
					Enabled: true,
				},
			},
		})
		require.NoError(t, err)
		t.Cleanup(ps.Stop)

		msgArr := getBulkMessageEntries(5)

		responseItemsOrders1 := contribpubsub.AppBulkResponse{
			AppResponses: []contribpubsub.AppBulkResponseEntry{
				{EntryId: "2222222b", Status: "RETRY"},
				{EntryId: "333333c", Status: "SUCCESS"},
				{EntryId: "5555555e", Status: "RETRY"},
				{EntryId: "1111111a", Status: "SUCCESS"},
				{EntryId: "4444444d", Status: "SUCCESS"},
			},
		}
		resp1, _ := json.Marshal(responseItemsOrders1)
		respInvoke1 := invokev1.NewInvokeMethodResponse(200, "OK", nil).
			WithRawDataBytes(resp1).
			WithContentType("application/json")
		defer respInvoke1.Close()

		mockAppChannel.On(
			"InvokeMethod",
			mock.MatchedBy(matchContextInterface),
			matchDaprRequestMethod("orders"),
		).Return(respInvoke1, nil)

		_, err = comp.BulkPublish(context.TODO(), &contribpubsub.BulkPublishRequest{
			PubsubName: testBulkSubscribePubsub,
			Topic:      "topic0",
			Entries:    msgArr,
		})
		require.NoError(t, err)

		assert.Equal(t, 1, comp.bulkPubCount["topic0"])
		assert.True(t, comp.isBulkSubscribe)
		reqs := mockAppChannel.GetInvokedRequest()
		mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1)
		assert.True(t, verifyIfEventContainsStrings(reqs["orders"], eventKey+order1, eventKey+order2,
			eventKey+order3, eventKey+order4, eventKey+order5))

		expectedResponse := BulkResponseExpectation{
			Responses: []BulkResponseEntryExpectation{
				{EntryId: "1111111a", IsError: false},
				{EntryId: "2222222b", IsError: true},
				{EntryId: "333333c", IsError: false},
				{EntryId: "4444444d", IsError: false},
				{EntryId: "5555555e", IsError: true},
			},
		}

		assert.True(t, verifyBulkSubscribeResponses(expectedResponse, comp.bulkReponse.Statuses))
	})

	t.Run("verify bulk Subscribe Responses when App sends back wrong entryIds", func(t *testing.T) {
		comp := &mockSubscribePubSub{
			features: []contribpubsub.Feature{contribpubsub.FeatureBulkPublish},
		}
		require.NoError(t, comp.Init(context.Background(), contribpubsub.Metadata{}))

		mockAppChannel := new(channelt.MockAppChannel)
		mockAppChannel.Init()

		ps, err := New(Options{
			Resiliency: resiliency.New(log),
			IsHTTP:     true,
			Channels:   new(channels.Channels).WithAppChannel(mockAppChannel),
			PubSub:     &runtimePubsub.PubsubItem{Component: comp},
			AppID:      TestRuntimeConfigID,
			PubSubName: testBulkSubscribePubsub,
			Topic:      "topic0",
			Route: runtimePubsub.Subscription{
				Rules: []*runtimePubsub.Rule{{Path: "orders"}},
				BulkSubscribe: &runtimePubsub.BulkSubscribe{
					Enabled: true,
				},
			},
		})
		require.NoError(t, err)
		t.Cleanup(ps.Stop)

		msgArr := getBulkMessageEntries(5)

		responseItemsOrders1 := contribpubsub.AppBulkResponse{
			AppResponses: []contribpubsub.AppBulkResponseEntry{
				{EntryId: "wrongEntryId1", Status: "SUCCESS"},
				{EntryId: "2222222b", Status: "RETRY"},
				{EntryId: "333333c", Status: "SUCCESS"},
				{EntryId: "wrongEntryId2", Status: "SUCCESS"},
				{EntryId: "5555555e", Status: "RETRY"},
			},
		}
		resp1, _ := json.Marshal(responseItemsOrders1)
		respInvoke1 := invokev1.NewInvokeMethodResponse(200, "OK", nil).
			WithRawDataBytes(resp1).
			WithContentType("application/json")
		defer respInvoke1.Close()

		mockAppChannel.On("InvokeMethod",
			mock.MatchedBy(matchContextInterface),
			matchDaprRequestMethod("orders"),
		).Return(respInvoke1, nil)

		_, err = comp.BulkPublish(context.TODO(), &contribpubsub.BulkPublishRequest{
			PubsubName: testBulkSubscribePubsub,
			Topic:      "topic0",
			Entries:    msgArr,
		})
		require.NoError(t, err)

		assert.Equal(t, 1, comp.bulkPubCount["topic0"])
		assert.True(t, comp.isBulkSubscribe)
		reqs := mockAppChannel.GetInvokedRequest()
		mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1)
		assert.True(t, verifyIfEventContainsStrings(reqs["orders"], eventKey+order1, eventKey+order2,
			eventKey+order3, eventKey+order4, eventKey+order5))

		expectedResponse := BulkResponseExpectation{
			Responses: []BulkResponseEntryExpectation{
				{EntryId: "1111111a", IsError: true},
				{EntryId: "2222222b", IsError: true},
				{EntryId: "333333c", IsError: false},
				{EntryId: "4444444d", IsError: true},
				{EntryId: "5555555e", IsError: true},
			},
		}

		assert.True(t, verifyBulkSubscribeResponses(expectedResponse, comp.bulkReponse.Statuses))
	})
}

func TestBulkSubscribeGRPC(t *testing.T) {
	testBulkSubscribePubsub := "bulkSubscribePubSub"

	t.Run("GRPC - bulk Subscribe Message for raw payload", func(t *testing.T) {
		port, err := freeport.GetFreePort()
		require.NoError(t, err)

		comp := &mockSubscribePubSub{
			features: []contribpubsub.Feature{contribpubsub.FeatureBulkPublish},
		}
		require.NoError(t, comp.Init(context.Background(), contribpubsub.Metadata{}))

		nbei1 := contribpubsub.BulkMessageEntry{EntryId: "1111111a", Event: []byte(`{"orderId":"1"}`)}
		nbei2 := contribpubsub.BulkMessageEntry{EntryId: "2222222b", Event: []byte(`{"orderId":"2"}`)}
		msgArr := []contribpubsub.BulkMessageEntry{nbei1, nbei2}
		responseEntries := make([]*runtimev1pb.TopicEventBulkResponseEntry, 2)
		for k, msg := range msgArr {
			responseEntries[k] = &runtimev1pb.TopicEventBulkResponseEntry{
				EntryId: msg.EntryId,
			}
		}
		responseEntries = setBulkResponseStatus(responseEntries,
			runtimev1pb.TopicEventResponse_DROP,
			runtimev1pb.TopicEventResponse_SUCCESS)

		responses := runtimev1pb.TopicEventBulkResponse{
			Statuses: responseEntries,
		}
		mapResp := make(map[string]*runtimev1pb.TopicEventBulkResponse)
		mapResp["orders"] = &responses
		// create mock application server first
		mockServer := &channelt.MockServer{
			BulkResponsePerPath: mapResp,
			Error:               nil,
		}
		grpcServer := startTestAppCallbackAlphaGRPCServer(t, port, mockServer)
		if grpcServer != nil {
			// properly stop the gRPC server
			defer grpcServer.Stop()
		}

		grpc := manager.NewManager(nil, modes.StandaloneMode, &manager.AppChannelConfig{Port: port})
		// create a new AppChannel and gRPC client for every test
		mockAppChannel := channels.New(channels.Options{
			Registry:       registry.New(registry.NewOptions()),
			ComponentStore: compstore.New(),
			GlobalConfig:   new(config.Configuration),
			GRPC:           grpc,
			AppConnectionConfig: config.AppConnectionConfig{
				Port: port,
			},
		})
		require.NoError(t, mockAppChannel.Refresh())

		ps, err := New(Options{
			Resiliency: resiliency.New(log),
			IsHTTP:     false,
			GRPC:       grpc,
			Channels:   mockAppChannel,
			PubSub:     &runtimePubsub.PubsubItem{Component: comp},
			AppID:      TestRuntimeConfigID,
			PubSubName: testBulkSubscribePubsub,
			Topic:      "topic0",
			Route: runtimePubsub.Subscription{
				Rules:    []*runtimePubsub.Rule{{Path: "orders"}},
				Metadata: map[string]string{"rawPayload": "true"},
				BulkSubscribe: &runtimePubsub.BulkSubscribe{
					Enabled: true,
				},
			},
		})
		require.NoError(t, err)
		t.Cleanup(ps.Stop)

		_, err = comp.BulkPublish(context.TODO(), &contribpubsub.BulkPublishRequest{
			PubsubName: testBulkSubscribePubsub,
			Topic:      "topic0",
			Entries:    msgArr,
		})
		assert.Len(t, comp.GetBulkResponse().Statuses, 2)
		require.NoError(t, comp.GetBulkResponse().Error)
		require.NoError(t, assertItemExistsOnce(comp.GetBulkResponse().Statuses, "1111111a", "2222222b"))
		require.NoError(t, err)

		expectedResponse := BulkResponseExpectation{
			Responses: []BulkResponseEntryExpectation{
				{EntryId: "1111111a", IsError: false},
				{EntryId: "2222222b", IsError: false},
			},
		}
		assert.Contains(t, string(mockServer.RequestsReceived["orders"].GetEntries()[0].GetBytes()), `{"orderId":"1"}`)
		assert.Contains(t, string(mockServer.RequestsReceived["orders"].GetEntries()[1].GetBytes()), `{"orderId":"2"}`)
		assert.True(t, verifyBulkSubscribeResponses(expectedResponse, comp.bulkReponse.Statuses))

		mockServer.BulkResponsePerPath = nil
		mockServer.Error = status.Error(codes.Unimplemented, "method not implemented")
		comp.BulkPublish(context.TODO(), &contribpubsub.BulkPublishRequest{
			PubsubName: testBulkSubscribePubsub,
			Topic:      "topic0",
			Entries:    msgArr,
		})
		assert.Len(t, comp.GetBulkResponse().Statuses, 2)
		require.NoError(t, comp.GetBulkResponse().Error)
		require.NoError(t, assertItemExistsOnce(comp.GetBulkResponse().Statuses, "1111111a", "2222222b"))

		mockServer.Error = status.Error(codes.Unknown, "unknown error")
		comp.BulkPublish(context.TODO(), &contribpubsub.BulkPublishRequest{
			PubsubName: testBulkSubscribePubsub,
			Topic:      "topic0",
			Entries:    msgArr,
		})
		assert.Len(t, comp.GetBulkResponse().Statuses, 2)
		require.Error(t, comp.GetBulkResponse().Error)
		require.NoError(t, assertItemExistsOnce(comp.GetBulkResponse().Statuses, "1111111a", "2222222b"))
	})

	t.Run("GRPC - bulk Subscribe cloud event Message on different paths and verify response", func(t *testing.T) {
		port, err := freeport.GetFreePort()
		require.NoError(t, err)

		reg := registry.New(registry.NewOptions())
		comp := &mockSubscribePubSub{
			features: []contribpubsub.Feature{contribpubsub.FeatureBulkPublish},
		}
		require.NoError(t, comp.Init(context.Background(), contribpubsub.Metadata{}))

		rule1, err := runtimePubsub.CreateRoutingRule(`event.type == "type1"`, orders1)
		require.NoError(t, err)
		rule2, err := runtimePubsub.CreateRoutingRule(`event.type == "type2"`, "orders2")
		require.NoError(t, err)

		msgArr := getBulkMessageEntries(10)
		responseEntries1 := make([]*runtimev1pb.TopicEventBulkResponseEntry, 6)
		responseEntries2 := make([]*runtimev1pb.TopicEventBulkResponseEntry, 4)
		i := 0
		j := 0
		for k, msg := range msgArr {
			if strings.Contains(string(msgArr[k].Event), "type1") {
				responseEntries1[i] = &runtimev1pb.TopicEventBulkResponseEntry{
					EntryId: msg.EntryId,
				}
				i++
			} else if strings.Contains(string(msgArr[k].Event), "type2") {
				responseEntries2[j] = &runtimev1pb.TopicEventBulkResponseEntry{
					EntryId: msg.EntryId,
				}
				j++
			}
		}
		responseEntries1 = setBulkResponseStatus(responseEntries1,
			runtimev1pb.TopicEventResponse_DROP,
			runtimev1pb.TopicEventResponse_RETRY,
			runtimev1pb.TopicEventResponse_DROP,
			runtimev1pb.TopicEventResponse_RETRY,
			runtimev1pb.TopicEventResponse_SUCCESS,
			runtimev1pb.TopicEventResponse_DROP)
		responseEntries2 = setBulkResponseStatus(responseEntries2,
			runtimev1pb.TopicEventResponse_RETRY,
			runtimev1pb.TopicEventResponse_DROP,
			runtimev1pb.TopicEventResponse_RETRY,
			runtimev1pb.TopicEventResponse_SUCCESS)
		responses1 := runtimev1pb.TopicEventBulkResponse{
			Statuses: responseEntries1,
		}
		responses2 := runtimev1pb.TopicEventBulkResponse{
			Statuses: responseEntries2,
		}
		mapResp := make(map[string]*runtimev1pb.TopicEventBulkResponse)
		mapResp[orders1] = &responses1
		mapResp["orders2"] = &responses2
		// create mock application server first
		mockServer := &channelt.MockServer{
			BulkResponsePerPath: mapResp,
			Error:               nil,
		}
		grpcServer := startTestAppCallbackAlphaGRPCServer(t, port, mockServer)
		if grpcServer != nil {
			// properly stop the gRPC server
			defer grpcServer.Stop()
		}
		// create a new AppChannel and gRPC client for every test
		grpc := manager.NewManager(nil, modes.StandaloneMode, &manager.AppChannelConfig{Port: port})
		mockAppChannel := channels.New(channels.Options{
			ComponentStore:      compstore.New(),
			Registry:            reg,
			GlobalConfig:        new(config.Configuration),
			AppConnectionConfig: config.AppConnectionConfig{Port: port},
			GRPC:                grpc,
		})
		require.NoError(t, mockAppChannel.Refresh())

		ps, err := New(Options{
			Resiliency: resiliency.New(log),
			IsHTTP:     false,
			GRPC:       grpc,
			Channels:   mockAppChannel,
			PubSub:     &runtimePubsub.PubsubItem{Component: comp},
			AppID:      TestRuntimeConfigID,
			PubSubName: testBulkSubscribePubsub,
			Topic:      "topic0",
			Route: runtimePubsub.Subscription{
				Rules: []*runtimePubsub.Rule{rule1, rule2},
				BulkSubscribe: &runtimePubsub.BulkSubscribe{
					Enabled: true,
				},
			},
		})
		require.NoError(t, err)
		t.Cleanup(ps.Stop)

		_, err = comp.BulkPublish(context.TODO(), &contribpubsub.BulkPublishRequest{
			PubsubName: testBulkSubscribePubsub,
			Topic:      "topic0",
			Entries:    msgArr,
		})
		require.NoError(t, err)

		expectedResponse := BulkResponseExpectation{
			Responses: []BulkResponseEntryExpectation{
				{EntryId: "1111111a", IsError: false},
				{EntryId: "2222222b", IsError: true},
				{EntryId: "333333c", IsError: true},
				{EntryId: "4444444d", IsError: false},
				{EntryId: "5555555e", IsError: false},
				{EntryId: "66666666f", IsError: true},
				{EntryId: "7777777g", IsError: true},
				{EntryId: "8888888h", IsError: false},
				{EntryId: "9999999i", IsError: false},
				{EntryId: "10101010j", IsError: false},
			},
		}

		assert.True(t, verifyBulkSubscribeRequest(getExpectedBulkRequests()["type1"],
			getExpectedExtension()["type1"], mockServer.RequestsReceived[orders1]))
		assert.True(t, verifyBulkSubscribeRequest(getExpectedBulkRequests()["type2"],
			getExpectedExtension()["type2"], mockServer.RequestsReceived["orders2"]))
		assert.True(t, verifyBulkSubscribeResponses(expectedResponse, comp.bulkReponse.Statuses))
	})

	t.Run("GRPC - verify Responses when entryId supplied blank while sending messages", func(t *testing.T) {
		port, err := freeport.GetFreePort()
		require.NoError(t, err)

		reg := registry.New(registry.NewOptions())
		comp := &mockSubscribePubSub{
			features: []contribpubsub.Feature{contribpubsub.FeatureBulkPublish},
		}
		require.NoError(t, comp.Init(context.Background(), contribpubsub.Metadata{}))

		msgArr := getBulkMessageEntries(4)
		msgArr[0].EntryId = ""
		msgArr[2].EntryId = ""
		responseEntries := make([]*runtimev1pb.TopicEventBulkResponseEntry, 4)
		for k, msg := range msgArr {
			responseEntries[k] = &runtimev1pb.TopicEventBulkResponseEntry{
				EntryId: msg.EntryId,
			}
		}
		responseEntries[1].Status = runtimev1pb.TopicEventResponse_SUCCESS
		responseEntries[3].Status = runtimev1pb.TopicEventResponse_SUCCESS
		responses := runtimev1pb.TopicEventBulkResponse{
			Statuses: responseEntries,
		}
		mapResp := make(map[string]*runtimev1pb.TopicEventBulkResponse)
		mapResp["orders"] = &responses
		// create mock application server first
		mockServer := &channelt.MockServer{
			BulkResponsePerPath: mapResp,
			Error:               nil,
		}
		grpcServer := startTestAppCallbackAlphaGRPCServer(t, port, mockServer)
		if grpcServer != nil {
			// properly stop the gRPC server
			defer grpcServer.Stop()
		}

		grpc := manager.NewManager(nil, modes.StandaloneMode, &manager.AppChannelConfig{Port: port})
		mockAppChannel := channels.New(channels.Options{
			ComponentStore:      compstore.New(),
			Registry:            reg,
			GlobalConfig:        new(config.Configuration),
			GRPC:                grpc,
			AppConnectionConfig: config.AppConnectionConfig{Port: port},
		})
		require.NoError(t, err)
		require.NoError(t, mockAppChannel.Refresh())

		ps, err := New(Options{
			Resiliency: resiliency.New(log),
			IsHTTP:     false,
			GRPC:       grpc,
			Channels:   mockAppChannel,
			PubSub:     &runtimePubsub.PubsubItem{Component: comp},
			AppID:      TestRuntimeConfigID,
			PubSubName: testBulkSubscribePubsub,
			Topic:      "topic0",
			Route: runtimePubsub.Subscription{
				Rules: []*runtimePubsub.Rule{{Path: "orders"}},
				BulkSubscribe: &runtimePubsub.BulkSubscribe{
					Enabled: true,
				},
			},
		})
		require.NoError(t, err)
		t.Cleanup(ps.Stop)

		_, err = comp.BulkPublish(context.TODO(), &contribpubsub.BulkPublishRequest{
			PubsubName: testBulkSubscribePubsub,
			Topic:      "topic0",
			Entries:    msgArr,
		})
		require.NoError(t, err)

		expectedResponse := BulkResponseExpectation{
			Responses: []BulkResponseEntryExpectation{
				{EntryId: "", IsError: true},
				{EntryId: "2222222b", IsError: false},
				{EntryId: "", IsError: true},
				{EntryId: "4444444d", IsError: false},
			},
		}

		assert.True(t, verifyBulkSubscribeResponses(expectedResponse, comp.bulkReponse.Statuses))
	})

	t.Run("GRPC - verify bulk Subscribe Responses when App sends back out of order entryIds", func(t *testing.T) {
		port, err := freeport.GetFreePort()
		require.NoError(t, err)

		reg := registry.New(registry.NewOptions())
		comp := &mockSubscribePubSub{
			features: []contribpubsub.Feature{contribpubsub.FeatureBulkPublish},
		}
		require.NoError(t, comp.Init(context.Background(), contribpubsub.Metadata{}))

		msgArr := getBulkMessageEntries(5)
		responseEntries := make([]*runtimev1pb.TopicEventBulkResponseEntry, 5)
		responseEntries[0] = &runtimev1pb.TopicEventBulkResponseEntry{
			EntryId: msgArr[1].EntryId,
			Status:  runtimev1pb.TopicEventResponse_RETRY,
		}
		responseEntries[1] = &runtimev1pb.TopicEventBulkResponseEntry{
			EntryId: msgArr[2].EntryId,
			Status:  runtimev1pb.TopicEventResponse_SUCCESS,
		}
		responseEntries[2] = &runtimev1pb.TopicEventBulkResponseEntry{
			EntryId: msgArr[4].EntryId,
			Status:  runtimev1pb.TopicEventResponse_RETRY,
		}
		responseEntries[3] = &runtimev1pb.TopicEventBulkResponseEntry{
			EntryId: msgArr[0].EntryId,
			Status:  runtimev1pb.TopicEventResponse_SUCCESS,
		}
		responseEntries[4] = &runtimev1pb.TopicEventBulkResponseEntry{
			EntryId: msgArr[3].EntryId,
			Status:  runtimev1pb.TopicEventResponse_SUCCESS,
		}
		responses := runtimev1pb.TopicEventBulkResponse{
			Statuses: responseEntries,
		}
		mapResp := make(map[string]*runtimev1pb.TopicEventBulkResponse)
		mapResp["orders"] = &responses
		// create mock application server first
		mockServer := &channelt.MockServer{
			BulkResponsePerPath: mapResp,
			Error:               nil,
		}
		grpcServer := startTestAppCallbackAlphaGRPCServer(t, port, mockServer)
		if grpcServer != nil {
			// properly stop the gRPC server
			defer grpcServer.Stop()
		}

		grpc := manager.NewManager(nil, modes.StandaloneMode, &manager.AppChannelConfig{Port: port})
		mockAppChannel := channels.New(channels.Options{
			ComponentStore:      compstore.New(),
			Registry:            reg,
			GlobalConfig:        new(config.Configuration),
			GRPC:                grpc,
			AppConnectionConfig: config.AppConnectionConfig{Port: port},
		})
		require.NoError(t, err)
		require.NoError(t, mockAppChannel.Refresh())

		ps, err := New(Options{
			Resiliency: resiliency.New(log),
			IsHTTP:     false,
			GRPC:       grpc,
			Channels:   mockAppChannel,
			PubSub:     &runtimePubsub.PubsubItem{Component: comp},
			AppID:      TestRuntimeConfigID,
			PubSubName: testBulkSubscribePubsub,
			Topic:      "topic0",
			Route: runtimePubsub.Subscription{
				Rules: []*runtimePubsub.Rule{{Path: "orders"}},
				BulkSubscribe: &runtimePubsub.BulkSubscribe{
					Enabled: true,
				},
			},
}) require.NoError(t, err) t.Cleanup(ps.Stop) _, err = comp.BulkPublish(context.TODO(), &contribpubsub.BulkPublishRequest{ PubsubName: testBulkSubscribePubsub, Topic: "topic0", Entries: msgArr, }) require.NoError(t, err) expectedResponse := BulkResponseExpectation{ Responses: []BulkResponseEntryExpectation{ {EntryId: "1111111a", IsError: false}, {EntryId: "2222222b", IsError: true}, {EntryId: "333333c", IsError: false}, {EntryId: "4444444d", IsError: false}, {EntryId: "5555555e", IsError: true}, }, } assert.True(t, verifyBulkSubscribeResponses(expectedResponse, comp.bulkReponse.Statuses)) }) t.Run("GRPC - verify bulk Subscribe Responses when App sends back wrong entryIds", func(t *testing.T) { port, err := freeport.GetFreePort() require.NoError(t, err) reg := registry.New(registry.NewOptions()) comp := &mockSubscribePubSub{ features: []contribpubsub.Feature{contribpubsub.FeatureBulkPublish}, } require.NoError(t, comp.Init(context.Background(), contribpubsub.Metadata{})) msgArr := getBulkMessageEntries(5) responseEntries := make([]*runtimev1pb.TopicEventBulkResponseEntry, 5) for k, msg := range msgArr { responseEntries[k] = &runtimev1pb.TopicEventBulkResponseEntry{ EntryId: msg.EntryId, } } responseEntries[0].EntryId = "wrongId1" responseEntries[3].EntryId = "wrongId2" responseEntries = setBulkResponseStatus(responseEntries, runtimev1pb.TopicEventResponse_SUCCESS, runtimev1pb.TopicEventResponse_RETRY, runtimev1pb.TopicEventResponse_SUCCESS, runtimev1pb.TopicEventResponse_SUCCESS, runtimev1pb.TopicEventResponse_RETRY) responses := runtimev1pb.TopicEventBulkResponse{ Statuses: responseEntries, } mapResp := make(map[string]*runtimev1pb.TopicEventBulkResponse) mapResp["orders"] = &responses // create mock application server first mockServer := &channelt.MockServer{ BulkResponsePerPath: mapResp, Error: nil, } grpcServer := startTestAppCallbackAlphaGRPCServer(t, port, mockServer) if grpcServer != nil { // properly stop the gRPC server defer grpcServer.Stop() } grpc := manager.NewManager(nil, modes.StandaloneMode, &manager.AppChannelConfig{Port: port}) mockAppChannel := channels.New(channels.Options{ ComponentStore: compstore.New(), Registry: reg, GlobalConfig: new(config.Configuration), GRPC: grpc, AppConnectionConfig: config.AppConnectionConfig{Port: port}, }) require.NoError(t, err) require.NoError(t, mockAppChannel.Refresh()) ps, err := New(Options{ Resiliency: resiliency.New(log), IsHTTP: false, GRPC: grpc, Channels: mockAppChannel, PubSub: &runtimePubsub.PubsubItem{Component: comp}, AppID: TestRuntimeConfigID, PubSubName: testBulkSubscribePubsub, Topic: "topic0", Route: runtimePubsub.Subscription{ Rules: []*runtimePubsub.Rule{{Path: "orders"}}, BulkSubscribe: &runtimePubsub.BulkSubscribe{ Enabled: true, }, }, }) require.NoError(t, err) t.Cleanup(ps.Stop) _, err = comp.BulkPublish(context.TODO(), &contribpubsub.BulkPublishRequest{ PubsubName: testBulkSubscribePubsub, Topic: "topic0", Entries: msgArr, }) require.NoError(t, err) expectedResponse := BulkResponseExpectation{ Responses: []BulkResponseEntryExpectation{ {EntryId: "1111111a", IsError: true}, {EntryId: "2222222b", IsError: true}, {EntryId: "333333c", IsError: false}, {EntryId: "4444444d", IsError: true}, {EntryId: "5555555e", IsError: true}, }, } assert.True(t, verifyBulkSubscribeResponses(expectedResponse, comp.bulkReponse.Statuses)) }) t.Run("GRPC - verify bulk Subscribe Response when error while fetching Entry due to wrong dataContentType", func(t *testing.T) { comp := &mockSubscribePubSub{ features: 
[]contribpubsub.Feature{contribpubsub.FeatureBulkPublish}, } require.NoError(t, comp.Init(context.Background(), contribpubsub.Metadata{})) port, err := freeport.GetFreePort() require.NoError(t, err) reg := registry.New(registry.NewOptions()) msgArr := getBulkMessageEntriesWithWrongData() responseEntries := make([]*runtimev1pb.TopicEventBulkResponseEntry, 5) for k, msg := range msgArr { responseEntries[k] = &runtimev1pb.TopicEventBulkResponseEntry{ EntryId: msg.EntryId, } } // create mock application server first mockServer := &channelt.MockServer{ BulkResponsePerPath: nil, Error: nil, } grpcServer := startTestAppCallbackAlphaGRPCServer(t, port, mockServer) if grpcServer != nil { // properly stop the gRPC server defer grpcServer.Stop() } grpc := manager.NewManager(nil, modes.StandaloneMode, &manager.AppChannelConfig{Port: port}) mockAppChannel := channels.New(channels.Options{ ComponentStore: compstore.New(), Registry: reg, GlobalConfig: new(config.Configuration), GRPC: grpc, AppConnectionConfig: config.AppConnectionConfig{Port: port}, }) require.NoError(t, mockAppChannel.Refresh()) ps, err := New(Options{ Resiliency: resiliency.New(log), IsHTTP: false, GRPC: grpc, Channels: mockAppChannel, PubSub: &runtimePubsub.PubsubItem{Component: comp}, AppID: TestRuntimeConfigID, PubSubName: testBulkSubscribePubsub, Topic: "topic0", Route: runtimePubsub.Subscription{ Rules: []*runtimePubsub.Rule{{Path: "orders"}}, BulkSubscribe: &runtimePubsub.BulkSubscribe{ Enabled: true, }, }, }) require.NoError(t, err) t.Cleanup(ps.Stop) _, err = comp.BulkPublish(context.TODO(), &contribpubsub.BulkPublishRequest{ PubsubName: testBulkSubscribePubsub, Topic: "topic0", Entries: msgArr, }) require.NoError(t, err) expectedResponse := BulkResponseExpectation{ Responses: []BulkResponseEntryExpectation{ {EntryId: "1", IsError: true}, }, } assert.True(t, verifyBulkSubscribeResponses(expectedResponse, comp.bulkReponse.Statuses)) }) } func startTestAppCallbackAlphaGRPCServer(t *testing.T, port int, mockServer *channelt.MockServer) *grpc.Server { lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) require.NoError(t, err) grpcServer := grpc.NewServer() go func() { runtimev1pb.RegisterAppCallbackServer(grpcServer, mockServer) runtimev1pb.RegisterAppCallbackAlphaServer(grpcServer, mockServer) if err := grpcServer.Serve(lis); err != nil { panic(err) } }() // wait until server starts time.Sleep(testinggrpc.MaxGRPCServerUptime) return grpcServer } func setBulkResponseStatus(responses []*runtimev1pb.TopicEventBulkResponseEntry, status ...runtimev1pb.TopicEventResponse_TopicEventResponseStatus, ) []*runtimev1pb.TopicEventBulkResponseEntry { for i, s := range status { responses[i].Status = s } return responses } type BulkResponseEntryExpectation struct { EntryId string //nolint:stylecheck IsError bool } type BulkResponseExpectation struct { Responses []BulkResponseEntryExpectation } func verifyBulkSubscribeResponses(expected BulkResponseExpectation, actual []contribpubsub.BulkSubscribeResponseEntry) bool { for i, expectedEntryResponse := range expected.Responses { if expectedEntryResponse.EntryId != actual[i].EntryId { return false } if (actual[i].Error != nil) != expectedEntryResponse.IsError { return false } } return true } func verifyIfEventContainsStrings(event []byte, elems ...string) bool { for _, elem := range elems { if !strings.Contains(string(event), elem) { return false } } return true } func verifyIfEventNotContainsStrings(event []byte, elems ...string) bool { for _, elem := range elems { if 
strings.Contains(string(event), elem) { return false } } return true } func verifyBulkSubscribeRequest(expectedData []string, expectedExtension ExpectedExtension, actual *runtimev1pb.TopicEventBulkRequest, ) bool { for i, expectedEntryReq := range expectedData { if expectedEntryReq != string(actual.GetEntries()[i].GetCloudEvent().GetData()) || actual.GetEntries()[i].GetCloudEvent().GetExtensions().GetFields()[expectedExtension.extKey].GetStringValue() != expectedExtension.extValue { return false } } return true } func assertItemExistsOnce(collection []contribpubsub.BulkSubscribeResponseEntry, items ...string) error { count := 0 for _, item := range items { for _, c := range collection { if c.EntryId == item { count++ } } if count != 1 { return fmt.Errorf("item %s not found or found more than once", item) } count = 0 } return nil } // mockSubscribePubSub is an in-memory pubsub component. type mockSubscribePubSub struct { bulkHandlers map[string]contribpubsub.BulkHandler handlers map[string]contribpubsub.Handler pubCount map[string]int bulkPubCount map[string]int isBulkSubscribe bool bulkReponse contribpubsub.BulkSubscribeResponse features []contribpubsub.Feature } // Init is a mock initialization method. func (m *mockSubscribePubSub) Init(ctx context.Context, metadata contribpubsub.Metadata) error { m.bulkHandlers = make(map[string]contribpubsub.BulkHandler) m.handlers = make(map[string]contribpubsub.Handler) m.pubCount = make(map[string]int) m.bulkPubCount = make(map[string]int) return nil } // Publish is a mock publish method. It immediately triggers the handler if the topic is subscribed. func (m *mockSubscribePubSub) Publish(ctx context.Context, req *contribpubsub.PublishRequest) error { m.pubCount[req.Topic]++ var err error if handler, ok := m.handlers[req.Topic]; ok { pubsubMsg := &contribpubsub.NewMessage{ Data: req.Data, Topic: req.Topic, } handler(context.Background(), pubsubMsg) } else if bulkHandler, ok := m.bulkHandlers[req.Topic]; ok { m.bulkPubCount[req.Topic]++ nbei := contribpubsub.BulkMessageEntry{ EntryId: "0", Event: req.Data, } msgArr := []contribpubsub.BulkMessageEntry{nbei} nbm := &contribpubsub.BulkMessage{ Entries: msgArr, Topic: req.Topic, } _, err = bulkHandler(context.Background(), nbm) } return err } // BulkPublish is a mock bulk publish method. It immediately calls the handler for each event in the request if the topic is subscribed. func (m *mockSubscribePubSub) BulkPublish(_ context.Context, req *contribpubsub.BulkPublishRequest) (contribpubsub.BulkPublishResponse, error) { m.bulkPubCount[req.Topic]++ res := contribpubsub.BulkPublishResponse{} if handler, ok := m.handlers[req.Topic]; ok { for _, entry := range req.Entries { m.pubCount[req.Topic]++ // TODO: this needs to be modified as part of the BulkSubscribe dead-letter test pubsubMsg := &contribpubsub.NewMessage{ Data: entry.Event, Topic: req.Topic, } handler(context.Background(), pubsubMsg) } } else if bulkHandler, ok := m.bulkHandlers[req.Topic]; ok { nbm := &contribpubsub.BulkMessage{ Entries: req.Entries, Topic: req.Topic, } bulkResponses, err := bulkHandler(context.Background(), nbm) m.bulkReponse.Statuses = bulkResponses m.bulkReponse.Error = err } return res, nil } // Subscribe is a mock subscribe method. 
func (m *mockSubscribePubSub) Subscribe(_ context.Context, req contribpubsub.SubscribeRequest, handler contribpubsub.Handler) error { m.handlers[req.Topic] = handler return nil } func (m *mockSubscribePubSub) Close() error { return nil } func (m *mockSubscribePubSub) Features() []contribpubsub.Feature { return m.features } func (m *mockSubscribePubSub) BulkSubscribe(ctx context.Context, req contribpubsub.SubscribeRequest, handler contribpubsub.BulkHandler) error { m.isBulkSubscribe = true m.bulkHandlers[req.Topic] = handler return nil } func (m *mockSubscribePubSub) GetBulkResponse() contribpubsub.BulkSubscribeResponse { return m.bulkReponse } func TestPubSubDeadLetter(t *testing.T) { const testBulkSubscribePubsub = "bulkSubscribePubSub" testDeadLetterPubsub := "failPubsub" t.Run("succeeded to publish message to dead letter when send message to app returns error", func(t *testing.T) { comp := &mockSubscribePubSub{ features: []contribpubsub.Feature{contribpubsub.FeatureBulkPublish}, } require.NoError(t, comp.Init(context.Background(), contribpubsub.Metadata{})) // Mock send message to app returns error. mockAppChannel := new(channelt.MockAppChannel) mockAppChannel. On("InvokeMethod", mock.MatchedBy(matchContextInterface), mock.Anything). Return(nil, errors.New("failed to send")) var bulkPublishedCalled string adapter := publisherfake.New().WithBulkPublishFn(func(_ context.Context, req *contribpubsub.BulkPublishRequest) (contribpubsub.BulkPublishResponse, error) { bulkPublishedCalled = req.Topic return contribpubsub.BulkPublishResponse{}, nil }) ps, err := New(Options{ Resiliency: resiliency.New(log), IsHTTP: true, Channels: new(channels.Channels).WithAppChannel(mockAppChannel), PubSub: &runtimePubsub.PubsubItem{Component: comp}, AppID: TestRuntimeConfigID, PubSubName: testBulkSubscribePubsub, Topic: "topic0", Adapter: adapter, Route: runtimePubsub.Subscription{ Rules: []*runtimePubsub.Rule{ {Path: "orders"}, }, DeadLetterTopic: "topic1", BulkSubscribe: &runtimePubsub.BulkSubscribe{ Enabled: true, }, }, }) require.NoError(t, err) t.Cleanup(ps.Stop) err = comp.Publish(context.TODO(), &contribpubsub.PublishRequest{ PubsubName: testDeadLetterPubsub, Topic: "topic0", Data: []byte(`{"id":"1"}`), }) require.NoError(t, err) assert.Equal(t, 1, comp.pubCount["topic0"]) mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1) assert.Equal(t, "topic1", bulkPublishedCalled) }) t.Run("use dead letter with resiliency", func(t *testing.T) { comp := &mockSubscribePubSub{} require.NoError(t, comp.Init(context.Background(), contribpubsub.Metadata{})) // Mock send message to app returns error. mockAppChannel := new(channelt.MockAppChannel) mockAppChannel. On("InvokeMethod", mock.MatchedBy(matchContextInterface), mock.Anything). 
Return(nil, errors.New("failed to send")) var publishedCalled string var publishedCount int adapter := publisherfake.New().WithPublishFn(func(_ context.Context, req *contribpubsub.PublishRequest) error { publishedCalled = req.Topic publishedCount++ return nil }) ps, err := New(Options{ Resiliency: resiliency.FromConfigurations(logger.NewLogger("test"), daprt.TestResiliency), IsHTTP: true, Channels: new(channels.Channels).WithAppChannel(mockAppChannel), PubSub: &runtimePubsub.PubsubItem{Component: comp}, AppID: TestRuntimeConfigID, PubSubName: testDeadLetterPubsub, Topic: "topic0", Adapter: adapter, Route: runtimePubsub.Subscription{ Rules: []*runtimePubsub.Rule{ {Path: "orders"}, }, DeadLetterTopic: "topic1", }, }) require.NoError(t, err) t.Cleanup(ps.Stop) err = comp.Publish(context.TODO(), &contribpubsub.PublishRequest{ PubsubName: testDeadLetterPubsub, Topic: "topic0", Data: []byte(`{"id":"1"}`), }) require.NoError(t, err) // Because of resiliency, the publish may be retried in some cases, so make sure the pub count is at least 1. assert.GreaterOrEqual(t, comp.pubCount["topic0"], 1) // Make sure every message that is sent to topic0 is sent to its dead letter topic1. assert.Equal(t, comp.pubCount["topic0"], publishedCount) // Each publish should result in two InvokeMethod calls: the initial delivery attempt plus the resiliency retry. mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 2*comp.pubCount["topic0"]) assert.Equal(t, "topic1", publishedCalled) }) } func matchContextInterface(v any) bool { _, ok := v.(context.Context) return ok } func matchDaprRequestMethod(method string) any { return mock.MatchedBy(func(req *invokev1.InvokeMethodRequest) bool { if req == nil || req.Message() == nil || req.Message().GetMethod() != method { return false } return true }) }
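// Illustrative sketch (not part of the original test suite): how the
// verification helpers above compose. Only types and functions already
// defined in this file are used; the test function itself is hypothetical.
func TestVerifyHelpersSketch(t *testing.T) {
	statuses := []contribpubsub.BulkSubscribeResponseEntry{
		{EntryId: "1111111a", Error: nil},
		{EntryId: "2222222b", Error: errors.New("delivery failed")},
	}
	// Each expectation pairs an entry ID with whether an error is expected.
	expected := BulkResponseExpectation{Responses: []BulkResponseEntryExpectation{
		{EntryId: "1111111a", IsError: false},
		{EntryId: "2222222b", IsError: true},
	}}
	assert.True(t, verifyBulkSubscribeResponses(expected, statuses))
	// assertItemExistsOnce fails unless every given ID appears exactly once.
	require.NoError(t, assertItemExistsOnce(statuses, "1111111a", "2222222b"))
}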
mikeee/dapr
pkg/runtime/subscription/bulksubscription_test.go
GO
mit
62,480
/* Copyright 2024 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package subscription import ( "context" "encoding/json" "errors" "fmt" "io" "net/http" "strings" "time" "go.opentelemetry.io/otel/trace" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "github.com/dapr/components-contrib/contenttype" contribpubsub "github.com/dapr/components-contrib/pubsub" diag "github.com/dapr/dapr/pkg/diagnostics" invokev1 "github.com/dapr/dapr/pkg/messaging/v1" runtimev1 "github.com/dapr/dapr/pkg/proto/runtime/v1" rterrors "github.com/dapr/dapr/pkg/runtime/errors" rtpubsub "github.com/dapr/dapr/pkg/runtime/pubsub" ) func (s *Subscription) publishMessageHTTP(ctx context.Context, msg *rtpubsub.SubscribedMessage) error { cloudEvent := msg.CloudEvent var span trace.Span req := invokev1.NewInvokeMethodRequest(msg.Path). WithHTTPExtension(http.MethodPost, ""). WithRawDataBytes(msg.Data). WithContentType(contenttype.CloudEventContentType). WithCustomHTTPMetadata(msg.Metadata) defer req.Close() iTraceID := cloudEvent[contribpubsub.TraceParentField] if iTraceID == nil { iTraceID = cloudEvent[contribpubsub.TraceIDField] } if iTraceID != nil { traceID := iTraceID.(string) sc, _ := diag.SpanContextFromW3CString(traceID) ctx, span = diag.StartInternalCallbackSpan(ctx, "pubsub/"+msg.Topic, sc, s.tracingSpec) } start := time.Now() resp, err := s.channels.AppChannel().InvokeMethod(ctx, req, "") elapsed := diag.ElapsedSince(start) if err != nil { diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, msg.PubSub, strings.ToLower(string(contribpubsub.Retry)), "", msg.Topic, elapsed) return fmt.Errorf("error returned from app channel while sending pub/sub event to app: %w", rterrors.NewRetriable(err)) } defer resp.Close() statusCode := int(resp.Status().GetCode()) if span != nil { m := diag.ConstructSubscriptionSpanAttributes(msg.Topic) diag.AddAttributesToSpan(span, m) diag.UpdateSpanStatusFromHTTPStatus(span, statusCode) span.End() } if (statusCode >= 200) && (statusCode <= 299) { // Any 2xx is considered a success. 
var appResponse contribpubsub.AppResponse err := json.NewDecoder(resp.RawData()).Decode(&appResponse) if err != nil { if errors.Is(err, io.EOF) { log.Debugf("skipping status check due to empty response body from pub/sub event %v", cloudEvent[contribpubsub.IDField]) } else { log.Debugf("skipping status check due to error parsing result from pub/sub event %v: %s", cloudEvent[contribpubsub.IDField], err) } diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, msg.PubSub, strings.ToLower(string(contribpubsub.Success)), "", msg.Topic, elapsed) return nil } switch appResponse.Status { case "": // Consider empty status field as success fallthrough case contribpubsub.Success: diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, msg.PubSub, strings.ToLower(string(contribpubsub.Success)), "", msg.Topic, elapsed) return nil case contribpubsub.Retry: diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, msg.PubSub, strings.ToLower(string(contribpubsub.Retry)), "", msg.Topic, elapsed) // TODO: add retry error info return fmt.Errorf("RETRY status returned from app while processing pub/sub event %v: %w", cloudEvent[contribpubsub.IDField], rterrors.NewRetriable(nil)) case contribpubsub.Drop: diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, msg.PubSub, strings.ToLower(string(contribpubsub.Drop)), strings.ToLower(string(contribpubsub.Success)), msg.Topic, elapsed) log.Warnf("DROP status returned from app while processing pub/sub event %v", cloudEvent[contribpubsub.IDField]) return rtpubsub.ErrMessageDropped } // Consider unknown status field as error and retry diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, msg.PubSub, strings.ToLower(string(contribpubsub.Retry)), "", msg.Topic, elapsed) return fmt.Errorf("unknown status returned from app while processing pub/sub event %v, status: %v, err: %w", cloudEvent[contribpubsub.IDField], appResponse.Status, rterrors.NewRetriable(nil)) } body, _ := resp.RawDataFull() if statusCode == http.StatusNotFound { // These are errors that are not retriable, for now it is just 404 but more status codes can be added. // When adding/removing an error here, check if that is also applicable to GRPC since there is a mapping between HTTP and GRPC errors: // https://cloud.google.com/apis/design/errors#handling_errors log.Errorf("non-retriable error returned from app while processing pub/sub event %v: %s. status code returned: %v", cloudEvent[contribpubsub.IDField], body, statusCode) diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, msg.PubSub, strings.ToLower(string(contribpubsub.Drop)), "", msg.Topic, elapsed) return nil } // Every error from now on is a retriable error. errMsg := fmt.Sprintf("retriable error returned from app while processing pub/sub event %v, topic: %v, body: %s. 
status code returned: %v", cloudEvent[contribpubsub.IDField], cloudEvent[contribpubsub.TopicField], body, statusCode) log.Warnf(errMsg) diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, msg.PubSub, strings.ToLower(string(contribpubsub.Retry)), "", msg.Topic, elapsed) return rterrors.NewRetriable(errors.New(errMsg)) } func (s *Subscription) publishMessageGRPC(ctx context.Context, msg *rtpubsub.SubscribedMessage) error { cloudEvent := msg.CloudEvent envelope, span, err := rtpubsub.GRPCEnvelopeFromSubscriptionMessage(ctx, msg, log, s.tracingSpec) if err != nil { return err } ctx = invokev1.WithCustomGRPCMetadata(ctx, msg.Metadata) conn, err := s.grpc.GetAppClient() if err != nil { return fmt.Errorf("error while getting app client: %w", err) } clientV1 := runtimev1.NewAppCallbackClient(conn) start := time.Now() res, err := clientV1.OnTopicEvent(ctx, envelope) elapsed := diag.ElapsedSince(start) if span != nil { m := diag.ConstructSubscriptionSpanAttributes(envelope.GetTopic()) diag.AddAttributesToSpan(span, m) diag.UpdateSpanStatusFromGRPCError(span, err) span.End() } if err != nil { errStatus, hasErrStatus := status.FromError(err) if hasErrStatus && (errStatus.Code() == codes.Unimplemented) { // DROP log.Warnf("non-retriable error returned from app while processing pub/sub event %v: %s", cloudEvent[contribpubsub.IDField], err) diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, msg.PubSub, strings.ToLower(string(contribpubsub.Drop)), "", msg.Topic, elapsed) return nil } err = fmt.Errorf("error returned from app while processing pub/sub event %v: %w", cloudEvent[contribpubsub.IDField], rterrors.NewRetriable(err)) log.Debug(err) diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, msg.PubSub, strings.ToLower(string(contribpubsub.Retry)), "", msg.Topic, elapsed) // on error from application, return error for redelivery of event return err } switch res.GetStatus() { case runtimev1.TopicEventResponse_SUCCESS: //nolint:nosnakecase // on uninitialized status, this is the case it defaults to as an uninitialized status defaults to 0 which is // success from protobuf definition diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, msg.PubSub, strings.ToLower(string(contribpubsub.Success)), "", msg.Topic, elapsed) return nil case runtimev1.TopicEventResponse_RETRY: //nolint:nosnakecase diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, msg.PubSub, strings.ToLower(string(contribpubsub.Retry)), "", msg.Topic, elapsed) // TODO: add retry error info return fmt.Errorf("RETRY status returned from app while processing pub/sub event %v: %w", cloudEvent[contribpubsub.IDField], rterrors.NewRetriable(nil)) case runtimev1.TopicEventResponse_DROP: //nolint:nosnakecase log.Warnf("DROP status returned from app while processing pub/sub event %v", cloudEvent[contribpubsub.IDField]) diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, msg.PubSub, strings.ToLower(string(contribpubsub.Drop)), strings.ToLower(string(contribpubsub.Success)), msg.Topic, elapsed) return rtpubsub.ErrMessageDropped } // Consider unknown status field as error and retry diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, msg.PubSub, strings.ToLower(string(contribpubsub.Retry)), "", msg.Topic, elapsed) return fmt.Errorf("unknown status returned from app while processing pub/sub event %v, status: %v, err: %w", cloudEvent[contribpubsub.IDField], res.GetStatus(), rterrors.NewRetriable(nil)) }
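// Hypothetical distillation (not part of the original file): the app-response
// handling in publishMessageHTTP above reduces to this mapping from an
// AppResponse status to the subscription outcome. responseToError is an
// assumed helper name introduced only for illustration.
func responseToError(appResponse contribpubsub.AppResponse, eventID any) error {
	switch appResponse.Status {
	case "", contribpubsub.Success:
		// An empty status is treated as success.
		return nil
	case contribpubsub.Retry:
		// Retriable: the event is redelivered.
		return fmt.Errorf("RETRY status returned from app while processing pub/sub event %v: %w", eventID, rterrors.NewRetriable(nil))
	case contribpubsub.Drop:
		// Dropped: routed to the dead-letter topic if one is configured.
		return rtpubsub.ErrMessageDropped
	default:
		// Unknown statuses are treated as retriable errors.
		return fmt.Errorf("unknown status returned from app while processing pub/sub event %v, status: %v, err: %w", eventID, appResponse.Status, rterrors.NewRetriable(nil))
	}
}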
mikeee/dapr
pkg/runtime/subscription/publish.go
GO
mit
8,995
/* Copyright 2023 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package subscription import ( "bytes" "context" "encoding/json" "errors" "fmt" "net/http" "reflect" "testing" "github.com/phayes/freeport" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "github.com/dapr/components-contrib/contenttype" contribpubsub "github.com/dapr/components-contrib/pubsub" inmemory "github.com/dapr/components-contrib/pubsub/in-memory" "github.com/dapr/dapr/pkg/api/grpc/manager" channelt "github.com/dapr/dapr/pkg/channel/testing" "github.com/dapr/dapr/pkg/config" invokev1 "github.com/dapr/dapr/pkg/messaging/v1" "github.com/dapr/dapr/pkg/modes" runtimev1pb "github.com/dapr/dapr/pkg/proto/runtime/v1" "github.com/dapr/dapr/pkg/resiliency" "github.com/dapr/dapr/pkg/runtime/channels" "github.com/dapr/dapr/pkg/runtime/compstore" rterrors "github.com/dapr/dapr/pkg/runtime/errors" runtimePubsub "github.com/dapr/dapr/pkg/runtime/pubsub" "github.com/dapr/dapr/pkg/runtime/registry" testinggrpc "github.com/dapr/dapr/pkg/testing/grpc" "github.com/dapr/kit/logger" "github.com/dapr/kit/ptr" ) func TestErrorPublishedNonCloudEventHTTP(t *testing.T) { topic := "topic1" testPubSubMessage := &runtimePubsub.SubscribedMessage{ CloudEvent: map[string]interface{}{}, Topic: topic, Data: []byte("testing"), Metadata: map[string]string{"pubsubName": "testpubsub"}, Path: "topic1", PubSub: "testpubsub", } fakeReq := invokev1.NewInvokeMethodRequest(testPubSubMessage.Topic). WithHTTPExtension(http.MethodPost, ""). WithRawDataBytes(testPubSubMessage.Data). WithContentType(contenttype.CloudEventContentType). 
WithCustomHTTPMetadata(testPubSubMessage.Metadata) defer fakeReq.Close() ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) comp := inmemory.New(log) require.NoError(t, comp.Init(ctx, contribpubsub.Metadata{})) ps, err := New(Options{ IsHTTP: true, Resiliency: resiliency.New(logger.NewLogger("test")), Namespace: "ns1", PubSub: &runtimePubsub.PubsubItem{Component: comp}, }) require.NoError(t, err) t.Run("ok without result body", func(t *testing.T) { mockAppChannel := new(channelt.MockAppChannel) ps.channels = new(channels.Channels).WithAppChannel(mockAppChannel) // User App subscribes 1 topics via http app channel var appResp contribpubsub.AppResponse var buf bytes.Buffer require.NoError(t, json.NewEncoder(&buf).Encode(appResp)) fakeResp := invokev1.NewInvokeMethodResponse(200, "OK", nil).WithRawData(&buf) defer fakeResp.Close() mockAppChannel.On("InvokeMethod", mock.Anything, fakeReq).Return(fakeResp, nil) // act err := ps.publishMessageHTTP(context.Background(), testPubSubMessage) // assert require.NoError(t, err) }) t.Run("ok with empty body", func(t *testing.T) { log.SetOutputLevel(logger.DebugLevel) defer log.SetOutputLevel(logger.InfoLevel) mockAppChannel := new(channelt.MockAppChannel) ps.channels = new(channels.Channels).WithAppChannel(mockAppChannel) fakeResp := invokev1.NewInvokeMethodResponse(200, "OK", nil).WithRawData(nil) defer fakeResp.Close() mockAppChannel.On("InvokeMethod", mock.Anything, fakeReq).Return(fakeResp, nil) // act err := ps.publishMessageHTTP(context.Background(), testPubSubMessage) // assert require.NoError(t, err) }) t.Run("ok with retry", func(t *testing.T) { mockAppChannel := new(channelt.MockAppChannel) ps.channels = new(channels.Channels).WithAppChannel(mockAppChannel) // User App subscribes 1 topics via http app channel fakeResp := invokev1.NewInvokeMethodResponse(200, "OK", nil). WithRawDataString("{ \"status\": \"RETRY\"}"). WithContentType("application/json") defer fakeResp.Close() mockAppChannel.On("InvokeMethod", mock.Anything, fakeReq).Return(fakeResp, nil) // act err := ps.publishMessageHTTP(context.Background(), testPubSubMessage) // assert require.Error(t, err) }) t.Run("ok with drop", func(t *testing.T) { mockAppChannel := new(channelt.MockAppChannel) ps.channels = new(channels.Channels).WithAppChannel(mockAppChannel) // User App subscribes 1 topics via http app channel fakeResp := invokev1.NewInvokeMethodResponse(200, "OK", nil). WithRawDataString("{ \"status\": \"DROP\"}"). WithContentType("application/json") defer fakeResp.Close() mockAppChannel.On("InvokeMethod", mock.Anything, fakeReq).Return(fakeResp, nil) // act err := ps.publishMessageHTTP(context.Background(), testPubSubMessage) // assert assert.Equal(t, runtimePubsub.ErrMessageDropped, err) }) t.Run("ok with unknown", func(t *testing.T) { mockAppChannel := new(channelt.MockAppChannel) ps.channels = new(channels.Channels).WithAppChannel(mockAppChannel) // User App subscribes 1 topics via http app channel fakeResp := invokev1.NewInvokeMethodResponse(200, "OK", nil). WithRawDataString("{ \"status\": \"UNKNOWN\"}"). 
WithContentType("application/json") defer fakeResp.Close() mockAppChannel.On("InvokeMethod", mock.Anything, fakeReq).Return(fakeResp, nil) // act err := ps.publishMessageHTTP(context.Background(), testPubSubMessage) // assert require.Error(t, err) }) t.Run("not found response", func(t *testing.T) { mockAppChannel := new(channelt.MockAppChannel) ps.channels = new(channels.Channels).WithAppChannel(mockAppChannel) // User App subscribes 1 topics via http app channel fakeResp := invokev1.NewInvokeMethodResponse(404, "NotFound", nil) defer fakeResp.Close() mockAppChannel.On("InvokeMethod", mock.Anything, fakeReq).Return(fakeResp, nil) // act err := ps.publishMessageHTTP(context.Background(), testPubSubMessage) // assert require.NoError(t, err) }) } func TestErrorPublishedNonCloudEventGRPC(t *testing.T) { topic := "topic1" testPubSubMessage := &runtimePubsub.SubscribedMessage{ CloudEvent: map[string]interface{}{}, Topic: topic, Data: []byte("testing"), Metadata: map[string]string{"pubsubName": "testpubsub"}, Path: "topic1", } ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) comp := inmemory.New(log) require.NoError(t, comp.Init(ctx, contribpubsub.Metadata{})) ps, err := New(Options{ AppID: "test", Namespace: "ns1", PubSubName: "testpubsub", Topic: topic, IsHTTP: false, PubSub: &runtimePubsub.PubsubItem{Component: comp}, Resiliency: resiliency.New(logger.NewLogger("test")), GRPC: manager.NewManager(nil, modes.StandaloneMode, &manager.AppChannelConfig{}), }) require.NoError(t, err) testcases := []struct { Name string Status runtimev1pb.TopicEventResponse_TopicEventResponseStatus Error error ExpectError bool }{ { Name: "ok without success", Status: runtimev1pb.TopicEventResponse_SUCCESS, }, { Name: "ok with retry", Status: runtimev1pb.TopicEventResponse_RETRY, ExpectError: true, }, { Name: "ok with drop", Status: runtimev1pb.TopicEventResponse_DROP, ExpectError: true, }, { Name: "ok with unknown", Status: runtimev1pb.TopicEventResponse_TopicEventResponseStatus(999), ExpectError: true, }, { Name: "ok with error", Error: errors.New("TEST"), ExpectError: true, }, } for _, tc := range testcases { t.Run(tc.Name, func(t *testing.T) { mockClientConn := channelt.MockClientConn{ InvokeFn: func(ctx context.Context, method string, args interface{}, reply interface{}, opts ...grpc.CallOption) error { if tc.Error != nil { return tc.Error } response, ok := reply.(*runtimev1pb.TopicEventResponse) if !ok { return fmt.Errorf("unexpected reply type: %s", reflect.TypeOf(reply)) } response.Status = tc.Status return nil }, } ps.grpc.SetAppClientConn(&mockClientConn) err := ps.publishMessageGRPC(context.Background(), testPubSubMessage) if tc.ExpectError { require.Error(t, err) } else { require.NoError(t, err) } }) } } func TestOnNewPublishedMessage(t *testing.T) { topic := "topic1" envelope := contribpubsub.NewCloudEventsEnvelope("", "", contribpubsub.DefaultCloudEventType, "", topic, "testpubsub2", "", []byte("Test Message"), "", "") b, err := json.Marshal(envelope) require.NoError(t, err) testPubSubMessage := &runtimePubsub.SubscribedMessage{ CloudEvent: envelope, Topic: topic, Data: b, Metadata: map[string]string{"pubsubName": "testpubsub"}, Path: "topic1", } fakeReq := invokev1.NewInvokeMethodRequest(testPubSubMessage.Topic). WithHTTPExtension(http.MethodPost, ""). WithRawDataBytes(testPubSubMessage.Data). WithContentType(contenttype.CloudEventContentType). 
WithCustomHTTPMetadata(testPubSubMessage.Metadata) defer fakeReq.Close() ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) comp := inmemory.New(log) require.NoError(t, comp.Init(ctx, contribpubsub.Metadata{})) ps, err := New(Options{ IsHTTP: true, Resiliency: resiliency.New(logger.NewLogger("test")), Namespace: "ns1", PubSub: &runtimePubsub.PubsubItem{Component: comp}, AppID: "consumer0", }) require.NoError(t, err) t.Run("succeeded to publish message to user app with empty response", func(t *testing.T) { mockAppChannel := new(channelt.MockAppChannel) ps.channels = new(channels.Channels).WithAppChannel(mockAppChannel) // User App subscribes 1 topics via http app channel var appResp contribpubsub.AppResponse var buf bytes.Buffer require.NoError(t, json.NewEncoder(&buf).Encode(appResp)) fakeResp := invokev1.NewInvokeMethodResponse(200, "OK", nil).WithRawData(&buf) defer fakeResp.Close() mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), fakeReq).Return(fakeResp, nil) // act err := ps.publishMessageHTTP(context.Background(), testPubSubMessage) // assert require.NoError(t, err) mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1) }) t.Run("succeeded to publish message without TraceID", func(t *testing.T) { mockAppChannel := new(channelt.MockAppChannel) ps.channels = new(channels.Channels).WithAppChannel(mockAppChannel) // User App subscribes 1 topics via http app channel var appResp contribpubsub.AppResponse var buf bytes.Buffer require.NoError(t, json.NewEncoder(&buf).Encode(appResp)) fakeResp := invokev1.NewInvokeMethodResponse(200, "OK", nil).WithRawData(&buf) defer fakeResp.Close() // Generate a new envelope to avoid affecting other tests by modifying shared `envelope` envelopeNoTraceID := contribpubsub.NewCloudEventsEnvelope( "", "", contribpubsub.DefaultCloudEventType, "", topic, "testpubsub2", "", []byte("Test Message"), "", "") delete(envelopeNoTraceID, contribpubsub.TraceIDField) bNoTraceID, err := json.Marshal(envelopeNoTraceID) require.NoError(t, err) message := &runtimePubsub.SubscribedMessage{ CloudEvent: envelopeNoTraceID, Topic: topic, Data: bNoTraceID, Metadata: map[string]string{"pubsubName": "testpubsub"}, Path: "topic1", } fakeReqNoTraceID := invokev1.NewInvokeMethodRequest(message.Topic). WithHTTPExtension(http.MethodPost, ""). WithRawDataBytes(message.Data). WithContentType(contenttype.CloudEventContentType). WithCustomHTTPMetadata(testPubSubMessage.Metadata) defer fakeReqNoTraceID.Close() mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), fakeReqNoTraceID).Return(fakeResp, nil) // act err = ps.publishMessageHTTP(context.Background(), message) // assert require.NoError(t, err) mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1) }) t.Run("succeeded to publish message to user app with non-json response", func(t *testing.T) { mockAppChannel := new(channelt.MockAppChannel) ps.channels = new(channels.Channels).WithAppChannel(mockAppChannel) // User App subscribes 1 topics via http app channel fakeResp := invokev1.NewInvokeMethodResponse(200, "OK", nil). WithRawDataString("OK"). 
WithContentType("application/json") defer fakeResp.Close() mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), fakeReq).Return(fakeResp, nil) // act err := ps.publishMessageHTTP(context.Background(), testPubSubMessage) // assert require.NoError(t, err) mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1) }) t.Run("succeeded to publish message to user app with status", func(t *testing.T) { mockAppChannel := new(channelt.MockAppChannel) ps.channels = new(channels.Channels).WithAppChannel(mockAppChannel) // User App subscribes 1 topics via http app channel fakeResp := invokev1.NewInvokeMethodResponse(200, "OK", nil). WithRawDataString("{ \"status\": \"SUCCESS\"}"). WithContentType("application/json") defer fakeResp.Close() mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), fakeReq).Return(fakeResp, nil) // act err := ps.publishMessageHTTP(context.Background(), testPubSubMessage) // assert require.NoError(t, err) mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1) }) t.Run("succeeded to publish message to user app but app ask for retry", func(t *testing.T) { mockAppChannel := new(channelt.MockAppChannel) ps.channels = new(channels.Channels).WithAppChannel(mockAppChannel) // User App subscribes 1 topics via http app channel fakeResp := invokev1.NewInvokeMethodResponse(200, "OK", nil). WithRawDataString("{ \"status\": \"RETRY\"}"). WithContentType("application/json") defer fakeResp.Close() mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), fakeReq).Return(fakeResp, nil) // act err := ps.publishMessageHTTP(context.Background(), testPubSubMessage) // assert var cloudEvent map[string]interface{} json.Unmarshal(testPubSubMessage.Data, &cloudEvent) expectedClientError := fmt.Errorf("RETRY status returned from app while processing pub/sub event %v: %w", cloudEvent["id"].(string), rterrors.NewRetriable(nil)) assert.Equal(t, expectedClientError.Error(), err.Error()) mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1) }) t.Run("succeeded to publish message to user app but app ask to drop", func(t *testing.T) { mockAppChannel := new(channelt.MockAppChannel) ps.channels = new(channels.Channels).WithAppChannel(mockAppChannel) // User App subscribes 1 topics via http app channel fakeResp := invokev1.NewInvokeMethodResponse(200, "OK", nil). WithRawDataString("{ \"status\": \"DROP\"}"). WithContentType("application/json") defer fakeResp.Close() mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), fakeReq).Return(fakeResp, nil) // act err := ps.publishMessageHTTP(context.Background(), testPubSubMessage) // assert assert.Equal(t, runtimePubsub.ErrMessageDropped, err) mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1) }) t.Run("succeeded to publish message to user app but app returned unknown status code", func(t *testing.T) { mockAppChannel := new(channelt.MockAppChannel) ps.channels = new(channels.Channels).WithAppChannel(mockAppChannel) // User App subscribes 1 topics via http app channel fakeResp := invokev1.NewInvokeMethodResponse(200, "OK", nil). WithRawDataString("{ \"status\": \"not_valid\"}"). 
WithContentType("application/json") defer fakeResp.Close() mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), fakeReq).Return(fakeResp, nil) // act err := ps.publishMessageHTTP(context.Background(), testPubSubMessage) // assert require.Error(t, err, "expected error on unknown status") mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1) }) t.Run("succeeded to publish message to user app but app returned empty status code", func(t *testing.T) { mockAppChannel := new(channelt.MockAppChannel) ps.channels = new(channels.Channels).WithAppChannel(mockAppChannel) // User App subscribes 1 topics via http app channel fakeResp := invokev1.NewInvokeMethodResponse(200, "OK", nil). WithRawDataString("{ \"message\": \"empty status\"}"). WithContentType("application/json") defer fakeResp.Close() mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), fakeReq).Return(fakeResp, nil) // act err := ps.publishMessageHTTP(context.Background(), testPubSubMessage) // assert require.NoError(t, err, "expected no error on empty status") mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1) }) t.Run("succeeded to publish message to user app and app returned unexpected json response", func(t *testing.T) { mockAppChannel := new(channelt.MockAppChannel) ps.channels = new(channels.Channels).WithAppChannel(mockAppChannel) // User App subscribes 1 topics via http app channel fakeResp := invokev1.NewInvokeMethodResponse(200, "OK", nil). WithRawDataString("{ \"message\": \"success\"}"). WithContentType("application/json") defer fakeResp.Close() mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), fakeReq).Return(fakeResp, nil) // act err := ps.publishMessageHTTP(context.Background(), testPubSubMessage) // assert require.NoError(t, err, "expected no error on unknown status") mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1) }) t.Run("failed to publish message error on invoking method", func(t *testing.T) { mockAppChannel := new(channelt.MockAppChannel) ps.channels = new(channels.Channels).WithAppChannel(mockAppChannel) invokeError := errors.New("error invoking method") mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), fakeReq).Return(nil, invokeError) // act err := ps.publishMessageHTTP(context.Background(), testPubSubMessage) // assert expectedError := fmt.Errorf("error returned from app channel while sending pub/sub event to app: %w", rterrors.NewRetriable(invokeError)) assert.Equal(t, expectedError.Error(), err.Error(), "expected errors to match") mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1) }) t.Run("failed to publish message to user app with 404", func(t *testing.T) { mockAppChannel := new(channelt.MockAppChannel) ps.channels = new(channels.Channels).WithAppChannel(mockAppChannel) fakeResp := invokev1.NewInvokeMethodResponse(404, "Not Found", nil). WithRawDataString("Not found"). WithContentType("application/json") defer fakeResp.Close() mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), fakeReq).Return(fakeResp, nil) // act err := ps.publishMessageHTTP(context.Background(), testPubSubMessage) // assert require.NoError(t, err, "expected error to be nil") mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1) }) t.Run("failed to publish message to user app with 500", func(t *testing.T) { mockAppChannel := new(channelt.MockAppChannel) ps.channels = new(channels.Channels).WithAppChannel(mockAppChannel) fakeResp := invokev1.NewInvokeMethodResponse(500, "Internal Error", nil). 
WithRawDataString("Internal Error"). WithContentType("application/json") defer fakeResp.Close() mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), fakeReq).Return(fakeResp, nil) // act err := ps.publishMessageHTTP(context.Background(), testPubSubMessage) // assert var cloudEvent map[string]interface{} json.Unmarshal(testPubSubMessage.Data, &cloudEvent) errMsg := fmt.Sprintf("retriable error returned from app while processing pub/sub event %v, topic: %v, body: Internal Error. status code returned: 500", cloudEvent["id"].(string), cloudEvent["topic"]) expectedClientError := rterrors.NewRetriable(errors.New(errMsg)) assert.Equal(t, expectedClientError.Error(), err.Error()) mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1) }) } func TestOnNewPublishedMessageGRPC(t *testing.T) { topic := "topic1" envelope := contribpubsub.NewCloudEventsEnvelope("", "", contribpubsub.DefaultCloudEventType, "", topic, "testpubsub2", "", []byte("Test Message"), "", "") // add custom attributes envelope["customInt"] = 123 envelope["customString"] = "abc" envelope["customBool"] = true envelope["customFloat"] = 1.23 envelope["customArray"] = []interface{}{"a", "b", 789, 3.1415} envelope["customMap"] = map[string]interface{}{"a": "b", "c": 456} b, err := json.Marshal(envelope) require.NoError(t, err) testPubSubMessage := &runtimePubsub.SubscribedMessage{ CloudEvent: envelope, Topic: topic, Data: b, Metadata: map[string]string{"pubsubName": "testpubsub"}, Path: "topic1", } envelope = contribpubsub.NewCloudEventsEnvelope("", "", contribpubsub.DefaultCloudEventType, "", topic, "testpubsub2", "application/octet-stream", []byte{0x1}, "", "") // add custom attributes envelope["customInt"] = 123 envelope["customString"] = "abc" envelope["customBool"] = true envelope["customFloat"] = 1.23 envelope["customArray"] = []interface{}{"a", "b", 789, 3.1415} envelope["customMap"] = map[string]interface{}{"a": "b", "c": 456} base64, err := json.Marshal(envelope) require.NoError(t, err) testPubSubMessageBase64 := &runtimePubsub.SubscribedMessage{ CloudEvent: envelope, Topic: topic, Data: base64, Metadata: map[string]string{"pubsubName": "testpubsub"}, Path: "topic1", } testCases := []struct { name string message *runtimePubsub.SubscribedMessage responseStatus runtimev1pb.TopicEventResponse_TopicEventResponseStatus expectedError error noResponseStatus bool responseError error validateCloudEventExtension *map[string]interface{} }{ { name: "failed to publish message to user app with unimplemented error", message: testPubSubMessage, noResponseStatus: true, responseError: status.Errorf(codes.Unimplemented, "unimplemented method"), }, { name: "failed to publish message to user app with response error", message: testPubSubMessage, noResponseStatus: true, responseError: assert.AnError, expectedError: fmt.Errorf( "error returned from app while processing pub/sub event %v: %w", testPubSubMessage.CloudEvent[contribpubsub.IDField], rterrors.NewRetriable(status.Error(codes.Unknown, assert.AnError.Error())), ), }, { name: "succeeded to publish message to user app with empty response", message: testPubSubMessage, noResponseStatus: true, }, { name: "succeeded to publish message to user app with success response", message: testPubSubMessage, responseStatus: runtimev1pb.TopicEventResponse_SUCCESS, }, { name: "succeeded to publish message to user app with base64 encoded cloud event", message: testPubSubMessageBase64, responseStatus: runtimev1pb.TopicEventResponse_SUCCESS, }, { name: "succeeded to publish message to user app 
with retry", message: testPubSubMessage, responseStatus: runtimev1pb.TopicEventResponse_RETRY, expectedError: fmt.Errorf( "RETRY status returned from app while processing pub/sub event %v: %w", testPubSubMessage.CloudEvent[contribpubsub.IDField], rterrors.NewRetriable(nil), ), }, { name: "succeeded to publish message to user app with drop", message: testPubSubMessage, responseStatus: runtimev1pb.TopicEventResponse_DROP, expectedError: runtimePubsub.ErrMessageDropped, }, { name: "succeeded to publish message to user app with invalid response", message: testPubSubMessage, responseStatus: runtimev1pb.TopicEventResponse_TopicEventResponseStatus(99), expectedError: fmt.Errorf( "unknown status returned from app while processing pub/sub event %v, status: %v, err: %w", testPubSubMessage.CloudEvent[contribpubsub.IDField], runtimev1pb.TopicEventResponse_TopicEventResponseStatus(99), rterrors.NewRetriable(nil), ), }, { name: "succeeded to publish message to user app and validated cloud event extension attributes", message: testPubSubMessage, responseStatus: runtimev1pb.TopicEventResponse_SUCCESS, validateCloudEventExtension: ptr.Of(map[string]interface{}{ "customInt": float64(123), "customString": "abc", "customBool": true, "customFloat": float64(1.23), "customArray": []interface{}{"a", "b", float64(789), float64(3.1415)}, "customMap": map[string]interface{}{"a": "b", "c": float64(456)}, }), }, { name: "succeeded to publish message to user app and validated cloud event extension attributes with base64 encoded data", message: testPubSubMessageBase64, responseStatus: runtimev1pb.TopicEventResponse_SUCCESS, validateCloudEventExtension: ptr.Of(map[string]interface{}{ "customInt": float64(123), "customString": "abc", "customBool": true, "customFloat": float64(1.23), "customArray": []interface{}{"a", "b", float64(789), float64(3.1415)}, "customMap": map[string]interface{}{"a": "b", "c": float64(456)}, }), }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { // setup // getting new port for every run to avoid conflict and timing issues between tests if sharing same port port, err := freeport.GetFreePort() require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) comp := inmemory.New(log) require.NoError(t, comp.Init(ctx, contribpubsub.Metadata{})) reg := registry.New(registry.NewOptions()) ps, err := New(Options{ IsHTTP: true, Resiliency: resiliency.New(logger.NewLogger("test")), Namespace: "ns1", PubSub: &runtimePubsub.PubsubItem{Component: comp}, Topic: topic, PubSubName: "testpubsub", AppID: "consumer0", GRPC: manager.NewManager(nil, modes.StandaloneMode, &manager.AppChannelConfig{Port: port}), }) require.NoError(t, err) var grpcServer *grpc.Server // create mock application server first if !tc.noResponseStatus { grpcServer = testinggrpc.StartTestAppCallbackGRPCServer(t, port, &channelt.MockServer{ TopicEventResponseStatus: tc.responseStatus, Error: tc.responseError, ValidateCloudEventExtension: tc.validateCloudEventExtension, }) } else { grpcServer = testinggrpc.StartTestAppCallbackGRPCServer(t, port, &channelt.MockServer{ Error: tc.responseError, ValidateCloudEventExtension: tc.validateCloudEventExtension, }) } if grpcServer != nil { // properly stop the gRPC server defer grpcServer.Stop() } grpc := manager.NewManager(nil, modes.StandaloneMode, &manager.AppChannelConfig{Port: port}) ps.channels = channels.New(channels.Options{ Registry: reg, ComponentStore: compstore.New(), GlobalConfig: new(config.Configuration), AppConnectionConfig: 
config.AppConnectionConfig{Port: port}, GRPC: grpc, }) require.NoError(t, ps.channels.Refresh()) ps.grpc = grpc // act err = ps.publishMessageGRPC(context.Background(), tc.message) // assert if tc.expectedError != nil { assert.Equal(t, err.Error(), tc.expectedError.Error()) } else { require.NoError(t, err) } }) } }
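// Hypothetical convenience helper (not part of the original file): the tests
// above repeatedly build a 200 response whose JSON body carries an AppResponse
// status. This sketch consolidates that pattern using only APIs already
// exercised in this file; the caller still owns closing the response.
func fakeStatusResponse(status string) *invokev1.InvokeMethodResponse {
	return invokev1.NewInvokeMethodResponse(200, "OK", nil).
		WithRawDataString(fmt.Sprintf(`{ "status": %q }`, status)).
		WithContentType("application/json")
}

// For example, fakeStatusResponse("RETRY") could stand in for the inline
// response literals used in the subtests above.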
mikeee/dapr
pkg/runtime/subscription/publish_test.go
GO
mit
28,156
/* Copyright 2024 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package subscription import ( "context" "encoding/json" "errors" "fmt" "strings" "github.com/dapr/components-contrib/metadata" contribpubsub "github.com/dapr/components-contrib/pubsub" "github.com/dapr/dapr/pkg/api/grpc/manager" "github.com/dapr/dapr/pkg/config" diag "github.com/dapr/dapr/pkg/diagnostics" "github.com/dapr/dapr/pkg/resiliency" "github.com/dapr/dapr/pkg/runtime/channels" rterrors "github.com/dapr/dapr/pkg/runtime/errors" rtpubsub "github.com/dapr/dapr/pkg/runtime/pubsub" "github.com/dapr/kit/logger" ) type Options struct { AppID string Namespace string PubSubName string Topic string IsHTTP bool PubSub *rtpubsub.PubsubItem Resiliency resiliency.Provider TraceSpec *config.TracingSpec Route rtpubsub.Subscription Channels *channels.Channels GRPC *manager.Manager Adapter rtpubsub.Adapter AdapterStreamer rtpubsub.AdapterStreamer } type Subscription struct { appID string namespace string pubsubName string topic string isHTTP bool pubsub *rtpubsub.PubsubItem resiliency resiliency.Provider route rtpubsub.Subscription tracingSpec *config.TracingSpec channels *channels.Channels grpc *manager.Manager adapter rtpubsub.Adapter adapterStreamer rtpubsub.AdapterStreamer cancel func() } var log = logger.NewLogger("dapr.runtime.processor.pubsub.subscription") func New(opts Options) (*Subscription, error) { allowed := rtpubsub.IsOperationAllowed(opts.Topic, opts.PubSub, opts.PubSub.ScopedSubscriptions) if !allowed { return nil, fmt.Errorf("subscription to topic '%s' on pubsub '%s' is not allowed", opts.Topic, opts.PubSubName) } ctx, cancel := context.WithCancel(context.Background()) s := &Subscription{ appID: opts.AppID, namespace: opts.Namespace, pubsubName: opts.PubSubName, topic: opts.Topic, isHTTP: opts.IsHTTP, pubsub: opts.PubSub, resiliency: opts.Resiliency, route: opts.Route, tracingSpec: opts.TraceSpec, channels: opts.Channels, grpc: opts.GRPC, cancel: cancel, adapter: opts.Adapter, adapterStreamer: opts.AdapterStreamer, } name := s.pubsubName route := s.route policyDef := s.resiliency.ComponentInboundPolicy(name, resiliency.Pubsub) routeMetadata := route.Metadata namespaced := s.pubsub.NamespaceScoped if route.BulkSubscribe != nil && route.BulkSubscribe.Enabled { err := s.bulkSubscribeTopic(ctx, policyDef) if err != nil { cancel() return nil, fmt.Errorf("failed to bulk subscribe to topic %s: %w", s.topic, err) } return s, nil } // TODO: @joshvanl: move subsscribedTopic to struct subscribeTopic := s.topic if namespaced { subscribeTopic = s.namespace + s.topic } err := s.pubsub.Component.Subscribe(ctx, contribpubsub.SubscribeRequest{ Topic: subscribeTopic, Metadata: routeMetadata, }, func(ctx context.Context, msg *contribpubsub.NewMessage) error { if msg.Metadata == nil { msg.Metadata = make(map[string]string, 1) } msg.Metadata[rtpubsub.MetadataKeyPubSub] = name msgTopic := msg.Topic if s.pubsub.NamespaceScoped { msgTopic = strings.Replace(msgTopic, s.namespace, "", 1) } rawPayload, err := metadata.IsRawPayload(route.Metadata) if err != nil { 
log.Errorf("error deserializing pubsub metadata: %s", err) if route.DeadLetterTopic != "" { if dlqErr := s.sendToDeadLetter(ctx, name, msg, route.DeadLetterTopic); dlqErr == nil { // dlq has been configured and message is successfully sent to dlq. diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, name, strings.ToLower(string(contribpubsub.Drop)), "", msgTopic, 0) return nil } } diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, name, strings.ToLower(string(contribpubsub.Retry)), "", msgTopic, 0) return err } var cloudEvent map[string]interface{} data := msg.Data if rawPayload { cloudEvent = contribpubsub.FromRawPayload(msg.Data, msgTopic, name) data, err = json.Marshal(cloudEvent) if err != nil { log.Errorf("error serializing cloud event in pubsub %s and topic %s: %s", name, msgTopic, err) if route.DeadLetterTopic != "" { if dlqErr := s.sendToDeadLetter(ctx, name, msg, route.DeadLetterTopic); dlqErr == nil { // dlq has been configured and message is successfully sent to dlq. diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, name, strings.ToLower(string(contribpubsub.Drop)), "", msgTopic, 0) return nil } } diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, name, strings.ToLower(string(contribpubsub.Retry)), "", msgTopic, 0) return err } } else { err = json.Unmarshal(msg.Data, &cloudEvent) if err != nil { log.Errorf("error deserializing cloud event in pubsub %s and topic %s: %s", name, msgTopic, err) if route.DeadLetterTopic != "" { if dlqErr := s.sendToDeadLetter(ctx, name, msg, route.DeadLetterTopic); dlqErr == nil { // dlq has been configured and message is successfully sent to dlq. diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, name, strings.ToLower(string(contribpubsub.Drop)), "", msgTopic, 0) return nil } } diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, name, strings.ToLower(string(contribpubsub.Retry)), "", msgTopic, 0) return err } } if contribpubsub.HasExpired(cloudEvent) { log.Warnf("dropping expired pub/sub event %v as of %v", cloudEvent[contribpubsub.IDField], cloudEvent[contribpubsub.ExpirationField]) diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, name, strings.ToLower(string(contribpubsub.Drop)), "", msgTopic, 0) if route.DeadLetterTopic != "" { _ = s.sendToDeadLetter(ctx, name, msg, route.DeadLetterTopic) } return nil } routePath, shouldProcess, err := findMatchingRoute(route.Rules, cloudEvent) if err != nil { log.Errorf("error finding matching route for event %v in pubsub %s and topic %s: %s", cloudEvent[contribpubsub.IDField], name, msgTopic, err) if route.DeadLetterTopic != "" { if dlqErr := s.sendToDeadLetter(ctx, name, msg, route.DeadLetterTopic); dlqErr == nil { // dlq has been configured and message is successfully sent to dlq. diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, name, strings.ToLower(string(contribpubsub.Drop)), "", msgTopic, 0) return nil } } diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, name, strings.ToLower(string(contribpubsub.Retry)), "", msgTopic, 0) return err } if !shouldProcess { // The event does not match any route specified so ignore it. 
log.Debugf("no matching route for event %v in pubsub %s and topic %s; skipping", cloudEvent[contribpubsub.IDField], name, msgTopic) diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, name, strings.ToLower(string(contribpubsub.Drop)), strings.ToLower(string(contribpubsub.Success)), msgTopic, 0) if route.DeadLetterTopic != "" { _ = s.sendToDeadLetter(ctx, name, msg, route.DeadLetterTopic) } return nil } sm := &rtpubsub.SubscribedMessage{ CloudEvent: cloudEvent, Data: data, Topic: msgTopic, Metadata: msg.Metadata, Path: routePath, PubSub: name, } policyRunner := resiliency.NewRunner[any](context.Background(), policyDef) _, err = policyRunner(func(ctx context.Context) (any, error) { var pErr error if s.adapterStreamer != nil { pErr = s.adapterStreamer.Publish(ctx, sm) } else { if s.isHTTP { pErr = s.publishMessageHTTP(ctx, sm) } else { pErr = s.publishMessageGRPC(ctx, sm) } } var rErr *rterrors.RetriableError if errors.As(pErr, &rErr) { log.Warnf("encountered a retriable error while publishing a subscribed message to topic %s, err: %v", msgTopic, rErr.Unwrap()) } else if errors.Is(pErr, rtpubsub.ErrMessageDropped) { // send dropped message to dead letter queue if configured if route.DeadLetterTopic != "" { derr := s.sendToDeadLetter(ctx, name, msg, route.DeadLetterTopic) if derr != nil { log.Warnf("failed to send dropped message to dead letter queue for topic %s: %v", msgTopic, derr) } } return nil, nil } else if pErr != nil { log.Errorf("encountered a non-retriable error while publishing a subscribed message to topic %s, err: %v", msgTopic, pErr) } return nil, pErr }) if err != nil && err != context.Canceled { // Sending msg to dead letter queue. // If no DLQ is configured, return error for backwards compatibility (component-level retry). if route.DeadLetterTopic == "" { return err } _ = s.sendToDeadLetter(ctx, name, msg, route.DeadLetterTopic) return nil } return err }) if err != nil { cancel() return nil, fmt.Errorf("failed to subscribe to topic %s: %w", s.topic, err) } return s, nil } func (s *Subscription) Stop() { s.cancel() } func (s *Subscription) sendToDeadLetter(ctx context.Context, name string, msg *contribpubsub.NewMessage, deadLetterTopic string) error { req := &contribpubsub.PublishRequest{ Data: msg.Data, PubsubName: name, Topic: deadLetterTopic, Metadata: msg.Metadata, ContentType: msg.ContentType, } if err := s.adapter.Publish(ctx, req); err != nil { log.Errorf("error sending message to dead letter, origin topic: %s dead letter topic %s err: %w", msg.Topic, deadLetterTopic, err) return err } return nil } // findMatchingRoute selects the path based on routing rules. If there are // no matching rules, the route-level path is used. func findMatchingRoute(rules []*rtpubsub.Rule, cloudEvent interface{}) (path string, shouldProcess bool, err error) { hasRules := len(rules) > 0 if hasRules { data := map[string]interface{}{ "event": cloudEvent, } rule, err := matchRoutingRule(rules, data) if err != nil { return "", false, err } if rule != nil { return rule.Path, true, nil } } return "", false, nil } func matchRoutingRule(rules []*rtpubsub.Rule, data map[string]interface{}) (*rtpubsub.Rule, error) { for _, rule := range rules { if rule.Match == nil || len(rule.Match.String()) == 0 { return rule, nil } iResult, err := rule.Match.Eval(data) if err != nil { return nil, err } result, ok := iResult.(bool) if !ok { return nil, fmt.Errorf("the result of match expression %s was not a boolean", rule.Match) } if result { return rule, nil } } return nil, nil }
mikeee/dapr
pkg/runtime/subscription/subscription.go
GO
mit
11,197
/* Copyright 2021 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package runtime import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/sdk/resource" sdktrace "go.opentelemetry.io/otel/sdk/trace" ) // tracerProviderStore allows us to capture the trace provider options // and set a trace provider as per those settings // // This is needed because OpenTelemetry does not allow accessing // tracer provider settings after registration type tracerProviderStore interface { // RegisterExporter registers a sdktrace.SpanExporter. RegisterExporter(exporter sdktrace.SpanExporter) RegisterResource(res *resource.Resource) RegisterSampler(sampler sdktrace.Sampler) RegisterTracerProvider() *sdktrace.TracerProvider HasExporter() bool } // newOpentelemetryTracerProviderStore returns an opentelemetryTracerProviderStore func newOpentelemetryTracerProviderStore() *opentelemetryTracerProviderStore { exps := []sdktrace.SpanExporter{} return &opentelemetryTracerProviderStore{exps, nil, nil} } // opentelemetryTracerProviderStore is an implementation of tracerProviderStore type opentelemetryTracerProviderStore struct { exporters []sdktrace.SpanExporter res *resource.Resource sampler sdktrace.Sampler } // RegisterExporter adds a Span Exporter for registration with open telemetry global trace provider func (s *opentelemetryTracerProviderStore) RegisterExporter(exporter sdktrace.SpanExporter) { s.exporters = append(s.exporters, exporter) } // HasExporter returns whether at least one Span Exporter has been registered func (s *opentelemetryTracerProviderStore) HasExporter() bool { return len(s.exporters) > 0 } // RegisterResource adds a Resource for registration with open telemetry global trace provider func (s *opentelemetryTracerProviderStore) RegisterResource(res *resource.Resource) { s.res = res } // RegisterSampler adds a custom sampler for registration with open telemetry global trace provider func (s *opentelemetryTracerProviderStore) RegisterSampler(sampler sdktrace.Sampler) { s.sampler = sampler } // RegisterTracerProvider registers a tracer provider as per the tracer options in the store func (s *opentelemetryTracerProviderStore) RegisterTracerProvider() *sdktrace.TracerProvider { if len(s.exporters) != 0 { tracerOptions := []sdktrace.TracerProviderOption{} for _, exporter := range s.exporters { tracerOptions = append(tracerOptions, sdktrace.WithBatcher(exporter)) } if s.res != nil { tracerOptions = append(tracerOptions, sdktrace.WithResource(s.res)) } if s.sampler != nil { tracerOptions = append(tracerOptions, sdktrace.WithSampler(s.sampler)) } tp := sdktrace.NewTracerProvider(tracerOptions...) otel.SetTracerProvider(tp) return tp } return nil } // fakeTracerProviderStore implements tracerProviderStore by merely recording the exporters // and config that were registered/applied. // // This is only for use in unit tests.
type fakeTracerProviderStore struct { exporters []sdktrace.SpanExporter res *resource.Resource sampler sdktrace.Sampler } // newFakeTracerProviderStore returns a fakeTracerProviderStore func newFakeTracerProviderStore() *fakeTracerProviderStore { exps := []sdktrace.SpanExporter{} return &fakeTracerProviderStore{exps, nil, nil} } // RegisterExporter adds a Span Exporter for registration with open telemetry global trace provider func (s *fakeTracerProviderStore) RegisterExporter(exporter sdktrace.SpanExporter) { s.exporters = append(s.exporters, exporter) } // RegisterResource adds a Resource for registration with open telemetry global trace provider func (s *fakeTracerProviderStore) RegisterResource(res *resource.Resource) { s.res = res } // RegisterSampler adds a custom sampler for registration with open telemetry global trace provider func (s *fakeTracerProviderStore) RegisterSampler(sampler sdktrace.Sampler) { s.sampler = sampler } // RegisterTracerProvider does nothing func (s *fakeTracerProviderStore) RegisterTracerProvider() *sdktrace.TracerProvider { return nil } // HasExporter returns whether at least one Span Exporter has been registered func (s *fakeTracerProviderStore) HasExporter() bool { return len(s.exporters) > 0 }
mikeee/dapr
pkg/runtime/trace.go
GO
mit
4,674
/* Copyright 2021 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ //nolint:forbidigo package runtime import ( "fmt" "io" "net/http" "time" ) var ( timeoutSeconds = 60 requestTimeoutMillis = 500 periodMillis = 100 urlFormat = "http://localhost:%s/v1.0/healthz/outbound" ) func WaitUntilDaprOutboundReady(daprHTTPPort string) { outboundReadyHealthURL := fmt.Sprintf(urlFormat, daprHTTPPort) client := &http.Client{ Timeout: time.Duration(requestTimeoutMillis) * time.Millisecond, } println(fmt.Sprintf("Waiting for Dapr to be outbound ready (timeout: %d seconds): url=%s\n", timeoutSeconds, outboundReadyHealthURL)) var err error timeoutAt := time.Now().Add(time.Duration(timeoutSeconds) * time.Second) lastPrintErrorTime := time.Now() for time.Now().Before(timeoutAt) { err = checkIfOutboundReady(client, outboundReadyHealthURL) if err == nil { println("Dapr is outbound ready!") return } if time.Now().After(lastPrintErrorTime) { // print the error at most once per second to avoid flooding the log lastPrintErrorTime = time.Now().Add(time.Second) println(fmt.Sprintf("Dapr outbound NOT ready yet: %v", err)) } time.Sleep(time.Duration(periodMillis) * time.Millisecond) } println(fmt.Sprintf("timeout waiting for Dapr to become outbound ready. Last error: %v", err)) } func checkIfOutboundReady(client *http.Client, outboundReadyHealthURL string) error { req, err := http.NewRequest(http.MethodGet, outboundReadyHealthURL, nil) if err != nil { return err } resp, err := client.Do(req) if err != nil { return err } defer func() { _ = resp.Body.Close() }() _, err = io.ReadAll(resp.Body) if err != nil { return err } if resp.StatusCode != http.StatusNoContent { return fmt.Errorf("HTTP status code %v", resp.StatusCode) } return nil }
mikeee/dapr
pkg/runtime/wait.go
GO
mit
2,326
# Dapr Workflow Engine The Dapr Workflow engine enables developers to author workflows using code and execute them using the Dapr sidecar. You can learn more about this project here: [[Proposal] Workflow building block and engine (#4576)](https://github.com/dapr/dapr/issues/4576). This README is designed to help maintainers get started, and it will be updated with more information as the project progresses. ## Building Daprd The workflow engine is entirely encapsulated within the [dapr sidecar (a.k.a. daprd)](https://docs.dapr.io/concepts/dapr-services/sidecar/). All dependencies are compiled directly into the binary. Internally, this engine depends on the [Durable Task Framework for Go](https://github.com/microsoft/durabletask-go), an MIT-licensed open-source project for authoring workflows (or "orchestrations") as code. Use the following command to get the latest build of this dependency: ```bash go get github.com/microsoft/durabletask-go ``` Be mindful that the above command will also pull in dependencies for sqlite, which we neither want nor require. Those can be removed manually from go.mod and go.sum. The following bash command can be used to build a version of Daprd that supports the workflow engine. ```bash DEBUG=1 make build ``` * `DEBUG=1` is required to attach debuggers. This should never be set for production or performance testing workloads. After building, the following bash command can be run from the project root to test the code: ```bash ./dist/linux_amd64/debug/daprd --app-id wfapp --dapr-grpc-port 4001 --placement-host-address :6050 --components-path ~/.dapr/components/ --config ~/.dapr/config.yaml ``` * The gRPC port is set to `4001` since that's what the Durable Task test clients default to. * This assumes a placement service running locally on port `6050` (the default). * This assumes a basic actor-compatible state store is configured in `~/.dapr/components`. * You should see logs with `scope=dapr.runtime.wfengine` if the workflow engine is enabled in your build. Here's an example of the log output you'll see from Dapr when the workflow engine is enabled: ``` INFO[0000] configuring workflow engine gRPC endpoint app_id=wfapp instance=XYZ scope=dapr.runtime.wfengine type=log ver=edge INFO[0000] configuring workflow engine with actors backend app_id=wfapp instance=XYZ scope=dapr.runtime.wfengine type=log ver=edge INFO[0000] Registering component for dapr workflow engine... app_id=wfapp instance=XYZ scope=dapr.runtime.wfengine type=log ver=edge INFO[0000] Initializing Dapr workflow engine app_id=wfapp instance=XYZ scope=dapr.runtime.wfengine type=log ver=edge ``` Note that the workflow engine doesn't fully start up until an application opens a work-item stream on it, after which you'll see the following logs: ``` INFO[0146] work item stream established by user-agent: XYZ app_id=wfapp instance=XYZ scope=dapr.runtime.wfengine type=log ver=edge INFO[0146] worker started with backend dapr.actors/v1-alpha app_id=wfapp instance=XYZ scope=dapr.runtime.wfengine type=log ver=edge INFO[0146] workflow engine started app_id=wfapp instance=XYZ scope=dapr.runtime.wfengine type=log ver=edge ``` If you want to see the full set of logs, run daprd with verbose logging enabled (`--log-level debug`). You'll see a few additional logs in this case, indicating that the workflow engine is waiting for new work items: ``` DEBU[0000] orchestration-processor: waiting for new work items... 
app_id=wfapp instance=XYZ scope=dapr.runtime.wfengine type=log ver=edge DEBU[0000] activity-processor: waiting for new work items... app_id=wfapp instance=XYZ scope=dapr.runtime.wfengine type=log ver=edge ``` ## Running tests ### Unit tests Unit tests can be run using the following `go` command from the repo root. Depending on the speed of your development machine, these tests should complete in less than 30 seconds. ```bash go test ./pkg/runtime/wfengine/... -tags=unit ``` If you're using VS Code, you can also run tests directly from the IDE. ### Manual testing There are no end-to-end tests that directly target the Dapr Workflow engine yet. However, this engine is fully compatible with the .NET and Java Durable Task SDKs. | Language/Stack | Package | Project Home | Samples | | - | - | - | - | | .NET | [![NuGet](https://img.shields.io/nuget/v/Microsoft.DurableTask.Client.svg?style=flat)](https://www.nuget.org/packages/Microsoft.DurableTask.Client/) | [GitHub](https://github.com/microsoft/durabletask-dotnet) | [Samples](https://github.com/microsoft/durabletask-dotnet/tree/main/samples) | | Java | [![Maven Central](https://img.shields.io/maven-central/v/com.microsoft/durabletask-client?label=durabletask-client)](https://search.maven.org/artifact/com.microsoft/durabletask-client) | [GitHub](https://github.com/microsoft/durabletask-java) | [Samples](https://github.com/microsoft/durabletask-java/tree/main/samples/src/main/java/io/durabletask/samples) | You can also run the samples above and have them execute end-to-end with Dapr running locally on the same machine. The samples connect to gRPC over port `4001` by default, which will work without changes as long as Dapr is configured with `4001` as its gRPC port (like in the example above). ### Durable Task integration testing For quick integration testing, you can run the following docker command, which runs a suite of integration tests used by the official Durable Task .NET SDK: ```bash docker run -e GRPC_HOST="host.docker.internal" cgillum/durabletask-dotnet-tester:0.5.0-beta ``` Note that the test assumes the daprd process can be reached over `localhost` with port `4001` as the gRPC port on the host machine. These values can be overridden with the following environment variables: * `GRPC_HOST`: Use this to change from the default `127.0.0.1` to some other value, for example `host.docker.internal`. * `GRPC_PORT`: Set this environment variable to change the default port from `4001` to something else. If successful, you should see output that looks like the following: ``` Test run for /root/out/bin/Debug/Microsoft.DurableTask.Tests/net6.0/Microsoft.DurableTask.Tests.dll (.NETCoreApp,Version=v6.0) Microsoft (R) Test Execution Command Line Tool Version 17.3.1 (x64) Copyright (c) Microsoft Corporation. All rights reserved. Starting test execution, please wait... A total of 1 test files matched the specified pattern. [xUnit.net 00:00:00.00] xUnit.net VSTest Adapter v2.4.3+1b45f5407b (64-bit .NET 6.0.10) [xUnit.net 00:00:00.82] Discovering: Microsoft.DurableTask.Tests [xUnit.net 00:00:00.90] Discovered: Microsoft.DurableTask.Tests [xUnit.net 00:00:00.90] Starting: Microsoft.DurableTask.Tests Passed Microsoft.DurableTask.Tests.OrchestrationPatterns.ExternalEvents(eventCount: 100) [6 s] Passed Microsoft.DurableTask.Tests.OrchestrationPatterns.ExternalEvents(eventCount: 1) [309 ms] Passed Microsoft.DurableTask.Tests.OrchestrationPatterns.LongTimer [8 s] Passed Microsoft.DurableTask.Tests.OrchestrationPatterns.SubOrchestration [1 s] ... 
Passed Microsoft.DurableTask.Tests.OrchestrationPatterns.ActivityFanOut [914 ms] [xUnit.net 00:01:01.04] Finished: Microsoft.DurableTask.Tests Passed Microsoft.DurableTask.Tests.OrchestrationPatterns.SingleActivity_Async [365 ms] Test Run Successful. Total tests: 33 Passed: 33 Total time: 1.0290 Minutes ``` ## How the workflow engine works The Dapr Workflow engine introduces the concept of *internal actors*. These are actors that are registered and implemented directly in Daprd with no host application dependency. Just like regular actors, they have turn-based concurrency, support reminders, and are scaled out using the placement service. Internal actors also leverage the configured state store for actors. The workflow engine uses these actors as the core runtime primitives for workflows. Each workflow instance corresponds to a single `dapr.internal.wfengine.workflow` actor instance. The ID of the workflow instance is the same as the internal actor ID. The internal actor is responsible for triggering workflow execution and for storing workflow state. The actual workflow logic lives outside the Dapr sidecar in a host application. The host application uses a new gRPC endpoint on the daprd gRPC API server to send and receive workflow-specific commands to/from the actor-based workflow engine. The workflow app doesn't need to take on any actor dependencies, nor is it aware that actors are involved in the execution of the workflows. Actors are purely an implementation detail. ### State storage Each workflow actor saves its state using the following keys: * `metadata`: Contains meta information about the workflow as a JSON blob. Includes information such as the length of the inbox, the length of the history, and a 64-bit integer representing the workflow generation (for cases where the instance ID gets reused). The length information is used to determine which keys need to be read or written to when loading or saving workflow state updates. * `inbox-NNNNNN`: Multiple keys containing an ordered list of workflow inbox events. Each key holds the data for a single event. The inbox is effectively a FIFO queue of events that the workflow needs to process, with items removed from the lowest indices and appended at the highest. * `history-NNNNNN`: Multiple keys containing an ordered list of history events. Each key holds the data for a single event. History events are only added and never removed, except in the case of "continue as new", where all history events are purged. * `customStatus`: Contains a user-defined workflow status value. The `inbox-NNNNNN` and `history-NNNNNN` key schemes make it possible to store arbitrarily large amounts of data. These schemes are also designed for efficient updates. An alternate design would be to store the workflow history as a blob in a single key. However, this would limit the maximum size of the history and would make updates more expensive, since the full history would need to be serialized on every update instead of just appending the incremental additions (the history is an append-only log of events). The tradeoff with this key scheme design is that loading workflow state becomes more expensive since it's spread out across multiple keys. This is mitigated by the fact that actor state can be cached in memory, removing the need for any reads while the actors are active. However, it could be a problem if workflow histories get large and if actors get moved around or activated frequently. 
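To make the key scheme concrete, here is a minimal, illustrative Go sketch of how the zero-padded key names line up with the layout described above. The helper functions are hypothetical and written only for illustration; the engine constructs these keys internally when saving actor state.

```go
package main

import "fmt"

// historyKey and inboxKey are hypothetical helpers mirroring the
// zero-padded key layout used by the workflow actor's state storage.
func historyKey(i int) string { return fmt.Sprintf("history-%06d", i) }
func inboxKey(i int) string   { return fmt.Sprintf("inbox-%06d", i) }

func main() {
	// A workflow with three history events and one inbox event would use
	// these keys, plus the fixed "metadata" and "customStatus" keys:
	for i := 0; i < 3; i++ {
		fmt.Println(historyKey(i)) // history-000000, history-000001, history-000002
	}
	fmt.Println(inboxKey(0)) // inbox-000000
}
```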
Below is an example of what keys would be used to store the state of a simple workflow execution with ID '797f67f0c10846f592d0ac82dea1f248', as shown using `redis-cli`. ``` 127.0.0.1:6379> keys *797f67f0c10846f592d0ac82dea1f248* 1) "myapp||dapr.internal.wfengine.workflow||797f67f0c10846f592d0ac82dea1f248||history-000002" 2) "myapp||dapr.internal.wfengine.workflow||797f67f0c10846f592d0ac82dea1f248||customStatus" 3) "myapp||dapr.internal.wfengine.workflow||797f67f0c10846f592d0ac82dea1f248||metadata" 4) "myapp||dapr.internal.wfengine.workflow||797f67f0c10846f592d0ac82dea1f248||history-000003" 5) "myapp||dapr.internal.wfengine.workflow||797f67f0c10846f592d0ac82dea1f248||history-000005" 6) "myapp||dapr.internal.wfengine.workflow||797f67f0c10846f592d0ac82dea1f248||history-000001" 7) "myapp||dapr.internal.wfengine.workflow||797f67f0c10846f592d0ac82dea1f248||history-000000" 8) "myapp||dapr.internal.wfengine.workflow||797f67f0c10846f592d0ac82dea1f248||history-000004" 9) "myapp||dapr.internal.wfengine.workflow||797f67f0c10846f592d0ac82dea1f248||inbox-000000" ``` **IMPORTANT**: At the time of writing, there is no automatic purging of state for completed workflows. This means that the configured state store will continue to accumulate state indefinitely as more workflows are executed. Until automatic cleanup is implemented, old state will need to be purged manually from the configured state store. ### Resiliency Workflows are resilient to infrastructure failures. This is achieved by using reminders to drive all execution. If a process faults mid-execution, the reminder that initiated that execution will get scheduled again by Dapr to resume the execution from its previous checkpoint, which is stored in the state store. At all times, there is at least one reminder active for each workflow. However, there is typically a different reminder created for each *step* in the workflow. Here's an example of all the reminders that may get created as part of running a full end-to-end workflow. | Reminder name | Description | Payload? | | - | - | - | | `start` | Triggers the initial execution step of a workflow after it's created. | No | | `new-event` | Triggers subsequent processing of events by a workflow. | No | | `timer` | A special event reminder for a *durable timer* that is scheduled to run sometime in the future. | Yes, the durable task history event associated with the durable timer. | | `run-activity` | Triggers the execution of a workflow activity. | Yes, a UUID representing the current workflow generation. | > Note that all reminder names are suffixed with a series of random characters. For example, the `start` reminder might actually be named `start-149eb437`. This is because multiple reminders with the same name can result in unexpected behavior. Each reminder is created by default with a 1-minute period. If a workflow or activity execution fails unexpectedly, it will be retried automatically after the 1-minute period expires. If the workflow or activity execution succeeds, then the reminder will be immediately deleted.
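As a rough illustration of the reminder-naming scheme described above, the sketch below shows one way a uniquely suffixed reminder name could be generated. The helper is hypothetical and is not the engine's actual implementation.

```go
package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

// suffixedReminderName appends a short random hex string to a base name so
// that two reminders created for the same actor never share a name.
func suffixedReminderName(base string) string {
	b := make([]byte, 4)
	if _, err := rand.Read(b); err != nil {
		panic(err) // crypto/rand should not fail on supported platforms
	}
	return base + "-" + hex.EncodeToString(b)
}

func main() {
	fmt.Println(suffixedReminderName("start")) // e.g. "start-149eb437"
}
```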
mikeee/dapr
pkg/runtime/wfengine/README.md
Markdown
mit
13,694
/* Copyright 2023 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package actors import ( "bytes" "context" "encoding/json" "errors" "fmt" "strings" "time" "github.com/microsoft/durabletask-go/api" "github.com/microsoft/durabletask-go/backend" "github.com/dapr/dapr/pkg/actors" diag "github.com/dapr/dapr/pkg/diagnostics" invokev1 "github.com/dapr/dapr/pkg/messaging/v1" internalsv1pb "github.com/dapr/dapr/pkg/proto/internals/v1" ) var ErrDuplicateInvocation = errors.New("duplicate invocation") const activityStateKey = "activityState" type activityActor struct { actorID string actorRuntime actors.Actors scheduler activityScheduler state *activityState cachingDisabled bool defaultTimeout time.Duration reminderInterval time.Duration config actorsBackendConfig } // ActivityRequest represents a request by a workflow to invoke an activity. type ActivityRequest struct { HistoryEvent []byte } type activityState struct { EventPayload []byte } // activityScheduler is a func interface for pushing activity work items into the backend type activityScheduler func(ctx context.Context, wi *backend.ActivityWorkItem) error type activityActorOpts struct { cachingDisabled bool defaultTimeout time.Duration reminderInterval time.Duration } // NewActivityActor creates an internal activity actor for executing workflow activity logic. func NewActivityActor(scheduler activityScheduler, backendConfig actorsBackendConfig, opts *activityActorOpts) actors.InternalActorFactory { return func(actorType string, actorID string, actors actors.Actors) actors.InternalActor { a := &activityActor{ actorID: actorID, actorRuntime: actors, scheduler: scheduler, defaultTimeout: 1 * time.Hour, reminderInterval: 1 * time.Minute, config: backendConfig, cachingDisabled: opts.cachingDisabled, } if opts.defaultTimeout > 0 { a.defaultTimeout = opts.defaultTimeout } if opts.reminderInterval > 0 { a.reminderInterval = opts.reminderInterval } return a } } // InvokeMethod implements actors.InternalActor and schedules the background execution of a workflow activity. // Activities are scheduled by workflows and can execute for arbitrary lengths of time. Instead of executing // activity logic directly, InvokeMethod creates a reminder that executes the activity logic. InvokeMethod // returns immediately after creating the reminder, enabling the workflow to continue processing other events // in parallel. func (a *activityActor) InvokeMethod(ctx context.Context, methodName string, data []byte, metadata map[string][]string) ([]byte, error) { wfLogger.Debugf("Activity actor '%s': invoking method '%s'", a.actorID, methodName) var ar ActivityRequest if err := actors.DecodeInternalActorData(bytes.NewReader(data), &ar); err != nil { return nil, fmt.Errorf("failed to decode activity request: %w", err) } // Try to load activity state. If we find any, that means the activity invocation is a duplicate. 
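// (Currently only a failed load aborts the invocation; the loaded state value itself is discarded.)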
if _, err := a.loadActivityState(ctx); err != nil { return nil, err } if methodName == "PurgeWorkflowState" { return nil, a.purgeActivityState(ctx) } // Save the request details to the state store in case we need it after recovering from a failure. err := a.saveActivityState(ctx, &activityState{ EventPayload: ar.HistoryEvent, }) if err != nil { return nil, err } // The actual execution is triggered by a reminder err = a.createReliableReminder(ctx, nil) return nil, err } // InvokeReminder implements actors.InternalActor and executes the activity logic. func (a *activityActor) InvokeReminder(ctx context.Context, reminder actors.InternalActorReminder, metadata map[string][]string) error { wfLogger.Debugf("Activity actor '%s': invoking reminder '%s'", a.actorID, reminder.Name) state, _ := a.loadActivityState(ctx) // TODO: On error, reply with a failure - this requires support from durabletask-go to produce TaskFailure results timeoutCtx, cancelTimeout := context.WithTimeout(ctx, a.defaultTimeout) defer cancelTimeout() err := a.executeActivity(timeoutCtx, reminder.Name, state.EventPayload) var recoverableErr *recoverableError // Returning nil signals that we want the execution to be retried in the next period interval switch { case err == nil: // We delete the reminder on success and on non-recoverable errors. return actors.ErrReminderCanceled case errors.Is(err, context.DeadlineExceeded): wfLogger.Warnf("%s: execution of '%s' timed out and will be retried later: %v", a.actorID, reminder.Name, err) return nil case errors.Is(err, context.Canceled): wfLogger.Warnf("%s: received cancellation signal while waiting for activity execution '%s'", a.actorID, reminder.Name) return nil case errors.As(err, &recoverableErr): wfLogger.Warnf("%s: execution failed with a recoverable error and will be retried later: %v", a.actorID, err) return nil default: // Other error wfLogger.Errorf("%s: execution failed with a non-recoverable error: %v", a.actorID, err) // TODO: Reply with a failure - this requires support from durabletask-go to produce TaskFailure results return actors.ErrReminderCanceled } } func (a *activityActor) executeActivity(ctx context.Context, name string, eventPayload []byte) error { taskEvent, err := backend.UnmarshalHistoryEvent(eventPayload) if err != nil { return err } activityName := "" if ts := taskEvent.GetTaskScheduled(); ts != nil { activityName = ts.GetName() } else { return fmt.Errorf("invalid activity task event: '%s'", taskEvent.String()) } endIndex := strings.Index(a.actorID, "::") if endIndex < 0 { return fmt.Errorf("invalid activity actor ID: '%s'", a.actorID) } workflowID := a.actorID[0:endIndex] wi := &backend.ActivityWorkItem{ SequenceNumber: int64(taskEvent.GetEventId()), InstanceID: api.InstanceID(workflowID), NewEvent: taskEvent, Properties: make(map[string]interface{}), } // Executing activity code is a one-way operation. We must wait for the app code to report its completion, which // will trigger this callback channel. // TODO: Need to come up with a design for timeouts. Some activities may need to run for hours but we also need // to handle the case where the app crashes and never responds to the workflow. It may be necessary to // introduce some kind of heartbeat protocol to help identify such cases. 
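// The callback channel created below is signaled by the durabletask engine: CompleteActivityWorkItem sends true on normal completion, while AbandonActivityWorkItem sends false to request an abort and retry.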
callback := make(chan bool) wi.Properties[CallbackChannelProperty] = callback wfLogger.Debugf("Activity actor '%s': scheduling activity '%s' for workflow with instanceId '%s'", a.actorID, name, wi.InstanceID) err = a.scheduler(ctx, wi) if errors.Is(err, context.DeadlineExceeded) { return newRecoverableError(fmt.Errorf("timed out trying to schedule an activity execution - this can happen if too many activities are running in parallel or if the workflow engine isn't running: %w", err)) } else if err != nil { return newRecoverableError(fmt.Errorf("failed to schedule an activity execution: %w", err)) } // Activity execution started start := time.Now() executionStatus := "" elapsed := float64(0) // Record metrics on exit defer func() { if executionStatus != "" { diag.DefaultWorkflowMonitoring.ActivityExecutionEvent(ctx, activityName, executionStatus, elapsed) } }() loop: for { t := time.NewTimer(10 * time.Minute) select { case <-ctx.Done(): if !t.Stop() { <-t.C } // Activity execution failed with recoverable error elapsed = diag.ElapsedSince(start) executionStatus = diag.StatusRecoverable return ctx.Err() // will be retried case <-t.C: if deadline, ok := ctx.Deadline(); ok { wfLogger.Warnf("Activity actor '%s': '%s' is still running - will keep waiting until '%v'", a.actorID, name, deadline) } else { wfLogger.Warnf("Activity actor '%s': '%s' is still running - will keep waiting indefinitely", a.actorID, name) } case completed := <-callback: if !t.Stop() { <-t.C } // Activity execution completed elapsed = diag.ElapsedSince(start) if completed { break loop } else { // Activity execution failed with recoverable error executionStatus = diag.StatusRecoverable return newRecoverableError(errExecutionAborted) // AbandonActivityWorkItem was called } } } wfLogger.Debugf("Activity actor '%s': activity completed for workflow with instanceId '%s' activityName '%s'", a.actorID, wi.InstanceID, name) // publish the result back to the workflow actor as a new event to be processed resultData, err := backend.MarshalHistoryEvent(wi.Result) if err != nil { // Returning non-recoverable error executionStatus = diag.StatusFailed return err } req := internalsv1pb. NewInternalInvokeRequest(AddWorkflowEventMethod). WithActor(a.config.workflowActorType, workflowID). WithData(resultData). WithContentType(invokev1.OctetStreamContentType) _, err = a.actorRuntime.Call(ctx, req) switch { case err != nil: // Returning recoverable error, record metrics executionStatus = diag.StatusRecoverable return newRecoverableError(fmt.Errorf("failed to invoke '%s' method on workflow actor: %w", AddWorkflowEventMethod, err)) case wi.Result.GetTaskCompleted() != nil: // Activity execution completed successfully executionStatus = diag.StatusSuccess case wi.Result.GetTaskFailed() != nil: // Activity execution failed executionStatus = diag.StatusFailed } return nil } // InvokeTimer implements actors.InternalActor func (*activityActor) InvokeTimer(ctx context.Context, timer actors.InternalActorReminder, metadata map[string][]string) error { return errors.New("timers are not implemented") } // DeactivateActor implements actors.InternalActor func (a *activityActor) DeactivateActor(ctx context.Context) error { wfLogger.Debugf("Activity actor '%s': deactivating", a.actorID) a.state = nil // A bit of extra caution, shouldn't be necessary return nil } func (a *activityActor) loadActivityState(ctx context.Context) (*activityState, error) { // See if the state for this actor is already cached in memory. 
if a.state != nil { return a.state, nil } // Loading from the state store is only expected in process failure recovery scenarios. wfLogger.Debugf("Activity actor '%s': loading activity state", a.actorID) req := actors.GetStateRequest{ ActorType: a.config.activityActorType, ActorID: a.actorID, Key: activityStateKey, } res, err := a.actorRuntime.GetState(ctx, &req) if err != nil { return nil, fmt.Errorf("failed to load activity state: %w", err) } if len(res.Data) == 0 { // no data was found - this is expected on the initial invocation of the activity actor. return nil, nil } state := &activityState{} err = json.Unmarshal(res.Data, state) if err != nil { return nil, fmt.Errorf("failed to unmarshal activity state: %w", err) } return state, nil } func (a *activityActor) saveActivityState(ctx context.Context, state *activityState) error { req := actors.TransactionalRequest{ ActorType: a.config.activityActorType, ActorID: a.actorID, Operations: []actors.TransactionalOperation{{ Operation: actors.Upsert, Request: actors.TransactionalUpsert{ Key: activityStateKey, Value: state, }, }}, } if err := a.actorRuntime.TransactionalStateOperation(ctx, &req); err != nil { return fmt.Errorf("failed to save activity state: %w", err) } if !a.cachingDisabled { a.state = state } return nil } func (a *activityActor) purgeActivityState(ctx context.Context) error { wfLogger.Debugf("Activity actor '%s': purging activity state", a.actorID) err := a.actorRuntime.TransactionalStateOperation(ctx, &actors.TransactionalRequest{ ActorType: a.config.activityActorType, ActorID: a.actorID, Operations: []actors.TransactionalOperation{{ Operation: actors.Delete, Request: actors.TransactionalDelete{ Key: activityStateKey, }, }}, }) if err != nil { return fmt.Errorf("failed to delete activity state with error: %w", err) } return nil } func (a *activityActor) createReliableReminder(ctx context.Context, data any) error { const reminderName = "run-activity" wfLogger.Debugf("Activity actor '%s': creating reminder '%s' for immediate execution", a.actorID, reminderName) dataEnc, err := json.Marshal(data) if err != nil { return fmt.Errorf("failed to encode data as JSON: %w", err) } return a.actorRuntime.CreateReminder(ctx, &actors.CreateReminderRequest{ ActorType: a.config.activityActorType, ActorID: a.actorID, Data: dataEnc, DueTime: "0s", Name: reminderName, Period: a.reminderInterval.String(), }) }
mikeee/dapr
pkg/runtime/wfengine/backends/actors/activity_actor.go
GO
mit
13,187
/* Copyright 2023 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package actors import ( "bytes" "context" "encoding/json" "errors" "fmt" "sync/atomic" "time" "github.com/microsoft/durabletask-go/api" "github.com/microsoft/durabletask-go/backend" "github.com/dapr/dapr/pkg/actors" wfbe "github.com/dapr/dapr/pkg/components/wfbackend" diag "github.com/dapr/dapr/pkg/diagnostics" invokev1 "github.com/dapr/dapr/pkg/messaging/v1" internalsv1pb "github.com/dapr/dapr/pkg/proto/internals/v1" "github.com/dapr/dapr/utils" "github.com/dapr/kit/logger" ) var ( wfLogger = logger.NewLogger("dapr.wfengine.backend.actors") errExecutionAborted = errors.New("execution aborted") ) const ( defaultNamespace = "default" WorkflowNameLabelKey = "workflow" ActivityNameLabelKey = "activity" ) // actorsBackendConfig is the configuration for the workflow engine's actors backend type actorsBackendConfig struct { AppID string workflowActorType string activityActorType string } // NewActorsBackendConfig creates a new workflow engine configuration func NewActorsBackendConfig(appID string) actorsBackendConfig { return actorsBackendConfig{ AppID: appID, workflowActorType: actors.InternalActorTypePrefix + utils.GetNamespaceOrDefault(defaultNamespace) + utils.DotDelimiter + appID + utils.DotDelimiter + WorkflowNameLabelKey, activityActorType: actors.InternalActorTypePrefix + utils.GetNamespaceOrDefault(defaultNamespace) + utils.DotDelimiter + appID + utils.DotDelimiter + ActivityNameLabelKey, } } // String implements fmt.Stringer and is primarily used for debugging purposes. func (c *actorsBackendConfig) String() string { if c == nil { return "(nil)" } return fmt.Sprintf("AppID='%s' workflowActorType='%s' activityActorType='%s'", c.AppID, c.workflowActorType, c.activityActorType) } type ActorBackend struct { orchestrationWorkItemChan chan *backend.OrchestrationWorkItem activityWorkItemChan chan *backend.ActivityWorkItem config actorsBackendConfig activityActorOpts activityActorOpts workflowActorOpts workflowActorOpts actorRuntime actors.ActorRuntime actorsReady atomic.Bool actorsReadyCh chan struct{} } func NewActorBackend(md wfbe.Metadata, _ logger.Logger) (backend.Backend, error) { backendConfig := NewActorsBackendConfig(md.AppID) // These channels are used by actors to call into this backend object orchestrationWorkItemChan := make(chan *backend.OrchestrationWorkItem) activityWorkItemChan := make(chan *backend.ActivityWorkItem) return &ActorBackend{ orchestrationWorkItemChan: orchestrationWorkItemChan, activityWorkItemChan: activityWorkItemChan, config: backendConfig, actorsReadyCh: make(chan struct{}), }, nil } // getWorkflowScheduler returns a workflowScheduler func that sends an orchestration work item to the Durable Task Framework. 
func getWorkflowScheduler(orchestrationWorkItemChan chan *backend.OrchestrationWorkItem) workflowScheduler { return func(ctx context.Context, wi *backend.OrchestrationWorkItem) error { wfLogger.Debugf("%s: scheduling workflow execution with durabletask engine", wi.InstanceID) select { case <-ctx.Done(): // <-- engine is shutting down or a caller timeout expired return ctx.Err() case orchestrationWorkItemChan <- wi: // blocks until the engine is ready to process the work item return nil } } } // getActivityScheduler returns an activityScheduler func that sends an activity work item to the Durable Task Framework. func getActivityScheduler(activityWorkItemChan chan *backend.ActivityWorkItem) activityScheduler { return func(ctx context.Context, wi *backend.ActivityWorkItem) error { wfLogger.Debugf( "%s: scheduling [%s#%d] activity execution with durabletask engine", wi.InstanceID, wi.NewEvent.GetTaskScheduled().GetName(), wi.NewEvent.GetEventId()) select { case <-ctx.Done(): // engine is shutting down return ctx.Err() case activityWorkItemChan <- wi: // blocks until the engine is ready to process the work item return nil } } } // GetInternalActorsMap returns a map of internal actors that are used to implement workflows func (abe *ActorBackend) GetInternalActorsMap() map[string]actors.InternalActorFactory { internalActors := make(map[string]actors.InternalActorFactory) internalActors[abe.config.workflowActorType] = NewWorkflowActor(getWorkflowScheduler(abe.orchestrationWorkItemChan), abe.config, &abe.workflowActorOpts) internalActors[abe.config.activityActorType] = NewActivityActor(getActivityScheduler(abe.activityWorkItemChan), abe.config, &abe.activityActorOpts) return internalActors } func (abe *ActorBackend) SetActorRuntime(ctx context.Context, actorRuntime actors.ActorRuntime) { abe.actorRuntime = actorRuntime if abe.actorsReady.CompareAndSwap(false, true) { close(abe.actorsReadyCh) } } func (abe *ActorBackend) RegisterActor(ctx context.Context) error { if abe.actorRuntime != nil { for actorType, actor := range abe.GetInternalActorsMap() { err := abe.actorRuntime.RegisterInternalActor(ctx, actorType, actor, time.Minute*1) if err != nil { return fmt.Errorf("failed to register workflow actor %s: %w", actorType, err) } } } return nil } // CreateOrchestrationInstance implements backend.Backend and creates a new workflow instance. // // Internally, creating a workflow instance also creates a new actor with the same ID. The create // request is saved into the actor's "inbox" and then executed via a reminder thread. If the app is // scaled out across multiple replicas, the actor might get assigned to a replica other than this one. 
func (abe *ActorBackend) CreateOrchestrationInstance(ctx context.Context, e *backend.HistoryEvent, opts ...backend.OrchestrationIdReusePolicyOptions) error { if err := abe.validateConfiguration(); err != nil { return err } var workflowInstanceID string if es := e.GetExecutionStarted(); es == nil { return errors.New("the history event must be an ExecutionStartedEvent") } else if oi := es.GetOrchestrationInstance(); oi == nil { return errors.New("the ExecutionStartedEvent did not contain orchestration instance information") } else { workflowInstanceID = oi.GetInstanceId() } policy := &api.OrchestrationIdReusePolicy{} for _, opt := range opts { opt(policy) } eventData, err := backend.MarshalHistoryEvent(e) if err != nil { return err } requestBytes, err := json.Marshal(CreateWorkflowInstanceRequest{ Policy: policy, StartEventBytes: eventData, }) if err != nil { return fmt.Errorf("failed to marshal CreateWorkflowInstanceRequest: %w", err) } // Invoke the well-known workflow actor directly, which will be created by this invocation request. // Note that this request goes directly to the actor runtime, bypassing the API layer. req := internalsv1pb.NewInternalInvokeRequest(CreateWorkflowInstanceMethod). WithActor(abe.config.workflowActorType, workflowInstanceID). WithData(requestBytes). WithContentType(invokev1.JSONContentType) start := time.Now() _, err = abe.actorRuntime.Call(ctx, req) elapsed := diag.ElapsedSince(start) if err != nil { // failed request to CREATE workflow, record count and latency metrics. diag.DefaultWorkflowMonitoring.WorkflowOperationEvent(ctx, diag.CreateWorkflow, diag.StatusFailed, elapsed) return err } // successful request to CREATE workflow, record count and latency metrics. diag.DefaultWorkflowMonitoring.WorkflowOperationEvent(ctx, diag.CreateWorkflow, diag.StatusSuccess, elapsed) return nil } // GetOrchestrationMetadata implements backend.Backend func (abe *ActorBackend) GetOrchestrationMetadata(ctx context.Context, id api.InstanceID) (*api.OrchestrationMetadata, error) { // Invoke the corresponding actor, which internally stores its own workflow metadata req := internalsv1pb. NewInternalInvokeRequest(GetWorkflowMetadataMethod). WithActor(abe.config.workflowActorType, string(id)). WithContentType(invokev1.OctetStreamContentType) start := time.Now() res, err := abe.actorRuntime.Call(ctx, req) elapsed := diag.ElapsedSince(start) if err != nil { // failed request to GET workflow Information, record count and latency metrics. diag.DefaultWorkflowMonitoring.WorkflowOperationEvent(ctx, diag.GetWorkflow, diag.StatusFailed, elapsed) return nil, err } // successful request to GET workflow information, record count and latency metrics. diag.DefaultWorkflowMonitoring.WorkflowOperationEvent(ctx, diag.GetWorkflow, diag.StatusSuccess, elapsed) var metadata api.OrchestrationMetadata err = actors.DecodeInternalActorData(bytes.NewReader(res.GetMessage().GetData().GetValue()), &metadata) if err != nil { return nil, fmt.Errorf("failed to decode the internal actor response: %w", err) } return &metadata, nil } // AbandonActivityWorkItem implements backend.Backend. It gets called by durabletask-go when there is // an unexpected failure in the workflow activity execution pipeline. func (*ActorBackend) AbandonActivityWorkItem(ctx context.Context, wi *backend.ActivityWorkItem) error { wfLogger.Warnf("%s: aborting activity execution (::%d)", wi.InstanceID, wi.NewEvent.GetEventId()) // Sending false signals the waiting activity actor to abort the activity execution. 
if channel, ok := wi.Properties[CallbackChannelProperty]; ok { channel.(chan bool) <- false } return nil } // AbandonOrchestrationWorkItem implements backend.Backend. It gets called by durabletask-go when there is // an unexpected failure in the workflow orchestration execution pipeline. func (*ActorBackend) AbandonOrchestrationWorkItem(ctx context.Context, wi *backend.OrchestrationWorkItem) error { wfLogger.Warnf("%s: aborting workflow execution", wi.InstanceID) // Sending false signals the waiting workflow actor to abort the workflow execution. if channel, ok := wi.Properties[CallbackChannelProperty]; ok { channel.(chan bool) <- false } return nil } // AddNewOrchestrationEvent implements backend.Backend and sends the event e to the workflow actor identified by id. func (abe *ActorBackend) AddNewOrchestrationEvent(ctx context.Context, id api.InstanceID, e *backend.HistoryEvent) error { data, err := backend.MarshalHistoryEvent(e) if err != nil { return err } // Send the event to the corresponding workflow actor, which will store it in its event inbox. req := internalsv1pb. NewInternalInvokeRequest(AddWorkflowEventMethod). WithActor(abe.config.workflowActorType, string(id)). WithData(data). WithContentType(invokev1.OctetStreamContentType) start := time.Now() _, err = abe.actorRuntime.Call(ctx, req) elapsed := diag.ElapsedSince(start) if err != nil { // failed request to ADD EVENT, record count and latency metrics. diag.DefaultWorkflowMonitoring.WorkflowOperationEvent(ctx, diag.AddEvent, diag.StatusFailed, elapsed) return err } // successful request to ADD EVENT, record count and latency metrics. diag.DefaultWorkflowMonitoring.WorkflowOperationEvent(ctx, diag.AddEvent, diag.StatusSuccess, elapsed) return nil } // CompleteActivityWorkItem implements backend.Backend func (*ActorBackend) CompleteActivityWorkItem(ctx context.Context, wi *backend.ActivityWorkItem) error { // Sending true signals the waiting activity actor to complete the execution normally. wi.Properties[CallbackChannelProperty].(chan bool) <- true return nil } // CompleteOrchestrationWorkItem implements backend.Backend func (*ActorBackend) CompleteOrchestrationWorkItem(ctx context.Context, wi *backend.OrchestrationWorkItem) error { // Sending true signals the waiting workflow actor to complete the execution normally. wi.Properties[CallbackChannelProperty].(chan bool) <- true return nil } // CreateTaskHub implements backend.Backend func (*ActorBackend) CreateTaskHub(context.Context) error { return nil } // DeleteTaskHub implements backend.Backend func (*ActorBackend) DeleteTaskHub(context.Context) error { return errors.New("not supported") } // GetActivityWorkItem implements backend.Backend func (abe *ActorBackend) GetActivityWorkItem(ctx context.Context) (*backend.ActivityWorkItem, error) { // Wait for the activity actor to signal us with some work to do wfLogger.Debug("Actor backend is waiting for an activity actor to schedule an invocation.") select { case wi := <-abe.activityWorkItemChan: wfLogger.Debugf( "Actor backend received a [%s#%d] activity task for workflow '%s'.", wi.NewEvent.GetTaskScheduled().GetName(), wi.NewEvent.GetEventId(), wi.InstanceID) return wi, nil case <-ctx.Done(): return nil, ctx.Err() } } // GetOrchestrationRuntimeState implements backend.Backend func (abe *ActorBackend) GetOrchestrationRuntimeState(ctx context.Context, owi *backend.OrchestrationWorkItem) (*backend.OrchestrationRuntimeState, error) { // Invoke the corresponding actor, which internally stores its own workflow state. 
req := internalsv1pb. NewInternalInvokeRequest(GetWorkflowStateMethod). WithActor(abe.config.workflowActorType, string(owi.InstanceID)). WithContentType(invokev1.OctetStreamContentType) res, err := abe.actorRuntime.Call(ctx, req) if err != nil { return nil, err } wfState := &workflowState{} err = wfState.DecodeWorkflowState(res.GetMessage().GetData().GetValue()) if err != nil { return nil, fmt.Errorf("failed to decode the internal actor response: %w", err) } runtimeState := getRuntimeState(string(owi.InstanceID), wfState) return runtimeState, nil } // GetOrchestrationWorkItem implements backend.Backend func (abe *ActorBackend) GetOrchestrationWorkItem(ctx context.Context) (*backend.OrchestrationWorkItem, error) { // Wait for the workflow actor to signal us with some work to do wfLogger.Debug("Actor backend is waiting for a workflow actor to schedule an invocation.") select { case wi := <-abe.orchestrationWorkItemChan: wfLogger.Debugf("Actor backend received a workflow task for workflow '%s'.", wi.InstanceID) return wi, nil case <-ctx.Done(): return nil, ctx.Err() } } // PurgeOrchestrationState deletes all saved state for the specific orchestration instance. func (abe *ActorBackend) PurgeOrchestrationState(ctx context.Context, id api.InstanceID) error { req := internalsv1pb. NewInternalInvokeRequest(PurgeWorkflowStateMethod). WithActor(abe.config.workflowActorType, string(id)) start := time.Now() _, err := abe.actorRuntime.Call(ctx, req) elapsed := diag.ElapsedSince(start) if err != nil { // failed request to PURGE WORKFLOW, record latency and count metrics. diag.DefaultWorkflowMonitoring.WorkflowOperationEvent(ctx, diag.PurgeWorkflow, diag.StatusFailed, elapsed) return err } // successful request to PURGE WORKFLOW, record latency and count metrics. diag.DefaultWorkflowMonitoring.WorkflowOperationEvent(ctx, diag.PurgeWorkflow, diag.StatusSuccess, elapsed) return nil } // Start implements backend.Backend func (abe *ActorBackend) Start(ctx context.Context) error { err := abe.validateConfiguration() if err != nil { return err } return nil } // Stop implements backend.Backend func (*ActorBackend) Stop(context.Context) error { return nil } // String displays the type information func (abe *ActorBackend) String() string { return "dapr.actors/v1-beta" } func (abe *ActorBackend) validateConfiguration() error { if abe.actorRuntime == nil { return errors.New("actor runtime has not been configured") } return nil } // WaitForActorsReady blocks until the actor runtime is set in the object (or until the context is canceled). func (abe *ActorBackend) WaitForActorsReady(ctx context.Context) { select { case <-ctx.Done(): // No-op case <-abe.actorsReadyCh: // No-op } } // DisableActorCaching turns off the default caching done by the workflow and activity actors. // This method is primarily intended to be used for testing to ensure correct behavior // when actors are newly activated on nodes, but without requiring the actor to actually // go through activation. func (abe *ActorBackend) DisableActorCaching(disable bool) { abe.workflowActorOpts.cachingDisabled = disable abe.activityActorOpts.cachingDisabled = disable } // SetWorkflowTimeout allows configuring a default timeout for workflow execution steps. // If the timeout is exceeded, the workflow execution step will be abandoned and retried. // Note that this timeout is for a non-blocking step in the workflow (which is expected // to always complete almost immediately) and not for the end-to-end workflow execution. 
func (abe *ActorBackend) SetWorkflowTimeout(timeout time.Duration) { abe.workflowActorOpts.defaultTimeout = timeout } // SetActivityTimeout allows configuring a default timeout for activity executions. // If the timeout is exceeded, the activity execution will be abandoned and retried. func (abe *ActorBackend) SetActivityTimeout(timeout time.Duration) { abe.activityActorOpts.defaultTimeout = timeout } // SetActorReminderInterval sets the amount of delay between internal retries for // workflow and activity actors. This impacts how long it takes for an operation to // restart itself after a timeout or a process failure is encountered while running. func (abe *ActorBackend) SetActorReminderInterval(interval time.Duration) { abe.workflowActorOpts.reminderInterval = interval abe.activityActorOpts.reminderInterval = interval }
mikeee/dapr
pkg/runtime/wfengine/backends/actors/backend.go
GO
mit
17,875
//go:build unit // +build unit /* Copyright 2023 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package actors import ( "context" "testing" "github.com/microsoft/durabletask-go/api" "github.com/microsoft/durabletask-go/backend" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/dapr/components-contrib/state" "github.com/dapr/dapr/pkg/actors" "github.com/dapr/dapr/pkg/components/wfbackend" "github.com/dapr/dapr/pkg/config" "github.com/dapr/dapr/pkg/resiliency" "github.com/dapr/dapr/pkg/runtime/compstore" daprt "github.com/dapr/dapr/pkg/testing" "github.com/dapr/kit/logger" ) const ( testAppID = "wf-app" workflowActorType = "dapr.internal.default.wf-app.workflow" activityActorType = "dapr.internal.default.wf-app.activity" ) func TestNoWorkflowState(t *testing.T) { actors := getActorRuntime(t) state, err := LoadWorkflowState(context.Background(), actors, "wf1", NewActorsBackendConfig(testAppID)) require.NoError(t, err) assert.Empty(t, state) } func TestDefaultWorkflowState(t *testing.T) { state := NewWorkflowState(NewActorsBackendConfig(testAppID)) assert.Equal(t, uint64(1), state.Generation) } func TestAddingToInbox(t *testing.T) { state := NewWorkflowState(NewActorsBackendConfig(testAppID)) for i := 0; i < 10; i++ { state.AddToInbox(&backend.HistoryEvent{}) } req, err := state.GetSaveRequest("wf1") require.NoError(t, err) assert.Equal(t, "wf1", req.ActorID) assert.Equal(t, workflowActorType, req.ActorType) upsertCount, deleteCount := countOperations(t, req) assert.Equal(t, 11, upsertCount) // 10x inbox + metadata assert.Equal(t, 0, deleteCount) } func TestClearingInbox(t *testing.T) { state := NewWorkflowState(NewActorsBackendConfig(testAppID)) for i := 0; i < 10; i++ { // Simulate the loading of inbox events from storage state.Inbox = append(state.Inbox, &backend.HistoryEvent{}) } state.ClearInbox() req, err := state.GetSaveRequest("wf1") require.NoError(t, err) assert.Equal(t, "wf1", req.ActorID) assert.Equal(t, workflowActorType, req.ActorType) upsertCount, deleteCount := countOperations(t, req) assert.Equal(t, 1, upsertCount) // metadata only assert.Equal(t, 10, deleteCount) // the 10 inbox messages should get deleted } func TestAddingToHistory(t *testing.T) { wfstate := NewWorkflowState(NewActorsBackendConfig(testAppID)) runtimeState := backend.NewOrchestrationRuntimeState(api.InstanceID("wf1"), nil) for i := 0; i < 10; i++ { err := runtimeState.AddEvent(&backend.HistoryEvent{}) require.NoError(t, err) } wfstate.ApplyRuntimeStateChanges(runtimeState) req, err := wfstate.GetSaveRequest("wf1") require.NoError(t, err) assert.Equal(t, "wf1", req.ActorID) assert.Equal(t, workflowActorType, req.ActorType) upsertCount, deleteCount := countOperations(t, req) assert.Equal(t, 12, upsertCount) // 10x history + metadata + customStatus assert.Equal(t, 0, deleteCount) } func TestLoadSavedState(t *testing.T) { wfstate := NewWorkflowState(NewActorsBackendConfig(testAppID)) runtimeState := backend.NewOrchestrationRuntimeState(api.InstanceID("wf1"), nil) for i := 0; i < 10; i++ { err := 
runtimeState.AddEvent(&backend.HistoryEvent{EventId: int32(i)}) require.NoError(t, err) } wfstate.ApplyRuntimeStateChanges(runtimeState) wfstate.CustomStatus = "my custom status" for i := 0; i < 5; i++ { wfstate.AddToInbox(&backend.HistoryEvent{EventId: int32(i)}) } req, err := wfstate.GetSaveRequest("wf1") require.NoError(t, err) upsertCount, deleteCount := countOperations(t, req) assert.Equal(t, 17, upsertCount) // 10x history, 5x inbox, 1 metadata, 1 customStatus assert.Equal(t, 0, deleteCount) actors := getActorRuntime(t) err = actors.TransactionalStateOperation(context.Background(), req) require.NoError(t, err) wfstate, err = LoadWorkflowState(context.Background(), actors, "wf1", NewActorsBackendConfig(testAppID)) require.NoError(t, err) require.NotNil(t, wfstate) assert.Equal(t, "my custom status", wfstate.CustomStatus) assert.Equal(t, uint64(1), wfstate.Generation) require.Len(t, wfstate.History, 10) for i, e := range wfstate.History { assert.Equal(t, int32(i), e.GetEventId()) } require.Len(t, wfstate.Inbox, 5) for i, e := range wfstate.Inbox { assert.Equal(t, int32(i), e.GetEventId()) } } func TestDecodeEncodedState(t *testing.T) { wfstate := NewWorkflowState(NewActorsBackendConfig(testAppID)) wfstate.AddToInbox(&backend.HistoryEvent{EventId: int32(1)}) runtimeState := backend.NewOrchestrationRuntimeState(testAppID, nil) err := runtimeState.AddEvent(&backend.HistoryEvent{EventId: int32(2)}) require.NoError(t, err) wfstate.ApplyRuntimeStateChanges(runtimeState) wfstate.CustomStatus = "test-status" encodedState, err := wfstate.EncodeWorkflowState() require.NoError(t, err) decodedState := NewWorkflowState(NewActorsBackendConfig(testAppID)) err = decodedState.DecodeWorkflowState(encodedState) require.NoError(t, err) assert.Equal(t, wfstate.Inbox[0].GetEventId(), decodedState.Inbox[0].GetEventId()) assert.Equal(t, wfstate.History[0].GetEventId(), decodedState.History[0].GetEventId()) assert.Equal(t, wfstate.CustomStatus, decodedState.CustomStatus) } func TestResetLoadedState(t *testing.T) { wfstate := NewWorkflowState(NewActorsBackendConfig(testAppID)) runtimeState := backend.NewOrchestrationRuntimeState(api.InstanceID("wf1"), nil) for i := 0; i < 10; i++ { require.NoError(t, runtimeState.AddEvent(&backend.HistoryEvent{})) } wfstate.ApplyRuntimeStateChanges(runtimeState) for i := 0; i < 5; i++ { wfstate.AddToInbox(&backend.HistoryEvent{}) } req, err := wfstate.GetSaveRequest("wf1") require.NoError(t, err) actorRuntime := getActorRuntime(t) err = actorRuntime.TransactionalStateOperation(context.Background(), req) require.NoError(t, err) wfstate, err = LoadWorkflowState(context.Background(), actorRuntime, "wf1", NewActorsBackendConfig(testAppID)) require.NoError(t, err) require.NotNil(t, wfstate) assert.Equal(t, uint64(1), wfstate.Generation) wfstate.Reset() assert.Equal(t, uint64(2), wfstate.Generation) req, err = wfstate.GetSaveRequest("wf1") require.NoError(t, err) assert.Len(t, req.Operations, 17) // history x10 + inbox x5 + metadata + customStatus upsertCount, deleteCount := countOperations(t, req) assert.Equal(t, 2, upsertCount) // metadata + customStatus assert.Equal(t, 15, deleteCount) // all history and inbox records are deleted } func TestInvalidStart(t *testing.T) { be, err := NewActorBackend(wfbackend.Metadata{}, nil) require.NoError(t, err) require.NotNil(t, be) err = be.Start(context.TODO()) require.Error(t, err) err = be.Start(context.TODO()) require.Error(t, err) } func getActorRuntime(t *testing.T) actors.Actors { store := fakeStore() cfg := 
actors.NewConfig(actors.ConfigOpts{ AppID: testAppID, ActorsService: "placement:placement:5050", AppConfig: config.ApplicationConfig{}, }) compStore := compstore.New() compStore.AddStateStore("workflowStore", store) act, err := actors.NewActors(actors.ActorsOpts{ CompStore: compStore, Config: cfg, StateStoreName: "workflowStore", MockPlacement: actors.NewMockPlacement(testAppID), Resiliency: resiliency.New(logger.NewLogger("test")), }) require.NoError(t, err) return act } func countOperations(t *testing.T, req *actors.TransactionalRequest) (upsertCount, deleteCount int) { for _, op := range req.Operations { if op.Operation == actors.Upsert { upsertCount++ } else if op.Operation == actors.Delete { deleteCount++ } else { t.Fatalf("unexpected operation type: %v", op.Operation) } } return upsertCount, deleteCount } func fakeStore() state.Store { return daprt.NewFakeStateStore() }
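
// TestPurgeRequestOperationCount is an illustrative sketch: it encodes the
// expected shape of GetPurgeRequest from workflowstate.go (one delete per
// inbox and history entry, plus deletes for the customStatus and metadata
// keys, and no upserts) rather than exercising new behavior.
func TestPurgeRequestOperationCount(t *testing.T) {
	wfstate := NewWorkflowState(NewActorsBackendConfig(testAppID))
	for i := 0; i < 3; i++ {
		wfstate.AddToInbox(&backend.HistoryEvent{})
	}

	req, err := wfstate.GetPurgeRequest("wf1")
	require.NoError(t, err)
	assert.Equal(t, "wf1", req.ActorID)
	assert.Equal(t, workflowActorType, req.ActorType)

	upsertCount, deleteCount := countOperations(t, req)
	assert.Equal(t, 0, upsertCount)
	assert.Equal(t, 5, deleteCount) // 3x inbox + customStatus + metadata
}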
mikeee/dapr
pkg/runtime/wfengine/backends/actors/backend_test.go
GO
mit
8,254
/* Copyright 2023 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package actors import ( "context" "crypto/rand" "encoding/base64" "encoding/json" "errors" "fmt" "io" "strconv" "strings" "sync/atomic" "time" "github.com/microsoft/durabletask-go/api" "github.com/microsoft/durabletask-go/backend" "github.com/dapr/dapr/pkg/actors" diag "github.com/dapr/dapr/pkg/diagnostics" invokev1 "github.com/dapr/dapr/pkg/messaging/v1" internalsv1pb "github.com/dapr/dapr/pkg/proto/internals/v1" ) const ( CallbackChannelProperty = "dapr.callback" CreateWorkflowInstanceMethod = "CreateWorkflowInstance" GetWorkflowMetadataMethod = "GetWorkflowMetadata" AddWorkflowEventMethod = "AddWorkflowEvent" PurgeWorkflowStateMethod = "PurgeWorkflowState" GetWorkflowStateMethod = "GetWorkflowState" ) type workflowActor struct { actorID string actors actors.Actors state *workflowState scheduler workflowScheduler cachingDisabled bool defaultTimeout time.Duration reminderInterval time.Duration config actorsBackendConfig activityResultAwaited atomic.Bool } type durableTimer struct { Bytes []byte `json:"bytes"` Generation uint64 `json:"generation"` } type recoverableError struct { cause error } type CreateWorkflowInstanceRequest struct { Policy *api.OrchestrationIdReusePolicy `json:"policy"` StartEventBytes []byte `json:"startEventBytes"` } // workflowScheduler is a func interface for pushing workflow (orchestration) work items into the backend type workflowScheduler func(ctx context.Context, wi *backend.OrchestrationWorkItem) error func NewDurableTimer(bytes []byte, generation uint64) durableTimer { return durableTimer{bytes, generation} } func newRecoverableError(err error) *recoverableError { return &recoverableError{cause: err} } func (err *recoverableError) Error() string { return err.cause.Error() } type workflowActorOpts struct { cachingDisabled bool defaultTimeout time.Duration reminderInterval time.Duration } func NewWorkflowActor(scheduler workflowScheduler, config actorsBackendConfig, opts *workflowActorOpts) actors.InternalActorFactory { return func(actorType string, actorID string, actors actors.Actors) actors.InternalActor { wf := &workflowActor{ actorID: actorID, actors: actors, scheduler: scheduler, defaultTimeout: 30 * time.Second, reminderInterval: 1 * time.Minute, config: config, cachingDisabled: opts.cachingDisabled, } if opts.defaultTimeout > 0 { wf.defaultTimeout = opts.defaultTimeout } if opts.reminderInterval > 0 { wf.reminderInterval = opts.reminderInterval } return wf } } // InvokeMethod implements actors.InternalActor func (wf *workflowActor) InvokeMethod(ctx context.Context, methodName string, request []byte, metadata map[string][]string) (res []byte, err error) { wfLogger.Debugf("Workflow actor '%s': invoking method '%s'", wf.actorID, methodName) switch methodName { case CreateWorkflowInstanceMethod: err = wf.createWorkflowInstance(ctx, request) case GetWorkflowMetadataMethod: var resAny any resAny, err = wf.getWorkflowMetadata(ctx) if err == nil { res, err = actors.EncodeInternalActorData(resAny) } case 
GetWorkflowStateMethod: var state *workflowState state, err = wf.getWorkflowState(ctx) if err == nil { res, err = state.EncodeWorkflowState() } case AddWorkflowEventMethod: err = wf.addWorkflowEvent(ctx, request) case PurgeWorkflowStateMethod: err = wf.purgeWorkflowState(ctx) default: err = fmt.Errorf("no such method: %s", methodName) } return res, err } // InvokeReminder implements actors.InternalActor func (wf *workflowActor) InvokeReminder(ctx context.Context, reminder actors.InternalActorReminder, metadata map[string][]string) error { wfLogger.Debugf("Workflow actor '%s': invoking reminder '%s'", wf.actorID, reminder.Name) // Workflow executions should never take longer than a few seconds at the most timeoutCtx, cancelTimeout := context.WithTimeout(ctx, wf.defaultTimeout) defer cancelTimeout() err := wf.runWorkflow(timeoutCtx, reminder) // We delete the reminder on success and on non-recoverable errors. // Returning nil signals that we want the execution to be retried in the next period interval var re *recoverableError switch { case err == nil: return actors.ErrReminderCanceled case errors.Is(err, context.DeadlineExceeded): wfLogger.Warnf("Workflow actor '%s': execution timed-out and will be retried later: '%v'", wf.actorID, err) return nil case errors.Is(err, context.Canceled): wfLogger.Warnf("Workflow actor '%s': execution was canceled (process shutdown?) and will be retried later: '%v'", wf.actorID, err) return nil case errors.As(err, &re): wfLogger.Warnf("Workflow actor '%s': execution failed with a recoverable error and will be retried later: '%v'", wf.actorID, re) return nil default: // Other error wfLogger.Errorf("Workflow actor '%s': execution failed with a non-recoverable error: %v", wf.actorID, err) return actors.ErrReminderCanceled } } // InvokeTimer implements actors.InternalActor func (wf *workflowActor) InvokeTimer(ctx context.Context, timer actors.InternalActorReminder, metadata map[string][]string) error { return errors.New("timers are not implemented") } // DeactivateActor implements actors.InternalActor func (wf *workflowActor) DeactivateActor(ctx context.Context) error { wfLogger.Debugf("Workflow actor '%s': deactivating", wf.actorID) wf.state = nil // A bit of extra caution, shouldn't be necessary return nil } func (wf *workflowActor) createWorkflowInstance(ctx context.Context, request []byte) error { // create a new state entry if one doesn't already exist state, err := wf.loadInternalState(ctx) if err != nil { return err } created := false if state == nil { state = NewWorkflowState(wf.config) created = true } var createWorkflowInstanceRequest CreateWorkflowInstanceRequest if err = json.Unmarshal(request, &createWorkflowInstanceRequest); err != nil { return fmt.Errorf("failed to unmarshal createWorkflowInstanceRequest: %w", err) } reuseIDPolicy := createWorkflowInstanceRequest.Policy startEventBytes := createWorkflowInstanceRequest.StartEventBytes // Ensure that the start event payload is a valid durabletask execution-started event startEvent, err := backend.UnmarshalHistoryEvent(startEventBytes) if err != nil { return err } if es := startEvent.GetExecutionStarted(); es == nil { return errors.New("invalid execution start event") } else { if es.GetParentInstance() == nil { wfLogger.Debugf("Workflow actor '%s': creating workflow '%s' with instanceId '%s'", wf.actorID, es.GetName(), es.GetOrchestrationInstance().GetInstanceId()) } else { wfLogger.Debugf("Workflow actor '%s': creating child workflow '%s' with instanceId '%s' parentWorkflow '%s' parentWorkflowId '%s'", 
wf.actorID, es.GetName(), es.GetOrchestrationInstance().GetInstanceId(), es.GetParentInstance().GetName(), es.GetParentInstance().GetOrchestrationInstance().GetInstanceId()) } } // orchestration didn't exist and was just created if created { return wf.scheduleWorkflowStart(ctx, startEvent, state) } // orchestration already existed: apply reuse id policy runtimeState := getRuntimeState(wf.actorID, state) runtimeStatus := runtimeState.RuntimeStatus() // if the target status doesn't match, fall back to the original logic and create an instance only if the previous one is completed if !isStatusMatch(reuseIDPolicy.GetOperationStatus(), runtimeStatus) { return wf.createIfCompleted(ctx, runtimeState, state, startEvent) } switch reuseIDPolicy.GetAction() { case api.REUSE_ID_ACTION_IGNORE: // Log a warning message and ignore creating a new instance wfLogger.Warnf("Workflow actor '%s': ignoring request to recreate the current workflow instance", wf.actorID) return nil case api.REUSE_ID_ACTION_TERMINATE: // terminate the existing instance if err := wf.cleanupWorkflowStateInternal(ctx, state, false); err != nil { return fmt.Errorf("failed to terminate existing instance with ID '%s': %w", wf.actorID, err) } // create a new instance state.Reset() return wf.scheduleWorkflowStart(ctx, startEvent, state) } // the default action is ERROR: fall back to the original logic return wf.createIfCompleted(ctx, runtimeState, state, startEvent) } func isStatusMatch(statuses []api.OrchestrationStatus, runtimeStatus api.OrchestrationStatus) bool { for _, status := range statuses { if status == runtimeStatus { return true } } return false } func (wf *workflowActor) createIfCompleted(ctx context.Context, runtimeState *backend.OrchestrationRuntimeState, state *workflowState, startEvent *backend.HistoryEvent) error { // We block (re)creation of existing workflows unless they are in a completed state, // and also while they still have a pending activity result awaited. if !runtimeState.IsCompleted() { return fmt.Errorf("an active workflow with ID '%s' already exists", wf.actorID) } if wf.activityResultAwaited.Load() { return fmt.Errorf("a terminated workflow with ID '%s' is already awaiting an activity result", wf.actorID) } wfLogger.Infof("Workflow actor '%s': workflow was previously completed and is being recreated", wf.actorID) state.Reset() return wf.scheduleWorkflowStart(ctx, startEvent, state) } func (wf *workflowActor) scheduleWorkflowStart(ctx context.Context, startEvent *backend.HistoryEvent, state *workflowState) error { // Schedule a reminder to execute immediately after this operation. The reminder will trigger the actual // workflow execution. This is preferable to using the current thread so that we don't block the client // while the workflow logic is running. 
if _, err := wf.createReliableReminder(ctx, "start", nil, 0); err != nil { return err } state.AddToInbox(startEvent) return wf.saveInternalState(ctx, state) } // This method cleans up a workflow associated with the given actorID func (wf *workflowActor) cleanupWorkflowStateInternal(ctx context.Context, state *workflowState, requiredAndNotCompleted bool) error { // If the workflow is required to be completed but is not yet completed, return [ErrNotCompleted]. // This check is used when purging a workflow. if requiredAndNotCompleted { return api.ErrNotCompleted } err := wf.removeCompletedStateData(ctx, state) if err != nil { return err } // This will create a request to purge everything req, err := state.GetPurgeRequest(wf.actorID) if err != nil { return err } // This will do the purging err = wf.actors.TransactionalStateOperation(ctx, req) if err != nil { return err } wf.state = nil return nil } func (wf *workflowActor) getWorkflowMetadata(ctx context.Context) (*api.OrchestrationMetadata, error) { state, err := wf.loadInternalState(ctx) if err != nil { return nil, err } if state == nil { return nil, api.ErrInstanceNotFound } runtimeState := getRuntimeState(wf.actorID, state) name, _ := runtimeState.Name() createdAt, _ := runtimeState.CreatedTime() lastUpdated, _ := runtimeState.LastUpdatedTime() input, _ := runtimeState.Input() output, _ := runtimeState.Output() failureDetails, _ := runtimeState.FailureDetails() metadata := api.NewOrchestrationMetadata( runtimeState.InstanceID(), name, runtimeState.RuntimeStatus(), createdAt, lastUpdated, input, output, state.CustomStatus, failureDetails, ) return metadata, nil } func (wf *workflowActor) getWorkflowState(ctx context.Context) (*workflowState, error) { state, err := wf.loadInternalState(ctx) if err != nil { return nil, err } if state == nil { return nil, api.ErrInstanceNotFound } wfLogger.Debugf("Workflow actor '%s': getWorkflowState, state: %s", wf.actorID, state) return state, nil } // This method purges the state of a workflow associated with the given actorID, including any completed activity data func (wf *workflowActor) purgeWorkflowState(ctx context.Context) error { state, err := wf.loadInternalState(ctx) if err != nil { return err } if state == nil { return api.ErrInstanceNotFound } runtimeState := getRuntimeState(wf.actorID, state) return wf.cleanupWorkflowStateInternal(ctx, state, !runtimeState.IsCompleted()) } func (wf *workflowActor) addWorkflowEvent(ctx context.Context, historyEventBytes []byte) error { state, err := wf.loadInternalState(ctx) if err != nil { return err } if state == nil { return api.ErrInstanceNotFound } e, err := backend.UnmarshalHistoryEvent(historyEventBytes) if err != nil { return err } if e.GetTaskCompleted() != nil || e.GetTaskFailed() != nil { wf.activityResultAwaited.CompareAndSwap(true, false) } wfLogger.Debugf("Workflow actor '%s': adding event '%v' to the workflow inbox", wf.actorID, e) state.AddToInbox(e) if _, err := wf.createReliableReminder(ctx, "new-event", nil, 0); err != nil { return err } return wf.saveInternalState(ctx, state) } func (wf *workflowActor) getWorkflowName(oldEvents, newEvents []*backend.HistoryEvent) string { for _, e := range oldEvents { if es := e.GetExecutionStarted(); es != nil { return es.GetName() } } for _, e := range newEvents { if es := e.GetExecutionStarted(); es != nil { return es.GetName() } } return "" } func (wf *workflowActor) runWorkflow(ctx context.Context, reminder actors.InternalActorReminder) error { state, err := wf.loadInternalState(ctx) if err != nil { return 
fmt.Errorf("error loading internal state: %w", err) } if state == nil { // The assumption is that someone manually deleted the workflow state. This is non-recoverable. return errors.New("no workflow state found") } if strings.HasPrefix(reminder.Name, "timer-") { var timerData durableTimer if err = reminder.DecodeData(&timerData); err != nil { // Likely the result of an incompatible durable task timer format change. This is non-recoverable. return err } if timerData.Generation < state.Generation { wfLogger.Infof("Workflow actor '%s': ignoring durable timer from previous generation '%v'", wf.actorID, timerData.Generation) return nil } else { e, eventErr := backend.UnmarshalHistoryEvent(timerData.Bytes) if eventErr != nil { // Likely the result of an incompatible durable task timer format change. This is non-recoverable. return fmt.Errorf("failed to unmarshal timer data %w", eventErr) } state.Inbox = append(state.Inbox, e) } } if len(state.Inbox) == 0 { // This can happen after multiple events are processed in batches; there may still be reminders around // for some of those already processed events. wfLogger.Debugf("Workflow actor '%s': ignoring run request for reminder '%s' because the workflow inbox is empty", wf.actorID, reminder.Name) return nil } // The logic/for loop below purges/removes any leftover state from a completed or failed activity transactionalRequests := make(map[string][]actors.TransactionalOperation) var esHistoryEvent *backend.HistoryEvent for _, e := range state.Inbox { var taskID int32 if ts := e.GetTaskCompleted(); ts != nil { taskID = ts.GetTaskScheduledId() } else if tf := e.GetTaskFailed(); tf != nil { taskID = tf.GetTaskScheduledId() } else { if es := e.GetExecutionStarted(); es != nil { esHistoryEvent = e } continue } op := actors.TransactionalOperation{ Operation: actors.Delete, Request: actors.TransactionalDelete{ Key: activityStateKey, }, } activityActorID := getActivityActorID(wf.actorID, taskID, state.Generation) if transactionalRequests[activityActorID] == nil { transactionalRequests[activityActorID] = []actors.TransactionalOperation{op} } else { transactionalRequests[activityActorID] = append(transactionalRequests[activityActorID], op) } } // TODO: for optimization make multiple go routines and run them in parallel for activityActorID, operations := range transactionalRequests { err = wf.actors.TransactionalStateOperation(ctx, &actors.TransactionalRequest{ ActorType: wf.config.activityActorType, ActorID: activityActorID, Operations: operations, }) if err != nil { return fmt.Errorf("failed to delete activity state for activity actor '%s' with error: %w", activityActorID, err) } } runtimeState := getRuntimeState(wf.actorID, state) wi := &backend.OrchestrationWorkItem{ InstanceID: runtimeState.InstanceID(), NewEvents: state.Inbox, RetryCount: -1, // TODO State: runtimeState, Properties: make(map[string]any, 1), } // Executing workflow code is a one-way operation. We must wait for the app code to report its completion, which // will trigger this callback channel. callback := make(chan bool) wi.Properties[CallbackChannelProperty] = callback // Setting executionStatus to failed by default to record metrics for non-recoverable errors. executionStatus := diag.StatusFailed if runtimeState.IsCompleted() { // If workflow is already completed, set executionStatus to empty string // which will skip recording metrics for this execution. 
executionStatus = "" } workflowName := wf.getWorkflowName(state.History, state.Inbox) // Request to execute workflow wfLogger.Debugf("Workflow actor '%s': scheduling workflow execution with instanceId '%s'", wf.actorID, wi.InstanceID) // Schedule the workflow execution by signaling the backend err = wf.scheduler(ctx, wi) if err != nil { if errors.Is(err, context.DeadlineExceeded) { return newRecoverableError(fmt.Errorf("timed-out trying to schedule a workflow execution - this can happen if there are too many in-flight workflows or if the workflow engine isn't running: %w", err)) } return newRecoverableError(fmt.Errorf("failed to schedule a workflow execution: %w", err)) } wf.recordWorkflowSchedulingLatency(ctx, esHistoryEvent, workflowName) wfExecutionElapsedTime := float64(0) defer func() { if executionStatus != "" { diag.DefaultWorkflowMonitoring.WorkflowExecutionEvent(ctx, workflowName, executionStatus) diag.DefaultWorkflowMonitoring.WorkflowExecutionLatency(ctx, workflowName, executionStatus, wfExecutionElapsedTime) } }() select { case <-ctx.Done(): // caller is responsible for timeout management // Workflow execution failed with recoverable error executionStatus = diag.StatusRecoverable return ctx.Err() case completed := <-callback: if !completed { // Workflow execution failed with recoverable error executionStatus = diag.StatusRecoverable return newRecoverableError(errExecutionAborted) } } wfLogger.Debugf("Workflow actor '%s': workflow execution returned with status '%s' instanceId '%s'", wf.actorID, runtimeState.RuntimeStatus().String(), wi.InstanceID) // Increment the generation counter if the workflow used continue-as-new. Subsequent actions below // will use this updated generation value for their duplication execution handling. if runtimeState.ContinuedAsNew() { wfLogger.Debugf("Workflow actor '%s': workflow with instanceId '%s' continued as new", wf.actorID, wi.InstanceID) state.Generation += 1 } if !runtimeState.IsCompleted() { // Create reminders for the durable timers. We only do this if the orchestration is still running. 
for _, t := range runtimeState.PendingTimers() { tf := t.GetTimerFired() if tf == nil { return errors.New("invalid event in the PendingTimers list") } timerBytes, errMarshal := backend.MarshalHistoryEvent(t) if errMarshal != nil { return fmt.Errorf("failed to marshal pending timer data: %w", errMarshal) } delay := time.Until(tf.GetFireAt().AsTime()) if delay < 0 { delay = 0 } reminderPrefix := "timer-" + strconv.Itoa(int(tf.GetTimerId())) data := NewDurableTimer(timerBytes, state.Generation) wfLogger.Debugf("Workflow actor '%s': creating reminder '%s' for the durable timer", wf.actorID, reminderPrefix) if _, err = wf.createReliableReminder(ctx, reminderPrefix, data, delay); err != nil { executionStatus = diag.StatusRecoverable return newRecoverableError(fmt.Errorf("actor '%s' failed to create reminder for timer: %w", wf.actorID, err)) } } } // Process the outbound orchestrator events reqsByName := make(map[string][]backend.OrchestratorMessage, len(runtimeState.PendingMessages())) for _, msg := range runtimeState.PendingMessages() { if es := msg.HistoryEvent.GetExecutionStarted(); es != nil { reqsByName[CreateWorkflowInstanceMethod] = append(reqsByName[CreateWorkflowInstanceMethod], msg) } else if msg.HistoryEvent.GetSubOrchestrationInstanceCompleted() != nil || msg.HistoryEvent.GetSubOrchestrationInstanceFailed() != nil { reqsByName[AddWorkflowEventMethod] = append(reqsByName[AddWorkflowEventMethod], msg) } else { wfLogger.Warnf("Workflow actor '%s': don't know how to process outbound message '%v'", wf.actorID, msg) } } // Schedule activities // TODO: Parallelism for _, e := range runtimeState.PendingTasks() { ts := e.GetTaskScheduled() if ts == nil { wfLogger.Warnf("Workflow actor '%s': unable to process task '%v'", wf.actorID, e) continue } eventData, errMarshal := backend.MarshalHistoryEvent(e) if errMarshal != nil { return errMarshal } activityRequestBytes, errInternal := actors.EncodeInternalActorData(ActivityRequest{ HistoryEvent: eventData, }) if errInternal != nil { return errInternal } targetActorID := getActivityActorID(wf.actorID, e.GetEventId(), state.Generation) wf.activityResultAwaited.Store(true) wfLogger.Debugf("Workflow actor '%s': invoking execute method on activity actor '%s'", wf.actorID, targetActorID) req := internalsv1pb. NewInternalInvokeRequest("Execute"). WithActor(wf.config.activityActorType, targetActorID). WithData(activityRequestBytes). WithContentType(invokev1.OctetStreamContentType) _, err = wf.actors.Call(ctx, req) if errors.Is(err, ErrDuplicateInvocation) { wfLogger.Warnf("Workflow actor '%s': activity invocation '%s::%d' was flagged as a duplicate and will be skipped", wf.actorID, ts.GetName(), e.GetEventId()) continue } else if err != nil { executionStatus = diag.StatusRecoverable return newRecoverableError(fmt.Errorf("failed to invoke activity actor '%s' to execute '%s': %w", targetActorID, ts.GetName(), err)) } } // TODO: Do these in parallel? 
for method, msgList := range reqsByName { for _, msg := range msgList { eventData, errMarshal := backend.MarshalHistoryEvent(msg.HistoryEvent) if errMarshal != nil { return errMarshal } requestBytes := eventData if method == CreateWorkflowInstanceMethod { requestBytes, err = json.Marshal(CreateWorkflowInstanceRequest{ Policy: &api.OrchestrationIdReusePolicy{}, StartEventBytes: eventData, }) if err != nil { return fmt.Errorf("failed to marshal createWorkflowInstanceRequest: %w", err) } } wfLogger.Debugf("Workflow actor '%s': invoking method '%s' on workflow actor '%s'", wf.actorID, method, msg.TargetInstanceID) req := internalsv1pb. NewInternalInvokeRequest(method). WithActor(wf.config.workflowActorType, msg.TargetInstanceID). WithData(requestBytes). WithContentType(invokev1.OctetStreamContentType) _, err = wf.actors.Call(ctx, req) if err != nil { executionStatus = diag.StatusRecoverable // workflow-related actor methods are never expected to return errors return newRecoverableError(fmt.Errorf("method %s on actor '%s' returned an error: %w", method, msg.TargetInstanceID, err)) } } } state.ApplyRuntimeStateChanges(runtimeState) state.ClearInbox() err = wf.saveInternalState(ctx, state) if err != nil { return err } if executionStatus != "" { // Reset executionStatus first: if the workflow has not completed, it stays empty, // which skips recording metrics for this execution. executionStatus = "" if runtimeState.IsCompleted() { if runtimeState.RuntimeStatus() == api.RUNTIME_STATUS_COMPLETED { executionStatus = diag.StatusSuccess } else { // Setting executionStatus to failed if the workflow has failed/terminated/cancelled executionStatus = diag.StatusFailed } wfExecutionElapsedTime = wf.calculateWorkflowExecutionLatency(state) } } if runtimeState.IsCompleted() { wfLogger.Infof("Workflow actor '%s': workflow completed with status '%s' workflowName '%s'", wf.actorID, runtimeState.RuntimeStatus().String(), workflowName) } return nil } func (*workflowActor) calculateWorkflowExecutionLatency(state *workflowState) (wfExecutionElapsedTime float64) { for _, e := range state.History { if os := e.GetOrchestratorStarted(); os != nil { return diag.ElapsedSince(e.GetTimestamp().AsTime()) } } return 0 } func (*workflowActor) recordWorkflowSchedulingLatency(ctx context.Context, esHistoryEvent *backend.HistoryEvent, workflowName string) { if esHistoryEvent == nil { return } // If the event is an execution started event, then we need to record the scheduled start timestamp if es := esHistoryEvent.GetExecutionStarted(); es != nil { currentTimestamp := time.Now() var scheduledStartTimestamp time.Time timestamp := es.GetScheduledStartTimestamp() if timestamp != nil { scheduledStartTimestamp = timestamp.AsTime() } else { // If scheduledStartTimestamp is nil, use the event timestamp to compute the scheduling latency. // This happens when the workflow is created and started immediately. scheduledStartTimestamp = esHistoryEvent.GetTimestamp().AsTime() } wfSchedulingLatency := float64(currentTimestamp.Sub(scheduledStartTimestamp).Milliseconds()) diag.DefaultWorkflowMonitoring.WorkflowSchedulingLatency(ctx, workflowName, wfSchedulingLatency) } } func (wf *workflowActor) loadInternalState(ctx context.Context) (*workflowState, error) { // See if the state for this actor is already cached in memory if !wf.cachingDisabled && wf.state != nil { return wf.state, nil } // state is not cached, so try to load it from the state store wfLogger.Debugf("Workflow actor '%s': loading workflow state", wf.actorID) state, err := 
LoadWorkflowState(ctx, wf.actors, wf.actorID, wf.config) if err != nil { return nil, err } if state == nil { // No such state exists in the state store return nil, nil } if !wf.cachingDisabled { // Update cached state wf.state = state } return state, nil } func (wf *workflowActor) saveInternalState(ctx context.Context, state *workflowState) error { // generate and run a state store operation that saves all changes req, err := state.GetSaveRequest(wf.actorID) if err != nil { return err } wfLogger.Debugf("Workflow actor '%s': saving %d keys to actor state store", wf.actorID, len(req.Operations)) if err = wf.actors.TransactionalStateOperation(ctx, req); err != nil { return err } // ResetChangeTracking should always be called after a save operation succeeds state.ResetChangeTracking() if !wf.cachingDisabled { // Update cached state wf.state = state } return nil } func (wf *workflowActor) createReliableReminder(ctx context.Context, namePrefix string, data any, delay time.Duration) (string, error) { // Reminders need to have unique names or else they may not fire in certain race conditions. b := make([]byte, 6) _, err := io.ReadFull(rand.Reader, b) if err != nil { return "", fmt.Errorf("failed to generate reminder ID: %w", err) } reminderName := namePrefix + "-" + base64.RawURLEncoding.EncodeToString(b) wfLogger.Debugf("Workflow actor '%s': creating '%s' reminder with DueTime = '%s'", wf.actorID, reminderName, delay) dataEnc, err := json.Marshal(data) if err != nil { return reminderName, fmt.Errorf("failed to encode data as JSON: %w", err) } return reminderName, wf.actors.CreateReminder(ctx, &actors.CreateReminderRequest{ ActorType: wf.config.workflowActorType, ActorID: wf.actorID, Data: dataEnc, DueTime: delay.String(), Name: reminderName, Period: wf.reminderInterval.String(), }) } func getRuntimeState(actorID string, state *workflowState) *backend.OrchestrationRuntimeState { // TODO: Add caching when a good invalidation policy can be determined return backend.NewOrchestrationRuntimeState(api.InstanceID(actorID), state.History) } func getActivityActorID(workflowActorID string, taskID int32, generation uint64) string { // An activity is identified by its parent workflow's actor ID followed by its task ID and the workflow generation. Example: SayHello::0::1, SayHello::1::1, etc. return workflowActorID + "::" + strconv.Itoa(int(taskID)) + "::" + strconv.FormatUint(generation, 10) } func (wf *workflowActor) removeCompletedStateData(ctx context.Context, state *workflowState) error { // The logic/for loop below purges/removes any leftover state from a completed or failed activity // TODO: for optimization, create multiple goroutines and run them in parallel var err error for _, e := range state.Inbox { var taskID int32 if ts := e.GetTaskCompleted(); ts != nil { taskID = ts.GetTaskScheduledId() } else if tf := e.GetTaskFailed(); tf != nil { taskID = tf.GetTaskScheduledId() } else { continue } req := actors.TransactionalRequest{ ActorType: wf.config.activityActorType, ActorID: getActivityActorID(wf.actorID, taskID, state.Generation), Operations: []actors.TransactionalOperation{{ Operation: actors.Delete, Request: actors.TransactionalDelete{ Key: activityStateKey, }, }}, } if err = wf.actors.TransactionalStateOperation(ctx, &req); err != nil { return fmt.Errorf("failed to delete activity state with error: %w", err) } } return err }
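
// Note (illustrative): given the composition in getActivityActorID above, a
// workflow actor "wf1" on generation 2 that schedules task 0 addresses the
// activity actor "wf1::0::2". Because runWorkflow increments the generation on
// continue-as-new, a recycled workflow instance can never collide with stale
// activity state left over from a previous generation.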
mikeee/dapr
pkg/runtime/wfengine/backends/actors/workflow_actor.go
GO
mit
30,160
/* Copyright 2024 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package actors import ( "testing" "github.com/stretchr/testify/require" ) func TestRecoverableError(t *testing.T) { err := newRecoverableError(errExecutionAborted) var recoverableErr *recoverableError require.ErrorAs(t, err, &recoverableErr) require.Equal(t, errExecutionAborted.Error(), recoverableErr.Error()) }
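
// TestNewDurableTimer is an illustrative sketch verifying that NewDurableTimer
// stores its arguments verbatim; runWorkflow relies on the Generation field to
// discard durable timers created by a previous workflow generation.
func TestNewDurableTimer(t *testing.T) {
	payload := []byte("timer-payload")
	timer := NewDurableTimer(payload, 3)
	require.Equal(t, payload, timer.Bytes)
	require.Equal(t, uint64(3), timer.Generation)
}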
mikeee/dapr
pkg/runtime/wfengine/backends/actors/workflow_actor_test.go
GO
mit
886
/* Copyright 2023 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package actors import ( "bytes" "context" "encoding/json" "fmt" "strings" "time" "github.com/microsoft/durabletask-go/backend" "github.com/dapr/dapr/pkg/actors" ) const ( inboxKeyPrefix = "inbox" historyKeyPrefix = "history" customStatusKey = "customStatus" metadataKey = "metadata" ) type workflowState struct { Inbox []*backend.HistoryEvent History []*backend.HistoryEvent CustomStatus string Generation uint64 // change tracking inboxAddedCount int inboxRemovedCount int historyAddedCount int historyRemovedCount int config actorsBackendConfig } type workflowStateMetadata struct { InboxLength int HistoryLength int Generation uint64 } func NewWorkflowState(config actorsBackendConfig) *workflowState { return &workflowState{ Generation: 1, config: config, } } func (s *workflowState) Reset() { s.inboxAddedCount = 0 s.inboxRemovedCount += len(s.Inbox) s.Inbox = nil s.historyAddedCount = 0 s.historyRemovedCount += len(s.History) s.History = nil s.CustomStatus = "" s.Generation++ } // ResetChangeTracking resets the change tracking counters. This should be called after a save request. func (s *workflowState) ResetChangeTracking() { s.inboxAddedCount = 0 s.inboxRemovedCount = 0 s.historyAddedCount = 0 s.historyRemovedCount = 0 } func (s *workflowState) ApplyRuntimeStateChanges(runtimeState *backend.OrchestrationRuntimeState) { if runtimeState.ContinuedAsNew() { s.historyRemovedCount += len(s.History) s.historyAddedCount = 0 s.History = nil } newHistoryEvents := runtimeState.NewEvents() s.History = append(s.History, newHistoryEvents...) s.historyAddedCount += len(newHistoryEvents) s.CustomStatus = runtimeState.CustomStatus.GetValue() } func (s *workflowState) AddToInbox(e *backend.HistoryEvent) { s.Inbox = append(s.Inbox, e) s.inboxAddedCount++ } func (s *workflowState) ClearInbox() { for _, e := range s.Inbox { if e.GetTimerFired() != nil { // ignore timer events since those aren't saved into the state store continue } s.inboxRemovedCount++ } s.Inbox = nil s.inboxAddedCount = 0 } func (s *workflowState) GetSaveRequest(actorID string) (*actors.TransactionalRequest, error) { // TODO: Batching up the save requests into smaller chunks to avoid batch size limits in Dapr state stores. req := &actors.TransactionalRequest{ ActorType: s.config.workflowActorType, ActorID: actorID, Operations: make([]actors.TransactionalOperation, 0, 100), } if err := addStateOperations(req, inboxKeyPrefix, s.Inbox, s.inboxAddedCount, s.inboxRemovedCount); err != nil { return nil, err } if err := addStateOperations(req, historyKeyPrefix, s.History, s.historyAddedCount, s.historyRemovedCount); err != nil { return nil, err } // We update the custom status only when the workflow itself has been updated, and not when // we're saving changes only to the workflow inbox. // CONSIDER: Only save custom status if it has changed. However, need a way to track this. 
if s.historyAddedCount > 0 || s.historyRemovedCount > 0 { req.Operations = append(req.Operations, actors.TransactionalOperation{ Operation: actors.Upsert, Request: actors.TransactionalUpsert{Key: customStatusKey, Value: s.CustomStatus}, }) } // Every time we save, we also update the metadata with information about the size of the history and inbox, // as well as the generation of the workflow. metadata := workflowStateMetadata{ InboxLength: len(s.Inbox), HistoryLength: len(s.History), Generation: s.Generation, } req.Operations = append(req.Operations, actors.TransactionalOperation{ Operation: actors.Upsert, Request: actors.TransactionalUpsert{Key: metadataKey, Value: metadata}, }) return req, nil } // String implements fmt.Stringer and is primarily used for debugging purposes. func (s *workflowState) String() string { if s == nil { return "(nil)" } inbox := make([]string, len(s.Inbox)) for i, v := range s.Inbox { if v == nil { inbox[i] = "[(nil)]" } else { inbox[i] = "[" + v.String() + "]" } } history := make([]string, len(s.History)) for i, v := range s.History { if v == nil { history[i] = "[(nil)]" } else { history[i] = "[" + v.String() + "]" } } return fmt.Sprintf("Inbox:%s\nHistory:%s\nCustomStatus:%s\nGeneration:%d\ninboxAddedCount:%d\ninboxRemovedCount:%d\nhistoryAddedCount:%d\nhistoryRemovedCount:%d\nconfig:%s", strings.Join(inbox, ", "), strings.Join(history, ", "), s.CustomStatus, s.Generation, s.inboxAddedCount, s.inboxRemovedCount, s.historyAddedCount, s.historyRemovedCount, s.config.String()) } // EncodeWorkflowState encodes the workflow state into a byte array. // It only encodes the inbox, history, and custom status. func (s *workflowState) EncodeWorkflowState() ([]byte, error) { // Encode history events encodedHistory := make([][]byte, len(s.History)) for i, event := range s.History { encodedEvent, err := backend.MarshalHistoryEvent(event) if err != nil { return nil, err } encodedHistory[i] = encodedEvent } encodedInbox := make([][]byte, len(s.Inbox)) for i, event := range s.Inbox { encodedEvent, err := backend.MarshalHistoryEvent(event) if err != nil { return nil, err } encodedInbox[i] = encodedEvent } // Encode workflowState encodedState, err := actors.EncodeInternalActorData(&struct { Inbox [][]byte History [][]byte CustomStatus string }{ Inbox: encodedInbox, History: encodedHistory, CustomStatus: s.CustomStatus, }) if err != nil { return nil, err } return encodedState, nil } // DecodeWorkflowState decodes the workflow state from a byte array encoded using `EncodeWorkflowState`. // It only decodes the inbox, history, and custom status. 
func (s *workflowState) DecodeWorkflowState(encodedState []byte) error { // Decode workflowState var decodedState struct { Inbox [][]byte History [][]byte CustomStatus string } err := actors.DecodeInternalActorData(bytes.NewReader(encodedState), &decodedState) if err != nil { return err } // Decode history events s.History = make([]*backend.HistoryEvent, len(decodedState.History)) for i, encodedEvent := range decodedState.History { event, err := backend.UnmarshalHistoryEvent(encodedEvent) if err != nil { return err } s.History[i] = event } s.Inbox = make([]*backend.HistoryEvent, len(decodedState.Inbox)) for i, encodedEvent := range decodedState.Inbox { event, err := backend.UnmarshalHistoryEvent(encodedEvent) if err != nil { return err } s.Inbox[i] = event } s.CustomStatus = decodedState.CustomStatus return nil } func addStateOperations(req *actors.TransactionalRequest, keyPrefix string, events []*backend.HistoryEvent, addedCount int, removedCount int) error { // TODO: Investigate whether Dapr state stores put limits on batch sizes. It seems some storage // providers have limits and we need to know if that impacts this algorithm: // https://learn.microsoft.com/azure/cosmos-db/nosql/transactional-batch#limitations for i := len(events) - addedCount; i < len(events); i++ { e := events[i] data, err := backend.MarshalHistoryEvent(e) if err != nil { return err } req.Operations = append(req.Operations, actors.TransactionalOperation{ Operation: actors.Upsert, Request: actors.TransactionalUpsert{Key: getMultiEntryKeyName(keyPrefix, i), Value: data}, }) } for i := len(events); i < removedCount; i++ { req.Operations = append(req.Operations, actors.TransactionalOperation{ Operation: actors.Delete, Request: actors.TransactionalDelete{Key: getMultiEntryKeyName(keyPrefix, i)}, }) } return nil } func addPurgeStateOperations(req *actors.TransactionalRequest, keyPrefix string, events []*backend.HistoryEvent) error { // TODO: Investigate whether Dapr state stores put limits on batch sizes. 
It seems some storage // providers have limits and we need to know if that impacts this algorithm: // https://learn.microsoft.com/azure/cosmos-db/nosql/transactional-batch#limitations for i := 0; i < len(events); i++ { req.Operations = append(req.Operations, actors.TransactionalOperation{ Operation: actors.Delete, Request: actors.TransactionalDelete{Key: getMultiEntryKeyName(keyPrefix, i)}, }) } return nil } func LoadWorkflowState(ctx context.Context, actorRuntime actors.Actors, actorID string, config actorsBackendConfig) (*workflowState, error) { loadStartTime := time.Now() loadedRecords := 0 // Load metadata req := actors.GetStateRequest{ ActorType: config.workflowActorType, ActorID: actorID, Key: metadataKey, } res, err := actorRuntime.GetState(ctx, &req) loadedRecords++ if err != nil { return nil, fmt.Errorf("failed to load workflow metadata: %w", err) } if len(res.Data) == 0 { // no state found return nil, nil } var metadata workflowStateMetadata if err = json.Unmarshal(res.Data, &metadata); err != nil { return nil, fmt.Errorf("failed to unmarshal workflow metadata: %w", err) } // Load inbox, history, and custom status using a bulk request state := NewWorkflowState(config) state.Generation = metadata.Generation state.Inbox = make([]*backend.HistoryEvent, metadata.InboxLength) state.History = make([]*backend.HistoryEvent, metadata.HistoryLength) bulkReq := &actors.GetBulkStateRequest{ ActorType: config.workflowActorType, ActorID: actorID, // Initializing with size for all the inbox, history, and custom status Keys: make([]string, metadata.InboxLength+metadata.HistoryLength+1), } var n int bulkReq.Keys[n] = customStatusKey n++ for i := 0; i < metadata.InboxLength; i++ { bulkReq.Keys[n] = getMultiEntryKeyName(inboxKeyPrefix, i) n++ } for i := 0; i < metadata.HistoryLength; i++ { bulkReq.Keys[n] = getMultiEntryKeyName(historyKeyPrefix, i) n++ } // Perform the request bulkRes, err := actorRuntime.GetBulkState(ctx, bulkReq) if err != nil { return nil, fmt.Errorf("failed to load workflow state: %w", err) } // Parse responses loadedRecords += len(bulkRes) var key string for i := 0; i < metadata.InboxLength; i++ { key = getMultiEntryKeyName(inboxKeyPrefix, i) if bulkRes[key] == nil { return nil, fmt.Errorf("failed to load inbox state key '%s': not found", key) } state.Inbox[i], err = backend.UnmarshalHistoryEvent(bulkRes[key]) if err != nil { return nil, fmt.Errorf("failed to unmarshal history event from inbox state key '%s': %w", key, err) } } for i := 0; i < metadata.HistoryLength; i++ { key = getMultiEntryKeyName(historyKeyPrefix, i) if bulkRes[key] == nil { return nil, fmt.Errorf("failed to load history state key '%s': not found", key) } state.History[i], err = backend.UnmarshalHistoryEvent(bulkRes[key]) if err != nil { return nil, fmt.Errorf("failed to unmarshal history event from history state key '%s': %w", key, err) } } if len(bulkRes[customStatusKey]) > 0 { err = json.Unmarshal(bulkRes[customStatusKey], &state.CustomStatus) if err != nil { return nil, fmt.Errorf("failed to unmarshal JSON from custom status key entry: %w", err) } } wfLogger.Infof("%s: loaded %d state records in %v", actorID, loadedRecords, time.Since(loadStartTime)) return state, nil } func (s *workflowState) GetPurgeRequest(actorID string) (*actors.TransactionalRequest, error) { req := &actors.TransactionalRequest{ ActorType: s.config.workflowActorType, ActorID: actorID, // Initial capacity should be enough to contain the entire inbox, history, and custom status + metadata Operations: 
make([]actors.TransactionalOperation, 0, len(s.Inbox)+len(s.History)+2), } // Inbox Purging if err := addPurgeStateOperations(req, inboxKeyPrefix, s.Inbox); err != nil { return nil, err } // History Purging if err := addPurgeStateOperations(req, historyKeyPrefix, s.History); err != nil { return nil, err } req.Operations = append(req.Operations, actors.TransactionalOperation{ Operation: actors.Delete, Request: actors.TransactionalDelete{Key: customStatusKey}, }, actors.TransactionalOperation{ Operation: actors.Delete, Request: actors.TransactionalDelete{Key: metadataKey}, }, ) return req, nil } func getMultiEntryKeyName(prefix string, i int) string { return fmt.Sprintf("%s-%06d", prefix, i) }
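
// Note (illustrative): the %06d padding above produces keys such as
// "inbox-000005" and "history-000012". Zero-padding keeps multi-entry keys
// lexicographically ordered, and LoadWorkflowState reconstructs them
// positionally from the InboxLength and HistoryLength recorded in the
// metadata entry.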
mikeee/dapr
pkg/runtime/wfengine/backends/actors/workflowstate.go
GO
mit
12,926
/* Copyright 2023 The Dapr Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package sqlite import ( "fmt" "github.com/microsoft/durabletask-go/backend" "github.com/microsoft/durabletask-go/backend/sqlite" wfbe "github.com/dapr/dapr/pkg/components/wfbackend" "github.com/dapr/kit/logger" ) func NewSQLiteBackend(md wfbe.Metadata, log logger.Logger) (backend.Backend, error) { sqliteMetadata := &sqliteMetadata{} err := sqliteMetadata.Parse(md.Properties) if err != nil { log.Errorf("Failed to parse SQLite backend metadata; SQLite backend is not initialized: %v", err) return nil, fmt.Errorf("failed to parse SQLite backend metadata: %w", err) } be := sqlite.NewSqliteBackend(&sqliteMetadata.SqliteOptions, log) return be, nil }
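
// Illustrative usage sketch. The "connectionString" property name is an
// assumption about sqliteMetadata (defined elsewhere in this package) and is
// not confirmed by this file:
//
//	var md wfbe.Metadata
//	md.Properties = map[string]string{"connectionString": "workflows.db"}
//	be, err := NewSQLiteBackend(md, logger.NewLogger("wfengine.backend.sqlite"))
//	if err != nil {
//		// metadata parsing failed and the backend was not initialized
//	}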
mikeee/dapr
pkg/runtime/wfengine/backends/sqlite/backend.go
GO
mit
1,233