package lock
import (
"os"
"strings"
"testing"
"github.com/stretchr/testify/require"
)
const key = "lock-key-1234567"
func TestMain(m *testing.M) {
SaveLockConfiguration("store1", map[string]string{strategyKey: strategyNone})
SaveLockConfiguration("store2", map[string]string{strategyKey: strategyAppid})
SaveLockConfiguration("store3", map[string]string{strategyKey: strategyDefault})
SaveLockConfiguration("store4", map[string]string{strings.ToUpper(strategyKey): strategyStoreName})
SaveLockConfiguration("store5", map[string]string{strategyKey: "other-fixed-prefix"})
// store6: strategyKey not set, so the default strategy should apply
SaveLockConfiguration("store6", map[string]string{})
os.Exit(m.Run())
}
func TestSaveLockConfiguration(t *testing.T) {
testIllegalKeys := []struct {
storename string
prefix string
}{
{
storename: "lockstore01",
prefix: "a||b",
},
}
for _, item := range testIllegalKeys {
err := SaveLockConfiguration(item.storename, map[string]string{
strategyKey: item.prefix,
})
require.Error(t, err)
}
}
func TestGetModifiedLockKey(t *testing.T) {
// use custom prefix key
testIllegalKeys := []struct {
storename string
prefix string
key string
}{
{
storename: "lockstore01",
prefix: "a",
key: "c||d",
},
}
for _, item := range testIllegalKeys {
err := SaveLockConfiguration(item.storename, map[string]string{
strategyKey: item.prefix,
})
require.NoError(t, err)
_, err = GetModifiedLockKey(item.key, item.storename, "")
require.Error(t, err)
}
}
func TestNonePrefix(t *testing.T) {
modifiedLockKey, _ := GetModifiedLockKey(key, "store1", "appid1")
require.Equal(t, "lock||"+key, modifiedLockKey)
}
func TestAppidPrefix(t *testing.T) {
modifiedLockKey, _ := GetModifiedLockKey(key, "store2", "appid1")
require.Equal(t, "lock||appid1||lock-key-1234567", modifiedLockKey)
}
func TestAppidPrefix_WithEmptyAppid(t *testing.T) {
modifiedLockKey, _ := GetModifiedLockKey(key, "store2", "")
require.Equal(t, "lock||lock-key-1234567", modifiedLockKey)
}
func TestDefaultPrefix(t *testing.T) {
modifiedLockKey, _ := GetModifiedLockKey(key, "store3", "appid1")
require.Equal(t, "lock||appid1||lock-key-1234567", modifiedLockKey)
}
func TestStoreNamePrefix(t *testing.T) {
key := "lock-key-1234567"
modifiedLockKey, _ := GetModifiedLockKey(key, "store4", "appid1")
require.Equal(t, "lock||store4||lock-key-1234567", modifiedLockKey)
}
func TestOtherFixedPrefix(t *testing.T) {
modifiedLockKey, _ := GetModifiedLockKey(key, "store5", "appid1")
require.Equal(t, "lock||other-fixed-prefix||lock-key-1234567", modifiedLockKey)
}
func TestLegacyPrefix(t *testing.T) {
modifiedLockKey, _ := GetModifiedLockKey(key, "store6", "appid1")
require.Equal(t, "lock||appid1||lock-key-1234567", modifiedLockKey)
}
func TestPrefix_StoreNotInitial(t *testing.T) {
// no config for store999
modifiedLockKey, _ := GetModifiedLockKey(key, "store999", "appid99")
require.Equal(t, "lock||appid99||lock-key-1234567", modifiedLockKey)
}
// ==== end of file: pkg/components/lock/lock_config_test.go (mikeee/dapr, Go, MIT) ====
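The lock_config.go implementation exercised by the tests above is not included in this dump. The sketch below is inferred purely from the assertions in those tests and is only illustrative; the identifiers lockKeySeparator and buildModifiedLockKey are assumptions, not the repository's actual names.

package lock

import (
	"fmt"
	"strings"
)

const lockKeySeparator = "||"

// buildModifiedLockKey mirrors what the tests above expect: the final key is
// "lock||<prefix>||<key>", where the prefix depends on the configured strategy
// ("none" omits the prefix, "appid" uses the app ID, "storeName" uses the store
// name, and any other non-empty value is used verbatim). Keys containing the
// separator are rejected.
func buildModifiedLockKey(prefix, key string) (string, error) {
	if strings.Contains(key, lockKeySeparator) {
		return "", fmt.Errorf("input key %q must not contain %q", key, lockKeySeparator)
	}
	if prefix == "" {
		return "lock" + lockKeySeparator + key, nil
	}
	return "lock" + lockKeySeparator + prefix + lockKeySeparator + key, nil
}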
package lock
import (
"fmt"
"strings"
"github.com/dapr/components-contrib/lock"
"github.com/dapr/dapr/pkg/components"
"github.com/dapr/kit/logger"
)
type Registry struct {
Logger logger.Logger
stores map[string]func(logger.Logger) lock.Store
}
// DefaultRegistry is the singleton registry instance.
var DefaultRegistry *Registry
func init() {
DefaultRegistry = NewRegistry()
}
func NewRegistry() *Registry {
return &Registry{
stores: make(map[string]func(logger.Logger) lock.Store),
}
}
func (r *Registry) RegisterComponent(componentFactory func(logger.Logger) lock.Store, names ...string) {
for _, name := range names {
r.stores[createFullName(name)] = componentFactory
}
}
func (r *Registry) Create(name, version, logName string) (lock.Store, error) {
if method, ok := r.getStore(name, version, logName); ok {
return method(), nil
}
return nil, fmt.Errorf("couldn't find lock store %s/%s", name, version)
}
func (r *Registry) getStore(name, version, logName string) (func() lock.Store, bool) {
nameLower := strings.ToLower(name)
versionLower := strings.ToLower(version)
factoryMethod, ok := r.stores[nameLower+"/"+versionLower]
if ok {
return r.wrapFn(factoryMethod, logName), true
}
if components.IsInitialVersion(versionLower) {
factoryMethod, ok = r.stores[nameLower]
if ok {
return r.wrapFn(factoryMethod, logName), true
}
}
return nil, false
}
func (r *Registry) wrapFn(componentFactory func(logger.Logger) lock.Store, logName string) func() lock.Store {
return func() lock.Store {
l := r.Logger
if logName != "" && l != nil {
l = l.WithFields(map[string]any{
"component": logName,
})
}
return componentFactory(l)
}
}
func createFullName(name string) string {
return strings.ToLower("lock." + name)
}
// ==== end of file: pkg/components/lock/registry.go (mikeee/dapr, Go, MIT) ====
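A minimal usage sketch for the registry above, assuming the caller has some concrete lock.Store factory to hand in; the nil-returning factory here is only a stand-in, and the name exampleLockRegistryUsage is not from the repository.

package lock

import (
	"github.com/dapr/components-contrib/lock"
	"github.com/dapr/kit/logger"
)

// exampleLockRegistryUsage registers a factory under a short name and resolves it by
// its full "lock.<name>" identifier; "v1" counts as the initial version, so the
// unversioned registration matches it.
func exampleLockRegistryUsage() (lock.Store, error) {
	r := NewRegistry()
	r.Logger = logger.NewLogger("example")
	r.RegisterComponent(func(_ logger.Logger) lock.Store {
		return nil // a real factory would construct a concrete lock.Store here
	}, "redis")
	return r.Create("lock.redis", "v1", "lockstore")
}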
package lock
import (
"strings"
"testing"
"github.com/stretchr/testify/require"
"github.com/dapr/components-contrib/lock"
"github.com/dapr/kit/logger"
)
const (
compName = "mock"
compNameV2 = "mock/v2"
fullName = "lock." + compName
)
func TestNewRegistry(t *testing.T) {
r := NewRegistry()
r.RegisterComponent(func(_ logger.Logger) lock.Store {
return nil
}, compName)
r.RegisterComponent(func(_ logger.Logger) lock.Store {
return nil
}, compNameV2)
if _, err := r.Create(fullName, "v1", ""); err != nil {
t.Fatalf("create mock store failed: %v", err)
}
if _, err := r.Create(fullName, "v2", ""); err != nil {
t.Fatalf("create mock store failed: %v", err)
}
if _, err := r.Create("not exists", "v1", ""); !strings.Contains(err.Error(), "couldn't find lock store") {
t.Fatalf("create mock store failed: %v", err)
}
}
func TestAliasing(t *testing.T) {
const alias = "my-alias"
r := NewRegistry()
r.RegisterComponent(func(_ logger.Logger) lock.Store {
return nil
}, "", alias)
_, err := r.Create("lock."+alias, "", "")
require.NoError(t, err)
}
// ==== end of file: pkg/components/lock/registry_test.go (mikeee/dapr, Go, MIT) ====
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package http
import (
"fmt"
"strings"
contribmiddleware "github.com/dapr/components-contrib/middleware"
"github.com/dapr/dapr/pkg/components"
"github.com/dapr/dapr/pkg/middleware"
"github.com/dapr/kit/logger"
)
type (
// Registry is used by callers to get registered HTTP middleware.
Registry struct {
Logger logger.Logger
middleware map[string]func(logger.Logger) FactoryMethod
}
// FactoryMethod is the method creating middleware from metadata.
FactoryMethod func(metadata contribmiddleware.Metadata) (middleware.HTTP, error)
)
// DefaultRegistry is the singleton registry instance.
var DefaultRegistry *Registry
func init() {
DefaultRegistry = NewRegistry()
}
// NewRegistry returns a new HTTP middleware registry.
func NewRegistry() *Registry {
return &Registry{
middleware: map[string]func(logger.Logger) FactoryMethod{},
}
}
// RegisterComponent adds a new HTTP middleware to the registry.
func (p *Registry) RegisterComponent(componentFactory func(logger.Logger) FactoryMethod, names ...string) {
for _, name := range names {
p.middleware[createFullName(name)] = componentFactory
}
}
// Create instantiates an HTTP middleware based on `name`.
func (p *Registry) Create(name, version string, metadata contribmiddleware.Metadata, logName string) (middleware.HTTP, error) {
if method, ok := p.getMiddleware(name, version, logName); ok {
mid, err := method(metadata)
if err != nil {
return nil, fmt.Errorf("error creating HTTP middleware %s/%s: %w", name, version, err)
}
return mid, nil
}
return nil, fmt.Errorf("HTTP middleware %s/%s has not been registered", name, version)
}
func (p *Registry) getMiddleware(name, version, logName string) (FactoryMethod, bool) {
nameLower := strings.ToLower(name)
versionLower := strings.ToLower(version)
middlewareFn, ok := p.middleware[nameLower+"/"+versionLower]
if ok {
return p.applyLogger(middlewareFn, logName), true
}
if components.IsInitialVersion(versionLower) {
middlewareFn, ok = p.middleware[nameLower]
if ok {
return p.applyLogger(middlewareFn, logName), true
}
}
return nil, false
}
func (p *Registry) applyLogger(componentFactory func(logger.Logger) FactoryMethod, logName string) FactoryMethod {
l := p.Logger
if logName != "" && l != nil {
l = l.WithFields(map[string]any{
"component": logName,
})
}
return componentFactory(l)
}
func createFullName(name string) string {
return strings.ToLower("middleware.http." + name)
}
// ==== end of file: pkg/components/middleware/http/registry.go (mikeee/dapr, Go, MIT) ====
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package http_test
import (
"fmt"
nethttp "net/http"
"reflect"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
h "github.com/dapr/components-contrib/middleware"
"github.com/dapr/dapr/pkg/components/middleware/http"
"github.com/dapr/dapr/pkg/middleware"
"github.com/dapr/kit/logger"
)
func TestRegistry(t *testing.T) {
testRegistry := http.NewRegistry()
t.Run("middleware is registered", func(t *testing.T) {
const (
middlewareName = "mockMiddleware"
middlewareNameV2 = "mockMiddleware/v2"
componentName = "middleware.http." + middlewareName
)
// Initiate mock object
var mock middleware.HTTP
var mockV2 middleware.HTTP
mock = func(next nethttp.Handler) nethttp.Handler {
return nil
}
mockV2 = func(next nethttp.Handler) nethttp.Handler {
return nil
}
metadata := h.Metadata{}
// act
testRegistry.RegisterComponent(func(_ logger.Logger) http.FactoryMethod {
return func(h.Metadata) (middleware.HTTP, error) {
return mock, nil
}
}, middlewareName)
testRegistry.RegisterComponent(func(_ logger.Logger) http.FactoryMethod {
return func(h.Metadata) (middleware.HTTP, error) {
return mockV2, nil
}
}, middlewareNameV2)
// Function values are not comparable with ==, so the assertions below
// compare the reflect.Value of each middleware instead.
// assert v0 and v1
p, e := testRegistry.Create(componentName, "v0", metadata, "")
require.NoError(t, e)
assert.Equal(t, reflect.ValueOf(mock), reflect.ValueOf(p))
p, e = testRegistry.Create(componentName, "v1", metadata, "")
require.NoError(t, e)
assert.Equal(t, reflect.ValueOf(mock), reflect.ValueOf(p))
// assert v2
pV2, e := testRegistry.Create(componentName, "v2", metadata, "")
require.NoError(t, e)
assert.Equal(t, reflect.ValueOf(mockV2), reflect.ValueOf(pV2))
// check case-insensitivity
pV2, e = testRegistry.Create(strings.ToUpper(componentName), "V2", metadata, "")
require.NoError(t, e)
assert.Equal(t, reflect.ValueOf(mockV2), reflect.ValueOf(pV2))
})
t.Run("middleware is not registered", func(t *testing.T) {
const (
middlewareName = "fakeMiddleware"
componentName = "middleware.http." + middlewareName
)
metadata := h.Metadata{}
// act
p, actualError := testRegistry.Create(componentName, "v1", metadata, "")
expectedError := fmt.Errorf("HTTP middleware %s/v1 has not been registered", componentName)
// assert
assert.Nil(t, p)
assert.Equal(t, expectedError.Error(), actualError.Error())
})
}
// ==== end of file: pkg/components/middleware/http/registry_test.go (mikeee/dapr, Go, MIT) ====
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nameresolution
import (
"fmt"
"strings"
nr "github.com/dapr/components-contrib/nameresolution"
"github.com/dapr/dapr/pkg/components"
"github.com/dapr/kit/logger"
)
type (
FactoryMethod func(logger.Logger) nr.Resolver
// Registry handles registering and creating name resolution components.
Registry struct {
Logger logger.Logger
resolvers map[string]FactoryMethod
}
)
// DefaultRegistry is the singleton registry instance.
var DefaultRegistry *Registry
func init() {
DefaultRegistry = NewRegistry()
}
// NewRegistry creates a name resolution registry.
func NewRegistry() *Registry {
return &Registry{
resolvers: map[string]FactoryMethod{},
}
}
// RegisterComponent adds a name resolver to the registry.
func (s *Registry) RegisterComponent(componentFactory FactoryMethod, names ...string) {
for _, name := range names {
s.resolvers[createFullName(name)] = componentFactory
}
}
// Create instantiates a name resolution resolver based on `name`.
func (s *Registry) Create(name, version, logName string) (nr.Resolver, error) {
if method, ok := s.getResolver(createFullName(name), version, logName); ok {
return method(), nil
}
return nil, fmt.Errorf("couldn't find name resolver %s/%s", name, version)
}
func (s *Registry) getResolver(name, version, logName string) (func() nr.Resolver, bool) {
if s.resolvers == nil {
return nil, false
}
nameLower := strings.ToLower(name)
versionLower := strings.ToLower(version)
resolverFn, ok := s.resolvers[nameLower+"/"+versionLower]
if ok {
return s.wrapFn(resolverFn, logName), true
}
if components.IsInitialVersion(versionLower) {
resolverFn, ok = s.resolvers[nameLower]
if ok {
return s.wrapFn(resolverFn, logName), true
}
}
return nil, false
}
func (s *Registry) wrapFn(componentFactory FactoryMethod, logName string) func() nr.Resolver {
return func() nr.Resolver {
l := s.Logger
if logName != "" && l != nil {
l = l.WithFields(map[string]any{
"component": logName,
})
}
return componentFactory(l)
}
}
func createFullName(name string) string {
return strings.ToLower("nameresolution." + name)
}
// ==== end of file: pkg/components/nameresolution/registry.go (mikeee/dapr, Go, MIT) ====
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nameresolution_test
import (
"fmt"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
nr "github.com/dapr/components-contrib/nameresolution"
"github.com/dapr/dapr/pkg/components/nameresolution"
"github.com/dapr/kit/logger"
)
type mockResolver struct {
nr.Resolver
}
func TestRegistry(t *testing.T) {
testRegistry := nameresolution.NewRegistry()
t.Run("name resolver is registered", func(t *testing.T) {
const (
resolverName = "mockResolver"
resolverNameV2 = "mockResolver/v2"
)
// Initiate mock object
mock := &mockResolver{}
mockV2 := &mockResolver{}
// act
testRegistry.RegisterComponent(func(_ logger.Logger) nr.Resolver {
return mock
}, resolverName)
testRegistry.RegisterComponent(func(_ logger.Logger) nr.Resolver {
return mockV2
}, resolverNameV2)
// assert v0 and v1
p, e := testRegistry.Create(resolverName, "v0", "")
require.NoError(t, e)
assert.Same(t, mock, p)
p, e = testRegistry.Create(resolverName, "v1", "")
require.NoError(t, e)
assert.Same(t, mock, p)
// assert v2
pV2, e := testRegistry.Create(resolverName, "v2", "")
require.NoError(t, e)
assert.Same(t, mockV2, pV2)
// check case-insensitivity
pV2, e = testRegistry.Create(strings.ToUpper(resolverName), "V2", "")
require.NoError(t, e)
assert.Same(t, mockV2, pV2)
})
t.Run("name resolver is not registered", func(t *testing.T) {
const (
resolverName = "fakeResolver"
)
// act
p, actualError := testRegistry.Create(resolverName, "v1", "")
expectedError := fmt.Errorf("couldn't find name resolver %s/v1", resolverName)
// assert
assert.Nil(t, p)
assert.Equal(t, expectedError.Error(), actualError.Error())
})
}
// ==== end of file: pkg/components/nameresolution/registry_test.go (mikeee/dapr, Go, MIT) ====
/*
Copyright 2022 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pluggable
import (
"context"
"fmt"
"os"
"path/filepath"
"github.com/jhump/protoreflect/grpcreflect"
"google.golang.org/grpc"
reflectpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
"github.com/dapr/dapr/utils"
"github.com/dapr/kit/logger"
)
var (
discoveryLog = logger.NewLogger("pluggable-components-discovery")
onServiceDiscovered map[string]func(name string, dialer GRPCConnectionDialer)
)
func init() {
onServiceDiscovered = make(map[string]func(name string, dialer GRPCConnectionDialer))
}
// AddServiceDiscoveryCallback adds a callback function that is called when the given service is discovered.
func AddServiceDiscoveryCallback(serviceName string, callbackFunc func(name string, dialer GRPCConnectionDialer)) {
onServiceDiscovered[serviceName] = callbackFunc
}
// removeExt removes file extension
func removeExt(fileName string) string {
return fileName[:len(fileName)-len(filepath.Ext(fileName))]
}
const (
SocketFolderEnvVar = "DAPR_COMPONENTS_SOCKETS_FOLDER"
defaultSocketFolder = "/tmp/dapr-components-sockets"
)
// GetSocketFolderPath returns the shared unix domain socket folder path
func GetSocketFolderPath() string {
return utils.GetEnvOrElse(SocketFolderEnvVar, defaultSocketFolder)
}
type service struct {
// protoRef is the proto service name
protoRef string
// componentName is the name of the component that implements this service.
componentName string
// dialer is the gRPC connection dialer used for this component.
dialer GRPCConnectionDialer
}
type reflectServiceClient interface {
ListServices() ([]string, error)
Reset()
}
type grpcConnectionCloser interface {
grpc.ClientConnInterface
Close() error
}
// serviceDiscovery returns all discovered pluggable component services.
// It uses the gRPC reflection package to list the services each component implements.
func serviceDiscovery(reflectClientFactory func(string) (reflectServiceClient, func(), error)) ([]service, error) {
services := []service{}
componentsSocketPath := GetSocketFolderPath()
_, err := os.Stat(componentsSocketPath)
if os.IsNotExist(err) { // not exists is the same as empty.
return services, nil
}
log.Debugf("loading pluggable components under path %s", componentsSocketPath)
if err != nil {
return nil, err
}
files, err := os.ReadDir(componentsSocketPath)
if err != nil {
return nil, fmt.Errorf("could not list pluggable components unix sockets: %w", err)
}
for _, dirEntry := range files {
if dirEntry.IsDir() { // skip dirs
continue
}
f, err := dirEntry.Info()
if err != nil {
return nil, err
}
socket := filepath.Join(componentsSocketPath, f.Name())
if !utils.IsSocket(f) {
discoveryLog.Warnf("could not use socket for file %s", socket)
continue
}
refctClient, cleanup, err := reflectClientFactory(socket)
if err != nil {
return nil, err
}
defer cleanup()
serviceList, err := refctClient.ListServices()
if err != nil {
return nil, fmt.Errorf("unable to list services: %w", err)
}
dialer := socketDialer(socket, grpc.WithBlock(), grpc.FailOnNonTempDialError(true))
componentName := removeExt(f.Name())
for _, svc := range serviceList {
services = append(services, service{
componentName: componentName,
protoRef: svc,
dialer: dialer,
})
}
}
log.Debugf("found %d pluggable component services", len(services)-1) // reflection api doesn't count.
return services, nil
}
// callback invokes the registered callback function for each discovered service.
func callback(services []service) {
for _, service := range services {
callback, ok := onServiceDiscovered[service.protoRef]
if !ok { // ignoring unknown service
continue
}
callback(service.componentName, service.dialer)
log.Infof("pluggable component '%s' was successfully registered for '%s'", service.componentName, service.protoRef)
}
}
// reflectServiceConnectionCloser is used to clean up the stream created for the reflection service.
func reflectServiceConnectionCloser(conn grpcConnectionCloser, client reflectServiceClient) func() {
return func() {
client.Reset()
conn.Close()
}
}
// Discover discovers the pluggable components and invokes the registered service discovery callbacks with each component name and gRPC dialer.
func Discover(ctx context.Context) error {
services, err := serviceDiscovery(func(socket string) (reflectServiceClient, func(), error) {
conn, err := SocketDial(
ctx,
socket,
grpc.WithBlock(),
)
if err != nil {
return nil, nil, err
}
client := grpcreflect.NewClientV1Alpha(ctx, reflectpb.NewServerReflectionClient(conn))
return client, reflectServiceConnectionCloser(conn, client), nil
})
if err != nil {
return err
}
callback(services)
return nil
}
// ==== end of file: pkg/components/pluggable/discovery.go (mikeee/dapr, Go, MIT) ====
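A sketch of how a component registry typically hooks into the discovery flow above, assuming the caller supplies its own registration function; the wiring the Dapr runtime actually uses for pub/sub appears later in pkg/components/pubsub/pluggable.go. The service name string below is only indicative, since the real value comes from the generated proto service descriptor.

package pluggable

import "context"

// exampleDiscoveryWiring registers a callback keyed on a proto service name and then
// scans the socket folder; Discover invokes the callback once per component whose
// reflection listing includes that service.
func exampleDiscoveryWiring(ctx context.Context, register func(name string, dialer GRPCConnectionDialer)) error {
	AddServiceDiscoveryCallback("dapr.proto.components.v1.PubSub", register)
	return Discover(ctx)
}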
/*
Copyright 2022 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pluggable
import (
"errors"
"net"
"os"
"runtime"
"sync/atomic"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type fakeReflectService struct {
listServicesCalled atomic.Int64
listServicesResp []string
listServicesErr error
onResetCalled func()
}
func (f *fakeReflectService) ListServices() ([]string, error) {
f.listServicesCalled.Add(1)
return f.listServicesResp, f.listServicesErr
}
func (f *fakeReflectService) Reset() {
f.onResetCalled()
}
type fakeGrpcCloser struct {
grpcConnectionCloser
onCloseCalled func()
}
func (f *fakeGrpcCloser) Close() error {
f.onCloseCalled()
return nil
}
func TestServiceCallback(t *testing.T) {
t.Run("callback should be called when service ref is registered", func(t *testing.T) {
const fakeComponentName, fakeServiceName = "fake-comp", "fake-svc"
called := 0
AddServiceDiscoveryCallback(fakeServiceName, func(name string, _ GRPCConnectionDialer) {
called++
assert.Equal(t, fakeComponentName, name)
})
callback([]service{{protoRef: fakeServiceName, componentName: fakeComponentName}})
assert.Equal(t, 1, called)
})
}
func TestConnectionCloser(t *testing.T) {
t.Run("connection closer should call grpc close and client reset", func(t *testing.T) {
const close, reset = "close", "reset"
callOrder := []string{}
fakeCloser := &fakeGrpcCloser{
onCloseCalled: func() {
callOrder = append(callOrder, close)
},
}
fakeService := &fakeReflectService{
onResetCalled: func() {
callOrder = append(callOrder, reset)
},
}
closer := reflectServiceConnectionCloser(fakeCloser, fakeService)
closer()
assert.Len(t, callOrder, 2)
assert.Equal(t, []string{reset, close}, callOrder)
})
}
func TestComponentDiscovery(t *testing.T) {
if runtime.GOOS == "windows" {
return
}
t.Run("add service callback should add a new entry when called", func(t *testing.T) {
AddServiceDiscoveryCallback("fake", func(string, GRPCConnectionDialer) {})
assert.NotEmpty(t, onServiceDiscovered)
})
t.Run("serviceDiscovery should return empty services if directory not exists", func(t *testing.T) {
services, err := serviceDiscovery(func(string) (reflectServiceClient, func(), error) {
return &fakeReflectService{}, func() {}, nil
})
require.NoError(t, err)
assert.Empty(t, services)
})
t.Run("serviceDiscovery should not connect to service that isn't a unix domain socket", func(t *testing.T) {
const fakeSocketFolder, pattern = "/tmp/test", "fake"
err := os.MkdirAll(fakeSocketFolder, os.ModePerm)
defer os.RemoveAll(fakeSocketFolder)
require.NoError(t, err)
t.Setenv(SocketFolderEnvVar, fakeSocketFolder)
_, err = os.CreateTemp(fakeSocketFolder, pattern)
require.NoError(t, err)
services, err := serviceDiscovery(func(string) (reflectServiceClient, func(), error) {
return &fakeReflectService{}, func() {}, nil
})
require.NoError(t, err)
assert.Empty(t, services)
})
t.Run("serviceDiscovery should return an error when reflect client factory returns an error", func(t *testing.T) {
const fakeSocketFolder = "/tmp/test"
err := os.MkdirAll(fakeSocketFolder, os.ModePerm)
defer os.RemoveAll(fakeSocketFolder)
require.NoError(t, err)
t.Setenv(SocketFolderEnvVar, fakeSocketFolder)
const fileName = fakeSocketFolder + "/socket1234.sock"
listener, err := net.Listen("unix", fileName)
require.NoError(t, err)
defer listener.Close()
reflectService := &fakeReflectService{}
_, err = serviceDiscovery(func(string) (reflectServiceClient, func(), error) {
return nil, nil, errors.New("fake-err")
})
require.Error(t, err)
assert.Equal(t, int64(0), reflectService.listServicesCalled.Load())
})
t.Run("serviceDiscovery should return an error when list services return an error", func(t *testing.T) {
const fakeSocketFolder = "/tmp/test"
err := os.MkdirAll(fakeSocketFolder, os.ModePerm)
defer os.RemoveAll(fakeSocketFolder)
require.NoError(t, err)
t.Setenv(SocketFolderEnvVar, fakeSocketFolder)
const fileName = fakeSocketFolder + "/socket1234.sock"
listener, err := net.Listen("unix", fileName)
require.NoError(t, err)
defer listener.Close()
reflectService := &fakeReflectService{
listServicesErr: errors.New("fake-err"),
}
_, err = serviceDiscovery(func(string) (reflectServiceClient, func(), error) {
return reflectService, func() {}, nil
})
require.Error(t, err)
assert.Equal(t, int64(1), reflectService.listServicesCalled.Load())
})
t.Run("serviceDiscovery should return all services list", func(t *testing.T) {
const fakeSocketFolder = "/tmp/test"
err := os.MkdirAll(fakeSocketFolder, os.ModePerm)
defer os.RemoveAll(fakeSocketFolder)
require.NoError(t, err)
t.Setenv(SocketFolderEnvVar, fakeSocketFolder)
subFolder := fakeSocketFolder + "/subfolder"
err = os.MkdirAll(subFolder, os.ModePerm) // should skip subfolders
defer os.RemoveAll(subFolder)
require.NoError(t, err)
const fileName = fakeSocketFolder + "/socket1234.sock"
listener, err := net.Listen("unix", fileName)
require.NoError(t, err)
defer listener.Close()
svcList := []string{"svcA", "svcB"}
reflectService := &fakeReflectService{
listServicesResp: svcList,
}
services, err := serviceDiscovery(func(string) (reflectServiceClient, func(), error) {
return reflectService, func() {}, nil
})
require.NoError(t, err)
assert.Len(t, services, len(svcList))
assert.Equal(t, int64(1), reflectService.listServicesCalled.Load())
})
}
func TestRemoveExt(t *testing.T) {
t.Run("remove ext should remove file extension when it has one", func(t *testing.T) {
assert.Equal(t, "a", removeExt("a.sock"))
})
t.Run("remove ext should not change file name when it has no extension", func(t *testing.T) {
assert.Equal(t, "a", removeExt("a"))
})
}
func TestGetSocketFolder(t *testing.T) {
t.Run("get socket folder should use default when env var is not set", func(t *testing.T) {
assert.Equal(t, defaultSocketFolder, GetSocketFolderPath())
})
t.Run("get socket folder should use env var when set", func(t *testing.T) {
const fakeSocketFolder = "/tmp"
t.Setenv(SocketFolderEnvVar, fakeSocketFolder)
assert.Equal(t, fakeSocketFolder, GetSocketFolderPath())
})
}
// ==== end of file: pkg/components/pluggable/discovery_test.go (mikeee/dapr, Go, MIT) ====
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pluggable
import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
type ErrorConverter func(status.Status) error
// Compose composes two error converters: the inner one is applied first and, if the error was not converted (it is still a gRPC status error), the outer one is applied to the result.
func (outer ErrorConverter) Compose(inner ErrorConverter) ErrorConverter {
return func(s status.Status) error {
err := inner(s)
st, ok := status.FromError(err)
if ok {
return outer(*st)
}
return err
}
}
// MethodErrorConverter is a map from a gRPC status code to a converter that produces a domain-level error.
type MethodErrorConverter map[codes.Code]ErrorConverter
func (m MethodErrorConverter) Merge(other MethodErrorConverter) MethodErrorConverter {
n := MethodErrorConverter{}
for k, v := range m {
n[k] = v
}
for k, v := range other {
converter, ok := n[k]
if !ok {
n[k] = v
} else { // compose the converters when both maps define one for the same gRPC status code.
n[k] = converter.Compose(v)
}
}
return n
}
// ==== end of file: pkg/components/pluggable/errors.go (mikeee/dapr, Go, MIT) ====
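A sketch of how a MethodErrorConverter is typically populated, assuming a hypothetical domain error; NewConverterFunc, defined further down in grpc.go, turns such a map into a plain func(error) error that components can apply to gRPC call results.

package pluggable

import (
	"errors"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// errExampleNotFound is a hypothetical domain-level error used only for illustration.
var errExampleNotFound = errors.New("resource not found")

// exampleConverters maps a gRPC status code to a converter producing the domain error;
// maps like this can be layered with Merge, which composes converters sharing a code.
var exampleConverters = MethodErrorConverter{
	codes.NotFound: func(s status.Status) error {
		return errExampleNotFound
	},
}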
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pluggable
import (
"errors"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
func TestComposeErrorsConverters(t *testing.T) {
t.Run("compose should not call outer function when the error was converted", func(t *testing.T) {
outerCalled := 0
outer := ErrorConverter(func(s status.Status) error {
outerCalled++
return s.Err()
})
innerCalled := 0
inner := ErrorConverter(func(s status.Status) error {
innerCalled++
return errors.New("")
})
composed := outer.Compose(inner)
err := composed(*status.New(codes.Unknown, ""))
require.Error(t, err)
assert.Equal(t, 0, outerCalled)
assert.Equal(t, 1, innerCalled)
})
t.Run("compose should call outer function when the error was not converted", func(t *testing.T) {
outerCalled := 0
outer := ErrorConverter(func(s status.Status) error {
outerCalled++
return errors.New("my-new-err")
})
innerCalled := 0
inner := ErrorConverter(func(s status.Status) error {
innerCalled++
return s.Err()
})
composed := outer.Compose(inner)
err := composed(*status.New(codes.Unknown, ""))
require.Error(t, err)
assert.Equal(t, 1, outerCalled)
assert.Equal(t, 1, innerCalled)
})
}
func TestErrorsMerge(t *testing.T) {
t.Run("merge should compose errors with the same grpc code", func(t *testing.T) {
outerCalled := 0
errors1 := MethodErrorConverter{
codes.Canceled: func(s status.Status) error {
outerCalled++
return s.Err()
},
}
innerCalled := 0
errors2 := MethodErrorConverter{
codes.Canceled: func(s status.Status) error {
innerCalled++
return s.Err()
},
}
merged := errors1.Merge(errors2)
assert.Len(t, merged, 1)
f, ok := merged[codes.Canceled]
assert.True(t, ok)
err := f(*status.New(codes.Unknown, ""))
require.Error(t, err)
assert.Equal(t, 1, innerCalled)
assert.Equal(t, 1, outerCalled)
})
}
// ==== end of file: pkg/components/pluggable/errors_test.go (mikeee/dapr, Go, MIT) ====
/*
Copyright 2022 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pluggable
import (
"context"
"fmt"
"github.com/dapr/kit/logger"
proto "github.com/dapr/dapr/pkg/proto/components/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
var log = logger.NewLogger("pluggable-components-grpc-connector")
// GRPCClient is any client that supports common pluggable grpc operations.
type GRPCClient interface {
// Ping is for liveness purposes.
Ping(ctx context.Context, in *proto.PingRequest, opts ...grpc.CallOption) (*proto.PingResponse, error)
}
// NewConverterFunc returns a function that maps any error to a business error.
// If the error is not a recognized gRPC status, or no converter is registered for its code, it is returned as is; otherwise the matching converter is applied.
func NewConverterFunc(errorsConverters MethodErrorConverter) func(error) error {
return func(err error) error {
s, ok := status.FromError(err)
if !ok {
return err
}
convert, ok := errorsConverters[s.Code()]
if !ok {
return err
}
return convert(*s)
}
}
type GRPCConnectionDialer func(ctx context.Context, name string, opts ...grpc.DialOption) (*grpc.ClientConn, error)
// WithOptions returns a new connection dialer that adds the new options to it.
func (g GRPCConnectionDialer) WithOptions(newOpts ...grpc.DialOption) GRPCConnectionDialer {
return func(ctx context.Context, name string, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
return g(ctx, name, append(opts, newOpts...)...)
}
}
// GRPCConnector is a connector that uses underlying gRPC protocol for common operations.
type GRPCConnector[TClient GRPCClient] struct {
// Context is the component shared context
Context context.Context
// Cancel is used for cancelling inflight requests
Cancel context.CancelFunc
// Client is the proto client.
Client TClient
dialer GRPCConnectionDialer
conn *grpc.ClientConn
clientFactory func(grpc.ClientConnInterface) TClient
}
// metadataInstanceID is used to differentiate between multiple instances of the same component.
const metadataInstanceID = "x-component-instance"
// instanceIDUnaryInterceptor returns a gRPC client unary interceptor that adds the instanceID to outgoing metadata.
// instanceID is used for multiplexing connections if the component supports it.
func instanceIDUnaryInterceptor(instanceID string) grpc.UnaryClientInterceptor {
return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
return invoker(metadata.AppendToOutgoingContext(ctx, metadataInstanceID, instanceID), method, req, reply, cc, opts...)
}
}
// instanceIDStreamInterceptor returns a gRPC client stream interceptor that adds the instanceID to outgoing metadata.
// instanceID is used for multiplexing connections if the component supports it.
func instanceIDStreamInterceptor(instanceID string) grpc.StreamClientInterceptor {
return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
return streamer(metadata.AppendToOutgoingContext(ctx, metadataInstanceID, instanceID), desc, cc, method, opts...)
}
}
// socketDialer creates a dialer for the given socket.
func socketDialer(socket string, additionalOpts ...grpc.DialOption) GRPCConnectionDialer {
return func(ctx context.Context, name string, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
additionalOpts = append(additionalOpts, grpc.WithStreamInterceptor(instanceIDStreamInterceptor(name)), grpc.WithUnaryInterceptor(instanceIDUnaryInterceptor(name)))
return SocketDial(ctx, socket, append(additionalOpts, opts...)...)
}
}
// SocketDial creates a grpc connection using the given socket.
func SocketDial(ctx context.Context, socket string, additionalOpts ...grpc.DialOption) (*grpc.ClientConn, error) {
udsSocket := "unix://" + socket
log.Debugf("using socket defined at '%s'", udsSocket)
additionalOpts = append(additionalOpts, grpc.WithTransportCredentials(insecure.NewCredentials()))
grpcConn, err := grpc.DialContext(ctx, udsSocket, additionalOpts...)
if err != nil {
return nil, fmt.Errorf("unable to open GRPC connection using socket '%s': %w", udsSocket, err)
}
return grpcConn, nil
}
// Dial opens a grpcConnection and creates a new client instance.
func (g *GRPCConnector[TClient]) Dial(name string) error {
grpcConn, err := g.dialer(g.Context, name)
if err != nil {
return fmt.Errorf("unable to open GRPC connection using the dialer: %w", err)
}
g.conn = grpcConn
g.Client = g.clientFactory(grpcConn)
return nil
}
// Ping pings the grpc component.
// It uses "WaitForReady" to avoid failing on transient errors.
func (g *GRPCConnector[TClient]) Ping() error {
_, err := g.Client.Ping(g.Context, &proto.PingRequest{}, grpc.WaitForReady(true))
return err
}
// Close closes the underlying gRPC connection and cancels all in-flight requests.
func (g *GRPCConnector[TClient]) Close() error {
g.Cancel()
return g.conn.Close()
}
// NewGRPCConnectorWithDialer creates a new grpc connector for the given client factory and dialer.
func NewGRPCConnectorWithDialer[TClient GRPCClient](dialer GRPCConnectionDialer, factory func(grpc.ClientConnInterface) TClient) *GRPCConnector[TClient] {
ctx, cancel := context.WithCancel(context.Background())
return &GRPCConnector[TClient]{
Context: ctx,
Cancel: cancel,
dialer: dialer,
clientFactory: factory,
}
}
// NewGRPCConnector creates a new grpc connector for the given client factory and socket file, using the default socket dialer.
func NewGRPCConnector[TClient GRPCClient](socket string, factory func(grpc.ClientConnInterface) TClient) *GRPCConnector[TClient] {
return NewGRPCConnectorWithDialer(socketDialer(socket), factory)
}
// ==== end of file: pkg/components/pluggable/grpc.go (mikeee/dapr, Go, MIT) ====
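A minimal usage sketch for the connector above, reusing the generated proto.NewPubSubClient factory that appears elsewhere in this dump; error handling is reduced to the essentials and the function name is not from the repository.

package pluggable

import (
	proto "github.com/dapr/dapr/pkg/proto/components/v1"
)

// exampleConnectorUsage dials the component's Unix Domain Socket, pings it for
// liveness, and closes the connection (which also cancels in-flight requests).
func exampleConnectorUsage(socket, componentName string) error {
	connector := NewGRPCConnector(socket, proto.NewPubSubClient)
	if err := connector.Dial(componentName); err != nil {
		return err
	}
	defer connector.Close()
	return connector.Ping()
}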
/*
Copyright 2022 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pluggable
import (
"context"
"fmt"
"net"
"os"
"runtime"
"sync/atomic"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
proto "github.com/dapr/dapr/pkg/proto/components/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/metadata"
"google.golang.org/protobuf/types/known/structpb"
)
type fakeClient struct {
pingCalled atomic.Int64
}
func (f *fakeClient) Ping(context.Context, *proto.PingRequest, ...grpc.CallOption) (*proto.PingResponse, error) {
f.pingCalled.Add(1)
return &proto.PingResponse{}, nil
}
type fakeSvc struct {
onHandlerCalled func(context.Context)
}
func (f *fakeSvc) handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
f.onHandlerCalled(ctx)
return structpb.NewNullValue(), nil
}
func TestGRPCConnector(t *testing.T) {
// gRPC pluggable components require a Unix Domain Socket to work, so this test is skipped when running on Windows.
if runtime.GOOS == "windows" {
return
}
t.Run("invoke method should contain component name as request metadata", func(t *testing.T) {
const (
fakeSvcName = "dapr.my.service.fake"
fakeMethodName = "MyMethod"
componentName = "my-fake-component"
)
handlerCalled := 0
fakeSvc := &fakeSvc{
onHandlerCalled: func(ctx context.Context) {
handlerCalled++
md, ok := metadata.FromIncomingContext(ctx)
assert.True(t, ok)
v := md.Get(metadataInstanceID)
require.NotEmpty(t, v)
assert.Equal(t, componentName, v[0])
},
}
fakeFactoryCalled := 0
clientFake := &fakeClient{}
fakeFactory := func(grpc.ClientConnInterface) *fakeClient {
fakeFactoryCalled++
return clientFake
}
const fakeSocketPath = "/tmp/socket.sock"
os.RemoveAll(fakeSocketPath) // guarantee that is not being used.
defer os.RemoveAll(fakeSocketPath)
listener, err := net.Listen("unix", fakeSocketPath)
require.NoError(t, err)
defer listener.Close()
connector := NewGRPCConnectorWithDialer(socketDialer(fakeSocketPath, grpc.WithBlock()), fakeFactory)
defer connector.Close()
s := grpc.NewServer()
fakeDesc := &grpc.ServiceDesc{
ServiceName: fakeSvcName,
HandlerType: (*interface{})(nil),
Methods: []grpc.MethodDesc{{
MethodName: fakeMethodName,
Handler: fakeSvc.handler,
}},
}
s.RegisterService(fakeDesc, fakeSvc)
go func() {
s.Serve(listener)
s.Stop()
}()
require.NoError(t, connector.Dial(componentName))
acceptedStatus := []connectivity.State{
connectivity.Ready,
connectivity.Idle,
}
assert.Contains(t, acceptedStatus, connector.conn.GetState())
assert.Equal(t, 1, fakeFactoryCalled)
require.NoError(t, connector.conn.Invoke(context.Background(), fmt.Sprintf("/%s/%s", fakeSvcName, fakeMethodName), structpb.NewNullValue(), structpb.NewNullValue()))
assert.Equal(t, 1, handlerCalled)
})
t.Run("grpc connection should be idle or ready when the process is listening to the socket due to withblock usage", func(t *testing.T) {
fakeFactoryCalled := 0
clientFake := &fakeClient{}
fakeFactory := func(grpc.ClientConnInterface) *fakeClient {
fakeFactoryCalled++
return clientFake
}
const fakeSocketPath = "/tmp/socket.sock"
os.RemoveAll(fakeSocketPath) // guarantee that is not being used.
defer os.RemoveAll(fakeSocketPath)
listener, err := net.Listen("unix", fakeSocketPath)
require.NoError(t, err)
defer listener.Close()
connector := NewGRPCConnectorWithDialer(socketDialer(fakeSocketPath, grpc.WithBlock(), grpc.FailOnNonTempDialError(true)), fakeFactory)
defer connector.Close()
go func() {
s := grpc.NewServer()
s.Serve(listener)
s.Stop()
}()
require.NoError(t, connector.Dial(""))
acceptedStatus := []connectivity.State{
connectivity.Ready,
connectivity.Idle,
}
assert.Contains(t, acceptedStatus, connector.conn.GetState())
assert.Equal(t, 1, fakeFactoryCalled)
assert.Equal(t, int64(0), clientFake.pingCalled.Load())
})
t.Run("grpc connection should be ready when socket is listening", func(t *testing.T) {
fakeFactoryCalled := 0
clientFake := &fakeClient{}
fakeFactory := func(grpc.ClientConnInterface) *fakeClient {
fakeFactoryCalled++
return clientFake
}
const fakeSocketPath = "/tmp/socket.sock"
os.RemoveAll(fakeSocketPath) // guarantee that is not being used.
defer os.RemoveAll(fakeSocketPath)
connector := NewGRPCConnector(fakeSocketPath, fakeFactory)
listener, err := net.Listen("unix", fakeSocketPath)
require.NoError(t, err)
defer listener.Close()
require.NoError(t, connector.Dial(""))
defer connector.Close()
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
assert.True(t, connector.conn.WaitForStateChange(ctx, connectivity.Idle))
// could be in a transient failure for short time window.
if connector.conn.GetState() == connectivity.TransientFailure {
assert.True(t, connector.conn.WaitForStateChange(ctx, connectivity.TransientFailure))
}
// https://grpc.github.io/grpc/core/md_doc_connectivity-semantics-and-api.html
notAcceptedStatus := []connectivity.State{
connectivity.TransientFailure,
connectivity.Idle,
connectivity.Shutdown,
}
assert.NotContains(t, notAcceptedStatus, connector.conn.GetState())
})
}
// ==== end of file: pkg/components/pluggable/grpc_test.go (mikeee/dapr, Go, MIT) ====
/*
Copyright 2022 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pubsub
import (
"context"
"errors"
"fmt"
"io"
"sync"
"github.com/dapr/components-contrib/pubsub"
"github.com/dapr/dapr/pkg/components/pluggable"
proto "github.com/dapr/dapr/pkg/proto/components/v1"
"github.com/dapr/kit/logger"
)
// grpcPubSub is an implementation of a pubsub over the gRPC protocol.
type grpcPubSub struct {
*pluggable.GRPCConnector[proto.PubSubClient]
// features is the list of pubsub implemented features.
features []pubsub.Feature
logger logger.Logger
}
// Init initializes the grpc pubsub, passing the metadata to the grpc component.
// It also fetches and sets the component features.
func (p *grpcPubSub) Init(ctx context.Context, metadata pubsub.Metadata) error {
if err := p.Dial(metadata.Name); err != nil {
return err
}
protoMetadata := &proto.MetadataRequest{
Properties: metadata.Properties,
}
_, err := p.Client.Init(p.Context, &proto.PubSubInitRequest{
Metadata: protoMetadata,
})
if err != nil {
return err
}
// TODO: static data could be retrieved in another way; a necessary discussion should start soon.
// We need to call the method here because Features could return an error, and the Features() interface method doesn't support returning errors.
featureResponse, err := p.Client.Features(p.Context, &proto.FeaturesRequest{})
if err != nil {
return err
}
p.features = make([]pubsub.Feature, len(featureResponse.GetFeatures()))
for idx, f := range featureResponse.GetFeatures() {
p.features[idx] = pubsub.Feature(f)
}
return nil
}
// Features lists all implemented features.
func (p *grpcPubSub) Features() []pubsub.Feature {
return p.features
}
// Publish publishes data to a topic.
func (p *grpcPubSub) Publish(ctx context.Context, req *pubsub.PublishRequest) error {
_, err := p.Client.Publish(ctx, &proto.PublishRequest{
Topic: req.Topic,
PubsubName: req.PubsubName,
Data: req.Data,
Metadata: req.Metadata,
})
return err
}
func (p *grpcPubSub) BulkPublish(ctx context.Context, req *pubsub.BulkPublishRequest) (pubsub.BulkPublishResponse, error) {
entries := make([]*proto.BulkMessageEntry, len(req.Entries))
for i, entry := range req.Entries {
entries[i] = &proto.BulkMessageEntry{
EntryId: entry.EntryId,
Event: entry.Event,
ContentType: entry.ContentType,
Metadata: entry.Metadata,
}
}
response, err := p.Client.BulkPublish(ctx, &proto.BulkPublishRequest{
Topic: req.Topic,
PubsubName: req.PubsubName,
Entries: entries,
Metadata: req.Metadata,
})
if err != nil {
return pubsub.BulkPublishResponse{}, err
}
failedEntries := make([]pubsub.BulkPublishResponseFailedEntry, len(response.GetFailedEntries()))
for i, failedEntry := range response.GetFailedEntries() {
failedEntries[i] = pubsub.BulkPublishResponseFailedEntry{
EntryId: failedEntry.GetEntryId(),
Error: errors.New(failedEntry.GetError()),
}
}
return pubsub.BulkPublishResponse{FailedEntries: failedEntries}, nil
}
type messageHandler = func(*proto.PullMessagesResponse)
// adaptHandler returns a function that handles the message with the given handler and acks the message once the handler returns.
//
//nolint:nosnakecase
func (p *grpcPubSub) adaptHandler(ctx context.Context, streamingPull proto.PubSub_PullMessagesClient, handler pubsub.Handler) messageHandler {
safeSend := &sync.Mutex{}
return func(msg *proto.PullMessagesResponse) {
m := pubsub.NewMessage{
Data: msg.GetData(),
ContentType: &msg.ContentType,
Topic: msg.GetTopicName(),
Metadata: msg.GetMetadata(),
}
var ackError *proto.AckMessageError
if err := handler(ctx, &m); err != nil {
p.logger.Errorf("error when handling message on topic %s", msg.GetTopicName())
ackError = &proto.AckMessageError{
Message: err.Error(),
}
}
// As per documentation:
// When using streams,
// one must take care to avoid calling either SendMsg or RecvMsg multiple times against the same Stream from different goroutines.
// In other words, it's safe to have a goroutine calling SendMsg and another goroutine calling RecvMsg on the same stream at the same time.
// But it is not safe to call SendMsg on the same stream in different goroutines, or to call RecvMsg on the same stream in different goroutines.
// https://github.com/grpc/grpc-go/blob/master/Documentation/concurrency.md#streams
safeSend.Lock()
defer safeSend.Unlock()
if err := streamingPull.Send(&proto.PullMessagesRequest{
AckMessageId: msg.GetId(),
AckError: ackError,
}); err != nil {
p.logger.Errorf("error when ack'ing message %s from topic %s", msg.GetId(), msg.GetTopicName())
}
}
}
// pullMessages pulls messages for the given subscription and executes the handler for each message.
func (p *grpcPubSub) pullMessages(ctx context.Context, topic *proto.Topic, handler pubsub.Handler) error {
// first pull should be sync and subsequent connections can be made in background if necessary
pull, err := p.Client.PullMessages(ctx)
if err != nil {
return fmt.Errorf("unable to subscribe: %w", err)
}
streamCtx, cancel := context.WithCancel(pull.Context())
err = pull.Send(&proto.PullMessagesRequest{
Topic: topic,
})
cleanup := func() {
if closeErr := pull.CloseSend(); closeErr != nil {
p.logger.Warnf("could not close pull stream of topic %s: %v", topic.GetName(), closeErr)
}
cancel()
}
if err != nil {
cleanup()
return fmt.Errorf("unable to subscribe: %w", err)
}
handle := p.adaptHandler(streamCtx, pull, handler)
go func() {
defer cleanup()
for {
msg, err := pull.Recv()
if err == io.EOF { // no more messages
return
}
// TODO reconnect on error
if err != nil {
p.logger.Errorf("failed to receive message: %v", err)
return
}
p.logger.Debugf("received message from stream on topic %s", msg.GetTopicName())
go handle(msg)
}
}()
return nil
}
// Subscribe subscribes to a given topic and invokes the handler when a new message arrives.
func (p *grpcPubSub) Subscribe(ctx context.Context, req pubsub.SubscribeRequest, handler pubsub.Handler) error {
subscription := &proto.Topic{
Name: req.Topic,
Metadata: req.Metadata,
}
return p.pullMessages(ctx, subscription, handler)
}
// fromConnector creates a new GRPC pubsub using the given underlying connector.
func fromConnector(l logger.Logger, connector *pluggable.GRPCConnector[proto.PubSubClient]) *grpcPubSub {
return &grpcPubSub{
features: make([]pubsub.Feature, 0),
GRPCConnector: connector,
logger: l,
}
}
// NewGRPCPubSub creates a new grpc pubsub using the given socket factory.
func NewGRPCPubSub(l logger.Logger, socket string) *grpcPubSub {
return fromConnector(l, pluggable.NewGRPCConnector(socket, proto.NewPubSubClient))
}
// newGRPCPubSub creates a new grpc pubsub for the given pluggable component.
func newGRPCPubSub(dialer pluggable.GRPCConnectionDialer) func(l logger.Logger) pubsub.PubSub {
return func(l logger.Logger) pubsub.PubSub {
return fromConnector(l, pluggable.NewGRPCConnectorWithDialer(dialer, proto.NewPubSubClient))
}
}
func init() {
//nolint:nosnakecase
pluggable.AddServiceDiscoveryCallback(proto.PubSub_ServiceDesc.ServiceName, func(name string, dialer pluggable.GRPCConnectionDialer) {
DefaultRegistry.RegisterComponent(newGRPCPubSub(dialer), name)
})
}
// ==== end of file: pkg/components/pubsub/pluggable.go (mikeee/dapr, Go, MIT) ====
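A sketch of using the gRPC-backed pub/sub directly against a component socket, assuming a component process is already listening there; in the runtime this wiring normally happens through the discovery callback registered in init above, and the topic name, logger name, and function name are illustrative only.

package pubsub

import (
	"context"

	contribPubsub "github.com/dapr/components-contrib/pubsub"
	"github.com/dapr/kit/logger"
)

// exampleSubscribe initializes the pluggable pubsub over its socket and subscribes to a
// topic; returning nil from the handler acks the message, while returning an error
// reports an AckError back to the component.
func exampleSubscribe(ctx context.Context, socket string) error {
	ps := NewGRPCPubSub(logger.NewLogger("example"), socket)
	if err := ps.Init(ctx, contribPubsub.Metadata{}); err != nil {
		return err
	}
	return ps.Subscribe(ctx, contribPubsub.SubscribeRequest{Topic: "orders"},
		func(_ context.Context, _ *contribPubsub.NewMessage) error {
			return nil
		})
}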
/*
Copyright 2022 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pubsub
import (
"context"
"errors"
"fmt"
"net"
"os"
"runtime"
"sync"
"sync/atomic"
"testing"
guuid "github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
contribMetadata "github.com/dapr/components-contrib/metadata"
"github.com/dapr/components-contrib/pubsub"
"github.com/dapr/dapr/pkg/components/pluggable"
proto "github.com/dapr/dapr/pkg/proto/components/v1"
testingGrpc "github.com/dapr/dapr/pkg/testing/grpc"
"github.com/dapr/kit/logger"
)
var testLogger = logger.NewLogger("pubsub-pluggable-test")
type server struct {
proto.UnimplementedPubSubServer
initCalled atomic.Int64
onInitCalled func(*proto.PubSubInitRequest)
initErr error
featuresCalled atomic.Int64
featuresErr error
publishCalled atomic.Int64
onPublishCalled func(*proto.PublishRequest)
publishErr error
pullChan chan *proto.PullMessagesResponse
pingCalled atomic.Int64
pingErr error
onAckReceived func(*proto.PullMessagesRequest)
pullCalled atomic.Int64
pullErr error
}
//nolint:nosnakecase
func (s *server) PullMessages(svc proto.PubSub_PullMessagesServer) error {
s.pullCalled.Add(1)
if s.onAckReceived != nil {
go func() {
for {
msg, err := svc.Recv()
if err != nil {
return
}
s.onAckReceived(msg)
}
}()
}
if s.pullChan != nil {
for msg := range s.pullChan {
if err := svc.Send(msg); err != nil {
return err
}
}
}
return s.pullErr
}
func (s *server) Init(_ context.Context, req *proto.PubSubInitRequest) (*proto.PubSubInitResponse, error) {
s.initCalled.Add(1)
if s.onInitCalled != nil {
s.onInitCalled(req)
}
return &proto.PubSubInitResponse{}, s.initErr
}
func (s *server) Features(context.Context, *proto.FeaturesRequest) (*proto.FeaturesResponse, error) {
s.featuresCalled.Add(1)
return &proto.FeaturesResponse{}, s.featuresErr
}
func (s *server) Publish(_ context.Context, req *proto.PublishRequest) (*proto.PublishResponse, error) {
s.publishCalled.Add(1)
if s.onPublishCalled != nil {
s.onPublishCalled(req)
}
return &proto.PublishResponse{}, s.publishErr
}
func (s *server) Ping(context.Context, *proto.PingRequest) (*proto.PingResponse, error) {
s.pingCalled.Add(1)
return &proto.PingResponse{}, s.pingErr
}
func TestPubSubPluggableCalls(t *testing.T) {
getPubSub := testingGrpc.TestServerFor(testLogger, func(s *grpc.Server, svc *server) {
proto.RegisterPubSubServer(s, svc)
}, func(cci grpc.ClientConnInterface) *grpcPubSub {
client := proto.NewPubSubClient(cci)
pubsub := fromConnector(testLogger, pluggable.NewGRPCConnector("/tmp/socket.sock", proto.NewPubSubClient))
pubsub.Client = client
return pubsub
})
if runtime.GOOS != "windows" {
t.Run("test init should populate features and call grpc init", func(t *testing.T) {
const (
fakeName = "name"
fakeType = "type"
fakeVersion = "v1"
fakeComponentName = "component"
fakeSocketFolder = "/tmp"
)
uniqueID := guuid.New().String()
socket := fmt.Sprintf("%s/%s.sock", fakeSocketFolder, uniqueID)
defer os.Remove(socket)
connector := pluggable.NewGRPCConnector(socket, proto.NewPubSubClient)
defer connector.Close()
listener, err := net.Listen("unix", socket)
require.NoError(t, err)
defer listener.Close()
s := grpc.NewServer()
srv := &server{}
proto.RegisterPubSubServer(s, srv)
go func() {
if serveErr := s.Serve(listener); serveErr != nil {
testLogger.Debugf("Server exited with error: %v", serveErr)
}
}()
ps := fromConnector(testLogger, connector)
err = ps.Init(context.Background(), pubsub.Metadata{
Base: contribMetadata.Base{},
})
require.NoError(t, err)
assert.Equal(t, int64(1), srv.featuresCalled.Load())
assert.Equal(t, int64(1), srv.initCalled.Load())
})
}
t.Run("features should return the component features'", func(t *testing.T) {
ps, cleanup, err := getPubSub(&server{})
require.NoError(t, err)
defer cleanup()
assert.Empty(t, ps.Features())
ps.features = []pubsub.Feature{pubsub.FeatureMessageTTL}
assert.NotEmpty(t, ps.Features())
assert.Equal(t, pubsub.FeatureMessageTTL, ps.Features()[0])
})
t.Run("publish should call publish grpc method", func(t *testing.T) {
const fakeTopic = "fakeTopic"
svc := &server{
onPublishCalled: func(req *proto.PublishRequest) {
assert.Equal(t, fakeTopic, req.GetTopic())
},
}
ps, cleanup, err := getPubSub(svc)
require.NoError(t, err)
defer cleanup()
err = ps.Publish(context.Background(), &pubsub.PublishRequest{
Topic: fakeTopic,
})
require.NoError(t, err)
assert.Equal(t, int64(1), svc.publishCalled.Load())
})
t.Run("publish should return an error if grpc method returns an error", func(t *testing.T) {
const fakeTopic = "fakeTopic"
svc := &server{
onPublishCalled: func(req *proto.PublishRequest) {
assert.Equal(t, fakeTopic, req.GetTopic())
},
publishErr: errors.New("fake-publish-err"),
}
ps, cleanup, err := getPubSub(svc)
require.NoError(t, err)
defer cleanup()
err = ps.Publish(context.Background(), &pubsub.PublishRequest{
Topic: fakeTopic,
})
require.Error(t, err)
assert.Equal(t, int64(1), svc.publishCalled.Load())
})
t.Run("subscribe should callback handler when new messages arrive", func(t *testing.T) {
const fakeTopic, fakeData1, fakeData2 = "fakeTopic", "fakeData1", "fakeData2"
var (
messagesAcked sync.WaitGroup
topicSent sync.WaitGroup
messagesProcessed sync.WaitGroup
totalAckErrors atomic.Int64
handleCalled atomic.Int64
)
messagesData := [][]byte{[]byte(fakeData1), []byte(fakeData2)}
messages := make([]*proto.PullMessagesResponse, len(messagesData))
messagesAcked.Add(len(messages))
messagesProcessed.Add(len(messages))
topicSent.Add(1)
for idx, data := range messagesData {
messages[idx] = &proto.PullMessagesResponse{
Data: data,
TopicName: fakeTopic,
Metadata: map[string]string{},
ContentType: "",
}
}
messageChan := make(chan *proto.PullMessagesResponse, len(messages))
defer close(messageChan)
for _, message := range messages {
messageChan <- message
}
svc := &server{
pullChan: messageChan,
onAckReceived: func(ma *proto.PullMessagesRequest) {
if ma.GetTopic() != nil {
topicSent.Done()
} else {
messagesAcked.Done()
}
if ma.GetAckError() != nil {
totalAckErrors.Add(1)
}
},
}
ps, cleanup, err := getPubSub(svc)
require.NoError(t, err)
defer cleanup()
handleErrors := make(chan error, 1) // simulating an ack error
handleErrors <- errors.New("fake-error")
close(handleErrors)
err = ps.Subscribe(context.Background(), pubsub.SubscribeRequest{
Topic: fakeTopic,
}, func(_ context.Context, m *pubsub.NewMessage) error {
handleCalled.Add(1)
messagesProcessed.Done()
assert.Contains(t, messagesData, m.Data)
return <-handleErrors
})
require.NoError(t, err)
topicSent.Wait()
messagesProcessed.Wait()
messagesAcked.Wait()
assert.Equal(t, int64(len(messages)), handleCalled.Load())
assert.Equal(t, int64(1), totalAckErrors.Load()) // at least one message should be an error
})
}
|
mikeee/dapr
|
pkg/components/pubsub/pluggable_test.go
|
GO
|
mit
| 7,862 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pubsub
import (
"fmt"
"strings"
"github.com/dapr/components-contrib/pubsub"
"github.com/dapr/dapr/pkg/components"
"github.com/dapr/kit/logger"
)
type Registry struct {
Logger logger.Logger
messageBuses map[string]func(logger.Logger) pubsub.PubSub
}
// DefaultRegistry is the singleton with the registry.
var DefaultRegistry *Registry = NewRegistry()
// NewRegistry returns a new pub sub registry.
func NewRegistry() *Registry {
return &Registry{
messageBuses: map[string]func(logger.Logger) pubsub.PubSub{},
}
}
// RegisterComponent adds a new message bus to the registry.
func (p *Registry) RegisterComponent(componentFactory func(logger.Logger) pubsub.PubSub, names ...string) {
for _, name := range names {
p.messageBuses[createFullName(name)] = componentFactory
}
}
// Create instantiates a pub/sub based on `name`.
func (p *Registry) Create(name, version, logName string) (pubsub.PubSub, error) {
if method, ok := p.getPubSub(name, version, logName); ok {
return method(), nil
}
return nil, fmt.Errorf("couldn't find message bus %s/%s", name, version)
}
func (p *Registry) getPubSub(name, version, logName string) (func() pubsub.PubSub, bool) {
nameLower := strings.ToLower(name)
versionLower := strings.ToLower(version)
pubSubFn, ok := p.messageBuses[nameLower+"/"+versionLower]
if ok {
return p.wrapFn(pubSubFn, logName), true
}
if components.IsInitialVersion(versionLower) {
pubSubFn, ok = p.messageBuses[nameLower]
if ok {
return p.wrapFn(pubSubFn, logName), true
}
}
return nil, false
}
func (p *Registry) wrapFn(componentFactory func(logger.Logger) pubsub.PubSub, logName string) func() pubsub.PubSub {
return func() pubsub.PubSub {
l := p.Logger
if logName != "" && l != nil {
l = l.WithFields(map[string]any{
"component": logName,
})
}
return componentFactory(l)
}
}
func createFullName(name string) string {
return strings.ToLower("pubsub." + name)
}
|
mikeee/dapr
|
pkg/components/pubsub/registry.go
|
GO
|
mit
| 2,511 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pubsub
import (
"fmt"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/dapr/components-contrib/pubsub"
daprt "github.com/dapr/dapr/pkg/testing"
"github.com/dapr/kit/logger"
)
func TestCreateFullName(t *testing.T) {
t.Run("create redis pubsub key name", func(t *testing.T) {
assert.Equal(t, "pubsub.redis", createFullName("redis"))
})
t.Run("create kafka pubsub key name", func(t *testing.T) {
assert.Equal(t, "pubsub.kafka", createFullName("kafka"))
})
t.Run("create azure service bus pubsub key name", func(t *testing.T) {
assert.Equal(t, "pubsub.azure.servicebus", createFullName("azure.servicebus"))
})
t.Run("create rabbitmq pubsub key name", func(t *testing.T) {
assert.Equal(t, "pubsub.rabbitmq", createFullName("rabbitmq"))
})
}
func TestCreatePubSub(t *testing.T) {
testRegistry := NewRegistry()
t.Run("pubsub messagebus is registered", func(t *testing.T) {
const (
pubSubName = "mockPubSub"
pubSubNameV2 = "mockPubSub/v2"
componentName = "pubsub." + pubSubName
)
// Initiate mock object
mockPubSub := new(daprt.MockPubSub)
mockPubSubV2 := new(daprt.MockPubSub)
// act
testRegistry.RegisterComponent(func(_ logger.Logger) pubsub.PubSub {
return mockPubSub
}, pubSubName)
testRegistry.RegisterComponent(func(_ logger.Logger) pubsub.PubSub {
return mockPubSubV2
}, pubSubNameV2)
// assert v0 and v1
p, e := testRegistry.Create(componentName, "v0", "")
require.NoError(t, e)
assert.Same(t, mockPubSub, p)
p, e = testRegistry.Create(componentName, "v1", "")
require.NoError(t, e)
assert.Same(t, mockPubSub, p)
// assert v2
pV2, e := testRegistry.Create(componentName, "v2", "")
require.NoError(t, e)
assert.Same(t, mockPubSubV2, pV2)
// check case-insensitivity
pV2, e = testRegistry.Create(strings.ToUpper(componentName), "V2", "")
require.NoError(t, e)
assert.Same(t, mockPubSubV2, pV2)
})
t.Run("pubsub messagebus is not registered", func(t *testing.T) {
const PubSubName = "fakePubSub"
// act
p, actualError := testRegistry.Create(createFullName(PubSubName), "v1", "")
expectedError := fmt.Errorf("couldn't find message bus %s/v1", createFullName(PubSubName))
// assert
assert.Nil(t, p)
assert.Equal(t, expectedError.Error(), actualError.Error())
})
}
|
mikeee/dapr
|
pkg/components/pubsub/registry_test.go
|
GO
|
mit
| 2,916 |
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package secretstores
import (
"context"
"github.com/dapr/components-contrib/secretstores"
"github.com/dapr/dapr/pkg/components/pluggable"
proto "github.com/dapr/dapr/pkg/proto/components/v1"
"github.com/dapr/kit/logger"
)
// grpcSecretStore is an implementation of a secret store over the gRPC protocol.
type grpcSecretStore struct {
*pluggable.GRPCConnector[proto.SecretStoreClient]
	// features is the list of features implemented by the secret store.
features []secretstores.Feature
}
// Init initializes the grpc secret store, passing the metadata to the grpc component.
func (gss *grpcSecretStore) Init(ctx context.Context, metadata secretstores.Metadata) error {
if err := gss.Dial(metadata.Name); err != nil {
return err
}
protoMetadata := &proto.MetadataRequest{
Properties: metadata.Properties,
}
_, err := gss.Client.Init(gss.Context, &proto.SecretStoreInitRequest{
Metadata: protoMetadata,
})
if err != nil {
return err
}
	// TODO: Static data could be retrieved in another way; a discussion on this should start soon.
	// We need to call the method here because Features could return an error, and the Features interface doesn't support errors.
featureResponse, err := gss.Client.Features(gss.Context, &proto.FeaturesRequest{})
if err != nil {
return err
}
gss.features = make([]secretstores.Feature, len(featureResponse.GetFeatures()))
for idx, f := range featureResponse.GetFeatures() {
gss.features[idx] = secretstores.Feature(f)
}
return nil
}
// Features lists all implemented features.
func (gss *grpcSecretStore) Features() []secretstores.Feature {
return gss.features
}
// GetSecret retrieves a secret using a key and returns a map of decrypted string/string values.
func (gss *grpcSecretStore) GetSecret(ctx context.Context, req secretstores.GetSecretRequest) (secretstores.GetSecretResponse, error) {
resp, err := gss.Client.Get(gss.Context, &proto.GetSecretRequest{
Key: req.Name,
Metadata: req.Metadata,
})
if err != nil {
return secretstores.GetSecretResponse{}, err
}
return secretstores.GetSecretResponse{
Data: resp.GetData(),
}, nil
}
// BulkGetSecret retrieves all secrets and returns a map of decrypted string/string values.
func (gss *grpcSecretStore) BulkGetSecret(ctx context.Context, req secretstores.BulkGetSecretRequest) (secretstores.BulkGetSecretResponse, error) {
resp, err := gss.Client.BulkGet(gss.Context, &proto.BulkGetSecretRequest{
Metadata: req.Metadata,
})
if err != nil {
return secretstores.BulkGetSecretResponse{}, err
}
items := make(map[string]map[string]string, len(resp.GetData()))
for k, v := range resp.GetData() {
s := v.GetSecrets()
items[k] = make(map[string]string, len(s))
for k2, v2 := range s {
items[k][k2] = v2
}
}
return secretstores.BulkGetSecretResponse{
Data: items,
}, nil
}
// fromConnector creates a new GRPC secret store using the given underlying connector.
func fromConnector(l logger.Logger, connector *pluggable.GRPCConnector[proto.SecretStoreClient]) *grpcSecretStore {
return &grpcSecretStore{
features: make([]secretstores.Feature, 0),
GRPCConnector: connector,
}
}
// NewGRPCSecretStore creates a new grpc secret store using the given socket factory.
func NewGRPCSecretStore(l logger.Logger, socket string) *grpcSecretStore {
return fromConnector(l, pluggable.NewGRPCConnector(socket, proto.NewSecretStoreClient))
}
// newGRPCSecretStore creates a new grpc secret store for the given pluggable component.
func newGRPCSecretStore(dialer pluggable.GRPCConnectionDialer) func(l logger.Logger) secretstores.SecretStore {
return func(l logger.Logger) secretstores.SecretStore {
return fromConnector(l, pluggable.NewGRPCConnectorWithDialer(dialer, proto.NewSecretStoreClient))
}
}
func init() {
//nolint:nosnakecase
pluggable.AddServiceDiscoveryCallback(proto.SecretStore_ServiceDesc.ServiceName, func(name string, dialer pluggable.GRPCConnectionDialer) {
DefaultRegistry.RegisterComponent(newGRPCSecretStore(dialer), name)
})
}
|
mikeee/dapr
|
pkg/components/secretstores/pluggable.go
|
GO
|
mit
| 4,557 |
//go:build !windows
// +build !windows
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package secretstores
import (
"context"
"errors"
"fmt"
"net"
"os"
"sync/atomic"
"testing"
guuid "github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
contribMetadata "github.com/dapr/components-contrib/metadata"
"github.com/dapr/components-contrib/secretstores"
"github.com/dapr/dapr/pkg/components/pluggable"
proto "github.com/dapr/dapr/pkg/proto/components/v1"
testingGrpc "github.com/dapr/dapr/pkg/testing/grpc"
"github.com/dapr/kit/logger"
)
var testLogger = logger.NewLogger("secretstores-pluggable-logger")
type server struct {
proto.UnimplementedSecretStoreServer
initCalled atomic.Int64
onInitCalled func(*proto.SecretStoreInitRequest)
initErr error
featuresCalled atomic.Int64
featuresErr error
getSecretCalled atomic.Int64
onGetSecret func(*proto.GetSecretRequest)
getSecretErr error
bulkGetSecretCalled atomic.Int64
onBulkGetSecret func(*proto.BulkGetSecretRequest)
bulkGetSecretErr error
pingCalled atomic.Int64
pingErr error
}
func (s *server) Init(ctx context.Context, req *proto.SecretStoreInitRequest) (*proto.SecretStoreInitResponse, error) {
s.initCalled.Add(1)
if s.onInitCalled != nil {
s.onInitCalled(req)
}
return &proto.SecretStoreInitResponse{}, s.initErr
}
func (s *server) Features(ctx context.Context, req *proto.FeaturesRequest) (*proto.FeaturesResponse, error) {
s.featuresCalled.Add(1)
return &proto.FeaturesResponse{}, s.featuresErr
}
func (s *server) Get(ctx context.Context, req *proto.GetSecretRequest) (*proto.GetSecretResponse, error) {
s.getSecretCalled.Add(1)
if s.onGetSecret != nil {
s.onGetSecret(req)
}
return &proto.GetSecretResponse{}, s.getSecretErr
}
func (s *server) BulkGet(ctx context.Context, req *proto.BulkGetSecretRequest) (*proto.BulkGetSecretResponse, error) {
s.bulkGetSecretCalled.Add(1)
if s.onBulkGetSecret != nil {
s.onBulkGetSecret(req)
}
return &proto.BulkGetSecretResponse{}, s.bulkGetSecretErr
}
func (s *server) Ping(ctx context.Context, req *proto.PingRequest) (*proto.PingResponse, error) {
s.pingCalled.Add(1)
return &proto.PingResponse{}, s.pingErr
}
func TestComponentCalls(t *testing.T) {
getSecretStores := testingGrpc.TestServerFor(testLogger, func(s *grpc.Server, svc *server) {
proto.RegisterSecretStoreServer(s, svc)
}, func(cci grpc.ClientConnInterface) *grpcSecretStore {
client := proto.NewSecretStoreClient(cci)
secretStore := fromConnector(testLogger, pluggable.NewGRPCConnector("/tmp/socket.sock", proto.NewSecretStoreClient))
secretStore.Client = client
return secretStore
})
t.Run("init should call grpc init and populate features", func(t *testing.T) {
const (
fakeName = "name"
fakeType = "type"
fakeVersion = "v1"
fakeComponentName = "component"
fakeSocketFolder = "/tmp"
)
uniqueID := guuid.New().String()
socket := fmt.Sprintf("%s/%s.sock", fakeSocketFolder, uniqueID)
defer os.Remove(socket)
connector := pluggable.NewGRPCConnector(socket, proto.NewSecretStoreClient)
defer connector.Close()
listener, err := net.Listen("unix", socket)
require.NoError(t, err)
defer listener.Close()
s := grpc.NewServer()
srv := &server{}
proto.RegisterSecretStoreServer(s, srv)
go func() {
if serveErr := s.Serve(listener); serveErr != nil {
testLogger.Debugf("failed to serve: %v", serveErr)
}
}()
secretStore := fromConnector(testLogger, connector)
err = secretStore.Init(context.Background(), secretstores.Metadata{
Base: contribMetadata.Base{},
})
require.NoError(t, err)
assert.Equal(t, int64(1), srv.initCalled.Load())
assert.Equal(t, int64(1), srv.featuresCalled.Load())
})
t.Run("features should return the secret store features", func(t *testing.T) {
secretStore, cleanup, err := getSecretStores(&server{})
require.NoError(t, err)
defer cleanup()
features := secretStore.Features()
assert.Empty(t, features)
secretStore.features = []secretstores.Feature{secretstores.FeatureMultipleKeyValuesPerSecret}
assert.NotEmpty(t, secretStore.Features())
assert.Equal(t, secretstores.FeatureMultipleKeyValuesPerSecret, secretStore.Features()[0])
})
t.Run("get secret should call grpc get secret", func(t *testing.T) {
key := "secretName"
errStr := "secret not found"
svc := &server{
onGetSecret: func(req *proto.GetSecretRequest) {
assert.Equal(t, key, req.GetKey())
},
getSecretErr: errors.New(errStr),
}
secretStore, cleanup, err := getSecretStores(svc)
require.NoError(t, err)
defer cleanup()
resp, err := secretStore.GetSecret(context.Background(), secretstores.GetSecretRequest{
Name: key,
})
assert.Equal(t, int64(1), svc.getSecretCalled.Load())
		require.Error(t, err)
		assert.Contains(t, err.Error(), errStr)
assert.Equal(t, secretstores.GetSecretResponse{}, resp)
})
t.Run("bulk get secret should call grpc bulk get secret", func(t *testing.T) {
errStr := "bulk get secret error"
svc := &server{
onBulkGetSecret: func(req *proto.BulkGetSecretRequest) {
// no-op
},
bulkGetSecretErr: errors.New(errStr),
}
gSecretStores, cleanup, err := getSecretStores(svc)
require.NoError(t, err)
defer cleanup()
resp, err := gSecretStores.BulkGetSecret(context.Background(), secretstores.BulkGetSecretRequest{})
assert.Equal(t, int64(1), svc.bulkGetSecretCalled.Load())
		require.Error(t, err)
		assert.Contains(t, err.Error(), errStr)
assert.Equal(t, secretstores.BulkGetSecretResponse{}, resp)
})
t.Run("ping should not return an err when grpc not returns an error", func(t *testing.T) {
svc := &server{}
gSecretStores, cleanup, err := getSecretStores(svc)
require.NoError(t, err)
defer cleanup()
err = gSecretStores.Ping()
require.NoError(t, err)
assert.Equal(t, int64(1), svc.pingCalled.Load())
})
t.Run("ping should return an err when grpc returns an error", func(t *testing.T) {
svc := &server{
pingErr: errors.New("fake-ping-err"),
}
gSecretStores, cleanup, err := getSecretStores(svc)
require.NoError(t, err)
defer cleanup()
err = gSecretStores.Ping()
require.Error(t, err)
assert.Equal(t, int64(1), svc.pingCalled.Load())
})
}
|
mikeee/dapr
|
pkg/components/secretstores/pluggable_test.go
|
GO
|
mit
| 6,864 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package secretstores
import (
"fmt"
"strings"
"github.com/dapr/components-contrib/secretstores"
"github.com/dapr/dapr/pkg/components"
"github.com/dapr/kit/logger"
)
// Name of the built-in Kubernetes secret store component.
const BuiltinKubernetesSecretStore = "kubernetes"
// Registry is used to get registered secret store implementations.
type Registry struct {
Logger logger.Logger
secretStores map[string]func(logger.Logger) secretstores.SecretStore
}
// DefaultRegistry is the singleton with the registry.
var DefaultRegistry *Registry = NewRegistry()
// NewRegistry returns a new secret store registry.
func NewRegistry() *Registry {
return &Registry{
secretStores: map[string]func(logger.Logger) secretstores.SecretStore{},
}
}
// RegisterComponent adds a new secret store to the registry.
func (s *Registry) RegisterComponent(componentFactory func(logger.Logger) secretstores.SecretStore, names ...string) {
for _, name := range names {
s.secretStores[createFullName(name)] = componentFactory
}
}
// Create instantiates a secret store based on `name`.
func (s *Registry) Create(name, version, logName string) (secretstores.SecretStore, error) {
if method, ok := s.getSecretStore(name, version, logName); ok {
return method(), nil
}
return nil, fmt.Errorf("couldn't find secret store %s/%s", name, version)
}
func (s *Registry) getSecretStore(name, version, logName string) (func() secretstores.SecretStore, bool) {
nameLower := strings.ToLower(name)
versionLower := strings.ToLower(version)
secretStoreFn, ok := s.secretStores[nameLower+"/"+versionLower]
if ok {
return s.wrapFn(secretStoreFn, logName), true
}
if components.IsInitialVersion(versionLower) {
secretStoreFn, ok = s.secretStores[nameLower]
if ok {
return s.wrapFn(secretStoreFn, logName), true
}
}
return nil, false
}
func (s *Registry) wrapFn(componentFactory func(logger.Logger) secretstores.SecretStore, logName string) func() secretstores.SecretStore {
return func() secretstores.SecretStore {
l := s.Logger
if logName != "" && l != nil {
l = l.WithFields(map[string]any{
"component": logName,
})
}
return componentFactory(l)
}
}
func createFullName(name string) string {
return strings.ToLower("secretstores." + name)
}
|
mikeee/dapr
|
pkg/components/secretstores/registry.go
|
GO
|
mit
| 2,837 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package secretstores_test
import (
"fmt"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
ss "github.com/dapr/components-contrib/secretstores"
"github.com/dapr/dapr/pkg/components/secretstores"
"github.com/dapr/kit/logger"
)
type mockSecretStore struct {
ss.SecretStore
}
func TestRegistry(t *testing.T) {
testRegistry := secretstores.NewRegistry()
t.Run("secret store is registered", func(t *testing.T) {
const (
secretStoreName = "mockSecretStore"
secretStoreNameV2 = "mockSecretStore/v2"
componentName = "secretstores." + secretStoreName
)
// Initiate mock object
mock := &mockSecretStore{}
mockV2 := &mockSecretStore{}
// act
testRegistry.RegisterComponent(func(_ logger.Logger) ss.SecretStore {
return mock
}, secretStoreName)
testRegistry.RegisterComponent(func(_ logger.Logger) ss.SecretStore {
return mockV2
}, secretStoreNameV2)
// assert v0 and v1
p, e := testRegistry.Create(componentName, "v0", "")
require.NoError(t, e)
assert.Same(t, mock, p)
p, e = testRegistry.Create(componentName, "v1", "")
require.NoError(t, e)
assert.Same(t, mock, p)
// assert v2
pV2, e := testRegistry.Create(componentName, "v2", "")
require.NoError(t, e)
assert.Same(t, mockV2, pV2)
// check case-insensitivity
pV2, e = testRegistry.Create(strings.ToUpper(componentName), "V2", "")
require.NoError(t, e)
assert.Same(t, mockV2, pV2)
})
t.Run("secret store is not registered", func(t *testing.T) {
const (
resolverName = "fakeSecretStore"
componentName = "secretstores." + resolverName
)
// act
p, actualError := testRegistry.Create(componentName, "v1", "")
expectedError := fmt.Errorf("couldn't find secret store %s/v1", componentName)
// assert
assert.Nil(t, p)
assert.Equal(t, expectedError.Error(), actualError.Error())
})
}
|
mikeee/dapr
|
pkg/components/secretstores/registry_test.go
|
GO
|
mit
| 2,446 |
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package state
import (
"context"
"errors"
"sync/atomic"
"github.com/cenkalti/backoff/v4"
"github.com/dapr/components-contrib/state"
"github.com/dapr/dapr/pkg/resiliency"
)
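// stateRequestConstraint restricts the bulk helpers to set and delete requests,
// both of which implement state.StateRequest and expose their key.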
type stateRequestConstraint interface {
state.SetRequest | state.DeleteRequest
state.StateRequest
}
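// requestWithKey returns the index of the request with the given key in reqs,
// or -1 if no request matches.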
func requestWithKey[T stateRequestConstraint](reqs []T, key string) int {
for i, r := range reqs {
if r.GetKey() == key {
return i
}
}
// Should never happen…
return -1
}
// PerformBulkStoreOperation performs a bulk set or delete using resiliency, retrying operations that fail only when they can be retried.
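//
// A minimal usage sketch (the store variable and requests are hypothetical, shown for illustration only):
//
//	err := PerformBulkStoreOperation(ctx, reqs, policyDef, state.BulkStoreOpts{},
//		func(ctx context.Context, req *state.SetRequest) error { return store.Set(ctx, req) },
//		func(ctx context.Context, reqs []state.SetRequest, opts state.BulkStoreOpts) error {
//			return store.BulkSet(ctx, reqs, opts)
//		},
//	)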
func PerformBulkStoreOperation[T stateRequestConstraint](
ctx context.Context, reqs []T, policyDef *resiliency.PolicyDefinition, opts state.BulkStoreOpts,
execSingle func(ctx context.Context, req *T) error,
execMulti func(ctx context.Context, reqs []T, opts state.BulkStoreOpts) error,
) error {
var reqsAtomic atomic.Pointer[[]T]
reqsAtomic.Store(&reqs)
policyRunner := resiliency.NewRunnerWithOptions(ctx,
policyDef,
resiliency.RunnerOpts[[]string]{
// In case of errors, the policy runner function returns a list of items that are to be retried.
// Items that can NOT be retried are the items that either succeeded (when at least one other item failed) or which failed with an etag error (which can't be retried)
Accumulator: func(retry []string) {
rReqs := *reqsAtomic.Load()
newReqs := make([]T, len(retry))
var n int
for _, retryKey := range retry {
i := requestWithKey(reqs, retryKey)
if i >= 0 {
newReqs[n] = rReqs[i]
n++
}
}
newReqs = newReqs[:n]
reqsAtomic.Store(&newReqs)
},
},
)
_, err := policyRunner(func(ctx context.Context) ([]string, error) {
var rErr error
rReqs := *reqsAtomic.Load()
// If there's a single request, perform it in non-bulk
// In this case, we never need to filter out operations
if len(rReqs) == 1 {
rErr = execSingle(ctx, &rReqs[0])
if rErr != nil {
// Check if it's an etag error, which is not retriable
// In that case, wrap inside a permanent backoff error
var etagErr *state.ETagError
if errors.As(rErr, &etagErr) {
rErr = backoff.Permanent(rErr)
}
}
return nil, rErr
}
// Perform the request in bulk
rErr = execMulti(ctx, rReqs, opts)
// If there's no error, short-circuit
if rErr == nil {
return nil, nil
}
// Check if we have a multi-error; if not, return the error as-is
mErr, ok := rErr.(interface{ Unwrap() []error })
if !ok {
return nil, rErr
}
errs := mErr.Unwrap()
if len(errs) == 0 {
// Should never happen…
return nil, rErr
}
// Check which operation(s) failed
// We can retry if at least one error is not an etag error
var canRetry, etagInvalid bool
retry := make([]string, 0, len(errs))
for _, e := range errs {
// Check if it's a BulkStoreError
// If not, we will cause all operations to be retried, because the error was not a multi BulkStoreError
var bse state.BulkStoreError
if !errors.As(e, &bse) {
return nil, rErr
}
// If it's not an etag error, the operation can retry this failed item
if etagErr := bse.ETagError(); etagErr == nil {
canRetry = true
retry = append(retry, bse.Key())
} else if etagErr.Kind() == state.ETagInvalid {
// If at least one etag error is due to an etag invalid, record that
etagInvalid = true
}
}
// If canRetry is false, it means that all errors are etag errors, which are permanent
if !canRetry {
var etagErr *state.ETagError
if etagInvalid {
etagErr = state.NewETagError(state.ETagInvalid, rErr)
} else {
etagErr = state.NewETagError(state.ETagMismatch, rErr)
}
rErr = backoff.Permanent(etagErr)
}
return retry, rErr
})
return err
}
|
mikeee/dapr
|
pkg/components/state/bulk.go
|
GO
|
mit
| 4,361 |
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package state
import (
"context"
"errors"
"sync/atomic"
"testing"
"github.com/stretchr/testify/require"
"github.com/dapr/components-contrib/state"
resiliencyV1alpha1 "github.com/dapr/dapr/pkg/apis/resiliency/v1alpha1"
"github.com/dapr/dapr/pkg/resiliency"
"github.com/dapr/kit/logger"
"github.com/dapr/kit/ptr"
)
func TestPerformBulkStoreOperation(t *testing.T) {
simulatedErr := errors.New("simulated")
etagMismatchErr := state.NewETagError(state.ETagMismatch, simulatedErr)
etagInvalidErr := state.NewETagError(state.ETagInvalid, simulatedErr)
res := resiliency.FromConfigurations(logger.NewLogger("test"), &resiliencyV1alpha1.Resiliency{
Spec: resiliencyV1alpha1.ResiliencySpec{
Policies: resiliencyV1alpha1.Policies{
Retries: map[string]resiliencyV1alpha1.Retry{
"singleRetry": {
MaxRetries: ptr.Of(1),
MaxInterval: "100ms",
Policy: "constant",
Duration: "10ms",
},
},
Timeouts: map[string]string{
"fast": "100ms",
},
},
Targets: resiliencyV1alpha1.Targets{
Components: map[string]resiliencyV1alpha1.ComponentPolicyNames{
"mystate": {
Outbound: resiliencyV1alpha1.PolicyNames{
Retry: "singleRetry",
Timeout: "fast",
},
},
},
},
},
})
policyDef := res.ComponentOutboundPolicy("mystate", resiliency.Statestore)
t.Run("single request", func(t *testing.T) {
reqs := []state.SetRequest{
{Key: "key1"},
}
t.Run("no error", func(t *testing.T) {
count := atomic.Uint32{}
err := PerformBulkStoreOperation(context.Background(), reqs, policyDef, state.BulkStoreOpts{},
func(ctx context.Context, req *state.SetRequest) error {
count.Add(1)
return nil
},
nil, // The multi method should not be invoked, so this will panic if it happens
)
require.NoError(t, err)
require.Equal(t, uint32(1), count.Load())
})
t.Run("does not retry on etag error", func(t *testing.T) {
count := atomic.Uint32{}
err := PerformBulkStoreOperation(context.Background(), reqs, policyDef, state.BulkStoreOpts{},
func(ctx context.Context, req *state.SetRequest) error {
count.Add(1)
return etagInvalidErr
},
nil, // The multi method should not be invoked, so this will panic if it happens
)
var etagErr *state.ETagError
require.Error(t, err)
require.ErrorAs(t, err, &etagErr)
require.Equal(t, uint32(1), count.Load())
})
t.Run("retries on other errors", func(t *testing.T) {
count := atomic.Uint32{}
err := PerformBulkStoreOperation(context.Background(), reqs, policyDef, state.BulkStoreOpts{},
func(ctx context.Context, req *state.SetRequest) error {
count.Add(1)
return simulatedErr
},
nil, // The multi method should not be invoked, so this will panic if it happens
)
require.Error(t, err)
require.Equal(t, uint32(2), count.Load())
})
t.Run("success on second attempt", func(t *testing.T) {
count := atomic.Uint32{}
err := PerformBulkStoreOperation(context.Background(), reqs, policyDef, state.BulkStoreOpts{},
func(ctx context.Context, req *state.SetRequest) error {
if count.Add(1) == 1 {
return simulatedErr
}
return nil
},
nil, // The multi method should not be invoked, so this will panic if it happens
)
require.NoError(t, err)
require.Equal(t, uint32(2), count.Load())
})
})
t.Run("multiple requests", func(t *testing.T) {
reqs := []state.SetRequest{
{Key: "key1"},
{Key: "key2"},
}
t.Run("all successful", func(t *testing.T) {
count := atomic.Uint32{}
err := PerformBulkStoreOperation(context.Background(), reqs, policyDef, state.BulkStoreOpts{},
nil, // The single method should not be invoked, so this will panic if it happens
func(ctx context.Context, req []state.SetRequest, opts state.BulkStoreOpts) error {
count.Add(1)
return nil
},
)
require.NoError(t, err)
require.Equal(t, uint32(1), count.Load())
})
t.Run("key1 successful, key2 etag mismatch", func(t *testing.T) {
count := atomic.Uint32{}
err := PerformBulkStoreOperation(context.Background(), reqs, policyDef, state.BulkStoreOpts{},
nil, // The single method should not be invoked, so this will panic if it happens
func(ctx context.Context, req []state.SetRequest, opts state.BulkStoreOpts) error {
count.Add(1)
return errors.Join(
state.NewBulkStoreError("key2", etagMismatchErr),
)
},
)
require.Error(t, err)
var etagErr *state.ETagError
require.ErrorAs(t, err, &etagErr)
require.Equal(t, state.ETagMismatch, etagErr.Kind())
require.Equal(t, uint32(1), count.Load())
})
t.Run("key1 etag invalid, key2 etag mismatch", func(t *testing.T) {
count := atomic.Uint32{}
err := PerformBulkStoreOperation(context.Background(), reqs, policyDef, state.BulkStoreOpts{},
nil, // The single method should not be invoked, so this will panic if it happens
func(ctx context.Context, req []state.SetRequest, opts state.BulkStoreOpts) error {
count.Add(1)
return errors.Join(
state.NewBulkStoreError("key1", etagInvalidErr),
state.NewBulkStoreError("key2", etagMismatchErr),
)
},
)
require.Error(t, err)
var etagErr *state.ETagError
require.ErrorAs(t, err, &etagErr)
require.Equal(t, state.ETagInvalid, etagErr.Kind())
require.Equal(t, uint32(1), count.Load())
})
t.Run("key1 successful, key2 fails and is retried", func(t *testing.T) {
count := atomic.Uint32{}
// This should retry, but the second time only key2 should be requested
err := PerformBulkStoreOperation(context.Background(), reqs, policyDef, state.BulkStoreOpts{},
func(ctx context.Context, req *state.SetRequest) error {
require.Equal(t, "key2", req.Key)
count.Add(1)
return simulatedErr
},
func(ctx context.Context, req []state.SetRequest, opts state.BulkStoreOpts) error {
count.Add(1)
return errors.Join(
state.NewBulkStoreError("key2", simulatedErr),
)
},
)
require.Error(t, err)
require.Equal(t, simulatedErr, err)
require.Equal(t, uint32(2), count.Load())
})
t.Run("key1 fails and is retried, key2 has etag error", func(t *testing.T) {
count := atomic.Uint32{}
// This should retry, but the second time only key1 should be requested
err := PerformBulkStoreOperation(context.Background(), reqs, policyDef, state.BulkStoreOpts{},
func(ctx context.Context, req *state.SetRequest) error {
require.Equal(t, "key1", req.Key)
count.Add(1)
return simulatedErr
},
func(ctx context.Context, req []state.SetRequest, opts state.BulkStoreOpts) error {
count.Add(1)
return errors.Join(
state.NewBulkStoreError("key1", simulatedErr),
state.NewBulkStoreError("key2", etagMismatchErr),
)
},
)
require.Error(t, err)
require.Equal(t, simulatedErr, err)
require.Equal(t, uint32(2), count.Load())
})
t.Run("key1 fails and is retried, key2 has etag error, key3 succeeds", func(t *testing.T) {
reqs2 := []state.SetRequest{
{Key: "key1"},
{Key: "key2"},
{Key: "key3"},
}
count := atomic.Uint32{}
// This should retry, but the second time only key1 should be requested
err := PerformBulkStoreOperation(context.Background(), reqs2, policyDef, state.BulkStoreOpts{},
func(ctx context.Context, req *state.SetRequest) error {
require.Equal(t, "key1", req.Key)
count.Add(1)
return simulatedErr
},
func(ctx context.Context, req []state.SetRequest, opts state.BulkStoreOpts) error {
count.Add(1)
return errors.Join(
state.NewBulkStoreError("key1", simulatedErr),
state.NewBulkStoreError("key2", etagMismatchErr),
)
},
)
require.Error(t, err)
require.Equal(t, simulatedErr, err)
require.Equal(t, uint32(2), count.Load())
})
t.Run("key1 succeeds on 2nd attempt, key2 succeeds, key3 has etag error on 2nd attempt", func(t *testing.T) {
reqs2 := []state.SetRequest{
{Key: "key1"},
{Key: "key2"},
{Key: "key3"},
}
count := atomic.Uint32{}
err := PerformBulkStoreOperation(context.Background(), reqs2, policyDef, state.BulkStoreOpts{},
nil, // The single method should not be invoked, so this will panic if it happens
func(ctx context.Context, req []state.SetRequest, opts state.BulkStoreOpts) error {
if count.Add(1) == 1 {
// On first attempt, key1 and key3 fail with non-etag errors
return errors.Join(
state.NewBulkStoreError("key1", simulatedErr),
state.NewBulkStoreError("key3", simulatedErr),
)
}
// On the second attempt, key3 fails with etag error
require.Len(t, req, 2)
for i := 0; i < 2; i++ {
switch req[i].Key {
case "key3", "key1":
// All good
default:
t.Fatalf("Found unexpected key: %s", req[i].Key)
}
}
return errors.Join(
state.NewBulkStoreError("key3", etagMismatchErr),
)
},
)
require.Error(t, err)
var etagErr *state.ETagError
require.ErrorAs(t, err, &etagErr)
require.Equal(t, state.ETagMismatch, etagErr.Kind())
require.Equal(t, uint32(2), count.Load())
})
t.Run("retries when error is not a multierror", func(t *testing.T) {
count := atomic.Uint32{}
err := PerformBulkStoreOperation(context.Background(), reqs, policyDef, state.BulkStoreOpts{},
nil, // The single method should not be invoked, so this will panic if it happens
func(ctx context.Context, req []state.SetRequest, opts state.BulkStoreOpts) error {
count.Add(1)
return simulatedErr
},
)
require.Error(t, err)
require.Equal(t, simulatedErr, err)
require.Equal(t, uint32(2), count.Load())
})
t.Run("retries when multierror contains a non-BulkStoreError error", func(t *testing.T) {
count := atomic.Uint32{}
err := PerformBulkStoreOperation(context.Background(), reqs, policyDef, state.BulkStoreOpts{},
nil, // The single method should not be invoked, so this will panic if it happens
func(ctx context.Context, req []state.SetRequest, opts state.BulkStoreOpts) error {
count.Add(1)
return errors.Join(simulatedErr)
},
)
require.Error(t, err)
merr, ok := err.(interface{ Unwrap() []error })
require.True(t, ok)
require.Len(t, merr.Unwrap(), 1)
			require.Equal(t, simulatedErr, merr.Unwrap()[0])
require.Equal(t, uint32(2), count.Load())
})
})
}
|
mikeee/dapr
|
pkg/components/state/bulk_test.go
|
GO
|
mit
| 11,009 |
/*
Copyright 2022 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package state
import (
"context"
"encoding/json"
"errors"
"fmt"
"math"
"strconv"
"sync"
"time"
"github.com/dapr/kit/ptr"
"github.com/dapr/components-contrib/state"
"github.com/dapr/components-contrib/state/query"
"github.com/dapr/components-contrib/state/utils"
"github.com/dapr/dapr/pkg/components/pluggable"
proto "github.com/dapr/dapr/pkg/proto/components/v1"
"github.com/dapr/kit/logger"
"google.golang.org/genproto/googleapis/rpc/errdetails"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/anypb"
)
var (
ErrNilSetValue = errors.New("an attempt to set a nil value was received, try to use Delete instead")
ErrRespNil = errors.New("the response for GetRequest is nil")
ErrTransactOperationNotSupported = errors.New("transact operation not supported")
)
// error codes
var (
GRPCCodeETagMismatch = codes.FailedPrecondition
GRPCCodeETagInvalid = codes.InvalidArgument
GRPCCodeBulkDeleteRowMismatch = codes.Internal
log = logger.NewLogger("state-pluggable-logger")
)
const (
// etagField is the field that should be specified on gRPC error response.
etagField = "etag"
// affectedRowsMetadataKey is the metadata key used to return bulkdelete mismatch errors affected rows.
affectedRowsMetadataKey = "affected"
// expectedRowsMetadataKey is the metadata key used to return bulkdelete mismatch errors expected rows.
expectedRowsMetadataKey = "expected"
)
// etagErrFromStatus gets the etag error from the given gRPC status; if the error is not an etag-kind error, the original error is returned.
func etagErrFromStatus(s status.Status) (error, bool) {
details := s.Details()
if len(details) != 1 {
return s.Err(), false
}
badRequestDetail, ok := details[0].(*errdetails.BadRequest)
if !ok {
return s.Err(), false
}
violations := badRequestDetail.GetFieldViolations()
if len(violations) != 1 {
return s.Err(), false
}
maybeETagViolation := violations[0]
if maybeETagViolation.GetField() != etagField {
return s.Err(), false
}
return errors.New(maybeETagViolation.GetDescription()), true
}
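// etagErrorsConverters maps the etag-related gRPC status codes to their
// corresponding state.ETagError kinds.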
var etagErrorsConverters = pluggable.MethodErrorConverter{
GRPCCodeETagInvalid: func(s status.Status) error {
sourceErr, ok := etagErrFromStatus(s)
if !ok {
return sourceErr
}
return state.NewETagError(state.ETagInvalid, sourceErr)
},
GRPCCodeETagMismatch: func(s status.Status) error {
sourceErr, ok := etagErrFromStatus(s)
if !ok {
return sourceErr
}
return state.NewETagError(state.ETagMismatch, sourceErr)
},
}
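// bulkDeleteErrors converts a bulk delete row mismatch status into a
// state.BulkDeleteRowMismatchError, reading the expected and affected row
// counts from the error details metadata.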
var bulkDeleteErrors = pluggable.MethodErrorConverter{
GRPCCodeBulkDeleteRowMismatch: func(s status.Status) error {
details := s.Details()
if len(details) != 1 {
return s.Err()
}
errorInfoDetail, ok := details[0].(*errdetails.ErrorInfo)
if !ok {
return s.Err()
}
metadata := errorInfoDetail.GetMetadata()
expectedStr, ok := metadata[expectedRowsMetadataKey]
if !ok {
return s.Err()
}
expected, err := strconv.Atoi(expectedStr)
if err != nil {
return fmt.Errorf("%w; cannot convert 'expected' rows to integer: %s", s.Err(), err)
}
affectedStr, ok := metadata[affectedRowsMetadataKey]
if !ok {
return s.Err()
}
affected, err := strconv.Atoi(affectedStr)
if err != nil {
return fmt.Errorf("%w; cannot convert 'affected' rows to integer: %s", s.Err(), err)
}
return state.NewBulkDeleteRowMismatchError(uint64(expected), uint64(affected))
},
}
var (
mapETagErrs = pluggable.NewConverterFunc(etagErrorsConverters)
mapSetErrs = mapETagErrs
mapDeleteErrs = mapETagErrs
mapBulkSetErrs = mapETagErrs
mapBulkDeleteErrs = pluggable.NewConverterFunc(etagErrorsConverters.Merge(bulkDeleteErrors))
)
// grpcStateStore is an implementation of a state store over the gRPC protocol.
type grpcStateStore struct {
*pluggable.GRPCConnector[stateStoreClient]
	// features is the list of features implemented by the state store.
features []state.Feature
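	// multiMaxSize caches the maximum number of operations allowed in a
	// transactional request, lazily fetched from the component; lock guards it.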
multiMaxSize *int
lock sync.RWMutex
}
// Init initializes the grpc state store, passing the metadata to the grpc component.
// It also fetches and sets the current component's features.
func (ss *grpcStateStore) Init(ctx context.Context, metadata state.Metadata) error {
if err := ss.Dial(metadata.Name); err != nil {
return err
}
protoMetadata := &proto.MetadataRequest{
Properties: metadata.Properties,
}
_, err := ss.Client.Init(ss.Context, &proto.InitRequest{
Metadata: protoMetadata,
})
if err != nil {
return err
}
	// TODO: Static data could be retrieved in another way; a discussion on this should start soon.
	// We need to call the method here because Features could return an error, and the Features interface doesn't support errors.
featureResponse, err := ss.Client.Features(ss.Context, &proto.FeaturesRequest{})
if err != nil {
return err
}
ss.features = make([]state.Feature, len(featureResponse.GetFeatures()))
for idx, f := range featureResponse.GetFeatures() {
ss.features[idx] = state.Feature(f)
}
return nil
}
// Features lists all implemented features.
func (ss *grpcStateStore) Features() []state.Feature {
return ss.features
}
// Delete performs a delete operation.
func (ss *grpcStateStore) Delete(ctx context.Context, req *state.DeleteRequest) error {
_, err := ss.Client.Delete(ctx, toDeleteRequest(req))
return mapDeleteErrs(err)
}
// Get performs a get on the state store.
func (ss *grpcStateStore) Get(ctx context.Context, req *state.GetRequest) (*state.GetResponse, error) {
response, err := ss.Client.Get(ctx, toGetRequest(req))
if err != nil {
return nil, err
}
if response == nil {
return nil, ErrRespNil
}
return fromGetResponse(response), nil
}
// Set performs a set operation on the state store.
func (ss *grpcStateStore) Set(ctx context.Context, req *state.SetRequest) error {
protoRequest, err := toSetRequest(req)
if err != nil {
return err
}
_, err = ss.Client.Set(ctx, protoRequest)
return mapSetErrs(err)
}
// BulkDelete performs a delete operation for many keys at once.
func (ss *grpcStateStore) BulkDelete(ctx context.Context, reqs []state.DeleteRequest, opts state.BulkStoreOpts) error {
protoRequests := make([]*proto.DeleteRequest, len(reqs))
for idx := range reqs {
protoRequests[idx] = toDeleteRequest(&reqs[idx])
}
bulkDeleteRequest := &proto.BulkDeleteRequest{
Items: protoRequests,
Options: &proto.BulkDeleteRequestOptions{
Parallelism: int64(opts.Parallelism),
},
}
_, err := ss.Client.BulkDelete(ctx, bulkDeleteRequest)
return mapBulkDeleteErrs(err)
}
// BulkGet performs a get operation for many keys at once.
func (ss *grpcStateStore) BulkGet(ctx context.Context, req []state.GetRequest, opts state.BulkGetOpts) ([]state.BulkGetResponse, error) {
protoRequests := make([]*proto.GetRequest, len(req))
for idx := range req {
protoRequests[idx] = toGetRequest(&req[idx])
}
bulkGetRequest := &proto.BulkGetRequest{
Items: protoRequests,
Options: &proto.BulkGetRequestOptions{
Parallelism: int64(opts.Parallelism),
},
}
bulkGetResponse, err := ss.Client.BulkGet(ctx, bulkGetRequest)
if err != nil {
return nil, err
}
items := make([]state.BulkGetResponse, len(bulkGetResponse.GetItems()))
for idx, resp := range bulkGetResponse.GetItems() {
items[idx] = state.BulkGetResponse{
Key: resp.GetKey(),
Data: resp.GetData(),
ETag: fromETagResponse(resp.GetEtag()),
Metadata: resp.GetMetadata(),
Error: resp.GetError(),
ContentType: strNilIfEmpty(resp.GetContentType()),
}
}
return items, nil
}
// BulkSet performs a set operation for many keys at once.
func (ss *grpcStateStore) BulkSet(ctx context.Context, req []state.SetRequest, opts state.BulkStoreOpts) error {
requests := []*proto.SetRequest{}
for idx := range req {
protoRequest, err := toSetRequest(&req[idx])
if err != nil {
return err
}
requests = append(requests, protoRequest)
}
_, err := ss.Client.BulkSet(ctx, &proto.BulkSetRequest{
Items: requests,
Options: &proto.BulkSetRequestOptions{
Parallelism: int64(opts.Parallelism),
},
})
return mapBulkSetErrs(err)
}
// Query performs a query on the state store.
func (ss *grpcStateStore) Query(ctx context.Context, req *state.QueryRequest) (*state.QueryResponse, error) {
q, err := toQuery(req.Query)
if err != nil {
return nil, err
}
resp, err := ss.Client.Query(ctx, &proto.QueryRequest{
Query: q,
Metadata: req.Metadata,
})
if err != nil {
return nil, err
}
return fromQueryResponse(resp), nil
}
// Multi executes operations in a transactional environment.
func (ss *grpcStateStore) Multi(ctx context.Context, request *state.TransactionalStateRequest) error {
operations := make([]*proto.TransactionalStateOperation, len(request.Operations))
for idx, op := range request.Operations {
transactOp, err := toTransactOperation(op)
if err != nil {
return err
}
operations[idx] = transactOp
}
_, err := ss.Client.Transact(ctx, &proto.TransactionalStateRequest{
Operations: operations,
Metadata: request.Metadata,
})
return err
}
// MultiMaxSize returns the maximum number of operations allowed in a transactional request.
func (ss *grpcStateStore) MultiMaxSize() int {
ss.lock.RLock()
multiMaxSize := ss.multiMaxSize
ss.lock.RUnlock()
if multiMaxSize != nil {
return *multiMaxSize
}
ss.lock.Lock()
defer ss.lock.Unlock()
	// Check the cached value again in case another goroutine set it while we were waiting for the write lock
	if ss.multiMaxSize != nil {
		return *ss.multiMaxSize
	}
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
resp, err := ss.Client.MultiMaxSize(ctx, new(proto.MultiMaxSizeRequest))
if err != nil {
log.Error("failed to get multi max size from state store", err)
ss.multiMaxSize = ptr.Of(-1)
return *ss.multiMaxSize
}
// If the pluggable component is on a 64bit system and the dapr runtime is on a 32bit system,
// the response could be larger than the maximum int32 value.
// In this case, we set the max size to the maximum possible value for a 32bit system.
is32bitSystem := math.MaxInt == math.MaxInt32
if is32bitSystem && resp.GetMaxSize() > int64(math.MaxInt32) {
log.Warnf("multi max size %d is too large for 32bit systems, setting to max possible", resp.GetMaxSize())
ss.multiMaxSize = ptr.Of(math.MaxInt32)
return *ss.multiMaxSize
}
ss.multiMaxSize = ptr.Of(int(resp.GetMaxSize()))
return *ss.multiMaxSize
}
// mappers and helpers.
//
//nolint:nosnakecase
func toSortOrder(order string) proto.Sorting_Order {
ord, ok := proto.Sorting_Order_value[order]
if !ok {
return proto.Sorting_ASC
}
return proto.Sorting_Order(ord)
}
func toSorting(sorting []query.Sorting) []*proto.Sorting {
sortingList := make([]*proto.Sorting, len(sorting))
for idx, sortParam := range sorting {
sortingList[idx] = &proto.Sorting{
Key: sortParam.Key,
Order: toSortOrder(sortParam.Order),
}
}
return sortingList
}
func toPagination(pagination query.Pagination) *proto.Pagination {
return &proto.Pagination{
Limit: int64(pagination.Limit),
Token: pagination.Token,
}
}
func toQuery(req query.Query) (*proto.Query, error) {
filters := make(map[string]*anypb.Any)
for key, value := range req.Filters {
data, err := utils.Marshal(value, json.Marshal)
if err != nil {
return nil, err
}
filters[key] = &anypb.Any{
Value: data,
}
}
return &proto.Query{
Filter: filters,
Sort: toSorting(req.Sort),
Pagination: toPagination(req.Page),
}, nil
}
func fromQueryResponse(resp *proto.QueryResponse) *state.QueryResponse {
results := make([]state.QueryItem, len(resp.GetItems()))
for idx, item := range resp.GetItems() {
itemIdx := state.QueryItem{
Key: item.GetKey(),
Data: item.GetData(),
ETag: fromETagResponse(item.GetEtag()),
Error: item.GetError(),
ContentType: strNilIfEmpty(item.GetContentType()),
}
results[idx] = itemIdx
}
return &state.QueryResponse{
Results: results,
Token: resp.GetToken(),
Metadata: resp.GetMetadata(),
}
}
func toTransactOperation(req state.TransactionalStateOperation) (*proto.TransactionalStateOperation, error) {
switch request := req.(type) {
case state.SetRequest:
setReq, err := toSetRequest(&request)
if err != nil {
return nil, err
}
return &proto.TransactionalStateOperation{
Request: &proto.TransactionalStateOperation_Set{Set: setReq}, //nolint:nosnakecase
}, nil
case state.DeleteRequest:
return &proto.TransactionalStateOperation{
Request: &proto.TransactionalStateOperation_Delete{Delete: toDeleteRequest(&request)}, //nolint:nosnakecase
}, nil
}
return nil, ErrTransactOperationNotSupported
}
func toSetRequest(req *state.SetRequest) (*proto.SetRequest, error) {
if req == nil {
return nil, nil
}
var dataBytes []byte
switch reqValue := req.Value.(type) {
case []byte:
dataBytes = reqValue
default:
if reqValue == nil {
return nil, ErrNilSetValue
}
// TODO only json content type is supported.
var err error
if dataBytes, err = utils.Marshal(reqValue, json.Marshal); err != nil {
return nil, err
}
}
return &proto.SetRequest{
Key: req.GetKey(),
Value: dataBytes,
Etag: toETagRequest(req.ETag),
Metadata: req.GetMetadata(),
ContentType: strValueIfNotNil(req.ContentType),
Options: &proto.StateOptions{
Concurrency: concurrencyOf(req.Options.Concurrency),
Consistency: consistencyOf(req.Options.Consistency),
},
}, nil
}
func fromGetResponse(resp *proto.GetResponse) *state.GetResponse {
return &state.GetResponse{
Data: resp.GetData(),
ETag: fromETagResponse(resp.GetEtag()),
Metadata: resp.GetMetadata(),
ContentType: strNilIfEmpty(resp.GetContentType()),
}
}
func toDeleteRequest(req *state.DeleteRequest) *proto.DeleteRequest {
if req == nil {
return nil
}
return &proto.DeleteRequest{
Key: req.Key,
Etag: toETagRequest(req.ETag),
Metadata: req.Metadata,
Options: &proto.StateOptions{
Concurrency: concurrencyOf(req.Options.Concurrency),
Consistency: consistencyOf(req.Options.Consistency),
},
}
}
func fromETagResponse(etag *proto.Etag) *string {
if etag == nil {
return nil
}
return &etag.Value
}
func toETagRequest(etag *string) *proto.Etag {
if etag == nil {
return nil
}
return &proto.Etag{
Value: *etag,
}
}
func toGetRequest(req *state.GetRequest) *proto.GetRequest {
if req == nil {
return nil
}
return &proto.GetRequest{
Key: req.Key,
Metadata: req.Metadata,
Consistency: consistencyOf(req.Options.Consistency),
}
}
//nolint:nosnakecase
var consistencyModels = map[string]proto.StateOptions_StateConsistency{
state.Eventual: proto.StateOptions_CONSISTENCY_EVENTUAL,
state.Strong: proto.StateOptions_CONSISTENCY_STRONG,
}
//nolint:nosnakecase
func consistencyOf(value string) proto.StateOptions_StateConsistency {
consistency, ok := consistencyModels[value]
if !ok {
return proto.StateOptions_CONSISTENCY_UNSPECIFIED
}
return consistency
}
//nolint:nosnakecase
var concurrencyModels = map[string]proto.StateOptions_StateConcurrency{
state.FirstWrite: proto.StateOptions_CONCURRENCY_FIRST_WRITE,
state.LastWrite: proto.StateOptions_CONCURRENCY_LAST_WRITE,
}
//nolint:nosnakecase
func concurrencyOf(value string) proto.StateOptions_StateConcurrency {
concurrency, ok := concurrencyModels[value]
if !ok {
return proto.StateOptions_CONCURRENCY_UNSPECIFIED
}
return concurrency
}
// stateStoreClient wraps the conventional, transactional, queriable, and multi-max-size state store clients.
type stateStoreClient struct {
proto.StateStoreClient
proto.TransactionalStateStoreClient
proto.QueriableStateStoreClient
proto.TransactionalStoreMultiMaxSizeClient
}
// strNilIfEmpty returns nil if string is empty
func strNilIfEmpty(str string) *string {
if str == "" {
return nil
}
return &str
}
// strValueIfNotNil returns the string value if not nil
func strValueIfNotNil(str *string) string {
if str != nil {
return *str
}
return ""
}
// newStateStoreClient creates a new stateStore client instance.
func newStateStoreClient(cc grpc.ClientConnInterface) stateStoreClient {
return stateStoreClient{
StateStoreClient: proto.NewStateStoreClient(cc),
TransactionalStateStoreClient: proto.NewTransactionalStateStoreClient(cc),
QueriableStateStoreClient: proto.NewQueriableStateStoreClient(cc),
TransactionalStoreMultiMaxSizeClient: proto.NewTransactionalStoreMultiMaxSizeClient(cc),
}
}
// fromConnector creates a new GRPC state store using the given underlying connector.
func fromConnector(_ logger.Logger, connector *pluggable.GRPCConnector[stateStoreClient]) *grpcStateStore {
return &grpcStateStore{
features: make([]state.Feature, 0),
GRPCConnector: connector,
}
}
// NewGRPCStateStore creates a new grpc state store using the given socket factory.
func NewGRPCStateStore(l logger.Logger, socket string) *grpcStateStore {
return fromConnector(l, pluggable.NewGRPCConnector(socket, newStateStoreClient))
}
// newGRPCStateStore creates a new state store for the given pluggable component.
func newGRPCStateStore(dialer pluggable.GRPCConnectionDialer) func(l logger.Logger) state.Store {
return func(l logger.Logger) state.Store {
return fromConnector(l, pluggable.NewGRPCConnectorWithDialer(dialer, newStateStoreClient))
}
}
func init() {
//nolint:nosnakecase
pluggable.AddServiceDiscoveryCallback(proto.StateStore_ServiceDesc.ServiceName, func(name string, dialer pluggable.GRPCConnectionDialer) {
DefaultRegistry.RegisterComponent(newGRPCStateStore(dialer), name)
})
}
|
mikeee/dapr
|
pkg/components/state/pluggable.go
|
GO
|
mit
| 18,296 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package state
import (
"context"
"errors"
"fmt"
"net"
"os"
"runtime"
"sync/atomic"
"testing"
guuid "github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/genproto/googleapis/rpc/errdetails"
"google.golang.org/grpc"
"google.golang.org/grpc/status"
contribMetadata "github.com/dapr/components-contrib/metadata"
"github.com/dapr/components-contrib/state"
"github.com/dapr/components-contrib/state/query"
"github.com/dapr/dapr/pkg/components/pluggable"
proto "github.com/dapr/dapr/pkg/proto/components/v1"
testingGrpc "github.com/dapr/dapr/pkg/testing/grpc"
"github.com/dapr/kit/logger"
)
type server struct {
proto.UnimplementedStateStoreServer
proto.UnimplementedTransactionalStateStoreServer
initCalled atomic.Int64
featuresCalled atomic.Int64
deleteCalled atomic.Int64
onDeleteCalled func(*proto.DeleteRequest)
deleteErr error
getCalled atomic.Int64
onGetCalled func(*proto.GetRequest)
getErr error
getResponse *proto.GetResponse
setCalled atomic.Int64
onSetCalled func(*proto.SetRequest)
setErr error
pingCalled atomic.Int64
pingErr error
bulkDeleteCalled atomic.Int64
onBulkDeleteCalled func(*proto.BulkDeleteRequest)
bulkDeleteErr error
bulkGetCalled atomic.Int64
onBulkGetCalled func(*proto.BulkGetRequest)
bulkGetErr error
bulkGetResponse *proto.BulkGetResponse
bulkSetCalled atomic.Int64
onBulkSetCalled func(*proto.BulkSetRequest)
bulkSetErr error
transactCalled atomic.Int64
onTransactCalled func(*proto.TransactionalStateRequest)
transactErr error
queryCalled atomic.Int64
onQueryCalled func(*proto.QueryRequest)
queryResp *proto.QueryResponse
queryErr error
}
func (s *server) Query(_ context.Context, req *proto.QueryRequest) (*proto.QueryResponse, error) {
s.queryCalled.Add(1)
if s.onQueryCalled != nil {
s.onQueryCalled(req)
}
return s.queryResp, s.queryErr
}
func (s *server) Transact(_ context.Context, req *proto.TransactionalStateRequest) (*proto.TransactionalStateResponse, error) {
s.transactCalled.Add(1)
if s.onTransactCalled != nil {
s.onTransactCalled(req)
}
return &proto.TransactionalStateResponse{}, s.transactErr
}
func (s *server) Delete(ctx context.Context, req *proto.DeleteRequest) (*proto.DeleteResponse, error) {
s.deleteCalled.Add(1)
if s.onDeleteCalled != nil {
s.onDeleteCalled(req)
}
return &proto.DeleteResponse{}, s.deleteErr
}
func (s *server) Get(ctx context.Context, req *proto.GetRequest) (*proto.GetResponse, error) {
s.getCalled.Add(1)
if s.onGetCalled != nil {
s.onGetCalled(req)
}
return s.getResponse, s.getErr
}
func (s *server) Set(ctx context.Context, req *proto.SetRequest) (*proto.SetResponse, error) {
s.setCalled.Add(1)
if s.onSetCalled != nil {
s.onSetCalled(req)
}
return &proto.SetResponse{}, s.setErr
}
func (s *server) Ping(context.Context, *proto.PingRequest) (*proto.PingResponse, error) {
s.pingCalled.Add(1)
return &proto.PingResponse{}, s.pingErr
}
func (s *server) BulkDelete(ctx context.Context, req *proto.BulkDeleteRequest) (*proto.BulkDeleteResponse, error) {
s.bulkDeleteCalled.Add(1)
if s.onBulkDeleteCalled != nil {
s.onBulkDeleteCalled(req)
}
return &proto.BulkDeleteResponse{}, s.bulkDeleteErr
}
func (s *server) BulkGet(ctx context.Context, req *proto.BulkGetRequest) (*proto.BulkGetResponse, error) {
s.bulkGetCalled.Add(1)
if s.onBulkGetCalled != nil {
s.onBulkGetCalled(req)
}
return s.bulkGetResponse, s.bulkGetErr
}
func (s *server) BulkSet(ctx context.Context, req *proto.BulkSetRequest) (*proto.BulkSetResponse, error) {
s.bulkSetCalled.Add(1)
if s.onBulkSetCalled != nil {
s.onBulkSetCalled(req)
}
return &proto.BulkSetResponse{}, s.bulkSetErr
}
func (s *server) Init(context.Context, *proto.InitRequest) (*proto.InitResponse, error) {
s.initCalled.Add(1)
return &proto.InitResponse{}, nil
}
func (s *server) Features(context.Context, *proto.FeaturesRequest) (*proto.FeaturesResponse, error) {
s.featuresCalled.Add(1)
return &proto.FeaturesResponse{}, nil
}
var testLogger = logger.NewLogger("state-pluggable-logger")
// wrapString wraps the given string into quotes.
func wrapString(str string) string {
return fmt.Sprintf("\"%s\"", str)
}
func TestComponentCalls(t *testing.T) {
getStateStore := func(srv *server) (statestore *grpcStateStore, cleanupf func(), err error) {
withSvc := testingGrpc.TestServerWithDialer(testLogger, func(s *grpc.Server, svc *server) {
proto.RegisterStateStoreServer(s, svc)
proto.RegisterTransactionalStateStoreServer(s, svc)
proto.RegisterQueriableStateStoreServer(s, svc)
})
dialer, cleanup, err := withSvc(srv)
require.NoError(t, err)
clientFactory := newGRPCStateStore(func(ctx context.Context, name string, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
return dialer(ctx, opts...)
})
client := clientFactory(testLogger).(*grpcStateStore)
require.NoError(t, client.Init(context.Background(), state.Metadata{}))
return client, cleanup, err
}
if runtime.GOOS != "windows" {
t.Run("test init should populate features and call grpc init", func(t *testing.T) {
const (
fakeName = "name"
fakeType = "type"
fakeVersion = "v1"
fakeComponentName = "component"
fakeSocketFolder = "/tmp"
)
uniqueID := guuid.New().String()
socket := fmt.Sprintf("%s/%s.sock", fakeSocketFolder, uniqueID)
defer os.Remove(socket)
connector := pluggable.NewGRPCConnector(socket, newStateStoreClient)
defer connector.Close()
listener, err := net.Listen("unix", socket)
require.NoError(t, err)
defer listener.Close()
s := grpc.NewServer()
srv := &server{}
proto.RegisterStateStoreServer(s, srv)
go func() {
if serveErr := s.Serve(listener); serveErr != nil {
testLogger.Debugf("Server exited with error: %v", serveErr)
}
}()
ps := fromConnector(testLogger, connector)
err = ps.Init(context.Background(), state.Metadata{
Base: contribMetadata.Base{},
})
require.NoError(t, err)
assert.Equal(t, int64(1), srv.featuresCalled.Load())
assert.Equal(t, int64(1), srv.initCalled.Load())
})
} else {
t.Logf("skipping pubsub pluggable component init test due to the lack of OS (%s) support", runtime.GOOS)
}
t.Run("features should return the component features'", func(t *testing.T) {
stStore, cleanup, err := getStateStore(&server{})
require.NoError(t, err)
defer cleanup()
assert.Empty(t, stStore.Features())
stStore.features = []state.Feature{state.FeatureETag}
assert.NotEmpty(t, stStore.Features())
assert.Equal(t, state.FeatureETag, stStore.Features()[0])
})
t.Run("delete should call delete grpc method", func(t *testing.T) {
const fakeKey = "fakeKey"
svc := &server{
onDeleteCalled: func(req *proto.DeleteRequest) {
assert.Equal(t, fakeKey, req.GetKey())
},
}
stStore, cleanup, err := getStateStore(svc)
require.NoError(t, err)
defer cleanup()
err = stStore.Delete(context.Background(), &state.DeleteRequest{
Key: fakeKey,
})
require.NoError(t, err)
assert.Equal(t, int64(1), svc.deleteCalled.Load())
})
t.Run("delete should return an err when grpc delete returns an error", func(t *testing.T) {
const fakeKey = "fakeKey"
fakeErr := errors.New("my-fake-err")
svc := &server{
onDeleteCalled: func(req *proto.DeleteRequest) {
assert.Equal(t, fakeKey, req.GetKey())
},
deleteErr: fakeErr,
}
stStore, cleanup, err := getStateStore(svc)
require.NoError(t, err)
defer cleanup()
err = stStore.Delete(context.Background(), &state.DeleteRequest{
Key: fakeKey,
})
require.Error(t, err)
assert.Equal(t, int64(1), svc.deleteCalled.Load())
})
t.Run("delete should return etag mismatch err when grpc delete returns etag mismatch code", func(t *testing.T) {
const fakeKey = "fakeKey"
st := status.New(GRPCCodeETagMismatch, "fake-err-msg")
desc := "The ETag field must only contain alphanumeric characters"
v := &errdetails.BadRequest_FieldViolation{
Field: etagField,
Description: desc,
}
br := &errdetails.BadRequest{}
br.FieldViolations = append(br.GetFieldViolations(), v)
st, err := st.WithDetails(br)
require.NoError(t, err)
svc := &server{
onDeleteCalled: func(req *proto.DeleteRequest) {
assert.Equal(t, fakeKey, req.GetKey())
},
deleteErr: st.Err(),
}
stStore, cleanup, err := getStateStore(svc)
require.NoError(t, err)
defer cleanup()
err = stStore.Delete(context.Background(), &state.DeleteRequest{
Key: fakeKey,
})
require.Error(t, err)
etag, ok := err.(*state.ETagError)
require.True(t, ok)
assert.Equal(t, state.ETagMismatch, etag.Kind())
assert.Equal(t, int64(1), svc.deleteCalled.Load())
})
t.Run("delete should return etag invalid err when grpc delete returns etag invalid code", func(t *testing.T) {
const fakeKey = "fakeKey"
st := status.New(GRPCCodeETagInvalid, "fake-err-msg")
desc := "The ETag field must only contain alphanumeric characters"
v := &errdetails.BadRequest_FieldViolation{
Field: etagField,
Description: desc,
}
br := &errdetails.BadRequest{}
br.FieldViolations = append(br.GetFieldViolations(), v)
st, err := st.WithDetails(br)
require.NoError(t, err)
svc := &server{
onDeleteCalled: func(req *proto.DeleteRequest) {
assert.Equal(t, fakeKey, req.GetKey())
},
deleteErr: st.Err(),
}
stStore, cleanup, err := getStateStore(svc)
require.NoError(t, err)
defer cleanup()
err = stStore.Delete(context.Background(), &state.DeleteRequest{
Key: fakeKey,
})
require.Error(t, err)
etag, ok := err.(*state.ETagError)
require.True(t, ok)
assert.Equal(t, state.ETagInvalid, etag.Kind())
assert.Equal(t, int64(1), svc.deleteCalled.Load())
})
t.Run("get should return an err when grpc get returns an error", func(t *testing.T) {
const fakeKey = "fakeKey"
svc := &server{
onGetCalled: func(req *proto.GetRequest) {
assert.Equal(t, fakeKey, req.GetKey())
},
getErr: errors.New("my-fake-err"),
}
stStore, cleanup, err := getStateStore(svc)
require.NoError(t, err)
defer cleanup()
resp, err := stStore.Get(context.Background(), &state.GetRequest{
Key: fakeKey,
})
require.Error(t, err)
assert.Equal(t, int64(1), svc.getCalled.Load())
assert.Nil(t, resp)
})
t.Run("get should return an err when response is nil", func(t *testing.T) {
const fakeKey = "fakeKey"
svc := &server{
onGetCalled: func(req *proto.GetRequest) {
assert.Equal(t, fakeKey, req.GetKey())
},
}
stStore, cleanup, err := getStateStore(svc)
require.NoError(t, err)
defer cleanup()
resp, err := stStore.Get(context.Background(), &state.GetRequest{
Key: fakeKey,
})
require.NoError(t, err)
assert.Equal(t, &state.GetResponse{}, resp)
assert.Equal(t, int64(1), svc.getCalled.Load())
assert.NotNil(t, resp)
assert.Nil(t, resp.Data)
assert.Nil(t, resp.Metadata)
assert.Nil(t, resp.ContentType)
assert.Nil(t, resp.ETag)
})
t.Run("get should return get response when response is returned from the grpc call", func(t *testing.T) {
const fakeKey = "fakeKey"
fakeData := []byte(`fake-data`)
svc := &server{
onGetCalled: func(req *proto.GetRequest) {
assert.Equal(t, fakeKey, req.GetKey())
},
getResponse: &proto.GetResponse{
Data: fakeData,
},
}
stStore, cleanup, err := getStateStore(svc)
require.NoError(t, err)
defer cleanup()
resp, err := stStore.Get(context.Background(), &state.GetRequest{
Key: fakeKey,
})
require.NoError(t, err)
assert.Equal(t, int64(1), svc.getCalled.Load())
assert.Equal(t, resp.Data, fakeData)
})
t.Run("set should return an err when grpc set returns it", func(t *testing.T) {
const fakeKey, fakeData = "fakeKey", "fakeData"
svc := &server{
onSetCalled: func(req *proto.SetRequest) {
assert.Equal(t, fakeKey, req.GetKey())
assert.Equal(t, []byte(wrapString(fakeData)), req.GetValue())
},
setErr: errors.New("fake-set-err"),
}
stStore, cleanup, err := getStateStore(svc)
require.NoError(t, err)
defer cleanup()
err = stStore.Set(context.Background(), &state.SetRequest{
Key: fakeKey,
Value: fakeData,
})
require.Error(t, err)
assert.Equal(t, int64(1), svc.setCalled.Load())
})
t.Run("set should not return an err when grpc not returns an error", func(t *testing.T) {
const fakeKey, fakeData = "fakeKey", "fakeData"
svc := &server{
onSetCalled: func(req *proto.SetRequest) {
assert.Equal(t, fakeKey, req.GetKey())
assert.Equal(t, []byte(wrapString(fakeData)), req.GetValue())
},
}
stStore, cleanup, err := getStateStore(svc)
require.NoError(t, err)
defer cleanup()
err = stStore.Set(context.Background(), &state.SetRequest{
Key: fakeKey,
Value: fakeData,
})
require.NoError(t, err)
assert.Equal(t, int64(1), svc.setCalled.Load())
})
t.Run("ping should not return an err when grpc not returns an error", func(t *testing.T) {
svc := &server{}
stStore, cleanup, err := getStateStore(svc)
require.NoError(t, err)
defer cleanup()
err = stStore.Ping()
require.NoError(t, err)
assert.Equal(t, int64(1), svc.pingCalled.Load())
})
t.Run("ping should return an err when grpc returns an error", func(t *testing.T) {
svc := &server{
pingErr: errors.New("fake-err"),
}
stStore, cleanup, err := getStateStore(svc)
require.NoError(t, err)
defer cleanup()
err = stStore.Ping()
require.Error(t, err)
assert.Equal(t, int64(1), svc.pingCalled.Load())
})
t.Run("bulkSet should return an err when grpc returns an error", func(t *testing.T) {
svc := &server{
bulkSetErr: errors.New("fake-bulk-err"),
}
stStore, cleanup, err := getStateStore(svc)
require.NoError(t, err)
defer cleanup()
err = stStore.BulkSet(context.Background(), []state.SetRequest{}, state.BulkStoreOpts{})
require.Error(t, err)
assert.Equal(t, int64(1), svc.bulkSetCalled.Load())
})
t.Run("bulkSet should returns an error when attempted to set value to nil", func(t *testing.T) {
requests := []state.SetRequest{
{
Key: "key-1",
},
}
svc := &server{
onBulkSetCalled: func(_ *proto.BulkSetRequest) {
assert.FailNow(t, "bulkset should not be called")
},
}
stStore, cleanup, err := getStateStore(svc)
require.NoError(t, err)
defer cleanup()
err = stStore.BulkSet(context.Background(), requests, state.BulkStoreOpts{})
require.ErrorIs(t, err, ErrNilSetValue)
assert.Equal(t, int64(0), svc.bulkSetCalled.Load())
})
t.Run("bulkSet should send a bulkSetRequest containing all setRequest items", func(t *testing.T) {
const fakeKey, otherFakeKey, fakeData = "fakeKey", "otherFakeKey", "fakeData"
requests := []state.SetRequest{
{
Key: fakeKey,
Value: fakeData,
},
{
Key: otherFakeKey,
Value: fakeData,
},
}
svc := &server{
onBulkSetCalled: func(bsr *proto.BulkSetRequest) {
assert.Len(t, bsr.GetItems(), len(requests))
},
}
stStore, cleanup, err := getStateStore(svc)
require.NoError(t, err)
defer cleanup()
err = stStore.BulkSet(context.Background(), requests, state.BulkStoreOpts{})
require.NoError(t, err)
assert.Equal(t, int64(1), svc.bulkSetCalled.Load())
})
t.Run("bulkDelete should send a bulkDeleteRequest containing all deleted items", func(t *testing.T) {
const fakeKey, otherFakeKey = "fakeKey", "otherFakeKey"
requests := []state.DeleteRequest{
{
Key: fakeKey,
},
{
Key: otherFakeKey,
},
}
svc := &server{
onBulkDeleteCalled: func(bsr *proto.BulkDeleteRequest) {
assert.Len(t, bsr.GetItems(), len(requests))
},
}
stStore, cleanup, err := getStateStore(svc)
require.NoError(t, err)
defer cleanup()
err = stStore.BulkDelete(context.Background(), requests, state.BulkStoreOpts{})
require.NoError(t, err)
assert.Equal(t, int64(1), svc.bulkDeleteCalled.Load())
})
t.Run("bulkDelete should return an error when grpc bulkDelete returns an error", func(t *testing.T) {
requests := []state.DeleteRequest{
{
Key: "fake",
},
}
svc := &server{
bulkDeleteErr: errors.New("fake-bulk-delete-err"),
onBulkDeleteCalled: func(bsr *proto.BulkDeleteRequest) {
assert.Len(t, bsr.GetItems(), len(requests))
},
}
stStore, cleanup, err := getStateStore(svc)
require.NoError(t, err)
defer cleanup()
err = stStore.BulkDelete(context.Background(), requests, state.BulkStoreOpts{})
require.Error(t, err)
assert.Equal(t, int64(1), svc.bulkDeleteCalled.Load())
})
t.Run("bulkDelete should return bulkDeleteRowMismatchError when grpc bulkDelete returns a grpcCodeBulkDeleteRowMismatchError", func(t *testing.T) {
requests := []state.DeleteRequest{
{
Key: "fake",
},
}
st := status.New(GRPCCodeBulkDeleteRowMismatch, "fake-err-msg")
br := &errdetails.ErrorInfo{}
br.Metadata = map[string]string{
affectedRowsMetadataKey: "100",
expectedRowsMetadataKey: "99",
}
st, err := st.WithDetails(br)
require.NoError(t, err)
svc := &server{
bulkDeleteErr: st.Err(),
onBulkDeleteCalled: func(bsr *proto.BulkDeleteRequest) {
assert.Len(t, bsr.GetItems(), len(requests))
},
}
stStore, cleanup, err := getStateStore(svc)
require.NoError(t, err)
defer cleanup()
err = stStore.BulkDelete(context.Background(), requests, state.BulkStoreOpts{})
require.Error(t, err)
_, ok := err.(*state.BulkDeleteRowMismatchError)
require.True(t, ok)
assert.Equal(t, int64(1), svc.bulkDeleteCalled.Load())
})
t.Run("bulkGet should return an error when grpc bulkGet returns an error", func(t *testing.T) {
requests := []state.GetRequest{
{
Key: "fake",
},
}
svc := &server{
bulkGetErr: errors.New("fake-bulk-get-err"),
}
stStore, cleanup, err := getStateStore(svc)
require.NoError(t, err)
defer cleanup()
resp, err := stStore.BulkGet(context.Background(), requests, state.BulkGetOpts{})
require.Error(t, err)
assert.Nil(t, resp)
assert.Equal(t, int64(1), svc.bulkGetCalled.Load())
})
t.Run("bulkGet should send a bulkGetRequest containing all retrieved items", func(t *testing.T) {
const fakeKey, otherFakeKey = "fakeKey", "otherFakeKey"
requests := []state.GetRequest{
{
Key: fakeKey,
},
{
Key: otherFakeKey,
},
}
respItems := []*proto.BulkStateItem{{
Key: fakeKey,
}, {Key: otherFakeKey}}
svc := &server{
onBulkGetCalled: func(bsr *proto.BulkGetRequest) {
assert.Len(t, bsr.GetItems(), len(requests))
},
bulkGetResponse: &proto.BulkGetResponse{
Items: respItems,
},
}
stStore, cleanup, err := getStateStore(svc)
require.NoError(t, err)
defer cleanup()
resp, err := stStore.BulkGet(context.Background(), requests, state.BulkGetOpts{})
require.NoError(t, err)
assert.NotNil(t, resp)
assert.Len(t, resp, len(requests))
assert.Equal(t, int64(1), svc.bulkGetCalled.Load())
})
t.Run("transact should returns an error when grpc returns an error", func(t *testing.T) {
svc := &server{
transactErr: errors.New("transact-fake-err"),
}
stStore, cleanup, err := getStateStore(svc)
require.NoError(t, err)
defer cleanup()
err = stStore.Multi(context.Background(), &state.TransactionalStateRequest{
Operations: []state.TransactionalStateOperation{},
Metadata: map[string]string{},
})
require.Error(t, err)
assert.Equal(t, int64(1), svc.transactCalled.Load())
})
t.Run("transact should send a transact containing all operations", func(t *testing.T) {
const fakeKey, otherFakeKey, fakeData = "fakeKey", "otherFakeKey", "fakeData"
operations := []state.SetRequest{
{
Key: fakeKey,
Value: fakeData,
},
{
Key: otherFakeKey,
Value: fakeData,
},
}
svc := &server{
onTransactCalled: func(bsr *proto.TransactionalStateRequest) {
assert.Len(t, bsr.GetOperations(), len(operations))
},
}
stStore, cleanup, err := getStateStore(svc)
require.NoError(t, err)
defer cleanup()
err = stStore.Multi(context.Background(), &state.TransactionalStateRequest{
Operations: []state.TransactionalStateOperation{
operations[0],
operations[1],
},
})
require.NoError(t, err)
assert.Equal(t, int64(1), svc.transactCalled.Load())
})
t.Run("query should return an error when grpc query returns an error", func(t *testing.T) {
svc := &server{
queryErr: errors.New("fake-query-err"),
}
stStore, cleanup, err := getStateStore(svc)
require.NoError(t, err)
defer cleanup()
resp, err := stStore.Query(context.Background(), &state.QueryRequest{})
require.Error(t, err)
assert.Nil(t, resp)
assert.Equal(t, int64(1), svc.queryCalled.Load())
})
t.Run("query should send a QueryRequest containing all filters", func(t *testing.T) {
filters := map[string]interface{}{
"a": []string{"a"},
}
request := &state.QueryRequest{
Query: query.Query{
QueryFields: query.QueryFields{
Filters: filters,
},
},
Metadata: map[string]string{},
}
results := []*proto.QueryItem{
{
Key: "",
Data: []byte{},
Etag: &proto.Etag{},
Error: "",
ContentType: "",
},
}
svc := &server{
onQueryCalled: func(bsr *proto.QueryRequest) {
assert.Len(t, bsr.GetQuery().GetFilter(), len(filters))
},
queryResp: &proto.QueryResponse{
Items: results,
},
}
stStore, cleanup, err := getStateStore(svc)
require.NoError(t, err)
defer cleanup()
resp, err := stStore.Query(context.Background(), request)
require.NoError(t, err)
assert.NotNil(t, resp)
assert.Len(t, resp.Results, len(results))
assert.Equal(t, int64(1), svc.queryCalled.Load())
})
}
//nolint:nosnakecase
func TestMappers(t *testing.T) {
t.Run("consistencyOf should return unspecified for unknown consistency", func(t *testing.T) {
assert.Equal(t, proto.StateOptions_CONSISTENCY_UNSPECIFIED, consistencyOf(""))
})
t.Run("consistencyOf should return proper consistency when well-known consistency is used", func(t *testing.T) {
assert.Equal(t, proto.StateOptions_CONSISTENCY_EVENTUAL, consistencyOf(state.Eventual))
assert.Equal(t, proto.StateOptions_CONSISTENCY_STRONG, consistencyOf(state.Strong))
})
t.Run("concurrencyOf should return unspecified for unknown concurrency", func(t *testing.T) {
assert.Equal(t, proto.StateOptions_CONCURRENCY_UNSPECIFIED, concurrencyOf(""))
})
t.Run("concurrencyOf should return proper concurrency when well-known concurrency is used", func(t *testing.T) {
assert.Equal(t, proto.StateOptions_CONCURRENCY_FIRST_WRITE, concurrencyOf(state.FirstWrite))
assert.Equal(t, proto.StateOptions_CONCURRENCY_LAST_WRITE, concurrencyOf(state.LastWrite))
})
t.Run("toGetRequest should return nil when receiving a nil request", func(t *testing.T) {
assert.Nil(t, toGetRequest(nil))
})
t.Run("toGetRequest should map all properties from the given request", func(t *testing.T) {
const fakeKey = "fake"
getRequest := toGetRequest(&state.GetRequest{
Key: fakeKey,
Metadata: map[string]string{
fakeKey: fakeKey,
},
Options: state.GetStateOption{
Consistency: state.Eventual,
},
})
assert.Equal(t, fakeKey, getRequest.GetKey())
assert.Equal(t, fakeKey, getRequest.GetMetadata()[fakeKey])
assert.Equal(t, proto.StateOptions_CONSISTENCY_EVENTUAL, getRequest.GetConsistency())
})
t.Run("fromGetResponse should map all properties from the given response", func(t *testing.T) {
fakeData := []byte(`mydata`)
fakeKey := "key"
fakeETag := "etag"
resp := fromGetResponse(&proto.GetResponse{
Data: fakeData,
Etag: &proto.Etag{
Value: fakeETag,
},
Metadata: map[string]string{
fakeKey: fakeKey,
},
})
assert.Equal(t, resp.Data, fakeData)
assert.Equal(t, resp.ETag, &fakeETag)
assert.Equal(t, resp.Metadata[fakeKey], fakeKey)
})
t.Run("toETagRequest should return nil when receiving a nil etag", func(t *testing.T) {
assert.Nil(t, toETagRequest(nil))
})
t.Run("toETagRequest should set the etag value when receiving a valid etag value", func(t *testing.T) {
fakeETag := "this"
etagRequest := toETagRequest(&fakeETag)
assert.NotNil(t, etagRequest)
assert.Equal(t, etagRequest.GetValue(), fakeETag)
})
t.Run("fromETagResponse should return nil when receiving a nil etag response", func(t *testing.T) {
assert.Nil(t, fromETagResponse(nil))
})
t.Run("fromETagResponse should return the etag value from the response", func(t *testing.T) {})
t.Run("toDeleteRequest should return nil when receiving a nil delete request", func(t *testing.T) {
assert.Nil(t, toDeleteRequest(nil))
})
t.Run("toDeleteRequest map all properties for the given request", func(t *testing.T) {})
t.Run("toSetRequest should return nil when receiving a nil set request", func(t *testing.T) {
req, err := toSetRequest(nil)
require.NoError(t, err)
assert.Nil(t, req)
})
t.Run("toSetRequest should wrap string into quotes", func(t *testing.T) {
const fakeKey, fakePropValue = "fakeKey", "fakePropValue"
fakeEtag := "fakeEtag"
for _, fakeValue := range []any{"fakeStrValue", []byte(`fakeByteValue`), make(map[string]string)} {
req, err := toSetRequest(&state.SetRequest{
Key: fakeKey,
Value: fakeValue,
ETag: &fakeEtag,
Metadata: map[string]string{
fakeKey: fakePropValue,
},
Options: state.SetStateOption{
Concurrency: state.LastWrite,
Consistency: state.Eventual,
},
})
require.NoError(t, err)
assert.NotNil(t, req)
assert.Equal(t, fakeKey, req.GetKey())
assert.NotNil(t, req.GetValue())
if v, ok := fakeValue.(string); ok {
assert.Equal(t, string(req.GetValue()), wrapString(v))
}
assert.Equal(t, fakePropValue, req.GetMetadata()[fakeKey])
assert.Equal(t, proto.StateOptions_CONCURRENCY_LAST_WRITE, req.GetOptions().GetConcurrency())
assert.Equal(t, proto.StateOptions_CONSISTENCY_EVENTUAL, req.GetOptions().GetConsistency())
}
})
t.Run("toSetRequest accept and parse values as []byte", func(t *testing.T) {
const fakeKey, fakePropValue = "fakeKey", "fakePropValue"
fakeEtag := "fakeEtag"
for _, fakeValue := range []any{"fakeStrValue", []byte(`fakeByteValue`), make(map[string]string)} {
req, err := toSetRequest(&state.SetRequest{
Key: fakeKey,
Value: fakeValue,
ETag: &fakeEtag,
Metadata: map[string]string{
fakeKey: fakePropValue,
},
Options: state.SetStateOption{
Concurrency: state.LastWrite,
Consistency: state.Eventual,
},
})
require.NoError(t, err)
assert.NotNil(t, req)
assert.Equal(t, fakeKey, req.GetKey())
assert.NotNil(t, req.GetValue())
assert.Equal(t, fakePropValue, req.GetMetadata()[fakeKey])
assert.Equal(t, proto.StateOptions_CONCURRENCY_LAST_WRITE, req.GetOptions().GetConcurrency())
assert.Equal(t, proto.StateOptions_CONSISTENCY_EVENTUAL, req.GetOptions().GetConsistency())
}
t.Run("toTransact should return err when type is unrecognized", func(t *testing.T) {
req, err := toTransactOperation(failingTransactOperation{})
assert.Nil(t, req)
require.ErrorIs(t, err, ErrTransactOperationNotSupported)
})
t.Run("toTransact should return set operation when type is SetOperation", func(t *testing.T) {
const fakeData = "fakeData"
req, err := toTransactOperation(state.SetRequest{
Key: fakeKey,
Value: fakeData,
})
require.NoError(t, err)
assert.NotNil(t, req)
assert.IsType(t, &proto.TransactionalStateOperation_Set{}, req.GetRequest())
})
t.Run("toTransact should return delete operation when type is SetOperation", func(t *testing.T) {
req, err := toTransactOperation(state.DeleteRequest{
Key: fakeKey,
})
require.NoError(t, err)
assert.NotNil(t, req)
assert.IsType(t, &proto.TransactionalStateOperation_Delete{}, req.GetRequest())
})
})
}
type failingTransactOperation struct{}
func (failingTransactOperation) Operation() state.OperationType {
return "unknown"
}
func (failingTransactOperation) GetKey() string {
return "unknown"
}
func (failingTransactOperation) GetMetadata() map[string]string {
return nil
}
|
mikeee/dapr
|
pkg/components/state/pluggable_test.go
|
GO
|
mit
| 28,773 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package state
import (
"fmt"
"strings"
"github.com/dapr/components-contrib/state"
"github.com/dapr/dapr/pkg/components"
"github.com/dapr/kit/logger"
)
// Registry is a component that returns registered state store implementations.
type Registry struct {
Logger logger.Logger
stateStores map[string]func(logger.Logger) state.Store
// versionsSet holds a set of component types version information for
// component types that have multiple versions.
versionsSet map[string]components.Versioning
}
// DefaultRegistry is the singleton with the registry.
var DefaultRegistry *Registry = NewRegistry()
// NewRegistry is used to create state store registry.
func NewRegistry() *Registry {
return &Registry{
Logger: logger.NewLogger("dapr.state.registry"),
stateStores: make(map[string]func(logger.Logger) state.Store),
versionsSet: make(map[string]components.Versioning),
}
}
// RegisterComponent adds a new state store to the registry.
func (s *Registry) RegisterComponent(componentFactory func(logger.Logger) state.Store, names ...string) {
for _, name := range names {
s.stateStores[createFullName(name)] = componentFactory
}
}
// RegisterComponentWithVersions adds a new state store with its versioning information to the registry.
func (s *Registry) RegisterComponentWithVersions(name string, versions components.Versioning) {
if len(versions.Default) == 0 {
// Panicking here is appropriate because this is a programming error, and
// will happen at init time when registering components.
// An error here is impossible to resolve at runtime, and code change
// always needs to take place.
panic(fmt.Sprintf("default version not set for %s", name))
}
s.stateStores[createFullVersionedName(name, versions.Preferred.Version)] = toConstructor(versions.Preferred)
for _, version := range append(versions.Others, versions.Deprecated...) {
s.stateStores[createFullVersionedName(name, version.Version)] = toConstructor(version)
}
s.versionsSet[createFullName(name)] = versions
}
func toConstructor(cv components.VersionConstructor) func(logger.Logger) state.Store {
fn, ok := cv.Constructor.(func(logger.Logger) state.Store)
if !ok {
// Panicking here is appropriate because this is a programming error, and
// will happen at init time when registering components.
// An error here is impossible to resolve at runtime, and code change
// always needs to take place.
panic(fmt.Sprintf("constructor for %s is not a state store", cv.Version))
}
return fn
}
func (s *Registry) Create(name, version, logName string) (state.Store, error) {
if method, ok := s.getStateStore(name, version, logName); ok {
return method(), nil
}
return nil, fmt.Errorf("couldn't find state store %s/%s", name, version)
}
func (s *Registry) getStateStore(name, version, logName string) (func() state.Store, bool) {
name = strings.ToLower(name)
version = strings.ToLower(version)
if ver, ok := s.versionsSet[name]; ok {
// Default the version when an empty version string is passed, and component
// has multiple versions.
if len(version) == 0 {
version = ver.Default
}
components.CheckDeprecated(s.Logger, name, version, s.versionsSet[name])
}
stateStoreFn, ok := s.stateStores[name+"/"+version]
if ok {
return s.wrapFn(stateStoreFn, logName), true
}
if components.IsInitialVersion(version) {
stateStoreFn, ok = s.stateStores[name]
if ok {
return s.wrapFn(stateStoreFn, logName), true
}
}
return nil, false
}
func (s *Registry) wrapFn(componentFactory func(logger.Logger) state.Store, logName string) func() state.Store {
return func() state.Store {
l := s.Logger
if logName != "" && l != nil {
l = l.WithFields(map[string]any{
"component": logName,
})
}
return componentFactory(l)
}
}
func createFullName(name string) string {
return strings.ToLower("state." + name)
}
func createFullVersionedName(name, version string) string {
return strings.ToLower("state." + name + "/" + version)
}
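// For illustration (the component name below is hypothetical): createFullName("redis")
// yields "state.redis", and createFullVersionedName("redis", "V2") yields
// "state.redis/v2"; both are lowercased so registry lookups are case-insensitive.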
|
mikeee/dapr
|
pkg/components/state/registry.go
|
GO
|
mit
| 4,531 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package state_test
import (
"fmt"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
s "github.com/dapr/components-contrib/state"
"github.com/dapr/dapr/pkg/components"
"github.com/dapr/dapr/pkg/components/state"
"github.com/dapr/kit/logger"
)
type mockState struct {
s.Store
}
func TestRegistry(t *testing.T) {
testRegistry := state.NewRegistry()
t.Run("state is registered", func(t *testing.T) {
const (
stateName = "mockState"
stateNameV2 = "mockState/v2"
componentName = "state." + stateName
)
// Initiate mock object
mock := &mockState{}
mockV2 := &mockState{}
fooV1 := new(mockState)
fooV2 := new(mockState)
fooV3 := new(mockState)
fooV4 := new(mockState)
fooCV1 := func(_ logger.Logger) s.Store { return fooV1 }
fooCV2 := func(_ logger.Logger) s.Store { return fooV2 }
fooCV3 := func(_ logger.Logger) s.Store { return fooV3 }
fooCV4 := func(_ logger.Logger) s.Store { return fooV4 }
// act
testRegistry.RegisterComponent(func(_ logger.Logger) s.Store {
return mock
}, stateName)
testRegistry.RegisterComponent(func(_ logger.Logger) s.Store {
return mockV2
}, stateNameV2)
testRegistry.RegisterComponentWithVersions("foo", components.Versioning{
Preferred: components.VersionConstructor{Version: "v2", Constructor: fooCV2},
Deprecated: []components.VersionConstructor{
{Version: "v1", Constructor: fooCV1},
{Version: "v3", Constructor: fooCV3},
},
Others: []components.VersionConstructor{
{Version: "v4", Constructor: fooCV4},
},
Default: "v1",
})
// assert v0 and v1
p, e := testRegistry.Create(componentName, "v0", "")
require.NoError(t, e)
assert.Same(t, mock, p)
p, e = testRegistry.Create(componentName, "v1", "")
require.NoError(t, e)
assert.Same(t, mock, p)
// assert v2
pV2, e := testRegistry.Create(componentName, "v2", "")
require.NoError(t, e)
assert.Same(t, mockV2, pV2)
// check case-insensitivity
pV2, e = testRegistry.Create(strings.ToUpper(componentName), "V2", "")
require.NoError(t, e)
assert.Same(t, mockV2, pV2)
// Check availability of foo versions
p, err := testRegistry.Create("state.foo", "v1", "")
require.NoError(t, err)
assert.Same(t, fooV1, p)
p, err = testRegistry.Create("state.foo", "v2", "")
require.NoError(t, err)
assert.Same(t, fooV2, p)
p, err = testRegistry.Create("state.foo", "v3", "")
require.NoError(t, err)
assert.Same(t, fooV3, p)
p, err = testRegistry.Create("state.foo", "v4", "")
require.NoError(t, err)
assert.Same(t, fooV4, p)
p, err = testRegistry.Create("state.foo", "v5", "")
require.Error(t, err)
assert.Nil(t, p)
p, err = testRegistry.Create("state.foo", "", "")
require.NoError(t, err)
assert.Same(t, fooV1, p)
p, err = testRegistry.Create("state.foo", "v0", "")
require.Error(t, err)
assert.Nil(t, p)
})
t.Run("state is not registered", func(t *testing.T) {
const (
stateName = "fakeState"
componentName = "state." + stateName
)
// act
p, actualError := testRegistry.Create(componentName, "v1", "")
expectedError := fmt.Errorf("couldn't find state store %s/v1", componentName)
// assert
assert.Nil(t, p)
assert.Equal(t, expectedError.Error(), actualError.Error())
})
}
|
mikeee/dapr
|
pkg/components/state/registry_test.go
|
GO
|
mit
| 3,856 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package state
import (
"fmt"
"os"
"strings"
"sync"
"github.com/dapr/dapr/pkg/api/errors"
)
const (
strategyKey = "keyprefix"
strategyNamespace = "namespace"
strategyAppid = "appid"
strategyStoreName = "name"
strategyNone = "none"
strategyDefault = strategyAppid
daprSeparator = "||"
)
var (
statesConfigurationLock sync.RWMutex
statesConfiguration = map[string]*StoreConfiguration{}
namespace = os.Getenv("NAMESPACE")
)
type StoreConfiguration struct {
keyPrefixStrategy string
}
func SaveStateConfiguration(storeName string, metadata map[string]string) error {
strategy := strategyDefault
for k, v := range metadata {
if strings.ToLower(k) == strategyKey { //nolint:gocritic
strategy = strings.ToLower(v)
break
}
}
err := checkKeyIllegal(strategy)
if err != nil {
return errors.StateStore(storeName).InvalidKeyName(strategy, err.Error())
}
statesConfigurationLock.Lock()
statesConfiguration[storeName] = &StoreConfiguration{keyPrefixStrategy: strategy}
statesConfigurationLock.Unlock()
return nil
}
func GetModifiedStateKey(key, storeName, appID string) (string, error) {
if err := checkKeyIllegal(key); err != nil {
return "", errors.StateStore(storeName).InvalidKeyName(key, err.Error())
}
stateConfiguration := getStateConfiguration(storeName)
switch stateConfiguration.keyPrefixStrategy {
case strategyNone:
return key, nil
case strategyStoreName:
return storeName + daprSeparator + key, nil
case strategyAppid:
if appID == "" {
return key, nil
}
return appID + daprSeparator + key, nil
case strategyNamespace:
if appID == "" {
return key, nil
}
if namespace == "" {
// if namespace is empty, fallback to app id strategy
return appID + daprSeparator + key, nil
}
return namespace + "." + appID + daprSeparator + key, nil
default:
return stateConfiguration.keyPrefixStrategy + daprSeparator + key, nil
}
}
func GetOriginalStateKey(modifiedStateKey string) string {
splits := strings.SplitN(modifiedStateKey, daprSeparator, 3)
if len(splits) <= 1 {
return modifiedStateKey
}
return splits[1]
}
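// Illustrative examples of the key prefix strategies (assuming app ID "myapp",
// store name "mystore", NAMESPACE "ns1", and key "mykey"; all values are hypothetical):
//
//	strategy "none":      "mykey"
//	strategy "appid":     "myapp||mykey"
//	strategy "name":      "mystore||mykey"
//	strategy "namespace": "ns1.myapp||mykey"
//	custom prefix "pfx":  "pfx||mykey"
//
// GetOriginalStateKey strips the first prefix again, so
// GetOriginalStateKey("myapp||mykey") returns "mykey".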
func getStateConfiguration(storeName string) *StoreConfiguration {
statesConfigurationLock.RLock()
c := statesConfiguration[storeName]
if c != nil {
statesConfigurationLock.RUnlock()
return c
}
statesConfigurationLock.RUnlock()
// Acquire a write lock now to update the value in cache
statesConfigurationLock.Lock()
defer statesConfigurationLock.Unlock()
// Try checking the cache again after acquiring a write lock, in case another goroutine has created the object
c = statesConfiguration[storeName]
if c != nil {
return c
}
c = &StoreConfiguration{keyPrefixStrategy: strategyDefault}
statesConfiguration[storeName] = c
return c
}
func checkKeyIllegal(key string) error {
if strings.Contains(key, daprSeparator) {
return fmt.Errorf("input key/keyPrefix '%s' can't contain '%s'", key, daprSeparator)
}
return nil
}
|
mikeee/dapr
|
pkg/components/state/state_config.go
|
GO
|
mit
| 3,537 |
package state
import (
"fmt"
"os"
"strings"
"sync"
"testing"
"github.com/stretchr/testify/require"
)
const key = "state-key-1234567"
func TestMain(m *testing.M) {
SaveStateConfiguration("store1", map[string]string{strategyKey: strategyNone})
SaveStateConfiguration("store2", map[string]string{strategyKey: strategyAppid})
SaveStateConfiguration("store3", map[string]string{strategyKey: strategyDefault})
SaveStateConfiguration("store4", map[string]string{strings.ToUpper(strategyKey): strategyStoreName})
SaveStateConfiguration("store5", map[string]string{strategyKey: "other-fixed-prefix"})
SaveStateConfiguration("store7", map[string]string{strategyKey: strategyNamespace})
// if strategyKey not set
SaveStateConfiguration("store6", map[string]string{})
os.Exit(m.Run())
}
func TestSaveStateConfiguration(t *testing.T) {
testIllegalKeys := []struct {
storename string
prefix string
}{
{
storename: "statestore01",
prefix: "a||b",
},
}
for _, item := range testIllegalKeys {
err := SaveStateConfiguration(item.storename, map[string]string{
strategyKey: item.prefix,
})
require.Error(t, err)
}
}
func TestGetModifiedStateKey(t *testing.T) {
// use custom prefix key
testIllegalKeys := []struct {
storename string
prefix string
key string
}{
{
storename: "statestore01",
prefix: "a",
key: "c||d",
},
}
for _, item := range testIllegalKeys {
err := SaveStateConfiguration(item.storename, map[string]string{
strategyKey: item.prefix,
})
require.NoError(t, err)
_, err = GetModifiedStateKey(item.key, item.storename, "")
require.Error(t, err)
}
}
func TestNonePrefix(t *testing.T) {
modifiedStateKey, _ := GetModifiedStateKey(key, "store1", "appid1")
require.Equal(t, key, modifiedStateKey)
originalStateKey := GetOriginalStateKey(modifiedStateKey)
require.Equal(t, key, originalStateKey)
}
func TestAppidPrefix(t *testing.T) {
modifiedStateKey, _ := GetModifiedStateKey(key, "store2", "appid1")
require.Equal(t, "appid1||state-key-1234567", modifiedStateKey)
originalStateKey := GetOriginalStateKey(modifiedStateKey)
require.Equal(t, key, originalStateKey)
}
func TestAppidPrefix_WithEmptyAppid(t *testing.T) {
modifiedStateKey, _ := GetModifiedStateKey(key, "store2", "")
require.Equal(t, "state-key-1234567", modifiedStateKey)
originalStateKey := GetOriginalStateKey(modifiedStateKey)
require.Equal(t, key, originalStateKey)
}
func TestNamespacePrefix(t *testing.T) {
t.Run("with namespace", func(t *testing.T) {
namespace = "ns1"
modifiedStateKey, _ := GetModifiedStateKey(key, "store7", "appid1")
require.Equal(t, "ns1.appid1||state-key-1234567", modifiedStateKey)
originalStateKey := GetOriginalStateKey(modifiedStateKey)
require.Equal(t, key, originalStateKey)
})
t.Run("with empty namespace, fallback to appid", func(t *testing.T) {
namespace = ""
modifiedStateKey, _ := GetModifiedStateKey(key, "store7", "appid1")
require.Equal(t, "appid1||state-key-1234567", modifiedStateKey)
originalStateKey := GetOriginalStateKey(modifiedStateKey)
require.Equal(t, key, originalStateKey)
})
t.Run("with empty appid", func(t *testing.T) {
namespace = ""
modifiedStateKey, _ := GetModifiedStateKey(key, "store7", "")
require.Equal(t, "state-key-1234567", modifiedStateKey)
originalStateKey := GetOriginalStateKey(modifiedStateKey)
require.Equal(t, key, originalStateKey)
})
}
func TestDefaultPrefix(t *testing.T) {
modifiedStateKey, _ := GetModifiedStateKey(key, "store3", "appid1")
require.Equal(t, "appid1||state-key-1234567", modifiedStateKey)
originalStateKey := GetOriginalStateKey(modifiedStateKey)
require.Equal(t, key, originalStateKey)
}
func TestStoreNamePrefix(t *testing.T) {
key := "state-key-1234567"
modifiedStateKey, _ := GetModifiedStateKey(key, "store4", "appid1")
require.Equal(t, "store4||state-key-1234567", modifiedStateKey)
originalStateKey := GetOriginalStateKey(modifiedStateKey)
require.Equal(t, key, originalStateKey)
}
func TestOtherFixedPrefix(t *testing.T) {
modifiedStateKey, _ := GetModifiedStateKey(key, "store5", "appid1")
require.Equal(t, "other-fixed-prefix||state-key-1234567", modifiedStateKey)
originalStateKey := GetOriginalStateKey(modifiedStateKey)
require.Equal(t, key, originalStateKey)
}
func TestLegacyPrefix(t *testing.T) {
modifiedStateKey, _ := GetModifiedStateKey(key, "store6", "appid1")
require.Equal(t, "appid1||state-key-1234567", modifiedStateKey)
originalStateKey := GetOriginalStateKey(modifiedStateKey)
require.Equal(t, key, originalStateKey)
}
func TestPrefix_StoreNotInitial(t *testing.T) {
// no config for store999
modifiedStateKey, _ := GetModifiedStateKey(key, "store999", "appid99")
require.Equal(t, "appid99||state-key-1234567", modifiedStateKey)
originalStateKey := GetOriginalStateKey(modifiedStateKey)
require.Equal(t, key, originalStateKey)
}
func TestStateConfigRace(t *testing.T) {
t.Run("data race between SaveStateConfiguration and GetModifiedStateKey", func(t *testing.T) {
var wg sync.WaitGroup
const iterations = 500
wg.Add(2)
go func() {
defer wg.Done()
for i := 0; i < iterations; i++ {
err := SaveStateConfiguration(fmt.Sprintf("store%d", i), map[string]string{strategyKey: strategyNone})
require.NoError(t, err)
}
}()
go func() {
defer wg.Done()
for i := 0; i < iterations; i++ {
_, err := GetModifiedStateKey(key, fmt.Sprintf("store%d", i), "appid")
require.NoError(t, err)
}
}()
wg.Wait()
})
t.Run("data race between two GetModifiedStateKey", func(t *testing.T) {
var wg sync.WaitGroup
const iterations = 500
wg.Add(2)
go func() {
defer wg.Done()
for i := 0; i < iterations; i++ {
_, err := GetModifiedStateKey(key, fmt.Sprintf("store%d", i), "appid")
require.NoError(t, err)
}
}()
go func() {
defer wg.Done()
for i := 0; i < iterations; i++ {
_, err := GetModifiedStateKey(key, fmt.Sprintf("store%d", i), "appid")
require.NoError(t, err)
}
}()
wg.Wait()
})
}
|
mikeee/dapr
|
pkg/components/state/state_config_test.go
|
GO
|
mit
| 6,047 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package components
import (
"strings"
"github.com/dapr/kit/logger"
)
// VersionConstructor is a version name func pair used to construct a
// component.
type VersionConstructor struct {
Version string
Constructor any
}
// Versioning is a struct that contains the versioning information for a single
// component Type. It is expected that each VersionConstructor be unique.
type Versioning struct {
// Preferred is the preferred version to use, used to log a warning if a
// deprecated version is used.
Preferred VersionConstructor
// Deprecated is a list of deprecated versions to log a warning if used.
Deprecated []VersionConstructor
// Others is a list of other versions that are supported, but not preferred.
Others []VersionConstructor
// Default is the default version to use when no version is specified. It
// should reference the version of one of the VersionConstructors in the set above.
Default string
}
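// As an illustration (the constructor names are hypothetical), a component that
// prefers v2 while still defaulting to a deprecated v1 could be described as:
//
//	Versioning{
//		Preferred:  VersionConstructor{Version: "v2", Constructor: newStoreV2},
//		Deprecated: []VersionConstructor{{Version: "v1", Constructor: newStoreV1}},
//		Default:    "v1",
//	}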
// IsInitialVersion returns true when a version is considered an unstable version (v0)
// or first stable version (v1). For backward compatibility, empty strings are also included.
func IsInitialVersion(version string) bool {
v := strings.ToLower(version)
return v == "" || v == UnstableVersion || v == FirstStableVersion
}
// CheckDeprecated checks if a version is deprecated and logs a warning if it
// is using information derived from the version set.
func CheckDeprecated(log logger.Logger, name, version string, versionSet Versioning) {
for _, v := range versionSet.Deprecated {
if v.Version == version {
log.Warnf(
"WARNING: state store %[1]s/%[2]s is deprecated and will be removed in a future version, please use %[3]s/%[4]s",
name, version, name, versionSet.Preferred.Version)
}
}
}
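// For example, calling CheckDeprecated with name "state.redis", version "v1",
// and a version set whose preferred version is "v2" would log (component name
// is hypothetical):
//
//	WARNING: state store state.redis/v1 is deprecated and will be removed in a future version, please use state.redis/v2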
const (
// Unstable version (v0).
UnstableVersion = "v0"
// First stable version (v1).
FirstStableVersion = "v1"
)
|
mikeee/dapr
|
pkg/components/versioning.go
|
GO
|
mit
| 2,411 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package components_test
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/dapr/dapr/pkg/components"
)
func TestIsInitialVersion(t *testing.T) {
tests := map[string]struct {
version string
initial bool
}{
"empty version": {version: "", initial: true},
"unstable": {version: "v0", initial: true},
"first stable": {version: "v1", initial: true},
"second stable": {version: "v2", initial: false},
"unstable upper": {version: "V0", initial: true},
"first stable upper": {version: "V1", initial: true},
"second stable upper": {version: "V2", initial: false},
}
for name, tc := range tests {
t.Run(name, func(t *testing.T) {
actual := components.IsInitialVersion(tc.version)
assert.Equal(t, tc.initial, actual)
})
}
}
|
mikeee/dapr
|
pkg/components/versioning_test.go
|
GO
|
mit
| 1,367 |
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package components
var wasmComponentsMap = map[string]struct{}{}
func RegisterWasmComponentType(category Category, typeName string) {
wasmComponentsMap[string(category)+"."+typeName] = struct{}{}
}
func IsWasmComponentType(componentType string) bool {
_, ok := wasmComponentsMap[componentType]
return ok
}
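// Illustrative usage (the category and type name below are hypothetical, and this
// assumes Category has a string underlying type so the constant converts implicitly):
//
//	RegisterWasmComponentType("middleware.http", "wasm")
//	IsWasmComponentType("middleware.http.wasm") // true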
|
mikeee/dapr
|
pkg/components/wasm.go
|
GO
|
mit
| 873 |
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package wfbackend
import (
"github.com/dapr/components-contrib/metadata"
)
// Metadata represents a set of properties specific for workflow backends.
type Metadata struct {
metadata.Base `json:",inline"`
// Dapr app ID
AppID string
}
|
mikeee/dapr
|
pkg/components/wfbackend/metadata.go
|
GO
|
mit
| 802 |
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package wfbackend
import (
"fmt"
"strings"
"github.com/microsoft/durabletask-go/backend"
"github.com/dapr/dapr/pkg/components"
"github.com/dapr/kit/logger"
)
// Registry is a component that returns registered workflow backend implementations.
type Registry struct {
Logger logger.Logger
workflowBackendComponents map[string]workflowBackendFactory
}
// DefaultRegistry is the singleton with the registry.
var DefaultRegistry *Registry = NewRegistry()
// NewRegistry is used to create workflow registry.
func NewRegistry() *Registry {
return &Registry{
workflowBackendComponents: make(map[string]workflowBackendFactory),
}
}
// RegisterComponent adds a new workflow backend to the registry.
func (s *Registry) RegisterComponent(componentFactory workflowBackendFactory, names ...string) {
for _, name := range names {
s.workflowBackendComponents[createFullName(name)] = componentFactory
}
}
func (s *Registry) Create(name, version, logName string) (func(Metadata) (backend.Backend, error), error) {
if method, ok := s.getWorkflowBackendComponent(name, version, logName); ok {
return method, nil
}
return nil, fmt.Errorf("couldn't find wokflow backend %s/%s", name, version)
}
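// Illustrative usage (the component name and app ID below are hypothetical):
// Create returns a factory rather than a backend, so construction is deferred
// until the workflow backend metadata is available:
//
//	factory, err := DefaultRegistry.Create("workflowbackend.actors", "v1", "")
//	// ...
//	be, err := factory(Metadata{AppID: "myapp"})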
func (s *Registry) getWorkflowBackendComponent(name, version, logName string) (func(Metadata) (backend.Backend, error), bool) {
nameLower := strings.ToLower(name)
versionLower := strings.ToLower(version)
workflowFn, ok := s.workflowBackendComponents[nameLower+"/"+versionLower]
if ok {
return s.wrapFn(workflowFn, logName), true
}
if components.IsInitialVersion(versionLower) {
workflowFn, ok = s.workflowBackendComponents[nameLower]
if ok {
return s.wrapFn(workflowFn, logName), true
}
}
return nil, false
}
func (s *Registry) wrapFn(componentFactory workflowBackendFactory, logName string) func(Metadata) (backend.Backend, error) {
return func(m Metadata) (backend.Backend, error) {
l := s.Logger
if logName != "" && l != nil {
l = l.WithFields(map[string]any{
"component": logName,
})
}
return componentFactory(m, l)
}
}
func createFullName(name string) string {
return strings.ToLower("workflowbackend." + name)
}
|
mikeee/dapr
|
pkg/components/wfbackend/registry.go
|
GO
|
mit
| 2,751 |
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package wfbackend_test
import (
"fmt"
"strings"
"testing"
"github.com/microsoft/durabletask-go/backend"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
wbe "github.com/dapr/dapr/pkg/components/wfbackend"
"github.com/dapr/kit/logger"
)
type mockWorkflowBackend struct {
backend.Backend
}
func TestRegistry(t *testing.T) {
testRegistry := wbe.NewRegistry()
t.Run("workflow backend is registered", func(t *testing.T) {
const (
backendName = "testbackendname"
backendNameV2 = "testbackendname/v2"
componentType = "workflowbackend." + backendName
)
// Initiate mock object
wbeMock := &mockWorkflowBackend{}
wbeMockV2 := &mockWorkflowBackend{}
md := wbe.Metadata{}
// act
testRegistry.RegisterComponent(func(_ wbe.Metadata, _ logger.Logger) (backend.Backend, error) {
return wbeMock, nil
}, backendName)
testRegistry.RegisterComponent(func(_ wbe.Metadata, _ logger.Logger) (backend.Backend, error) {
return wbeMockV2, nil
}, backendNameV2)
// assert v0 and v1
wbeFn, err := testRegistry.Create(componentType, "v0", "")
require.NoError(t, err)
wbe, err := wbeFn(md)
require.NoError(t, err)
assert.Same(t, wbeMock, wbe)
wbeFn, err = testRegistry.Create(componentType, "v1", "")
require.NoError(t, err)
wbe, err = wbeFn(md)
require.NoError(t, err)
assert.Same(t, wbeMock, wbe)
// assert v2
wbeFn, err = testRegistry.Create(componentType, "v2", "")
require.NoError(t, err)
wbe, err = wbeFn(md)
require.NoError(t, err)
assert.Same(t, wbeMockV2, wbe)
// check case-insensitivity
wbeFn, err = testRegistry.Create(strings.ToUpper(componentType), "V2", "")
require.NoError(t, err)
wbe, err = wbeFn(md)
require.NoError(t, err)
assert.Same(t, wbeMockV2, wbe)
// check creation with a component log name
testRegistry.Logger = logger.NewLogger("wfengine.backend")
wbeFn, err = testRegistry.Create(strings.ToUpper(componentType), "V2", "workflowbackendlog")
require.NoError(t, err)
wbe, err = wbeFn(md)
require.NoError(t, err)
assert.Same(t, wbeMockV2, wbe)
})
t.Run("workflow backend is not registered", func(t *testing.T) {
const (
backendName = "fakeBackend"
componentType = "workflowbackend." + backendName
)
// act
wbe, actualError := testRegistry.Create(componentType, "v1", "")
expectedError := fmt.Errorf("couldn't find wokflow backend %s/v1", componentType)
// assert
assert.Nil(t, wbe)
assert.Equal(t, expectedError.Error(), actualError.Error())
})
}
|
mikeee/dapr
|
pkg/components/wfbackend/registry_test.go
|
GO
|
mit
| 3,066 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package wfbackend
import (
"github.com/microsoft/durabletask-go/backend"
"github.com/dapr/kit/logger"
)
// workflowBackendFactory is a function that returns a workflow backend
type workflowBackendFactory func(Metadata, logger.Logger) (backend.Backend, error)
|
mikeee/dapr
|
pkg/components/wfbackend/wfbackend.go
|
GO
|
mit
| 825 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workflows
import (
"fmt"
"strings"
wfs "github.com/dapr/components-contrib/workflows"
"github.com/dapr/dapr/pkg/components"
"github.com/dapr/kit/logger"
)
// Registry is a component that returns registered workflow implementations.
type Registry struct {
Logger logger.Logger
workflowComponents map[string]func(logger.Logger) wfs.Workflow
}
// DefaultRegistry is the singleton with the registry.
var DefaultRegistry *Registry = NewRegistry()
// NewRegistry is used to create workflow registry.
func NewRegistry() *Registry {
return &Registry{
workflowComponents: map[string]func(logger.Logger) wfs.Workflow{},
}
}
// RegisterComponent adds a new workflow to the registry.
func (s *Registry) RegisterComponent(componentFactory func(logger.Logger) wfs.Workflow, names ...string) {
for _, name := range names {
s.workflowComponents[createFullName(name)] = componentFactory
}
}
func (s *Registry) Create(name, version, logName string) (wfs.Workflow, error) {
if method, ok := s.getWorkflowComponent(name, version, logName); ok {
return method(), nil
}
return nil, fmt.Errorf("couldn't find wokflow %s/%s", name, version)
}
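// getWorkflowComponent looks up the factory registered for the given name and version
// (case-insensitively), falling back to the unversioned registration when the requested
// version is the initial one.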
func (s *Registry) getWorkflowComponent(name, version, logName string) (func() wfs.Workflow, bool) {
nameLower := strings.ToLower(name)
versionLower := strings.ToLower(version)
workflowFn, ok := s.workflowComponents[nameLower+"/"+versionLower]
if ok {
return s.wrapFn(workflowFn, logName), true
}
if components.IsInitialVersion(versionLower) {
workflowFn, ok = s.workflowComponents[nameLower]
if ok {
return s.wrapFn(workflowFn, logName), true
}
}
return nil, false
}
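// wrapFn wraps the component factory so that the component is constructed with the
// registry's logger, enriched with the component's log name when one is provided.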
func (s *Registry) wrapFn(componentFactory func(logger.Logger) wfs.Workflow, logName string) func() wfs.Workflow {
return func() wfs.Workflow {
l := s.Logger
if logName != "" && l != nil {
l = l.WithFields(map[string]any{
"component": logName,
})
}
return componentFactory(l)
}
}
func createFullName(name string) string {
return strings.ToLower("workflow." + name)
}
|
mikeee/dapr
|
pkg/components/workflows/registry.go
|
GO
|
mit
| 2,626 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import "strings"
const (
SingleStageWildcard = "/*"
MultiStageWildcard = "/**"
Separation = "/"
)
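// Trie is a prefix tree that maps operation paths (segments separated by "/") to
// access control actions, supporting the "/*" single-stage and "/**" multi-stage wildcards.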
type Trie struct {
root *trieNode
}
type trieNode struct {
Char string
Data *AccessControlListOperationAction
SubNodes []*trieNode
}
func NewTrie() *Trie {
return &Trie{
root: newTrieNode("/", nil),
}
}
func newTrieNode(char string, data *AccessControlListOperationAction) *trieNode {
node := &trieNode{
Char: char,
Data: data,
SubNodes: nil,
}
return node
}
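// Search walks the trie along the segments of the given operation path and returns the
// matching access control action, honoring the "/*" and "/**" wildcards as well as
// in-segment "*" matching. It returns nil when no rule matches.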
func (trie *Trie) Search(operation string) *AccessControlListOperationAction {
node := trie.root
operationParts := strings.Split(operation, Separation)
length := len(operationParts)
for index, char := range operationParts {
if index == 0 {
continue
}
isEnd := index == length-1
char = Separation + char
node = node.findSubNode(char, isEnd)
if node == nil {
return nil
}
if node.Data != nil {
if !isEnd && strings.HasSuffix(node.Char, SingleStageWildcard) && !strings.HasSuffix(node.Char, MultiStageWildcard) {
continue
}
return node.Data
} else if isEnd {
node = node.findSubNode(SingleStageWildcard, isEnd)
if node != nil && node.Data != nil {
return node.Data
}
}
}
return nil
}
func (node *trieNode) findSubNode(target string, isEnd bool) *trieNode {
if nil == node.SubNodes {
return nil
}
return findNodeWithWildcard(target, node.SubNodes, isEnd)
}
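// PutOperationAction inserts the access control action for the given operation path,
// creating a trie node per path segment as needed. An existing node's action is not overwritten.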
func (trie *Trie) PutOperationAction(operation string, data *AccessControlListOperationAction) {
operationParts := strings.Split(operation, Separation)
length := len(operationParts)
node := trie.root
for index, char := range operationParts {
if index == 0 {
continue
}
char = Separation + char
subNode := findNode(char, node.SubNodes)
var newNode *trieNode
if nil == subNode {
if index == length-1 {
newNode = newTrieNode(char, data)
} else {
newNode = newTrieNode(char, nil)
}
node.addSubNode(newNode)
node = newNode
} else if index == length-1 {
if subNode.Data == nil {
subNode.Data = data
}
} else {
node = subNode
}
}
}
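// findNodeWithWildcard returns the first sub-node matching the given path segment:
// an exact match, the "/*" wildcard (only when the segment is the last one), the "/**"
// wildcard, or an in-segment "*" match via isMatch.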
func findNodeWithWildcard(char string, nodes []*trieNode, isEnd bool) *trieNode {
if nil == nodes || len(nodes) < 1 {
return nil
}
for _, node := range nodes {
if node.Char == char {
return node
}
if node.Char == SingleStageWildcard {
if isEnd {
return node
}
continue
}
if node.Char == MultiStageWildcard {
return node
}
if isMatch(char, node.Char) {
return node
}
}
return nil
}
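// findNode returns the sub-node whose segment equals char exactly, or nil if none exists.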
func findNode(char string, nodes []*trieNode) *trieNode {
if nil == nodes || len(nodes) < 1 {
return nil
}
for _, node := range nodes {
if node.Char == char {
return node
}
}
return nil
}
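// addSubNode appends newNode to the node's sub-nodes, allocating the slice on first use.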
func (node *trieNode) addSubNode(newNode *trieNode) {
if nil == node.SubNodes {
node.SubNodes = []*trieNode{newNode}
} else {
node.SubNodes = append(node.SubNodes, newNode)
}
}
// isMatch reports whether target matches pattern, where '*' in the pattern matches
// zero or more characters (it can also match the empty string).
// It uses dynamic programming: matchResults[i][j] is true when the first i characters
// of target match the first j characters of pattern.
func isMatch(target string, pattern string) bool {
	tl := len(target)
	pl := len(pattern)
	matchResults := make([][]bool, tl+1)
	for i := 0; i <= tl; i++ {
		matchResults[i] = make([]bool, pl+1)
	}
	// An empty target matches an empty pattern.
	matchResults[0][0] = true
	// An empty target also matches a leading run of '*' in the pattern.
	for i := 1; i <= pl; i++ {
		if pattern[i-1] == '*' {
			matchResults[0][i] = true
		} else {
			break
		}
	}
	for i := 1; i <= tl; i++ {
		for j := 1; j <= pl; j++ {
			if pattern[j-1] == '*' {
				// '*' either matches zero characters (skip it) or consumes target[i-1].
				matchResults[i][j] = matchResults[i][j-1] || matchResults[i-1][j]
			} else if target[i-1] == pattern[j-1] {
				matchResults[i][j] = matchResults[i-1][j-1]
			}
		}
	}
	return matchResults[tl][pl]
}
|
mikeee/dapr
|
pkg/config/acl_trie.go
|
GO
|
mit
| 4,274 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestWildcardMatch(t *testing.T) {
t.Run("test wildcard in suffix", func(t *testing.T) {
assert.True(t, isMatch("/ABC", "/A*"))
assert.True(t, isMatch("/ABC.a", "/A*"))
})
t.Run("test wildcard in prefix", func(t *testing.T) {
assert.True(t, isMatch("/ABC", "/*C"))
assert.True(t, isMatch("/ABC.a", "/*a"))
})
t.Run("test wildcard in the middle", func(t *testing.T) {
assert.True(t, isMatch("/abcd", "/a*d"))
assert.True(t, isMatch("/ABC.a", "/AB*.a"))
})
}
|
mikeee/dapr
|
pkg/config/acl_trie_test.go
|
GO
|
mit
| 1,120 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
// ApplicationConfig is an optional config supplied by user code.
type ApplicationConfig struct {
Entities []string `json:"entities"`
// Duration. example: "1h".
ActorIdleTimeout string `json:"actorIdleTimeout"`
// Duration. example: "30s".
DrainOngoingCallTimeout string `json:"drainOngoingCallTimeout"`
DrainRebalancedActors bool `json:"drainRebalancedActors"`
Reentrancy ReentrancyConfig `json:"reentrancy,omitempty"`
RemindersStoragePartitions int `json:"remindersStoragePartitions"`
// Duplicate of the above config so we can assign it to individual entities.
EntityConfigs []EntityConfig `json:"entitiesConfig,omitempty"`
}
type ReentrancyConfig struct {
Enabled bool `json:"enabled"`
MaxStackDepth *int `json:"maxStackDepth,omitempty"`
}
type EntityConfig struct {
Entities []string `json:"entities"`
// Duration. example: "1h".
ActorIdleTimeout string `json:"actorIdleTimeout"`
// Duration. example: "30s".
DrainOngoingCallTimeout string `json:"drainOngoingCallTimeout"`
DrainRebalancedActors bool `json:"drainRebalancedActors"`
Reentrancy ReentrancyConfig `json:"reentrancy,omitempty"`
RemindersStoragePartitions int `json:"remindersStoragePartitions"`
}
|
mikeee/dapr
|
pkg/config/app_configuration.go
|
GO
|
mit
| 1,888 |
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"time"
"github.com/dapr/dapr/pkg/config/protocol"
)
const (
// AppHealthConfigDefaultProbeInterval is the default interval for app health probes.
AppHealthConfigDefaultProbeInterval = 5 * time.Second
// AppHealthConfigDefaultProbeTimeout is the default value for probe timeouts.
AppHealthConfigDefaultProbeTimeout = 500 * time.Millisecond
// AppHealthConfigDefaultThreshold is the default threshold for determining failures in app health checks.
AppHealthConfigDefaultThreshold = int32(3)
)
// AppHealthConfig is the configuration object for the app health probes.
type AppHealthConfig struct {
ProbeInterval time.Duration
ProbeTimeout time.Duration
ProbeOnly bool
Threshold int32
}
// AppConnectionConfig holds the configuration for the app connection.
type AppConnectionConfig struct {
ChannelAddress string
HealthCheck *AppHealthConfig
HealthCheckHTTPPath string
MaxConcurrency int
Port int
Protocol protocol.Protocol
}
|
mikeee/dapr
|
pkg/config/app_connection.go
|
GO
|
mit
| 1,584 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"context"
"encoding/json"
"fmt"
"os"
"sort"
"strings"
"time"
grpcRetry "github.com/grpc-ecosystem/go-grpc-middleware/retry"
"github.com/spf13/cast"
yaml "gopkg.in/yaml.v3"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"github.com/dapr/dapr/pkg/buildinfo"
env "github.com/dapr/dapr/pkg/config/env"
operatorv1pb "github.com/dapr/dapr/pkg/proto/operator/v1"
"github.com/dapr/dapr/utils"
"github.com/dapr/kit/logger"
"github.com/dapr/kit/ptr"
)
// Feature Flags section
type Feature string
const (
// Enables support for setting TTL on Actor state keys.
ActorStateTTL Feature = "ActorStateTTL"
// Enables support for hot reloading of Daprd Components.
HotReload Feature = "HotReload"
)
// end feature flags section
const (
operatorCallTimeout = time.Second * 5
operatorMaxRetries = 100
AllowAccess = "allow"
DenyAccess = "deny"
DefaultTrustDomain = "public"
DefaultNamespace = "default"
ActionPolicyApp = "app"
ActionPolicyGlobal = "global"
defaultMaxWorkflowConcurrentInvocations = 100
defaultMaxActivityConcurrentInvocations = 100
)
// Configuration is an internal (and duplicate) representation of Dapr's Configuration CRD.
type Configuration struct {
metav1.TypeMeta `json:",inline" yaml:",inline"`
// See https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
// See https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
Spec ConfigurationSpec `json:"spec" yaml:"spec"`
// Internal fields
featuresEnabled map[Feature]struct{}
}
// AccessControlList is an in-memory access control list config for fast lookup.
type AccessControlList struct {
DefaultAction string
TrustDomain string
PolicySpec map[string]AccessControlListPolicySpec
}
// AccessControlListPolicySpec is an in-memory access control list config per app for fast lookup.
type AccessControlListPolicySpec struct {
AppName string
DefaultAction string
TrustDomain string
Namespace string
AppOperationActions *Trie
}
// AccessControlListOperationAction is an in-memory access control list config per operation for fast lookup.
type AccessControlListOperationAction struct {
VerbAction map[string]string
OperationName string
OperationAction string
}
type ConfigurationSpec struct {
HTTPPipelineSpec *PipelineSpec `json:"httpPipeline,omitempty" yaml:"httpPipeline,omitempty"`
AppHTTPPipelineSpec *PipelineSpec `json:"appHttpPipeline,omitempty" yaml:"appHttpPipeline,omitempty"`
TracingSpec *TracingSpec `json:"tracing,omitempty" yaml:"tracing,omitempty"`
MTLSSpec *MTLSSpec `json:"mtls,omitempty" yaml:"mtls,omitempty"`
MetricSpec *MetricSpec `json:"metric,omitempty" yaml:"metric,omitempty"`
MetricsSpec *MetricSpec `json:"metrics,omitempty" yaml:"metrics,omitempty"`
Secrets *SecretsSpec `json:"secrets,omitempty" yaml:"secrets,omitempty"`
AccessControlSpec *AccessControlSpec `json:"accessControl,omitempty" yaml:"accessControl,omitempty"`
NameResolutionSpec *NameResolutionSpec `json:"nameResolution,omitempty" yaml:"nameResolution,omitempty"`
Features []FeatureSpec `json:"features,omitempty" yaml:"features,omitempty"`
APISpec *APISpec `json:"api,omitempty" yaml:"api,omitempty"`
ComponentsSpec *ComponentsSpec `json:"components,omitempty" yaml:"components,omitempty"`
LoggingSpec *LoggingSpec `json:"logging,omitempty" yaml:"logging,omitempty"`
WasmSpec *WasmSpec `json:"wasm,omitempty" yaml:"wasm,omitempty"`
WorkflowSpec *WorkflowSpec `json:"workflow,omitempty" yaml:"workflow,omitempty"`
}
// WorkflowSpec defines the configuration for Dapr workflows.
type WorkflowSpec struct {
// maxConcurrentWorkflowInvocations is the maximum number of concurrent workflow invocations that can be scheduled by a single Dapr instance.
// Attempted invocations beyond this will be queued until the number of concurrent invocations drops below this value.
// If omitted, the default value of 100 will be used.
MaxConcurrentWorkflowInvocations int32 `json:"maxConcurrentWorkflowInvocations,omitempty" yaml:"maxConcurrentWorkflowInvocations,omitempty"`
// maxConcurrentActivityInvocations is the maximum number of concurrent activities that can be processed by a single Dapr instance.
// Attempted invocations beyond this will be queued until the number of concurrent invocations drops below this value.
// If omitted, the default value of 100 will be used.
MaxConcurrentActivityInvocations int32 `json:"maxConcurrentActivityInvocations,omitempty" yaml:"maxConcurrentActivityInvocations,omitempty"`
}
func (w *WorkflowSpec) GetMaxConcurrentWorkflowInvocations() int32 {
if w == nil || w.MaxConcurrentWorkflowInvocations <= 0 {
return defaultMaxWorkflowConcurrentInvocations
}
return w.MaxConcurrentWorkflowInvocations
}
func (w *WorkflowSpec) GetMaxConcurrentActivityInvocations() int32 {
if w == nil || w.MaxConcurrentActivityInvocations <= 0 {
return defaultMaxActivityConcurrentInvocations
}
return w.MaxConcurrentActivityInvocations
}
type SecretsSpec struct {
Scopes []SecretsScope `json:"scopes,omitempty"`
}
// SecretsScope defines the scope for secrets.
type SecretsScope struct {
DefaultAccess string `json:"defaultAccess,omitempty" yaml:"defaultAccess,omitempty"`
StoreName string `json:"storeName,omitempty" yaml:"storeName,omitempty"`
AllowedSecrets []string `json:"allowedSecrets,omitempty" yaml:"allowedSecrets,omitempty"`
DeniedSecrets []string `json:"deniedSecrets,omitempty" yaml:"deniedSecrets,omitempty"`
}
type PipelineSpec struct {
Handlers []HandlerSpec `json:"handlers,omitempty" yaml:"handlers,omitempty"`
}
// APISpec describes the configuration for Dapr APIs.
type APISpec struct {
// List of allowed APIs. Can be used in conjunction with denied.
Allowed APIAccessRules `json:"allowed,omitempty"`
// List of denied APIs. Can be used in conjunction with allowed.
Denied APIAccessRules `json:"denied,omitempty"`
}
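// An illustrative (not normative) Configuration fragment under "spec" using these access rules:
//
//	api:
//	  allowed:
//	    - name: state
//	      version: v1.0
//	      protocol: http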
// APIAccessRule describes an access rule for allowing a Dapr API to be enabled and accessible by an app.
type APIAccessRule struct {
Name string `json:"name"`
Version string `json:"version"`
Protocol APIAccessRuleProtocol `json:"protocol"`
}
// APIAccessRules is a list of API access rules (allowlist or denylist).
type APIAccessRules []APIAccessRule
// APIAccessRuleProtocol is the type for the protocol in APIAccessRules
type APIAccessRuleProtocol string
const (
APIAccessRuleProtocolHTTP APIAccessRuleProtocol = "http"
APIAccessRuleProtocolGRPC APIAccessRuleProtocol = "grpc"
)
// GetRulesByProtocol returns a list of APIAccessRule objects for a protocol
// The result is a map where the key is in the format "<version>/<endpoint>"
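// For example, APIAccessRule{Name: "foo", Version: "v1", Protocol: "http"} produces the key "v1/foo".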
func (r APIAccessRules) GetRulesByProtocol(protocol APIAccessRuleProtocol) map[string]struct{} {
res := make(map[string]struct{}, len(r))
for _, v := range r {
//nolint:gocritic
if strings.ToLower(string(v.Protocol)) == string(protocol) {
key := v.Version + "/" + v.Name
res[key] = struct{}{}
}
}
return res
}
type HandlerSpec struct {
Name string `json:"name,omitempty" yaml:"name,omitempty"`
Type string `json:"type,omitempty" yaml:"type,omitempty"`
Version string `json:"version,omitempty" yaml:"version,omitempty"`
SelectorSpec SelectorSpec `json:"selector,omitempty" yaml:"selector,omitempty"`
}
// LogName returns the name of the handler that can be used in logging.
func (h HandlerSpec) LogName() string {
return utils.ComponentLogName(h.Name, h.Type, h.Version)
}
type SelectorSpec struct {
Fields []SelectorField `json:"fields,omitempty" yaml:"fields,omitempty"`
}
type SelectorField struct {
Field string `json:"field" yaml:"field"`
Value string `json:"value" yaml:"value"`
}
type TracingSpec struct {
SamplingRate string `json:"samplingRate,omitempty" yaml:"samplingRate,omitempty"`
Stdout bool `json:"stdout,omitempty" yaml:"stdout,omitempty"`
Zipkin *ZipkinSpec `json:"zipkin,omitempty" yaml:"zipkin,omitempty"`
Otel *OtelSpec `json:"otel,omitempty" yaml:"otel,omitempty"`
}
// ZipkinSpec defines Zipkin exporter configurations.
type ZipkinSpec struct {
EndpointAddress string `json:"endpointAddress,omitempty" yaml:"endpointAddress,omitempty"`
}
// OtelSpec defines Otel exporter configurations.
type OtelSpec struct {
Protocol string `json:"protocol,omitempty" yaml:"protocol,omitempty"`
EndpointAddress string `json:"endpointAddress,omitempty" yaml:"endpointAddress,omitempty"`
// Defaults to true
IsSecure *bool `json:"isSecure,omitempty" yaml:"isSecure,omitempty"`
}
// GetIsSecure returns true if the connection should be secured.
func (o OtelSpec) GetIsSecure() bool {
// Defaults to true if nil
return o.IsSecure == nil || *o.IsSecure
}
// MetricSpec configuration for metrics.
type MetricSpec struct {
// Defaults to true
Enabled *bool `json:"enabled,omitempty" yaml:"enabled,omitempty"`
HTTP *MetricHTTP `json:"http,omitempty" yaml:"http,omitempty"`
Rules []MetricsRule `json:"rules,omitempty" yaml:"rules,omitempty"`
}
// GetEnabled returns true if metrics are enabled.
func (m MetricSpec) GetEnabled() bool {
// Defaults to true if nil
return m.Enabled == nil || *m.Enabled
}
// GetHTTPIncreasedCardinality returns true if increased cardinality is enabled for HTTP metrics
func (m MetricSpec) GetHTTPIncreasedCardinality(log logger.Logger) bool {
if m.HTTP == nil || m.HTTP.IncreasedCardinality == nil {
// The default is true in Dapr 1.13, but will be changed to false in 1.15+
// TODO: [MetricsCardinality] Change default in 1.15+
log.Warn("The default value for 'spec.metric.http.increasedCardinality' will change to 'false' in Dapr 1.15 or later")
return true
}
return *m.HTTP.IncreasedCardinality
}
// GetHTTPPathMatching returns the path matching configuration for HTTP metrics
func (m MetricSpec) GetHTTPPathMatching() *PathMatching {
if m.HTTP == nil {
return nil
}
return m.HTTP.PathMatching
}
// MetricHTTP defines configuration for metrics for the HTTP server
type MetricHTTP struct {
// If false, metrics for the HTTP server are collected with increased cardinality.
	// The default is true in Dapr 1.13, but will be changed to false in 1.15+
// TODO: [MetricsCardinality] Change default in 1.15+
IncreasedCardinality *bool `json:"increasedCardinality,omitempty" yaml:"increasedCardinality,omitempty"`
PathMatching *PathMatching `json:"pathMatching,omitempty" yaml:"pathMatching,omitempty"`
}
// PathMatching defines configuration options for path matching.
type PathMatching struct {
IngressPaths []string `json:"ingress,omitempty" yaml:"ingress,omitempty"`
EgressPaths []string `json:"egress,omitempty" yaml:"egress,omitempty"`
}
// MetricsRule defines configuration options for a metric.
type MetricsRule struct {
Name string `json:"name,omitempty" yaml:"name,omitempty"`
Labels []MetricLabel `json:"labels,omitempty" yaml:"labels,omitempty"`
}
// MetricLabel defines an object that allows setting regex expressions for a label.
type MetricLabel struct {
Name string `json:"name,omitempty" yaml:"name,omitempty"`
Regex map[string]string `json:"regex,omitempty" yaml:"regex,omitempty"`
}
// AppPolicySpec defines the policy data structure for each app.
type AppPolicySpec struct {
AppName string `json:"appId,omitempty" yaml:"appId,omitempty"`
DefaultAction string `json:"defaultAction,omitempty" yaml:"defaultAction,omitempty"`
TrustDomain string `json:"trustDomain,omitempty" yaml:"trustDomain,omitempty"`
Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"`
AppOperationActions []AppOperation `json:"operations,omitempty" yaml:"operations,omitempty"`
}
// AppOperation defines the data structure for each app operation.
type AppOperation struct {
Operation string `json:"name,omitempty" yaml:"name,omitempty"`
HTTPVerb []string `json:"httpVerb,omitempty" yaml:"httpVerb,omitempty"`
Action string `json:"action,omitempty" yaml:"action,omitempty"`
}
// AccessControlSpec is the spec object in ConfigurationSpec.
type AccessControlSpec struct {
DefaultAction string `json:"defaultAction,omitempty" yaml:"defaultAction,omitempty"`
TrustDomain string `json:"trustDomain,omitempty" yaml:"trustDomain,omitempty"`
AppPolicies []AppPolicySpec `json:"policies,omitempty" yaml:"policies,omitempty"`
}
type NameResolutionSpec struct {
Component string `json:"component,omitempty" yaml:"component,omitempty"`
Version string `json:"version,omitempty" yaml:"version,omitempty"`
Configuration any `json:"configuration,omitempty" yaml:"configuration,omitempty"`
}
// MTLSSpec defines mTLS configuration.
type MTLSSpec struct {
Enabled bool `json:"enabled,omitempty" yaml:"enabled,omitempty"`
WorkloadCertTTL string `json:"workloadCertTTL,omitempty" yaml:"workloadCertTTL,omitempty"`
AllowedClockSkew string `json:"allowedClockSkew,omitempty" yaml:"allowedClockSkew,omitempty"`
SentryAddress string `json:"sentryAddress,omitempty" yaml:"sentryAddress,omitempty"`
ControlPlaneTrustDomain string `json:"controlPlaneTrustDomain,omitempty" yaml:"controlPlaneTrustDomain,omitempty"`
// Additional token validators to use.
// When Dapr is running in Kubernetes mode, this is in addition to the built-in "kubernetes" validator.
// In self-hosted mode, enabling a custom validator will disable the built-in "insecure" validator.
TokenValidators []ValidatorSpec `json:"tokenValidators,omitempty" yaml:"tokenValidators,omitempty"`
}
// ValidatorSpec contains additional token validators to use.
type ValidatorSpec struct {
// Name of the validator
Name string `json:"name"`
// Options for the validator, if any
Options any `json:"options,omitempty"`
}
// OptionsMap returns the validator options as a map[string]string.
// If the options are empty, or if the conversion fails, returns nil.
func (v ValidatorSpec) OptionsMap() map[string]string {
if v.Options == nil {
return nil
}
return cast.ToStringMapString(v.Options)
}
// FeatureSpec defines which preview features are enabled.
type FeatureSpec struct {
Name Feature `json:"name" yaml:"name"`
Enabled bool `json:"enabled" yaml:"enabled"`
}
// ComponentsSpec describes the configuration for Dapr components
type ComponentsSpec struct {
// Denylist of component types that cannot be instantiated
Deny []string `json:"deny,omitempty" yaml:"deny,omitempty"`
}
// WasmSpec describes the security profile for all Dapr Wasm components.
type WasmSpec struct {
// Force enabling strict sandbox mode for all WASM components.
// When this is enabled, WASM components always run in strict mode regardless of their configuration.
// Strict mode enhances security of the WASM sandbox by limiting access to certain capabilities such as real-time clocks and random number generators.
StrictSandbox bool `json:"strictSandbox,omitempty" yaml:"strictSandbox,omitempty"`
}
// GetStrictSandbox returns the value of StrictSandbox, with nil-checks.
func (w *WasmSpec) GetStrictSandbox() bool {
return w != nil && w.StrictSandbox
}
// LoggingSpec defines the configuration for logging.
type LoggingSpec struct {
// Configure API logging.
APILogging *APILoggingSpec `json:"apiLogging,omitempty" yaml:"apiLogging,omitempty"`
}
// APILoggingSpec defines the configuration for API logging.
type APILoggingSpec struct {
// Default value for enabling API logging. Sidecars can always override this by setting `--enable-api-logging` to true or false explicitly.
// The default value is false.
Enabled bool `json:"enabled,omitempty" yaml:"enabled,omitempty"`
// When enabled, obfuscates the values of URLs in HTTP API logs, logging the route name rather than the full path being invoked, which could contain PII.
// Default: false.
// This option has no effect if API logging is disabled.
ObfuscateURLs bool `json:"obfuscateURLs,omitempty" yaml:"obfuscateURLs,omitempty"`
// If true, health checks are not reported in API logs. Default: false.
// This option has no effect if API logging is disabled.
OmitHealthChecks bool `json:"omitHealthChecks,omitempty" yaml:"omitHealthChecks,omitempty"`
}
// LoadDefaultConfiguration returns the default config.
func LoadDefaultConfiguration() *Configuration {
return &Configuration{
Spec: ConfigurationSpec{
TracingSpec: &TracingSpec{
Otel: &OtelSpec{
IsSecure: ptr.Of(true),
},
},
MetricSpec: &MetricSpec{
Enabled: ptr.Of(true),
},
AccessControlSpec: &AccessControlSpec{
DefaultAction: AllowAccess,
TrustDomain: "public",
},
WorkflowSpec: &WorkflowSpec{
MaxConcurrentWorkflowInvocations: defaultMaxWorkflowConcurrentInvocations,
MaxConcurrentActivityInvocations: defaultMaxActivityConcurrentInvocations,
},
},
}
}
// LoadStandaloneConfiguration loads the configuration files at the given paths, applying each on top of the default configuration.
func LoadStandaloneConfiguration(configs ...string) (*Configuration, error) {
conf := LoadDefaultConfiguration()
// Load all config files and apply them on top of the default config
for _, config := range configs {
_, err := os.Stat(config)
if err != nil {
return nil, err
}
b, err := os.ReadFile(config)
if err != nil {
return nil, err
}
// Parse environment variables from yaml
b = []byte(os.ExpandEnv(string(b)))
err = yaml.Unmarshal(b, conf)
if err != nil {
return nil, err
}
}
err := conf.sortAndValidateSecretsConfiguration()
if err != nil {
return nil, err
}
conf.sortMetricsSpec()
return conf, nil
}
// LoadKubernetesConfiguration gets configuration from the Kubernetes operator with a given name.
func LoadKubernetesConfiguration(config string, namespace string, podName string, operatorClient operatorv1pb.OperatorClient) (*Configuration, error) {
resp, err := operatorClient.GetConfiguration(context.Background(), &operatorv1pb.GetConfigurationRequest{
Name: config,
Namespace: namespace,
PodName: podName,
}, grpcRetry.WithMax(operatorMaxRetries), grpcRetry.WithPerRetryTimeout(operatorCallTimeout))
if err != nil {
return nil, err
}
b := resp.GetConfiguration()
if len(b) == 0 {
return nil, fmt.Errorf("configuration %s not found", config)
}
conf := LoadDefaultConfiguration()
err = json.Unmarshal(b, conf)
if err != nil {
return nil, err
}
err = conf.sortAndValidateSecretsConfiguration()
if err != nil {
return nil, err
}
conf.sortMetricsSpec()
return conf, nil
}
// SetTracingSpecFromEnv updates the tracing configuration from the OTLP environment variables, if they are set.
func SetTracingSpecFromEnv(conf *Configuration) {
// If Otel Endpoint is already set, then don't override.
if conf.Spec.TracingSpec.Otel.EndpointAddress != "" {
return
}
if endpoint := os.Getenv(env.OtlpExporterEndpoint); endpoint != "" {
// remove "http://" or "https://" from the endpoint
endpoint = strings.TrimPrefix(endpoint, "http://")
endpoint = strings.TrimPrefix(endpoint, "https://")
conf.Spec.TracingSpec.Otel.EndpointAddress = endpoint
if conf.Spec.TracingSpec.SamplingRate == "" {
conf.Spec.TracingSpec.SamplingRate = "1"
}
// The OTLP attribute allows 'grpc', 'http/protobuf', or 'http/json'.
// Dapr setting can only be 'grpc' or 'http'.
if protocol := os.Getenv(env.OtlpExporterProtocol); strings.HasPrefix(protocol, "http") {
conf.Spec.TracingSpec.Otel.Protocol = "http"
} else {
conf.Spec.TracingSpec.Otel.Protocol = "grpc"
}
if insecure := os.Getenv(env.OtlpExporterInsecure); insecure == "true" {
conf.Spec.TracingSpec.Otel.IsSecure = ptr.Of(false)
}
}
}
// IsSecretAllowed checks whether access to the secret with the given key is allowed.
func (c SecretsScope) IsSecretAllowed(key string) bool {
// By default, set allow access for the secret store.
access := AllowAccess
// Check and set deny access.
if strings.EqualFold(c.DefaultAccess, DenyAccess) {
access = DenyAccess
}
// If the allowedSecrets list is not empty then check if the access is specifically allowed for this key.
if len(c.AllowedSecrets) != 0 {
return containsKey(c.AllowedSecrets, key)
}
	// Check the key against the deny list, if one is present for the secret store.
	// If the specific key is denied, deny access regardless of the default.
if deny := containsKey(c.DeniedSecrets, key); deny {
return !deny
}
// Check if defined default access is allow.
return access == AllowAccess
}
// containsKey runs a binary search on a list of strings to find a key; the slice must already be sorted.
func containsKey(s []string, key string) bool {
index := sort.SearchStrings(s, key)
return index < len(s) && s[index] == key
}
// LoadFeatures loads the list of enabled features, from the Configuration spec and from the buildinfo.
func (c *Configuration) LoadFeatures() {
forced := buildinfo.Features()
c.featuresEnabled = make(map[Feature]struct{}, len(c.Spec.Features)+len(forced))
for _, feature := range c.Spec.Features {
if feature.Name == "" || !feature.Enabled {
continue
}
c.featuresEnabled[feature.Name] = struct{}{}
}
for _, v := range forced {
if v == "" {
continue
}
c.featuresEnabled[Feature(v)] = struct{}{}
}
}
// IsFeatureEnabled returns true if a Feature (such as a preview) is enabled.
func (c Configuration) IsFeatureEnabled(target Feature) (enabled bool) {
_, enabled = c.featuresEnabled[target]
return enabled
}
// EnabledFeatures returns the list of features that have been enabled.
func (c Configuration) EnabledFeatures() []string {
features := make([]string, len(c.featuresEnabled))
i := 0
for f := range c.featuresEnabled {
features[i] = string(f)
i++
}
return features[:i]
}
// GetTracingSpec returns the tracing spec.
// It's a short-hand that includes nil-checks for safety.
func (c Configuration) GetTracingSpec() TracingSpec {
if c.Spec.TracingSpec == nil {
return TracingSpec{}
}
return *c.Spec.TracingSpec
}
// GetMTLSSpec returns the mTLS spec.
// It's a short-hand that includes nil-checks for safety.
func (c Configuration) GetMTLSSpec() MTLSSpec {
if c.Spec.MTLSSpec == nil {
return MTLSSpec{}
}
return *c.Spec.MTLSSpec
}
// GetMetricsSpec returns the metrics spec.
// It's a short-hand that includes nil-checks for safety.
func (c Configuration) GetMetricsSpec() MetricSpec {
if c.Spec.MetricSpec == nil {
return MetricSpec{}
}
return *c.Spec.MetricSpec
}
// GetAPISpec returns the API spec.
// It's a short-hand that includes nil-checks for safety.
func (c Configuration) GetAPISpec() APISpec {
if c.Spec.APISpec == nil {
return APISpec{}
}
return *c.Spec.APISpec
}
// GetLoggingSpec returns the Logging spec.
// It's a short-hand that includes nil-checks for safety.
func (c Configuration) GetLoggingSpec() LoggingSpec {
if c.Spec.LoggingSpec == nil {
return LoggingSpec{}
}
return *c.Spec.LoggingSpec
}
// GetAPILoggingSpec returns the Logging.APILogging spec.
// It's a short-hand that includes nil-checks for safety.
func (c Configuration) GetAPILoggingSpec() APILoggingSpec {
if c.Spec.LoggingSpec == nil || c.Spec.LoggingSpec.APILogging == nil {
return APILoggingSpec{}
}
return *c.Spec.LoggingSpec.APILogging
}
// GetWorkflowSpec returns the Workflow spec.
// It's a short-hand that includes nil-checks for safety.
func (c *Configuration) GetWorkflowSpec() WorkflowSpec {
if c == nil || c.Spec.WorkflowSpec == nil {
return WorkflowSpec{
MaxConcurrentWorkflowInvocations: defaultMaxWorkflowConcurrentInvocations,
MaxConcurrentActivityInvocations: defaultMaxActivityConcurrentInvocations,
}
}
return *c.Spec.WorkflowSpec
}
// ToYAML returns the Configuration represented as YAML.
func (c *Configuration) ToYAML() (string, error) {
b, err := yaml.Marshal(c)
if err != nil {
return "", err
}
return string(b), nil
}
// String implements fmt.Stringer and is used for debugging. It returns the Configuration object encoded as YAML.
func (c *Configuration) String() string {
enc, err := c.ToYAML()
if err != nil {
return "Failed to marshal Configuration object to YAML: " + err.Error()
}
return enc
}
// sortMetricsSpec applies the .metrics spec on top of .metric when set; otherwise .metric is retained.
func (c *Configuration) sortMetricsSpec() {
if c.Spec.MetricsSpec == nil {
return
}
if c.Spec.MetricsSpec.Enabled != nil {
c.Spec.MetricSpec.Enabled = c.Spec.MetricsSpec.Enabled
}
if len(c.Spec.MetricsSpec.Rules) > 0 {
c.Spec.MetricSpec.Rules = c.Spec.MetricsSpec.Rules
}
if c.Spec.MetricsSpec.HTTP != nil {
c.Spec.MetricSpec.HTTP = c.Spec.MetricsSpec.HTTP
}
}
// sortAndValidateSecretsConfiguration validates the secrets configuration and sorts the allowed and denied secrets lists, if present.
func (c *Configuration) sortAndValidateSecretsConfiguration() error {
if c.Spec.Secrets == nil {
return nil
}
set := sets.NewString()
for _, scope := range c.Spec.Secrets.Scopes {
// validate scope
if set.Has(scope.StoreName) {
return fmt.Errorf("%s storeName is repeated in secrets configuration", scope.StoreName)
}
if scope.DefaultAccess != "" &&
!strings.EqualFold(scope.DefaultAccess, AllowAccess) &&
!strings.EqualFold(scope.DefaultAccess, DenyAccess) {
return fmt.Errorf("defaultAccess %s can be either allow or deny", scope.DefaultAccess)
}
set.Insert(scope.StoreName)
// modify scope
sort.Strings(scope.AllowedSecrets)
sort.Strings(scope.DeniedSecrets)
}
return nil
}
// ToYAML returns the ConfigurationSpec represented as YAML.
func (c ConfigurationSpec) ToYAML() (string, error) {
b, err := yaml.Marshal(&c)
if err != nil {
return "", err
}
return string(b), nil
}
// String implements fmt.Stringer and is used for debugging. It returns the Configuration object encoded as YAML.
func (c ConfigurationSpec) String() string {
enc, err := c.ToYAML()
if err != nil {
return fmt.Sprintf("Failed to marshal ConfigurationSpec object to YAML: %v", err)
}
return enc
}
|
mikeee/dapr
|
pkg/config/configuration.go
|
GO
|
mit
| 27,001 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"bytes"
"io"
"os"
"reflect"
"sort"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/exp/maps"
"github.com/dapr/dapr/pkg/buildinfo"
"github.com/dapr/kit/logger"
"github.com/dapr/kit/ptr"
)
func TestLoadStandaloneConfiguration(t *testing.T) {
testCases := []struct {
name string
path string
errorExpected bool
}{
{
name: "Valid config file",
path: "./testdata/config.yaml",
errorExpected: false,
},
{
name: "Invalid file path",
path: "invalid_file.yaml",
errorExpected: true,
},
{
name: "Invalid config file",
path: "./testdata/invalid_secrets_config.yaml",
errorExpected: true,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
config, err := LoadStandaloneConfiguration(tc.path)
if tc.errorExpected {
require.Error(t, err, "Expected an error")
assert.Nil(t, config, "Config should not be loaded")
} else {
require.NoError(t, err, "Unexpected error")
assert.NotNil(t, config, "Config not loaded as expected")
}
})
}
t.Run("parse environment variables", func(t *testing.T) {
t.Setenv("DAPR_SECRET", "keepitsecret")
config, err := LoadStandaloneConfiguration("./testdata/env_variables_config.yaml")
require.NoError(t, err, "Unexpected error")
assert.NotNil(t, config, "Config not loaded as expected")
assert.Equal(t, "keepitsecret", config.Spec.Secrets.Scopes[0].AllowedSecrets[0])
})
t.Run("check Kind and Name", func(t *testing.T) {
config, err := LoadStandaloneConfiguration("./testdata/config.yaml")
require.NoError(t, err, "Unexpected error")
assert.NotNil(t, config, "Config not loaded as expected")
assert.Equal(t, "secretappconfig", config.ObjectMeta.Name)
assert.Equal(t, "Configuration", config.TypeMeta.Kind)
})
t.Run("metrics spec", func(t *testing.T) {
testCases := []struct {
name string
confFile string
metricEnabled bool
}{
{
name: "metric is enabled by default",
confFile: "./testdata/config.yaml",
metricEnabled: true,
},
{
name: "metric is disabled by config",
confFile: "./testdata/metric_disabled.yaml",
metricEnabled: false,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
config, err := LoadStandaloneConfiguration(tc.confFile)
require.NoError(t, err)
assert.Equal(t, tc.metricEnabled, config.Spec.MetricSpec.GetEnabled())
})
}
})
t.Run("components spec", func(t *testing.T) {
testCases := []struct {
name string
confFile string
componentsDeny []string
}{
{
name: "component deny list",
confFile: "./testdata/components_config.yaml",
componentsDeny: []string{"foo.bar", "hello.world/v1"},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
config, err := LoadStandaloneConfiguration(tc.confFile)
require.NoError(t, err)
assert.True(t, reflect.DeepEqual(tc.componentsDeny, config.Spec.ComponentsSpec.Deny))
})
}
})
t.Run("features spec", func(t *testing.T) {
testCases := []struct {
name string
confFile string
featureName Feature
featureEnabled bool
}{
{
name: "feature is enabled",
confFile: "./testdata/feature_config.yaml",
featureName: Feature("Actor.Reentrancy"),
featureEnabled: true,
},
{
name: "feature is disabled",
confFile: "./testdata/feature_config.yaml",
featureName: Feature("Test.Feature"),
featureEnabled: false,
},
{
name: "feature is disabled if missing",
confFile: "./testdata/feature_config.yaml",
featureName: Feature("Test.Missing"),
featureEnabled: false,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
config, err := LoadStandaloneConfiguration(tc.confFile)
require.NoError(t, err)
config.LoadFeatures()
assert.Equal(t, tc.featureEnabled, config.IsFeatureEnabled(tc.featureName))
})
}
})
t.Run("mTLS spec", func(t *testing.T) {
config, err := LoadStandaloneConfiguration("./testdata/mtls_config.yaml")
require.NoError(t, err)
mtlsSpec := config.GetMTLSSpec()
assert.True(t, mtlsSpec.Enabled)
assert.Equal(t, "25s", mtlsSpec.WorkloadCertTTL)
assert.Equal(t, "1h", mtlsSpec.AllowedClockSkew)
})
t.Run("workflow spec - configured", func(t *testing.T) {
config, err := LoadStandaloneConfiguration("./testdata/workflow_config.yaml")
require.NoError(t, err)
workflowSpec := config.GetWorkflowSpec()
assert.Equal(t, int32(32), workflowSpec.MaxConcurrentWorkflowInvocations)
assert.Equal(t, int32(64), workflowSpec.MaxConcurrentActivityInvocations)
})
t.Run("workflow spec - defaults", func(t *testing.T) {
// Intentionally loading an unrelated config file to test defaults
config, err := LoadStandaloneConfiguration("./testdata/mtls_config.yaml")
require.NoError(t, err)
workflowSpec := config.GetWorkflowSpec()
		// These are the documented default values; changing these defaults requires updating the documentation as well.
assert.Equal(t, int32(100), workflowSpec.MaxConcurrentWorkflowInvocations)
assert.Equal(t, int32(100), workflowSpec.MaxConcurrentActivityInvocations)
})
t.Run("multiple configurations", func(t *testing.T) {
config, err := LoadStandaloneConfiguration("./testdata/feature_config.yaml", "./testdata/mtls_config.yaml")
require.NoError(t, err)
// From feature_config.yaml
config.LoadFeatures()
assert.True(t, config.IsFeatureEnabled("Actor.Reentrancy"))
assert.False(t, config.IsFeatureEnabled("Test.Feature"))
// From mtls_config.yaml
mtlsSpec := config.GetMTLSSpec()
assert.True(t, mtlsSpec.Enabled)
assert.Equal(t, "25s", mtlsSpec.WorkloadCertTTL)
assert.Equal(t, "1h", mtlsSpec.AllowedClockSkew)
})
t.Run("multiple configurations with overriding", func(t *testing.T) {
config, err := LoadStandaloneConfiguration("./testdata/feature_config.yaml", "./testdata/mtls_config.yaml", "./testdata/override.yaml")
require.NoError(t, err)
// From feature_config.yaml
// Should both be overridden
config.LoadFeatures()
assert.False(t, config.IsFeatureEnabled("Actor.Reentrancy"))
assert.True(t, config.IsFeatureEnabled("Test.Feature"))
// From mtls_config.yaml
mtlsSpec := config.GetMTLSSpec()
assert.False(t, mtlsSpec.Enabled) // Overridden
assert.Equal(t, "25s", mtlsSpec.WorkloadCertTTL)
assert.Equal(t, "1h", mtlsSpec.AllowedClockSkew)
// Spec part encoded as YAML
compareWithFile(t, "./testdata/override_spec_gen.yaml", config.Spec.String())
// Complete YAML
compareWithFile(t, "./testdata/override_gen.yaml", config.String())
})
}
func compareWithFile(t *testing.T, file string, expect string) {
f, err := os.ReadFile(file)
require.NoError(t, err)
// Replace all "\r\n" with "\n" because (*wave hands*, *lesigh*) ... Windows
f = bytes.ReplaceAll(f, []byte{'\r', '\n'}, []byte{'\n'})
assert.Equal(t, expect, string(f))
}
func TestSortAndValidateSecretsConfiguration(t *testing.T) {
testCases := []struct {
name string
config Configuration
errorExpected bool
}{
{
name: "empty configuration",
errorExpected: false,
},
{
name: "incorrect default access",
config: Configuration{
Spec: ConfigurationSpec{
Secrets: &SecretsSpec{
Scopes: []SecretsScope{
{
StoreName: "testStore",
DefaultAccess: "incorrect",
},
},
},
},
},
errorExpected: true,
},
{
name: "empty default access",
config: Configuration{
Spec: ConfigurationSpec{
Secrets: &SecretsSpec{
Scopes: []SecretsScope{
{
StoreName: "testStore",
},
},
},
},
},
errorExpected: false,
},
{
name: "repeated store Name",
config: Configuration{
Spec: ConfigurationSpec{
Secrets: &SecretsSpec{
Scopes: []SecretsScope{
{
StoreName: "testStore",
DefaultAccess: AllowAccess,
},
{
StoreName: "testStore",
DefaultAccess: DenyAccess,
},
},
},
},
},
errorExpected: true,
},
{
name: "simple secrets config",
config: Configuration{
Spec: ConfigurationSpec{
Secrets: &SecretsSpec{
Scopes: []SecretsScope{
{
StoreName: "testStore",
DefaultAccess: DenyAccess,
AllowedSecrets: []string{"Z", "b", "a", "c"},
},
},
},
},
},
errorExpected: false,
},
{
name: "case-insensitive default access",
config: Configuration{
Spec: ConfigurationSpec{
Secrets: &SecretsSpec{
Scopes: []SecretsScope{
{
StoreName: "testStore",
DefaultAccess: "DeNY",
AllowedSecrets: []string{"Z", "b", "a", "c"},
},
},
},
},
},
errorExpected: false,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
err := tc.config.sortAndValidateSecretsConfiguration()
if tc.errorExpected {
require.Error(t, err, "expected validation to fail")
} else if tc.config.Spec.Secrets != nil {
for _, scope := range tc.config.Spec.Secrets.Scopes {
assert.True(t, sort.StringsAreSorted(scope.AllowedSecrets), "expected sorted slice")
assert.True(t, sort.StringsAreSorted(scope.DeniedSecrets), "expected sorted slice")
}
}
})
}
}
func TestIsSecretAllowed(t *testing.T) {
testCases := []struct {
name string
scope SecretsScope
secretKey string
expectedResult bool
}{
{
name: "Empty scope default allow all",
secretKey: "random",
expectedResult: true,
},
{
name: "Empty scope default allow all empty key",
expectedResult: true,
},
{
name: "default deny all secrets empty key",
scope: SecretsScope{
StoreName: "testName",
DefaultAccess: "DeNy", // check case-insensitivity
},
secretKey: "",
expectedResult: false,
},
{
name: "default allow all secrets empty key",
scope: SecretsScope{
StoreName: "testName",
DefaultAccess: "AllOw", // check case-insensitivity
},
secretKey: "",
expectedResult: true,
},
{
name: "default deny all secrets",
scope: SecretsScope{
StoreName: "testName",
DefaultAccess: DenyAccess,
},
secretKey: "random",
expectedResult: false,
},
{
name: "default deny with specific allow secrets",
scope: SecretsScope{
StoreName: "testName",
DefaultAccess: DenyAccess,
AllowedSecrets: []string{"key1"},
},
secretKey: "key1",
expectedResult: true,
},
{
name: "default allow with specific allow secrets",
scope: SecretsScope{
StoreName: "testName",
DefaultAccess: AllowAccess,
AllowedSecrets: []string{"key1"},
},
secretKey: "key2",
expectedResult: false,
},
{
name: "default allow with specific deny secrets",
scope: SecretsScope{
StoreName: "testName",
DefaultAccess: AllowAccess,
DeniedSecrets: []string{"key1"},
},
secretKey: "key1",
expectedResult: false,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
assert.Equal(t, tc.expectedResult, tc.scope.IsSecretAllowed(tc.secretKey), "incorrect access")
})
}
}
func TestContainsKey(t *testing.T) {
s := []string{"a", "b", "c", "z"}
assert.False(t, containsKey(s, "h"), "unexpected result")
assert.True(t, containsKey(s, "b"), "unexpected result")
}
func TestFeatureEnabled(t *testing.T) {
config := Configuration{
Spec: ConfigurationSpec{
Features: []FeatureSpec{
{
Name: "testEnabled",
Enabled: true,
},
{
Name: "testDisabled",
Enabled: false,
},
},
},
}
config.LoadFeatures()
assert.True(t, config.IsFeatureEnabled("testEnabled"))
assert.False(t, config.IsFeatureEnabled("testDisabled"))
assert.False(t, config.IsFeatureEnabled("testMissing"))
// Test config.EnabledFeatures
// We sort the values before comparing because order isn't guaranteed (and doesn't matter)
actual := config.EnabledFeatures()
expect := append([]string{"testEnabled"}, buildinfo.Features()...)
sort.Strings(actual)
sort.Strings(expect)
	assert.EqualValues(t, expect, actual)
}
func TestSetTracingSpecFromEnv(t *testing.T) {
t.Setenv("OTEL_EXPORTER_OTLP_ENDPOINT", "http://otlpendpoint:1234")
t.Setenv("OTEL_EXPORTER_OTLP_INSECURE", "true")
t.Setenv("OTEL_EXPORTER_OTLP_PROTOCOL", "http/json")
// get default configuration
conf := LoadDefaultConfiguration()
// set tracing spec from env
SetTracingSpecFromEnv(conf)
assert.Equal(t, "otlpendpoint:1234", conf.Spec.TracingSpec.Otel.EndpointAddress)
assert.Equal(t, "http", conf.Spec.TracingSpec.Otel.Protocol)
require.False(t, conf.Spec.TracingSpec.Otel.GetIsSecure())
// Spec from config file should not be overridden
conf = LoadDefaultConfiguration()
conf.Spec.TracingSpec.Otel.EndpointAddress = "configfileendpoint:4321"
conf.Spec.TracingSpec.Otel.Protocol = "grpc"
conf.Spec.TracingSpec.Otel.IsSecure = ptr.Of(true)
// set tracing spec from env
SetTracingSpecFromEnv(conf)
assert.Equal(t, "configfileendpoint:4321", conf.Spec.TracingSpec.Otel.EndpointAddress)
assert.Equal(t, "grpc", conf.Spec.TracingSpec.Otel.Protocol)
require.True(t, conf.Spec.TracingSpec.Otel.GetIsSecure())
}
func TestAPIAccessRules(t *testing.T) {
config := &Configuration{
Spec: ConfigurationSpec{
APISpec: &APISpec{
Allowed: APIAccessRules{
APIAccessRule{Name: "foo", Version: "v1", Protocol: "http"},
APIAccessRule{Name: "MyMethod", Version: "v1alpha1", Protocol: "grpc"},
},
Denied: APIAccessRules{
APIAccessRule{Name: "bar", Version: "v1", Protocol: "http"},
},
},
},
}
apiSpec := config.Spec.APISpec
assert.Equal(t, []string{"v1/foo"}, maps.Keys(apiSpec.Allowed.GetRulesByProtocol(APIAccessRuleProtocolHTTP)))
assert.Equal(t, []string{"v1alpha1/MyMethod"}, maps.Keys(apiSpec.Allowed.GetRulesByProtocol(APIAccessRuleProtocolGRPC)))
assert.Equal(t, []string{"v1/bar"}, maps.Keys(apiSpec.Denied.GetRulesByProtocol(APIAccessRuleProtocolHTTP)))
assert.Empty(t, maps.Keys(apiSpec.Denied.GetRulesByProtocol(APIAccessRuleProtocolGRPC)))
}
func TestSortMetrics(t *testing.T) {
t.Run("metrics overrides metric - enabled false", func(t *testing.T) {
config := &Configuration{
Spec: ConfigurationSpec{
MetricSpec: &MetricSpec{
Enabled: ptr.Of(true),
Rules: []MetricsRule{
{
Name: "rule",
},
},
},
MetricsSpec: &MetricSpec{
Enabled: ptr.Of(false),
},
},
}
config.sortMetricsSpec()
assert.False(t, config.Spec.MetricSpec.GetEnabled())
assert.Equal(t, "rule", config.Spec.MetricSpec.Rules[0].Name)
})
t.Run("metrics overrides metric - enabled true", func(t *testing.T) {
config := &Configuration{
Spec: ConfigurationSpec{
MetricSpec: &MetricSpec{
Enabled: ptr.Of(false),
Rules: []MetricsRule{
{
Name: "rule",
},
},
},
MetricsSpec: &MetricSpec{
Enabled: ptr.Of(true),
},
},
}
config.sortMetricsSpec()
assert.True(t, config.Spec.MetricSpec.GetEnabled())
assert.Equal(t, "rule", config.Spec.MetricSpec.Rules[0].Name)
})
t.Run("nil metrics enabled doesn't overrides", func(t *testing.T) {
config := &Configuration{
Spec: ConfigurationSpec{
MetricSpec: &MetricSpec{
Enabled: ptr.Of(true),
Rules: []MetricsRule{
{
Name: "rule",
},
},
},
MetricsSpec: &MetricSpec{},
},
}
config.sortMetricsSpec()
assert.True(t, config.Spec.MetricSpec.GetEnabled())
assert.Equal(t, "rule", config.Spec.MetricSpec.Rules[0].Name)
})
}
func TestMetricsGetHTTPIncreasedCardinality(t *testing.T) {
log := logger.NewLogger("test")
log.SetOutput(io.Discard)
t.Run("no http configuration, returns true", func(t *testing.T) {
m := MetricSpec{
HTTP: nil,
}
assert.True(t, m.GetHTTPIncreasedCardinality(log))
})
t.Run("nil value, returns true", func(t *testing.T) {
m := MetricSpec{
HTTP: &MetricHTTP{
IncreasedCardinality: nil,
},
}
assert.True(t, m.GetHTTPIncreasedCardinality(log))
})
t.Run("value is set to true", func(t *testing.T) {
m := MetricSpec{
HTTP: &MetricHTTP{
IncreasedCardinality: ptr.Of(true),
},
}
assert.True(t, m.GetHTTPIncreasedCardinality(log))
})
t.Run("value is set to false", func(t *testing.T) {
m := MetricSpec{
HTTP: &MetricHTTP{
IncreasedCardinality: ptr.Of(false),
},
}
assert.False(t, m.GetHTTPIncreasedCardinality(log))
})
}
func TestMetricsGetHTTPPathMatching(t *testing.T) {
t.Run("no http configuration, returns nil", func(t *testing.T) {
m := MetricSpec{
HTTP: nil,
}
assert.Nil(t, m.GetHTTPPathMatching())
})
t.Run("nil value, returns nil", func(t *testing.T) {
m := MetricSpec{
HTTP: &MetricHTTP{
PathMatching: nil,
},
}
assert.Nil(t, m.GetHTTPPathMatching())
})
t.Run("config is enabled", func(t *testing.T) {
m := MetricSpec{
HTTP: &MetricHTTP{
PathMatching: &PathMatching{
IngressPaths: []string{"/resource/1"},
EgressPaths: []string{"/resource/2"},
},
},
}
config := m.GetHTTPPathMatching()
assert.Equal(t, []string{"/resource/1"}, config.IngressPaths)
assert.Equal(t, []string{"/resource/2"}, config.EgressPaths)
})
t.Run("config is enabled with only ingress", func(t *testing.T) {
m := MetricSpec{
HTTP: &MetricHTTP{
PathMatching: &PathMatching{
IngressPaths: []string{"/resource/1"},
},
},
}
config := m.GetHTTPPathMatching()
assert.Equal(t, []string{"/resource/1"}, config.IngressPaths)
assert.Nil(t, config.EgressPaths)
})
}
|
mikeee/dapr
|
pkg/config/configuration_test.go
|
GO
|
mit
| 18,468 |
package config
const (
// HostAddress is the address of the instance.
HostAddress string = "HOST_ADDRESS"
// DaprGRPCPort is the dapr api grpc port.
DaprGRPCPort string = "DAPR_GRPC_PORT"
// DaprHTTPPort is the dapr api http port.
DaprHTTPPort string = "DAPR_HTTP_PORT"
// DaprMetricsPort is the dapr metrics port.
DaprMetricsPort string = "DAPR_METRICS_PORT"
// DaprProfilePort is the dapr performance profiling port.
DaprProfilePort string = "DAPR_PROFILE_PORT"
// DaprPort is the dapr internal grpc port (sidecar to sidecar).
DaprPort string = "DAPR_PORT"
// AppPort is the port of the application, http/grpc depending on mode.
AppPort string = "APP_PORT"
// AppID is the ID of the application.
AppID string = "APP_ID"
// OpenTelemetry target URL for OTLP exporter
OtlpExporterEndpoint string = "OTEL_EXPORTER_OTLP_ENDPOINT"
// OpenTelemetry disables client transport security
OtlpExporterInsecure string = "OTEL_EXPORTER_OTLP_INSECURE"
// OpenTelemetry transport protocol (grpc, http/protobuf, http/json)
OtlpExporterProtocol string = "OTEL_EXPORTER_OTLP_PROTOCOL"
)
|
mikeee/dapr
|
pkg/config/env/env_variables.go
|
GO
|
mit
| 1,094 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
// KubernetesConfig defines the configuration for Kubernetes mode.
type KubernetesConfig struct {
ControlPlaneAddress string
}
|
mikeee/dapr
|
pkg/config/modes/kubernetes_config.go
|
GO
|
mit
| 705 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
// StandaloneConfig is the configuration for standalone mode.
type StandaloneConfig struct {
ResourcesPath []string
}
|
mikeee/dapr
|
pkg/config/modes/standalone_config.go
|
GO
|
mit
| 696 |
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
// Protocol is a communications protocol.
type Protocol string
const (
// GRPCProtocol is the gRPC communication protocol.
GRPCProtocol Protocol = "grpc"
// GRPCSProtocol is the gRPC communication protocol with TLS (without validating certificates).
GRPCSProtocol Protocol = "grpcs"
// HTTPProtocol is the HTTP communication protocol.
HTTPProtocol Protocol = "http"
// HTTPSProtocol is the HTTPS communication protocol with TLS (without validating certificates).
HTTPSProtocol Protocol = "https"
// H2CProtocol is the HTTP/2 Cleartext communication protocol (HTTP/2 without TLS).
H2CProtocol Protocol = "h2c"
)
// IsHTTP returns true if the app protocol is using HTTP (including HTTPS and H2C).
func (p Protocol) IsHTTP() bool {
switch p {
case HTTPProtocol, HTTPSProtocol, H2CProtocol:
return true
default:
return false
}
}
// HasTLS returns true if the app protocol is using TLS.
func (p Protocol) HasTLS() bool {
switch p {
case HTTPSProtocol, GRPCSProtocol:
return true
default:
return false
}
}
|
mikeee/dapr
|
pkg/config/protocol/protocol.go
|
GO
|
mit
| 1,608 |
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
import (
"testing"
)
func TestProtocolIsHttp(t *testing.T) {
tests := []struct {
name string
protocol Protocol
want bool
}{
{
name: "http",
protocol: HTTPProtocol,
want: true,
},
{
name: "https",
protocol: HTTPSProtocol,
want: true,
},
{
name: "h2c",
protocol: H2CProtocol,
want: true,
},
{
name: "grpc",
protocol: GRPCProtocol,
want: false,
},
{
name: "grpcs",
protocol: GRPCSProtocol,
want: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := tt.protocol.IsHTTP(); got != tt.want {
t.Errorf("Protocol.IsHTTP() = %v, want %v", got, tt.want)
}
})
}
}
|
mikeee/dapr
|
pkg/config/protocol/protocol_test.go
|
GO
|
mit
| 1,303 |
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
name: daprsystem
namespace: default
spec:
components:
deny:
- foo.bar
- hello.world/v1
|
mikeee/dapr
|
pkg/config/testdata/components_config.yaml
|
YAML
|
mit
| 168 |
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
name: secretappconfig
spec:
secrets:
scopes:
- storeName: "local"
defaultAccess: "allow"
allowedSecrets: ["daprsecret","redissecret"]
|
mikeee/dapr
|
pkg/config/testdata/config.yaml
|
YAML
|
mit
| 222 |
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
name: secretappconfig
spec:
secrets:
scopes:
- storeName: "local"
defaultAccess: "allow"
allowedSecrets: ["${DAPR_SECRET}"]
|
mikeee/dapr
|
pkg/config/testdata/env_variables_config.yaml
|
YAML
|
mit
| 212 |
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
name: daprConfig
spec:
features:
- name: Actor.Reentrancy
enabled: true
- name: Test.Feature
enabled: false
|
mikeee/dapr
|
pkg/config/testdata/feature_config.yaml
|
YAML
|
mit
| 190 |
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
name: secretappconfig
spec:
secrets:
scopes:
- storeName: "local"
defaultAccess: "allow"
- storeName: "local"
defaultAccess: "deny"
|
mikeee/dapr
|
pkg/config/testdata/invalid_secrets_config.yaml
|
YAML
|
mit
| 226 |
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
name: metricconfig
spec:
metrics:
enabled: false
|
mikeee/dapr
|
pkg/config/testdata/metric_disabled.yaml
|
YAML
|
mit
| 115 |
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
name: daprsystem
namespace: default
spec:
mtls:
enabled: true
workloadCertTTL: "25s"
allowedClockSkew: "1h"
|
mikeee/dapr
|
pkg/config/testdata/mtls_config.yaml
|
YAML
|
mit
| 184 |
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
name: daprsystem
namespace: default
spec:
mtls:
enabled: false
features:
- name: Test.Feature
enabled: true
|
mikeee/dapr
|
pkg/config/testdata/override.yaml
|
YAML
|
mit
| 188 |
kind: Configuration
apiversion: ""
metadata:
name: daprsystem
generatename: ""
namespace: default
selflink: ""
uid: ""
resourceversion: ""
generation: 0
creationtimestamp: "0001-01-01T00:00:00Z"
deletiontimestamp: null
deletiongraceperiodseconds: null
labels: {}
annotations: {}
ownerreferences: []
finalizers: []
managedfields: []
spec:
tracing:
otel:
isSecure: true
mtls:
workloadCertTTL: 25s
allowedClockSkew: 1h
metric:
enabled: true
accessControl:
defaultAction: allow
trustDomain: public
features:
- name: Test.Feature
enabled: true
workflow:
maxConcurrentWorkflowInvocations: 100
maxConcurrentActivityInvocations: 100
|
mikeee/dapr
|
pkg/config/testdata/override_gen.yaml
|
YAML
|
mit
| 803 |
tracing:
otel:
isSecure: true
mtls:
workloadCertTTL: 25s
allowedClockSkew: 1h
metric:
enabled: true
accessControl:
defaultAction: allow
trustDomain: public
features:
- name: Test.Feature
enabled: true
workflow:
maxConcurrentWorkflowInvocations: 100
maxConcurrentActivityInvocations: 100
|
mikeee/dapr
|
pkg/config/testdata/override_spec_gen.yaml
|
YAML
|
mit
| 337 |
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
name: wasmStrictSandbox
spec:
wasm:
strictSandbox: true
|
mikeee/dapr
|
pkg/config/testdata/wasm_strict_sandbox.yaml
|
YAML
|
mit
| 122 |
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
name: daprsystem
namespace: default
spec:
workflow:
maxConcurrentWorkflowInvocations: 32
maxConcurrentActivityInvocations: 64
|
mikeee/dapr
|
pkg/config/testdata/workflow_config.yaml
|
YAML
|
mit
| 198 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cors
// DefaultAllowedOrigins is the default origins allowed for the Dapr HTTP servers.
const DefaultAllowedOrigins = "*"
|
mikeee/dapr
|
pkg/cors/cors.go
|
GO
|
mit
| 692 |
package diagnostics
import (
"context"
"strconv"
"time"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
diagUtils "github.com/dapr/dapr/pkg/diagnostics/utils"
)
var (
processStatusKey = tag.MustNewKey("process_status")
successKey = tag.MustNewKey("success")
topicKey = tag.MustNewKey("topic")
)
const (
Delete = "delete"
Get = "get"
Set = "set"
StateQuery = "query"
ConfigurationSubscribe = "subscribe"
ConfigurationUnsubscribe = "unsubscribe"
StateTransaction = "transaction"
BulkGet = "bulk_get"
BulkDelete = "bulk_delete"
CryptoOp = "crypto_op"
)
// componentMetrics holds dapr runtime metrics for components.
type componentMetrics struct {
pubsubIngressCount *stats.Int64Measure
pubsubIngressLatency *stats.Float64Measure
bulkPubsubIngressCount *stats.Int64Measure
bulkPubsubEventIngressCount *stats.Int64Measure
bulkPubsubIngressLatency *stats.Float64Measure
pubsubEgressCount *stats.Int64Measure
pubsubEgressLatency *stats.Float64Measure
bulkPubsubEgressCount *stats.Int64Measure
bulkPubsubEventEgressCount *stats.Int64Measure
bulkPubsubEgressLatency *stats.Float64Measure
inputBindingCount *stats.Int64Measure
inputBindingLatency *stats.Float64Measure
outputBindingCount *stats.Int64Measure
outputBindingLatency *stats.Float64Measure
stateCount *stats.Int64Measure
stateLatency *stats.Float64Measure
configurationCount *stats.Int64Measure
configurationLatency *stats.Float64Measure
secretCount *stats.Int64Measure
secretLatency *stats.Float64Measure
cryptoCount *stats.Int64Measure
cryptoLatency *stats.Float64Measure
appID string
enabled bool
namespace string
}
// newComponentMetrics returns a componentMetrics instance with default stats.
func newComponentMetrics() *componentMetrics {
return &componentMetrics{
pubsubIngressCount: stats.Int64(
"component/pubsub_ingress/count",
"The number of incoming messages arriving from the pub/sub component.",
stats.UnitDimensionless),
pubsubIngressLatency: stats.Float64(
"component/pubsub_ingress/latencies",
"The consuming app event processing latency.",
stats.UnitMilliseconds),
bulkPubsubIngressCount: stats.Int64(
"component/pubsub_ingress/bulk/count",
"The number of incoming bulk subscribe calls arriving from the bulk pub/sub component.",
stats.UnitDimensionless),
bulkPubsubEventIngressCount: stats.Int64(
"component/pubsub_ingress/bulk/event_count",
"Total number of incoming messages arriving from the bulk pub/sub component via Bulk Subscribe.",
stats.UnitDimensionless),
bulkPubsubIngressLatency: stats.Float64(
"component/pubsub_ingress/bulk/latencies",
"The consuming app event processing latency for the bulk pub/sub component.",
stats.UnitMilliseconds),
pubsubEgressCount: stats.Int64(
"component/pubsub_egress/count",
"The number of outgoing messages published to the pub/sub component.",
stats.UnitDimensionless),
pubsubEgressLatency: stats.Float64(
"component/pubsub_egress/latencies",
"The latency of the response from the pub/sub component.",
stats.UnitMilliseconds),
bulkPubsubEgressCount: stats.Int64(
"component/pubsub_egress/bulk/count",
"The number of bulk publish calls to the pub/sub component.",
stats.UnitDimensionless),
bulkPubsubEventEgressCount: stats.Int64(
"component/pubsub_egress/bulk/event_count",
"The number of outgoing messages to the pub/sub component published through bulk publish API.",
stats.UnitDimensionless),
bulkPubsubEgressLatency: stats.Float64(
"component/pubsub_egress/bulk/latencies",
"The latency of the response for the bulk publish call from the pub/sub component.",
stats.UnitMilliseconds),
inputBindingCount: stats.Int64(
"component/input_binding/count",
"The number of incoming events arriving from the input binding component.",
stats.UnitDimensionless),
inputBindingLatency: stats.Float64(
"component/input_binding/latencies",
"The triggered app event processing latency.",
stats.UnitMilliseconds),
outputBindingCount: stats.Int64(
"component/output_binding/count",
"The number of operations invoked on the output binding component.",
stats.UnitDimensionless),
outputBindingLatency: stats.Float64(
"component/output_binding/latencies",
"The latency of the response from the output binding component.",
stats.UnitMilliseconds),
stateCount: stats.Int64(
"component/state/count",
"The number of operations performed on the state component.",
stats.UnitDimensionless),
stateLatency: stats.Float64(
"component/state/latencies",
"The latency of the response from the state component.",
stats.UnitMilliseconds),
configurationCount: stats.Int64(
"component/configuration/count",
"The number of operations performed on the configuration component.",
stats.UnitDimensionless),
configurationLatency: stats.Float64(
"component/configuration/latencies",
"The latency of the response from the configuration component.",
stats.UnitMilliseconds),
secretCount: stats.Int64(
"component/secret/count",
"The number of operations performed on the secret component.",
stats.UnitDimensionless),
secretLatency: stats.Float64(
"component/secret/latencies",
"The latency of the response from the secret component.",
stats.UnitMilliseconds),
cryptoCount: stats.Int64(
"component/crypto/count",
"The number of operations performed on the crypto component.",
stats.UnitDimensionless),
cryptoLatency: stats.Float64(
"component/crypto/latencies",
"The latency of the response from the crypto component.",
stats.UnitMilliseconds),
}
}
// Init registers the component metrics views.
func (c *componentMetrics) Init(appID, namespace string) error {
c.appID = appID
c.enabled = true
c.namespace = namespace
return view.Register(
diagUtils.NewMeasureView(c.pubsubIngressLatency, []tag.Key{appIDKey, componentKey, namespaceKey, processStatusKey, topicKey, statusKey}, defaultLatencyDistribution),
diagUtils.NewMeasureView(c.pubsubIngressCount, []tag.Key{appIDKey, componentKey, namespaceKey, processStatusKey, topicKey, statusKey}, view.Count()),
diagUtils.NewMeasureView(c.bulkPubsubIngressLatency, []tag.Key{appIDKey, componentKey, namespaceKey, processStatusKey, topicKey}, defaultLatencyDistribution),
diagUtils.NewMeasureView(c.bulkPubsubIngressCount, []tag.Key{appIDKey, componentKey, namespaceKey, processStatusKey, topicKey}, view.Count()),
diagUtils.NewMeasureView(c.bulkPubsubEventIngressCount, []tag.Key{appIDKey, componentKey, namespaceKey, processStatusKey, topicKey}, view.Count()),
diagUtils.NewMeasureView(c.pubsubEgressLatency, []tag.Key{appIDKey, componentKey, namespaceKey, successKey, topicKey}, defaultLatencyDistribution),
diagUtils.NewMeasureView(c.pubsubEgressCount, []tag.Key{appIDKey, componentKey, namespaceKey, successKey, topicKey}, view.Count()),
diagUtils.NewMeasureView(c.bulkPubsubEgressLatency, []tag.Key{appIDKey, componentKey, namespaceKey, successKey, topicKey}, defaultLatencyDistribution),
diagUtils.NewMeasureView(c.bulkPubsubEgressCount, []tag.Key{appIDKey, componentKey, namespaceKey, successKey, topicKey}, view.Count()),
diagUtils.NewMeasureView(c.inputBindingLatency, []tag.Key{appIDKey, componentKey, namespaceKey, successKey}, defaultLatencyDistribution),
diagUtils.NewMeasureView(c.inputBindingCount, []tag.Key{appIDKey, componentKey, namespaceKey, successKey}, view.Count()),
diagUtils.NewMeasureView(c.outputBindingLatency, []tag.Key{appIDKey, componentKey, namespaceKey, operationKey, successKey}, defaultLatencyDistribution),
diagUtils.NewMeasureView(c.outputBindingCount, []tag.Key{appIDKey, componentKey, namespaceKey, operationKey, successKey}, view.Count()),
diagUtils.NewMeasureView(c.stateLatency, []tag.Key{appIDKey, componentKey, namespaceKey, operationKey, successKey}, defaultLatencyDistribution),
diagUtils.NewMeasureView(c.stateCount, []tag.Key{appIDKey, componentKey, namespaceKey, operationKey, successKey}, view.Count()),
diagUtils.NewMeasureView(c.configurationLatency, []tag.Key{appIDKey, componentKey, namespaceKey, operationKey, successKey}, defaultLatencyDistribution),
diagUtils.NewMeasureView(c.configurationCount, []tag.Key{appIDKey, componentKey, namespaceKey, operationKey, successKey}, view.Count()),
diagUtils.NewMeasureView(c.secretLatency, []tag.Key{appIDKey, componentKey, namespaceKey, operationKey, successKey}, defaultLatencyDistribution),
diagUtils.NewMeasureView(c.secretCount, []tag.Key{appIDKey, componentKey, namespaceKey, operationKey, successKey}, view.Count()),
diagUtils.NewMeasureView(c.cryptoLatency, []tag.Key{appIDKey, componentKey, namespaceKey, operationKey, successKey}, defaultLatencyDistribution),
diagUtils.NewMeasureView(c.cryptoCount, []tag.Key{appIDKey, componentKey, namespaceKey, operationKey, successKey}, view.Count()),
)
}
// PubsubIngressEvent records the metrics for a pub/sub ingress event.
func (c *componentMetrics) PubsubIngressEvent(ctx context.Context, component, processStatus, status, topic string, elapsed float64) {
if c.enabled {
if status == "" {
status = processStatus
}
stats.RecordWithTags(
ctx,
diagUtils.WithTags(c.pubsubIngressCount.Name(), appIDKey, c.appID, componentKey, component, namespaceKey, c.namespace, processStatusKey, processStatus, statusKey, status, topicKey, topic),
c.pubsubIngressCount.M(1))
if elapsed > 0 {
stats.RecordWithTags(
ctx,
diagUtils.WithTags(c.pubsubIngressLatency.Name(), appIDKey, c.appID, componentKey, component, namespaceKey, c.namespace, processStatusKey, processStatus, statusKey, status, topicKey, topic),
c.pubsubIngressLatency.M(elapsed))
}
}
}
// BulkPubsubIngressEvent records the metrics for a bulk pub/sub ingress event.
func (c *componentMetrics) BulkPubsubIngressEvent(ctx context.Context, component, topic string, elapsed float64) {
if c.enabled {
stats.RecordWithTags(
ctx,
diagUtils.WithTags(c.bulkPubsubIngressCount.Name(), appIDKey, c.appID, componentKey, component, namespaceKey, c.namespace, topicKey, topic),
c.bulkPubsubIngressCount.M(1))
if elapsed > 0 {
stats.RecordWithTags(
ctx,
diagUtils.WithTags(c.bulkPubsubIngressLatency.Name(), appIDKey, c.appID, componentKey, component, namespaceKey, c.namespace, topicKey, topic),
c.bulkPubsubIngressLatency.M(elapsed))
}
}
}
// BulkPubsubIngressEventEntries records the metrics for entries inside a bulk pub/sub ingress event.
func (c *componentMetrics) BulkPubsubIngressEventEntries(ctx context.Context, component, topic string, processStatus string, eventCount int64) {
if c.enabled && eventCount > 0 {
stats.RecordWithTags(
ctx,
diagUtils.WithTags(c.bulkPubsubEventIngressCount.Name(), appIDKey, c.appID, componentKey, component, namespaceKey, c.namespace, processStatusKey, processStatus, topicKey, topic),
c.bulkPubsubEventIngressCount.M(eventCount))
}
}
// BulkPubsubEgressEvent records the metrics for a bulk pub/sub egress event.
// A non-zero eventCount indicates that some or all events in the bulk publish call were published successfully.
func (c *componentMetrics) BulkPubsubEgressEvent(ctx context.Context, component, topic string, success bool, eventCount int64, elapsed float64) {
if c.enabled {
stats.RecordWithTags(
ctx,
diagUtils.WithTags(c.bulkPubsubEgressCount.Name(), appIDKey, c.appID, componentKey, component, namespaceKey, c.namespace, successKey, strconv.FormatBool(success), topicKey, topic),
c.bulkPubsubEgressCount.M(1))
if eventCount > 0 {
// There is at least one successful event in the bulk publish call, even if the overall call failed
stats.RecordWithTags(
ctx,
diagUtils.WithTags(c.bulkPubsubEventEgressCount.Name(), appIDKey, c.appID, componentKey, component, namespaceKey, c.namespace, successKey, strconv.FormatBool(true), topicKey, topic),
c.bulkPubsubEventEgressCount.M(eventCount))
}
if elapsed > 0 {
stats.RecordWithTags(
ctx,
diagUtils.WithTags(c.bulkPubsubEgressLatency.Name(), appIDKey, c.appID, componentKey, component, namespaceKey, c.namespace, successKey, strconv.FormatBool(success), topicKey, topic),
c.bulkPubsubEgressLatency.M(elapsed))
}
}
}
// PubsubEgressEvent records the metrics for a pub/sub egress event.
func (c *componentMetrics) PubsubEgressEvent(ctx context.Context, component, topic string, success bool, elapsed float64) {
if c.enabled {
stats.RecordWithTags(
ctx,
diagUtils.WithTags(c.pubsubEgressCount.Name(), appIDKey, c.appID, componentKey, component, namespaceKey, c.namespace, successKey, strconv.FormatBool(success), topicKey, topic),
c.pubsubEgressCount.M(1))
if elapsed > 0 {
stats.RecordWithTags(
ctx,
diagUtils.WithTags(c.pubsubEgressLatency.Name(), appIDKey, c.appID, componentKey, component, namespaceKey, c.namespace, successKey, strconv.FormatBool(success), topicKey, topic),
c.pubsubEgressLatency.M(elapsed))
}
}
}
// InputBindingEvent records the metrics for an input binding event.
func (c *componentMetrics) InputBindingEvent(ctx context.Context, component string, success bool, elapsed float64) {
if c.enabled {
stats.RecordWithTags(
ctx,
diagUtils.WithTags(c.inputBindingCount.Name(), appIDKey, c.appID, componentKey, component, namespaceKey, c.namespace, successKey, strconv.FormatBool(success)),
c.inputBindingCount.M(1))
if elapsed > 0 {
stats.RecordWithTags(
ctx,
diagUtils.WithTags(c.inputBindingLatency.Name(), appIDKey, c.appID, componentKey, component, namespaceKey, c.namespace, successKey, strconv.FormatBool(success)),
c.inputBindingLatency.M(elapsed))
}
}
}
// OutputBindingEvent records the metrics for an output binding event.
func (c *componentMetrics) OutputBindingEvent(ctx context.Context, component, operation string, success bool, elapsed float64) {
if c.enabled {
stats.RecordWithTags(
ctx,
diagUtils.WithTags(c.outputBindingCount.Name(), appIDKey, c.appID, componentKey, component, namespaceKey, c.namespace, operationKey, operation, successKey, strconv.FormatBool(success)),
c.outputBindingCount.M(1))
if elapsed > 0 {
stats.RecordWithTags(
ctx,
diagUtils.WithTags(c.outputBindingLatency.Name(), appIDKey, c.appID, componentKey, component, namespaceKey, c.namespace, operationKey, operation, successKey, strconv.FormatBool(success)),
c.outputBindingLatency.M(elapsed))
}
}
}
// StateInvoked records the metrics for a state event.
func (c *componentMetrics) StateInvoked(ctx context.Context, component, operation string, success bool, elapsed float64) {
if c.enabled {
stats.RecordWithTags(
ctx,
diagUtils.WithTags(c.stateCount.Name(), appIDKey, c.appID, componentKey, component, namespaceKey, c.namespace, operationKey, operation, successKey, strconv.FormatBool(success)),
c.stateCount.M(1))
if elapsed > 0 {
stats.RecordWithTags(
ctx,
diagUtils.WithTags(c.stateLatency.Name(), appIDKey, c.appID, componentKey, component, namespaceKey, c.namespace, operationKey, operation, successKey, strconv.FormatBool(success)),
c.stateLatency.M(elapsed))
}
}
}
// ConfigurationInvoked records the metrics for a configuration event.
func (c *componentMetrics) ConfigurationInvoked(ctx context.Context, component, operation string, success bool, elapsed float64) {
if c.enabled {
stats.RecordWithTags(
ctx,
diagUtils.WithTags(c.configurationCount.Name(), appIDKey, c.appID, componentKey, component, namespaceKey, c.namespace, operationKey, operation, successKey, strconv.FormatBool(success)),
c.configurationCount.M(1))
if elapsed > 0 {
stats.RecordWithTags(
ctx,
diagUtils.WithTags(c.configurationLatency.Name(), appIDKey, c.appID, componentKey, component, namespaceKey, c.namespace, operationKey, operation, successKey, strconv.FormatBool(success)),
c.configurationLatency.M(elapsed))
}
}
}
// SecretInvoked records the metrics for a secret event.
func (c *componentMetrics) SecretInvoked(ctx context.Context, component, operation string, success bool, elapsed float64) {
if c.enabled {
stats.RecordWithTags(
ctx,
diagUtils.WithTags(c.secretCount.Name(), appIDKey, c.appID, componentKey, component, namespaceKey, c.namespace, operationKey, operation, successKey, strconv.FormatBool(success)),
c.secretCount.M(1))
if elapsed > 0 {
stats.RecordWithTags(
ctx,
diagUtils.WithTags(c.secretLatency.Name(), appIDKey, c.appID, componentKey, component, namespaceKey, c.namespace, operationKey, operation, successKey, strconv.FormatBool(success)),
c.secretLatency.M(elapsed))
}
}
}
// CryptoInvoked records the metrics for a crypto event.
func (c *componentMetrics) CryptoInvoked(ctx context.Context, component, operation string, success bool, elapsed float64) {
if c.enabled {
stats.RecordWithTags(
ctx,
diagUtils.WithTags(c.cryptoCount.Name(), appIDKey, c.appID, componentKey, component, namespaceKey, c.namespace, operationKey, operation, successKey, strconv.FormatBool(success)),
c.cryptoCount.M(1))
if elapsed > 0 {
stats.RecordWithTags(
ctx,
diagUtils.WithTags(c.cryptoLatency.Name(), appIDKey, c.appID, componentKey, component, namespaceKey, c.namespace, operationKey, operation, successKey, strconv.FormatBool(success)),
c.cryptoLatency.M(elapsed))
}
}
}
func ElapsedSince(start time.Time) float64 {
return float64(time.Since(start) / time.Millisecond)
}
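// Illustrative usage (a sketch added for clarity, not part of the original
// file; ctx, store, req, and the component name are hypothetical): a component
// call is typically timed with ElapsedSince and reported through one of the
// helpers above, for example:
//
// c := newComponentMetrics()
// _ = c.Init("myapp", "default")
// start := time.Now()
// err := store.Get(ctx, req) // hypothetical state store call
// c.StateInvoked(ctx, "statestore", Get, err == nil, ElapsedSince(start))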
|
mikeee/dapr
|
pkg/diagnostics/component_monitoring.go
|
GO
|
mit
| 17,626 |
package diagnostics
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/assert"
"go.opencensus.io/stats/view"
)
const (
componentName = "test"
)
func componentsMetrics() *componentMetrics {
c := newComponentMetrics()
c.Init("test", "default")
return c
}
func TestPubSub(t *testing.T) {
t.Run("record drop by app or sidecar", func(t *testing.T) {
c := componentsMetrics()
c.PubsubIngressEvent(context.Background(), componentName, "drop", "success", "A", 1)
c.PubsubIngressEvent(context.Background(), componentName, "drop", "drop", "A", 1)
viewData, _ := view.RetrieveData("component/pubsub_ingress/count")
v := view.Find("component/pubsub_ingress/count")
allTagsPresent(t, v, viewData[0].Tags)
assert.Len(t, viewData, 2)
assert.Equal(t, int64(1), viewData[0].Data.(*view.CountData).Value)
assert.Equal(t, int64(1), viewData[1].Data.(*view.CountData).Value)
})
t.Run("record ingress count", func(t *testing.T) {
c := componentsMetrics()
c.PubsubIngressEvent(context.Background(), componentName, "retry", "retry", "A", 0)
viewData, _ := view.RetrieveData("component/pubsub_ingress/count")
v := view.Find("component/pubsub_ingress/count")
allTagsPresent(t, v, viewData[0].Tags)
})
t.Run("record ingress latency", func(t *testing.T) {
c := componentsMetrics()
c.PubsubIngressEvent(context.Background(), componentName, "retry", "", "A", 1)
viewData, _ := view.RetrieveData("component/pubsub_ingress/latencies")
v := view.Find("component/pubsub_ingress/latencies")
allTagsPresent(t, v, viewData[0].Tags)
assert.InEpsilon(t, 1, viewData[0].Data.(*view.DistributionData).Min, 0)
})
t.Run("record egress latency", func(t *testing.T) {
c := componentsMetrics()
c.PubsubEgressEvent(context.Background(), componentName, "A", true, 1)
viewData, _ := view.RetrieveData("component/pubsub_egress/latencies")
v := view.Find("component/pubsub_egress/latencies")
allTagsPresent(t, v, viewData[0].Tags)
assert.InEpsilon(t, 1, viewData[0].Data.(*view.DistributionData).Min, 0)
})
}
func TestBindings(t *testing.T) {
t.Run("record input binding count", func(t *testing.T) {
c := componentsMetrics()
c.InputBindingEvent(context.Background(), componentName, false, 0)
viewData, _ := view.RetrieveData("component/input_binding/count")
v := view.Find("component/input_binding/count")
allTagsPresent(t, v, viewData[0].Tags)
})
t.Run("record input binding latency", func(t *testing.T) {
c := componentsMetrics()
c.InputBindingEvent(context.Background(), componentName, false, 1)
viewData, _ := view.RetrieveData("component/input_binding/latencies")
v := view.Find("component/input_binding/count")
allTagsPresent(t, v, viewData[0].Tags)
assert.InEpsilon(t, 1, viewData[0].Data.(*view.DistributionData).Min, 0)
})
t.Run("record output binding count", func(t *testing.T) {
c := componentsMetrics()
c.OutputBindingEvent(context.Background(), componentName, "set", false, 0)
viewData, _ := view.RetrieveData("component/output_binding/count")
v := view.Find("component/input_binding/count")
allTagsPresent(t, v, viewData[0].Tags)
})
t.Run("record output binding latency", func(t *testing.T) {
c := componentsMetrics()
c.OutputBindingEvent(context.Background(), componentName, "set", false, 1)
viewData, _ := view.RetrieveData("component/output_binding/latencies")
v := view.Find("component/output_binding/latencies")
allTagsPresent(t, v, viewData[0].Tags)
assert.InEpsilon(t, 1, viewData[0].Data.(*view.DistributionData).Min, 0)
})
}
func TestState(t *testing.T) {
t.Run("record state count", func(t *testing.T) {
c := componentsMetrics()
c.StateInvoked(context.Background(), componentName, "get", false, 0)
viewData, _ := view.RetrieveData("component/state/count")
v := view.Find("component/state/count")
allTagsPresent(t, v, viewData[0].Tags)
})
t.Run("record state latency", func(t *testing.T) {
c := componentsMetrics()
c.StateInvoked(context.Background(), componentName, "get", false, 1)
viewData, _ := view.RetrieveData("component/state/latencies")
v := view.Find("component/state/latencies")
allTagsPresent(t, v, viewData[0].Tags)
assert.InEpsilon(t, 1, viewData[0].Data.(*view.DistributionData).Min, 0)
})
}
func TestConfiguration(t *testing.T) {
t.Run("record configuration count", func(t *testing.T) {
c := componentsMetrics()
c.ConfigurationInvoked(context.Background(), componentName, "get", false, 0)
viewData, _ := view.RetrieveData("component/configuration/count")
v := view.Find("component/configuration/count")
allTagsPresent(t, v, viewData[0].Tags)
})
t.Run("record configuration latency", func(t *testing.T) {
c := componentsMetrics()
c.ConfigurationInvoked(context.Background(), componentName, "get", false, 1)
viewData, _ := view.RetrieveData("component/configuration/latencies")
v := view.Find("component/configuration/latencies")
allTagsPresent(t, v, viewData[0].Tags)
assert.InEpsilon(t, 1, viewData[0].Data.(*view.DistributionData).Min, 0)
})
}
func TestSecrets(t *testing.T) {
t.Run("record secret count", func(t *testing.T) {
c := componentsMetrics()
c.SecretInvoked(context.Background(), componentName, "get", false, 0)
viewData, _ := view.RetrieveData("component/secret/count")
v := view.Find("component/secret/count")
allTagsPresent(t, v, viewData[0].Tags)
})
t.Run("record secret latency", func(t *testing.T) {
c := componentsMetrics()
c.SecretInvoked(context.Background(), componentName, "get", false, 1)
viewData, _ := view.RetrieveData("component/secret/latencies")
v := view.Find("component/secret/latencies")
allTagsPresent(t, v, viewData[0].Tags)
assert.InEpsilon(t, 1, viewData[0].Data.(*view.DistributionData).Min, 0)
})
}
func TestComponentMetricsInit(t *testing.T) {
c := componentsMetrics()
assert.True(t, c.enabled)
assert.Equal(t, "test", c.appID)
assert.Equal(t, "default", c.namespace)
}
func TestElapsedSince(t *testing.T) {
start := time.Now()
time.Sleep(time.Second)
elapsed := ElapsedSince(start)
assert.GreaterOrEqual(t, elapsed, float64(1000))
}
|
mikeee/dapr
|
pkg/diagnostics/component_monitoring_test.go
|
GO
|
mit
| 6,154 |
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package consts
import (
semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
)
const (
// DaprInternalSpanAttrPrefix is the prefix for internal span attributes.
// Middleware will not populate a span attribute whose key starts with this prefix.
DaprInternalSpanAttrPrefix = "__dapr."
// DaprAPISpanNameInternal is the internal span name attribute; it is not populated to the span attributes.
DaprAPISpanNameInternal = DaprInternalSpanAttrPrefix + "spanname"
// Span attribute keys
// Reference trace semantics https://github.com/open-telemetry/opentelemetry-specification/tree/master/specification/trace/semantic_conventions
DBSystemSpanAttributeKey = string(semconv.DBSystemKey)
DBNameSpanAttributeKey = string(semconv.DBNameKey)
DBStatementSpanAttributeKey = string(semconv.DBStatementKey)
DBConnectionStringSpanAttributeKey = string(semconv.DBConnectionStringKey)
MessagingSystemSpanAttributeKey = string(semconv.MessagingSystemKey)
MessagingDestinationSpanAttributeKey = string(semconv.MessagingDestinationKey)
MessagingDestinationKindSpanAttributeKey = string(semconv.MessagingDestinationKindKey)
GrpcServiceSpanAttributeKey = string(semconv.RPCServiceKey)
NetPeerNameSpanAttributeKey = string(semconv.NetPeerNameKey)
DaprAPISpanAttributeKey = "dapr.api"
DaprAPIStatusCodeSpanAttributeKey = "dapr.status_code"
DaprAPIProtocolSpanAttributeKey = "dapr.protocol"
DaprAPIInvokeMethod = "dapr.invoke_method"
DaprAPIActorTypeID = "dapr.actor"
DaprAPIHTTPSpanAttrValue = "http"
DaprAPIGRPCSpanAttrValue = "grpc"
StateBuildingBlockType = "state"
SecretBuildingBlockType = "secrets"
BindingBuildingBlockType = "bindings"
PubsubBuildingBlockType = "pubsub"
DaprGRPCServiceInvocationService = "ServiceInvocation"
DaprGRPCDaprService = "Dapr"
// Keys used in the context's metadata for streaming calls
// Note: these keys must always be all-lowercase
DaprCallLocalStreamMethodKey = "__dapr_calllocalstream_method"
)
// MessagingDestinationTopicKind is effectively const, but isn't a const from upstream.
var MessagingDestinationTopicKind = semconv.MessagingDestinationKindTopic.Value.AsString()
// GrpcAppendSpanAttributesFn is the interface that applies to gRPC requests that add span attributes.
type GrpcAppendSpanAttributesFn interface {
// AppendSpanAttributes appends attributes to the map used for the span in tracing for the gRPC method.
AppendSpanAttributes(rpcMethod string, m map[string]string)
}
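// Illustrative implementation (a sketch added for clarity, not part of the
// original file; the GetStateRequest receiver is hypothetical): a request type
// opts into extra span attributes by satisfying GrpcAppendSpanAttributesFn,
// for example:
//
// func (r *GetStateRequest) AppendSpanAttributes(rpcMethod string, m map[string]string) {
// 	m[GrpcServiceSpanAttributeKey] = DaprGRPCDaprService
// 	m[DBSystemSpanAttributeKey] = StateBuildingBlockType
// 	m[DBNameSpanAttributeKey] = r.GetStoreName()
// }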
|
mikeee/dapr
|
pkg/diagnostics/consts/consts.go
|
GO
|
mit
| 3,131 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package diagnostics
import (
"context"
"time"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"google.golang.org/grpc"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
"github.com/dapr/dapr/pkg/api/grpc/metadata"
diagUtils "github.com/dapr/dapr/pkg/diagnostics/utils"
)
// This implementation is inspired by
// https://github.com/census-instrumentation/opencensus-go/tree/master/plugin/ocgrpc
// Tag key definitions for http requests.
var (
KeyServerMethod = tag.MustNewKey("grpc_server_method")
KeyServerStatus = tag.MustNewKey("grpc_server_status")
KeyClientMethod = tag.MustNewKey("grpc_client_method")
KeyClientStatus = tag.MustNewKey("grpc_client_status")
)
const appHealthCheckMethod = "/dapr.proto.runtime.v1.AppCallbackHealthCheck/HealthCheck"
type grpcMetrics struct {
serverReceivedBytes *stats.Int64Measure
serverSentBytes *stats.Int64Measure
serverLatency *stats.Float64Measure
serverCompletedRpcs *stats.Int64Measure
clientSentBytes *stats.Int64Measure
clientReceivedBytes *stats.Int64Measure
clientRoundtripLatency *stats.Float64Measure
clientCompletedRpcs *stats.Int64Measure
healthProbeCompletedCount *stats.Int64Measure
healthProbeRoundripLatency *stats.Float64Measure
appID string
enabled bool
}
func newGRPCMetrics() *grpcMetrics {
return &grpcMetrics{
serverReceivedBytes: stats.Int64(
"grpc.io/server/received_bytes_per_rpc",
"Total bytes received across all messages per RPC.",
stats.UnitBytes),
serverSentBytes: stats.Int64(
"grpc.io/server/sent_bytes_per_rpc",
"Total bytes sent in across all response messages per RPC.",
stats.UnitBytes),
serverLatency: stats.Float64(
"grpc.io/server/server_latency",
"Time between first byte of request received to last byte of response sent, or terminal error.",
stats.UnitMilliseconds),
serverCompletedRpcs: stats.Int64(
"grpc.io/server/completed_rpcs",
"Distribution of bytes sent per RPC, by method.",
stats.UnitDimensionless),
clientSentBytes: stats.Int64(
"grpc.io/client/sent_bytes_per_rpc",
"Total bytes sent across all request messages per RPC.",
stats.UnitBytes),
clientReceivedBytes: stats.Int64(
"grpc.io/client/received_bytes_per_rpc",
"Total bytes received across all response messages per RPC.",
stats.UnitBytes),
clientRoundtripLatency: stats.Float64(
"grpc.io/client/roundtrip_latency",
"Time between first byte of request sent to last byte of response received, or terminal error.",
stats.UnitMilliseconds),
clientCompletedRpcs: stats.Int64(
"grpc.io/client/completed_rpcs",
"Count of RPCs by method and status.",
stats.UnitDimensionless),
healthProbeCompletedCount: stats.Int64(
"grpc.io/healthprobes/completed_count",
"Count of completed health probes",
stats.UnitDimensionless),
healthProbeRoundripLatency: stats.Float64(
"grpc.io/healthprobes/roundtrip_latency",
"Time between first byte of health probes sent to last byte of response received, or terminal error",
stats.UnitMilliseconds),
enabled: false,
}
}
func (g *grpcMetrics) Init(appID string) error {
g.appID = appID
g.enabled = true
return view.Register(
diagUtils.NewMeasureView(g.serverReceivedBytes, []tag.Key{appIDKey, KeyServerMethod}, defaultSizeDistribution),
diagUtils.NewMeasureView(g.serverSentBytes, []tag.Key{appIDKey, KeyServerMethod}, defaultSizeDistribution),
diagUtils.NewMeasureView(g.serverLatency, []tag.Key{appIDKey, KeyServerMethod, KeyServerStatus}, defaultLatencyDistribution),
diagUtils.NewMeasureView(g.serverCompletedRpcs, []tag.Key{appIDKey, KeyServerMethod, KeyServerStatus}, view.Count()),
diagUtils.NewMeasureView(g.clientSentBytes, []tag.Key{appIDKey, KeyClientMethod}, defaultSizeDistribution),
diagUtils.NewMeasureView(g.clientReceivedBytes, []tag.Key{appIDKey, KeyClientMethod}, defaultSizeDistribution),
diagUtils.NewMeasureView(g.clientRoundtripLatency, []tag.Key{appIDKey, KeyClientMethod, KeyClientStatus}, defaultLatencyDistribution),
diagUtils.NewMeasureView(g.clientCompletedRpcs, []tag.Key{appIDKey, KeyClientMethod, KeyClientStatus}, view.Count()),
diagUtils.NewMeasureView(g.healthProbeRoundripLatency, []tag.Key{appIDKey, KeyClientStatus}, defaultLatencyDistribution),
diagUtils.NewMeasureView(g.healthProbeCompletedCount, []tag.Key{appIDKey, KeyClientStatus}, view.Count()),
)
}
func (g *grpcMetrics) IsEnabled() bool {
return g != nil && g.enabled
}
func (g *grpcMetrics) ServerRequestSent(ctx context.Context, method, status string, reqContentSize, resContentSize int64, start time.Time) {
if !g.IsEnabled() {
return
}
elapsed := float64(time.Since(start) / time.Millisecond)
stats.RecordWithTags(ctx,
diagUtils.WithTags(g.serverCompletedRpcs.Name(), appIDKey, g.appID, KeyServerMethod, method, KeyServerStatus, status),
g.serverCompletedRpcs.M(1))
stats.RecordWithTags(ctx,
diagUtils.WithTags(g.serverReceivedBytes.Name(), appIDKey, g.appID, KeyServerMethod, method),
g.serverReceivedBytes.M(reqContentSize))
stats.RecordWithTags(ctx,
diagUtils.WithTags(g.serverSentBytes.Name(), appIDKey, g.appID, KeyServerMethod, method),
g.serverSentBytes.M(resContentSize))
stats.RecordWithTags(ctx,
diagUtils.WithTags(g.serverLatency.Name(), appIDKey, g.appID, KeyServerMethod, method, KeyServerStatus, status),
g.serverLatency.M(elapsed))
}
func (g *grpcMetrics) StreamServerRequestSent(ctx context.Context, method, status string, start time.Time) {
if !g.IsEnabled() {
return
}
elapsed := float64(time.Since(start) / time.Millisecond)
stats.RecordWithTags(ctx,
diagUtils.WithTags(g.serverCompletedRpcs.Name(), appIDKey, g.appID, KeyServerMethod, method, KeyServerStatus, status),
g.serverCompletedRpcs.M(1))
stats.RecordWithTags(ctx,
diagUtils.WithTags(g.serverLatency.Name(), appIDKey, g.appID, KeyServerMethod, method, KeyServerStatus, status),
g.serverLatency.M(elapsed))
}
func (g *grpcMetrics) StreamClientRequestSent(ctx context.Context, method, status string, start time.Time) {
if !g.IsEnabled() {
return
}
elapsed := float64(time.Since(start) / time.Millisecond)
stats.RecordWithTags(ctx,
diagUtils.WithTags(g.clientCompletedRpcs.Name(), appIDKey, g.appID, KeyClientMethod, method, KeyClientStatus, status),
g.clientCompletedRpcs.M(1))
stats.RecordWithTags(ctx,
diagUtils.WithTags(g.clientRoundtripLatency.Name(), appIDKey, g.appID, KeyClientMethod, method, KeyClientStatus, status),
g.clientRoundtripLatency.M(elapsed))
}
func (g *grpcMetrics) ClientRequestReceived(ctx context.Context, method, status string, reqContentSize, resContentSize int64, start time.Time) {
if !g.IsEnabled() {
return
}
elapsed := float64(time.Since(start) / time.Millisecond)
stats.RecordWithTags(ctx,
diagUtils.WithTags(g.clientCompletedRpcs.Name(), appIDKey, g.appID, KeyClientMethod, method, KeyClientStatus, status),
g.clientCompletedRpcs.M(1))
stats.RecordWithTags(ctx,
diagUtils.WithTags(g.clientRoundtripLatency.Name(), appIDKey, g.appID, KeyClientMethod, method, KeyClientStatus, status),
g.clientRoundtripLatency.M(elapsed))
stats.RecordWithTags(ctx,
diagUtils.WithTags(g.clientSentBytes.Name(), appIDKey, g.appID, KeyClientMethod, method),
g.clientSentBytes.M(reqContentSize))
stats.RecordWithTags(ctx,
diagUtils.WithTags(g.clientReceivedBytes.Name(), appIDKey, g.appID, KeyClientMethod, method),
g.clientReceivedBytes.M(resContentSize))
}
func (g *grpcMetrics) AppHealthProbeCompleted(ctx context.Context, status string, start time.Time) {
if !g.IsEnabled() {
return
}
elapsed := float64(time.Since(start) / time.Millisecond)
stats.RecordWithTags(ctx,
diagUtils.WithTags(g.healthProbeCompletedCount.Name(), appIDKey, g.appID, KeyClientStatus, status),
g.healthProbeCompletedCount.M(1))
stats.RecordWithTags(ctx,
diagUtils.WithTags(g.healthProbeRoundripLatency.Name(), appIDKey, g.appID, KeyClientStatus, status),
g.healthProbeRoundripLatency.M(elapsed))
}
func (g *grpcMetrics) getPayloadSize(payload interface{}) int {
return proto.Size(payload.(proto.Message))
}
// UnaryServerInterceptor is a gRPC server-side interceptor for Unary RPCs.
func (g *grpcMetrics) UnaryServerInterceptor() func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
start := time.Now()
resp, err := handler(ctx, req)
size := 0
if err == nil {
size = g.getPayloadSize(resp)
}
g.ServerRequestSent(ctx, info.FullMethod, status.Code(err).String(), int64(g.getPayloadSize(req)), int64(size), start)
return resp, err
}
}
// UnaryClientInterceptor is a gRPC client-side interceptor for Unary RPCs.
func (g *grpcMetrics) UnaryClientInterceptor() func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
start := time.Now()
err := invoker(ctx, method, req, reply, cc, opts...)
var resSize int
if err == nil {
resSize = g.getPayloadSize(reply)
}
if method == appHealthCheckMethod {
g.AppHealthProbeCompleted(ctx, status.Code(err).String(), start)
} else {
g.ClientRequestReceived(ctx, method, status.Code(err).String(), int64(g.getPayloadSize(req)), int64(resSize), start)
}
return err
}
}
// StreamingServerInterceptor is a stream interceptor for gRPC proxying calls that arrive from the application to Dapr
func (g *grpcMetrics) StreamingServerInterceptor() grpc.StreamServerInterceptor {
return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
ctx := ss.Context()
md, _ := metadata.FromIncomingContext(ctx)
vals, ok := md[GRPCProxyAppIDKey]
if !ok || len(vals) == 0 {
return handler(srv, ss)
}
now := time.Now()
err := handler(srv, ss)
g.StreamServerRequestSent(ctx, info.FullMethod, status.Code(err).String(), now)
return err
}
}
// StreamingClientInterceptor is a stream interceptor for gRPC proxying calls that arrive from a remote Dapr sidecar
func (g *grpcMetrics) StreamingClientInterceptor() grpc.StreamServerInterceptor {
return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
ctx := ss.Context()
md, _ := metadata.FromIncomingContext(ctx)
vals, ok := md[GRPCProxyAppIDKey]
if !ok || len(vals) == 0 {
return handler(srv, ss)
}
now := time.Now()
err := handler(srv, ss)
g.StreamClientRequestSent(ctx, info.FullMethod, status.Code(err).String(), now)
return err
}
}
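// Illustrative wiring (a sketch added for clarity, not part of the original
// file; the app ID and target address are hypothetical): the interceptors
// above are normally chained into the gRPC server and client options, roughly:
//
// m := newGRPCMetrics()
// _ = m.Init("myapp")
// srv := grpc.NewServer(
// 	grpc.ChainUnaryInterceptor(m.UnaryServerInterceptor()),
// 	grpc.ChainStreamInterceptor(m.StreamingServerInterceptor()),
// )
// conn, _ := grpc.Dial("localhost:50001", grpc.WithChainUnaryInterceptor(m.UnaryClientInterceptor()))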
|
mikeee/dapr
|
pkg/diagnostics/grpc_monitoring.go
|
GO
|
mit
| 11,408 |
/*
Copyright 2022 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package diagnostics
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.opencensus.io/stats/view"
"google.golang.org/grpc"
grpcMetadata "google.golang.org/grpc/metadata"
"github.com/dapr/dapr/pkg/api/grpc/metadata"
)
type fakeProxyStream struct {
appID string
}
func (f *fakeProxyStream) Context() context.Context {
if f.appID == "" {
return context.Background()
}
ctx := context.Background()
ctx = grpcMetadata.NewIncomingContext(ctx, grpcMetadata.New(map[string]string{GRPCProxyAppIDKey: f.appID}))
ctx, _ = metadata.SetMetadataInTapHandle(ctx, nil)
return ctx
}
func (f *fakeProxyStream) SetHeader(grpcMetadata.MD) error {
return nil
}
func (f *fakeProxyStream) SendHeader(grpcMetadata.MD) error {
return nil
}
func (f *fakeProxyStream) SetTrailer(grpcMetadata.MD) {
}
func (f *fakeProxyStream) SendMsg(m interface{}) error {
return nil
}
func (f *fakeProxyStream) RecvMsg(m interface{}) error {
return nil
}
func TestStreamingServerInterceptor(t *testing.T) {
t.Run("not a proxy request, do not run pipeline", func(t *testing.T) {
m := newGRPCMetrics()
m.Init("test")
i := m.StreamingServerInterceptor()
s := &fakeProxyStream{}
f := func(srv interface{}, stream grpc.ServerStream) error {
return nil
}
err := i(nil, s, &grpc.StreamServerInfo{}, f)
require.NoError(t, err)
rows, err := view.RetrieveData("grpc.io/server/completed_rpcs")
require.NoError(t, err)
assert.Empty(t, rows)
rowsLatency, err := view.RetrieveData("grpc.io/server/server_latency")
require.NoError(t, err)
assert.Empty(t, rowsLatency)
})
t.Run("proxy request, run pipeline", func(t *testing.T) {
m := newGRPCMetrics()
m.Init("test")
i := m.StreamingServerInterceptor()
s := &fakeProxyStream{
appID: "test",
}
f := func(srv interface{}, stream grpc.ServerStream) error {
return nil
}
err := i(nil, s, &grpc.StreamServerInfo{FullMethod: "/appv1.Test"}, f)
require.NoError(t, err)
rows, err := view.RetrieveData("grpc.io/server/completed_rpcs")
require.NoError(t, err)
require.Len(t, rows, 1)
assert.Equal(t, "app_id", rows[0].Tags[0].Key.Name())
assert.Equal(t, "grpc_server_method", rows[0].Tags[1].Key.Name())
assert.Equal(t, "grpc_server_status", rows[0].Tags[2].Key.Name())
rows, err = view.RetrieveData("grpc.io/server/server_latency")
require.NoError(t, err)
require.Len(t, rows, 1)
assert.Equal(t, "app_id", rows[0].Tags[0].Key.Name())
assert.Equal(t, "grpc_server_method", rows[0].Tags[1].Key.Name())
assert.Equal(t, "grpc_server_status", rows[0].Tags[2].Key.Name())
})
}
func TestStreamingClientInterceptor(t *testing.T) {
t.Run("not a proxy request, do not run pipeline", func(t *testing.T) {
m := newGRPCMetrics()
m.Init("test")
i := m.StreamingClientInterceptor()
s := &fakeProxyStream{}
f := func(srv interface{}, stream grpc.ServerStream) error {
return nil
}
err := i(nil, s, &grpc.StreamServerInfo{}, f)
require.NoError(t, err)
rows, err := view.RetrieveData("grpc.io/client/completed_rpcs")
require.NoError(t, err)
assert.Empty(t, rows)
rowsLatency, err := view.RetrieveData("grpc.io/client/roundtrip_latency")
require.NoError(t, err)
assert.Empty(t, rowsLatency)
})
t.Run("proxy request, run pipeline", func(t *testing.T) {
m := newGRPCMetrics()
m.Init("test")
i := m.StreamingClientInterceptor()
s := &fakeProxyStream{
appID: "test",
}
f := func(srv interface{}, stream grpc.ServerStream) error {
return nil
}
err := i(nil, s, &grpc.StreamServerInfo{FullMethod: "/appv1.Test"}, f)
require.NoError(t, err)
rows, err := view.RetrieveData("grpc.io/client/completed_rpcs")
require.NoError(t, err)
assert.Len(t, rows, 1)
assert.Equal(t, "app_id", rows[0].Tags[0].Key.Name())
assert.Equal(t, "grpc_client_method", rows[0].Tags[1].Key.Name())
assert.Equal(t, "grpc_client_status", rows[0].Tags[2].Key.Name())
rowsLatency, err := view.RetrieveData("grpc.io/client/roundtrip_latency")
require.NoError(t, err)
assert.Len(t, rowsLatency, 1)
assert.Equal(t, "app_id", rows[0].Tags[0].Key.Name())
assert.Equal(t, "grpc_client_method", rows[0].Tags[1].Key.Name())
assert.Equal(t, "grpc_client_status", rows[0].Tags[2].Key.Name())
})
}
|
mikeee/dapr
|
pkg/diagnostics/grpc_monitoring_test.go
|
GO
|
mit
| 4,848 |
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package diagnostics
import (
"context"
"fmt"
"strings"
grpcMiddleware "github.com/grpc-ecosystem/go-grpc-middleware"
otelcodes "go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/trace"
"google.golang.org/grpc"
grpcMetadata "google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"github.com/dapr/dapr/pkg/api/grpc/metadata"
"github.com/dapr/dapr/pkg/config"
diagConsts "github.com/dapr/dapr/pkg/diagnostics/consts"
diagUtils "github.com/dapr/dapr/pkg/diagnostics/utils"
internalv1pb "github.com/dapr/dapr/pkg/proto/internals/v1"
)
const (
GRPCTraceContextKey = "grpc-trace-bin"
GRPCProxyAppIDKey = "dapr-app-id"
daprInternalPrefix = "/dapr.proto.internals."
daprRuntimePrefix = "/dapr.proto.runtime."
daprInvokeServiceMethod = "/dapr.proto.runtime.v1.Dapr/InvokeService"
daprCallLocalStreamMethod = "/dapr.proto.internals.v1.ServiceInvocation/CallLocalStream"
daprWorkflowPrefix = "/TaskHubSidecarService"
)
// GRPCTraceUnaryServerInterceptor sets the trace context or starts the trace client span based on request.
func GRPCTraceUnaryServerInterceptor(appID string, spec config.TracingSpec) grpc.UnaryServerInterceptor {
return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
var (
span trace.Span
spanKind trace.SpanStartOption
prefixedMetadata map[string]string
reqSpanAttr map[string]string
)
sc, _ := SpanContextFromIncomingGRPCMetadata(ctx)
// This middleware is shared by the internal gRPC service-invocation path and the Dapr API,
// so the two cases need to be handled separately.
if strings.HasPrefix(info.FullMethod, daprInternalPrefix) {
// For the dapr.proto.internals package, this generates ServerSpan.
// This is invoked by other Dapr runtimes during service invocation.
spanKind = trace.WithSpanKind(trace.SpanKindServer)
} else {
// For the dapr.proto.runtime package, this generates ClientSpan.
// This is invoked by clients (apps) while invoking Dapr APIs.
spanKind = trace.WithSpanKind(trace.SpanKindClient)
}
ctx = trace.ContextWithRemoteSpanContext(ctx, sc)
ctx, span = tracer.Start(ctx, info.FullMethod, spanKind)
resp, err := handler(ctx, req)
if span.SpanContext().IsSampled() {
// users can add dapr- prefix if they want to see the header values in span attributes.
prefixedMetadata = userDefinedMetadata(ctx)
reqSpanAttr = spanAttributesMapFromGRPC(appID, req, info.FullMethod)
// Populates dapr- prefixed header first
for key, value := range reqSpanAttr {
prefixedMetadata[key] = value
}
AddAttributesToSpan(span, prefixedMetadata)
// Correct the span name based on API.
if sname, ok := reqSpanAttr[diagConsts.DaprAPISpanNameInternal]; ok {
span.SetName(sname)
}
}
// Add the grpc-trace-bin header for all non-invocation APIs
if info.FullMethod != daprInvokeServiceMethod {
traceContextBinary := diagUtils.BinaryFromSpanContext(span.SpanContext())
grpc.SetHeader(ctx, grpcMetadata.Pairs(GRPCTraceContextKey, string(traceContextBinary)))
}
UpdateSpanStatusFromGRPCError(span, err)
span.End()
return resp, err
}
}
// GRPCTraceStreamServerInterceptor sets the trace context or starts the trace client span based on request.
// This is used by proxy requests too.
func GRPCTraceStreamServerInterceptor(appID string, spec config.TracingSpec) grpc.StreamServerInterceptor {
return func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
var (
span trace.Span
spanKind trace.SpanStartOption
isProxied bool
)
ctx := ss.Context()
// This middleware is shared by multiple services and proxied requests, which need to be handled separately
switch {
// For gRPC service invocation, this generates ServerSpan
case strings.HasPrefix(info.FullMethod, daprInternalPrefix):
spanKind = trace.WithSpanKind(trace.SpanKindServer)
// For gRPC API, this generates ClientSpan
case strings.HasPrefix(info.FullMethod, daprRuntimePrefix):
spanKind = trace.WithSpanKind(trace.SpanKindClient)
// For Dapr Workflow APIs, this generates ServerSpan
case strings.HasPrefix(info.FullMethod, daprWorkflowPrefix):
spanKind = trace.WithSpanKind(trace.SpanKindServer)
// For proxied requests, this generates a span depending on whether this is the server (target) or client
default:
isProxied = true
md, _ := metadata.FromIncomingContext(ctx)
vals := md.Get(GRPCProxyAppIDKey)
if len(vals) == 0 {
return fmt.Errorf("cannot proxy request: missing %s metadata", GRPCProxyAppIDKey)
}
// vals[0] is the target app ID
if appID == vals[0] {
spanKind = trace.WithSpanKind(trace.SpanKindServer)
} else {
spanKind = trace.WithSpanKind(trace.SpanKindClient)
}
}
// Overwrite context
sc, _ := SpanContextFromIncomingGRPCMetadata(ctx)
ctx = trace.ContextWithRemoteSpanContext(ctx, sc)
ctx, span = tracer.Start(ctx, info.FullMethod, spanKind)
wrapped := grpcMiddleware.WrapServerStream(ss)
wrapped.WrappedContext = ctx
err := handler(srv, wrapped)
if span.SpanContext().IsSampled() {
var (
prefixedMetadata map[string]string
reqSpanAttr map[string]string
)
// users can add dapr- prefix if they want to see the header values in span attributes.
prefixedMetadata = userDefinedMetadata(ctx)
if isProxied {
reqSpanAttr = map[string]string{
diagConsts.DaprAPISpanNameInternal: info.FullMethod,
}
} else {
reqSpanAttr = spanAttributesMapFromGRPC(appID, ss.Context(), info.FullMethod)
}
// Populates dapr- prefixed header first
for key, value := range reqSpanAttr {
prefixedMetadata[key] = value
}
AddAttributesToSpan(span, prefixedMetadata)
// Correct the span name based on API.
if sname, ok := reqSpanAttr[diagConsts.DaprAPISpanNameInternal]; ok {
span.SetName(sname)
}
}
// Add the grpc-trace-bin header for all non-invocation APIs
if !isProxied && info.FullMethod != daprInvokeServiceMethod {
traceContextBinary := diagUtils.BinaryFromSpanContext(span.SpanContext())
grpc.SetHeader(ctx, grpcMetadata.Pairs(GRPCTraceContextKey, string(traceContextBinary)))
}
UpdateSpanStatusFromGRPCError(span, err)
span.End()
return err
}
}
// userDefinedMetadata returns dapr- prefixed header from incoming metadata.
// Users can add dapr- prefixed headers that they want to see in span attributes.
func userDefinedMetadata(ctx context.Context) map[string]string {
md, ok := metadata.FromIncomingContext(ctx)
daprMetadata := make(map[string]string, len(md))
if !ok {
return daprMetadata
}
for k, v := range md {
if strings.HasPrefix(k, daprHeaderPrefix) && !strings.HasSuffix(k, daprHeaderBinSuffix) {
daprMetadata[k] = v[0]
}
}
return daprMetadata
}
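// For illustration (added note, assuming daprHeaderPrefix is "dapr-" and
// daprHeaderBinSuffix is "-bin", both defined elsewhere in this package): an
// incoming metadata entry "dapr-user-id: 42" is returned as
// {"dapr-user-id": "42"}, while a key such as "dapr-token-bin" is skipped
// because binary headers are excluded.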
func StartGRPCProducerSpanChildFromParent(ct context.Context, parentSpan trace.Span, spanName string) (context.Context, trace.Span) {
netCtx := trace.ContextWithRemoteSpanContext(ct, parentSpan.SpanContext())
spanKind := trace.WithSpanKind(trace.SpanKindProducer)
ctx, span := tracer.Start(netCtx, spanName, spanKind)
return ctx, span
}
// UpdateSpanStatusFromGRPCError updates tracer span status based on error object.
func UpdateSpanStatusFromGRPCError(span trace.Span, err error) {
if span == nil || err == nil {
return
}
if e, ok := status.FromError(err); ok {
span.SetStatus(otelcodes.Error, e.Message())
} else {
span.SetStatus(otelcodes.Error, err.Error())
}
}
// SpanContextFromIncomingGRPCMetadata returns the SpanContext stored in incoming metadata of context, or empty if there isn't one.
func SpanContextFromIncomingGRPCMetadata(ctx context.Context) (trace.SpanContext, bool) {
var (
sc trace.SpanContext
md metadata.MD
ok bool
)
if md, ok = metadata.FromIncomingContext(ctx); !ok {
return sc, false
}
traceContext := md[GRPCTraceContextKey]
if len(traceContext) > 0 {
sc, ok = diagUtils.SpanContextFromBinary([]byte(traceContext[0]))
} else {
// Workaround: fall back to checking the traceparent header, since grpc-trace-bin
// is not yet supported in OpenTelemetry (unlike OpenCensus); tracking issue: https://github.com/open-telemetry/opentelemetry-specification/issues/639
// The grpc-dotnet client adheres to the OpenTelemetry spec, which only supports the HTTP-based traceparent header on the gRPC path.
// TODO: Remove this workaround once grpc-dotnet supports the grpc-trace-bin header. Tracking issue: https://github.com/dapr/dapr/issues/1827
traceContext = md[TraceparentHeader]
if len(traceContext) > 0 {
sc, ok = SpanContextFromW3CString(traceContext[0])
if ok && len(md[TracestateHeader]) > 0 {
ts := TraceStateFromW3CString(md[TracestateHeader][0])
sc = sc.WithTraceState(*ts)
}
}
}
return sc, ok
}
// SpanContextToGRPCMetadata appends binary serialized SpanContext to the outgoing GRPC context.
func SpanContextToGRPCMetadata(ctx context.Context, spanContext trace.SpanContext) context.Context {
traceContextBinary := diagUtils.BinaryFromSpanContext(spanContext)
if len(traceContextBinary) == 0 {
return ctx
}
return grpcMetadata.AppendToOutgoingContext(ctx, GRPCTraceContextKey, string(traceContextBinary))
}
// spanAttributesMapFromGRPC builds the span trace attributes map for gRPC calls based on given parameters as per open-telemetry specs.
// RPC Span Attribute reference https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/semantic_conventions/rpc.md .
func spanAttributesMapFromGRPC(appID string, req any, rpcMethod string) map[string]string {
// Allocating this map with an initial capacity of 8 which seems to be the "worst case" scenario due to possible unique keys below (note this is an initial capacity and not a hard limit).
// Using an explicit capacity reduces the risk the map will need to be re-allocated multiple times.
m := make(map[string]string, 8)
switch s := req.(type) {
// Context from a server stream
// This is a special case that is used for streaming requests
case context.Context:
md, ok := metadata.FromIncomingContext(s)
if !ok {
break
}
switch rpcMethod {
// Internal service invocation request (with streaming)
case daprCallLocalStreamMethod:
m[diagConsts.GrpcServiceSpanAttributeKey] = diagConsts.DaprGRPCServiceInvocationService
var method string
if len(md[diagConsts.DaprCallLocalStreamMethodKey]) > 0 {
method = md[diagConsts.DaprCallLocalStreamMethodKey][0]
}
m[diagConsts.DaprAPISpanNameInternal] = "CallLocal/" + appID + "/" + method
m[diagConsts.DaprAPIInvokeMethod] = method
}
// Internal service invocation request
case *internalv1pb.InternalInvokeRequest:
m[diagConsts.GrpcServiceSpanAttributeKey] = diagConsts.DaprGRPCServiceInvocationService
// Rename spanname
if s.GetActor() == nil {
m[diagConsts.DaprAPISpanNameInternal] = "CallLocal/" + appID + "/" + s.GetMessage().GetMethod()
m[diagConsts.DaprAPIInvokeMethod] = s.GetMessage().GetMethod()
} else {
m[diagConsts.DaprAPISpanNameInternal] = "CallActor/" + s.GetActor().GetActorType() + "/" + s.GetMessage().GetMethod()
m[diagConsts.DaprAPIActorTypeID] = s.GetActor().GetActorType() + "." + s.GetActor().GetActorId()
}
// Dapr APIs
case diagConsts.GrpcAppendSpanAttributesFn:
s.AppendSpanAttributes(rpcMethod, m)
}
m[diagConsts.DaprAPIProtocolSpanAttributeKey] = diagConsts.DaprAPIGRPCSpanAttrValue
m[diagConsts.DaprAPISpanAttributeKey] = rpcMethod
return m
}
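// Illustrative sketch (not part of the original file): for an internal service invocation
// request whose Message.Method is "mymethod", the resulting map is expected to contain roughly:
//
//	m := spanAttributesMapFromGRPC("myapp", req, "/dapr.proto.internals.v1.ServiceInvocation/CallLocal")
//	// m[diagConsts.GrpcServiceSpanAttributeKey] == "ServiceInvocation"
//	// m[diagConsts.DaprAPISpanNameInternal]     == "CallLocal/myapp/mymethod"
//	// m[diagConsts.DaprAPISpanAttributeKey]     == "/dapr.proto.internals.v1.ServiceInvocation/CallLocal"
//
// "myapp" and req (an *internalv1pb.InternalInvokeRequest) are placeholders for illustration only.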
| mikeee/dapr | pkg/diagnostics/grpc_tracing.go | GO | mit | 12,084 |
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package diagnostics
import (
"context"
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel"
otelcodes "go.opentelemetry.io/otel/codes"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
"go.opentelemetry.io/otel/trace"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
grpcMetadata "google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"github.com/dapr/dapr/pkg/api/grpc/metadata"
"github.com/dapr/dapr/pkg/config"
diagConsts "github.com/dapr/dapr/pkg/diagnostics/consts"
diagUtils "github.com/dapr/dapr/pkg/diagnostics/utils"
commonv1pb "github.com/dapr/dapr/pkg/proto/common/v1"
internalv1pb "github.com/dapr/dapr/pkg/proto/internals/v1"
runtimev1pb "github.com/dapr/dapr/pkg/proto/runtime/v1"
)
func TestSpanAttributesMapFromGRPC(t *testing.T) {
tests := []struct {
rpcMethod string
req any
expectedServiceNameAttribute string
expectedCustomAttribute string
}{
{"/dapr.proto.runtime.v1.Dapr/InvokeService", &runtimev1pb.InvokeServiceRequest{Message: &commonv1pb.InvokeRequest{Method: "mymethod"}}, "ServiceInvocation", "mymethod"},
{"/dapr.proto.runtime.v1.Dapr/GetState", &runtimev1pb.GetStateRequest{StoreName: "mystore"}, "Dapr", "mystore"},
{"/dapr.proto.runtime.v1.Dapr/SaveState", &runtimev1pb.SaveStateRequest{StoreName: "mystore"}, "Dapr", "mystore"},
{"/dapr.proto.runtime.v1.Dapr/DeleteState", &runtimev1pb.DeleteStateRequest{StoreName: "mystore"}, "Dapr", "mystore"},
{"/dapr.proto.runtime.v1.Dapr/GetSecret", &runtimev1pb.GetSecretRequest{StoreName: "mysecretstore"}, "Dapr", "mysecretstore"},
{"/dapr.proto.runtime.v1.Dapr/InvokeBinding", &runtimev1pb.InvokeBindingRequest{Name: "mybindings"}, "Dapr", "mybindings"},
{"/dapr.proto.runtime.v1.Dapr/PublishEvent", &runtimev1pb.PublishEventRequest{Topic: "mytopic"}, "Dapr", "mytopic"},
{"/dapr.proto.runtime.v1.Dapr/BulkPublishEventAlpha1", &runtimev1pb.BulkPublishRequest{Topic: "mytopic"}, "Dapr", "mytopic"},
// Expecting ServiceInvocation because this call will be treated as client call of service invocation.
{"/dapr.proto.internals.v1.ServiceInvocation/CallLocal", &internalv1pb.InternalInvokeRequest{Message: &commonv1pb.InvokeRequest{Method: "mymethod"}}, "ServiceInvocation", "mymethod"},
}
for _, tt := range tests {
t.Run(tt.rpcMethod, func(t *testing.T) {
got := spanAttributesMapFromGRPC("fakeAppID", tt.req, tt.rpcMethod)
assert.Equal(t, tt.expectedServiceNameAttribute, got[diagConsts.GrpcServiceSpanAttributeKey], "servicename attribute should be equal")
})
}
}
func TestUserDefinedMetadata(t *testing.T) {
md := grpcMetadata.MD{
"dapr-userdefined-1": []string{"value1"},
"DAPR-userdefined-2": []string{"value2", "value3"}, // Will be lowercased
"no-attr": []string{"value3"},
}
testCtx := grpcMetadata.NewIncomingContext(context.Background(), md)
metadata.SetMetadataInContextUnary(testCtx, nil, nil, func(ctx context.Context, req any) (any, error) {
testCtx = ctx
return nil, nil
})
m := userDefinedMetadata(testCtx)
assert.Len(t, m, 2)
assert.Equal(t, "value1", m["dapr-userdefined-1"])
assert.Equal(t, "value2", m["dapr-userdefined-2"])
}
func TestSpanContextToGRPCMetadata(t *testing.T) {
t.Run("empty span context", func(t *testing.T) {
ctx := context.Background()
newCtx := SpanContextToGRPCMetadata(ctx, trace.SpanContext{})
assert.Equal(t, ctx, newCtx)
})
}
func TestGRPCTraceUnaryServerInterceptor(t *testing.T) {
exp := newOtelFakeExporter()
tp := sdktrace.NewTracerProvider(
sdktrace.WithBatcher(exp),
)
defer func() { _ = tp.Shutdown(context.Background()) }()
otel.SetTracerProvider(tp)
interceptor := GRPCTraceUnaryServerInterceptor("fakeAppID", config.TracingSpec{SamplingRate: "1"})
testTraceParent := "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01"
testSpanContext, _ := SpanContextFromW3CString(testTraceParent)
testTraceBinary := diagUtils.BinaryFromSpanContext(testSpanContext)
t.Run("grpc-trace-bin is given", func(t *testing.T) {
ctx := grpcMetadata.NewIncomingContext(context.Background(), grpcMetadata.Pairs("grpc-trace-bin", string(testTraceBinary)))
fakeInfo := &grpc.UnaryServerInfo{
FullMethod: "/dapr.proto.runtime.v1.Dapr/GetState",
}
fakeReq := &runtimev1pb.GetStateRequest{
StoreName: "statestore",
Key: "state",
}
var span trace.Span
assertHandler := func(ctx context.Context, req any) (any, error) {
span = diagUtils.SpanFromContext(ctx)
return nil, errors.New("fake error")
}
metadata.SetMetadataInContextUnary(ctx, fakeReq, fakeInfo, func(ctx context.Context, req any) (any, error) {
return interceptor(ctx, fakeReq, fakeInfo, assertHandler)
})
sc := span.SpanContext()
traceID := sc.TraceID()
assert.Equal(t, "4bf92f3577b34da6a3ce929d0e0e4736", hex.EncodeToString(traceID[:]))
spanID := sc.SpanID()
assert.NotEqual(t, "00f067aa0ba902b7", hex.EncodeToString(spanID[:]))
})
t.Run("grpc-trace-bin is not given", func(t *testing.T) {
fakeInfo := &grpc.UnaryServerInfo{
FullMethod: "/dapr.proto.runtime.v1.Dapr/GetState",
}
fakeReq := &runtimev1pb.GetStateRequest{
StoreName: "statestore",
Key: "state",
}
var span trace.Span
assertHandler := func(ctx context.Context, req any) (any, error) {
span = diagUtils.SpanFromContext(ctx)
return nil, errors.New("fake error")
}
interceptor(context.Background(), fakeReq, fakeInfo, assertHandler)
sc := span.SpanContext()
traceID := sc.TraceID()
spanID := sc.SpanID()
assert.NotEmpty(t, hex.EncodeToString(traceID[:]))
assert.NotEmpty(t, hex.EncodeToString(spanID[:]))
})
t.Run("InvokeService call", func(t *testing.T) {
fakeInfo := &grpc.UnaryServerInfo{
FullMethod: "/dapr.proto.runtime.v1.Dapr/InvokeService",
}
fakeReq := &runtimev1pb.InvokeServiceRequest{
Id: "targetID",
Message: &commonv1pb.InvokeRequest{Method: "method1"},
}
var span trace.Span
assertHandler := func(ctx context.Context, req any) (any, error) {
span = diagUtils.SpanFromContext(ctx)
return nil, errors.New("fake error")
}
interceptor(context.Background(), fakeReq, fakeInfo, assertHandler)
sc := span.SpanContext()
spanString := fmt.Sprintf("%v", span)
assert.True(t, strings.Contains(spanString, "CallLocal/targetID/method1"))
traceID := sc.TraceID()
spanID := sc.SpanID()
assert.NotEmpty(t, hex.EncodeToString(traceID[:]))
assert.NotEmpty(t, hex.EncodeToString(spanID[:]))
})
t.Run("InvokeService call with grpc status error", func(t *testing.T) {
// set a new tracer provider with a callback on span completion to check that the span errors out
checkErrorStatusOnSpan := func(s sdktrace.ReadOnlySpan) {
assert.Equal(t, otelcodes.Error, s.Status().Code, "expected span status to be an error")
}
tp := sdktrace.NewTracerProvider(
sdktrace.WithBatcher(exp),
sdktrace.WithSpanProcessor(newOtelFakeSpanProcessor(checkErrorStatusOnSpan)),
)
oldTracerProvider := otel.GetTracerProvider()
defer func() {
_ = tp.Shutdown(context.Background())
// reset tracer provider to older one once the test completes
otel.SetTracerProvider(oldTracerProvider)
}()
otel.SetTracerProvider(tp)
fakeInfo := &grpc.UnaryServerInfo{
FullMethod: "/dapr.proto.runtime.v1.Dapr/InvokeService",
}
fakeReq := &runtimev1pb.InvokeServiceRequest{
Id: "targetID",
Message: &commonv1pb.InvokeRequest{Method: "method1"},
}
var span trace.Span
assertHandler := func(ctx context.Context, req any) (any, error) {
span = diagUtils.SpanFromContext(ctx)
// mocking an error that is returned from the gRPC API -- see pkg/grpc/api.go file
return nil, status.Error(codes.Internal, errors.New("fake status error").Error())
}
interceptor(context.Background(), fakeReq, fakeInfo, assertHandler)
sc := span.SpanContext()
spanString := fmt.Sprintf("%v", span)
assert.True(t, strings.Contains(spanString, "CallLocal/targetID/method1"))
traceID := sc.TraceID()
spanID := sc.SpanID()
assert.NotEmpty(t, hex.EncodeToString(traceID[:]))
assert.NotEmpty(t, hex.EncodeToString(spanID[:]))
})
}
func TestGRPCTraceStreamServerInterceptor(t *testing.T) {
exp := newOtelFakeExporter()
tp := sdktrace.NewTracerProvider(
sdktrace.WithBatcher(exp),
)
defer func() { _ = tp.Shutdown(context.Background()) }()
otel.SetTracerProvider(tp)
interceptor := GRPCTraceStreamServerInterceptor("test", config.TracingSpec{SamplingRate: "1"})
testTraceParent := "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01"
testSpanContext, _ := SpanContextFromW3CString(testTraceParent)
testTraceBinary := diagUtils.BinaryFromSpanContext(testSpanContext)
t.Run("dapr runtime calls", func(t *testing.T) {
t.Run("base test", func(t *testing.T) {
fakeInfo := &grpc.StreamServerInfo{
FullMethod: "/dapr.proto.runtime.v1.Dapr/GetState",
}
h := func(srv any, stream grpc.ServerStream) error {
return nil
}
err := interceptor(nil, &fakeStream{}, fakeInfo, h)
require.NoError(t, err)
})
t.Run("grpc-trace-bin is given", func(t *testing.T) {
fakeInfo := &grpc.StreamServerInfo{
FullMethod: "/dapr.proto.runtime.v1.Dapr/GetState",
}
ctx := grpcMetadata.NewIncomingContext(context.Background(), grpcMetadata.Pairs("grpc-trace-bin", string(testTraceBinary)))
ctx, _ = metadata.SetMetadataInTapHandle(ctx, nil)
var span trace.Span
assertHandler := func(srv any, stream grpc.ServerStream) error {
span = diagUtils.SpanFromContext(stream.Context())
return errors.New("fake error")
}
interceptor(nil, &fakeStream{ctx}, fakeInfo, assertHandler)
sc := span.SpanContext()
traceID := sc.TraceID()
assert.Equal(t, "4bf92f3577b34da6a3ce929d0e0e4736", hex.EncodeToString(traceID[:]))
spanID := sc.SpanID()
assert.NotEqual(t, "00f067aa0ba902b7", hex.EncodeToString(spanID[:]))
})
t.Run("grpc-trace-bin is not given", func(t *testing.T) {
fakeInfo := &grpc.StreamServerInfo{
FullMethod: "/dapr.proto.runtime.v1.Dapr/GetState",
}
var span trace.Span
assertHandler := func(srv any, stream grpc.ServerStream) error {
span = diagUtils.SpanFromContext(stream.Context())
return errors.New("fake error")
}
interceptor(nil, &fakeStream{}, fakeInfo, assertHandler)
sc := span.SpanContext()
traceID := sc.TraceID()
spanID := sc.SpanID()
assert.NotEmpty(t, hex.EncodeToString(traceID[:]))
assert.NotEmpty(t, hex.EncodeToString(spanID[:]))
})
})
t.Run("internal calls", func(t *testing.T) {
t.Run("base test", func(t *testing.T) {
fakeInfo := &grpc.StreamServerInfo{
FullMethod: "/dapr.proto.internals.v1.ServiceInvocation/CallLocal",
}
h := func(srv any, stream grpc.ServerStream) error {
return nil
}
err := interceptor(nil, &fakeStream{}, fakeInfo, h)
require.NoError(t, err)
})
t.Run("grpc-trace-bin is given", func(t *testing.T) {
fakeInfo := &grpc.StreamServerInfo{
FullMethod: "/dapr.proto.internals.v1.ServiceInvocation/CallLocal",
}
ctx := grpcMetadata.NewIncomingContext(context.Background(), grpcMetadata.Pairs("grpc-trace-bin", string(testTraceBinary)))
ctx, _ = metadata.SetMetadataInTapHandle(ctx, nil)
var span trace.Span
assertHandler := func(srv any, stream grpc.ServerStream) error {
span = diagUtils.SpanFromContext(stream.Context())
return errors.New("fake error")
}
interceptor(nil, &fakeStream{ctx}, fakeInfo, assertHandler)
sc := span.SpanContext()
traceID := sc.TraceID()
assert.Equal(t, "4bf92f3577b34da6a3ce929d0e0e4736", hex.EncodeToString(traceID[:]))
spanID := sc.SpanID()
assert.NotEqual(t, "00f067aa0ba902b7", hex.EncodeToString(spanID[:]))
})
t.Run("grpc-trace-bin is not given", func(t *testing.T) {
fakeInfo := &grpc.StreamServerInfo{
FullMethod: "/dapr.proto.internals.v1.ServiceInvocation/CallLocal",
}
var span trace.Span
assertHandler := func(srv any, stream grpc.ServerStream) error {
span = diagUtils.SpanFromContext(stream.Context())
return errors.New("fake error")
}
interceptor(nil, &fakeStream{}, fakeInfo, assertHandler)
sc := span.SpanContext()
traceID := sc.TraceID()
spanID := sc.SpanID()
assert.NotEmpty(t, hex.EncodeToString(traceID[:]))
assert.NotEmpty(t, hex.EncodeToString(spanID[:]))
})
})
t.Run("proxy requests", func(t *testing.T) {
t.Run("proxy request without app id, return error", func(t *testing.T) {
fakeInfo := &grpc.StreamServerInfo{
FullMethod: "/myapp.v1.DoSomething",
}
err := interceptor(nil, &fakeStream{}, fakeInfo, nil)
require.Error(t, err)
})
t.Run("proxy request with app id and grpc-trace-bin", func(t *testing.T) {
fakeInfo := &grpc.StreamServerInfo{
FullMethod: "/myapp.v1.DoSomething",
}
md := grpcMetadata.New(map[string]string{
GRPCProxyAppIDKey: "myapp",
"grpc-trace-bin": string(testTraceBinary),
})
ctx := grpcMetadata.NewIncomingContext(context.Background(), md)
ctx, _ = metadata.SetMetadataInTapHandle(ctx, nil)
var span trace.Span
assertHandler := func(srv any, stream grpc.ServerStream) error {
span = diagUtils.SpanFromContext(stream.Context())
return nil
}
err := interceptor(nil, &fakeStream{ctx}, fakeInfo, assertHandler)
require.NoError(t, err)
sc := span.SpanContext()
traceID := sc.TraceID()
assert.Equal(t, "4bf92f3577b34da6a3ce929d0e0e4736", hex.EncodeToString(traceID[:]))
spanID := sc.SpanID()
assert.NotEqual(t, "00f067aa0ba902b7", hex.EncodeToString(spanID[:]))
})
t.Run("proxy request with app id and no grpc-trace-bin", func(t *testing.T) {
fakeInfo := &grpc.StreamServerInfo{
FullMethod: "/myapp.v1.DoSomething",
}
md := grpcMetadata.New(map[string]string{
GRPCProxyAppIDKey: "myapp",
})
ctx := grpcMetadata.NewIncomingContext(context.Background(), md)
ctx, _ = metadata.SetMetadataInTapHandle(ctx, nil)
var span trace.Span
assertHandler := func(srv any, stream grpc.ServerStream) error {
span = diagUtils.SpanFromContext(stream.Context())
return nil
}
err := interceptor(nil, &fakeStream{ctx}, fakeInfo, assertHandler)
require.NoError(t, err)
sc := span.SpanContext()
traceID := sc.TraceID()
spanID := sc.SpanID()
assert.NotEmpty(t, hex.EncodeToString(traceID[:]))
assert.NotEmpty(t, hex.EncodeToString(spanID[:]))
})
})
}
type fakeStream struct {
ctx context.Context
}
func (f *fakeStream) Context() context.Context {
if f.ctx == nil {
return context.Background()
}
return f.ctx
}
func (f *fakeStream) SetHeader(grpcMetadata.MD) error {
return nil
}
func (f *fakeStream) SendHeader(grpcMetadata.MD) error {
return nil
}
func (f *fakeStream) SetTrailer(grpcMetadata.MD) {
}
func (f *fakeStream) SendMsg(m any) error {
return nil
}
func (f *fakeStream) RecvMsg(m any) error {
return nil
}
func TestSpanContextSerialization(t *testing.T) {
wantScConfig := trace.SpanContextConfig{
TraceID: trace.TraceID{75, 249, 47, 53, 119, 179, 77, 166, 163, 206, 146, 157, 14, 14, 71, 54},
SpanID: trace.SpanID{0, 240, 103, 170, 11, 169, 2, 183},
TraceFlags: trace.TraceFlags(1),
}
wantSc := trace.NewSpanContext(wantScConfig)
passedOverWire := diagUtils.BinaryFromSpanContext(wantSc)
storedInDapr := base64.StdEncoding.EncodeToString(passedOverWire)
decoded, _ := base64.StdEncoding.DecodeString(storedInDapr)
gotSc, _ := diagUtils.SpanContextFromBinary(decoded)
assert.Equal(t, wantSc, gotSc)
}
| mikeee/dapr | pkg/diagnostics/grpc_tracing_test.go | GO | mit | 16,169 |
/*
Copyright 2024 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package diagnostics
import (
"context"
"net/http"
"strconv"
"time"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"github.com/dapr/dapr/pkg/api/http/endpoints"
"github.com/dapr/dapr/pkg/config"
diagUtils "github.com/dapr/dapr/pkg/diagnostics/utils"
"github.com/dapr/dapr/pkg/responsewriter"
"github.com/dapr/kit/logger"
)
// To track the metrics for fasthttp using opencensus, this implementation is inspired by
// https://github.com/census-instrumentation/opencensus-go/tree/master/plugin/ochttp
// Tag key definitions for http requests.
var (
httpStatusCodeKey = tag.MustNewKey("status")
httpPathKey = tag.MustNewKey("path")
httpMethodKey = tag.MustNewKey("method")
log = logger.NewLogger("dapr.runtime.diagnostics")
)
var (
// <<10 -> KBs; <<20 -> MBs; <<30 -> GBs
defaultSizeDistribution = view.Distribution(1<<10, 2<<10, 4<<10, 16<<10, 64<<10, 256<<10, 1<<20, 4<<20, 16<<20, 64<<20, 256<<20, 1<<30, 4<<30)
defaultLatencyDistribution = view.Distribution(1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1_000, 2_000, 5_000, 10_000, 20_000, 50_000, 100_000)
)
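// Note (added for clarity): the size distribution buckets above are in bytes
// (1<<10 = 1 KiB, 1<<20 = 1 MiB, 1<<30 = 1 GiB) and the latency distribution
// buckets are in milliseconds, matching the units of the measures defined below.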
type httpMetrics struct {
serverRequestBytes *stats.Int64Measure
serverResponseBytes *stats.Int64Measure
serverLatency *stats.Float64Measure
serverRequestCount *stats.Int64Measure
serverResponseCount *stats.Int64Measure
clientSentBytes *stats.Int64Measure
clientReceivedBytes *stats.Int64Measure
clientRoundtripLatency *stats.Float64Measure
clientCompletedCount *stats.Int64Measure
healthProbeCompletedCount *stats.Int64Measure
healthProbeRoundTripLatency *stats.Float64Measure
appID string
enabled bool
// Enable legacy metrics, which include the full path
legacy bool
ingress *pathMatching
egress *pathMatching
}
func newHTTPMetrics() *httpMetrics {
return &httpMetrics{
serverRequestBytes: stats.Int64(
"http/server/request_bytes",
"HTTP request body size if set as ContentLength (uncompressed) in server.",
stats.UnitBytes),
serverResponseBytes: stats.Int64(
"http/server/response_bytes",
"HTTP response body size (uncompressed) in server.",
stats.UnitBytes),
serverLatency: stats.Float64(
"http/server/latency",
"HTTP request end-to-end latency in server.",
stats.UnitMilliseconds),
serverRequestCount: stats.Int64(
"http/server/request_count",
"Count of HTTP requests processed by the server.",
stats.UnitDimensionless),
serverResponseCount: stats.Int64(
"http/server/response_count",
"The number of HTTP responses",
stats.UnitDimensionless),
clientSentBytes: stats.Int64(
"http/client/sent_bytes",
"Total bytes sent in request body (not including headers)",
stats.UnitBytes),
clientReceivedBytes: stats.Int64(
"http/client/received_bytes",
"Total bytes received in response bodies (not including headers but including error responses with bodies)",
stats.UnitBytes),
clientRoundtripLatency: stats.Float64(
"http/client/roundtrip_latency",
"Time between first byte of request headers sent to last byte of response received, or terminal error",
stats.UnitMilliseconds),
clientCompletedCount: stats.Int64(
"http/client/completed_count",
"Count of completed requests",
stats.UnitDimensionless),
healthProbeCompletedCount: stats.Int64(
"http/healthprobes/completed_count",
"Count of completed health probes",
stats.UnitDimensionless),
healthProbeRoundTripLatency: stats.Float64(
"http/healthprobes/roundtrip_latency",
"Time between first byte of health probes headers sent to last byte of response received, or terminal error",
stats.UnitMilliseconds),
enabled: false,
}
}
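// IsEnabled returns true when HTTP metrics collection has been initialized and enabled.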
func (h *httpMetrics) IsEnabled() bool {
return h != nil && h.enabled
}
func (h *httpMetrics) ServerRequestCompleted(ctx context.Context, method, path, status string, reqContentSize, resContentSize int64, elapsed float64) {
if !h.IsEnabled() {
return
}
matchedPath, ok := h.egress.matchPath(path)
if ok {
path = matchedPath
}
if h.legacy || h.egress.enabled() {
stats.RecordWithTags(
ctx,
diagUtils.WithTags(h.serverRequestCount.Name(), appIDKey, h.appID, httpMethodKey, method, httpPathKey, path, httpStatusCodeKey, status),
h.serverRequestCount.M(1))
stats.RecordWithTags(
ctx,
diagUtils.WithTags(h.serverLatency.Name(), appIDKey, h.appID, httpMethodKey, method, httpPathKey, path, httpStatusCodeKey, status),
h.serverLatency.M(elapsed))
stats.RecordWithTags(
ctx,
diagUtils.WithTags(h.serverResponseCount.Name(), appIDKey, h.appID, httpPathKey, path, httpMethodKey, method, httpStatusCodeKey, status),
h.serverResponseCount.M(1))
} else {
stats.RecordWithTags(
ctx,
diagUtils.WithTags(h.serverRequestCount.Name(), appIDKey, h.appID, httpMethodKey, method, httpStatusCodeKey, status),
h.serverRequestCount.M(1))
stats.RecordWithTags(
ctx,
diagUtils.WithTags(h.serverLatency.Name(), appIDKey, h.appID, httpMethodKey, method, httpStatusCodeKey, status),
h.serverLatency.M(elapsed))
}
stats.RecordWithTags(
ctx, diagUtils.WithTags(h.serverRequestBytes.Name(), appIDKey, h.appID),
h.serverRequestBytes.M(reqContentSize))
stats.RecordWithTags(
ctx, diagUtils.WithTags(h.serverResponseBytes.Name(), appIDKey, h.appID),
h.serverResponseBytes.M(resContentSize))
}
func (h *httpMetrics) ClientRequestStarted(ctx context.Context, method, path string, contentSize int64) {
if !h.IsEnabled() {
return
}
matchedPath, ok := h.ingress.matchPath(path)
if ok {
path = matchedPath
}
if h.legacy || h.ingress.enabled() {
stats.RecordWithTags(
ctx,
diagUtils.WithTags(h.clientSentBytes.Name(), appIDKey, h.appID, httpPathKey, h.convertPathToMetricLabel(path), httpMethodKey, method),
h.clientSentBytes.M(contentSize))
} else {
stats.RecordWithTags(
ctx,
diagUtils.WithTags(h.clientSentBytes.Name(), appIDKey, h.appID),
h.clientSentBytes.M(contentSize))
}
}
func (h *httpMetrics) ClientRequestCompleted(ctx context.Context, method, path, status string, contentSize int64, elapsed float64) {
if !h.IsEnabled() {
return
}
matchedPath, ok := h.ingress.matchPath(path)
if ok {
path = matchedPath
}
if h.legacy || h.ingress.enabled() {
stats.RecordWithTags(
ctx,
diagUtils.WithTags(h.clientCompletedCount.Name(), appIDKey, h.appID, httpPathKey, h.convertPathToMetricLabel(path), httpMethodKey, method, httpStatusCodeKey, status),
h.clientCompletedCount.M(1))
stats.RecordWithTags(
ctx,
diagUtils.WithTags(h.clientRoundtripLatency.Name(), appIDKey, h.appID, httpPathKey, h.convertPathToMetricLabel(path), httpMethodKey, method, httpStatusCodeKey, status),
h.clientRoundtripLatency.M(elapsed))
} else {
stats.RecordWithTags(
ctx,
diagUtils.WithTags(h.clientCompletedCount.Name(), appIDKey, h.appID, httpStatusCodeKey, status),
h.clientCompletedCount.M(1))
stats.RecordWithTags(
ctx,
diagUtils.WithTags(h.clientRoundtripLatency.Name(), appIDKey, h.appID, httpStatusCodeKey, status),
h.clientRoundtripLatency.M(elapsed))
}
stats.RecordWithTags(
ctx, diagUtils.WithTags(h.clientReceivedBytes.Name(), appIDKey, h.appID),
h.clientReceivedBytes.M(contentSize))
}
func (h *httpMetrics) AppHealthProbeStarted(ctx context.Context) {
if !h.IsEnabled() {
return
}
stats.RecordWithTags(ctx, diagUtils.WithTags("", appIDKey, h.appID))
}
func (h *httpMetrics) AppHealthProbeCompleted(ctx context.Context, status string, elapsed float64) {
if !h.IsEnabled() {
return
}
stats.RecordWithTags(
ctx,
diagUtils.WithTags(h.healthProbeCompletedCount.Name(), appIDKey, h.appID, httpStatusCodeKey, status),
h.healthProbeCompletedCount.M(1))
stats.RecordWithTags(
ctx,
diagUtils.WithTags(h.healthProbeRoundTripLatency.Name(), appIDKey, h.appID, httpStatusCodeKey, status),
h.healthProbeRoundTripLatency.M(elapsed))
}
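// Init registers the OpenCensus views for the HTTP metrics and configures path matching and legacy (increased-cardinality) mode.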
func (h *httpMetrics) Init(appID string, config *config.PathMatching, legacy bool) error {
h.appID = appID
h.enabled = true
h.legacy = legacy
if config != nil {
h.ingress = newPathMatching(config.IngressPaths, legacy)
h.egress = newPathMatching(config.EgressPaths, legacy)
}
tags := []tag.Key{appIDKey}
// In legacy mode, we are aggregating based on the path too
var serverTags, clientTags []tag.Key
if h.legacy {
serverTags = []tag.Key{appIDKey, httpMethodKey, httpPathKey, httpStatusCodeKey}
clientTags = []tag.Key{appIDKey, httpMethodKey, httpPathKey, httpStatusCodeKey}
} else {
serverTags = []tag.Key{appIDKey, httpMethodKey, httpStatusCodeKey}
clientTags = []tag.Key{appIDKey, httpStatusCodeKey}
if h.ingress.enabled() {
serverTags = append(serverTags, httpPathKey)
}
if h.egress.enabled() {
clientTags = append(clientTags, httpPathKey, httpMethodKey)
}
}
views := []*view.View{
diagUtils.NewMeasureView(h.serverRequestBytes, tags, defaultSizeDistribution),
diagUtils.NewMeasureView(h.serverResponseBytes, tags, defaultSizeDistribution),
diagUtils.NewMeasureView(h.serverLatency, serverTags, defaultLatencyDistribution),
diagUtils.NewMeasureView(h.serverRequestCount, serverTags, view.Count()),
diagUtils.NewMeasureView(h.clientSentBytes, clientTags, defaultSizeDistribution),
diagUtils.NewMeasureView(h.clientReceivedBytes, tags, defaultSizeDistribution),
diagUtils.NewMeasureView(h.clientRoundtripLatency, clientTags, defaultLatencyDistribution),
diagUtils.NewMeasureView(h.clientCompletedCount, clientTags, view.Count()),
diagUtils.NewMeasureView(h.healthProbeRoundTripLatency, []tag.Key{appIDKey, httpStatusCodeKey}, defaultLatencyDistribution),
diagUtils.NewMeasureView(h.healthProbeCompletedCount, []tag.Key{appIDKey, httpStatusCodeKey}, view.Count()),
}
if h.legacy {
views = append(views, diagUtils.NewMeasureView(h.serverResponseCount, serverTags, view.Count()))
}
return view.Register(views...)
}
// HTTPMiddleware is the middleware to track HTTP server-side requests.
func (h *httpMetrics) HTTPMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var reqContentSize int64
if cl := r.Header.Get("content-length"); cl != "" {
reqContentSize, _ = strconv.ParseInt(cl, 10, 64)
if reqContentSize < 0 {
reqContentSize = 0
}
}
var path string
if h.legacy || h.egress.enabled() {
path = h.convertPathToMetricLabel(r.URL.Path)
}
// Wrap the writer in a ResponseWriter so we can collect stats such as status code and size
rw := responsewriter.EnsureResponseWriter(w)
// Process the request
start := time.Now()
next.ServeHTTP(rw, r)
elapsed := float64(time.Since(start) / time.Millisecond)
status := strconv.Itoa(rw.Status())
respSize := int64(rw.Size())
var method string
if h.legacy || h.egress.enabled() {
method = r.Method
} else {
// Check if the context contains a MethodName method
endpointData, _ := r.Context().Value(endpoints.EndpointCtxKey{}).(*endpoints.EndpointCtxData)
method = endpointData.GetEndpointName()
if endpointData != nil && endpointData.Group != nil && endpointData.Group.MethodName != nil {
method = endpointData.Group.MethodName(r)
}
}
// Record the request
h.ServerRequestCompleted(r.Context(), method, path, status, reqContentSize, respSize, elapsed)
})
}
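// Illustrative sketch (not part of the original file): a caller would typically initialize the
// metrics once and then wrap its HTTP handler with the middleware, roughly like:
//
//	m := newHTTPMetrics()
//	if err := m.Init("myapp", nil, false); err != nil {
//		// handle the error
//	}
//	handler := m.HTTPMiddleware(mux)
//
// "myapp" and mux are placeholders for illustration only.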
| mikeee/dapr | pkg/diagnostics/http_monitoring.go | GO | mit | 11,895 |
/*
Copyright 2024 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package diagnostics
import (
"fmt"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
"time"
"github.com/dapr/dapr/pkg/config"
)
const (
requestBody = "fake_requestDaprBody"
responseBody = "fake_responseDaprBody"
)
func BenchmarkHTTPMiddlewareLowCardinalityNoPathMatching(b *testing.B) {
testHTTP := newHTTPMetrics()
pathMatching := &config.PathMatching{}
testHTTP.Init("fakeID", pathMatching, false)
handler := testHTTP.HTTPMiddleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
time.Sleep(5 * time.Millisecond)
w.Write([]byte(responseBody))
}))
// act
for i := 0; i < b.N; i++ {
testRequest := fakeOrdersHTTPRequest(requestBody, i)
handler.ServeHTTP(httptest.NewRecorder(), testRequest)
}
}
func BenchmarkHTTPMiddlewareHighCardinalityNoPathMatching(b *testing.B) {
testHTTP := newHTTPMetrics()
pathMatching := &config.PathMatching{}
testHTTP.Init("fakeID", pathMatching, true)
handler := testHTTP.HTTPMiddleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
time.Sleep(5 * time.Millisecond)
w.Write([]byte(responseBody))
}))
// act
for i := 0; i < b.N; i++ {
testRequest := fakeOrdersHTTPRequest(requestBody, i)
handler.ServeHTTP(httptest.NewRecorder(), testRequest)
}
}
func BenchmarkHTTPMiddlewareLowCardinalityWithPathMatching(b *testing.B) {
testHTTP := newHTTPMetrics()
pathMatching := &config.PathMatching{
IngressPaths: []string{
"/invoke/method/orders/{orderID}",
},
EgressPaths: []string{
"/invoke/method/orders/{orderID}",
},
}
testHTTP.Init("fakeID", pathMatching, false)
handler := testHTTP.HTTPMiddleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
time.Sleep(5 * time.Millisecond)
w.Write([]byte(responseBody))
}))
// act
for i := 0; i < b.N; i++ {
testRequest := fakeOrdersHTTPRequest(requestBody, i)
handler.ServeHTTP(httptest.NewRecorder(), testRequest)
}
}
func BenchmarkHTTPMiddlewareHighCardinalityWithPathMatching(b *testing.B) {
testHTTP := newHTTPMetrics()
pathMatching := &config.PathMatching{
IngressPaths: []string{
"/invoke/method/orders/{orderID}",
},
EgressPaths: []string{
"/invoke/method/orders/{orderID}",
},
}
testHTTP.Init("fakeID", pathMatching, true)
handler := testHTTP.HTTPMiddleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
time.Sleep(5 * time.Millisecond)
w.Write([]byte(responseBody))
}))
// act
for i := 0; i < b.N; i++ {
testRequest := fakeOrdersHTTPRequest(requestBody, i)
handler.ServeHTTP(httptest.NewRecorder(), testRequest)
}
}
func fakeOrdersHTTPRequest(body string, id int) *http.Request {
url := fmt.Sprintf("http://dapr.io/invoke/method/orders/%d", id)
req, err := http.NewRequest(http.MethodPost, url, strings.NewReader(body))
if err != nil {
panic(err)
}
req.Header.Set("Correlation-ID", "e6f4bb20-96c0-426a-9e3d-991ba16a3ebb")
req.Header.Set("XXX-Remote-Addr", "192.168.0.100")
req.Header.Set("Transfer-Encoding", "encoding")
// This is normally set automatically when the request is sent to a server, but in this case we are not using a real server
req.Header.Set("Content-Length", strconv.FormatInt(req.ContentLength, 10))
return req
}
| mikeee/dapr | pkg/diagnostics/http_monitoring_benchmark_test.go | GO | mit | 3,781 |
/*
Copyright 2024 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package diagnostics
import (
"strings"
)
// convertPathToMetricLabel removes the variant parameters in the URL path to keep the metric label space low-cardinality.
// For example, it removes the {keys} param from /v1/state/statestore/{keys}.
// This is only used for legacy metrics.
func (h *httpMetrics) convertPathToMetricLabel(path string) string {
if path == "" {
return path
}
p := path
if p[0] == '/' {
p = path[1:]
}
// Split up to 6 delimiters in 'v1/actors/DemoActor/1/timer/name'
parsedPath := strings.SplitN(p, "/", 6)
if len(parsedPath) < 3 {
return path
}
// Replace actor id with {id} for appcallback url - 'actors/DemoActor/1/method/method1'
if parsedPath[0] == "actors" {
parsedPath[2] = "{id}"
return strings.Join(parsedPath, "/")
}
switch parsedPath[1] {
case "state", "secrets":
// state api: Concat 3 items(v1, state, statestore) in /v1/state/statestore/key
// secrets api: Concat 3 items(v1, secrets, keyvault) in /v1/secrets/keyvault/name
return "/" + strings.Join(parsedPath[0:3], "/")
case "actors":
if len(parsedPath) < 5 {
return path
}
// ignore id part
parsedPath[3] = "{id}"
// Concat 5 items(v1, actors, DemoActor, {id}, timer) in /v1/actors/DemoActor/1/timer/name
return "/" + strings.Join(parsedPath[0:5], "/")
case "workflows":
if len(parsedPath) < 4 {
return path
}
// v1.0-alpha1/workflows/<workflowComponentName>/<instanceId>
if len(parsedPath) == 4 {
parsedPath[3] = "{instanceId}"
return "/" + strings.Join(parsedPath[0:4], "/")
}
// v1.0-alpha1/workflows/<workflowComponentName>/<workflowName>/start[?instanceID=<instanceID>]
if len(parsedPath) == 5 && parsedPath[4] != "" && strings.HasPrefix(parsedPath[4], "start") {
// not obfuscating the workflow name, just the possible instanceID
return "/" + strings.Join(parsedPath[0:4], "/") + "/start"
} else {
// v1.0-alpha1/workflows/<workflowComponentName>/<instanceId>/terminate
// v1.0-alpha1/workflows/<workflowComponentName>/<instanceId>/pause
// v1.0-alpha1/workflows/<workflowComponentName>/<instanceId>/resume
// v1.0-alpha1/workflows/<workflowComponentName>/<instanceId>/purge
parsedPath[3] = "{instanceId}"
// v1.0-alpha1/workflows/<workflowComponentName>/<instanceID>/raiseEvent/<eventName>
if len(parsedPath) == 6 && parsedPath[4] == "raiseEvent" && parsedPath[5] != "" {
parsedPath[5] = "{eventName}"
return "/" + strings.Join(parsedPath[0:6], "/")
}
}
return "/" + strings.Join(parsedPath[0:5], "/")
}
return path
}
| mikeee/dapr | pkg/diagnostics/http_monitoring_legacy.go | GO | mit | 3,079 |
/*
Copyright 2024 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package diagnostics
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestConvertPathToMethodName(t *testing.T) {
convertTests := []struct {
in string
out string
}{
{"/v1/state/statestore/key", "/v1/state/statestore"},
{"/v1/state/statestore", "/v1/state/statestore"},
{"/v1/secrets/keyvault/name", "/v1/secrets/keyvault"},
{"/v1/publish/topic", "/v1/publish/topic"},
{"/v1/bindings/kafka", "/v1/bindings/kafka"},
{"/healthz", "/healthz"},
{"/v1/actors/DemoActor/1/state/key", "/v1/actors/DemoActor/{id}/state"},
{"/v1/actors/DemoActor/1/reminder/name", "/v1/actors/DemoActor/{id}/reminder"},
{"/v1/actors/DemoActor/1/timer/name", "/v1/actors/DemoActor/{id}/timer"},
{"/v1/actors/DemoActor/1/timer/name?query=string", "/v1/actors/DemoActor/{id}/timer"},
{"v1/actors/DemoActor/1/timer/name", "/v1/actors/DemoActor/{id}/timer"},
{"actors/DemoActor/1/method/method1", "actors/DemoActor/{id}/method/method1"},
{"actors/DemoActor/1/method/timer/timer1", "actors/DemoActor/{id}/method/timer/timer1"},
{"actors/DemoActor/1/method/remind/reminder1", "actors/DemoActor/{id}/method/remind/reminder1"},
{"/v1.0-alpha1/workflows/workflowComponentName/mywf/start?instanceID=1234", "/v1.0-alpha1/workflows/workflowComponentName/mywf/start"},
{"/v1.0-alpha1/workflows/workflowComponentName/mywf/start", "/v1.0-alpha1/workflows/workflowComponentName/mywf/start"},
{"/v1.0-alpha1/workflows/workflowComponentName/1234/start/value1/value2", "/v1.0-alpha1/workflows/workflowComponentName/{instanceId}/start"},
{"/v1.0-alpha1/workflows/workflowComponentName/1234/terminate", "/v1.0-alpha1/workflows/workflowComponentName/{instanceId}/terminate"},
{"/v1.0-alpha1/workflows/workflowComponentName/1234/terminate/value1/value2", "/v1.0-alpha1/workflows/workflowComponentName/{instanceId}/terminate"},
{"/v1.0-alpha1/workflows/workflowComponentName/1234/raiseEvent/foobaz", "/v1.0-alpha1/workflows/workflowComponentName/{instanceId}/raiseEvent/{eventName}"},
{"/v1.0-alpha1/workflows/workflowComponentName/1234/pause", "/v1.0-alpha1/workflows/workflowComponentName/{instanceId}/pause"},
{"/v1.0-alpha1/workflows/workflowComponentName/1234/resume", "/v1.0-alpha1/workflows/workflowComponentName/{instanceId}/resume"},
{"/v1.0-alpha1/workflows/workflowComponentName/1234/purge", "/v1.0-alpha1/workflows/workflowComponentName/{instanceId}/purge"},
{"/v1.0-alpha1/workflows/workflowComponentName/1234", "/v1.0-alpha1/workflows/workflowComponentName/{instanceId}"},
{"/v1.0-alpha1/workflows/workflowComponentName", "/v1.0-alpha1/workflows/workflowComponentName"},
{"/v1.0-alpha1/workflows", "/v1.0-alpha1/workflows"},
{"", ""},
}
testHTTP := newHTTPMetrics()
for _, tt := range convertTests {
t.Run(tt.in, func(t *testing.T) {
lowCardinalityName := testHTTP.convertPathToMetricLabel(tt.in)
assert.Equal(t, tt.out, lowCardinalityName)
})
}
}
| mikeee/dapr | pkg/diagnostics/http_monitoring_legacy_test.go | GO | mit | 3,465 |
/*
Copyright 2024 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package diagnostics
import (
"net/http"
"net/url"
"slices"
"strings"
)
var (
pathMatchHandlerFunc = func(pattern string) http.HandlerFunc {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
rw, ok := w.(*pathMatchingRW)
if !ok {
log.Errorf("Failed to cast to PathMatchingRW")
return
}
rw.matchedPath = pattern
})
}
emptyHandlerFunc = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})
)
type pathMatching struct {
mux *http.ServeMux
}
func newPathMatching(paths []string, legacy bool) *pathMatching {
if paths == nil {
return nil
}
if len(paths) == 0 {
return nil
}
cleanPaths := cleanAndSortPaths(paths)
mux := http.NewServeMux()
// Skip the root path if legacy mode is enabled.
if legacy {
mux.Handle("/", emptyHandlerFunc)
} else {
mux.Handle("/", pathMatchHandlerFunc("/"))
}
for _, pattern := range cleanPaths {
mux.Handle(pattern, pathMatchHandlerFunc(pattern))
}
return &pathMatching{
mux: mux,
}
}
// cleanAndSortPaths sorts the paths, removes duplicates, and drops the root path.
func cleanAndSortPaths(paths []string) []string {
slices.Sort(paths)
paths = slices.Compact(paths)
cleanPaths := make([]string, 0, len(paths))
for _, path := range paths {
if path == "/" {
continue
}
cleanPaths = append(cleanPaths, path)
}
return cleanPaths
}
func (pm *pathMatching) enabled() bool {
return pm != nil && pm.mux != nil
}
func (pm *pathMatching) matchPath(path string) (string, bool) {
if !pm.enabled() {
return "", false
}
if path == "" {
return "", false
}
if !strings.HasPrefix(path, "/") {
path = "/" + path
}
req := &http.Request{
Method: http.MethodGet,
URL: &url.URL{
Path: path,
},
}
crw := &pathMatchingRW{matchedPath: path}
pm.mux.ServeHTTP(crw, req)
return crw.matchedPath, true
}
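// Illustrative sketch (not part of the original file): assuming the matcher was built from the
// pattern "/v1/orders/{orderID}" with legacy disabled, a lookup behaves roughly like:
//
//	pm := newPathMatching([]string{"/v1/orders/{orderID}"}, false)
//	matched, ok := pm.matchPath("/v1/orders/12345")
//	// ok == true, matched == "/v1/orders/{orderID}"
//
// This relies on http.ServeMux wildcard patterns such as {orderID} (Go 1.22+), which is an
// assumption of this sketch.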
type pathMatchingRW struct {
http.ResponseWriter
matchedPath string
}
| mikeee/dapr | pkg/diagnostics/http_monitoring_path_matching.go | GO | mit | 2,483 |
package diagnostics
import (
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.opencensus.io/stats/view"
"github.com/dapr/dapr/pkg/config"
)
func TestHTTPMiddleware(t *testing.T) {
requestBody := "fake_requestDaprBody"
responseBody := "fake_responseDaprBody"
testRequest := fakeHTTPRequest(requestBody)
// create test httpMetrics
testHTTP := newHTTPMetrics()
testHTTP.Init("fakeID", nil, false)
handler := testHTTP.HTTPMiddleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
time.Sleep(100 * time.Millisecond)
w.Write([]byte(responseBody))
}))
// act
handler.ServeHTTP(httptest.NewRecorder(), testRequest)
// assert
rows, err := view.RetrieveData("http/server/request_count")
require.NoError(t, err)
assert.Len(t, rows, 1)
assert.Equal(t, "app_id", rows[0].Tags[0].Key.Name())
assert.Equal(t, "fakeID", rows[0].Tags[0].Value)
assert.Equal(t, "status", rows[0].Tags[1].Key.Name())
assert.Equal(t, "200", rows[0].Tags[1].Value)
rows, err = view.RetrieveData("http/server/request_bytes")
require.NoError(t, err)
assert.Len(t, rows, 1)
assert.Equal(t, "app_id", rows[0].Tags[0].Key.Name())
assert.Equal(t, "fakeID", rows[0].Tags[0].Value)
assert.InEpsilon(t, float64(len(requestBody)), (rows[0].Data).(*view.DistributionData).Min, 0)
rows, err = view.RetrieveData("http/server/response_bytes")
require.NoError(t, err)
assert.Len(t, rows, 1)
assert.InEpsilon(t, float64(len(responseBody)), (rows[0].Data).(*view.DistributionData).Min, 0)
rows, err = view.RetrieveData("http/server/latency")
require.NoError(t, err)
assert.Len(t, rows, 1)
assert.GreaterOrEqual(t, (rows[0].Data).(*view.DistributionData).Min, 100.0)
}
func TestHTTPMiddlewareWhenMetricsDisabled(t *testing.T) {
requestBody := "fake_requestDaprBody"
responseBody := "fake_responseDaprBody"
testRequest := fakeHTTPRequest(requestBody)
// create test httpMetrics
testHTTP := newHTTPMetrics()
testHTTP.enabled = false
testHTTP.Init("fakeID", nil, false)
v := view.Find("http/server/request_count")
views := []*view.View{v}
view.Unregister(views...)
handler := testHTTP.HTTPMiddleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
time.Sleep(100 * time.Millisecond)
w.Write([]byte(responseBody))
}))
// act
handler.ServeHTTP(httptest.NewRecorder(), testRequest)
// assert
rows, err := view.RetrieveData("http/server/request_count")
require.Error(t, err)
assert.Nil(t, rows)
}
func TestHTTPMetricsPathMatchingNotEnabled(t *testing.T) {
testHTTP := newHTTPMetrics()
testHTTP.enabled = false
pathMatching := &config.PathMatching{}
testHTTP.Init("fakeID", pathMatching, true)
matchedPath, ok := testHTTP.ingress.matchPath("/orders")
require.False(t, ok)
require.Equal(t, "", matchedPath)
}
func TestHTTPMetricsPathMatchingLegacyIncreasedCardinality(t *testing.T) {
testHTTP := newHTTPMetrics()
testHTTP.enabled = false
config := &config.PathMatching{
IngressPaths: []string{
"/v1/orders/{orderID}/items/12345",
"/v1/orders/{orderID}/items/{itemID}",
"/v1/items/{itemID}",
},
EgressPaths: []string{
"/v1/orders/{orderID}/items/{itemID}",
},
}
testHTTP.Init("fakeID", config, true)
// act & assert
// empty path
matchedPath, ok := testHTTP.ingress.matchPath("")
require.False(t, ok)
require.Equal(t, "", matchedPath)
// match "/v1/orders/{orderID}/items/12345"
matchedPath, ok = testHTTP.ingress.matchPath("/v1/orders/12345/items/12345")
require.True(t, ok)
require.Equal(t, "/v1/orders/{orderID}/items/12345", matchedPath)
// match "/v1/orders/{orderID}/items/{itemID}"
matchedPath, ok = testHTTP.ingress.matchPath("/v1/orders/12345/items/1111")
require.True(t, ok)
require.Equal(t, "/v1/orders/{orderID}/items/{itemID}", matchedPath)
// match "/v1/items/{itemID}"
matchedPath, ok = testHTTP.ingress.matchPath("/v1/items/12345")
require.True(t, ok)
require.Equal(t, "/v1/items/{itemID}", matchedPath)
// no match so we keep the path as is
matchedPath, ok = testHTTP.ingress.matchPath("/v2/basket/12345")
require.True(t, ok)
require.Equal(t, "/v2/basket/12345", matchedPath)
// match "/v1/orders/{orderID}/items/{itemID}"
matchedPath, ok = testHTTP.egress.matchPath("/v1/orders/12345/items/1111")
require.True(t, ok)
require.Equal(t, "/v1/orders/{orderID}/items/{itemID}", matchedPath)
// no match so we keep the path as is
matchedPath, ok = testHTTP.egress.matchPath("/v1/items/12345")
require.True(t, ok)
require.Equal(t, "/v1/items/12345", matchedPath)
}
func TestHTTPMetricsPathMatchingLowCardinality(t *testing.T) {
testHTTP := newHTTPMetrics()
testHTTP.enabled = false
config := &config.PathMatching{
IngressPaths: []string{
"/v1/orders/{orderID}/items/12345",
"/v1/orders/{orderID}/items/{itemID}",
"/v1/orders/{orderID}",
"/v1/items/{itemID}",
"/dapr/config",
"/v1/",
"/",
},
EgressPaths: []string{
"/v1/orders/{orderID}/items/{itemID}",
"/dapr/config",
},
}
testHTTP.Init("fakeID", config, false)
// act & assert
// empty path
matchedPath, ok := testHTTP.ingress.matchPath("")
require.False(t, ok)
require.Equal(t, "", matchedPath)
// match "/v1/orders/{orderID}/items/12345"
matchedPath, ok = testHTTP.ingress.matchPath("/v1/orders/12345/items/12345")
require.True(t, ok)
require.Equal(t, "/v1/orders/{orderID}/items/12345", matchedPath)
// match "/v1/orders/{orderID}"
matchedPath, ok = testHTTP.ingress.matchPath("/v1/orders/12345")
require.True(t, ok)
require.Equal(t, "/v1/orders/{orderID}", matchedPath)
// match "/v1/items/{itemID}"
matchedPath, ok = testHTTP.ingress.matchPath("/v1/items/12345")
require.True(t, ok)
require.Equal(t, "/v1/items/{itemID}", matchedPath)
// match "/v1/"
matchedPath, ok = testHTTP.ingress.matchPath("/v1/basket")
require.True(t, ok)
assert.Equal(t, "/v1/", matchedPath)
// match "/"
matchedPath, ok = testHTTP.ingress.matchPath("/v2/orders/1111")
require.True(t, ok)
assert.Equal(t, "/", matchedPath)
// no match so we fallback to "/"
matchedPath, ok = testHTTP.egress.matchPath("/basket/12345")
require.True(t, ok)
require.Equal(t, "/", matchedPath)
matchedPath, ok = testHTTP.egress.matchPath("/dapr/config")
require.True(t, ok)
require.Equal(t, "/dapr/config", matchedPath)
}
func fakeHTTPRequest(body string) *http.Request {
req, err := http.NewRequest(http.MethodPost, "http://dapr.io/invoke/method/testmethod", strings.NewReader(body))
if err != nil {
panic(err)
}
req.Header.Set("Correlation-ID", "e6f4bb20-96c0-426a-9e3d-991ba16a3ebb")
req.Header.Set("XXX-Remote-Addr", "192.168.0.100")
req.Header.Set("Transfer-Encoding", "encoding")
// This is normally set automatically when the request is sent to a server, but in this case we are not using a real server
req.Header.Set("Content-Length", strconv.FormatInt(req.ContentLength, 10))
return req
}
| mikeee/dapr | pkg/diagnostics/http_monitoring_test.go | GO | mit | 6,937 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package diagnostics
import (
"net/http"
"strconv"
"strings"
"github.com/valyala/fasthttp"
otelcodes "go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/trace"
"github.com/dapr/dapr/pkg/api/http/endpoints"
"github.com/dapr/dapr/pkg/config"
diagConsts "github.com/dapr/dapr/pkg/diagnostics/consts"
diagUtils "github.com/dapr/dapr/pkg/diagnostics/utils"
"github.com/dapr/dapr/pkg/responsewriter"
)
// We have leveraged the code from the opencensus-go plugin to adhere to the W3C trace context.
// Reference: https://github.com/census-instrumentation/opencensus-go/blob/master/plugin/ochttp/propagation/tracecontext/propagation.go
const (
supportedVersion = 0
maxVersion = 254
maxTracestateLen = 512
TraceparentHeader = "traceparent"
TracestateHeader = "tracestate"
)
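// For reference (added for clarity), a traceparent header has the form
// "version-traceid-spanid-traceflags", for example:
//
//	traceparent: 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01
//
// where 00 is the version, followed by the hex-encoded 16-byte trace ID, the
// 8-byte span ID, and the trace flags (01 = sampled).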
// HTTPTraceMiddleware sets the trace context or starts the trace client span based on request.
func HTTPTraceMiddleware(next http.Handler, appID string, spec config.TracingSpec) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
path := r.URL.Path
if isHealthzRequest(path) {
next.ServeHTTP(w, r)
return
}
span := startTracingClientSpanFromHTTPRequest(r, path, spec)
// Wrap the writer in a ResponseWriter so we can collect stats such as status code and size
rw := responsewriter.EnsureResponseWriter(w)
// Before the response is written, we need to add the tracing headers
rw.Before(func(rw responsewriter.ResponseWriter) {
// Add span attributes only if the span is sampled, which reduces the perf impact.
if span.SpanContext().IsSampled() {
AddAttributesToSpan(span, userDefinedHTTPHeaders(r))
spanAttr := spanAttributesMapFromHTTPContext(rw, r)
AddAttributesToSpan(span, spanAttr)
// Correct the span name based on API.
if sname, ok := spanAttr[diagConsts.DaprAPISpanNameInternal]; ok {
span.SetName(sname)
}
}
// Check if response has traceparent header and add if absent
if rw.Header().Get(TraceparentHeader) == "" {
span = diagUtils.SpanFromContext(r.Context())
// Using Header.Set here because we know the traceparent header isn't set
SpanContextToHTTPHeaders(span.SpanContext(), rw.Header().Set)
}
UpdateSpanStatusFromHTTPStatus(span, rw.Status())
span.End()
})
next.ServeHTTP(rw, r)
})
}
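// Illustrative sketch (not part of the original file): the tracing middleware is meant to wrap
// an HTTP handler, for example:
//
//	handler := HTTPTraceMiddleware(mux, "myapp", config.TracingSpec{SamplingRate: "1"})
//
// "myapp" and mux are placeholders for illustration only.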
// userDefinedHTTPHeaders returns dapr- prefixed headers from the incoming request.
// Users can add dapr- prefixed headers that they want to see in span attributes.
func userDefinedHTTPHeaders(r *http.Request) map[string]string {
// Allocate this with enough memory for a pessimistic case
m := make(map[string]string, len(r.Header))
for key, vSlice := range r.Header {
if len(vSlice) < 1 || len(key) < (len(daprHeaderBinSuffix)+1) {
continue
}
key = strings.ToLower(key)
if strings.HasPrefix(key, daprHeaderPrefix) {
// Get the last value for each key
m[key] = vSlice[len(vSlice)-1]
}
}
return m
}
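// Illustrative sketch (not part of the original file): a request carrying the header
// "dapr-app-version: v2" (a made-up header name) would yield
// map[string]string{"dapr-app-version": "v2"}, which the middleware above then attaches
// to the span as attributes.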
func startTracingClientSpanFromHTTPRequest(r *http.Request, spanName string, spec config.TracingSpec) trace.Span {
sc := SpanContextFromRequest(r)
ctx := trace.ContextWithRemoteSpanContext(r.Context(), sc)
kindOption := trace.WithSpanKind(trace.SpanKindClient)
_, span := tracer.Start(ctx, spanName, kindOption)
diagUtils.AddSpanToRequest(r, span)
return span
}
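// StartProducerSpanChildFromParent starts a producer span as a child of the given parent span, using the fasthttp request path as the span name.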
func StartProducerSpanChildFromParent(ctx *fasthttp.RequestCtx, parentSpan trace.Span) trace.Span {
path := string(ctx.Request.URI().Path())
netCtx := trace.ContextWithRemoteSpanContext(ctx, parentSpan.SpanContext())
kindOption := trace.WithSpanKind(trace.SpanKindProducer)
_, span := tracer.Start(netCtx, path, kindOption)
return span
}
// SpanContextFromRequest extracts a span context from incoming requests.
func SpanContextFromRequest(r *http.Request) (sc trace.SpanContext) {
h := r.Header.Get(TraceparentHeader)
if h == "" {
return trace.SpanContext{}
}
sc, ok := SpanContextFromW3CString(h)
if ok {
ts := tracestateFromRequest(r)
sc = sc.WithTraceState(*ts)
}
return sc
}
func isHealthzRequest(name string) bool {
return strings.Contains(name, "/healthz")
}
// UpdateSpanStatusFromHTTPStatus updates trace span status based on response code.
func UpdateSpanStatusFromHTTPStatus(span trace.Span, code int) {
if span != nil {
statusCode, statusDescription := traceStatusFromHTTPCode(code)
span.SetStatus(statusCode, statusDescription)
}
}
// https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/semantic_conventions/http.md#status
func traceStatusFromHTTPCode(httpCode int) (otelcodes.Code, string) {
code := otelcodes.Unset
if httpCode >= 400 {
code = otelcodes.Error
statusText := http.StatusText(httpCode)
if statusText == "" {
statusText = "Unknown"
}
codeDescription := "Code(" + strconv.FormatInt(int64(httpCode), 10) + "): " + statusText
return code, codeDescription
}
return code, ""
}
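// Illustrative sketch (not part of the original file): HTTP codes below 400 map to an unset
// status, while e.g. 404 maps to an error:
//
//	code, desc := traceStatusFromHTTPCode(404)
//	// code == otelcodes.Error, desc == "Code(404): Not Found"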
func tracestateFromRequest(r *http.Request) *trace.TraceState {
h := r.Header.Get(TracestateHeader)
return TraceStateFromW3CString(h)
}
// SpanContextToHTTPHeaders adds the spancontext in traceparent and tracestate headers.
func SpanContextToHTTPHeaders(sc trace.SpanContext, setHeader func(string, string)) {
// if sc is empty context, no ops.
if sc.Equal(trace.SpanContext{}) {
return
}
h := SpanContextToW3CString(sc)
setHeader(TraceparentHeader, h)
tracestateToHeader(sc, setHeader)
}
func tracestateToHeader(sc trace.SpanContext, setHeader func(string, string)) {
if h := TraceStateToW3CString(sc); h != "" && len(h) <= maxTracestateLen {
setHeader(TracestateHeader, h)
}
}
func spanAttributesMapFromHTTPContext(rw responsewriter.ResponseWriter, r *http.Request) map[string]string {
// Init with a worst-case initial capacity of 7, which is the maximum number of unique keys we expect to add.
// This is just a "hint" to the compiler so when the map is allocated, it has an initial capacity for 7 elements.
// It's a (minor) perf improvement that allows us to avoid re-allocations which are wasteful on the allocator and GC both.
m := make(map[string]string, 7)
// Check if the context contains an AppendSpanAttributes method
endpointData, _ := r.Context().Value(endpoints.EndpointCtxKey{}).(*endpoints.EndpointCtxData)
if endpointData != nil && endpointData.Group != nil && endpointData.Group.AppendSpanAttributes != nil {
endpointData.Group.AppendSpanAttributes(r, m)
}
// Populate dapr original api attributes.
m[diagConsts.DaprAPIProtocolSpanAttributeKey] = diagConsts.DaprAPIHTTPSpanAttrValue
m[diagConsts.DaprAPISpanAttributeKey] = r.Method + " " + r.URL.Path
m[diagConsts.DaprAPIStatusCodeSpanAttributeKey] = strconv.Itoa(rw.Status())
return m
}
| mikeee/dapr | pkg/diagnostics/http_tracing.go | GO | mit | 7,262 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package diagnostics
import (
"context"
"encoding/hex"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"strconv"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel"
otelcodes "go.opentelemetry.io/otel/codes"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
"go.opentelemetry.io/otel/trace"
"github.com/dapr/dapr/pkg/api/http/endpoints"
"github.com/dapr/dapr/pkg/config"
diagConsts "github.com/dapr/dapr/pkg/diagnostics/consts"
diagUtils "github.com/dapr/dapr/pkg/diagnostics/utils"
"github.com/dapr/dapr/pkg/responsewriter"
)
func TestSpanContextFromRequest(t *testing.T) {
tests := []struct {
name string
header string
wantSc trace.SpanContextConfig
wantOk bool
}{
{
name: "future version",
header: "02-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01",
wantSc: trace.SpanContextConfig{
TraceID: trace.TraceID{75, 249, 47, 53, 119, 179, 77, 166, 163, 206, 146, 157, 14, 14, 71, 54},
SpanID: trace.SpanID{0, 240, 103, 170, 11, 169, 2, 183},
TraceFlags: trace.TraceFlags(1),
},
wantOk: true,
},
{
name: "zero trace ID and span ID",
header: "00-00000000000000000000000000000000-0000000000000000-01",
wantSc: trace.SpanContextConfig{},
wantOk: false,
},
{
name: "valid header",
header: "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01",
wantSc: trace.SpanContextConfig{
TraceID: trace.TraceID{75, 249, 47, 53, 119, 179, 77, 166, 163, 206, 146, 157, 14, 14, 71, 54},
SpanID: trace.SpanID{0, 240, 103, 170, 11, 169, 2, 183},
TraceFlags: trace.TraceFlags(1),
},
wantOk: true,
},
{
name: "missing options",
header: "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7",
wantSc: trace.SpanContextConfig{},
wantOk: false,
},
{
name: "empty options",
header: "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-",
wantSc: trace.SpanContextConfig{},
wantOk: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
req := &http.Request{
Header: make(http.Header),
}
req.Header.Add("traceparent", tt.header)
gotSc := SpanContextFromRequest(req)
wantSc := trace.NewSpanContext(tt.wantSc)
assert.Equalf(t, wantSc, gotSc, "SpanContextFromRequest gotSc = %v, want %v", gotSc, wantSc)
})
}
}
func TestUserDefinedHTTPHeaders(t *testing.T) {
req := &http.Request{
Header: make(http.Header),
}
req.Header.Add("dapr-userdefined-1", "value1")
req.Header.Add("dapr-userdefined-2", "value2")
req.Header.Add("no-attr", "value3")
m := userDefinedHTTPHeaders(req)
assert.Len(t, m, 2)
assert.Equal(t, "value1", m["dapr-userdefined-1"])
assert.Equal(t, "value2", m["dapr-userdefined-2"])
}
func TestSpanContextToHTTPHeaders(t *testing.T) {
tests := []struct {
sc trace.SpanContextConfig
}{
{
sc: trace.SpanContextConfig{
TraceID: trace.TraceID{75, 249, 47, 53, 119, 179, 77, 166, 163, 206, 146, 157, 14, 14, 71, 54},
SpanID: trace.SpanID{0, 240, 103, 170, 11, 169, 2, 183},
TraceFlags: trace.TraceFlags(1),
},
},
}
for _, tt := range tests {
t.Run("SpanContextToHTTPHeaders", func(t *testing.T) {
req, _ := http.NewRequest(http.MethodGet, "http://test.local/path", nil)
wantSc := trace.NewSpanContext(tt.sc)
SpanContextToHTTPHeaders(wantSc, req.Header.Set)
got := SpanContextFromRequest(req)
assert.Equalf(t, wantSc, got, "SpanContextToHTTPHeaders() got = %v, want %v", got, wantSc)
})
}
t.Run("empty span context", func(t *testing.T) {
req, _ := http.NewRequest(http.MethodGet, "http://test.local/path", nil)
sc := trace.SpanContext{}
SpanContextToHTTPHeaders(sc, req.Header.Set)
assert.Empty(t, req.Header.Get(TraceparentHeader))
})
}
func TestGetSpanAttributesMapFromHTTPContext(t *testing.T) {
tests := []struct {
path string
appendAttributesFn endpoints.AppendSpanAttributesFn
out map[string]string
}{
{
"/v1.0/state/statestore/key",
func(r *http.Request, m map[string]string) {
m[diagConsts.DBSystemSpanAttributeKey] = "state"
m[diagConsts.DBNameSpanAttributeKey] = "statestore"
m[diagConsts.DBConnectionStringSpanAttributeKey] = "state"
},
map[string]string{
diagConsts.DaprAPIProtocolSpanAttributeKey: "http",
diagConsts.DaprAPISpanAttributeKey: "GET /v1.0/state/statestore/key",
diagConsts.DBSystemSpanAttributeKey: "state",
diagConsts.DBNameSpanAttributeKey: "statestore",
diagConsts.DBConnectionStringSpanAttributeKey: "state",
},
},
}
for i, tt := range tests {
t.Run(fmt.Sprintf("test %d", i), func(t *testing.T) {
var err error
req := getTestHTTPRequest()
resp := responsewriter.EnsureResponseWriter(httptest.NewRecorder())
resp.WriteHeader(http.StatusOK)
req.URL, err = url.Parse("http://test.local" + tt.path)
require.NoError(t, err)
ctx := context.WithValue(req.Context(), endpoints.EndpointCtxKey{}, &endpoints.EndpointCtxData{
Group: &endpoints.EndpointGroup{
AppendSpanAttributes: tt.appendAttributesFn,
},
})
req = req.WithContext(ctx)
got := spanAttributesMapFromHTTPContext(responsewriter.EnsureResponseWriter(resp), req)
for k, v := range tt.out {
assert.Equalf(t, v, got[k], "key: %v", k)
}
})
}
}
func TestSpanContextToResponse(t *testing.T) {
tests := []struct {
scConfig trace.SpanContextConfig
}{
{
scConfig: trace.SpanContextConfig{
TraceID: trace.TraceID{75, 249, 47, 53, 119, 179, 77, 166, 163, 206, 146, 157, 14, 14, 71, 54},
SpanID: trace.SpanID{0, 240, 103, 170, 11, 169, 2, 183},
TraceFlags: trace.TraceFlags(1),
},
},
}
for _, tt := range tests {
t.Run("SpanContextToResponse", func(t *testing.T) {
resp := httptest.NewRecorder()
wantSc := trace.NewSpanContext(tt.scConfig)
SpanContextToHTTPHeaders(wantSc, resp.Header().Set)
h := resp.Header().Get("traceparent")
got, _ := SpanContextFromW3CString(h)
assert.Equalf(t, wantSc, got, "SpanContextToResponse() got = %v, want %v", got, wantSc)
})
}
}
func getTestHTTPRequest() *http.Request {
req, _ := http.NewRequest(http.MethodGet, "http://test.local/v1.0/state/statestore/key", nil)
req.Header.Set("dapr-testheaderkey", "dapr-testheadervalue")
req.Header.Set("x-testheaderkey1", "dapr-testheadervalue")
req.Header.Set("daprd-testheaderkey2", "dapr-testheadervalue")
var (
tid = trace.TraceID{1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 4, 8, 16, 32, 64, 128}
sid = trace.SpanID{1, 2, 4, 8, 16, 32, 64, 128}
)
scConfig := trace.SpanContextConfig{
TraceID: tid,
SpanID: sid,
TraceFlags: 0x0,
}
sc := trace.NewSpanContext(scConfig)
SpanContextToHTTPHeaders(sc, req.Header.Set)
return req
}
func TestHTTPTraceMiddleware(t *testing.T) {
requestBody := "fake_requestDaprBody"
responseBody := "fake_responseDaprBody"
fakeHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
time.Sleep(100 * time.Millisecond)
w.Write([]byte(responseBody))
})
rate := config.TracingSpec{SamplingRate: "1"}
handler := HTTPTraceMiddleware(fakeHandler, "fakeAppID", rate)
exp := newOtelFakeExporter()
tp := sdktrace.NewTracerProvider(
sdktrace.WithBatcher(exp),
)
defer func() { _ = tp.Shutdown(context.Background()) }()
otel.SetTracerProvider(tp)
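// The middleware starts a new server span per request: the trace ID from an incoming
// traceparent is reused, while a fresh span ID is generated, which is what the sub-tests assert.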
t.Run("traceparent is given in request and sampling is enabled", func(t *testing.T) {
r := newTraceRequest(
requestBody, "/v1.0/state/statestore",
map[string]string{
"traceparent": "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01",
},
)
w := httptest.NewRecorder()
handler.ServeHTTP(w, r)
span := diagUtils.SpanFromContext(r.Context())
sc := span.SpanContext()
traceID := sc.TraceID()
spanID := sc.SpanID()
assert.Equal(t, "4bf92f3577b34da6a3ce929d0e0e4736", hex.EncodeToString(traceID[:]))
assert.NotEqual(t, "00f067aa0ba902b7", hex.EncodeToString(spanID[:]))
})
t.Run("traceparent is not given in request", func(t *testing.T) {
r := newTraceRequest(
requestBody, "/v1.0/state/statestore",
map[string]string{
"dapr-userdefined": "value",
},
)
w := httptest.NewRecorder()
handler.ServeHTTP(w, r)
span := diagUtils.SpanFromContext(r.Context())
sc := span.SpanContext()
traceID := sc.TraceID()
spanID := sc.SpanID()
assert.NotEmpty(t, hex.EncodeToString(traceID[:]))
assert.NotEmpty(t, hex.EncodeToString(spanID[:]))
})
t.Run("traceparent not given in response", func(t *testing.T) {
r := newTraceRequest(
requestBody, "/v1.0/state/statestore",
map[string]string{
"dapr-userdefined": "value",
},
)
w := httptest.NewRecorder()
handler.ServeHTTP(w, r)
span := diagUtils.SpanFromContext(r.Context())
sc := span.SpanContext()
assert.Equal(t, w.Header().Get(TraceparentHeader), SpanContextToW3CString(sc))
})
t.Run("traceparent given in response", func(t *testing.T) {
r := newTraceRequest(
requestBody, "/v1.0/state/statestore",
map[string]string{
"dapr-userdefined": "value",
},
)
w := httptest.NewRecorder()
w.Header().Set(TraceparentHeader, "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01")
w.Header().Set(TracestateHeader, "xyz=t61pCWkhMzZ")
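// A traceparent already present on the response writer is preserved, so it is not expected
// to match the context of the span created for this request.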
handler.ServeHTTP(w, r)
span := diagUtils.SpanFromContext(r.Context())
sc := span.SpanContext()
assert.NotEqual(t, w.Header().Get(TraceparentHeader), SpanContextToW3CString(sc))
})
}
func TestTraceStatusFromHTTPCode(t *testing.T) {
tests := []struct {
httpCode int
wantOtelCode otelcodes.Code
wantOtelCodeDescription string
}{
{
httpCode: 200,
wantOtelCode: otelcodes.Unset,
wantOtelCodeDescription: "",
},
{
httpCode: 401,
wantOtelCode: otelcodes.Error,
wantOtelCodeDescription: "Code(401): Unauthorized",
},
{
httpCode: 488,
wantOtelCode: otelcodes.Error,
wantOtelCodeDescription: "Code(488): Unknown",
},
}
for _, tt := range tests {
t.Run("traceStatusFromHTTPCode", func(t *testing.T) {
gotOtelCode, gotOtelCodeDescription := traceStatusFromHTTPCode(tt.httpCode)
assert.Equalf(t, tt.wantOtelCode, gotOtelCode, "traceStatusFromHTTPCode(%v) got = %v, want %v", tt.httpCode, gotOtelCode, tt.wantOtelCode)
assert.Equalf(t, tt.wantOtelCodeDescription, gotOtelCodeDescription, "traceStatusFromHTTPCode(%v) got = %v, want %v", tt.httpCode, gotOtelCodeDescription, tt.wantOtelCodeDescription)
})
}
}
func newTraceRequest(body, requestPath string, requestHeader map[string]string) *http.Request {
req, _ := http.NewRequest(http.MethodPost, "http://dapr.io"+requestPath, strings.NewReader(body))
req.Header.Set("Transfer-Encoding", "encoding")
req.Header.Set("Content-Length", strconv.Itoa(len(body)))
for k, v := range requestHeader {
req.Header.Set(k, v)
}
return req
}
|
mikeee/dapr
|
pkg/diagnostics/http_tracing_test.go
|
GO
|
mit
| 11,477 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package diagnostics
import (
"time"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"github.com/dapr/dapr/pkg/config"
"github.com/dapr/dapr/pkg/diagnostics/utils"
)
// appIDKey is a tag key for App ID.
var appIDKey = tag.MustNewKey("app_id")
var (
// DefaultReportingPeriod is the default view reporting period.
DefaultReportingPeriod = 1 * time.Minute
// DefaultMonitoring holds service monitoring metrics definitions.
DefaultMonitoring = newServiceMetrics()
// DefaultGRPCMonitoring holds default gRPC monitoring handlers and middlewares.
DefaultGRPCMonitoring = newGRPCMetrics()
// DefaultHTTPMonitoring holds default HTTP monitoring handlers and middlewares.
DefaultHTTPMonitoring = newHTTPMetrics()
// DefaultComponentMonitoring holds component specific metrics.
DefaultComponentMonitoring = newComponentMetrics()
// DefaultResiliencyMonitoring holds resiliency specific metrics.
DefaultResiliencyMonitoring = newResiliencyMetrics()
// DefaultWorkflowMonitoring holds workflow specific metrics.
DefaultWorkflowMonitoring = newWorkflowMetrics()
)
// InitMetrics initializes metrics.
func InitMetrics(appID, namespace string, rules []config.MetricsRule, pathMatching *config.PathMatching, legacyMetricsHTTPMetrics bool) error {
if err := DefaultMonitoring.Init(appID); err != nil {
return err
}
if err := DefaultGRPCMonitoring.Init(appID); err != nil {
return err
}
if err := DefaultHTTPMonitoring.Init(appID, pathMatching, legacyMetricsHTTPMetrics); err != nil {
return err
}
if err := DefaultComponentMonitoring.Init(appID, namespace); err != nil {
return err
}
if err := DefaultResiliencyMonitoring.Init(appID); err != nil {
return err
}
if err := DefaultWorkflowMonitoring.Init(appID, namespace); err != nil {
return err
}
// Set reporting period of views
view.SetReportingPeriod(DefaultReportingPeriod)
return utils.CreateRulesMap(rules)
}
|
mikeee/dapr
|
pkg/diagnostics/metrics.go
|
GO
|
mit
| 2,473 |
package diagnostics
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"github.com/dapr/dapr/pkg/config"
diagUtils "github.com/dapr/dapr/pkg/diagnostics/utils"
)
func TestRegexRulesSingle(t *testing.T) {
const statName = "test_stat_regex"
methodKey := tag.MustNewKey("method")
testStat := stats.Int64(statName, "Stat used in unit test", stats.UnitDimensionless)
InitMetrics("testAppId2", "", []config.MetricsRule{
{
Name: statName,
Labels: []config.MetricLabel{
{
Name: methodKey.Name(),
Regex: map[string]string{
"/orders/TEST": "/orders/.+",
"/lightsabers/TEST": "/lightsabers/.+",
},
},
},
},
}, nil, false)
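// With the rule above, any recorded "method" tag value matching one of the regexes is
// rewritten to the corresponding map key (e.g. "/orders/123" becomes "/orders/TEST").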
t.Run("single regex rule applied", func(t *testing.T) {
view.Register(
diagUtils.NewMeasureView(testStat, []tag.Key{methodKey}, defaultSizeDistribution),
)
t.Cleanup(func() {
view.Unregister(view.Find(statName))
})
stats.RecordWithTags(context.Background(),
diagUtils.WithTags(testStat.Name(), methodKey, "/orders/123"),
testStat.M(1))
viewData, _ := view.RetrieveData(statName)
v := view.Find(statName)
allTagsPresent(t, v, viewData[0].Tags)
assert.Equal(t, "/orders/TEST", viewData[0].Tags[0].Value)
})
t.Run("single regex rule not applied", func(t *testing.T) {
view.Register(
diagUtils.NewMeasureView(testStat, []tag.Key{methodKey}, defaultSizeDistribution),
)
t.Cleanup(func() {
view.Unregister(view.Find(statName))
})
s := newGRPCMetrics()
s.Init("test")
stats.RecordWithTags(context.Background(),
diagUtils.WithTags(testStat.Name(), methodKey, "/siths/123"),
testStat.M(1))
viewData, _ := view.RetrieveData(statName)
v := view.Find(statName)
allTagsPresent(t, v, viewData[0].Tags)
assert.Equal(t, "/siths/123", viewData[0].Tags[0].Value)
})
t.Run("correct regex rules applied", func(t *testing.T) {
view.Register(
diagUtils.NewMeasureView(testStat, []tag.Key{methodKey}, defaultSizeDistribution),
)
t.Cleanup(func() {
view.Unregister(view.Find(statName))
})
s := newGRPCMetrics()
s.Init("test")
stats.RecordWithTags(context.Background(),
diagUtils.WithTags(testStat.Name(), methodKey, "/orders/123"),
testStat.M(1))
stats.RecordWithTags(context.Background(),
diagUtils.WithTags(testStat.Name(), methodKey, "/lightsabers/123"),
testStat.M(1))
viewData, _ := view.RetrieveData(statName)
orders := false
lightsabers := false
for _, v := range viewData {
if v.Tags[0].Value == "/orders/TEST" {
orders = true
} else if v.Tags[0].Value == "/lightsabers/TEST" {
lightsabers = true
}
}
assert.True(t, orders)
assert.True(t, lightsabers)
})
}
|
mikeee/dapr
|
pkg/diagnostics/metrics_regex_test.go
|
GO
|
mit
| 2,769 |
package diagnostics
import (
"context"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
diagUtils "github.com/dapr/dapr/pkg/diagnostics/utils"
)
var (
CircuitBreakerPolicy PolicyType = "circuitbreaker"
RetryPolicy PolicyType = "retry"
TimeoutPolicy PolicyType = "timeout"
OutboundPolicyFlowDirection PolicyFlowDirection = "outbound"
InboundPolicyFlowDirection PolicyFlowDirection = "inbound"
)
type PolicyType string
type PolicyFlowDirection string
type resiliencyMetrics struct {
policiesLoadCount *stats.Int64Measure
executionCount *stats.Int64Measure
activationsCount *stats.Int64Measure
appID string
ctx context.Context
enabled bool
}
func newResiliencyMetrics() *resiliencyMetrics {
return &resiliencyMetrics{ //nolint:exhaustruct
policiesLoadCount: stats.Int64(
"resiliency/loaded",
"Number of resiliency policies loaded.",
stats.UnitDimensionless),
executionCount: stats.Int64(
"resiliency/count",
"Number of times a resiliency policyKey has been applied to a building block.",
stats.UnitDimensionless),
activationsCount: stats.Int64(
"resiliency/activations_total",
"Number of times a resiliency policyKey has been activated in a building block after a failure or after a state change.",
stats.UnitDimensionless),
// TODO: how to use correct context
ctx: context.Background(),
enabled: false,
}
}
// Init registers the resiliency metrics views.
func (m *resiliencyMetrics) Init(id string) error {
m.enabled = true
m.appID = id
return view.Register(
diagUtils.NewMeasureView(m.policiesLoadCount, []tag.Key{appIDKey, resiliencyNameKey, namespaceKey}, view.Count()),
diagUtils.NewMeasureView(m.executionCount, []tag.Key{appIDKey, resiliencyNameKey, policyKey, namespaceKey, flowDirectionKey, targetKey, statusKey}, view.Count()),
diagUtils.NewMeasureView(m.activationsCount, []tag.Key{appIDKey, resiliencyNameKey, policyKey, namespaceKey, flowDirectionKey, targetKey, statusKey}, view.Count()),
)
}
// PolicyLoaded records metric when policy is loaded.
func (m *resiliencyMetrics) PolicyLoaded(resiliencyName, namespace string) {
if m.enabled {
_ = stats.RecordWithTags(
m.ctx,
diagUtils.WithTags(m.policiesLoadCount.Name(), appIDKey, m.appID, resiliencyNameKey, resiliencyName, namespaceKey, namespace),
m.policiesLoadCount.M(1),
)
}
}
// PolicyWithStatusExecuted records metric when policy is executed with added status information (e.g., circuit breaker open).
func (m *resiliencyMetrics) PolicyWithStatusExecuted(resiliencyName, namespace string, policy PolicyType, flowDirection PolicyFlowDirection, target string, status string) {
if m.enabled {
_ = stats.RecordWithTags(
m.ctx,
diagUtils.WithTags(m.executionCount.Name(), appIDKey, m.appID, resiliencyNameKey, resiliencyName, policyKey, string(policy),
namespaceKey, namespace, flowDirectionKey, string(flowDirection), targetKey, target, statusKey, status),
m.executionCount.M(1),
)
}
}
// PolicyExecuted records metric when policy is executed.
func (m *resiliencyMetrics) PolicyExecuted(resiliencyName, namespace string, policy PolicyType, flowDirection PolicyFlowDirection, target string) {
m.PolicyWithStatusExecuted(resiliencyName, namespace, policy, flowDirection, target, "")
}
// PolicyActivated records metric when policy is activated after a failure.
func (m *resiliencyMetrics) PolicyActivated(resiliencyName, namespace string, policy PolicyType, flowDirection PolicyFlowDirection, target string) {
m.PolicyWithStatusActivated(resiliencyName, namespace, policy, flowDirection, target, "")
}
// PolicyWithStatusActivated records metric when policy is activated after a failure or, in the case of a circuit breaker, after a state change, with added state/status information (e.g., circuit breaker open).
func (m *resiliencyMetrics) PolicyWithStatusActivated(resiliencyName, namespace string, policy PolicyType, flowDirection PolicyFlowDirection, target string, status string) {
if m.enabled {
_ = stats.RecordWithTags(
m.ctx,
diagUtils.WithTags(m.activationsCount.Name(), appIDKey, m.appID, resiliencyNameKey, resiliencyName, policyKey, string(policy),
namespaceKey, namespace, flowDirectionKey, string(flowDirection), targetKey, target, statusKey, status),
m.activationsCount.M(1),
)
}
}
func ResiliencyActorTarget(actorType string) string {
return "actor_" + actorType
}
func ResiliencyAppTarget(app string) string {
return "app_" + app
}
func ResiliencyComponentTarget(name string, componentType string) string {
return componentType + "_" + name
}
|
mikeee/dapr
|
pkg/diagnostics/resiliency_monitoring.go
|
GO
|
mit
| 4,610 |
package diagnostics_test
import (
"context"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/require"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
resiliencyV1alpha "github.com/dapr/dapr/pkg/apis/resiliency/v1alpha1"
diag "github.com/dapr/dapr/pkg/diagnostics"
"github.com/dapr/dapr/pkg/resiliency"
"github.com/dapr/dapr/pkg/resiliency/breaker"
"github.com/dapr/kit/logger"
"github.com/dapr/kit/ptr"
)
const (
resiliencyCountViewName = "resiliency/count"
resiliencyActivationViewName = "resiliency/activations_total"
resiliencyLoadedViewName = "resiliency/loaded"
testAppID = "fakeID"
testResiliencyName = "testResiliency"
testResiliencyNamespace = "testNamespace"
testStateStoreName = "testStateStore"
)
func cleanupRegisteredViews() {
diag.CleanupRegisteredViews(
resiliencyCountViewName,
resiliencyLoadedViewName,
resiliencyActivationViewName)
}
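// OpenCensus view registration is global, so each test unregisters the resiliency views
// first to start from a clean slate.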
func TestResiliencyCountMonitoring(t *testing.T) {
tests := []struct {
name string
unitFn func()
wantTags []tag.Tag
wantNumberOfRows int
wantErr bool
appID string
}{
{
name: "EndpointPolicy",
appID: testAppID,
unitFn: func() {
r := createTestResiliency(testResiliencyName, testResiliencyNamespace, "fakeStateStore")
_ = r.EndpointPolicy("fakeApp", "fakeEndpoint")
},
wantNumberOfRows: 3,
wantTags: []tag.Tag{
diag.NewTag("app_id", testAppID),
diag.NewTag("name", testResiliencyName),
diag.NewTag("namespace", testResiliencyNamespace),
diag.NewTag(diag.PolicyKey.Name(), string(diag.TimeoutPolicy)),
diag.NewTag(diag.PolicyKey.Name(), string(diag.CircuitBreakerPolicy)),
diag.NewTag(diag.PolicyKey.Name(), string(diag.RetryPolicy)),
diag.NewTag(diag.FlowDirectionKey.Name(), string(diag.OutboundPolicyFlowDirection)),
diag.NewTag(diag.TargetKey.Name(), diag.ResiliencyAppTarget("fakeApp")),
diag.NewTag(diag.StatusKey.Name(), "closed"),
},
},
{
name: "ActorPreLockPolicy",
appID: testAppID,
unitFn: func() {
r := createTestResiliency(testResiliencyName, testResiliencyNamespace, "fakeStateStore")
_ = r.ActorPreLockPolicy("fakeActor", "fakeActorId")
},
wantTags: []tag.Tag{
diag.NewTag("app_id", testAppID),
diag.NewTag("name", testResiliencyName),
diag.NewTag("namespace", testResiliencyNamespace),
diag.NewTag(diag.PolicyKey.Name(), string(diag.CircuitBreakerPolicy)),
diag.NewTag(diag.PolicyKey.Name(), string(diag.RetryPolicy)),
diag.NewTag(diag.FlowDirectionKey.Name(), string(diag.OutboundPolicyFlowDirection)),
diag.NewTag(diag.TargetKey.Name(), diag.ResiliencyActorTarget("fakeActor")),
diag.NewTag(diag.StatusKey.Name(), string(breaker.StateClosed)),
},
wantNumberOfRows: 2,
},
{
name: "ActorPostLockPolicy",
appID: testAppID,
unitFn: func() {
r := createTestResiliency(testResiliencyName, testResiliencyNamespace, "fakeStateStore")
_ = r.ActorPostLockPolicy("fakeActor", "fakeActorId")
},
wantTags: []tag.Tag{
diag.NewTag("app_id", testAppID),
diag.NewTag("name", testResiliencyName),
diag.NewTag("namespace", testResiliencyNamespace),
diag.NewTag(diag.PolicyKey.Name(), string(diag.TimeoutPolicy)),
diag.NewTag(diag.TargetKey.Name(), diag.ResiliencyActorTarget("fakeActor")),
diag.NewTag(diag.FlowDirectionKey.Name(), string(diag.OutboundPolicyFlowDirection)),
},
wantNumberOfRows: 1,
},
{
name: "ComponentOutboundPolicy",
appID: testAppID,
unitFn: func() {
r := createTestResiliency(testResiliencyName, testResiliencyNamespace, testStateStoreName)
_ = r.ComponentOutboundPolicy(testStateStoreName, resiliency.Statestore)
},
wantTags: []tag.Tag{
diag.NewTag("app_id", testAppID),
diag.NewTag("name", testResiliencyName),
diag.NewTag("namespace", testResiliencyNamespace),
diag.NewTag(diag.PolicyKey.Name(), string(diag.CircuitBreakerPolicy)),
diag.NewTag(diag.PolicyKey.Name(), string(diag.TimeoutPolicy)),
diag.NewTag(diag.PolicyKey.Name(), string(diag.RetryPolicy)),
diag.NewTag(diag.FlowDirectionKey.Name(), string(diag.OutboundPolicyFlowDirection)),
diag.NewTag(diag.TargetKey.Name(), diag.ResiliencyComponentTarget(testStateStoreName, string(resiliency.Statestore))),
diag.NewTag(diag.StatusKey.Name(), string(breaker.StateClosed)),
},
wantNumberOfRows: 3,
},
{
name: "ComponentInboundPolicy",
appID: testAppID,
unitFn: func() {
r := createTestResiliency(testResiliencyName, testResiliencyNamespace, testStateStoreName)
_ = r.ComponentInboundPolicy(testStateStoreName, resiliency.Statestore)
},
wantTags: []tag.Tag{
diag.NewTag("app_id", testAppID),
diag.NewTag("name", testResiliencyName),
diag.NewTag("namespace", testResiliencyNamespace),
diag.NewTag(diag.PolicyKey.Name(), string(diag.CircuitBreakerPolicy)),
diag.NewTag(diag.PolicyKey.Name(), string(diag.TimeoutPolicy)),
diag.NewTag(diag.PolicyKey.Name(), string(diag.RetryPolicy)),
diag.NewTag(diag.FlowDirectionKey.Name(), string(diag.InboundPolicyFlowDirection)),
diag.NewTag(diag.TargetKey.Name(), diag.ResiliencyComponentTarget(testStateStoreName, string(resiliency.Statestore))),
diag.NewTag(diag.StatusKey.Name(), string(breaker.StateClosed)),
},
wantNumberOfRows: 3,
},
{
name: "ComponentInboundDefaultPolicy",
appID: testAppID,
unitFn: func() {
r := createDefaultTestResiliency(testResiliencyName, testResiliencyNamespace)
_ = r.ComponentInboundPolicy(testStateStoreName, resiliency.Statestore)
},
wantNumberOfRows: 3,
wantTags: []tag.Tag{
diag.NewTag("app_id", testAppID),
diag.NewTag("name", testResiliencyName),
diag.NewTag("namespace", testResiliencyNamespace),
diag.NewTag(diag.PolicyKey.Name(), string(diag.CircuitBreakerPolicy)),
diag.NewTag(diag.PolicyKey.Name(), string(diag.TimeoutPolicy)),
diag.NewTag(diag.PolicyKey.Name(), string(diag.RetryPolicy)),
diag.NewTag(diag.FlowDirectionKey.Name(), string(diag.InboundPolicyFlowDirection)),
diag.NewTag(diag.TargetKey.Name(), diag.ResiliencyComponentTarget(testStateStoreName, string(resiliency.Statestore))),
diag.NewTag(diag.StatusKey.Name(), string(breaker.StateClosed)),
},
},
{
name: "ComponentOutboundDefaultPolicy",
appID: testAppID,
unitFn: func() {
r := createDefaultTestResiliency(testResiliencyName, testResiliencyNamespace)
_ = r.ComponentOutboundPolicy(testStateStoreName, resiliency.Statestore)
},
wantNumberOfRows: 2,
wantTags: []tag.Tag{
diag.NewTag("app_id", testAppID),
diag.NewTag("name", testResiliencyName),
diag.NewTag("namespace", testResiliencyNamespace),
diag.NewTag(diag.PolicyKey.Name(), string(diag.CircuitBreakerPolicy)),
diag.NewTag(diag.PolicyKey.Name(), string(diag.TimeoutPolicy)),
diag.NewTag(diag.FlowDirectionKey.Name(), string(diag.OutboundPolicyFlowDirection)),
diag.NewTag(diag.TargetKey.Name(), diag.ResiliencyComponentTarget(testStateStoreName, string(resiliency.Statestore))),
diag.NewTag(diag.StatusKey.Name(), string(breaker.StateClosed)),
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
cleanupRegisteredViews()
require.NoError(t, diag.InitMetrics(test.appID, "fakeRuntimeNamespace", nil, nil, false))
test.unitFn()
rows, err := view.RetrieveData(resiliencyCountViewName)
if test.wantErr {
require.Error(t, err)
}
require.NoError(t, err)
require.Len(t, rows, test.wantNumberOfRows)
for _, wantTag := range test.wantTags {
diag.RequireTagExist(t, rows, wantTag)
}
})
}
}
func TestResiliencyCountMonitoringCBStates(t *testing.T) {
tests := []struct {
name string
unitFn func()
wantNumberOfRows int
wantCbStateTagCount map[tag.Tag]int64
}{
{
name: "EndpointPolicyCloseState",
unitFn: func() {
r := createTestResiliency(testResiliencyName, testResiliencyNamespace, "fakeStateStore")
for i := 0; i < 2; i++ {
policyDef := r.EndpointPolicy("fakeApp", "fakeEndpoint")
policyRunner := resiliency.NewRunner[any](context.Background(), policyDef)
_, _ = policyRunner(func(ctx context.Context) (interface{}, error) {
return nil, nil
})
}
},
wantNumberOfRows: 3,
wantCbStateTagCount: map[tag.Tag]int64{diag.NewTag(diag.StatusKey.Name(), "closed"): 2},
},
{
name: "EndpointPolicyOpenState",
unitFn: func() {
r := createTestResiliency(testResiliencyName, testResiliencyNamespace, "fakeStateStore")
for i := 0; i < 3; i++ {
policyDef := r.EndpointPolicy("fakeApp", "fakeEndpoint")
policyRunner := resiliency.NewRunner[any](context.Background(), policyDef)
_, _ = policyRunner(func(ctx context.Context) (interface{}, error) {
return nil, fmt.Errorf("fake error")
})
}
},
wantNumberOfRows: 4,
wantCbStateTagCount: map[tag.Tag]int64{
diag.NewTag(diag.StatusKey.Name(), string(breaker.StateClosed)): 2,
diag.NewTag(diag.StatusKey.Name(), string(breaker.StateOpen)): 1,
},
},
{
name: "EndpointPolicyHalfOpenState",
unitFn: func() {
r := createTestResiliency(testResiliencyName, testResiliencyNamespace, "fakeStateStore")
for i := 0; i < 3; i++ {
policyDef := r.EndpointPolicy("fakeApp", "fakeEndpoint")
policyRunner := resiliency.NewRunner[any](context.Background(), policyDef)
_, _ = policyRunner(func(ctx context.Context) (interface{}, error) {
return nil, fmt.Errorf("fake error")
})
}
// let the circuit breaker go to the half-open state (sleep for 5x the circuit breaker timeout)
time.Sleep(500 * time.Millisecond)
policyDef := r.EndpointPolicy("fakeApp", "fakeEndpoint")
policyRunner := resiliency.NewRunner[any](context.Background(), policyDef)
_, _ = policyRunner(func(ctx context.Context) (interface{}, error) {
return nil, fmt.Errorf("fake error")
})
},
wantNumberOfRows: 5,
wantCbStateTagCount: map[tag.Tag]int64{
diag.NewTag(diag.StatusKey.Name(), string(breaker.StateClosed)): 2,
diag.NewTag(diag.StatusKey.Name(), string(breaker.StateOpen)): 1,
diag.NewTag(diag.StatusKey.Name(), string(breaker.StateHalfOpen)): 1,
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
cleanupRegisteredViews()
require.NoError(t, diag.InitMetrics(testAppID, "fakeRuntimeNamespace", nil, nil, false))
test.unitFn()
rows, err := view.RetrieveData(resiliencyCountViewName)
require.NoError(t, err)
require.Len(t, rows, test.wantNumberOfRows)
wantedTags := []tag.Tag{
diag.NewTag("app_id", testAppID),
diag.NewTag("name", testResiliencyName),
diag.NewTag("namespace", testResiliencyNamespace),
diag.NewTag(diag.PolicyKey.Name(), string(diag.TimeoutPolicy)),
diag.NewTag(diag.PolicyKey.Name(), string(diag.CircuitBreakerPolicy)),
diag.NewTag(diag.PolicyKey.Name(), string(diag.RetryPolicy)),
diag.NewTag(diag.FlowDirectionKey.Name(), string(diag.OutboundPolicyFlowDirection)),
diag.NewTag(diag.TargetKey.Name(), diag.ResiliencyAppTarget("fakeApp")),
}
for _, wantTag := range wantedTags {
diag.RequireTagExist(t, rows, wantTag)
}
for cbTag, wantCount := range test.wantCbStateTagCount {
gotCount := diag.GetValueForObservationWithTagSet(
rows, map[tag.Tag]bool{cbTag: true, diag.NewTag(diag.PolicyKey.Name(), string(diag.CircuitBreakerPolicy)): true})
require.Equal(t, wantCount, gotCount)
}
})
}
}
func TestResiliencyActivationsCountMonitoring(t *testing.T) {
tests := []struct {
name string
unitFn func()
wantNumberOfRows int
wantCbStateTagCount map[tag.Tag]int64
wantTags []tag.Tag
wantRetriesCount int64
wantTimeoutCount int64
wantCBChangeCount int64
}{
{
name: "EndpointPolicyNoActivations",
unitFn: func() {
r := createTestResiliency(testResiliencyName, testResiliencyNamespace, "fakeStateStore")
for i := 0; i < 2; i++ {
policyDef := r.EndpointPolicy("fakeApp", "fakeEndpoint")
policyRunner := resiliency.NewRunner[any](context.Background(), policyDef)
_, _ = policyRunner(func(ctx context.Context) (interface{}, error) {
return nil, nil
})
}
},
wantNumberOfRows: 0,
},
{
name: "EndpointPolicyOneRetryNoCBTrip",
unitFn: func() {
r := createTestResiliency(testResiliencyName, testResiliencyNamespace, "fakeStateStore")
policyDef := r.EndpointPolicy("fakeApp", "fakeEndpoint")
policyRunner := resiliency.NewRunner[any](context.Background(), policyDef)
_, _ = policyRunner(func(ctx context.Context) (interface{}, error) {
return nil, fmt.Errorf("fake error")
})
},
wantNumberOfRows: 1,
wantRetriesCount: 1,
wantTags: []tag.Tag{
diag.NewTag(diag.PolicyKey.Name(), string(diag.RetryPolicy)),
},
wantCbStateTagCount: map[tag.Tag]int64{
diag.NewTag(diag.StatusKey.Name(), string(breaker.StateClosed)): 0,
diag.NewTag(diag.StatusKey.Name(), string(breaker.StateOpen)): 0,
},
},
{
name: "EndpointPolicyTwoRetryWithCBTrip",
unitFn: func() {
r := createTestResiliency(testResiliencyName, testResiliencyNamespace, "fakeStateStore")
policyDef := r.EndpointPolicy("fakeApp", "fakeEndpoint")
for i := 0; i < 2; i++ {
policyRunner := resiliency.NewRunner[any](context.Background(), policyDef)
_, _ = policyRunner(func(ctx context.Context) (interface{}, error) {
return nil, fmt.Errorf("fake error")
})
}
},
wantNumberOfRows: 2,
wantRetriesCount: 2,
wantTags: []tag.Tag{
diag.NewTag(diag.PolicyKey.Name(), string(diag.RetryPolicy)),
diag.NewTag(diag.PolicyKey.Name(), string(diag.CircuitBreakerPolicy)),
},
wantCbStateTagCount: map[tag.Tag]int64{
diag.NewTag(diag.StatusKey.Name(), string(breaker.StateClosed)): 0,
diag.NewTag(diag.StatusKey.Name(), string(breaker.StateOpen)): 1,
},
},
{
name: "EndpointPolicyTwoRetryWithCBTripTimeout",
unitFn: func() {
r := createTestResiliency(testResiliencyName, testResiliencyNamespace, "fakeStateStore")
policyDef := r.EndpointPolicy("fakeApp", "fakeEndpoint")
policyRunner := resiliency.NewRunner[any](context.Background(), policyDef)
_, _ = policyRunner(func(ctx context.Context) (interface{}, error) {
time.Sleep(500 * time.Millisecond)
return nil, fmt.Errorf("fake error")
})
policyRunner = resiliency.NewRunner[any](context.Background(), policyDef)
_, _ = policyRunner(func(ctx context.Context) (interface{}, error) {
return nil, fmt.Errorf("fake error")
})
},
wantNumberOfRows: 3,
wantRetriesCount: 2,
wantTimeoutCount: 1,
wantTags: []tag.Tag{
diag.NewTag(diag.PolicyKey.Name(), string(diag.RetryPolicy)),
diag.NewTag(diag.PolicyKey.Name(), string(diag.TimeoutPolicy)),
diag.NewTag(diag.PolicyKey.Name(), string(diag.CircuitBreakerPolicy)),
},
wantCbStateTagCount: map[tag.Tag]int64{
diag.NewTag(diag.StatusKey.Name(), string(breaker.StateClosed)): 0,
diag.NewTag(diag.StatusKey.Name(), string(breaker.StateOpen)): 1,
},
},
{
name: "EndpointPolicyOpenAndCloseState",
unitFn: func() {
r := createTestResiliency(testResiliencyName, testResiliencyNamespace, "fakeStateStore")
for i := 0; i < 2; i++ {
policyDef := r.EndpointPolicy("fakeApp", "fakeEndpoint")
policyRunner := resiliency.NewRunner[any](context.Background(), policyDef)
_, _ = policyRunner(func(ctx context.Context) (interface{}, error) {
return nil, fmt.Errorf("fake error")
})
}
// let the circuit breaker go to the half-open state (sleep well past the circuit breaker timeout) and then return success to close it
time.Sleep(1000 * time.Millisecond)
policyDef := r.EndpointPolicy("fakeApp", "fakeEndpoint")
policyRunner := resiliency.NewRunner[any](context.Background(), policyDef)
_, _ = policyRunner(func(ctx context.Context) (interface{}, error) {
return nil, nil
})
// now open the circuit breaker again
for i := 0; i < 2; i++ {
policyDef := r.EndpointPolicy("fakeApp", "fakeEndpoint")
policyRunner := resiliency.NewRunner[any](context.Background(), policyDef)
_, _ = policyRunner(func(ctx context.Context) (interface{}, error) {
return nil, fmt.Errorf("fake error")
})
}
},
wantNumberOfRows: 3,
wantRetriesCount: 4,
wantTags: []tag.Tag{
diag.NewTag(diag.PolicyKey.Name(), string(diag.RetryPolicy)),
diag.NewTag(diag.PolicyKey.Name(), string(diag.CircuitBreakerPolicy)),
},
wantCbStateTagCount: map[tag.Tag]int64{
diag.NewTag(diag.StatusKey.Name(), string(breaker.StateClosed)): 1,
diag.NewTag(diag.StatusKey.Name(), string(breaker.StateOpen)): 2,
},
},
}
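// Activations are only recorded when a policy actually fires (a retry attempt, a timeout,
// or a circuit breaker state change), which is why the first case expects zero rows.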
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
cleanupRegisteredViews()
require.NoError(t, diag.InitMetrics(testAppID, "fakeRuntimeNamespace", nil, nil, false))
test.unitFn()
rows, err := view.RetrieveData(resiliencyActivationViewName)
require.NoError(t, err)
require.Len(t, rows, test.wantNumberOfRows)
if test.wantNumberOfRows == 0 {
return
}
wantedTags := []tag.Tag{
diag.NewTag("app_id", testAppID),
diag.NewTag("name", testResiliencyName),
diag.NewTag("namespace", testResiliencyNamespace),
diag.NewTag(diag.FlowDirectionKey.Name(), string(diag.OutboundPolicyFlowDirection)),
diag.NewTag(diag.TargetKey.Name(), diag.ResiliencyAppTarget("fakeApp")),
}
wantedTags = append(wantedTags, test.wantTags...)
for _, wantTag := range wantedTags {
diag.RequireTagExist(t, rows, wantTag)
}
for cbTag, wantCount := range test.wantCbStateTagCount {
gotCount := diag.GetValueForObservationWithTagSet(
rows, map[tag.Tag]bool{cbTag: true, diag.NewTag(diag.PolicyKey.Name(), string(diag.CircuitBreakerPolicy)): true})
require.Equal(t, wantCount, gotCount)
}
gotRetriesCount := diag.GetValueForObservationWithTagSet(
rows, map[tag.Tag]bool{diag.NewTag(diag.PolicyKey.Name(), string(diag.RetryPolicy)): true})
require.Equal(t, test.wantRetriesCount, gotRetriesCount)
gotTimeoutCount := diag.GetValueForObservationWithTagSet(
rows, map[tag.Tag]bool{diag.NewTag(diag.PolicyKey.Name(), string(diag.TimeoutPolicy)): true})
require.Equal(t, test.wantTimeoutCount, gotTimeoutCount)
})
}
}
func createTestResiliency(resiliencyName string, resiliencyNamespace string, stateStoreName string) *resiliency.Resiliency {
r := resiliency.FromConfigurations(logger.NewLogger("fake-logger"), newTestResiliencyConfig(
resiliencyName,
resiliencyNamespace,
"fakeApp",
"fakeActor",
stateStoreName,
))
return r
}
func createDefaultTestResiliency(resiliencyName string, resiliencyNamespace string) *resiliency.Resiliency {
r := resiliency.FromConfigurations(logger.NewLogger("fake-logger"), newTestDefaultResiliencyConfig(
resiliencyName, resiliencyNamespace,
))
return r
}
func TestResiliencyLoadedMonitoring(t *testing.T) {
t.Run(resiliencyLoadedViewName, func(t *testing.T) {
cleanupRegisteredViews()
require.NoError(t, diag.InitMetrics(testAppID, "fakeRuntimeNamespace", nil, nil, false))
_ = createTestResiliency(testResiliencyName, testResiliencyNamespace, "fakeStoreName")
rows, err := view.RetrieveData(resiliencyLoadedViewName)
require.NoError(t, err)
require.Len(t, rows, 1)
diag.RequireTagExist(t, rows, diag.NewTag("app_id", testAppID))
diag.RequireTagExist(t, rows, diag.NewTag("name", testResiliencyName))
diag.RequireTagExist(t, rows, diag.NewTag("namespace", testResiliencyNamespace))
})
}
func newTestDefaultResiliencyConfig(resiliencyName, resiliencyNamespace string) *resiliencyV1alpha.Resiliency {
return &resiliencyV1alpha.Resiliency{
ObjectMeta: metav1.ObjectMeta{
Name: resiliencyName,
Namespace: resiliencyNamespace,
},
Spec: resiliencyV1alpha.ResiliencySpec{
Policies: resiliencyV1alpha.Policies{
CircuitBreakers: map[string]resiliencyV1alpha.CircuitBreaker{
"DefaultComponentCircuitBreakerPolicy": {
Interval: "0",
Timeout: "100ms",
Trip: "consecutiveFailures > 2",
MaxRequests: 1,
},
},
Retries: map[string]resiliencyV1alpha.Retry{
"DefaultComponentInboundRetryPolicy": {
Policy: "constant",
Duration: "10ms",
MaxRetries: ptr.Of(3),
},
},
Timeouts: map[string]string{
"DefaultTimeoutPolicy": "100ms",
},
},
},
}
}
func newTestResiliencyConfig(resiliencyName, resiliencyNamespace, appName, actorType, storeName string) *resiliencyV1alpha.Resiliency {
return &resiliencyV1alpha.Resiliency{
ObjectMeta: metav1.ObjectMeta{
Name: resiliencyName,
Namespace: resiliencyNamespace,
},
Spec: resiliencyV1alpha.ResiliencySpec{
Policies: resiliencyV1alpha.Policies{
Timeouts: map[string]string{
"testTimeout": "100ms",
},
Retries: map[string]resiliencyV1alpha.Retry{
"testRetry": {
Policy: "constant",
Duration: "10ms",
MaxRetries: ptr.Of(3),
},
},
CircuitBreakers: map[string]resiliencyV1alpha.CircuitBreaker{
"testCB": {
Interval: "0",
Timeout: "100ms",
Trip: "consecutiveFailures > 4",
MaxRequests: 1,
},
},
},
Targets: resiliencyV1alpha.Targets{
Apps: map[string]resiliencyV1alpha.EndpointPolicyNames{
appName: {
Timeout: "testTimeout",
Retry: "testRetry",
CircuitBreaker: "testCB",
CircuitBreakerCacheSize: 100,
},
},
Actors: map[string]resiliencyV1alpha.ActorPolicyNames{
actorType: {
Timeout: "testTimeout",
Retry: "testRetry",
CircuitBreaker: "testCB",
CircuitBreakerScope: "both",
CircuitBreakerCacheSize: 5000,
},
},
Components: map[string]resiliencyV1alpha.ComponentPolicyNames{
storeName: {
Outbound: resiliencyV1alpha.PolicyNames{
Timeout: "testTimeout",
Retry: "testRetry",
CircuitBreaker: "testCB",
},
Inbound: resiliencyV1alpha.PolicyNames{
Timeout: "testTimeout",
Retry: "testRetry",
CircuitBreaker: "testCB",
},
},
},
},
},
}
}
|
mikeee/dapr
|
pkg/diagnostics/resiliency_monitoring_test.go
|
GO
|
mit
| 22,466 |
package diagnostics
import (
"context"
"strconv"
"time"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
diagUtils "github.com/dapr/dapr/pkg/diagnostics/utils"
"github.com/dapr/dapr/pkg/security/spiffe"
)
// Tag keys.
var (
componentKey = tag.MustNewKey("component")
failReasonKey = tag.MustNewKey("reason")
operationKey = tag.MustNewKey("operation")
actorTypeKey = tag.MustNewKey("actor_type")
trustDomainKey = tag.MustNewKey("trustDomain")
namespaceKey = tag.MustNewKey("namespace")
resiliencyNameKey = tag.MustNewKey("name")
policyKey = tag.MustNewKey("policy")
componentNameKey = tag.MustNewKey("componentName")
destinationAppIDKey = tag.MustNewKey("dst_app_id")
sourceAppIDKey = tag.MustNewKey("src_app_id")
statusKey = tag.MustNewKey("status")
flowDirectionKey = tag.MustNewKey("flow_direction")
targetKey = tag.MustNewKey("target")
typeKey = tag.MustNewKey("type")
)
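// Values for the "type" tag, distinguishing unary service invocation from proxied streaming calls.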
const (
typeUnary = "unary"
typeStreaming = "streaming"
)
// serviceMetrics holds dapr runtime metric monitoring methods.
type serviceMetrics struct {
// component metrics
componentLoaded *stats.Int64Measure
componentInitCompleted *stats.Int64Measure
componentInitFailed *stats.Int64Measure
// mTLS metrics
mtlsInitCompleted *stats.Int64Measure
mtlsInitFailed *stats.Int64Measure
mtlsWorkloadCertRotated *stats.Int64Measure
mtlsWorkloadCertRotatedFailed *stats.Int64Measure
// Actor metrics
actorStatusReportTotal *stats.Int64Measure
actorStatusReportFailedTotal *stats.Int64Measure
actorTableOperationRecvTotal *stats.Int64Measure
actorRebalancedTotal *stats.Int64Measure
actorDeactivationTotal *stats.Int64Measure
actorDeactivationFailedTotal *stats.Int64Measure
actorPendingCalls *stats.Int64Measure
actorReminders *stats.Int64Measure
actorReminderFiredTotal *stats.Int64Measure
actorTimers *stats.Int64Measure
actorTimerFiredTotal *stats.Int64Measure
// Access Control Lists for Service Invocation metrics
appPolicyActionAllowed *stats.Int64Measure
globalPolicyActionAllowed *stats.Int64Measure
appPolicyActionBlocked *stats.Int64Measure
globalPolicyActionBlocked *stats.Int64Measure
// Service Invocation metrics
serviceInvocationRequestSentTotal *stats.Int64Measure
serviceInvocationRequestReceivedTotal *stats.Int64Measure
serviceInvocationResponseSentTotal *stats.Int64Measure
serviceInvocationResponseReceivedTotal *stats.Int64Measure
serviceInvocationResponseReceivedLatency *stats.Float64Measure
appID string
ctx context.Context
enabled bool
}
// newServiceMetrics returns serviceMetrics instance with default service metric stats.
func newServiceMetrics() *serviceMetrics {
return &serviceMetrics{
// Runtime Component metrics
componentLoaded: stats.Int64(
"runtime/component/loaded",
"The number of successfully loaded components.",
stats.UnitDimensionless),
componentInitCompleted: stats.Int64(
"runtime/component/init_total",
"The number of initialized components.",
stats.UnitDimensionless),
componentInitFailed: stats.Int64(
"runtime/component/init_fail_total",
"The number of component initialization failures.",
stats.UnitDimensionless),
// mTLS
mtlsInitCompleted: stats.Int64(
"runtime/mtls/init_total",
"The number of successful mTLS authenticator initialization.",
stats.UnitDimensionless),
mtlsInitFailed: stats.Int64(
"runtime/mtls/init_fail_total",
"The number of mTLS authenticator init failures.",
stats.UnitDimensionless),
mtlsWorkloadCertRotated: stats.Int64(
"runtime/mtls/workload_cert_rotated_total",
"The number of the successful workload certificate rotations.",
stats.UnitDimensionless),
mtlsWorkloadCertRotatedFailed: stats.Int64(
"runtime/mtls/workload_cert_rotated_fail_total",
"The number of the failed workload certificate rotations.",
stats.UnitDimensionless),
// Actor
actorStatusReportTotal: stats.Int64(
"runtime/actor/status_report_total",
"The number of the successful status reports to placement service.",
stats.UnitDimensionless),
actorStatusReportFailedTotal: stats.Int64(
"runtime/actor/status_report_fail_total",
"The number of the failed status reports to placement service.",
stats.UnitDimensionless),
actorTableOperationRecvTotal: stats.Int64(
"runtime/actor/table_operation_recv_total",
"The number of the received actor placement table operations.",
stats.UnitDimensionless),
actorRebalancedTotal: stats.Int64(
"runtime/actor/rebalanced_total",
"The number of the actor rebalance requests.",
stats.UnitDimensionless),
actorDeactivationTotal: stats.Int64(
"runtime/actor/deactivated_total",
"The number of the successful actor deactivation.",
stats.UnitDimensionless),
actorDeactivationFailedTotal: stats.Int64(
"runtime/actor/deactivated_failed_total",
"The number of the failed actor deactivation.",
stats.UnitDimensionless),
actorPendingCalls: stats.Int64(
"runtime/actor/pending_actor_calls",
"The number of pending actor calls waiting to acquire the per-actor lock.",
stats.UnitDimensionless),
actorTimers: stats.Int64(
"runtime/actor/timers",
"The number of actor timer requests.",
stats.UnitDimensionless),
actorReminders: stats.Int64(
"runtime/actor/reminders",
"The number of actor reminder requests.",
stats.UnitDimensionless),
actorReminderFiredTotal: stats.Int64(
"runtime/actor/reminders_fired_total",
"The number of actor reminders fired requests.",
stats.UnitDimensionless),
actorTimerFiredTotal: stats.Int64(
"runtime/actor/timers_fired_total",
"The number of actor timers fired requests.",
stats.UnitDimensionless),
// Access Control Lists for service invocation
appPolicyActionAllowed: stats.Int64(
"runtime/acl/app_policy_action_allowed_total",
"The number of requests allowed by the app specific action specified in the access control policy.",
stats.UnitDimensionless),
globalPolicyActionAllowed: stats.Int64(
"runtime/acl/global_policy_action_allowed_total",
"The number of requests allowed by the global action specified in the access control policy.",
stats.UnitDimensionless),
appPolicyActionBlocked: stats.Int64(
"runtime/acl/app_policy_action_blocked_total",
"The number of requests blocked by the app specific action specified in the access control policy.",
stats.UnitDimensionless),
globalPolicyActionBlocked: stats.Int64(
"runtime/acl/global_policy_action_blocked_total",
"The number of requests blocked by the global action specified in the access control policy.",
stats.UnitDimensionless),
// Service Invocation
serviceInvocationRequestSentTotal: stats.Int64(
"runtime/service_invocation/req_sent_total",
"The number of requests sent via service invocation.",
stats.UnitDimensionless),
serviceInvocationRequestReceivedTotal: stats.Int64(
"runtime/service_invocation/req_recv_total",
"The number of requests received via service invocation.",
stats.UnitDimensionless),
serviceInvocationResponseSentTotal: stats.Int64(
"runtime/service_invocation/res_sent_total",
"The number of responses sent via service invocation.",
stats.UnitDimensionless),
serviceInvocationResponseReceivedTotal: stats.Int64(
"runtime/service_invocation/res_recv_total",
"The number of responses received via service invocation.",
stats.UnitDimensionless),
serviceInvocationResponseReceivedLatency: stats.Float64(
"runtime/service_invocation/res_recv_latency_ms",
"The latency of service invocation response.",
stats.UnitMilliseconds),
// TODO: use the correct context for each request
ctx: context.Background(),
enabled: false,
}
}
// Init initializes the metrics views.
func (s *serviceMetrics) Init(appID string) error {
s.appID = appID
s.enabled = true
return view.Register(
diagUtils.NewMeasureView(s.componentLoaded, []tag.Key{appIDKey}, view.Count()),
diagUtils.NewMeasureView(s.componentInitCompleted, []tag.Key{appIDKey, componentKey}, view.Count()),
diagUtils.NewMeasureView(s.componentInitFailed, []tag.Key{appIDKey, componentKey, failReasonKey, componentNameKey}, view.Count()),
diagUtils.NewMeasureView(s.mtlsInitCompleted, []tag.Key{appIDKey}, view.Count()),
diagUtils.NewMeasureView(s.mtlsInitFailed, []tag.Key{appIDKey, failReasonKey}, view.Count()),
diagUtils.NewMeasureView(s.mtlsWorkloadCertRotated, []tag.Key{appIDKey}, view.Count()),
diagUtils.NewMeasureView(s.mtlsWorkloadCertRotatedFailed, []tag.Key{appIDKey, failReasonKey}, view.Count()),
diagUtils.NewMeasureView(s.actorStatusReportTotal, []tag.Key{appIDKey, actorTypeKey, operationKey}, view.Count()),
diagUtils.NewMeasureView(s.actorStatusReportFailedTotal, []tag.Key{appIDKey, actorTypeKey, operationKey, failReasonKey}, view.Count()),
diagUtils.NewMeasureView(s.actorTableOperationRecvTotal, []tag.Key{appIDKey, actorTypeKey, operationKey}, view.Count()),
diagUtils.NewMeasureView(s.actorRebalancedTotal, []tag.Key{appIDKey, actorTypeKey}, view.Count()),
diagUtils.NewMeasureView(s.actorDeactivationTotal, []tag.Key{appIDKey, actorTypeKey}, view.Count()),
diagUtils.NewMeasureView(s.actorDeactivationFailedTotal, []tag.Key{appIDKey, actorTypeKey}, view.Count()),
diagUtils.NewMeasureView(s.actorPendingCalls, []tag.Key{appIDKey, actorTypeKey}, view.Count()),
diagUtils.NewMeasureView(s.actorTimers, []tag.Key{appIDKey, actorTypeKey}, view.LastValue()),
diagUtils.NewMeasureView(s.actorReminders, []tag.Key{appIDKey, actorTypeKey}, view.LastValue()),
diagUtils.NewMeasureView(s.actorReminderFiredTotal, []tag.Key{appIDKey, actorTypeKey, successKey}, view.Count()),
diagUtils.NewMeasureView(s.actorTimerFiredTotal, []tag.Key{appIDKey, actorTypeKey, successKey}, view.Count()),
diagUtils.NewMeasureView(s.appPolicyActionAllowed, []tag.Key{appIDKey, trustDomainKey, namespaceKey}, view.Count()),
diagUtils.NewMeasureView(s.globalPolicyActionAllowed, []tag.Key{appIDKey, trustDomainKey, namespaceKey}, view.Count()),
diagUtils.NewMeasureView(s.appPolicyActionBlocked, []tag.Key{appIDKey, trustDomainKey, namespaceKey}, view.Count()),
diagUtils.NewMeasureView(s.globalPolicyActionBlocked, []tag.Key{appIDKey, trustDomainKey, namespaceKey}, view.Count()),
diagUtils.NewMeasureView(s.serviceInvocationRequestSentTotal, []tag.Key{appIDKey, destinationAppIDKey, typeKey}, view.Count()),
diagUtils.NewMeasureView(s.serviceInvocationRequestReceivedTotal, []tag.Key{appIDKey, sourceAppIDKey}, view.Count()),
diagUtils.NewMeasureView(s.serviceInvocationResponseSentTotal, []tag.Key{appIDKey, destinationAppIDKey, statusKey}, view.Count()),
diagUtils.NewMeasureView(s.serviceInvocationResponseReceivedTotal, []tag.Key{appIDKey, sourceAppIDKey, statusKey, typeKey}, view.Count()),
diagUtils.NewMeasureView(s.serviceInvocationResponseReceivedLatency, []tag.Key{appIDKey, sourceAppIDKey, statusKey}, defaultLatencyDistribution),
)
}
// ComponentLoaded records metric when component is loaded successfully.
func (s *serviceMetrics) ComponentLoaded() {
if s.enabled {
stats.RecordWithTags(s.ctx, diagUtils.WithTags(s.componentLoaded.Name(), appIDKey, s.appID), s.componentLoaded.M(1))
}
}
// ComponentInitialized records metric when component is initialized.
func (s *serviceMetrics) ComponentInitialized(component string) {
if s.enabled {
stats.RecordWithTags(
s.ctx,
diagUtils.WithTags(s.componentInitCompleted.Name(), appIDKey, s.appID, componentKey, component),
s.componentInitCompleted.M(1))
}
}
// ComponentInitFailed records metric when component initialization fails.
func (s *serviceMetrics) ComponentInitFailed(component string, reason string, name string) {
if s.enabled {
stats.RecordWithTags(
s.ctx,
diagUtils.WithTags(s.componentInitFailed.Name(), appIDKey, s.appID, componentKey, component, failReasonKey, reason, componentNameKey, name),
s.componentInitFailed.M(1))
}
}
// MTLSInitCompleted records metric when mTLS authenticator initialization completes.
func (s *serviceMetrics) MTLSInitCompleted() {
if s.enabled {
stats.RecordWithTags(s.ctx, diagUtils.WithTags(s.mtlsInitCompleted.Name(), appIDKey, s.appID), s.mtlsInitCompleted.M(1))
}
}
// MTLSInitFailed records metric when mTLS authenticator initialization fails.
func (s *serviceMetrics) MTLSInitFailed(reason string) {
if s.enabled {
stats.RecordWithTags(
s.ctx, diagUtils.WithTags(s.mtlsInitFailed.Name(), appIDKey, s.appID, failReasonKey, reason),
s.mtlsInitFailed.M(1))
}
}
// MTLSWorkLoadCertRotationCompleted records metric when workload certificate rotation succeeds.
func (s *serviceMetrics) MTLSWorkLoadCertRotationCompleted() {
if s.enabled {
stats.RecordWithTags(s.ctx, diagUtils.WithTags(s.mtlsWorkloadCertRotated.Name(), appIDKey, s.appID), s.mtlsWorkloadCertRotated.M(1))
}
}
// MTLSWorkLoadCertRotationFailed records metric when workload certificate rotation fails.
func (s *serviceMetrics) MTLSWorkLoadCertRotationFailed(reason string) {
if s.enabled {
stats.RecordWithTags(
s.ctx, diagUtils.WithTags(s.mtlsWorkloadCertRotatedFailed.Name(), appIDKey, s.appID, failReasonKey, reason),
s.mtlsWorkloadCertRotatedFailed.M(1))
}
}
// ActorStatusReported records metrics when status is reported to placement service.
func (s *serviceMetrics) ActorStatusReported(operation string) {
if s.enabled {
stats.RecordWithTags(
s.ctx, diagUtils.WithTags(s.actorStatusReportTotal.Name(), appIDKey, s.appID, operationKey, operation),
s.actorStatusReportTotal.M(1))
}
}
// ActorStatusReportFailed records metrics when a status report to the placement service fails.
func (s *serviceMetrics) ActorStatusReportFailed(operation string, reason string) {
if s.enabled {
stats.RecordWithTags(
s.ctx, diagUtils.WithTags(s.actorStatusReportFailedTotal.Name(), appIDKey, s.appID, operationKey, operation, failReasonKey, reason),
s.actorStatusReportFailedTotal.M(1))
}
}
// ActorPlacementTableOperationReceived records metric when runtime receives table operation.
func (s *serviceMetrics) ActorPlacementTableOperationReceived(operation string) {
if s.enabled {
stats.RecordWithTags(
s.ctx, diagUtils.WithTags(s.actorTableOperationRecvTotal.Name(), appIDKey, s.appID, operationKey, operation),
s.actorTableOperationRecvTotal.M(1))
}
}
// ActorRebalanced records metric when actors are drained.
func (s *serviceMetrics) ActorRebalanced(actorType string) {
if s.enabled {
stats.RecordWithTags(
s.ctx,
diagUtils.WithTags(s.actorRebalancedTotal.Name(), appIDKey, s.appID, actorTypeKey, actorType),
s.actorRebalancedTotal.M(1))
}
}
// ActorDeactivated records metric when actor is deactivated.
func (s *serviceMetrics) ActorDeactivated(actorType string) {
if s.enabled {
stats.RecordWithTags(
s.ctx,
diagUtils.WithTags(s.actorDeactivationTotal.Name(), appIDKey, s.appID, actorTypeKey, actorType),
s.actorDeactivationTotal.M(1))
}
}
// ActorDeactivationFailed records metric when actor deactivation fails.
func (s *serviceMetrics) ActorDeactivationFailed(actorType string, reason string) {
if s.enabled {
stats.RecordWithTags(
s.ctx,
diagUtils.WithTags(s.actorDeactivationFailedTotal.Name(), appIDKey, s.appID, actorTypeKey, actorType, failReasonKey, reason),
s.actorDeactivationFailedTotal.M(1))
}
}
// ActorReminderFired records metric when actor reminder is fired.
func (s *serviceMetrics) ActorReminderFired(actorType string, success bool) {
if s.enabled {
stats.RecordWithTags(
s.ctx,
diagUtils.WithTags(s.actorReminderFiredTotal.Name(), appIDKey, s.appID, actorTypeKey, actorType, successKey, strconv.FormatBool(success)),
s.actorReminderFiredTotal.M(1))
}
}
// ActorTimerFired records metric when actor timer is fired.
func (s *serviceMetrics) ActorTimerFired(actorType string, success bool) {
if s.enabled {
stats.RecordWithTags(
s.ctx,
diagUtils.WithTags(s.actorTimerFiredTotal.Name(), appIDKey, s.appID, actorTypeKey, actorType, successKey, strconv.FormatBool(success)),
s.actorTimerFiredTotal.M(1))
}
}
// ActorReminders records the current number of reminders for an actor type.
func (s *serviceMetrics) ActorReminders(actorType string, reminders int64) {
if s.enabled {
stats.RecordWithTags(
s.ctx,
diagUtils.WithTags(s.actorReminders.Name(), appIDKey, s.appID, actorTypeKey, actorType),
s.actorReminders.M(reminders))
}
}
// ActorTimers records the current number of timers for an actor type.
func (s *serviceMetrics) ActorTimers(actorType string, timers int64) {
if s.enabled {
stats.RecordWithTags(
s.ctx,
diagUtils.WithTags(s.actorTimers.Name(), appIDKey, s.appID, actorTypeKey, actorType),
s.actorTimers.M(timers))
}
}
// ReportActorPendingCalls records the current pending actor locks.
func (s *serviceMetrics) ReportActorPendingCalls(actorType string, pendingLocks int32) {
if s.enabled {
stats.RecordWithTags(
s.ctx,
diagUtils.WithTags(s.actorPendingCalls.Name(), appIDKey, s.appID, actorTypeKey, actorType),
s.actorPendingCalls.M(int64(pendingLocks)))
}
}
// RequestAllowedByAppAction records the requests allowed due to a match with the action specified in the access control policy for the app.
func (s *serviceMetrics) RequestAllowedByAppAction(spiffeID *spiffe.Parsed) {
if s.enabled {
stats.RecordWithTags(
s.ctx,
diagUtils.WithTags(
s.appPolicyActionAllowed.Name(),
appIDKey, spiffeID.AppID(),
trustDomainKey, spiffeID.TrustDomain().String(),
namespaceKey, spiffeID.Namespace()),
s.appPolicyActionAllowed.M(1))
}
}
// RequestBlockedByAppAction records the requests blocked due to a match with the action specified in the access control policy for the app.
func (s *serviceMetrics) RequestBlockedByAppAction(spiffeID *spiffe.Parsed) {
if s.enabled {
stats.RecordWithTags(
s.ctx,
diagUtils.WithTags(
s.appPolicyActionBlocked.Name(),
appIDKey, spiffeID.AppID(),
trustDomainKey, spiffeID.TrustDomain().String(),
namespaceKey, spiffeID.Namespace()),
s.appPolicyActionBlocked.M(1))
}
}
// RequestAllowedByGlobalAction records the requests allowed due to a match with the global action in the access control policy.
func (s *serviceMetrics) RequestAllowedByGlobalAction(spiffeID *spiffe.Parsed) {
if s.enabled {
stats.RecordWithTags(
s.ctx,
diagUtils.WithTags(
s.globalPolicyActionAllowed.Name(),
appIDKey, spiffeID.AppID(),
trustDomainKey, spiffeID.TrustDomain().String(),
namespaceKey, spiffeID.Namespace()),
s.globalPolicyActionAllowed.M(1))
}
}
// RequestBlockedByGlobalAction records the requests blocked due to a match with the global action in the access control policy.
func (s *serviceMetrics) RequestBlockedByGlobalAction(spiffeID *spiffe.Parsed) {
if s.enabled {
stats.RecordWithTags(
s.ctx,
diagUtils.WithTags(
s.globalPolicyActionBlocked.Name(),
appIDKey, spiffeID.AppID(),
trustDomainKey, spiffeID.TrustDomain().String(),
namespaceKey, spiffeID.Namespace()),
s.globalPolicyActionBlocked.M(1))
}
}
// ServiceInvocationRequestSent records the number of service invocation requests sent.
func (s *serviceMetrics) ServiceInvocationRequestSent(destinationAppID string) {
if s.enabled {
stats.RecordWithTags(
s.ctx,
diagUtils.WithTags(
s.serviceInvocationRequestSentTotal.Name(),
appIDKey, s.appID,
destinationAppIDKey, destinationAppID,
typeKey, typeUnary,
),
s.serviceInvocationRequestSentTotal.M(1))
}
}
// ServiceInvocationStreamingRequestSent records the number of service invocation requests sent for streaming (proxied) calls.
func (s *serviceMetrics) ServiceInvocationStreamingRequestSent(destinationAppID string) {
if s.enabled {
stats.RecordWithTags(
s.ctx,
diagUtils.WithTags(
s.serviceInvocationRequestSentTotal.Name(),
appIDKey, s.appID,
destinationAppIDKey, destinationAppID,
typeKey, typeStreaming),
s.serviceInvocationRequestSentTotal.M(1))
}
}
// ServiceInvocationRequestReceived records the number of service invocation requests received.
func (s *serviceMetrics) ServiceInvocationRequestReceived(sourceAppID string) {
if s.enabled {
stats.RecordWithTags(
s.ctx,
diagUtils.WithTags(
s.serviceInvocationRequestReceivedTotal.Name(),
appIDKey, s.appID,
sourceAppIDKey, sourceAppID),
s.serviceInvocationRequestReceivedTotal.M(1))
}
}
// ServiceInvocationResponseSent records the number of service invocation responses sent.
func (s *serviceMetrics) ServiceInvocationResponseSent(destinationAppID string, status int32) {
if s.enabled {
statusCode := strconv.Itoa(int(status))
stats.RecordWithTags(
s.ctx,
diagUtils.WithTags(
s.serviceInvocationResponseSentTotal.Name(),
appIDKey, s.appID,
destinationAppIDKey, destinationAppID,
statusKey, statusCode),
s.serviceInvocationResponseSentTotal.M(1))
}
}
// ServiceInvocationResponseReceived records the number of service invocation responses received.
func (s *serviceMetrics) ServiceInvocationResponseReceived(sourceAppID string, status int32, start time.Time) {
if s.enabled {
statusCode := strconv.Itoa(int(status))
stats.RecordWithTags(
s.ctx,
diagUtils.WithTags(
s.serviceInvocationResponseReceivedTotal.Name(),
appIDKey, s.appID,
sourceAppIDKey, sourceAppID,
statusKey, statusCode,
typeKey, typeUnary),
s.serviceInvocationResponseReceivedTotal.M(1))
stats.RecordWithTags(
s.ctx,
diagUtils.WithTags(
s.serviceInvocationResponseReceivedLatency.Name(),
appIDKey, s.appID,
sourceAppIDKey, sourceAppID,
statusKey, statusCode),
s.serviceInvocationResponseReceivedLatency.M(ElapsedSince(start)))
}
}
// ServiceInvocationStreamingResponseReceived records the number of service invocation responses received for streaming operations.
// This is mainly used to record errors when proxying gRPC streaming calls.
func (s *serviceMetrics) ServiceInvocationStreamingResponseReceived(sourceAppID string, status int32) {
if s.enabled {
statusCode := strconv.Itoa(int(status))
stats.RecordWithTags(
s.ctx,
diagUtils.WithTags(
s.serviceInvocationResponseReceivedTotal.Name(),
appIDKey, s.appID,
sourceAppIDKey, sourceAppID,
statusKey, statusCode,
typeKey, typeStreaming),
s.serviceInvocationResponseReceivedTotal.M(1))
}
}
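// Illustrative sketch, not part of the original file: how a client-side
// service invocation round trip could be recorded with these metrics. The
// destination app ID and status code are placeholder values, and s is assumed
// to have been initialized via Init elsewhere in the runtime.
func exampleRecordInvocationRoundTrip(s *serviceMetrics) {
	start := time.Now()
	s.ServiceInvocationRequestSent("target-app")
	// ... the actual call to the destination app would happen here ...
	s.ServiceInvocationResponseReceived("target-app", 200, start)
}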
|
mikeee/dapr
|
pkg/diagnostics/service_monitoring.go
|
GO
|
mit
| 22,497 |
package diagnostics
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"go.opencensus.io/stats/view"
)
func servicesMetrics() *serviceMetrics {
s := newServiceMetrics()
s.Init("testAppId")
return s
}
func TestServiceInvocation(t *testing.T) {
t.Run("record service invocation request sent", func(t *testing.T) {
s := servicesMetrics()
s.ServiceInvocationRequestSent("testAppId2")
viewData, _ := view.RetrieveData("runtime/service_invocation/req_sent_total")
v := view.Find("runtime/service_invocation/req_sent_total")
allTagsPresent(t, v, viewData[0].Tags)
RequireTagExist(t, viewData, NewTag(typeKey.Name(), typeUnary))
})
t.Run("record service invocation streaming request sent", func(t *testing.T) {
s := servicesMetrics()
s.ServiceInvocationStreamingRequestSent("testAppId2")
viewData, _ := view.RetrieveData("runtime/service_invocation/req_sent_total")
v := view.Find("runtime/service_invocation/req_sent_total")
allTagsPresent(t, v, viewData[0].Tags)
RequireTagExist(t, viewData, NewTag(typeKey.Name(), typeStreaming))
})
t.Run("record service invocation request received", func(t *testing.T) {
s := servicesMetrics()
s.ServiceInvocationRequestReceived("testAppId")
viewData, _ := view.RetrieveData("runtime/service_invocation/req_recv_total")
v := view.Find("runtime/service_invocation/req_recv_total")
allTagsPresent(t, v, viewData[0].Tags)
})
t.Run("record service invocation response sent", func(t *testing.T) {
s := servicesMetrics()
s.ServiceInvocationResponseSent("testAppId2", 200)
viewData, _ := view.RetrieveData("runtime/service_invocation/res_sent_total")
v := view.Find("runtime/service_invocation/res_sent_total")
allTagsPresent(t, v, viewData[0].Tags)
})
t.Run("record service invocation response received", func(t *testing.T) {
s := servicesMetrics()
s.ServiceInvocationResponseReceived("testAppId", 200, time.Now())
viewData, _ := view.RetrieveData("runtime/service_invocation/res_recv_total")
v := view.Find("runtime/service_invocation/res_recv_total")
allTagsPresent(t, v, viewData[0].Tags)
viewData2, _ := view.RetrieveData("runtime/service_invocation/res_recv_latency_ms")
v2 := view.Find("runtime/service_invocation/res_recv_latency_ms")
allTagsPresent(t, v2, viewData2[0].Tags)
})
}
func TestServiceMonitoringInit(t *testing.T) {
c := servicesMetrics()
assert.True(t, c.enabled)
assert.Equal(t, "testAppId", c.appID)
}
// Export unexported keys for use by the diagnostics_test package only.
var (
FlowDirectionKey = flowDirectionKey
TargetKey = targetKey
StatusKey = statusKey
PolicyKey = policyKey
)
|
mikeee/dapr
|
pkg/diagnostics/service_monitoring_test.go
|
GO
|
mit
| 2,663 |
//go:build unit
package diagnostics
import (
"fmt"
"reflect"
"testing"
"github.com/stretchr/testify/require"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"github.com/dapr/dapr/utils"
)
// NewTag is a helper to create an opencensus tag that can be used in the different helpers here
func NewTag(key string, value string) tag.Tag {
return tag.Tag{
Key: tag.MustNewKey(key),
Value: value,
}
}
// GetValueForObservationWithTagSet is a helper to find a row out of a slice of rows retrieved when executing view.RetrieveData
// This particular row should have the tags present in the tag set.
func GetValueForObservationWithTagSet(rows []*view.Row, wantedTagSetCount map[tag.Tag]bool) int64 {
for _, row := range rows {
foundTags := 0
for _, aTag := range row.Tags {
if wantedTagSetCount[aTag] {
foundTags++
}
}
if foundTags == len(wantedTagSetCount) {
return row.Data.(*view.CountData).Value
}
}
return 0
}
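// Illustrative sketch, not part of the original file: reading the count
// recorded for a specific tag combination. The tag names and values are
// placeholders; rows would normally come from view.RetrieveData.
func exampleCountForAppAndStatus(rows []*view.Row) int64 {
	wanted := map[tag.Tag]bool{
		NewTag("app_id", "myapp"): true,
		NewTag("status", "200"):   true,
	}
	return GetValueForObservationWithTagSet(rows, wanted)
}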
// RequireTagExist tries to find a tag in a slice of rows returned from view.RetrieveData.
func RequireTagExist(t *testing.T, rows []*view.Row, wantedTag tag.Tag) {
t.Helper()
var found bool
outerLoop:
for _, row := range rows {
for _, aTag := range row.Tags {
if reflect.DeepEqual(wantedTag, aTag) {
found = true
break outerLoop
}
}
}
require.True(t, found, fmt.Sprintf("did not find tag (%s) in rows:", wantedTag), rows)
}
// RequireTagNotExist checks that a tag in a slice of rows returned from view.RetrieveData is not present.
func RequireTagNotExist(t *testing.T, rows []*view.Row, wantedTag tag.Tag) {
t.Helper()
var found bool
outerLoop:
for _, row := range rows {
for _, aTag := range row.Tags {
if reflect.DeepEqual(wantedTag, aTag) {
found = true
break outerLoop
}
}
}
require.False(t, found, fmt.Sprintf("found tag (%s) in rows:", wantedTag), rows)
}
// CleanupRegisteredViews is a safe method to remove registered views, to avoid errors when running tests on the same metrics.
func CleanupRegisteredViews(viewNames ...string) {
var views []*view.View
defaultViewsToClean := []string{
"runtime/actor/timers",
"runtime/actor/reminders",
}
// append default views to clean if not already present
for _, v := range defaultViewsToClean {
if !utils.Contains(viewNames, v) {
viewNames = append(viewNames, v)
}
}
for _, v := range viewNames {
if v := view.Find(v); v != nil {
views = append(views, v)
}
}
view.Unregister(views...)
}
|
mikeee/dapr
|
pkg/diagnostics/testutils_unit.go
|
GO
|
mit
| 2,461 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package diagnostics
import (
"context"
"encoding/hex"
"fmt"
"strings"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"github.com/dapr/dapr/pkg/config"
diagConsts "github.com/dapr/dapr/pkg/diagnostics/consts"
diagUtils "github.com/dapr/dapr/pkg/diagnostics/utils"
)
const (
daprHeaderPrefix = "dapr-"
daprHeaderBinSuffix = "-bin"
tracerName = "dapr-diagnostics"
)
var tracer trace.Tracer = otel.Tracer(tracerName)
// SpanContextToW3CString returns the SpanContext string representation.
func SpanContextToW3CString(sc trace.SpanContext) string {
traceID := sc.TraceID()
spanID := sc.SpanID()
traceFlags := sc.TraceFlags()
return fmt.Sprintf("%x-%x-%x-%x",
[]byte{supportedVersion},
traceID[:],
spanID[:],
[]byte{byte(traceFlags)})
}
// TraceStateToW3CString extracts the TraceState from given SpanContext and returns its string representation.
func TraceStateToW3CString(sc trace.SpanContext) string {
return sc.TraceState().String()
}
// SpanContextFromW3CString extracts a span context from the given string, which was earlier produced by SpanContextToW3CString.
func SpanContextFromW3CString(h string) (sc trace.SpanContext, ok bool) {
if h == "" {
return trace.SpanContext{}, false
}
sections := strings.Split(h, "-")
if len(sections) < 4 {
return trace.SpanContext{}, false
}
if len(sections[0]) != 2 {
return trace.SpanContext{}, false
}
ver, err := hex.DecodeString(sections[0])
if err != nil {
return trace.SpanContext{}, false
}
version := int(ver[0])
if version > maxVersion {
return trace.SpanContext{}, false
}
if version == 0 && len(sections) != 4 {
return trace.SpanContext{}, false
}
if len(sections[1]) != 32 {
return trace.SpanContext{}, false
}
tid, err := trace.TraceIDFromHex(sections[1])
if err != nil {
return trace.SpanContext{}, false
}
sc = sc.WithTraceID(tid)
if len(sections[2]) != 16 {
return trace.SpanContext{}, false
}
sid, err := trace.SpanIDFromHex(sections[2])
if err != nil {
return trace.SpanContext{}, false
}
sc = sc.WithSpanID(sid)
opts, err := hex.DecodeString(sections[3])
if err != nil || len(opts) < 1 {
return trace.SpanContext{}, false
}
sc = sc.WithTraceFlags(trace.TraceFlags(opts[0]))
// Don't allow all zero trace or span ID.
if sc.TraceID() == [16]byte{} || sc.SpanID() == [8]byte{} {
return trace.SpanContext{}, false
}
return sc, true
}
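// Illustrative round-trip sketch, not part of the original file: the header
// below is the example traceparent value commonly used in the W3C Trace
// Context spec; parsing it and re-serializing it yields the same string.
func exampleTraceparentRoundTrip() (string, bool) {
	const header = "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01"
	sc, ok := SpanContextFromW3CString(header)
	if !ok {
		return "", false
	}
	return SpanContextToW3CString(sc), true
}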
// TraceStateFromW3CString extracts a TraceState from the given string, which was earlier produced by TraceStateToW3CString.
func TraceStateFromW3CString(h string) *trace.TraceState {
if h == "" {
ts := trace.TraceState{}
return &ts
}
ts, err := trace.ParseTraceState(h)
if err != nil {
ts = trace.TraceState{}
return &ts
}
return &ts
}
// AddAttributesToSpan adds the given attributes in the span.
func AddAttributesToSpan(span trace.Span, attributes map[string]string) {
if span == nil {
return
}
var attrs []attribute.KeyValue
for k, v := range attributes {
// Skip if key is for internal use.
if !strings.HasPrefix(k, diagConsts.DaprInternalSpanAttrPrefix) && v != "" {
attrs = append(attrs, attribute.String(k, v))
}
}
if len(attrs) > 0 {
span.SetAttributes(attrs...)
}
}
// ConstructInputBindingSpanAttributes creates span attributes for InputBindings.
func ConstructInputBindingSpanAttributes(bindingName, url string) map[string]string {
return map[string]string{
diagConsts.DBNameSpanAttributeKey: bindingName,
diagConsts.GrpcServiceSpanAttributeKey: diagConsts.DaprGRPCDaprService,
diagConsts.DBSystemSpanAttributeKey: diagConsts.BindingBuildingBlockType,
diagConsts.DBConnectionStringSpanAttributeKey: url,
}
}
// ConstructSubscriptionSpanAttributes creates span attributes for Pubsub subscription.
func ConstructSubscriptionSpanAttributes(topic string) map[string]string {
return map[string]string{
diagConsts.MessagingSystemSpanAttributeKey: diagConsts.PubsubBuildingBlockType,
diagConsts.MessagingDestinationSpanAttributeKey: topic,
diagConsts.MessagingDestinationKindSpanAttributeKey: diagConsts.MessagingDestinationTopicKind,
}
}
// StartInternalCallbackSpan starts trace span for internal callback such as input bindings and pubsub subscription.
func StartInternalCallbackSpan(ctx context.Context, spanName string, parent trace.SpanContext, spec *config.TracingSpec) (context.Context, trace.Span) {
if spec == nil || !diagUtils.IsTracingEnabled(spec.SamplingRate) {
return ctx, nil
}
ctx = trace.ContextWithRemoteSpanContext(ctx, parent)
ctx, span := tracer.Start(ctx, spanName, trace.WithSpanKind(trace.SpanKindClient))
return ctx, span
}
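// Illustrative sketch, not part of the original file: starting a callback span
// for an incoming pubsub message. The topic name and traceparent value are
// placeholders; spec comes from the runtime's tracing configuration.
func exampleStartSubscriptionCallbackSpan(ctx context.Context, traceparent, topic string, spec *config.TracingSpec) (context.Context, trace.Span) {
	parent, _ := SpanContextFromW3CString(traceparent)
	ctx, span := StartInternalCallbackSpan(ctx, "pubsub/"+topic, parent, spec)
	if span != nil {
		AddAttributesToSpan(span, ConstructSubscriptionSpanAttributes(topic))
	}
	return ctx, span
}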
func TraceIDAndStateFromSpan(span trace.Span) (string, string) {
var corID, traceState string
if span != nil {
sc := span.SpanContext()
if !sc.Equal(trace.SpanContext{}) {
corID = SpanContextToW3CString(sc)
}
if sc.TraceState().Len() > 0 {
traceState = TraceStateToW3CString(sc)
}
}
return corID, traceState
}
|
mikeee/dapr
|
pkg/diagnostics/tracing.go
|
GO
|
mit
| 5,609 |
package diagnostics
import (
sdktrace "go.opentelemetry.io/otel/sdk/trace"
diagUtils "github.com/dapr/dapr/pkg/diagnostics/utils"
)
func NewDaprTraceSampler(samplingRateString string) sdktrace.Sampler {
samplingRate := diagUtils.GetTraceSamplingRate(samplingRateString)
return sdktrace.ParentBased(sdktrace.TraceIDRatioBased(samplingRate))
}
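// Illustrative sketch, not part of the original file: plugging the sampler
// into a tracer provider. Because the sampler is ParentBased, a remote
// parent's sampled flag is honored and the ratio only applies to root spans.
func exampleTracerProvider(samplingRate string) *sdktrace.TracerProvider {
	return sdktrace.NewTracerProvider(
		sdktrace.WithSampler(NewDaprTraceSampler(samplingRate)),
	)
}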
|
mikeee/dapr
|
pkg/diagnostics/tracing_sampler.go
|
GO
|
mit
| 349 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package diagnostics
import (
"context"
"encoding/hex"
"fmt"
"math/rand"
"sync"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/dapr/dapr/pkg/config"
diagConsts "github.com/dapr/dapr/pkg/diagnostics/consts"
"go.opentelemetry.io/otel"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
"go.opentelemetry.io/otel/trace"
)
func TestSpanContextToW3CString(t *testing.T) {
t.Run("empty SpanContext", func(t *testing.T) {
expected := "00-00000000000000000000000000000000-0000000000000000-00"
sc := trace.SpanContext{}
got := SpanContextToW3CString(sc)
assert.Equal(t, expected, got)
})
t.Run("valid SpanContext", func(t *testing.T) {
expected := "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01"
scConfig := trace.SpanContextConfig{
TraceID: trace.TraceID{75, 249, 47, 53, 119, 179, 77, 166, 163, 206, 146, 157, 14, 14, 71, 54},
SpanID: trace.SpanID{0, 240, 103, 170, 11, 169, 2, 183},
TraceFlags: trace.TraceFlags(1),
}
sc := trace.NewSpanContext(scConfig)
got := SpanContextToW3CString(sc)
assert.Equal(t, expected, got)
})
}
func TestTraceStateToW3CString(t *testing.T) {
t.Run("empty Tracestate", func(t *testing.T) {
sc := trace.SpanContext{}
got := TraceStateToW3CString(sc)
assert.Empty(t, got)
})
t.Run("valid Tracestate", func(t *testing.T) {
ts := trace.TraceState{}
ts, _ = ts.Insert("key", "value")
sc := trace.SpanContext{}
sc = sc.WithTraceState(ts)
got := TraceStateToW3CString(sc)
assert.Equal(t, "key=value", got)
})
}
func TestSpanContextFromW3CString(t *testing.T) {
t.Run("empty SpanContext", func(t *testing.T) {
sc := "00-00000000000000000000000000000000-0000000000000000-00"
expected := trace.SpanContext{}
got, _ := SpanContextFromW3CString(sc)
assert.Equal(t, expected, got)
})
t.Run("valid SpanContext", func(t *testing.T) {
sc := "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01"
scConfig := trace.SpanContextConfig{
TraceID: trace.TraceID{75, 249, 47, 53, 119, 179, 77, 166, 163, 206, 146, 157, 14, 14, 71, 54},
SpanID: trace.SpanID{0, 240, 103, 170, 11, 169, 2, 183},
TraceFlags: trace.TraceFlags(1),
}
expected := trace.NewSpanContext(scConfig)
got, _ := SpanContextFromW3CString(sc)
assert.Equal(t, expected, got)
})
}
func TestTraceStateFromW3CString(t *testing.T) {
t.Run("empty Tracestate", func(t *testing.T) {
ts := trace.TraceState{}
sc := trace.SpanContext{}
sc = sc.WithTraceState(ts)
scText := TraceStateToW3CString(sc)
got := TraceStateFromW3CString(scText)
assert.Equal(t, ts, *got)
})
t.Run("valid Tracestate", func(t *testing.T) {
ts := trace.TraceState{}
ts, _ = ts.Insert("key", "value")
sc := trace.SpanContext{}
sc = sc.WithTraceState(ts)
scText := TraceStateToW3CString(sc)
got := TraceStateFromW3CString(scText)
assert.Equal(t, ts, *got)
})
t.Run("invalid Tracestate", func(t *testing.T) {
ts := trace.TraceState{}
// A non-parsable tracestate should equate back to an empty one.
got := TraceStateFromW3CString("bad tracestate")
assert.Equal(t, ts, *got)
})
}
func TestStartInternalCallbackSpan(t *testing.T) {
exp := newOtelFakeExporter()
tp := sdktrace.NewTracerProvider(
sdktrace.WithBatcher(exp),
)
defer func() { _ = tp.Shutdown(context.Background()) }()
otel.SetTracerProvider(tp)
t.Run("traceparent is provided and sampling is enabled", func(t *testing.T) {
traceSpec := &config.TracingSpec{SamplingRate: "1"}
scConfig := trace.SpanContextConfig{
TraceID: trace.TraceID{75, 249, 47, 53, 119, 179, 77, 166, 163, 206, 146, 157, 14, 14, 71, 54},
SpanID: trace.SpanID{0, 240, 103, 170, 11, 169, 2, 183},
TraceFlags: trace.TraceFlags(1),
}
parent := trace.NewSpanContext(scConfig)
ctx := context.Background()
_, gotSp := StartInternalCallbackSpan(ctx, "testSpanName", parent, traceSpec)
sc := gotSp.SpanContext()
traceID := sc.TraceID()
spanID := sc.SpanID()
assert.Equal(t, "4bf92f3577b34da6a3ce929d0e0e4736", hex.EncodeToString(traceID[:]))
assert.NotEqual(t, "00f067aa0ba902b7", hex.EncodeToString(spanID[:]))
})
t.Run("traceparent is provided with sampling flag = 1 but sampling is disabled", func(t *testing.T) {
traceSpec := &config.TracingSpec{SamplingRate: "0"}
scConfig := trace.SpanContextConfig{
TraceID: trace.TraceID{75, 249, 47, 53, 119, 179, 77, 166, 163, 206, 146, 157, 14, 14, 71, 54},
SpanID: trace.SpanID{0, 240, 103, 170, 11, 169, 2, 183},
TraceFlags: trace.TraceFlags(1),
}
parent := trace.NewSpanContext(scConfig)
ctx := context.Background()
ctx, gotSp := StartInternalCallbackSpan(ctx, "testSpanName", parent, traceSpec)
assert.Nil(t, gotSp)
assert.NotNil(t, ctx)
})
t.Run("traceparent is provided with sampling flag = 0 and sampling is enabled (but not P=1.00)", func(t *testing.T) {
// We use a fixed seed for the RNG so we can use an exact number here
const expectSampled = 0
const numTraces = 100000
sampledCount := runTraces(t, "test_trace", numTraces, "0.01", true, 0)
require.Equal(t, expectSampled, sampledCount, "Expected to sample %d traces but sampled %d", expectSampled, sampledCount)
require.Less(t, sampledCount, numTraces, "Expected to sample fewer than the total number of traces, but sampled all of them!")
})
t.Run("traceparent is provided with sampling flag = 0 and sampling is enabled (and P=1.00)", func(t *testing.T) {
const expectSampled = 0
const numTraces = 1000
sampledCount := runTraces(t, "test_trace", numTraces, "1.00", true, 0)
require.Equal(t, expectSampled, sampledCount, "Expected to sample all traces (%d) but only sampled %d", numTraces, sampledCount)
})
t.Run("traceparent is provided with sampling flag = 1 and sampling is enabled (but not P=1.00)", func(t *testing.T) {
const numTraces = 1000
sampledCount := runTraces(t, "test_trace", numTraces, "0.00001", true, 1)
require.Equal(t, numTraces, sampledCount, "Expected to sample all traces (%d) but only sampled %d", numTraces, sampledCount)
})
t.Run("traceparent is not provided and sampling is enabled (but not P=1.00)", func(t *testing.T) {
// We use a fixed seed for the RNG so we can use an exact number here
const expectSampled = 1000 // we allow for a 10% margin of error to account for randomness
const numTraces = 100000
sampledCount := runTraces(t, "test_trace", numTraces, "0.01", false, 0)
require.InEpsilon(t, expectSampled, sampledCount, 0.1, "Expected to sample %d (+/- 10%) traces but sampled %d", expectSampled, sampledCount)
require.Less(t, sampledCount, numTraces, "Expected to sample fewer than the total number of traces, but sampled all of them!")
})
t.Run("traceparent is not provided and sampling is enabled (and P=1.00)", func(t *testing.T) {
const numTraces = 1000
sampledCount := runTraces(t, "test_trace", numTraces, "1.00", false, 0)
require.Equal(t, numTraces, sampledCount, "Expected to sample all traces (%d) but only sampled %d", numTraces, sampledCount)
})
t.Run("traceparent is not provided and sampling is enabled (but almost 0 P=0.00001)", func(t *testing.T) {
const numTraces = 1000
sampledCount := runTraces(t, "test_trace", numTraces, "0.00001", false, 0)
require.Less(t, sampledCount, int(numTraces*.001), "Expected to sample almost no traces but sampled %d", sampledCount)
})
}
func runTraces(t *testing.T, testName string, numTraces int, samplingRate string, hasParentSpanContext bool, parentTraceFlag int) int {
d := NewDaprTraceSampler(samplingRate)
tracerOptions := []sdktrace.TracerProviderOption{
sdktrace.WithSampler(d),
}
tp := sdktrace.NewTracerProvider(tracerOptions...)
tracerName := fmt.Sprintf("%s_%s", testName, samplingRate)
otel.SetTracerProvider(tp)
testTracer := otel.Tracer(tracerName)
// This is taken from otel's tests for the ratio sampler so we can generate IDs
idg := defaultIDGenerator()
sampledCount := 0
for i := 0; i < numTraces; i++ {
ctx := context.Background()
if hasParentSpanContext {
traceID, _ := idg.NewIDs(context.Background())
scConfig := trace.SpanContextConfig{
TraceID: traceID,
SpanID: trace.SpanID{0, 240, 103, 170, 11, 169, 2, 183},
TraceFlags: trace.TraceFlags(parentTraceFlag),
}
parent := trace.NewSpanContext(scConfig)
ctx = trace.ContextWithRemoteSpanContext(ctx, parent)
}
ctx, span := testTracer.Start(ctx, "testTraceSpan", trace.WithSpanKind(trace.SpanKindClient))
assert.NotNil(t, span)
assert.NotNil(t, ctx)
if span.SpanContext().IsSampled() {
sampledCount += 1
}
}
return sampledCount
}
// This test would allow us to know when the span attribute keys are
// modified in go.opentelemetry.io/otel/semconv library, and thus in
// the spec.
func TestOtelConventionStrings(t *testing.T) {
assert.Equal(t, "db.system", diagConsts.DBSystemSpanAttributeKey)
assert.Equal(t, "db.name", diagConsts.DBNameSpanAttributeKey)
assert.Equal(t, "db.statement", diagConsts.DBStatementSpanAttributeKey)
assert.Equal(t, "db.connection_string", diagConsts.DBConnectionStringSpanAttributeKey)
assert.Equal(t, "topic", diagConsts.MessagingDestinationTopicKind)
assert.Equal(t, "messaging.system", diagConsts.MessagingSystemSpanAttributeKey)
assert.Equal(t, "messaging.destination", diagConsts.MessagingDestinationSpanAttributeKey)
assert.Equal(t, "messaging.destination_kind", diagConsts.MessagingDestinationKindSpanAttributeKey)
assert.Equal(t, "rpc.service", diagConsts.GrpcServiceSpanAttributeKey)
assert.Equal(t, "net.peer.name", diagConsts.NetPeerNameSpanAttributeKey)
}
// Otel Fake Exporter implements an open telemetry span exporter that does nothing.
type otelFakeExporter struct{}
// newOtelFakeExporter returns an Open Telemetry Fake Span Exporter
func newOtelFakeExporter() *otelFakeExporter {
return &otelFakeExporter{}
}
// ExportSpans implements the open telemetry span exporter interface.
func (e *otelFakeExporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error {
return nil
}
// Shutdown implements the open telemetry span exporter interface.
func (e *otelFakeExporter) Shutdown(ctx context.Context) error {
return nil
}
// Otel Fake Span Processor implements an open telemetry span processor that calls a call back in the OnEnd method.
type otelFakeSpanProcessor struct {
cb func(s sdktrace.ReadOnlySpan)
}
// newOtelFakeSpanProcessor returns an Open Telemetry Fake Span Processor
func newOtelFakeSpanProcessor(f func(s sdktrace.ReadOnlySpan)) *otelFakeSpanProcessor {
return &otelFakeSpanProcessor{
cb: f,
}
}
// OnStart implements the SpanProcessor interface.
func (o *otelFakeSpanProcessor) OnStart(parent context.Context, s sdktrace.ReadWriteSpan) {
}
// OnEnd implements the SpanProcessor interface and calls the callback function provided on init
func (o *otelFakeSpanProcessor) OnEnd(s sdktrace.ReadOnlySpan) {
o.cb(s)
}
// Shutdown implements the SpanProcessor interface.
func (o *otelFakeSpanProcessor) Shutdown(ctx context.Context) error {
return nil
}
// ForceFlush implements the SpanProcessor interface.
func (o *otelFakeSpanProcessor) ForceFlush(ctx context.Context) error {
return nil
}
// This was taken from the otel testing to generate IDs
// origin: go.opentelemetry.io/otel/sdk@v1.11.1/trace/id_generator.go
// Copyright: The OpenTelemetry Authors
// License (Apache 2.0): https://github.com/open-telemetry/opentelemetry-go/blob/sdk/v1.11.1/LICENSE
// IDGenerator allows custom generators for TraceID and SpanID.
type IDGenerator interface {
// DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release.
// NewIDs returns a new trace and span ID.
NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID)
// DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release.
// NewSpanID returns a ID for a new span in the trace with traceID.
NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID
// DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release.
}
type randomIDGenerator struct {
sync.Mutex
randSource *rand.Rand
}
var _ IDGenerator = &randomIDGenerator{}
// NewSpanID returns a non-zero span ID from a randomly-chosen sequence.
func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID {
gen.Lock()
defer gen.Unlock()
sid := trace.SpanID{}
_, _ = gen.randSource.Read(sid[:])
return sid
}
// NewIDs returns a non-zero trace ID and a non-zero span ID from a
// randomly-chosen sequence.
func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) {
gen.Lock()
defer gen.Unlock()
tid := trace.TraceID{}
_, _ = gen.randSource.Read(tid[:])
sid := trace.SpanID{}
_, _ = gen.randSource.Read(sid[:])
return tid, sid
}
func defaultIDGenerator() IDGenerator {
gen := &randomIDGenerator{
// Use a fixed seed to make the tests deterministic.
randSource: rand.New(rand.NewSource(1)), //nolint:gosec
}
return gen
}
func TestTraceIDAndStateFromSpan(t *testing.T) {
t.Run("non-empty span, id and state are not empty", func(t *testing.T) {
idg := defaultIDGenerator()
traceID, _ := idg.NewIDs(context.Background())
scConfig := trace.SpanContextConfig{
TraceID: traceID,
SpanID: trace.SpanID{0, 240, 103, 170, 11, 169, 2, 183},
TraceFlags: 1,
}
ts := trace.TraceState{}
ts, _ = ts.Insert("key", "value")
scConfig.TraceState = ts
parent := trace.NewSpanContext(scConfig)
ctx := context.Background()
ctx = trace.ContextWithRemoteSpanContext(ctx, parent)
_, span := tracer.Start(ctx, "testTraceSpan", trace.WithSpanKind(trace.SpanKindClient))
id, state := TraceIDAndStateFromSpan(span)
assert.NotEmpty(t, id)
assert.NotEmpty(t, state)
})
t.Run("empty span, id and state are empty", func(t *testing.T) {
span := trace.SpanFromContext(context.Background())
id, state := TraceIDAndStateFromSpan(span)
assert.Empty(t, id)
assert.Empty(t, state)
})
t.Run("nil span, id and state are empty", func(t *testing.T) {
id, state := TraceIDAndStateFromSpan(nil)
assert.Empty(t, id)
assert.Empty(t, state)
})
}
|
mikeee/dapr
|
pkg/diagnostics/tracing_test.go
|
GO
|
mit
| 14,818 |
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"fmt"
"regexp"
"strings"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"github.com/dapr/dapr/pkg/config"
)
var metricsRules map[string][]regexPair
type regexPair struct {
regex *regexp.Regexp
replace string
}
// NewMeasureView creates opencensus View instance using stats.Measure.
func NewMeasureView(measure stats.Measure, keys []tag.Key, aggregation *view.Aggregation) *view.View {
return &view.View{
Name: measure.Name(),
Description: measure.Description(),
Measure: measure,
TagKeys: keys,
Aggregation: aggregation,
}
}
// WithTags converts tag key and value pairs to tag.Mutator array.
// WithTags(key1, value1, key2, value2) returns
// []tag.Mutator{tag.Upsert(key1, value1), tag.Upsert(key2, value2)}.
func WithTags(name string, opts ...interface{}) []tag.Mutator {
tagMutators := make([]tag.Mutator, 0, len(opts)/2)
for i := 0; i < len(opts)-1; i += 2 {
key, ok := opts[i].(tag.Key)
if !ok {
break
}
value, ok := opts[i+1].(string)
if !ok {
break
}
// skip if value is empty
if value == "" {
continue
}
if len(metricsRules) > 0 {
pairs := metricsRules[strings.ReplaceAll(name, "_", "/")+key.Name()]
for _, p := range pairs {
value = p.regex.ReplaceAllString(value, p.replace)
}
}
tagMutators = append(tagMutators, tag.Upsert(key, value))
}
return tagMutators
}
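// Illustrative sketch, not part of the original file: building tag mutators
// for a measure. The keys are created inline here for demonstration; in the
// runtime they are shared package-level tag.Key values. The empty value is
// skipped, and any configured metrics rules rewrite the remaining values.
func exampleTagMutators() []tag.Mutator {
	appIDKey := tag.MustNewKey("app_id")
	statusKey := tag.MustNewKey("status")
	methodKey := tag.MustNewKey("method")
	return WithTags("runtime/service_invocation/req_sent_total",
		appIDKey, "myapp",
		statusKey, "200",
		methodKey, "", // skipped because the value is empty
	)
}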
// AddNewTagKey adds new tag keys to existing view.
func AddNewTagKey(views []*view.View, key *tag.Key) []*view.View {
for _, v := range views {
v.TagKeys = append(v.TagKeys, *key)
}
return views
}
// CreateRulesMap generates a fast lookup map for metrics regex.
func CreateRulesMap(rules []config.MetricsRule) error {
newMetricsRules := make(map[string][]regexPair, len(rules))
for _, r := range rules {
// strip the metric name of known runtime prefixes and mutate them to fit stat names
r.Name = strings.Replace(r.Name, "dapr_", "", 1)
r.Name = strings.ReplaceAll(r.Name, "_", "/")
for _, l := range r.Labels {
key := r.Name + l.Name
newMetricsRules[key] = make([]regexPair, len(l.Regex))
i := 0
for k, v := range l.Regex {
regex, err := regexp.Compile(v)
if err != nil {
return fmt.Errorf("failed to compile regex for rule %s/%s: %w", key, k, err)
}
newMetricsRules[key][i] = regexPair{
regex: regex,
replace: k,
}
i++
}
}
}
metricsRules = newMetricsRules
return nil
}
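// Illustrative sketch, not part of the original file: registering a rule that
// collapses a high-cardinality "path" label on an HTTP server metric into a
// single value. The metric name, label, and regex are example values only.
func exampleRegisterRules() error {
	return CreateRulesMap([]config.MetricsRule{
		{
			Name: "dapr_http_server_request_count",
			Labels: []config.MetricLabel{
				{
					Name: "path",
					Regex: map[string]string{
						"/orders/{id}": "/orders/.+",
					},
				},
			},
		},
	})
}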
|
mikeee/dapr
|
pkg/diagnostics/utils/metrics_utils.go
|
GO
|
mit
| 3,040 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.opencensus.io/tag"
"github.com/dapr/dapr/pkg/config"
)
func TestWithTags(t *testing.T) {
t.Run("one tag", func(t *testing.T) {
appKey := tag.MustNewKey("app_id")
mutators := WithTags("", appKey, "test")
assert.Len(t, mutators, 1)
})
t.Run("two tags", func(t *testing.T) {
appKey := tag.MustNewKey("app_id")
operationKey := tag.MustNewKey("operation")
mutators := WithTags("", appKey, "test", operationKey, "op")
assert.Len(t, mutators, 2)
})
t.Run("three tags", func(t *testing.T) {
appKey := tag.MustNewKey("app_id")
operationKey := tag.MustNewKey("operation")
methodKey := tag.MustNewKey("method")
mutators := WithTags("", appKey, "test", operationKey, "op", methodKey, "method")
assert.Len(t, mutators, 3)
})
t.Run("two tags with wrong value type", func(t *testing.T) {
appKey := tag.MustNewKey("app_id")
operationKey := tag.MustNewKey("operation")
mutators := WithTags("", appKey, "test", operationKey, 1)
assert.Len(t, mutators, 1)
})
t.Run("skip empty value key", func(t *testing.T) {
appKey := tag.MustNewKey("app_id")
operationKey := tag.MustNewKey("operation")
methodKey := tag.MustNewKey("method")
mutators := WithTags("", appKey, "", operationKey, "op", methodKey, "method")
assert.Len(t, mutators, 2)
})
}
func TestCreateRulesMap(t *testing.T) {
t.Run("invalid rule", func(t *testing.T) {
err := CreateRulesMap([]config.MetricsRule{
{
Name: "test",
Labels: []config.MetricLabel{
{
Name: "test",
Regex: map[string]string{
"TEST": "[",
},
},
},
},
})
require.Error(t, err)
})
t.Run("valid rule", func(t *testing.T) {
err := CreateRulesMap([]config.MetricsRule{
{
Name: "test",
Labels: []config.MetricLabel{
{
Name: "label",
Regex: map[string]string{
"TEST": "/.+",
},
},
},
},
})
require.NoError(t, err)
assert.NotNil(t, metricsRules)
assert.Len(t, metricsRules, 1)
assert.Len(t, metricsRules["testlabel"], 1)
assert.Equal(t, "TEST", metricsRules["testlabel"][0].replace)
assert.NotNil(t, metricsRules["testlabel"][0].regex)
})
}
|
mikeee/dapr
|
pkg/diagnostics/utils/metrics_utils_test.go
|
GO
|
mit
| 2,813 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"context"
"net/http"
"strconv"
"github.com/valyala/fasthttp"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
"go.opentelemetry.io/otel/trace"
"github.com/dapr/kit/logger"
)
type daprContextKey string
const (
defaultSamplingRate = 1e-4
spanContextKey daprContextKey = "span"
)
var emptySpanContext trace.SpanContext
// StdoutExporter implements an open telemetry span exporter that writes to stdout.
type StdoutExporter struct {
log logger.Logger
}
// NewStdOutExporter returns a StdoutExporter
func NewStdOutExporter() *StdoutExporter {
return &StdoutExporter{logger.NewLogger("dapr.runtime.trace")}
}
// ExportSpans implements the open telemetry span exporter interface.
func (e *StdoutExporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error {
const msg = "[%s] Trace: %s Span: %s/%s Time: [%s -> %s] Annotations: %+v"
for _, sd := range spans {
var parentSpanID trace.SpanID
if sd.Parent().IsValid() {
parentSpanID = sd.Parent().SpanID()
}
e.log.Infof(msg, sd.Name(), sd.SpanContext().TraceID(), parentSpanID, sd.SpanContext().SpanID(), sd.StartTime(), sd.EndTime(), sd.Events())
}
return nil
}
// Shutdown implements the open telemetry span exporter interface.
func (e *StdoutExporter) Shutdown(ctx context.Context) error {
return nil
}
// NullExporter implements an open telemetry span exporter that discards all telemetry.
type NullExporter struct{}
// NewNullExporter returns a NullExporter
func NewNullExporter() *NullExporter {
return &NullExporter{}
}
// ExportSpans implements the open telemetry span exporter interface.
func (e *NullExporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error {
return nil
}
// Shutdown implements the open telemetry span exporter interface.
func (e *NullExporter) Shutdown(ctx context.Context) error {
return nil
}
// GetTraceSamplingRate parses the given rate and returns the parsed rate.
func GetTraceSamplingRate(rate string) float64 {
f, err := strconv.ParseFloat(rate, 64)
if err != nil {
return defaultSamplingRate
}
return f
}
// IsTracingEnabled parses the given rate and returns false if sampling rate is explicitly set 0.
func IsTracingEnabled(rate string) bool {
return GetTraceSamplingRate(rate) != 0
}
// SpanFromContext returns the Span stored in a context, or a no-op span if there isn't one.
func SpanFromContext(ctx context.Context) trace.Span {
// TODO: Remove fasthttp compatibility when no HTTP API using contexts depend on fasthttp
var val any
if reqCtx, ok := ctx.(*fasthttp.RequestCtx); ok {
val = reqCtx.UserValue(spanContextKey)
} else {
val = ctx.Value(spanContextKey)
}
if val != nil {
span, ok := val.(trace.Span)
if ok {
return span
}
}
// Return the default span, which can be a noop
return trace.SpanFromContext(ctx)
}
// AddSpanToFasthttpContext adds the span to the fasthttp request context.
// TODO: Remove fasthttp compatibility when no HTTP API using contexts depend on fasthttp.
func AddSpanToFasthttpContext(ctx *fasthttp.RequestCtx, span trace.Span) {
ctx.SetUserValue(spanContextKey, span)
}
// AddSpanToRequest sets span into a request context.
func AddSpanToRequest(r *http.Request, span trace.Span) {
ctx := context.WithValue(r.Context(), spanContextKey, span)
*r = *(r.WithContext(ctx))
}
// BinaryFromSpanContext returns the binary format representation of a SpanContext.
//
// If sc is the zero value, BinaryFromSpanContext returns nil.
func BinaryFromSpanContext(sc trace.SpanContext) []byte {
traceID := sc.TraceID()
spanID := sc.SpanID()
traceFlags := sc.TraceFlags()
if sc.Equal(emptySpanContext) {
return nil
}
var b [29]byte
copy(b[2:18], traceID[:])
b[18] = 1
copy(b[19:27], spanID[:])
b[27] = 2
b[28] = uint8(traceFlags)
return b[:]
}
// SpanContextFromBinary returns the SpanContext represented by b.
//
// If b has an unsupported version ID or contains no TraceID, SpanContextFromBinary returns with ok==false.
func SpanContextFromBinary(b []byte) (sc trace.SpanContext, ok bool) {
var scConfig trace.SpanContextConfig
if len(b) == 0 || b[0] != 0 {
return trace.SpanContext{}, false
}
b = b[1:]
if len(b) >= 17 && b[0] == 0 {
copy(scConfig.TraceID[:], b[1:17])
b = b[17:]
} else {
return trace.SpanContext{}, false
}
if len(b) >= 9 && b[0] == 1 {
copy(scConfig.SpanID[:], b[1:9])
b = b[9:]
}
if len(b) >= 2 && b[0] == 2 {
scConfig.TraceFlags = trace.TraceFlags(b[1])
}
sc = trace.NewSpanContext(scConfig)
return sc, true
}
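// Illustrative round-trip sketch, not part of the original file: this binary
// layout is the kind typically carried in grpc-trace-bin metadata; encoding a
// non-zero SpanContext and decoding it again should yield the same IDs and flags.
func exampleBinaryRoundTrip(sc trace.SpanContext) (trace.SpanContext, bool) {
	b := BinaryFromSpanContext(sc)
	if b == nil {
		// sc was the zero value, so there is nothing to propagate.
		return trace.SpanContext{}, false
	}
	return SpanContextFromBinary(b)
}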
|
mikeee/dapr
|
pkg/diagnostics/utils/trace_utils.go
|
GO
|
mit
| 5,074 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"context"
"net/http"
"reflect"
"testing"
"github.com/stretchr/testify/assert"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
"go.opentelemetry.io/otel/trace"
)
func TestSpanFromContext(t *testing.T) {
t.Run("not nil span", func(t *testing.T) {
r, _ := http.NewRequest(http.MethodGet, "http://test.local/method", nil)
var sp trace.Span
AddSpanToRequest(r, sp)
assert.NotNil(t, SpanFromContext(r.Context()))
})
t.Run("nil span", func(t *testing.T) {
r, _ := http.NewRequest(http.MethodGet, "http://test.local/method", nil)
AddSpanToRequest(r, nil)
sp := SpanFromContext(r.Context())
expectedType := "trace.noopSpan"
gotType := reflect.TypeOf(sp).String()
assert.Equal(t, expectedType, gotType)
})
t.Run("not nil span for context", func(t *testing.T) {
ctx := context.Background()
exp := newOtelFakeExporter()
tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
tracer := tp.Tracer("dapr-diagnostics-utils-tests")
ctx, sp := tracer.Start(ctx, "testSpan", trace.WithSpanKind(trace.SpanKindClient))
expectedTraceID := sp.SpanContext().TraceID()
expectedSpanID := sp.SpanContext().SpanID()
newCtx := trace.ContextWithSpan(ctx, sp)
gotSp := SpanFromContext(newCtx)
assert.NotNil(t, gotSp)
assert.Equal(t, expectedTraceID, gotSp.SpanContext().TraceID())
assert.Equal(t, expectedSpanID, gotSp.SpanContext().SpanID())
})
t.Run("nil span for context", func(t *testing.T) {
ctx := context.Background()
exp := newOtelFakeExporter()
_ = sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
newCtx := trace.ContextWithSpan(ctx, nil)
sp := SpanFromContext(newCtx)
expectedType := "trace.noopSpan"
gotType := reflect.TypeOf(sp).String()
assert.Equal(t, expectedType, gotType)
})
t.Run("nil", func(t *testing.T) {
ctx := context.Background()
exp := newOtelFakeExporter()
_ = sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
newCtx := trace.ContextWithSpan(ctx, nil)
sp := SpanFromContext(newCtx)
expectedType := "trace.noopSpan"
gotType := reflect.TypeOf(sp).String()
assert.Equal(t, expectedType, gotType)
})
}
// otelFakeExporter implements an open telemetry span exporter that does nothing.
type otelFakeExporter struct{}
// newOtelFakeExporter returns an otelFakeExporter
func newOtelFakeExporter() *otelFakeExporter {
return &otelFakeExporter{}
}
// ExportSpans implements the open telemetry span exporter interface.
func (e *otelFakeExporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error {
return nil
}
// Shutdown implements the open telemetry span exporter interface.
func (e *otelFakeExporter) Shutdown(ctx context.Context) error {
return nil
}
|
mikeee/dapr
|
pkg/diagnostics/utils/trace_utils_test.go
|
GO
|
mit
| 3,266 |
package diagnostics
import (
"testing"
"github.com/stretchr/testify/assert"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
)
func allTagsPresent(t *testing.T, v *view.View, tags []tag.Tag) {
for _, k := range v.TagKeys {
found := false
if k.Name() == "" {
continue
}
for _, tag := range tags {
if tag.Key.Name() == "" {
continue
}
if k.Name() == tag.Key.Name() {
found = true
break
}
}
assert.True(t, found)
}
}
|
mikeee/dapr
|
pkg/diagnostics/utils_test.go
|
GO
|
mit
| 470 |
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package diagnostics
import (
"context"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
diagUtils "github.com/dapr/dapr/pkg/diagnostics/utils"
)
var (
workflowNameKey = tag.MustNewKey("workflow_name")
activityNameKey = tag.MustNewKey("activity_name")
)
const (
StatusSuccess = "success"
StatusFailed = "failed"
StatusRecoverable = "recoverable"
CreateWorkflow = "create_workflow"
GetWorkflow = "get_workflow"
AddEvent = "add_event"
PurgeWorkflow = "purge_workflow"
WorkflowEvent = "event"
Timer = "timer"
)
type workflowMetrics struct {
// workflowOperationCount records count of Successful/Failed requests to Create/Get/Purge Workflow and Add Events.
workflowOperationCount *stats.Int64Measure
// workflowOperationLatency records latency of response for workflow operation requests.
workflowOperationLatency *stats.Float64Measure
// workflowExecutionCount records count of Successful/Failed/Recoverable workflow executions.
workflowExecutionCount *stats.Int64Measure
// activityExecutionCount records count of Successful/Failed/Recoverable activity executions.
activityExecutionCount *stats.Int64Measure
// activityExecutionLatency records time taken to run an activity to completion.
activityExecutionLatency *stats.Float64Measure
// workflowExecutionLatency records time taken to run a workflow to completion.
workflowExecutionLatency *stats.Float64Measure
// workflowSchedulingLatency records time taken between workflow execution request and actual workflow execution
workflowSchedulingLatency *stats.Float64Measure
appID string
enabled bool
namespace string
}
func newWorkflowMetrics() *workflowMetrics {
return &workflowMetrics{
workflowOperationCount: stats.Int64(
"runtime/workflow/operation/count",
"The number of successful/failed workflow operation requests.",
stats.UnitDimensionless),
workflowOperationLatency: stats.Float64(
"runtime/workflow/operation/latency",
"The latencies of responses for workflow operation requests.",
stats.UnitMilliseconds),
workflowExecutionCount: stats.Int64(
"runtime/workflow/execution/count",
"The number of successful/failed/recoverable workflow executions.",
stats.UnitDimensionless),
activityExecutionCount: stats.Int64(
"runtime/workflow/activity/execution/count",
"The number of successful/failed/recoverable activity executions.",
stats.UnitDimensionless),
activityExecutionLatency: stats.Float64(
"runtime/workflow/activity/execution/latency",
"The total time taken to run an activity to completion.",
stats.UnitMilliseconds),
workflowExecutionLatency: stats.Float64(
"runtime/workflow/execution/latency",
"The total time taken to run workflow to completion.",
stats.UnitMilliseconds),
workflowSchedulingLatency: stats.Float64(
"runtime/workflow/scheduling/latency",
"Interval between workflow execution request and workflow execution.",
stats.UnitMilliseconds),
}
}
func (w *workflowMetrics) IsEnabled() bool {
return w != nil && w.enabled
}
// Init registers the workflow metrics views.
func (w *workflowMetrics) Init(appID, namespace string) error {
w.appID = appID
w.enabled = true
w.namespace = namespace
return view.Register(
diagUtils.NewMeasureView(w.workflowOperationCount, []tag.Key{appIDKey, namespaceKey, operationKey, statusKey}, view.Count()),
diagUtils.NewMeasureView(w.workflowOperationLatency, []tag.Key{appIDKey, namespaceKey, operationKey, statusKey}, defaultLatencyDistribution),
diagUtils.NewMeasureView(w.workflowExecutionCount, []tag.Key{appIDKey, namespaceKey, workflowNameKey, statusKey}, view.Count()),
diagUtils.NewMeasureView(w.activityExecutionCount, []tag.Key{appIDKey, namespaceKey, activityNameKey, statusKey}, view.Count()),
diagUtils.NewMeasureView(w.activityExecutionLatency, []tag.Key{appIDKey, namespaceKey, activityNameKey, statusKey}, defaultLatencyDistribution),
diagUtils.NewMeasureView(w.workflowExecutionLatency, []tag.Key{appIDKey, namespaceKey, workflowNameKey, statusKey}, defaultLatencyDistribution),
diagUtils.NewMeasureView(w.workflowSchedulingLatency, []tag.Key{appIDKey, namespaceKey, workflowNameKey}, defaultLatencyDistribution))
}
// WorkflowOperationEvent records the total number of successful/failed workflow operation requests. It also records latency for those requests.
func (w *workflowMetrics) WorkflowOperationEvent(ctx context.Context, operation, status string, elapsed float64) {
if !w.IsEnabled() {
return
}
stats.RecordWithTags(ctx, diagUtils.WithTags(w.workflowOperationCount.Name(), appIDKey, w.appID, namespaceKey, w.namespace, operationKey, operation, statusKey, status), w.workflowOperationCount.M(1))
if elapsed > 0 {
stats.RecordWithTags(ctx, diagUtils.WithTags(w.workflowOperationLatency.Name(), appIDKey, w.appID, namespaceKey, w.namespace, operationKey, operation, statusKey, status), w.workflowOperationLatency.M(elapsed))
}
}
// WorkflowExecutionEvent records the total number of successful/failed/recoverable workflow executions.
// Workflow execution latency is recorded separately by WorkflowExecutionLatency.
func (w *workflowMetrics) WorkflowExecutionEvent(ctx context.Context, workflowName, status string) {
if !w.IsEnabled() {
return
}
stats.RecordWithTags(ctx, diagUtils.WithTags(w.workflowExecutionCount.Name(), appIDKey, w.appID, namespaceKey, w.namespace, workflowNameKey, workflowName, statusKey, status), w.workflowExecutionCount.M(1))
}
func (w *workflowMetrics) WorkflowExecutionLatency(ctx context.Context, workflowName, status string, elapsed float64) {
if !w.IsEnabled() {
return
}
if elapsed > 0 {
stats.RecordWithTags(ctx, diagUtils.WithTags(w.workflowExecutionLatency.Name(), appIDKey, w.appID, namespaceKey, w.namespace, workflowNameKey, workflowName, statusKey, status), w.workflowExecutionLatency.M(elapsed))
}
}
func (w *workflowMetrics) WorkflowSchedulingLatency(ctx context.Context, workflowName string, elapsed float64) {
if !w.IsEnabled() {
return
}
if elapsed > 0 {
stats.RecordWithTags(ctx, diagUtils.WithTags(w.workflowSchedulingLatency.Name(), appIDKey, w.appID, namespaceKey, w.namespace, workflowNameKey, workflowName), w.workflowSchedulingLatency.M(elapsed))
}
}
// ActivityExecutionEvent records the total number of successful/failed/recoverable activity executions. It also records latency for these executions.
func (w *workflowMetrics) ActivityExecutionEvent(ctx context.Context, activityName, status string, elapsed float64) {
if !w.IsEnabled() {
return
}
stats.RecordWithTags(ctx, diagUtils.WithTags(w.activityExecutionCount.Name(), appIDKey, w.appID, namespaceKey, w.namespace, activityNameKey, activityName, statusKey, status), w.activityExecutionCount.M(1))
if elapsed > 0 {
stats.RecordWithTags(ctx, diagUtils.WithTags(w.activityExecutionLatency.Name(), appIDKey, w.appID, namespaceKey, w.namespace, activityNameKey, activityName, statusKey, status), w.activityExecutionLatency.M(elapsed))
}
}
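// Illustrative sketch, not part of the original file: recording a successful
// CreateWorkflow operation together with its latency, and a completed workflow
// execution. The workflow name and elapsed times are placeholder values; w is
// assumed to have been initialized via Init.
func exampleRecordWorkflowMetrics(ctx context.Context, w *workflowMetrics) {
	w.WorkflowOperationEvent(ctx, CreateWorkflow, StatusSuccess, 12.5)
	w.WorkflowExecutionEvent(ctx, "order_processing_workflow", StatusSuccess)
	w.WorkflowExecutionLatency(ctx, "order_processing_workflow", StatusSuccess, 1530)
}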
|
mikeee/dapr
|
pkg/diagnostics/workflow_monitoring.go
|
GO
|
mit
| 7,611 |
package diagnostics
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"go.opencensus.io/stats/view"
)
func initWorkflowMetrics() *workflowMetrics {
w := newWorkflowMetrics()
w.Init("test", "default")
return w
}
func TestOperations(t *testing.T) {
t.Run("record operation requests", func(t *testing.T) {
countMetricName := "runtime/workflow/operation/count"
latencyMetricName := "runtime/workflow/operation/latency"
t.Run("Create Operation Request", func(t *testing.T) {
t.Run("Failed Create Operation request count", func(t *testing.T) {
w := initWorkflowMetrics()
w.WorkflowOperationEvent(context.Background(), CreateWorkflow, StatusFailed, 0)
viewData, _ := view.RetrieveData(countMetricName)
v := view.Find(countMetricName)
allTagsPresent(t, v, viewData[0].Tags)
})
t.Run("Successful Create Operation request count", func(t *testing.T) {
w := initWorkflowMetrics()
w.WorkflowOperationEvent(context.Background(), CreateWorkflow, StatusSuccess, 0)
viewData, _ := view.RetrieveData(countMetricName)
v := view.Find(countMetricName)
allTagsPresent(t, v, viewData[0].Tags)
})
t.Run("Create Operation request latency", func(t *testing.T) {
w := initWorkflowMetrics()
w.WorkflowOperationEvent(context.Background(), CreateWorkflow, StatusSuccess, 1)
viewData, _ := view.RetrieveData(latencyMetricName)
v := view.Find(latencyMetricName)
allTagsPresent(t, v, viewData[0].Tags)
assert.InEpsilon(t, float64(1), viewData[0].Data.(*view.DistributionData).Min, 0)
})
})
t.Run("Get Operation Request", func(t *testing.T) {
t.Run("Failed Get Operation Request", func(t *testing.T) {
w := initWorkflowMetrics()
w.WorkflowOperationEvent(context.Background(), GetWorkflow, StatusFailed, 0)
viewData, _ := view.RetrieveData(countMetricName)
v := view.Find(countMetricName)
allTagsPresent(t, v, viewData[0].Tags)
})
t.Run("Successful Get Operation Request", func(t *testing.T) {
w := initWorkflowMetrics()
w.WorkflowOperationEvent(context.Background(), GetWorkflow, StatusSuccess, 0)
viewData, _ := view.RetrieveData(countMetricName)
v := view.Find(countMetricName)
allTagsPresent(t, v, viewData[0].Tags)
})
t.Run("Get Operation request latency", func(t *testing.T) {
w := initWorkflowMetrics()
w.WorkflowOperationEvent(context.Background(), GetWorkflow, StatusSuccess, 1)
viewData, _ := view.RetrieveData(latencyMetricName)
v := view.Find(latencyMetricName)
allTagsPresent(t, v, viewData[0].Tags)
assert.InEpsilon(t, float64(1), viewData[0].Data.(*view.DistributionData).Min, 0)
})
})
t.Run("Add Event request", func(t *testing.T) {
t.Run("Failed Add Event request", func(t *testing.T) {
w := initWorkflowMetrics()
w.WorkflowOperationEvent(context.Background(), AddEvent, StatusFailed, 0)
viewData, _ := view.RetrieveData(countMetricName)
v := view.Find(countMetricName)
allTagsPresent(t, v, viewData[0].Tags)
})
t.Run("Successful Add Event request", func(t *testing.T) {
w := initWorkflowMetrics()
w.WorkflowOperationEvent(context.Background(), AddEvent, StatusSuccess, 0)
viewData, _ := view.RetrieveData(countMetricName)
v := view.Find(countMetricName)
allTagsPresent(t, v, viewData[0].Tags)
})
t.Run("Add Event Operation latency", func(t *testing.T) {
w := initWorkflowMetrics()
w.WorkflowOperationEvent(context.Background(), AddEvent, StatusSuccess, 1)
viewData, _ := view.RetrieveData(latencyMetricName)
v := view.Find(latencyMetricName)
allTagsPresent(t, v, viewData[0].Tags)
assert.InEpsilon(t, float64(1), viewData[0].Data.(*view.DistributionData).Min, 0)
})
})
t.Run("Purge Workflow Request", func(t *testing.T) {
t.Run("Failed Purge workflow request", func(t *testing.T) {
w := initWorkflowMetrics()
w.WorkflowOperationEvent(context.Background(), PurgeWorkflow, StatusFailed, 0)
viewData, _ := view.RetrieveData(countMetricName)
v := view.Find(countMetricName)
allTagsPresent(t, v, viewData[0].Tags)
})
t.Run("Successful Purge workflow request", func(t *testing.T) {
w := initWorkflowMetrics()
w.WorkflowOperationEvent(context.Background(), PurgeWorkflow, StatusSuccess, 0)
viewData, _ := view.RetrieveData(countMetricName)
v := view.Find(countMetricName)
allTagsPresent(t, v, viewData[0].Tags)
})
t.Run("Purge workflow Operation latency", func(t *testing.T) {
w := initWorkflowMetrics()
w.WorkflowOperationEvent(context.Background(), PurgeWorkflow, StatusSuccess, 1)
viewData, _ := view.RetrieveData(latencyMetricName)
v := view.Find(latencyMetricName)
allTagsPresent(t, v, viewData[0].Tags)
assert.InEpsilon(t, float64(1), viewData[0].Data.(*view.DistributionData).Min, 0)
})
})
})
}
func TestExecution(t *testing.T) {
t.Run("record activity executions", func(t *testing.T) {
countMetricName := "runtime/workflow/activity/execution/count"
latencyMetricName := "runtime/workflow/activity/execution/latency"
activityName := "test-activity"
t.Run("Failed with retryable error", func(t *testing.T) {
w := initWorkflowMetrics()
w.ActivityExecutionEvent(context.Background(), activityName, StatusRecoverable, 0)
viewData, _ := view.RetrieveData(countMetricName)
v := view.Find(countMetricName)
allTagsPresent(t, v, viewData[0].Tags)
})
t.Run("Failed with not-retryable error", func(t *testing.T) {
w := initWorkflowMetrics()
w.ActivityExecutionEvent(context.Background(), activityName, StatusFailed, 0)
viewData, _ := view.RetrieveData(countMetricName)
v := view.Find(countMetricName)
allTagsPresent(t, v, viewData[0].Tags)
})
t.Run("Successful activity execution", func(t *testing.T) {
w := initWorkflowMetrics()
w.ActivityExecutionEvent(context.Background(), activityName, StatusSuccess, 0)
viewData, _ := view.RetrieveData(countMetricName)
v := view.Find(countMetricName)
allTagsPresent(t, v, viewData[0].Tags)
})
t.Run("activity execution latency", func(t *testing.T) {
w := initWorkflowMetrics()
w.ActivityExecutionEvent(context.Background(), activityName, StatusSuccess, 1)
viewData, _ := view.RetrieveData(latencyMetricName)
v := view.Find(latencyMetricName)
allTagsPresent(t, v, viewData[0].Tags)
assert.InEpsilon(t, float64(1), viewData[0].Data.(*view.DistributionData).Min, 0)
})
})
t.Run("record workflow executions", func(t *testing.T) {
countMetricName := "runtime/workflow/execution/count"
executionLatencyMetricName := "runtime/workflow/execution/latency"
schedulingLatencyMetricName := "runtime/workflow/scheduling/latency"
workflowName := "test-workflow"
t.Run("Failed with retryable error", func(t *testing.T) {
w := initWorkflowMetrics()
w.WorkflowExecutionEvent(context.Background(), workflowName, StatusRecoverable)
viewData, _ := view.RetrieveData(countMetricName)
v := view.Find(countMetricName)
allTagsPresent(t, v, viewData[0].Tags)
})
t.Run("Failed with not-retryable error", func(t *testing.T) {
w := initWorkflowMetrics()
w.WorkflowExecutionEvent(context.Background(), workflowName, StatusFailed)
viewData, _ := view.RetrieveData(countMetricName)
v := view.Find(countMetricName)
allTagsPresent(t, v, viewData[0].Tags)
})
t.Run("Successful workflow execution", func(t *testing.T) {
w := initWorkflowMetrics()
w.WorkflowExecutionEvent(context.Background(), workflowName, StatusSuccess)
viewData, _ := view.RetrieveData(countMetricName)
v := view.Find(countMetricName)
allTagsPresent(t, v, viewData[0].Tags)
})
t.Run("workflow execution latency", func(t *testing.T) {
w := initWorkflowMetrics()
w.WorkflowExecutionLatency(context.Background(), workflowName, StatusSuccess, 20)
viewData, _ := view.RetrieveData(executionLatencyMetricName)
v := view.Find(executionLatencyMetricName)
allTagsPresent(t, v, viewData[0].Tags)
assert.InEpsilon(t, float64(20), viewData[0].Data.(*view.DistributionData).Min, 0)
})
t.Run("workflow scheduling latency", func(t *testing.T) {
w := initWorkflowMetrics()
w.WorkflowSchedulingLatency(context.Background(), workflowName, 10)
viewData, _ := view.RetrieveData(schedulingLatencyMetricName)
v := view.Find(schedulingLatencyMetricName)
allTagsPresent(t, v, viewData[0].Tags)
assert.InEpsilon(t, float64(10), viewData[0].Data.(*view.DistributionData).Min, 0)
})
})
}
|
mikeee/dapr
|
pkg/diagnostics/workflow_monitoring_test.go
|
GO
|
mit
| 8,572 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package encryption
import (
"context"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
b64 "encoding/base64"
"encoding/hex"
"errors"
"fmt"
"io"
"github.com/dapr/components-contrib/secretstores"
commonapi "github.com/dapr/dapr/pkg/apis/common"
"github.com/dapr/dapr/pkg/apis/components/v1alpha1"
)
type Algorithm string
const (
primaryEncryptionKey = "primaryEncryptionKey"
secondaryEncryptionKey = "secondaryEncryptionKey"
errPrefix = "failed to extract encryption key"
AESGCMAlgorithm = "AES-GCM"
)
// ComponentEncryptionKeys holds the encryption keys set for a component.
type ComponentEncryptionKeys struct {
Primary Key
Secondary Key
}
// Key holds the key to encrypt an arbitrary object.
type Key struct {
Key string
Name string
cipherObj cipher.AEAD
}
// ComponentEncryptionKey checks if a component definition contains an encryption key and extracts it using the supplied secret store.
func ComponentEncryptionKey(component v1alpha1.Component, secretStore secretstores.SecretStore) (ComponentEncryptionKeys, error) {
if secretStore == nil {
return ComponentEncryptionKeys{}, nil
}
var cek ComponentEncryptionKeys
for _, m := range component.Spec.Metadata {
// search for primary encryption key
var valid bool
if m.Name == primaryEncryptionKey {
if len(m.Value.Raw) > 0 {
// encryption key is already extracted by the Operator
cek.Primary = Key{
Key: m.Value.String(),
Name: m.SecretKeyRef.Name,
}
continue
}
valid = true
} else if m.Name == secondaryEncryptionKey {
if len(m.Value.Raw) > 0 {
cek.Secondary = Key{
Key: m.Value.String(),
Name: m.SecretKeyRef.Name,
}
continue
}
valid = true
}
if !valid {
continue
}
key, err := tryGetEncryptionKeyFromMetadataItem(component.Namespace, m, secretStore)
if err != nil {
return ComponentEncryptionKeys{}, fmt.Errorf("%s: %w", errPrefix, err)
}
if m.Name == primaryEncryptionKey {
cek.Primary = key
} else if m.Name == secondaryEncryptionKey {
cek.Secondary = key
}
}
if cek.Primary.Key != "" {
cipherObj, err := createCipher(cek.Primary, AESGCMAlgorithm)
if err != nil {
return ComponentEncryptionKeys{}, err
}
cek.Primary.cipherObj = cipherObj
}
if cek.Secondary.Key != "" {
cipherObj, err := createCipher(cek.Secondary, AESGCMAlgorithm)
if err != nil {
return ComponentEncryptionKeys{}, err
}
cek.Secondary.cipherObj = cipherObj
}
return cek, nil
}
func tryGetEncryptionKeyFromMetadataItem(namespace string, item commonapi.NameValuePair, secretStore secretstores.SecretStore) (Key, error) {
if item.SecretKeyRef.Name == "" {
return Key{}, fmt.Errorf("%s: secretKeyRef cannot be empty", errPrefix)
}
// TODO: cascade context.
r, err := secretStore.GetSecret(context.TODO(), secretstores.GetSecretRequest{
Name: item.SecretKeyRef.Name,
Metadata: map[string]string{
"namespace": namespace,
},
})
if err != nil {
return Key{}, fmt.Errorf("%s: %w", errPrefix, err)
}
key := item.SecretKeyRef.Key
if key == "" {
key = item.SecretKeyRef.Name
}
if val, ok := r.Data[key]; ok {
if val == "" {
return Key{}, fmt.Errorf("%s: encryption key cannot be empty", errPrefix)
}
return Key{
Key: r.Data[key],
Name: item.SecretKeyRef.Name,
}, nil
}
return Key{}, nil
}
// encrypt encrypts a byte array using the supplied encryption key.
// The returned value is the random nonce followed by the ciphertext.
func encrypt(value []byte, key Key) ([]byte, error) {
	nonce := make([]byte, key.cipherObj.NonceSize())
	if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
		return value, err
	}
	return key.cipherObj.Seal(nonce, nonce, value, nil), nil
}
// decrypt base64-decodes a value and decrypts it using the supplied encryption key.
// The decoded payload is expected to be the nonce followed by the ciphertext, as produced by encrypt.
func decrypt(value []byte, key Key) ([]byte, error) {
enc, err := b64.StdEncoding.DecodeString(string(value))
if err != nil {
return value, err
}
nsize := key.cipherObj.NonceSize()
nonce, ciphertext := enc[:nsize], enc[nsize:]
return key.cipherObj.Open(nil, nonce, ciphertext, nil)
}
func createCipher(key Key, algorithm Algorithm) (cipher.AEAD, error) {
keyBytes, err := hex.DecodeString(key.Key)
if err != nil {
return nil, err
}
switch algorithm {
// Other authenticated ciphers can be added if needed, e.g. golang.org/x/crypto/chacha20poly1305
case AESGCMAlgorithm:
block, err := aes.NewCipher(keyBytes)
if err != nil {
return nil, err
}
return cipher.NewGCM(block)
}
return nil, errors.New("unsupported algorithm")
}
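// Illustrative sketch, not part of the original file: how the helpers above fit
// together. A hex-encoded AES key is turned into an AEAD cipher with createCipher,
// encrypt produces nonce||ciphertext, and decrypt expects that output base64-encoded
// (the encoding normally performed by TryEncryptValue in state.go). The function
// name exampleRoundTrip and its parameters are hypothetical.
func exampleRoundTrip(hexKey string, plaintext []byte) ([]byte, error) {
	k := Key{Name: "primary", Key: hexKey}
	aead, err := createCipher(k, AESGCMAlgorithm)
	if err != nil {
		return nil, err
	}
	k.cipherObj = aead
	enc, err := encrypt(plaintext, k)
	if err != nil {
		return nil, err
	}
	// decrypt expects a base64-encoded nonce||ciphertext value.
	return decrypt([]byte(b64.StdEncoding.EncodeToString(enc)), k)
}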
|
mikeee/dapr
|
pkg/encryption/encryption.go
|
GO
|
mit
| 5,090 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package encryption
import (
"context"
"crypto/rand"
"encoding/hex"
"testing"
"github.com/dapr/components-contrib/metadata"
"github.com/dapr/components-contrib/secretstores"
commonapi "github.com/dapr/dapr/pkg/apis/common"
"github.com/dapr/dapr/pkg/apis/components/v1alpha1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type mockSecretStore struct {
secretstores.SecretStore
primaryKey string
secondaryKey string
}
func (m *mockSecretStore) Init(ctx context.Context, metadata secretstores.Metadata) error {
if val, ok := metadata.Properties["primaryKey"]; ok {
m.primaryKey = val
}
if val, ok := metadata.Properties["secondaryKey"]; ok {
m.secondaryKey = val
}
return nil
}
func (m *mockSecretStore) GetSecret(ctx context.Context, req secretstores.GetSecretRequest) (secretstores.GetSecretResponse, error) {
return secretstores.GetSecretResponse{
Data: map[string]string{
"primaryKey": m.primaryKey,
"secondaryKey": m.secondaryKey,
},
}, nil
}
func (m *mockSecretStore) BulkGetSecret(ctx context.Context, req secretstores.BulkGetSecretRequest) (secretstores.BulkGetSecretResponse, error) {
return secretstores.BulkGetSecretResponse{}, nil
}
func TestComponentEncryptionKey(t *testing.T) {
t.Run("component has a primary and secondary encryption keys", func(t *testing.T) {
component := v1alpha1.Component{
ObjectMeta: metav1.ObjectMeta{
Name: "statestore",
},
Spec: v1alpha1.ComponentSpec{
Metadata: []commonapi.NameValuePair{
{
Name: primaryEncryptionKey,
SecretKeyRef: commonapi.SecretKeyRef{
Name: "primaryKey",
},
},
{
Name: secondaryEncryptionKey,
SecretKeyRef: commonapi.SecretKeyRef{
Name: "secondaryKey",
},
},
},
},
}
bytes := make([]byte, 32)
rand.Read(bytes)
primaryKey := hex.EncodeToString(bytes)
rand.Read(bytes)
secondaryKey := hex.EncodeToString(bytes[:16]) // 128-bit key
secretStore := &mockSecretStore{}
secretStore.Init(context.Background(), secretstores.Metadata{Base: metadata.Base{
Properties: map[string]string{
"primaryKey": primaryKey,
"secondaryKey": secondaryKey,
},
}})
keys, err := ComponentEncryptionKey(component, secretStore)
require.NoError(t, err)
assert.Equal(t, primaryKey, keys.Primary.Key)
assert.Equal(t, secondaryKey, keys.Secondary.Key)
})
t.Run("keys empty when no secret store is present and no error", func(t *testing.T) {
component := v1alpha1.Component{
ObjectMeta: metav1.ObjectMeta{
Name: "statestore",
},
Spec: v1alpha1.ComponentSpec{
Metadata: []commonapi.NameValuePair{
{
Name: primaryEncryptionKey,
SecretKeyRef: commonapi.SecretKeyRef{
Name: "primaryKey",
},
},
{
Name: secondaryEncryptionKey,
SecretKeyRef: commonapi.SecretKeyRef{
Name: "secondaryKey",
},
},
},
},
}
keys, err := ComponentEncryptionKey(component, nil)
assert.Empty(t, keys.Primary.Key)
assert.Empty(t, keys.Secondary.Key)
require.NoError(t, err)
})
t.Run("no error when component doesn't have encryption keys", func(t *testing.T) {
component := v1alpha1.Component{
ObjectMeta: metav1.ObjectMeta{
Name: "statestore",
},
Spec: v1alpha1.ComponentSpec{
Metadata: []commonapi.NameValuePair{
{
Name: "something",
},
},
},
}
_, err := ComponentEncryptionKey(component, nil)
require.NoError(t, err)
})
}
func TestTryGetEncryptionKeyFromMetadataItem(t *testing.T) {
t.Run("no secretRef on valid item", func(t *testing.T) {
secretStore := &mockSecretStore{}
secretStore.Init(context.Background(), secretstores.Metadata{Base: metadata.Base{
Properties: map[string]string{
"primaryKey": "123",
"secondaryKey": "456",
},
}})
_, err := tryGetEncryptionKeyFromMetadataItem("", commonapi.NameValuePair{}, secretStore)
require.Error(t, err)
})
}
func TestCreateCipher(t *testing.T) {
t.Run("invalid key", func(t *testing.T) {
cipherObj, err := createCipher(Key{
Key: "123",
}, AESGCMAlgorithm)
assert.Nil(t, cipherObj)
require.Error(t, err)
})
t.Run("valid 256-bit key", func(t *testing.T) {
bytes := make([]byte, 32)
rand.Read(bytes)
key := hex.EncodeToString(bytes)
cipherObj, err := createCipher(Key{
Key: key,
}, AESGCMAlgorithm)
assert.NotNil(t, cipherObj)
require.NoError(t, err)
})
t.Run("valid 192-bit key", func(t *testing.T) {
bytes := make([]byte, 24)
rand.Read(bytes)
key := hex.EncodeToString(bytes)
cipherObj, err := createCipher(Key{
Key: key,
}, AESGCMAlgorithm)
assert.NotNil(t, cipherObj)
require.NoError(t, err)
})
t.Run("valid 128-bit key", func(t *testing.T) {
bytes := make([]byte, 16)
rand.Read(bytes)
key := hex.EncodeToString(bytes)
cipherObj, err := createCipher(Key{
Key: key,
}, AESGCMAlgorithm)
assert.NotNil(t, cipherObj)
require.NoError(t, err)
})
t.Run("invalid key size", func(t *testing.T) {
bytes := make([]byte, 18)
rand.Read(bytes)
key := hex.EncodeToString(bytes)
cipherObj, err := createCipher(Key{
Key: key,
}, AESGCMAlgorithm)
assert.Nil(t, cipherObj)
require.Error(t, err)
})
t.Run("invalid algorithm", func(t *testing.T) {
bytes := make([]byte, 32)
rand.Read(bytes)
key := hex.EncodeToString(bytes)
cipherObj, err := createCipher(Key{
Key: key,
}, "3DES")
assert.Nil(t, cipherObj)
require.Error(t, err)
})
}
|
mikeee/dapr
|
pkg/encryption/encryption_test.go
|
GO
|
mit
| 6,112 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package encryption
import (
"bytes"
b64 "encoding/base64"
"fmt"
)
var encryptedStateStores = map[string]ComponentEncryptionKeys{}
const (
separator = "||"
)
// AddEncryptedStateStore adds a state store and its associated encryption keys to the registry.
// It returns false if the store is already registered.
func AddEncryptedStateStore(storeName string, keys ComponentEncryptionKeys) bool {
if _, ok := encryptedStateStores[storeName]; ok {
return false
}
encryptedStateStores[storeName] = keys
return true
}
// EncryptedStateStore returns a bool that indicates if a state store supports encryption.
func EncryptedStateStore(storeName string) bool {
_, ok := encryptedStateStores[storeName]
return ok
}
// TryEncryptValue encrypts a byte array with the primary encryption key associated with the state store,
// base64-encodes the ciphertext, and appends the key name for later extraction during decryption.
// Callers should first check EncryptedStateStore: the store must have encryption keys registered.
func TryEncryptValue(storeName string, value []byte) ([]byte, error) {
keys := encryptedStateStores[storeName]
enc, err := encrypt(value, keys.Primary)
if err != nil {
return value, err
}
sEnc := b64.StdEncoding.EncodeToString(enc) + separator + keys.Primary.Name
return []byte(sEnc), nil
}
// TryDecryptValue decrypts a byte array that was encrypted with TryEncryptValue,
// selecting the primary or secondary key by the key name appended to the record.
// An empty value is returned as-is.
func TryDecryptValue(storeName string, value []byte) ([]byte, error) {
if len(value) == 0 {
return []byte(""), nil
}
keys := encryptedStateStores[storeName]
// extract the decryption key that should be appended to the value
ind := bytes.LastIndex(value, []byte(separator))
keyName := string(value[ind+len(separator):])
if len(keyName) == 0 {
return value, fmt.Errorf("could not decrypt data for state store %s: encryption key name not found on record", storeName)
}
var key Key
if keys.Primary.Name == keyName {
key = keys.Primary
} else if keys.Secondary.Name == keyName {
key = keys.Secondary
}
return decrypt(value[:ind], key)
}
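// Illustrative sketch, not part of the original file: wiring a store's primary key
// into the package-level registry and round-tripping a value. The store name
// "mystore" and the hex key are placeholders; in Dapr the keys are normally produced
// by ComponentEncryptionKey and registered when the component is initialized.
func exampleStateEncryption(hexKey string, value []byte) ([]byte, error) {
	pr := Key{Name: "primary", Key: hexKey}
	aead, err := createCipher(pr, AESGCMAlgorithm)
	if err != nil {
		return nil, err
	}
	pr.cipherObj = aead
	AddEncryptedStateStore("mystore", ComponentEncryptionKeys{Primary: pr})
	if !EncryptedStateStore("mystore") {
		// No keys registered: leave the value untouched.
		return value, nil
	}
	enc, err := TryEncryptValue("mystore", value)
	if err != nil {
		return nil, err
	}
	return TryDecryptValue("mystore", enc)
}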
|
mikeee/dapr
|
pkg/encryption/state.go
|
GO
|
mit
| 2,639 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package encryption
import (
"crypto/rand"
"encoding/base64"
"encoding/hex"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestAddEncryptedStateStore(t *testing.T) {
t.Run("state store doesn't exist", func(t *testing.T) {
encryptedStateStores = map[string]ComponentEncryptionKeys{}
r := AddEncryptedStateStore("test", ComponentEncryptionKeys{
Primary: Key{
Name: "primary",
Key: "1234",
},
})
assert.True(t, r)
assert.Equal(t, "primary", encryptedStateStores["test"].Primary.Name)
})
t.Run("state store exists", func(t *testing.T) {
encryptedStateStores = map[string]ComponentEncryptionKeys{}
r := AddEncryptedStateStore("test", ComponentEncryptionKeys{
Primary: Key{
Name: "primary",
Key: "1234",
},
})
assert.True(t, r)
assert.Equal(t, "primary", encryptedStateStores["test"].Primary.Name)
r = AddEncryptedStateStore("test", ComponentEncryptionKeys{
Primary: Key{
Name: "primary",
Key: "1234",
},
})
assert.False(t, r)
})
}
func TestTryEncryptValue(t *testing.T) {
t.Run("state store without keys", func(t *testing.T) {
encryptedStateStores = map[string]ComponentEncryptionKeys{}
ok := EncryptedStateStore("test")
assert.False(t, ok)
})
t.Run("state store with AES256 primary key, value encrypted and decrypted successfully", func(t *testing.T) {
encryptedStateStores = map[string]ComponentEncryptionKeys{}
bytes := make([]byte, 32)
rand.Read(bytes)
key := hex.EncodeToString(bytes)
pr := Key{
Name: "primary",
Key: key,
}
cipherObj, _ := createCipher(pr, AESGCMAlgorithm)
pr.cipherObj = cipherObj
encryptedStateStores = map[string]ComponentEncryptionKeys{}
AddEncryptedStateStore("test", ComponentEncryptionKeys{
Primary: pr,
})
v := []byte("hello")
r, err := TryEncryptValue("test", v)
require.NoError(t, err)
assert.NotEqual(t, v, r)
dr, err := TryDecryptValue("test", r)
require.NoError(t, err)
assert.Equal(t, v, dr)
})
t.Run("state store with AES256 secondary key, value encrypted and decrypted successfully", func(t *testing.T) {
encryptedStateStores = map[string]ComponentEncryptionKeys{}
bytes := make([]byte, 32)
rand.Read(bytes)
primaryKey := hex.EncodeToString(bytes)
pr := Key{
Name: "primary",
Key: primaryKey,
}
cipherObj, _ := createCipher(pr, AESGCMAlgorithm)
pr.cipherObj = cipherObj
encryptedStateStores = map[string]ComponentEncryptionKeys{}
AddEncryptedStateStore("test", ComponentEncryptionKeys{
Primary: pr,
})
v := []byte("hello")
r, err := TryEncryptValue("test", v)
require.NoError(t, err)
assert.NotEqual(t, v, r)
encryptedStateStores = map[string]ComponentEncryptionKeys{}
AddEncryptedStateStore("test", ComponentEncryptionKeys{
Secondary: pr,
})
dr, err := TryDecryptValue("test", r)
require.NoError(t, err)
assert.Equal(t, v, dr)
})
t.Run("state store with AES256 primary key, base64 string value encrypted and decrypted successfully", func(t *testing.T) {
encryptedStateStores = map[string]ComponentEncryptionKeys{}
bytes := make([]byte, 32)
rand.Read(bytes)
key := hex.EncodeToString(bytes)
pr := Key{
Name: "primary",
Key: key,
}
cipherObj, _ := createCipher(pr, AESGCMAlgorithm)
pr.cipherObj = cipherObj
encryptedStateStores = map[string]ComponentEncryptionKeys{}
AddEncryptedStateStore("test", ComponentEncryptionKeys{
Primary: pr,
})
v := []byte("hello")
s := base64.StdEncoding.EncodeToString(v)
r, err := TryEncryptValue("test", []byte(s))
require.NoError(t, err)
assert.NotEqual(t, v, r)
dr, err := TryDecryptValue("test", r)
require.NoError(t, err)
assert.Equal(t, []byte(s), dr)
})
t.Run("state store with AES128 primary key, value encrypted and decrypted successfully", func(t *testing.T) {
encryptedStateStores = map[string]ComponentEncryptionKeys{}
bytes := make([]byte, 16)
rand.Read(bytes)
key := hex.EncodeToString(bytes)
pr := Key{
Name: "primary",
Key: key,
}
cipherObj, _ := createCipher(pr, AESGCMAlgorithm)
pr.cipherObj = cipherObj
encryptedStateStores = map[string]ComponentEncryptionKeys{}
AddEncryptedStateStore("test", ComponentEncryptionKeys{
Primary: pr,
})
v := []byte("hello world")
r, err := TryEncryptValue("test", v)
require.NoError(t, err)
assert.NotEqual(t, v, r)
dr, err := TryDecryptValue("test", r)
require.NoError(t, err)
assert.Equal(t, v, dr)
})
}
func TestTryDecryptValue(t *testing.T) {
t.Run("empty value", func(t *testing.T) {
encryptedStateStores = map[string]ComponentEncryptionKeys{}
bytes := make([]byte, 16)
rand.Read(bytes)
key := hex.EncodeToString(bytes)
pr := Key{
Name: "primary",
Key: key,
}
cipherObj, _ := createCipher(pr, AESGCMAlgorithm)
pr.cipherObj = cipherObj
encryptedStateStores = map[string]ComponentEncryptionKeys{}
AddEncryptedStateStore("test", ComponentEncryptionKeys{
Primary: pr,
})
dr, err := TryDecryptValue("test", nil)
require.NoError(t, err)
assert.Empty(t, dr)
})
}
func TestEncryptedStateStore(t *testing.T) {
t.Run("store supports encryption", func(t *testing.T) {
encryptedStateStores = map[string]ComponentEncryptionKeys{}
AddEncryptedStateStore("test", ComponentEncryptionKeys{})
ok := EncryptedStateStore("test")
assert.True(t, ok)
})
t.Run("store doesn't support encryption", func(t *testing.T) {
encryptedStateStores = map[string]ComponentEncryptionKeys{}
ok := EncryptedStateStore("test")
assert.False(t, ok)
})
}
|
mikeee/dapr
|
pkg/encryption/state_test.go
|
GO
|
mit
| 6,144 |
package expr
import (
"encoding/json"
"strings"
"github.com/google/cel-go/cel"
"github.com/google/cel-go/checker/decls"
exprProto "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
// missingVariableMessage is the prefix of the compile error CEL reports for an
// undeclared variable; DecodeString uses it to discover which variables to declare.
const missingVariableMessage = "undeclared reference to '"

// Expr is a CEL expression that is compiled once and can then be evaluated
// against a map of variables.
type Expr struct {
	expr    string
	program cel.Program
}

// DecodeString compiles a CEL expression. Variables referenced by the expression
// are discovered from compile errors and declared with type Any until the
// expression compiles successfully.
func (e *Expr) DecodeString(value string) (err error) {
var ast *cel.Ast
var env *cel.Env
variables := make([]*exprProto.Decl, 0, 10)
found := make(map[string]struct{}, 10)
for {
env, err = cel.NewEnv(cel.Declarations(variables...))
if err != nil {
return err
}
var iss *cel.Issues
ast, iss = env.Compile(value)
if iss.Err() != nil {
for _, e := range iss.Errors() {
if strings.HasPrefix(e.Message, missingVariableMessage) {
msg := e.Message[len(missingVariableMessage):]
msg = msg[0:strings.IndexRune(msg, '\'')]
if _, exists := found[msg]; exists {
continue
}
variables = append(variables, decls.NewVar(msg, decls.Any))
found[msg] = struct{}{}
} else {
return iss.Err()
}
}
} else {
break
}
}
prg, err := env.Program(ast)
if err != nil {
return err
}
*e = Expr{
expr: value,
program: prg,
}
return nil
}
// Eval evaluates the compiled expression against the supplied variables,
// compiling it first if needed.
func (e *Expr) Eval(variables map[string]interface{}) (interface{}, error) {
if e.program == nil {
err := e.DecodeString(e.expr)
if err != nil {
return nil, err
}
}
out, _, err := e.program.Eval(variables)
if err != nil {
return nil, err
}
return out.Value(), nil
}
func (e *Expr) Expr() string {
return e.expr
}
func (e *Expr) String() string {
if e == nil {
return ""
}
return e.expr
}
func (e *Expr) MarshalJSON() ([]byte, error) {
return json.Marshal(e.expr)
}
func (e *Expr) UnmarshalJSON(b []byte) error {
var code string
if err := json.Unmarshal(b, &code); err != nil {
return err
}
return e.DecodeString(code)
}
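// Illustrative sketch, not part of the original file: typical use of Expr.
// The expression is compiled once with DecodeString and then evaluated against a
// map of variables; the expression text and variable names below are placeholders.
func exampleExprUsage() (interface{}, error) {
	var e Expr
	if err := e.DecodeString(`has(input.value) && input.value == 42`); err != nil {
		return nil, err
	}
	return e.Eval(map[string]interface{}{
		"input": map[string]interface{}{"value": 42},
	})
}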
|
mikeee/dapr
|
pkg/expr/expr.go
|
GO
|
mit
| 1,887 |
package expr_test
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/dapr/dapr/pkg/expr"
)
func TestEval(t *testing.T) {
var e expr.Expr
code := `(has(input.test) && input.test == 1234) || (has(result.test) && result.test == 5678)`
err := e.DecodeString(code)
require.NoError(t, err)
assert.Equal(t, code, e.String())
result, err := e.Eval(map[string]interface{}{
"input": map[string]interface{}{
"test": 1234,
},
"result": map[string]interface{}{
"test": 5678,
},
})
require.NoError(t, err)
assert.True(t, result.(bool))
}
func TestJSONMarshal(t *testing.T) {
var e expr.Expr
exprBytes := []byte(`"(has(input.test) && input.test == 1234) || (has(result.test) && result.test == 5678)"`)
err := e.UnmarshalJSON(exprBytes)
require.NoError(t, err)
assert.Equal(t, `(has(input.test) && input.test == 1234) || (has(result.test) && result.test == 5678)`, e.Expr())
_, err = e.MarshalJSON()
require.NoError(t, err)
}
func TestEmptyProgramNoPanic(t *testing.T) {
var e expr.Expr
r, err := e.Eval(map[string]interface{}{})
assert.Nil(t, r)
require.Error(t, err)
}
var result interface{}
func BenchmarkEval(b *testing.B) {
var e expr.Expr
err := e.DecodeString(`(has(input.test) && input.test == 1234) || (has(result.test) && result.test == 5678)`)
require.NoError(b, err)
data := map[string]interface{}{
"input": map[string]interface{}{
"test": 1234,
},
"result": map[string]interface{}{
"test": 5678,
},
}
var r interface{}
for n := 0; n < b.N; n++ {
r, _ = e.Eval(data)
}
result = r
}
|
mikeee/dapr
|
pkg/expr/expr_test.go
|
GO
|
mit
| 1,605 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package health
import (
"context"
"encoding/json"
"errors"
"fmt"
"net"
"net/http"
"sync/atomic"
"time"
"github.com/dapr/kit/logger"
)
// Server is the interface for the healthz server.
type Server interface {
Run(context.Context, string, int) error
Ready()
}
type server struct {
ready atomic.Bool
targetsReady atomic.Int64
targets int64
router http.Handler
log logger.Logger
}
// Options configures the healthz server.
type Options struct {
	// RouterOptions registers additional paths and handlers on the server's router.
	RouterOptions []RouterOptions
	// Targets is the number of Ready calls required before the server reports
	// healthy; it defaults to 1 when nil.
	Targets *int
	Log     logger.Logger
}

// RouterOptions returns a path and the handler that should serve it.
type RouterOptions func(log logger.Logger) (string, http.Handler)

// NewRouterOptions registers the given handler at the given path.
func NewRouterOptions(path string, handler http.Handler) RouterOptions {
	return func(log logger.Logger) (string, http.Handler) {
		return path, handler
	}
}

// NewJSONDataRouterOptions registers a handler that responds with the JSON
// encoding of the value returned by getter.
func NewJSONDataRouterOptions[T any](path string, getter func() (T, error)) RouterOptions {
return func(log logger.Logger) (string, http.Handler) {
return path, http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
data, err := getter()
if err != nil {
writer.WriteHeader(http.StatusInternalServerError)
writer.Write([]byte(err.Error()))
return
}
writer.Header().Set("Content-Type", "application/json")
err = json.NewEncoder(writer).Encode(data)
if err != nil {
log.Warnf("failed to encode json to response writer: %s", err.Error())
writer.WriteHeader(http.StatusInternalServerError)
writer.Write([]byte(err.Error()))
return
}
})
}
}
// NewServer returns a new healthz server.
func NewServer(opts Options) Server {
targets := 1
if opts.Targets != nil {
targets = *opts.Targets
}
s := &server{
log: opts.Log,
targets: int64(targets),
}
router := http.NewServeMux()
router.Handle("/healthz", s.healthz())
// add public handlers to the router
for _, option := range opts.RouterOptions {
path, handler := option(s.log)
router.Handle(path, handler)
}
s.router = router
return s
}
// Ready sets a ready state for the endpoint handlers.
func (s *server) Ready() {
s.targetsReady.Add(1)
if s.targetsReady.Load() >= s.targets {
s.ready.Store(true)
}
}
// Run starts a net/http server with a healthz endpoint.
func (s *server) Run(ctx context.Context, listenAddress string, port int) error {
//nolint:gosec
srv := &http.Server{
Addr: fmt.Sprintf("%s:%d", listenAddress, port),
Handler: s.router,
BaseContext: func(_ net.Listener) context.Context { return ctx },
}
serveErr := make(chan error, 1)
go func() {
s.log.Infof("Healthz server is listening on %s", srv.Addr)
err := srv.ListenAndServe()
if !errors.Is(err, http.ErrServerClosed) {
serveErr <- err
return
}
serveErr <- nil
}()
select {
case err := <-serveErr:
return err
case <-ctx.Done():
// nop
}
s.log.Info("Healthz server is shutting down")
shutdownCtx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
return errors.Join(srv.Shutdown(shutdownCtx), <-serveErr)
}
// healthz is a health endpoint handler.
func (s *server) healthz() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var status int
if s.ready.Load() {
status = http.StatusOK
} else {
status = http.StatusServiceUnavailable
}
w.WriteHeader(status)
})
}
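// Illustrative sketch, not part of the original file: starting the healthz server
// and marking it ready once initialization completes. The listen address and port
// are placeholders.
func exampleRun(ctx context.Context, log logger.Logger) error {
	srv := NewServer(Options{Log: log})
	// Mark the single default target ready once startup work is complete.
	srv.Ready()
	// Run blocks until ctx is cancelled or the listener fails.
	return srv.Run(ctx, "0.0.0.0", 8080)
}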
|
mikeee/dapr
|
pkg/health/server.go
|
GO
|
mit
| 3,837 |