//go:build !allcomponents && !stablecomponents
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package components
// This file is built when neither "allcomponents" nor "stablecomponents" are set as Go build tags.
// Its purpose is to provide a more user-friendly error to developers.
func init() {
panic("When building github.com/dapr/dapr/cmd/daprd, you must use either '-tags stablecomponents' or `-tags allcomponents'")
}
mikeee/dapr | cmd/daprd/components/zz_notag.go | Go | MIT | 942 bytes
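Go's build constraints make this guard mutually exclusive with the tag-gated registration files: when either tag is supplied, a different file in the same package is compiled and this init never runs. A minimal counterpart sketch (hypothetical, not the actual Dapr file):

//go:build allcomponents || stablecomponents

package components

// Counterpart sketch: when either build tag is present, a file like this one
// is compiled instead of the panicking guard above, and the package's
// component registrations (omitted here) take effect.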
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"github.com/dapr/dapr/cmd/daprd/app"
)
func main() {
app.Run()
}
mikeee/dapr | cmd/daprd/main.go | Go | MIT | 652 bytes
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"fmt"
"math"
"os"
"strconv"
"time"
"github.com/spf13/pflag"
"k8s.io/apimachinery/pkg/api/resource"
"github.com/dapr/dapr/pkg/buildinfo"
"github.com/dapr/dapr/pkg/config"
"github.com/dapr/dapr/pkg/config/protocol"
"github.com/dapr/dapr/pkg/cors"
"github.com/dapr/dapr/pkg/metrics"
"github.com/dapr/dapr/pkg/modes"
"github.com/dapr/dapr/pkg/runtime"
"github.com/dapr/dapr/pkg/security/consts"
"github.com/dapr/kit/logger"
)
type Options struct {
AppID string
ComponentsPath string
ControlPlaneAddress string
ControlPlaneTrustDomain string
ControlPlaneNamespace string
SentryAddress string
TrustAnchors []byte
AllowedOrigins string
EnableProfiling bool
AppMaxConcurrency int
EnableMTLS bool
AppSSL bool
MaxRequestSize int // In bytes
ResourcesPath []string
AppProtocol string
EnableAPILogging *bool
RuntimeVersion bool
BuildInfo bool
WaitCommand bool
DaprHTTPPort string
DaprAPIGRPCPort string
ProfilePort string
DaprInternalGRPCPort string
DaprInternalGRPCListenAddress string
DaprPublicPort string
DaprPublicListenAddress string
AppPort string
DaprGracefulShutdownSeconds int
DaprBlockShutdownDuration *time.Duration
ActorsService string
RemindersService string
DaprAPIListenAddresses string
AppHealthProbeInterval int
AppHealthProbeTimeout int
AppHealthThreshold int
EnableAppHealthCheck bool
Mode string
Config []string
UnixDomainSocket string
ReadBufferSize int // In bytes
DisableBuiltinK8sSecretStore bool
AppHealthCheckPath string
AppChannelAddress string
Logger logger.Options
Metrics *metrics.Options
}
func New(origArgs []string) (*Options, error) {
opts := Options{
EnableAPILogging: new(bool),
DaprBlockShutdownDuration: new(time.Duration),
MaxRequestSize: runtime.DefaultMaxRequestBodySize,
ReadBufferSize: runtime.DefaultReadBufferSize,
}
var (
maxRequestSizeMB int
maxBodySize string
readBufferSizeKB int
readBufferSize string
)
// We are using pflag to parse the CLI flags
// pflag is a drop-in replacement for the standard library's "flag" package, with one key difference:
// the stdlib's "flag" package has no shorthand options, so flags can be passed with a single dash (such as "daprd -mode").
// With pflag, single dashes are reserved for shorthands.
// So we iterate through all args and double-up the dash if it's single.
// This works *as long as* we don't start using shorthand flags (which haven't been in use so far).
args := make([]string, len(origArgs))
for i, a := range origArgs {
if len(a) > 2 && a[0] == '-' && a[1] != '-' {
args[i] = "-" + a
} else {
args[i] = a
}
}
// Create a flag set
fs := pflag.NewFlagSet("daprd", pflag.ExitOnError)
fs.SortFlags = true
fs.StringVar(&opts.Mode, "mode", string(modes.StandaloneMode), "Runtime mode for Dapr")
fs.StringVar(&opts.DaprHTTPPort, "dapr-http-port", strconv.Itoa(runtime.DefaultDaprHTTPPort), "HTTP port for Dapr API to listen on")
fs.StringVar(&opts.DaprAPIListenAddresses, "dapr-listen-addresses", runtime.DefaultAPIListenAddress, "One or more addresses for the Dapr API to listen on, CSV limited")
fs.StringVar(&opts.DaprPublicPort, "dapr-public-port", "", "Public port for Dapr Health and Metadata to listen on")
fs.StringVar(&opts.DaprPublicListenAddress, "dapr-public-listen-address", "", "Public listen address for Dapr Health and Metadata")
fs.StringVar(&opts.DaprAPIGRPCPort, "dapr-grpc-port", strconv.Itoa(runtime.DefaultDaprAPIGRPCPort), "gRPC port for the Dapr API to listen on")
fs.StringVar(&opts.DaprInternalGRPCPort, "dapr-internal-grpc-port", "", "gRPC port for the Dapr Internal API to listen on")
fs.StringVar(&opts.DaprInternalGRPCListenAddress, "dapr-internal-grpc-listen-address", "", "gRPC listen address for the Dapr Internal API")
fs.StringVar(&opts.AppPort, "app-port", "", "The port the application is listening on")
fs.StringVar(&opts.ProfilePort, "profile-port", strconv.Itoa(runtime.DefaultProfilePort), "The port for the profile server")
fs.StringVar(&opts.AppProtocol, "app-protocol", string(protocol.HTTPProtocol), "Protocol for the application: grpc, grpcs, http, https, h2c")
fs.StringVar(&opts.ComponentsPath, "components-path", "", "Alias for --resources-path")
fs.MarkDeprecated("components-path", "use --resources-path")
fs.StringSliceVar(&opts.ResourcesPath, "resources-path", nil, "Path for resources directory. If not specified, no resources will be loaded. Can be passed multiple times")
fs.StringSliceVar(&opts.Config, "config", nil, "Path to config file, or name of a configuration object. In standalone mode, can be passed multiple times")
fs.StringVar(&opts.AppID, "app-id", "", "A unique ID for Dapr. Used for Service Discovery and state")
fs.StringVar(&opts.ControlPlaneAddress, "control-plane-address", "", "Address for a Dapr control plane")
fs.StringVar(&opts.SentryAddress, "sentry-address", "", "Address for the Sentry CA service")
fs.StringVar(&opts.ControlPlaneTrustDomain, "control-plane-trust-domain", "localhost", "Trust domain of the Dapr control plane")
fs.StringVar(&opts.ControlPlaneNamespace, "control-plane-namespace", "default", "Namespace of the Dapr control plane")
fs.StringVar(&opts.AllowedOrigins, "allowed-origins", cors.DefaultAllowedOrigins, "Allowed HTTP origins")
fs.BoolVar(&opts.EnableProfiling, "enable-profiling", false, "Enable profiling")
fs.BoolVar(&opts.RuntimeVersion, "version", false, "Prints the runtime version")
fs.BoolVar(&opts.BuildInfo, "build-info", false, "Prints the build info")
fs.BoolVar(&opts.WaitCommand, "wait", false, "wait for Dapr outbound ready")
fs.IntVar(&opts.AppMaxConcurrency, "app-max-concurrency", -1, "Controls the concurrency level when forwarding requests to user code; set to -1 for no limits")
fs.BoolVar(&opts.EnableMTLS, "enable-mtls", false, "Enables automatic mTLS for daprd-to-daprd communication channels")
fs.BoolVar(&opts.AppSSL, "app-ssl", false, "Sets the URI scheme of the app to https and attempts a TLS connection")
fs.MarkDeprecated("app-ssl", "use '--app-protocol https|grpcs'")
fs.IntVar(&maxRequestSizeMB, "dapr-http-max-request-size", runtime.DefaultMaxRequestBodySize>>20, "Max size of request body in MB")
fs.MarkDeprecated("dapr-http-max-request-size", "use '--max-body-size "+strconv.Itoa(runtime.DefaultMaxRequestBodySize>>20)+"Mi'")
fs.StringVar(&maxBodySize, "max-body-size", strconv.Itoa(runtime.DefaultMaxRequestBodySize>>20)+"Mi", "Max size of request body for the Dapr HTTP and gRPC servers, as a resource quantity")
fs.IntVar(&readBufferSizeKB, "dapr-http-read-buffer-size", runtime.DefaultReadBufferSize>>10, "Max size of read buffer, in KB (also used to handle request headers)")
fs.MarkDeprecated("dapr-http-read-buffer-size", "use '--read-buffer-size "+strconv.Itoa(runtime.DefaultReadBufferSize>>10)+"Ki'")
fs.StringVar(&readBufferSize, "read-buffer-size", strconv.Itoa(runtime.DefaultReadBufferSize>>10)+"Ki", "Max size of read buffer, as a resource quantity (also used to handle request headers)")
fs.StringVar(&opts.UnixDomainSocket, "unix-domain-socket", "", "Path to a unix domain socket dir mount. If specified, Dapr API servers will use Unix Domain Sockets")
fs.IntVar(&opts.DaprGracefulShutdownSeconds, "dapr-graceful-shutdown-seconds", int(runtime.DefaultGracefulShutdownDuration/time.Second), "Graceful shutdown time in seconds")
fs.DurationVar(opts.DaprBlockShutdownDuration, "dapr-block-shutdown-duration", 0, "If enabled, will block graceful shutdown after terminate signal is received until either the given duration has elapsed or the app reports unhealthy. Disabled by default")
fs.BoolVar(opts.EnableAPILogging, "enable-api-logging", false, "Enable API logging for API calls")
fs.BoolVar(&opts.DisableBuiltinK8sSecretStore, "disable-builtin-k8s-secret-store", false, "Disable the built-in Kubernetes Secret Store")
fs.BoolVar(&opts.EnableAppHealthCheck, "enable-app-health-check", false, "Enable health checks for the application using the protocol defined with app-protocol")
fs.StringVar(&opts.AppHealthCheckPath, "app-health-check-path", runtime.DefaultAppHealthCheckPath, "Path used for health checks; HTTP only")
fs.IntVar(&opts.AppHealthProbeInterval, "app-health-probe-interval", int(config.AppHealthConfigDefaultProbeInterval/time.Second), "Interval to probe for the health of the app in seconds")
fs.IntVar(&opts.AppHealthProbeTimeout, "app-health-probe-timeout", int(config.AppHealthConfigDefaultProbeTimeout/time.Millisecond), "Timeout for app health probes in milliseconds")
fs.IntVar(&opts.AppHealthThreshold, "app-health-threshold", int(config.AppHealthConfigDefaultThreshold), "Number of consecutive failures for the app to be considered unhealthy")
fs.StringVar(&opts.AppChannelAddress, "app-channel-address", runtime.DefaultChannelAddress, "The network address the application listens on")
// Add flags for actors, placement, and reminders
// --placement-host-address is a legacy (but not deprecated) flag that is translated to the actors-service flag
var placementServiceHostAddr string
fs.StringVar(&placementServiceHostAddr, "placement-host-address", "", "Addresses for Dapr Actor Placement servers (overrides actors-service)")
fs.StringVar(&opts.ActorsService, "actors-service", "", "Type and address of the actors service, in the format 'type:address'")
fs.StringVar(&opts.RemindersService, "reminders-service", "", "Type and address of the reminders service, in the format 'type:address'")
// Add flags for logger and metrics
opts.Logger = logger.DefaultOptions()
opts.Logger.AttachCmdFlags(fs.StringVar, fs.BoolVar)
opts.Metrics = metrics.DefaultMetricOptions()
opts.Metrics.AttachCmdFlags(fs.StringVar, fs.BoolVar)
// Ignore errors; flagset is set for ExitOnError
_ = fs.Parse(args)
// fs.Parse() always assigns a value to "enable-api-logging", and it is false whether the flag was explicitly set to false or simply unset.
// This flag needs a third state (unset), so we check whether it was actually passed and, if not, set EnableAPILogging to nil.
if !*opts.EnableAPILogging && !fs.Changed("enable-api-logging") {
opts.EnableAPILogging = nil
}
// If placement-host-address is set, that always takes priority over actors-service
if placementServiceHostAddr != "" {
opts.ActorsService = "placement:" + placementServiceHostAddr
}
// Max body size
// max-body-size has priority over dapr-http-max-request-size
if fs.Changed("max-body-size") {
q, err := resource.ParseQuantity(maxBodySize)
if err != nil {
return nil, fmt.Errorf("invalid value for 'max-body-size' option: %w", err)
}
opts.MaxRequestSize, err = getQuantityBytes(q)
if err != nil {
return nil, fmt.Errorf("invalid value for 'max-body-size' option: %w", err)
}
} else if fs.Changed("dapr-http-max-request-size") {
if maxRequestSizeMB > 0 {
opts.MaxRequestSize = maxRequestSizeMB << 20
} else {
opts.MaxRequestSize = maxRequestSizeMB
}
}
// Read buffer size
// read-buffer-size has priority over dapr-http-read-buffer-size
if fs.Changed("read-buffer-size") {
q, err := resource.ParseQuantity(readBufferSize)
if err != nil {
return nil, fmt.Errorf("invalid value for 'read-buffer-size' option: %w", err)
}
opts.ReadBufferSize, err = getQuantityBytes(q)
if err != nil {
return nil, fmt.Errorf("invalid value for 'read-buffer-size' option: %w", err)
}
} else if fs.Changed("dapr-http-read-buffer-size") {
if readBufferSizeKB > 0 {
opts.ReadBufferSize = readBufferSizeKB << 10
} else {
opts.ReadBufferSize = readBufferSizeKB
}
}
opts.TrustAnchors = []byte(os.Getenv(consts.TrustAnchorsEnvVar))
if !fs.Changed("control-plane-namespace") {
ns, ok := os.LookupEnv(consts.ControlPlaneNamespaceEnvVar)
if ok {
opts.ControlPlaneNamespace = ns
}
}
if !fs.Changed("control-plane-trust-domain") {
td, ok := os.LookupEnv(consts.ControlPlaneTrustDomainEnvVar)
if ok {
opts.ControlPlaneTrustDomain = td
}
}
if !fs.Changed("dapr-block-shutdown-duration") {
opts.DaprBlockShutdownDuration = nil
}
return &opts, nil
}
// getQuantityBytes returns the number of bytes in the quantity.
func getQuantityBytes(q resource.Quantity) (int, error) {
if q.IsZero() {
return 0, nil
}
val, ok := q.AsInt64()
if !ok || (buildinfo.PtrSize == 32 && val > math.MaxInt32) {
return 0, fmt.Errorf("cannot get bytes from resource quantity value '%v'", q)
}
return int(val), nil
}
mikeee/dapr | cmd/daprd/options/options.go | Go | MIT | 13,698 bytes
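getQuantityBytes above leans on the apimachinery resource.Quantity parser, which is what lets --max-body-size accept values like "4Mi". A self-contained sketch of the same conversion (the function name is illustrative):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// bytesFromQuantity converts a Kubernetes resource quantity string such as
// "4Mi" into a byte count, mirroring getQuantityBytes above.
func bytesFromQuantity(s string) (int64, error) {
	q, err := resource.ParseQuantity(s)
	if err != nil {
		return 0, err
	}
	val, ok := q.AsInt64()
	if !ok {
		return 0, fmt.Errorf("quantity %q does not fit in an int64", s)
	}
	return val, nil
}

func main() {
	for _, s := range []string{"400", "2Ki", "4Mi"} {
		n, err := bytesFromQuantity(s)
		fmt.Println(s, "=", n, "bytes, err:", err) // 400, 2048, 4194304
	}
}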
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"strconv"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/dapr/dapr/pkg/modes"
"github.com/dapr/dapr/pkg/runtime"
)
func TestAppFlag(t *testing.T) {
opts, err := New([]string{
"-app-id", "testapp", // Single dash
"--app-port", "80",
"--app-protocol", "http",
"--metrics-port", strconv.Itoa(10000),
})
require.NoError(t, err)
assert.EqualValues(t, "testapp", opts.AppID)
assert.EqualValues(t, "80", opts.AppPort)
assert.EqualValues(t, "http", opts.AppProtocol)
}
func TestStandaloneGlobalConfig(t *testing.T) {
opts, err := New([]string{
"--app-id", "testapp",
"-mode", string(modes.StandaloneMode), // Single dash
"--config", "../../../pkg/config/testdata/metric_disabled.yaml",
"--metrics-port", strconv.Itoa(10000),
})
require.NoError(t, err)
assert.EqualValues(t, "testapp", opts.AppID)
assert.EqualValues(t, string(modes.StandaloneMode), opts.Mode)
assert.Equal(t, []string{"../../../pkg/config/testdata/metric_disabled.yaml"}, opts.Config)
}
func TestEnableAPILogging(t *testing.T) {
t.Run("explicitly enabled", func(t *testing.T) {
opts, err := New([]string{
"-enable-api-logging", // Single dash
})
require.NoError(t, err)
require.NotNil(t, opts.EnableAPILogging)
assert.True(t, *opts.EnableAPILogging)
})
t.Run("explicitly enabled with true written out", func(t *testing.T) {
opts, err := New([]string{
"--enable-api-logging=true",
})
require.NoError(t, err)
require.NotNil(t, opts.EnableAPILogging)
assert.True(t, *opts.EnableAPILogging)
})
t.Run("explicitly disabled", func(t *testing.T) {
opts, err := New([]string{
"-enable-api-logging=false", // Single dash
})
require.NoError(t, err)
require.NotNil(t, opts.EnableAPILogging)
assert.False(t, *opts.EnableAPILogging)
})
t.Run("flag is unset", func(t *testing.T) {
opts, err := New([]string{})
require.NoError(t, err)
require.Nil(t, opts.EnableAPILogging)
})
}
func TestMultipleConfig(t *testing.T) {
t.Run("config flag not defined", func(t *testing.T) {
opts, err := New([]string{})
require.NoError(t, err)
require.Empty(t, opts.Config)
})
t.Run("single config", func(t *testing.T) {
opts, err := New([]string{
"--config", "cfg1.yaml",
})
require.NoError(t, err)
require.Equal(t, []string{"cfg1.yaml"}, opts.Config)
})
t.Run("comma-separated configs", func(t *testing.T) {
opts, err := New([]string{
"-config=cfg1.yaml,cfg2.yaml", // Single dash
})
require.NoError(t, err)
require.Equal(t, []string{"cfg1.yaml", "cfg2.yaml"}, opts.Config)
})
t.Run("multiple config flags", func(t *testing.T) {
opts, err := New([]string{
"-config=cfg1.yaml", // Single dash
"-config", "cfg2.yaml", // Single dash
})
require.NoError(t, err)
require.Equal(t, []string{"cfg1.yaml", "cfg2.yaml"}, opts.Config)
})
t.Run("multiple config flags and comma-separated values", func(t *testing.T) {
opts, err := New([]string{
"-config=cfg1.yaml", // Single dash
"--config", "cfg2.yaml,cfg3.yaml",
})
require.NoError(t, err)
require.Equal(t, []string{"cfg1.yaml", "cfg2.yaml", "cfg3.yaml"}, opts.Config)
})
}
func TestMaxBodySize(t *testing.T) {
t.Run("No max-body-size", func(t *testing.T) {
opts, err := New([]string{})
require.NoError(t, err)
assert.Equal(t, runtime.DefaultMaxRequestBodySize, opts.MaxRequestSize)
})
t.Run("max-body-size is unitless", func(t *testing.T) {
opts, err := New([]string{
"--max-body-size", "400",
})
require.NoError(t, err)
assert.Equal(t, 400, opts.MaxRequestSize)
})
t.Run("max-body-size with unit", func(t *testing.T) {
opts, err := New([]string{
"--max-body-size", "2Mi",
})
require.NoError(t, err)
assert.Equal(t, 2<<20, opts.MaxRequestSize)
})
t.Run("dapr-http-max-request-size", func(t *testing.T) {
opts, err := New([]string{
"--dapr-http-max-request-size", "2",
})
require.NoError(t, err)
assert.Equal(t, 2<<20, opts.MaxRequestSize)
})
t.Run("max-body-size has priority over dapr-http-max-request-size", func(t *testing.T) {
opts, err := New([]string{
"--max-body-size", "1Mi",
"--dapr-http-max-request-size", "2",
})
require.NoError(t, err)
assert.Equal(t, 1<<20, opts.MaxRequestSize)
})
t.Run("max-body-size set to 0", func(t *testing.T) {
opts, err := New([]string{
"--max-body-size", "0",
})
require.NoError(t, err)
assert.Equal(t, 0, opts.MaxRequestSize)
})
t.Run("max-body-size set to -1", func(t *testing.T) {
opts, err := New([]string{
"--max-body-size", "-1",
})
require.NoError(t, err)
assert.Equal(t, -1, opts.MaxRequestSize)
})
t.Run("dapr-http-max-request-size set to 0", func(t *testing.T) {
opts, err := New([]string{
"--dapr-http-max-request-size", "0",
})
require.NoError(t, err)
assert.Equal(t, 0, opts.MaxRequestSize)
})
t.Run("dapr-http-max-request-size set to -1", func(t *testing.T) {
opts, err := New([]string{
"--dapr-http-max-request-size", "-1",
})
require.NoError(t, err)
assert.Equal(t, -1, opts.MaxRequestSize)
})
t.Run("max-body-size is invalid", func(t *testing.T) {
_, err := New([]string{
"--max-body-size", "bad",
})
require.Error(t, err)
})
}
func TestReadBufferSize(t *testing.T) {
t.Run("No read-buffer-size", func(t *testing.T) {
opts, err := New([]string{})
require.NoError(t, err)
assert.Equal(t, runtime.DefaultReadBufferSize, opts.ReadBufferSize)
})
t.Run("read-buffer-size is unitless", func(t *testing.T) {
opts, err := New([]string{
"--read-buffer-size", "400",
})
require.NoError(t, err)
assert.Equal(t, 400, opts.ReadBufferSize)
})
t.Run("read-buffer-size with unit", func(t *testing.T) {
opts, err := New([]string{
"--read-buffer-size", "2Ki",
})
require.NoError(t, err)
assert.Equal(t, 2<<10, opts.ReadBufferSize)
})
t.Run("dapr-http-read-buffer-size", func(t *testing.T) {
opts, err := New([]string{
"--dapr-http-read-buffer-size", "2",
})
require.NoError(t, err)
assert.Equal(t, 2<<10, opts.ReadBufferSize)
})
t.Run("read-buffer-size has priority over dapr-http-read-buffer-size", func(t *testing.T) {
opts, err := New([]string{
"--read-buffer-size", "1Ki",
"--dapr-http-read-buffer-size", "2",
})
require.NoError(t, err)
assert.Equal(t, 1<<10, opts.ReadBufferSize)
})
t.Run("read-buffer-size set to 0", func(t *testing.T) {
opts, err := New([]string{
"--read-buffer-size", "0",
})
require.NoError(t, err)
assert.Equal(t, 0, opts.ReadBufferSize)
})
t.Run("read-buffer-size set to -1", func(t *testing.T) {
opts, err := New([]string{
"--read-buffer-size", "-1",
})
require.NoError(t, err)
assert.Equal(t, -1, opts.ReadBufferSize)
})
t.Run("dapr-http-read-buffer-size set to 0", func(t *testing.T) {
opts, err := New([]string{
"--dapr-http-read-buffer-size", "0",
})
require.NoError(t, err)
assert.Equal(t, 0, opts.ReadBufferSize)
})
t.Run("dapr-http-read-buffer-size set to -1", func(t *testing.T) {
opts, err := New([]string{
"--dapr-http-read-buffer-size", "-1",
})
require.NoError(t, err)
assert.Equal(t, -1, opts.ReadBufferSize)
})
t.Run("read-buffer-size is invalid", func(t *testing.T) {
_, err := New([]string{
"--read-buffer-size", "bad",
})
require.Error(t, err)
})
}
func TestControlPlaneEnvVar(t *testing.T) {
t.Run("should default CLI flags if not defined", func(t *testing.T) {
opts, err := New([]string{})
require.NoError(t, err)
assert.EqualValues(t, "localhost", opts.ControlPlaneTrustDomain)
assert.EqualValues(t, "default", opts.ControlPlaneNamespace)
})
t.Run("should use CLI flags if defined", func(t *testing.T) {
opts, err := New([]string{
"--control-plane-namespace", "flag-namespace",
"--control-plane-trust-domain", "flag-trust-domain",
})
require.NoError(t, err)
assert.EqualValues(t, "flag-trust-domain", opts.ControlPlaneTrustDomain)
assert.EqualValues(t, "flag-namespace", opts.ControlPlaneNamespace)
})
t.Run("should use env vars if flags were not defined", func(t *testing.T) {
t.Setenv("DAPR_CONTROLPLANE_NAMESPACE", "env-namespace")
t.Setenv("DAPR_CONTROLPLANE_TRUST_DOMAIN", "env-trust-domain")
opts, err := New([]string{})
require.NoError(t, err)
assert.EqualValues(t, "env-trust-domain", opts.ControlPlaneTrustDomain)
assert.EqualValues(t, "env-namespace", opts.ControlPlaneNamespace)
})
t.Run("should priorities CLI flags if both flags and env vars are defined", func(t *testing.T) {
t.Setenv("DAPR_CONTROLPLANE_NAMESPACE", "env-namespace")
t.Setenv("DAPR_CONTROLPLANE_TRUST_DOMAIN", "env-trust-domain")
opts, err := New([]string{
"--control-plane-namespace", "flag-namespace",
"--control-plane-trust-domain", "flag-trust-domain",
})
require.NoError(t, err)
assert.EqualValues(t, "flag-trust-domain", opts.ControlPlaneTrustDomain)
assert.EqualValues(t, "flag-namespace", opts.ControlPlaneNamespace)
})
}
mikeee/dapr | cmd/daprd/options/options_test.go | Go | MIT | 9,534 bytes
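The unset/true/false behavior these tests exercise comes from checking fs.Changed after parsing, since pflag always assigns the boolean a value. A minimal sketch of the pattern, using the same pflag calls seen above:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// parseEnableAPILogging returns nil when the flag was never passed,
// recovering the third "unset" state that a plain bool cannot express.
func parseEnableAPILogging(args []string) *bool {
	v := new(bool)
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.BoolVar(v, "enable-api-logging", false, "Enable API logging")
	_ = fs.Parse(args)
	if !*v && !fs.Changed("enable-api-logging") {
		return nil // flag absent: unset
	}
	return v
}

func main() {
	fmt.Println(parseEnableAPILogging(nil))                                     // <nil>
	fmt.Println(*parseEnableAPILogging([]string{"--enable-api-logging"}))       // true
	fmt.Println(*parseEnableAPILogging([]string{"--enable-api-logging=false"})) // false
}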
# Dapr injector documentation
Please see the [Dapr injector documentation](https://docs.dapr.io/concepts/dapr-services/sidecar-injector/) for more information.
mikeee/dapr | cmd/injector/README.md | Markdown | MIT | 160 bytes
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"context"
"encoding/base64"
"fmt"
"os"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"github.com/dapr/dapr/cmd/injector/options"
"github.com/dapr/dapr/pkg/buildinfo"
scheme "github.com/dapr/dapr/pkg/client/clientset/versioned"
"github.com/dapr/dapr/pkg/health"
"github.com/dapr/dapr/pkg/injector/service"
"github.com/dapr/dapr/pkg/metrics"
"github.com/dapr/dapr/pkg/modes"
"github.com/dapr/dapr/pkg/security"
"github.com/dapr/dapr/utils"
"github.com/dapr/kit/concurrency"
"github.com/dapr/kit/logger"
"github.com/dapr/kit/signals"
)
var log = logger.NewLogger("dapr.injector")
func Run() {
opts := options.New(os.Args[1:])
// Apply options to all loggers
err := logger.ApplyOptionsToLoggers(&opts.Logger)
if err != nil {
log.Fatal(err)
}
log.Infof("Starting Dapr Sidecar Injector -- version %s -- commit %s", buildinfo.Version(), buildinfo.Commit())
log.Infof("Log level set to: %s", opts.Logger.OutputLevel)
metricsExporter := metrics.NewExporterWithOptions(log, metrics.DefaultMetricNamespace, opts.Metrics)
err = utils.SetEnvVariables(map[string]string{
utils.KubeConfigVar: opts.Kubeconfig,
})
if err != nil {
log.Fatalf("Error set env: %v", err)
}
// Initialize injector service metrics
err = service.InitMetrics()
if err != nil {
log.Fatal(err)
}
ctx := signals.Context()
cfg, err := service.GetConfig()
if err != nil {
log.Fatalf("Error getting config: %v", err)
}
kubeClient := utils.GetKubeClient()
conf := utils.GetConfig()
daprClient, err := scheme.NewForConfig(conf)
if err != nil {
log.Fatalf("Error creating Dapr client: %v", err)
}
uids, err := service.AllowedControllersServiceAccountUID(ctx, cfg, kubeClient)
if err != nil {
log.Fatalf("Failed to get authentication uids from services accounts: %s", err)
}
namespace, err := security.CurrentNamespaceOrError()
if err != nil {
log.Fatalf("Failed to get current namespace: %s", err)
}
secProvider, err := security.New(ctx, security.Options{
SentryAddress: cfg.SentryAddress,
ControlPlaneTrustDomain: cfg.ControlPlaneTrustDomain,
ControlPlaneNamespace: namespace,
TrustAnchorsFile: &cfg.TrustAnchorsFile,
AppID: "dapr-injector",
MTLSEnabled: true,
Mode: modes.KubernetesMode,
})
if err != nil {
log.Fatal(err)
}
inj, err := service.NewInjector(service.Options{
Port: opts.Port,
ListenAddress: opts.ListenAddress,
AuthUIDs: uids,
Config: cfg,
DaprClient: daprClient,
KubeClient: kubeClient,
ControlPlaneNamespace: security.CurrentNamespace(),
ControlPlaneTrustDomain: cfg.ControlPlaneTrustDomain,
})
if err != nil {
log.Fatalf("Error creating injector: %v", err)
}
healthzServer := health.NewServer(health.Options{Log: log})
caBundleCh := make(chan []byte)
mngr := concurrency.NewRunnerManager(
metricsExporter.Run,
secProvider.Run,
func(ctx context.Context) error {
sec, rerr := secProvider.Handler(ctx)
if rerr != nil {
return rerr
}
sentryID, rerr := security.SentryID(sec.ControlPlaneTrustDomain(), security.CurrentNamespace())
if rerr != nil {
return rerr
}
return inj.Run(ctx,
sec.TLSServerConfigNoClientAuth(),
sentryID,
sec.CurrentTrustAnchors,
)
},
func(ctx context.Context) error {
readyErr := inj.Ready(ctx)
if readyErr != nil {
return readyErr
}
healthzServer.Ready()
<-ctx.Done()
return nil
},
func(ctx context.Context) error {
healthzErr := healthzServer.Run(ctx, opts.HealthzListenAddress, opts.HealthzPort)
if healthzErr != nil {
return fmt.Errorf("failed to start healthz server: %w", healthzErr)
}
return nil
},
func(ctx context.Context) error {
sec, rErr := secProvider.Handler(ctx)
if rErr != nil {
return rErr
}
sec.WatchTrustAnchors(ctx, caBundleCh)
return nil
},
// Watch for changes to the trust anchors and update the webhook
// configuration on events.
func(ctx context.Context) error {
sec, rerr := secProvider.Handler(ctx)
if rerr != nil {
return rerr
}
caBundle, rErr := sec.CurrentTrustAnchors(ctx)
if rErr != nil {
return rErr
}
// Patch the mutating webhook configuration with the current trust
// anchors.
// Re-patch every time the trust anchors change.
for {
_, rErr = kubeClient.AdmissionregistrationV1().MutatingWebhookConfigurations().Patch(ctx,
"dapr-sidecar-injector",
types.JSONPatchType,
[]byte(`[{"op":"replace","path":"/webhooks/0/clientConfig/caBundle","value":"`+base64.StdEncoding.EncodeToString(caBundle)+`"}]`),
metav1.PatchOptions{},
)
if rErr != nil {
return rErr
}
select {
case caBundle = <-caBundleCh:
case <-ctx.Done():
return nil
}
}
},
)
err = mngr.Run(ctx)
if err != nil {
log.Fatalf("Error running injector: %v", err)
}
log.Info("Dapr sidecar injector shut down gracefully")
}
mikeee/dapr | cmd/injector/app/app.go | Go | MIT | 5,639 bytes
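The webhook update above issues a single-operation JSON patch built by string concatenation. A sketch of the same payload built with encoding/json instead (the helper name is hypothetical):

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// caBundlePatch builds the one-op JSON patch used above: replace the CA
// bundle on the first webhook's clientConfig with the new trust anchors.
func caBundlePatch(caBundle []byte) ([]byte, error) {
	patch := []map[string]any{{
		"op":    "replace",
		"path":  "/webhooks/0/clientConfig/caBundle",
		"value": base64.StdEncoding.EncodeToString(caBundle),
	}}
	return json.Marshal(patch)
}

func main() {
	b, err := caBundlePatch([]byte("-----BEGIN CERTIFICATE----- ..."))
	fmt.Println(string(b), err)
}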
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"github.com/dapr/dapr/cmd/injector/app"
)
func main() {
app.Run()
}
mikeee/dapr | cmd/injector/main.go | Go | MIT | 655 bytes
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"path/filepath"
"github.com/spf13/pflag"
"k8s.io/client-go/util/homedir"
"github.com/dapr/dapr/pkg/metrics"
"github.com/dapr/kit/logger"
)
type Options struct {
HealthzPort int
HealthzListenAddress string
Kubeconfig string
Port int
ListenAddress string
Logger logger.Options
Metrics *metrics.Options
}
func New(origArgs []string) *Options {
var opts Options
// We are using pflag to parse the CLI flags
// pflag is a drop-in replacement for the standard library's "flag" package, with one key difference:
// the stdlib's "flag" package has no shorthand options, so flags can be passed with a single dash (such as "daprd -mode").
// With pflag, single dashes are reserved for shorthands.
// So we iterate through all args and double-up the dash if it's single.
// This works *as long as* we don't start using shorthand flags (which haven't been in use so far).
args := make([]string, len(origArgs))
for i, a := range origArgs {
if len(a) > 2 && a[0] == '-' && a[1] != '-' {
args[i] = "-" + a
} else {
args[i] = a
}
}
// Create a flag set
fs := pflag.NewFlagSet("sentry", pflag.ExitOnError)
fs.SortFlags = true
fs.IntVar(&opts.HealthzPort, "healthz-port", 8080, "The port used for health checks")
fs.StringVar(&opts.HealthzListenAddress, "healthz-listen-address", "", "The listening address for the healthz server")
fs.IntVar(&opts.Port, "port", 4000, "The port used for the injector service")
fs.StringVar(&opts.ListenAddress, "listen-address", "", "The listen address for the injector service")
if home := homedir.HomeDir(); home != "" {
fs.StringVar(&opts.Kubeconfig, "kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
} else {
fs.StringVar(&opts.Kubeconfig, "kubeconfig", "", "absolute path to the kubeconfig file")
}
opts.Logger = logger.DefaultOptions()
opts.Logger.AttachCmdFlags(fs.StringVar, fs.BoolVar)
opts.Metrics = metrics.DefaultMetricOptions()
opts.Metrics.AttachCmdFlags(fs.StringVar, fs.BoolVar)
// Ignore errors; flagset is set for ExitOnError
_ = fs.Parse(args)
return &opts
}
mikeee/dapr | cmd/injector/options/options.go | Go | MIT | 2,833 bytes
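The dash-doubling loop that opens New here (and in daprd, placement, and sentry) can be exercised in isolation; a standalone sketch with an illustrative function name:

package main

import "fmt"

// normalizeArgs doubles a single leading dash so pflag accepts
// stdlib-flag-style arguments such as "-mode standalone". Two-character
// arguments like "-x" are left alone, preserving pflag shorthands.
func normalizeArgs(origArgs []string) []string {
	args := make([]string, len(origArgs))
	for i, a := range origArgs {
		if len(a) > 2 && a[0] == '-' && a[1] != '-' {
			args[i] = "-" + a
		} else {
			args[i] = a
		}
	}
	return args
}

func main() {
	fmt.Println(normalizeArgs([]string{"-app-id", "myapp", "--app-port", "80", "-x"}))
	// Output: [--app-id myapp --app-port 80 -x]
}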
# Dapr operator documentation
Please see the [Dapr operator documentation](https://docs.dapr.io/concepts/dapr-services/operator/) for more information.
mikeee/dapr | cmd/operator/README.md | Markdown | MIT | 152 bytes
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"github.com/dapr/dapr/cmd/operator/options"
"github.com/dapr/dapr/pkg/buildinfo"
"github.com/dapr/dapr/pkg/metrics"
"github.com/dapr/dapr/pkg/operator"
"github.com/dapr/dapr/pkg/operator/monitoring"
"github.com/dapr/kit/concurrency"
"github.com/dapr/kit/logger"
"github.com/dapr/kit/signals"
)
var log = logger.NewLogger("dapr.operator")
func Run() {
opts := options.New()
// Apply options to all loggers.
if err := logger.ApplyOptionsToLoggers(&opts.Logger); err != nil {
log.Fatal(err)
}
log.Infof("Starting Dapr Operator -- version %s -- commit %s", buildinfo.Version(), buildinfo.Commit())
log.Infof("Log level set to: %s", opts.Logger.OutputLevel)
metricsExporter := metrics.NewExporterWithOptions(log, metrics.DefaultMetricNamespace, opts.Metrics)
if err := monitoring.InitMetrics(); err != nil {
log.Fatal(err)
}
ctx := signals.Context()
op, err := operator.NewOperator(ctx, operator.Options{
Config: opts.Config,
TrustAnchorsFile: opts.TrustAnchorsFile,
LeaderElection: !opts.DisableLeaderElection,
WatchdogMaxRestartsPerMin: opts.MaxPodRestartsPerMinute,
WatchNamespace: opts.WatchNamespace,
ServiceReconcilerEnabled: !opts.DisableServiceReconciler,
ArgoRolloutServiceReconcilerEnabled: opts.EnableArgoRolloutServiceReconciler,
WatchdogEnabled: opts.WatchdogEnabled,
WatchdogInterval: opts.WatchdogInterval,
WatchdogCanPatchPodLabels: opts.WatchdogCanPatchPodLabels,
APIPort: opts.APIPort,
APIListenAddress: opts.APIListenAddress,
HealthzPort: opts.HealthzPort,
HealthzListenAddress: opts.HealthzListenAddress,
WebhookServerPort: opts.WebhookServerPort,
WebhookServerListenAddress: opts.WebhookServerListenAddress,
})
if err != nil {
log.Fatalf("error creating operator: %v", err)
}
err = concurrency.NewRunnerManager(
metricsExporter.Run,
op.Run,
).Run(ctx)
if err != nil {
log.Fatalf("error running operator: %v", err)
}
log.Info("operator shut down gracefully")
}
mikeee/dapr | cmd/operator/app/app.go | Go | MIT | 2,812 bytes
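concurrency.RunnerManager is the composition primitive in all of these commands. A minimal sketch, assuming only the behavior visible in these files (each runner receives the shared context, and Run blocks until the runners return):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/dapr/kit/concurrency"
)

func main() {
	// Simulate a shutdown signal by cancelling after two seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	err := concurrency.NewRunnerManager(
		func(ctx context.Context) error {
			fmt.Println("server: serving until shutdown")
			<-ctx.Done() // e.g. an HTTP or gRPC server blocking here
			return nil
		},
		func(ctx context.Context) error {
			fmt.Println("watcher: watching until shutdown")
			<-ctx.Done()
			return nil
		},
	).Run(ctx)
	if err != nil {
		fmt.Println("runner manager error:", err)
	}
}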
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"github.com/dapr/dapr/cmd/operator/app"
)
func main() {
app.Run()
}
mikeee/dapr | cmd/operator/main.go | Go | MIT | 655 bytes
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"flag"
"strings"
"time"
"k8s.io/klog"
"github.com/dapr/dapr/pkg/metrics"
securityConsts "github.com/dapr/dapr/pkg/security/consts"
"github.com/dapr/kit/logger"
)
const (
// defaultDaprSystemConfigName is the default resource object name for Dapr System Config.
defaultDaprSystemConfigName = "daprsystem"
// defaultWatchInterval is the default value for watch-interval, in seconds (note this is a string as `once` is an acceptable value too).
defaultWatchInterval = "0"
// defaultMaxPodRestartsPerMinute is the default value for max-pod-restarts-per-minute.
defaultMaxPodRestartsPerMinute = 20
)
var log = logger.NewLogger("dapr.operator.options")
type Options struct {
Config string
MaxPodRestartsPerMinute int
DisableLeaderElection bool
DisableServiceReconciler bool
WatchNamespace string
EnableArgoRolloutServiceReconciler bool
WatchdogEnabled bool
WatchdogInterval time.Duration
watchdogIntervalStr string
WatchdogCanPatchPodLabels bool
TrustAnchorsFile string
Logger logger.Options
Metrics *metrics.Options
APIPort int
APIListenAddress string
HealthzPort int
HealthzListenAddress string
WebhookServerPort int
WebhookServerListenAddress string
}
func New() *Options {
var opts Options
// This resets the flags on klog, which will otherwise try to log to the FS.
klogFlags := flag.NewFlagSet("klog", flag.ExitOnError)
klog.InitFlags(klogFlags)
klogFlags.Set("logtostderr", "true")
flag.StringVar(&opts.Config, "config", defaultDaprSystemConfigName, "Path to config file, or name of a configuration object")
flag.StringVar(&opts.watchdogIntervalStr, "watch-interval", defaultWatchInterval, "Interval for polling pods' state, e.g. '2m'. Set to '0' to disable, or 'once' to only run once when the operator starts")
flag.IntVar(&opts.MaxPodRestartsPerMinute, "max-pod-restarts-per-minute", defaultMaxPodRestartsPerMinute, "Maximum number of pods in an invalid state that can be restarted per minute")
flag.BoolVar(&opts.DisableLeaderElection, "disable-leader-election", false, "Disable leader election for operator")
flag.BoolVar(&opts.DisableServiceReconciler, "disable-service-reconciler", false, "Disable the Service reconciler for Dapr-enabled Deployments and StatefulSets")
flag.StringVar(&opts.WatchNamespace, "watch-namespace", "", "Namespace to watch Dapr annotated resources in")
flag.BoolVar(&opts.EnableArgoRolloutServiceReconciler, "enable-argo-rollout-service-reconciler", false, "Enable the service reconciler for Dapr-enabled Argo Rollouts")
flag.BoolVar(&opts.WatchdogCanPatchPodLabels, "watchdog-can-patch-pod-labels", false, "Allow watchdog to patch pod labels to set pods with sidecar present")
flag.StringVar(&opts.TrustAnchorsFile, "trust-anchors-file", securityConsts.ControlPlaneDefaultTrustAnchorsPath, "Filepath to the trust anchors for the Dapr control plane")
flag.IntVar(&opts.APIPort, "port", 6500, "The port for the operator API server to listen on")
flag.StringVar(&opts.APIListenAddress, "listen-address", "", "The listening address for the operator API server")
flag.IntVar(&opts.HealthzPort, "healthz-port", 8080, "The port for the healthz server to listen on")
flag.StringVar(&opts.HealthzListenAddress, "healthz-listen-address", "", "The listening address for the healthz server")
flag.IntVar(&opts.WebhookServerPort, "webhook-server-port", 19443, "The port for the webhook server to listen on")
flag.StringVar(&opts.WebhookServerListenAddress, "webhook-server-listen-address", "", "The listening address for the webhook server")
opts.Logger = logger.DefaultOptions()
opts.Logger.AttachCmdFlags(flag.StringVar, flag.BoolVar)
opts.Metrics = metrics.DefaultMetricOptions()
opts.Metrics.AttachCmdFlags(flag.StringVar, flag.BoolVar)
flag.Parse()
wilc := strings.ToLower(opts.watchdogIntervalStr)
switch wilc {
case "0", "false", "f", "no", "off":
// Disabled - do nothing
default:
opts.WatchdogEnabled = true
if wilc != "once" {
dur, err := time.ParseDuration(opts.watchdogIntervalStr)
if err != nil {
log.Fatalf("invalid value for watch-interval: %v", err)
}
if dur < time.Second {
log.Fatalf("invalid watch-interval value: if not '0' or 'once', must be at least 1s")
}
opts.WatchdogInterval = dur
}
}
return &opts
}
mikeee/dapr | cmd/operator/options/options.go | Go | MIT | 5,174 bytes
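The watch-interval flag accepts off-values, "once", or a duration of at least one second. The same parsing extracted into a standalone sketch (the function name is illustrative):

package main

import (
	"fmt"
	"strings"
	"time"
)

// parseWatchInterval mirrors the watch-interval handling above:
// "0"/"false"/"f"/"no"/"off" disables the watchdog, "once" runs it a single
// time, and anything else must parse as a duration of at least one second.
func parseWatchInterval(s string) (enabled bool, interval time.Duration, err error) {
	switch strings.ToLower(s) {
	case "0", "false", "f", "no", "off":
		return false, 0, nil
	case "once":
		return true, 0, nil
	default:
		d, perr := time.ParseDuration(s)
		if perr != nil {
			return false, 0, perr
		}
		if d < time.Second {
			return false, 0, fmt.Errorf("watch-interval must be at least 1s")
		}
		return true, d, nil
	}
}

func main() {
	fmt.Println(parseWatchInterval("2m"))
	fmt.Println(parseWatchInterval("once"))
	fmt.Println(parseWatchInterval("off"))
}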
# Dapr placement documentation
Please see the [Dapr placement documentation](https://docs.dapr.io/concepts/dapr-services/placement/) for more information.
mikeee/dapr | cmd/placement/README.md | Markdown | MIT | 155 bytes
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"context"
"fmt"
"math"
"os"
"strconv"
"github.com/dapr/dapr/cmd/placement/options"
"github.com/dapr/dapr/pkg/buildinfo"
"github.com/dapr/dapr/pkg/health"
"github.com/dapr/dapr/pkg/metrics"
"github.com/dapr/dapr/pkg/modes"
"github.com/dapr/dapr/pkg/placement"
"github.com/dapr/dapr/pkg/placement/monitoring"
"github.com/dapr/dapr/pkg/placement/raft"
"github.com/dapr/dapr/pkg/security"
"github.com/dapr/kit/concurrency"
"github.com/dapr/kit/logger"
"github.com/dapr/kit/ptr"
"github.com/dapr/kit/signals"
)
var log = logger.NewLogger("dapr.placement")
func Run() {
opts := options.New(os.Args[1:])
// Apply options to all loggers.
if err := logger.ApplyOptionsToLoggers(&opts.Logger); err != nil {
log.Fatal(err)
}
log.Infof("Starting Dapr Placement Service -- version %s -- commit %s", buildinfo.Version(), buildinfo.Commit())
log.Infof("Log level set to: %s", opts.Logger.OutputLevel)
metricsExporter := metrics.NewExporterWithOptions(log, metrics.DefaultMetricNamespace, opts.Metrics)
err := monitoring.InitMetrics()
if err != nil {
log.Fatal(err)
}
// Start Raft cluster.
raftServer := raft.New(raft.Options{
ID: opts.RaftID,
InMem: opts.RaftInMemEnabled,
Peers: opts.RaftPeers,
LogStorePath: opts.RaftLogStorePath,
ReplicationFactor: int64(opts.ReplicationFactor),
MinAPILevel: uint32(opts.MinAPILevel),
MaxAPILevel: uint32(opts.MaxAPILevel),
})
if raftServer == nil {
log.Fatal("Failed to create raft server.")
}
ctx := signals.Context()
secProvider, err := security.New(ctx, security.Options{
SentryAddress: opts.SentryAddress,
ControlPlaneTrustDomain: opts.TrustDomain,
ControlPlaneNamespace: security.CurrentNamespace(),
TrustAnchorsFile: &opts.TrustAnchorsFile,
AppID: "dapr-placement",
MTLSEnabled: opts.TLSEnabled,
Mode: modes.DaprMode(opts.Mode),
})
if err != nil {
log.Fatal(err)
}
placementOpts := placement.PlacementServiceOpts{
RaftNode: raftServer,
SecProvider: secProvider,
}
if opts.MinAPILevel >= 0 && opts.MinAPILevel < math.MaxInt32 {
placementOpts.MinAPILevel = uint32(opts.MinAPILevel)
}
if opts.MaxAPILevel >= 0 && opts.MaxAPILevel < math.MaxInt32 {
placementOpts.MaxAPILevel = ptr.Of(uint32(opts.MaxAPILevel))
}
apiServer := placement.NewPlacementService(placementOpts)
err = concurrency.NewRunnerManager(
func(ctx context.Context) error {
sec, serr := secProvider.Handler(ctx)
if serr != nil {
return serr
}
return raftServer.StartRaft(ctx, sec, nil)
},
metricsExporter.Run,
secProvider.Run,
apiServer.MonitorLeadership,
func(ctx context.Context) error {
var metadataOptions []health.RouterOptions
if opts.MetadataEnabled {
metadataOptions = append(metadataOptions, health.NewJSONDataRouterOptions[*placement.PlacementTables]("/placement/state", apiServer.GetPlacementTables))
}
healthzServer := health.NewServer(health.Options{
Log: log,
RouterOptions: metadataOptions,
})
healthzServer.Ready()
if healthzErr := healthzServer.Run(ctx, opts.HealthzListenAddress, opts.HealthzPort); healthzErr != nil {
return fmt.Errorf("failed to start healthz server: %w", healthzErr)
}
return nil
},
func(ctx context.Context) error {
return apiServer.Run(ctx, opts.PlacementListenAddress, strconv.Itoa(opts.PlacementPort))
},
).Run(ctx)
if err != nil {
log.Fatal(err)
}
log.Info("Placement service shut down gracefully")
}
mikeee/dapr | cmd/placement/app/app.go | Go | MIT | 4,144 bytes
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"github.com/dapr/dapr/cmd/placement/app"
)
func main() {
app.Run()
}
mikeee/dapr | cmd/placement/main.go | Go | MIT | 656 bytes
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"fmt"
"os"
"strings"
"github.com/spf13/pflag"
"github.com/dapr/dapr/pkg/metrics"
"github.com/dapr/dapr/pkg/modes"
"github.com/dapr/dapr/pkg/placement/raft"
"github.com/dapr/dapr/pkg/security"
securityConsts "github.com/dapr/dapr/pkg/security/consts"
"github.com/dapr/kit/logger"
"github.com/dapr/kit/utils"
)
const (
//nolint:gosec
defaultCredentialsPath = "/var/run/dapr/credentials"
defaultHealthzPort = 8080
defaultPlacementPort = 50005
defaultReplicationFactor = 100
envMetadataEnabled = "DAPR_PLACEMENT_METADATA_ENABLED"
)
type Options struct {
// Raft protocol configurations
RaftID string
raftPeerFlag []string
RaftPeers []raft.PeerInfo
RaftInMemEnabled bool
RaftLogStorePath string
// Placement server configurations
PlacementPort int
PlacementListenAddress string
HealthzPort int
HealthzListenAddress string
MetadataEnabled bool
MaxAPILevel int
MinAPILevel int
TLSEnabled bool
TrustDomain string
TrustAnchorsFile string
SentryAddress string
Mode string
ReplicationFactor int
// Log and metrics configurations
Logger logger.Options
Metrics *metrics.Options
}
func New(origArgs []string) *Options {
// We are using pflag to parse the CLI flags
// pflag is a drop-in replacement for the standard library's "flag" package, with one key difference:
// the stdlib's "flag" package has no shorthand options, so flags can be passed with a single dash (such as "daprd -mode").
// With pflag, single dashes are reserved for shorthands.
// So we iterate through all args and double-up the dash if it's single.
// This works *as long as* we don't start using shorthand flags (which haven't been in use so far).
args := make([]string, len(origArgs))
for i, a := range origArgs {
if len(a) > 2 && a[0] == '-' && a[1] != '-' {
args[i] = "-" + a
} else {
args[i] = a
}
}
// Default options
opts := Options{
MetadataEnabled: utils.IsTruthy(os.Getenv(envMetadataEnabled)),
}
// Create a flag set
fs := pflag.NewFlagSet("sentry", pflag.ExitOnError)
fs.SortFlags = true
fs.StringVar(&opts.RaftID, "id", "dapr-placement-0", "Placement server ID")
fs.StringSliceVar(&opts.raftPeerFlag, "initial-cluster", []string{"dapr-placement-0=127.0.0.1:8201"}, "raft cluster peers")
fs.BoolVar(&opts.RaftInMemEnabled, "inmem-store-enabled", true, "Enable in-memory log and snapshot store unless --raft-logstore-path is set")
fs.StringVar(&opts.RaftLogStorePath, "raft-logstore-path", "", "raft log store path.")
fs.IntVar(&opts.PlacementPort, "port", defaultPlacementPort, "sets the gRPC port for the placement service")
fs.StringVar(&opts.PlacementListenAddress, "listen-address", "", "The listening address for the placement service")
fs.IntVar(&opts.HealthzPort, "healthz-port", defaultHealthzPort, "sets the HTTP port for the healthz server")
fs.StringVar(&opts.HealthzListenAddress, "healthz-listen-address", "", "The listening address for the healthz server")
fs.BoolVar(&opts.TLSEnabled, "tls-enabled", false, "Should TLS be enabled for the placement gRPC server")
fs.BoolVar(&opts.MetadataEnabled, "metadata-enabled", opts.MetadataEnabled, "Expose the placement tables on the healthz server")
fs.IntVar(&opts.MaxAPILevel, "max-api-level", 10, "If set to >= 0, causes the reported 'api-level' in the cluster to never exceed this value")
fs.IntVar(&opts.MinAPILevel, "min-api-level", 0, "Enforces a minimum 'api-level' in the cluster")
fs.IntVar(&opts.ReplicationFactor, "replicationFactor", defaultReplicationFactor, "sets the replication factor for actor distribution on vnodes")
fs.StringVar(&opts.TrustDomain, "trust-domain", "localhost", "Trust domain for the Dapr control plane")
fs.StringVar(&opts.TrustAnchorsFile, "trust-anchors-file", securityConsts.ControlPlaneDefaultTrustAnchorsPath, "Filepath to the trust anchors for the Dapr control plane")
fs.StringVar(&opts.SentryAddress, "sentry-address", fmt.Sprintf("dapr-sentry.%s.svc:443", security.CurrentNamespace()), "Address of the Sentry service")
fs.StringVar(&opts.Mode, "mode", string(modes.StandaloneMode), "Runtime mode for Placement")
opts.Logger = logger.DefaultOptions()
opts.Logger.AttachCmdFlags(fs.StringVar, fs.BoolVar)
opts.Metrics = metrics.DefaultMetricOptions()
opts.Metrics.AttachCmdFlags(fs.StringVar, fs.BoolVar)
// Ignore errors; flagset is set for ExitOnError
_ = fs.Parse(args)
opts.RaftPeers = parsePeersFromFlag(opts.raftPeerFlag)
if opts.RaftLogStorePath != "" {
opts.RaftInMemEnabled = false
}
return &opts
}
// parsePeersFromFlag converts "id=host:port" entries into raft.PeerInfo,
// silently skipping any entry that does not contain exactly one "=".
func parsePeersFromFlag(val []string) []raft.PeerInfo {
peers := make([]raft.PeerInfo, len(val))
i := 0
for _, addr := range val {
peer := strings.SplitN(addr, "=", 3)
if len(peer) != 2 {
continue
}
peers[i] = raft.PeerInfo{
ID: strings.TrimSpace(peer[0]),
Address: strings.TrimSpace(peer[1]),
}
i++
}
return peers[:i]
}
mikeee/dapr | cmd/placement/options/options.go | Go | MIT | 5,617 bytes
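metadata-enabled defaults from the DAPR_PLACEMENT_METADATA_ENABLED environment variable and can still be overridden by the flag. A sketch of that pattern; isTruthy here is a stand-in for dapr/kit's utils.IsTruthy, whose exact accepted values are an assumption:

package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/spf13/pflag"
)

// isTruthy is an illustrative stand-in for utils.IsTruthy.
func isTruthy(s string) bool {
	switch strings.ToLower(strings.TrimSpace(s)) {
	case "y", "yes", "true", "t", "on", "1":
		return true
	default:
		return false
	}
}

func main() {
	// The env var seeds the default; a passed flag overrides it.
	metadataEnabled := isTruthy(os.Getenv("DAPR_PLACEMENT_METADATA_ENABLED"))
	fs := pflag.NewFlagSet("placement", pflag.ContinueOnError)
	fs.BoolVar(&metadataEnabled, "metadata-enabled", metadataEnabled, "Expose the placement tables on the healthz server")
	_ = fs.Parse(os.Args[1:])
	fmt.Println("metadata enabled:", metadataEnabled)
}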
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/dapr/dapr/pkg/placement/raft"
)
func TestAppFlag(t *testing.T) {
opts := New([]string{})
assert.EqualValues(t, "dapr-placement-0", opts.RaftID)
assert.EqualValues(t, []raft.PeerInfo{{ID: "dapr-placement-0", Address: "127.0.0.1:8201"}}, opts.RaftPeers)
assert.EqualValues(t, true, opts.RaftInMemEnabled)
assert.EqualValues(t, "", opts.RaftLogStorePath)
assert.EqualValues(t, 50005, opts.PlacementPort)
assert.EqualValues(t, 8080, opts.HealthzPort)
assert.EqualValues(t, false, opts.TLSEnabled)
assert.EqualValues(t, false, opts.MetadataEnabled)
assert.EqualValues(t, 100, opts.ReplicationFactor)
assert.EqualValues(t, "localhost", opts.TrustDomain)
assert.EqualValues(t, "/var/run/secrets/dapr.io/tls/ca.crt", opts.TrustAnchorsFile)
assert.EqualValues(t, "dapr-sentry.default.svc:443", opts.SentryAddress)
assert.EqualValues(t, "info", opts.Logger.OutputLevel)
assert.EqualValues(t, false, opts.Logger.JSONFormatEnabled)
assert.EqualValues(t, true, opts.Metrics.MetricsEnabled)
assert.EqualValues(t, "9090", opts.Metrics.Port)
}
func TestInitialCluster(t *testing.T) {
peerAddressTests := []struct {
name string
in []string
out []raft.PeerInfo
}{
{
"one address",
[]string{
"--initial-cluster", "node0=127.0.0.1:3030",
},
[]raft.PeerInfo{
{ID: "node0", Address: "127.0.0.1:3030"},
},
}, {
"three addresses in two flags",
[]string{
"--initial-cluster", "node0=127.0.0.1:3030",
"--initial-cluster", "node1=127.0.0.1:3031,node2=127.0.0.1:3032",
},
[]raft.PeerInfo{
{ID: "node0", Address: "127.0.0.1:3030"},
{ID: "node1", Address: "127.0.0.1:3031"},
{ID: "node2", Address: "127.0.0.1:3032"},
},
}, {
"one address is invalid",
[]string{
"--initial-cluster", "127.0.0.1:3030,node1=127.0.0.1:3031,node2=127.0.0.1:3032",
},
[]raft.PeerInfo{
{ID: "node1", Address: "127.0.0.1:3031"},
{ID: "node2", Address: "127.0.0.1:3032"},
},
},
}
for _, tt := range peerAddressTests {
t.Run(tt.name, func(t *testing.T) {
opts := New(tt.in)
assert.EqualValues(t, tt.out, opts.RaftPeers)
})
}
}
mikeee/dapr | cmd/placement/options/options_test.go | Go | MIT | 2,758 bytes
# Dapr sentry documentation
Please see the [Dapr sentry documentation](https://docs.dapr.io/concepts/dapr-services/sentry/) for more information.
mikeee/dapr | cmd/sentry/README.md | Markdown | MIT | 146 bytes
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"context"
"fmt"
"os"
"path/filepath"
"time"
"github.com/dapr/dapr/cmd/sentry/options"
"github.com/dapr/dapr/pkg/buildinfo"
"github.com/dapr/dapr/pkg/health"
"github.com/dapr/dapr/pkg/metrics"
"github.com/dapr/dapr/pkg/sentry"
"github.com/dapr/dapr/pkg/sentry/config"
"github.com/dapr/dapr/pkg/sentry/monitoring"
"github.com/dapr/dapr/utils"
"github.com/dapr/kit/concurrency"
"github.com/dapr/kit/fswatcher"
"github.com/dapr/kit/logger"
"github.com/dapr/kit/signals"
)
var log = logger.NewLogger("dapr.sentry")
func Run() {
opts := options.New(os.Args[1:])
// Apply options to all loggers
if err := logger.ApplyOptionsToLoggers(&opts.Logger); err != nil {
log.Fatal(err)
}
log.Infof("Starting Dapr Sentry certificate authority -- version %s -- commit %s", buildinfo.Version(), buildinfo.Commit())
log.Infof("Log level set to: %s", opts.Logger.OutputLevel)
metricsExporter := metrics.NewExporterWithOptions(log, metrics.DefaultMetricNamespace, opts.Metrics)
if err := utils.SetEnvVariables(map[string]string{
utils.KubeConfigVar: opts.Kubeconfig,
}); err != nil {
log.Fatalf("Error setting env: %v", err)
}
if err := monitoring.InitMetrics(); err != nil {
log.Fatal(err)
}
issuerCertPath := filepath.Join(opts.IssuerCredentialsPath, opts.IssuerCertFilename)
issuerKeyPath := filepath.Join(opts.IssuerCredentialsPath, opts.IssuerKeyFilename)
rootCertPath := filepath.Join(opts.IssuerCredentialsPath, opts.RootCAFilename)
cfg, err := config.FromConfigName(opts.ConfigName)
if err != nil {
log.Fatal(err)
}
cfg.IssuerCertPath = issuerCertPath
cfg.IssuerKeyPath = issuerKeyPath
cfg.RootCertPath = rootCertPath
cfg.TrustDomain = opts.TrustDomain
cfg.Port = opts.Port
cfg.ListenAddress = opts.ListenAddress
var (
watchDir = filepath.Dir(cfg.IssuerCertPath)
issuerEvent = make(chan struct{})
mngr = concurrency.NewRunnerManager()
)
// We use runner manager inception here since we want the inner manager to be
// restarted when the CA server needs to be restarted because of file events.
// We don't want to restart the healthz server and file watcher on file
// events (as well as wanting to terminate the program on signals).
caMngrFactory := func() *concurrency.RunnerManager {
return concurrency.NewRunnerManager(
sentry.New(cfg).Start,
func(ctx context.Context) error {
select {
case <-ctx.Done():
return nil
case <-issuerEvent:
monitoring.IssuerCertChanged()
log.Debug("Received issuer credentials changed signal")
select {
case <-ctx.Done():
return nil
// Batch all signals within 2s of each other
case <-time.After(2 * time.Second):
log.Warn("Issuer credentials changed; reloading")
return nil
}
}
},
)
}
// CA Server
err = mngr.Add(func(ctx context.Context) error {
for {
runErr := caMngrFactory().Run(ctx)
if runErr != nil {
return runErr
}
// Catch outer context cancellation to exit.
select {
case <-ctx.Done():
return nil
default:
}
}
})
if err != nil {
log.Fatal(err)
}
// Watch for changes in the watchDir
fs, err := fswatcher.New(fswatcher.Options{
Targets: []string{watchDir},
})
if err != nil {
log.Fatal(err)
}
if err = mngr.Add(func(ctx context.Context) error {
log.Infof("Starting watch on filesystem directory: %s", watchDir)
return fs.Run(ctx, issuerEvent)
}); err != nil {
log.Fatal(err)
}
// Healthz server
err = mngr.Add(func(ctx context.Context) error {
healthzServer := health.NewServer(health.Options{Log: log})
healthzServer.Ready()
runErr := healthzServer.Run(ctx, opts.HealthzListenAddress, opts.HealthzPort)
if runErr != nil {
return fmt.Errorf("failed to start healthz server: %w", runErr)
}
return nil
})
if err != nil {
log.Fatal(err)
}
if err = mngr.Add(metricsExporter.Run); err != nil {
log.Fatal(err)
}
// Run the runner manager.
if err := mngr.Run(signals.Context()); err != nil {
log.Fatal(err)
}
log.Info("Sentry shut down gracefully")
}
| mikeee/dapr | cmd/sentry/app/app.go | GO | mit | 4,587 |
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"github.com/dapr/dapr/cmd/sentry/app"
)
func main() {
app.Run()
}
| mikeee/dapr | cmd/sentry/main.go | GO | mit | 653 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"path/filepath"
"github.com/spf13/pflag"
"k8s.io/client-go/util/homedir"
"github.com/dapr/dapr/pkg/metrics"
"github.com/dapr/dapr/pkg/sentry/config"
"github.com/dapr/kit/logger"
)
const (
//nolint:gosec
defaultCredentialsPath = "/var/run/secrets/dapr.io/credentials"
// defaultDaprSystemConfigName is the default resource object name for Dapr System Config.
defaultDaprSystemConfigName = "daprsystem"
)
type Options struct {
ConfigName string
Port int
ListenAddress string
HealthzPort int
HealthzListenAddress string
IssuerCredentialsPath string
TrustDomain string
Kubeconfig string
Logger logger.Options
Metrics *metrics.Options
RootCAFilename string
IssuerCertFilename string
IssuerKeyFilename string
}
func New(origArgs []string) *Options {
// We are using pflag to parse the CLI flags.
// pflag is a drop-in replacement for the standard library's "flag" package, with one key difference:
// with the stdlib's "flag" package there are no shorthand options, so long options can be passed with a single dash (such as "daprd -mode").
// With pflag, single dashes are reserved for shorthands.
// So, we iterate through all args and double up the dash when it's single.
// This works *as long as* we don't start using shorthand flags (which haven't been in use so far).
args := make([]string, len(origArgs))
for i, a := range origArgs {
if len(a) > 2 && a[0] == '-' && a[1] != '-' {
args[i] = "-" + a
} else {
args[i] = a
}
}
var opts Options
// Create a flag set
fs := pflag.NewFlagSet("sentry", pflag.ExitOnError)
fs.SortFlags = true
fs.StringVar(&opts.ConfigName, "config", defaultDaprSystemConfigName, "Path to config file, or name of a configuration object")
fs.StringVar(&opts.IssuerCredentialsPath, "issuer-credentials", defaultCredentialsPath, "Path to the credentials directory holding the issuer data")
fs.StringVar(&opts.RootCAFilename, "issuer-ca-filename", config.DefaultRootCertFilename, "Certificate Authority certificate filename")
fs.StringVar(&opts.IssuerCertFilename, "issuer-certificate-filename", config.DefaultIssuerCertFilename, "Issuer certificate filename")
fs.StringVar(&opts.IssuerKeyFilename, "issuer-key-filename", config.DefaultIssuerKeyFilename, "Issuer private key filename")
fs.StringVar(&opts.TrustDomain, "trust-domain", "localhost", "The CA trust domain")
fs.IntVar(&opts.Port, "port", config.DefaultPort, "The port for the sentry server to listen on")
fs.StringVar(&opts.ListenAddress, "listen-address", "", "The listen address for the sentry server")
fs.IntVar(&opts.HealthzPort, "healthz-port", 8080, "The port for the healthz server to listen on")
fs.StringVar(&opts.HealthzListenAddress, "healthz-listen-address", "", "The listening address for the healthz server")
if home := homedir.HomeDir(); home != "" {
fs.StringVar(&opts.Kubeconfig, "kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
} else {
fs.StringVar(&opts.Kubeconfig, "kubeconfig", "", "absolute path to the kubeconfig file")
}
opts.Logger = logger.DefaultOptions()
opts.Logger.AttachCmdFlags(fs.StringVar, fs.BoolVar)
opts.Metrics = metrics.DefaultMetricOptions()
opts.Metrics.AttachCmdFlags(fs.StringVar, fs.BoolVar)
// Ignore errors; flagset is set for ExitOnError
_ = fs.Parse(args)
return &opts
}
| mikeee/dapr | cmd/sentry/options/options.go | GO | mit | 4,074 |
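As a quick illustration of the dash-doubling logic in `options.New` above, single-dash and double-dash spellings of the same flag parse identically. A minimal sketch (the flag values are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/dapr/dapr/cmd/sentry/options"
)

func main() {
	// Single dashes are doubled up before parsing, so these are equivalent.
	a := options.New([]string{"-trust-domain", "cluster.local", "-port", "8443"})
	b := options.New([]string{"--trust-domain", "cluster.local", "--port", "8443"})
	fmt.Println(a.TrustDomain == b.TrustDomain, a.Port == b.Port) // true true
}
```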
## Overview
| packages | description |
| ---------- | ---------------------------------------------------------------------- |
| common | common protos that are imported by multiple packages |
| internals  | internal gRPC and protobuf definitions used internally by Dapr          |
| runtime    | Dapr and App Callback services and their associated protobuf messages   |
| operator | Dapr Operator gRPC service |
| placement | Dapr Placement service |
| sentry     | Dapr Sentry certificate authority (CA) service                          |
| components | Dapr gRPC-based components services |
## Proto client generation
1. Install protoc version: [v4.24.4](https://github.com/protocolbuffers/protobuf/releases/tag/v4.24.4)
2. Install protoc-gen-go and protoc-gen-go-grpc
```bash
make init-proto
```
3. Generate gRPC proto clients
```bash
make gen-proto
```
## Update e2e test apps
Whenever there are breaking changes in the proto files, we need to update the e2e test apps to use the correct version of the dapr dependencies. This can be done by navigating to the tests folder and running the following command:
```bash
# Use the last commit of dapr.
./update_testapps_dependencies.sh be08e5520173beb93e5d5f047dbde405e78db658
```
**Note**: On Windows, use the mingw tools to execute the bash script
Check in all the go.mod files for the test apps that have now been modified to point to the latest dapr version.
| mikeee/dapr | dapr/README.md | Markdown | mit | 1,619 |
# Common
This folder is intended for the Common protos amongst the packages in the `dapr/proto` folder.
## Proto client generation
Pre-requisites:
1. Install protoc version: [v4.24.4](https://github.com/protocolbuffers/protobuf/releases/tag/v4.24.4)
2. Install protoc-gen-go and protoc-gen-go-grpc
```bash
make init-proto
```
*If* protoc is already installed:
3. Generate gRPC proto clients from the root of the project
```bash
make gen-proto
```
4. See the auto-generated files in `pkg/proto`
| mikeee/dapr | dapr/proto/common/v1/README.md | Markdown | mit | 503 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
syntax = "proto3";
package dapr.proto.common.v1;
import "google/protobuf/any.proto";
option csharp_namespace = "Dapr.Client.Autogen.Grpc.v1";
option java_outer_classname = "CommonProtos";
option java_package = "io.dapr.v1";
option go_package = "github.com/dapr/dapr/pkg/proto/common/v1;common";
// HTTPExtension includes HTTP verb and querystring
// when Dapr runtime delivers HTTP content.
//
// For example, when a caller calls the HTTP invoke API:
// POST http://localhost:3500/v1.0/invoke/<app_id>/method/<method>?query1=value1&query2=value2
//
// The Dapr runtime will parse POST as the verb and extract the querystring into a querystring map.
message HTTPExtension {
// Type of HTTP 1.1 Methods
// RFC 7231: https://tools.ietf.org/html/rfc7231#page-24
// RFC 5789: https://datatracker.ietf.org/doc/html/rfc5789
enum Verb {
NONE = 0;
GET = 1;
HEAD = 2;
POST = 3;
PUT = 4;
DELETE = 5;
CONNECT = 6;
OPTIONS = 7;
TRACE = 8;
PATCH = 9;
}
// Required. HTTP verb.
Verb verb = 1;
// Optional. querystring represents an encoded HTTP url query string in the following format: name=value&name2=value2
string querystring = 2;
}
// InvokeRequest is the message to invoke a method with the data.
// This message is used in InvokeService of Dapr gRPC Service and OnInvoke
// of AppCallback gRPC service.
message InvokeRequest {
// Required. method is a method name which will be invoked by caller.
string method = 1;
// Required in unary RPCs. Bytes value or Protobuf message which caller sent.
// Dapr treats Any.value as bytes type if Any.type_url is unset.
google.protobuf.Any data = 2;
// The type of data content.
//
// This field is required if data delivers http request body
// Otherwise, this is optional.
string content_type = 3;
// HTTP specific fields if request conveys http-compatible request.
//
// This field is required for http-compatible request. Otherwise,
// this field is optional.
HTTPExtension http_extension = 4;
}
// InvokeResponse is the response message including data and its content type
// from app callback.
// This message is used in InvokeService of Dapr gRPC Service and OnInvoke
// of AppCallback gRPC service.
message InvokeResponse {
// Required in unary RPCs. The content body of InvokeService response.
google.protobuf.Any data = 1;
// Required. The type of data content.
string content_type = 2;
}
// Chunk of data sent in a streaming request or response.
// This is used in requests including InternalInvokeRequestStream.
message StreamPayload {
// Data sent in the chunk.
// The amount of data included in each chunk is up to the discretion of the sender, and can be empty.
// Additionally, the amount of data doesn't need to be fixed and subsequent messages can send more, or less, data.
// Receivers must not make assumptions about the number of bytes they'll receive in each chunk.
bytes data = 1;
// Sequence number. This is a counter that starts from 0 and increments by 1 on each chunk sent.
uint64 seq = 2;
}
// StateItem represents state key, value, and additional options to save state.
message StateItem {
// Required. The state key
string key = 1;
// Required. The state data for key
bytes value = 2;
// The entity tag which represents the specific version of data.
// The exact ETag format is defined by the corresponding data store.
Etag etag = 3;
// The metadata which will be passed to state store component.
map<string,string> metadata = 4;
// Options for concurrency and consistency to save the state.
StateOptions options = 5;
}
// Etag represents a state item version
message Etag {
// value sets the etag value
string value = 1;
}
// StateOptions configures concurrency and consistency for state operations
message StateOptions {
// Enum describing the supported concurrency for state.
enum StateConcurrency {
CONCURRENCY_UNSPECIFIED = 0;
CONCURRENCY_FIRST_WRITE = 1;
CONCURRENCY_LAST_WRITE = 2;
}
// Enum describing the supported consistency for state.
enum StateConsistency {
CONSISTENCY_UNSPECIFIED = 0;
CONSISTENCY_EVENTUAL = 1;
CONSISTENCY_STRONG = 2;
}
StateConcurrency concurrency = 1;
StateConsistency consistency = 2;
}
// ConfigurationItem represents all the configuration with its name(key).
message ConfigurationItem {
// Required. The value of configuration item.
string value = 1;
// Version is response only and cannot be fetched. Store is not expected to keep all versions available
string version = 2;
// the metadata which will be passed to/from configuration store component.
map<string,string> metadata = 3;
}
| mikeee/dapr | dapr/proto/common/v1/common.proto | proto | mit | 5,241 |
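To make the `HTTPExtension` comment above concrete, here is a minimal sketch of building an `InvokeRequest` that mirrors the example URL, using the Go package generated from this file (the method name, body, and querystring are illustrative):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/anypb"

	commonv1 "github.com/dapr/dapr/pkg/proto/common/v1"
)

func main() {
	// Mirrors: POST http://localhost:3500/v1.0/invoke/<app_id>/method/orders?query1=value1
	req := &commonv1.InvokeRequest{
		Method:      "orders",
		Data:        &anypb.Any{Value: []byte(`{"id":42}`)}, // type_url unset, so treated as raw bytes
		ContentType: "application/json",
		HttpExtension: &commonv1.HTTPExtension{
			Verb:        commonv1.HTTPExtension_POST,
			Querystring: "query1=value1",
		},
	}
	fmt.Println(req.GetMethod(), req.GetHttpExtension().GetVerb())
}
```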
# Pluggable Components APIs
This folder is intended for Pluggable Components.
These protos are for a feature of Dapr which allows users to write their own components that sit outside of the Daprd binary. For more details about pluggable components please refer to the [docs](https://docs.dapr.io/developing-applications/develop-components/pluggable-components/develop-pluggable/).
## Proto client generation
Pre-requisites:
1. Install protoc version: [v4.24.4](https://github.com/protocolbuffers/protobuf/releases/tag/v4.24.4)
2. Install protoc-gen-go and protoc-gen-go-grpc
```bash
make init-proto
```
*If* protoc is already installed:
3. Generate gRPC proto clients from the root of the project
```bash
make gen-proto
```
4. See the auto-generated files in `pkg/proto`
| mikeee/dapr | dapr/proto/components/v1/README.md | Markdown | mit | 782 |
/*
Copyright 2022 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
syntax = "proto3";
package dapr.proto.components.v1;
import "dapr/proto/components/v1/common.proto";
option go_package = "github.com/dapr/dapr/pkg/proto/components/v1;components";
// Interface for input bindings
service InputBinding {
// Initializes the input binding component with the given metadata.
rpc Init(InputBindingInitRequest) returns (InputBindingInitResponse) {}
// Establishes a stream with the server, which sends messages down to the
// client. The client streams acknowledgements back to the server. The server
// will close the stream and return the status on any error. In case of closed
// connection, the client should re-establish the stream.
rpc Read(stream ReadRequest) returns (stream ReadResponse) {}
// Ping the InputBinding. Used for liveness purposes.
rpc Ping(PingRequest) returns (PingResponse) {}
}
service OutputBinding {
// Initializes the output binding component with the given metadata.
rpc Init(OutputBindingInitRequest) returns (OutputBindingInitResponse) {}
// Invoke remote systems with optional payloads.
rpc Invoke(InvokeRequest) returns (InvokeResponse) {}
// ListOperations lists the operations supported by the system.
rpc ListOperations(ListOperationsRequest) returns (ListOperationsResponse) {}
// Ping the OutputBinding. Used for liveness purposes.
rpc Ping(PingRequest) returns (PingResponse) {}
}
// reserved for future-proof extensibility
message ListOperationsRequest {}
message ListOperationsResponse {
// the list of all supported component operations.
repeated string operations = 1;
}
// InputBindingInitRequest is the request for initializing the input binding
// component.
message InputBindingInitRequest {
// The metadata request.
MetadataRequest metadata = 1;
}
// reserved for future-proof extensibility
message InputBindingInitResponse {}
// OutputBindingInitRequest is the request for initializing the output binding
// component.
message OutputBindingInitRequest {
// The metadata request.
MetadataRequest metadata = 1;
}
// reserved for future-proof extensibility
message OutputBindingInitResponse {}
// Used for describing errors when ack'ing messages.
message AckResponseError {
string message = 1;
}
message ReadRequest {
// The handle response.
bytes response_data = 1;
// The unique message ID.
string message_id = 2;
// Optional, should not be fulfilled when the message was successfully
// handled.
AckResponseError response_error = 3;
}
message ReadResponse {
// The Read binding Data.
bytes data = 1;
// The message metadata
map<string, string> metadata = 2;
// The message content type.
string content_type = 3;
// The {transient} message ID used for ACK-ing it later.
string message_id = 4;
}
// Used for invoking systems with optional payload.
message InvokeRequest {
// The invoke payload.
bytes data = 1;
// The invoke metadata.
map<string, string> metadata = 2;
// The system supported operation.
string operation = 3;
}
// Response from the invoked system.
message InvokeResponse {
// The response payload.
bytes data = 1;
// The response metadata.
map<string, string> metadata = 2;
// The response content-type.
string content_type = 3;
}
| mikeee/dapr | dapr/proto/components/v1/bindings.proto | proto | mit | 3,810 |
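The `Read` RPC above is a bidirectional ack stream: the component pushes messages down, and the caller streams acknowledgements back. A minimal sketch of the caller side using the Go stubs generated from this file; the Unix socket path is a hypothetical example:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	compv1 "github.com/dapr/dapr/pkg/proto/components/v1"
)

func main() {
	conn, err := grpc.Dial("unix:///tmp/dapr-components/mybinding.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	stream, err := compv1.NewInputBindingClient(conn).Read(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	for {
		msg, err := stream.Recv()
		if err != nil {
			log.Fatalf("stream closed: %v", err) // a real client should re-establish the stream
		}
		// Handle msg.Data, then ack using the transient message ID.
		// On handling failure, also set ResponseError to an AckResponseError.
		if err := stream.Send(&compv1.ReadRequest{MessageId: msg.GetMessageId()}); err != nil {
			log.Fatal(err)
		}
	}
}
```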
/*
Copyright 2022 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
syntax = "proto3";
package dapr.proto.components.v1;
option csharp_namespace = "Dapr.Client.Autogen.Grpc.v1";
option java_outer_classname = "ComponentProtos";
option java_package = "io.dapr.v1";
option go_package = "github.com/dapr/dapr/pkg/proto/components/v1;components";
// Base metadata request for all components
message MetadataRequest {
map<string, string> properties = 1;
}
// reserved for future-proof extensibility
message FeaturesRequest {}
message FeaturesResponse {
repeated string features = 1;
}
// reserved for future-proof extensibility
message PingRequest {}
// reserved for future-proof extensibility
message PingResponse {}
| mikeee/dapr | dapr/proto/components/v1/common.proto | proto | mit | 1,215 |
/*
Copyright 2022 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
syntax = "proto3";
package dapr.proto.components.v1;
import "dapr/proto/components/v1/common.proto";
option go_package = "github.com/dapr/dapr/pkg/proto/components/v1;components";
// PubSub service provides a gRPC interface for pubsub components.
service PubSub {
// Initializes the pubsub component with the given metadata.
rpc Init(PubSubInitRequest) returns (PubSubInitResponse) {}
// Returns a list of implemented pubsub features.
rpc Features(FeaturesRequest) returns (FeaturesResponse) {}
// Publish publishes a new message for the given topic.
rpc Publish(PublishRequest) returns (PublishResponse) {}
rpc BulkPublish(BulkPublishRequest) returns (BulkPublishResponse) {}
// Establishes a stream with the server (PubSub component), which sends
// messages down to the client (daprd). The client streams acknowledgements
// back to the server. The server will close the stream and return the status
// on any error. In case of closed connection, the client should re-establish
// the stream. The first message MUST contain a `topic` attribute on it that
// should be used for the entire streaming pull.
rpc PullMessages(stream PullMessagesRequest)
returns (stream PullMessagesResponse) {}
// Ping the pubsub. Used for liveness purposes.
rpc Ping(PingRequest) returns (PingResponse) {}
}
// Used for describing errors when ack'ing messages.
message AckMessageError {
string message = 1;
}
// Used to acknowledge a message.
message PullMessagesRequest {
// Required. The subscribed topic for which to initialize the new stream. This
// must be provided in the first request on the stream, and must not be set in
// subsequent requests from client to server.
Topic topic = 1;
// The unique message ID.
string ack_message_id = 2;
// Optional, should not be fulfilled when the message was successfully
// handled.
AckMessageError ack_error = 3;
}
// PubSubInitRequest is the request for initializing the pubsub component.
message PubSubInitRequest {
// The metadata request.
MetadataRequest metadata = 1;
}
// reserved for future-proof extensibility
message PubSubInitResponse {}
message PublishRequest {
bytes data = 1;
// The pubsub name.
string pubsub_name = 2;
// The publishing topic.
string topic = 3;
// Message metadata.
map<string, string> metadata = 4;
// The data content type.
string content_type = 5;
}
message BulkPublishRequest {
repeated BulkMessageEntry entries = 1;
string pubsub_name = 2;
string topic = 3;
map<string, string> metadata = 4;
}
message BulkMessageEntry {
string entry_id = 1;
bytes event = 2;
string content_type = 3;
map<string, string> metadata = 4;
}
message BulkPublishResponse {
repeated BulkPublishResponseFailedEntry failed_entries = 1;
}
message BulkPublishResponseFailedEntry {
string entry_id = 1;
string error = 2;
}
// reserved for future-proof extensibility
message PublishResponse {}
message Topic {
// The topic name desired to be subscribed
string name = 1;
// Metadata related subscribe request.
map<string, string> metadata = 2;
}
message PullMessagesResponse {
// The message content.
bytes data = 1;
// The topic where the message comes from.
string topic_name = 2;
// The message related metadata.
map<string, string> metadata = 3;
// The message content type.
string content_type = 4;
// The message {transient} ID. It's used for ack'ing it later.
string id = 5;
}
| mikeee/dapr | dapr/proto/components/v1/pubsub.proto | proto | mit | 4,039 |
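The `PullMessages` contract above requires the first message on the stream to carry the topic, with later messages carrying only acks. A minimal client-side sketch; the socket path and topic name are assumptions:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	compv1 "github.com/dapr/dapr/pkg/proto/components/v1"
)

func main() {
	conn, err := grpc.Dial("unix:///tmp/dapr-components/mypubsub.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	stream, err := compv1.NewPubSubClient(conn).PullMessages(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// The FIRST message must carry the topic; it must not be set again later.
	if err := stream.Send(&compv1.PullMessagesRequest{Topic: &compv1.Topic{Name: "orders"}}); err != nil {
		log.Fatal(err)
	}
	for {
		msg, err := stream.Recv()
		if err != nil {
			log.Fatalf("stream closed: %v", err) // a real client should re-establish the stream
		}
		// Handle msg.Data, then ack by the transient message ID
		// (set AckError instead if handling failed).
		if err := stream.Send(&compv1.PullMessagesRequest{AckMessageId: msg.GetId()}); err != nil {
			log.Fatal(err)
		}
	}
}
```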
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
syntax = "proto3";
package dapr.proto.components.v1;
import "dapr/proto/components/v1/common.proto";
option go_package = "github.com/dapr/dapr/pkg/proto/components/v1;components";
// Interface for secret store.
service SecretStore {
// Initializes the secret store with the given metadata.
rpc Init(SecretStoreInitRequest) returns (SecretStoreInitResponse) {}
// Returns a list of implemented secret store features.
rpc Features(FeaturesRequest) returns (FeaturesResponse) {}
// Get an individual secret from the store.
rpc Get(GetSecretRequest) returns (GetSecretResponse) {}
// Get all secrets from the store.
rpc BulkGet(BulkGetSecretRequest) returns (BulkGetSecretResponse) {}
// Ping the secret store. Used for liveness purposes.
rpc Ping(PingRequest) returns (PingResponse) {}
}
// Request to initialize the secret store.
message SecretStoreInitRequest {
MetadataRequest metadata = 1;
}
// Response from initialization.
message SecretStoreInitResponse {}
// GetSecretRequest is the message to get secret from secret store.
message GetSecretRequest {
// The name of secret key.
string key = 1;
// The metadata which will be sent to secret store components.
map<string, string> metadata = 2;
}
// GetSecretResponse is the response message to convey the requested secret.
message GetSecretResponse {
// data is the secret value. Some secret stores, such as the Kubernetes secret
// store, can save multiple secrets for a single secret key.
map<string, string> data = 1;
}
// BulkGetSecretRequest is the message to get the secrets from secret store.
message BulkGetSecretRequest {
// The metadata which will be sent to secret store components.
map<string, string> metadata = 1;
}
// SecretResponse is a map of decrypted string/string values
message SecretResponse {
map<string, string> secrets = 1;
}
// BulkGetSecretResponse is the response message to convey the requested secrets.
message BulkGetSecretResponse {
// data holds the secret values. Some secret stores, such as the Kubernetes secret
// store, can save multiple secrets for a single secret key.
map<string, SecretResponse> data = 1;
}
| mikeee/dapr | dapr/proto/components/v1/secretstore.proto | proto | mit | 2,726 |
/*
Copyright 2022 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
syntax = "proto3";
package dapr.proto.components.v1;
import "google/protobuf/any.proto";
import "dapr/proto/components/v1/common.proto";
option go_package = "github.com/dapr/dapr/pkg/proto/components/v1;components";
// QueriableStateStore service provides a gRPC interface for querier state store
// components. It was designed to embed query features to the StateStore Service
// as a complementary service.
service QueriableStateStore {
// Query performs a query request on the statestore.
rpc Query(QueryRequest) returns (QueryResponse) {}
}
message Sorting {
// The key that should be used for sorting.
string key = 1;
enum Order {
ASC = 0;
DESC = 1;
}
// The order that should be used.
Order order = 2;
}
message Pagination {
// Maximum number of results that should be returned.
int64 limit = 1;
// The pagination token.
string token = 2;
}
message Query {
// Filters that should be applied.
map<string, google.protobuf.Any> filter = 1;
// The sort order.
repeated Sorting sort = 2;
// The query pagination params.
Pagination pagination = 3;
}
// QueryRequest is for querying state store.
message QueryRequest {
// The query to be performed.
Query query = 1;
// Request associated metadata.
map<string, string> metadata = 2;
}
// QueryItem is an object representing a single entry in query results.
message QueryItem {
// The returned item Key.
string key = 1;
// The returned item Data.
bytes data = 2;
// The returned item ETag
Etag etag = 3;
// The returned error string.
string error = 4;
// The returned content type.
string content_type = 5;
}
// QueryResponse is the query response.
message QueryResponse {
// The query response items.
repeated QueryItem items = 1;
// The response token.
string token = 2;
// Response associated metadata.
map<string, string> metadata = 3;
}
// TransactionalStateStore service provides a gRPC interface for transactional
// state store components. It was designed to embed transactional features to
// the StateStore Service as a complementary service.
service TransactionalStateStore {
// Transact executes multiple operations in a transactional environment.
rpc Transact(TransactionalStateRequest) returns (TransactionalStateResponse) {
}
}
// TransactionalStateOperation describes operation type, key, and value for
// transactional operation.
message TransactionalStateOperation {
// request is either delete or set.
oneof request {
DeleteRequest delete = 1;
SetRequest set = 2;
}
}
// TransactionalStateRequest describes a transactional operation against a state
// store that comprises multiple types of operations. The request field of each
// operation is either a DeleteRequest or a SetRequest.
message TransactionalStateRequest {
// Operations that should be performed.
repeated TransactionalStateOperation operations = 1;
// Request associated metadata.
map<string, string> metadata = 2;
}
// reserved for future-proof extensibility
message TransactionalStateResponse {}
// StateStore service provides a gRPC interface for state store components.
service StateStore {
// Initializes the state store component with the given metadata.
rpc Init(InitRequest) returns (InitResponse) {}
// Returns a list of implemented state store features.
rpc Features(FeaturesRequest) returns (FeaturesResponse) {}
// Deletes the specified key from the state store.
rpc Delete(DeleteRequest) returns (DeleteResponse) {}
// Get data from the given key.
rpc Get(GetRequest) returns (GetResponse) {}
// Sets the value of the specified key.
rpc Set(SetRequest) returns (SetResponse) {}
// Ping the state store. Used for liveness purposes.
rpc Ping(PingRequest) returns (PingResponse) {}
// Deletes many keys at once.
rpc BulkDelete(BulkDeleteRequest) returns (BulkDeleteResponse) {}
// Retrieves many keys at once.
rpc BulkGet(BulkGetRequest) returns (BulkGetResponse) {}
// Sets the value of many keys at once.
rpc BulkSet(BulkSetRequest) returns (BulkSetResponse) {}
}
// Etag represents a state item version
message Etag {
// value sets the etag value
string value = 1;
}
// StateOptions configures concurrency and consistency for state operations
message StateOptions {
// Enum describing the supported concurrency for state.
enum StateConcurrency {
CONCURRENCY_UNSPECIFIED = 0;
CONCURRENCY_FIRST_WRITE = 1;
CONCURRENCY_LAST_WRITE = 2;
}
// Enum describing the supported consistency for state.
enum StateConsistency {
CONSISTENCY_UNSPECIFIED = 0;
CONSISTENCY_EVENTUAL = 1;
CONSISTENCY_STRONG = 2;
}
StateConcurrency concurrency = 1;
StateConsistency consistency = 2;
}
// InitRequest is the request for initializing the component.
message InitRequest {
MetadataRequest metadata = 1;
}
// reserved for future-proof extensibility
message InitResponse {}
message GetRequest {
// The key that should be retrieved.
string key = 1;
// Request associated metadata.
map<string, string> metadata = 2;
// The get consistency level.
StateOptions.StateConsistency consistency = 3;
}
message GetResponse {
// The data of the GetRequest response.
bytes data = 1;
// The etag of the associated key.
Etag etag = 2;
// Metadata related to the response.
map<string, string> metadata = 3;
// The response data content type.
string content_type = 4;
}
message DeleteRequest {
// The key that should be deleted.
string key = 1;
// The etag is used as an If-Match header, to allow certain levels of
// consistency.
Etag etag = 2;
// The request metadata.
map<string, string> metadata = 3;
StateOptions options = 4;
}
// reserved for future-proof extensibility
message DeleteResponse {}
message SetRequest {
// The key that should be set.
string key = 1;
// Value is the desired content of the given key.
bytes value = 2;
// The etag is used as an If-Match header, to allow certain levels of
// consistency.
Etag etag = 3;
// The request metadata.
map<string, string> metadata = 4;
// The Set request options.
StateOptions options = 5;
// The data content type.
string content_type = 6;
}
// reserved for future-proof extensibility
message SetResponse {}
message BulkDeleteRequestOptions {
int64 parallelism = 1;
}
message BulkDeleteRequest {
repeated DeleteRequest items = 1;
BulkDeleteRequestOptions options = 2;
}
// reserved for future-proof extensibility
message BulkDeleteResponse {}
message BulkGetRequestOptions {
int64 parallelism = 1;
}
message BulkGetRequest {
repeated GetRequest items = 1;
BulkGetRequestOptions options = 2;
}
message BulkStateItem {
// The key of the fetched item.
string key = 1;
// The associated data of the fetched item.
bytes data = 2;
// The item ETag
Etag etag = 3;
// A fetch error, if any.
string error = 4;
// The State Item metadata.
map<string, string> metadata = 5;
// The data content type.
string content_type = 6;
}
message BulkGetResponse {
repeated BulkStateItem items = 1;
}
message BulkSetRequestOptions {
int64 parallelism = 1;
}
message BulkSetRequest {
repeated SetRequest items = 1;
BulkSetRequestOptions options = 2;
}
// reserved for future-proof extensibility
message BulkSetResponse {}
// TransactionalStoreMultiMaxSize service provides a gRPC interface for
// compatible transactional state store components which return the maximum
// number of operations that can be performed in a single transaction.
service TransactionalStoreMultiMaxSize {
// MultiMaxSize returns the maximum number of operations that can be performed
// in a single transaction.
rpc MultiMaxSize(MultiMaxSizeRequest) returns (MultiMaxSizeResponse) {}
}
// MultiMaxSizeRequest is the request for MultiMaxSize. It is empty because
// there are no parameters.
message MultiMaxSizeRequest {}
// MultiMaxSizeResponse is the response for MultiMaxSize.
message MultiMaxSizeResponse {
// The maximum number of operations that can be performed in a single
// transaction.
int64 max_size = 1;
}
| mikeee/dapr | dapr/proto/components/v1/state.proto | proto | mit | 8,633 |
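The `oneof` in `TransactionalStateOperation` maps to wrapper types in the generated Go code. A minimal sketch of assembling a set-plus-delete transaction (keys, values, and metadata are illustrative):

```go
package main

import (
	"fmt"

	compv1 "github.com/dapr/dapr/pkg/proto/components/v1"
)

func main() {
	req := &compv1.TransactionalStateRequest{
		Operations: []*compv1.TransactionalStateOperation{
			// Each operation is either a set or a delete, via the oneof wrappers.
			{Request: &compv1.TransactionalStateOperation_Set{
				Set: &compv1.SetRequest{Key: "order-1", Value: []byte(`{"qty":2}`)},
			}},
			{Request: &compv1.TransactionalStateOperation_Delete{
				Delete: &compv1.DeleteRequest{Key: "order-0"},
			}},
		},
		Metadata: map[string]string{"partitionKey": "orders"},
	}
	fmt.Println(len(req.GetOperations()), "operations in one transaction")
}
```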
# Internal APIs
This folder is intended for the Internal APIs that the `daprd` sidecars use to communicate with each other.
## Proto client generation
Pre-requisites:
1. Install protoc version: [v4.24.4](https://github.com/protocolbuffers/protobuf/releases/tag/v4.24.4)
2. Install protoc-gen-go and protoc-gen-go-grpc
```bash
make init-proto
```
*If* protoc is already installed:
3. Generate gRPC proto clients from the root of the project
```bash
make gen-proto
```
4. See the auto-generated files in `pkg/proto`
| mikeee/dapr | dapr/proto/internals/v1/README.md | Markdown | mit | 523 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
syntax = "proto3";
package dapr.proto.internals.v1;
option go_package = "github.com/dapr/dapr/pkg/proto/internals/v1;internals";
// APIVersion represents the version of Dapr Runtime API.
enum APIVersion {
// unspecified apiversion
APIVERSION_UNSPECIFIED = 0;
// Dapr API v1
V1 = 1;
}
| mikeee/dapr | dapr/proto/internals/v1/apiversion.proto | proto | mit | 857 |
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
syntax = "proto3";
package dapr.proto.internals.v1;
import "google/protobuf/timestamp.proto";
option go_package = "github.com/dapr/dapr/pkg/proto/internals/v1;internals";
// Reminder represents a reminder that is stored in the Dapr actor state store.
message Reminder {
string actor_id = 1;
string actor_type = 2;
string name = 3;
bytes data = 4;
string period = 5;
google.protobuf.Timestamp registered_time = 6;
string due_time = 7;
google.protobuf.Timestamp expiration_time = 8;
}
// Reminders is a collection of reminders.
message Reminders {
repeated Reminder reminders = 1;
}
| mikeee/dapr | dapr/proto/internals/v1/reminders.proto | proto | mit | 1,165 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
syntax = "proto3";
package dapr.proto.internals.v1;
import "dapr/proto/common/v1/common.proto";
import "dapr/proto/internals/v1/apiversion.proto";
import "dapr/proto/internals/v1/status.proto";
option go_package = "github.com/dapr/dapr/pkg/proto/internals/v1;internals";
// ServiceInvocation service is used to exchange data between the
// caller Dapr runtime and the callee Dapr runtime.
//
// The request message includes the caller's HTTP/gRPC request
// and delivers the callee's response, including its status code.
// The response status of the rpc methods represents the internal gRPC
// connection status, not the callee's response status.
//
// Thus, a ServiceInvocation gRPC response returns OK in most cases
// regardless of the callee's response.
service ServiceInvocation {
// Invokes a method of the specific actor.
rpc CallActor (InternalInvokeRequest) returns (InternalInvokeResponse) {}
// Invokes a method of the specific service.
rpc CallLocal (InternalInvokeRequest) returns (InternalInvokeResponse) {}
// Invokes a method of the specific service using a stream of data.
// Although this uses a bi-directional stream, it behaves as a "simple RPC" in which the caller sends the full request (chunked in multiple messages in the stream), then reads the full response (chunked in the stream).
// Each message in the stream contains a `InternalInvokeRequestStream` (for caller) or `InternalInvokeResponseStream` (for callee):
// - The first message in the stream MUST contain a `request` (caller) or `response` (callee) message with all required properties present.
// - The first message in the stream MAY contain a `payload`, which is not required and may be empty.
// - Subsequent messages (any message except the first one in the stream) MUST contain a `payload` and MUST NOT contain any other property (like `request` or `response`).
// - Each message with a `payload` MUST contain a sequence number in `seq`, which is a counter that starts from 0 and MUST be incremented by 1 in each chunk. The `seq` counter MUST NOT be included if the message does not have a `payload`.
// - When the sender has completed sending the data, it MUST call `CloseSend` on the stream.
// The caller and callee must send at least one message in the stream. If only 1 message is sent in each direction, that message must contain both a `request`/`response` (the `payload` may be empty).
rpc CallLocalStream (stream InternalInvokeRequestStream) returns (stream InternalInvokeResponseStream) {}
}
// Actor represents actor using actor_type and actor_id
message Actor {
// Required. The type of actor.
string actor_type = 1;
// Required. The ID of actor type (actor_type)
string actor_id = 2;
}
// InternalInvokeRequest is the message to transfer caller's data to callee
// for service invocation. This includes callee's app id and caller's request data.
message InternalInvokeRequest {
// Required. The version of Dapr runtime API.
APIVersion ver = 1;
// Required. metadata holds caller's HTTP headers or gRPC metadata.
map<string, ListStringValue> metadata = 2;
// Required. message including caller's invocation request.
common.v1.InvokeRequest message = 3;
// Actor type and id. This field is used only for
// actor service invocation.
Actor actor = 4;
}
// InternalInvokeResponse is the message to transfer callee's response to caller
// for service invocation.
message InternalInvokeResponse {
// Required. HTTP/gRPC status.
Status status = 1;
// Required. The app callback response headers.
map<string, ListStringValue> headers = 2;
// App callback response trailers.
// This will be used only for gRPC app callback
map<string, ListStringValue> trailers = 3;
// Callee's invocation response message.
common.v1.InvokeResponse message = 4;
}
// InternalInvokeRequestStream is a variant of InternalInvokeRequest used in streaming RPCs.
message InternalInvokeRequestStream {
// Request details.
// This does not contain any data in message.data.
InternalInvokeRequest request = 1;
// Chunk of data.
common.v1.StreamPayload payload = 2;
}
// InternalInvokeResponseStream is a variant of InternalInvokeResponse used in streaming RPCs.
message InternalInvokeResponseStream {
// Response details.
// This does not contain any data in message.data.
InternalInvokeResponse response = 1;
// Chunk of data.
common.v1.StreamPayload payload = 2;
}
// ListStringValue represents string value array
message ListStringValue {
// The array of string.
repeated string values = 1;
}
| mikeee/dapr | dapr/proto/internals/v1/service_invocation.proto | proto | mit | 5,114 |
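The chunking rules documented on `CallLocalStream` can be sketched from the caller side as follows. This is not the daprd implementation, only a minimal illustration of the contract; the address, method name, and chunk size are assumptions:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	commonv1 "github.com/dapr/dapr/pkg/proto/common/v1"
	internalsv1 "github.com/dapr/dapr/pkg/proto/internals/v1"
)

func main() {
	conn, err := grpc.Dial("localhost:50002",
		grpc.WithTransportCredentials(insecure.NewCredentials())) // real sidecars use mTLS
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	stream, err := internalsv1.NewServiceInvocationClient(conn).CallLocalStream(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	body := []byte("...a large request body...")
	const chunkSize = 4 * 1024
	first := true
	var seq uint64
	for {
		msg := &internalsv1.InternalInvokeRequestStream{}
		if first {
			// The first message must carry the full request details.
			msg.Request = &internalsv1.InternalInvokeRequest{
				Ver:      internalsv1.APIVersion_V1,
				Metadata: map[string]*internalsv1.ListStringValue{},
				Message:  &commonv1.InvokeRequest{Method: "orders"},
			}
			first = false
		}
		if len(body) > 0 {
			n := len(body)
			if n > chunkSize {
				n = chunkSize
			}
			// Each chunk carries seq, starting at 0 and incrementing by 1.
			msg.Payload = &commonv1.StreamPayload{Data: body[:n], Seq: seq}
			seq++
			body = body[n:]
		}
		if err := stream.Send(msg); err != nil {
			log.Fatal(err)
		}
		if len(body) == 0 {
			break
		}
	}
	// The sender must CloseSend when done; the response is then read in chunks.
	if err := stream.CloseSend(); err != nil {
		log.Fatal(err)
	}
}
```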
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
syntax = "proto3";
package dapr.proto.internals.v1;
import "google/protobuf/any.proto";
option go_package = "github.com/dapr/dapr/pkg/proto/internals/v1;internals";
// Status represents the response status for HTTP and gRPC app channel.
message Status {
// Required. The status code
int32 code = 1;
// Error message
string message = 2;
// A list of messages that carry the error details
repeated google.protobuf.Any details = 3;
}
| mikeee/dapr | dapr/proto/internals/v1/status.proto | proto | mit | 1,010 |
# Operator Service APIs
This folder is intended for the `operator` service APIs, which manage component updates and provide Kubernetes service endpoints for Dapr. For more details about the `operator` service please refer to the [docs](https://docs.dapr.io/concepts/dapr-services/operator/).
## Proto client generation
Pre-requisites:
1. Install protoc version: [v4.24.4](https://github.com/protocolbuffers/protobuf/releases/tag/v4.24.4)
2. Install protoc-gen-go and protoc-gen-go-grpc
```bash
make init-proto
```
*If* protoc is already installed:
3. Generate gRPC proto clients from the root of the project
```bash
make gen-proto
```
4. See the auto-generated files in `pkg/proto`
| mikeee/dapr | dapr/proto/operator/v1/README.md | Markdown | mit | 695 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
syntax = "proto3";
package dapr.proto.operator.v1;
import "google/protobuf/empty.proto";
option go_package = "github.com/dapr/dapr/pkg/proto/operator/v1;operator";
service Operator {
// Sends events to Dapr sidecars upon component changes.
rpc ComponentUpdate (ComponentUpdateRequest) returns (stream ComponentUpdateEvent) {}
// Returns a list of available components
rpc ListComponents (ListComponentsRequest) returns (ListComponentResponse) {}
// Returns a given configuration by name
rpc GetConfiguration (GetConfigurationRequest) returns (GetConfigurationResponse) {}
// Returns a list of pub/sub subscriptions
rpc ListSubscriptions (google.protobuf.Empty) returns (ListSubscriptionsResponse) {}
// Returns a given resiliency configuration by name
rpc GetResiliency (GetResiliencyRequest) returns (GetResiliencyResponse) {}
// Returns a list of resiliency configurations
rpc ListResiliency (ListResiliencyRequest) returns (ListResiliencyResponse) {}
// Returns a list of pub/sub subscriptions; uses ListSubscriptionsRequest to expose pod info
rpc ListSubscriptionsV2 (ListSubscriptionsRequest) returns (ListSubscriptionsResponse) {}
// Sends events to Dapr sidecars upon subscription changes.
rpc SubscriptionUpdate (SubscriptionUpdateRequest) returns (stream SubscriptionUpdateEvent) {}
// Returns a list of http endpoints
rpc ListHTTPEndpoints (ListHTTPEndpointsRequest) returns (ListHTTPEndpointsResponse) {}
// Sends events to Dapr sidecars upon http endpoint changes.
rpc HTTPEndpointUpdate (HTTPEndpointUpdateRequest) returns (stream HTTPEndpointUpdateEvent) {}
}
// ResourceEventType is the type of event to a resource.
enum ResourceEventType {
// UNKNOWN indicates that the event type is unknown.
UNKNOWN = 0;
// CREATED indicates that the resource has been created.
CREATED = 1;
// UPDATED indicates that the resource has been updated.
UPDATED = 2;
// DELETED indicates that the resource has been deleted.
DELETED = 3;
}
// ListComponentsRequest is the request to get components for a sidecar in namespace.
message ListComponentsRequest {
string namespace = 1;
string podName = 2;
}
// ComponentUpdateRequest is the request to get updates about new components for a given namespace.
message ComponentUpdateRequest {
string namespace = 1;
string podName = 2;
}
// ComponentUpdateEvent includes the updated component event.
message ComponentUpdateEvent {
bytes component = 1;
// type is the type of event.
ResourceEventType type = 2;
}
// ListComponentResponse includes the list of available components.
message ListComponentResponse {
repeated bytes components = 1;
}
// GetConfigurationRequest is the request message to get the configuration.
message GetConfigurationRequest {
string name = 1;
string namespace = 2;
string podName = 3;
}
// GetConfigurationResponse includes the requested configuration.
message GetConfigurationResponse {
bytes configuration = 1;
}
// ListSubscriptionsResponse includes pub/sub subscriptions.
message ListSubscriptionsResponse {
repeated bytes subscriptions = 1;
}
// SubscriptionUpdateRequest is the request to get updates about new
// subscriptions for a given namespace.
message SubscriptionUpdateRequest {
string namespace = 1;
string podName = 2;
}
// SubscriptionUpdateEvent includes the updated subscription event.
message SubscriptionUpdateEvent {
bytes subscription = 1;
// type is the type of event.
ResourceEventType type = 2;
}
// GetResiliencyRequest is the request to get a resiliency configuration.
message GetResiliencyRequest {
string name = 1;
string namespace = 2;
}
// GetResiliencyResponse includes the requested resiliency configuration.
message GetResiliencyResponse {
bytes resiliency = 1;
}
// ListResiliencyRequest is the requests to get resiliency configurations for a sidecar namespace.
message ListResiliencyRequest {
string namespace = 1;
}
// ListResiliencyResponse includes the list of available resiliency configurations.
message ListResiliencyResponse {
repeated bytes resiliencies = 1;
}
message ListSubscriptionsRequest {
string podName = 1;
string namespace = 2;
}
// GetHTTPEndpointRequest is the request to get an http endpoint configuration.
message GetHTTPEndpointRequest {
string name = 1;
string namespace = 2;
}
// GetHTTPEndpointResponse includes the requested http endpoint configuration.
message GetHTTPEndpointResponse {
bytes http_endpoint = 1;
}
// ListHTTPEndpointsResponse includes the list of available http endpoint configurations.
message ListHTTPEndpointsResponse {
repeated bytes http_endpoints = 1;
}
message ListHTTPEndpointsRequest {
string namespace = 1;
}
// HTTPEndpointUpdateRequest is the request to get updates about new http endpoints for a given namespace.
message HTTPEndpointUpdateRequest {
string namespace = 1;
string pod_name = 2;
}
// HTTPEndpointUpdateEvent includes the updated http endpoint event.
message HTTPEndpointUpdateEvent {
bytes http_endpoints = 1;
}
| mikeee/dapr | dapr/proto/operator/v1/operator.proto | proto | mit | 5,604 |
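As an illustration of the `ComponentUpdate` watch stream, a minimal client sketch follows. The operator address, namespace, and pod name are assumptions, and a real sidecar connects over mTLS rather than insecure credentials:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	operatorv1 "github.com/dapr/dapr/pkg/proto/operator/v1"
)

func main() {
	conn, err := grpc.Dial("dapr-api.dapr-system.svc:80",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	stream, err := operatorv1.NewOperatorClient(conn).ComponentUpdate(context.Background(),
		&operatorv1.ComponentUpdateRequest{Namespace: "default", PodName: "myapp-pod"})
	if err != nil {
		log.Fatal(err)
	}
	for {
		ev, err := stream.Recv()
		if err != nil {
			log.Fatalf("watch ended: %v", err) // a real client should re-establish the watch
		}
		// ev.Component holds the serialized Component resource; ev.Type says
		// whether it was CREATED, UPDATED, or DELETED.
		log.Printf("component event: type=%s bytes=%d", ev.GetType(), len(ev.GetComponent()))
	}
}
```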
# Placement Service APIs
This folder is intended for `placement` service APIs that the `daprd` sidecars use to communicate with the `placement` Control Plane Service. For more details about the `placement` service please refer to the [docs](https://docs.dapr.io/concepts/dapr-services/placement/).
## Proto client generation
Pre-requisites:
1. Install protoc version: [v4.24.4](https://github.com/protocolbuffers/protobuf/releases/tag/v4.24.4)
2. Install protoc-gen-go and protoc-gen-go-grpc
```bash
make init-proto
```
*If* protoc is already installed:
3. Generate gRPC proto clients from the root of the project
```bash
make gen-proto
```
4. See the auto-generated files in `pkg/proto`
| mikeee/dapr | dapr/proto/placement/v1/README.md | Markdown | mit | 697 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
syntax = "proto3";
package dapr.proto.placement.v1;
option go_package = "github.com/dapr/dapr/pkg/proto/placement/v1;placement";
// Placement service is used to report Dapr runtime host status.
service Placement {
// Reports Dapr actor status and retrieves actor placement table.
rpc ReportDaprStatus(stream Host) returns (stream PlacementOrder) {}
}
message PlacementOrder {
PlacementTables tables = 1;
string operation = 2;
}
message PlacementTables {
map<string, PlacementTable> entries = 1;
string version = 2;
// Minimum observed version of the Actor APIs supported by connected runtimes
uint32 api_level = 3;
int64 replication_factor = 4;
}
message PlacementTable {
map<uint64, string> hosts = 1;
repeated uint64 sorted_set = 2;
map<string, Host> load_map = 3;
int64 total_load = 4;
}
message Host {
string name = 1;
int64 port = 2;
int64 load = 3;
repeated string entities = 4;
string id = 5;
string pod = 6;
// Version of the Actor APIs supported by the Dapr runtime
uint32 api_level = 7;
string namespace = 8;
}
| mikeee/dapr | dapr/proto/placement/v1/placement.proto | proto | mit | 1,633 |
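A minimal sketch of the `ReportDaprStatus` exchange: the sidecar streams its `Host` record and applies the `PlacementOrder` messages it receives back (daprd applies these in a lock/update/unlock sequence). The address, host details, and reporting interval are assumptions:

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	placementv1 "github.com/dapr/dapr/pkg/proto/placement/v1"
)

func main() {
	conn, err := grpc.Dial("localhost:50005",
		grpc.WithTransportCredentials(insecure.NewCredentials())) // real sidecars use mTLS
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	stream, err := placementv1.NewPlacementClient(conn).ReportDaprStatus(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// Periodically report this host and the actor types it can run.
	go func() {
		for {
			err := stream.Send(&placementv1.Host{
				Name:     "10.0.0.5:50002",
				Id:       "myapp",
				Entities: []string{"cartActor"},
				ApiLevel: 10,
			})
			if err != nil {
				return
			}
			time.Sleep(time.Second)
		}
	}()

	// Apply dissemination orders as they arrive.
	for {
		order, err := stream.Recv()
		if err != nil {
			log.Fatalf("stream closed: %v", err)
		}
		log.Printf("placement order: op=%s version=%s",
			order.GetOperation(), order.GetTables().GetVersion())
	}
}
```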
# Public Dapr Runtime APIs
This folder is intended for user-facing APIs.
- `dapr.proto` is used by the services implemented by the Dapr runtime. Apps calling into the Dapr runtime use these services.
- `appcallback.proto` is for services implemented by apps to receive messages from the Dapr runtime.
## Proto client generation
Pre-requisites:
1. Install protoc version: [v4.24.4](https://github.com/protocolbuffers/protobuf/releases/tag/v4.24.4)
2. Install protoc-gen-go and protoc-gen-go-grpc
```bash
make init-proto
```
*If* protoc is already installed:
3. Generate gRPC proto clients from the root of the project
```bash
make gen-proto
```
4. See the auto-generated files in `pkg/proto`
| mikeee/dapr | dapr/proto/runtime/v1/README.md | Markdown | mit | 702 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
syntax = "proto3";
package dapr.proto.runtime.v1;
import "google/protobuf/empty.proto";
import "dapr/proto/common/v1/common.proto";
import "google/protobuf/struct.proto";
option csharp_namespace = "Dapr.AppCallback.Autogen.Grpc.v1";
option java_outer_classname = "DaprAppCallbackProtos";
option java_package = "io.dapr.v1";
option go_package = "github.com/dapr/dapr/pkg/proto/runtime/v1;runtime";
// AppCallback V1 allows a user application to interact with the Dapr runtime.
// A user application needs to implement the AppCallback service if it needs to
// receive messages from the Dapr runtime.
service AppCallback {
// Invokes service method with InvokeRequest.
rpc OnInvoke (common.v1.InvokeRequest) returns (common.v1.InvokeResponse) {}
// Lists all topics subscribed by this app.
rpc ListTopicSubscriptions(google.protobuf.Empty) returns (ListTopicSubscriptionsResponse) {}
// Subscribes events from Pubsub
rpc OnTopicEvent(TopicEventRequest) returns (TopicEventResponse) {}
// Lists all input bindings subscribed by this app.
rpc ListInputBindings(google.protobuf.Empty) returns (ListInputBindingsResponse) {}
// Listens events from the input bindings
//
// User application can save the states or send the events to the output
// bindings optionally by returning BindingEventResponse.
rpc OnBindingEvent(BindingEventRequest) returns (BindingEventResponse) {}
}
// AppCallbackHealthCheck V1 is an optional extension to AppCallback V1 to implement
// the HealthCheck method.
service AppCallbackHealthCheck {
// Health check.
rpc HealthCheck(google.protobuf.Empty) returns (HealthCheckResponse) {}
}
// AppCallbackAlpha V1 is an optional extension to AppCallback V1 to opt
// for Alpha RPCs.
service AppCallbackAlpha {
// Subscribes bulk events from Pubsub
rpc OnBulkTopicEventAlpha1(TopicEventBulkRequest) returns (TopicEventBulkResponse) {}
}
// TopicEventRequest message is compatible with CloudEvent spec v1.0
// https://github.com/cloudevents/spec/blob/v1.0/spec.md
message TopicEventRequest {
// id identifies the event. Producers MUST ensure that source + id
// is unique for each distinct event. If a duplicate event is re-sent
// (e.g. due to a network error) it MAY have the same id.
string id = 1;
// source identifies the context in which an event happened.
// Often this will include information such as the type of the
// event source, the organization publishing the event or the process
// that produced the event. The exact syntax and semantics behind
// the data encoded in the URI is defined by the event producer.
string source = 2;
// The type of event related to the originating occurrence.
string type = 3;
// The version of the CloudEvents specification.
string spec_version = 4;
// The content type of data value.
string data_content_type = 5;
// The content of the event.
bytes data = 7;
// The pubsub topic which the publisher sent to.
string topic = 6;
// The name of the pubsub the publisher sent to.
string pubsub_name = 8;
// The matching path from TopicSubscription/routes (if specified) for this event.
// This value is used by OnTopicEvent to "switch" inside the handler.
string path = 9;
// The map of additional custom properties to be sent to the app. These are considered to be cloud event extensions.
google.protobuf.Struct extensions = 10;
}
// TopicEventResponse is response from app on published message
message TopicEventResponse {
// TopicEventResponseStatus allows apps to have finer control over handling of the message.
enum TopicEventResponseStatus {
// SUCCESS is the default behavior: message is acknowledged and not retried or logged.
SUCCESS = 0;
// RETRY status signals Dapr to retry the message as part of an expected scenario (no warning is logged).
RETRY = 1;
// DROP status signals Dapr to drop the message as part of an unexpected scenario (warning is logged).
DROP = 2;
}
// The status of the message processing.
TopicEventResponseStatus status = 1;
}
// TopicEventCERequest message is compatible with CloudEvent spec v1.0
message TopicEventCERequest {
// The unique identifier of this cloud event.
string id = 1;
// source identifies the context in which an event happened.
string source = 2;
// The type of event related to the originating occurrence.
string type = 3;
// The version of the CloudEvents specification.
string spec_version = 4;
// The content type of data value.
string data_content_type = 5;
// The content of the event.
bytes data = 6;
// Custom attributes which includes cloud event extensions.
google.protobuf.Struct extensions = 7;
}
// TopicEventBulkRequestEntry represents a single message inside a bulk request
message TopicEventBulkRequestEntry {
// Unique identifier for the message.
string entry_id = 1;
// The content of the event.
oneof event {
bytes bytes = 2;
TopicEventCERequest cloud_event = 3;
}
// content type of the event contained.
string content_type = 4;
// The metadata associated with the event.
map<string,string> metadata = 5;
}
// TopicEventBulkRequest represents request for bulk message
message TopicEventBulkRequest {
// Unique identifier for the bulk request.
string id = 1;
// The list of items inside this bulk request.
repeated TopicEventBulkRequestEntry entries = 2;
// The metadata associated with this bulk request.
map<string,string> metadata = 3;
// The pubsub topic to which the publisher sent the event.
string topic = 4;
// The name of the pubsub component the publisher sent to.
string pubsub_name = 5;
// The type of event related to the originating occurrence.
string type = 6;
// The matching path from TopicSubscription/routes (if specified) for this event.
// This value is used by OnTopicEvent to "switch" inside the handler.
string path = 7;
}
// TopicEventBulkResponseEntry represents a single response, as part of TopicEventBulkResponse, to be
// sent by the subscribed app for the corresponding message during bulk subscribe.
message TopicEventBulkResponseEntry {
// Unique identifier associated with the message.
string entry_id = 1;
// The status of the response.
TopicEventResponse.TopicEventResponseStatus status = 2;
}
// TopicEventBulkResponse is the response from the app for a bulk published message.
message TopicEventBulkResponse {
// The list of all responses for the bulk request.
repeated TopicEventBulkResponseEntry statuses = 1;
}
// BindingEventRequest represents input bindings event.
message BindingEventRequest {
// Required. The name of the input binding component.
string name = 1;
// Required. The payload that the input binding sent.
bytes data = 2;
// The metadata set by the input binding components.
map<string,string> metadata = 3;
}
// BindingEventResponse includes operations to save state or
// send data to output bindings optionally.
message BindingEventResponse {
// The name of state store where states are saved.
string store_name = 1;
// The state key values which will be stored in store_name.
repeated common.v1.StateItem states = 2;
// BindingEventConcurrency is the kind of concurrency
enum BindingEventConcurrency {
// SEQUENTIAL sends data to output bindings specified in "to" sequentially.
SEQUENTIAL = 0;
// PARALLEL sends data to output bindings specified in "to" in parallel.
PARALLEL = 1;
}
// The list of output bindings.
repeated string to = 3;
// The content which will be sent to "to" output bindings.
bytes data = 4;
// The concurrency of output bindings to send data to
// "to" output bindings list. The default is SEQUENTIAL.
BindingEventConcurrency concurrency = 5;
}
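A handler on the same AppCallback server might use this message to fan data out; a sketch, where the output binding name is an illustrative assumption:

```go
// OnBindingEvent forwards the received payload to an output binding.
// "storagequeue" is an assumed binding name, not part of the spec.
func (a *app) OnBindingEvent(ctx context.Context, in *runtimev1pb.BindingEventRequest) (*runtimev1pb.BindingEventResponse, error) {
	return &runtimev1pb.BindingEventResponse{
		To:   []string{"storagequeue"},
		Data: in.GetData(),
		// Deliver to all bindings in "to" concurrently instead of one by one.
		Concurrency: runtimev1pb.BindingEventResponse_PARALLEL,
	}, nil
}
```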
// ListTopicSubscriptionsResponse is the message including the list of the subscribing topics.
message ListTopicSubscriptionsResponse {
// The list of topics.
repeated TopicSubscription subscriptions = 1;
}
// TopicSubscription represents topic and metadata.
message TopicSubscription {
// Required. The name of the pubsub containing the topic below to subscribe to.
string pubsub_name = 1;
// Required. The name of the topic to be subscribed to.
string topic = 2;
// The optional properties used for this topic's subscription, e.g. session id.
map<string,string> metadata = 3;
// The optional routing rules to match against. In the gRPC interface, OnTopicEvent
// is still invoked but the matching path is sent in the TopicEventRequest.
TopicRoutes routes = 5;
// The optional dead letter queue for this topic to send events to.
string dead_letter_topic = 6;
// The optional bulk subscribe settings for this topic.
BulkSubscribeConfig bulk_subscribe = 7;
}
message TopicRoutes {
// The list of rules for this topic.
repeated TopicRule rules = 1;
// The default path for this topic.
string default = 2;
}
message TopicRule {
// The optional CEL expression used to match the event.
// If the match is not specified, then the route is considered
// the default.
string match = 1;
// The path used to identify matches for this subscription.
// This value is passed in TopicEventRequest and used by OnTopicEvent to "switch"
// inside the handler.
string path = 2;
}
// BulkSubscribeConfig is the message to pass settings for bulk subscribe
message BulkSubscribeConfig {
// Required. Flag to enable/disable bulk subscribe
bool enabled = 1;
// Optional. Max number of messages to be sent in a single bulk request
int32 max_messages_count = 2;
// Optional. Max duration to wait for messages to be sent in a single bulk request
int32 max_await_duration_ms = 3;
}
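Putting the last few messages together, a subscription entry with routing rules, a dead letter topic, and bulk subscribe enabled could be built as in this sketch (the CEL expression, paths, names, and limits are illustrative assumptions):

```go
package main

import (
	runtimev1pb "github.com/dapr/dapr/pkg/proto/runtime/v1"
)

// ordersSubscription builds a subscription with one routing rule, a default
// route, a dead letter topic, and bulk subscribe enabled.
func ordersSubscription() *runtimev1pb.TopicSubscription {
	return &runtimev1pb.TopicSubscription{
		PubsubName: "pubsub",
		Topic:      "orders",
		Routes: &runtimev1pb.TopicRoutes{
			Rules: []*runtimev1pb.TopicRule{
				// Events matching the CEL expression are delivered with path "/express".
				{Match: `event.type == "order.express"`, Path: "/express"},
			},
			// Everything else falls through to the default path.
			Default: "/orders",
		},
		DeadLetterTopic: "orders-dlq",
		BulkSubscribe: &runtimev1pb.BulkSubscribeConfig{
			Enabled:            true,
			MaxMessagesCount:   100,
			MaxAwaitDurationMs: 250,
		},
	}
}
```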
// ListInputBindingsResponse is the message including the list of input bindings.
message ListInputBindingsResponse {
// The list of input bindings.
repeated string bindings = 1;
}
// HealthCheckResponse is the message with the response to the health check.
// This message is currently empty and is used as a placeholder.
message HealthCheckResponse {}
|
mikeee/dapr
|
dapr/proto/runtime/v1/appcallback.proto
|
proto
|
mit
| 10,575 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
syntax = "proto3";
package dapr.proto.runtime.v1;
import "google/protobuf/any.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/timestamp.proto";
import "dapr/proto/common/v1/common.proto";
import "dapr/proto/runtime/v1/appcallback.proto";
option csharp_namespace = "Dapr.Client.Autogen.Grpc.v1";
option java_outer_classname = "DaprProtos";
option java_package = "io.dapr.v1";
option go_package = "github.com/dapr/dapr/pkg/proto/runtime/v1;runtime";
// Dapr service provides APIs to user application to access Dapr building blocks.
service Dapr {
// Invokes a method on a remote Dapr app.
// Deprecated: Use proxy mode service invocation instead.
rpc InvokeService(InvokeServiceRequest) returns (common.v1.InvokeResponse) {}
// Gets the state for a specific key.
rpc GetState(GetStateRequest) returns (GetStateResponse) {}
// Gets a bulk of state items for a list of keys
rpc GetBulkState(GetBulkStateRequest) returns (GetBulkStateResponse) {}
// Saves the state for a specific key.
rpc SaveState(SaveStateRequest) returns (google.protobuf.Empty) {}
// Queries the state.
rpc QueryStateAlpha1(QueryStateRequest) returns (QueryStateResponse) {}
// Deletes the state for a specific key.
rpc DeleteState(DeleteStateRequest) returns (google.protobuf.Empty) {}
// Deletes a bulk of state items for a list of keys
rpc DeleteBulkState(DeleteBulkStateRequest) returns (google.protobuf.Empty) {}
// Executes transactions for a specified store
rpc ExecuteStateTransaction(ExecuteStateTransactionRequest) returns (google.protobuf.Empty) {}
// Publishes events to the specific topic.
rpc PublishEvent(PublishEventRequest) returns (google.protobuf.Empty) {}
// Bulk Publishes multiple events to the specified topic.
rpc BulkPublishEventAlpha1(BulkPublishRequest) returns (BulkPublishResponse) {}
// SubscribeTopicEventsAlpha1 subscribes to a PubSub topic and receives topic
// events from it.
rpc SubscribeTopicEventsAlpha1(stream SubscribeTopicEventsRequestAlpha1) returns (stream TopicEventRequest) {}
// Invokes binding data to specific output bindings
rpc InvokeBinding(InvokeBindingRequest) returns (InvokeBindingResponse) {}
// Gets secrets from secret stores.
rpc GetSecret(GetSecretRequest) returns (GetSecretResponse) {}
// Gets a bulk of secrets
rpc GetBulkSecret(GetBulkSecretRequest) returns (GetBulkSecretResponse) {}
// Register an actor timer.
rpc RegisterActorTimer(RegisterActorTimerRequest) returns (google.protobuf.Empty) {}
// Unregister an actor timer.
rpc UnregisterActorTimer(UnregisterActorTimerRequest) returns (google.protobuf.Empty) {}
// Register an actor reminder.
rpc RegisterActorReminder(RegisterActorReminderRequest) returns (google.protobuf.Empty) {}
// Unregister an actor reminder.
rpc UnregisterActorReminder(UnregisterActorReminderRequest) returns (google.protobuf.Empty) {}
// Gets the state for a specific actor.
rpc GetActorState(GetActorStateRequest) returns (GetActorStateResponse) {}
// Executes state transactions for a specified actor
rpc ExecuteActorStateTransaction(ExecuteActorStateTransactionRequest) returns (google.protobuf.Empty) {}
// InvokeActor calls a method on an actor.
rpc InvokeActor (InvokeActorRequest) returns (InvokeActorResponse) {}
// GetConfiguration gets configuration from configuration store.
rpc GetConfigurationAlpha1(GetConfigurationRequest) returns (GetConfigurationResponse) {}
// GetConfiguration gets configuration from configuration store.
rpc GetConfiguration(GetConfigurationRequest) returns (GetConfigurationResponse) {}
// SubscribeConfiguration gets configuration from the configuration store and subscribes to updates over a gRPC stream.
rpc SubscribeConfigurationAlpha1(SubscribeConfigurationRequest) returns (stream SubscribeConfigurationResponse) {}
// SubscribeConfiguration gets configuration from the configuration store and subscribes to updates over a gRPC stream.
rpc SubscribeConfiguration(SubscribeConfigurationRequest) returns (stream SubscribeConfigurationResponse) {}
// UnsubscribeConfiguration stops watching for configuration updates for a given subscription.
rpc UnsubscribeConfigurationAlpha1(UnsubscribeConfigurationRequest) returns (UnsubscribeConfigurationResponse) {}
// UnsubscribeConfiguration stops watching for configuration updates for a given subscription.
rpc UnsubscribeConfiguration(UnsubscribeConfigurationRequest) returns (UnsubscribeConfigurationResponse) {}
// TryLockAlpha1 tries to get a lock with an expiry.
rpc TryLockAlpha1(TryLockRequest) returns (TryLockResponse) {}
// UnlockAlpha1 unlocks a lock.
rpc UnlockAlpha1(UnlockRequest) returns (UnlockResponse) {}
// EncryptAlpha1 encrypts a message using the Dapr encryption scheme and a key stored in the vault.
rpc EncryptAlpha1(stream EncryptRequest) returns (stream EncryptResponse);
// DecryptAlpha1 decrypts a message using the Dapr encryption scheme and a key stored in the vault.
rpc DecryptAlpha1(stream DecryptRequest) returns (stream DecryptResponse);
// Gets metadata of the sidecar
rpc GetMetadata (GetMetadataRequest) returns (GetMetadataResponse) {}
// Sets value in extended metadata of the sidecar
rpc SetMetadata (SetMetadataRequest) returns (google.protobuf.Empty) {}
// SubtleGetKeyAlpha1 returns the public part of an asymmetric key stored in the vault.
rpc SubtleGetKeyAlpha1(SubtleGetKeyRequest) returns (SubtleGetKeyResponse);
// SubtleEncryptAlpha1 encrypts a small message using a key stored in the vault.
rpc SubtleEncryptAlpha1(SubtleEncryptRequest) returns (SubtleEncryptResponse);
// SubtleDecryptAlpha1 decrypts a small message using a key stored in the vault.
rpc SubtleDecryptAlpha1(SubtleDecryptRequest) returns (SubtleDecryptResponse);
// SubtleWrapKeyAlpha1 wraps a key using a key stored in the vault.
rpc SubtleWrapKeyAlpha1(SubtleWrapKeyRequest) returns (SubtleWrapKeyResponse);
// SubtleUnwrapKeyAlpha1 unwraps a key using a key stored in the vault.
rpc SubtleUnwrapKeyAlpha1(SubtleUnwrapKeyRequest) returns (SubtleUnwrapKeyResponse);
// SubtleSignAlpha1 signs a message using a key stored in the vault.
rpc SubtleSignAlpha1(SubtleSignRequest) returns (SubtleSignResponse);
// SubtleVerifyAlpha1 verifies the signature of a message using a key stored in the vault.
rpc SubtleVerifyAlpha1(SubtleVerifyRequest) returns (SubtleVerifyResponse);
// Starts a new instance of a workflow
rpc StartWorkflowAlpha1 (StartWorkflowRequest) returns (StartWorkflowResponse) {}
// Gets details about a started workflow instance
rpc GetWorkflowAlpha1 (GetWorkflowRequest) returns (GetWorkflowResponse) {}
// Purge Workflow
rpc PurgeWorkflowAlpha1 (PurgeWorkflowRequest) returns (google.protobuf.Empty) {}
// Terminates a running workflow instance
rpc TerminateWorkflowAlpha1 (TerminateWorkflowRequest) returns (google.protobuf.Empty) {}
// Pauses a running workflow instance
rpc PauseWorkflowAlpha1 (PauseWorkflowRequest) returns (google.protobuf.Empty) {}
// Resumes a paused workflow instance
rpc ResumeWorkflowAlpha1 (ResumeWorkflowRequest) returns (google.protobuf.Empty) {}
// Raise an event to a running workflow instance
rpc RaiseEventWorkflowAlpha1 (RaiseEventWorkflowRequest) returns (google.protobuf.Empty) {}
// Starts a new instance of a workflow
rpc StartWorkflowBeta1 (StartWorkflowRequest) returns (StartWorkflowResponse) {}
// Gets details about a started workflow instance
rpc GetWorkflowBeta1 (GetWorkflowRequest) returns (GetWorkflowResponse) {}
// Purge Workflow
rpc PurgeWorkflowBeta1 (PurgeWorkflowRequest) returns (google.protobuf.Empty) {}
// Terminates a running workflow instance
rpc TerminateWorkflowBeta1 (TerminateWorkflowRequest) returns (google.protobuf.Empty) {}
// Pauses a running workflow instance
rpc PauseWorkflowBeta1 (PauseWorkflowRequest) returns (google.protobuf.Empty) {}
// Resumes a paused workflow instance
rpc ResumeWorkflowBeta1 (ResumeWorkflowRequest) returns (google.protobuf.Empty) {}
// Raise an event to a running workflow instance
rpc RaiseEventWorkflowBeta1 (RaiseEventWorkflowRequest) returns (google.protobuf.Empty) {}
// Shutdown the sidecar
rpc Shutdown (ShutdownRequest) returns (google.protobuf.Empty) {}
}
// InvokeServiceRequest represents the request message for Service invocation.
message InvokeServiceRequest {
// Required. Callee's app id.
string id = 1;
// Required. The message which will be delivered to the callee.
common.v1.InvokeRequest message = 3;
}
// GetStateRequest is the message to get key-value states from specific state store.
message GetStateRequest {
// The name of state store.
string store_name = 1;
// The key of the desired state
string key = 2;
// The read consistency of the state store.
common.v1.StateOptions.StateConsistency consistency = 3;
// The metadata which will be sent to state store components.
map<string, string> metadata = 4;
}
// GetBulkStateRequest is the message to get a list of key-value states from specific state store.
message GetBulkStateRequest {
// The name of state store.
string store_name = 1;
// The keys to get.
repeated string keys = 2;
// The number of parallel operations executed on the state store for a get operation.
int32 parallelism = 3;
// The metadata which will be sent to state store components.
map<string, string> metadata = 4;
}
// GetBulkStateResponse is the response conveying the list of state values.
message GetBulkStateResponse {
// The list of items containing the requested keys and their values.
repeated BulkStateItem items = 1;
}
// BulkStateItem is the response item for a bulk get operation.
// Return values include the item key, data and etag.
message BulkStateItem {
// state item key
string key = 1;
// The byte array data
bytes data = 2;
// The entity tag which represents the specific version of data.
// ETag format is defined by the corresponding data store.
string etag = 3;
// The error that was returned from the state store in case of a failed get operation.
string error = 4;
// The metadata which will be sent to app.
map<string, string> metadata = 5;
}
// GetStateResponse is the response conveying the state value and etag.
message GetStateResponse {
// The byte array data
bytes data = 1;
// The entity tag which represents the specific version of data.
// ETag format is defined by the corresponding data store.
string etag = 2;
// The metadata which will be sent to app.
map<string, string> metadata = 3;
}
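As a usage sketch for the state RPCs above, a client can connect to the sidecar's gRPC port and round-trip a key (the address, store name, key, and value are illustrative assumptions):

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	commonv1pb "github.com/dapr/dapr/pkg/proto/common/v1"
	runtimev1pb "github.com/dapr/dapr/pkg/proto/runtime/v1"
)

func main() {
	// 50001 is a commonly used daprd gRPC port; adjust to your sidecar.
	conn, err := grpc.Dial("localhost:50001", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	client := runtimev1pb.NewDaprClient(conn)
	ctx := context.Background()

	// Save a single key-value pair into the "statestore" component.
	_, err = client.SaveState(ctx, &runtimev1pb.SaveStateRequest{
		StoreName: "statestore",
		States:    []*commonv1pb.StateItem{{Key: "order_1", Value: []byte(`{"qty":1}`)}},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Read it back; the etag can be used for optimistic concurrency later.
	resp, err := client.GetState(ctx, &runtimev1pb.GetStateRequest{
		StoreName: "statestore",
		Key:       "order_1",
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("value=%s etag=%s", resp.GetData(), resp.GetEtag())
}
```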
// DeleteStateRequest is the message to delete key-value states in the specific state store.
message DeleteStateRequest {
// The name of state store.
string store_name = 1;
// The key of the desired state
string key = 2;
// The entity tag which represents the specific version of data.
// The exact ETag format is defined by the corresponding data store.
common.v1.Etag etag = 3;
// State operation options which include concurrency,
// consistency, and retry_policy.
common.v1.StateOptions options = 4;
// The metadata which will be sent to state store components.
map<string, string> metadata = 5;
}
// DeleteBulkStateRequest is the message to delete a list of key-value states from specific state store.
message DeleteBulkStateRequest {
// The name of state store.
string store_name = 1;
// The array of the state key values.
repeated common.v1.StateItem states = 2;
}
// SaveStateRequest is the message to save multiple states into state store.
message SaveStateRequest {
// The name of state store.
string store_name = 1;
// The array of the state key values.
repeated common.v1.StateItem states = 2;
}
// QueryStateRequest is the message to query state store.
message QueryStateRequest {
// The name of state store.
string store_name = 1 [json_name = "storeName"];
// The query in JSON format.
string query = 2;
// The metadata which will be sent to state store components.
map<string, string> metadata = 3;
}
message QueryStateItem {
// The object key.
string key = 1;
// The object value.
bytes data = 2;
// The entity tag which represents the specific version of data.
// ETag format is defined by the corresponding data store.
string etag = 3;
// The error message indicating an error in processing of the query result.
string error = 4;
}
// QueryStateResponse is the response conveying the query results.
message QueryStateResponse {
// An array of query results.
repeated QueryStateItem results = 1;
// Pagination token.
string token = 2;
// The metadata which will be sent to app.
map<string, string> metadata = 3;
}
// PublishEventRequest is the message to publish event data to pubsub topic
message PublishEventRequest {
// The name of the pubsub component
string pubsub_name = 1;
// The pubsub topic
string topic = 2;
// The data which will be published to topic.
bytes data = 3;
// The content type for the data (optional).
string data_content_type = 4;
// The metadata passed to the pubsub components
//
// metadata property:
// - key : the key of the message.
map<string, string> metadata = 5;
}
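A publish is then a single unary RPC; this sketch reuses a connected runtimev1pb.DaprClient like the one from the state example above (the names, payload, and the partitionKey metadata key are illustrative assumptions):

```go
// publishOrder publishes one JSON event to an assumed pubsub/topic pair.
func publishOrder(ctx context.Context, client runtimev1pb.DaprClient) error {
	_, err := client.PublishEvent(ctx, &runtimev1pb.PublishEventRequest{
		PubsubName:      "pubsub",
		Topic:           "orders",
		Data:            []byte(`{"orderId":"order_1"}`),
		DataContentType: "application/json",
		// Optional per-message metadata; the key shown is illustrative.
		Metadata: map[string]string{"partitionKey": "order_1"},
	})
	return err
}
```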
// BulkPublishRequest is the message to bulk publish events to pubsub topic
message BulkPublishRequest {
// The name of the pubsub component
string pubsub_name = 1;
// The pubsub topic
string topic = 2;
// The entries which contain the individual events and associated details to be published
repeated BulkPublishRequestEntry entries = 3;
// The request-level metadata passed to the pubsub components
map<string, string> metadata = 4;
}
// BulkPublishRequestEntry is the message containing the event to be bulk published
message BulkPublishRequestEntry {
// The request scoped unique ID referring to this message. Used to map status in response
string entry_id = 1;
// The event which will be published to the topic
bytes event = 2;
// The content type for the event
string content_type = 3;
// The event level metadata passing to the pubsub component
map<string, string> metadata = 4;
}
// BulkPublishResponse is the message returned from a BulkPublishEvent call
message BulkPublishResponse {
// The entries for the events that failed to be published in the BulkPublishEvent call
repeated BulkPublishResponseFailedEntry failedEntries = 1;
}
// BulkPublishResponseFailedEntry is the message containing the entryID and error of a failed event in BulkPublishEvent call
message BulkPublishResponseFailedEntry {
// The response scoped unique ID referring to this message
string entry_id = 1;
// The error message if any on failure
string error = 2;
}
// SubscribeTopicEventsRequestAlpha1 is a message containing the details for
// subscribing to a topic via streaming.
// The first message must always be the initial request. All subsequent
// messages must be event responses.
message SubscribeTopicEventsRequestAlpha1 {
oneof subscribe_topic_events_request_type {
SubscribeTopicEventsInitialRequestAlpha1 initial_request = 1;
SubscribeTopicEventsResponseAlpha1 event_response = 2;
}
}
// SubscribeTopicEventsInitialRequestAlpha1 is the initial message containing the
// details for subscribing to a topic via streaming.
message SubscribeTopicEventsInitialRequestAlpha1 {
// The name of the pubsub component
string pubsub_name = 1;
// The pubsub topic
string topic = 2;
// The metadata passed to the pubsub components
//
// metadata property:
// - key : the key of the message.
map<string, string> metadata = 3;
// dead_letter_topic is the topic to which messages that fail to be processed
// are sent.
optional string dead_letter_topic = 4;
}
// SubscribeTopicEventsResponseAlpha1 is a message containing the result of a
// subscription to a topic.
message SubscribeTopicEventsResponseAlpha1 {
// id is the unique identifier for the subscription request.
string id = 1;
// status is the result of the subscription request.
TopicEventResponse status = 2;
}
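The streaming subscription is driven entirely by the client: it sends the initial request once, then answers each delivered event on the same stream. A sketch, assuming the standard protoc-gen-go naming for the oneof wrappers, a connected client as above, and illustrative pubsub/topic names:

```go
// subscribeOrders opens the bidirectional stream, sends the initial
// request, then acknowledges every event it receives.
func subscribeOrders(ctx context.Context, client runtimev1pb.DaprClient) error {
	stream, err := client.SubscribeTopicEventsAlpha1(ctx)
	if err != nil {
		return err
	}
	// First message: the initial request naming the pubsub and topic.
	err = stream.Send(&runtimev1pb.SubscribeTopicEventsRequestAlpha1{
		SubscribeTopicEventsRequestType: &runtimev1pb.SubscribeTopicEventsRequestAlpha1_InitialRequest{
			InitialRequest: &runtimev1pb.SubscribeTopicEventsInitialRequestAlpha1{
				PubsubName: "pubsub",
				Topic:      "orders",
			},
		},
	})
	if err != nil {
		return err
	}
	for {
		event, err := stream.Recv()
		if err != nil {
			return err
		}
		// All subsequent messages are event responses keyed by the event id.
		err = stream.Send(&runtimev1pb.SubscribeTopicEventsRequestAlpha1{
			SubscribeTopicEventsRequestType: &runtimev1pb.SubscribeTopicEventsRequestAlpha1_EventResponse{
				EventResponse: &runtimev1pb.SubscribeTopicEventsResponseAlpha1{
					Id:     event.GetId(),
					Status: &runtimev1pb.TopicEventResponse{Status: runtimev1pb.TopicEventResponse_SUCCESS},
				},
			},
		})
		if err != nil {
			return err
		}
	}
}
```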
// InvokeBindingRequest is the message to send data to output bindings
message InvokeBindingRequest {
// The name of the output binding to invoke.
string name = 1;
// The data which will be sent to output binding.
bytes data = 2;
// The metadata passed to the output binding components
//
// Common metadata property:
// - ttlInSeconds : the time to live in seconds for the message.
// If set in the binding definition, it causes all messages to
// have a default time to live. The message ttl overrides any value
// in the binding definition.
map<string, string> metadata = 3;
// The name of the operation type for the binding to invoke
string operation = 4;
}
// InvokeBindingResponse is the message returned from an output binding invocation
message InvokeBindingResponse {
// The data which will be sent to output binding.
bytes data = 1;
// The metadata returned from an external system
map<string, string> metadata = 2;
}
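Invoking an output binding directly is a unary call; a sketch reusing a connected client as above (the binding name, operation, and ttlInSeconds value are illustrative assumptions):

```go
// sendToQueue invokes an assumed "storagequeue" output binding.
func sendToQueue(ctx context.Context, client runtimev1pb.DaprClient) error {
	resp, err := client.InvokeBinding(ctx, &runtimev1pb.InvokeBindingRequest{
		Name:      "storagequeue",
		Operation: "create",
		Data:      []byte("hello"),
		Metadata:  map[string]string{"ttlInSeconds": "60"},
	})
	if err != nil {
		return err
	}
	_ = resp.GetData() // response payload from the external system, if any
	return nil
}
```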
// GetSecretRequest is the message to get secret from secret store.
message GetSecretRequest {
// The name of secret store.
string store_name = 1 [json_name = "storeName"];
// The name of secret key.
string key = 2;
// The metadata which will be sent to secret store components.
map<string, string> metadata = 3;
}
// GetSecretResponse is the response message to convey the requested secret.
message GetSecretResponse {
// data is the secret value. Some secret stores, such as the Kubernetes secret
// store, can save multiple secrets for a single secret key.
map<string, string> data = 1;
}
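Reading a secret is a single call with a connected client; a sketch where the store and key names are illustrative assumptions:

```go
// readSecret fetches one secret key from an assumed "kubernetes" store.
func readSecret(ctx context.Context, client runtimev1pb.DaprClient) (map[string]string, error) {
	resp, err := client.GetSecret(ctx, &runtimev1pb.GetSecretRequest{
		StoreName: "kubernetes",
		Key:       "db-password",
	})
	if err != nil {
		return nil, err
	}
	// A single key can map to several values in stores like Kubernetes.
	return resp.GetData(), nil
}
```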
// GetBulkSecretRequest is the message to get the secrets from secret store.
message GetBulkSecretRequest {
// The name of secret store.
string store_name = 1 [json_name = "storeName"];
// The metadata which will be sent to secret store components.
map<string, string> metadata = 2;
}
// SecretResponse is a map of decrypted string/string values
message SecretResponse {
map<string, string> secrets = 1;
}
// GetBulkSecretResponse is the response message to convey the requested secrets.
message GetBulkSecretResponse {
// data holds the secret values. Some secret stores, such as the Kubernetes secret
// store, can save multiple secrets for a single secret key.
map<string, SecretResponse> data = 1;
}
// TransactionalStateOperation is the message to execute a specified operation with a key-value pair.
message TransactionalStateOperation {
// The type of operation to be executed
string operationType = 1;
// State values to be operated on
common.v1.StateItem request = 2;
}
// ExecuteStateTransactionRequest is the message to execute multiple operations on a specified store.
message ExecuteStateTransactionRequest {
// Required. name of state store.
string storeName = 1;
// Required. transactional operation list.
repeated TransactionalStateOperation operations = 2;
// The metadata used for transactional operations.
map<string, string> metadata = 3;
}
// RegisterActorTimerRequest is the message to register a timer for an actor of a given type and id.
message RegisterActorTimerRequest {
string actor_type = 1 [json_name = "actorType"];
string actor_id = 2 [json_name = "actorId"];
string name = 3;
string due_time = 4 [json_name = "dueTime"];
string period = 5;
string callback = 6;
bytes data = 7;
string ttl = 8;
}
// UnregisterActorTimerRequest is the message to unregister an actor timer
message UnregisterActorTimerRequest {
string actor_type = 1 [json_name = "actorType"];
string actor_id = 2 [json_name = "actorId"];
string name = 3;
}
// RegisterActorReminderRequest is the message to register a reminder for an actor of a given type and id.
message RegisterActorReminderRequest {
string actor_type = 1 [json_name = "actorType"];
string actor_id = 2 [json_name = "actorId"];
string name = 3;
string due_time = 4 [json_name = "dueTime"];
string period = 5;
bytes data = 6;
string ttl = 7;
}
// UnregisterActorReminderRequest is the message to unregister an actor reminder.
message UnregisterActorReminderRequest {
string actor_type = 1 [json_name = "actorType"];
string actor_id = 2 [json_name = "actorId"];
string name = 3;
}
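A sketch of registering a reminder through a connected client as above; the actor identifiers and the dueTime/period/ttl values (Go-style duration strings) are illustrative assumptions:

```go
// remind schedules a reminder on an assumed actor type and id.
func remind(ctx context.Context, client runtimev1pb.DaprClient) error {
	_, err := client.RegisterActorReminder(ctx, &runtimev1pb.RegisterActorReminderRequest{
		ActorType: "OrderActor",
		ActorId:   "order_1",
		Name:      "expiry-check",
		DueTime:   "10s", // first fire after 10 seconds
		Period:    "1m",  // then every minute
		Ttl:       "1h",  // stop after one hour
		Data:      []byte(`{"reason":"expiry"}`),
	})
	return err
}
```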
// GetActorStateRequest is the message to get key-value states from specific actor.
message GetActorStateRequest {
string actor_type = 1 [json_name = "actorType"];
string actor_id = 2 [json_name = "actorId"];
string key = 3;
}
// GetActorStateResponse is the response conveying the actor's state value.
message GetActorStateResponse {
bytes data = 1;
// The metadata which will be sent to app.
map<string, string> metadata = 2;
}
// ExecuteActorStateTransactionRequest is the message to execute multiple operations on a specified actor.
message ExecuteActorStateTransactionRequest {
string actor_type = 1 [json_name = "actorType"];
string actor_id = 2 [json_name = "actorId"];
repeated TransactionalActorStateOperation operations = 3;
}
// TransactionalActorStateOperation is the message to execute a specified operation with a key-value pair.
message TransactionalActorStateOperation {
string operationType = 1;
string key = 2;
google.protobuf.Any value = 3;
// The metadata used for transactional operations.
//
// Common metadata property:
// - ttlInSeconds : the time to live in seconds for the stored value.
map<string, string> metadata = 4;
}
// InvokeActorRequest is the message to call an actor.
message InvokeActorRequest {
string actor_type = 1 [json_name = "actorType"];
string actor_id = 2 [json_name = "actorId"];
string method = 3;
bytes data = 4;
map<string, string> metadata = 5;
}
// InvokeActorResponse is the method that returns an actor invocation response.
message InvokeActorResponse {
bytes data = 1;
}
// GetMetadataRequest is the message for the GetMetadata request.
message GetMetadataRequest {
// Empty
}
// GetMetadataResponse is a message that is returned on GetMetadata rpc call.
message GetMetadataResponse {
string id = 1;
// Deprecated alias for actor_runtime.active_actors.
repeated ActiveActorsCount active_actors_count = 2 [json_name = "actors", deprecated = true];
repeated RegisteredComponents registered_components = 3 [json_name = "components"];
map<string, string> extended_metadata = 4 [json_name = "extended"];
repeated PubsubSubscription subscriptions = 5 [json_name = "subscriptions"];
repeated MetadataHTTPEndpoint http_endpoints = 6 [json_name = "httpEndpoints"];
AppConnectionProperties app_connection_properties = 7 [json_name = "appConnectionProperties"];
string runtime_version = 8 [json_name = "runtimeVersion"];
repeated string enabled_features = 9 [json_name = "enabledFeatures"];
ActorRuntime actor_runtime = 10 [json_name = "actorRuntime"];
}
message ActorRuntime {
enum ActorRuntimeStatus {
// Indicates that the actor runtime is still being initialized.
INITIALIZING = 0;
// Indicates that the actor runtime is disabled.
// This normally happens when Dapr is started without "placement-host-address"
DISABLED = 1;
// Indicates the actor runtime is running, either as an actor host or client.
RUNNING = 2;
}
// Contains an enum indicating whether the actor runtime has been initialized.
ActorRuntimeStatus runtime_status = 1 [json_name = "runtimeStatus"];
// Count of active actors per type.
repeated ActiveActorsCount active_actors = 2 [json_name = "activeActors"];
// Indicates whether the actor runtime is ready to host actors.
bool host_ready = 3 [json_name = "hostReady"];
// Custom message from the placement provider.
string placement = 4 [json_name = "placement"];
}
message ActiveActorsCount {
string type = 1;
int32 count = 2;
}
message RegisteredComponents {
string name = 1;
string type = 2;
string version = 3;
repeated string capabilities = 4;
}
message MetadataHTTPEndpoint {
string name = 1 [json_name = "name"];
}
message AppConnectionProperties {
int32 port = 1;
string protocol = 2;
string channel_address = 3 [json_name = "channelAddress"];
int32 max_concurrency = 4 [json_name = "maxConcurrency"];
AppConnectionHealthProperties health = 5;
}
message AppConnectionHealthProperties {
string health_check_path = 1 [json_name = "healthCheckPath"];
string health_probe_interval = 2 [json_name = "healthProbeInterval"];
string health_probe_timeout = 3 [json_name = "healthProbeTimeout"];
int32 health_threshold = 4 [json_name = "healthThreshold"];
}
message PubsubSubscription {
string pubsub_name = 1 [json_name = "pubsubname"];
string topic = 2 [json_name = "topic"];
map<string,string> metadata = 3 [json_name = "metadata"];
PubsubSubscriptionRules rules = 4 [json_name = "rules"];
string dead_letter_topic = 5 [json_name = "deadLetterTopic"];
}
message PubsubSubscriptionRules {
repeated PubsubSubscriptionRule rules = 1;
}
message PubsubSubscriptionRule {
string match = 1;
string path = 2;
}
message SetMetadataRequest {
string key = 1;
string value = 2;
}
// GetConfigurationRequest is the message to get a list of key-value configuration from specified configuration store.
message GetConfigurationRequest {
// Required. The name of configuration store.
string store_name = 1;
// Optional. The key of the configuration item to fetch.
// If set, only query for the specified configuration items.
// Empty list means fetch all.
repeated string keys = 2;
// Optional. The metadata which will be sent to configuration store components.
map<string, string> metadata = 3;
}
// GetConfigurationResponse is the response conveying the list of configuration values.
// It should be the FULL configuration of the specified application, containing all of its configuration items.
message GetConfigurationResponse {
map<string, common.v1.ConfigurationItem> items = 1;
}
// SubscribeConfigurationRequest is the message to get a list of key-value configuration from specified configuration store.
message SubscribeConfigurationRequest {
// The name of configuration store.
string store_name = 1;
// Optional. The key of the configuration item to fetch.
// If set, only query for the specified configuration items.
// Empty list means fetch all.
repeated string keys = 2;
// The metadata which will be sent to configuration store components.
map<string, string> metadata = 3;
}
// UnSubscribeConfigurationRequest is the message to stop watching the key-value configuration.
message UnsubscribeConfigurationRequest {
// The name of configuration store.
string store_name = 1;
// The id to unsubscribe.
string id = 2;
}
message SubscribeConfigurationResponse {
// Subscribe id, used to stop subscription.
string id = 1;
// The list of items containing configuration values
map<string, common.v1.ConfigurationItem> items = 2;
}
message UnsubscribeConfigurationResponse {
bool ok = 1;
string message = 2;
}
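Subscribing is a server-streaming call: the first response carries the subscription id that UnsubscribeConfiguration later needs. A sketch with a connected client as above (the store and key names are illustrative assumptions):

```go
// watchConfig streams configuration updates for one assumed key.
func watchConfig(ctx context.Context, client runtimev1pb.DaprClient) error {
	stream, err := client.SubscribeConfiguration(ctx, &runtimev1pb.SubscribeConfigurationRequest{
		StoreName: "configstore",
		Keys:      []string{"feature-flags"},
	})
	if err != nil {
		return err
	}
	for {
		update, err := stream.Recv()
		if err != nil {
			return err // io.EOF when the stream is closed
		}
		// Keep update.GetId(); it is required to unsubscribe later.
		log.Printf("subscription %s: %d item(s)", update.GetId(), len(update.GetItems()))
	}
}
```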
message TryLockRequest {
// Required. The lock store name, e.g. `redis`.
string store_name = 1 [json_name = "storeName"];
// Required. resource_id is the lock key, e.g. `order_id_111`.
// It stands for "which resource I want to protect".
string resource_id = 2 [json_name = "resourceId"];
// Required. lock_owner indicates the identifier of the lock owner.
// You can generate a uuid as lock_owner. For example, in golang:
//
// req.LockOwner = uuid.New().String()
//
// This field is per request, not per process, so it is different for each request,
// which aims to prevent multiple threads in the same process from trying the same lock concurrently.
//
// The reason why we don't make it automatically generated is:
// 1. If it is automatically generated, there must be a 'my_lock_owner_id' field in the response.
// This name is so weird that we think it is inappropriate to put it into the api spec.
// 2. If we change the field 'my_lock_owner_id' in the response to 'lock_owner', which means the current lock owner of this lock,
// we find that in some lock services users can't get the current lock owner. Actually users don't need it at all.
// 3. When a reentrant lock is needed, the existing lock_owner is required to identify the client and check "whether this client can reenter this lock".
// So this field in the request shouldn't be removed.
string lock_owner = 3 [json_name = "lockOwner"];
// Required. The time before expiry. The time unit is seconds.
int32 expiry_in_seconds = 4 [json_name = "expiryInSeconds"];
}
message TryLockResponse {
bool success = 1;
}
message UnlockRequest {
string store_name = 1 [json_name = "storeName"];
// resource_id is the lock key.
string resource_id = 2 [json_name = "resourceId"];
string lock_owner = 3 [json_name = "lockOwner"];
}
message UnlockResponse {
enum Status {
SUCCESS = 0;
LOCK_DOES_NOT_EXIST = 1;
LOCK_BELONGS_TO_OTHERS = 2;
INTERNAL_ERROR = 3;
}
Status status = 1;
}
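A sketch of the lock flow these messages describe, following the per-request uuid advice from the TryLockRequest comment; it assumes a connected client as above plus the github.com/google/uuid and errors packages, and the store/resource names are illustrative:

```go
// withLock acquires the lock, runs the protected work, and releases it.
func withLock(ctx context.Context, client runtimev1pb.DaprClient) error {
	owner := uuid.New().String() // a fresh owner per request, as recommended above
	lock, err := client.TryLockAlpha1(ctx, &runtimev1pb.TryLockRequest{
		StoreName:       "lockstore",
		ResourceId:      "order_id_111",
		LockOwner:       owner,
		ExpiryInSeconds: 10,
	})
	if err != nil {
		return err
	}
	if !lock.GetSuccess() {
		return errors.New("lock not acquired")
	}
	// Release with the same owner; the response is ignored in this sketch.
	defer client.UnlockAlpha1(ctx, &runtimev1pb.UnlockRequest{
		StoreName:  "lockstore",
		ResourceId: "order_id_111",
		LockOwner:  owner,
	})
	// ... do the protected work here ...
	return nil
}
```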
// SubtleGetKeyRequest is the request object for SubtleGetKeyAlpha1.
message SubtleGetKeyRequest {
enum KeyFormat {
// PEM (PKIX) (default)
PEM = 0;
// JSON (JSON Web Key) as string
JSON = 1;
}
// Name of the component
string component_name = 1 [json_name="componentName"];
// Name (or name/version) of the key to use in the key vault
string name = 2;
// Response format
KeyFormat format = 3;
}
// SubtleGetKeyResponse is the response for SubtleGetKeyAlpha1.
message SubtleGetKeyResponse {
// Name (or name/version) of the key.
// This is also returned in the response, in case the name resolved to a specific version.
string name = 1;
// Public key, encoded in the requested format
string public_key = 2 [json_name="publicKey"];
}
// SubtleEncryptRequest is the request for SubtleEncryptAlpha1.
message SubtleEncryptRequest {
// Name of the component
string component_name = 1 [json_name="componentName"];
// Message to encrypt.
bytes plaintext = 2;
// Algorithm to use, as in the JWA standard.
string algorithm = 3;
// Name (or name/version) of the key.
string key_name = 4 [json_name="keyName"];
// Nonce / initialization vector.
// Ignored with asymmetric ciphers.
bytes nonce = 5;
// Associated Data when using AEAD ciphers (optional).
bytes associated_data = 6 [json_name="associatedData"];
}
// SubtleEncryptResponse is the response for SubtleEncryptAlpha1.
message SubtleEncryptResponse {
// Encrypted ciphertext.
bytes ciphertext = 1;
// Authentication tag.
// This is nil when not using an authenticated cipher.
bytes tag = 2;
}
// SubtleDecryptRequest is the request for SubtleDecryptAlpha1.
message SubtleDecryptRequest {
// Name of the component
string component_name = 1 [json_name="componentName"];
// Message to decrypt.
bytes ciphertext = 2;
// Algorithm to use, as in the JWA standard.
string algorithm = 3;
// Name (or name/version) of the key.
string key_name = 4 [json_name="keyName"];
// Nonce / initialization vector.
// Ignored with asymmetric ciphers.
bytes nonce = 5;
// Authentication tag.
// This is nil when not using an authenticated cipher.
bytes tag = 6;
// Associated Data when using AEAD ciphers (optional).
bytes associated_data = 7 [json_name="associatedData"];
}
// SubtleDecryptResponse is the response for SubtleDecryptAlpha1.
message SubtleDecryptResponse {
// Decrypted plaintext.
bytes plaintext = 1;
}
// SubtleWrapKeyRequest is the request for SubtleWrapKeyAlpha1.
message SubtleWrapKeyRequest {
// Name of the component
string component_name = 1 [json_name="componentName"];
// Key to wrap
bytes plaintext_key = 2 [json_name="plaintextKey"];
// Algorithm to use, as in the JWA standard.
string algorithm = 3;
// Name (or name/version) of the key.
string key_name = 4 [json_name="keyName"];
// Nonce / initialization vector.
// Ignored with asymmetric ciphers.
bytes nonce = 5;
// Associated Data when using AEAD ciphers (optional).
bytes associated_data = 6 [json_name="associatedData"];
}
// SubtleWrapKeyResponse is the response for SubtleWrapKeyAlpha1.
message SubtleWrapKeyResponse {
// Wrapped key.
bytes wrapped_key = 1 [json_name="wrappedKey"];
// Authentication tag.
// This is nil when not using an authenticated cipher.
bytes tag = 2;
}
// SubtleUnwrapKeyRequest is the request for SubtleUnwrapKeyAlpha1.
message SubtleUnwrapKeyRequest {
// Name of the component
string component_name = 1 [json_name="componentName"];
// Wrapped key.
bytes wrapped_key = 2 [json_name="wrappedKey"];
// Algorithm to use, as in the JWA standard.
string algorithm = 3;
// Name (or name/version) of the key.
string key_name = 4 [json_name="keyName"];
// Nonce / initialization vector.
// Ignored with asymmetric ciphers.
bytes nonce = 5;
// Authentication tag.
// This is nil when not using an authenticated cipher.
bytes tag = 6;
// Associated Data when using AEAD ciphers (optional).
bytes associated_data = 7 [json_name="associatedData"];
}
// SubtleUnwrapKeyResponse is the response for SubtleUnwrapKeyAlpha1.
message SubtleUnwrapKeyResponse {
// Key in plaintext
bytes plaintext_key = 1 [json_name="plaintextKey"];
}
// SubtleSignRequest is the request for SubtleSignAlpha1.
message SubtleSignRequest {
// Name of the component
string component_name = 1 [json_name="componentName"];
// Digest to sign.
bytes digest = 2;
// Algorithm to use, as in the JWA standard.
string algorithm = 3;
// Name (or name/version) of the key.
string key_name = 4 [json_name="keyName"];
}
// SubtleSignResponse is the response for SubtleSignAlpha1.
message SubtleSignResponse {
// The signature that was computed
bytes signature = 1;
}
// SubtleVerifyRequest is the request for SubtleVerifyAlpha1.
message SubtleVerifyRequest {
// Name of the component
string component_name = 1 [json_name="componentName"];
// Digest of the message.
bytes digest = 2;
// Algorithm to use, as in the JWA standard.
string algorithm = 3;
// Name (or name/version) of the key.
string key_name = 4 [json_name="keyName"];
// Signature to verify.
bytes signature = 5;
}
// SubtleVerifyResponse is the response for SubtleVerifyAlpha1.
message SubtleVerifyResponse {
// True if the signature is valid.
bool valid = 1;
}
// EncryptRequest is the request for EncryptAlpha1.
message EncryptRequest {
// Request details. Must be present in the first message only.
EncryptRequestOptions options = 1;
// Chunk of data of arbitrary size.
common.v1.StreamPayload payload = 2;
}
// EncryptRequestOptions contains options for the first message in the EncryptAlpha1 request.
message EncryptRequestOptions {
// Name of the component. Required.
string component_name = 1 [json_name="componentName"];
// Name (or name/version) of the key. Required.
string key_name = 2 [json_name="keyName"];
// Key wrapping algorithm to use. Required.
// Supported options include: A256KW (alias: AES), A128CBC, A192CBC, A256CBC, RSA-OAEP-256 (alias: RSA).
string key_wrap_algorithm = 3;
// Cipher used to encrypt data (optional): "aes-gcm" (default) or "chacha20-poly1305"
string data_encryption_cipher = 10;
// If true, the encrypted document does not contain a key reference.
// In that case, calls to the Decrypt method must provide a key reference (name or name/version).
// Defaults to false.
bool omit_decryption_key_name = 11 [json_name="omitDecryptionKeyName"];
// Key reference to embed in the encrypted document (name or name/version).
// This is helpful if the reference of the key used to decrypt the document is different from the one used to encrypt it.
// If unset, uses the reference of the key used to encrypt the document (this is the default behavior).
// This option is ignored if omit_decryption_key_name is true.
string decryption_key_name = 12 [json_name="decryptionKeyName"];
}
// EncryptResponse is the response for EncryptAlpha1.
message EncryptResponse {
// Chunk of data.
common.v1.StreamPayload payload = 1;
}
// DecryptRequest is the request for DecryptAlpha1.
message DecryptRequest {
// Request details. Must be present in the first message only.
DecryptRequestOptions options = 1;
// Chunk of data of arbitrary size.
common.v1.StreamPayload payload = 2;
}
// DecryptRequestOptions contains options for the first message in the DecryptAlpha1 request.
message DecryptRequestOptions {
// Name of the component
string component_name = 1 [json_name="componentName"];
// Name (or name/version) of the key to decrypt the message.
// Overrides any key reference included in the message if present.
// This is required if the message doesn't include a key reference (i.e. was created with omit_decryption_key_name set to true).
string key_name = 12 [json_name="keyName"];
}
// DecryptResponse is the response for DecryptAlpha1.
message DecryptResponse {
// Chunk of data.
common.v1.StreamPayload payload = 1;
}
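Both high-level crypto RPCs are streams of StreamPayload chunks, with the options carried only in the first message. A sketch of the encrypt side using a connected client as above (the component/key names and the single-chunk framing are illustrative assumptions; it also assumes the io package):

```go
// encrypt sends one plaintext chunk and collects the ciphertext chunks.
func encrypt(ctx context.Context, client runtimev1pb.DaprClient, plaintext []byte) ([]byte, error) {
	stream, err := client.EncryptAlpha1(ctx)
	if err != nil {
		return nil, err
	}
	// First (and here only) message: options plus payload chunk 0.
	err = stream.Send(&runtimev1pb.EncryptRequest{
		Options: &runtimev1pb.EncryptRequestOptions{
			ComponentName:    "azurekeyvault",
			KeyName:          "mykey",
			KeyWrapAlgorithm: "RSA",
		},
		Payload: &commonv1pb.StreamPayload{Data: plaintext, Seq: 0},
	})
	if err != nil {
		return nil, err
	}
	if err = stream.CloseSend(); err != nil {
		return nil, err
	}
	var out []byte
	for {
		chunk, err := stream.Recv()
		if err == io.EOF {
			return out, nil
		}
		if err != nil {
			return nil, err
		}
		out = append(out, chunk.GetPayload().GetData()...)
	}
}
```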
// GetWorkflowRequest is the request for GetWorkflowBeta1.
message GetWorkflowRequest {
// ID of the workflow instance to query.
string instance_id = 1 [json_name = "instanceID"];
// Name of the workflow component.
string workflow_component = 2 [json_name = "workflowComponent"];
}
// GetWorkflowResponse is the response for GetWorkflowBeta1.
message GetWorkflowResponse {
// ID of the workflow instance.
string instance_id = 1 [json_name = "instanceID"];
// Name of the workflow.
string workflow_name = 2 [json_name = "workflowName"];
// The time at which the workflow instance was created.
google.protobuf.Timestamp created_at = 3 [json_name = "createdAt"];
// The last time at which the workflow instance had its state changed.
google.protobuf.Timestamp last_updated_at = 4 [json_name = "lastUpdatedAt"];
// The current status of the workflow instance, for example, "PENDING", "RUNNING", "SUSPENDED", "COMPLETED", "FAILED", and "TERMINATED".
string runtime_status = 5 [json_name = "runtimeStatus"];
// Additional component-specific properties of the workflow instance.
map<string, string> properties = 6;
}
// StartWorkflowRequest is the request for StartWorkflowBeta1.
message StartWorkflowRequest {
// The ID to assign to the started workflow instance. If empty, a random ID is generated.
string instance_id = 1 [json_name = "instanceID"];
// Name of the workflow component.
string workflow_component = 2 [json_name = "workflowComponent"];
// Name of the workflow.
string workflow_name = 3 [json_name = "workflowName"];
// Additional component-specific options for starting the workflow instance.
map<string, string> options = 4;
// Input data for the workflow instance.
bytes input = 5;
}
// StartWorkflowResponse is the response for StartWorkflowBeta1.
message StartWorkflowResponse {
// ID of the started workflow instance.
string instance_id = 1 [json_name = "instanceID"];
}
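A sketch of starting an instance and reading back its status via the Beta1 RPCs with a connected client as above (the component name, workflow name, and input are illustrative assumptions):

```go
// runWorkflow starts an instance and logs its runtime status.
func runWorkflow(ctx context.Context, client runtimev1pb.DaprClient) error {
	start, err := client.StartWorkflowBeta1(ctx, &runtimev1pb.StartWorkflowRequest{
		WorkflowComponent: "dapr",
		WorkflowName:      "order_processing",
		Input:             []byte(`{"orderId":"order_1"}`),
	})
	if err != nil {
		return err
	}
	st, err := client.GetWorkflowBeta1(ctx, &runtimev1pb.GetWorkflowRequest{
		InstanceId:        start.GetInstanceId(),
		WorkflowComponent: "dapr",
	})
	if err != nil {
		return err
	}
	log.Printf("workflow %s is %s", st.GetInstanceId(), st.GetRuntimeStatus())
	return nil
}
```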
// TerminateWorkflowRequest is the request for TerminateWorkflowBeta1.
message TerminateWorkflowRequest {
// ID of the workflow instance to terminate.
string instance_id = 1 [json_name = "instanceID"];
// Name of the workflow component.
string workflow_component = 2 [json_name = "workflowComponent"];
}
// PauseWorkflowRequest is the request for PauseWorkflowBeta1.
message PauseWorkflowRequest {
// ID of the workflow instance to pause.
string instance_id = 1 [json_name = "instanceID"];
// Name of the workflow component.
string workflow_component = 2 [json_name = "workflowComponent"];
}
// ResumeWorkflowRequest is the request for ResumeWorkflowBeta1.
message ResumeWorkflowRequest {
// ID of the workflow instance to resume.
string instance_id = 1 [json_name = "instanceID"];
// Name of the workflow component.
string workflow_component = 2 [json_name = "workflowComponent"];
}
// RaiseEventWorkflowRequest is the request for RaiseEventWorkflowBeta1.
message RaiseEventWorkflowRequest {
// ID of the workflow instance to raise an event for.
string instance_id = 1 [json_name = "instanceID"];
// Name of the workflow component.
string workflow_component = 2 [json_name = "workflowComponent"];
// Name of the event.
string event_name = 3 [json_name = "eventName"];
// Data associated with the event.
bytes event_data = 4;
}
// PurgeWorkflowRequest is the request for PurgeWorkflowBeta1.
message PurgeWorkflowRequest {
// ID of the workflow instance to purge.
string instance_id = 1 [json_name = "instanceID"];
// Name of the workflow component.
string workflow_component = 2 [json_name = "workflowComponent"];
}
// ShutdownRequest is the request for Shutdown.
message ShutdownRequest {
// Empty
}
|
mikeee/dapr
|
dapr/proto/runtime/v1/dapr.proto
|
proto
|
mit
| 41,262 |
# Sentry Service APIs
This folder is intended for the `sentry` service APIs that the `daprd` sidecars use to manage mTLS between services; the `sentry` service acts as a certificate authority. The `sentry` service is also used by the Dapr control plane services. For more details about the `sentry` service please refer to the [docs](https://docs.dapr.io/concepts/dapr-services/sentry/).
## Proto client generation
Prerequisites:
1. Install protoc version: [v4.24.4](https://github.com/protocolbuffers/protobuf/releases/tag/v4.24.4)
2. Install protoc-gen-go and protoc-gen-go-grpc
```bash
make init-proto
```
With protoc installed:
3. Generate gRPC proto clients from the root of the project
```bash
make gen-proto
```
4. See the auto-generated files in `pkg/proto`
|
mikeee/dapr
|
dapr/proto/sentry/v1/README.md
|
Markdown
|
mit
| 765 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
syntax = "proto3";
package dapr.proto.sentry.v1;
import "google/protobuf/timestamp.proto";
option go_package = "github.com/dapr/dapr/pkg/proto/sentry/v1;sentry";
service CA {
// A request for a time-bound certificate to be signed.
//
// The requesting side must provide an id for both loosely-based
// and strongly-based identities.
rpc SignCertificate (SignCertificateRequest) returns (SignCertificateResponse) {}
}
message SignCertificateRequest {
enum TokenValidator {
// Not specified - use the default value.
UNKNOWN = 0;
// Insecure validator (default on self-hosted).
INSECURE = 1;
// Kubernetes validator (default on Kubernetes).
KUBERNETES = 2;
// JWKS validator.
JWKS = 3;
}
string id = 1;
string token = 2;
string trust_domain = 3;
string namespace = 4;
// A PEM-encoded x509 CSR.
bytes certificate_signing_request = 5;
// Name of the validator to use, if not the default for the environment.
TokenValidator token_validator = 6;
}
message SignCertificateResponse {
// A PEM-encoded x509 Certificate.
bytes workload_certificate = 1;
// A list of PEM-encoded x509 Certificates that establish the trust chain
// between the workload certificate and the well-known trust root cert.
repeated bytes trust_chain_certificates = 2;
google.protobuf.Timestamp valid_until = 3;
}
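A sketch of what a SignCertificate call looks like from Go using the generated stubs: build a CSR, then send it with an identity and a validation token. The id/token/trust-domain values and the address are illustrative assumptions, and the insecure transport is for shape only; real callers bootstrap a secured channel.

```go
package main

import (
	"context"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"encoding/pem"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	sentryv1pb "github.com/dapr/dapr/pkg/proto/sentry/v1"
)

func main() {
	// Build a PEM-encoded CSR; an empty template is enough for this sketch.
	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	der, _ := x509.CreateCertificateRequest(rand.Reader, &x509.CertificateRequest{}, key)
	csr := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE REQUEST", Bytes: der})

	conn, err := grpc.Dial("localhost:50003", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	resp, err := sentryv1pb.NewCAClient(conn).SignCertificate(context.Background(), &sentryv1pb.SignCertificateRequest{
		Id:                        "myapp",
		Token:                     "<service-account-token>", // placeholder
		TrustDomain:               "public",
		Namespace:                 "default",
		CertificateSigningRequest: csr,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("certificate valid until %s", resp.GetValidUntil().AsTime())
}
```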
|
mikeee/dapr
|
dapr/proto/sentry/v1/sentry.proto
|
proto
|
mit
| 1,921 |
# current directory must be ./dist
FROM gcr.io/distroless/base-nossl:nonroot
ARG PKG_FILES
WORKDIR /
COPY /$PKG_FILES /
|
mikeee/dapr
|
docker/Dockerfile
|
Dockerfile
|
mit
| 121 |
# current directory must be ./dist
FROM golang:1.22.3
ARG PKG_FILES
RUN go install github.com/go-delve/delve/cmd/dlv@latest
RUN mv /go/bin/dlv /
WORKDIR /
COPY /$PKG_FILES /
|
mikeee/dapr
|
docker/Dockerfile-debug
|
none
|
mit
| 176 |
# Based on https://github.com/microsoft/vscode-dev-containers/tree/v0.224.3/containers/go/.devcontainer/base.Dockerfile
# [Choice] Go version: 1, 1.22.3, etc
ARG GOVERSION=1.22.3
FROM golang:${GOVERSION}-bullseye
# [Option] Install zsh
ARG INSTALL_ZSH="true"
# [Options] Versions
ARG KUBECTL_VERSION="latest"
ARG HELM_VERSION="latest"
ARG MINIKUBE_VERSION="latest"
ARG DAPR_CLI_VERSION="latest"
ARG PROTOC_VERSION="24.4"
ARG PROTOC_GEN_GO_VERSION="1.32.0"
ARG PROTOC_GEN_GO_GRPC_VERSION="1.3.0"
ARG GOLANGCI_LINT_VERSION="1.55.2"
# This Dockerfile adds a non-root 'dapr' user with sudo access. However, for Linux,
# this user's GID/UID must match your local user UID/GID to avoid permission issues
# with bind mounts. Update USER_UID / USER_GID if yours is not 1000. See
# https://aka.ms/vscode-remote/containers/non-root-user for details.
ARG USERNAME=dapr
ARG USER_UID=1000
ARG USER_GID=$USER_UID
# Other env vars
ENV GO111MODULE=auto
ENV CGO_ENABLED=0
ENV DOCKER_BUILDKIT=1
ENV DAPR_DEFAULT_IMAGE_REGISTRY=GHCR
# Setup image using library scripts and configure non-root user.
COPY library-scripts/* custom-scripts/* first-run-notice.txt /tmp/staging/
RUN apt-get update \
#
# Install needed packages and setup the environment and non-root user
&& bash /tmp/staging/common-debian.sh "${INSTALL_ZSH}" "${USERNAME}" "${USER_UID}" "${USER_GID}" "true" "true" "true" \
#
# Additional custom configurations for non-root user.
&& bash /tmp/staging/setup-user.sh "${USERNAME}" "${PATH}" \
#
# Install Docker CLI and Engine for Docker-in-Docker (using Docker CE).
&& bash /tmp/staging/docker-in-docker-debian.sh "true" "${USERNAME}" "false" "latest" \
#
# Install Kubectl, Helm and Minikube.
&& bash /tmp/staging/kubectl-helm-debian.sh "${KUBECTL_VERSION}" "${HELM_VERSION}" "${MINIKUBE_VERSION}" \
#
# Install Go tools.
&& bash /tmp/staging/go-debian.sh "none" "/usr/local/go" "/go" "${USERNAME}" "false" \
#
# Install tools for Dapr.
&& bash /tmp/staging/install-dapr-tools.sh "${USERNAME}" "/usr/local/go" "/go" "${DAPR_CLI_VERSION}" "${PROTOC_VERSION}" "${PROTOC_GEN_GO_VERSION}" "${PROTOC_GEN_GO_GRPC_VERSION}" "${GOLANGCI_LINT_VERSION}" \
#
# Copy our init scripts to /usr/local/share.
&& mv -f -t /usr/local/share/ /tmp/staging/docker-bind-mount.sh /tmp/staging/devcontainer-init.sh /tmp/staging/setup-docker-multiarch.sh \
&& chmod +x /usr/local/share/docker-bind-mount.sh /usr/local/share/devcontainer-init.sh /usr/local/share/setup-docker-multiarch.sh \
&& chown ${USERNAME}:root /usr/local/share/docker-bind-mount.sh /usr/local/share/devcontainer-init.sh /usr/local/share/setup-docker-multiarch.sh \
#
# Move the first run notice to the correct location for Codespaces.
&& mkdir -p /usr/local/etc/vscode-dev-containers/ \
&& mv -f /tmp/staging/first-run-notice.txt /usr/local/etc/vscode-dev-containers/ \
#
# Set permissions for the workspace folder
&& mkdir -p /workspaces && chown ${USERNAME} /workspaces \
#
# Clean up packages and the staging folder.
&& apt-get autoremove -y && apt-get clean -y && rm -rf /tmp/staging
# Mount for docker-in-docker
VOLUME [ "/var/lib/docker" ]
# Initialize Dapr devcontainer script
ENTRYPOINT [ "/usr/local/share/devcontainer-init.sh" ]
CMD [ "sleep", "infinity" ]
USER ${USERNAME}
|
mikeee/dapr
|
docker/Dockerfile-dev
|
none
|
mit
| 3,368 |
# Note: current directory must be ./dist
ARG MARINER_VERSION=2.0
FROM mcr.microsoft.com/cbl-mariner/distroless/minimal:${MARINER_VERSION}-nonroot
ARG PKG_FILES
# The base image specifies the user by its name ("nonroot").
# This is the equivalent UID: Kubernetes requires a numeric ID to support "runAsNonRoot: true"
USER 65532
WORKDIR /
COPY /$PKG_FILES /
|
mikeee/dapr
|
docker/Dockerfile-mariner
|
none
|
mit
| 358 |
ARG WINDOWS_VERSION=1809
# This version is confirmed to work for both GitHub and AKS
FROM ghcr.io/dapr/windows-base:$WINDOWS_VERSION
ARG PKG_FILES
WORKDIR /
COPY /$PKG_FILES.exe /
ENTRYPOINT [ "/setup-certificates.cmd", "&", "cmd", "/c" ]
|
mikeee/dapr
|
docker/Dockerfile-windows
|
none
|
mit
| 236 |
ARG WINDOWS_VERSION=1809
# This version is confirmed to work for both GitHub and AKS
FROM mcr.microsoft.com/windows/servercore:$WINDOWS_VERSION as servercore
# Nanoserver is missing exactly one dll that go needs. The servercore image has this dll, but
# is nearly 2GB. This has the potential to be somewhat fragile, but it is the only way I can
# find to keep the container image size small. To keep from doing this rather expensive
# multistage build every time, we build a small base image and push it to our repo
FROM mcr.microsoft.com/windows/nanoserver:$WINDOWS_VERSION
COPY --from=servercore /windows/system32/netapi32.dll /windows/system32/netapi32.dll
# certoc is required to add new certificates to the root store.
COPY --from=servercore /windows/system32/certoc.exe /windows/system32/certoc.exe
COPY windows-base-scripts/setup-certificates.cmd /setup-certificates.cmd
|
mikeee/dapr
|
docker/Dockerfile-windows-base
|
none
|
mit
| 877 |
ARG WINDOWS_VERSION=1809
FROM mcr.microsoft.com/windows/servercore:$WINDOWS_VERSION
RUN powershell -Command \
Add-WindowsFeature Web-Server; \
Invoke-WebRequest -UseBasicParsing -Uri "https://dotnetbinaries.blob.core.windows.net/servicemonitor/2.0.1.10/ServiceMonitor.exe" -OutFile "C:\ServiceMonitor.exe"
EXPOSE 80
ENTRYPOINT ["C:\\ServiceMonitor.exe", "w3svc"]
RUN powershell -Command \
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; \
Invoke-WebRequest -Method Get -Uri https://windows.php.net/downloads/releases/latest/php-8.0-nts-Win32-vs16-x64-latest.zip -OutFile c:\php.zip ; \
Expand-Archive -Path c:\php.zip -DestinationPath c:\php ; \
Remove-Item c:\php.zip -Force
ADD https://aka.ms/vs/16/release/VC_redist.x64.exe vc-redist.exe
RUN start /w vc-redist.exe /q /norestart & del vc-redist.exe
RUN powershell.exe -executionpolicy bypass "Add-WindowsFeature Web-CGI"
ADD https://curl.haxx.se/ca/cacert.pem ca.pem
RUN powershell -command cp ca.pem "C:\php\\" && powershell -command cp c:\php\php.ini-production c:\php\php.ini
RUN powershell -command echo 'extension=php_mbstring.dll' 'extension=php_curl.dll' 'extension=php_openssl.dll' '[PHP_FILEINFO]' 'extension=php_fileinfo.dll' ' ' ' ' 'curl.cainfo = c:\php\ca.pem' >> C:\php\php.ini
RUN %windir%\system32\inetsrv\appcmd set config /section:system.webServer/fastCGI /+[fullPath='c:\php\php-cgi.exe']
RUN %windir%\system32\inetsrv\appcmd set config /section:system.webServer/handlers /+[name='PHP_via_FastCGI',path='*.php',verb='*',modules='FastCgiModule',scriptProcessor='c:\PHP\php-cgi.exe',resourceType='Either']
RUN %windir%\system32\inetsrv\appcmd.exe set config -section:system.webServer/fastCgi /[fullPath='c:\PHP\php-cgi.exe'].instanceMaxRequests:10000
RUN %windir%\system32\inetsrv\appcmd.exe set config /section:defaultDocument /enabled:true /+files.[value='index.php']
RUN setx PATH /M %PATH%;C:\PHP
RUN powershell -NoProfile -Command Remove-Item -Recurse C:\inetpub\wwwroot\*
|
mikeee/dapr
|
docker/Dockerfile-windows-php-base
|
none
|
mit
| 2,010 |
ARG WINDOWS_VERSION=1809
FROM python:3.9-windowsservercore-$WINDOWS_VERSION as python-servercore
FROM mcr.microsoft.com/windows/nanoserver:$WINDOWS_VERSION
COPY --from=python-servercore C:\\Python C:\\Python
USER ContainerAdministrator
ENV PYTHONPATH C:\\Python;C:\\Python\\Scripts;C:\\Python\\DLLs;C:\\Python\\Lib;C:\\Python\\Lib\\plat-win;C:\\Python\\Lib\\site-packages
RUN setx.exe /m PATH %PATH%;%PYTHONPATH%
RUN setx.exe /m PYTHONPATH %PYTHONPATH%
RUN setx.exe /m PIP_CACHE_DIR C:\Users\ContainerUser\AppData\Local\pip\Cache
RUN reg.exe ADD HKLM\SYSTEM\CurrentControlSet\Control\FileSystem /v LongPathsEnabled /t REG_DWORD /d 1 /f
RUN pip install --upgrade pip
CMD ["C:\\python\\python.exe"]
|
mikeee/dapr
|
docker/Dockerfile-windows-python-base
|
none
|
mit
| 702 |
# Dockerfiles
This folder includes Dockerfiles to build the Dapr release and debug images, as well as the development container image for the Go dev environment.
* Dockerfile: Dapr Release Image
* Dockerfile-debug: Dapr Debug Image - WIP
* Dockerfile-dev: Development container image for VS Code Remote-Containers and GitHub Codespaces.
## Dev Container
### Container build args
The Dev Container can be rebuilt with custom options. Relevant args (and their default values) include:
* `GOVERSION` (default: `1.22.3`)
* `INSTALL_ZSH` (default: `true`)
* `KUBECTL_VERSION` (default: `latest`)
* `HELM_VERSION` (default: `latest`)
* `MINIKUBE_VERSION` (default: `latest`)
* `DAPR_CLI_VERSION` (default: `latest`)
* `PROTOC_VERSION` (default: `24.4`)
* `PROTOC_GEN_GO_VERSION` (default: `1.32`)
* `PROTOC_GEN_GO_GRPC_VERSION` (default: `1.3`)
* `GOLANGCI_LINT_VERSION` (default: `1.45.2`)
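For example, to rebuild the image with a pinned Go version and linter, a hypothetical invocation (mirroring the `build-dev-container` target in `docker/docker.mk`; adjust the tag, build args, and context to your setup) could look like:
```sh
docker build \
  --build-arg GOVERSION=1.22.3 \
  --build-arg GOLANGCI_LINT_VERSION=1.45.2 \
  -f docker/Dockerfile-dev \
  -t dapr-dev:custom \
  ./docker
```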
### Setup multi-arch Docker builds
Building multi-arch Docker images requires QEMU, which is not distributed as part of the container image.
To configure Docker for multi-arch builds, run this command inside the dev container:
```sh
/usr/local/share/setup-docker-multiarch.sh
```
|
mikeee/dapr
|
docker/README.md
|
Markdown
|
mit
| 1,182 |
#!/usr/bin/env bash
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Initializes the devcontainer tasks each time the container starts.
# Users can edit this copy under /usr/local/share in the container to
# customize this as needed for their custom localhost bindings.
set -e
echo "Running devcontainer-init.sh ..."
# Clone kubectl and minikube config from host if requested when running local devcontainer.
if [[ "${SYNC_LOCALHOST_KUBECONFIG,,}" == "true" && "${CODESPACES,,}" != "true" ]]; then
mkdir -p ${HOME}/.kube
if [ -d "${HOME}/.kube-localhost" ]; then
cp -r ${HOME}/.kube-localhost/* ${HOME}/.kube
fi
# [EXPERIMENTAL] As a convenience feature, when using localhost minikube cluster in the devcontainer,
# attempt to clone the credentials from the default localhost .minikube profile and fixup
# the container's copy of .kube/config with the correct endpoint and path to cloned credentials.
# It does not support modifying the minikube configuration from the container (minikube needs to already
# be started on the local host) and assumes the only kubernetes context pointing to a localhost
# server (i.e. 127.0.0.1 address) belongs to the minikube default profile and should be updated.
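# Example (hypothetical local devcontainer setup): set SYNC_LOCALHOST_KUBECONFIG=true in the
# container environment and mount the host's ~/.kube at ~/.kube-localhost (and ~/.minikube at
# ~/.minikube-localhost) so the cloning below takes effect.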
if [ -d "${HOME}/.minikube-localhost" ]; then
mkdir -p ${HOME}/.minikube
if [ -r ${HOME}/.minikube-localhost/ca.crt ]; then
cp -r ${HOME}/.minikube-localhost/ca.crt ${HOME}/.minikube
sed -i -r "s|(\s*certificate-authority:\s).*|\\1${HOME}\/.minikube\/ca.crt|g" ${HOME}/.kube/config
fi
if [ -r ${HOME}/.minikube-localhost/profiles/minikube/client.crt ]; then
cp -r ${HOME}/.minikube-localhost/profiles/minikube/client.crt ${HOME}/.minikube
sed -i -r "s|(\s*client-certificate:\s).*|\\1${HOME}\/.minikube\/client.crt|g" ${HOME}/.kube/config
fi
if [ -r ${HOME}/.minikube-localhost/profiles/minikube/client.key ]; then
cp -r ${HOME}/.minikube-localhost/profiles/minikube/client.key ${HOME}/.minikube
sed -i -r "s|(\s*client-key:\s).*|\\1${HOME}\/.minikube\/client.key|g" ${HOME}/.kube/config
fi
if [ -r ${HOME}/.minikube-localhost/profiles/minikube/config.json ]; then
ENDPOINT=$(grep -E '\"IP\":|\"Port\":' ${HOME}/.minikube-localhost/profiles/minikube/config.json \
| sed -r '{N;s/\s*\"IP\": \"(.+)\",\s*\"Port\": ([0-9]*),/\1:\2/;}')
sed -i -r 's/(server: https:\/\/)127.0.0.1:[0-9]*(.*)/\1'"${ENDPOINT}"'\2/' ${HOME}/.kube/config
fi
fi
fi
# Invoke /usr/local/share/docker-bind-mount.sh or docker-init.sh as appropriate
set +e
if [[ "${BIND_LOCALHOST_DOCKER,,}" == "true" ]]; then
echo "Invoking docker-bind-mount.sh ..."
exec /usr/local/share/docker-bind-mount.sh "$@"
else
echo "Invoking docker-init.sh ..."
exec /usr/local/share/docker-init.sh "$@"
fi
|
mikeee/dapr
|
docker/custom-scripts/devcontainer-init.sh
|
Shell
|
mit
| 3,442 |
#!/usr/bin/env bash
# Source: Adapted from https://github.com/microsoft/vscode-dev-containers/blob/v0.224.3/script-library/docker-debian.sh
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -e
# Wrapper function to only use sudo if not already root
sudoIf()
{
if [ "$(id -u)" -ne 0 ]; then
sudo "$@"
else
"$@"
fi
}
### Diff start
USERNAME=$(whoami)
SOURCE_SOCKET=/var/run/docker-host.sock
TARGET_SOCKET=/var/run/docker.sock
ENABLE_NONROOT_DOCKER="true"
if [ "${SOURCE_SOCKET}" != "${TARGET_SOCKET}" ]; then
sudoIf touch "${SOURCE_SOCKET}"
sudoIf ln -s "${SOURCE_SOCKET}" "${TARGET_SOCKET}"
fi
### Diff end
SOCAT_PATH_BASE=/tmp/vscr-docker-from-docker
SOCAT_LOG=${SOCAT_PATH_BASE}.log
SOCAT_PID=${SOCAT_PATH_BASE}.pid
# Log messages
log()
{
echo -e "[$(date)] $@" | sudoIf tee -a ${SOCAT_LOG} > /dev/null
}
echo -e "\n** $(date) **" | sudoIf tee -a ${SOCAT_LOG} > /dev/null
log "Ensuring ${USERNAME} has access to ${SOURCE_SOCKET} via ${TARGET_SOCKET}"
# If enabled, try to add a docker group with the right GID. If the group is root,
# fall back on using socat to forward the docker socket to another unix socket so
# that we can set permissions on it without affecting the host.
if [ "${ENABLE_NONROOT_DOCKER}" = "true" ] && [ "${SOURCE_SOCKET}" != "${TARGET_SOCKET}" ] && [ "${USERNAME}" != "root" ] && [ "${USERNAME}" != "0" ]; then
SOCKET_GID=$(stat -c '%g' ${SOURCE_SOCKET})
if [ "${SOCKET_GID}" != "0" ]; then
log "Adding user to group with GID ${SOCKET_GID}."
if [ "$(cat /etc/group | grep :${SOCKET_GID}:)" = "" ]; then
sudoIf groupadd --gid ${SOCKET_GID} docker-host
fi
# Add user to group if not already in it
if [ "$(id ${USERNAME} | grep -E "groups.*(=|,)${SOCKET_GID}\(")" = "" ]; then
sudoIf usermod -aG ${SOCKET_GID} ${USERNAME}
fi
else
# Enable proxy if not already running
if [ ! -f "${SOCAT_PID}" ] || ! ps -p $(cat ${SOCAT_PID}) > /dev/null; then
log "Enabling socket proxy."
log "Proxying ${SOURCE_SOCKET} to ${TARGET_SOCKET} for vscode"
sudoIf rm -rf ${TARGET_SOCKET}
(sudoIf socat UNIX-LISTEN:${TARGET_SOCKET},fork,mode=660,user=${USERNAME} UNIX-CONNECT:${SOURCE_SOCKET} 2>&1 | sudoIf tee -a ${SOCAT_LOG} > /dev/null & echo "$!" | sudoIf tee ${SOCAT_PID} > /dev/null)
else
log "Socket proxy already running."
fi
fi
log "Success"
fi
# Execute whatever commands were passed in (if any). This allows us
# to set this script to ENTRYPOINT while still executing the default CMD.
set +e
exec "$@"
|
mikeee/dapr
|
docker/custom-scripts/docker-bind-mount.sh
|
Shell
|
mit
| 3,180 |
#!/usr/bin/env bash
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Syntax: ./install-dapr-tools.sh [USERNAME] [GOROOT] [GOPATH] [DAPR_CLI_VERSION] [PROTOC_VERSION] [PROTOC_GEN_GO_VERSION] [PROTOC_GEN_GO_GRPC_VERSION] [GOLANGCI_LINT_VERSION]
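# Example (hypothetical values, in the positional order above):
#   ./install-dapr-tools.sh dapr /usr/local/go /go latest 24.4 1.32.0 1.3.0 1.55.2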
USERNAME=${1:-"dapr"}
GOROOT=${2:-"/usr/local/go"}
GOPATH=${3:-"/go"}
DAPR_CLI_VERSION=${4:-""}
PROTOC_VERSION=${5:-"24.4"}
PROTOC_GEN_GO_VERSION=${6:-"1.32.0"}
PROTOC_GEN_GO_GRPC_VERSION=${7:-"1.3.0"}
GOLANGCI_LINT_VERSION=${8:-"1.55.2"}
set -e
if [ "$(id -u)" -ne 0 ]; then
echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
exit 1
fi
# Install socat
apt-get install -y socat
# Install Dapr CLI
dapr_cli_ver=""
if [ "${DAPR_CLI_VERSION}" != "latest" ]; then
dapr_cli_ver="${DAPR_CLI_VERSION}"
fi
wget -q https://raw.githubusercontent.com/dapr/cli/master/install/install.sh -O - | /bin/bash -s "${dapr_cli_ver}"
# Install protoc compiler required by 'make gen-proto'
architecture="$(uname -m)"
case $architecture in
x86_64) architecture="x86_64";;
aarch64 | armv8*) architecture="aarch_64";;
i?86) architecture="x86_32";;
*) echo "(!) Architecture $architecture unsupported"; exit 1 ;;
esac
PROTOC_ZIP=protoc-${PROTOC_VERSION}-linux-${architecture}.zip
curl -LO "https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/${PROTOC_ZIP}"
unzip -o "${PROTOC_ZIP}" -d /usr/local bin/protoc
chmod -R 755 /usr/local/bin/protoc
unzip -o "${PROTOC_ZIP}" -d /usr/local 'include/*'
chmod -R 755 /usr/local/include/google/protobuf
rm -f "${PROTOC_ZIP}"
# Install protoc-gen-go and protoc-gen-go-grpc
# Must be installed as the non-root user
export GOBIN="${GOPATH}/bin"
sudo -u ${USERNAME} --preserve-env=GOPATH,GOBIN,GOROOT \
go install "google.golang.org/protobuf/cmd/protoc-gen-go@v${PROTOC_GEN_GO_VERSION}"
sudo -u ${USERNAME} --preserve-env=GOPATH,GOBIN,GOROOT \
go install "google.golang.org/grpc/cmd/protoc-gen-go-grpc@v${PROTOC_GEN_GO_GRPC_VERSION}"
# Install golangci-lint using the recommended method (best to avoid using go install according to their docs)
# Must be installed as the non-root user
sudo -u ${USERNAME} --preserve-env=GOLANGCI_LINT_VERSION,GOPATH,GOBIN,GOROOT \
sh -c 'curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b "${GOBIN}" "v${GOLANGCI_LINT_VERSION}"'
|
mikeee/dapr
|
docker/custom-scripts/install-dapr-tools.sh
|
Shell
|
mit
| 2,940 |
#!/usr/bin/env bash
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script sets up the current environment to be able to build multi-arch Docker images, installing QEMU
set -e
# Set up QEMU
docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64,arm
# Create a buildx builder with support for multi-arch
docker buildx create --use --name mybuilder
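# Example (hypothetical follow-up): use the builder for a multi-arch build
#   docker buildx build --platform linux/amd64,linux/arm64 -t example/image:dev .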
|
mikeee/dapr
|
docker/custom-scripts/setup-docker-multiarch.sh
|
Shell
|
mit
| 901 |
#!/usr/bin/env bash
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Syntax: ./setup-user.sh [USERNAME] [SECURE_PATH_BASE]
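# Example (hypothetical values): ./setup-user.sh dapr "$PATH"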
USERNAME=${1:-"dapr"}
SECURE_PATH_BASE=${2:-$PATH}
set -e
if [ "$(id -u)" -ne 0 ]; then
echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
exit 1
fi
# Update the secure_path base
echo "Defaults secure_path=\"/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/bin:${SECURE_PATH_BASE}\"" >> /etc/sudoers.d/secure_path
# Create the ~/.local/bin folder
sudo -u ${USERNAME} mkdir -p /home/${USERNAME}/.local/bin
|
mikeee/dapr
|
docker/custom-scripts/setup-user.sh
|
Shell
|
mit
| 1,164 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Docker image build and push settings
DOCKER:=docker
DOCKERFILE_DIR?=./docker
# If set to true, only the `dapr` image will be built and pushed to the registry.
# This is a "kitchen sink" image that contains all the components.
# The Helm charts will also be configured to use this image.
# This is useful for a faster development and testing experience.
# If set to false, individual images for daprd, operator, sentry, injector,
# and placement will be built and pushed to the registry.
ONLY_DAPR_IMAGE?=false
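# Example (hypothetical values), building and pushing only the kitchen-sink image:
#   make docker-build docker-push DAPR_REGISTRY=docker.io/example DAPR_TAG=dev TARGET_OS=linux TARGET_ARCH=amd64 ONLY_DAPR_IMAGE=true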
DAPR_SYSTEM_IMAGE_NAME?=$(RELEASE_NAME)
DAPR_RUNTIME_IMAGE_NAME?=daprd
DAPR_PLACEMENT_IMAGE_NAME?=placement
DAPR_SENTRY_IMAGE_NAME?=sentry
DAPR_OPERATOR_IMAGE_NAME?=operator
DAPR_INJECTOR_IMAGE_NAME?=injector
# build docker image for linux
BIN_PATH=$(OUT_DIR)/$(TARGET_OS)_$(TARGET_ARCH)
ifeq ($(TARGET_OS), windows)
DOCKERFILE?=Dockerfile-windows
BIN_PATH := $(BIN_PATH)/release
else ifeq ($(origin DEBUG), undefined)
DOCKERFILE?=Dockerfile
BIN_PATH := $(BIN_PATH)/release
else ifeq ($(DEBUG),0)
DOCKERFILE?=Dockerfile
BIN_PATH := $(BIN_PATH)/release
else
DOCKERFILE?=Dockerfile-debug
BIN_PATH := $(BIN_PATH)/debug
endif
DOCKER_IMAGE_ARCH:=amd64
ifeq ($(TARGET_ARCH),arm)
DOCKER_IMAGE_ARCH:=arm/v7
else ifeq ($(TARGET_ARCH),arm64)
DOCKER_IMAGE_ARCH:=arm64/v8
endif
DOCKER_IMAGE_PLATFORM=$(TARGET_OS)/$(DOCKER_IMAGE_ARCH)
DOCKER_BUILDX_NAME=daprbuild_multi
DOCKER_BUILDX_PLATFORMS=$(TARGET_OS)/arm/v7,$(TARGET_OS)/arm64/v8,$(TARGET_OS)/amd64
# Supported docker image architectures
DOCKER_MULTI_ARCH?=linux-amd64 linux-arm linux-arm64 windows-1809-amd64 windows-ltsc2022-amd64
################################################################################
# Target: docker-build, docker-push #
################################################################################
# If WINDOWS_VERSION is set, include it as the Windows version in the docker image tag.
# For example, foo.io/dapr/dapr:1.10.0-rc.2-windows-ltsc2022-amd64, where ltsc2022 is the Windows version.
# If unset, use a simpler tag, for example foo.io/dapr/dapr:1.10.0-rc.2-windows-amd64.
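# Example (hypothetical values), building a Windows variant image:
#   make docker-build DAPR_REGISTRY=docker.io/example DAPR_TAG=dev TARGET_OS=windows TARGET_ARCH=amd64 WINDOWS_VERSION=ltsc2022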
ifneq ($(WINDOWS_VERSION),)
BUILD_ARGS=--build-arg WINDOWS_VERSION=$(WINDOWS_VERSION)
DOCKER_IMAGE_VARIANT=$(TARGET_OS)-$(WINDOWS_VERSION)-$(TARGET_ARCH)
else
DOCKER_IMAGE_VARIANT=$(TARGET_OS)-$(TARGET_ARCH)
endif
ifeq ($(MANIFEST_TAG),)
MANIFEST_TAG=$(DAPR_TAG)
endif
ifeq ($(MANIFEST_LATEST_TAG),)
# artursouza: this is intentional - latest manifest tag will point to immutable version tags.
# For example: latest -> 1.11.0-linux-amd64 1.11.0-linux-arm 1.11.0-linux-arm64 ...
MANIFEST_LATEST_TAG=$(DAPR_TAG)
endif
LINUX_BINS_OUT_DIR=$(OUT_DIR)/linux_$(GOARCH)
DOCKER_IMAGE=$(DAPR_REGISTRY)/$(DAPR_SYSTEM_IMAGE_NAME)
DAPR_RUNTIME_DOCKER_IMAGE=$(DAPR_REGISTRY)/$(DAPR_RUNTIME_IMAGE_NAME)
DAPR_PLACEMENT_DOCKER_IMAGE=$(DAPR_REGISTRY)/$(DAPR_PLACEMENT_IMAGE_NAME)
DAPR_SENTRY_DOCKER_IMAGE=$(DAPR_REGISTRY)/$(DAPR_SENTRY_IMAGE_NAME)
DAPR_OPERATOR_DOCKER_IMAGE=$(DAPR_REGISTRY)/$(DAPR_OPERATOR_IMAGE_NAME)
DAPR_INJECTOR_DOCKER_IMAGE=$(DAPR_REGISTRY)/$(DAPR_INJECTOR_IMAGE_NAME)
BUILD_TAG=$(DAPR_TAG)-$(DOCKER_IMAGE_VARIANT)
# To use buildx: https://github.com/docker/buildx#docker-ce
export DOCKER_CLI_EXPERIMENTAL=enabled
# check the required environment variables
check-docker-env:
ifeq ($(DAPR_REGISTRY),)
$(error DAPR_REGISTRY environment variable must be set)
endif
ifeq ($(DAPR_TAG),)
$(error DAPR_TAG environment variable must be set)
endif
check-arch:
ifeq ($(TARGET_OS),)
$(error TARGET_OS environment variable must be set)
endif
ifeq ($(TARGET_ARCH),)
$(error TARGET_ARCH environment variable must be set)
endif
docker-build: SHELL := $(shell which bash)
docker-build: check-docker-env check-arch
$(info Building $(DOCKER_IMAGE):$(DAPR_TAG) docker images ...)
ifeq ($(TARGET_ARCH),$(TARGET_ARCH_LOCAL))
ifeq ($(ONLY_DAPR_IMAGE),true)
$(DOCKER) build --output type=docker --build-arg PKG_FILES=* $(BUILD_ARGS) -f $(DOCKERFILE_DIR)/$(DOCKERFILE) $(BIN_PATH) -t $(DOCKER_IMAGE):$(BUILD_TAG)
else
$(DOCKER) build --output type=docker --build-arg PKG_FILES=* $(BUILD_ARGS) -f $(DOCKERFILE_DIR)/$(DOCKERFILE) $(BIN_PATH) -t $(DOCKER_IMAGE):$(BUILD_TAG)
if [[ "$(BINARIES)" == *"daprd"* ]]; then \
$(DOCKER) build --output type=docker --build-arg PKG_FILES=daprd $(BUILD_ARGS) -f $(DOCKERFILE_DIR)/$(DOCKERFILE) $(BIN_PATH) -t $(DAPR_RUNTIME_DOCKER_IMAGE):$(BUILD_TAG); \
fi
if [[ "$(BINARIES)" == *"placement"* ]]; then \
$(DOCKER) build --output type=docker --build-arg PKG_FILES=placement $(BUILD_ARGS) -f $(DOCKERFILE_DIR)/$(DOCKERFILE) $(BIN_PATH) -t $(DAPR_PLACEMENT_DOCKER_IMAGE):$(BUILD_TAG); \
fi
if [[ "$(BINARIES)" == *"sentry"* ]]; then \
$(DOCKER) build --output type=docker --build-arg PKG_FILES=sentry $(BUILD_ARGS) -f $(DOCKERFILE_DIR)/$(DOCKERFILE) $(BIN_PATH) -t $(DAPR_SENTRY_DOCKER_IMAGE):$(BUILD_TAG); \
fi
if [[ "$(BINARIES)" == *"operator"* ]]; then \
$(DOCKER) build --output type=docker --build-arg PKG_FILES=operator $(BUILD_ARGS) -f $(DOCKERFILE_DIR)/$(DOCKERFILE) $(BIN_PATH) -t $(DAPR_OPERATOR_DOCKER_IMAGE):$(BUILD_TAG); \
fi
if [[ "$(BINARIES)" == *"injector"* ]]; then \
$(DOCKER) build --output type=docker --build-arg PKG_FILES=injector $(BUILD_ARGS) -f $(DOCKERFILE_DIR)/$(DOCKERFILE) $(BIN_PATH) -t $(DAPR_INJECTOR_DOCKER_IMAGE):$(BUILD_TAG); \
fi
endif
else
-$(DOCKER) run --privileged --rm tonistiigi/binfmt:qemu-v7.0.0 --install all
-$(DOCKER) buildx create --use --name $(DOCKER_BUILDX_NAME) --platform $(DOCKER_BUILDX_PLATFORMS)
$(DOCKER) buildx inspect $(DOCKER_BUILDX_NAME) --bootstrap
ifeq ($(ONLY_DAPR_IMAGE),true)
$(DOCKER) buildx build --build-arg PKG_FILES=* $(BUILD_ARGS) --platform $(DOCKER_IMAGE_PLATFORM) -f $(DOCKERFILE_DIR)/$(DOCKERFILE) $(BIN_PATH) -t $(DOCKER_IMAGE):$(BUILD_TAG) --provenance=false
else
$(DOCKER) buildx build --build-arg PKG_FILES=* $(BUILD_ARGS) --platform $(DOCKER_IMAGE_PLATFORM) -f $(DOCKERFILE_DIR)/$(DOCKERFILE) $(BIN_PATH) -t $(DOCKER_IMAGE):$(BUILD_TAG) --provenance=false
if [[ "$(BINARIES)" == *"daprd"* ]]; then \
$(DOCKER) buildx build --build-arg PKG_FILES=daprd $(BUILD_ARGS) --platform $(DOCKER_IMAGE_PLATFORM) -f $(DOCKERFILE_DIR)/$(DOCKERFILE) $(BIN_PATH) -t $(DAPR_RUNTIME_DOCKER_IMAGE):$(BUILD_TAG) --provenance=false; \
fi
if [[ "$(BINARIES)" == *"placement"* ]]; then \
$(DOCKER) buildx build --build-arg PKG_FILES=placement $(BUILD_ARGS) --platform $(DOCKER_IMAGE_PLATFORM) -f $(DOCKERFILE_DIR)/$(DOCKERFILE) $(BIN_PATH) -t $(DAPR_PLACEMENT_DOCKER_IMAGE):$(BUILD_TAG) --provenance=false; \
fi
if [[ "$(BINARIES)" == *"sentry"* ]]; then \
$(DOCKER) buildx build --build-arg PKG_FILES=sentry $(BUILD_ARGS) --platform $(DOCKER_IMAGE_PLATFORM) -f $(DOCKERFILE_DIR)/$(DOCKERFILE) $(BIN_PATH) -t $(DAPR_SENTRY_DOCKER_IMAGE):$(BUILD_TAG) --provenance=false; \
fi
if [[ "$(BINARIES)" == *"operator"* ]]; then \
$(DOCKER) buildx build --build-arg PKG_FILES=operator $(BUILD_ARGS) --platform $(DOCKER_IMAGE_PLATFORM) -f $(DOCKERFILE_DIR)/$(DOCKERFILE) $(BIN_PATH) -t $(DAPR_OPERATOR_DOCKER_IMAGE):$(BUILD_TAG) --provenance=false; \
fi
if [[ "$(BINARIES)" == *"injector"* ]]; then \
$(DOCKER) buildx build --build-arg PKG_FILES=injector $(BUILD_ARGS) --platform $(DOCKER_IMAGE_PLATFORM) -f $(DOCKERFILE_DIR)/$(DOCKERFILE) $(BIN_PATH) -t $(DAPR_INJECTOR_DOCKER_IMAGE):$(BUILD_TAG) --provenance=false; \
fi
endif
endif
# push docker image to the registry
docker-push: SHELL := $(shell which bash)
docker-push: docker-build
$(info Pushing $(DOCKER_IMAGE):$(DAPR_TAG) docker images ...)
ifeq ($(TARGET_ARCH),$(TARGET_ARCH_LOCAL))
ifeq ($(ONLY_DAPR_IMAGE),true)
$(DOCKER) push $(DOCKER_IMAGE):$(BUILD_TAG)
else
$(DOCKER) push $(DOCKER_IMAGE):$(BUILD_TAG)
if [[ "$(BINARIES)" == *"daprd"* ]]; then \
$(DOCKER) push $(DAPR_RUNTIME_DOCKER_IMAGE):$(BUILD_TAG); \
fi
if [[ "$(BINARIES)" == *"placement"* ]]; then \
$(DOCKER) push $(DAPR_PLACEMENT_DOCKER_IMAGE):$(BUILD_TAG); \
fi
if [[ "$(BINARIES)" == *"sentry"* ]]; then \
$(DOCKER) push $(DAPR_SENTRY_DOCKER_IMAGE):$(BUILD_TAG); \
fi
if [[ "$(BINARIES)" == *"operator"* ]]; then \
$(DOCKER) push $(DAPR_OPERATOR_DOCKER_IMAGE):$(BUILD_TAG); \
fi
if [[ "$(BINARIES)" == *"injector"* ]]; then \
$(DOCKER) push $(DAPR_INJECTOR_DOCKER_IMAGE):$(BUILD_TAG); \
fi
endif
else
-$(DOCKER) run --privileged --rm tonistiigi/binfmt:qemu-v7.0.0 --install all
-$(DOCKER) buildx create --use --name $(DOCKER_BUILDX_NAME) --platform $(DOCKER_BUILDX_PLATFORMS)
$(DOCKER) buildx inspect $(DOCKER_BUILDX_NAME) --bootstrap
ifeq ($(ONLY_DAPR_IMAGE),true)
$(DOCKER) buildx build --build-arg PKG_FILES=* --platform $(DOCKER_IMAGE_PLATFORM) -f $(DOCKERFILE_DIR)/$(DOCKERFILE) $(BIN_PATH) -t $(DOCKER_IMAGE):$(BUILD_TAG) --provenance=false --push
else
$(DOCKER) buildx build --build-arg PKG_FILES=* --platform $(DOCKER_IMAGE_PLATFORM) -f $(DOCKERFILE_DIR)/$(DOCKERFILE) $(BIN_PATH) -t $(DOCKER_IMAGE):$(BUILD_TAG) --provenance=false --push
if [[ "$(BINARIES)" == *"daprd"* ]]; then \
$(DOCKER) buildx build --build-arg PKG_FILES=daprd --platform $(DOCKER_IMAGE_PLATFORM) -f $(DOCKERFILE_DIR)/$(DOCKERFILE) $(BIN_PATH) -t $(DAPR_RUNTIME_DOCKER_IMAGE):$(BUILD_TAG) --provenance=false --push; \
fi
if [[ "$(BINARIES)" == *"placement"* ]]; then \
$(DOCKER) buildx build --build-arg PKG_FILES=placement --platform $(DOCKER_IMAGE_PLATFORM) -f $(DOCKERFILE_DIR)/$(DOCKERFILE) $(BIN_PATH) -t $(DAPR_PLACEMENT_DOCKER_IMAGE):$(BUILD_TAG) --provenance=false --push; \
fi
if [[ "$(BINARIES)" == *"sentry"* ]]; then \
$(DOCKER) buildx build --build-arg PKG_FILES=sentry --platform $(DOCKER_IMAGE_PLATFORM) -f $(DOCKERFILE_DIR)/$(DOCKERFILE) $(BIN_PATH) -t $(DAPR_SENTRY_DOCKER_IMAGE):$(BUILD_TAG) --provenance=false --push; \
fi
if [[ "$(BINARIES)" == *"operator"* ]]; then \
$(DOCKER) buildx build --build-arg PKG_FILES=operator --platform $(DOCKER_IMAGE_PLATFORM) -f $(DOCKERFILE_DIR)/$(DOCKERFILE) $(BIN_PATH) -t $(DAPR_OPERATOR_DOCKER_IMAGE):$(BUILD_TAG) --provenance=false --push; \
fi
if [[ "$(BINARIES)" == *"injector"* ]]; then \
$(DOCKER) buildx build --build-arg PKG_FILES=injector --platform $(DOCKER_IMAGE_PLATFORM) -f $(DOCKERFILE_DIR)/$(DOCKERFILE) $(BIN_PATH) -t $(DAPR_INJECTOR_DOCKER_IMAGE):$(BUILD_TAG) --provenance=false --push; \
fi
endif
endif
# push docker image to kind cluster
docker-push-kind: SHELL := $(shell which bash)
docker-push-kind: docker-build
$(info Pushing $(DOCKER_IMAGE):$(BUILD_TAG) docker image to kind cluster...)
ifeq ($(ONLY_DAPR_IMAGE),true)
kind load docker-image $(DOCKER_IMAGE):$(BUILD_TAG)
else
kind load docker-image $(DOCKER_IMAGE):$(BUILD_TAG)
if [[ "$(BINARIES)" == *"daprd"* ]]; then \
kind load docker-image $(DAPR_RUNTIME_DOCKER_IMAGE):$(BUILD_TAG); \
fi
if [[ "$(BINARIES)" == *"placement"* ]]; then \
kind load docker-image $(DAPR_PLACEMENT_DOCKER_IMAGE):$(BUILD_TAG); \
fi
if [[ "$(BINARIES)" == *"sentry"* ]]; then \
kind load docker-image $(DAPR_SENTRY_DOCKER_IMAGE):$(BUILD_TAG); \
fi
if [[ "$(BINARIES)" == *"operator"* ]]; then \
kind load docker-image $(DAPR_OPERATOR_DOCKER_IMAGE):$(BUILD_TAG); \
fi
if [[ "$(BINARIES)" == *"injector"* ]]; then \
kind load docker-image $(DAPR_INJECTOR_DOCKER_IMAGE):$(BUILD_TAG); \
fi
endif
# publish multi-arch docker images to the registry
docker-manifest-create: SHELL := $(shell which bash)
docker-manifest-create: check-docker-env
ifeq ($(ONLY_DAPR_IMAGE),true)
$(DOCKER) manifest create $(DOCKER_IMAGE):$(DAPR_TAG) $(DOCKER_MULTI_ARCH:%=$(DOCKER_IMAGE):$(MANIFEST_TAG)-%)
else
$(DOCKER) manifest create $(DOCKER_IMAGE):$(DAPR_TAG) $(DOCKER_MULTI_ARCH:%=$(DOCKER_IMAGE):$(MANIFEST_TAG)-%)
if [[ "$(BINARIES)" == *"daprd"* ]]; then \
$(DOCKER) manifest create $(DAPR_RUNTIME_DOCKER_IMAGE):$(DAPR_TAG) $(DOCKER_MULTI_ARCH:%=$(DAPR_RUNTIME_DOCKER_IMAGE):$(MANIFEST_TAG)-%); \
fi
if [[ "$(BINARIES)" == *"placement"* ]]; then \
$(DOCKER) manifest create $(DAPR_PLACEMENT_DOCKER_IMAGE):$(DAPR_TAG) $(DOCKER_MULTI_ARCH:%=$(DAPR_PLACEMENT_DOCKER_IMAGE):$(MANIFEST_TAG)-%); \
fi
if [[ "$(BINARIES)" == *"sentry"* ]]; then \
$(DOCKER) manifest create $(DAPR_SENTRY_DOCKER_IMAGE):$(DAPR_TAG) $(DOCKER_MULTI_ARCH:%=$(DAPR_SENTRY_DOCKER_IMAGE):$(MANIFEST_TAG)-%); \
fi
if [[ "$(BINARIES)" == *"operator"* ]]; then \
$(DOCKER) manifest create $(DAPR_OPERATOR_DOCKER_IMAGE):$(DAPR_TAG) $(DOCKER_MULTI_ARCH:%=$(DAPR_OPERATOR_DOCKER_IMAGE):$(MANIFEST_TAG)-%); \
fi
if [[ "$(BINARIES)" == *"injector"* ]]; then \
$(DOCKER) manifest create $(DAPR_INJECTOR_DOCKER_IMAGE):$(DAPR_TAG) $(DOCKER_MULTI_ARCH:%=$(DAPR_INJECTOR_DOCKER_IMAGE):$(MANIFEST_TAG)-%); \
fi
endif
ifeq ($(LATEST_RELEASE),true)
ifeq ($(ONLY_DAPR_IMAGE),true)
$(DOCKER) manifest create $(DOCKER_IMAGE):$(LATEST_TAG) $(DOCKER_MULTI_ARCH:%=$(DOCKER_IMAGE):$(MANIFEST_LATEST_TAG)-%)
else
$(DOCKER) manifest create $(DOCKER_IMAGE):$(LATEST_TAG) $(DOCKER_MULTI_ARCH:%=$(DOCKER_IMAGE):$(MANIFEST_LATEST_TAG)-%)
if [[ "$(BINARIES)" == *"daprd"* ]]; then \
$(DOCKER) manifest create $(DAPR_RUNTIME_DOCKER_IMAGE):$(LATEST_TAG) $(DOCKER_MULTI_ARCH:%=$(DAPR_RUNTIME_DOCKER_IMAGE):$(MANIFEST_LATEST_TAG)-%); \
fi
if [[ "$(BINARIES)" == *"placement"* ]]; then \
$(DOCKER) manifest create $(DAPR_PLACEMENT_DOCKER_IMAGE):$(LATEST_TAG) $(DOCKER_MULTI_ARCH:%=$(DAPR_PLACEMENT_DOCKER_IMAGE):$(MANIFEST_LATEST_TAG)-%); \
fi
if [[ "$(BINARIES)" == *"sentry"* ]]; then \
$(DOCKER) manifest create $(DAPR_SENTRY_DOCKER_IMAGE):$(LATEST_TAG) $(DOCKER_MULTI_ARCH:%=$(DAPR_SENTRY_DOCKER_IMAGE):$(MANIFEST_LATEST_TAG)-%); \
fi
if [[ "$(BINARIES)" == *"operator"* ]]; then \
$(DOCKER) manifest create $(DAPR_OPERATOR_DOCKER_IMAGE):$(LATEST_TAG) $(DOCKER_MULTI_ARCH:%=$(DAPR_OPERATOR_DOCKER_IMAGE):$(MANIFEST_LATEST_TAG)-%); \
fi
if [[ "$(BINARIES)" == *"injector"* ]]; then \
$(DOCKER) manifest create $(DAPR_INJECTOR_DOCKER_IMAGE):$(LATEST_TAG) $(DOCKER_MULTI_ARCH:%=$(DAPR_INJECTOR_DOCKER_IMAGE):$(MANIFEST_LATEST_TAG)-%); \
fi
endif
endif
docker-publish: SHELL := $(shell which bash)
docker-publish: docker-manifest-create
ifeq ($(ONLY_DAPR_IMAGE),true)
$(DOCKER) manifest push $(DOCKER_IMAGE):$(DAPR_TAG)
else
$(DOCKER) manifest push $(DOCKER_IMAGE):$(DAPR_TAG)
if [[ "$(BINARIES)" == *"daprd"* ]]; then \
$(DOCKER) manifest push $(DAPR_RUNTIME_DOCKER_IMAGE):$(DAPR_TAG); \
fi
if [[ "$(BINARIES)" == *"placement"* ]]; then \
$(DOCKER) manifest push $(DAPR_PLACEMENT_DOCKER_IMAGE):$(DAPR_TAG); \
fi
if [[ "$(BINARIES)" == *"sentry"* ]]; then \
$(DOCKER) manifest push $(DAPR_SENTRY_DOCKER_IMAGE):$(DAPR_TAG); \
fi
if [[ "$(BINARIES)" == *"operator"* ]]; then \
$(DOCKER) manifest push $(DAPR_OPERATOR_DOCKER_IMAGE):$(DAPR_TAG); \
fi
if [[ "$(BINARIES)" == *"injector"* ]]; then \
$(DOCKER) manifest push $(DAPR_INJECTOR_DOCKER_IMAGE):$(DAPR_TAG); \
fi
endif
ifeq ($(LATEST_RELEASE),true)
ifeq ($(ONLY_DAPR_IMAGE),true)
$(DOCKER) manifest push $(DOCKER_IMAGE):$(LATEST_TAG)
else
$(DOCKER) manifest push $(DOCKER_IMAGE):$(LATEST_TAG)
if [[ "$(BINARIES)" == *"daprd"* ]]; then \
$(DOCKER) manifest push $(DAPR_RUNTIME_DOCKER_IMAGE):$(LATEST_TAG); \
fi
if [[ "$(BINARIES)" == *"placement"* ]]; then \
$(DOCKER) manifest push $(DAPR_PLACEMENT_DOCKER_IMAGE):$(LATEST_TAG); \
fi
if [[ "$(BINARIES)" == *"sentry"* ]]; then \
$(DOCKER) manifest push $(DAPR_SENTRY_DOCKER_IMAGE):$(LATEST_TAG); \
fi
if [[ "$(BINARIES)" == *"operator"* ]]; then \
$(DOCKER) manifest push $(DAPR_OPERATOR_DOCKER_IMAGE):$(LATEST_TAG); \
fi
if [[ "$(BINARIES)" == *"injector"* ]]; then \
$(DOCKER) manifest push $(DAPR_INJECTOR_DOCKER_IMAGE):$(LATEST_TAG); \
fi
endif
endif
check-windows-version:
ifeq ($(WINDOWS_VERSION),)
$(error WINDOWS_VERSION environment variable must be set)
endif
docker-windows-base-build: check-windows-version
$(DOCKER) build --output type=docker --build-arg WINDOWS_VERSION=$(WINDOWS_VERSION) -f $(DOCKERFILE_DIR)/$(DOCKERFILE)-base $(DOCKERFILE_DIR) -t $(DAPR_REGISTRY)/windows-base:$(WINDOWS_VERSION)
$(DOCKER) build --output type=docker --build-arg WINDOWS_VERSION=$(WINDOWS_VERSION) -f $(DOCKERFILE_DIR)/$(DOCKERFILE)-php-base $(DOCKERFILE_DIR) -t $(DAPR_REGISTRY)/windows-php-base:$(WINDOWS_VERSION)
$(DOCKER) build --output type=docker --build-arg WINDOWS_VERSION=$(WINDOWS_VERSION) -f $(DOCKERFILE_DIR)/$(DOCKERFILE)-python-base $(DOCKERFILE_DIR) -t $(DAPR_REGISTRY)/windows-python-base:$(WINDOWS_VERSION)
docker-windows-base-push: check-windows-version
$(DOCKER) push $(DAPR_REGISTRY)/windows-base:$(WINDOWS_VERSION)
$(DOCKER) push $(DAPR_REGISTRY)/windows-php-base:$(WINDOWS_VERSION)
$(DOCKER) push $(DAPR_REGISTRY)/windows-python-base:$(WINDOWS_VERSION)
################################################################################
# Target: build-dev-container, push-dev-container #
################################################################################
# Update whenever you upgrade dev container image
DEV_CONTAINER_VERSION_TAG?=latest
# Use this to pin a specific version of the Dapr CLI to a devcontainer
DEV_CONTAINER_CLI_TAG?=1.9.0
# Dapr container image name
DEV_CONTAINER_IMAGE_NAME=dapr-dev
DEV_CONTAINER_DOCKERFILE=Dockerfile-dev
DOCKERFILE_DIR=./docker
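# Example (hypothetical values), building, tagging, and pushing the dev container:
#   make build-dev-container tag-dev-container push-dev-container DAPR_REGISTRY=docker.io/example DEV_CONTAINER_VERSION_TAG=dev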
check-docker-env-for-dev-container:
ifeq ($(DAPR_REGISTRY),)
$(error DAPR_REGISTRY environment variable must be set)
endif
build-dev-container:
ifeq ($(DAPR_REGISTRY),)
$(info DAPR_REGISTRY environment variable not set, tagging image without registry prefix.)
$(info `make tag-dev-container` should be run with DAPR_REGISTRY before `make push-dev-container`.)
$(DOCKER) build --output type=docker --build-arg DAPR_CLI_VERSION=$(DEV_CONTAINER_CLI_TAG) -f $(DOCKERFILE_DIR)/$(DEV_CONTAINER_DOCKERFILE) $(DOCKERFILE_DIR)/. -t $(DEV_CONTAINER_IMAGE_NAME):$(DEV_CONTAINER_VERSION_TAG)
else
$(DOCKER) build --output type=docker --build-arg DAPR_CLI_VERSION=$(DEV_CONTAINER_CLI_TAG) -f $(DOCKERFILE_DIR)/$(DEV_CONTAINER_DOCKERFILE) $(DOCKERFILE_DIR)/. -t $(DAPR_REGISTRY)/$(DEV_CONTAINER_IMAGE_NAME):$(DEV_CONTAINER_VERSION_TAG)
endif
tag-dev-container: check-docker-env-for-dev-container
$(DOCKER) tag $(DEV_CONTAINER_IMAGE_NAME):$(DEV_CONTAINER_VERSION_TAG) $(DAPR_REGISTRY)/$(DEV_CONTAINER_IMAGE_NAME):$(DEV_CONTAINER_VERSION_TAG)
push-dev-container: check-docker-env-for-dev-container
$(DOCKER) push $(DAPR_REGISTRY)/$(DEV_CONTAINER_IMAGE_NAME):$(DEV_CONTAINER_VERSION_TAG)
build-dev-container-all-arch:
ifeq ($(DAPR_REGISTRY),)
$(info DAPR_REGISTRY environment variable not set, tagging image without registry prefix.)
$(DOCKER) buildx build \
--build-arg DAPR_CLI_VERSION=$(DEV_CONTAINER_CLI_TAG) \
-f $(DOCKERFILE_DIR)/$(DEV_CONTAINER_DOCKERFILE) \
--platform linux/amd64,linux/arm64 \
-t $(DEV_CONTAINER_IMAGE_NAME):$(DEV_CONTAINER_VERSION_TAG) \
$(DOCKERFILE_DIR)/. \
--provenance=false
else
$(DOCKER) buildx build \
--build-arg DAPR_CLI_VERSION=$(DEV_CONTAINER_CLI_TAG) \
-f $(DOCKERFILE_DIR)/$(DEV_CONTAINER_DOCKERFILE) \
--platform linux/amd64,linux/arm64 \
-t $(DAPR_REGISTRY)/$(DEV_CONTAINER_IMAGE_NAME):$(DEV_CONTAINER_VERSION_TAG) \
$(DOCKERFILE_DIR)/. \
--provenance=false
endif
push-dev-container-all-arch: check-docker-env-for-dev-container
$(DOCKER) buildx build \
--build-arg DAPR_CLI_VERSION=$(DEV_CONTAINER_CLI_TAG) \
-f $(DOCKERFILE_DIR)/$(DEV_CONTAINER_DOCKERFILE) \
--platform linux/amd64,linux/arm64 \
--push \
-t $(DAPR_REGISTRY)/$(DEV_CONTAINER_IMAGE_NAME):$(DEV_CONTAINER_VERSION_TAG) \
$(DOCKERFILE_DIR)/. \
--provenance=false
|
mikeee/dapr
|
docker/docker.mk
|
mk
|
mit
| 20,055 |
👋 Welcome to the Dapr dev container! You are using the Dapr contributors image.
It includes everything needed to build, run, and test the https://github.com/dapr/dapr
and https://github.com/dapr/components-contrib repositories.
📚 Dapr contributing docs can be found at: https://docs.dapr.io/contributing
🎓 If you are looking to run the Dapr quickstarts and tutorials, head over to
https://github.com/dapr/quickstarts instead.
|
mikeee/dapr
|
docker/first-run-notice.txt
|
Text
|
mit
| 427 |
# Maintenance Note
The contents of this folder are sourced from [vscode-dev-containers](https://github.com/microsoft/vscode-dev-containers) repository's [script-library folder](https://github.com/microsoft/vscode-dev-containers/tree/master/script-library). Any changes that are necessary should be contributed upstream to that repository instead.
|
mikeee/dapr
|
docker/library-scripts/README.md
|
Markdown
|
mit
| 348 |
#!/usr/bin/env bash
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Installs common packages and utilities, configures locales, and creates or
# updates a non-root user for the devcontainer. Runs at image build time.
# Source: https://github.com/microsoft/vscode-dev-containers/blob/v0.224.3/script-library/common-debian.sh
#-------------------------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
#-------------------------------------------------------------------------------------------------------------
#
# Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/common.md
# Maintainer: The VS Code and Codespaces Teams
#
# Syntax: ./common-debian.sh [install zsh flag] [username] [user UID] [user GID] [upgrade packages flag] [install Oh My Zsh! flag] [Add non-free packages]
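# Example (hypothetical values, in the positional order above):
#   ./common-debian.sh true vscode automatic automatic true true false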
set -e
INSTALL_ZSH=${1:-"true"}
USERNAME=${2:-"automatic"}
USER_UID=${3:-"automatic"}
USER_GID=${4:-"automatic"}
UPGRADE_PACKAGES=${5:-"true"}
INSTALL_OH_MYS=${6:-"true"}
ADD_NON_FREE_PACKAGES=${7:-"false"}
SCRIPT_DIR="$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)"
MARKER_FILE="/usr/local/etc/vscode-dev-containers/common"
if [ "$(id -u)" -ne 0 ]; then
echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
exit 1
fi
# Ensure that login shells get the correct path if the user updated the PATH using ENV.
rm -f /etc/profile.d/00-restore-env.sh
echo "export PATH=${PATH//$(sh -lc 'echo $PATH')/\$PATH}" > /etc/profile.d/00-restore-env.sh
chmod +x /etc/profile.d/00-restore-env.sh
# If in automatic mode, determine if a user already exists, if not use vscode
if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
USERNAME=""
POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
for CURRENT_USER in ${POSSIBLE_USERS[@]}; do
if id -u ${CURRENT_USER} > /dev/null 2>&1; then
USERNAME=${CURRENT_USER}
break
fi
done
if [ "${USERNAME}" = "" ]; then
USERNAME=vscode
fi
elif [ "${USERNAME}" = "none" ]; then
USERNAME=root
USER_UID=0
USER_GID=0
fi
# Load markers to see which steps have already run
if [ -f "${MARKER_FILE}" ]; then
echo "Marker file found:"
cat "${MARKER_FILE}"
source "${MARKER_FILE}"
fi
# Ensure apt is in non-interactive to avoid prompts
export DEBIAN_FRONTEND=noninteractive
# Function to call apt-get if needed
apt_get_update_if_needed()
{
if [ ! -d "/var/lib/apt/lists" ] || [ "$(ls /var/lib/apt/lists/ | wc -l)" = "0" ]; then
echo "Running apt-get update..."
apt-get update
else
echo "Skipping apt-get update."
fi
}
# Install apt-utils first to avoid debconf warnings, then verify the presence of other common developer tools and dependencies
if [ "${PACKAGES_ALREADY_INSTALLED}" != "true" ]; then
package_list="apt-utils \
openssh-client \
gnupg2 \
dirmngr \
iproute2 \
procps \
lsof \
htop \
net-tools \
psmisc \
curl \
wget \
rsync \
ca-certificates \
unzip \
zip \
nano \
vim-tiny \
less \
jq \
lsb-release \
apt-transport-https \
dialog \
libc6 \
libgcc1 \
libkrb5-3 \
libgssapi-krb5-2 \
libicu[0-9][0-9] \
liblttng-ust0 \
libstdc++6 \
zlib1g \
locales \
sudo \
ncdu \
man-db \
strace \
manpages \
manpages-dev \
init-system-helpers"
# Needed for adding manpages-posix and manpages-posix-dev which are non-free packages in Debian
if [ "${ADD_NON_FREE_PACKAGES}" = "true" ]; then
# Bring in variables from /etc/os-release like VERSION_CODENAME
. /etc/os-release
sed -i -E "s/deb http:\/\/(deb|httpredir)\.debian\.org\/debian ${VERSION_CODENAME} main/deb http:\/\/\1\.debian\.org\/debian ${VERSION_CODENAME} main contrib non-free/" /etc/apt/sources.list
sed -i -E "s/deb-src http:\/\/(deb|httredir)\.debian\.org\/debian ${VERSION_CODENAME} main/deb http:\/\/\1\.debian\.org\/debian ${VERSION_CODENAME} main contrib non-free/" /etc/apt/sources.list
sed -i -E "s/deb http:\/\/(deb|httpredir)\.debian\.org\/debian ${VERSION_CODENAME}-updates main/deb http:\/\/\1\.debian\.org\/debian ${VERSION_CODENAME}-updates main contrib non-free/" /etc/apt/sources.list
sed -i -E "s/deb-src http:\/\/(deb|httpredir)\.debian\.org\/debian ${VERSION_CODENAME}-updates main/deb http:\/\/\1\.debian\.org\/debian ${VERSION_CODENAME}-updates main contrib non-free/" /etc/apt/sources.list
sed -i "s/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}\/updates main/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}\/updates main contrib non-free/" /etc/apt/sources.list
sed -i "s/deb-src http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}\/updates main/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}\/updates main contrib non-free/" /etc/apt/sources.list
sed -i "s/deb http:\/\/deb\.debian\.org\/debian ${VERSION_CODENAME}-backports main/deb http:\/\/deb\.debian\.org\/debian ${VERSION_CODENAME}-backports main contrib non-free/" /etc/apt/sources.list
sed -i "s/deb-src http:\/\/deb\.debian\.org\/debian ${VERSION_CODENAME}-backports main/deb http:\/\/deb\.debian\.org\/debian ${VERSION_CODENAME}-backports main contrib non-free/" /etc/apt/sources.list
# Handle bullseye location for security https://www.debian.org/releases/bullseye/amd64/release-notes/ch-information.en.html
sed -i "s/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}-security main/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}-security main contrib non-free/" /etc/apt/sources.list
sed -i "s/deb-src http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}-security main/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}-security main contrib non-free/" /etc/apt/sources.list
echo "Running apt-get update..."
apt-get update
package_list="${package_list} manpages-posix manpages-posix-dev"
else
apt_get_update_if_needed
fi
# Install libssl1.1 if available
if [[ ! -z $(apt-cache --names-only search ^libssl1.1$) ]]; then
package_list="${package_list} libssl1.1"
fi
# Install appropriate version of libssl1.0.x if available
libssl_package=$(dpkg-query -f '${db:Status-Abbrev}\t${binary:Package}\n' -W 'libssl1\.0\.?' 2>&1 || echo '')
if [ "$(echo "$LIlibssl_packageBSSL" | grep -o 'libssl1\.0\.[0-9]:' | uniq | sort | wc -l)" -eq 0 ]; then
if [[ ! -z $(apt-cache --names-only search ^libssl1.0.2$) ]]; then
# Debian 9
package_list="${package_list} libssl1.0.2"
elif [[ ! -z $(apt-cache --names-only search ^libssl1.0.0$) ]]; then
# Ubuntu 18.04, 16.04, earlier
package_list="${package_list} libssl1.0.0"
fi
fi
echo "Packages to verify are installed: ${package_list}"
apt-get -y install --no-install-recommends ${package_list} 2> >( grep -v 'debconf: delaying package configuration, since apt-utils is not installed' >&2 )
# Install git if not already installed (may be more recent than distro version)
if ! type git > /dev/null 2>&1; then
apt-get -y install --no-install-recommends git
fi
PACKAGES_ALREADY_INSTALLED="true"
fi
# Get to latest versions of all packages
if [ "${UPGRADE_PACKAGES}" = "true" ]; then
apt_get_update_if_needed
apt-get -y upgrade --no-install-recommends
apt-get autoremove -y
fi
# Ensure at least the en_US.UTF-8 UTF-8 locale is available.
# Common need for both applications and things like the agnoster ZSH theme.
if [ "${LOCALE_ALREADY_SET}" != "true" ] && ! grep -o -E '^\s*en_US.UTF-8\s+UTF-8' /etc/locale.gen > /dev/null; then
echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen
locale-gen
LOCALE_ALREADY_SET="true"
fi
# Create or update a non-root user to match UID/GID.
group_name="${USERNAME}"
if id -u ${USERNAME} > /dev/null 2>&1; then
# User exists, update if needed
if [ "${USER_GID}" != "automatic" ] && [ "$USER_GID" != "$(id -g $USERNAME)" ]; then
group_name="$(id -gn $USERNAME)"
groupmod --gid $USER_GID ${group_name}
usermod --gid $USER_GID $USERNAME
fi
if [ "${USER_UID}" != "automatic" ] && [ "$USER_UID" != "$(id -u $USERNAME)" ]; then
usermod --uid $USER_UID $USERNAME
fi
else
# Create user
if [ "${USER_GID}" = "automatic" ]; then
groupadd $USERNAME
else
groupadd --gid $USER_GID $USERNAME
fi
if [ "${USER_UID}" = "automatic" ]; then
useradd -s /bin/bash --gid $USERNAME -m $USERNAME
else
useradd -s /bin/bash --uid $USER_UID --gid $USERNAME -m $USERNAME
fi
fi
# Add sudo support for non-root user
if [ "${USERNAME}" != "root" ] && [ "${EXISTING_NON_ROOT_USER}" != "${USERNAME}" ]; then
echo $USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME
chmod 0440 /etc/sudoers.d/$USERNAME
EXISTING_NON_ROOT_USER="${USERNAME}"
fi
# ** Shell customization section **
if [ "${USERNAME}" = "root" ]; then
user_rc_path="/root"
else
user_rc_path="/home/${USERNAME}"
fi
# Restore user .bashrc defaults from skeleton file if it doesn't exist or is empty
if [ ! -f "${user_rc_path}/.bashrc" ] || [ ! -s "${user_rc_path}/.bashrc" ] ; then
cp /etc/skel/.bashrc "${user_rc_path}/.bashrc"
fi
# Restore user .profile defaults from skeleton file if it doesn't exist or is empty
if [ ! -f "${user_rc_path}/.profile" ] || [ ! -s "${user_rc_path}/.profile" ] ; then
cp /etc/skel/.profile "${user_rc_path}/.profile"
fi
# .bashrc/.zshrc snippet
rc_snippet="$(cat << 'EOF'
if [ -z "${USER}" ]; then export USER=$(whoami); fi
if [[ "${PATH}" != *"$HOME/.local/bin"* ]]; then export PATH="${PATH}:$HOME/.local/bin"; fi
# Display optional first run image specific notice if configured and terminal is interactive
if [ -t 1 ] && [[ "${TERM_PROGRAM}" = "vscode" || "${TERM_PROGRAM}" = "codespaces" ]] && [ ! -f "$HOME/.config/vscode-dev-containers/first-run-notice-already-displayed" ]; then
if [ -f "/usr/local/etc/vscode-dev-containers/first-run-notice.txt" ]; then
cat "/usr/local/etc/vscode-dev-containers/first-run-notice.txt"
elif [ -f "/workspaces/.codespaces/shared/first-run-notice.txt" ]; then
cat "/workspaces/.codespaces/shared/first-run-notice.txt"
fi
mkdir -p "$HOME/.config/vscode-dev-containers"
# Mark first run notice as displayed after 10s to avoid problems with fast terminal refreshes hiding it
((sleep 10s; touch "$HOME/.config/vscode-dev-containers/first-run-notice-already-displayed") &)
fi
# Set the default git editor if not already set
if [ -z "$(git config --get core.editor)" ] && [ -z "${GIT_EDITOR}" ]; then
if [ "${TERM_PROGRAM}" = "vscode" ]; then
if [[ -n $(command -v code-insiders) && -z $(command -v code) ]]; then
export GIT_EDITOR="code-insiders --wait"
else
export GIT_EDITOR="code --wait"
fi
fi
fi
EOF
)"
# code shim; falls back to code-insiders if code is not available
cat << 'EOF' > /usr/local/bin/code
#!/bin/sh
get_in_path_except_current() {
which -a "$1" | grep -A1 "$0" | grep -v "$0"
}
code="$(get_in_path_except_current code)"
if [ -n "$code" ]; then
exec "$code" "$@"
elif [ "$(command -v code-insiders)" ]; then
exec code-insiders "$@"
else
echo "code or code-insiders is not installed" >&2
exit 127
fi
EOF
chmod +x /usr/local/bin/code
# systemctl shim - tells people to use 'service' if systemd is not running
cat << 'EOF' > /usr/local/bin/systemctl
#!/bin/sh
set -e
if [ -d "/run/systemd/system" ]; then
exec /bin/systemctl "$@"
else
echo '\n"systemd" is not running in this container due to its overhead.\nUse the "service" command to start services instead. e.g.: \n\nservice --status-all'
fi
EOF
chmod +x /usr/local/bin/systemctl
# Codespaces bash and OMZ themes - partly inspired by https://github.com/ohmyzsh/ohmyzsh/blob/master/themes/robbyrussell.zsh-theme
codespaces_bash="$(cat \
<<'EOF'
# Codespaces bash prompt theme
__bash_prompt() {
local userpart='`export XIT=$? \
&& [ ! -z "${GITHUB_USER}" ] && echo -n "\[\033[0;32m\]@${GITHUB_USER} " || echo -n "\[\033[0;32m\]\u " \
&& [ "$XIT" -ne "0" ] && echo -n "\[\033[1;31m\]➜" || echo -n "\[\033[0m\]➜"`'
local gitbranch='`\
if [ "$(git config --get codespaces-theme.hide-status 2>/dev/null)" != 1 ]; then \
export BRANCH=$(git symbolic-ref --short HEAD 2>/dev/null || git rev-parse --short HEAD 2>/dev/null); \
if [ "${BRANCH}" != "" ]; then \
echo -n "\[\033[0;36m\](\[\033[1;31m\]${BRANCH}" \
&& if git ls-files --error-unmatch -m --directory --no-empty-directory -o --exclude-standard ":/*" > /dev/null 2>&1; then \
echo -n " \[\033[1;33m\]✗"; \
fi \
&& echo -n "\[\033[0;36m\]) "; \
fi; \
fi`'
local lightblue='\[\033[1;34m\]'
local removecolor='\[\033[0m\]'
PS1="${userpart} ${lightblue}\w ${gitbranch}${removecolor}\$ "
unset -f __bash_prompt
}
__bash_prompt
EOF
)"
codespaces_zsh="$(cat \
<<'EOF'
# Codespaces zsh prompt theme
__zsh_prompt() {
local prompt_username
if [ ! -z "${GITHUB_USER}" ]; then
prompt_username="@${GITHUB_USER}"
else
prompt_username="%n"
fi
PROMPT="%{$fg[green]%}${prompt_username} %(?:%{$reset_color%}➜ :%{$fg_bold[red]%}➜ )" # User/exit code arrow
PROMPT+='%{$fg_bold[blue]%}%(5~|%-1~/…/%3~|%4~)%{$reset_color%} ' # cwd
PROMPT+='$([ "$(git config --get codespaces-theme.hide-status 2>/dev/null)" != 1 ] && git_prompt_info)' # Git status
PROMPT+='%{$fg[white]%}$ %{$reset_color%}'
unset -f __zsh_prompt
}
ZSH_THEME_GIT_PROMPT_PREFIX="%{$fg_bold[cyan]%}(%{$fg_bold[red]%}"
ZSH_THEME_GIT_PROMPT_SUFFIX="%{$reset_color%} "
ZSH_THEME_GIT_PROMPT_DIRTY=" %{$fg_bold[yellow]%}✗%{$fg_bold[cyan]%})"
ZSH_THEME_GIT_PROMPT_CLEAN="%{$fg_bold[cyan]%})"
__zsh_prompt
EOF
)"
# Add RC snippet and custom bash prompt
if [ "${RC_SNIPPET_ALREADY_ADDED}" != "true" ]; then
echo "${rc_snippet}" >> /etc/bash.bashrc
echo "${codespaces_bash}" >> "${user_rc_path}/.bashrc"
echo 'export PROMPT_DIRTRIM=4' >> "${user_rc_path}/.bashrc"
if [ "${USERNAME}" != "root" ]; then
echo "${codespaces_bash}" >> "/root/.bashrc"
echo 'export PROMPT_DIRTRIM=4' >> "/root/.bashrc"
fi
chown ${USERNAME}:${group_name} "${user_rc_path}/.bashrc"
RC_SNIPPET_ALREADY_ADDED="true"
fi
# Optionally install and configure zsh and Oh My Zsh!
if [ "${INSTALL_ZSH}" = "true" ]; then
if ! type zsh > /dev/null 2>&1; then
apt_get_update_if_needed
apt-get install -y zsh
fi
if [ "${ZSH_ALREADY_INSTALLED}" != "true" ]; then
echo "${rc_snippet}" >> /etc/zsh/zshrc
ZSH_ALREADY_INSTALLED="true"
fi
# Adapted, simplified inline Oh My Zsh! install steps that add and default to a codespaces theme.
# See https://github.com/ohmyzsh/ohmyzsh/blob/master/tools/install.sh for official script.
oh_my_install_dir="${user_rc_path}/.oh-my-zsh"
if [ ! -d "${oh_my_install_dir}" ] && [ "${INSTALL_OH_MYS}" = "true" ]; then
template_path="${oh_my_install_dir}/templates/zshrc.zsh-template"
user_rc_file="${user_rc_path}/.zshrc"
umask g-w,o-w
mkdir -p ${oh_my_install_dir}
git clone --depth=1 \
-c core.eol=lf \
-c core.autocrlf=false \
-c fsck.zeroPaddedFilemode=ignore \
-c fetch.fsck.zeroPaddedFilemode=ignore \
-c receive.fsck.zeroPaddedFilemode=ignore \
"https://github.com/ohmyzsh/ohmyzsh" "${oh_my_install_dir}" 2>&1
echo -e "$(cat "${template_path}")\nDISABLE_AUTO_UPDATE=true\nDISABLE_UPDATE_PROMPT=true" > ${user_rc_file}
sed -i -e 's/ZSH_THEME=.*/ZSH_THEME="codespaces"/g' ${user_rc_file}
mkdir -p ${oh_my_install_dir}/custom/themes
echo "${codespaces_zsh}" > "${oh_my_install_dir}/custom/themes/codespaces.zsh-theme"
# Shrink git while still enabling updates
cd "${oh_my_install_dir}"
git repack -a -d -f --depth=1 --window=1
# Copy to non-root user if one is specified
if [ "${USERNAME}" != "root" ]; then
cp -rf "${user_rc_file}" "${oh_my_install_dir}" /root
chown -R ${USERNAME}:${group_name} "${user_rc_path}"
fi
fi
fi
# Persist image metadata info and install the devcontainer-info script if meta.env is found in the same directory
meta_info_script="$(cat << 'EOF'
#!/bin/sh
. /usr/local/etc/vscode-dev-containers/meta.env
# Minimal output
if [ "$1" = "version" ] || [ "$1" = "image-version" ]; then
echo "${VERSION}"
exit 0
elif [ "$1" = "release" ]; then
echo "${GIT_REPOSITORY_RELEASE}"
exit 0
elif [ "$1" = "content" ] || [ "$1" = "content-url" ] || [ "$1" = "contents" ] || [ "$1" = "contents-url" ]; then
echo "${CONTENTS_URL}"
exit 0
fi
#Full output
echo
echo "Development container image information"
echo
if [ ! -z "${VERSION}" ]; then echo "- Image version: ${VERSION}"; fi
if [ ! -z "${DEFINITION_ID}" ]; then echo "- Definition ID: ${DEFINITION_ID}"; fi
if [ ! -z "${VARIANT}" ]; then echo "- Variant: ${VARIANT}"; fi
if [ ! -z "${GIT_REPOSITORY}" ]; then echo "- Source code repository: ${GIT_REPOSITORY}"; fi
if [ ! -z "${GIT_REPOSITORY_RELEASE}" ]; then echo "- Source code release/branch: ${GIT_REPOSITORY_RELEASE}"; fi
if [ ! -z "${BUILD_TIMESTAMP}" ]; then echo "- Timestamp: ${BUILD_TIMESTAMP}"; fi
if [ ! -z "${CONTENTS_URL}" ]; then echo && echo "More info: ${CONTENTS_URL}"; fi
echo
EOF
)"
if [ -f "${SCRIPT_DIR}/meta.env" ]; then
mkdir -p /usr/local/etc/vscode-dev-containers/
cp -f "${SCRIPT_DIR}/meta.env" /usr/local/etc/vscode-dev-containers/meta.env
echo "${meta_info_script}" > /usr/local/bin/devcontainer-info
chmod +x /usr/local/bin/devcontainer-info
fi
# Write marker file
mkdir -p "$(dirname "${MARKER_FILE}")"
echo -e "\
PACKAGES_ALREADY_INSTALLED=${PACKAGES_ALREADY_INSTALLED}\n\
LOCALE_ALREADY_SET=${LOCALE_ALREADY_SET}\n\
EXISTING_NON_ROOT_USER=${EXISTING_NON_ROOT_USER}\n\
RC_SNIPPET_ALREADY_ADDED=${RC_SNIPPET_ALREADY_ADDED}\n\
ZSH_ALREADY_INSTALLED=${ZSH_ALREADY_INSTALLED}" > "${MARKER_FILE}"
echo "Done!"
|
mikeee/dapr
|
docker/library-scripts/common-debian.sh
|
Shell
|
mit
| 19,545 |
#!/usr/bin/env bash
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Installs the Docker (or Moby) CLI and Engine for Docker-in-Docker support in
# the devcontainer, along with a docker-init.sh entrypoint script to start the daemon.
# Source: https://github.com/microsoft/vscode-dev-containers/blob/v0.224.3/script-library/docker-in-docker-debian.sh
#-------------------------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
#-------------------------------------------------------------------------------------------------------------
#
# Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/docker-in-docker.md
# Maintainer: The VS Code and Codespaces Teams
#
# Syntax: ./docker-in-docker-debian.sh [enable non-root docker access flag] [non-root user] [use moby] [Engine/CLI Version]
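# Example (hypothetical values, in the positional order above):
#   ./docker-in-docker-debian.sh true vscode true latest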
ENABLE_NONROOT_DOCKER=${1:-"true"}
USERNAME=${2:-"automatic"}
USE_MOBY=${3:-"true"}
DOCKER_VERSION=${4:-"latest"} # The Docker/Moby Engine + CLI should match in version
MICROSOFT_GPG_KEYS_URI="https://packages.microsoft.com/keys/microsoft.asc"
set -e
if [ "$(id -u)" -ne 0 ]; then
echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
exit 1
fi
# Determine the appropriate non-root user
if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
USERNAME=""
POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
for CURRENT_USER in ${POSSIBLE_USERS[@]}; do
if id -u ${CURRENT_USER} > /dev/null 2>&1; then
USERNAME=${CURRENT_USER}
break
fi
done
if [ "${USERNAME}" = "" ]; then
USERNAME=root
fi
elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
USERNAME=root
fi
# Get central common setting
get_common_setting() {
if [ "${common_settings_file_loaded}" != "true" ]; then
curl -sfL "https://aka.ms/vscode-dev-containers/script-library/settings.env" 2>/dev/null -o /tmp/vsdc-settings.env || echo "Could not download settings file. Skipping."
common_settings_file_loaded=true
fi
if [ -f "/tmp/vsdc-settings.env" ]; then
local multi_line=""
if [ "$2" = "true" ]; then multi_line="-z"; fi
local result="$(grep ${multi_line} -oP "$1=\"?\K[^\"]+" /tmp/vsdc-settings.env | tr -d '\0')"
if [ ! -z "${result}" ]; then declare -g $1="${result}"; fi
fi
echo "$1=${!1}"
}
# Function to run apt-get if needed
apt_get_update_if_needed()
{
if [ ! -d "/var/lib/apt/lists" ] || [ "$(ls /var/lib/apt/lists/ | wc -l)" = "0" ]; then
echo "Running apt-get update..."
apt-get update
else
echo "Skipping apt-get update."
fi
}
# Checks if packages are installed and installs them if not
check_packages() {
if ! dpkg -s "$@" > /dev/null 2>&1; then
apt_get_update_if_needed
apt-get -y install --no-install-recommends "$@"
fi
}
# Ensure apt is in non-interactive to avoid prompts
export DEBIAN_FRONTEND=noninteractive
# Install dependencies
check_packages apt-transport-https curl ca-certificates pigz iptables gnupg2 dirmngr
if ! type git > /dev/null 2>&1; then
apt_get_update_if_needed
apt-get -y install git
fi
# Swap to legacy iptables for compatibility
if type iptables-legacy > /dev/null 2>&1; then
update-alternatives --set iptables /usr/sbin/iptables-legacy
update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy
fi
# Source /etc/os-release to get OS info
. /etc/os-release
# Fetch host/container arch.
architecture="$(dpkg --print-architecture)"
# Set up the necessary apt repos (either Microsoft's or Docker's)
if [ "${USE_MOBY}" = "true" ]; then
# Name of open source engine/cli
engine_package_name="moby-engine"
cli_package_name="moby-cli"
# Import key safely and import Microsoft apt repo
get_common_setting MICROSOFT_GPG_KEYS_URI
curl -sSL ${MICROSOFT_GPG_KEYS_URI} | gpg --dearmor > /usr/share/keyrings/microsoft-archive-keyring.gpg
echo "deb [arch=${architecture} signed-by=/usr/share/keyrings/microsoft-archive-keyring.gpg] https://packages.microsoft.com/repos/microsoft-${ID}-${VERSION_CODENAME}-prod ${VERSION_CODENAME} main" > /etc/apt/sources.list.d/microsoft.list
else
# Name of licensed engine/cli
engine_package_name="docker-ce"
cli_package_name="docker-ce-cli"
# Import key safely and import Docker apt repo
curl -fsSL https://download.docker.com/linux/${ID}/gpg | gpg --dearmor > /usr/share/keyrings/docker-archive-keyring.gpg
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/${ID} ${VERSION_CODENAME} stable" > /etc/apt/sources.list.d/docker.list
fi
# Refresh apt lists
apt-get update
# Soft version matching
if [ "${DOCKER_VERSION}" = "latest" ] || [ "${DOCKER_VERSION}" = "lts" ] || [ "${DOCKER_VERSION}" = "stable" ]; then
# Empty, meaning grab whatever "latest" is in apt repo
engine_version_suffix=""
cli_version_suffix=""
else
# Fetch a valid version from the apt-cache (e.g., the Microsoft repo appends +azure, breakfix, etc.)
docker_version_dot_escaped="${DOCKER_VERSION//./\\.}"
docker_version_dot_plus_escaped="${docker_version_dot_escaped//+/\\+}"
# Regex needs to handle debian package version number format: https://www.systutorials.com/docs/linux/man/5-deb-version/
docker_version_regex="^(.+:)?${docker_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
set +e # Don't exit if finding version fails - will handle gracefully
cli_version_suffix="=$(apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
engine_version_suffix="=$(apt-cache madison ${engine_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
set -e
if [ -z "${engine_version_suffix}" ] || [ "${engine_version_suffix}" = "=" ] || [ -z "${cli_version_suffix}" ] || [ "${cli_version_suffix}" = "=" ] ; then
echo "(!) No full or partial Docker / Moby version match found for \"${DOCKER_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
exit 1
fi
echo "engine_version_suffix ${engine_version_suffix}"
echo "cli_version_suffix ${cli_version_suffix}"
fi
# Install Docker / Moby CLI if not already installed
if type docker > /dev/null 2>&1 && type dockerd > /dev/null 2>&1; then
echo "Docker / Moby CLI and Engine already installed."
else
if [ "${USE_MOBY}" = "true" ]; then
apt-get -y install --no-install-recommends moby-cli${cli_version_suffix} moby-buildx moby-engine${engine_version_suffix}
apt-get -y install --no-install-recommends moby-compose || echo "(*) Package moby-compose (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
else
apt-get -y install --no-install-recommends docker-ce-cli${cli_version_suffix} docker-ce${engine_version_suffix}
fi
fi
echo "Finished installing docker / moby!"
### Diff start
# Install Docker Compose if not already installed
if type docker-compose > /dev/null 2>&1; then
echo "Docker Compose already installed."
else
target_compose_arch="$(uname -m)"
case $target_compose_arch in
x86_64) target_compose_arch="x86_64";;
aarch64 | armv8*) target_compose_arch="aarch64";;
*) echo "(!) Architecture $target_compose_arch unsupported"; exit 1 ;;
esac
# Get the last version from the GitHub APIs
docker_dash_compose_version=$(curl -s https://api.github.com/repos/docker/compose/releases/latest | jq -r ".tag_name")
echo "(*) Installing docker-compose ${docker_dash_compose_version}..."
curl -fsSL "https://github.com/docker/compose/releases/download/${docker_dash_compose_version}/docker-compose-linux-${target_compose_arch}" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
fi
### Diff end
# If init file already exists, exit
if [ -f "/usr/local/share/docker-init.sh" ]; then
echo "/usr/local/share/docker-init.sh already exists, so exiting."
exit 0
fi
echo "docker-init doesnt exist, adding..."
# Add user to the docker group
if [ "${ENABLE_NONROOT_DOCKER}" = "true" ]; then
if ! getent group docker > /dev/null 2>&1; then
groupadd docker
fi
usermod -aG docker ${USERNAME}
fi
tee /usr/local/share/docker-init.sh > /dev/null \
<< 'EOF'
#!/bin/sh
#-------------------------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
#-------------------------------------------------------------------------------------------------------------
set -e
dockerd_start="$(cat << 'INNEREOF'
# explicitly remove dockerd and containerd PID file to ensure that it can start properly if it was stopped uncleanly
# ie: docker kill <ID>
find /run /var/run -iname 'docker*.pid' -delete || :
find /run /var/run -iname 'container*.pid' -delete || :
## Dind wrapper script from docker team, adapted to a function
# Maintained: https://github.com/moby/moby/blob/master/hack/dind
export container=docker
if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then
mount -t securityfs none /sys/kernel/security || {
echo >&2 'Could not mount /sys/kernel/security.'
echo >&2 'AppArmor detection and --privileged mode might break.'
}
fi
# Mount /tmp (conditionally)
if ! mountpoint -q /tmp; then
mount -t tmpfs none /tmp
fi
# cgroup v2: enable nesting
if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
# move the processes from the root group to the /init group,
# otherwise writing subtree_control fails with EBUSY.
# An error during moving non-existent process (i.e., "cat") is ignored.
mkdir -p /sys/fs/cgroup/init
xargs -rn1 < /sys/fs/cgroup/cgroup.procs > /sys/fs/cgroup/init/cgroup.procs || :
# enable controllers
sed -e 's/ / +/g' -e 's/^/+/' < /sys/fs/cgroup/cgroup.controllers \
> /sys/fs/cgroup/cgroup.subtree_control
fi
## Dind wrapper over.
# Handle DNS
set +e
cat /etc/resolv.conf | grep -i 'internal.cloudapp.net'
if [ $? -eq 0 ]
then
echo "Setting dockerd Azure DNS."
CUSTOMDNS="--dns 168.63.129.16"
else
echo "Not setting dockerd DNS manually."
CUSTOMDNS=""
fi
set -e
# Start docker/moby engine
( dockerd $CUSTOMDNS > /tmp/dockerd.log 2>&1 ) &
INNEREOF
)"
# Start using sudo if not invoked as root
if [ "$(id -u)" -ne 0 ]; then
sudo /bin/sh -c "${dockerd_start}"
else
eval "${dockerd_start}"
fi
set +e
# Execute whatever commands were passed in (if any). This allows us
# to set this script to ENTRYPOINT while still executing the default CMD.
exec "$@"
EOF
chmod +x /usr/local/share/docker-init.sh
chown ${USERNAME}:root /usr/local/share/docker-init.sh
|
mikeee/dapr
|
docker/library-scripts/docker-in-docker-debian.sh
|
Shell
|
mit
| 12,116 |
#!/usr/bin/env bash
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Initializes the devcontainer tasks each time the container starts.
# Users can edit this copy under /usr/local/share in the container to
# customize this as needed for their custom localhost bindings.
# Source: https://github.com/microsoft/vscode-dev-containers/blob/v0.224.3/script-library/go-debian.sh
#-------------------------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
#-------------------------------------------------------------------------------------------------------------
#
# Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/go.md
# Maintainer: The VS Code and Codespaces Teams
#
# Syntax: ./go-debian.sh [Go version] [GOROOT] [GOPATH] [non-root user] [Add GOPATH, GOROOT to rc files flag] [Install tools flag]
TARGET_GO_VERSION=${1:-"latest"}
TARGET_GOROOT=${2:-"/usr/local/go"}
TARGET_GOPATH=${3:-"/go"}
USERNAME=${4:-"automatic"}
UPDATE_RC=${5:-"true"}
INSTALL_GO_TOOLS=${6:-"true"}
# https://www.google.com/linuxrepositories/
GO_GPG_KEY_URI="https://dl.google.com/linux/linux_signing_key.pub"
set -e
if [ "$(id -u)" -ne 0 ]; then
echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
exit 1
fi
# Ensure that login shells get the correct path if the user updated the PATH using ENV.
rm -f /etc/profile.d/00-restore-env.sh
echo "export PATH=${PATH//$(sh -lc 'echo $PATH')/\$PATH}" > /etc/profile.d/00-restore-env.sh
chmod +x /etc/profile.d/00-restore-env.sh
# Determine the appropriate non-root user
if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
USERNAME=""
POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
for CURRENT_USER in ${POSSIBLE_USERS[@]}; do
if id -u ${CURRENT_USER} > /dev/null 2>&1; then
USERNAME=${CURRENT_USER}
break
fi
done
if [ "${USERNAME}" = "" ]; then
USERNAME=root
fi
elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
USERNAME=root
fi
updaterc() {
if [ "${UPDATE_RC}" = "true" ]; then
echo "Updating /etc/bash.bashrc and /etc/zsh/zshrc..."
if [[ "$(cat /etc/bash.bashrc)" != *"$1"* ]]; then
echo -e "$1" >> /etc/bash.bashrc
fi
if [ -f "/etc/zsh/zshrc" ] && [[ "$(cat /etc/zsh/zshrc)" != *"$1"* ]]; then
echo -e "$1" >> /etc/zsh/zshrc
fi
fi
}
# Figure out the correct version if a three-part version number is not passed
find_version_from_git_tags() {
local variable_name=$1
local requested_version=${!variable_name}
if [ "${requested_version}" = "none" ]; then return; fi
local repository=$2
local prefix=${3:-"tags/v"}
local separator=${4:-"."}
local last_part_optional=${5:-"false"}
if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
local escaped_separator=${separator//./\\.}
local last_part
if [ "${last_part_optional}" = "true" ]; then
last_part="(${escaped_separator}[0-9]+)?"
else
last_part="${escaped_separator}[0-9]+"
fi
local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
else
set +e
declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
set -e
fi
fi
if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
echo -e "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
exit 1
fi
echo "${variable_name}=${!variable_name}"
}
# Get central common setting
get_common_setting() {
if [ "${common_settings_file_loaded}" != "true" ]; then
curl -sfL "https://aka.ms/vscode-dev-containers/script-library/settings.env" 2>/dev/null -o /tmp/vsdc-settings.env || echo "Could not download settings file. Skipping."
common_settings_file_loaded=true
fi
if [ -f "/tmp/vsdc-settings.env" ]; then
local multi_line=""
if [ "$2" = "true" ]; then multi_line="-z"; fi
local result="$(grep ${multi_line} -oP "$1=\"?\K[^\"]+" /tmp/vsdc-settings.env | tr -d '\0')"
if [ ! -z "${result}" ]; then declare -g $1="${result}"; fi
fi
echo "$1=${!1}"
}
# Function to run apt-get update if needed
apt_get_update_if_needed()
{
if [ ! -d "/var/lib/apt/lists" ] || [ "$(ls /var/lib/apt/lists/ | wc -l)" = "0" ]; then
echo "Running apt-get update..."
apt-get update
else
echo "Skipping apt-get update."
fi
}
# Checks if packages are installed and installs them if not
check_packages() {
if ! dpkg -s "$@" > /dev/null 2>&1; then
apt_get_update_if_needed
apt-get -y install --no-install-recommends "$@"
fi
}
export DEBIAN_FRONTEND=noninteractive
# Install curl, tar, git, other dependencies if missing
check_packages curl ca-certificates gnupg2 tar g++ gcc libc6-dev make pkg-config
if ! type git > /dev/null 2>&1; then
apt_get_update_if_needed
apt-get -y install --no-install-recommends git
fi
# Get closest match for version number specified
find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"
architecture="$(uname -m)"
case $architecture in
x86_64) architecture="amd64";;
aarch64 | armv8*) architecture="arm64";;
aarch32 | armv7* | armvhf*) architecture="armv6l";;
i?86) architecture="386";;
*) echo "(!) Architecture $architecture unsupported"; exit 1 ;;
esac
# Install Go
umask 0002
if ! cat /etc/group | grep -e "^golang:" > /dev/null 2>&1; then
groupadd -r golang
fi
usermod -a -G golang "${USERNAME}"
mkdir -p "${TARGET_GOROOT}" "${TARGET_GOPATH}"
if [ "${TARGET_GO_VERSION}" != "none" ] && ! type go > /dev/null 2>&1; then
 # Use a temporary location for gpg keys to avoid polluting the image
export GNUPGHOME="/tmp/tmp-gnupg"
mkdir -p ${GNUPGHOME}
chmod 700 ${GNUPGHOME}
get_common_setting GO_GPG_KEY_URI
curl -sSL -o /tmp/tmp-gnupg/golang_key "${GO_GPG_KEY_URI}"
gpg -q --import /tmp/tmp-gnupg/golang_key
echo "Downloading Go ${TARGET_GO_VERSION}..."
set +e
curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
exit_code=$?
set -e
if [ "$exit_code" != "0" ]; then
echo "(!) Download failed."
# Try one break fix version number less if we get a failure
major="$(echo "${TARGET_GO_VERSION}" | grep -oE '^[0-9]+' || echo '')"
minor="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
breakfix="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"
if [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
((minor=minor-1))
TARGET_GO_VERSION="${major}.${minor}"
find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"
else
((breakfix=breakfix-1))
TARGET_GO_VERSION="${major}.${minor}.${breakfix}"
fi
echo "Trying ${TARGET_GO_VERSION}..."
curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
fi
curl -fsSL -o /tmp/go.tar.gz.asc "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz.asc"
gpg --verify /tmp/go.tar.gz.asc /tmp/go.tar.gz
echo "Extracting Go ${TARGET_GO_VERSION}..."
tar -xzf /tmp/go.tar.gz -C "${TARGET_GOROOT}" --strip-components=1
rm -rf /tmp/go.tar.gz /tmp/go.tar.gz.asc /tmp/tmp-gnupg
else
echo "Go already installed. Skipping."
fi
# Install Go tools that are isImportant && !replacedByGopls based on
# https://github.com/golang/vscode-go/blob/v0.31.1/src/goToolsInformation.ts
GO_TOOLS="\
golang.org/x/tools/gopls@latest \
honnef.co/go/tools/cmd/staticcheck@latest \
golang.org/x/lint/golint@latest \
github.com/mgechev/revive@latest \
github.com/uudashr/gopkgs/v2/cmd/gopkgs@latest \
github.com/ramya-rao-a/go-outline@latest \
github.com/go-delve/delve/cmd/dlv@latest \
github.com/josharian/impl@latest \
github.com/fatih/gomodifytags@latest"
if [ "${INSTALL_GO_TOOLS}" = "true" ]; then
echo "Installing common Go tools..."
export PATH=${TARGET_GOROOT}/bin:${PATH}
mkdir -p /tmp/gotools /usr/local/etc/vscode-dev-containers ${TARGET_GOPATH}/bin
cd /tmp/gotools
export GOPATH=/tmp/gotools
export GOCACHE=/tmp/gotools/cache
# Use go get for versions of go under 1.16
go_install_command=install
if [[ "1.16" > "$(go version | grep -oP 'go\K[0-9]+\.[0-9]+(\.[0-9]+)?')" ]]; then
export GO111MODULE=on
go_install_command=get
echo "Go version < 1.16, using go get."
fi
(echo "${GO_TOOLS}" | xargs -n 1 go ${go_install_command} -v )2>&1 | tee -a /usr/local/etc/vscode-dev-containers/go.log
# Move Go tools into path and clean up
mv /tmp/gotools/bin/* ${TARGET_GOPATH}/bin/
rm -rf /tmp/gotools
fi
# Add GOPATH variable and bin directory into PATH in bashrc/zshrc files (unless disabled)
updaterc "$(cat << EOF
export GOPATH="${TARGET_GOPATH}"
if [[ "\${PATH}" != *"\${GOPATH}/bin"* ]]; then export PATH="\${PATH}:\${GOPATH}/bin"; fi
export GOROOT="${TARGET_GOROOT}"
if [[ "\${PATH}" != *"\${GOROOT}/bin"* ]]; then export PATH="\${PATH}:\${GOROOT}/bin"; fi
EOF
)"
chown -R :golang "${TARGET_GOROOT}" "${TARGET_GOPATH}"
chmod -R g+r+w "${TARGET_GOROOT}" "${TARGET_GOPATH}"
find "${TARGET_GOROOT}" -type d | xargs -n 1 chmod g+s
find "${TARGET_GOPATH}" -type d | xargs -n 1 chmod g+s
echo "Done!"
|
mikeee/dapr
|
docker/library-scripts/go-debian.sh
|
Shell
|
mit
| 10,929 |
#!/usr/bin/env bash
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Initializes the devcontainer tasks each time the container starts.
# Users can edit this copy under /usr/local/share in the container to
# customize this as needed for their custom localhost bindings.
# Source: https://github.com/microsoft/vscode-dev-containers/blob/v0.224.3/script-library/kubectl-helm-debian.sh
#-------------------------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
#-------------------------------------------------------------------------------------------------------------
#
# Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/kubectl-helm.md
# Maintainer: The VS Code and Codespaces Teams
#
# Syntax: ./kubectl-helm-debian.sh [kubectl version] [Helm version] [minikube version] [kubectl SHA256] [Helm SHA256] [minikube SHA256]
set -e
KUBECTL_VERSION="${1:-"latest"}"
HELM_VERSION="${2:-"latest"}"
MINIKUBE_VERSION="${3:-"none"}" # latest is also valid
KUBECTL_SHA256="${4:-"automatic"}"
HELM_SHA256="${5:-"automatic"}"
MINIKUBE_SHA256="${6:-"automatic"}"
USERNAME=${7:-"automatic"}
HELM_GPG_KEYS_URI="https://raw.githubusercontent.com/helm/helm/main/KEYS"
GPG_KEY_SERVERS="keyserver hkp://keyserver.ubuntu.com:80
keyserver hkps://keys.openpgp.org
keyserver hkp://keyserver.pgp.com"
if [ "$(id -u)" -ne 0 ]; then
echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
exit 1
fi
# Determine the appropriate non-root user
if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
USERNAME=""
POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
for CURRENT_USER in ${POSSIBLE_USERS[@]}; do
if id -u ${CURRENT_USER} > /dev/null 2>&1; then
USERNAME=${CURRENT_USER}
break
fi
done
if [ "${USERNAME}" = "" ]; then
USERNAME=root
fi
elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
USERNAME=root
fi
# Get central common setting
get_common_setting() {
if [ "${common_settings_file_loaded}" != "true" ]; then
curl -sfL "https://aka.ms/vscode-dev-containers/script-library/settings.env" 2>/dev/null -o /tmp/vsdc-settings.env || echo "Could not download settings file. Skipping."
common_settings_file_loaded=true
fi
if [ -f "/tmp/vsdc-settings.env" ]; then
local multi_line=""
if [ "$2" = "true" ]; then multi_line="-z"; fi
local result="$(grep ${multi_line} -oP "$1=\"?\K[^\"]+" /tmp/vsdc-settings.env | tr -d '\0')"
if [ ! -z "${result}" ]; then declare -g $1="${result}"; fi
fi
echo "$1=${!1}"
}
# Figure out the correct version if a three-part version number is not passed
find_version_from_git_tags() {
local variable_name=$1
local requested_version=${!variable_name}
if [ "${requested_version}" = "none" ]; then return; fi
local repository=$2
local prefix=${3:-"tags/v"}
local separator=${4:-"."}
local last_part_optional=${5:-"false"}
if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
local escaped_separator=${separator//./\\.}
local last_part
if [ "${last_part_optional}" = "true" ]; then
last_part="(${escaped_separator}[0-9]+)?"
else
last_part="${escaped_separator}[0-9]+"
fi
local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
else
set +e
declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
set -e
fi
fi
if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
echo -e "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
exit 1
fi
echo "${variable_name}=${!variable_name}"
}
# Function to run apt-get update if needed
apt_get_update_if_needed()
{
if [ ! -d "/var/lib/apt/lists" ] || [ "$(ls /var/lib/apt/lists/ | wc -l)" = "0" ]; then
echo "Running apt-get update..."
apt-get update
else
echo "Skipping apt-get update."
fi
}
# Checks if packages are installed and installs them if not
check_packages() {
if ! dpkg -s "$@" > /dev/null 2>&1; then
apt_get_update_if_needed
apt-get -y install --no-install-recommends "$@"
fi
}
# Ensure apt is in non-interactive mode to avoid prompts
export DEBIAN_FRONTEND=noninteractive
# Install dependencies
check_packages curl ca-certificates coreutils gnupg2 dirmngr bash-completion
if ! type git > /dev/null 2>&1; then
apt_get_update_if_needed
apt-get -y install --no-install-recommends git
fi
architecture="$(uname -m)"
case $architecture in
x86_64) architecture="amd64";;
aarch64 | armv8*) architecture="arm64";;
aarch32 | armv7* | armvhf*) architecture="arm";;
i?86) architecture="386";;
*) echo "(!) Architecture $architecture unsupported"; exit 1 ;;
esac
# Install the kubectl, verify checksum
echo "Downloading kubectl..."
if [ "${KUBECTL_VERSION}" = "latest" ] || [ "${KUBECTL_VERSION}" = "lts" ] || [ "${KUBECTL_VERSION}" = "current" ] || [ "${KUBECTL_VERSION}" = "stable" ]; then
KUBECTL_VERSION="$(curl -sSL https://dl.k8s.io/release/stable.txt)"
else
find_version_from_git_tags KUBECTL_VERSION https://github.com/kubernetes/kubernetes
fi
if [ "${KUBECTL_VERSION::1}" != 'v' ]; then
KUBECTL_VERSION="v${KUBECTL_VERSION}"
fi
curl -sSL -o /usr/local/bin/kubectl "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/${architecture}/kubectl"
chmod 0755 /usr/local/bin/kubectl
if [ "$KUBECTL_SHA256" = "automatic" ]; then
KUBECTL_SHA256="$(curl -sSL "https://dl.k8s.io/${KUBECTL_VERSION}/bin/linux/${architecture}/kubectl.sha256")"
fi
([ "${KUBECTL_SHA256}" = "dev-mode" ] || (echo "${KUBECTL_SHA256} */usr/local/bin/kubectl" | sha256sum -c -))
if ! type kubectl > /dev/null 2>&1; then
echo '(!) kubectl installation failed!'
exit 1
fi
# kubectl bash completion
kubectl completion bash > /etc/bash_completion.d/kubectl
# kubectl zsh completion
mkdir -p "/home/${USERNAME}/.oh-my-zsh/completions"
kubectl completion zsh > "/home/${USERNAME}/.oh-my-zsh/completions/_kubectl"
chown -R "${USERNAME}" "/home/${USERNAME}/.oh-my-zsh"
# Install Helm, verify signature and checksum
echo "Downloading Helm..."
find_version_from_git_tags HELM_VERSION "https://github.com/helm/helm"
if [ "${HELM_VERSION::1}" != 'v' ]; then
HELM_VERSION="v${HELM_VERSION}"
fi
mkdir -p /tmp/helm
helm_filename="helm-${HELM_VERSION}-linux-${architecture}.tar.gz"
tmp_helm_filename="/tmp/helm/${helm_filename}"
curl -sSL "https://get.helm.sh/${helm_filename}" -o "${tmp_helm_filename}"
curl -sSL "https://github.com/helm/helm/releases/download/${HELM_VERSION}/${helm_filename}.asc" -o "${tmp_helm_filename}.asc"
export GNUPGHOME="/tmp/helm/gnupg"
mkdir -p "${GNUPGHOME}"
chmod 700 ${GNUPGHOME}
get_common_setting HELM_GPG_KEYS_URI
get_common_setting GPG_KEY_SERVERS true
curl -sSL "${HELM_GPG_KEYS_URI}" -o /tmp/helm/KEYS
echo -e "disable-ipv6\n${GPG_KEY_SERVERS}" > ${GNUPGHOME}/dirmngr.conf
gpg -q --import "/tmp/helm/KEYS"
if ! gpg --verify "${tmp_helm_filename}.asc" > ${GNUPGHOME}/verify.log 2>&1; then
echo "Verification failed!"
cat /tmp/helm/gnupg/verify.log
exit 1
fi
if [ "${HELM_SHA256}" = "automatic" ]; then
curl -sSL "https://get.helm.sh/${helm_filename}.sha256" -o "${tmp_helm_filename}.sha256"
curl -sSL "https://github.com/helm/helm/releases/download/${HELM_VERSION}/${helm_filename}.sha256.asc" -o "${tmp_helm_filename}.sha256.asc"
if ! gpg --verify "${tmp_helm_filename}.sha256.asc" > /tmp/helm/gnupg/verify.log 2>&1; then
echo "Verification failed!"
cat /tmp/helm/gnupg/verify.log
exit 1
fi
HELM_SHA256="$(cat "${tmp_helm_filename}.sha256")"
fi
([ "${HELM_SHA256}" = "dev-mode" ] || (echo "${HELM_SHA256} *${tmp_helm_filename}" | sha256sum -c -))
tar xf "${tmp_helm_filename}" -C /tmp/helm
mv -f "/tmp/helm/linux-${architecture}/helm" /usr/local/bin/
chmod 0755 /usr/local/bin/helm
rm -rf /tmp/helm
if ! type helm > /dev/null 2>&1; then
echo '(!) Helm installation failed!'
exit 1
fi
# Install Minikube, verify checksum
if [ "${MINIKUBE_VERSION}" != "none" ]; then
echo "Downloading minikube..."
if [ "${MINIKUBE_VERSION}" = "latest" ] || [ "${MINIKUBE_VERSION}" = "lts" ] || [ "${MINIKUBE_VERSION}" = "current" ] || [ "${MINIKUBE_VERSION}" = "stable" ]; then
MINIKUBE_VERSION="latest"
else
find_version_from_git_tags MINIKUBE_VERSION https://github.com/kubernetes/minikube
if [ "${MINIKUBE_VERSION::1}" != "v" ]; then
MINIKUBE_VERSION="v${MINIKUBE_VERSION}"
fi
fi
# latest is also valid in the download URLs
curl -sSL -o /usr/local/bin/minikube "https://storage.googleapis.com/minikube/releases/${MINIKUBE_VERSION}/minikube-linux-${architecture}"
chmod 0755 /usr/local/bin/minikube
if [ "$MINIKUBE_SHA256" = "automatic" ]; then
MINIKUBE_SHA256="$(curl -sSL "https://storage.googleapis.com/minikube/releases/${MINIKUBE_VERSION}/minikube-linux-${architecture}.sha256")"
fi
([ "${MINIKUBE_SHA256}" = "dev-mode" ] || (echo "${MINIKUBE_SHA256} */usr/local/bin/minikube" | sha256sum -c -))
if ! type minikube > /dev/null 2>&1; then
echo '(!) minikube installation failed!'
exit 1
fi
fi
if ! type docker > /dev/null 2>&1; then
echo -e '\n(*) Warning: The docker command was not found.\n\nYou can use one of the following scripts to install it:\n\nhttps://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/docker-in-docker.md\n\nor\n\nhttps://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/docker.md'
fi
echo -e "\nDone!"
|
mikeee/dapr
|
docker/library-scripts/kubectl-helm-debian.sh
|
Shell
|
mit
| 11,034 |
@ECHO OFF
@REM Copyright 2022 The Dapr Authors
@REM Licensed under the Apache License, Version 2.0 (the "License");
@REM you may not use this file except in compliance with the License.
@REM You may obtain a copy of the License at
@REM http://www.apache.org/licenses/LICENSE-2.0
@REM Unless required by applicable law or agreed to in writing, software
@REM distributed under the License is distributed on an "AS IS" BASIS,
@REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@REM See the License for the specific language governing permissions and
@REM limitations under the License.
SET CERT_DIR=%SSL_CERT_DIR%
IF "%CERT_DIR%" == "" (
ECHO SSL_CERT_DIR environment variable not set, skipping certificate setup
EXIT /B 0
)
IF NOT EXIST "%CERT_DIR%" (
ECHO SSL_CERT_DIR environment variable is not set to a valid path
ECHO Found SSL_CERT_DIR="%CERT_DIR%"
EXIT /B 1
)
SET FOUND_CERT=0
@REM cmd.exe has no "CD -"; use PUSHD/POPD to save and restore the working directory
PUSHD %CERT_DIR%
FOR /R %%F IN (*) DO (
SET FOUND_CERT=1
ECHO Adding %%F to the root store
certoc.exe -addstore root %%F
)
POPD
IF %FOUND_CERT% == 0 (
ECHO No certificates found in %CERT_DIR%, skipping certificate setup
EXIT /B 0
)
|
mikeee/dapr
|
docker/windows-base-scripts/setup-certificates.cmd
|
Batchfile
|
mit
| 1,187 |
# Dapr runtime documentation
This file includes Dapr links to runtime development documentation. Please see the [Dapr documentation](https://docs.dapr.io) for docs on Getting Started, Quickstarts and samples, Concepts, Howtos, and more.
## Runtime docs
* **[Architecture Decision Records](./decision_records) -** Records for **architecturally significant** decisions
* **[Development documentation](./development) -** How to set up a developer environment and change Dapr code, for Dapr runtime contributors
* **[Release notes](./release_notes) -** Release notes for Dapr runtime, CLI, and SDKs
|
mikeee/dapr
|
docs/README.md
|
Markdown
|
mit
| 594 |
# API-001: State store API design
## Status
Accepted
## Context
We reviewed the state store API design for completeness and consistency.
## Decisions
* All requests/responses use a single parameter that represents the request/response object (see the sketch below). This allows us to extend or update request/response objects without changing the API.
* Add a Delete() method.
* Support bulk operations: BulkDelete() and BulkSet(). All operations in the bulk are expected to be completed within a single transaction scope.
* Support a generic BulkOperation() method, which is carried out as a single transaction.
* Transactions across multiple API requests are postponed to future versions.
* Actor state operations are moved to a new Actor interface. Please see [API-002-actor-api-design](./API-002-actor-api-design.md).
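To make the single-request-object decision concrete, here is a minimal Go sketch; the type and method names are illustrative assumptions, not Dapr's actual interfaces.
```
package state

// SetRequest is a single request object; new fields can be added later
// without changing the method signatures below.
type SetRequest struct {
	Key   string
	Value []byte
	ETag  string
}

// DeleteRequest carries everything Delete() needs in one object.
type DeleteRequest struct {
	Key  string
	ETag string
}

// Store sketches the decisions above: single request objects, a Delete()
// method, and bulk operations scoped to a single transaction.
type Store interface {
	Set(req *SetRequest) error
	Delete(req *DeleteRequest) error
	BulkSet(reqs []SetRequest) error       // one transaction scope
	BulkDelete(reqs []DeleteRequest) error // one transaction scope
	BulkOperation(ops []interface{}) error // generic, one transaction
}
```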
## Consequences
With these changes we should meet stateful actor requirements.
|
mikeee/dapr
|
docs/decision_records/api/API-001-state-store-api-design.md
|
Markdown
|
mit
| 885 |
# API-002: Actor API design
## Status
Accepted
## Context
Given that Dapr is shipping with language-specific Actor SDKs, we formally introduced an Actor API into Dapr to make Actors a first-class citizen in Dapr. The goal of this review was to ensure Dapr can provide strong support for the Service Fabric stateful actors programming model so that we can offer a migration path to the majority of existing actor users.
## Decisions
### Dapr
* A separate Actor interface is defined (a sketch follows this list).
* Actors should support multiple reminders and timers.
* Actor state access methods are encapsulated in the Actor interface itself.
* Actor interface shall support updating a group of key-value states in a single operation.
* Actor interface shall support deletion of an actor. If the actor is activated when the method is called, the in-flight transaction is allowed to complete, then the actor is deactivated, deleted, with associated state removed.
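A minimal Go sketch of what these decisions could look like as an interface; all names here are hypothetical, not Dapr's actual API.
```
package actors

import "time"

// Actor encapsulates state access, reminders, and timers, per the
// decisions above; every name in this sketch is an assumption.
type Actor interface {
	// State access is part of the Actor interface itself; passing a map
	// allows updating a group of key-value states in a single operation.
	GetState(key string) ([]byte, error)
	SetState(states map[string][]byte) error

	// Actors support multiple named reminders and timers.
	RegisterReminder(name string, due, period time.Duration) error
	RegisterTimer(name string, due, period time.Duration) error

	// Delete lets any in-flight transaction complete, deactivates the
	// actor, and removes its associated state.
	Delete() error
}
```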
### Non-Dapr
* Transactions across multiple API calls are left for future versions, if proven necessary. Due to the single-threaded guarantee, such a transaction scope might be unnecessary. However, if a developer expects actor code to behave atomically (in an implied transaction scope), we may have to implement this.
## Consequences
Dapr can provide strong support for the Service Fabric stateful actors programming model so that we can offer a migration path to most existing actor users.
|
mikeee/dapr
|
docs/decision_records/api/API-002-actor-api-design.md
|
Markdown
|
mit
| 1,440 |
# API-003: Messaging API names
## Status
Accepted
## Context
Our existing messaging interface names lack clarity. This review was to make sure messaging interfaces were named appropriately to avoid possible confusion.
## Decisions
### Dapr
* All messaging APIs are grouped under a **messaging** namespace/package.
* We define three distinct messaging interfaces (see the sketch below):
 - **direct**
 One-to-one messaging between two parties: a sender sending a message to a recipient.
 - **broadcast**
 One-to-many messaging: a sender sending a message to a list of recipients.
 - **pub-sub**
 Messaging through pub-sub: a publisher publishing to a topic, to which subscribers subscribe.
* We distinguish messaging from direct invocation. For messaging, we guarantee at-least-once delivery. For direct invocation, we provide best-effort delivery.
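As a sketch, the three interfaces could take the following Go shape; the signatures are assumptions for illustration only.
```
package messaging

// Direct is one-to-one: a sender sends a message to a single recipient,
// with at-least-once delivery.
type Direct interface {
	Send(recipient string, msg []byte) error
}

// Broadcast is one-to-many: a sender sends a message to a list of recipients.
type Broadcast interface {
	Broadcast(recipients []string, msg []byte) error
}

// PubSub decouples publishers from subscribers through topics.
type PubSub interface {
	Publish(topic string, msg []byte) error
	Subscribe(topic string, handler func(msg []byte)) error
}
```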
## Consequences
We should achieve better clarity on messaging behaviors.
|
mikeee/dapr
|
docs/decision_records/api/API-003-messaging-api-names.md
|
Markdown
|
mit
| 950 |
# API-004: Binding Manifests
## Status
Accepted
## Context
As we rename Event Sources to Bindings, and formally separate State Stores, Message Buses, and Bindings, we need to decide if we need to introduce different manifest types.
## Decisions
### Dapr
* All components use the same **Component** manifests, identified by a component **type** (sketched below).
* We'll come up with a mechanism to support pluggable secret stores. We'll support Kubernetes native secret store and Azure Key Vault in the initial release.
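A rough Go sketch of the single Component manifest shape; the struct mirrors the Component YAML used elsewhere in these decision records and is illustrative, not the actual CRD type.
```
package components

// MetadataItem is one name/value pair in a Component manifest.
type MetadataItem struct {
	Name  string `yaml:"name"`
	Value string `yaml:"value"`
}

// Component is the single manifest kind shared by state stores, message
// buses, and bindings; Spec.Type (e.g. "state.redis") selects the
// concrete component implementation.
type Component struct {
	Kind     string `yaml:"kind"` // always "Component"
	Metadata struct {
		Name string `yaml:"name"`
	} `yaml:"metadata"`
	Spec struct {
		Type     string         `yaml:"type"`
		Metadata []MetadataItem `yaml:"metadata"`
	} `yaml:"spec"`
}
```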
|
mikeee/dapr
|
docs/decision_records/api/API-004-binding-manifests.md
|
Markdown
|
mit
| 519 |
# API-005: State Store Behavior
## Status
Proposed
## Context
As we continue to solidify our API spec, we need to explicitly define component behaviors in the spec and make sure those are implemented in our implementation. This document captures our decisions on state store behaviors. It's expected that we'll create more such documents to capture explicit component behavior decisions.
## Decisions
### Concurrency model
* Dapr supports two flavors of optimistic concurrency: first-write wins and last-write wins. First-write wins is implemented through ETag.
* User code can express concurrency intention with a *config* annotation attached to a request. See **Config annotation** for details.
* Future version of Dapr may support call throttling through application channel.
* We'll choose last-write wins as the default.
### Consistency model
* Dapr supports both eventual consistency and strong consistency.
* Actors always use strong consistency.
* We'll choose eventual consistency as default for services other than actors.
### Actor Transaction
* Dapr-compatible Actor state stores shall support ACID transaction.
* Dapr doesn't mandate specific transaction isolation level at this point. However, when deemed necessary, we can easily add those to **Config annotation** as needed.
### Config annotation
* User payload can contain an optional **config** annotation/element that expresses various constraints and policies to be applied to the call (sketched below), including:
* Concurrency model: first-write or last-write
* Consistency model: strong or eventual
* Retry policies:
* Interval
 * Pattern: linear, exponential
* Circuit-breaker Timeout (before an open circuit-breaker is reset)
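A hedged Go sketch of this annotation's shape; the field names and JSON keys below are assumptions, not a spec.
```
package state

import "time"

// ConfigAnnotation is an illustrative shape for the optional "config"
// element a caller can attach to a request.
type ConfigAnnotation struct {
	Concurrency string      `json:"concurrency"` // "first-write" or "last-write" (default)
	Consistency string      `json:"consistency"` // "strong" or "eventual" (default)
	Retry       RetryPolicy `json:"retryPolicy"`
}

// RetryPolicy captures the retry and circuit-breaker knobs listed above.
type RetryPolicy struct {
	Interval              time.Duration `json:"interval"`
	Pattern               string        `json:"pattern"` // "linear" or "exponential"
	CircuitBreakerTimeout time.Duration `json:"circuitBreakerTimeout"`
}
```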
### State store configuration probe
* A Dapr-compatible state store shall provide an endpoint that answers a configuration probe and returns (among others):
* Supported concurrency model
* Supported consistency model
* A state store instance shall return the specific configuration of the current instance.
* It's considered out of scope to require state store to dynamically apply new configurations.
### Dapr
* Update state store API spec to reflect above decisions
* Create backlog of issues to implement above decisions
|
mikeee/dapr
|
docs/decision_records/api/API-005-state-store-behavior.md
|
Markdown
|
mit
| 2,303 |
# API-006: Universal Namespace
## Status
Proposed
## Context
For cloud-edge hybrid scenarios and multi-region deployment scenarios, we need the ability to facilitate communication across clusters. Specifically, it's desirable to have services scoped by cluster names so that a service in one cluster can address and invoke services on another trusted cluster through fully qualified names in a universal namespace, such as cluster1.serviceb.
## Decisions
We should consider adding universal namespace capabilities to Dapr.
|
mikeee/dapr
|
docs/decision_records/api/API-006-universal-namespace.md
|
Markdown
|
mit
| 537 |
# API-007: Tracing Endpoint
## Status
Proposed
## Context
We now support distributed tracing across Dapr sidecars, and we inject a correlation id into HTTP headers and gRPC metadata before we hand requests to user code. However, it's up to the user code to configure and implement proper tracing themselves.
## Decisions
We should consider adding a tracing endpoint that user code can call in to log traces and telemetries.
|
mikeee/dapr
|
docs/decision_records/api/API-007-tracing-endpoint.md
|
Markdown
|
mit
| 436 |
# API-008: Multi State store API design
## Status
Accepted
## Context
This decision record covers support for multiple state stores in Dapr. We agreed to introduce a breaking API change,
with no backward compatibility, to support multiple state stores.
With this change, the state API allows the app to target a specific state store by store name, for example:
v1.0/state/storeA/
v1.0/state/storeB/
Before this breaking change, the API was v1.0/state/`<key>`
We have reviewed the multi-store API design for completeness and consistency.
## Decisions
* New state store API is v1.0/state/`<store-name>`/
* If a user is using actors and wants to persist their state, the user must set actorStateStore: true in the configuration YAML.
If the attribute is not specified, or multiple actor state stores are configured, the Dapr runtime will log a warning.
The actor API to save state will fail in both of these scenarios: when no actor state store is specified, or when multiple actor stores
are specified.
* It is noted that after this breaking change, the actor state store has to be specified explicitly, unlike earlier where the first state store was picked up by default.
* It is noted that this breaking change will also require a CLI change to generate the state store YAML for Redis with actorStateStore.
* To provide multiple stores, the user has to provide a separate YAML for each store, giving each store a unique name.
* It is noted that the keyPrefix parameter controls the state key prefix: ${appid} prefixes keys with the microservice app ID, ${name} prefixes keys with the CRD component's unique name, ${none} applies no key prefix, and any other value is used as a custom key prefix.
For example, below are two sample YAML files, in which the Redis store is used as the actor state store while the MongoDB store is not.
```
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: myStore1 # Required. This is the unique name of the store.
spec:
  type: state.redis
  metadata:
  - name: keyPrefix
    value: none # Optional. default appid. such as: appid, none, name and custom key prefix
  - name: <KEY>
    value: <VALUE>
  - name: <KEY>
    value: <VALUE>
  - name: actorStateStore # Optional. default: false
    value: true
```
```
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: myStore2 # Required. This is the unique name of the store.
spec:
  type: state.mongodb
  metadata:
  - name: keyPrefix
    value: none # Optional. default appid. such as: appid, none, name and custom key prefix
  - name: <KEY>
    value: <VALUE>
  - name: <KEY>
    value: <VALUE>
```
So with the above example, the state APIs will be: v1.0/state/myStore1/`<key>`
and v1.0/state/myStore2/`<key>`
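As an illustration, a client could call the store-scoped endpoint as in the Go sketch below; the sidecar address localhost:3500 is an assumed default, not part of this record.
```
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Read key "planet" from the store named myStore1; the store name
	// is now part of the path, per the decision above.
	resp, err := http.Get("http://localhost:3500/v1.0/state/myStore1/planet")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s: %s\n", resp.Status, body)
}
```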
## Consequences
With these changes we should meet multiple state stores requirements.
|
mikeee/dapr
|
docs/decision_records/api/API-008-multi-state-store-api-design.md
|
Markdown
|
mit
| 2,855 |
# API-009: Bi-Directional Bindings
## Status
Accepted
## Context
As we want to provide bi-directional capabilities for bindings to allow for cases such as getting a blob from a storage account,
an API change is needed to account for the requested type of operation.
## Decisions
### Naming
It was decided to keep the bindings name as is. Alternative proposals included changing bindings to connectors, but a strong case couldn't be made in favor of connectors to justify the breaking change it would cause.
### Types
It was decided to keep the same YAML format for both input bindings and bi-directional bindings as it is today.
After careful inspection, splitting into two types (for example, trigger bindings and bindings) would incur significant maintenance overhead for the app operator and
did not provide meaningful value.
In addition, there was no feedback from the community or prospective users that input bindings and output bindings were confusing in any way.
### API structure
It was decided that the API url will be kept as: `http://localhost:<port>/v1.0/bindings/<name>`.
The verb for the HTTP API will remain POST/PUT, and the type of operation will be part of a versioned, structured schema for bindings.
This is not a breaking change.
### Schema and versioning
In accordance with our decision to work towards enterprise versioning, it was accepted that schemas will include a `version` field in
the payload to specify which version of the given component needs to be used for the given payload.
In addition, an extra field will be added to denote the type of operation that the binding supports, for example: `get`, `list`, `create`, etc.
Binding components will provide the means for the Dapr runtime to query for their supported capabilities and return a validation error if the operation type is not supported.
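A hedged Go sketch of what such a versioned binding payload could look like; the exact field names are not specified in this record and are assumptions.
```
package bindings

import "encoding/json"

// InvokeRequest is an illustrative shape for the POST/PUT body sent to
// /v1.0/bindings/<name>: a schema version, an operation type, and the data.
type InvokeRequest struct {
	Version   string            `json:"version"`   // schema/component version
	Operation string            `json:"operation"` // e.g. "get", "list", "create"
	Metadata  map[string]string `json:"metadata,omitempty"`
	Data      json.RawMessage   `json:"data,omitempty"`
}
```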
|
mikeee/dapr
|
docs/decision_records/api/API-009-bidirectional-bindings.md
|
Markdown
|
mit
| 1,849 |
# API-010: Do not implement App Callback Versioning for HTTP
## Status
Accepted
## Context
There was a proposal to introduce versioning for HTTP app callbacks. The goal of this review was to understand whether versioning was required and how it could handle situations post v1.0 of Dapr.
## Decisions
- Introducing versioning to app callback APIs would require changes to user applications, which is not feasible
- There would be no way for the Dapr runtime to find out the app callback version beforehand
We decided not to introduce such a versioning scheme on the app callback APIs. Post v1.0, if required, the versioning could be implemented inside the payload but not on the API itself. A missing version in the payload could imply v1.0.
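A minimal Go sketch of the payload-level fallback described above, where a missing version implies v1.0; all names are illustrative.
```
package callback

// Payload is illustrative: versioning lives inside the payload, not in the API.
type Payload struct {
	Version string `json:"version,omitempty"`
	Data    []byte `json:"data"`
}

// EffectiveVersion treats a missing version field as v1.0.
func (p Payload) EffectiveVersion() string {
	if p.Version == "" {
		return "1.0"
	}
	return p.Version
}
```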
|
mikeee/dapr
|
docs/decision_records/api/API-010-appcallback-versioning.md
|
Markdown
|
mit
| 757 |
# API-011: State Store APIs Parity
## Status
Accepted
## Context
We reviewed the parity of the state store APIs.
## Decisions
* GetState APIs continue to have the single-key Get and Bulk Get behavior of the current 0.10.0 version.
* The SaveState API will continue to have one endpoint. If the user wants to save a single key, the same Save State API is used,
passing a single item in the bulk set.
Potential issues arise if the following new single-key Save State API is introduced:
`Post : state/{storeName}/{key}`
This will conflict with
- State Transaction API, if the key is "transaction"
- GetBulkState API, if the key is "bulk"
So the decision is to continue the Save State API behaviour of the current 0.10.0 version.
* Bulk Delete API might come in future versions based on the scenarios.
## Consequences
No changes are needed to bring parity among the state store APIs. The APIs remain the same as in the current 0.10.0 version.
|
mikeee/dapr
|
docs/decision_records/api/API-011-state-store-api-parity.md
|
Markdown
|
mit
| 994 |
# API-012: Content Type
## Status
Accepted
## Context
Not adding content-type to state store, pubsub and bindings.
## Decisions
* We will not add content-type since it is persisted metadata, and it can cause problems such as:
 * Long-term support: metadata persisted previously would need to be supported indefinitely.
 * An added requirement for components to implement, leading to potentially hacky implementations that persist metadata side-by-side with data.
Original issue and discussion: https://github.com/dapr/dapr/issues/2026
## Consequences
SDKs need to handle deserialization on their end, requiring enough context in the API to determine how to handle type deserialization.
|
mikeee/dapr
|
docs/decision_records/api/API-012-content-type.md
|
Markdown
|
mit
| 696 |
# ARC-001: Refactor for modularity and testability
## Status
Accepted
## Context
As we keep building up Dapr features, it becomes apparent that we need to refactor the existing code base to reinforce component modularity. This will improve testability and maintainability in the long run. This refactor also lays the foundation for opening up extensibility points (such as Bindings) to the community.
## Decisions
### Dapr
* Formally separate hosting and API implementations. Hosting provides communication protocols (HTTP/gRPC) as different access heads to the same Dapr API implementation.
* Ensure consistency between gRPC and HTTP interface.
* Separate binding implementations to a separate repository.
* Use smart defaults for configurable parameters.
* Rename Dapr runtime binary from **dapr** to **daprd**.
### Non-Dapr
* We may consider allowing Dapr to dynamically load bindings during runtime. However, we are not going to implement this unless it's justified by customer asks.
* A unified configuration file that includes paths to individual configuration files.
* Provide a Discovery building block with hopefully pluggable discovery mechanisms (such as a custom DNS).
## Consequences
This will improve testability and maintainability in the long run.
|
mikeee/dapr
|
docs/decision_records/architecture/ARC-001-refactor-for-modularity-and-testability.md
|
Markdown
|
mit
| 1,287 |
# ARC-002: Multitenancy
## Status
Placeholder
## Context
We need to make sure Dapr works well in a multitenant environment, especially a serverless environment in which multiple applications share the same compute resource pool. We haven't identified explicit actionable items. This document serves as a placeholder to capture such requirements.
|
mikeee/dapr
|
docs/decision_records/architecture/ARC-002-multitenancy.md
|
Markdown
|
mit
| 349 |
# ARC-003: gRPC and Protobuf message coding convention
## Status
Proposed
## Context
We have defined gRPC services and protobuf messages without a convention, which has resulted in duplicated protobuf definitions and inconsistent names for services and messages. This record therefore defines a minimum-level coding convention for protobuf messages to improve the quality of gRPC/protobuf message definitions.
## Decisions
* Use a `google.protobuf.Any` data field only if the message field conveys a serialized protobuf message with a type URL. Otherwise, use an explicit data type or protobuf message.
* Use the `Request` suffix for gRPC request message names and the `Response` suffix for gRPC response message names
* Do not use `Client` and `Service` suffixes for gRPC service names, e.g. (x) DaprClient, DaprService
* Avoid duplicated protobuf message definitions by defining shared messages in a shared proto file
* Define and use an enum type if a field accepts only predefined values.
## Consequences
This allows us to define consistent, readable gRPC services and protobuf messages.
|
mikeee/dapr
|
docs/decision_records/architecture/ARC-003-grpc-protobuf-coding-convention.md
|
Markdown
|
mit
| 1,086 |
# CLI-001: CLI and runtime versioning
## Status
Accepted
## Context
As we formally establish Dapr component version, we need to decide if we want to couple CLI versions with runtime versions.
## Decisions
* We'll keep CLI versioning and runtime versioning separate.
* The CLI will pull down the latest runtime binary during the *init()* command.
* The version scheme is *major.minor.revision.build* for both the CLI and the runtime.
## Consequences
This allows each Dapr component to evolve independently.
|
mikeee/dapr
|
docs/decision_records/cli/CLI-001-cli-and-runtime-versioning.md
|
Markdown
|
mit
| 513 |
# CLI-002: Self-Hosted mode Init and Uninstall behaviours
## Status
Accepted
## Context
Changes in behavior of `init` and `uninstall` in self-hosted mode. Discussed in this [issue](https://github.com/dapr/cli/issues/411).
## Decisions
* Calling `dapr init` will
* Install `daprd` binary in `$HOME/.dapr/bin` for Linux/MacOS and `%USERPROFILE%\.dapr\bin` for Windows.
* Set up the `dapr_placement`, `dapr_redis` and `dapr_zipkin` containers.
 * Create the default `components` folder in `$HOME/.dapr/bin` for Linux/MacOS or `%USERPROFILE%\.dapr\bin` for Windows.
* Create the default components configurations for `pubsub.yaml`, `statestore.yaml` and `zipkin.yaml` in the default `components` folder.
 * Create a default configuration file in `$HOME/.dapr/config.yaml` for Linux/MacOS and `%USERPROFILE%\.dapr\config.yaml` for Windows for enabling tracing by default.
* Calling `dapr init --slim` will
* Install the binaries `daprd` and `placement` in `$HOME/.dapr/bin` for Linux/MacOS and `%USERPROFILE%\.dapr\bin` for Windows.
 * Create an empty default `components` folder in `$HOME/.dapr/bin` for Linux/MacOS or `%USERPROFILE%\.dapr\bin` for Windows.
* Calling `dapr uninstall` will
 * Remove the folder containing binaries (`bin`) from the default path `$HOME/.dapr` for Linux/MacOS and `%USERPROFILE%\.dapr` for Windows.
 * Remove the Docker container dapr_placement if Docker is installed.
* Calling `dapr uninstall --all` will
 * Remove the folder containing binaries (`bin`) from the default path `$HOME/.dapr` for Linux/MacOS and `%USERPROFILE%\.dapr` for Windows.
 * Remove the Docker containers dapr_placement, dapr_redis and dapr_zipkin if Docker is installed.
 * Remove the default folder `$HOME/.dapr` in Linux/MacOS and `%USERPROFILE%\.dapr` in Windows.
* The CLI `init` command will fail if a prior installation exists in the default path `$HOME/.dapr` for Linux/MacOS and `%USERPROFILE%\.dapr` for Windows.
* **There will no longer be an option for `--install-path` during init or during uninstall.**
* The `dapr` CLI by default will expect `daprd` in `$HOME/.dapr/bin` for Linux/MacOS and `%USERPROFILE%\.dapr\bin` for Windows. The command `dapr run` will not expect the `daprd` binary to be in the `PATH` variable; it will launch the binary from the default path.
## Consequences
All other binaries except the `dapr` CLI, and the configurations that Dapr needs (on running `dapr init`), will be placed in the path `$HOME/.dapr/bin` for Linux/MacOS and `%USERPROFILE%\.dapr\bin` for Windows.
|
mikeee/dapr
|
docs/decision_records/cli/CLI-002-self-hosted-init-and-uninstall-behaviors.md
|
Markdown
|
mit
| 2,500 |
# Architecture Decision Records
Architecture Decision Records (ADRs or simply decision records) are a collection of records for "architecturally significant" decisions. A decision record is a short markdown file in a specific light-weight format.
This folder contains all the decisions we have recorded in Dapr, including Dapr runtime, Dapr CLI as well as Dapr SDKs in different languages.
## Dapr decision record organization and index
All decisions are categorized in the following folders:
* **Architecture** - Decisions on general architecture, code structure, coding conventions and common practices.
- [ARC-001: Refactor for modularity and testability](./architecture/ARC-001-refactor-for-modularity-and-testability.md)
- [ARC-002: Multitenancy](./architecture/ARC-002-multitenancy.md)
- [ARC-003: gRPC and Protobuf message coding convention](./architecture/ARC-003-grpc-protobuf-coding-convention.md)
- [ARC-004: HTTP API server](./architecture/ARC-004-http-server.md)
* **API** - Decisions on Dapr runtime API designs.
- [API-001: State store API design](./api/API-001-state-store-api-design.md)
- [API-002: Actor API design](./api/API-002-actor-api-design.md)
- [API-003: Messaging API names](./api/API-003-messaging-api-names.md)
- [API-004: Binding Manifests](./api/API-004-binding-manifests.md)
- [API-005: State store behavior](./api/API-005-state-store-behavior.md)
- [API-006: Universal namespace (customer ask)](./api/API-006-universal-namespace.md)
- [API-007: Tracing Endpoint](./api/API-007-tracing-endpoint.md)
- [API-008: Multi State store API design](./api/API-008-multi-state-store-api-design.md)
- [API-009: Bi-Directional Bindings](./api/API-009-bidirectional-bindings.md)
- [API-010: Appcallback Versioning for HTTP](./api/API-010-appcallback-versioning.md)
- [API-011: State Store APIs Parity](./api/API-011-state-store-api-parity.md)
- [API-012: Content Type](./api/API-012-content-type.md)
* **CLI** - Decisions on Dapr CLI architecture and behaviors.
- [CLI-001: CLI and runtime versioning](./cli/CLI-001-cli-and-runtime-versioning.md)
- [CLI-002: Self-hosted init and uninstall behaviors](./cli/CLI-002-self-hosted-init-and-uninstall-behaviors.md)
* **SDKs** - Decisions on Dapr SDKs.
- [SDK-001: SDK releases](./sdk/SDK-001-releases.md)
- [SDK-002: Java JDK versions](./sdk/SDK-002-java-jdk-versions.md)
* **Engineering** - Decisions on Engineering practices, including CI/CD, testing and releases.
- [ENG-001: Image Tagging](./engineering/ENG-001-tagging.md)
- [ENG-002: Dapr Release](./engineering/ENG-002-Dapr-Release.md)
- [ENG-003: Test Infrastructure](./engineering/ENG-003-test-infrastructure.md)
- [ENG-004: Signing](./engineering/ENG-004-signing.md)
## Creating new decision records
A new decision record should be a _.md_ file named as
```
<category prefix>-<sequence number in category>-<descriptive title>.md
```
|Category|Prefix|
|----|----|
|Architecture|ARC|
|API|API|
|CLI|CLI|
|SDKs|SDK|
|Engineering|ENG|
A decision record should contain the following fields:
* **Status** - can be "proposed", "accepted", "implemented", or "rejected".
* **Context** - the context of the design discussion.
* **Decision** - Description of the decision.
* **Consequences** - what impacts this decision may create.
* **Implementation** - when a decision is implemented, the corresponding doc should be updated with the following information (when applicable):
* Release version
* Associated test cases
|
mikeee/dapr
|
docs/decision_records/decision_records.md
|
Markdown
|
mit
| 3,582 |
# ENG-001: Image Tagging
## Status
Accepted
## Context
As we embraced using Docker repositories to store our images, and keeping in mind that we support multiple repositories along with versioning of images and different architectures,
we needed a way to construct an accepted and consistent way of naming our Docker images.
## Decisions
* An image will conform to the following format: \<namespace>/\<repository>:\<tag>
* A valid tag conforms to the following format: \<version>-\<architecture>, or just \<version>, in which case the architecture is assumed to be Linux
## Consequences
This keeps us consistent with widely accepted naming conventions and sets clear guidelines for the naming of future images.
## Examples
Dapr Runtime, latest Linux:
actionscore.azurecr.io/dapr:latest
Dapr Runtime, v0.1.0-alpha for ARM:
actionscore.azurecr.io/dapr:v0.1.0-alpha-arm
|
mikeee/dapr
|
docs/decision_records/engineering/ENG-001-tagging.md
|
Markdown
|
mit
| 840 |
# ENG-002: Dapr Release
## Status
Proposed
## Context
This record describes how to safely release new Dapr binaries and the corresponding configurations without any blockers for users.
## Decisions
### Integration build release
Integration build refers to the build from the `master` branch once we merge a pull request to the master branch. This build will be used for development purposes and must not be released to users or impact their environments.
### Official build release
#### Pre-release build
A pre-release build will be built from the `release-<major>.<minor>` branch and versioned by a git version tag suffix, e.g. `-alpha.0`, `-alpha.1`, etc. This build is not released to users who use the latest stable version.
**Pre-release process**
1. Create branch `release-<major>.<minor>` from master and push the branch. e.g. `release-0.1`
2. Add a pre-release version tag (with suffix -alpha.0, e.g. v0.1.0-alpha.0) and push the tag
```
$ git tag "v0.1.0-alpha.0" -m "v0.1.0-alpha.0"
$ git push --tags
```
3. CI creates a new build and pushes the images with only the version tag
4. Test and validate the functionalities with the specific version
5. If there are regressions and bugs, fix them in release-* branch and merge back to master
6. Create new pre-release version tag(with suffix -alpha.1, -alpha.2, etc)
7. Repeat steps 4 to 6 until all bugs are fixed
#### Release the stable version to users
Once all bugs are fixed, we will create the release note under [./docs/release_notes](https://github.com/dapr/dapr/tree/master/docs/release_notes) and run CI release manually in order to deliver the stable version to users.
### Release Patch version
We will work on the existing `release-<major>.<minor>` branch to release a patch version. Once all bugs are fixed, we will add a new patch version tag, such as `v0.1.1-alpha.0`, and then release the build manually.
## Consequences
* Keep master branch in a working state
* Deliver the stable version to user safely
|
mikeee/dapr
|
docs/decision_records/engineering/ENG-002-Dapr-Release.md
|
Markdown
|
mit
| 1,957 |
# ENG-003: Test infrastructure
## Status
Proposed
## Context
E2E tests ensure functional correctness in an end-to-end environment, in order to make sure Dapr works with user code deployments. The tests will be run before/after a PR is merged or by a scheduler.
Dapr E2E tests require test infrastructure in order to not only test Dapr functionalities but also show the test results in a consistent way. This document will decide how to bring up the test cluster, run the tests, and report the test results.
## Decisions
### Test environments
Although Dapr is designed for multi-cloud environments, e2e tests will be run in Kubernetes environments for now. We will support two different options for running e2e tests: on a local machine, and in CI on a pre-built Kubernetes cluster.
* **Local machine**. Contributors or developers will use [Minikube](https://github.com/kubernetes/minikube) to validate their changes and run new tests before creating a pull request.
* **Continuous Integration**. E2E tests will be run in the pre-built [Azure Kubernetes Service](https://azure.microsoft.com/en-us/services/kubernetes-service/) before/after PR is merged or by a scheduler. Even if we will use [Azure Kubernetes Service](https://azure.microsoft.com/en-us/services/kubernetes-service/) in our test infrastructure, contributors should run e2e tests in any RBAC-enabled Kubernetes clusters.
### Bring up test cluster
We will provide manual instructions or a simple script to bring up the test infrastructure, unlike other Kubernetes projects that use [kubetest](https://github.com/kubernetes/test-infra/tree/master/kubetest). Dapr E2E tests will clean up and revert all configurations in the cluster once the test is done. Without kubetest, we can create e2e tests more simply, without depending on 3rd-party test frameworks such as ginkgo and gomega.
### CI/CD and test result report for tests
Many Kubernetes-related projects use [Prow](https://github.com/kubernetes/test-infra/tree/master/prow) and [Testgrid](https://github.com/kubernetes/test-infra/tree/master/testgrid) for test CI, PRs, and test result management. However, we will not use them to run Dapr E2E tests and share the test results, since we would need to self-host them on Google Cloud Platform.
Instead, Dapr will use [Azure Pipeline](https://azure.microsoft.com/en-us/services/devops/pipelines/) to run e2e tests and its [test report feature](https://docs.microsoft.com/en-us/azure/devops/pipelines/test/review-continuous-test-results-after-build?view=azure-devops) without self-hosted CI and test report services. Contributors can even get their own Azure Pipelines accounts **for free** without self-hosting them.
## Consequences
* Dapr E2E tests will run in [Minikube](https://github.com/kubernetes/minikube) on a local machine and in [Azure Kubernetes Service](https://azure.microsoft.com/en-us/services/kubernetes-service/) in CI, but the tests should run in any RBAC-enabled Kubernetes cluster
* We will provide manual instructions and scripts to build the test Kubernetes cluster
* [Azure Pipelines](https://azure.microsoft.com/en-us/services/devops/pipelines/) will run e2e tests before/after a PR is merged or by a scheduler and report test results
|
mikeee/dapr
|
docs/decision_records/engineering/ENG-003-test-infrastructure.md
|
Markdown
|
mit
| 3,226 |
# ENG-004: Binary Signing
## Status
Accepted
## Context
Authenticode signing of binaries.
## Decisions
* Binaries will not be signed with Microsoft keys. In the future, we may revisit signing the binaries with dapr.io keys.
## Consequences
This will allow the Dapr releases to be built outside of Microsoft build and release pipelines.
|
mikeee/dapr
|
docs/decision_records/engineering/ENG-004-signing.md
|
Markdown
|
mit
| 339 |
# SDK-001: SDK Releases
## Status
Accepted
## Context
Dapr exposes APIs for building blocks that can be invoked over HTTP/gRPC by user code. Making raw HTTP/gRPC calls from user code works, but it doesn't provide a good, strongly typed experience for developers.
## Decisions
* Dapr provides language-specific SDKs for C#, Java, JavaScript, Python, Go, Rust, and C++. There may be others in the future.
- For the current release, the SDKs are auto-generated from the Dapr proto specifications using gRPC tools.
- In future releases, we will work on creating and releasing strongly typed SDKs for these languages, which are wrappers on top of the auto-generated gRPC SDKs (e.g. the C# SDK shipped for state management APIs with the 0.1.0 release). This is the preferred approach; creating purely handcrafted SDKs is discouraged.
* For Actors, handcrafted language-specific SDKs are preferred, since Actor-specific handcrafted code greatly simplifies the user experience, e.g. the C# Actor SDK shipped with the 0.1.0 release.
## Consequences
Auto-generation of gRPC client-side code from Dapr proto files allows Dapr to provide SDKs for the major languages with the 0.1.0 release and sets us on the correct path to generate more user-friendly SDKs by wrapping the auto-generated gRPC ones. There will be no auto-generated code for Actor SDKs, which are handcrafted to focus on API usability.
|
mikeee/dapr
|
docs/decision_records/sdk/SDK-001-releases.md
|
Markdown
|
mit
| 1,420 |
# SDK-002: Java JDK Versions
## Status
Accepted
## Context
Dapr offers a Java SDK. Java 11 is the latest LTS version; Java 8 is the previous LTS version, but was still the version mainly used by the Java community in 2019. What should be the minimum Java version supported by Dapr's Java SDK?
See https://github.com/dapr/java-sdk/issues/17
## Decisions
* Java 8 should be the minimum version supported for Dapr's Java SDK.
* Java 11 should be used in samples and user documentation to encourage adoption.
* Java 8's commercial support ends in 2022. Dapr's Java SDK should migrate to Java 11 prior to that; the timeline is still not decided.
## Consequences
* Customers running with Java 7 or below cannot use Dapr's Java SDK.
* Customers running with Java 8 are still supported, even though Java 11 is the recommended version.
* Modern language features will not be available in Dapr's Java SDK code.
* Modern JVM features can still be used since the Java 11 JVM can run Java 8 bytecode.
|
mikeee/dapr
|
docs/decision_records/sdk/SDK-002-java-jdk-versions.md
|
Markdown
|
mit
| 990 |
## Development
* [Setup Dapr Development environment](./setup-dapr-development-env.md): Provides a Dapr development setup guide
* [Setup Dapr Development environment using VS Code](./setup-dapr-development-using-vscode.md): Provides a Dapr development setup guide using VS Code in a containerized dev environment
* [Setup Continuous Integration](./setup-ci.md): Describes how to set up GitHub Actions CI for Dapr
* [Developing Dapr](./developing-dapr.md): Describes how to develop the Dapr runtime
* [Dapr Metrics](./dapr-metrics.md): Lists the metrics that Dapr system components produce.
|
mikeee/dapr
|
docs/development/README.md
|
Markdown
|
mit
| 592 |
# Dapr metrics
Dapr metric names start with the `dapr_` prefix, except for health metrics.
* [Dapr Common metrics](#dapr-common-metrics)
* [Dapr Operator metrics](#dapr-operator-metrics)
* [Dapr Sidecar Injector metrics](#dapr-sidecar-injector-metrics)
* [Dapr Placement metrics](#dapr-placement-metrics)
* [Dapr Sentry metrics](#dapr-sentry-metrics)
* [Dapr Runtime metrics](#dapr-runtime-metrics)
* [Dapr Component metrics](#dapr-component-metrics)
## Dapr Common metrics
### Health metrics
Dapr uses the Prometheus process and Go collectors by default.
* process_* : [prometheus process collector](https://github.com/prometheus/client_golang/blob/master/prometheus/process_collector.go)
* go_* : [prometheus go collector](https://github.com/prometheus/client_golang/blob/master/prometheus/go_collector.go)
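To inspect these metrics locally, you can scrape the sidecar's Prometheus endpoint directly (9090 is the default metrics port; the `grep` filter is only for illustration):
```sh
curl -s http://localhost:9090/metrics | grep '^dapr_'
```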
## Dapr Operator metrics
[monitoring metrics](../../pkg/operator/monitoring/metrics.go)
* dapr_operator_service_created_total: The total number of dapr services created.
* dapr_operator_service_deleted_total: The total number of dapr services deleted.
* dapr_operator_service_updated_total: The total number of dapr services updated.
## Dapr Sidecar-injector metrics
[monitoring metrics](../../pkg/injector/monitoring/metrics.go)
* dapr_injector_sidecar_injection/requests_total: The total number of sidecar injection requests.
* dapr_injector_sidecar_injection/succeeded_total: The total number of successful sidecar injections.
* dapr_injector_sidecar_injection/failed_total: The total number of failed sidecar injections.
## Dapr Placement metrics
[monitoring metrics](../../pkg/placement/monitoring/metrics.go)
* dapr_placement_runtimes_total: The total number of hosts reported to placement service.
* dapr_placement_actorruntimes_total: The total number of actor runtimes reported to placement service.
* dapr_placement_actor_heartbeat_timestamp: The actor's heartbeat timestamp (in seconds) was last reported to the placement service.
## Dapr Sentry metrics
[monitoring metrics](../../pkg/sentry/monitoring/metrics.go)
* dapr_sentry_cert_sign_request_received_total: The number of CSRs received.
* dapr_sentry_cert_sign_success_total: The number of certificates issuances that have succeeded.
* dapr_sentry_cert_sign_failure_total: The number of errors that occurred when signing the CSR.
* dapr_sentry_servercert_issue_failed_total: The number of server TLS certificate issuance failures.
* dapr_sentry_issuercert_changed_total: The number of issuer cert updates, incremented when the issuer cert or key is changed.
* dapr_sentry_issuercert_expiry_timestamp: The Unix timestamp, in seconds, when the issuer/root cert will expire.
## Dapr Runtime metrics
### Service related metrics
[service metrics](../../pkg/diagnostics/service_monitoring.go)
#### Component
* dapr_runtime_component_loaded: The number of successfully loaded components
* dapr_runtime_component_init_total: The number of initialized components
* dapr_runtime_component_init_fail_total: The number of component initialization failures
#### Service Invocation
* dapr_runtime_service_invocation_req_sent_total: The number of remote service invocation requests sent
* dapr_runtime_service_invocation_req_recv_total: The number of remote service invocation requests received
* dapr_runtime_service_invocation_res_sent_total: The number of remote service invocation responses sent
* dapr_runtime_service_invocation_res_recv_total: The number of remote service invocation responses received
* dapr_runtime_service_invocation_res_recv_latency_ms: The remote service invocation round trip latency
#### Security
* dapr_runtime_mtls_init_total: The number of successful mTLS authenticator initializations.
* dapr_runtime_mtls_init_fail_total: The number of mTLS authenticator init failures.
* dapr_runtime_mtls_workload_cert_rotated_total: The number of successful workload certificate rotations.
* dapr_runtime_mtls_workload_cert_rotated_fail_total: The number of failed workload certificate rotations.
#### Actors
* dapr_runtime_actor_status_report_total: The number of successful status reports to the placement service.
* dapr_runtime_actor_status_report_fail_total: The number of failed status reports to the placement service.
* dapr_runtime_actor_table_operation_recv_total: The number of received actor placement table operations.
* dapr_runtime_actor_rebalanced_total: The number of actor rebalance requests.
* dapr_runtime_actor_deactivated_total: The number of successful actor deactivations.
* dapr_runtime_actor_deactivated_failed_total: The number of failed actor deactivations.
* dapr_runtime_actor_pending_actor_calls: The number of pending actor calls waiting to acquire the per-actor lock.
* dapr_runtime_actor_timers: The number of actor timer requests.
* dapr_runtime_actor_reminders: The number of actor reminder requests.
* dapr_runtime_actor_reminders_fired_total: The number of actor reminders fired.
* dapr_runtime_actor_timers_fired_total: The number of actor timers fired.
#### Resiliency
* dapr_resiliency_loaded: The number of resiliency policies loaded.
* dapr_resiliency_count: The number of times a resiliency policy has been executed.
* dapr_resiliency_activations_total: Number of times a resiliency policy has been activated in a building block after a failure or after a state change.
#### Workflow metrics
[workflow metrics](../../pkg/diagnostics/workflow_monitoring.go)
* dapr_runtime_workflow_operation_count: The number of successful/failed workflow operation requests.
* dapr_runtime_workflow_operation_latency: The latencies of responses for workflow operation requests.
* dapr_runtime_workflow_execution_count: The number of successful/failed/recoverable workflow executions.
* dapr_runtime_workflow_activity_execution_count: The number of successful/failed/recoverable activity executions.
* dapr_runtime_workflow_activity_execution_latency: The total time taken to run an activity to completion.
### gRPC monitoring metrics
Dapr leverages the OpenCensus ocgrpc plugin to generate gRPC server and client metrics.
* [server metrics](https://github.com/census-instrumentation/opencensus-go/blob/master/plugin/ocgrpc/server_metrics.go)
* [client_metrics](https://github.com/census-instrumentation/opencensus-go/blob/master/plugin/ocgrpc/client_metrics.go)
#### gRPC Server metrics
* dapr_grpc_io_server_received_bytes_per_rpc_*: Distribution of received bytes per RPC, by method.
* dapr_grpc_io_server_sent_bytes_per_rpc_*: Distribution of total sent bytes per RPC, by method.
* dapr_grpc_io_server_server_latency_*: Distribution of server latency in milliseconds, by method.
* dapr_grpc_io_server_completed_rpcs: Count of RPCs by method and status.
#### gRPC Client metrics
* dapr_grpc_io_client_sent_bytes_per_rpc: Distribution of bytes sent per RPC, by method.
* dapr_grpc_io_client_received_bytes_per_rpc_*: Distribution of bytes received per RPC, by method.
* dapr_grpc_io_client_completed_rpcs_*: Count of RPCs by method and status.
### HTTP monitoring metrics
Dapr generates both server-side and client-side HTTP metrics:
* [HTTP metrics](../../pkg/diagnostics/http_monitoring.go)
#### Server metrics
> Note: Server metrics are prefixed by a forward slash character `/`
* dapr_http_server_request_count: Number of HTTP requests started in server
* dapr_http_server_request_bytes: HTTP request body size if set as ContentLength (uncompressed) in server
* dapr_http_server_response_count: Number of HTTP responses in server
* dapr_http_server_response_bytes: HTTP response body size (uncompressed) in server.
* dapr_http_server_latency: HTTP request end to end latency in server.
#### Client metrics
* dapr_http_client_sent_bytes: Total bytes sent in request body (not including headers)
* dapr_http_client_received_bytes: Total bytes received in response bodies (not including headers but including error responses with bodies)
* dapr_http_client_roundtrip_latency: End-to-end latency
* dapr_http_client_completed_count: Count of completed requests
## Dapr Component Metrics
### Pub/Sub metrics
* dapr_component_pubsub_ingress_latencies: The consuming app event processing latency
* dapr_component_pubsub_ingress_count: The number of incoming messages arriving from the pub/sub component
* dapr_component_pubsub_egress_count: The number of outgoing messages published to the pub/sub component
* dapr_component_pubsub_egress_latencies: The latency of the response from the pub/sub component
### Bindings metrics
* dapr_component_input_binding_count: The number of incoming events arriving from the input binding component
* dapr_component_input_binding_latencies: The triggered app event processing latency
* dapr_component_output_binding_count: The number of operations invoked on the output binding component
* dapr_component_output_binding_latencies: The latency of the response from the output binding component
### State metrics
* dapr_component_state_count: The number of operations performed on the state component
* dapr_component_state_latencies: The latency of the response from the state component
### Configuration metrics
* dapr_component_configuration_count: The number of operations performed on the configuration component
* dapr_component_configuration_latencies: The latency of the response from the configuration component
### Secret metrics
* dapr_component_secret_count: The number of operations performed on the secret component
* dapr_component_secret_latencies: The latency of the response from the secret component
|
mikeee/dapr
|
docs/development/dapr-metrics.md
|
Markdown
|
mit
| 9,509 |
# Developing Dapr
## Setup Dapr development environment
There are several options for getting an environment up and running for Dapr development:
- Using [GitHub Codespaces](https://docs.dapr.io/contributing/codespaces/) pre-configured for Dapr development is often the quickest path to get started with a development environment for Dapr. ([Learn about Codespaces](https://github.com/features/codespaces))
- If you are using [Visual Studio Code](https://code.visualstudio.com/), you can [connect to a development container](./setup-dapr-development-using-vscode.md) configured for Dapr development.
- [Manually install](./setup-dapr-development-env.md) the necessary tools and frameworks for developing Dapr on your device.
## Forking the repo
Contributing to Dapr often requires working with multiple repositories at once. We recommend creating a folder for Dapr and cloning all forked repositories in that folder.
For instructions on how to fork a repo, [see this video on forking the dapr/docs repo](https://youtu.be/uPYuXcaEs-c?t=289). The process is the same, just for different repositories.
```sh
mkdir dapr
git clone https://github.com/dapr/dapr.git dapr/dapr
```
## Build the Dapr binaries
You can build Dapr binaries with the `make` tool.
> On Windows, the `make` commands must be run under [git-bash](https://www.atlassian.com/git/tutorials/git-bash).
>
> These instructions also require that a `make` alias has been created for `mingw32-make.exe` according to the [setup instructions](./setup-dapr-development-env.md#installing-make).
- When running `make`, you need to be at the root of the `dapr/dapr` repo directory, for example: `$GOPATH/src/github.com/dapr/dapr`.
- Once built, the release binaries will be found in `./dist/{os}_{arch}/release/`, where `{os}_{arch}` is your current OS and architecture.
For example, running `make build` on an Intel-based macOS will generate the directory `./dist/darwin_amd64/release`.
- To build for your current local environment:
```sh
cd dapr/dapr
make build
```
- To cross-compile for a different platform, use the `GOOS` and `GOARCH` environmental variables:
```sh
make build GOOS=windows GOARCH=amd64
```
> For example, developers on Windows who prefer to develop in [WSL2](https://docs.microsoft.com/en-us/windows/wsl/install-win10) can use the Linux development environment to cross-compile binaries like `daprd.exe` that run on Windows natively.
You can individually build the daprd binary:
```sh
cd cmd/daprd
go build -tags=allcomponents -v
# use it in this manner
./daprd ...
# if you need to execute a `dapr run` command with that newly-built binary:
mv daprd ~/.dapr/bin/daprd
dapr version # see `Runtime version: edge` to ensure you are using the newly built binary
dapr run ... # this will use the newly-built binary
```
## Run unit tests
```sh
make test
```
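To iterate on a single package, a plain `go test` invocation also works (the package path and flags are illustrative):
```sh
go test -v -count=1 ./pkg/actors/...
```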
## One-line command for local development
```sh
make check
```
This command will:
- format, test and lint all the code
- check if you forgot to `git commit` something
Note: To run the linter locally, please use golangci-lint version v1.55.2, otherwise you might encounter errors. You can download v1.55.2 [here](https://github.com/golangci/golangci-lint/releases/tag/v1.55.2).
## Debug Dapr
We recommend using VS Code with the [Go extension](https://marketplace.visualstudio.com/items?itemName=golang.Go) for your productivity. If you want to use other code editors, please refer to the list of [editor plugins for Delve](https://github.com/go-delve/delve/blob/master/Documentation/EditorIntegration.md).
This section introduces how to start debugging with the Delve CLI. Please refer to the [Delve documentation](https://github.com/go-delve/delve/tree/master/Documentation) for more details.
### Start the Dapr runtime with a debugger
To start the Dapr runtime with a debugger, you need to use build tags to include the components you want to debug. The following build tags are available:
- allcomponents - (default) includes all components in Dapr sidecar
- stablecomponents - includes all stable components in Dapr sidecar
```bash
$ cd dapr/dapr/cmd/daprd
$ dlv debug . --build-flags=--tags=allcomponents
Type 'help' for list of commands.
(dlv) break main.main
(dlv) continue
```
### Attach a Debugger to running process
This is useful to debug Dapr when the process is running.
1. Build Dapr binaries for debugging.
Use the `DEBUG=1` option to generate Dapr binaries without code optimization in `./dist/{os}_{arch}/debug/`
```bash
make DEBUG=1 build
```
2. Create a component YAML file under `./dist/{os}_{arch}/debug/components` (for example a statestore component YAML).
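For example, a minimal Redis state store component can be created with a heredoc (a sketch assuming `linux_amd64` and a local Redis on `localhost:6379`; adjust the path and values for your setup):
```bash
mkdir -p ./dist/linux_amd64/debug/components
cat > ./dist/linux_amd64/debug/components/statestore.yaml <<'EOF'
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: statestore
spec:
  type: state.redis
  version: v1
  metadata:
  - name: redisHost
    value: localhost:6379
  - name: redisPassword
    value: ""
EOF
```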
3. Start the Dapr runtime
```bash
./dist/{os}_{arch}/debug/daprd
```
4. Find the process ID (e.g. `PID` displayed by the `ps` command for `daprd`) and attach the debugger
```bash
dlv attach {PID}
```
### Debug Dapr With Goland IDE
1. Build the daprd binary `go build -tags=allcomponents -v` from `/cmd/daprd`.
2. Proceed to run client code necessary for testing purposes and set break points as needed.

### Debug unit-tests
Specify the package that you want to test when running the `dlv test`. For example, to debug the `./pkg/actors` tests:
```bash
dlv test ./pkg/actors
```
## Developing on Kubernetes environment
### Setting environment variables
- **DAPR_REGISTRY** : should be set to `docker.io/<your_docker_hub_account>`.
- **DAPR_TAG** : should be set to whatever value you wish to use for a container image tag (`dev` is a common choice).
- **ONLY_DAPR_IMAGE**: should be set to `true` to use a single `dapr` image instead of individual images (like sentry, injector, daprd, etc.).
On Linux/macOS:
```bash
export DAPR_REGISTRY=docker.io/<your_docker_hub_account>
export DAPR_TAG=dev
```
On Windows:
```cmd
set DAPR_REGISTRY=docker.io/<your_docker_hub_account>
set DAPR_TAG=dev
```
### Building the container image
```bash
# Build Linux binaries
make build-linux
# Build Docker image with Linux binaries
make docker-build
```
### Push the container image
To push the image to DockerHub, complete your `docker login` and run:
```bash
make docker-push
```
### Deploy Dapr with your changes
Now we'll deploy Dapr with your changes.
To create the dapr-system namespace:
```bash
kubectl create namespace dapr-system
```
If you deployed Dapr to your cluster before, delete it now using:
```bash
helm uninstall dapr -n dapr-system
```
To deploy your changes to your Kubernetes cluster:
```bash
make docker-deploy-k8s
```
### Verifying your changes
Once Dapr is deployed, list the Dapr pods:
```bash
$ kubectl get pod -n dapr-system
NAME READY STATUS RESTARTS AGE
dapr-operator-86cddcfcb7-v2zjp 1/1 Running 0 4d3h
dapr-placement-5d6465f8d5-pz2qt 1/1 Running 0 4d3h
dapr-sidecar-injector-dc489d7bc-k2h4q 1/1 Running 0 4d3h
```
## Debug Dapr in a Kubernetes deployment
Refer to the [Dapr Docs](https://docs.dapr.io/developing-applications/debugging/debug-k8s/) on how to:
- [Debug the Dapr control plane on Kubernetes](https://docs.dapr.io/developing-applications/debugging/debug-k8s/debug-dapr-services/)
- [Debug the Dapr sidecar (daprd) on Kubernetes](https://docs.dapr.io/developing-applications/debugging/debug-k8s/debug-daprd/)
## See also
- Setting up a development environment [for building Dapr components](https://github.com/dapr/components-contrib/blob/master/docs/developing-component.md)
|
mikeee/dapr
|
docs/development/developing-dapr.md
|
Markdown
|
mit
| 7,606 |
# Preview Features
Feature toggles (a.k.a. [feature flags](https://martinfowler.com/articles/feature-toggles.html)) are a powerful way to change runtime behavior without changing the code itself. We encourage the use of feature flags, especially for preview features that require explicit opt-in. This document focuses on best practices and examples: how to configure feature flags and how to enable them when appropriate (i.e. when running e2e tests).
Dapr flags for preview features sit somewhere between Release Toggles and Ops Toggles, mostly behaving as Kill-Switches with months of longevity.
## Declaring
The set of all possible feature flags is defined by Dapr contributors by changing the codebase, and the toggle is made by the user (i.e. the app developer) when configuring the application.
All available feature flags are defined in the `../../pkg/config/configuration.go` file, and they are arbitrary strings. We encourage you to choose a meaningful name; do not avoid longer names if necessary, and keep in mind that this is the name the user will use when toggling.
> Avoid using words like `Enabled`, `Disabled`, and `Active`; they are redundant with the flag's boolean value.
## Toggling
Feature flags are enabled/disabled via the Dapr global configuration.
```yaml
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
name: pluggablecomponentsconfig # arbitrary config name
spec:
features:
- name: PluggableComponents # The name you chose
enabled: true # or false
```
That way, the runtime will load the global configuration and make the feature available to the application.
Note that `features` is a list, so you can activate/deactivate many features at once.
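On Kubernetes, a minimal sketch of wiring this up: apply the Configuration resource, then reference it from the application's `dapr.io/config` annotation (the file name below is illustrative):
```sh
# apply the Configuration shown above
kubectl apply -f pluggablecomponentsconfig.yaml
# then reference it in your deployment's pod template annotations:
#   dapr.io/config: "pluggablecomponentsconfig"
```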
## Using
You can check whether a feature is enabled at runtime at any time by calling `../../pkg/config/configuration.go#IsFeatureEnabled`.
Feature checks are generally made as early as possible in the code, to avoid unnecessary computation when the feature is disabled and to make the code cleaner for the reader.
Bad :x:
```golang
func doSomething() error {
if !config.IsFeatureEnabled(doSomethingFeature) {
// do nothing
return nil
}
    // ... doSomething instead
    return nil
}
```
Good :heavy_check_mark:
```golang
func initSomething() {
if config.IsFeatureEnabled(doSomethingFeature) {
doSomething()
}
}
```
Great, how about e2e tests?
For that, we recommend creating a specific configuration for your app that activates the feature,
i.e. `../../tests/config/the_configuration_name_goes_here.yaml`:
```yaml
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
name: myappconfig # arbitrary config name
spec:
features:
- name: MyFeatureFlag # The name you chose
enabled: true # or false
```
1. Include your configuration in `../../tests/dapr_tests.mk` under the `setup-test-components` target
2. Start your app pointing to it:
```golang
testApps := []kube.AppDescription{
{
AppName: yourApp,
ImageName: "e2e-your-app-image",
Config: "myappconfig",
},
}
```
## Documentation
When a new [preview feature](https://docs.dapr.io/operations/support/support-preview-features/) is added, our [documentation should be updated](https://github.com/dapr/docs/blob/4674817212c141acd4256a4d3ac441d5559f1eef/daprdocs/content/en/operations/support/support-preview-features.md). As a follow-up action, create a new issue on the docs repository ([see an example](https://github.com/dapr/docs/issues/2786)).
## Release GA
When the feature flag is no longer needed because the feature has been published for General Availability, all previous steps should be revisited: documentation, code references, and additional settings. Creating a feature-flag removal issue for a future milestone is considered good practice.
|
mikeee/dapr
|
docs/development/preview-features.md
|
Markdown
|
mit
| 3,784 |
# Setup Continuous Integration
Dapr uses [GitHub Actions](https://github.com/features/actions) for continuous integration in order to automate the build and publish processes. As long as you have a GitHub account, you can set up your own private Actions in your fork of the Dapr repo. This document helps you set up continuous integration for Dapr.
## Prerequisites
* GitHub account
## How to set up GitHub Actions in your account
1. Fork the [dapr/dapr repo](https://github.com/dapr/dapr) to your GitHub account
2. Go to `Settings` in the forked repo and click Secrets

3. Add secret variables for Dapr CI

* **`DOCKER_REGISTRY`** : Your private Docker registry name or dockerhub id e.g. `docker.io/[your_dockerhub_id]`
* **`DOCKER_REGISTRY_ID`** : Your private Docker registry id
* **`DOCKER_REGISTRY_PASS`** : Your private Docker registry password or Docker Hub password/token
* **`DAPR_BOT_TOKEN`** : Your [GitHub Personal Access Token](https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line); you do not need this unless you want to publish binaries to your forked GitHub release.
4. Go to `Actions` tab
Click `I understand my workflows, go ahead and run them`

5. Make sure your Actions is enabled

## Trigger the build
Dapr CI has five different behaviors based on the situation:
| | Build binaries | Store binaries into artifact | Publish docker image | GitHub Release |
|-----|--------------|------------------------------|-------------------|--------------|
| Create PR against master branch | X | X | | |
| Push the commit to master branch | X | X | `dapr:edge` image | |
| Push vX.Y.Z-rc.R tag e.g. v0.0.1-rc.0 | X | X | `dapr:vX.Y.Z-rc.R` image | X |
| Push vX.Y.Z tag e.g. v0.0.1 | X | X | `dapr:vX.Y.Z` and `dapr:vX.Y.Z:latest` image | X |
| Cron schedule ("nightly") | X | X | `dapr:nightly-YYYY-MM-DD` image | |
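For example, to trigger a release-candidate build in your fork, push a tag matching the pattern above (the version number is illustrative):
```sh
git tag "v0.0.1-rc.0" -m "v0.0.1-rc.0"
git push origin v0.0.1-rc.0
```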
|
mikeee/dapr
|
docs/development/setup-ci.md
|
Markdown
|
mit
| 2,106 |
# Setup Dapr development environment
This document helps you get started developing Dapr. If you find any problems while following this guide, please create a Pull Request to update this document.
## Git
1. Install [Git](https://git-scm.com/downloads)
> On Windows, the Dapr build environment depends on Git BASH that comes as a part of [Git for Windows](https://gitforwindows.org).
>
> Ensure that the Git and Unix tools are part of the `PATH` environment variable, and that the editor experience is integrated with the Windows Terminal. For example, if [installing Git with chocolatey](https://chocolatey.org/packages/git), you may want to specify:
>
> ```cmd
> choco install git -y --package-parameters="/GitAndUnixToolsOnPath /WindowsTerminal"
> ```
## Docker environment
1. Install [Docker](https://docs.docker.com/install/)
> For Linux, you'll have to configure docker to run without `sudo` for the dapr build scripts to work. Follow the instructions to [manage Docker as a non-root user](https://docs.docker.com/engine/install/linux-postinstall/#manage-docker-as-a-non-root-user).
2. Create your [Docker Hub account](https://hub.docker.com/signup) if you don't already have one.
## Go (Golang)
1. Download and install [Go 1.22.3 or later](https://golang.org/doc/install#tarball).
2. Install [Delve](https://github.com/go-delve/delve/tree/master/Documentation/installation) for Go debugging, if desired.
3. Install [golangci-lint](https://golangci-lint.run/usage/install) version 1.55.2.
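For example, golangci-lint's documented install script can pin the required version (installing into `$(go env GOPATH)/bin` is an assumption; pick any directory on your `PATH`):
```sh
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b "$(go env GOPATH)/bin" v1.55.2
golangci-lint --version   # verify the pinned version
```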
## Setup a Kubernetes development environment
1. Follow the guide on [how to set up a Kubernetes cluster for Dapr](https://docs.dapr.io/operations/hosting/kubernetes/cluster/).
2. For development purposes, you will also want to follow the optional steps to install [Helm 3.x](https://helm.sh/docs/intro/install/).
## Installing Make
Dapr uses `make` for a variety of build and test actions; it needs to be installed as appropriate for your platform:
### Linux
1. Install the `build-essential` package:
```bash
sudo apt-get install build-essential
```
### macOS
1. Ensure that build tools are installed:
```sh
xcode-select --install
```
2. When completed, you should see `make` and other command line developer tools in `/usr/bin`.
### Windows
1. Install MinGW and make with [Chocolatey](https://chocolatey.org/install):
```cmd
choco install mingw
choco install make
```
|
mikeee/dapr
|
docs/development/setup-dapr-development-env.md
|
Markdown
|
mit
| 2,446 |
# Setup Dapr development environment using Visual Studio Code
This document helps you get started developing Dapr using VS Code. If you find any problems while following this guide, please create a Pull Request to update this document.
## Using a development container in Visual Studio Code (VS Code)
[VS Code](https://code.visualstudio.com/) supports development in a containerized environment through its [Remote - Container extension](https://code.visualstudio.com/docs/remote/containers), so you don't need to manually install all of the tools and frameworks needed to [setup a Dapr development environment](./setup-dapr-development-env.md) yourself.
### Prerequisites
1. [Docker](https://docs.docker.com/get-docker/)
> For Windows users, we recommend enabling [WSL2 back-end integration with Docker](https://docs.docker.com/docker-for-windows/wsl/).
2. [Visual Studio Code](https://code.visualstudio.com/)
3. [Visual Studio Code Remote - Containers extension](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers)
### Using the Dapr development container
1. After you have cloned the Dapr repo locally, open the dapr folder in VS Code. For example:
```bash
git clone https://github.com/dapr/dapr.git
cd dapr
code .
```
VS Code will detect the presence of a dev container definition in the repo and will prompt you to reopen the project in a container:

Alternatively, you can open the command palette and use the `Remote-Containers: Reopen in Container` command.
2. Once the container is loaded, open an [integrated terminal](https://code.visualstudio.com/docs/editor/integrated-terminal) in VS Code and you're ready to start [Developing Dapr](./developing-dapr.md)!
## Customizing your dev container
The Dapr dev container is configured by default to support [GitHub Codespaces](https://github.com/features/codespaces), which you might want to change when running the dev container locally on your device.
### Personalizing user settings in a dev container
VS Code supports applying your user settings, such as your `.gitconfig`, to a dev container through the use of [dotfiles repositories](https://code.visualstudio.com/docs/remote/containers#_personalizing-with-dotfile-repositories). This can be done through your own VS Code `settings.json` file without changing the dev container image or configuration.
### Using a custom dev container image
The Dapr [devcontainer.json](../../.devcontainer/devcontainer.json) uses the latest image from the [daprio Docker Hub](https://hub.docker.com/r/daprio/dapr-dev), but you may need to modify the image destination to suit your host environment. For example, if you are using the devcontainer on a Linux host with a user whose UID is not 1000, you may need to remap the UID of the `dapr` user in the dev container to match your UID on the host.
1. Edit the [docker/Dockerfile-dev](../../docker/Dockerfile-dev) container image definition.
2. Replace the `"image"` property with the commented-out `"dockerFile"` property in [devcontainer.json](../../.devcontainer/devcontainer.json) to build and use the updated `Dockerfile-dev` file.
```json
{
"name": "Dapr Dev Environment",
// Update container version when you update dev-container
// "image": "docker.io/daprio/dapr-dev:0.1.7",
// Replace with uncommented line below to build your own local copy of the image
"dockerFile": "../docker/Dockerfile-dev",
"runArgs": [
...
```
3. Rebuild and reopen the workspace in the dev container via the command palette and the `Remote-Containers: Rebuild and Reopen in Container` command.
4. When you are satisfied with your changes, you can optionally publish your container image to your own registry to speed up rebuilding the container when you only want to make changes to the `devcontainer.json` configuration in the future. For a Docker registry named `myregistry`:
```bash
export DAPR_REGISTRY=myregistry
make build-dev-container
make push-dev-container
```
And the `devcontainer.json` would be updated to restore the `"image"` property pointing to your own image:
```json
{
"name": "Dapr Dev Environment",
// Update container version when you update dev-container
"image": "docker.io/myregistry/dapr-dev:0.1.7",
// Replace with uncommented line below to build your own local copy of the image
// "dockerFile": "../docker/Dockerfile-dev",
"runArgs": [
...
```
### Sharing a Docker environment with localhost
The default Dapr dev container provides a Docker-in-docker configuration, so containers set up inside the dev container are not visible to the host and vice versa. This prevents the host environment from interfering with the dev container environment, for example, so that both the host and dev container can have separate standalone Dapr environments via `dapr init`.
If there are situations where you would like the dev container to share the Docker environment of the host or between multiple dev containers, you will likely want your dev container to be on the same network as your localhost, so you will need to update the `"runArgs"` property list to include the `"--net=host"` setting in `devcontainer.json`:
```json
"runArgs": [
...
// Uncomment to bind to host network for local devcontainer; this is necessary if using the
// bind-mounted /var/run/docker-host.sock directly.
"--net=host",
```
In addition, there are a couple of options for how you can expose the localhost Docker context to the dev container:
#### Create a Docker context for localhost in the dev container
The default `devcontainer.json` already maps a socket for the localhost Docker into the dev container, so you can take advantage of that by defining a separate Docker context for use in the dev container:
> ⚠ If your localhost is a Linux system, note that the dev container is running as `--privileged` and these instructions can modify your privileges.
```bash
sudo chown root:docker /var/run/docker-host.sock
docker context create host-context --description "Use localhost Docker environment" --docker "host=unix:///var/run/docker-host.sock"
docker context use host-context
docker ps
```
You should be able to see that the dev container itself is now visible in the list of running containers in the host-context. You can also use `docker context use default` to toggle back to using the Docker-in-docker configuration.
#### Rebind /var/run/docker.sock to localhost
If you don't want to use the Docker-in-docker configuration at all, you can choose to rebind the `/var/run/docker.sock` directly to the localhost socket so that the default Docker context is always the localhost. This can be done by enabling the `BIND_LOCALHOST_DOCKER` environment variable in the `devcontainer.json` file:
```json
"containerEnv": {
...
// Uncomment to disable docker-in-docker and automatically proxy default /var/run/docker.sock to
// the localhost bind-mount /var/run/docker-host.sock.
"BIND_LOCALHOST_DOCKER": "true"
},
```
This approach has the added benefit that it will use socat to proxy `/var/run/docker-host.sock` if it is owned by root on localhost, to avoid modifying localhost permissions on Linux hosts.
### Cloning your Kubernetes configuration into the dev container
Since it is likely that contributors and maintainers will want to test Dapr changes against a Kubernetes environment, the Dapr dev container comes pre-installed with Kubernetes, Helm and Minikube for testing within the dev container environment.
If you want to reuse an existing Kubernetes config, such as your [Azure Kubernetes Service config](https://docs.dapr.io/operations/hosting/kubernetes/cluster/setup-aks/) or local [Minikube cluster](https://docs.dapr.io/operations/hosting/kubernetes/cluster/setup-minikube/), you can also configure the `devcontainer.json` to copy those settings into the dev container. This requires:
1. Enabling the `SYNC_LOCALHOST_KUBECONFIG` environment variable
2. Bind mounting the locations of your Kubernetes and Minikube config paths to `/home/dapr/.kube-localhost` and `/home/dapr/.minikube-localhost` respectively.
- You don't need to bind the Minikube path if you're not using it.
```json
"containerEnv": {
// Uncomment to overwrite devcontainer .kube/config and .minikube certs with the localhost versions
// each time the devcontainer starts, if the respective .kube-localhost/config and .minikube-localhost
// folders respectively are bind mounted to the devcontainer.
"SYNC_LOCALHOST_KUBECONFIG": "true",
...
},
...
"runArgs": [
...
// Uncomment to clone local .kube/config into devcontainer
"--mount", "type=bind,source=${env:HOME}${env:USERPROFILE}/.kube,target=/home/dapr/.kube-localhost",
// Uncomment to additionally clone minikube certs into devcontainer for use with .kube/config
"--mount", "type=bind,source=${env:HOME}${env:USERPROFILE}/.minikube,target=/home/dapr/.minikube-localhost",
...
]
```
The `SYNC_LOCALHOST_KUBECONFIG` option only supports providing the dev container with the snapshot configuration from the host and does not support updating the host Kubernetes configuration from the dev container directly.
- The copy happens on every boot of the dev container, so any changes on the host side are not captured after the dev container is started.
- Any changes made to the dev container `~/.kube/config` such as changing the default context or updating credentials are not reflected back to the host, even if it may have changed for the underlying environment (e.g. AKS credential updates).
- If using Minikube, it needs to be started on the host before the dev container is launched. The Minikube configuration in the dev container is distinct from the host and cannot be used to start the host Minikube from inside the dev container. This option only supports cloning default Minikube credentials into the dev container for use by the Kubernetes config.
- The copy will overwrite any previous state in the dev container with localhost settings, so do not use this option with the dev container's instance of Minikube.
|
mikeee/dapr
|
docs/development/setup-dapr-development-using-vscode.md
|
Markdown
|
mit
| 10,231 |
# Dapr $dapr_version
We're happy to announce the release of Dapr $dapr_version!
We would like to extend our thanks to all the new and existing contributors who helped make this release happen.
**Highlights**
If you're new to Dapr, visit the [getting started](https://docs.dapr.io/getting-started/) page and familiarize yourself with Dapr.
Docs have been updated with all the new features and changes of this release. To get started with new capabilities introduced in this release, see the [Concepts](https://docs.dapr.io/concepts/) and [Developing applications](https://docs.dapr.io/developing-applications/) sections.
$warnings
See [this](#upgrading-to-dapr-$dapr_version) section on upgrading Dapr to version $dapr_version.
## Acknowledgements
Thanks to everyone who made this release possible!
$dapr_contributors
## New in this release
$dapr_changes
## Upgrading to Dapr $dapr_version
To upgrade to this release of Dapr, follow the steps here to ensure a smooth upgrade. You know, the one where you don't get red errors on the terminal... we all hate that, right?
### Local Machine / Self-hosted
Uninstall Dapr using the CLI you currently have installed. Note that this will remove the default $HOME/.dapr directory, binaries, and all of the dapr_redis, dapr_placement, and dapr_zipkin containers. Linux users need to run with sudo if the docker command requires sudo:
```bash
dapr uninstall --all
```
For RC releases like this, download the latest and greatest release from [here](https://github.com/dapr/cli/releases) and put the `dapr` binary in your PATH.
Once you have installed the CLI, run:
```bash
dapr init --runtime-version=$dapr_version
```
Wait for the update to finish, then ensure you are using the latest version of Dapr ($dapr_version) with:
```bash
$ dapr --version
CLI version: $dapr_version
Runtime version: $dapr_version
```
### Kubernetes
#### Upgrading from previous version
You can perform zero-downtime upgrades using both Helm 3 and the Dapr CLI.
##### Upgrade using the CLI
Download the latest RC release from [here](https://github.com/dapr/cli/releases) and put the `dapr` binary in your PATH.
To upgrade Dapr, run:
```
dapr upgrade --runtime-version $dapr_version -k
```
To upgrade with high availability mode:
```
dapr upgrade --runtime-version $dapr_version --enable-ha=true -k
```
Wait until the operation is finished and check your status with `dapr status -k`.
All done!
*Note: Make sure your deployments are restarted to pick up the latest version of the Dapr sidecar*
##### Upgrade using Helm
To upgrade Dapr using Helm, run:
```
helm repo add dapr https://dapr.github.io/helm-charts/
helm repo update
helm upgrade dapr dapr/dapr --version $dapr_version --namespace=dapr-system --wait
```
Wait until the operation is finished and check your status with `dapr status -k`.
All done!
*Note: Make sure your deployments are restarted to pick up the latest version of the Dapr sidecar*
#### Starting a fresh install on a cluster
Please see [how to deploy Dapr on a Kubernetes cluster](https://docs.dapr.io/operations/hosting/kubernetes/kubernetes-deploy/) for a complete guide to installing Dapr on Kubernetes.
You can use Helm 3 to install Dapr:
```
helm repo add dapr https://dapr.github.io/helm-charts/
helm repo update
kubectl create namespace dapr-system
helm install dapr dapr/dapr --version $dapr_version --namespace dapr-system --wait
```
Alternatively, you can use the latest version of CLI:
```
dapr init --runtime-version=$dapr_version -k
```
##### Post installation
Verify the control plane pods are running and are healthy:
```
$ dapr status -k
NAME NAMESPACE HEALTHY STATUS REPLICAS VERSION AGE CREATED
dapr-sidecar-injector dapr-system True Running 1 $dapr_version 15s $today 13:07.39
dapr-sentry dapr-system True Running 1 $dapr_version 15s $today 13:07.39
dapr-operator dapr-system True Running 1 $dapr_version 15s $today 13:07.39
dapr-placement dapr-system True Running 1 $dapr_version 15s $today 13:07.39
```
After Dapr $dapr_version has been installed, perform a rolling restart for your deployments to pick up the new version of the sidecar.
This can be done with:
```
kubectl rollout restart deploy/<deployment-name>
```
## Breaking Changes
$dapr_breaking_changes
## Deprecation Notices
$dapr_deprecation_notices
|
mikeee/dapr
|
docs/release_notes/template.md
|
Markdown
|
mit
| 4,424 |
# Dapr 0.1.0
This is the initial release of Dapr.
|
mikeee/dapr
|
docs/release_notes/v0.1.0.md
|
Markdown
|
mit
| 52 |