code
stringlengths 0
56.1M
| repo_name
stringclasses 515
values | path
stringlengths 2
147
| language
stringclasses 447
values | license
stringclasses 7
values | size
int64 0
56.8M
|
---|---|---|---|---|---|
module github.com/dapr/dapr/tests/apps/service_invocation_grpc_proxy_server
go 1.22.3
require (
google.golang.org/grpc v1.54.0
google.golang.org/grpc/examples v0.0.0-20210610163306-6351a55c3895
)
require (
github.com/golang/protobuf v1.5.3 // indirect
golang.org/x/net v0.12.0 // indirect
golang.org/x/sys v0.10.0 // indirect
golang.org/x/text v0.11.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230626202813-9b080da550b3 // indirect
google.golang.org/protobuf v1.30.0 // indirect
)
|
mikeee/dapr
|
tests/apps/service_invocation_grpc_proxy_server/go.mod
|
mod
|
mit
| 513 |
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230626202813-9b080da550b3 h1:QJuqz7YzNTyKDspkp2lrzqtq4lf2AhUSpXTsGP5SbLw=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230626202813-9b080da550b3/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag=
google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
google.golang.org/grpc/examples v0.0.0-20210610163306-6351a55c3895 h1:gkOGw8P+cjlOkwkqO/syZdUKX3GH+JzYbtYClNsSnkA=
google.golang.org/grpc/examples v0.0.0-20210610163306-6351a55c3895/go.mod h1:bF8wuZSAZTcbF7ZPKrDI/qY52toTP/yxLpRRY4Eu9Js=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
mikeee/dapr
|
tests/apps/service_invocation_grpc_proxy_server/go.sum
|
sum
|
mit
| 10,295 |
# In the e2e tests, this file is not used to deploy the app to the test cluster.
# It exists so the app can be deployed manually with kubectl for local testing
# before the e2e tests are written.
apiVersion: apps/v1
kind: Deployment
metadata:
name: service-invocation-grpc-proxy-server
labels:
testapp: service-invocation-grpc-proxy-server
spec:
replicas: 1
selector:
matchLabels:
testapp: service-invocation-grpc-proxy-server
template:
metadata:
labels:
testapp: service-invocation-grpc-proxy-server
annotations:
dapr.io/enabled: "true"
dapr.io/app-id: "grpcproxyserver"
dapr.io/app-port: "50051"
dapr.io/http-max-request-size: "6"
spec:
containers:
- name: service-invocation-grpc-proxy-server
image: docker.io/[YOUR ALIAS]/e2e-service_invocation_grpc_proxy_server:dev
ports:
- containerPort: 50051
imagePullPolicy: Always
|
mikeee/dapr
|
tests/apps/service_invocation_grpc_proxy_server/service.yaml
|
YAML
|
mit
| 936 |
../../../dapr/proto/common/v1/*.proto
../../../dapr/proto/runtime/v1/*.proto
../utils/*.go
|
mikeee/dapr
|
tests/apps/stateapp/.cache-include
|
none
|
mit
| 90 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"net/url"
"os"
"path"
"strconv"
"strings"
"time"
"github.com/google/uuid"
"github.com/gorilla/mux"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
daprhttp "github.com/dapr/dapr/pkg/api/http"
commonv1pb "github.com/dapr/dapr/pkg/proto/common/v1"
runtimev1pb "github.com/dapr/dapr/pkg/proto/runtime/v1"
"github.com/dapr/dapr/tests/apps/utils"
)
const (
	// URL templates for the Dapr sidecar's HTTP state-store API.
	// The first placeholder is the sidecar HTTP port, the second the
	// state store name.
	stateURLTemplate            = "http://localhost:%d/v1.0/state/%s"
	bulkStateURLTemplate        = "http://localhost:%d/v1.0/state/%s/bulk?metadata.partitionKey=e2etest"
	stateTransactionURLTemplate = "http://localhost:%d/v1.0/state/%s/transaction"
	queryURLTemplate            = "http://localhost:%d/v1.0-alpha1/state/%s/query"

	// Partition-key metadata attached to every state operation in this app.
	metadataPartitionKey = "partitionKey"
	partitionKey         = "e2etest"

	badEtag = "99999" // Must be numeric because of Redis
)
var (
	// Default ports; each may be overridden from the environment in init().
	appPort      = 3000  // port this app listens on (PORT)
	daprGRPCPort = 50001 // Dapr sidecar gRPC port (DAPR_GRPC_PORT)
	daprHTTPPort = 3500  // Dapr sidecar HTTP port (DAPR_HTTP_PORT)

	// Shared HTTP client for all sidecar calls.
	httpClient = utils.NewHTTPClient()
	// gRPC client for the Dapr runtime API; presumably initialized in main()
	// before any gRPC handler runs — not visible in this chunk.
	grpcClient runtimev1pb.DaprClient
)
// init overrides the default sidecar/app ports from the environment.
// A missing, empty, or literal "0" variable leaves the default untouched.
func init() {
	overrides := []struct {
		env    string
		target *int
	}{
		{"DAPR_HTTP_PORT", &daprHTTPPort},
		{"DAPR_GRPC_PORT", &daprGRPCPort},
		{"PORT", &appPort},
	}
	for _, o := range overrides {
		if v := os.Getenv(o.env); v != "" && v != "0" {
			// Parse errors are deliberately ignored, matching the
			// best-effort behavior of the rest of this test app.
			*o.target, _ = strconv.Atoi(v)
		}
	}
}
// appState represents a state in this app.
type appState struct {
	Data     []byte              `json:"data,omitempty"`     // raw state payload
	Metadata map[string][]string `json:"metadata,omitempty"` // optional metadata carried with the state
}
// daprState represents a state in Dapr.
type daprState struct {
	Key           string            `json:"key,omitempty"`
	Value         *appState         `json:"value,omitempty"`
	Etag          string            `json:"etag,omitempty"`          // concurrency etag reported by the store
	TTLExpireTime *time.Time        `json:"ttlExpireTime,omitempty"` // from the sidecar's metadata.ttlexpiretime header
	Metadata      map[string]string `json:"metadata,omitempty"`
	OperationType string            `json:"operationType,omitempty"` // used only by the transaction endpoints
}
// bulkGetRequest is the bulk get request object for the test.
// It mirrors the payload expected by the Dapr bulk-get HTTP endpoint.
type bulkGetRequest struct {
	Metadata    map[string]string `json:"metadata"`
	Keys        []string          `json:"keys"`
	Parallelism int               `json:"parallelism"`
}
// bulkGetResponse is the response object from Dapr for a bulk get operation.
type bulkGetResponse struct {
	Key  string `json:"key"`
	Data any    `json:"data"` // left untyped; re-marshaled into appState by getBulk
	ETag string `json:"etag"`
}
// requestResponse represents a request or response for the APIs in this app.
type requestResponse struct {
	States  []daprState `json:"states,omitempty"`
	Message string      `json:"message,omitempty"` // error text when a handler fails
}
// indexHandler is the handler for root path
func indexHandler(w http.ResponseWriter, r *http.Request) {
log.Println("indexHandler is called")
w.WriteHeader(http.StatusOK)
}
// save marshals the given states and posts them to the state store through
// the Dapr HTTP API via load. Returns the sidecar's HTTP status code.
func save(states []daprState, statestore string, meta map[string]string) (int, error) {
	log.Printf("Processing save request for %d entries.", len(states))

	jsonValue, err := json.Marshal(states)
	if err != nil {
		// Fixed log message: it previously read "Could save states in Dapr".
		log.Printf("Could not save states in Dapr: %s", err.Error())
		return http.StatusInternalServerError, err
	}

	return load(jsonValue, statestore, meta)
}
// load posts raw JSON state data to the state-store endpoint and verifies
// the sidecar answered 204 No Content. The returned int is the sidecar's
// HTTP status code.
func load(data []byte, statestore string, meta map[string]string) (int, error) {
	target := fmt.Sprintf(stateURLTemplate, daprHTTPPort, statestore)
	if q := metadata2RawQuery(meta); q != "" {
		target += "?" + q
	}

	log.Printf("Posting %d bytes of state to %s", len(data), target)
	res, err := httpClient.Post(target, "application/json", bytes.NewReader(data))
	if err != nil {
		return http.StatusInternalServerError, err
	}
	defer res.Body.Close()

	// Save must return 204; anything else is reported as an error that
	// includes the response body for easier test debugging.
	if res.StatusCode != http.StatusNoContent {
		body, _ := io.ReadAll(res.Body)
		err = fmt.Errorf("expected status code 204, got %d; response: %s", res.StatusCode, string(body))
	}

	// Drain before closing so the connection can be reused.
	_, _ = io.Copy(io.Discard, res.Body)
	return res.StatusCode, err
}
// get retrieves a single key from the state store over HTTP. It returns the
// parsed state, the value of the "etag" response header, and the TTL expiry
// time when the sidecar reports a single "metadata.ttlexpiretime" header.
func get(key string, statestore string, meta map[string]string) (*appState, string, *time.Time, error) {
	log.Printf("Processing get request for %s.", key)
	url, err := createStateURL(key, statestore, meta)
	if err != nil {
		return nil, "", nil, err
	}
	log.Printf("Fetching state from %s", url)
	// url is created from user input, it is OK since this is a test app only and will not run in prod.
	/* #nosec */
	res, err := httpClient.Get(url)
	if err != nil {
		return nil, "", nil, fmt.Errorf("could not get value for key %s from Dapr: %s", key, err.Error())
	}
	defer res.Body.Close()
	body, err := io.ReadAll(res.Body)
	if err != nil {
		return nil, "", nil, fmt.Errorf("could not load value for key %s from Dapr: %s", key, err.Error())
	}
	// Non-2xx: surface the body, which carries the sidecar's error message.
	if res.StatusCode < 200 || res.StatusCode > 299 {
		return nil, "", nil, fmt.Errorf("failed to get value for key %s from Dapr: %s", key, body)
	}
	log.Printf("Found state for key %s: %s", key, body)
	state, err := parseState(key, body)
	if err != nil {
		return nil, "", nil, err
	}
	// The TTL header is optional; only parse it when exactly one value is present.
	var ttlExpireTime *time.Time
	if v := res.Header.Values("metadata.ttlexpiretime"); len(v) == 1 {
		exp, err := time.Parse(time.RFC3339, v[0])
		if err != nil {
			return nil, "", nil, fmt.Errorf("could not parse ttlexpiretime for key %s from Dapr: %w", key, err)
		}
		ttlExpireTime = &exp
	}
	return state, res.Header.Get("etag"), ttlExpireTime, nil
}
// parseState decodes a raw state payload into an appState.
// An empty body yields (nil, nil): a key not found in Dapr still returns
// 200 with an empty response. If the body is not an appState object, it is
// retried as a plain JSON string and kept as raw bytes in Data.
func parseState(key string, body []byte) (*appState, error) {
	if len(body) == 0 {
		return nil, nil
	}

	state := &appState{}
	if err := json.Unmarshal(body, state); err != nil {
		var raw string
		if strErr := json.Unmarshal(body, &raw); strErr != nil {
			// Report the original (struct) unmarshal error, matching the
			// historical behavior of this helper.
			return nil, fmt.Errorf("could not parse value for key %s from Dapr: %s", key, err.Error())
		}
		state.Data = []byte(raw)
	}
	return state, nil
}
// getAll fetches each requested key individually via get and returns the
// collected results, failing fast on the first error.
func getAll(states []daprState, statestore string, meta map[string]string) ([]daprState, error) {
	log.Printf("Processing get request for %d states.", len(states))

	output := make([]daprState, 0, len(states))
	for _, s := range states {
		val, etag, expiry, err := get(s.Key, statestore, meta)
		if err != nil {
			return nil, err
		}
		log.Printf("Result for get request for key %s: %v", s.Key, val)
		output = append(output, daprState{
			Key:           s.Key,
			Value:         val,
			Etag:          etag,
			TTLExpireTime: expiry,
		})
	}

	log.Printf("Result for get request for %d states: %v", len(states), output)
	return output, nil
}
// getBulk retrieves multiple keys in a single call through the Dapr
// bulk-get HTTP endpoint and converts each item back into a daprState.
func getBulk(states []daprState, statestore string) ([]daprState, error) {
	log.Printf("Processing get bulk request for %d states.", len(states))

	output := make([]daprState, 0, len(states))
	url, err := createBulkStateURL(statestore)
	if err != nil {
		return nil, err
	}
	log.Printf("Fetching bulk state from %s", url)

	req := bulkGetRequest{}
	for _, s := range states {
		req.Keys = append(req.Keys, s.Key)
	}

	b, err := json.Marshal(&req)
	if err != nil {
		return nil, err
	}

	res, err := httpClient.Post(url, "application/json", bytes.NewReader(b))
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()

	body, err := io.ReadAll(res.Body)
	if err != nil {
		return nil, fmt.Errorf("could not load values for bulk get from Dapr: %s", err.Error())
	}
	if res.StatusCode < 200 || res.StatusCode > 299 {
		return nil, fmt.Errorf("failed to load values for bulk get from Dapr: %s", body)
	}

	var resp []bulkGetResponse
	err = json.Unmarshal(body, &resp)
	if err != nil {
		return nil, fmt.Errorf("could not unmarshal bulk get response from Dapr: %s", err.Error())
	}

	for _, state := range resp {
		var as appState
		b, err := json.Marshal(state.Data)
		if err != nil {
			return nil, fmt.Errorf("could not marshal return data: %s", err)
		}
		// Fix: this unmarshal error was previously ignored, silently
		// yielding an empty appState for malformed item data.
		if err := json.Unmarshal(b, &as); err != nil {
			return nil, fmt.Errorf("could not unmarshal return data for key %s: %s", state.Key, err)
		}

		output = append(output, daprState{
			Key:   state.Key,
			Value: &as,
			Etag:  state.ETag,
		})
	}

	log.Printf("Result for bulk get request for %d states: %v", len(states), output)
	return output, nil
}
// delete removes a single key from the state store via the Dapr HTTP API.
// When etag is non-empty it is sent as an If-Match header, making the
// delete conditional on the stored etag. Returns the sidecar's status code.
// NOTE(review): this shadows the builtin delete(); tolerable in a test app.
func delete(key, statestore string, meta map[string]string, etag string) (int, error) {
	log.Printf("Processing delete request for %s", key)
	url, err := createStateURL(key, statestore, meta)
	if err != nil {
		return 0, err
	}
	req, err := http.NewRequest(http.MethodDelete, url, nil)
	if err != nil {
		return 0, fmt.Errorf("could not create delete request for key %s in Dapr: %s", key, err.Error())
	}
	if etag != "" {
		// Conditional delete: the store rejects the request if the etag
		// no longer matches.
		req.Header.Set("If-Match", etag)
	}
	log.Printf("Deleting state for %s", url)
	res, err := httpClient.Do(req)
	if err != nil {
		return 0, fmt.Errorf("could not delete key %s in Dapr: %s", key, err.Error())
	}
	defer res.Body.Close()
	if res.StatusCode < 200 || res.StatusCode > 299 {
		body, _ := io.ReadAll(res.Body)
		return res.StatusCode, fmt.Errorf("failed to delete key %s in Dapr: %s", key, string(body))
	}
	// Drain before closing
	_, _ = io.Copy(io.Discard, res.Body)
	return res.StatusCode, nil
}
// deleteAll removes every given key from the state store over HTTP,
// stopping at the first failure. Etags are not used here.
func deleteAll(states []daprState, statestore string, meta map[string]string) error {
	log.Printf("Processing delete request for %d states.", len(states))

	for i := range states {
		if _, err := delete(states[i].Key, statestore, meta, ""); err != nil {
			return err
		}
	}
	return nil
}
// executeTransaction submits the given states as one transactional request
// to the state store via the Dapr HTTP API. Each state's OperationType
// selects the operation (e.g. upsert/delete).
func executeTransaction(states []daprState, statestore string) error {
	transactionalOperations := make([]map[string]interface{}, len(states))
	stateTransactionURL := fmt.Sprintf(stateTransactionURLTemplate, daprHTTPPort, statestore)
	for i, s := range states {
		transactionalOperations[i] = map[string]interface{}{
			"operation": s.OperationType,
			"request": map[string]interface{}{
				"key":   s.Key,
				"value": s.Value,
			},
		}
	}

	jsonValue, err := json.Marshal(map[string]interface{}{
		"operations": transactionalOperations,
		"metadata":   map[string]string{metadataPartitionKey: partitionKey},
	})
	if err != nil {
		// Fixed log message: it previously read "Could save transactional operations".
		log.Printf("Could not save transactional operations in Dapr: %s", err.Error())
		return err
	}

	log.Printf("Posting state to %s with '%s'", stateTransactionURL, jsonValue)
	res, err := httpClient.Post(stateTransactionURL, "application/json", bytes.NewReader(jsonValue))
	if err != nil {
		return err
	}
	defer res.Body.Close()

	// Fix: the response status was previously ignored entirely, so a
	// rejected transaction was reported as success to the test.
	if res.StatusCode < 200 || res.StatusCode > 299 {
		body, _ := io.ReadAll(res.Body)
		return fmt.Errorf("failed to execute transaction, status code %d; response: %s", res.StatusCode, string(body))
	}

	// Drain before closing so the connection can be reused.
	_, _ = io.Copy(io.Discard, res.Body)
	return nil
}
// executeQuery runs a raw state query against the alpha query endpoint and
// maps each result item into a daprState (key + raw data only; etags and
// metadata from the query response are not propagated).
func executeQuery(query []byte, statestore string, meta map[string]string) ([]daprState, error) {
	log.Printf("Processing query request '%s'.", string(query))
	queryURL := fmt.Sprintf(queryURLTemplate, daprHTTPPort, statestore)
	if len(meta) != 0 {
		queryURL += "?" + metadata2RawQuery(meta)
	}
	log.Printf("Posting %d bytes of state to %s", len(query), queryURL)
	resp, err := httpClient.Post(queryURL, "application/json", bytes.NewReader(query))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("could not load query results from Dapr: %s", err.Error())
	}
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		return nil, fmt.Errorf("failed to load query results from Dapr: %s", body)
	}
	// Decode into the runtime's QueryResponse shape so result items can be
	// re-exposed through this app's own daprState type.
	var qres daprhttp.QueryResponse
	err = json.Unmarshal(body, &qres)
	if err != nil {
		return nil, fmt.Errorf("could not unmarshal query response from Dapr: %v. Raw response: '%s'", err, string(body))
	}
	log.Printf("Query returned %d results", len(qres.Results))
	output := make([]daprState, 0, len(qres.Results))
	for _, item := range qres.Results {
		output = append(output, daprState{
			Key: item.Key,
			Value: &appState{
				Data: item.Data,
			},
		})
	}
	log.Printf("Result for query request for %d states: %v", len(output), output)
	return output, nil
}
// parseRequestBody decodes the JSON request body into a requestResponse and
// stamps the e2e partition-key metadata onto every contained state. On a
// decode failure it writes a 400 JSON error response and returns the error.
func parseRequestBody(w http.ResponseWriter, r *http.Request) (*requestResponse, error) {
	req := &requestResponse{}
	if err := json.NewDecoder(r.Body).Decode(req); err != nil {
		log.Printf("Could not parse request body: %s", err.Error())
		w.WriteHeader(http.StatusBadRequest)
		json.NewEncoder(w).Encode(requestResponse{Message: err.Error()})
		return nil, err
	}

	for i := range req.States {
		req.States[i].Metadata = map[string]string{metadataPartitionKey: partitionKey}
	}

	log.Printf("%v\n", *req)
	return req, nil
}
// getRequestBody reads the raw request body. On a read failure it writes a
// 400 JSON error response and returns the read error alongside whatever
// data was produced.
func getRequestBody(w http.ResponseWriter, r *http.Request) ([]byte, error) {
	data, err := io.ReadAll(r.Body)
	if err == nil {
		return data, nil
	}

	log.Printf("Could not read request body: %s", err.Error())
	w.WriteHeader(http.StatusBadRequest)
	json.NewEncoder(w).Encode(requestResponse{Message: err.Error()})
	return data, err
}
func getMetadata(values url.Values) map[string]string {
ret := make(map[string]string)
for k, v := range values {
ret[k] = v[0]
}
return ret
}
// metadata2RawQuery renders metadata as "metadata.k=v" pairs joined with
// "&". Values are not URL-escaped; the e2e inputs are controlled.
// Pair order follows map iteration and is therefore not deterministic.
func metadata2RawQuery(meta map[string]string) string {
	if len(meta) == 0 {
		return ""
	}

	pairs := make([]string, 0, len(meta))
	for k, v := range meta {
		pairs = append(pairs, "metadata."+k+"="+v)
	}
	return strings.Join(pairs, "&")
}
// httpHandler handles all APIs for HTTP calls. The {command} path variable
// selects the operation and {statestore} the target store; query parameters
// become per-request metadata. Results and errors are returned as a JSON
// requestResponse. On parse failures the parse helpers have already written
// the 400 response, so those branches return without writing again.
func httpHandler(w http.ResponseWriter, r *http.Request) {
	log.Printf("Processing request for %s", r.URL.RequestURI())
	var req *requestResponse
	var data []byte
	var err error
	res := requestResponse{}
	uri := r.URL.RequestURI()
	statusCode := http.StatusOK
	cmd := mux.Vars(r)["command"]
	statestore := mux.Vars(r)["statestore"]
	meta := getMetadata(r.URL.Query())
	switch cmd {
	case "save":
		req, err = parseRequestBody(w, r)
		if err != nil {
			return
		}
		_, err = save(req.States, statestore, meta)
		if err == nil {
			// The save call to dapr side car has returned correct status.
			// Set the status code to statusNoContent
			statusCode = http.StatusNoContent
		}
	case "load":
		data, err = getRequestBody(w, r)
		if err != nil {
			return
		}
		// load propagates the sidecar's own status code.
		statusCode, err = load(data, statestore, meta)
	case "get":
		req, err = parseRequestBody(w, r)
		if err != nil {
			return
		}
		res.States, err = getAll(req.States, statestore, meta)
	case "getbulk":
		req, err = parseRequestBody(w, r)
		if err != nil {
			return
		}
		res.States, err = getBulk(req.States, statestore)
	case "delete":
		req, err = parseRequestBody(w, r)
		if err != nil {
			return
		}
		err = deleteAll(req.States, statestore, meta)
		statusCode = http.StatusNoContent
	case "transact":
		req, err = parseRequestBody(w, r)
		if err != nil {
			return
		}
		err = executeTransaction(req.States, statestore)
	case "query":
		data, err = getRequestBody(w, r)
		if err != nil {
			return
		}
		res.States, err = executeQuery(data, statestore, meta)
	default:
		err = fmt.Errorf("invalid URI: %s", uri)
		statusCode = http.StatusBadRequest
		res.Message = err.Error()
	}
	// A late error on an otherwise-OK status is converted to a 500; an
	// already-non-OK status (bad request, load passthrough) is kept as-is.
	statusCheck := (statusCode == http.StatusOK || statusCode == http.StatusNoContent)
	if err != nil && statusCheck {
		log.Printf("Error: %v", err)
		statusCode = http.StatusInternalServerError
		res.Message = err.Error()
	}
	if !statusCheck {
		log.Printf("Error status code %v: %v", statusCode, res.Message)
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(statusCode)
	json.NewEncoder(w).Encode(res)
}
// grpcHandler handles all APIs for GRPC: it serves the same HTTP surface as
// httpHandler but performs the state operations through the Dapr gRPC
// client instead of the HTTP sidecar API.
func grpcHandler(w http.ResponseWriter, r *http.Request) {
	log.Println("Processing request for ", r.URL.RequestURI())
	var req *requestResponse
	var data []byte
	var states []daprState
	var err error
	var res requestResponse
	var response *runtimev1pb.GetBulkStateResponse
	statusCode := http.StatusOK
	cmd := mux.Vars(r)["command"]
	statestore := mux.Vars(r)["statestore"]
	meta := getMetadata(r.URL.Query())
	switch cmd {
	case "save":
		req, err = parseRequestBody(w, r)
		if err != nil {
			return
		}
		_, err = grpcClient.SaveState(context.Background(), &runtimev1pb.SaveStateRequest{
			StoreName: statestore,
			States:    daprState2StateItems(req.States, meta),
		})
		statusCode = http.StatusNoContent
		if err != nil {
			statusCode, res.Message = setErrorMessage("ExecuteSaveState", err.Error())
		}
	case "getbulk":
		req, err = parseRequestBody(w, r)
		if err != nil {
			return
		}
		response, err = grpcClient.GetBulkState(context.Background(), &runtimev1pb.GetBulkStateRequest{
			StoreName: statestore,
			Keys:      daprState2Keys(req.States),
			Metadata:  map[string]string{metadataPartitionKey: partitionKey},
		})
		if err != nil {
			statusCode, res.Message = setErrorMessage("GetBulkState", err.Error())
		}
		// NOTE(review): toDaprStates is still called when the RPC above
		// failed; response is then nil (safe with protobuf nil-receiver
		// getters, but intent should be confirmed).
		states, err = toDaprStates(response)
		if err != nil {
			statusCode, res.Message = setErrorMessage("GetBulkState", err.Error())
		}
		res.States = states
	case "get":
		req, err = parseRequestBody(w, r)
		if err != nil {
			return
		}
		states, err = getAllGRPC(req.States, statestore, meta)
		if err != nil {
			statusCode, res.Message = setErrorMessage("GetState", err.Error())
		}
		res.States = states
	case "delete":
		req, err = parseRequestBody(w, r)
		if err != nil {
			return
		}
		statusCode = http.StatusNoContent
		err = deleteAllGRPC(req.States, statestore, meta)
		if err != nil {
			statusCode, res.Message = setErrorMessage("DeleteState", err.Error())
		}
	case "transact":
		req, err = parseRequestBody(w, r)
		if err != nil {
			return
		}
		_, err = grpcClient.ExecuteStateTransaction(context.Background(), &runtimev1pb.ExecuteStateTransactionRequest{
			StoreName:  statestore,
			Operations: daprState2TransactionalStateRequest(req.States),
			Metadata:   map[string]string{metadataPartitionKey: partitionKey},
		})
		if err != nil {
			statusCode, res.Message = setErrorMessage("ExecuteStateTransaction", err.Error())
		}
	case "query":
		data, err = getRequestBody(w, r)
		if err != nil {
			return
		}
		// NOTE(review): ":=" shadows the outer err here; harmless because
		// the error is fully handled inside this case.
		resp, err := grpcClient.QueryStateAlpha1(context.Background(), &runtimev1pb.QueryStateRequest{
			StoreName: statestore,
			Query:     string(data),
			Metadata:  meta,
		})
		if err != nil {
			statusCode, res.Message = setErrorMessage("QueryState", err.Error())
		}
		if resp != nil && len(resp.GetResults()) > 0 {
			res.States = make([]daprState, 0, len(resp.GetResults()))
			for _, r := range resp.GetResults() {
				res.States = append(res.States, daprState{
					Key:   r.GetKey(),
					Value: &appState{Data: r.GetData()},
				})
			}
		}
	default:
		statusCode = http.StatusInternalServerError
		unsupportedCommandMessage := fmt.Sprintf("GRPC protocol command %s not supported", cmd)
		log.Print(unsupportedCommandMessage)
		res.Message = unsupportedCommandMessage
	}
	if statusCode != http.StatusOK && statusCode != http.StatusNoContent {
		log.Printf("Error status code %v: %v", statusCode, res.Message)
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(statusCode)
	json.NewEncoder(w).Encode(res)
}
// daprState2Keys extracts just the keys from a list of states.
func daprState2Keys(states []daprState) []string {
	keys := make([]string, 0, len(states))
	for _, s := range states {
		keys = append(keys, s.Key)
	}
	return keys
}
// toDaprStates converts a gRPC bulk-get response into this app's daprState
// slice, failing if any returned item carries an error string.
func toDaprStates(response *runtimev1pb.GetBulkStateResponse) ([]daprState, error) {
	items := response.GetItems()
	result := make([]daprState, len(items))
	for i, item := range items {
		if e := item.GetError(); e != "" {
			return nil, fmt.Errorf("%s while getting bulk state", e)
		}
		parsed, err := parseState(item.GetKey(), item.GetData())
		if err != nil {
			return nil, err
		}
		result[i] = daprState{
			Key:      item.GetKey(),
			Value:    parsed,
			Etag:     item.GetEtag(),
			Metadata: item.GetMetadata(),
		}
	}
	return result, nil
}
// deleteAllGRPC deletes the given states over gRPC, using a single
// DeleteState call for one key and DeleteBulkState otherwise. The e2e
// partition key is merged into the metadata for the single-key path; the
// bulk path gets it via daprState2StateItems.
func deleteAllGRPC(states []daprState, statestore string, meta map[string]string) error {
	if len(states) == 0 {
		return nil
	}

	if len(states) == 1 {
		// Fixed log message: previously "deleting sate for key<key>"
		// (typo plus missing separator from log.Print with two strings).
		log.Printf("deleting state for key %s", states[0].Key)
		m := map[string]string{metadataPartitionKey: partitionKey}
		for k, v := range meta {
			m[k] = v
		}
		_, err := grpcClient.DeleteState(context.Background(), &runtimev1pb.DeleteStateRequest{
			StoreName: statestore,
			Key:       states[0].Key,
			Metadata:  m,
		})
		return err
	}

	keys := make([]string, len(states))
	for i, state := range states {
		keys[i] = state.Key
	}
	// Fixed log message: previously "deleting bulk sates for keys" (typo).
	log.Printf("deleting bulk states for keys %v", keys)
	_, err := grpcClient.DeleteBulkState(context.Background(), &runtimev1pb.DeleteBulkStateRequest{
		StoreName: statestore,
		States:    daprState2StateItems(states, meta),
	})
	return err
}
// getAllGRPC fetches each of the given states from the state store through the
// Dapr gRPC API and returns the parsed key/value pairs in the same order.
func getAllGRPC(states []daprState, statestore string, meta map[string]string) ([]daprState, error) {
	reqMeta := map[string]string{metadataPartitionKey: partitionKey}
	for k, v := range meta {
		reqMeta[k] = v
	}

	out := make([]daprState, len(states))
	for i := range states {
		key := states[i].Key
		log.Printf("getting state for key %s\n", key)
		res, err := grpcClient.GetState(context.Background(), &runtimev1pb.GetStateRequest{
			StoreName: statestore,
			Key:       key,
			Metadata:  reqMeta,
		})
		if err != nil {
			return nil, err
		}
		log.Printf("found state for key %s, value is %s\n", key, res.GetData())
		parsed, err := parseState(key, res.GetData())
		if err != nil {
			return nil, err
		}
		out[i] = daprState{
			Key:   key,
			Value: parsed,
		}
	}
	return out, nil
}
// setErrorMessage logs a gRPC error for the given method and returns the HTTP
// status code (500) and message to report back to the caller.
func setErrorMessage(method, errorString string) (int, string) {
	msg := errorString
	log.Printf("GRPC %s had error %s", method, msg)
	return http.StatusInternalServerError, msg
}
// daprState2StateItems converts daprState values into gRPC StateItem messages,
// merging the given metadata with the default partition-key metadata and
// attaching an etag where one is present.
func daprState2StateItems(daprStates []daprState, meta map[string]string) []*commonv1pb.StateItem {
	mergedMeta := map[string]string{metadataPartitionKey: partitionKey}
	for k, v := range meta {
		mergedMeta[k] = v
	}

	items := make([]*commonv1pb.StateItem, len(daprStates))
	for i := range daprStates {
		s := daprStates[i]
		// Marshal error deliberately ignored, matching existing behavior.
		val, _ := json.Marshal(s.Value)
		item := &commonv1pb.StateItem{
			Key:      s.Key,
			Value:    val,
			Metadata: mergedMeta,
		}
		if s.Etag != "" {
			item.Etag = &commonv1pb.Etag{Value: s.Etag}
		}
		items[i] = item
	}
	return items
}
// daprState2TransactionalStateRequest converts daprState values into gRPC
// transactional state operations, JSON-encoding each value.
func daprState2TransactionalStateRequest(daprStates []daprState) []*runtimev1pb.TransactionalStateOperation {
	ops := make([]*runtimev1pb.TransactionalStateOperation, 0, len(daprStates))
	for _, s := range daprStates {
		// Marshal error deliberately ignored, matching existing behavior.
		val, _ := json.Marshal(s.Value)
		ops = append(ops, &runtimev1pb.TransactionalStateOperation{
			OperationType: s.OperationType,
			Request: &commonv1pb.StateItem{
				Key:   s.Key,
				Value: val,
			},
		})
	}
	return ops
}
// createStateURL builds the Dapr HTTP state URL for a single key, encoding the
// partition-key metadata (plus any extra metadata) as query parameters.
func createStateURL(key, statestore string, meta map[string]string) (string, error) {
	stateURL := fmt.Sprintf(stateURLTemplate, daprHTTPPort, statestore)
	u, err := url.Parse(stateURL)
	if err != nil {
		return "", fmt.Errorf("could not parse %s: %s", stateURL, err.Error())
	}
	u.Path = path.Join(u.Path, key)

	qs := map[string]string{metadataPartitionKey: partitionKey}
	for k, v := range meta {
		qs[k] = v
	}
	u.RawQuery = metadata2RawQuery(qs)

	return u.String(), nil
}
// createBulkStateURL builds the Dapr HTTP bulk-state URL for a state store.
func createBulkStateURL(statestore string) (string, error) {
	bulkStateURL := fmt.Sprintf(bulkStateURLTemplate, daprHTTPPort, statestore)
	u, err := url.Parse(bulkStateURL)
	if err != nil {
		return "", fmt.Errorf("could not parse %s: %s", bulkStateURL, err.Error())
	}
	return u.String(), nil
}
// etagTestHTTP exercises optimistic-concurrency (etag) behavior of a state
// store through the Dapr HTTP API:
//   - saving with the correct etag succeeds and rotates the etag
//   - saving (single and bulk) with a wrong etag fails with 409 Conflict and
//     leaves the stored value and etag untouched
//   - deleting with a wrong etag fails with 409 Conflict
// Test data is stored under random keys and deleted at the end.
func etagTestHTTP(statestore string) error {
	pkMetadata := map[string]string{metadataPartitionKey: partitionKey}

	// Use two random keys for testing
	var etags [2]string
	keys := [2]string{
		uuid.NewString(),
		uuid.NewString(),
	}

	type retrieveStateOpts struct {
		expectNotFound     bool
		expectValue        string
		expectEtagEqual    string
		expectEtagNotEqual string
	}
	// retrieveState fetches the state for keys[stateId], validates it against
	// opts, and returns the current etag.
	retrieveState := func(stateId int, opts retrieveStateOpts) (string, error) {
		value, etag, _, err := get(keys[stateId], statestore, pkMetadata)
		if err != nil {
			return "", fmt.Errorf("failed to retrieve value %d: %w", stateId, err)
		}

		if opts.expectNotFound {
			if value != nil && len(value.Data) != 0 {
				return "", fmt.Errorf("invalid value for state %d: %#v (expected empty)", stateId, value)
			}
			return "", nil
		}
		if value == nil || string(value.Data) != opts.expectValue {
			return "", fmt.Errorf("invalid value for state %d: %#v (expected: %q)", stateId, value, opts.expectValue)
		}
		if etag == "" {
			return "", fmt.Errorf("etag is empty for state %d", stateId)
		}
		if opts.expectEtagEqual != "" && etag != opts.expectEtagEqual {
			return "", fmt.Errorf("etag is invalid for state %d: %q (expected: %q)", stateId, etag, opts.expectEtagEqual)
		}
		if opts.expectEtagNotEqual != "" && etag == opts.expectEtagNotEqual {
			return "", fmt.Errorf("etag is invalid for state %d: %q (expected different value)", stateId, etag)
		}
		return etag, nil
	}

	// First, write two values
	_, err := save([]daprState{
		{Key: keys[0], Value: &appState{Data: []byte("1")}, Metadata: pkMetadata},
		{Key: keys[1], Value: &appState{Data: []byte("1")}, Metadata: pkMetadata},
	}, statestore, pkMetadata)
	if err != nil {
		return fmt.Errorf("failed to store initial values: %w", err)
	}

	// Retrieve the two values to get the etag
	etags[0], err = retrieveState(0, retrieveStateOpts{expectValue: "1"})
	if err != nil {
		return fmt.Errorf("failed to check initial value for state 0: %w", err)
	}
	etags[1], err = retrieveState(1, retrieveStateOpts{expectValue: "1"})
	if err != nil {
		return fmt.Errorf("failed to check initial value for state 1: %w", err)
	}

	// Update the first state using the correct etag
	_, err = save([]daprState{
		{Key: keys[0], Value: &appState{Data: []byte("2")}, Metadata: pkMetadata, Etag: etags[0]},
	}, statestore, pkMetadata)
	if err != nil {
		return fmt.Errorf("failed to update value 0: %w", err)
	}

	// Check the first state
	// NOTE(review): the error message below says "initial value" although this
	// checks the updated value (compare etagTestGRPC, which says "updated").
	etags[0], err = retrieveState(0, retrieveStateOpts{expectValue: "2", expectEtagNotEqual: etags[0]})
	if err != nil {
		return fmt.Errorf("failed to check initial value for state 0: %w", err)
	}

	// Updating with wrong etag should fail with 409 status code
	statusCode, _ := save([]daprState{
		{Key: keys[1], Value: &appState{Data: []byte("2")}, Metadata: pkMetadata, Etag: badEtag},
	}, statestore, pkMetadata)
	if statusCode != http.StatusConflict {
		return fmt.Errorf("expected update with invalid etag to fail with status code 409, but got: %d", statusCode)
	}

	// Value should not have changed
	_, err = retrieveState(1, retrieveStateOpts{expectValue: "1", expectEtagEqual: etags[1]})
	if err != nil {
		return fmt.Errorf("failed to check updated value for state 1: %w", err)
	}

	// Bulk update with all valid etags
	_, err = save([]daprState{
		{Key: keys[0], Value: &appState{Data: []byte("3")}, Metadata: pkMetadata, Etag: etags[0]},
		{Key: keys[1], Value: &appState{Data: []byte("3")}, Metadata: pkMetadata, Etag: etags[1]},
	}, statestore, pkMetadata)
	if err != nil {
		return fmt.Errorf("failed to update bulk values: %w", err)
	}

	// Retrieve the two values to confirm they're updated
	etags[0], err = retrieveState(0, retrieveStateOpts{expectValue: "3", expectEtagNotEqual: etags[0]})
	if err != nil {
		return fmt.Errorf("failed to check updated value for state 0: %w", err)
	}
	etags[1], err = retrieveState(1, retrieveStateOpts{expectValue: "3", expectEtagNotEqual: etags[1]})
	if err != nil {
		return fmt.Errorf("failed to check updated value for state 1: %w", err)
	}

	// Bulk update with one etag incorrect
	statusCode, _ = save([]daprState{
		{Key: keys[0], Value: &appState{Data: []byte("4")}, Metadata: pkMetadata, Etag: badEtag},
		{Key: keys[1], Value: &appState{Data: []byte("4")}, Metadata: pkMetadata, Etag: etags[1]},
	}, statestore, pkMetadata)
	if statusCode != http.StatusConflict {
		return fmt.Errorf("expected update with invalid etag to fail with status code 409, but got: %d", statusCode)
	}

	// Retrieve the two values to confirm only the second is updated
	_, err = retrieveState(0, retrieveStateOpts{expectValue: "3", expectEtagEqual: etags[0]})
	if err != nil {
		return fmt.Errorf("failed to check updated value for state 0: %w", err)
	}
	etags[1], err = retrieveState(1, retrieveStateOpts{expectValue: "4", expectEtagNotEqual: etags[1]})
	if err != nil {
		return fmt.Errorf("failed to check updated value for state 1: %w", err)
	}

	// Delete single item with incorrect etag
	statusCode, _ = delete(keys[0], statestore, pkMetadata, badEtag)
	if statusCode != http.StatusConflict {
		return fmt.Errorf("expected delete with invalid etag to fail with status code 409, but got: %d", statusCode)
	}

	// Value should not have changed
	_, err = retrieveState(0, retrieveStateOpts{expectValue: "3", expectEtagEqual: etags[0]})
	if err != nil {
		return fmt.Errorf("failed to check updated value for state 0: %w", err)
	}

	// TODO: There's no "Bulk Delete" API in HTTP right now, so we can't test that
	// Create a test here when the API is implemented
	err = deleteAll([]daprState{
		{Key: keys[0], Metadata: pkMetadata},
		{Key: keys[1], Metadata: pkMetadata},
	}, statestore, pkMetadata)
	if err != nil {
		return fmt.Errorf("failed to delete all data at the end of the test: %w", err)
	}

	return nil
}
// etagTestGRPC exercises optimistic-concurrency (etag) behavior of a state
// store through the Dapr gRPC API:
//   - saving with the correct etag succeeds and rotates the etag
//   - saving or deleting with a wrong etag fails with codes.Aborted and
//     leaves the stored value and etag untouched
//   - bulk operations with a mix of valid and invalid etags only apply the
//     valid ones
// Test data is stored under random keys and deleted at the end.
func etagTestGRPC(statestore string) error {
	pkMetadata := map[string]string{metadataPartitionKey: partitionKey}

	// Use three random keys for testing
	var etags [3]string
	keys := [3]string{
		uuid.NewString(),
		uuid.NewString(),
		uuid.NewString(),
	}

	type retrieveStateOpts struct {
		expectNotFound     bool
		expectValue        string
		expectEtagEqual    string
		expectEtagNotEqual string
	}
	// retrieveState fetches the state for keys[stateId] via gRPC, validates it
	// against opts, and returns the current etag.
	retrieveState := func(stateId int, opts retrieveStateOpts) (string, error) {
		res, err := grpcClient.GetState(context.Background(), &runtimev1pb.GetStateRequest{
			StoreName: statestore,
			Key:       keys[stateId],
			Metadata:  pkMetadata,
		})
		if err != nil {
			return "", fmt.Errorf("failed to retrieve value %d: %w", stateId, err)
		}

		if opts.expectNotFound {
			if len(res.GetData()) != 0 {
				return "", fmt.Errorf("invalid value for state %d: %q (expected empty)", stateId, string(res.GetData()))
			}
			return "", nil
		}
		if len(res.GetData()) == 0 || string(res.GetData()) != opts.expectValue {
			return "", fmt.Errorf("invalid value for state %d: %q (expected: %q)", stateId, string(res.GetData()), opts.expectValue)
		}
		if res.GetEtag() == "" {
			return "", fmt.Errorf("etag is empty for state %d", stateId)
		}
		if opts.expectEtagEqual != "" && res.GetEtag() != opts.expectEtagEqual {
			return "", fmt.Errorf("etag is invalid for state %d: %q (expected: %q)", stateId, res.GetEtag(), opts.expectEtagEqual)
		}
		if opts.expectEtagNotEqual != "" && res.GetEtag() == opts.expectEtagNotEqual {
			return "", fmt.Errorf("etag is invalid for state %d: %q (expected different value)", stateId, res.GetEtag())
		}
		return res.GetEtag(), nil
	}

	// First, write three values
	_, err := grpcClient.SaveState(context.Background(), &runtimev1pb.SaveStateRequest{
		StoreName: statestore,
		States: []*commonv1pb.StateItem{
			{Key: keys[0], Value: []byte("1"), Metadata: pkMetadata},
			{Key: keys[1], Value: []byte("1"), Metadata: pkMetadata},
			{Key: keys[2], Value: []byte("1"), Metadata: pkMetadata},
		},
	})
	if err != nil {
		return fmt.Errorf("failed to store initial values: %w", err)
	}

	// Retrieve the three values to get the etags
	etags[0], err = retrieveState(0, retrieveStateOpts{expectValue: "1"})
	if err != nil {
		return fmt.Errorf("failed to check initial value for state 0: %w", err)
	}
	etags[1], err = retrieveState(1, retrieveStateOpts{expectValue: "1"})
	if err != nil {
		return fmt.Errorf("failed to check initial value for state 1: %w", err)
	}
	etags[2], err = retrieveState(2, retrieveStateOpts{expectValue: "1"})
	if err != nil {
		return fmt.Errorf("failed to check initial value for state 2: %w", err)
	}

	// Update the first state using the correct etag
	_, err = grpcClient.SaveState(context.Background(), &runtimev1pb.SaveStateRequest{
		StoreName: statestore,
		States: []*commonv1pb.StateItem{
			{Key: keys[0], Value: []byte("2"), Metadata: pkMetadata, Etag: &commonv1pb.Etag{Value: etags[0]}},
		},
	})
	if err != nil {
		return fmt.Errorf("failed to update value 0: %w", err)
	}

	// Check the first state
	etags[0], err = retrieveState(0, retrieveStateOpts{expectValue: "2", expectEtagNotEqual: etags[0]})
	if err != nil {
		return fmt.Errorf("failed to check updated value for state 0: %w", err)
	}

	// Updating with wrong etag should fail with an Aborted gRPC status
	_, err = grpcClient.SaveState(context.Background(), &runtimev1pb.SaveStateRequest{
		StoreName: statestore,
		States: []*commonv1pb.StateItem{
			{Key: keys[1], Value: []byte("2"), Metadata: pkMetadata, Etag: &commonv1pb.Etag{Value: badEtag}},
		},
	})
	if status.Code(err) != codes.Aborted {
		return fmt.Errorf("expected gRPC error with code Aborted, but got err: %v", err)
	}

	// Value should not have changed
	_, err = retrieveState(1, retrieveStateOpts{expectValue: "1", expectEtagEqual: etags[1]})
	if err != nil {
		return fmt.Errorf("failed to check updated value for state 1: %w", err)
	}

	// Bulk update with all valid etags
	_, err = grpcClient.SaveState(context.Background(), &runtimev1pb.SaveStateRequest{
		StoreName: statestore,
		States: []*commonv1pb.StateItem{
			{Key: keys[0], Value: []byte("3"), Metadata: pkMetadata, Etag: &commonv1pb.Etag{Value: etags[0]}},
			{Key: keys[1], Value: []byte("3"), Metadata: pkMetadata, Etag: &commonv1pb.Etag{Value: etags[1]}},
			{Key: keys[2], Value: []byte("3"), Metadata: pkMetadata, Etag: &commonv1pb.Etag{Value: etags[2]}},
		},
	})
	if err != nil {
		return fmt.Errorf("failed to update bulk values: %w", err)
	}

	// Retrieve the three values to confirm they're updated
	etags[0], err = retrieveState(0, retrieveStateOpts{expectValue: "3", expectEtagNotEqual: etags[0]})
	if err != nil {
		return fmt.Errorf("failed to check updated value for state 0: %w", err)
	}
	etags[1], err = retrieveState(1, retrieveStateOpts{expectValue: "3", expectEtagNotEqual: etags[1]})
	if err != nil {
		return fmt.Errorf("failed to check updated value for state 1: %w", err)
	}
	etags[2], err = retrieveState(2, retrieveStateOpts{expectValue: "3", expectEtagNotEqual: etags[2]})
	if err != nil {
		return fmt.Errorf("failed to check updated value for state 2: %w", err)
	}

	// Bulk update with one etag incorrect
	_, err = grpcClient.SaveState(context.Background(), &runtimev1pb.SaveStateRequest{
		StoreName: statestore,
		States: []*commonv1pb.StateItem{
			{Key: keys[0], Value: []byte("4"), Metadata: pkMetadata, Etag: &commonv1pb.Etag{Value: badEtag}},
			{Key: keys[1], Value: []byte("4"), Metadata: pkMetadata, Etag: &commonv1pb.Etag{Value: etags[1]}},
			{Key: keys[2], Value: []byte("4"), Metadata: pkMetadata, Etag: &commonv1pb.Etag{Value: etags[2]}},
		},
	})
	if status.Code(err) != codes.Aborted {
		return fmt.Errorf("expected gRPC error with code Aborted, but got err: %v", err)
	}

	// Retrieve the three values to confirm only the last two are updated
	_, err = retrieveState(0, retrieveStateOpts{expectValue: "3", expectEtagEqual: etags[0]})
	if err != nil {
		return fmt.Errorf("failed to check updated value for state 0: %w", err)
	}
	etags[1], err = retrieveState(1, retrieveStateOpts{expectValue: "4", expectEtagNotEqual: etags[1]})
	if err != nil {
		return fmt.Errorf("failed to check updated value for state 1: %w", err)
	}
	etags[2], err = retrieveState(2, retrieveStateOpts{expectValue: "4", expectEtagNotEqual: etags[2]})
	if err != nil {
		return fmt.Errorf("failed to check updated value for state 2: %w", err)
	}

	// Delete single item with incorrect etag
	_, err = grpcClient.DeleteState(context.Background(), &runtimev1pb.DeleteStateRequest{
		StoreName: statestore,
		Key:       keys[0],
		Metadata:  pkMetadata,
		Etag:      &commonv1pb.Etag{Value: badEtag},
	})
	if status.Code(err) != codes.Aborted {
		return fmt.Errorf("expected gRPC error with code Aborted, but got err: %v", err)
	}

	// Value should not have changed
	_, err = retrieveState(0, retrieveStateOpts{expectValue: "3", expectEtagEqual: etags[0]})
	if err != nil {
		return fmt.Errorf("failed to check updated value for state 0: %w", err)
	}

	// Bulk delete with two etags incorrect; only the entry with the valid
	// etag (keys[2]) is deleted.
	_, err = grpcClient.DeleteBulkState(context.Background(), &runtimev1pb.DeleteBulkStateRequest{
		StoreName: statestore,
		States: []*commonv1pb.StateItem{
			{Key: keys[0], Metadata: pkMetadata, Etag: &commonv1pb.Etag{Value: badEtag}},
			{Key: keys[1], Metadata: pkMetadata, Etag: &commonv1pb.Etag{Value: badEtag}},
			{Key: keys[2], Metadata: pkMetadata, Etag: &commonv1pb.Etag{Value: etags[2]}},
		},
	})
	if status.Code(err) != codes.Aborted {
		return fmt.Errorf("expected gRPC error with code Aborted, but got err: %v", err)
	}

	// Validate items 0 and 1 are the only ones still existing
	_, err = retrieveState(0, retrieveStateOpts{expectValue: "3", expectEtagEqual: etags[0]})
	if err != nil {
		return fmt.Errorf("failed to check value for state 0 after not deleting it: %w", err)
	}
	_, err = retrieveState(1, retrieveStateOpts{expectValue: "4", expectEtagEqual: etags[1]})
	if err != nil {
		return fmt.Errorf("failed to check value for state 1 after not deleting it: %w", err)
	}
	_, err = retrieveState(2, retrieveStateOpts{expectNotFound: true})
	if err != nil {
		return fmt.Errorf("failed to check value for state 2 after deleting it: %w", err)
	}

	// Delete the remaining items
	_, err = grpcClient.DeleteBulkState(context.Background(), &runtimev1pb.DeleteBulkStateRequest{
		StoreName: statestore,
		States: []*commonv1pb.StateItem{
			{Key: keys[0], Metadata: pkMetadata, Etag: &commonv1pb.Etag{Value: etags[0]}},
			{Key: keys[1], Metadata: pkMetadata, Etag: &commonv1pb.Etag{Value: etags[1]}},
		},
	})
	if err != nil {
		return fmt.Errorf("failed to delete bulk values: %w", err)
	}

	return nil
}
// testFnHandler wraps a test function that returns an error into an HTTP
// handler: an error becomes a 500 with a JSON body, success a 204.
func testFnHandler(testFn func(statestore string) error) func(w http.ResponseWriter, r *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		log.Printf("Processing request for %s", r.URL.RequestURI())

		if err := testFn(mux.Vars(r)["statestore"]); err != nil {
			w.Header().Add("content-type", "application/json")
			w.WriteHeader(http.StatusInternalServerError)
			json.NewEncoder(w).Encode(map[string]any{
				"error": err.Error(),
			})
			return
		}
		w.WriteHeader(http.StatusNoContent)
	}
}
// appRouter initializes the restful API router for the state test app.
func appRouter() http.Handler {
	r := mux.NewRouter().StrictSlash(true)

	// Log requests and their processing time
	r.Use(utils.LoggerMiddleware)

	r.HandleFunc("/", indexHandler).Methods("GET")
	r.HandleFunc("/test/http/{command}/{statestore}", httpHandler).Methods("POST")
	r.HandleFunc("/test/grpc/{command}/{statestore}", grpcHandler).Methods("POST")
	r.HandleFunc("/test-etag/http/{statestore}", testFnHandler(etagTestHTTP)).Methods("POST")
	r.HandleFunc("/test-etag/grpc/{statestore}", testFnHandler(etagTestGRPC)).Methods("POST")
	r.Use(mux.CORSMethodMiddleware(r))

	return r
}
// main connects the package-level gRPC client to the Dapr sidecar and starts
// the test app's HTTP server (HTTP/2 cleartext allowed, TLS disabled).
func main() {
	grpcClient = utils.GetGRPCClient(daprGRPCPort)

	log.Printf("State App - listening on http://localhost:%d", appPort)
	log.Printf("State endpoint - to be saved at %s", fmt.Sprintf(stateURLTemplate, daprHTTPPort, "statestore"))
	utils.StartServer(appPort, appRouter, true, false)
}
|
mikeee/dapr
|
tests/apps/stateapp/app.go
|
GO
|
mit
| 39,672 |
# Postgres-backed Dapr state store component used by the state e2e tests
# (query API tests). Test-only credentials for the in-cluster Postgres.
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: querystatestore
spec:
  type: state.postgres
  version: v1
  metadata:
  - name: connectionString
    value: "host=dapr-postgres-postgresql.dapr-tests.svc.cluster.local user=postgres password=example port=5432 connect_timeout=10 database=dapr_test"
  - name: table
    value: querytable
|
mikeee/dapr
|
tests/apps/stateapp/components/postgres.yml
|
YAML
|
mit
| 350 |
# Redis-backed Dapr state store component for running the state test app
# locally (no password).
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: statestore
spec:
  type: state.redis
  version: v1
  metadata:
  - name: redisHost
    value: localhost:6379
  - name: redisPassword
    value: ""
mikeee/dapr
|
tests/apps/stateapp/components/redis.yaml
|
YAML
|
mit
| 210 |
# Redis-backed Dapr pub/sub component for running the state test app locally
# (no password).
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: messagebus
spec:
  type: pubsub.redis
  version: v1
  metadata:
  - name: redisHost
    value: localhost:6379
  - name: redisPassword
    value: ""
mikeee/dapr
|
tests/apps/stateapp/components/redis_messagebus.yaml
|
YAML
|
mit
| 211 |
# In e2e test, this will not be used to deploy the app to test cluster.
# This is created for testing purpose in order to deploy this app using kubectl
# before writing e2e test.
kind: Service
apiVersion: v1
metadata:
  name: stateapp
  labels:
    testapp: stateapp
spec:
  selector:
    testapp: stateapp
  ports:
  - protocol: TCP
    port: 80
    # Must match the container port and the dapr.io/app-port annotation below.
    targetPort: 3000
  type: LoadBalancer
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: stateapp
  labels:
    testapp: stateapp
spec:
  replicas: 1
  selector:
    matchLabels:
      testapp: stateapp
  template:
    metadata:
      labels:
        testapp: stateapp
      annotations:
        # Inject the Dapr sidecar and point it at the app's HTTP port.
        dapr.io/enabled: "true"
        dapr.io/app-id: "stateapp"
        dapr.io/app-port: "3000"
    spec:
      containers:
      - name: stateapp
        # Replace YOUR_DOCKER_ALIAS with your registry namespace before applying.
        image: docker.io/YOUR_DOCKER_ALIAS/e2e-stateapp:dev
        ports:
        - containerPort: 3000
        imagePullPolicy: Always
|
mikeee/dapr
|
tests/apps/stateapp/service.yaml
|
YAML
|
mit
| 955 |
../utils/*.go
|
mikeee/dapr
|
tests/apps/tracingapp/.cache-include
|
none
|
mit
| 13 |
/*
Copyright 2022 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"encoding/json"
"errors"
"fmt"
"log"
"net/http"
"os"
"strconv"
"time"
"github.com/PaesslerAG/jsonpath"
"github.com/google/uuid"
"github.com/gorilla/mux"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/zipkin"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/sdk/resource"
sdk_trace "go.opentelemetry.io/otel/sdk/trace"
semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
"go.opentelemetry.io/otel/trace"
"github.com/dapr/dapr/tests/apps/utils"
)
var (
appPort = 3000
daprHTTPPort = 3500
httpClient = utils.NewHTTPClient()
zipkinEndpoint = "http://dapr-zipkin:9411"
serviceName = "tracingapp"
tracer trace.Tracer
propagators propagation.TextMapPropagator
)
const (
jsonContentType = "application/json"
zipkinSpans = "/api/v2/spans"
zipkinTraces = "/api/v2/traces"
expectedDaprChildSpanNameTemplate = "calllocal/%s/invoke/something"
)
// init overrides the default ports and endpoints from environment variables
// when they are set.
func init() {
	if p := os.Getenv("DAPR_HTTP_PORT"); p != "" && p != "0" {
		daprHTTPPort, _ = strconv.Atoi(p)
	}
	if p := os.Getenv("PORT"); p != "" && p != "0" {
		appPort, _ = strconv.Atoi(p)
	}
	if p := os.Getenv("ZIPKIN_ENDPOINT"); p != "" {
		zipkinEndpoint = p
	}
	if p := os.Getenv("SERVICE_NAME"); p != "" {
		serviceName = p
	}
}
// appResponse is the JSON payload returned by this app's HTTP handlers.
type appResponse struct {
	// SpanName is the name of the tracing span created for the request, if any.
	SpanName *string `json:"spanName,omitempty"`
	// Message is a human-readable status or error message.
	Message string `json:"message,omitempty"`
}
// indexHandler is the handler for the root path; it always responds 200 OK.
func indexHandler(w http.ResponseWriter, r *http.Request) {
	log.Println("indexHandler is called")
	w.WriteHeader(http.StatusOK)
	resp := appResponse{Message: "OK"}
	json.NewEncoder(w).Encode(resp)
}
// triggerInvoke is the handler for end-to-end to start the invoke stack.
// It starts a new tracing span (named with a random UUID), performs a Dapr
// service invocation to the app ID given in the "appId" query parameter while
// propagating the trace context, and reports the span name and outcome.
func triggerInvoke(w http.ResponseWriter, r *http.Request) {
	log.Printf("Processing %s %s", r.Method, r.URL.RequestURI())

	// Renamed from "uuid" to avoid shadowing the imported uuid package.
	uuidObj, _ := uuid.NewRandom()
	spanName := uuidObj.String()

	ctx := propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header))
	newCtx, span := tracer.Start(ctx, spanName)
	defer span.End()

	query := r.URL.Query()
	appID := query.Get("appId")

	url := fmt.Sprintf("http://127.0.0.1:%d/v1.0/invoke/%s/method/invoke/something", daprHTTPPort, appID)
	/* #nosec */
	req, _ := http.NewRequest(http.MethodPost, url, nil)
	req = req.WithContext(newCtx)
	// Propagate the trace context on the outgoing request.
	hc := propagation.HeaderCarrier(req.Header)
	propagators.Inject(newCtx, hc)
	req.Header.Add("Content-Type", jsonContentType)

	log.Printf("span's name is %s and invoke url is %s\n", spanName, url)

	res, err := httpClient.Do(req)
	if res != nil {
		defer res.Body.Close()
	}
	if err != nil {
		log.Println(err)
		w.WriteHeader(http.StatusInternalServerError)
		json.NewEncoder(w).Encode(appResponse{
			SpanName: &spanName,
			Message:  err.Error(),
		})
		return
	}
	if res.StatusCode != http.StatusOK {
		w.WriteHeader(http.StatusExpectationFailed)
		json.NewEncoder(w).Encode(appResponse{
			SpanName: &spanName,
			Message:  fmt.Sprintf("expected status code %d, got %d", http.StatusOK, res.StatusCode),
		})
		// Bug fix: return here so the success payload below is not appended
		// as a second JSON body after the failure response.
		return
	}
	json.NewEncoder(w).Encode(appResponse{
		SpanName: &spanName,
		Message:  "OK",
	})
}
// invoke is the handler for end-to-end to invoke. It continues the incoming
// trace context in a child span, simulates work, and responds 200 OK.
func invoke(w http.ResponseWriter, r *http.Request) {
	log.Printf("Processing %s %s", r.Method, r.URL.RequestURI())

	ctx := propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header))
	_, span := tracer.Start(ctx, "invokedMethod")
	defer span.End()

	// Pretend to do work
	time.Sleep(time.Second)

	w.WriteHeader(http.StatusOK)
	resp := appResponse{Message: "OK"}
	json.NewEncoder(w).Encode(resp)
}
// validate is the handler to validate the tracing span. It delegates to
// doValidate and reports the result as JSON.
func validate(w http.ResponseWriter, r *http.Request) {
	if err := doValidate(w, r); err != nil {
		log.Println(err)
		w.WriteHeader(http.StatusExpectationFailed)
		json.NewEncoder(w).Encode(appResponse{Message: err.Error()})
		// Bug fix: return here so the "OK" payload below is not appended as a
		// second JSON body after the error response.
		return
	}
	json.NewEncoder(w).Encode(appResponse{Message: "OK"})
}
// doValidate fetches the recorded traces from Zipkin and verifies that the
// span named in the "spanName" query parameter has exactly one child span
// whose name matches the expected Dapr service-invocation span name.
func doValidate(w http.ResponseWriter, r *http.Request) error {
	query := r.URL.Query()
	mainSpanName := query.Get("spanName")

	resp, err := http.Get(zipkinEndpoint + zipkinTraces)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// Bug fix: the decode error was silently ignored, which surfaced later as
	// a confusing "no value found for json path" error.
	var v interface{}
	if err := json.NewDecoder(resp.Body).Decode(&v); err != nil {
		return fmt.Errorf("could not decode Zipkin traces response: %w", err)
	}

	mainSpanID, err := findUniqueValueFromJSONPath("$..[?(@.name==\""+mainSpanName+"\")].id", v)
	if err != nil {
		return err
	}
	if mainSpanID == "" {
		return errors.New("empty span id found for span name " + mainSpanName)
	}
	log.Printf("Found main span with name %s and id=%s", mainSpanName, mainSpanID)

	childSpanName, err := findUniqueValueFromJSONPath("$..[?(@.parentId==\""+mainSpanID+"\")].name", v)
	if err != nil {
		return err
	}
	remoteServiceName, err := findUniqueValueFromJSONPath("$..[?(@.parentId==\""+mainSpanID+"\")].remoteEndpoint.serviceName", v)
	if err != nil {
		return err
	}

	expectedDaprChildSpanName := fmt.Sprintf(expectedDaprChildSpanNameTemplate, remoteServiceName)
	if childSpanName != expectedDaprChildSpanName {
		return errors.New("child span name is not correct, expected " + expectedDaprChildSpanName + ", actual " + childSpanName)
	}

	log.Printf("Tracing is correct for span with name=%s", mainSpanName)
	return nil
}
// findUniqueValueFromJSONPath evaluates the given JSON path against v and
// returns the single matching value as a string; it is an error for the path
// to match zero or more than one value.
func findUniqueValueFromJSONPath(jsonPath string, v interface{}) (string, error) {
	res, err := jsonpath.Get(jsonPath, v)
	if err != nil {
		return "", err
	}
	matches := res.([]interface{})
	log.Printf("%v", matches)
	switch len(matches) {
	case 0:
		return "", errors.New("no value found for json path " + jsonPath)
	case 1:
		return fmt.Sprintf("%v", matches[0]), nil
	default:
		return "", errors.New("more than one value found for json path " + jsonPath)
	}
}
// appRouter initializes the restful API router for the tracing test app.
func appRouter() http.Handler {
	r := mux.NewRouter().StrictSlash(true)

	// Log requests and their processing time
	r.Use(utils.LoggerMiddleware)

	r.HandleFunc("/", indexHandler).Methods("GET")
	r.HandleFunc("/triggerInvoke", triggerInvoke).Methods("POST")
	r.HandleFunc("/invoke/something", invoke).Methods("POST")
	r.HandleFunc("/validate", validate).Methods("POST", "GET")
	r.Use(mux.CORSMethodMiddleware(r))

	return r
}
// main configures the OpenTelemetry tracer provider with a Zipkin exporter,
// stores the global tracer/propagator used by the handlers, and starts the
// app's HTTP server.
func main() {
	exporter, err := zipkin.New(zipkinEndpoint + zipkinSpans)
	if err != nil {
		log.Fatalf("failed to create exporter: %v", err)
	}

	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceNameKey.String(serviceName),
	)
	tp := sdk_trace.NewTracerProvider(
		sdk_trace.WithBatcher(exporter),
		sdk_trace.WithResource(res),
	)
	// Flush buffered spans on exit.
	// NOTE(review): StartServer blocks; this deferred Shutdown only runs if
	// StartServer returns normally (not on log.Fatal paths).
	defer func() {
		if err := tp.Shutdown(context.Background()); err != nil {
			log.Fatal(err)
		}
	}()
	otel.SetTracerProvider(tp)
	otel.SetTextMapPropagator(propagation.TraceContext{})
	propagators = otel.GetTextMapPropagator()
	tracer = otel.Tracer(serviceName)

	log.Printf("Tracing App - listening on http://localhost:%d", appPort)
	utils.StartServer(appPort, appRouter, true, false)
}
|
mikeee/dapr
|
tests/apps/tracingapp/app.go
|
GO
|
mit
| 7,457 |
/*
Copyright 2022 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"fmt"
"log"
"os"
"strconv"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
runtimev1pb "github.com/dapr/dapr/pkg/proto/runtime/v1"
)
// GetGRPCClient returns a gRPC client to connect to Dapr
// GetGRPCClient returns a gRPC client to connect to Dapr.
// If daprPort is 0, the port is read from the DAPR_GRPC_PORT env var.
// Connection attempts are retried up to 10 times, 5 seconds apart; if every
// attempt fails the function panics.
func GetGRPCClient(daprPort int) runtimev1pb.DaprClient {
	if daprPort == 0 {
		if s, _ := os.LookupEnv("DAPR_GRPC_PORT"); s != "" {
			daprPort, _ = strconv.Atoi(s)
		}
	}

	url := fmt.Sprintf("localhost:%d", daprPort)
	log.Printf("Connecting to dapr using url %s", url)

	var grpcConn *grpc.ClientConn
	var err error
	start := time.Now()
	for retries := 10; retries > 0; retries-- {
		grpcConn, err = grpc.Dial(url,
			grpc.WithTransportCredentials(insecure.NewCredentials()),
			grpc.WithBlock(),
		)
		if err == nil {
			break
		}
		log.Printf("Could not connect to dapr: %v, retrying...", err)
		time.Sleep(5 * time.Second)
	}
	// Bug fix: the previous "if retries == 0" check inside the loop was
	// unreachable (the loop condition is retries > 0), so exhausting all
	// retries fell through with a nil connection. Fail loudly here instead.
	if grpcConn == nil {
		log.Printf("Could not connect to dapr: %v", err)
		log.Panic(err)
	}

	elapsed := time.Since(start)
	log.Printf("gRPC connect elapsed: %v", elapsed)
	return runtimev1pb.NewDaprClient(grpcConn)
}
|
mikeee/dapr
|
tests/apps/utils/grpc.go
|
GO
|
mit
| 1,671 |
/*
Copyright 2022 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"os"
"strconv"
"strings"
)
// IsTruthy reports whether a string is a truthy value.
// Truthy values are "y", "yes", "true", "t", "on", "1" (case-insensitive,
// ignoring surrounding whitespace); everything else is false.
func IsTruthy(val string) bool {
	normalized := strings.ToLower(strings.TrimSpace(val))
	for _, truthy := range []string{"y", "yes", "true", "t", "on", "1"} {
		if normalized == truthy {
			return true
		}
	}
	return false
}
// PortFromEnv returns the port read from the given env var, or defaultPort
// when the variable is unset, empty, "0", or not a positive integer.
func PortFromEnv(envName string, defaultPort int) (res int) {
	if p := os.Getenv(envName); p != "" && p != "0" {
		res, _ = strconv.Atoi(p)
	}
	if res <= 0 {
		return defaultPort
	}
	return res
}
|
mikeee/dapr
|
tests/apps/utils/helpers.go
|
GO
|
mit
| 1,204 |
/*
Copyright 2022 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"context"
"net"
"net/http"
"time"
)
// NewHTTPClient returns a HTTP client configured for our tests: 5s dial and
// TLS-handshake timeouts with a 30s overall request timeout.
func NewHTTPClient() *http.Client {
	return &http.Client{ //nolint:exhaustivestruct
		Timeout: 30 * time.Second,
		Transport: &http.Transport{ //nolint:exhaustivestruct
			DialContext: (&net.Dialer{ //nolint:exhaustivestruct
				Timeout: 5 * time.Second,
			}).DialContext,
			TLSHandshakeTimeout: 5 * time.Second,
		},
	}
}
// NewHTTPClientForSocket returns a HTTP client whose connections are dialed
// over the Unix Domain Socket at socketAddr, ignoring the request's host.
func NewHTTPClientForSocket(socketAddr string) *http.Client {
	dial := func(_ context.Context, _, _ string) (net.Conn, error) {
		return net.Dial("unix", socketAddr)
	}
	return &http.Client{
		Transport: &http.Transport{DialContext: dial},
	}
}
|
mikeee/dapr
|
tests/apps/utils/http-client.go
|
GO
|
mit
| 1,397 |
/*
Copyright 2022 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"context"
"fmt"
"log"
"net"
"net/http"
"os"
"os/signal"
"syscall"
"time"
"golang.org/x/net/http2"
"golang.org/x/net/http2/h2c"
)
const (
tlsCertEnvKey = "DAPR_TESTS_TLS_CERT"
tlsKeyEnvKey = "DAPR_TESTS_TLS_KEY"
)
// StartServer starts a HTTP or HTTP2 server
//
// It listens on the given TCP port on all interfaces and serves the handler
// returned by appRouter. HTTP/2 Cleartext (h2c) is used only when both
// allowHTTP2 is true and the DAPR_TESTS_HTTP2 env var is truthy. When
// enableTLS is true, the cert/key file paths are read from the
// DAPR_TESTS_TLS_CERT and DAPR_TESTS_TLS_KEY env vars.
//
// The call blocks until a SIGTERM/SIGINT is received, then performs a
// graceful shutdown with a 1-second grace period and returns. Any serve
// error other than http.ErrServerClosed is fatal.
func StartServer(port int, appRouter func() http.Handler, allowHTTP2 bool, enableTLS bool) {
	// HTTP/2 is allowed only if the DAPR_TESTS_HTTP2 env var is set
	allowHTTP2 = allowHTTP2 && IsTruthy(os.Getenv("DAPR_TESTS_HTTP2"))
	// Optionally log every connection state transition for debugging.
	logConnState := IsTruthy(os.Getenv("DAPR_TESTS_LOG_CONNSTATE"))
	// Create a listener
	addr := fmt.Sprintf(":%d", port)
	ln, err := net.Listen("tcp", addr)
	if err != nil {
		log.Fatalf("Failed to create listener: %v", err)
	}
	var server *http.Server
	if allowHTTP2 {
		// Create a server capable of supporting HTTP2 Cleartext connections
		// Also supports HTTP1.1 and upgrades from HTTP1.1 to HTTP2
		h2s := &http2.Server{}
		//nolint:gosec
		server = &http.Server{
			Addr:    addr,
			Handler: h2c.NewHandler(appRouter(), h2s),
			ConnState: func(c net.Conn, cs http.ConnState) {
				if logConnState {
					log.Printf("ConnState changed: %s -> %s state: %s (HTTP2)", c.RemoteAddr(), c.LocalAddr(), cs)
				}
			},
		}
	} else {
		//nolint:gosec
		server = &http.Server{
			Addr:    addr,
			Handler: appRouter(),
			ConnState: func(c net.Conn, cs http.ConnState) {
				if logConnState {
					log.Printf("ConnState changed: %s -> %s state: %s", c.RemoteAddr(), c.LocalAddr(), cs)
				}
			},
		}
	}
	// Resolve TLS material up-front so a misconfiguration fails fast,
	// before the server starts accepting connections.
	var certFile, keyFile string
	if enableTLS {
		certFile, keyFile, err = getTLSCertAndKey()
		if err != nil {
			log.Fatalf("Failed to get TLS cert and key: %v", err)
		}
	}
	// Stop the server when we get a termination signal
	stopCh := make(chan os.Signal, 1)
	// SIGKILL cannot actually be trapped; listing it here is harmless, which
	// is why the staticcheck warning is suppressed.
	signal.Notify(stopCh, syscall.SIGKILL, syscall.SIGTERM, syscall.SIGINT) //nolint:staticcheck
	go func() {
		// Wait for cancelation signal
		<-stopCh
		log.Println("Shutdown signal received")
		// Give in-flight requests up to 1 second to complete.
		ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
		defer cancel()
		server.Shutdown(ctx)
	}()
	// Blocking call
	if enableTLS {
		err = server.ServeTLS(ln, certFile, keyFile)
	} else {
		err = server.Serve(ln)
	}
	// ErrServerClosed is the expected result of a graceful Shutdown.
	if err != http.ErrServerClosed {
		log.Fatalf("Failed to run server: %v", err)
	}
	log.Println("Server shut down")
}
// getTLSCertAndKey returns the file paths of the TLS certificate and key,
// read from the DAPR_TESTS_TLS_CERT and DAPR_TESTS_TLS_KEY environment
// variables. An error is returned if either variable is unset.
func getTLSCertAndKey() (string, string, error) {
	var (
		cert, key string
		ok        bool
	)
	if cert, ok = os.LookupEnv(tlsCertEnvKey); !ok {
		return "", "", fmt.Errorf("%s is not set", tlsCertEnvKey)
	}
	if key, ok = os.LookupEnv(tlsKeyEnvKey); !ok {
		return "", "", fmt.Errorf("%s is not set", tlsKeyEnvKey)
	}
	return cert, key, nil
}
|
mikeee/dapr
|
tests/apps/utils/http-server.go
|
GO
|
mit
| 3,211 |
/*
Copyright 2022 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"context"
"log"
"net/http"
"time"
"github.com/google/uuid"
)
// LoggerMiddleware returns a middleware for gorilla/mux that logs all requests and processing times
func LoggerMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Resolve the request ID: query string first, then header, and
		// finally a generated one (prefixed "m-" to mark it as minted here).
		reqID := r.URL.Query().Get("reqid")
		if reqID == "" {
			reqID = r.Header.Get("x-daprtest-reqid")
		}
		if reqID == "" {
			reqID = "m-" + uuid.New().String()
		}
		ctx := context.WithValue(r.Context(), "reqid", reqID) //nolint:staticcheck

		// Build the logged URL: path plus the encoded query string, if any.
		target := r.URL.Path
		if enc := r.URL.Query().Encode(); enc != "" {
			target += "?" + enc
		}
		log.Printf("Received request %s %s (source=%s, reqID=%s)", r.Method, target, r.RemoteAddr, reqID)

		// Process the request and report how long it took.
		begin := time.Now()
		next.ServeHTTP(w, r.WithContext(ctx))
		log.Printf("Request %s: completed in %s", reqID, time.Since(begin))
	})
}
|
mikeee/dapr
|
tests/apps/utils/logger-middleware.go
|
GO
|
mit
| 1,523 |
/*
Copyright 2022 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This utility adapted from https://go.dev/src/crypto/tls/generate_cert.go
package utils
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"log"
"math/big"
"net"
"os"
"path"
"strings"
"time"
)
const (
// clockSkew is the margin of error for checking whether a certificate is valid.
clockSkew = time.Minute * 5
)
// GenerateTLSCertAndKey generates a self-signed X.509 certificate for a TLS server.
// Outputs to 'cert.pem' and 'key.pem' and will overwrite existing files.
//
// host: Comma-separated hostnames and IPs to generate a certificate for
// validFrom: The time the certificate is valid from
// validFor: The duration the certificate is valid for
// directory: Path to write the files to
func GenerateTLSCertAndKey(host string, validFrom time.Time, validFor time.Duration, directory string) error {
// *********************
// Generate private key
// *********************
tlsKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
return err
}
b, err := x509.MarshalPKCS8PrivateKey(tlsKey)
if err != nil {
log.Printf("Unable to marshal ECDSA private key: %v", err)
return err
}
if err = bytesToPemFile(path.Join(directory, "key.pem"), "PRIVATE KEY", b); err != nil {
log.Printf("Unable to write key.pem: %v", err)
return err
}
// *********************
// Generate certificate
// *********************
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
log.Fatalf("failed to generate serial number: %s", err)
}
certTemplate := x509.Certificate{
SerialNumber: serialNumber,
Subject: pkix.Name{
Organization: []string{"Dapr"},
},
NotBefore: validFrom.Add(-clockSkew),
NotAfter: validFrom.Add(validFor).Add(clockSkew),
KeyUsage: x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
BasicConstraintsValid: true,
IsCA: true,
}
hosts := strings.Split(host, ",")
for _, h := range hosts {
if ip := net.ParseIP(h); ip != nil {
certTemplate.IPAddresses = append(certTemplate.IPAddresses, ip)
} else {
certTemplate.DNSNames = append(certTemplate.DNSNames, h)
}
}
certBytes, err := x509.CreateCertificate(rand.Reader, &certTemplate, &certTemplate, &tlsKey.PublicKey, tlsKey)
if err != nil {
log.Printf("Unable to create certificate: %v", err)
return err
}
if err := bytesToPemFile(path.Join(directory, "cert.pem"), "CERTIFICATE", certBytes); err != nil {
log.Printf("Unable to write cert.pem: %v", err)
return err
}
return nil
}
func bytesToPemFile(filename string, pemBlockType string, bytes []byte) error {
f, err := os.Create(filename)
if err != nil {
return err
}
if err := pem.Encode(f, &pem.Block{Type: pemBlockType, Bytes: bytes}); err != nil {
return err
}
return f.Close()
}
|
mikeee/dapr
|
tests/apps/utils/tls.go
|
GO
|
mit
| 3,526 |
obj/
bin/
.vscode/
data.db
data.db-*
|
mikeee/dapr
|
tests/apps/workflowsapp/.gitignore
|
Git
|
mit
| 37 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
using Microsoft.AspNetCore.Mvc;
using System.Threading.Tasks;
using Dapr.Client;
using System;
namespace DaprDemoActor
{
  /// <summary>
  /// HTTP controller exposing thin wrappers over the Dapr workflow client API
  /// so that e2e tests can drive workflow operations over plain HTTP.
  /// </summary>
  [ApiController]
  [Route("/")]
  public class Controller : ControllerBase
  {
    // Dapr sidecar endpoints, built from the standard env vars injected by Dapr.
    static string httpEndpoint = "http://127.0.0.1:" + Environment.GetEnvironmentVariable("DAPR_HTTP_PORT");
    static string grpcEndpoint = "http://127.0.0.1:" + Environment.GetEnvironmentVariable("DAPR_GRPC_PORT");
    // Single shared client instance reused by every endpoint.
    public static DaprClient daprClient = new DaprClientBuilder().UseGrpcEndpoint(grpcEndpoint).UseHttpEndpoint(httpEndpoint).Build();

    /// <summary>Returns the runtime status of the given workflow instance.</summary>
    [HttpGet("{workflowComponent}/{instanceID}")]
    public async Task<ActionResult<string>> GetWorkflow([FromRoute] string instanceID, string workflowComponent)
    {
      await daprClient.WaitForSidecarAsync();
      var getResponse = await daprClient.GetWorkflowAsync(instanceID, workflowComponent);
      return getResponse.RuntimeStatus.ToString();
    }

    /// <summary>Starts a workflow instance and returns its instance ID.</summary>
    // NOTE(review): the {workflowName} route parameter is ignored — the workflow
    // name passed to StartWorkflowAsync is hard-coded to "PlaceOrder". Confirm
    // this is intended by the tests that call this route.
    [HttpPost("StartWorkflow/{workflowComponent}/{workflowName}/{instanceID}")]
    public async Task<ActionResult<string>> StartWorkflow([FromRoute] string instanceID, string workflowName, string workflowComponent)
    {
      await daprClient.WaitForSidecarAsync();
      var inputItem = "paperclips";
      var workflowOptions = new Dictionary<string, string>();
      var startResponse = await daprClient.StartWorkflowAsync(
          instanceId: instanceID,
          workflowComponent: workflowComponent,
          workflowName: "PlaceOrder",
          input: inputItem,
          workflowOptions: workflowOptions);
      var getResponse = await daprClient.GetWorkflowAsync(instanceID, workflowComponent);
      return getResponse.InstanceId;
    }

    /// <summary>
    /// Starts a "Monitor" workflow that watches the workflow instance named by
    /// watchInstanceID, and returns the monitor's own instance ID.
    /// </summary>
    [HttpPost("StartMonitorWorkflow/{workflowComponent}/{watchInstanceID}/{instanceID}")]
    public async Task<ActionResult<string>> StartMonitorWorkflow([FromRoute] string watchInstanceID, string instanceID, string workflowComponent)
    {
      await daprClient.WaitForSidecarAsync();
      // The watched instance ID is passed to the Monitor workflow as its input.
      var inputItem = watchInstanceID;
      var workflowOptions = new Dictionary<string, string>();
      var startResponse = await daprClient.StartWorkflowAsync(
          instanceId: instanceID,
          workflowComponent: workflowComponent,
          workflowName: "Monitor",
          input: inputItem,
          workflowOptions: workflowOptions);
      var getResponse = await daprClient.GetWorkflowAsync(instanceID, workflowComponent);
      return getResponse.InstanceId;
    }

    /// <summary>Purges all state for the given workflow instance.</summary>
    [HttpPost("PurgeWorkflow/{workflowComponent}/{instanceID}")]
    public async Task<ActionResult<bool>> PurgeWorkflow([FromRoute] string instanceID, string workflowComponent)
    {
      await daprClient.PurgeWorkflowAsync(instanceID, workflowComponent);
      return true;
    }

    /// <summary>Terminates the given workflow instance.</summary>
    [HttpPost("TerminateWorkflow/{workflowComponent}/{instanceID}")]
    public async Task<ActionResult<bool>> TerminateWorkflow([FromRoute] string instanceID, string workflowComponent)
    {
      await daprClient.TerminateWorkflowAsync(instanceID, workflowComponent);
      return true;
    }

    /// <summary>Pauses the given workflow instance.</summary>
    [HttpPost("PauseWorkflow/{workflowComponent}/{instanceID}")]
    public async Task<ActionResult<bool>> PauseWorkflow([FromRoute] string instanceID, string workflowComponent)
    {
      await daprClient.PauseWorkflowAsync(instanceID, workflowComponent);
      return true;
    }

    /// <summary>Resumes the given (paused) workflow instance.</summary>
    [HttpPost("ResumeWorkflow/{workflowComponent}/{instanceID}")]
    public async Task<ActionResult<bool>> ResumeWorkflow([FromRoute] string instanceID, string workflowComponent)
    {
      await daprClient.ResumeWorkflowAsync(instanceID, workflowComponent);
      return true;
    }

    /// <summary>Raises an external event against the given workflow instance.</summary>
    [HttpPost("RaiseWorkflowEvent/{workflowComponent}/{instanceID}/{eventName}/{eventInput}")]
    public async Task<ActionResult<bool>> RaiseWorkflowEvent([FromRoute] string instanceID, string workflowComponent, string eventName, string eventInput)
    {
      await daprClient.RaiseWorkflowEventAsync(instanceID, workflowComponent, eventName, eventInput);
      return true;
    }
  }
}
|
mikeee/dapr
|
tests/apps/workflowsapp/Controller.cs
|
C#
|
mit
| 4,599 |
# Runtime base image; the app listens on port 3000.
FROM mcr.microsoft.com/dotnet/aspnet:6.0 AS base
WORKDIR /app
EXPOSE 3000
# Build stage: copy and restore the project file first so the dependency
# layer is cached independently of source changes.
FROM mcr.microsoft.com/dotnet/sdk:6.0 AS build
WORKDIR /src
COPY ["WorkflowActor.csproj", "./"]
RUN dotnet restore "WorkflowActor.csproj"
COPY . .
WORKDIR "/src/."
RUN dotnet build "WorkflowActor.csproj" -c Release -o /app/build
# Publish stage: produce the deployable output.
FROM build AS publish
RUN dotnet publish "WorkflowActor.csproj" -c Release -o /app/publish
# Final image: copy only the published output into the runtime base.
FROM base AS final
WORKDIR /app
COPY --from=publish /app/publish .
ENTRYPOINT ["dotnet", "WorkflowActor.dll"]
# Make sure the app binds to port 3000
ENV ASPNETCORE_URLS http://*:3000
|
mikeee/dapr
|
tests/apps/workflowsapp/Dockerfile
|
Dockerfile
|
mit
| 580 |
# NOTE(review): this file is named Dockerfile-windows but uses the same Linux
# tags of the .NET images as the Linux Dockerfile — confirm the build system
# resolves these to Windows variants, or whether OS-specific tags are needed.
FROM mcr.microsoft.com/dotnet/sdk:6.0 AS build-env
WORKDIR /app
# Copy csproj and restore as distinct layers
COPY *.csproj ./
RUN dotnet restore
# Copy everything else and build
COPY . ./
RUN dotnet publish -c Release -o out
# Build runtime image
FROM mcr.microsoft.com/dotnet/aspnet:6.0
WORKDIR /app
EXPOSE 3000
COPY --from=build-env /app/out .
ENTRYPOINT ["dotnet", "WorkflowActor.dll"]
# Make sure the app binds to port 3000
ENV ASPNETCORE_URLS http://*:3000
|
mikeee/dapr
|
tests/apps/workflowsapp/Dockerfile-windows
|
none
|
mit
| 465 |
// ------------------------------------------------------------------------
// Copyright 2021 The Dapr Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ------------------------------------------------------------------------
namespace DaprDemoActor
{
  using Microsoft.AspNetCore.Hosting;
  using Microsoft.Extensions.Hosting;

  /// <summary>
  /// Application entry point: builds and runs the ASP.NET Core generic host
  /// configured by <see cref="Startup"/>.
  /// </summary>
  public class Program
  {
    /// <summary>Builds the host and blocks until it shuts down.</summary>
    public static void Main(string[] args)
    {
      CreateHostBuilder(args).Build().Run();
    }

    /// <summary>Creates the default host builder wired to <see cref="Startup"/>.</summary>
    public static IHostBuilder CreateHostBuilder(string[] args) =>
        Host.CreateDefaultBuilder(args)
            .ConfigureWebHostDefaults(webBuilder =>
            {
              webBuilder.UseStartup<Startup>();
            });
  }
}
|
mikeee/dapr
|
tests/apps/workflowsapp/Program.cs
|
C#
|
mit
| 1,263 |
// ------------------------------------------------------------------------
// Copyright 2021 The Dapr Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ------------------------------------------------------------------------
namespace DaprDemoActor
{
  using Dapr.Workflow;
  using Microsoft.AspNetCore.Authentication;
  using Microsoft.AspNetCore.Authorization;
  using Microsoft.AspNetCore.Builder;
  using Microsoft.AspNetCore.Hosting;
  using Microsoft.Extensions.Configuration;
  using Microsoft.Extensions.DependencyInjection;
  using Microsoft.Extensions.Hosting;
  using System.Threading.Tasks;
  using System;
  using Dapr.Client;

  /// <summary>
  /// Startup class.
  /// Registers the test workflows ("PlaceOrder", "Monitor") and their
  /// activities with the Dapr Workflow runtime, and configures the
  /// ASP.NET Core pipeline used by the e2e workflow tests.
  /// </summary>
  public class Startup
  {
    /// <summary>
    /// Initializes a new instance of the <see cref="Startup"/> class.
    /// </summary>
    /// <param name="configuration">Configuration.</param>
    public Startup(IConfiguration configuration)
    {
      this.Configuration = configuration;
    }

    /// <summary>
    /// Gets the configuration.
    /// </summary>
    public IConfiguration Configuration { get; }

    /// <summary>
    /// Configures Services.
    /// </summary>
    /// <param name="services">Service Collection.</param>
    public void ConfigureServices(IServiceCollection services)
    {
      services.AddDaprWorkflow(options =>
      {
        // Example of registering a "PlaceOrder" workflow function
        options.RegisterWorkflow<string, string>("PlaceOrder", implementation: async (context, input) =>
        {
          var itemToPurchase = input;
          // The initial input is replaced by the payload of the
          // "ChangePurchaseItem" external event.
          itemToPurchase = await context.WaitForExternalEventAsync<string>("ChangePurchaseItem");
          // Parallel Execution - Waiting for all tasks to finish
          // Each confirmation event has a 10-second timeout.
          Task<string> t1 = context.WaitForExternalEventAsync<string>("ConfirmSize", TimeSpan.FromSeconds(10));
          Task<string> t2 = context.WaitForExternalEventAsync<string>("ConfirmColor", TimeSpan.FromSeconds(10));
          Task<string> t3 = context.WaitForExternalEventAsync<string>("ConfirmAddress", TimeSpan.FromSeconds(10));
          await Task.WhenAll(t1, t2, t3);
          // Parallel Execution - Waiting for any task to finish
          Task<string> e1 = context.WaitForExternalEventAsync<string>("PayInCash", TimeSpan.FromSeconds(10));
          Task<string> e2 = context.WaitForExternalEventAsync<string>("PayByCard", TimeSpan.FromSeconds(10));
          Task<string> e3 = context.WaitForExternalEventAsync<string>("PayOnline", TimeSpan.FromSeconds(10));
          await Task.WhenAny(e1, e2, e3);
          // In real life there are other steps related to placing an order, like reserving
          // inventory and charging the customer credit card etc. But let's keep it simple ;)
          await context.CallActivityAsync<string>("ShipProduct", itemToPurchase);
          return itemToPurchase;
        });
        // Example of registering a "Monitor" workflow function
        // Input is the instance ID of the workflow being watched; the monitor
        // polls its status, sleeps, then restarts itself via ContinueAsNew
        // until the watched workflow completes.
        options.RegisterWorkflow<string, string>("Monitor", implementation: async (context, input) =>
        {
          var workflowInstanceId = input;
          TimeSpan nextSleepInterval;
          var status = await context.CallActivityAsync<string>("GetStatus", workflowInstanceId);
          if (status == "Running")
          {
            nextSleepInterval = TimeSpan.FromSeconds(3);
          }
          else
          {
            await context.CallActivityAsync("Alert", $"Workflow is not in RUNNING status. status is {status}. ");
            // Check more frequently when not in running state
            nextSleepInterval = TimeSpan.FromSeconds(1);
          }
          // Put the workflow to sleep until the determined time
          await context.CreateTimer(nextSleepInterval);
          // Restart from the beginning with the updated state
          if(status != "Completed" ){
            context.ContinueAsNew(workflowInstanceId);
          }
          return "Monitor closed";
        });
        // Example of registering a "ShipProduct" workflow activity function
        options.RegisterActivity<string, string>("ShipProduct", implementation: (context, input) =>
        {
          return Task.FromResult($"We are shipping {input} to the customer using our hoard of drones!");
        });
        // Example of registering a "GetStatus" workflow activity function
        // Queries the sidecar for the runtime status of the given instance.
        // NOTE(review): a new DaprClient is built on every invocation — confirm
        // whether reusing a shared client would be preferable here.
        options.RegisterActivity<string, string>("GetStatus", implementation: async (context, input) =>
        {
          var InstanceId = input;
          string httpEndpoint = "http://127.0.0.1:" + Environment.GetEnvironmentVariable("DAPR_HTTP_PORT");
          string grpcEndpoint = "http://127.0.0.1:" + Environment.GetEnvironmentVariable("DAPR_GRPC_PORT");
          DaprClient daprClient = new DaprClientBuilder().UseGrpcEndpoint(grpcEndpoint).UseHttpEndpoint(httpEndpoint).Build();
          var getResponse = await daprClient.GetWorkflowAsync(InstanceId, "dapr");
          return getResponse.RuntimeStatus.ToString();
        });
        // Example of registering a "Alert" workflow activity function
        options.RegisterActivity<string, string>("Alert", implementation: (context, input) =>
        {
          return Task.FromResult($"Alert: {input}");
        });
      });
      services.AddAuthentication().AddDapr();
      services.AddAuthorization(o => o.AddDapr());
      services.AddControllers().AddDapr();
    }

    /// <summary>
    /// Configures Application Builder and WebHost environment.
    /// </summary>
    /// <param name="app">Application builder.</param>
    /// <param name="env">Webhost environment.</param>
    public void Configure(IApplicationBuilder app, IWebHostEnvironment env)
    {
      if (env.IsDevelopment())
      {
        app.UseDeveloperExceptionPage();
      }
      app.UseRouting();
      app.UseAuthentication();
      app.UseAuthorization();
      // Unwrap CloudEvents envelopes before they reach the controllers.
      app.UseCloudEvents();
      app.UseEndpoints(endpoints =>
      {
        endpoints.MapSubscribeHandler();
        endpoints.MapControllers();
      });
    }
  }
}
|
mikeee/dapr
|
tests/apps/workflowsapp/Startup.cs
|
C#
|
mit
| 7,426 |
<Project Sdk="Microsoft.NET.Sdk.Web">
<PropertyGroup>
<TargetFramework>net6.0</TargetFramework>
<NoWarn>612,618</NoWarn>
<ImplicitUsings>enable</ImplicitUsings>
<LangVersion>latest</LangVersion>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Dapr.AspNetCore" Version="1.11.0" />
<PackageReference Include="Dapr.Client" Version="1.11.0" />
<PackageReference Include="Dapr.Actors" Version="1.11.0" />
<PackageReference Include="Dapr.Actors.AspNetCore" Version="1.11.0" />
<PackageReference Include="Dapr.Workflow" Version="1.11.0" />
</ItemGroup>
</Project>
|
mikeee/dapr
|
tests/apps/workflowsapp/WorkflowActor.csproj
|
csproj
|
mit
| 611 |
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: statestore
spec:
type: state.sqlite
version: v1
metadata:
- name: actorStateStore
value: "true"
- name: connectionString
value: "data.db"
|
mikeee/dapr
|
tests/apps/workflowsapp/resources/sqlite.yaml
|
YAML
|
mit
| 227 |
#
# Copyright 2022 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: dapr.io/v1alpha1
kind: Subscription
metadata:
name: pluggable-c-topic-subscription
spec:
pubsubname: pluggable-messagebus
topic: pubsub-c-topic-http
route: /pubsub-c-topic-http
scopes:
- pubsub-subscriber-pluggable
|
mikeee/dapr
|
tests/config/app_topic_subscription_pluggable_pubsub.yaml
|
YAML
|
mit
| 816 |
apiVersion: dapr.io/v1alpha1
kind: Subscription
metadata:
name: c-topic-subscription
spec:
pubsubname: messagebus
topic: pubsub-c-topic-http
route: /pubsub-c-topic-http
scopes:
- pubsub-subscriber
|
mikeee/dapr
|
tests/config/app_topic_subscription_pubsub.yaml
|
YAML
|
mit
| 206 |
apiVersion: dapr.io/v1alpha1
kind: Subscription
metadata:
name: c-topic-subscription-grpc
spec:
pubsubname: messagebus
topic: pubsub-c-topic-grpc
route: /pubsub-c-topic-grpc
scopes:
- pubsub-subscriber-grpc
|
mikeee/dapr
|
tests/config/app_topic_subscription_pubsub_grpc.yaml
|
YAML
|
mit
| 216 |
apiVersion: dapr.io/v2alpha1
kind: Subscription
metadata:
name: pubsub-routing-crd-http-subscription
spec:
pubsubname: messagebus
topic: pubsub-routing-crd-http
routes:
rules:
- match: 'event.type == "myevent.D"'
path: myevent.D
- match: 'event.type == "myevent.E"'
path: myevent.E
default: myevent.F
scopes:
- pubsub-subscriber-routing
|
mikeee/dapr
|
tests/config/app_topic_subscription_routing.yaml
|
YAML
|
mit
| 382 |
apiVersion: dapr.io/v2alpha1
kind: Subscription
metadata:
name: pubsub-routing-crd-grpc-subscription
spec:
pubsubname: messagebus
topic: pubsub-routing-crd-grpc
routes:
rules:
- match: 'event.type == "myevent.D"'
path: myevent.D
- match: 'event.type == "myevent.E"'
path: myevent.E
default: myevent.F
scopes:
- pubsub-subscriber-routing-grpc
|
mikeee/dapr
|
tests/config/app_topic_subscription_routing_grpc.yaml
|
YAML
|
mit
| 387 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: querystatestore
spec:
type: state.azure.cosmosdb
version: v1
initTimeout: 1m
metadata:
- name: masterKey
secretKeyRef:
name: cosmosdb-secret
key: primaryMasterKey
- name: url
secretKeyRef:
name: cosmosdb-secret
key: url
- name: database
value: dapre2e
- name: collection
value: items-query
scopes:
- stateapp
- stateapp-pluggable
|
mikeee/dapr
|
tests/config/dapr_cosmosdb_query_state.yaml
|
YAML
|
mit
| 1,032 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: statestore
spec:
type: state.azure.cosmosdb
version: v1
initTimeout: 1m
metadata:
- name: masterKey
secretKeyRef:
name: cosmosdb-secret
key: primaryMasterKey
- name: url
secretKeyRef:
name: cosmosdb-secret
key: url
- name: database
value: dapre2e
- name: collection
value: items
scopes:
- stateapp
- stateapp-pluggable
- httpmetrics
- grpcmetrics
- disabledmetric
- perf-workflowsapp
- perf-workflowsapp-actors
|
mikeee/dapr
|
tests/config/dapr_cosmosdb_state.yaml
|
YAML
|
mit
| 1,123 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: statestore-actors
spec:
type: state.azure.cosmosdb
version: v1
initTimeout: 1m
metadata:
- name: masterKey
secretKeyRef:
name: cosmosdb-secret
key: primaryMasterKey
- name: url
secretKeyRef:
name: cosmosdb-secret
key: url
- name: database
value: dapre2e
- name: collection
value: items
- name: actorStateStore
  value: "true"
scopes:
# actortestclient is deliberately omitted to ensure that `actor_features_test` works without a state store
- actor1
- actor2
- actorapp
- actorfeatures
- reentrantactor
- actorreminder
- actorreminderpartition
- actorinvocationapp
- actormetadata-a
- actormetadata-b
- actorjava
- actordotnet
- actorpython
- actorphp
- actorstate
- resiliencyapp
- resiliencyappgrpc
- perf-actor-activation-service
- perf-actor-activation-client
- perf-actor-reminder-service
- perf-actor-reminder-client
- perf-actor-timer-service
- perf-actor-timer-client
- perf-actor-double-activation
- perf-actorfeatures
- perf-actor-id
- perf-actor-type
- workflowsapp
- workflowsapp-actors
|
mikeee/dapr
|
tests/config/dapr_cosmosdb_state_actorstore.yaml
|
YAML
|
mit
| 1,699 |
#
# Copyright 2022 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: schedule
spec:
type: bindings.cron
version: v1
metadata:
- name: schedule
envRef: "CRON_SCHEDULE"
- name: direction
value: input
scopes:
- healthapp-http
- healthapp-grpc
- healthapp-h2c
|
mikeee/dapr
|
tests/config/dapr_cron_binding.yaml
|
YAML
|
mit
| 861 |
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: azurekeyvault
spec:
type: crypto.azure.keyvault
version: v1
metadata:
- name: vaultName
envRef: AZURE_KEY_VAULT_NAME
- name: azureTenantId
secretKeyRef:
name: azurekeyvault-secret
key: tenant-id
- name: azureClientId
secretKeyRef:
name: azurekeyvault-secret
key: client-id
- name: azureClientSecret
secretKeyRef:
name: azurekeyvault-secret
key: client-secret
scopes:
- cryptoapp
|
mikeee/dapr
|
tests/config/dapr_crypto_azurekeyvault.yaml
|
YAML
|
mit
| 537 |
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: jwks
spec:
type: crypto.dapr.jwks
version: v1
metadata:
- name: jwks
value: |
{
"keys": [
{
"kid": "rsakey",
"kty": "RSA",
"n": "3I2mdIK4mRRu-ywMrYjUZzBxt0NlAVLrMhGlaJsby7PWTMiLpZVip4SBD9GwnCU0TGFD7k2-7tfs0y9U6WV7MwgCjc9m_DUUGbE-kKjEU7JYkLzYlndys-6xuhD4Jf1hu9AZVdfXftpWSy_NNg6fVwTH4nckOAbOSL1hXToOYWQcDDW95Rhw3U4z04PqssEpRKn5KGBuTahNNNiZcWns99pChpLTxgdm93LjMBI1KCGBpOaz7fcQJ9V3c6rSwMKyY3IPm1LwS6PIs7xb2ZJ0Eb8A6MtCkGhgNsodpkxhqKbqtxI-KqTuZy9g4jb8WKjJq9lB9q-HPHoQqIEDom6P8w",
"e": "AQAB",
"d": "sEksVg4yPHEfr_VqgL5Qf9Yx3nio4CEFyvRd0LnBwtUQojjdndFElH8NpbZGQthd7_sCkBRmW4QNUFORvCgYDBcH649uUoKeeVa5mmi62-c-cRnwyHvbG8TbG8z6e2sG1lakGYA3R7Z-qQQhM2cGp0gz-Gigc4PFGFD-5MRFwEMHP2fy-1PEp2dKBiT3sl2yo4IjMPRWMWh1e_2xnvFXYSGCx1-1qyD_VpCCpL7-kuTWCZFOx8Cq_EpZiX7ovDVBEb0JlrT3srXAApkEE42YYFnl3TejlgZOy00gv7qe-6WB17lBM_2_eFd5lidJzVS4riJxZaBNJeW0sg_SusYAEQ",
"p": "9LOms8fd0FVwh2CyScxBWz5Pg7gVtlCm6qD2ZKDZL84FDD_BVyy5LQm9lV4kTcE3TyWH6dHeGmJ4g3WFpuY4xusuU8rdVUOv-dpcLoW4gJnQ4saqKdi3uU5TmzqCqvzNXSoECvy5APXK-Lh01c5fgrmeRzYUEJN2hgxxaL6HJBk",
"q": "5ryRKJpzsZD2B1xbnbUeRLTjBRqOIqFsVuF0lMi7SMZ24IsKIfa9CeBCfGIFlIwGe-jIbozL2lrIsw3Q3jZHcwIVq9q1s9voZeW4Z7cylCBs9Lq5199b5-6-QueDK4Fksg5uQsDY2Jleamm53L4CMIjmBRaVEUMW7jRfxNQJdes"
},
{
"kid": "symmetrickey",
"kty": "oct",
"k": "RjJPhQzsDB5dvjQZ-85l_D_SBXWCBFx7IVsesenVvts"
}
]
}
scopes:
- cryptoapp
|
mikeee/dapr
|
tests/config/dapr_crypto_jwks.yaml
|
YAML
|
mit
| 1,620 |
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: inmemorypubsub
spec:
type: pubsub.in-memory
version: v1
initTimeout: 1m
metadata: []
scopes:
- pubsub-perf-bulk-grpc
- pubsub-perf-grpc
- healthapp-http
- healthapp-grpc
- healthapp-h2c
|
mikeee/dapr
|
tests/config/dapr_in_memory_pubsub.yaml
|
YAML
|
mit
| 266 |
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: inmemorystate
spec:
type: state.in-memory
version: v1
initTimeout: 1m
metadata:
- name: spacehold
value: metadata-is-required
scopes:
- perfstatehttp
- perfstategrpc
|
mikeee/dapr
|
tests/config/dapr_in_memory_state.yaml
|
YAML
|
mit
| 248 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: test-topic
spec:
type: bindings.kafka
initTimeout: 1m
version: v1
metadata:
# Kafka broker connection setting
- name: brokers
value: dapr-kafka:9092
# consumer configuration: topic and consumer group
- name: topics
value: dapr-test
- name: consumerGroup
value: group1
# publisher configuration: topic
- name: publishTopic
value: dapr-test
- name: authRequired
value: "false"
- name: initialOffset
value: oldest
scopes:
- bindinginput
- bindingoutput
- bindinginputgrpc
|
mikeee/dapr
|
tests/config/dapr_kafka_bindings.yaml
|
YAML
|
mit
| 1,167 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: test-topic-custom-route
spec:
type: bindings.kafka
initTimeout: 1m
version: v1
metadata:
# Kafka broker connection setting
- name: brokers
value: dapr-kafka:9092
# consumer configuration: topic and consumer group
- name: topics
value: dapr-test
- name: consumerGroup
value: group2
# publisher configuration: topic
- name: publishTopic
value: dapr-test
- name: authRequired
value: "false"
  # specify a custom subscription route
- name: route
value: /custom-path
- name: initialOffset
value: oldest
scopes:
- bindinginput
- bindingoutput
- bindinginputgrpc
|
mikeee/dapr
|
tests/config/dapr_kafka_bindings_custom_route.yaml
|
YAML
|
mit
| 1,244 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: test-topic-grpc
spec:
type: bindings.kafka
initTimeout: 1m
version: v1
metadata:
# Kafka broker connection setting
- name: brokers
value: dapr-kafka:9092
# consumer configuration: topic and consumer group
- name: topics
value: dapr-test-grpc
- name: consumerGroup
value: group1
# publisher configuration: topic
- name: publishTopic
value: dapr-test-grpc
- name: authRequired
value: "false"
- name: initialOffset
value: oldest
- name: direction
value: input, output
scopes:
- bindinginput
- bindingoutput
- bindinginputgrpc
|
mikeee/dapr
|
tests/config/dapr_kafka_bindings_grpc.yaml
|
YAML
|
mit
| 1,227 |
#
# Copyright 2022 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: pluggable-test-topic
spec:
type: bindings.kafka-pluggable
initTimeout: 1m
version: v1
metadata:
# Kafka broker connection setting
- name: brokers
value: dapr-kafka:9092
# consumer configuration: topic and consumer group
- name: topics
value: dapr-test-pluggable
- name: consumerGroup
value: group1-pluggable
# publisher configuration: topic
- name: publishTopic
value: dapr-test-pluggable
- name: authRequired
value: "false"
- name: initialOffset
value: oldest
scopes:
- pluggable-bindinginput
- pluggable-bindingoutput
---
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: pluggable-test-topic-grpc
spec:
type: bindings.kafka-pluggable
initTimeout: 1m
version: v1
metadata:
# Kafka broker connection setting
- name: brokers
value: dapr-kafka:9092
# consumer configuration: topic and consumer group
- name: topics
value: dapr-test-grpc-pluggable
- name: consumerGroup
value: group1-pluggable
# publisher configuration: topic
- name: publishTopic
value: dapr-test-grpc-pluggable
- name: authRequired
value: "false"
- name: initialOffset
value: oldest
scopes:
- pluggable-bindinginputgrpc
- pluggable-bindingoutput
---
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: test-topic-custom-route-pluggable
spec:
type: bindings.kafka-pluggable
initTimeout: 1m
version: v1
metadata:
# Kafka broker connection setting
- name: brokers
value: dapr-kafka:9092
# consumer configuration: topic and consumer group
- name: topics
value: dapr-test-pluggable
- name: consumerGroup
value: group2-pluggable
# publisher configuration: topic
- name: publishTopic
value: dapr-test-pluggable
- name: authRequired
value: "false"
  # specify a custom subscription route
- name: route
value: /pluggable-custom-path
- name: initialOffset
value: oldest
scopes:
- pluggable-bindinginput
|
mikeee/dapr
|
tests/config/dapr_kafka_pluggable_bindings.yaml
|
YAML
|
mit
| 2,674 |
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
name: daprsystem
spec:
mtls:
enabled: false
workloadCertTTL: "1h"
allowedClockSkew: "20m"
|
mikeee/dapr
|
tests/config/dapr_mtls_off_config.yaml
|
YAML
|
mit
| 165 |
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
name: disable-telemetry
spec:
tracing:
samplingRate: "0"
metrics:
enabled: false
---
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
name: obs-defaultmetric
spec:
tracing:
samplingRate: "0"
  # by default, metrics.enabled is true.
---
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
name: metrics-config
spec:
metrics:
enabled: true
http:
increasedCardinality: false
|
mikeee/dapr
|
tests/config/dapr_observability_test_config.yaml
|
YAML
|
mit
| 489 |
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: configstore
spec:
type: configuration.postgres
version: v1
metadata:
- name: connectionString
value: "host=dapr-postgres-postgresql.dapr-tests.svc.cluster.local user=postgres password=example port=5432 connect_timeout=10 database=dapr_test"
- name: table
value: configtable
scopes:
- configurationapp
|
mikeee/dapr
|
tests/config/dapr_postgres_configuration.yaml
|
YAML
|
mit
| 392 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: querystatestore
spec:
type: state.postgres
# Must use v1 as the v2 of the component does not support query API
version: v1
initTimeout: 1m
metadata:
- name: connectionString
value: "host=dapr-postgres-postgresql.dapr-tests.svc.cluster.local user=postgres password=example port=5432 connect_timeout=10 database=dapr_test"
- name: table
value: querytable
scopes:
- stateapp
- stateapp-pluggable
|
mikeee/dapr
|
tests/config/dapr_postgres_query_state.yaml
|
YAML
|
mit
| 1,057 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: statestore
spec:
type: state.postgres
version: v2
initTimeout: 1m
metadata:
- name: connectionString
value: "host=dapr-postgres-postgresql.dapr-tests.svc.cluster.local user=postgres password=example port=5432 connect_timeout=10 database=dapr_test"
- name: tablePrefix
value: v2
- name: metadataTableName
value: dapr_metadata_v2
scopes:
- stateapp
- stateapp-pluggable
- httpmetrics
- grpcmetrics
- disabledmetric
- perf-workflowsapp
- perf-workflowsapp-actors
|
mikeee/dapr
|
tests/config/dapr_postgres_state.yaml
|
YAML
|
mit
| 1,154 |
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: statestore-actors
spec:
type: state.postgres
version: v2
metadata:
- name: connectionString
value: "host=dapr-postgres-postgresql.dapr-tests.svc.cluster.local user=postgres password=example port=5432 connect_timeout=10 database=dapr_test"
- name: tablePrefix
value: v2actor
- name: metadataTableName
value: dapr_metadata_v2actor
- name: actorStateStore
value: true
scopes:
# actortestclient is deliberately omitted to ensure that `actor_features_test` works without a state store
- actor1
- actor2
- actorapp
- actorfeatures
- reentrantactor
- actorreminder
- actorreminderpartition
- actorinvocationapp
- actormetadata-a
- actormetadata-b
- actorjava
- actordotnet
- actorpython
- actorstate
- actorphp
- resiliencyapp
- resiliencyappgrpc
- perf-actor-activation-service
- perf-actor-activation-client
- perf-actor-reminder-service
- perf-actor-reminder-client
- perf-actor-timer-service
- perf-actor-timer-client
- perf-actor-double-activation
- perf-actorfeatures
- perf-actor-id
- perf-actor-type
- workflowsapp
- workflowsapp-actors
- perf-workflowsapp
- perf-workflowsapp-actors
|
mikeee/dapr
|
tests/config/dapr_postgres_state_actorstore.yaml
|
YAML
|
mit
| 1,194 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: configstore
spec:
type: configuration.redis
version: v1
metadata:
- name: redisHost
value: dapr-redis-master.dapr-tests.svc.cluster.local:6379
- name: redisPassword
value: ""
- name: redisDB
value: "0"
scopes:
- configurationapp
|
mikeee/dapr
|
tests/config/dapr_redis_configuration.yaml
|
YAML
|
mit
| 897 |
#
# Copyright 2022 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: pluggable-statestore
spec:
type: state.redis-pluggable
version: v1
initTimeout: 20s
metadata:
- name: redisHost
secretKeyRef:
name: redissecret
key: host
- name: redisPassword
value: ""
scopes:
- stateapp-pluggable
|
mikeee/dapr
|
tests/config/dapr_redis_pluggable_state.yaml
|
YAML
|
mit
| 903 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: messagebus
spec:
type: pubsub.redis
initTimeout: 1m
version: v1
metadata:
- name: redisHost
secretKeyRef:
name: redissecret
key: host
- name: redisPassword
value: ""
- name: processingTimeout
value: 1s
- name: redeliverInterval
value: 1s
- name: idleCheckFrequency
value: 1s
- name: readTimeout
value: 1s
scopes:
- pubsub-publisher
- pubsub-subscriber
- pubsub-publisher-grpc
- pubsub-subscriber-grpc
- pubsub-publisher-routing
- pubsub-subscriber-routing
- pubsub-publisher-routing-grpc
- pubsub-subscriber-routing-grpc
- pubsub-publisher-bulk-subscribe
- pubsub-bulk-subscriber
- job-publisher
- job-subscriber
|
mikeee/dapr
|
tests/config/dapr_redis_pubsub.yaml
|
YAML
|
mit
| 1,335 |
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: badhost-store
spec:
type: state.redis
initTimeout: 1m
ignoreErrors: true
version: v1
metadata:
- name: redisHost
value: badhost:6379
- name: redisPassword
value: ""
scopes:
- stateapp
- stateapp-pluggable
|
mikeee/dapr
|
tests/config/dapr_redis_state_badhost.yaml
|
YAML
|
mit
| 294 |
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: badpass-store
spec:
type: state.redis
initTimeout: 1m
version: v1
ignoreErrors: true
metadata:
- name: redisHost
value: localhost:6379
- name: redisPassword
value: "not_a_password"
scopes:
- stateapp
- stateapp-pluggable
|
mikeee/dapr
|
tests/config/dapr_redis_state_badpass.yaml
|
YAML
|
mit
| 310 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: querystatestore2
spec:
type: state.redis
version: v1
initTimeout: 1m
metadata:
- name: redisHost
secretKeyRef:
name: redissecret
key: host
- name: redisPassword
value: ""
- name: queryIndexes
value: |
[
{
"name": "orgIndx",
"indexes": [
{
"key": "person.org",
"type": "TEXT"
},
{
"key": "state",
"type": "TEXT"
}
]
}
]
scopes:
- stateapp
- stateapp-pluggable
|
mikeee/dapr
|
tests/config/dapr_redis_state_query.yaml
|
YAML
|
mit
| 1,205 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: messagebus
spec:
type: pubsub.azure.servicebus
initTimeout: 1m
version: v1
metadata:
- name: connectionString
secretKeyRef:
name: servicebus-secret
key: connectionString
- name: handlerTimeoutInSec
value: 5
- name: timeoutInSec
value: 5
- name: lockDurationInSec
value: 2
- name: lockRenewalInSec
value: 2
- name: defaultMessageTimeToLiveInSec
value: 999
- name: maxConcurrentHandlers
value: 20
- name: publishMaxRetries
value: 100
- name: publishInitialRetryInternalInMs
value: 10
- name: maxDeliveryCount
value: 999
- name: maxActiveMessages
value: 20
- name: disableEntityManagement
value: "false"
scopes:
- pubsub-publisher
- pubsub-subscriber
- pubsub-publisher-grpc
- pubsub-subscriber-grpc
- pubsub-publisher-routing
- pubsub-subscriber-routing
- pubsub-publisher-routing-grpc
- pubsub-subscriber-routing-grpc
- pubsub-publisher-bulk-subscribe
- pubsub-bulk-subscriber
- job-publisher
- job-subscriber
|
mikeee/dapr
|
tests/config/dapr_servicebus_pubsub.yaml
|
YAML
|
mit
| 1,667 |
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: dapr-secret-reader
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: secret-reader
subjects:
- kind: ServiceAccount
name: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: secret-reader
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
|
mikeee/dapr
|
tests/config/dapr_tests_cluster_role_binding.yaml
|
YAML
|
mit
| 400 |
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
name: tracingconfig-zipkin
spec:
tracing:
samplingRate: "1"
zipkin:
endpointAddress: "http://dapr-zipkin:9411/api/v2/spans"
|
mikeee/dapr
|
tests/config/dapr_tracing_config.yaml
|
YAML
|
mit
| 200 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: vault
spec:
type: secretstores.hashicorp.vault
version: v1
metadata:
  # The values for the metadata don't matter as this component will not be loaded
- name: vaultTokenMountPath
value: foo
- name: vaultToken
value: foo
scopes:
- secretapp
|
mikeee/dapr
|
tests/config/dapr_vault_secretstore.yaml
|
YAML
|
mit
| 912 |
apiVersion: dapr.io/v1alpha1
kind: HTTPEndpoint
metadata:
name: "external-http-endpoint"
spec:
baseUrl: http://service-invocation-external:80
headers:
- name: "Accept-Language"
value: "en-US"
|
mikeee/dapr
|
tests/config/external_invocation_http_endpoint.yaml
|
YAML
|
mit
| 204 |
apiVersion: dapr.io/v1alpha1
kind: HTTPEndpoint
metadata:
name: "external-http-endpoint-tls"
spec:
baseUrl: https://service-invocation-external:443
headers:
- name: "Accept-Language"
value: "en-US"
clientTLS:
rootCA:
secretKeyRef:
name: dapr-tls-client
key: ca.crt
certificate:
secretKeyRef:
name: dapr-tls-client
key: tls.crt
privateKey:
secretKeyRef:
name: dapr-tls-client
key: tls.key
|
mikeee/dapr
|
tests/config/external_invocation_http_endpoint_tls.yaml
|
YAML
|
mit
| 480 |
kind: Service
apiVersion: v1
metadata:
name: service-invocation-external
labels:
testapp: service-invocation-external
spec:
selector:
testapp: serviceinvocation-callee-external
ports:
- protocol: TCP
port: 80
targetPort: 3000
name: http
- protocol: TCP
port: 443
targetPort: 3001
name: https
type: LoadBalancer
|
mikeee/dapr
|
tests/config/externalinvocationcrd.yaml
|
YAML
|
mit
| 357 |
apiVersion: v1
kind: Service
metadata:
name: grpcproxyserver-app
spec:
selector:
testapp: grpcproxyserverexternal
ports:
- protocol: TCP
name: app-port
port: 50051
targetPort: 50051
|
mikeee/dapr
|
tests/config/grpcproxyserverexternal_service.yaml
|
YAML
|
mit
| 214 |
apiVersion: v1
kind: Namespace
metadata:
name: aa
---
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
name: daprsystem
namespace: aa
spec:
metric:
enabled: true
metrics:
enabled: true
mtls:
allowedClockSkew: 0m
controlPlaneTrustDomain: cluster.local
enabled: false
sentryAddress: bad-address:1234
workloadCertTTL: 1ms
|
mikeee/dapr
|
tests/config/ignore_daprsystem_config.yaml
|
YAML
|
mit
| 369 |
# Copyright 2023 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: k6-rb
subjects:
- kind: ServiceAccount
name: "k6-sa"
namespace: dapr-tests
roleRef:
kind: ClusterRole
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
|
mikeee/dapr
|
tests/config/k6_rolebinding.yaml
|
YAML
|
mit
| 834 |
# Copyright 2023 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: v1
kind: ServiceAccount
metadata:
name: k6-sa
|
mikeee/dapr
|
tests/config/k6_sa.yaml
|
YAML
|
mit
| 636 |
# Copyright 2023 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: v1
kind: Secret
metadata:
name: k6-sa-secret
annotations:
kubernetes.io/service-account.name: k6-sa
type: kubernetes.io/service-account-token
|
mikeee/dapr
|
tests/config/k6_sa_secret.yaml
|
YAML
|
mit
| 738 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Install 1 replica for e2e test
replicaCount: 1
# Disable persistent storage
persistence:
enabled: false
autoCreateTopicsEnable: true
# Topic creation and configuration for dapr test
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
- key: kubernetes.io/arch
operator: In
values:
- amd64
zookeeper:
persistence:
enabled: false
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
- key: kubernetes.io/arch
operator: In
values:
- amd64
|
mikeee/dapr
|
tests/config/kafka_override.yaml
|
YAML
|
mit
| 1,502 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: kafka-messagebus
spec:
type: pubsub.kafka
initTimeout: 1m
version: v1
metadata:
# Kafka broker connection setting
- name: brokers
value: dapr-kafka:9092
- name: authRequired
value: "false"
- name: initialOffset
value: oldest
scopes:
- pubsub-publisher
- pubsub-subscriber
- pubsub-publisher-grpc
- pubsub-subscriber-grpc
- pubsub-publisher-bulk-subscribe
- pubsub-bulk-subscriber
- pubsub-publisher-pluggable
- pubsub-subscriber-pluggable
- pubsub-perf-bulk-grpc
- kafka-test-app-normal
- kafka-test-app-bulk
- k6-tester-pubsub-subscribe-http
|
mikeee/dapr
|
tests/config/kafka_pubsub.yaml
|
YAML
|
mit
| 1,237 |
apiVersion: kind.x-k8s.io/v1alpha4
kind: Cluster
nodes:
- role: control-plane
image: kindest/node:v1.24.0@sha256:0866296e693efe1fed79d5e6c7af8df71fc73ae45e3679af05342239cdc5bc8e
- role: worker
image: kindest/node:v1.24.0@sha256:0866296e693efe1fed79d5e6c7af8df71fc73ae45e3679af05342239cdc5bc8e
- role: worker
image: kindest/node:v1.24.0@sha256:0866296e693efe1fed79d5e6c7af8df71fc73ae45e3679af05342239cdc5bc8e
- role: worker
image: kindest/node:v1.24.0@sha256:0866296e693efe1fed79d5e6c7af8df71fc73ae45e3679af05342239cdc5bc8e
- role: worker
image: kindest/node:v1.24.0@sha256:0866296e693efe1fed79d5e6c7af8df71fc73ae45e3679af05342239cdc5bc8e
containerdConfigPatches:
- |-
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:5000"]
endpoint = ["http://kind-registry:5000"]
|
mikeee/dapr
|
tests/config/kind.yaml
|
YAML
|
mit
| 802 |
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
name: allowlistsappconfig
spec:
accessControl:
defaultAction: deny
trustDomain: "public"
policies:
- appId: "allowlists-caller"
defaultAction: deny
trustDomain: 'public'
namespace: "dapr-tests"
operations:
- name: /opAllow
httpVerb: ['POST', 'GET']
action: allow
- name: /opDeny
httpVerb: ["*"]
action: deny
|
mikeee/dapr
|
tests/config/kubernetes_allowlists_config.yaml
|
YAML
|
mit
| 455 |
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
name: allowlistsgrpcappconfig
spec:
accessControl:
defaultAction: deny
trustDomain: "public"
policies:
- appId: "allowlists-caller"
defaultAction: deny
trustDomain: 'public'
namespace: "dapr-tests"
operations:
- name: grpcToGrpcTest
httpVerb: [ '*' ]
action: allow
- name: httpToGrpcTest
httpVerb: [ "*" ]
action: deny
- name: grpcToGrpcWithoutVerbTest
action: allow
- appId: "grpcproxyclient"
defaultAction: deny
trustDomain: 'public'
namespace: "dapr-tests"
operations:
- name: /helloworld.Greeter/SayHello
action: allow
- name: /helloworld.Greeter/SayGoodbye
action: allow
|
mikeee/dapr
|
tests/config/kubernetes_allowlists_grpc_config.yaml
|
YAML
|
mit
| 861 |
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
name: redishostconfig
spec:
secrets:
scopes:
- storeName: "kubernetes"
defaultAccess: "allow"
allowedSecrets: ["redissecret"]
|
mikeee/dapr
|
tests/config/kubernetes_redis_host_config.yaml
|
YAML
|
mit
| 215 |
apiVersion: v1
kind: Secret
metadata:
name: redissecret
type: Opaque
data:
host: ZGFwci1yZWRpcy1tYXN0ZXI6NjM3OQ==
|
mikeee/dapr
|
tests/config/kubernetes_redis_secret.yaml
|
YAML
|
mit
| 118 |
apiVersion: v1
kind: Secret
metadata:
name: daprsecret
type: Opaque
data:
username: YWRtaW4=
---
apiVersion: v1
kind: Secret
metadata:
name: daprsecret2
type: Opaque
data:
username: YWRtaW4=
---
apiVersion: v1
kind: Secret
metadata:
name: emptysecret
type: Opaque
|
mikeee/dapr
|
tests/config/kubernetes_secret.yaml
|
YAML
|
mit
| 273 |
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
name: secretappconfig
spec:
secrets:
scopes:
- storeName: "kubernetes"
defaultAccess: "allow"
# adding nonexistentsecret to test scenario where it is allowed but not defined
allowedSecrets: ["daprsecret","redissecret","emptysecret","nonexistentsecret"]
components:
deny:
# The built-in Kubernetes secret store is loaded regardless
- secretstores.kubernetes
- secretstores.hashicorp.vault
|
mikeee/dapr
|
tests/config/kubernetes_secret_config.yaml
|
YAML
|
mit
| 518 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
auth:
enabled: false
persistentVolume:
enabled: false
tls:
enabled: false
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
- key: kubernetes.io/arch
operator: In
values:
- amd64
|
mikeee/dapr
|
tests/config/mongodb_override.yaml
|
YAML
|
mit
| 990 |
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
name: omithealthchecksconfig
spec:
logging:
apiLogging:
omitHealthChecks: true
|
mikeee/dapr
|
tests/config/omithealthchecks_config.yaml
|
YAML
|
mit
| 152 |
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
name: pipeline
spec:
tracing:
samplingRate: "1"
httpPipeline:
handlers:
- type: middleware.http.uppercase
name: uppercase
|
mikeee/dapr
|
tests/config/pipeline.yaml
|
YAML
|
mit
| 205 |
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
name: app-channel-pipeline
spec:
appHttpPipeline:
handlers:
- type: middleware.http.uppercase
name: uppercase
|
mikeee/dapr
|
tests/config/pipeline_app.yaml
|
YAML
|
mit
| 187 |
global:
postgresql:
auth:
username: postgres
postgresPassword: example
database: dapr_test
primary:
initdb:
scripts:
init.sql: |
CREATE TABLE IF NOT EXISTS configtable (KEY VARCHAR NOT NULL, VALUE VARCHAR NOT NULL, VERSION VARCHAR NOT NULL, METADATA JSON);
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
- key: kubernetes.io/arch
operator: In
values:
- amd64
persistence:
enabled: false
tls:
enabled: false
|
mikeee/dapr
|
tests/config/postgres_override.yaml
|
YAML
|
mit
| 734 |
#
# Copyright 2022 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
---
# TODO: @joshvanl: Remove once ActorStateTTL feature is finalized (probably in
# v1.12 release).
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
name: actorstatettl
spec:
features:
- name: ActorStateTTL
enabled: true
---
# This is used in tests to validate that the features are loaded
# It is not an actual "preview configuration"
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
name: previewconfig
spec:
features:
- name: IsEnabled
enabled: true
- name: NotEnabled
enabled: false
---
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
name: hotreloading
spec:
features:
- name: HotReload
enabled: true
|
mikeee/dapr
|
tests/config/preview_configurations.yaml
|
YAML
|
mit
| 1,271 |
apiVersion: dapr.io/v1alpha1
kind: Resiliency
metadata:
name: pubsubnoresiliency
spec:
policies:
retries:
twoRetries:
policy: constant
maxRetries: 2
noRetries:
policy: constant
maxRetries: 0
targets:
components:
messagebus:
inbound:
retry: noRetries
outbound:
retry: noRetries
|
mikeee/dapr
|
tests/config/pubsub_no_resiliency.yaml
|
YAML
|
mit
| 384 |
#
# Copyright 2023 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#kafka pubsub component
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: dapr-perf-test-kafka-pubsub-subs-http
spec:
type: pubsub.kafka
initTimeout: 1m
version: v1
metadata:
# Kafka broker connection setting
- name: brokers
value: dapr-kafka:9092
- name: authRequired
value: "false"
- name: initialOffset
value: newest
scopes:
- kafka-test-app-normal
- kafka-test-app-bulk
- k6-tester-pubsub-subscribe-http
---
#rabbit pubsub component
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: dapr-perf-test-rabbitmq-pubsub-subs-http
spec:
type: pubsub.rabbitmq
initTimeout: 1m
version: v1
metadata:
# rabbitmq broker connection setting
- name: host
value: amqp://admin:admin@rabbitmq.dapr-tests.svc.cluster.local:5672
scopes:
- rabbitmq-test-app-normal
- rabbitmq-test-app-bulk
- k6-tester-pubsub-subscribe-http
---
#mqtt pubsub component
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: dapr-perf-test-mqtt-pubsub-subs-http
spec:
type: pubsub.mqtt3
version: v1
metadata:
- name: url
#value: "tcp://[username][:password]@host.domain[:port]"
value: tcp://perf-test-emqx@perf-test-emqx-0.perf-test-emqx-headless.dapr-tests.svc.cluster.local:1883
scopes:
- mqtt-test-app-normal
- mqtt-test-app-bulk
- k6-tester-pubsub-subscribe-http
---
#pulsar pubsub component
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: dapr-perf-test-pulsar-pubsub-subs-http
spec:
type: pubsub.pulsar
version: v1
metadata:
- name: host
value: "perf-test-pulsar-broker.dapr-tests.svc.cluster.local:6650"
scopes:
- pulsar-test-app-normal
- pulsar-test-app-bulk
- k6-tester-pubsub-subscribe-http
---
#redis pubsub component
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: dapr-perf-test-redis-pubsub-subs-http
spec:
type: pubsub.redis
version: v1
metadata:
- name: redisHost
value: dapr-redis-master.dapr-tests.svc.cluster.local:6379
- name: redisPassword
value: ""
scopes:
- redis-test-app-normal
- redis-test-app-bulk
- k6-tester-pubsub-subscribe-http
|
mikeee/dapr
|
tests/config/pubsub_perf_components.yaml
|
YAML
|
mit
| 2,699 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
auth:
enabled: false
image:
repository: redislabs/rejson
tag: 2.0.11
architecture: standalone
master:
extraFlags:
- --loadmodule
- /usr/lib/redis/modules/rejson.so
- --loadmodule
- /usr/lib/redis/modules/redisearch.so
persistence:
enabled: false
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
- key: kubernetes.io/arch
operator: In
values:
- amd64
|
mikeee/dapr
|
tests/config/redis_override.yaml
|
YAML
|
mit
| 1,211 |
# Resiliency policies applied to the resiliency E2E test apps: a short
# timeout ("fast"), a constant five-attempt retry, and a circuit breaker
# that trips after 15 consecutive failures.
apiVersion: dapr.io/v1alpha1
kind: Resiliency
metadata:
  name: resiliency
spec:
  policies:
    timeouts:
      # Deliberately short so timeout handling is exercised by the tests.
      fast: 500ms
    retries:
      # Up to 5 attempts, 10ms apart.
      fiveRetries:
        policy: constant
        duration: 10ms
        maxRetries: 5
    circuitBreakers:
      # Allows one request while half-open; resets after 30s.
      simpleCB:
        maxRequests: 1
        timeout: 30s
        trip: consecutiveFailures > 15
  targets:
    # HTTP and gRPC test apps share the same policy set.
    apps:
      resiliencyapp:
        timeout: fast
        retry: fiveRetries
        circuitBreaker: simpleCB
      resiliencyappgrpc:
        timeout: fast
        retry: fiveRetries
        circuitBreaker: simpleCB
    actors:
      resiliencyActor:
        timeout: fast
        retry: fiveRetries
      resiliencyInvokeActor:
        timeout: fast
        retry: fiveRetries
    # Binding and pub/sub components get the policies on both directions.
    components:
      dapr-resiliency-binding:
        inbound:
          timeout: fast
          retry: fiveRetries
        outbound:
          timeout: fast
          retry: fiveRetries
      dapr-resiliency-binding-grpc:
        inbound:
          timeout: fast
          retry: fiveRetries
        outbound:
          timeout: fast
          retry: fiveRetries
      dapr-resiliency-pubsub:
        inbound:
          timeout: fast
          retry: fiveRetries
        outbound:
          timeout: fast
          retry: fiveRetries
|
mikeee/dapr
|
tests/config/resiliency.yaml
|
YAML
|
mit
| 1,293 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: dapr-resiliency-binding
spec:
type: bindings.kafka
initTimeout: 1m
version: v1
metadata:
# Kafka broker connection setting
- name: brokers
value: dapr-kafka:9092
# consumer configuration: topic and consumer group
- name: topics
value: resiliency-binding
- name: consumerGroup
value: group1
- name: route
value: /resiliencybinding
# publisher configuration: topic
- name: publishTopic
value: resiliency-binding
- name: authRequired
value: "false"
- name: initialOffset
value: oldest
scopes:
- resiliencyappgrpc
- resiliencyapp
|
mikeee/dapr
|
tests/config/resiliency_kafka_bindings.yaml
|
YAML
|
mit
| 1,227 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: dapr-resiliency-binding-grpc
spec:
type: bindings.kafka
initTimeout: 1m
version: v1
metadata:
# Kafka broker connection setting
- name: brokers
value: dapr-kafka:9092
# consumer configuration: topic and consumer group
- name: topics
value: resiliency-binding-grpc
- name: consumerGroup
value: group1
- name: route
value: /resiliencybindinggrpc
# publisher configuration: topic
- name: publishTopic
value: resiliency-binding-grpc
- name: authRequired
value: "false"
- name: initialOffset
value: oldest
scopes:
- resiliencyappgrpc
- resiliencyapp
|
mikeee/dapr
|
tests/config/resiliency_kafka_bindings_grpc.yaml
|
YAML
|
mit
| 1,246 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: dapr-resiliency-pubsub
spec:
type: pubsub.redis
initTimeout: 1m
version: v1
metadata:
- name: redisHost
secretKeyRef:
name: redissecret
key: host
- name: redisPassword
value: ""
- name: processingTimeout
value: "0"
- name: redeliverInterval
value: "0"
scopes:
- resiliencyappgrpc
- resiliencyapp
|
mikeee/dapr
|
tests/config/resiliency_redis_pubsub.yaml
|
YAML
|
mit
| 986 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
name: dapr-resiliency-pubsub
spec:
type: pubsub.azure.servicebus
initTimeout: 1m
version: v1
metadata:
- name: connectionString
secretKeyRef:
name: servicebus-secret
key: connectionString
- name: handlerTimeoutInSec
value: 60
- name: timeoutInSec
value: 60
- name: lockDurationInSec
value: 5
- name: lockRenewalInSec
value: 5
- name: defaultMessageTimeToLiveInSec
value: 120
- name: maxConcurrentHandlers
value: 5
- name: prefetchCount
value: 20
- name: maxDeliveryCount
value: 1
- name: maxActiveMessages
value: 100
scopes:
- resiliencyappgrpc
- resiliencyapp
|
mikeee/dapr
|
tests/config/resiliency_servicebus_pubsub.yaml
|
YAML
|
mit
| 1,281 |
# Copyright 2022 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# You may obtain a copy of the License at
# https://github.com/tailscale/tailscale/blob/main/LICENSE
# Source code from
# https://github.com/tailscale/tailscale/tree/main/docs/k8s
apiVersion: v1
kind: Secret
metadata:
name: tailscale-auth
stringData:
TS_AUTH_KEY: "{{TS_AUTH_KEY}}"
|
mikeee/dapr
|
tests/config/tailscale_key.yaml
|
YAML
|
mit
| 1,039 |
# Copyright 2022 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# You may obtain a copy of the License at
# https://github.com/tailscale/tailscale/blob/main/LICENSE
# Source code from
# https://github.com/tailscale/tailscale/tree/main/docs/k8s
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: tailscale
rules:
- apiGroups: [""] # "" indicates the core API group
resources: ["secrets"]
# Create can not be restricted to a resource name.
verbs: ["create"]
- apiGroups: [""] # "" indicates the core API group
resourceNames: ["tailscale-auth"]
resources: ["secrets"]
verbs: ["get", "update"]
|
mikeee/dapr
|
tests/config/tailscale_role.yaml
|
YAML
|
mit
| 1,326 |
# Copyright 2022 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# You may obtain a copy of the License at
# https://github.com/tailscale/tailscale/blob/main/LICENSE
# Source code from
# https://github.com/tailscale/tailscale/tree/main/docs/k8s
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: tailscale
subjects:
- kind: ServiceAccount
name: "tailscale"
roleRef:
kind: Role
name: tailscale
apiGroup: rbac.authorization.k8s.io
|
mikeee/dapr
|
tests/config/tailscale_rolebinding.yaml
|
YAML
|
mit
| 1,155 |
# Copyright 2022 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# You may obtain a copy of the License at
# https://github.com/tailscale/tailscale/blob/main/LICENSE
# Source code from
# https://github.com/tailscale/tailscale/tree/main/docs/k8s
apiVersion: v1
kind: ServiceAccount
metadata:
name: tailscale
|
mikeee/dapr
|
tests/config/tailscale_sa.yaml
|
YAML
|
mit
| 998 |
# Copyright 2022 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# You may obtain a copy of the License at
# https://github.com/tailscale/tailscale/blob/main/LICENSE
# Source code from
# https://github.com/tailscale/tailscale/tree/main/docs/k8s
apiVersion: apps/v1
kind: Deployment
metadata:
name: tailscale-subnet-router
labels:
app: tailscale-subnet-router
spec:
replicas: 1
selector:
matchLabels:
app: tailscale-subnet-router
template:
metadata:
labels:
app: tailscale-subnet-router
spec:
serviceAccountName: tailscale
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
containers:
- name: tailscale
imagePullPolicy: Always
image: "ghcr.io/tailscale/tailscale:latest"
env:
# Store the state in a k8s secret
- name: TS_KUBE_SECRET
value: "tailscale-auth"
- name: TS_USERSPACE
value: "true"
- name: TS_AUTH_KEY
valueFrom:
secretKeyRef:
name: tailscale-auth
key: TS_AUTH_KEY
- name: TS_ROUTES
value: "{{TS_ROUTES}}"
securityContext:
runAsUser: 1000
runAsGroup: 1000
resources:
limits:
cpu: "1000m"
memory: "256Mi"
requests:
cpu: "700m"
memory: "128Mi"
|
mikeee/dapr
|
tests/config/tailscale_subnet_router.yaml
|
YAML
|
mit
| 2,372 |
# HTTP middleware component; per its type name it presumably upper-cases
# request bodies (used by the middleware E2E test) — confirm against the
# middleware.http.uppercase implementation.
# ignoreErrors lets the sidecar start even if this middleware fails to load.
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: uppercase
spec:
  ignoreErrors: true
  version: v1
  type: middleware.http.uppercase
  metadata:
  - name: scopes
    value: "*"
|
mikeee/dapr
|
tests/config/uppercase.yaml
|
YAML
|
mit
| 196 |
#
# Copyright 2022 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: dapr-zipkin
name: dapr-zipkin
spec:
replicas: 1
selector:
matchLabels:
app: dapr-zipkin
template:
metadata:
labels:
app: dapr-zipkin
spec:
containers:
- image: ghcr.io/dapr/3rdparty/zipkin:latest
imagePullPolicy: Always
name: zipkin
resources: {}
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
- key: kubernetes.io/arch
operator: In
values:
- amd64
- arm64
---
apiVersion: v1
kind: Service
metadata:
labels:
app: dapr-zipkin
name: dapr-zipkin
namespace: dapr-tests
spec:
ports:
- port: 9411
protocol: TCP
targetPort: 9411
selector:
app: dapr-zipkin
type: ClusterIP
|
mikeee/dapr
|
tests/config/zipkin.yaml
|
YAML
|
mit
| 1,663 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# E2E test app list
# e.g. E2E_TEST_APPS=hellodapr state service_invocation
E2E_TEST_APPS=actorjava \
actordotnet \
actorpython \
actorphp \
healthapp \
hellodapr \
stateapp \
secretapp \
service_invocation \
service_invocation_external \
service_invocation_grpc \
service_invocation_grpc_proxy_client \
service_invocation_grpc_proxy_server \
binding_input \
binding_input_grpc \
binding_output \
pubsub-publisher \
pubsub-subscriber \
pubsub-bulk-subscriber \
pubsub-subscriber_grpc \
pubsub-subscriber-routing \
pubsub-subscriber-routing_grpc \
actorapp \
actorclientapp \
actorfeatures \
actorinvocationapp \
actorstate \
actorreentrancy \
crypto \
runtime \
runtime_init \
middleware \
job-publisher \
resiliencyapp \
resiliencyapp_grpc \
injectorapp \
injectorapp-init \
metadata \
pluggable_redis-statestore \
pluggable_redis-pubsub \
pluggable_kafka-bindings \
tracingapp \
configurationapp \
workflowsapp \
# PERFORMANCE test app list
PERF_TEST_APPS=actorfeatures actorjava tester service_invocation_http service_invocation_grpc actor-activation-locker k6-custom pubsub_subscribe_http configuration workflowsapp
# E2E test app root directory
E2E_TESTAPP_DIR=./tests/apps
# PERFORMANCE test app root directory
PERF_TESTAPP_DIR=./tests/apps/perf
# PERFORMANCE tests
PERF_TESTS=actor_timer \
actor_reminder \
actor_activation \
service_invocation_http \
service_invocation_grpc \
state_get_grpc \
state_get_http \
pubsub_publish_grpc \
pubsub_publish_http \
pubsub_bulk_publish_grpc \
pubsub_bulk_publish_http \
actor_double_activation \
actor_id_scale \
actor_type_scale \
configuration \
pubsub_subscribe_http \
workflows \
KUBECTL=kubectl
DAPR_CONTAINER_LOG_PATH?=./dist/container_logs
DAPR_TEST_LOG_PATH?=./dist/logs
ifeq ($(DAPR_TEST_STATE_STORE),)
DAPR_TEST_STATE_STORE=postgres
endif
ifeq ($(DAPR_TEST_QUERY_STATE_STORE),)
DAPR_TEST_QUERY_STATE_STORE=postgres
endif
ifeq ($(DAPR_TEST_PUBSUB),)
DAPR_TEST_PUBSUB=redis
endif
ifeq ($(DAPR_TEST_CONFIG_STORE),)
DAPR_TEST_CONFIG_STORE=redis
endif
ifeq ($(DAPR_TEST_CRYPTO),)
DAPR_TEST_CRYPTO=jwks
endif
ifeq ($(DAPR_TEST_NAMESPACE),)
DAPR_TEST_NAMESPACE=$(DAPR_NAMESPACE)
endif
ifeq ($(DAPR_TEST_REGISTRY),)
DAPR_TEST_REGISTRY=$(DAPR_REGISTRY)
endif
ifeq ($(DAPR_TEST_TAG),)
DAPR_TEST_TAG=$(DAPR_TAG)-$(TARGET_OS)-$(TARGET_ARCH)
endif
# When targeting minikube, resolve the node IP once; fail fast with a clear
# message if minikube is not running (the merged "find get" wording was a typo).
ifeq ($(DAPR_TEST_ENV),minikube)
MINIKUBE_NODE_IP=$(shell minikube ip)
ifeq ($(MINIKUBE_NODE_IP),)
$(error cannot get minikube node IP address; ensure the minikube environment is running)
endif
endif
ifeq ($(DAPR_PERF_PUBSUB_SUBS_HTTP_TEST_CONFIG_FILE_NAME),)
DAPR_PERF_PUBSUB_SUBS_HTTP_TEST_CONFIG_FILE_NAME=test_all.yaml
endif
ifeq ($(WINDOWS_VERSION),)
WINDOWS_VERSION=ltsc2022
endif
# check the required environment variables
check-e2e-env:
ifeq ($(DAPR_TEST_REGISTRY),)
$(error DAPR_TEST_REGISTRY environment variable must be set)
endif
ifeq ($(DAPR_TEST_TAG),)
$(error DAPR_TEST_TAG environment variable must be set)
endif
check-e2e-cache:
ifeq ($(DAPR_CACHE_REGISTRY),)
$(error DAPR_CACHE_REGISTRY environment variable must be set)
endif
define genTestAppImageBuild
.PHONY: build-e2e-app-$(1)
build-e2e-app-$(1): check-e2e-env
$(RUN_BUILD_TOOLS) e2e build \
--name "$(1)" \
--appdir "../$(E2E_TESTAPP_DIR)" \
--dest-registry "$(DAPR_TEST_REGISTRY)" \
--dest-tag "$(DAPR_TEST_TAG)" \
--dockerfile "$(DOCKERFILE)" \
--target-os "$(TARGET_OS)" \
--target-arch "$(TARGET_ARCH)" \
--cache-registry "$(DAPR_CACHE_REGISTRY)"
endef
# Generate test app image build targets
$(foreach ITEM,$(E2E_TEST_APPS),$(eval $(call genTestAppImageBuild,$(ITEM))))
define genTestAppImagePush
.PHONY: push-e2e-app-$(1)
push-e2e-app-$(1): check-e2e-env
$(RUN_BUILD_TOOLS) e2e push \
--name "$(1)" \
--dest-registry "$(DAPR_TEST_REGISTRY)" \
--dest-tag "$(DAPR_TEST_TAG)"
endef
# Generate test app image push targets
$(foreach ITEM,$(E2E_TEST_APPS),$(eval $(call genTestAppImagePush,$(ITEM))))
define genTestAppImageBuildPush
.PHONY: build-push-e2e-app-$(1)
build-push-e2e-app-$(1): check-e2e-env check-e2e-cache
$(RUN_BUILD_TOOLS) e2e build-and-push \
--name "$(1)" \
--appdir "../$(E2E_TESTAPP_DIR)" \
--dest-registry "$(DAPR_TEST_REGISTRY)" \
--dest-tag "$(DAPR_TEST_TAG)" \
--dockerfile "$(DOCKERFILE)" \
--target-os "$(TARGET_OS)" \
--target-arch "$(TARGET_ARCH)" \
--cache-registry "$(DAPR_CACHE_REGISTRY)" \
--windows-version "$(WINDOWS_VERSION)"
endef
# Generate test app image build-push targets
$(foreach ITEM,$(E2E_TEST_APPS),$(eval $(call genTestAppImageBuildPush,$(ITEM))))
define genTestAppImageKindPush
.PHONY: push-kind-e2e-app-$(1)
push-kind-e2e-app-$(1): check-e2e-env
kind load docker-image $(DAPR_TEST_REGISTRY)/e2e-$(1):$(DAPR_TEST_TAG)
endef
# Generate test app image push targets
$(foreach ITEM,$(E2E_TEST_APPS),$(eval $(call genTestAppImageKindPush,$(ITEM))))
define genPerfTestAppImageBuild
.PHONY: build-perf-app-$(1)
build-perf-app-$(1): check-e2e-env
$(RUN_BUILD_TOOLS) perf build \
--name "$(1)" \
--appdir "../$(E2E_TESTAPP_DIR)" \
--dest-registry "$(DAPR_TEST_REGISTRY)" \
--dest-tag "$(DAPR_TEST_TAG)" \
--target-os "$(TARGET_OS)" \
--target-arch "$(TARGET_ARCH)" \
--cache-registry "$(DAPR_CACHE_REGISTRY)"
endef
# Generate perf app image build targets
$(foreach ITEM,$(PERF_TEST_APPS),$(eval $(call genPerfTestAppImageBuild,$(ITEM))))
define genPerfAppImagePush
.PHONY: push-perf-app-$(1)
push-perf-app-$(1): check-e2e-env
$(RUN_BUILD_TOOLS) perf push \
--name "$(1)" \
--dest-registry "$(DAPR_TEST_REGISTRY)" \
--dest-tag "$(DAPR_TEST_TAG)"
endef
define genPerfAppImageBuildPush
.PHONY: build-push-perf-app-$(1)
build-push-perf-app-$(1): check-e2e-env check-e2e-cache
$(RUN_BUILD_TOOLS) perf build-and-push \
--name "$(1)" \
--appdir "../$(E2E_TESTAPP_DIR)" \
--dest-registry "$(DAPR_TEST_REGISTRY)" \
--dest-tag "$(DAPR_TEST_TAG)" \
--cache-registry "$(DAPR_CACHE_REGISTRY)"
endef
# Generate perf app image build-push targets
$(foreach ITEM,$(PERF_TEST_APPS),$(eval $(call genPerfAppImageBuildPush,$(ITEM))))
define genPerfAppImageKindPush
.PHONY: push-kind-perf-app-$(1)
push-kind-perf-app-$(1): check-e2e-env
kind load docker-image $(DAPR_TEST_REGISTRY)/perf-$(1):$(DAPR_TEST_TAG)
endef
# Create the Kubernetes namespace used by the tests.
create-test-namespace:
	kubectl create namespace $(DAPR_TEST_NAMESPACE)

# Delete the test namespace and everything in it.
# Fix: the recipe had a stray trailing "aa" argument, which made kubectl also
# try to delete a namespace literally named "aa" and fail the target.
delete-test-namespace:
	kubectl delete namespace $(DAPR_TEST_NAMESPACE)
setup-3rd-party: setup-helm-init setup-test-env-redis setup-test-env-kafka setup-test-env-zipkin setup-test-env-postgres
setup-pubsub-subs-perf-test-components: setup-test-env-rabbitmq setup-test-env-pulsar setup-test-env-mqtt
e2e-build-deploy-run: create-test-namespace setup-3rd-party build docker-push docker-deploy-k8s setup-test-components build-e2e-app-all push-e2e-app-all test-e2e-all
perf-build-deploy-run: create-test-namespace setup-3rd-party build docker-push docker-deploy-k8s setup-test-components build-perf-app-all push-perf-app-all test-perf-all
# Generate perf app image push targets
$(foreach ITEM,$(PERF_TEST_APPS),$(eval $(call genPerfAppImagePush,$(ITEM))))
# Generate perf app image kind push targets
$(foreach ITEM,$(PERF_TEST_APPS),$(eval $(call genPerfAppImageKindPush,$(ITEM))))
# Enumerate test app build targets
BUILD_E2E_APPS_TARGETS:=$(foreach ITEM,$(E2E_TEST_APPS),build-e2e-app-$(ITEM))
# Enumerate test app push targets
PUSH_E2E_APPS_TARGETS:=$(foreach ITEM,$(E2E_TEST_APPS),push-e2e-app-$(ITEM))
# Enumerate test app build-push targets
BUILD_PUSH_E2E_APPS_TARGETS:=$(foreach ITEM,$(E2E_TEST_APPS),build-push-e2e-app-$(ITEM))
# Enumerate test app push targets
PUSH_KIND_E2E_APPS_TARGETS:=$(foreach ITEM,$(E2E_TEST_APPS),push-kind-e2e-app-$(ITEM))
# Enumerate test app build targets
BUILD_PERF_APPS_TARGETS:=$(foreach ITEM,$(PERF_TEST_APPS),build-perf-app-$(ITEM))
# Enumerate perf app push targets
PUSH_PERF_APPS_TARGETS:=$(foreach ITEM,$(PERF_TEST_APPS),push-perf-app-$(ITEM))
# Enumerate perf app build-push targets
BUILD_PUSH_PERF_APPS_TARGETS:=$(foreach ITEM,$(PERF_TEST_APPS),build-push-perf-app-$(ITEM))
# Enumerate perf app kind push targets
PUSH_KIND_PERF_APPS_TARGETS:=$(foreach ITEM,$(PERF_TEST_APPS),push-kind-perf-app-$(ITEM))
# build test app image
build-e2e-app-all: $(BUILD_E2E_APPS_TARGETS)
# push test app image to the registry
push-e2e-app-all: $(PUSH_E2E_APPS_TARGETS)
# build and push test app image to the registry
# can be faster because it uses cache and copies images directly
build-push-e2e-app-all: $(BUILD_PUSH_E2E_APPS_TARGETS)
# push test app image to kind cluster
push-kind-e2e-app-all: $(PUSH_KIND_E2E_APPS_TARGETS)
# build perf app image
build-perf-app-all: $(BUILD_PERF_APPS_TARGETS)
# push perf app image to the registry
push-perf-app-all: $(PUSH_PERF_APPS_TARGETS)
# build and push perf app image to the registry
# can be faster because it uses cache and copies images directly
build-push-perf-app-all: $(BUILD_PUSH_PERF_APPS_TARGETS)
# push perf app image to kind cluster
push-kind-perf-app-all: $(PUSH_KIND_PERF_APPS_TARGETS)
.PHONY: test-deps
test-deps:
# The desire here is to download this test dependency without polluting go.mod
command -v gotestsum || go install gotest.tools/gotestsum@latest
# start all e2e tests
test-e2e-all: check-e2e-env test-deps
# Note: we can set -p 2 to run two tests apps at a time, because today we do not share state between
# tests. In the future, if we add any tests that modify global state (such as dapr config), we'll
# have to be sure and run them after the main test suite, so as not to alter the state of a running
# test
# Note2: use env variable DAPR_E2E_TEST to pick one e2e test to run.
ifeq ($(DAPR_E2E_TEST),)
DAPR_CONTAINER_LOG_PATH=$(DAPR_CONTAINER_LOG_PATH) DAPR_TEST_LOG_PATH=$(DAPR_TEST_LOG_PATH) GOOS=$(TARGET_OS_LOCAL) DAPR_TEST_NAMESPACE=$(DAPR_TEST_NAMESPACE) DAPR_TEST_TAG=$(DAPR_TEST_TAG) DAPR_TEST_REGISTRY=$(DAPR_TEST_REGISTRY) DAPR_TEST_MINIKUBE_IP=$(MINIKUBE_NODE_IP) gotestsum --jsonfile $(TEST_OUTPUT_FILE_PREFIX)_e2e.json --junitfile $(TEST_OUTPUT_FILE_PREFIX)_e2e.xml --format standard-quiet -- -timeout 15m -p 2 -count=1 -v -tags=e2e ./tests/e2e/$(DAPR_E2E_TEST)/...
else
for app in $(DAPR_E2E_TEST); do \
DAPR_CONTAINER_LOG_PATH=$(DAPR_CONTAINER_LOG_PATH) DAPR_TEST_LOG_PATH=$(DAPR_TEST_LOG_PATH) GOOS=$(TARGET_OS_LOCAL) DAPR_TEST_NAMESPACE=$(DAPR_TEST_NAMESPACE) DAPR_TEST_TAG=$(DAPR_TEST_TAG) DAPR_TEST_REGISTRY=$(DAPR_TEST_REGISTRY) DAPR_TEST_MINIKUBE_IP=$(MINIKUBE_NODE_IP) gotestsum --jsonfile $(TEST_OUTPUT_FILE_PREFIX)_e2e.json --junitfile $(TEST_OUTPUT_FILE_PREFIX)_e2e.xml --format standard-quiet -- -timeout 15m -p 2 -count=1 -v -tags=e2e ./tests/e2e/$$app/...; \
done
endif
define genPerfTestRun
.PHONY: test-perf-$(1)
test-perf-$(1): check-e2e-env test-deps
DAPR_CONTAINER_LOG_PATH=$(DAPR_CONTAINER_LOG_PATH) \
DAPR_TEST_LOG_PATH=$(DAPR_TEST_LOG_PATH) \
GOOS=$(TARGET_OS_LOCAL) \
DAPR_TEST_NAMESPACE=$(DAPR_TEST_NAMESPACE) \
DAPR_TEST_TAG=$(DAPR_TEST_TAG) \
DAPR_TEST_REGISTRY=$(DAPR_TEST_REGISTRY) \
DAPR_TEST_MINIKUBE_IP=$(MINIKUBE_NODE_IP) \
NO_API_LOGGING=true \
gotestsum \
--jsonfile $(TEST_OUTPUT_FILE_PREFIX)_perf_$(1).json \
--junitfile $(TEST_OUTPUT_FILE_PREFIX)_perf_$(1).xml \
--format standard-quiet \
-- \
-timeout 2h -p 1 -count=1 -v -tags=perf ./tests/perf/$(1)/...
jq -r .Output $(TEST_OUTPUT_FILE_PREFIX)_perf_$(1).json | strings
endef
# Generate perf app image build targets
$(foreach ITEM,$(PERF_TESTS),$(eval $(call genPerfTestRun,$(ITEM))))
TEST_PERF_TARGETS:=$(foreach ITEM,$(PERF_TESTS),test-perf-$(ITEM))
# start all perf tests
test-perf-all: check-e2e-env test-deps
# Note: use env variable DAPR_PERF_TEST to pick one e2e test to run.
ifeq ($(DAPR_PERF_TEST),)
DAPR_CONTAINER_LOG_PATH=$(DAPR_CONTAINER_LOG_PATH) \
DAPR_TEST_LOG_PATH=$(DAPR_TEST_LOG_PATH) \
GOOS=$(TARGET_OS_LOCAL) \
DAPR_TEST_NAMESPACE=$(DAPR_TEST_NAMESPACE) \
DAPR_TEST_TAG=$(DAPR_TEST_TAG) \
DAPR_TEST_REGISTRY=$(DAPR_TEST_REGISTRY) \
DAPR_TEST_MINIKUBE_IP=$(MINIKUBE_NODE_IP) \
NO_API_LOGGING=true \
gotestsum \
--jsonfile $(TEST_OUTPUT_FILE_PREFIX)_perf.json \
--junitfile $(TEST_OUTPUT_FILE_PREFIX)_perf.xml \
--format standard-quiet \
-- \
-timeout 2.5h -p 1 -count=1 -v -tags=perf ./tests/perf/...
jq -r .Output $(TEST_OUTPUT_FILE_PREFIX)_perf.json | strings
else
for app in $(DAPR_PERF_TEST); do \
DAPR_CONTAINER_LOG_PATH=$(DAPR_CONTAINER_LOG_PATH) \
DAPR_TEST_LOG_PATH=$(DAPR_TEST_LOG_PATH) \
GOOS=$(TARGET_OS_LOCAL) \
DAPR_TEST_NAMESPACE=$(DAPR_TEST_NAMESPACE) \
DAPR_TEST_TAG=$(DAPR_TEST_TAG) \
DAPR_TEST_REGISTRY=$(DAPR_TEST_REGISTRY) \
DAPR_TEST_MINIKUBE_IP=$(MINIKUBE_NODE_IP) \
NO_API_LOGGING=true \
gotestsum \
--jsonfile $(TEST_OUTPUT_FILE_PREFIX)_perf.json \
--junitfile $(TEST_OUTPUT_FILE_PREFIX)_perf.xml \
--format standard-quiet \
-- \
-timeout 2.5h -p 1 -count=1 -v -tags=perf ./tests/perf/$$app... || exit -1 ; \
jq -r .Output $(TEST_OUTPUT_FILE_PREFIX)_perf.json | strings ; \
done
endif
test-perf-pubsub-subscribe-http-components: check-e2e-env test-deps
DAPR_CONTAINER_LOG_PATH=$(DAPR_CONTAINER_LOG_PATH) \
DAPR_TEST_LOG_PATH=$(DAPR_TEST_LOG_PATH) \
GOOS=$(TARGET_OS_LOCAL) \
DAPR_TEST_NAMESPACE=$(DAPR_TEST_NAMESPACE) \
DAPR_TEST_TAG=$(DAPR_TEST_TAG) \
DAPR_TEST_REGISTRY=$(DAPR_TEST_REGISTRY) \
DAPR_TEST_MINIKUBE_IP=$(MINIKUBE_NODE_IP) \
DAPR_PERF_PUBSUB_SUBS_HTTP_TEST_CONFIG_FILE_NAME=$(DAPR_PERF_PUBSUB_SUBS_HTTP_TEST_CONFIG_FILE_NAME) \
NO_API_LOGGING=true \
gotestsum \
--jsonfile $(TEST_OUTPUT_FILE_PREFIX)_perf_$(1).json \
--junitfile $(TEST_OUTPUT_FILE_PREFIX)_perf_$(1).xml \
--format standard-quiet \
-- \
-timeout 3h -p 1 -count=1 -v -tags=perf ./tests/perf/pubsub_subscribe_http/...
jq -r .Output $(TEST_OUTPUT_FILE_PREFIX)_perf_$(1).json | strings
# add required helm repo
setup-helm-init:
$(HELM) repo add bitnami https://charts.bitnami.com/bitnami
$(HELM) repo add stable https://charts.helm.sh/stable
$(HELM) repo add incubator https://charts.helm.sh/incubator
$(HELM) repo update
# setup tailscale
.PHONY: setup-tailscale
setup-tailscale:
ifeq ($(TAILSCALE_AUTH_KEY),)
$(error TAILSCALE_AUTH_KEY environment variable must be set)
else
DAPR_TEST_NAMESPACE=$(DAPR_TEST_NAMESPACE) TAILSCALE_AUTH_KEY=$(TAILSCALE_AUTH_KEY) ./tests/setup_tailscale.sh
endif
# install k6 loadtesting to the cluster
# Install the Grafana k6 load-testing operator into the cluster: apply the
# service account, role binding and SA secret, then build and deploy the
# operator from the pinned v0.0.8 tag.
setup-test-env-k6: controller-gen
	$(KUBECTL) apply -f ./tests/config/k6_sa.yaml -n $(DAPR_TEST_NAMESPACE)
	$(KUBECTL) apply -f ./tests/config/k6_rolebinding.yaml -n $(DAPR_TEST_NAMESPACE)
	$(KUBECTL) apply -f ./tests/config/k6_sa_secret.yaml -n $(DAPR_TEST_NAMESPACE)
	# The sed below switches the operator's controller-gen options to
	# crd:maxDescLen=0, i.e. it generates CRDs without field descriptions
	# (presumably to keep the CRDs small enough to apply — TODO confirm).
	export IMG=ghcr.io/grafana/operator:controller-v0.0.8 && \
	rm -rf /tmp/.k6-operator >/dev/null && \
	git clone --depth 1 --branch v0.0.8 https://github.com/grafana/k6-operator /tmp/.k6-operator && \
	cd /tmp/.k6-operator && \
	sed -i 's/crd:trivialVersions=true,crdVersions=v1/crd:maxDescLen=0/g' Makefile && \
	make deploy && \
	cd - && \
	rm -rf /tmp/.k6-operator
# Remove the k6 load-testing operator and its RBAC resources from the cluster.
# The operator checkout is pinned to the same release (v0.0.8) that
# setup-test-env-k6 deployed, so `make delete` runs against matching
# manifests instead of whatever is on the default branch.
delete-test-env-k6: controller-gen
$(KUBECTL) delete -f ./tests/config/k6_sa.yaml -n $(DAPR_TEST_NAMESPACE)
$(KUBECTL) delete -f ./tests/config/k6_rolebinding.yaml -n $(DAPR_TEST_NAMESPACE)
$(KUBECTL) delete -f ./tests/config/k6_sa_secret.yaml -n $(DAPR_TEST_NAMESPACE)
rm -rf /tmp/.k6-operator >/dev/null && git clone --depth 1 --branch v0.0.8 https://github.com/grafana/k6-operator /tmp/.k6-operator && cd /tmp/.k6-operator && make delete && cd - && rm -rf /tmp/.k6-operator
# Install redis (bitnami chart 17.14.5) without a password for state/pubsub
# tests, using the override config; --wait blocks until the release is ready.
setup-test-env-redis:
$(HELM) upgrade \
--install dapr-redis bitnami/redis \
--version 17.14.5 \
--wait \
--timeout 5m0s \
--namespace $(DAPR_TEST_NAMESPACE) \
-f ./tests/config/redis_override.yaml
# Uninstall the redis release from the test namespace.
delete-test-env-redis:
${HELM} del dapr-redis --namespace ${DAPR_TEST_NAMESPACE}
# Install kafka (bitnami chart 23.0.7) for pubsub/bindings tests.
setup-test-env-kafka:
$(HELM) upgrade \
--install dapr-kafka bitnami/kafka \
--version 23.0.7 \
-f ./tests/config/kafka_override.yaml \
--namespace $(DAPR_TEST_NAMESPACE) \
--timeout 10m0s
# Install rabbitmq (bitnami chart 12.0.9). The admin/admin credentials are
# hard-coded for the test environment only.
setup-test-env-rabbitmq:
$(HELM) upgrade \
--install rabbitmq bitnami/rabbitmq \
--version 12.0.9 \
--set auth.username='admin' \
--set auth.password='admin' \
--namespace $(DAPR_TEST_NAMESPACE) \
--timeout 10m0s
# Install the EMQX MQTT broker (emqx chart 5.1.4) to the cluster.
setup-test-env-mqtt:
$(HELM) repo add emqx https://repos.emqx.io/charts
$(HELM) repo update
$(HELM) upgrade \
--install perf-test-emqx emqx/emqx \
--version 5.1.4 \
--namespace $(DAPR_TEST_NAMESPACE) \
--timeout 10m0s
# install pulsar to the cluster (apache chart 3.0.0)
setup-test-env-pulsar:
$(HELM) repo add apache https://pulsar.apache.org/charts
$(HELM) repo update
$(HELM) upgrade \
--install perf-test-pulsar apache/pulsar \
--version 3.0.0 \
--namespace $(DAPR_TEST_NAMESPACE) \
--timeout 10m0s
# delete kafka from cluster
delete-test-env-kafka:
$(HELM) del dapr-kafka --namespace $(DAPR_TEST_NAMESPACE)
# Install postgres (bitnami chart 12.8.0) for state-store tests; --wait
# blocks until the release is ready.
setup-test-env-postgres:
$(HELM) upgrade \
--install dapr-postgres bitnami/postgresql \
--version 12.8.0 \
-f ./tests/config/postgres_override.yaml \
--namespace $(DAPR_TEST_NAMESPACE) \
--wait \
--timeout 5m0s
# delete postgres from cluster
delete-test-env-postgres:
$(HELM) del dapr-postgres --namespace $(DAPR_TEST_NAMESPACE)
# Deploy / remove the zipkin tracing backend used by the tests.
setup-test-env-zipkin:
$(KUBECTL) apply -f ./tests/config/zipkin.yaml -n $(DAPR_TEST_NAMESPACE)
delete-test-env-zipkin:
$(KUBECTL) delete -f ./tests/config/zipkin.yaml -n $(DAPR_TEST_NAMESPACE)
# Set up the full test environment by installing all third-party components
# (kafka, redis, postgres, the k6 operator, and zipkin).
# Fix: setup-test-env-postgres was listed twice; make runs a prerequisite only
# once, so the duplicate was dead weight and has been removed.
setup-test-env: setup-test-env-kafka setup-test-env-redis setup-test-env-postgres setup-test-env-k6 setup-test-env-zipkin
# Capture a `kubectl describe` of every resource in the test namespace into
# the container-log directory (used when collecting diagnostics).
save-dapr-control-plane-k8s-resources:
mkdir -p '$(DAPR_CONTAINER_LOG_PATH)'
$(KUBECTL) describe all -n $(DAPR_TEST_NAMESPACE) > '$(DAPR_CONTAINER_LOG_PATH)/control_plane_k8s_resources.txt'
# Capture the full logs (--tail=-1) from every container of every dapr
# control-plane pod into the container-log directory.
save-dapr-control-plane-k8s-logs:
mkdir -p '$(DAPR_CONTAINER_LOG_PATH)'
$(KUBECTL) logs -l 'app.kubernetes.io/name=dapr' --tail=-1 -n $(DAPR_TEST_NAMESPACE) > '$(DAPR_CONTAINER_LOG_PATH)/control_plane_containers.log' --all-containers
# Apply default config yaml to turn mTLS off for testing (mTLS is enabled by default)
setup-disable-mtls:
$(KUBECTL) apply -f ./tests/config/dapr_mtls_off_config.yaml --namespace $(DAPR_TEST_NAMESPACE)
# Apply the observability test Configuration used by the test apps.
setup-app-configurations:
$(KUBECTL) apply -f ./tests/config/dapr_observability_test_config.yaml --namespace $(DAPR_TEST_NAMESPACE)
# Apply component yaml for state, secrets, pubsub, workflows, and bindings.
# The DAPR_TEST_* variables select which backing implementation (redis,
# kafka, etc.) each component yaml is rendered from.
setup-test-components: setup-app-configurations
$(KUBECTL) apply -f ./tests/config/kubernetes_secret.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/kubernetes_secret_config.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/kubernetes_redis_secret.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/kubernetes_redis_host_config.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/dapr_$(DAPR_TEST_STATE_STORE)_state.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/dapr_$(DAPR_TEST_STATE_STORE)_state_actorstore.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/dapr_$(DAPR_TEST_QUERY_STATE_STORE)_query_state.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/dapr_redis_pluggable_state.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/dapr_tests_cluster_role_binding.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/dapr_$(DAPR_TEST_PUBSUB)_pubsub.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/dapr_$(DAPR_TEST_CONFIG_STORE)_configuration.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/pubsub_no_resiliency.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/kafka_pubsub.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/dapr_crypto_$(DAPR_TEST_CRYPTO).yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/dapr_kafka_pluggable_bindings.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/dapr_kafka_bindings.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/dapr_kafka_bindings_custom_route.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/dapr_kafka_bindings_grpc.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/app_topic_subscription_pluggable_pubsub.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/app_topic_subscription_pubsub.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/app_topic_subscription_pubsub_grpc.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/kubernetes_allowlists_config.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/kubernetes_allowlists_grpc_config.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/dapr_redis_state_query.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/dapr_redis_state_badhost.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/dapr_redis_state_badpass.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/dapr_vault_secretstore.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/uppercase.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/pipeline.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/pipeline_app.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/preview_configurations.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/app_topic_subscription_routing.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/app_topic_subscription_routing_grpc.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/resiliency.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/resiliency_kafka_bindings.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/resiliency_kafka_bindings_grpc.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/resiliency_$(DAPR_TEST_PUBSUB)_pubsub.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/dapr_in_memory_pubsub.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/dapr_in_memory_state.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/dapr_tracing_config.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/dapr_cron_binding.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/external_invocation_http_endpoint.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/grpcproxyserverexternal_service.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/externalinvocationcrd.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/omithealthchecks_config.yaml --namespace $(DAPR_TEST_NAMESPACE)
$(KUBECTL) apply -f ./tests/config/external_invocation_http_endpoint_tls.yaml --namespace $(DAPR_TEST_NAMESPACE)
# Don't set namespace as Namespace is defined in the yaml.
$(KUBECTL) apply -f ./tests/config/ignore_daprsystem_config.yaml
# Show the installed components
$(KUBECTL) get components --namespace $(DAPR_TEST_NAMESPACE)
# Show the installed configurations
$(KUBECTL) get configurations --namespace $(DAPR_TEST_NAMESPACE)
# Apply the pubsub components used only by the perf tests.
setup-components-perf-test:
$(KUBECTL) apply -f ./tests/config/pubsub_perf_components.yaml --namespace $(DAPR_TEST_NAMESPACE)
# Create a local KinD cluster with a co-located docker registry
# (localhost:5000) and a metrics-server.
setup-kind:
kind create cluster --config ./tests/config/kind.yaml --name kind
$(KUBECTL) cluster-info --context kind-kind
# Start a local registry container so images can be pushed to localhost:5000
docker run -d --restart=always -p 5000:5000 --name kind-registry registry:2
# Connect the registry to the KinD network.
docker network connect "kind" kind-registry
# Setup metrics-server (insecure TLS to the kubelet is fine for local KinD)
helm install ms stable/metrics-server -n kube-system --set=args={--kubelet-insecure-tls}
# Print the `export` commands (node IP, registries, tag, namespace) needed to
# run the tests against a KinD cluster; copy-and-paste the output into your
# shell.
describe-kind-env:
@echo "\
export MINIKUBE_NODE_IP=`kubectl get nodes \
-lkubernetes.io/hostname!=kind-control-plane \
-ojsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}'`\n\
export DAPR_REGISTRY=$${DAPR_REGISTRY:-localhost:5000/dapr}\n\
export DAPR_TEST_REGISTRY=$${DAPR_TEST_REGISTRY:-localhost:5000/dapr}\n\
export DAPR_TAG=dev\n\
export DAPR_NAMESPACE=dapr-tests"
# Tear down the KinD cluster and its local registry container.
delete-kind:
docker stop kind-registry && docker rm kind-registry || echo "Could not delete registry."
kind delete cluster --name kind
# Detect the host OS (windows/linux/darwin) to select the matching
# setup-minikube-* target below.
ifeq ($(OS),Windows_NT)
detected_OS := windows
else
detected_OS := $(shell sh -c 'uname 2>/dev/null || echo Unknown' | tr '[:upper:]' '[:lower:]')
endif
# Start minikube on macOS: hyperkit driver on amd64, qemu + socket_vmnet on
# other architectures (e.g. Apple Silicon).
setup-minikube-darwin:
ifeq ($(TARGET_ARCH_LOCAL),amd64)
minikube start --memory=4g --cpus=4 --driver=hyperkit
else
# Install qemu and configure the dedicated network: https://minikube.sigs.k8s.io/docs/drivers/qemu/#networking
minikube start --memory=4g --cpus=4 --driver=qemu --network socket_vmnet
endif
minikube addons enable metrics-server
# Start minikube on Windows with the default driver.
setup-minikube-windows:
minikube start --memory=4g --cpus=4
minikube addons enable metrics-server
# Start minikube on Linux with the default driver.
setup-minikube-linux:
minikube start --memory=4g --cpus=4
minikube addons enable metrics-server
# Dispatch to the OS-specific setup target based on detected_OS.
setup-minikube: setup-minikube-$(detected_OS)
# Print the `export` commands for running tests against minikube;
# DAPR_REGISTRY is derived from the Docker Desktop credential store via jq.
describe-minikube-env:
@echo "\
export DAPR_REGISTRY=docker.io/`docker-credential-desktop list | jq -r '\
. | to_entries[] | select(.key | contains("docker.io")) | last(.value)'`\n\
export DAPR_TAG=dev\n\
export DAPR_NAMESPACE=dapr-tests\n\
export DAPR_TEST_ENV=minikube\n\
export DAPR_TEST_REGISTRY=\n\
export MINIKUBE_NODE_IP="
# Delete minikube
delete-minikube:
minikube delete
# Delete all stored test results. The leading '-' on each rm lets make keep
# going even when a pattern matches nothing.
.PHONY: test-clean
test-clean:
-rm -rv ./tests/e2e/*/dist
-rm -rv ./tests/perf/*/dist
-rm -rv ./tests/perf/*/test_report_summary_table_*.json
-rm test_report_*.json
-rm test_report_*.xml
|
mikeee/dapr
|
tests/dapr_tests.mk
|
mk
|
mit
| 26,942 |
# Running End-To-End Tests
E2E tests are designed for verifying the functional correctness by replicating end-user behavior from app deployment. This describes how to run e2e tests.
- [Run E2E tests in local dev environment](#run-e2e-tests-in-local-dev-environment)
- [Run E2E tests through GitHub Actions](#run-e2e-tests-through-github-actions)
- [Run E2E tests on Azure AKS](#run-e2e-tests-on-azure-aks)
- [Run E2E tests using a Wireguard Tunnel with tailscale](#run-e2e-tests-using-a-wireguard-tunnel-with-tailscale)
## Run E2E tests in local dev environment
### Prerequisites
1. Set up [Dapr development environment](https://github.com/dapr/dapr/blob/master/docs/development/setup-dapr-development-env.md)
2. [Install the latest Helm v3](https://helm.sh/docs/intro/install/)
3. Get a Docker container registry:
- If using Docker Hub, create your Docker Hub ID
- Other options include Azure Container Registry, GitHub Container Registry, etc
4. Set the environment variables
```bash
# If using Docker Hub:
export DAPR_REGISTRY=docker.io/your_dockerhub_id
# You can use other registries too, for example:
export DAPR_REGISTRY=myregistry.azurecr.io
export DAPR_TAG=dev
export DAPR_NAMESPACE=dapr-tests
export DAPR_MTLS_ENABLED=true
# If you want to enable debug logs for the daprd container set this
# export DEBUG_LOGGING=true
# If you want to run tests against Windows or arm kubernetes clusters, uncomment and set these
# export TARGET_OS=linux
# export TARGET_ARCH=amd64
# If you are cross compiling (building on MacOS/Windows and running against a Linux Kubernetes cluster
# or vice versa) uncomment and set these
# export GOOS=linux
# export GOARCH=amd64
# If you want to use a single container image `dapr` instead of individual images
# (like sentry, injector, daprd, etc.), uncomment and set this.
# export ONLY_DAPR_IMAGE=true
# Do not set DAPR_TEST_ENV if you do not use minikube
export DAPR_TEST_ENV=minikube
# If you are using minikube, you'll need to set the IP address for the minikube control plane.
export MINIKUBE_NODE_IP=your_k8s_master_ip
# Set the below environment variables if you want to use the different registry and tag for test apps
# export DAPR_TEST_REGISTRY=docker.io/your_dockerhub_id
# export DAPR_TEST_TAG=dev
# export DAPR_TEST_REGISTRY_SECRET=yourself_private_image_secret
```
> If you need to create the `DAPR_TEST_REGISTRY_SECRET` variable, you can use this command:
>
> ```sh
> DOCKER_REGISTRY="<url of the registry, such as myregistry.azurecr.io>"
> DOCKER_USERNAME="<your username>"
> DOCKER_PASSWORD="<your password>"
> DOCKER_EMAIL="<your email (leave empty if not required)>"
> export DAPR_TEST_REGISTRY_SECRET=$(
> kubectl create secret docker-registry --dry-run=client docker-regcred \
> --docker-server="${DOCKER_REGISTRY}" \
> --docker-username="${DOCKER_USERNAME}" \
> --docker-password="${DOCKER_PASSWORD}" \
> --docker-email=${DOCKER_EMAIL} \
> -o json | \
> jq -r '.data.".dockerconfigjson"'
> )
> ```
### Option 1: Build, deploy, and run Dapr and e2e tests
If you are starting from scratch and just want to build dapr, deploy it, and run the e2e tests to your kubernetes cluster, do the following:
1. Uninstall dapr, dapr-kafka, dapr-redis, dapr-postgres, if they exist
*Make sure you have DAPR_NAMESPACE set properly before you do this!*
```sh
helm uninstall dapr dapr-kafka dapr-redis dapr-postgres -n $DAPR_NAMESPACE
```
2. Remove the test namespace, if it exists
```bash
make delete-test-namespace
```
> Note: make sure you have run the helm uninstall command before deleting the dapr test namespace. Otherwise, if you delete the dapr test namespace without running helm uninstall and then re-install the dapr control plane, the dapr sidecar injector won't work and will fail with "bad certificate". If you have already run into this problem, you can recover by running the helm uninstall command. See https://github.com/dapr/dapr/issues/4612
3. Build, deploy, run tests from start to finish
```bash
make e2e-build-deploy-run
```
### Option 2: Step by step guide
We also have individual targets to allow for quick iteration on parts of deployment and testing. To follow all or part of these steps individually, do the following:
Create dapr-tests namespace
```bash
make create-test-namespace
```
Install redis and kafka for state, pubsub, and binding building block
```bash
make setup-helm-init
make setup-test-env-redis
make setup-test-env-postgres
# This may take a few minutes. You can skip kafka install if you do not use bindings for your tests.
make setup-test-env-kafka
```
Run the below commands to build and deploy dapr from your local disk
```bash
# Build Linux binaries
make build-linux
# Build Docker image with Linux binaries
make docker-build
# Push docker image to your dockerhub registry
make docker-push
# Deploy Dapr runtime to your cluster
make docker-deploy-k8s
```
#### Optional: Apply this configuration to disable mTLS
```bash
make setup-disable-mtls
```
#### Register the default component configurations for testing
```bash
make setup-test-components
```
#### Build and push test apps to docker hub
Build docker images from apps and push the images to test docker hub
```bash
# build e2e apps docker image under apps/
make build-e2e-app-all
# push e2e apps docker image to docker hub
make push-e2e-app-all
```
#### Run end-to-end test
Run end-to-end tests
```bash
# start e2e test
make test-e2e-all
```
#### Run a subset of end-to-end tests
If you'd rather run a subset of end-to-end test, set the environmental variable `DAPR_E2E_TEST` with the name(s) of the test(s) (space-separated). These are the names of folders within the `tests/e2e` directory.
```sh
DAPR_E2E_TEST="actor_reminder" make test-e2e-all
```
#### Cleanup local environment
To completely remove Dapr, test dependencies, and any lingering e2e test apps:
*Make sure you have DAPR_NAMESPACE set properly before you do this!*
```bash
# Uninstall dapr, dapr-kafka, dapr-redis, dapr-postgres, then remove dapr-zipkin
helm uninstall dapr -n $DAPR_NAMESPACE || true
helm uninstall dapr-kafka -n $DAPR_NAMESPACE || true
helm uninstall dapr-redis -n $DAPR_NAMESPACE || true
helm uninstall dapr-postgres -n $DAPR_NAMESPACE || true
kubectl delete deployment dapr-zipkin -n $DAPR_NAMESPACE || true
# Remove the test namespace
make delete-test-namespace
```
## Run E2E tests through GitHub Actions
To keep the build infrastructure simple, Dapr uses [dapr-test GitHub Actions Workflow](https://github.com/dapr/dapr/actions?query=workflow%3Adapr-test) to run e2e tests using one of [AKS clusters](https://github.com/dapr/dapr/blob/4cd61680a3129f729deae24a51da241d0701376c/tests/test-infra/find_cluster.sh#L12-L17). A separate workflow also runs E2E in [KinD](https://kind.sigs.k8s.io/) clusters.
Once a contributor creates a pull request, E2E tests on KinD clusters are automatically executed for faster feedback. In order to run the E2E tests on AKS, ask a maintainer or approver to add `/ok-to-test` comment to the Pull Request.
## Run E2E tests on Azure AKS
This repository's automated tests (CI) use an Azure Kubernetes Service (AKS) cluster to run E2E tests.
If you want to run the tests in a similar environment, you can deploy the test infrastructure on your own using the Bicep templates in [tests/test-infra](/tests/test-infra/). Here are the scripts you could use.
### Deploy AKS only
If you want to deploy AKS and the Azure Container Registry only (without Azure Cosmos DB and Azure Service Bus), you can use this script:
```sh
# Set the Azure region to use (needs to support Availability Zones)
AZURE_REGION="eastus2"
# Name prefix for your test resources
# Try to use a unique name, at least 4 characters
export TEST_PREFIX="mydapraks42"
# Set to true to add a Windows node pool to the AKS cluster
ENABLE_WINDOWS="false"
# Name of the resource group where to deploy your cluster
export TEST_RESOURCE_GROUP="MyDaprTest"
# Create a resource group
az group create \
--resource-group "${TEST_RESOURCE_GROUP}" \
--location "${AZURE_REGION}"
# Deploy the test infrastructure
az deployment group create \
--resource-group "${TEST_RESOURCE_GROUP}" \
--template-file ./tests/test-infra/azure-aks.bicep \
--parameters namePrefix=${TEST_PREFIX} location=${AZURE_REGION} enableWindows=${ENABLE_WINDOWS}
# Authenticate with Azure Container Registry
az acr login --name "${TEST_PREFIX}acr"
# Connect to AKS
az aks get-credentials -n "${TEST_PREFIX}-aks" -g "${TEST_RESOURCE_GROUP}"
# Set the value for DAPR_REGISTRY
export DAPR_REGISTRY="${TEST_PREFIX}acr.azurecr.io"
# Set the value for DAPR_NAMESPACE as per instructions above and create the namespace
export DAPR_NAMESPACE=dapr-tests
make create-test-namespace
```
After this, run the E2E tests as per instructions above, making sure to use the newly-created Azure Container Registry as Docker registry (make sure you maintain the environmental variables set in the steps above).
### Deploy AKS and other Azure resources
This is the setup that our E2E tests use in GitHub Actions, which includes AKS, Azure Cosmos DB, and Azure Service Bus, in addition to an Azure Container Registry. To replicate the same setup, run:
> **NOTE:** This deploys an Azure Service Bus instance with ultra-high performance and it is **very expensive**. If you deploy this, don't forget to shut it down after you're done!
```sh
# Set the Azure region to use (needs to support Availability Zones)
AZURE_REGION="eastus2"
# Name prefix for your test resources
# Try to use a unique name, at least 4 characters
export TEST_PREFIX="mydapraks42"
# Set to true to add a Windows node pool to the AKS cluster
ENABLE_WINDOWS="false"
# Name of the resource group where to deploy your cluster
export TEST_RESOURCE_GROUP="MyDaprTest"
# Create a resource group
az group create \
--resource-group "${TEST_RESOURCE_GROUP}" \
--location "${AZURE_REGION}"
# Deploy the test infrastructure
az deployment group create \
--resource-group "${TEST_RESOURCE_GROUP}" \
--template-file ./tests/test-infra/azure.bicep \
--parameters namePrefix=${TEST_PREFIX} location=${AZURE_REGION} enableWindows=${ENABLE_WINDOWS}
# Authenticate with Azure Container Registry
az acr login --name "${TEST_PREFIX}acr"
# Connect to AKS
az aks get-credentials -n "${TEST_PREFIX}-aks" -g "${TEST_RESOURCE_GROUP}"
# Set the value for DAPR_REGISTRY
export DAPR_REGISTRY="${TEST_PREFIX}acr.azurecr.io"
# Set the value for DAPR_NAMESPACE as per instructions above and create the namespace
export DAPR_NAMESPACE=dapr-tests
make create-test-namespace
# Create the Kubernetes secrets in the Dapr test namespaces to allow connecting to Cosmos DB and Service Bus
# Syntax: ./tests/test-infra/setup_azure.sh <ENABLE_COSMOSDB> <ENABLE_SERVICEBUS> <ENABLE_KEY_VAULT>
./tests/test-infra/setup_azure.sh true true false
```
After this, run the E2E tests as per instructions above, making sure to use the newly-created Azure Container Registry as Docker registry (make sure you maintain the environmental variables set in the steps above).
### Run tests
The command in [Option 2: Step by step guide](#option-2-step-by-step-guide) can also run in AKS. Here is a sample script.
```sh
export ONLY_DAPR_IMAGE=true
# Replace with the name of your Azure Container Registry
export TEST_PREFIX="testprefix"
export DAPR_REGISTRY="${TEST_PREFIX}acr.azurecr.io"
export DAPR_TAG=dev
export DAPR_NAMESPACE=dapr-tests
export DAPR_TEST_NAMESPACE="dapr-tests"
export DAPR_TEST_TAG="${DAPR_TAG}-linux-amd64"
export DAPR_TEST_REGISTRY=${DAPR_REGISTRY}
# To enable debug logging
export DEBUG_LOGGING=true
az acr login --name ${DAPR_REGISTRY}
# Build Dapr. Run it in root folder of dapr source code project.
make build
# Build and push Docker images
DOCKERFILE=Dockerfile-mariner make docker-build docker-push
make build-e2e-app-all
make push-e2e-app-all
# Setup Dapr in Kubernetes
make create-test-namespace
make docker-deploy-k8s
make setup-3rd-party setup-test-components
# Delete all logs etc
make test-clean
# You can run a single test with:
DAPR_E2E_TEST="pubsub" make test-e2e-all
# Or run all tests with:
make test-e2e-all
```
### Collect container and diagnostic logs from AKS
You can optionally configure AKS to collect certain logs, including:
- All container logs, sent to Azure Log Analytics
- Diagnostic logs (`kube-apiserver` and `kube-controller-manager`), sent to Azure Log Analytics
- Audit logs (`kube-audit`), sent to Azure Storage
To do that, first provision the required resources by deploying the `azure-aks-diagnostic.bicep` template (this template is not part of the `azure.bicep` or `azure-all.bicep` templates, as it's considered shared infrastructure):
```sh
# Name of the resource group where to deploy the diagnostic resources
DIAG_RESOURCE_GROUP="MyDaprTestLogs"
# Name prefix for the diagnostic resources (should be globally-unique)
DIAG_NAME_PREFIX="mydaprdiag42"
# Create a resource group
az group create \
--resource-group "${DIAG_RESOURCE_GROUP}" \
--location "${AZURE_REGION}"
# Deploy the test infrastructure
az deployment group create \
--resource-group "${DIAG_RESOURCE_GROUP}" \
--template-file ./tests/test-infra/azure-aks-diagnostic.bicep \
--parameters name=${DIAG_NAME_PREFIX} location=${AZURE_REGION}
```
The output of the last command includes two values that are resource IDs:
- `diagLogAnalyticsWorkspaceResourceId`, for example: `/subscriptions/<subscription>/resourcegroups/<resource group>/providers/Microsoft.OperationalInsights/workspaces/<workspace name>`
- `diagStorageResourceId`, for example: `/subscriptions/<subscription>/resourcegroups/<resource group>/providers/Microsoft.Storage/storageAccounts/<storage account name>`
Use those values as parameters when deploying the `azure.bicep` or `azure-all.bicep` templates. For example:
```sh
az deployment group create \
--resource-group "${TEST_RESOURCE_GROUP}" \
--template-file ./tests/test-infra/azure.bicep \
--parameters namePrefix=${TEST_PREFIX} location=${AZURE_REGION} enableWindows=${ENABLE_WINDOWS} diagLogAnalyticsWorkspaceResourceId=... diagStorageResourceId=...
```
### Clean UP AKS environment
```sh
export DAPR_NAMESPACE=dapr-tests
# Delete all logs etc
make test-clean
helm uninstall dapr -n $DAPR_NAMESPACE || true
helm uninstall dapr-kafka -n $DAPR_NAMESPACE || true
helm uninstall dapr-redis -n $DAPR_NAMESPACE || true
helm uninstall dapr-mongodb -n $DAPR_NAMESPACE || true
helm uninstall dapr-temporal -n $DAPR_NAMESPACE || true
kubectl delete deployment dapr-zipkin -n $DAPR_NAMESPACE || true
kubectl delete namespace $DAPR_NAMESPACE || true
# Remove the test namespace
make delete-test-namespace
```
## Run E2E tests using a Wireguard Tunnel with tailscale
[Tailscale](https://tailscale.com/) is a zero-config VPN that provides NAT traversal out-of-the-box allowing our services and pods to be called directly - using its ClusterIP - without needed to be exposed using a loadbalancer.
This provides a few advantages including the decrease of the total test duration.
If you want to run the tests using Tailscale as your network, a few things are necessary:
1. [Create a tailscale account](https://login.tailscale.com/), this will be necessary since we're going to use personal keys.
2. [Download and install](https://tailscale.com/download/) the tailscale client for your OS.
3. When you're logged in, navigate to the menu `Access Controls` and two things are necessary, edit the ACL definition with:
1. Create a new tag that will be used later to assign permissions to keys.
```json
{...
"tagOwners": {
"tag:dapr-tests": ["your_email_comes_here@your_domain.com"],
}
}
```
2. Assign permissions to the created tag. Since we are going to use the [tailscale subnet router](https://tailscale.com/kb/1185/kubernetes/), it is much more convenient for the subnet router to auto-approve the registered routes. To do that, use the following ACL.
```json
{...
"autoApprovers": {
"routes": {
"10.0.0.0/8": ["tag:dapr-tests"],
},
}
}
```
> Warning: as we are using `10.0.0.0/8` we must guarantee that our CIDR block used in the kubernetes cluster must be a subset of it
4. Now, go to the Settings > Personal Settings > Keys.
5. Once in the keys section, generate a new ephemeral key by clicking in `Generate auth key`.
6. Mark as `reusable`, `ephemeral` and add the created tag `dapr-tests` and do not forget to copy out the value.
Now, we're almost set.
The next step is to install the tailscale subnet router in your Kubernetes cluster. To do that, run:
```sh
TAILSCALE_AUTH_KEY=your_key_goes_here make setup-tailscale
```
> TIP: for security reasons you could run `unset HISTFILE` before the tailscale command so that will discard your history file
Now, you have to login on tailscale client using your personal account, to verify if the subnet router deployment works, browse to the `Machines` on the tailscale portal, the subnet router should show up there.
One more config is necessary: set `TEST_E2E_USE_INTERNAL_IP=true`. You can pass it as a variable when running the tests, as follows:
```sh
TEST_E2E_USE_INTERNAL_IP=true make test-e2e-all
```
|
mikeee/dapr
|
tests/docs/running-e2e-test.md
|
Markdown
|
mit
| 17,415 |
# Running Performance Tests
Performance tests are designed to let you evaluate the latency, resource usage and processing times for Dapr in your environment for a given hardware. The following describes how to run performance tests in a local dev environment and run them through CI:
- [Run Performance tests in local dev environment](#run-perf-tests-in-local-dev-environment)
- [Run Performance tests through GitHub Actions](#run-perf-tests-through-github-actions)
## Run Performance tests in local dev environment
### Prerequisites
* Kubernetes cluster (Minikube and Kind are valid options too).
- To setup a new Kind cluster and local registry, run `make setup-kind`.
* Set up [Dapr development environment](https://github.com/dapr/dapr/blob/master/docs/development/setup-dapr-development-env.md)
- [Install the latest Helm v3](https://helm.sh/docs/intro/install/).
* Create your DockerHub ID
* Create dapr-tests namespace
```bash
kubectl create namespace dapr-tests
```
* Set the environment variables
- If using Kind, run `make describe-kind-env` and copy-and-paste the export commands displayed.
```bash
export DAPR_REGISTRY=docker.io/your_dockerhub_id
export DAPR_TAG=dev
export DAPR_NAMESPACE=dapr-tests
# Do not set DAPR_TEST_ENV if you do not use minikube
export DAPR_TEST_ENV=minikube
# Set the below environment variables if you want to use the different registry and tag for test apps
# export DAPR_TEST_REGISTRY=docker.io/your_dockerhub_id
# export DAPR_TEST_TAG=dev
# export DAPR_TEST_REGISTRY_SECRET=yourself_private_image_secret
# Set the below environment variables to configure test specific settings for Fortio based tests.
# DAPR_PERF_QPS sets the desired number of requests per second. Default is 1.
# DAPR_PERF_CONNECTIONS sets the number of client connections used to send requests to Dapr. Default is 1.
# DAPR_TEST_DURATION sets the duration of the test. Default is "1m".
# DAPR_PAYLOAD_SIZE sets a payload size in bytes to test with. default is 0.
# DAPR_SIDECAR_CPU_LIMIT sets the cpu resource limit on the Dapr sidecar. default is 4.0.
# DAPR_SIDECAR_MEMORY_LIMIT sets the memory resource limit on the Dapr sidecar. default is 512Mi.
# DAPR_SIDECAR_CPU_REQUEST sets the cpu resource request on the Dapr sidecar. default is 0.5.
# DAPR_SIDECAR_MEMORY_REQUEST sets the memory resource request on the Dapr sidecar. default is 250Mi.
export DAPR_PERF_QPS
export DAPR_PERF_CONNECTIONS
export DAPR_TEST_DURATION
export DAPR_PAYLOAD_SIZE
export DAPR_SIDECAR_CPU_LIMIT
export DAPR_SIDECAR_MEMORY_LIMIT
export DAPR_SIDECAR_CPU_REQUEST
export DAPR_SIDECAR_MEMORY_REQUEST
```
### Deploy your dapr runtime change
Run the below commands to build and deploy dapr from your local disk
```bash
# Build Linux binaries
make build-linux
# Build Docker image with Linux binaries
make docker-build
# Push docker image to your dockerhub registry
make docker-push
# Deploy Dapr runtime to your cluster
make docker-deploy-k8s
# Install 3rd party software
make setup-3rd-party
```
### Register app configurations
```bash
make setup-app-configurations
```
### Optional: Disable tracing
```bash
export DAPR_DISABLE_TELEMETRY=true
```
### Optional: Apply this configuration to disable mTLS
```bash
make setup-disable-mtls
```
### Register the default component configurations for testing
```bash
make setup-test-components
```
### Build and push test apps to docker hub
Build docker images from apps and push the images to test docker hub.
```bash
# build perf apps docker image under apps/
make build-perf-app-all
# push perf apps docker image to docker hub
make push-perf-app-all
```
You can also build and push the test apps individually.
```bash
# build perf apps docker image under apps/
make build-perf-app-<app-name>
# push perf apps docker image to docker hub
make push-perf-app-<app-name>
```
`<app-name>` can be found at [PERF_TEST_APPS](https://github.com/dapr/dapr/blob/6def7d1b9ffe896b7b06d05128b9cd605d39f939/tests/dapr_tests.mk#L61C1-L61C15)
If you are building test apps individually, you need to build and push the tester app also:
- tester (`build-perf-app-tester` and `push-perf-app-tester`) for Fortio based tests
- k6-custom (`build-perf-app-k6-custom` and `push-perf-app-k6-custom`) for k6 based tests
### (k6) Install the k6-operator
If you are running k6 based tests, install the k6-operator.
```bash
make setup-test-env-k6
```
### Run performance tests
```bash
# start perf tests
make test-perf-all
```
You can also run selected tests using the environment variable `DAPR_PERF_TEST`.
```bash
export DAPR_PERF_TEST="<app-name-1> <app-name-2>"
# it will start perf tests defined in DAPR_PERF_TEST
make test-perf-all
```
Then it will run the tests defined in `DAPR_PERF_TEST`. `<app-name>` can be found at [PERF_TESTS](https://github.com/dapr/dapr/blob/6def7d1b9ffe896b7b06d05128b9cd605d39f939/tests/dapr_tests.mk#L70)
For example, if you only want to run `actor_id_scale` and `workflows` tests, you can do
```bash
export DAPR_PERF_TEST="actor_id_scale workflows"
make test-perf-all
```
### Remove all tests data
Once you have finished your testing, it's recommended to remove old test data, so it's easier to find the new tests.
You can run
```bash
make test-clean
```
## Run perf tests through GitHub Actions
To keep the build infrastructure simple, Dapr uses the dapr-test GitHub Actions Workflow to run perf tests using one of the AKS clusters.
In order to run the perf tests on AKS, ask a maintainer or approver to add a /ok-to-perf comment to the Pull Request.
## Optional: Visualize Performance Test Metrics
```bash
export DAPR_PERF_METRICS_PROMETHEUS_PUSHGATEWAY_URL="http://localhost:9091"
```
Install the following in your Kubernetes cluster:
- Prometheus
- Pushgateway
- Grafana
### Create a new namespace
Create a new namespace:
```bash
DAPR_PERF_METRICS_NAMESPACE=dapr-perf-metrics
kubectl create namespace $DAPR_PERF_METRICS_NAMESPACE
```
### Setup for Prometheus Server
```bash
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update
helm install --namespace $DAPR_PERF_METRICS_NAMESPACE prometheus prometheus-community/prometheus
```
### Setup for Prometheus Pushgateway
The Prometheus installation above comes with a pushgateway.
* Forward port 9091 from your local machine to the prometheus-pushgateway pod and access it on `http://localhost:9091`
```bash
kubectl port-forward --namespace $DAPR_PERF_METRICS_NAMESPACE deployment/prometheus-prometheus-pushgateway 9091
```
### Setup for Grafana Server
* Create a `grafana.yaml` file with the following configurations:
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: grafana
namespace: dapr-perf-metrics
spec:
replicas: 1
selector:
matchLabels:
app: grafana
template:
metadata:
      labels:
app: grafana
spec:
containers:
- name: grafana
image: grafana/grafana:latest
ports:
- containerPort: 3000
---
apiVersion: v1
kind: Service
metadata:
name: grafana
namespace: dapr-perf-metrics
spec:
type: LoadBalancer
ports:
- port: 80
targetPort: 3000
protocol: TCP
selector:
app: grafana
```
* Apply the configurations
```bash
kubectl apply -f grafana.yaml
```
* Forward port 3000 from your local machine to the pod where Grafana is running.
```bash
kubectl port-forward --namespace $DAPR_PERF_METRICS_NAMESPACE deployment/grafana 3000
```
The Grafana server can now be accessed on localhost:3000
* Login to Grafana with the default username and password 'admin' for both.
* Now go to data sources and connect Prometheus as a data source.
* The HTTP URL will be the ClusterIP of the prometheus-server pod running on Kubernetes which can be obtained by the command:
```bash
kubectl get svc --namespace $DAPR_PERF_METRICS_NAMESPACE
```
* [Grafana Dashboard for Perf Test](../grafana/grafana-perf-test-dashboard.json)
On running the perf-tests now, the metrics are collected from pushgateway by Prometheus and is made available for visualization as a dashboard by importing the above template in Grafana.
### Sample dashboard view
<img width="1267" alt="grafana_dashboard" src="./img/grafana_dashboard.png">
|
mikeee/dapr
|
tests/docs/running-perf-tests.md
|
Markdown
|
mit
| 8,659 |
# Write E2E test
Before writing e2e test, make sure that you have [local test environment](./running-e2e-test.md) and can run the e2e tests.
In order to add new e2e test, you need to implement test app and test driver:
* **Test app**: A simple http server listening on port 3000 that exposes the test entry endpoints (`/tests/{test}`) to call dapr runtime APIs. The test app is deployed to the Kubernetes cluster with a dapr sidecar.
* **Test driver**: A Go test with `e2e` build tag deploys test app to Kubernetes cluster and runs go tests by calling the test entry endpoints of test app.
## Add test app
A test app is in [/tests/apps/](../apps/) and can be shared by multiple test drivers. Technically, it can be written in any language, but Go is recommended to keep the code base consistent and simple. [hellodapr](../apps/hellodapr/) is a good example of a test app to use as boilerplate code.
### Writing test app
1. Create new test app directory under [/tests/apps/](../apps).
2. Copy `app.go`, `Dockerfile`, and `service.yaml` from [/tests/apps/hellodapr/](../apps/hellodapr/) to test app directory
- app.go : A simple http server written in Go
- Dockerfile : Dockerfile for test app
- service.yaml : Test app deployment yaml for kubectl. This is for development purpose but not used by the actual e2e test.
3. Modify `app.go`
4. Run go app and validate the http endpoints of app.go
### Debug and validation
1. Add your test app name to `E2E_TEST_APPS` variable in [dapr_tests.mk](../dapr_tests.mk)
2. Build test image via make
```bash
# DAPR_TEST_REGISTRY and DAPR_TEST_TAG must be configured
make build-e2e-app-[new app directory name]
make push-e2e-app-[new app directory name]
```
3. Update `service.yaml` properly - change the metadata and registry/imagename
4. Deploy app to your k8s cluster for testing purpose
```bash
kubectl apply -f service.yaml
```
5. Get external ip for the app
```bash
kubectl get svc
```
6. Validate the external endpoint using wget/curl/postman
7. Delete test app
```bash
kubectl delete -f service.yaml
```
## Add test driver
A test driver is a typical Go test with the `e2e` build tag under [/tests/e2e/](../e2e/). It can deploy test apps to a test cluster, run the tests, and clean up all test apps and any resources. We provide a test runner framework to manage the test app's lifecycle during the test so that you do not need to manage the test apps by yourself. We plan to add more functionality to the test runner later, such as log collection from test app PODs.
> *Note:* Any test app you deploy should have a name unique to your test driver. Multiple test drivers may be run at the same time by the automated E2E test runner, so if you have an app name which is not unique, you may collide with another test app. Also, make sure that any stateful resources (pubsub topics, statestores, secrets, etc.) have names unique to your test driver.
### Writing test app
1. Create new test directory under [/tests/e2e/](../e2e).
1. Create go test file based on the below example:
- `// +build e2e` must be placed at the first line of test code
- `TestMain(m *testing.M)` defines the list of test apps and component, which will be used in `TestXxx(t *testing.T)`
- Define new package for the test
- Use Go test best practice
```go
// +build e2e
package hellodapr_e2e
import (
"testing"
kube "github.com/dapr/dapr/tests/platforms/kubernetes"
"github.com/dapr/dapr/tests/runner"
"github.com/stretchr/testify/require"
...
)
// Test runner instance
var tr *runner.TestRunner
// Go test main entry - https://golang.org/pkg/testing/#hdr-Main
func TestMain(m *testing.M) {
testApps := []kube.AppDescription{
{
AppName: "hellodapr",
DaprEnabled: true,
ImageName: "e2e-hellodapr",
Replicas: 1,
IngressEnabled: true,
MetricsEnabled: true,
},
}
tr = runner.NewTestRunner("hellodapr", testApps, nil, nil)
os.Exit(tr.Start(m))
}
func TestHelloDapr(t *testing.T) {
externalURL := tr.Platform.AcquireAppExternalURL("hellodapr")
require.NotEmpty(t, externalURL, "external URL must not be empty")
// Call endpoint for "hellodapr" test app
resp, err := httpGet(externalURL)
require.NoError(t, err)
...
}
func TestStateStore(t *testing.T) {
...
}
```
1. Define the test apps in `TestMain()` and create test runner instance
```go
// The array of test apps which will be used for the entire tests
// defined in this test driver file
testApps := []kube.AppDescription{
{
AppName: "hellodapr", // app name
DaprEnabled: true, // dapr sidecar injection
ImageName: "e2e-hellodapr", // docker image name 'e2e-[test app name]'
Replicas: 1, // number of replicas
IngressEnabled: true, // enable ingress endpoint
MetricsEnabled: true, // enable metrics endpoint
},
}
// Create test runner instance with 'hellodapr' runner id
tr = runner.NewTestRunner("hellodapr", testApps, nil, nil)
// Start the test
os.Exit(tr.Start(m))
```
3. Write the test by implementing `TestXxx(t *testing.T)` methods
- Acquire app external url to call test app endpoint
- Send http request to test app endpoint
```go
func TestHelloDapr(t *testing.T) {
// Acquire app external url
externalURL := tr.Platform.AcquireAppExternalURL("hellodapr")
require.NotEmpty(t, externalURL, "external URL must not be empty")
// Call endpoint for "hellodapr" test app
resp, err := httpGet(externalURL)
require.NoError(t, err)
...
}
```
### Debug and validation
1. If you use minikube for your dev environment, set `DAPR_TEST_MINIKUBE_IP` environment variable to IP address from `minikube ip`
2. Debug Go test
* Using [dlv test](https://github.com/go-delve/delve/blob/master/Documentation/usage/dlv_test.md). For example, if you want to debug `TestHelloDapr` test,
```bash
dlv test --build-flags='-tags=e2e' ./tests/e2e/... -- -test.run ^TestHelloDapr$
```
* Using VSCode + Go plugin (recommended)
[VSCode + Go plugin](https://github.com/Microsoft/vscode-go/wiki/Debugging-Go-code-using-VS-Code) provides a good debugging experience. However, it does not provide a way to add a build tag to the build options, so you need to remove the `// +build e2e` build constraint in your go test and [helpers.go](https://github.com/dapr/dapr/blob/53bf10569fe9a9a5f484c5c9cf5760881db9a3e4/tests/e2e/helpers.go#L1) temporarily during debugging.
3. Run all e2e tests
```bash
make test-e2e-all
```
|
mikeee/dapr
|
tests/docs/writing-e2e-test.md
|
Markdown
|
mit
| 6,637 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.