code
stringlengths 0
56.1M
| repo_name
stringclasses 515
values | path
stringlengths 2
147
| language
stringclasses 447
values | license
stringclasses 7
values | size
int64 0
56.8M
|
---|---|---|---|---|---|
#
# Copyright 2023 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
## Required secrets:
# - DAPR_BOT_TOKEN: Token for the Dapr bot
name: dapr-test-sdk
on:
# Run every 11 hours on weekdays, and once a day on weekends.
schedule:
- cron: "16 */11 * * 1-5"
- cron: "41 0 * * 0,6"
# Dispatch on external events
repository_dispatch:
types:
- test-sdk-all
- test-sdk-python
- test-sdk-java
- test-sdk-js
- test-sdk-go
env:
GOOS: linux
GOARCH: amd64
GOPROXY: https://proxy.golang.org
# Job(s) can be triggered with the following commands:
# /test-sdk-all
# /test-sdk-python
# /test-sdk-java
# /test-sdk-js
# /test-sdk-go
jobs:
# Verifies the Dapr Python SDK examples against daprd/placement built from this checkout.
python-sdk:
  if: |
    github.event_name == 'schedule' ||
    ( github.event_name == 'repository_dispatch' &&
      (
        github.event.action == 'test-sdk-all' ||
        github.event.action == 'test-sdk-python'
      )
    )
  name: "Python SDK verification tests"
  runs-on: ubuntu-latest
  steps:
    - name: Set up for scheduled test
      if: github.event_name != 'repository_dispatch'
      run: |
        echo "CHECKOUT_REPO=${{ github.repository }}" >> $GITHUB_ENV
        echo "CHECKOUT_REF=refs/heads/master" >> $GITHUB_ENV
      shell: bash
    - name: Parse test payload
      if: github.event_name == 'repository_dispatch'
      uses: actions/github-script@v6.2.0
      with:
        github-token: ${{ secrets.DAPR_BOT_TOKEN }}
        script: |
          const testPayload = context.payload.client_payload;
          if (testPayload) {
            var fs = require('fs');
            // Set environment variables
            fs.appendFileSync(process.env.GITHUB_ENV,
              `CHECKOUT_REPO=${testPayload.pull_head_repo}\n`+
              `CHECKOUT_REF=${testPayload.pull_head_ref}\n`+
              `PR_NUMBER=${testPayload.issue.number}`
            );
          }
    - name: Create PR comment
      if: env.PR_NUMBER != ''
      uses: artursouza/sticky-pull-request-comment@v2.2.0
      with:
        header: ${{ github.run_id }}-python
        number: ${{ env.PR_NUMBER }}
        GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
        message: |
          # Dapr SDK Python test
          🔗 **[Link to Action run](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})**
          Commit ref: ${{ env.CHECKOUT_REF }}
    - name: Check out code
      uses: actions/checkout@v4
      with:
        repository: ${{ env.CHECKOUT_REPO }}
        ref: ${{ env.CHECKOUT_REF }}
    - name: Set up Python 3.9
      uses: actions/setup-python@v4
      with:
        python-version: "3.9"
    - name: "Set up Go"
      id: setup-go
      uses: actions/setup-go@v5
      with:
        go-version-file: "go.mod"
    - name: Checkout python-sdk repo to run tests.
      uses: actions/checkout@v4
      with:
        repository: dapr/python-sdk
        path: python-sdk
    - name: Set up Dapr CLI
      run: wget -q https://raw.githubusercontent.com/dapr/cli/master/install/install.sh -O - | /bin/bash -s
    - name: Initialize Dapr runtime
      run: |
        dapr uninstall --all
        dapr init
    - name: Build and override daprd
      run: |
        make
        mkdir -p $HOME/.dapr/bin/
        cp dist/linux_amd64/release/daprd $HOME/.dapr/bin/daprd
    - name: Override placement service
      run: |
        docker stop dapr_placement
        ./dist/linux_amd64/release/placement --healthz-port 9091 &
    - name: Install dependencies
      run: |
        cd python-sdk
        python -m pip install --upgrade pip
        pip install setuptools wheel twine tox
    - name: Check Python Examples
      run: |
        # Fail fast if the python-sdk checkout is missing instead of
        # silently running tox from the wrong directory (previously `|| true`).
        cd python-sdk
        tox -e examples
    - name: Update PR comment for success
      if: ${{ success() }}
      uses: artursouza/sticky-pull-request-comment@v2.2.0
      with:
        header: ${{ github.run_id }}-python
        number: ${{ env.PR_NUMBER }}
        append: true
        GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
        message: |
          ## ✅ Python SDK tests passed
    - name: Update PR comment for failure
      if: ${{ failure() }}
      uses: artursouza/sticky-pull-request-comment@v2.2.0
      with:
        header: ${{ github.run_id }}-python
        number: ${{ env.PR_NUMBER }}
        append: true
        GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
        message: |
          ## ❌ Python SDK tests failed
          Please check the logs for details on the error.
    - name: Update PR comment for cancellation
      if: ${{ cancelled() }}
      uses: artursouza/sticky-pull-request-comment@v2.2.0
      with:
        header: ${{ github.run_id }}-python
        number: ${{ env.PR_NUMBER }}
        append: true
        GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
        message: |
          ## ⚠️ Python SDK tests cancelled
          The Action has been canceled
# Verifies the Dapr Java SDK integration tests against daprd/placement built from this checkout.
java-sdk:
  if: |
    github.event_name == 'schedule' ||
    ( github.event_name == 'repository_dispatch' &&
      (
        github.event.action == 'test-sdk-all' ||
        github.event.action == 'test-sdk-java'
      )
    )
  name: "Java SDK verification tests"
  runs-on: ubuntu-latest
  env:
    JDK_VER: 11
    JAVA_SPRING_BOOT_VERSION: 2.7.8
    TOXIPROXY_URL: https://github.com/Shopify/toxiproxy/releases/download/v2.5.0/toxiproxy-server-linux-amd64
  steps:
    - name: Set up for scheduled test
      if: github.event_name != 'repository_dispatch'
      run: |
        echo "CHECKOUT_REPO=${{ github.repository }}" >> $GITHUB_ENV
        echo "CHECKOUT_REF=refs/heads/master" >> $GITHUB_ENV
      shell: bash
    - name: Parse test payload
      if: github.event_name == 'repository_dispatch'
      uses: actions/github-script@v6.2.0
      with:
        github-token: ${{ secrets.DAPR_BOT_TOKEN }}
        script: |
          const testPayload = context.payload.client_payload;
          if (testPayload) {
            var fs = require('fs');
            // Set environment variables
            fs.appendFileSync(process.env.GITHUB_ENV,
              `CHECKOUT_REPO=${testPayload.pull_head_repo}\n`+
              `CHECKOUT_REF=${testPayload.pull_head_ref}\n`+
              `PR_NUMBER=${testPayload.issue.number}`
            );
          }
    - name: Create PR comment
      if: env.PR_NUMBER != ''
      uses: artursouza/sticky-pull-request-comment@v2.2.0
      with:
        header: ${{ github.run_id }}-java
        number: ${{ env.PR_NUMBER }}
        GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
        message: |
          # Dapr SDK Java test
          🔗 **[Link to Action run](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})**
          Commit ref: ${{ env.CHECKOUT_REF }}
    - name: Check out code
      uses: actions/checkout@v4
      with:
        repository: ${{ env.CHECKOUT_REPO }}
        ref: ${{ env.CHECKOUT_REF }}
    - name: Set up OpenJDK ${{ env.JDK_VER }}
      uses: actions/setup-java@v3
      with:
        distribution: 'adopt'
        java-version: ${{ env.JDK_VER }}
    - name: "Set up Go"
      id: setup-go
      uses: actions/setup-go@v5
      with:
        go-version-file: "go.mod"
    - name: Checkout java-sdk repo to run tests.
      uses: actions/checkout@v4
      with:
        repository: dapr/java-sdk
        path: java-sdk
    - name: Set up Dapr CLI
      run: wget -q https://raw.githubusercontent.com/dapr/cli/master/install/install.sh -O - | /bin/bash -s
    - name: Initialize Dapr runtime
      run: |
        dapr uninstall --all
        dapr init
    - name: Build and override daprd
      run: |
        make
        mkdir -p $HOME/.dapr/bin/
        cp dist/linux_amd64/release/daprd $HOME/.dapr/bin/daprd
    - name: Override placement service
      run: |
        docker stop dapr_placement
        ./dist/linux_amd64/release/placement &
    - name: Spin local environment
      run: |
        # Compose V1 (`docker-compose`) was removed from GitHub-hosted runners;
        # use the Compose V2 plugin instead.
        docker compose -f ./java-sdk/sdk-tests/deploy/local-test.yml up -d mongo kafka
        docker ps
    - name: Install local ToxiProxy to simulate connectivity issues to Dapr sidecar
      run: |
        mkdir -p /home/runner/.local/bin
        wget -q ${{ env.TOXIPROXY_URL }} -O /home/runner/.local/bin/toxiproxy-server
        chmod +x /home/runner/.local/bin/toxiproxy-server
        /home/runner/.local/bin/toxiproxy-server --version
    - name: Clean up files
      run: cd java-sdk && mvn clean -B
    - name: Build sdk
      run: cd java-sdk && mvn compile -B -q
    - name: Install jars
      run: cd java-sdk && mvn install -q -B -DskipTests
    - name: Integration tests using spring boot version ${{ env.JAVA_SPRING_BOOT_VERSION }}
      id: integration_tests
      run: cd java-sdk && PRODUCT_SPRING_BOOT_VERSION=${{ env.JAVA_SPRING_BOOT_VERSION }} mvn -B -f sdk-tests/pom.xml verify
    - name: Upload test report for sdk
      uses: actions/upload-artifact@v4
      with:
        name: report-dapr-java-sdk
        path: java-sdk/sdk/target/jacoco-report/
    - name: Upload test report for sdk-actors
      uses: actions/upload-artifact@v4
      with:
        name: report-dapr-java-sdk-actors
        path: java-sdk/sdk-actors/target/jacoco-report/
    - name: Upload failsafe test report for sdk-tests on failure
      if: ${{ failure() && steps.integration_tests.conclusion == 'failure' }}
      uses: actions/upload-artifact@v4
      with:
        name: failsafe-report-sdk-tests
        path: java-sdk/sdk-tests/target/failsafe-reports
    - name: Upload surefire test report for sdk-tests on failure
      if: ${{ failure() && steps.integration_tests.conclusion == 'failure' }}
      uses: actions/upload-artifact@v4
      with:
        name: surefire-report-sdk-tests
        path: java-sdk/sdk-tests/target/surefire-reports
    - name: Update PR comment for success
      if: ${{ success() }}
      uses: artursouza/sticky-pull-request-comment@v2.2.0
      with:
        header: ${{ github.run_id }}-java
        number: ${{ env.PR_NUMBER }}
        append: true
        GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
        message: |
          ## ✅ Java SDK tests passed
    - name: Update PR comment for failure
      if: ${{ failure() }}
      uses: artursouza/sticky-pull-request-comment@v2.2.0
      with:
        header: ${{ github.run_id }}-java
        number: ${{ env.PR_NUMBER }}
        append: true
        GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
        message: |
          ## ❌ Java SDK tests failed
          Please check the logs for details on the error.
    - name: Update PR comment for cancellation
      if: ${{ cancelled() }}
      uses: artursouza/sticky-pull-request-comment@v2.2.0
      with:
        header: ${{ github.run_id }}-java
        number: ${{ env.PR_NUMBER }}
        append: true
        GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
        message: |
          ## ⚠️ Java SDK tests cancelled
          The Action has been canceled
# Verifies the Dapr JS SDK E2E tests against daprd/placement built from this checkout.
js-sdk:
  if: |
    github.event_name == 'schedule' ||
    ( github.event_name == 'repository_dispatch' &&
      (
        github.event.action == 'test-sdk-all' ||
        github.event.action == 'test-sdk-js'
      )
    )
  name: "JS SDK verification tests"
  runs-on: ubuntu-latest
  env:
    NODE_VER: 18
  services:
    emqx:
      image: emqx/emqx
      ports:
        # Quoted to avoid the YAML 1.1 sexagesimal-integer trap for "N:N" values.
        - "1883:1883"
        - "8081:8081"
        - "8083:8083"
        - "8883:8883"
        - "18083:18083"
    mongodb:
      image: mongo
      ports:
        - "27017:27017"
  steps:
    - name: Set up for scheduled test
      if: github.event_name != 'repository_dispatch'
      run: |
        echo "CHECKOUT_REPO=${{ github.repository }}" >> $GITHUB_ENV
        echo "CHECKOUT_REF=refs/heads/master" >> $GITHUB_ENV
      shell: bash
    - name: Parse test payload
      if: github.event_name == 'repository_dispatch'
      uses: actions/github-script@v6.2.0
      with:
        github-token: ${{ secrets.DAPR_BOT_TOKEN }}
        script: |
          const testPayload = context.payload.client_payload;
          if (testPayload) {
            var fs = require('fs');
            // Set environment variables
            fs.appendFileSync(process.env.GITHUB_ENV,
              `CHECKOUT_REPO=${testPayload.pull_head_repo}\n`+
              `CHECKOUT_REF=${testPayload.pull_head_ref}\n`+
              `PR_NUMBER=${testPayload.issue.number}`
            );
          }
    - name: Create PR comment
      if: env.PR_NUMBER != ''
      uses: artursouza/sticky-pull-request-comment@v2.2.0
      with:
        header: ${{ github.run_id }}-js
        number: ${{ env.PR_NUMBER }}
        GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
        message: |
          # Dapr SDK JS test
          🔗 **[Link to Action run](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})**
          Commit ref: ${{ env.CHECKOUT_REF }}
    - name: Check out code
      uses: actions/checkout@v4
      with:
        repository: ${{ env.CHECKOUT_REPO }}
        ref: ${{ env.CHECKOUT_REF }}
    - name: NodeJS - Install
      uses: actions/setup-node@v3
      with:
        node-version: ${{ env.NODE_VER }}
    - name: "Set up Go"
      id: setup-go
      uses: actions/setup-go@v5
      with:
        go-version-file: "go.mod"
    - name: Checkout js-sdk repo to run tests.
      uses: actions/checkout@v4
      with:
        repository: dapr/js-sdk
        path: js-sdk
    - name: Set up Dapr CLI
      run: wget -q https://raw.githubusercontent.com/dapr/cli/master/install/install.sh -O - | /bin/bash -s
    - name: Initialize Dapr runtime
      run: |
        dapr uninstall --all
        dapr init
    - name: Build and override daprd
      run: |
        make
        mkdir -p $HOME/.dapr/bin/
        cp dist/linux_amd64/release/daprd $HOME/.dapr/bin/daprd
    - name: Override placement service
      run: |
        docker stop dapr_placement
        ./dist/linux_amd64/release/placement &
    - name: Build Package
      run: cd js-sdk && npm run build
    - name: Run E2E tests
      id: tests
      run: cd js-sdk && npm run test:e2e:all
    - name: Run E2E test to show successful TypeScript build
      run: |
        cd js-sdk/test/e2e/typescript-build
        npm install
        dapr run --app-id typescript-build npm run start
    - name: Update PR comment for success
      if: ${{ success() }}
      uses: artursouza/sticky-pull-request-comment@v2.2.0
      with:
        header: ${{ github.run_id }}-js
        number: ${{ env.PR_NUMBER }}
        append: true
        GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
        message: |
          ## ✅ JS SDK tests passed
    - name: Update PR comment for failure
      if: ${{ failure() }}
      uses: artursouza/sticky-pull-request-comment@v2.2.0
      with:
        header: ${{ github.run_id }}-js
        number: ${{ env.PR_NUMBER }}
        append: true
        GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
        message: |
          ## ❌ JS SDK tests failed
          Please check the logs for details on the error.
    - name: Update PR comment for cancellation
      if: ${{ cancelled() }}
      uses: artursouza/sticky-pull-request-comment@v2.2.0
      with:
        header: ${{ github.run_id }}-js
        number: ${{ env.PR_NUMBER }}
        append: true
        GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
        message: |
          ## ⚠️ JS SDK tests cancelled
          The Action has been canceled
# Verifies the Dapr Go SDK examples (via mechanical-markdown) against daprd/placement
# built from this checkout.
go-sdk:
  if: |
    github.event_name == 'schedule' ||
    ( github.event_name == 'repository_dispatch' &&
      (
        github.event.action == 'test-sdk-all' ||
        github.event.action == 'test-sdk-go'
      )
    )
  name: "Go SDK verification tests"
  runs-on: ubuntu-latest
  steps:
    - name: Set up for scheduled test
      if: github.event_name != 'repository_dispatch'
      run: |
        echo "CHECKOUT_REPO=${{ github.repository }}" >> $GITHUB_ENV
        echo "CHECKOUT_REF=refs/heads/master" >> $GITHUB_ENV
      shell: bash
    - name: Parse test payload
      if: github.event_name == 'repository_dispatch'
      uses: actions/github-script@v6.2.0
      with:
        github-token: ${{ secrets.DAPR_BOT_TOKEN }}
        script: |
          const testPayload = context.payload.client_payload;
          if (testPayload) {
            var fs = require('fs');
            // Set environment variables
            fs.appendFileSync(process.env.GITHUB_ENV,
              `CHECKOUT_REPO=${testPayload.pull_head_repo}\n`+
              `CHECKOUT_REF=${testPayload.pull_head_ref}\n`+
              `PR_NUMBER=${testPayload.issue.number}`
            );
          }
    - name: Create PR comment
      if: env.PR_NUMBER != ''
      uses: artursouza/sticky-pull-request-comment@v2.2.0
      with:
        header: ${{ github.run_id }}-go
        number: ${{ env.PR_NUMBER }}
        GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
        message: |
          # Dapr SDK Go test
          🔗 **[Link to Action run](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})**
          Commit ref: ${{ env.CHECKOUT_REF }}
    - name: Check out code
      uses: actions/checkout@v4
      with:
        repository: ${{ env.CHECKOUT_REPO }}
        ref: ${{ env.CHECKOUT_REF }}
    - name: "Set up Go"
      id: setup-go
      uses: actions/setup-go@v5
      with:
        go-version-file: "go.mod"
    - name: Set up Python 3.9
      uses: actions/setup-python@v4
      with:
        # Quoted so YAML does not parse the version as a float.
        python-version: "3.9"
    - name: Checkout go-sdk repo to run tests.
      uses: actions/checkout@v4
      with:
        repository: dapr/go-sdk
        path: go-sdk
    - name: Install Mechanical Markdown
      run: |
        cd go-sdk
        python -m pip install --upgrade pip
        pip install mechanical-markdown
    - name: Set up Dapr CLI
      run: wget -q https://raw.githubusercontent.com/dapr/cli/master/install/install.sh -O - | /bin/bash -s
    - name: Initialize Dapr runtime
      run: |
        dapr uninstall --all
        dapr init
    - name: Build and override daprd with HEAD.
      run: |
        make
        mkdir -p $HOME/.dapr/bin/
        cp dist/linux_amd64/release/daprd $HOME/.dapr/bin/daprd
    - name: Override placement service.
      run: |
        docker stop dapr_placement
        ./dist/linux_amd64/release/placement --healthz-port 9091 &
    - name: Check Examples
      run: |
        cd go-sdk/examples
        ./validate.sh actor
        ./validate.sh configuration
        ./validate.sh crypto
        ./validate.sh grpc-service
        ./validate.sh hello-world
        ./validate.sh pubsub
        ./validate.sh service
        ./validate.sh socket
        ./validate.sh workflow
        ./validate.sh workflow-parallel
    - name: Update PR comment for success
      if: ${{ success() }}
      uses: artursouza/sticky-pull-request-comment@v2.2.0
      with:
        header: ${{ github.run_id }}-go
        number: ${{ env.PR_NUMBER }}
        append: true
        GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
        message: |
          ## ✅ Go SDK tests passed
    - name: Update PR comment for failure
      if: ${{ failure() }}
      uses: artursouza/sticky-pull-request-comment@v2.2.0
      with:
        header: ${{ github.run_id }}-go
        number: ${{ env.PR_NUMBER }}
        append: true
        GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
        message: |
          ## ❌ Go SDK tests failed
          Please check the logs for details on the error.
    - name: Update PR comment for cancellation
      if: ${{ cancelled() }}
      uses: artursouza/sticky-pull-request-comment@v2.2.0
      with:
        header: ${{ github.run_id }}-go
        number: ${{ env.PR_NUMBER }}
        append: true
        GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
        message: |
          ## ⚠️ Go SDK tests cancelled
          The Action has been canceled
|
mikeee/dapr
|
.github/workflows/dapr-test-sdk.yml
|
YAML
|
mit
| 21,379 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Required secrets:
# - AZURE_CREDENTIALS: JSON object containing the Azure service principal credentials. Docs: https://github.com/Azure/login#configure-a-service-principal-with-a-secret
# - DAPR_BOT_TOKEN: Token for the Dapr bot
#
# Optional secrets:
# - AZURE_DIAG_LOG_ANALYTICS_WORKSPACE_ID: Resource ID of the Log Analytics Workspace where to store certain diagnostic logs (e.g. `/subscriptions/<subscription>/resourcegroups/<resource group>/providers/Microsoft.OperationalInsights/workspaces/<workspace name>`)
# - AZURE_DIAG_STORAGE_ID: Resource ID of the Azure Storage account where to store certain diagnostic logs (e.g. `/subscriptions/<subscription>/resourcegroups/<resource group>/providers/Microsoft.Storage/storageAccounts/<storage account name>`)
# - AZURE_ARM_DIAG_LOG_ANALYTICS_WORKSPACE_ID: Resource ID of the Log Analytics Workspace where to store certain diagnostic logs for Arm64 (e.g. `/subscriptions/<subscription>/resourcegroups/<resource group>/providers/Microsoft.OperationalInsights/workspaces/<workspace name>`)
# - AZURE_ARM_DIAG_STORAGE_ID: Resource ID of the Azure Storage account where to store certain diagnostic logs for Arm64 (e.g. `/subscriptions/<subscription>/resourcegroups/<resource group>/providers/Microsoft.Storage/storageAccounts/<storage account name>`)
name: dapr-test
on:
# Run every 4 hours on weekdays, and every 12 hours on weekends
schedule:
- cron: "11 3,7,11,15,19,23 * * 1-5"
- cron: "11 11,23 * * 0,6"
# Manual trigger
workflow_dispatch:
# Dispatch on external events
repository_dispatch:
types: [e2e-test]
env:
# Configure proxy for Go modules
GOPROXY: https://proxy.golang.org
# Version of kubectl
KUBECTLVER: "v1.27.6"
# If not empty, uses cloud resources for testing
TEST_CLOUD_ENV: "azure"
# Version of Helm
HELMVER: "v3.10.0"
# Kubernetes namespace to use
DAPR_NAMESPACE: "dapr-tests"
# Timeout for tests
MAX_TEST_TIMEOUT: 5400
# Enable HA mode for tests
HA_MODE: true
# Enable tests on ARM64
ENABLE_ARM: "false"
# Space-separated of supported Azure regions: one will be picked randomly for each cluster
AZURE_REGIONS: "westus3"
AZURE_ARM_REGIONS: "eastus"
# Container registry where to cache e2e test images
DAPR_CACHE_REGISTRY: "dapre2eacr.azurecr.io"
# Name of the Azure Key Vault resource used in tests
# The credentials defined in AZURE_CREDENTIALS must have permissions to perform operations in this vault
AZURE_KEY_VAULT_NAME: "dapre2ekv"
# Whether to collect TCP dumps
TCP_DUMPS: "false"
# Additional build tags for Dapr
DAPR_GO_BUILD_TAGS: "subtlecrypto,wfbackendsqlite"
# Useful for upgrade/downgrade/compatibility tests
# TODO: Make this auto-populated based on GitHub's releases.
DAPR_TEST_N_MINUS_1_IMAGE: "ghcr.io/dapr/daprd:1.13.0"
DAPR_TEST_N_MINUS_2_IMAGE: "ghcr.io/dapr/daprd:1.12.5"
jobs:
# Provisions the Azure test clusters (Linux amd64, Windows, optional Linux arm64)
# used by the E2E jobs, retrying the Bicep deployment on transient failures.
deploy-infrastructure:
  name: Deploy test infrastructure
  runs-on: ubuntu-22.04
  steps:
    - name: Set up for manual runs
      if: github.event_name == 'workflow_dispatch'
      run: |
        echo "CHECKOUT_REPO=${{ github.repository }}" >> $GITHUB_ENV
        echo "CHECKOUT_REF=${{ github.ref }}" >> $GITHUB_ENV
      shell: bash
    - name: Set up for scheduled test
      if: github.event_name == 'schedule'
      run: |
        echo "CHECKOUT_REPO=${{ github.repository }}" >> $GITHUB_ENV
        echo "CHECKOUT_REF=refs/heads/master" >> $GITHUB_ENV
      shell: bash
    - name: Set up for dispatched events
      if: github.event_name == 'repository_dispatch'
      uses: actions/github-script@v6.2.0
      with:
        github-token: ${{ secrets.DAPR_BOT_TOKEN }}
        script: |
          const testPayload = context.payload.client_payload;
          if (testPayload && testPayload.command == "ok-to-test") {
            var fs = require('fs');
            // Set environment variables
            fs.appendFileSync(process.env.GITHUB_ENV,
              `CHECKOUT_REPO=${testPayload.pull_head_repo}\n`+
              `CHECKOUT_REF=${testPayload.pull_head_ref}\n`+
              `PR_NUMBER=${testPayload.issue.number}`
            );
          }
    - name: Create PR comment
      if: env.PR_NUMBER != ''
      uses: artursouza/sticky-pull-request-comment@v2.2.0
      with:
        header: ${{ github.run_id }}
        number: ${{ env.PR_NUMBER }}
        hide: true
        hide_classify: OUTDATED
        GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
        message: |
          # Dapr E2E test
          🔗 **[Link to Action run](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})**
          Commit ref: ${{ env.CHECKOUT_REF }}
    - name: Check out code
      if: env.CHECKOUT_REPO != ''
      uses: actions/checkout@v4
      with:
        repository: ${{ env.CHECKOUT_REPO }}
        ref: ${{ env.CHECKOUT_REF }}
    - name: Login to Azure
      if: env.CHECKOUT_REPO != ''
      uses: azure/login@v1
      with:
        creds: ${{ secrets.AZURE_CREDENTIALS }}
    - name: Build test prefix
      if: env.CHECKOUT_REPO != ''
      run: |
        # Derive a short, stable prefix from the run's identity so all jobs in
        # this run compute identical resource names.
        BASE_STR="E2E|${GITHUB_SHA}|${GITHUB_SERVER_URL}|${GITHUB_REPOSITORY}|${GITHUB_RUN_ID}|${GITHUB_RUN_ATTEMPT}"
        SUFFIX=$(echo $BASE_STR | sha1sum | head -c 10)
        TEST_PREFIX="dapre2e${SUFFIX}"
        echo "Test prefix is ${TEST_PREFIX}"
        echo "TEST_PREFIX=${TEST_PREFIX}" >> $GITHUB_ENV
      shell: bash
    - name: Deploy the test cluster
      if: env.TEST_PREFIX != ''
      run: |
        # Select one random Azure region
        REGIONS=(${{ env.AZURE_REGIONS }})
        REGIONS_SIZE=${#REGIONS[@]}
        REGIONS_IDX=$(($RANDOM % $REGIONS_SIZE))
        REGION1=${REGIONS[$REGIONS_IDX]}
        echo "REGION1=${REGION1}" >> $GITHUB_ENV
        REGION2=${REGIONS[$REGIONS_IDX]}
        echo "REGION2=${REGION2}" >> $GITHUB_ENV
        REGION3=${{ env.AZURE_ARM_REGIONS }}
        echo "REGION3=${REGION3}" >> $GITHUB_ENV
        echo "Deploying to Azure regions: Linux_amd64=${REGION1} Windows=${REGION2} Linux_arm64=${REGION3}"
        # Tags
        DATE_TAG=$(date --iso-8601=seconds)
        echo "Tags: date=${DATE_TAG}"
        # Deploy Linux arm64/amd64 and Windows clusters
        # Retry the deployment twice in case of transient failures (such as capacity constraints)
        success=false
        for i in 1 2 3; do
          az deployment sub create \
            --name "${{ env.TEST_PREFIX }}" \
            --location ${REGION1} \
            --template-file ./tests/test-infra/azure-all.bicep \
            --parameters \
              namePrefix="${{ env.TEST_PREFIX }}" \
              enableArm="${{ env.ENABLE_ARM }}" \
              location1=${REGION1} \
              location2=${REGION2} \
              location3=${REGION3} \
              dateTag="${DATE_TAG}" \
              diagLogAnalyticsWorkspaceResourceId="${{ secrets.AZURE_DIAG_LOG_ANALYTICS_WORKSPACE_ID }}" \
              diagStorageResourceId="${{ secrets.AZURE_DIAG_STORAGE_ID }}" \
              armDiagLogAnalyticsWorkspaceResourceId="${{ secrets.AZURE_ARM_DIAG_LOG_ANALYTICS_WORKSPACE_ID }}" \
              armDiagStorageResourceId="${{ secrets.AZURE_ARM_DIAG_STORAGE_ID }}" \
            && success=true \
            && break \
            || sleep 120
        done
        # Exit with error if failed
        $success || exit 1
      shell: bash
    - name: Update PR comment for success
      if: success() && env.PR_NUMBER != ''
      uses: artursouza/sticky-pull-request-comment@v2.2.0
      with:
        header: ${{ github.run_id }}
        number: ${{ env.PR_NUMBER }}
        append: true
        GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
        message: |
          ## ✅ Infrastructure deployed
          | Cluster | Resource group name | Azure region |
          | --- | --- | --- |
          | Linux | `Dapr-E2E-${{ env.TEST_PREFIX }}l` | ${{ env.REGION1 }} |
          | Windows | `Dapr-E2E-${{ env.TEST_PREFIX }}w` | ${{ env.REGION2 }} |
          | Linux/arm64 | `Dapr-E2E-${{ env.TEST_PREFIX }}la` | ${{ env.REGION3 }} |
    - name: Update PR comment for failure
      if: failure() && env.PR_NUMBER != ''
      uses: artursouza/sticky-pull-request-comment@v2.2.0
      with:
        header: ${{ github.run_id }}
        number: ${{ env.PR_NUMBER }}
        append: true
        GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
        message: |
          ## ❌ Infrastructure deployment failed
          | Cluster | Resource group name | Azure region |
          | --- | --- | --- |
          | Linux | `Dapr-E2E-${{ env.TEST_PREFIX }}l` | ${{ env.REGION1 }} |
          | Windows | `Dapr-E2E-${{ env.TEST_PREFIX }}w` | ${{ env.REGION2 }} |
          | Linux/arm64 | `Dapr-E2E-${{ env.TEST_PREFIX }}la` | ${{ env.REGION3 }} |
          Please check the logs for details on the failure.
# Builds Dapr binaries, Docker images, and E2E test apps for each target OS/arch
# and pushes them to the per-run Azure Container Registry.
build:
  name: Build for ${{ matrix.target_os }} on ${{ matrix.target_arch }}
  runs-on: ${{ matrix.os }}
  env:
    GOOS: ${{ matrix.target_os }}
    GOARCH: ${{ matrix.target_arch }}
    TARGET_OS: ${{ matrix.target_os }}
    TARGET_ARCH: ${{ matrix.target_arch }}
  strategy:
    fail-fast: false
    matrix:
      include:
        - os: ubuntu-22.04
          target_os: linux
          target_arch: amd64
        - os: windows-2022
          target_os: windows
          target_arch: amd64
          windows_version: ltsc2022
  steps:
    - name: Set up for manual runs
      if: github.event_name == 'workflow_dispatch'
      run: |
        echo "CHECKOUT_REPO=${{ github.repository }}" >> $GITHUB_ENV
        echo "CHECKOUT_REF=${{ github.ref }}" >> $GITHUB_ENV
      shell: bash
    - name: Set up for scheduled test
      if: github.event_name == 'schedule'
      run: |
        echo "CHECKOUT_REPO=${{ github.repository }}" >> $GITHUB_ENV
        echo "CHECKOUT_REF=refs/heads/master" >> $GITHUB_ENV
      shell: bash
    - name: Set up for dispatched events
      if: github.event_name == 'repository_dispatch'
      uses: actions/github-script@v6.2.0
      with:
        github-token: ${{ secrets.DAPR_BOT_TOKEN }}
        script: |
          const testPayload = context.payload.client_payload;
          if (testPayload && testPayload.command == "ok-to-test") {
            var fs = require('fs');
            // Set environment variables
            fs.appendFileSync(process.env.GITHUB_ENV,
              `CHECKOUT_REPO=${testPayload.pull_head_repo}\n`+
              `CHECKOUT_REF=${testPayload.pull_head_ref}\n`+
              `PR_NUMBER=${testPayload.issue.number}`
            );
          }
    # In windows-2019 images, WSL comes with bash.exe (but no distribution) and that causes issues
    # See: https://github.community/t/wsl-not-available-for-hosted-windows-machine/124389
    - name: Remove bash.exe from WSL
      if: runner.os == 'Windows'
      run: |
        rm.exe "C:/WINDOWS/system32/bash.EXE"
    - name: Check out code
      if: env.CHECKOUT_REPO != ''
      uses: actions/checkout@v4
      with:
        repository: ${{ env.CHECKOUT_REPO }}
        ref: ${{ env.CHECKOUT_REF }}
    - name: Set up Go
      id: setup-go
      uses: actions/setup-go@v5
      with:
        go-version-file: "go.mod"
    - name: Login to Azure
      if: env.CHECKOUT_REPO != ''
      uses: azure/login@v1
      with:
        creds: ${{ secrets.AZURE_CREDENTIALS }}
    - name: Login to cache registry for Windows
      if: runner.os == 'Windows' && env.CHECKOUT_REPO != '' && env.DAPR_CACHE_REGISTRY != ''
      run: |
        $accessToken = (az acr login -n ${{ env.DAPR_CACHE_REGISTRY }} --expose-token --query accessToken --output tsv)
        docker login ${{ env.DAPR_CACHE_REGISTRY }} -u 00000000-0000-0000-0000-000000000000 -p $accessToken
    - name: Login to cache registry for Linux or Mac
      if: runner.os != 'Windows' && env.CHECKOUT_REPO != '' && env.DAPR_CACHE_REGISTRY != ''
      run: |
        az acr login --name ${{ env.DAPR_CACHE_REGISTRY }}
      shell: bash
    - name: Build test prefix and set env vars
      if: env.CHECKOUT_REPO != ''
      run: |
        # Must match the prefix computed by the deploy-infrastructure job so
        # images land in the registry provisioned for this run.
        BASE_STR="E2E|${GITHUB_SHA}|${GITHUB_SERVER_URL}|${GITHUB_REPOSITORY}|${GITHUB_RUN_ID}|${GITHUB_RUN_ATTEMPT}"
        SUFFIX=$(echo $BASE_STR | sha1sum | head -c 10)
        TEST_PREFIX=""
        if [ "${{ env.TARGET_OS }}" == "windows" ] ; then
          TEST_PREFIX="dapre2e${SUFFIX}w"
        elif [ "${{ env.TARGET_ARCH }}" == "arm64" ] ; then
          TEST_PREFIX="dapre2e${SUFFIX}la"
        else
          TEST_PREFIX="dapre2e${SUFFIX}l"
        fi
        echo "Test prefix is ${TEST_PREFIX}"
        echo "TEST_PREFIX=${TEST_PREFIX}" >> $GITHUB_ENV
        echo "DAPR_REGISTRY=${TEST_PREFIX}acr.azurecr.io" >> $GITHUB_ENV
        echo "TEST_CLUSTER=${TEST_PREFIX}-aks" >> $GITHUB_ENV
        echo "DAPR_TAG=${TEST_PREFIX}" >> $GITHUB_ENV
        echo "DAPR_TEST_TAG=${TEST_PREFIX}" >> $GITHUB_ENV
        echo "TEST_RESOURCE_GROUP=Dapr-E2E-${TEST_PREFIX}" >> $GITHUB_ENV
        echo "WINDOWS_VERSION=${{ matrix.windows_version }}" >> $GITHUB_ENV
      shell: bash
    - name: Build Dapr and its Docker images
      if: env.TEST_PREFIX != ''
      run: |
        make build
        if [ "${{ env.TARGET_OS }}" = "linux" ] && [ "${{ env.TARGET_ARCH }}" != "arm" ]; then
          # For Linux, we use images based on Mariner
          DOCKERFILE=Dockerfile-mariner make docker-build
        else
          make docker-build
        fi
      shell: bash
    - name: Wait for Azure Container Registry deployment
      timeout-minutes: 30
      if: env.TEST_PREFIX != ''
      run: ./tests/test-infra/wait_${{ env.TEST_CLOUD_ENV }}_registry.sh ${{ env.TEST_PREFIX }}acr
      shell: bash
    - name: Login to Azure Container Registry for Windows
      if: runner.os == 'Windows' && env.TEST_PREFIX != ''
      run: |
        $accessToken = (az acr login -n ${{ env.DAPR_REGISTRY }} --expose-token --query accessToken --output tsv)
        docker login ${{ env.DAPR_REGISTRY }} -u 00000000-0000-0000-0000-000000000000 -p $accessToken
    - name: Login to Azure Container Registry for Linux or Mac
      if: runner.os != 'Windows' && env.TEST_PREFIX != ''
      run: |
        az acr login --name ${{ env.TEST_PREFIX }}acr
      shell: bash
    - name: Push Dapr container images
      if: env.TEST_PREFIX != ''
      run: |
        if [ "${{ env.TARGET_OS }}" = "linux" ] && [ "${{ env.TARGET_ARCH }}" != "arm" ]; then
          # For Linux, we use images based on Mariner
          DOCKERFILE=Dockerfile-mariner make docker-push
        else
          make docker-push
        fi
      shell: bash
    - name: Build and push E2E test apps
      if: env.TEST_PREFIX != ''
      run: |
        make build-push-e2e-app-all
      shell: bash
    - name: Update PR comment for success
      if: success() && env.PR_NUMBER != ''
      uses: artursouza/sticky-pull-request-comment@v2.2.0
      with:
        header: ${{ github.run_id }}
        number: ${{ env.PR_NUMBER }}
        append: true
        GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
        message: |
          ## ✅ Build succeeded for ${{ matrix.target_os }}/${{ matrix.target_arch }}
          - Image tag: `${{ env.DAPR_TAG }}`
          - Test image tag: `${{ env.DAPR_TEST_TAG }}`
    - name: Update PR comment for failure
      if: failure() && env.PR_NUMBER != ''
      uses: artursouza/sticky-pull-request-comment@v2.2.0
      with:
        header: ${{ github.run_id }}
        number: ${{ env.PR_NUMBER }}
        append: true
        GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
        message: |
          ## ❌ Build failed for ${{ matrix.target_os }}/${{ matrix.target_arch }}
          Please check the logs for details on the error.
test-e2e:
name: End-to-end ${{ matrix.target_os }} on ${{ matrix.target_arch }} tests
needs:
- build
- deploy-infrastructure
# Always run on Linux as the local OS is irrelevant and this is faster
runs-on: ubuntu-22.04
env:
TARGET_OS: ${{ matrix.target_os }}
TARGET_ARCH: ${{ matrix.target_arch }}
TEST_OUTPUT_FILE_PREFIX: "test_report"
PULL_POLICY: IfNotPresent
strategy:
fail-fast: false
matrix:
include:
- target_os: linux
target_arch: amd64
- target_os: windows
target_arch: amd64
windows_version: ltsc2022
steps:
- name: Set up log paths
run: |
echo "DAPR_CONTAINER_LOG_PATH=$GITHUB_WORKSPACE/container_logs/${{ matrix.target_os }}_${{ matrix.target_arch }}" | sed 's/\\/\//g' >> $GITHUB_ENV
echo "DAPR_TEST_LOG_PATH=$GITHUB_WORKSPACE/test_logs/${{ matrix.target_os }}_${{ matrix.target_arch }}" | sed 's/\\/\//g' >> $GITHUB_ENV
shell: bash
- name: Set up for manual runs
if: github.event_name == 'workflow_dispatch'
run: |
echo "CHECKOUT_REPO=${{ github.repository }}" >> $GITHUB_ENV
echo "CHECKOUT_REF=${{ github.ref }}" >> $GITHUB_ENV
shell: bash
- name: Set up for scheduled test
if: github.event_name == 'schedule'
run: |
echo "CHECKOUT_REPO=${{ github.repository }}" >> $GITHUB_ENV
echo "CHECKOUT_REF=refs/heads/master" >> $GITHUB_ENV
shell: bash
- name: Set up for dispatched events
if: github.event_name == 'repository_dispatch'
uses: actions/github-script@v6.2.0
with:
github-token: ${{secrets.DAPR_BOT_TOKEN}}
script: |
const testPayload = context.payload.client_payload;
if (testPayload && testPayload.command == "ok-to-test") {
var fs = require('fs');
// Set environment variables
fs.appendFileSync(process.env.GITHUB_ENV,
`CHECKOUT_REPO=${testPayload.pull_head_repo}\n`+
`CHECKOUT_REF=${testPayload.pull_head_ref}\n`+
`PR_NUMBER=${testPayload.issue.number}`
);
}
- name: Check out code
if: env.CHECKOUT_REPO != ''
uses: actions/checkout@v4
with:
repository: ${{ env.CHECKOUT_REPO }}
ref: ${{ env.CHECKOUT_REF }}
- name: Set up Go
id: setup-go
uses: actions/setup-go@v5
with:
go-version-file: "go.mod"
- uses: azure/setup-kubectl@v3
with:
version: ${{ env.KUBECTLVER }}
id: install
- name: Set up Helm ${{ env.HELMVER }}
uses: azure/setup-helm@v3
with:
version: ${{ env.HELMVER }}
- name: Login to Azure
if: env.CHECKOUT_REPO != ''
uses: azure/login@v1
with:
creds: ${{ secrets.AZURE_CREDENTIALS }}
- name: Build test prefix and set env vars
if: env.CHECKOUT_REPO != ''
run: |
BASE_STR="E2E|${GITHUB_SHA}|${GITHUB_SERVER_URL}|${GITHUB_REPOSITORY}|${GITHUB_RUN_ID}|${GITHUB_RUN_ATTEMPT}"
SUFFIX=$(echo $BASE_STR | sha1sum | head -c 10)
TEST_PREFIX=""
if [ "${{ env.TARGET_OS }}" == "windows" ] ; then
TEST_PREFIX="dapre2e${SUFFIX}w"
elif [ "${{ env.TARGET_ARCH }}" == "arm64" ] ; then
TEST_PREFIX="dapre2e${SUFFIX}la"
else
TEST_PREFIX="dapre2e${SUFFIX}l"
fi
echo "Test prefix is ${TEST_PREFIX}"
echo "TEST_PREFIX=${TEST_PREFIX}" >> $GITHUB_ENV
echo "DAPR_REGISTRY=${TEST_PREFIX}acr.azurecr.io" >> $GITHUB_ENV
echo "TEST_CLUSTER=${TEST_PREFIX}-aks" >> $GITHUB_ENV
echo "DAPR_TAG=${TEST_PREFIX}" >> $GITHUB_ENV
echo "DAPR_TEST_TAG=${TEST_PREFIX}" >> $GITHUB_ENV
echo "TEST_RESOURCE_GROUP=Dapr-E2E-${TEST_PREFIX}" >> $GITHUB_ENV
echo "WINDOWS_VERSION=${{ matrix.windows_version }}" >> $GITHUB_ENV
shell: bash
- name: Enable tcpdump
if: env.TCP_DUMPS == 'true'
run: |
sudo tcpdump -nn -i any -w sntp.cap &
sleep 1
shell: bash
- name: Connect to Kubernetes
if: env.TEST_PREFIX != ''
run: |
az aks get-credentials -n "${{ env.TEST_CLUSTER }}" -g "${{ env.TEST_RESOURCE_GROUP }}"
kubectl create namespace ${{ env.DAPR_NAMESPACE }}
shell: bash
- name: Setup for cloud resources
if: env.TEST_PREFIX != '' && env.TEST_CLOUD_ENV != ''
env:
AZURE_CREDENTIALS: ${{ secrets.AZURE_CREDENTIALS }}
run: ./tests/test-infra/setup_${{ env.TEST_CLOUD_ENV }}.sh
shell: bash
- name: Preparing AKS cluster for test
if: env.TEST_PREFIX != ''
run: |
make setup-helm-init
make setup-test-env-redis
make setup-test-env-kafka
make setup-test-env-zipkin
make setup-test-env-postgres
kubectl get pods -n ${{ env.DAPR_NAMESPACE }}
shell: bash
- name: Deploy dapr to AKS cluster
if: env.TEST_PREFIX != ''
env:
ADDITIONAL_HELM_SET: "dapr_operator.logLevel=debug,dapr_operator.watchInterval=20s,dapr_dashboard.enabled=false"
run: make docker-deploy-k8s
- name: Deploy test components
if: env.TEST_PREFIX != ''
run: make setup-test-components
- name: Show dapr configurations
if: env.TEST_PREFIX != ''
run: kubectl get configurations daprsystem -n ${{ env.DAPR_NAMESPACE }} -o yaml
- name: Determine if E2E tests should run
if: env.TEST_PREFIX != '' && env.TEST_CLOUD_ENV != ''
run: ./tests/test-infra/skip_${{ env.TEST_CLOUD_ENV }}.sh
shell: bash
- name: Run E2E tests
if: env.TEST_PREFIX != '' && env.SKIP_E2E != 'true'
run: make test-e2e-all
- name: Add job test summary
if: always()
uses: test-summary/action@v2
with:
paths: ${{ env.TEST_OUTPUT_FILE_PREFIX }}_e2e*.xml
- name: Add job test outputs
if: always()
uses: actions/github-script@v6
with:
script: |
const script = require('./.github/scripts/dapr_tests_summary.js')
await script({core, glob})
- name: Save control plane K8s resources
if: always() && env.TEST_PREFIX != ''
run: |
make save-dapr-control-plane-k8s-resources || true
- name: Save control plane logs
if: always() && env.TEST_PREFIX != ''
run: |
make save-dapr-control-plane-k8s-logs
- name: Stop tcpdump
if: always() && env.TCP_DUMPS == 'true'
run: |
sleep 1
sudo kill -2 $(pgrep tcpdump)
sleep 1
# Container log files can be bigger than the maximum file size allowed by GitHub
- name: Compress logs
if: always()
run: |
test ${{ env.DAPR_CONTAINER_LOG_PATH }} \
&& gzip --fast -r ${{ env.DAPR_CONTAINER_LOG_PATH }} \
|| true
test ${{ env.DAPR_TEST_LOG_PATH }} \
&& gzip --fast -r ${{ env.DAPR_TEST_LOG_PATH }} \
|| true
test -f sntp.cap \
&& gzip --fast sntp.cap \
|| true
shell: bash
- name: Upload tcpdump
if: always() && env.TCP_DUMPS == 'true'
uses: actions/upload-artifact@v4
with:
name: ${{ matrix.target_os }}_${{ matrix.target_arch }}_tcpdump
path: sntp.cap.gz
compression-level: 0 # File is already compressed
- name: Upload container logs
if: always()
uses: actions/upload-artifact@v4
with:
name: ${{ matrix.target_os }}_${{ matrix.target_arch }}_container_logs
path: ${{ env.DAPR_CONTAINER_LOG_PATH }}
compression-level: 0 # Content is already compressed
- name: Upload test logs
if: always()
uses: actions/upload-artifact@v4
with:
name: ${{ matrix.target_os }}_${{ matrix.target_arch }}_test_logs
path: ${{ env.DAPR_TEST_LOG_PATH }}
compression-level: 0 # Content is already compressed
- name: Upload test results
if: always()
uses: actions/upload-artifact@v4
with:
#TODO: .json suffix can be removed from artifact name after test analytics scripts are updated
name: ${{ matrix.target_os }}_${{ matrix.target_arch }}_e2e.json
path: ${{ env.TEST_OUTPUT_FILE_PREFIX }}_e2e.*
- name: Update PR comment for success
if: success() && env.PR_NUMBER != '' && env.SKIP_E2E != 'true'
uses: artursouza/sticky-pull-request-comment@v2.2.0
with:
header: ${{ github.run_id }}
number: ${{ env.PR_NUMBER }}
append: true
GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
message: |
            ## β Tests succeeded on ${{ matrix.target_os }}/${{ matrix.target_arch }}
- Image tag: `${{ env.DAPR_TAG }}`
- Test image tag: `${{ env.DAPR_TEST_TAG }}`
- name: Update PR comment for skipped test run
if: success() && env.PR_NUMBER != '' && env.SKIP_E2E == 'true'
uses: artursouza/sticky-pull-request-comment@v2.2.0
with:
header: ${{ github.run_id }}
number: ${{ env.PR_NUMBER }}
append: true
GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
message: |
## β οΈ Tests skipped on ${{ matrix.target_os }}/${{ matrix.target_arch }}
- Image tag: `${{ env.DAPR_TAG }}`
- Test image tag: `${{ env.DAPR_TEST_TAG }}`
- name: Update PR comment for failure
if: failure() && env.PR_NUMBER != ''
uses: artursouza/sticky-pull-request-comment@v2.2.0
with:
header: ${{ github.run_id }}
number: ${{ env.PR_NUMBER }}
append: true
GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
message: |
## β Tests failed on ${{ matrix.target_os }}/${{ matrix.target_arch }}
Please check the logs for details on the error.
- name: Update PR comment for cancellation
if: cancelled() && env.PR_NUMBER != ''
uses: artursouza/sticky-pull-request-comment@v2.2.0
with:
header: ${{ github.run_id }}
number: ${{ env.PR_NUMBER }}
append: true
GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
message: |
## β οΈ Tests cancelled for ${{ matrix.target_os }}/${{ matrix.target_arch }}
The Action has been canceled
cleanup:
name: Clean up Azure resources
runs-on: ubuntu-22.04
needs:
- test-e2e
if: always()
steps:
- name: Login to Azure
uses: azure/login@v1
with:
creds: ${{ secrets.AZURE_CREDENTIALS }}
- name: Build test prefix
run: |
BASE_STR="E2E|${GITHUB_SHA}|${GITHUB_SERVER_URL}|${GITHUB_REPOSITORY}|${GITHUB_RUN_ID}|${GITHUB_RUN_ATTEMPT}"
SUFFIX=$(echo $BASE_STR | sha1sum | head -c 10)
TEST_PREFIX="dapre2e${SUFFIX}"
echo "Test prefix is ${TEST_PREFIX}"
echo "TEST_PREFIX=${TEST_PREFIX}" >> $GITHUB_ENV
shell: bash
- name: Delete Linux cluster
run: |
# We are not waiting for these commands to complete, and we're ignoring errors
echo "Starting removal of resource group Dapr-E2E-${{ env.TEST_PREFIX }}l"
az group delete --no-wait --yes --name "Dapr-E2E-${{ env.TEST_PREFIX }}l" || true
shell: bash
- name: Delete Windows cluster
run: |
# We are not waiting for these commands to complete, and we're ignoring errors
echo "Starting removal of resource group Dapr-E2E-${{ env.TEST_PREFIX }}w"
az group delete --no-wait --yes --name "Dapr-E2E-${{ env.TEST_PREFIX }}w" || true
shell: bash
- name: Delete Arm64 cluster
if: env.ENABLE_ARM == 'true'
run: |
# We are not waiting for these commands to complete, and we're ignoring errors
echo "Starting removal of resource group Dapr-E2E-${{ env.TEST_PREFIX }}la"
az group delete --no-wait --yes --name "Dapr-E2E-${{ env.TEST_PREFIX }}la" || true
shell: bash
|
mikeee/dapr
|
.github/workflows/dapr-test.yml
|
YAML
|
mit
| 29,243 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
name: dapr
on:
workflow_dispatch:
schedule:
- cron: "00 22 * * *"
push:
branches:
- main
- master
- release-*
- feature/*
tags:
- v*
pull_request:
branches:
- main
- master
- release-*
- feature/*
jobs:
lint:
name: lint & proto validation
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
target_os: ["linux"]
target_arch: ["amd64"]
env:
GOLANGCILINT_VER: "v1.55.2"
PROTOC_VERSION: "24.4"
GOOS: "${{ matrix.target_os }}"
GOARCH: "${{ matrix.target_arch }}"
GOPROXY: "https://proxy.golang.org"
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v4
- name: Set up Go
id: setup-go
uses: actions/setup-go@v5
with:
go-version-file: "go.mod"
- name: Check white space in .md files
if: github.event_name == 'pull_request'
run: |
TRAILING_WHITESPACE=0
# only check changed docs in pr
for file in $(git diff --cached --name-only --diff-filter=ACRMTU $GITHUB_BASE_REF | grep "\.md"); do
            if grep -r '[[:blank:]]$' "$file" > /dev/null; then
              echo "trailing whitespace: ${file}" >&2
ERRORS=yes
((TRAILING_WHITESPACE=TRAILING_WHITESPACE+1))
fi
done
if [[ -n "$ERRORS" ]]; then
echo >&2
echo "ERRORS found" >&2
echo "${TRAILING_WHITESPACE} files with trailing whitespace" >&2
echo >&2
exit 1
fi
- name: Check for disallowed changes in go.mod
run: node ./.github/scripts/check_go_mod.mjs
- name: golangci-lint
uses: golangci/golangci-lint-action@v3.7.0
with:
version: ${{ env.GOLANGCILINT_VER }}
skip-cache: true
args: --build-tags allcomponents
- name: Run go mod tidy check diff
run: make modtidy check-diff
- name: Check for retracted dependencies
run: |
if [[ $(go list -mod=mod -f '{{if .Retracted}}{{.}}{{end}}' -u -m all) ]]; then
exit 1
else
exit 0
fi
- name: Run gen-proto check diff
run: |
wget https://github.com/protocolbuffers/protobuf/releases/download/v${{ env.PROTOC_VERSION }}/protoc-${{ env.PROTOC_VERSION }}-linux-x86_64.zip
unzip protoc-${{ env.PROTOC_VERSION }}-linux-x86_64.zip -d protoc
sudo cp -r protoc/include/google/ /usr/local/include/
sudo chmod -R 755 /usr/local/include/google
sudo cp protoc/bin/protoc /usr/local/bin/
sudo chmod +x /usr/local/bin/protoc
rm -r protoc protoc-${{ env.PROTOC_VERSION }}-linux-x86_64.zip
make init-proto
make gen-proto check-proto-diff
unit-tests:
name: Unit tests
needs: lint
runs-on: "${{ matrix.os }}"
strategy:
fail-fast: false
matrix:
include:
- os: ubuntu-latest
target_os: linux
target_arch: amd64
- os: windows-2022
target_os: windows
target_arch: amd64
windows_version: ltsc2022
- os: macOS-latest
target_os: darwin
target_arch: amd64
env:
GOOS: "${{ matrix.target_os }}"
GOARCH: "${{ matrix.target_arch }}"
GOPROXY: "https://proxy.golang.org"
ARCHIVE_OUTDIR: "dist/archives"
TEST_OUTPUT_FILE_PREFIX: "test_report"
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v4
- name: Set up Go
id: setup-go
uses: actions/setup-go@v5
with:
go-version-file: "go.mod"
- name: Run make test
env:
COVERAGE_OPTS: "-coverprofile=coverage.txt -covermode=atomic"
run: make test
- name: Codecov
uses: codecov/codecov-action@v1
- name: Upload test results
if: always()
uses: actions/upload-artifact@v4
with:
name: ${{ matrix.target_os }}_${{ matrix.target_arch }}_test_unit.json
path: ${{ env.TEST_OUTPUT_FILE_PREFIX }}_unit.json
integration-tests:
name: Integration tests
needs: lint
runs-on: "${{ matrix.os }}"
strategy:
fail-fast: false
matrix:
include:
- os: ubuntu-latest
target_os: linux
target_arch: amd64
- os: windows-2022
target_os: windows
target_arch: amd64
windows_version: ltsc2022
- os: macOS-latest
target_os: darwin
target_arch: amd64
env:
GOOS: "${{ matrix.target_os }}"
GOARCH: "${{ matrix.target_arch }}"
GOPROXY: "https://proxy.golang.org"
TEST_OUTPUT_FILE_PREFIX: "test_report"
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v4
- name: Set up Go
id: setup-go
uses: actions/setup-go@v5
with:
go-version-file: "go.mod"
- name: Override DAPR_HOST_IP for MacOS
if: matrix.target_os == 'darwin'
run: |
echo "DAPR_HOST_IP=127.0.0.1" >>${GITHUB_ENV}
- name: Run make test-integration-parallel
run: make test-integration-parallel
build:
name: "Build artifacts on ${{ matrix.job_name }} - ${{ matrix.sidecar_flavor }}"
runs-on: "${{ matrix.os }}"
needs: [unit-tests, integration-tests]
env:
GOOS: "${{ matrix.target_os }}"
GOARCH: "${{ matrix.target_arch }}"
GOPROXY: "https://proxy.golang.org"
ARCHIVE_OUTDIR: "dist/archives"
strategy:
fail-fast: false
matrix:
include:
- os: ubuntu-latest
target_os: linux
target_arch: amd64
job_name: "Linux/amd64"
sidecar_flavor: "allcomponents"
- os: ubuntu-latest
target_os: linux
target_arch: amd64
job_name: "Linux/amd64"
sidecar_flavor: "stablecomponents"
- os: ubuntu-latest
target_os: linux
target_arch: arm64
job_name: "Linux/arm64"
sidecar_flavor: "allcomponents"
- os: ubuntu-latest
target_os: linux
target_arch: arm64
job_name: "Linux/arm64"
sidecar_flavor: "stablecomponents"
- os: ubuntu-latest
target_os: linux
target_arch: arm
job_name: "Linux/arm"
sidecar_flavor: "allcomponents"
- os: ubuntu-latest
target_os: linux
target_arch: arm
job_name: "Linux/arm"
sidecar_flavor: "stablecomponents"
- os: windows-2019
target_os: windows
target_arch: amd64
windows_version: "1809"
job_name: "Windows 1809"
sidecar_flavor: "allcomponents"
- os: windows-2022
target_os: windows
target_arch: amd64
windows_version: ltsc2022
job_name: "Windows LTSC 2022"
sidecar_flavor: "allcomponents"
- os: macOS-latest
target_os: darwin
target_arch: amd64
job_name: "macOS/Intel"
sidecar_flavor: "allcomponents"
- os: macOS-latest
target_os: darwin
target_arch: arm64
job_name: "macOS/Apple Silicon"
sidecar_flavor: "allcomponents"
steps:
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
if: matrix.target_os == 'linux' && github.event_name != 'pull_request'
with:
image: tonistiigi/binfmt:latest
platforms: arm64
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
if: matrix.target_os == 'linux' && github.event_name != 'pull_request'
with:
version: v0.10.1 # Don't use latest since it broke our workflow once
- name: Check out code into the Go module directory
uses: actions/checkout@v4
- name: Set up Go
id: setup-go
uses: actions/setup-go@v5
with:
go-version-file: "go.mod"
- name: Parse release version and set REL_VERSION and LATEST_RELEASE
run: python ./.github/scripts/get_release_version.py ${{ github.event_name }}
- name: Updates version for sidecar flavor
if: matrix.sidecar_flavor != 'allcomponents'
run: |
echo "REL_VERSION=${REL_VERSION}-${{matrix.sidecar_flavor}}" >>${GITHUB_ENV}
# Only sidecar is built
echo "BINARIES=daprd" >>${GITHUB_ENV}
shell: bash
- name: Set REPO_OWNER
if: matrix.target_os != 'darwin'
run: |
REPO_OWNER=${{ github.repository_owner }}
# Lowercase the value
echo "REPO_OWNER=${REPO_OWNER,,}" >>${GITHUB_ENV}
shell: bash
- name: Run make release to build and archive binaries
env:
GOOS: ${{ matrix.target_os }}
GOARCH: ${{ matrix.target_arch }}
ARCHIVE_OUT_DIR: ${{ env.ARCHIVE_OUTDIR }}
if: matrix.sidecar_flavor == 'allcomponents'
run: |
mkdir -p "${ARCHIVE_OUT_DIR}"
make release
shell: bash
- name: Run make release to build and archive binaries for flavor
env:
GOOS: ${{ matrix.target_os }}
GOARCH: ${{ matrix.target_arch }}
ARCHIVE_OUT_DIR: ${{ env.ARCHIVE_OUTDIR }}
DAPR_SIDECAR_FLAVOR: "${{ matrix.sidecar_flavor }}"
if: matrix.sidecar_flavor != 'allcomponents'
run: |
mkdir -p "${ARCHIVE_OUT_DIR}"
make release-flavor
shell: bash
- name: upload artifacts
uses: actions/upload-artifact@v4
# Avoid publishing duplicate Windows artifacts, which will cause an error
if: matrix.windows_version != '1809'
with:
name: dapr_${{ matrix.target_os }}_${{ matrix.target_arch }}_${{ matrix.sidecar_flavor }}
path: ${{ env.ARCHIVE_OUTDIR }}
compression-level: 0 # Content is already compressed
- name: upload artifacts - grafana dashboards
if: matrix.target_arch == 'amd64' && matrix.target_os == 'linux' && matrix.sidecar_flavor == 'allcomponents'
uses: actions/upload-artifact@v4
with:
name: dapr_grafana_dashboards
path: ./grafana/*.json
- name: Docker Hub Login
if: matrix.target_os != 'darwin' && github.event_name != 'pull_request' && env.DOCKER_REGISTRY_ID != ''
uses: docker/login-action@v3
env:
DOCKER_REGISTRY_ID: ${{ secrets.DOCKER_REGISTRY_ID }}
with:
username: ${{ secrets.DOCKER_REGISTRY_ID }}
password: ${{ secrets.DOCKER_REGISTRY_PASS }}
- name: GitHub Container Registry login
if: matrix.target_os != 'darwin' && github.event_name != 'pull_request'
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push Docker images to Docker Hub
if: matrix.target_os != 'darwin' && github.event_name != 'pull_request' && env.DOCKER_REGISTRY_ID != ''
env:
DOCKER_REGISTRY_ID: ${{ secrets.DOCKER_REGISTRY_ID }}
DAPR_REGISTRY: ${{ secrets.DOCKER_REGISTRY }}
TARGET_OS: ${{ matrix.target_os }}
TARGET_ARCH: ${{ matrix.target_arch }}
WINDOWS_VERSION: ${{ matrix.windows_version }}
run: |
echo "Build Docker images and push to Docker Hub..."
DAPR_TAG=${{ env.REL_VERSION }} make docker-push
# Mariner images are built only on linux/amd64 and linux/arm64
if [ "$TARGET_OS" = "linux" ] && [ "$TARGET_ARCH" != "arm" ]; then
DOCKERFILE=Dockerfile-mariner DAPR_TAG="${{ env.REL_VERSION }}-mariner" make docker-push
fi
shell: bash
- name: Build and push Docker images to GHCR
if: matrix.target_os != 'darwin' && github.event_name != 'pull_request'
env:
DAPR_REGISTRY: ghcr.io/${{ env.REPO_OWNER }}
TARGET_OS: ${{ matrix.target_os }}
TARGET_ARCH: ${{ matrix.target_arch }}
WINDOWS_VERSION: ${{ matrix.windows_version }}
run: |
echo "Build Docker images and push to GHCR..."
DAPR_TAG=${{ env.REL_VERSION }} make docker-push
# Mariner images are built only on linux/amd64 and linux/arm64
if [ "$TARGET_OS" = "linux" ] && [ "$TARGET_ARCH" != "arm" ]; then
DOCKERFILE=Dockerfile-mariner DAPR_TAG="${{ env.REL_VERSION }}-mariner" make docker-push
fi
shell: bash
publish:
name: Publish binaries
needs: build
if: github.event_name != 'pull_request'
env:
ARTIFACT_DIR: ./release
DOCKER_REGISTRY: ${{ secrets.DOCKER_REGISTRY }}
runs-on: ubuntu-latest
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v4
- name: Parse release version and set REL_VERSION and LATEST_RELEASE
run: python ./.github/scripts/get_release_version.py ${{ github.event_name }}
- name: Set REPO_OWNER
if: matrix.target_os != 'darwin'
shell: bash
run: |
REPO_OWNER=${{ github.repository_owner }}
# Lowercase the value
echo "REPO_OWNER=${REPO_OWNER,,}" >>${GITHUB_ENV}
- name: "download artifact: dapr_linux_amd64_allcomponents"
uses: actions/download-artifact@v4
with:
name: dapr_linux_amd64_allcomponents
path: ${{ env.ARTIFACT_DIR }}
- name: "download artifact: dapr_linux_amd64_stablecomponents"
uses: actions/download-artifact@v4
with:
name: dapr_linux_amd64_stablecomponents
path: ${{ env.ARTIFACT_DIR }}
- name: "download artifact: dapr_linux_arm_allcomponents"
uses: actions/download-artifact@v4
with:
name: dapr_linux_arm_allcomponents
path: ${{ env.ARTIFACT_DIR }}
- name: "download artifact: dapr_linux_arm_stablecomponents"
uses: actions/download-artifact@v4
with:
name: dapr_linux_arm_stablecomponents
path: ${{ env.ARTIFACT_DIR }}
- name: "download artifact: dapr_linux_arm64_allcomponents"
uses: actions/download-artifact@v4
with:
name: dapr_linux_arm64_allcomponents
path: ${{ env.ARTIFACT_DIR }}
- name: "download artifact: dapr_linux_arm64_stablecomponents"
uses: actions/download-artifact@v4
with:
name: dapr_linux_arm64_stablecomponents
path: ${{ env.ARTIFACT_DIR }}
- name: "download artifact: dapr_windows_amd64_allcomponents"
uses: actions/download-artifact@v4
with:
name: dapr_windows_amd64_allcomponents
path: ${{ env.ARTIFACT_DIR }}
- name: "download artifact: dapr_darwin_amd64_allcomponents"
uses: actions/download-artifact@v4
with:
name: dapr_darwin_amd64_allcomponents
path: ${{ env.ARTIFACT_DIR }}
- name: "download artifact: dapr_darwin_arm64_allcomponents"
uses: actions/download-artifact@v4
with:
name: dapr_darwin_arm64_allcomponents
path: ${{ env.ARTIFACT_DIR }}
- name: "download artifact: dapr_grafana_dashboards"
uses: actions/download-artifact@v4
with:
name: dapr_grafana_dashboards
path: ${{ env.ARTIFACT_DIR }}
- name: generate checksum files
run: cd ${ARTIFACT_DIR} && for i in *; do sha256sum -b $i > "$i.sha256"; done && cd -
- name: lists artifacts
run: ls -l ${{ env.ARTIFACT_DIR }}
- name: publish binaries to github
if: startswith(github.ref, 'refs/tags/v')
run: |
echo "installing github-release-cli..."
sudo npm install --silent --no-progress -g github-release-cli@2.1.0
if [ "$LATEST_RELEASE" = "true" ]; then
export RELEASE_BODY=`cat ./docs/release_notes/v${REL_VERSION}.md`
else
export RELEASE_BODY="This is the release candidate ${REL_VERSION}"
fi
# Get the list of files
RELEASE_ARTIFACT=(${ARTIFACT_DIR}/*)
# Parse repository to get owner and repo names
OWNER_NAME="${GITHUB_REPOSITORY%%/*}"
REPO_NAME="${GITHUB_REPOSITORY#*/}"
export GITHUB_TOKEN=${{ secrets.DAPR_BOT_TOKEN }}
echo "Uploading Dapr Runtime Binaries to GitHub Release"
github-release upload \
--owner $OWNER_NAME \
--repo $REPO_NAME \
--tag "v${REL_VERSION}" \
--release-name "Dapr Runtime v${REL_VERSION}" \
--body "${RELEASE_BODY}" \
--prerelease true \
${RELEASE_ARTIFACT[*]}
shell: bash
docker-publish:
name: Publish docker images
needs: build
if: github.event_name != 'pull_request'
env:
DOCKER_REGISTRY: ${{ secrets.DOCKER_REGISTRY }}
LATEST_TAG: latest
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
sidecar_flavor: ["allcomponents", "stablecomponents"]
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v4
- name: Parse release version and set REL_VERSION and LATEST_RELEASE
run: python ./.github/scripts/get_release_version.py ${{ github.event_name }}
- name: Updates version for sidecar flavor
if: matrix.sidecar_flavor != 'allcomponents'
run: |
echo "REL_VERSION=${REL_VERSION}-${{matrix.sidecar_flavor}}" >>${GITHUB_ENV}
echo "LATEST_TAG=latest-${{matrix.sidecar_flavor}}" >>${GITHUB_ENV}
# We are doing image flavors only for Linux.
echo "DOCKER_MULTI_ARCH=linux-amd64 linux-arm64 linux-arm" >>${GITHUB_ENV}
# Only sidecar is built
echo "BINARIES=daprd" >>${GITHUB_ENV}
shell: bash
- name: Set REPO_OWNER
shell: bash
run: |
REPO_OWNER=${{ github.repository_owner }}
# Lowercase the value
echo "REPO_OWNER=${REPO_OWNER,,}" >>${GITHUB_ENV}
- name: Docker Hub Login
uses: docker/login-action@v3
if: env.DOCKER_REGISTRY != ''
with:
username: ${{ secrets.DOCKER_REGISTRY_ID }}
password: ${{ secrets.DOCKER_REGISTRY_PASS }}
- name: GitHub Container Registry login
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push Docker multiarch manifest to Docker Hub
if: env.DOCKER_REGISTRY_ID != ''
env:
DOCKER_REGISTRY_ID: ${{ secrets.DOCKER_REGISTRY_ID }}
DAPR_REGISTRY: ${{ secrets.DOCKER_REGISTRY }}
run: |
echo "Build Docker multiarch manifest and push to Docker"
DAPR_TAG="${{ env.REL_VERSION }}" make docker-publish
# Publish the `-mariner` tag
# Mariner images are built only on linux/amd64 and linux/arm64
# Also, these use the "latest-mariner" tag if it's the latest
DOCKER_MULTI_ARCH="linux-amd64 linux-arm64" \
DAPR_TAG="${{ env.REL_VERSION }}-mariner" \
LATEST_TAG=${{ env.LATEST_TAG }}-mariner \
make docker-publish
shell: bash
- name: Build and push Docker multiarch Windows manifest to Docker Hub
if: env.DOCKER_REGISTRY_ID != '' && matrix.sidecar_flavor == 'allcomponents'
env:
DOCKER_REGISTRY_ID: ${{ secrets.DOCKER_REGISTRY_ID }}
DAPR_REGISTRY: ${{ secrets.DOCKER_REGISTRY }}
run: |
# Publish the `-windows-amd64` manifest.
# Note, the "latest" tag from the previous step already contains
# the windows images, so we don't need to publish the "latest-windows-amd64" tag.
DOCKER_MULTI_ARCH="windows-1809-amd64 windows-ltsc2022-amd64" \
DAPR_TAG="${{ env.REL_VERSION }}-windows-amd64" \
LATEST_RELEASE=false \
MANIFEST_TAG="${{ env.REL_VERSION }}" \
make docker-publish
shell: bash
- name: Build and push Docker multiarch manifest to GHCR
env:
DAPR_REGISTRY: ghcr.io/${{ env.REPO_OWNER }}
run: |
echo "Build Docker multiarch manifest and push to GHCR"
DAPR_TAG="${{ env.REL_VERSION }}" make docker-publish
# Publish the `-mariner` tag
# Mariner images are built only on linux/amd64 and linux/arm64
# Also, these use the "latest-mariner" tag if it's the latest
DOCKER_MULTI_ARCH="linux-amd64 linux-arm64" \
DAPR_TAG="${{ env.REL_VERSION }}-mariner" \
LATEST_TAG=${{ env.LATEST_TAG }}-mariner \
make docker-publish
- name: Build and push Docker multiarch Windows manifest to GHCR
if: matrix.sidecar_flavor == 'allcomponents'
env:
DAPR_REGISTRY: ghcr.io/${{ env.REPO_OWNER }}
run: |
# Publish the `-windows-amd64` manifest.
# Note, the "latest" tag from the previous step already contains
# the windows images, so we don't need to publish the "latest-windows-amd64" tag.
DOCKER_MULTI_ARCH="windows-1809-amd64 windows-ltsc2022-amd64" \
DAPR_TAG="${{ env.REL_VERSION }}-windows-amd64" \
LATEST_RELEASE=false \
MANIFEST_TAG="${{ env.REL_VERSION }}" \
make docker-publish
shell: bash
helm:
name: Package Helm Chart
needs: [publish, docker-publish]
if: github.event_name != 'pull_request'
env:
ARTIFACT_DIR: ./release
HELM_PACKAGE_DIR: helm
DAPR_VERSION_ARTIFACT: dapr_version
DOCKER_REGISTRY: ${{ secrets.DOCKER_REGISTRY }}
HELMVER: v3.13.2
runs-on: ubuntu-latest
steps:
- name: Set up Helm ${{ env.HELMVER }}
uses: azure/setup-helm@v3
with:
version: ${{ env.HELMVER }}
- name: Check out code into the Go module directory
uses: actions/checkout@v4
- name: Parse release version and set REL_VERSION and LATEST_RELEASE
run: python ./.github/scripts/get_release_version.py ${{ github.event_name }}
- name: Set REPO_OWNER
shell: bash
run: |
REPO_OWNER=${{ github.repository_owner }}
# Lowercase the value
echo "REPO_OWNER=${REPO_OWNER,,}" >>${GITHUB_ENV}
- name: Update Helm chart files for release version ${{ env.REL_VERSION }}
run: bash ./.github/scripts/set_helm_dapr_version.sh
- name: Generate Helm chart manifest
if: env.DOCKER_REGISTRY != ''
env:
DAPR_REGISTRY: ${{ env.DOCKER_REGISTRY }}
DAPR_TAG: ${{ env.REL_VERSION }}
run: |
make manifest-gen
shell: bash
- name: Move Helm chart manifest to artifact
if: env.DOCKER_REGISTRY != ''
run: |
mkdir -p ${{ env.ARTIFACT_DIR }}
mv ./dist/install/dapr.yaml ${{ env.ARTIFACT_DIR }}/dapr-operator.yaml
- name: Save release version
run: |
mkdir -p ${{ env.ARTIFACT_DIR }}/${{ env.HELM_PACKAGE_DIR }}
echo ${REL_VERSION} > ${{ env.ARTIFACT_DIR }}/${{ env.HELM_PACKAGE_DIR }}/${{ env.DAPR_VERSION_ARTIFACT }}
- name: Package Helm chart
      if: env.LATEST_RELEASE == 'true' && env.DOCKER_REGISTRY != ''
env:
HELM_CHARTS_DIR: charts/dapr
run: |
mkdir -p ${{ env.ARTIFACT_DIR }}/${{ env.HELM_PACKAGE_DIR }}
helm package ${{ env.HELM_CHARTS_DIR }} --destination ${{ env.ARTIFACT_DIR }}/${{ env.HELM_PACKAGE_DIR }}
- name: Upload Helm charts package to artifacts
      if: env.LATEST_RELEASE == 'true' && env.DOCKER_REGISTRY != ''
uses: actions/upload-artifact@v4
with:
name: dapr_helm_charts_package
path: ${{ env.ARTIFACT_DIR }}/${{ env.HELM_PACKAGE_DIR }}
# This job downloads the helm charts package artifact uploaded by the publish job,
# checks out the helm charts git hub pages repo and commits the latest version of
# helm charts package.
# This does not run on forks
helmpublish:
name: Publish helm charts to Helm github pages repo
needs: helm
if: startswith(github.ref, 'refs/tags/v') && github.repository_owner == 'dapr'
env:
ARTIFACT_DIR: ./release
DAPR_VERSION_ARTIFACT: dapr_version
HELM_PACKAGE_DIR: helm
runs-on: ubuntu-latest
steps:
- name: Create Helm charts directory
run: |
mkdir -p ${{ env.ARTIFACT_DIR }}/${{ env.HELM_PACKAGE_DIR }}
- name: download artifacts - dapr_helm_charts_package
uses: actions/download-artifact@v4
with:
name: dapr_helm_charts_package
path: ${{ env.ARTIFACT_DIR }}/${{ env.HELM_PACKAGE_DIR }}
- name: Checkout Helm Charts Repo
uses: actions/checkout@v4
env:
DAPR_HELM_REPO: dapr/helm-charts
DAPR_HELM_REPO_CODE_PATH: helm-charts
with:
repository: ${{ env.DAPR_HELM_REPO }}
ref: refs/heads/master
token: ${{ secrets.DAPR_BOT_TOKEN }}
path: ${{ env.DAPR_HELM_REPO_CODE_PATH }}
- name: Upload helm charts to Helm Repo
env:
DAPR_HELM_REPO_CODE_PATH: helm-charts
DAPR_HELM_REPO: https://dapr.github.io/helm-charts/
run: |
daprVersion=`cat ${{ env.ARTIFACT_DIR }}/${{ env.HELM_PACKAGE_DIR }}/${{ env.DAPR_VERSION_ARTIFACT }}`
cd ${{ env.ARTIFACT_DIR }}/${{ env.HELM_PACKAGE_DIR }}
cp -r `ls -A | grep -v ${{ env.DAPR_VERSION_ARTIFACT }}` $GITHUB_WORKSPACE/${{ env.DAPR_HELM_REPO_CODE_PATH }}
cd $GITHUB_WORKSPACE/${{ env.DAPR_HELM_REPO_CODE_PATH }}
helm repo index --url ${{ env.DAPR_HELM_REPO }} --merge index.yaml .
git config --global user.email "daprweb@microsoft.com"
git config --global user.name "dapr-bot"
git add --all
# Check if the dapr-${daprVersion}.tgz file is modified.
if git diff --name-only --staged | grep -q ${daprVersion}; then
# If it is, we update the Helm chart, since this is an intentional update.
git commit -m "Release - $daprVersion"
git push
else
# If not, this update was accidentally triggered by tagging a release before updating the Helm chart.
echo "::error::There is no change for ${daprVersion} Helm chart. Did you forget to update the chart version before tagging?"
            exit 1
fi
|
mikeee/dapr
|
.github/workflows/dapr.yml
|
YAML
|
mit
| 27,440 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
name: fossa
on:
push:
branches:
- master
- release-*
- feature/*
tags:
- v*
pull_request:
branches:
- master
- release-*
- feature/*
workflow_dispatch: {}
jobs:
fossa-scan:
if: github.repository_owner == 'dapr' # FOSSA is not intended to run on forks.
runs-on: ubuntu-latest
env:
FOSSA_API_KEY: b88e1f4287c3108c8751bf106fb46db6 # This is a push-only token that is safe to be exposed.
steps:
- name: "Checkout code"
uses: actions/checkout@v4
- name: "Run FOSSA Scan"
uses: fossas/fossa-action@v1.1.0 # Use a specific version if locking is preferred
with:
api-key: ${{ env.FOSSA_API_KEY }}
# REMOVE THIS STEP WHICH HAS BEEN FAILING DUE TO FOSSA SERVER ISSUES - VIEW RESULTS IN FOSSA PORTAL INSTEAD
# - name: "Run FOSSA Test"
# uses: fossas/fossa-action@v1.1.0 # Use a specific version if locking is preferred
# with:
# api-key: ${{ env.FOSSA_API_KEY }}
# run-tests: true
|
mikeee/dapr
|
.github/workflows/fossa.yml
|
YAML
|
mit
| 1,617 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
name: E2E tests on KinD
on:
  # Manual trigger
  workflow_dispatch:
  # On pull requests
  pull_request:
    branches:
      - master
      - 'release-*'
      - 'feature/*'
defaults:
  run:
    shell: bash
jobs:
  # This workflow runs our e2e tests in a local KinD cluster. Since it
  # does not require a paid cluster and special credentials, it does
  # not require /ok-to-test. That allows contributors to run E2E tests
  # in their Draft PRs without explicit approval from Dapr
  # maintainers.
  #
  # One other benefit of a fully-localized workflow is that tests are
  # running on a fresh cluster every time, thus being decoupled from
  # shared resource issues.
  #
  # However, KinD currently does not support Windows nodes so this is
  # not intended to be a complete replacement for our AKS-based E2E
  # workflows.
  e2e:
    name: e2e
    runs-on: ubuntu-latest
    env:
      REGISTRY_PORT: 5000
      REGISTRY_NAME: kind-registry
      DAPR_REGISTRY: localhost:5000/dapr
      DAPR_TAG: dev
      DAPR_NAMESPACE: dapr-tests
      # Useful for upgrade/downgrade/compatibility tests
      # TODO: Make this auto-populated based on GitHub's releases.
      DAPR_TEST_N_MINUS_1_IMAGE: "ghcr.io/dapr/daprd:1.13.0"
      DAPR_TEST_N_MINUS_2_IMAGE: "ghcr.io/dapr/daprd:1.12.5"
      # Container registry where to cache e2e test images
      DAPR_CACHE_REGISTRY: "dapre2eacr.azurecr.io"
      PULL_POLICY: IfNotPresent
      DAPR_GO_BUILD_TAGS: wfbackendsqlite
    strategy:
      fail-fast: false # Keep running if one leg fails.
      matrix:
        k8s-version:
          - v1.23.13
          - v1.24.7
          - v1.25.3
        mode:
          - ha
          - non-ha
        # Map between K8s and KinD versions.
        # This is attempting to make it a bit clearer what's being tested.
        # See: https://github.com/kubernetes-sigs/kind/releases/tag/v0.11.1
        include:
          - k8s-version: v1.23.13
            kind-version: v0.17.0
            kind-image-sha: sha256:ef453bb7c79f0e3caba88d2067d4196f427794086a7d0df8df4f019d5e336b61
            dapr-test-config-store: "postgres"
          - k8s-version: v1.24.7
            kind-version: v0.17.0
            kind-image-sha: sha256:577c630ce8e509131eab1aea12c022190978dd2f745aac5eb1fe65c0807eb315
            dapr-test-config-store: "redis"
          - k8s-version: v1.25.3
            kind-version: v0.17.0
            kind-image-sha: sha256:f52781bc0d7a19fb6c405c2af83abfeb311f130707a0e219175677e366cc45d1
            dapr-test-config-store: "redis"
        # Skip the non-ha leg on the oldest Kubernetes version.
        exclude:
          - k8s-version: v1.23.13
            mode: non-ha
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Set up Go
id: setup-go
uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
- name: Configure KinD
# Generate a KinD configuration file that uses:
# (a) a couple of worker nodes: this is needed to run both
# ZooKeeper + Kakfa, and
# (b) a local registry: Due to `kind load` performance
# https://github.com/kubernetes-sigs/kind/issues/1165, using
# a local repository speeds up the image pushes into KinD
# significantly.
run: |
cat > kind.yaml <<EOF
apiVersion: kind.x-k8s.io/v1alpha4
kind: Cluster
nodes:
- role: control-plane
image: kindest/node:${{ matrix.k8s-version }}@${{ matrix.kind-image-sha }}
- role: worker
image: kindest/node:${{ matrix.k8s-version }}@${{ matrix.kind-image-sha }}
- role: worker
image: kindest/node:${{ matrix.k8s-version }}@${{ matrix.kind-image-sha }}
- role: worker
image: kindest/node:${{ matrix.k8s-version }}@${{ matrix.kind-image-sha }}
containerdConfigPatches:
- |-
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:$REGISTRY_PORT"]
endpoint = ["http://$REGISTRY_NAME:$REGISTRY_PORT"]
EOF
# Log the generated kind.yaml for easy reference.
cat kind.yaml
# Set log target directories
echo "DAPR_CONTAINER_LOG_PATH=$GITHUB_WORKSPACE/container_logs/${{ matrix.k8s-version }}_${{ matrix.mode }}" >> $GITHUB_ENV
echo "DAPR_TEST_LOG_PATH=$GITHUB_WORKSPACE/test_logs/${{ matrix.k8s-version }}_${{ matrix.mode }}" >> $GITHUB_ENV
- name: Create KinD Cluster
uses: helm/kind-action@v1.5.0
with:
config: kind.yaml
cluster_name: kind
version: ${{ matrix.kind-version }}
- name: Get KinD info
run: |
kubectl cluster-info --context kind-kind
NODE_IP=$(kubectl get nodes \
-lkubernetes.io/hostname!=kind-control-plane \
-ojsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}')
echo "MINIKUBE_NODE_IP=$NODE_IP" >> $GITHUB_ENV
- name: Setup test output
shell: bash
run: |
export TEST_OUTPUT_FILE_PREFIX=$GITHUB_WORKSPACE/test_report
echo "TEST_OUTPUT_FILE_PREFIX=$TEST_OUTPUT_FILE_PREFIX" >> $GITHUB_ENV
- name: Setup local registry
run: |
# Run a registry.
docker run -d --restart=always \
-p $REGISTRY_PORT:$REGISTRY_PORT --name $REGISTRY_NAME registry:2
# Connect the registry to the KinD network.
docker network connect "kind" $REGISTRY_NAME
- name: Setup Helm
uses: azure/setup-helm@v1
with:
version: v3.3.4
- name: Build and push Dapr
run: |
make build-linux
make docker-build
make docker-push
- name: Build and push test apps
run: |
make build-push-e2e-app-all
- name: Setup Dapr
run: |
make setup-helm-init
make create-test-namespace
export ADDITIONAL_HELM_SET="dapr_operator.logLevel=debug,dapr_operator.watchInterval=20s"
if [[ "${{ matrix.mode }}" == "ha" ]]; then
export HA_MODE=true
else
export HA_MODE=false
fi
make docker-deploy-k8s
- name: Setup Redis
run: |
make setup-test-env-redis
- name: Setup Kafka
run: |
make setup-test-env-kafka
- name: Setup Zipkin
run: |
make setup-test-env-zipkin
- name: Setup postgres
run: |
make setup-test-env-postgres
- name: Setup test components
run: |
make setup-test-components
env:
DAPR_TEST_CONFIG_STORE: ${{ matrix.dapr-test-config-store }}
- name: Free up some diskspace
run: |
docker image prune -a -f
- name: Run tests
run: |
make test-e2e-all
env:
DAPR_TEST_CONFIG_STORE: ${{ matrix.dapr-test-config-store }}
- name: Save control plane logs
if: always()
run: |
make save-dapr-control-plane-k8s-logs
# Container log files can be bigger than the maximum file size allowed by GitHub
- name: Compress logs
if: always()
run: |
gzip --fast -r ${{ env.DAPR_CONTAINER_LOG_PATH }}
gzip --fast -r ${{ env.DAPR_TEST_LOG_PATH }}
shell: bash
- name: Upload container logs
if: always()
uses: actions/upload-artifact@v4
with:
name: ${{ matrix.k8s-version }}_${{ matrix.mode}}_container_logs
path: ${{ env.DAPR_CONTAINER_LOG_PATH }}
compression-level: 0 # Content is already compressed
- name: Upload test logs
if: always()
uses: actions/upload-artifact@v4
with:
name: ${{ matrix.k8s-version }}_${{ matrix.mode}}_test_logs
path: ${{ env.DAPR_TEST_LOG_PATH }}
compression-level: 0 # Content is already compressed
- name: Upload test results
if: always()
uses: actions/upload-artifact@v4
with:
#TODO: .json suffix can be removed from artifact name after test analytics scripts are updated
name: ${{ matrix.k8s-version }}_${{ matrix.mode }}_test_e2e.json
path: ${{ env.TEST_OUTPUT_FILE_PREFIX }}_e2e.*
|
mikeee/dapr
|
.github/workflows/kind-e2e.yaml
|
YAML
|
mit
| 8,558 |
# Runs the unit tests for the custom build tools in ./.build-tools on the
# three major platforms.
name: Test Tooling
on:
  push:
    paths: # Explicitly declare which paths
      - ".github/workflows/test-tooling.yml"
      - ".build-tools/*"
  pull_request:
    branches:
      - master
    paths: # Explicitly declare which paths
      - ".github/workflows/test-tooling.yml"
      - ".build-tools/*"
jobs:
  lint:
    name: Test (${{ matrix.os}})
    strategy:
      fail-fast: false
      matrix:
        os:
          - "ubuntu-latest"
          - "windows-latest"
          - "macos-latest"
    runs-on: ${{ matrix.os }}
    env:
      GOLANGCILINT_VER: "v1.55.2" # Make sure to bump /.build-tools/check-lint-version/main_test.go
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Setup
        uses: actions/setup-go@v5
        with:
          # Use the toolchain version pinned by the build tools module.
          go-version-file: './.build-tools/go.mod'
      - name: Tidy
        working-directory: ./.build-tools
        run: go mod tidy
      - name: Install Linter
        run: curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b "$(go env GOPATH)/bin" ${{ env.GOLANGCILINT_VER }}
      - name: Test
        working-directory: ./.build-tools
        run: go test ./...
|
mikeee/dapr
|
.github/workflows/test-tooling.yml
|
YAML
|
mit
| 1,206 |
#
# Copyright 2023 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
name: Version Skew tests
on:
  # Run when a PR is merged to master.
  push:
    branches:
      - master
  # Dispatch on external events
  repository_dispatch:
    types:
      - test-version-skew
defaults:
  run:
    shell: bash
# Jobs can be triggered with the `/test-version-skew` command. You can also
# supply a specific version to test against, overriding the current latest
# release, e.g. `/test-version-skew 1.10.5`. This is useful for testing
# against a specific patch or release candidate. This version must be a
# valid tag and artefacts be publicly available. Remember that Dapr only
# guarantees N-1.
jobs:
  # Dapr guarantees that the control plane and Dapr sidecars may have a version
  # skew of N-1 and still be fully functional. This workflow verifies this
  # guarantee holds for master and the previous release (or overridden version
  # reference).
  # This workflow runs the previous releases integration tests against the
  # matrix of master/latest-release daprd/control-plane. This ensures the
  # previous assertions hold for the current HEAD, and version skew works
  # between daprd and the control plane.
  integration-version-skew:
    name: integration-version-skew
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false # Keep running if one leg fails.
      matrix:
        mode:
          - control-plane-master
          - dapr-sidecar-master
    steps:
      - name: Set up for Dapr latest release
        run: |
          echo "DAPR_PREV_VERSION=$(curl -s https://api.github.com/repos/dapr/dapr/releases/latest | jq -r '.tag_name' | cut -c 2-)" >> $GITHUB_ENV
      - name: Set up for master
        if: github.event_name != 'repository_dispatch'
        run: |
          echo "CHECKOUT_REPO=${{ github.repository }}" >> $GITHUB_ENV
          echo "CHECKOUT_REF=refs/heads/master" >> $GITHUB_ENV
        shell: bash
      - name: Setup test output
        shell: bash
        run: |
          export TEST_OUTPUT_FILE_PREFIX=$GITHUB_WORKSPACE/test_report
          echo "TEST_OUTPUT_FILE_PREFIX=$TEST_OUTPUT_FILE_PREFIX" >> $GITHUB_ENV
      - name: Parse test payload
        if: github.event_name == 'repository_dispatch'
        uses: actions/github-script@v6.2.0
        with:
          github-token: ${{secrets.DAPR_BOT_TOKEN}}
          script: |
            const testPayload = context.payload.client_payload;
            if (testPayload) {
              var fs = require('fs');
              var envs = `CHECKOUT_REPO=${testPayload.pull_head_repo}\n`+
                `CHECKOUT_REF=${testPayload.pull_head_ref}\n`+
                `PR_NUMBER=${testPayload.issue.number}`;
              if (testPayload.previous_version) {
                envs += `\nDAPR_PREV_VERSION=${testPayload.previous_version}`;
              }
              // Set environment variables
              fs.appendFileSync(process.env.GITHUB_ENV, envs);
            }
      - name: Create PR comment
        if: env.PR_NUMBER != ''
        uses: artursouza/sticky-pull-request-comment@v2.2.0
        with:
          header: ${{ github.run_id }}-version-skew-integration-${{ matrix.mode }}-${{ env.DAPR_PREV_VERSION }}
          number: ${{ env.PR_NUMBER }}
          hide: true
          hide_classify: OUTDATED
          GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
          message: |
            # Dapr Version Skew integration test (${{ matrix.mode }} - ${{ env.DAPR_PREV_VERSION }})
            π **[Link to Action run](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})**
            Commit ref: ${{ env.CHECKOUT_REF }}
      - name: Check out code
        # Bumped from v3 to v4 to match the e2e-version-skew job below.
        uses: actions/checkout@v4
        with:
          repository: ${{ env.CHECKOUT_REPO }}
          ref: ${{ env.CHECKOUT_REF }}
      - name: Checkout last release repo
        # Bumped from v3 to v4 to match the e2e-version-skew job below.
        uses: actions/checkout@v4
        with:
          repository: dapr/dapr
          path: latest-release
          ref: v${{ env.DAPR_PREV_VERSION }}
      - name: Apply patches to latest release
        run: |
          export DAPR_LATEST_MAJOR_MINOR=$(echo ${{ env.DAPR_PREV_VERSION }} | cut -d. -f1-2)
          export DAPR_PATCH_DIR="$(pwd)/.github/scripts/version-skew-test-patches/integration/release-$DAPR_LATEST_MAJOR_MINOR/${{ matrix.mode }}"
          if [ -d "$DAPR_PATCH_DIR" ]; then
            cd latest-release
            echo "Applying patches from $DAPR_PATCH_DIR to $(pwd)"
            git apply --ignore-space-change --ignore-whitespace $DAPR_PATCH_DIR/*.patch
            git diff
          fi
      - name: Set up Go
        id: setup-go
        # Bumped from v4 to v5 to match the e2e-version-skew job below.
        uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'
      - name: Build & download binaries
        run: |
          go mod tidy -v
          make build
          mkdir -p downloads && cd downloads
          curl -so daprd_linux_amd64.tar.gz -L https://github.com/dapr/dapr/releases/download/v${{ env.DAPR_PREV_VERSION }}/daprd_linux_amd64.tar.gz
          curl -so sentry_linux_amd64.tar.gz -L https://github.com/dapr/dapr/releases/download/v${{ env.DAPR_PREV_VERSION }}/sentry_linux_amd64.tar.gz
          curl -so placement_linux_amd64.tar.gz -L https://github.com/dapr/dapr/releases/download/v${{ env.DAPR_PREV_VERSION }}/placement_linux_amd64.tar.gz
          curl -so injector_linux_amd64.tar.gz -L https://github.com/dapr/dapr/releases/download/v${{ env.DAPR_PREV_VERSION }}/injector_linux_amd64.tar.gz
          curl -so operator_linux_amd64.tar.gz -L https://github.com/dapr/dapr/releases/download/v${{ env.DAPR_PREV_VERSION }}/operator_linux_amd64.tar.gz
          tar xvf daprd_linux_amd64.tar.gz
          tar xvf sentry_linux_amd64.tar.gz
          tar xvf placement_linux_amd64.tar.gz
          tar xvf injector_linux_amd64.tar.gz
          tar xvf operator_linux_amd64.tar.gz
      - name: Setup DAPR_INTEGRATION_X_PATH - control-plane master
        if: matrix.mode == 'control-plane-master'
        run: |
          echo "DAPR_INTEGRATION_DAPRD_PATH=$(pwd)/downloads/daprd" >> $GITHUB_ENV
          echo "DAPR_INTEGRATION_PLACEMENT_PATH=$(pwd)/dist/linux_amd64/release/placement" >> $GITHUB_ENV
          echo "DAPR_INTEGRATION_OPERATOR_PATH=$(pwd)/dist/linux_amd64/release/operator" >> $GITHUB_ENV
          echo "DAPR_INTEGRATION_SENTRY_PATH=$(pwd)/dist/linux_amd64/release/sentry" >> $GITHUB_ENV
          echo "DAPR_INTEGRATION_INJECTOR_PATH=$(pwd)/dist/linux_amd64/release/injector" >> $GITHUB_ENV
      - name: Setup DAPR_INTEGRATION_X_PATH - dapr-sidecar master
        if: matrix.mode == 'dapr-sidecar-master'
        run: |
          echo "DAPR_INTEGRATION_DAPRD_PATH=$(pwd)/dist/linux_amd64/release/daprd" >> $GITHUB_ENV
          echo "DAPR_INTEGRATION_PLACEMENT_PATH=$(pwd)/downloads/placement" >> $GITHUB_ENV
          echo "DAPR_INTEGRATION_SENTRY_PATH=$(pwd)/downloads/sentry" >> $GITHUB_ENV
          echo "DAPR_INTEGRATION_OPERATOR_PATH=$(pwd)/downloads/operator" >> $GITHUB_ENV
          echo "DAPR_INTEGRATION_INJECTOR_PATH=$(pwd)/downloads/injector" >> $GITHUB_ENV
      - name: Run make test-integration
        run: cd latest-release && make test-integration
      - name: Upload test results
        if: always()
        # Previously pinned to @master (a mutable ref); v4 is the current
        # pinned major. v4 also rejects '/' in artifact names, so the former
        # "latest-release/..." name is flattened with an underscore.
        uses: actions/upload-artifact@v4
        with:
          name: latest-release_${{ matrix.mode }}_test_e2e.json
          # NOTE(review): TEST_OUTPUT_FILE_PREFIX is an absolute path, so the
          # "latest-release/" prefix looks redundant here — confirm intent.
          path: latest-release/${{ env.TEST_OUTPUT_FILE_PREFIX }}_integration.*
      - name: Update PR comment for success
        if: ${{ success() }}
        uses: artursouza/sticky-pull-request-comment@v2.2.0
        with:
          header: ${{ github.run_id }}-version-skew-integration-${{ matrix.mode }}-${{ env.DAPR_PREV_VERSION }}
          number: ${{ env.PR_NUMBER }}
          append: true
          GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
          message: |
            ## β
            Version Skew tests passed
      - name: Update PR comment for failure
        if: ${{ failure() }}
        uses: artursouza/sticky-pull-request-comment@v2.2.0
        with:
          header: ${{ github.run_id }}-version-skew-integration-${{ matrix.mode }}-${{ env.DAPR_PREV_VERSION }}
          number: ${{ env.PR_NUMBER }}
          append: true
          GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
          message: |
            ## β Version Skew tests failed
            Please check the logs for details on the error.
      - name: Update PR comment for cancellation
        if: ${{ cancelled() }}
        uses: artursouza/sticky-pull-request-comment@v2.2.0
        with:
          header: ${{ github.run_id }}-version-skew-integration-${{ matrix.mode }}-${{ env.DAPR_PREV_VERSION }}
          number: ${{ env.PR_NUMBER }}
          append: true
          GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
          message: |
            ## β οΈ Version Skew tests cancelled
            The Action has been canceled
# This workflow runs our e2e tests on KinD. Tests are performed with the
# control plane on master and side car on the previous release, and vice
# versa.
#
# The e2e tests from the previous release are run against this deployment.
e2e-version-skew:
name: e2e-version-skew
runs-on: ubuntu-latest
env:
REGISTRY_PORT: 5000
REGISTRY_NAME: kind-registry
DAPR_REGISTRY: localhost:5000/dapr
DAPR_TAG: dev
DAPR_NAMESPACE: dapr-tests
DAPR_CACHE_REGISTRY: "dapre2eacr.azurecr.io"
PULL_POLICY: Always
strategy:
fail-fast: false # Keep running if one leg fails.
matrix:
mode:
- control-plane-master
- dapr-sidecar-master
steps:
- name: Set up for Dapr lastest release
run: |
echo "DAPR_PREV_VERSION=$(curl -s https://api.github.com/repos/dapr/dapr/releases/latest | jq -r '.tag_name' | cut -c 2-)" >> $GITHUB_ENV
- name: Set up for master
if: github.event_name != 'repository_dispatch'
run: |
echo "CHECKOUT_REPO=${{ github.repository }}" >> $GITHUB_ENV
echo "CHECKOUT_REF=refs/heads/master" >> $GITHUB_ENV
shell: bash
- name: Parse test payload
if: github.event_name == 'repository_dispatch'
uses: actions/github-script@v6.2.0
with:
github-token: ${{secrets.DAPR_BOT_TOKEN}}
script: |
const testPayload = context.payload.client_payload;
if (testPayload) {
var fs = require('fs');
var envs = `CHECKOUT_REPO=${testPayload.pull_head_repo}\n`+
`CHECKOUT_REF=${testPayload.pull_head_ref}\n`+
`PR_NUMBER=${testPayload.issue.number}`;
if (testPayload.previous_version) {
envs += `\nDAPR_PREV_VERSION=${testPayload.previous_version}`;
}
// Set environment variables
fs.appendFileSync(process.env.GITHUB_ENV, envs);
}
- name: Create PR comment
if: env.PR_NUMBER != ''
uses: artursouza/sticky-pull-request-comment@v2.2.0
with:
header: ${{ github.run_id }}-version-skew-e2e-${{ matrix.mode }}-${{ env.DAPR_PREV_VERSION }}
number: ${{ env.PR_NUMBER }}
hide: true
hide_classify: OUTDATED
GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
message: |
# Dapr Version Skew e2e test (${{ matrix.mode }} - ${{ env.DAPR_PREV_VERSION }})
π **[Link to Action run](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})**
Commit ref: ${{ env.CHECKOUT_REF }}
- name: Check out code
uses: actions/checkout@v4
with:
repository: ${{ env.CHECKOUT_REPO }}
ref: ${{ env.CHECKOUT_REF }}
- name: Checkout last release repo
uses: actions/checkout@v4
with:
repository: dapr/dapr
path: latest-release
ref: v${{ env.DAPR_PREV_VERSION }}
- name: Apply patches to latest release
run: |
export DAPR_LATEST_MAJOR_MINOR=$(echo ${{ env.DAPR_PREV_VERSION }} | cut -d. -f1-2)
export DAPR_PATCH_DIR="$(pwd)/.github/scripts/version-skew-test-patches/e2e/release-$DAPR_LATEST_MAJOR_MINOR/${{ matrix.mode }}"
if [ -d "$DAPR_PATCH_DIR" ]; then
echo "Applying patches from $DAPR_PATCH_DIR"
cd latest-release
echo "Applying patches from $DAPR_PATCH_DIR to $(pwd)"
git apply --ignore-space-change --ignore-whitespace $DAPR_PATCH_DIR/*.patch
git diff
fi
- name: Set up Go
id: setup-go
uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
- name: Configure KinD
run: |
cat > kind.yaml <<EOF
apiVersion: kind.x-k8s.io/v1alpha4
kind: Cluster
nodes:
- role: control-plane
image: kindest/node:v1.25.3@sha256:f52781bc0d7a19fb6c405c2af83abfeb311f130707a0e219175677e366cc45d1
- role: worker
image: kindest/node:v1.25.3@sha256:f52781bc0d7a19fb6c405c2af83abfeb311f130707a0e219175677e366cc45d1
- role: worker
image: kindest/node:v1.25.3@sha256:f52781bc0d7a19fb6c405c2af83abfeb311f130707a0e219175677e366cc45d1
- role: worker
image: kindest/node:v1.25.3@sha256:f52781bc0d7a19fb6c405c2af83abfeb311f130707a0e219175677e366cc45d1
containerdConfigPatches:
- |-
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:$REGISTRY_PORT"]
endpoint = ["http://$REGISTRY_NAME:$REGISTRY_PORT"]
EOF
# Log the generated kind.yaml for easy reference.
cat kind.yaml
# Set log target directories
echo "DAPR_CONTAINER_LOG_PATH=$GITHUB_WORKSPACE/container_logs/v1.25.3_ha" >> $GITHUB_ENV
echo "DAPR_TEST_LOG_PATH=$GITHUB_WORKSPACE/test_logs/v1.25.3_ha" >> $GITHUB_ENV
- name: Create KinD Cluster
uses: helm/kind-action@v1.5.0
with:
config: kind.yaml
cluster_name: kind
version: v0.17.0
- name: Get KinD info
run: |
kubectl cluster-info --context kind-kind
NODE_IP=$(kubectl get nodes \
-lkubernetes.io/hostname!=kind-control-plane \
-ojsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}')
echo "MINIKUBE_NODE_IP=$NODE_IP" >> $GITHUB_ENV
- name: Setup test output
shell: bash
run: |
export TEST_OUTPUT_FILE_PREFIX=$GITHUB_WORKSPACE/test_report
echo "TEST_OUTPUT_FILE_PREFIX=$TEST_OUTPUT_FILE_PREFIX" >> $GITHUB_ENV
- name: Setup local registry
run: |
# Run a registry.
docker run -d --restart=always \
-p $REGISTRY_PORT:$REGISTRY_PORT --name $REGISTRY_NAME registry:2
# Connect the registry to the KinD network.
docker network connect "kind" $REGISTRY_NAME
- name: Setup Helm
uses: azure/setup-helm@v3
with:
version: v3.3.4
- name: Build and push Dapr
run: |
make build-linux
make docker-build
make docker-push
- name: Build and push test apps
run: |
cd latest-release && make build-push-e2e-app-all
- name: Setup Dapr - control-plane master
if: matrix.mode == 'control-plane-master'
run: |
make setup-helm-init
make create-test-namespace
export ADDITIONAL_HELM_SET="dapr_operator.logLevel=debug,dapr_sidecar_injector.image.name=ghcr.io/dapr/daprd:${{env.DAPR_PREV_VERSION}}"
export HA_MODE=true
make docker-deploy-k8s
- name: Setup Dapr - control-plane previous release
if: matrix.mode == 'dapr-sidecar-master'
run: |
make setup-helm-init
make create-test-namespace
export DAPR_CLI_LATEST_VERSION=$(curl -s https://api.github.com/repos/dapr/cli/releases/latest | jq -r '.tag_name' | cut -c 2-)
wget -q https://raw.githubusercontent.com/dapr/cli/master/install/install.sh -O - | /bin/bash -s $DAPR_CLI_LATEST_VERSION
dapr uninstall --all
dapr init -k --runtime-version ${{ env.DAPR_PREV_VERSION }} \
--set dapr_operator.logLevel=debug,dapr_sidecar_injector.image.name=localhost:5000/dapr/daprd:dev-linux-amd64 \
-n dapr-tests --enable-ha
- name: Setup Components
run: |
cd latest-release
make setup-helm-init
make setup-test-env-redis
make setup-test-env-kafka
make setup-test-env-zipkin
make setup-test-env-postgres
make setup-test-components
- name: Free up some diskspace
run: |
docker image prune -a -f
- name: Run tests
run: |
cd latest-release && make test-e2e-all
- name: Save control plane logs
if: always()
run: |
cd latest-release && make save-dapr-control-plane-k8s-logs
# Container log files can be bigger than the maximum file size allowed by GitHub
- name: Compress logs
if: always()
run: |
gzip --fast -r ${{ env.DAPR_CONTAINER_LOG_PATH }}
gzip --fast -r ${{ env.DAPR_TEST_LOG_PATH }}
shell: bash
- name: Upload container logs
if: always()
uses: actions/upload-artifact@v4
with:
name: ${{ matrix.mode}}_container_logs
path: ${{ env.DAPR_CONTAINER_LOG_PATH }}
compression-level: 0 # Content is already compressed
- name: Upload test logs
if: always()
uses: actions/upload-artifact@v4
with:
name: ${{ matrix.mode}}_test_logs
path: ${{ env.DAPR_TEST_LOG_PATH }}
compression-level: 0 # Content is already compressed
- name: Upload test results
if: always()
uses: actions/upload-artifact@v4
with:
name: ${{ matrix.mode }}_test_e2e.json
path: ${{ env.TEST_OUTPUT_FILE_PREFIX }}_e2e.*
- name: Update PR comment for success
if: ${{ success() }}
uses: artursouza/sticky-pull-request-comment@v2.2.0
with:
header: ${{ github.run_id }}-version-skew-e2e-${{ matrix.mode }}-${{ env.DAPR_PREV_VERSION }}
number: ${{ env.PR_NUMBER }}
append: true
GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
message: |
## β
Version Skew tests passed
- name: Update PR comment for failure
if: ${{ failure() }}
uses: artursouza/sticky-pull-request-comment@v2.2.0
with:
header: ${{ github.run_id }}-version-skew-e2e-${{ matrix.mode }}-${{ env.DAPR_PREV_VERSION }}
number: ${{ env.PR_NUMBER }}
append: true
GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
message: |
## β Version Skew tests failed
Please check the logs for details on the error.
- name: Update PR comment for cancellation
if: ${{ cancelled() }}
uses: artursouza/sticky-pull-request-comment@v2.2.0
with:
header: ${{ github.run_id }}-version-skew-e2e-${{ matrix.mode }}-${{ env.DAPR_PREV_VERSION }}
number: ${{ env.PR_NUMBER }}
append: true
GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
message: |
## β οΈ Version Skew tests cancelled
The Action has been canceled
|
mikeee/dapr
|
.github/workflows/version-skew.yaml
|
YAML
|
mit
| 19,266 |
# Build output
/dist
# Editor/IDE state
.idea
**/.DS_Store
# Auto-generated CRDs (ignoring the directory covers the *.yaml files inside;
# the previously-duplicated "config/crd/bases/*.yaml" entry was redundant)
config/crd/bases
github.com/
.vscode
# Visual Studio 2015/2017/2019 cache/options directory
.vs/
/vendor
**/*.log
**/.project
**/.factorypath
google
dapr.sln
test_report*
coverage.txt
# Go Workspaces (introduced in Go 1.18+)
go.work
# Directory reserved for local dev files: config files, scripts, anything for individual local dev.
.local/
|
mikeee/dapr
|
.gitignore
|
Git
|
mit
| 419 |
# options for analysis running
run:
  # default concurrency is the available CPU number
  concurrency: 4
  # timeout for analysis, e.g. 30s, 5m, default is 1m
  # (was the long-deprecated `deadline` option; `timeout` is the supported
  # replacement with identical semantics)
  timeout: 15m
  # exit code when at least one issue was found, default is 1
  issues-exit-code: 1
  # include test files or not, default is true
  tests: true
  # list of build tags, all linters use it. Default is empty list.
  build-tags:
    - unit
  # which dirs to skip: they won't be analyzed;
  # can use regexp here: generated.*, regexp is applied on full path;
  # default value is empty list, but next dirs are always skipped independently
  # from this option's value:
  #   	third_party$, testdata$, examples$, Godeps$, builtin$
  skip-dirs:
    - ^pkg.*client.*clientset.*versioned.*
    - ^pkg.*client.*informers.*externalversions.*
    - ^pkg.*proto.*
  # which files to skip: they will be analyzed, but issues from them
  # won't be reported. Default value is empty list, but there is
  # no need to include all autogenerated files, we confidently recognize
  # autogenerated files. If it's not please let us know.
  # skip-files:
  #   - ".*\\.my\\.go$"
  #   - lib/bad.go
# output configuration options
output:
  # colored-line-number|line-number|json|tab|checkstyle, default is "colored-line-number"
  format: tab
  # print lines of code with issue, default is true
  print-issued-lines: true
  # print linter name in the end of issue text, default is true
  print-linter-name: true
# all available settings of specific linters
linters-settings:
  errcheck:
    # report about not checking of errors in type assetions: `a := b.(MyStruct)`;
    # default is false: such cases aren't reported by default.
    check-type-assertions: false
    # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`;
    # default is false: such cases aren't reported by default.
    check-blank: false
    # [deprecated] comma-separated list of pairs of the form pkg:regex
    # the regex is used to ignore names within pkg. (default "fmt:.*").
    # see https://github.com/kisielk/errcheck#the-deprecated-method for details
    ignore: fmt:.*,io/ioutil:^Read.*
    # path to a file containing a list of functions to exclude from checking
    # see https://github.com/kisielk/errcheck#excluding-functions for details
    # exclude:
  funlen:
    lines: 60
    statements: 40
  govet:
    # report about shadowed variables
    # NOTE(review): the `shadow` analyzer is also listed under `disable`
    # below — confirm which of the two settings is intended to win.
    check-shadowing: true
    # settings per analyzer
    settings:
      printf: # analyzer name, run `go tool vet help` to see all analyzers
        funcs: # run `go tool vet help printf` to see available settings for `printf` analyzer
          - (github.com/golangci/golangci-lint/pkg/logutils.Log).Infof
          - (github.com/golangci/golangci-lint/pkg/logutils.Log).Warnf
          - (github.com/golangci/golangci-lint/pkg/logutils.Log).Errorf
          - (github.com/golangci/golangci-lint/pkg/logutils.Log).Fatalf
    # enable or disable analyzers by name
    enable:
      - atomicalign
    enable-all: false
    disable:
      - shadow
    disable-all: false
  golint:
    # minimal confidence for issues, default is 0.8
    min-confidence: 0.8
  revive:
    # minimal confidence for issues, default is 0.8
    confidence: 0.8
  gofmt:
    # simplify code: gofmt with `-s` option, true by default
    simplify: true
  goimports:
    # put imports beginning with prefix after 3rd-party packages;
    # it's a comma-separated list of prefixes
    local-prefixes: github.com/dapr/
  gocyclo:
    # minimal code complexity to report, 30 by default (but we recommend 10-20)
    min-complexity: 10
  gocognit:
    # minimal code complexity to report, 30 by default (but we recommend 10-20)
    min-complexity: 10
  maligned:
    # print struct with more effective memory layout or not, false by default
    suggest-new: true
  dupl:
    # tokens count to trigger issue, 150 by default
    threshold: 100
  goconst:
    # minimal length of string constant, 3 by default
    min-len: 3
    # minimal occurrences count to trigger, 3 by default
    min-occurrences: 5
  depguard:
    # Each denied package maps to the in-repo or vetted replacement that
    # must be used instead.
    rules:
      master:
        deny:
          - pkg: "github.com/Sirupsen/logrus"
            desc: "must use github.com/dapr/kit/logger"
          - pkg: "github.com/agrea/ptr"
            desc: "must use github.com/dapr/kit/ptr"
          - pkg: "go.uber.org/atomic"
            desc: "must use sync/atomic"
          - pkg: "golang.org/x/net/context"
            desc: "must use context"
          - pkg: "github.com/pkg/errors"
            desc: "must use standard library (errors package and/or fmt.Errorf)"
          - pkg: "github.com/go-chi/chi$"
            desc: "must use github.com/go-chi/chi/v5"
          - pkg: "github.com/cenkalti/backoff$"
            desc: "must use github.com/cenkalti/backoff/v4"
          - pkg: "github.com/cenkalti/backoff/v2"
            desc: "must use github.com/cenkalti/backoff/v4"
          - pkg: "github.com/cenkalti/backoff/v3"
            desc: "must use github.com/cenkalti/backoff/v4"
          - pkg: "github.com/benbjohnson/clock"
            desc: "must use k8s.io/utils/clock"
          - pkg: "github.com/ghodss/yaml"
            desc: "must use sigs.k8s.io/yaml"
          - pkg: "gopkg.in/yaml.v2"
            desc: "must use gopkg.in/yaml.v3"
          - pkg: "github.com/golang-jwt/jwt"
            desc: "must use github.com/lestrrat-go/jwx/v2"
          - pkg: "github.com/golang-jwt/jwt/v2"
            desc: "must use github.com/lestrrat-go/jwx/v2"
          - pkg: "github.com/golang-jwt/jwt/v3"
            desc: "must use github.com/lestrrat-go/jwx/v2"
          - pkg: "github.com/golang-jwt/jwt/v4"
            desc: "must use github.com/lestrrat-go/jwx/v2"
          # pkg: Commonly auto-completed by gopls
          - pkg: "github.com/gogo/status"
            desc: "must use google.golang.org/grpc/status"
          - pkg: "github.com/gogo/protobuf"
            desc: "must use google.golang.org/protobuf"
          - pkg: "github.com/lestrrat-go/jwx/jwa"
            desc: "must use github.com/lestrrat-go/jwx/v2"
          - pkg: "github.com/lestrrat-go/jwx/jwt"
            desc: "must use github.com/lestrrat-go/jwx/v2"
          - pkg: "github.com/labstack/gommon/log"
            desc: "must use github.com/dapr/kit/logger"
          - pkg: "github.com/gobuffalo/logger"
            desc: "must use github.com/dapr/kit/logger"
          - pkg: "k8s.io/utils/pointer"
            desc: "must use github.com/dapr/kit/ptr"
          - pkg: "k8s.io/utils/ptr"
            desc: "must use github.com/dapr/kit/ptr"
  misspell:
    # Correct spellings using locale preferences for US or UK.
    # Default is to use a neutral variety of English.
    # Setting locale to US will correct the British spelling of 'colour' to 'color'.
    # locale: default
    ignore-words:
      - someword
  lll:
    # max line length, lines longer will be reported. Default is 120.
    # '\t' is counted as 1 character by default, and can be changed with the tab-width option
    line-length: 120
    # tab width in spaces. Default to 1.
    tab-width: 1
  unused:
    # treat code as a program (not a library) and report unused exported identifiers; default is false.
    # XXX: if you enable this setting, unused will report a lot of false-positives in text editors:
    # if it's called for subdir of a project it can't find funcs usages. All text editor integrations
    # with golangci-lint call it on a directory with the changed file.
    check-exported: false
  unparam:
    # Inspect exported functions, default is false. Set to true if no external program/library imports your code.
    # XXX: if you enable this setting, unparam will report a lot of false-positives in text editors:
    # if it's called for subdir of a project it can't find external interfaces. All text editor integrations
    # with golangci-lint call it on a directory with the changed file.
    check-exported: false
  nakedret:
    # make an issue if func has more lines of code than this setting and it has naked returns; default is 30
    max-func-lines: 30
  prealloc:
    # XXX: we don't recommend using this linter before doing performance profiling.
    # For most programs usage of prealloc will be a premature optimization.
    # Report preallocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them.
    # True by default.
    simple: true
    range-loops: true # Report preallocation suggestions on range loops, true by default
    for-loops: false # Report preallocation suggestions on for loops, false by default
  gocritic:
    # Which checks should be enabled; can't be combined with 'disabled-checks';
    # See https://go-critic.github.io/overview#checks-overview
    # To check which checks are enabled run `GL_DEBUG=gocritic golangci-lint run`
    # By default list of stable checks is used.
    # enabled-checks:
    # Which checks should be disabled; can't be combined with 'enabled-checks'; default is empty
    disabled-checks:
      - regexpMust
      - rangeValCopy
      - hugeParam
      - ifElseChain
      - singleCaseSwitch
      - exitAfterDefer
    # Enable multiple checks by tags, run `GL_DEBUG=gocritic golangci-lint run` to see all tags and checks.
    # Empty list by default. See https://github.com/go-critic/go-critic#usage -> section "Tags".
    enabled-tags:
      - performance
    settings: # settings passed to gocritic
      captLocal: # must be valid enabled check name
        paramsOnly: true
  godox:
    # report any comments starting with keywords, this is useful for TODO or FIXME comments that
    # might be left in the code accidentally and should be resolved before merging
    keywords: # default keywords are TODO, BUG, and FIXME, these can be overwritten by this setting
      - NOTE
      - OPTIMIZE # marks code that should be optimized before merging
      - HACK # marks hack-arounds that should be removed before merging
  godot:
    exclude:
      - 'nosec'
    capital: false
    scope: all
  dogsled:
    # checks assignments with too many blank identifiers; default is 2
    max-blank-identifiers: 2
  whitespace:
    multi-if: false # Enforces newlines (or comments) after every multi-line if statement
    multi-func: false # Enforces newlines (or comments) after every multi-line function signature
wsl:
# If true append is only allowed to be cuddled if appending value is
# matching variables, fields or types on line above. Default is true.
strict-append: true
# Allow calls and assignments to be cuddled as long as the lines have any
# matching variables, fields or types. Default is true.
allow-assign-and-call: true
# Allow multiline assignments to be cuddled. Default is true.
allow-multiline-assign: true
# Allow case blocks to end with a whitespace.
allow-case-trailing-whitespace: true
# Allow declarations (var) to be cuddled.
allow-cuddle-declarations: false
linters:
fast: false
enable-all: true
disable:
# TODO Enforce the below linters later
- nosnakecase
- musttag
- dupl
- errcheck
- funlen
- gochecknoglobals
- gochecknoinits
- gocyclo
- gocognit
- godox
- interfacer
- lll
- maligned
- scopelint
- unparam
- wsl
- gomnd
- testpackage
- goerr113
- nestif
- nlreturn
- exhaustive
- exhaustruct
- noctx
- gci
- golint
- tparallel
- paralleltest
- wrapcheck
- tagliatelle
- ireturn
- exhaustivestruct
- errchkjson
- contextcheck
- gomoddirectives
- godot
- cyclop
- varnamelen
- errorlint
- forcetypeassert
- ifshort
- maintidx
- nilnil
- predeclared
- tenv
- thelper
- wastedassign
- containedctx
- gosimple
- nonamedreturns
- asasalint
- rowserrcheck
- sqlclosecheck
- structcheck
- varcheck
- deadcode
- inamedparam
- tagalign
|
mikeee/dapr
|
.golangci.yml
|
YAML
|
mit
| 11,904 |
tests/apps/*/obj/Debug
tests/apps/*/obj/Release
grafana/*.json
|
mikeee/dapr
|
.prettierignore
|
none
|
mit
| 63 |
{
"trailingComma": "es5",
"tabWidth": 4,
"semi": false,
"singleQuote": true
}
|
mikeee/dapr
|
.prettierrc.json
|
JSON
|
mit
| 94 |
# These owners are the maintainers and approvers of this repo
* @dapr/maintainers-dapr @dapr/approvers-dapr
|
mikeee/dapr
|
CODEOWNERS
|
none
|
mit
| 114 |
# Contribution Guidelines
Thank you for your interest in Dapr!
This project welcomes contributions and suggestions. Most contributions require you to signoff on your commits via
the Developer Certificate of Origin (DCO). When you submit a pull request, a DCO-bot will automatically determine
whether you need to provide signoff for your commit. Please follow the instructions provided by DCO-bot, as pull
requests cannot be merged until the author(s) have provided signoff to fulfill the DCO requirement.
You may find more information on the DCO requirements [below](#developer-certificate-of-origin-signing-your-work).
This project has adopted the [Contributor Covenant Code of Conduct](https://github.com/dapr/community/blob/master/CODE-OF-CONDUCT.md).
Contributions come in many forms: submitting issues, writing code, participating in discussions and community calls.
This document provides the guidelines for how to contribute to the Dapr project.
## Issues
This section describes the guidelines for submitting issues
### Issue Types
There are 4 types of issues:
- Issue/Bug: You've found a bug with the code, and want to report it, or create an issue to track the bug.
- Issue/Discussion: You have something on your mind, which requires input from others in a discussion, before it eventually manifests as a proposal.
- Issue/Proposal: Used for items that propose a new idea or functionality. This allows feedback from others before code is written.
- Issue/Question: Use this issue type, if you need help or have a question.
### Before You File
Before you file an issue, make sure you've checked the following:
1. Is it the right repository?
- The Dapr project is distributed across multiple repositories. Check the list of [repositories](https://github.com/dapr) if you aren't sure which repo is the correct one.
1. Check for existing issues
- Before you create a new issue, please do a search in [open issues](https://github.com/dapr/dapr/issues) to see if the issue or feature request has already been filed.
- If you find your issue already exists, make relevant comments and add your [reaction](https://github.com/blog/2119-add-reaction-to-pull-requests-issues-and-comments). Use a reaction:
- 👍 up-vote
- 👎 down-vote
1. For bugs
- Check it's not an environment issue. For example, if running on Kubernetes, make sure prerequisites are in place. (state stores, bindings, etc.)
- You have as much data as possible. This usually comes in the form of logs and/or stacktrace. If running on Kubernetes or other environment, look at the logs of the Dapr services (runtime, operator, placement service). More details on how to get logs can be found [here](https://docs.dapr.io/operations/troubleshooting/logs-troubleshooting/).
1. For proposals
- Many changes to the Dapr runtime may require changes to the API. In that case, the best place to discuss the potential feature is the main [Dapr repo](https://github.com/dapr/dapr).
- Other examples could include bindings, state stores or entirely new components.
## Contributing to Dapr
This section describes the guidelines for contributing code / docs to Dapr.
### Pull Requests
All contributions come through pull requests. To submit a proposed change, we recommend following this workflow:
1. Make sure there's an issue (bug or proposal) raised, which sets the expectations for the contribution you are about to make.
1. Fork the relevant repo and create a new branch
1. Create your change
- Code changes require tests
- Make sure to run the linters to check and format the code:
- `make lint` executes the linter for Go code
- `make me prettier` uses Prettier to format JavaScript / JSON files (needed only if you've modified those)
1. Update relevant documentation for the change
1. Commit with [DCO sign-off](#developer-certificate-of-origin-signing-your-work) and open a PR
1. Wait for the CI process to finish and make sure all checks are green
1. A maintainer of the project will be assigned, and you can expect a review within a few days
#### Use work-in-progress PRs for early feedback
A good way to communicate before investing too much time is to create a "Work-in-progress" PR and share it with your reviewers. The standard way of doing this is to add a "[WIP]" prefix in your PR's title and assign the **do-not-merge** label. This will let people looking at your PR know that it is not well baked yet.
### Developer Certificate of Origin: Signing your work
#### Every commit needs to be signed
The Developer Certificate of Origin (DCO) is a lightweight way for contributors to certify that they wrote or otherwise have the right to submit the code they are contributing to the project. Here is the full text of the [DCO](https://developercertificate.org/), reformatted for readability:
```
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or
(b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or
(c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it.
(d) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved.
```
Contributors sign-off that they adhere to these requirements by adding a `Signed-off-by` line to commit messages.
```text
This is my commit message
Signed-off-by: Random J Developer <random@developer.example.org>
```
Git even has a `-s` command line option to append this automatically to your commit message:
```sh
git commit -s -m 'This is my commit message'
```
Each Pull Request is checked whether or not commits in a Pull Request do contain a valid Signed-off-by line.
#### I didn't sign my commit, now what?!
No worries - You can easily replay your changes, sign them and force push them!
```sh
git checkout <branch-name>
git commit --amend --no-edit --signoff
git push --force-with-lease <remote-name> <branch-name>
```
### Use of Third-party code
- Third-party code must include licenses.
### Available GitHub Slash Commands
- Available GitHub Slash Commands listed in [command-reference](https://docs.dapr.io/contributing/daprbot/#command-reference).
**Thank You!** - Your contributions to open source, large or small, make projects like this possible. Thank you for taking the time to contribute.
## Code of Conduct
This project has adopted the [Contributor Covenant Code of Conduct](https://github.com/dapr/community/blob/master/CODE-OF-CONDUCT.md)
|
mikeee/dapr
|
CONTRIBUTING.md
|
Markdown
|
mit
| 7,191 |
# Governance
## Project Maintainers
[Project maintainers](https://github.com/dapr/community/blob/master/MAINTAINERS.md) are responsible for activities around maintaining and updating Dapr. Final decisions on the project reside with the project maintainers.
Maintainers MUST remain active. If they are unresponsive for >3 months, they will be automatically removed unless a [super-majority](https://en.wikipedia.org/wiki/Supermajority#Two-thirds_vote) of the other project maintainers agrees to extend the period to be greater than 3 months.
New maintainers can be added to the project by a [super-majority](https://en.wikipedia.org/wiki/Supermajority#Two-thirds_vote) vote of the existing maintainers. A potential maintainer may be nominated by an existing maintainer. A vote is conducted in private between the current maintainers over the course of a one week voting period. At the end of the week, votes are counted and a pull request is made on the repo adding the new maintainer to the [CODEOWNERS](CODEOWNERS) file.
A maintainer may step down by submitting an [issue](https://github.com/dapr/dapr/issues/new) stating their intent.
Changes to this governance document require a pull request with approval from a [super-majority](https://en.wikipedia.org/wiki/Supermajority#Two-thirds_vote) of the current maintainers.
## Code of Conduct
This project has adopted the [Contributor Covenant Code of Conduct](https://github.com/dapr/community/blob/master/CODE-OF-CONDUCT.md)
|
mikeee/dapr
|
GOVERNANCE.md
|
Markdown
|
mit
| 1,482 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
# Variables #
################################################################################
export GO111MODULE ?= on
export GOPROXY ?= https://proxy.golang.org
export GOSUMDB ?= sum.golang.org
GIT_COMMIT = $(shell git rev-list -1 HEAD)
GIT_VERSION ?= $(shell git describe --always --abbrev=7 --dirty)
# By default, disable CGO_ENABLED. See the details on https://golang.org/cmd/cgo
CGO ?= 0
BINARIES ?= daprd placement operator injector sentry
HA_MODE ?= false
# Force in-memory log for placement
FORCE_INMEM ?= true
# Dapr sidecar "flavor" build tag:
# allcomponents - (default) includes all components in Dapr sidecar
# stablecomponents - includes all stable components in Dapr sidecar
DAPR_SIDECAR_FLAVOR ?= allcomponents
# Additional build tags
DAPR_GO_BUILD_TAGS ?=
ifneq ($(DAPR_GO_BUILD_TAGS),)
DAPR_GO_BUILD_TAGS := $(DAPR_GO_BUILD_TAGS),$(DAPR_SIDECAR_FLAVOR)
else
DAPR_GO_BUILD_TAGS := $(DAPR_SIDECAR_FLAVOR)
endif
# Add latest tag if LATEST_RELEASE is true
LATEST_RELEASE ?=
PROTOC ?=protoc
# Version of "protoc" to use
# We must also specify a protobuf "suite" version from https://github.com/protocolbuffers/protobuf/releases
PROTOC_VERSION = 24.4
PROTOBUF_SUITE_VERSION = 24.4
# name of protoc-gen-go when protoc-gen-go --version is run.
PROTOC_GEN_GO_NAME = "protoc-gen-go"
ifdef REL_VERSION
DAPR_VERSION := $(REL_VERSION)
else
DAPR_VERSION := edge
endif
LOCAL_ARCH := $(shell uname -m)
ifeq ($(LOCAL_ARCH),x86_64)
TARGET_ARCH_LOCAL=amd64
else ifeq ($(shell echo $(LOCAL_ARCH) | head -c 5),armv8)
TARGET_ARCH_LOCAL=arm64
else ifeq ($(shell echo $(LOCAL_ARCH) | head -c 4),armv)
TARGET_ARCH_LOCAL=arm
else ifeq ($(shell echo $(LOCAL_ARCH) | head -c 5),arm64)
TARGET_ARCH_LOCAL=arm64
else ifeq ($(shell echo $(LOCAL_ARCH) | head -c 7),aarch64)
TARGET_ARCH_LOCAL=arm64
else
TARGET_ARCH_LOCAL=amd64
endif
export GOARCH ?= $(TARGET_ARCH_LOCAL)
ifeq ($(GOARCH),amd64)
LATEST_TAG?=latest
else
LATEST_TAG?=latest-$(GOARCH)
endif
LOCAL_OS := $(shell uname)
ifeq ($(LOCAL_OS),Linux)
TARGET_OS_LOCAL = linux
else ifeq ($(LOCAL_OS),Darwin)
TARGET_OS_LOCAL = darwin
PATH := $(PATH):$(HOME)/go/bin/darwin_$(GOARCH)
else
TARGET_OS_LOCAL = windows
PROTOC_GEN_GO_NAME := "protoc-gen-go.exe"
endif
export GOOS ?= $(TARGET_OS_LOCAL)
PROTOC_GEN_GO_VERSION = v1.32.0
PROTOC_GEN_GO_NAME+= $(PROTOC_GEN_GO_VERSION)
PROTOC_GEN_GO_GRPC_VERSION = 1.3.0
ifeq ($(TARGET_OS_LOCAL),windows)
BUILD_TOOLS_BIN ?= build-tools.exe
BUILD_TOOLS ?= ./.build-tools/$(BUILD_TOOLS_BIN)
RUN_BUILD_TOOLS ?= cd .build-tools; go.exe run .
else
BUILD_TOOLS_BIN ?= build-tools
BUILD_TOOLS ?= ./.build-tools/$(BUILD_TOOLS_BIN)
RUN_BUILD_TOOLS ?= cd .build-tools; GOOS=$(TARGET_OS_LOCAL) GOARCH=$(TARGET_ARCH_LOCAL) go run .
endif
# Default docker container and e2e test targets.
TARGET_OS ?= linux
TARGET_ARCH ?= amd64
TEST_OUTPUT_FILE_PREFIX ?= ./test_report
GOLANGCI_LINT_TAGS=allcomponents,subtlecrypto
ifeq ($(GOOS),windows)
BINARY_EXT_LOCAL:=.exe
GOLANGCI_LINT:=golangci-lint.exe
export ARCHIVE_EXT = .zip
else
BINARY_EXT_LOCAL:=
GOLANGCI_LINT:=golangci-lint
export ARCHIVE_EXT = .tar.gz
endif
export BINARY_EXT ?= $(BINARY_EXT_LOCAL)
OUT_DIR := ./dist
# Helm template and install setting
HELM:=helm
RELEASE_NAME?=dapr
DAPR_NAMESPACE?=dapr-system
DAPR_MTLS_ENABLED?=true
HELM_CHART_ROOT:=./charts
HELM_CHART_DIR:=$(HELM_CHART_ROOT)/dapr
HELM_OUT_DIR:=$(OUT_DIR)/install
HELM_MANIFEST_FILE:=$(HELM_OUT_DIR)/$(RELEASE_NAME).yaml
HELM_REGISTRY?=daprio.azurecr.io
################################################################################
# Go build details #
################################################################################
BASE_PACKAGE_NAME := github.com/dapr/dapr
LOGGER_PACKAGE_NAME := github.com/dapr/kit/logger
# Comma-separated list of features to enable
ENABLED_FEATURES ?=
DEFAULT_LDFLAGS:=-X $(BASE_PACKAGE_NAME)/pkg/buildinfo.gitcommit=$(GIT_COMMIT) \
-X $(BASE_PACKAGE_NAME)/pkg/buildinfo.gitversion=$(GIT_VERSION) \
-X $(BASE_PACKAGE_NAME)/pkg/buildinfo.version=$(DAPR_VERSION) \
-X $(LOGGER_PACKAGE_NAME).DaprVersion=$(DAPR_VERSION)
ifneq ($(ENABLED_FEATURES),)
DEFAULT_LDFLAGS += -X $(BASE_PACKAGE_NAME)/pkg/buildinfo.features=$(ENABLED_FEATURES)
endif
ifeq ($(origin DEBUG), undefined)
BUILDTYPE_DIR:=release
LDFLAGS:="$(DEFAULT_LDFLAGS) -s -w"
else ifeq ($(DEBUG),0)
BUILDTYPE_DIR:=release
LDFLAGS:="$(DEFAULT_LDFLAGS) -s -w"
else
BUILDTYPE_DIR:=debug
GCFLAGS:=-gcflags="all=-N -l"
LDFLAGS:="$(DEFAULT_LDFLAGS)"
$(info Build with debugger information)
endif
DAPR_OUT_DIR := $(OUT_DIR)/$(GOOS)_$(GOARCH)/$(BUILDTYPE_DIR)
DAPR_LINUX_OUT_DIR := $(OUT_DIR)/linux_$(GOARCH)/$(BUILDTYPE_DIR)
################################################################################
# Target: build #
################################################################################
.PHONY: build
DAPR_BINS:=$(foreach ITEM,$(BINARIES),$(DAPR_OUT_DIR)/$(ITEM)$(BINARY_EXT))
build: $(DAPR_BINS)
# Generate builds for dapr binaries for the target
# Params:
# $(1): the binary name for the target
# $(2): the binary main directory
# $(3): the target os
# $(4): the target arch
# $(5): the output directory
define genBinariesForTarget
.PHONY: $(5)/$(1)
$(5)/$(1):
CGO_ENABLED=$(CGO) GOOS=$(3) GOARCH=$(4) go build $(GCFLAGS) -ldflags=$(LDFLAGS) -tags=$(DAPR_GO_BUILD_TAGS) \
-o $(5)/$(1) $(2)/;
endef
# Generate binary targets
$(foreach ITEM,$(BINARIES),$(eval $(call genBinariesForTarget,$(ITEM)$(BINARY_EXT),./cmd/$(ITEM),$(GOOS),$(GOARCH),$(DAPR_OUT_DIR))))
################################################################################
# Target: build-linux #
################################################################################
BUILD_LINUX_BINS:=$(foreach ITEM,$(BINARIES),$(DAPR_LINUX_OUT_DIR)/$(ITEM))
build-linux: $(BUILD_LINUX_BINS)
# Generate linux binaries targets to build linux docker image
ifneq ($(GOOS), linux)
$(foreach ITEM,$(BINARIES),$(eval $(call genBinariesForTarget,$(ITEM),./cmd/$(ITEM),linux,$(GOARCH),$(DAPR_LINUX_OUT_DIR))))
endif
################################################################################
# Target: archive #
################################################################################
ARCHIVE_OUT_DIR ?= $(DAPR_OUT_DIR)
ARCHIVE_FILE_EXTS:=$(foreach ITEM,$(BINARIES),archive-$(ITEM)$(ARCHIVE_EXT))
ARCHIVE_FILE_FLAVOR_EXTS:=$(foreach ITEM,$(BINARIES),archive-$(ITEM)-$(DAPR_SIDECAR_FLAVOR)$(ARCHIVE_EXT))
archive: $(ARCHIVE_FILE_EXTS)
archive-flavor: $(ARCHIVE_FILE_FLAVOR_EXTS)
# Generate archive files for each binary
# $(1): the binary name to be archived
# $(2): the archived file output directory
define genArchiveBinary
ifeq ($(GOOS),windows)
archive-$(1).zip:
7z.exe a -tzip "$(2)\\$(1)_$(GOOS)_$(GOARCH)$(ARCHIVE_EXT)" "$(DAPR_OUT_DIR)\\$(1)$(BINARY_EXT)"
archive-$(1)-$(3).zip:
7z.exe a -tzip "$(2)\\$(1)_$(GOOS)_$(GOARCH)-$(3)$(ARCHIVE_EXT)" "$(DAPR_OUT_DIR)\\$(1)$(BINARY_EXT)"
else
archive-$(1).tar.gz:
tar czf "$(2)/$(1)_$(GOOS)_$(GOARCH)$(ARCHIVE_EXT)" -C "$(DAPR_OUT_DIR)" "$(1)$(BINARY_EXT)"
archive-$(1)-$(3).tar.gz:
tar czf "$(2)/$(1)_$(GOOS)_$(GOARCH)-$(3)$(ARCHIVE_EXT)" -C "$(DAPR_OUT_DIR)" "$(1)$(BINARY_EXT)"
endif
endef
# Generate archive-*.[zip|tar.gz] targets
$(foreach ITEM,$(BINARIES),$(eval $(call genArchiveBinary,$(ITEM),$(ARCHIVE_OUT_DIR),$(DAPR_SIDECAR_FLAVOR))))
################################################################################
# Target: manifest-gen #
################################################################################
# Generate helm chart manifest
manifest-gen: dapr.yaml
dapr.yaml: check-docker-env
$(info Generating helm manifest $(HELM_MANIFEST_FILE)...)
@mkdir -p $(HELM_OUT_DIR)
$(HELM) template \
--include-crds=true --set global.ha.enabled=$(HA_MODE) --set dapr_config.dapr_config_chart_included=false --set-string global.tag=$(DAPR_TAG) --set-string global.registry=$(DAPR_REGISTRY) $(HELM_CHART_DIR) > $(HELM_MANIFEST_FILE)
################################################################################
# Target: upload-helmchart #
################################################################################
# Upload helm charts to Helm Registry
upload-helmchart:
export HELM_EXPERIMENTAL_OCI=1; \
$(HELM) chart save ${HELM_CHART_ROOT}/${RELEASE_NAME} ${HELM_REGISTRY}/${HELM}/${RELEASE_NAME}:${DAPR_VERSION}; \
$(HELM) chart push ${HELM_REGISTRY}/${HELM}/${RELEASE_NAME}:${DAPR_VERSION}
################################################################################
# Target: docker-deploy-k8s #
################################################################################
PULL_POLICY?=Always
ADDITIONAL_HELM_SET ?= ""
ifneq ($(ADDITIONAL_HELM_SET),)
ADDITIONAL_HELM_SET := --set $(ADDITIONAL_HELM_SET)
endif
ifeq ($(ONLY_DAPR_IMAGE),true)
ADDITIONAL_HELM_SET := $(ADDITIONAL_HELM_SET) \
--set dapr_operator.image.name=$(RELEASE_NAME) \
--set dapr_placement.image.name=$(RELEASE_NAME) \
--set dapr_sentry.image.name=$(RELEASE_NAME) \
--set dapr_sidecar_injector.image.name=$(RELEASE_NAME) \
--set dapr_sidecar_injector.injectorImage.name=$(RELEASE_NAME)
endif
docker-deploy-k8s: check-docker-env check-arch
$(info Deploying ${DAPR_REGISTRY}/${RELEASE_NAME}:${DAPR_TAG} to the current K8S context...)
$(HELM) upgrade --install \
$(RELEASE_NAME) --namespace=$(DAPR_NAMESPACE) --wait --timeout 5m0s \
--set global.ha.enabled=$(HA_MODE) --set-string global.tag=$(BUILD_TAG) \
--set-string global.registry=$(DAPR_REGISTRY) --set global.logAsJson=true \
--set global.daprControlPlaneOs=$(TARGET_OS) --set global.daprControlPlaneArch=$(TARGET_ARCH) \
--set dapr_placement.logLevel=debug --set dapr_sentry.logLevel=debug \
--set dapr_sidecar_injector.sidecarImagePullPolicy=$(PULL_POLICY) \
--set global.imagePullPolicy=$(PULL_POLICY) --set global.imagePullSecrets=${DAPR_TEST_REGISTRY_SECRET} \
--set global.mtls.enabled=${DAPR_MTLS_ENABLED} \
--set dapr_placement.cluster.forceInMemoryLog=$(FORCE_INMEM) \
$(ADDITIONAL_HELM_SET) $(HELM_CHART_DIR)
################################################################################
# Target: archive #
################################################################################
release: build archive
release-flavor: build archive-flavor
################################################################################
# Target: test #
################################################################################
.PHONY: test
test: test-deps
CGO_ENABLED=$(CGO) \
gotestsum \
--jsonfile $(TEST_OUTPUT_FILE_PREFIX)_unit.json \
--format pkgname-and-test-fails \
-- \
./pkg/... ./utils/... ./cmd/... \
$(COVERAGE_OPTS) --tags=unit,allcomponents
CGO_ENABLED=$(CGO) \
go test --tags=allcomponents ./tests/...
################################################################################
# Target: test-race #
################################################################################
# Note that we are explicitly maintaining an allow-list of packages that should be tested
# with "-race", as many packages aren't passing those tests yet.
# Eventually, the goal is to be able to have all packages pass tests with "-race"
# Note: CGO is required for tests with "-race"
TEST_WITH_RACE=./pkg/acl/... \
./pkg/actors \
./pkg/apis/... \
./pkg/apphealth/... \
./pkg/buildinfo/... \
./pkg/channel/... \
./pkg/client/... \
./pkg/components/... \
./pkg/config/... \
./pkg/cors/... \
./pkg/diagnostics/... \
./pkg/encryption/... \
./pkg/expr/... \
./pkg/grpc/... \
./pkg/health/... \
./pkg/http/... \
./pkg/httpendpoint/... \
./pkg/injector/... \
./pkg/messages/... \
./pkg/messaging/... \
./pkg/metrics/... \
./pkg/middleware/... \
./pkg/modes/... \
./pkg/operator/... \
./pkg/outbox/... \
./pkg/placement/... \
./pkg/proto/... \
./pkg/retry/... \
./pkg/resiliency/... \
./pkg/runtime/... \
./pkg/scopes/... \
./pkg/security/... \
./pkg/sentry/... \
./pkg/validation/... \
./utils/...
.PHONY: test-race
test-race:
CGO_ENABLED=1 echo "$(TEST_WITH_RACE)" | xargs \
go test -tags="allcomponents unit" -race
################################################################################
# Target: test-integration #
################################################################################
.PHONY: test-integration
test-integration: test-deps
CGO_ENABLED=1 gotestsum \
--jsonfile $(TEST_OUTPUT_FILE_PREFIX)_integration.json \
--format testname \
-- \
./tests/integration -timeout=20m -count=1 -v -tags="integration" -integration-parallel=false
.PHONY: test-integration-parallel
test-integration-parallel: test-deps
CGO_ENABLED=1 gotestsum \
--jsonfile $(TEST_OUTPUT_FILE_PREFIX)_integration.json \
--format testname \
-- \
./tests/integration -timeout=20m -count=1 -v -tags="integration" -integration-parallel=true
################################################################################
# Target: lint #
################################################################################
# Please use golangci-lint version v1.55.2 , otherwise you might encounter errors.
# You can download version v1.55.2 at https://github.com/golangci/golangci-lint/releases/tag/v1.55.2
.PHONY: lint
lint: check-linter
$(GOLANGCI_LINT) run --build-tags=$(GOLANGCI_LINT_TAGS) --timeout=20m
################################################################################
# Target: check-linter #
################################################################################
.SILENT: check-linter # Silence output other than the application run
.PHONY: check-linter
check-linter:
$(RUN_BUILD_TOOLS) check-linter
################################################################################
# Target: modtidy-all #
################################################################################
MODFILES := $(shell find . -name go.mod)
define modtidy-target
.PHONY: modtidy-$(1)
modtidy-$(1):
cd $(shell dirname $(1)); CGO_ENABLED=$(CGO) go mod tidy -compat=1.22.3; cd -
endef
# Generate modtidy target action for each go.mod file
$(foreach MODFILE,$(MODFILES),$(eval $(call modtidy-target,$(MODFILE))))
# Enumerate all generated modtidy targets
TIDY_MODFILES:=$(foreach ITEM,$(MODFILES),modtidy-$(ITEM))
# Define modtidy-all action trigger to run make on all generated modtidy targets
.PHONY: modtidy-all
modtidy-all: $(TIDY_MODFILES)
################################################################################
# Target: modtidy #
################################################################################
.PHONY: modtidy
modtidy:
go mod tidy
################################################################################
# Target: format #
################################################################################
.PHONY: format
format: modtidy-all
gofumpt -l -w . && goimports -local github.com/dapr/ -w $(shell find ./pkg -type f -name '*.go' -not -path "./pkg/proto/*")
################################################################################
# Target: check #
################################################################################
.PHONY: check
check: format test lint
git status && [[ -z `git status -s` ]]
################################################################################
# Target: init-proto #
################################################################################
.PHONY: init-proto
init-proto:
go install google.golang.org/protobuf/cmd/protoc-gen-go@$(PROTOC_GEN_GO_VERSION)
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v$(PROTOC_GEN_GO_GRPC_VERSION)
################################################################################
# Target: gen-proto #
################################################################################
GRPC_PROTOS:=$(shell ls dapr/proto)
PROTO_PREFIX:=github.com/dapr/dapr
# Generate archive files for each binary
# $(1): the binary name to be archived
define genProtoc
.PHONY: gen-proto-$(1)
gen-proto-$(1):
$(PROTOC) --go_out=. --go_opt=module=$(PROTO_PREFIX) --go-grpc_out=. --go-grpc_opt=require_unimplemented_servers=false,module=$(PROTO_PREFIX) ./dapr/proto/$(1)/v1/*.proto
endef
$(foreach ITEM,$(GRPC_PROTOS),$(eval $(call genProtoc,$(ITEM))))
GEN_PROTOS:=$(foreach ITEM,$(GRPC_PROTOS),gen-proto-$(ITEM))
.PHONY: gen-proto
gen-proto: check-proto-version $(GEN_PROTOS) modtidy
################################################################################
# Target: get-components-contrib #
################################################################################
.PHONY: get-components-contrib
get-components-contrib:
go get github.com/dapr/components-contrib@master
make modtidy-all
################################################################################
# Target: check-diff #
################################################################################
.PHONY: check-diff
check-diff:
git diff --exit-code ./go.mod # check no changes
git diff --exit-code ./go.sum # check no changes
################################################################################
# Target: check-proto-version #
################################################################################
.PHONY: check-proto-version
check-proto-version: ## Checking the version of proto related tools
@test "$(shell protoc --version)" = "libprotoc $(PROTOC_VERSION)" \
|| { echo "please use protoc $(PROTOC_VERSION) (protobuf $(PROTOBUF_SUITE_VERSION)) to generate proto, see https://github.com/dapr/dapr/blob/master/dapr/README.md#proto-client-generation"; exit 1; }
@test "$(shell protoc-gen-go-grpc --version)" = "protoc-gen-go-grpc $(PROTOC_GEN_GO_GRPC_VERSION)" \
|| { echo "please use protoc-gen-go-grpc $(PROTOC_GEN_GO_GRPC_VERSION) to generate proto, see https://github.com/dapr/dapr/blob/master/dapr/README.md#proto-client-generation"; exit 1; }
@test "$(shell protoc-gen-go --version 2>&1)" = "$(PROTOC_GEN_GO_NAME)" \
|| { echo "please use protoc-gen-go $(PROTOC_GEN_GO_VERSION) to generate proto, see https://github.com/dapr/dapr/blob/master/dapr/README.md#proto-client-generation"; exit 1; }
################################################################################
# Target: check-proto-diff #
################################################################################
.PHONY: check-proto-diff
check-proto-diff:
git diff --exit-code ./pkg/proto/common/v1/common.pb.go # check no changes
git diff --exit-code ./pkg/proto/internals/v1/status.pb.go # check no changes
git diff --exit-code ./pkg/proto/operator/v1/operator.pb.go # check no changes
git diff --exit-code ./pkg/proto/operator/v1/operator_grpc.pb.go # check no changes
git diff --exit-code ./pkg/proto/runtime/v1/appcallback.pb.go # check no changes
git diff --exit-code ./pkg/proto/runtime/v1/appcallback_grpc.pb.go # check no changes
git diff --exit-code ./pkg/proto/runtime/v1/dapr.pb.go # check no changes
git diff --exit-code ./pkg/proto/runtime/v1/dapr_grpc.pb.go # check no changes
git diff --exit-code ./pkg/proto/sentry/v1/sentry.pb.go # check no changes
################################################################################
# Target: compile-build-tools #
################################################################################
compile-build-tools:
ifeq (,$(wildcard $(BUILD_TOOLS)))
cd .build-tools; CGO_ENABLED=$(CGO) GOOS=$(TARGET_OS_LOCAL) GOARCH=$(TARGET_ARCH_LOCAL) go build -o $(BUILD_TOOLS_BIN) .
endif
################################################################################
# Prettier                                                                     #
################################################################################
.PHONY: prettier-install prettier-check prettier-format me prettier
# One-time setup: install the Prettier formatter globally via npm.
prettier-install:
	npm install --global prettier
# Report (without modifying) any TS/JS/MJS/JSON files not in canonical format.
prettier-check:
	npx prettier --check "*/**/*.{ts,js,mjs,json}"
# Rewrite TS/JS/MJS/JSON files in place with Prettier's canonical formatting.
prettier-format:
	npx prettier --write "*/**/*.{ts,js,mjs,json}"
# "make me prettier"
# The `me` and `prettier` targets exist so the joke invocation above works;
# `prettier` runs the same write command as prettier-format (quietly, via @).
me:
	@echo "πͺππͺππͺπ"
prettier:
	@npx prettier --write "*/**/*.{ts,js,mjs,json}"
################################################################################
# Targets for components-contrib #
################################################################################
.PHONY: update-components-contrib
# Branch or tag to pin
COMPONENTS_CONTRIB_BRANCH ?= master
COMPONENTS_CONTRIB_REPO ?= github.com/dapr/components-contrib
# Bumps the components-contrib Go dependency to the tip of the pinned
# branch/tag, then re-tidies the module files so go.mod/go.sum stay consistent.
# Uses $(MAKE) (not a bare `make`) for the recursive invocation so command-line
# flags and the jobserver are propagated, per GNU Make convention.
update-components-contrib:
	go get -u $(COMPONENTS_CONTRIB_REPO)@$(COMPONENTS_CONTRIB_BRANCH)
	$(MAKE) modtidy-all
################################################################################
# Target: codegen #
################################################################################
include tools/codegen.mk
################################################################################
# Target: docker #
################################################################################
include docker/docker.mk
################################################################################
# Target: tests #
################################################################################
include tests/dapr_tests.mk
|
mikeee/dapr
|
Makefile
|
Makefile
|
mit
| 23,409 |
<div style="text-align: center"><img src="/img/dapr_logo.svg" height="120px">
<h2>Any language, any framework, anywhere</h2>
</div>
[![Go Report][go-report-badge]][go-report-url] [![OpenSSF][openssf-badge]][openssf-url] [![Docker Pulls][docker-badge]][docker-url] [![Build Status][actions-badge]][actions-url] [![Test Status][e2e-badge]][e2e-url] [![Code Coverage][codecov-badge]][codecov-url] [![License: Apache 2.0][apache-badge]][apache-url] [![FOSSA Status][fossa-badge]][fossa-url] [![TODOs][todo-badge]][todo-url] [![Good First Issues][gfi-badge]][gfi-url] [![discord][discord-badge]][discord-url] [![YouTube][youtube-badge]][youtube-link] [![X/Twitter][x-badge]][x-link]
[go-report-badge]: https://goreportcard.com/badge/github.com/dapr/dapr
[go-report-url]: https://goreportcard.com/report/github.com/dapr/dapr
[openssf-badge]: https://www.bestpractices.dev/projects/5044/badge
[openssf-url]: https://www.bestpractices.dev/projects/5044
[docker-badge]: https://img.shields.io/docker/pulls/daprio/daprd?style=flat&logo=docker
[docker-url]: https://hub.docker.com/r/daprio/dapr
[apache-badge]: https://img.shields.io/github/license/dapr/dapr?style=flat&label=License&logo=github
[apache-url]: https://github.com/dapr/dapr/blob/master/LICENSE
[actions-badge]: https://github.com/dapr/dapr/workflows/dapr/badge.svg?event=push&branch=master
[actions-url]: https://github.com/dapr/dapr/actions?workflow=dapr
[e2e-badge]: https://img.shields.io/endpoint?url=https://gist.githubusercontent.com/dapr-bot/14e974e8fd6c6eab03a2475beb1d547a/raw/dapr-test-badge.json
[e2e-url]: https://github.com/dapr/dapr/actions?workflow=dapr-test&event=schedule
[codecov-badge]: https://codecov.io/gh/dapr/dapr/branch/master/graph/badge.svg
[codecov-url]: https://codecov.io/gh/dapr/dapr
[fossa-badge]: https://app.fossa.com/api/projects/custom%2B162%2Fgithub.com%2Fdapr%2Fdapr.svg?type=shield
[fossa-url]: https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fdapr%2Fdapr?ref=badge_shield
[todo-badge]: https://badgen.net/https/api.tickgit.com/badgen/github.com/dapr/dapr
[todo-url]: https://www.tickgit.com/browse?repo=github.com/dapr/dapr
[gfi-badge]:https://img.shields.io/github/issues-search/dapr/dapr?query=type%3Aissue%20is%3Aopen%20label%3A%22good%20first%20issue%22&label=Good%20first%20issues&style=flat&logo=github
[gfi-url]:https://github.com/dapr/dapr/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22
[discord-badge]: https://img.shields.io/discord/778680217417809931?label=Discord&style=flat&logo=discord
[discord-url]: http://bit.ly/dapr-discord
[youtube-badge]:https://img.shields.io/youtube/channel/views/UCtpSQ9BLB_3EXdWAUQYwnRA?style=flat&label=YouTube%20views&logo=youtube
[youtube-link]:https://youtube.com/@daprdev
[x-badge]:https://img.shields.io/twitter/follow/daprdev?logo=x&style=flat
[x-link]:https://twitter.com/daprdev
Dapr is a portable, serverless, event-driven runtime that makes it easy for developers to build resilient, stateless and stateful microservices that run on the cloud and edge and embraces the diversity of languages and developer frameworks.
Dapr codifies the *best practices* for building microservice applications into open, independent, building blocks that enable you to build portable applications with the language and framework of your choice. Each building block is independent and you can use one, some, or all of them in your application.

We are a Cloud Native Computing Foundation (CNCF) incubation project.
<p align="center"><img src="https://raw.githubusercontent.com/kedacore/keda/main/images/logo-cncf.svg" height="75px"></p>
## Goals
- Enable developers using *any* language or framework to write distributed applications
- Solve the hard problems developers face building microservice applications by providing best practice building blocks
- Be community driven, open and vendor neutral
- Gain new contributors
- Provide consistency and portability through open APIs
- Be platform agnostic across cloud and edge
- Embrace extensibility and provide pluggable components without vendor lock-in
- Enable IoT and edge scenarios by being highly performant and lightweight
- Be incrementally adoptable from existing code, with no runtime dependency
## How it works
Dapr injects a side-car (container or process) into each compute unit. The side-car interacts with event triggers and communicates with the compute unit via standard HTTP or gRPC protocols. This enables Dapr to support all existing and future programming languages without requiring you to import frameworks or libraries.
Dapr offers built-in state management, reliable messaging (at least once delivery), triggers and bindings through standard HTTP verbs or gRPC interfaces. This allows you to write stateless, stateful and actor-like services following the same programming paradigm. You can freely choose consistency model, threading model and message delivery patterns.
Dapr runs natively on Kubernetes, as a self hosted binary on your machine, on an IoT device, or as a container that can be injected into any system, in the cloud or on-premises.
Dapr uses pluggable component state stores and message buses such as Redis as well as gRPC to offer a wide range of communication methods, including direct dapr-to-dapr using gRPC and async Pub-Sub with guaranteed delivery and at-least-once semantics.
## Why Dapr?
Writing highly performant, scalable and reliable distributed applications is hard. Dapr brings proven patterns and practices to you. It unifies event-driven and actors semantics into a simple, consistent programming model. It supports all programming languages without framework lock-in. You are not exposed to low-level primitives such as threading, concurrency control, partitioning and scaling. Instead, you can write your code by implementing a simple web server using familiar web frameworks of your choice.
Dapr is flexible in threading and state consistency models. You can leverage multi-threading if you choose to, and you can choose among different consistency models. This flexibility enables you to implement advanced scenarios without artificial constraints. Dapr is unique because you can transition seamlessly between platforms and underlying implementations without rewriting your code.
## Features
* Event-driven Pub-Sub system with pluggable providers and at-least-once semantics
* Input and output bindings with pluggable providers
* State management with pluggable data stores
* Consistent service-to-service discovery and invocation
* Opt-in stateful models: Strong/Eventual consistency, First-write/Last-write wins
* Cross platform virtual actors
* Secret management to retrieve secrets from secure key vaults
* Rate limiting
* Built-in [Observability](https://docs.dapr.io/concepts/observability-concept/) support
* Runs natively on Kubernetes using a dedicated Operator and CRDs
* Supports all programming languages via HTTP and gRPC
* Multi-Cloud, open components (bindings, pub-sub, state) from Azure, AWS, GCP
* Runs anywhere, as a process or containerized
* Lightweight (58MB binary, 4MB physical memory)
* Runs as a sidecar - removes the need for special SDKs or libraries
* Dedicated CLI - developer friendly experience with easy debugging
* Clients for Java, .NET Core, Go, JavaScript, Python, Rust and C++
## Get Started using Dapr
See our [Getting Started](https://docs.dapr.io/getting-started/) guide over in our docs.
## Quickstarts and Samples
* See the [quickstarts repository](https://github.com/dapr/quickstarts) for code examples that can help you get started with Dapr.
* Explore additional samples in the Dapr [samples repository](https://github.com/dapr/samples).
## Community
We want your contributions and suggestions! One of the easiest ways to contribute is to participate in discussions on the mailing list, chat on IM or the bi-weekly community calls.
For more information on the community engagement, developer and contributing guidelines and more, head over to the [Dapr community repo](https://github.com/dapr/community#dapr-community).
### Contact Us
Reach out with any questions you may have and we'll make sure to answer them as soon as possible!
| Platform | Link |
|:----------|:------------|
| π¬ Instant Message Chat (preferred) | [](https://aka.ms/dapr-discord)
| π§ Mailing List | https://groups.google.com/forum/#!forum/dapr-dev
| π€ Twitter | [@daprdev](https://twitter.com/daprdev)
### Community Call
Every two weeks we host a community call to showcase new features, review upcoming milestones, and engage in a Q&A. All are welcome!
π Visit https://aka.ms/dapr-community-call for upcoming dates and the meeting link.
### Videos and Podcasts
We have a variety of keynotes, podcasts, and presentations available to reference and learn from.
πΊ Visit https://docs.dapr.io/contributing/presentations/ for previous talks and slide decks.
### Contributing to Dapr
See the [Development Guide](https://docs.dapr.io/contributing/) to get started with building and developing.
## Repositories
| Repo | Description |
|:-----|:------------|
| [Dapr](https://github.com/dapr/dapr) | The main repository that you are currently in. Contains the Dapr runtime code and overview documentation.
| [CLI](https://github.com/dapr/cli) | The Dapr CLI allows you to setup Dapr on your local dev machine or on a Kubernetes cluster, provides debugging support, launches and manages Dapr instances.
| [Docs](https://docs.dapr.io) | The documentation for Dapr.
| [Quickstarts](https://github.com/dapr/quickstarts) | This repository contains a series of simple code samples that highlight the main Dapr capabilities.
| [Samples](https://github.com/dapr/samples) | This repository holds community maintained samples for various Dapr use cases.
| [Components-contrib ](https://github.com/dapr/components-contrib) | The purpose of components contrib is to provide open, community driven reusable components for building distributed applications.
| [Dashboard ](https://github.com/dapr/dashboard) | General purpose dashboard for Dapr
| [Go-sdk](https://github.com/dapr/go-sdk) | Dapr SDK for Go
| [Java-sdk](https://github.com/dapr/java-sdk) | Dapr SDK for Java
| [JS-sdk](https://github.com/dapr/js-sdk) | Dapr SDK for JavaScript
| [Python-sdk](https://github.com/dapr/python-sdk) | Dapr SDK for Python
| [Dotnet-sdk](https://github.com/dapr/dotnet-sdk) | Dapr SDK for .NET
| [Rust-sdk](https://github.com/dapr/rust-sdk) | Dapr SDK for Rust
| [Cpp-sdk](https://github.com/dapr/cpp-sdk) | Dapr SDK for C++
| [PHP-sdk](https://github.com/dapr/php-sdk) | Dapr SDK for PHP
## Code of Conduct
Please refer to our [Dapr Community Code of Conduct](https://github.com/dapr/community/blob/master/CODE-OF-CONDUCT.md)
|
mikeee/dapr
|
README.md
|
Markdown
|
mit
| 10,866 |
## Security Policy
https://docs.dapr.io/operations/support/support-security-issues/
|
mikeee/dapr
|
SECURITY.md
|
Markdown
|
mit
| 84 |
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
packages/
|
mikeee/dapr
|
charts/dapr/.helmignore
|
none
|
mit
| 353 |
# Umbrella Helm chart for the Dapr control plane on Kubernetes.
# NOTE(review): the '0.0.0' versions look like placeholders substituted at
# release time — confirm against the release pipeline before changing them.
apiVersion: v1
appVersion: '0.0.0'
description: A Helm chart for Dapr on Kubernetes
name: dapr
version: '0.0.0'
# Control-plane sub-charts, each vendored in a local directory referenced via
# a file:// repository next to this chart.
dependencies:
- name: dapr_rbac
  version: '0.0.0'
  repository: "file://dapr_rbac"
- name: dapr_operator
  version: '0.0.0'
  repository: "file://dapr_operator"
- name: dapr_placement
  version: '0.0.0'
  repository: "file://dapr_placement"
- name: dapr_sidecar_injector
  version: '0.0.0'
  repository: "file://dapr_sidecar_injector"
- name: dapr_sentry
  version: '0.0.0'
  repository: "file://dapr_sentry"
|
mikeee/dapr
|
charts/dapr/Chart.yaml
|
YAML
|
mit
| 552 |
# Introduction
This chart deploys the Dapr control plane system services on a Kubernetes cluster using the Helm package manager.
## Chart Details
This chart installs Dapr via "child-charts":
* Dapr Component and Configuration Kubernetes CRDs
* Dapr Operator
* Dapr Sidecar injector
* Dapr Sentry
* Dapr Placement
## Prerequisites
* Kubernetes cluster with RBAC (Role-Based Access Control) enabled is required
* Helm 3.4.0 or newer
## Resources Required
The chart deploys pods that consume minimum resources as specified in the resources configuration parameter.
## Install the Chart
Ensure Helm is initialized in your Kubernetes cluster.
For more details on initializing Helm, [read the Helm docs](https://helm.sh/docs/)
1. Add dapr.github.io as a Helm repo
```
helm repo add dapr https://dapr.github.io/helm-charts/
helm repo update
```
2. Install the Dapr chart on your cluster in the dapr-system namespace:
```
helm install dapr dapr/dapr --namespace dapr-system --create-namespace --wait
```
## Verify installation
Once the chart is installed, verify the Dapr control plane system service pods are running in the `dapr-system` namespace:
```
kubectl get pods --namespace dapr-system
```
## Uninstall the Chart
To uninstall/delete the `dapr` release:
```
helm uninstall dapr -n dapr-system
```
## Upgrade the charts
Follow the upgrade HowTo instructions in [Upgrading Dapr with Helm](https://docs.dapr.io/operations/hosting/kubernetes/kubernetes-production/#upgrading-dapr-with-helm).
## Resource configuration
By default, all deployments are configured with blank `resources` attributes, which means that pods will consume as much cpu and memory as they want. This is probably fine for a local development or a non-production setup, but for production you should configure them. Consult Dapr docs and [Kubernetes docs](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for guidance on setting these values.
For example, in order to configure the `memory.requests` setting for the `dapr-operator` deployment, configure a values.yml file with the following:
```yaml
dapr_operator:
resources:
requests:
memory: 200Mi
```
## Configuration
The Helm chart has the following configuration options that can be supplied:
### Global options:
| Parameter | Description | Default |
|-------------------------------------------|-------------------------------------------------------------------------|-------------------------|
| `global.registry` | Docker image registry | `docker.io/daprio` |
| `global.tag` | Docker image version tag | latest release |
| `global.logAsJson` | Json log format for control plane services | `false` |
| `global.imagePullPolicy` | Global Control plane service imagePullPolicy | `IfNotPresent` |
| `global.imagePullSecrets` | Control plane service images pull secrets for docker registry. Its value can be: a string with single imagePullSecret, an array of `{name: pullSecret}` maps (Kubernetes-style), or an array of strings | `[]` |
| `global.ha.enabled` | Highly Availability mode enabled for control plane | `false` |
| `global.ha.replicaCount` | Number of replicas of control plane services in Highly Availability mode<br>Note that in HA mode, Dapr Placement has 3 replicas and that cannot be configured. | `3` |
| `global.ha.disruption.minimumAvailable` | Minimum amount of available instances for control plane. This can either be effective count or %. | `` |
| `global.ha.disruption.maximumUnavailable` | Maximum amount of instances that are allowed to be unavailable for control plane. This can either be effective count or %. | `25%` |
| `global.prometheus.enabled` | Prometheus metrics enablement for control plane services | `true` |
| `global.prometheus.port` | Prometheus scrape http endpoint port | `9090` |
| `global.mtls.enabled` | Mutual TLS enablement | `true` |
| `global.mtls.workloadCertTTL` | TTL for workload cert | `24h` |
| `global.mtls.allowedClockSkew` | Allowed clock skew for workload cert rotation | `15m` |
| `global.mtls.controlPlaneTrustDomain`     | Trust domain for control plane                                          | `cluster.local`         |
| `global.mtls.sentryAddress` | Sentry address for control plane | `dapr-sentry.{{ .ReleaseNamespace }}.svc:443` |
| `global.mtls.mountSentryToken` | Gates whether the sentry bound service account token volume is mounted to control plane pods | `true` |
| `global.extraVolumes.sentry` | Array of extra volumes to make available to sentry pods | `[]` |
| `global.extraVolumes.placement` | Array of extra volumes to make available to placement pods | `[]` |
| `global.extraVolumes.operator` | Array of extra volumes to make available to operator pods | `[]` |
| `global.extraVolumes.injector` | Array of extra volumes to make available to sidecar injector pods | `[]` |
| `global.extraVolumeMounts.sentry` | Array of extra volume mounts to make available to sentry pod containers | `[]` |
| `global.extraVolumeMounts.placement` | Array of extra volume mounts to make available to placement pod containers | `[]` |
| `global.extraVolumeMounts.operator` | Array of extra volume mounts to make available to operator pod containers | `[]` |
| `global.extraVolumeMounts.injector` | Array of extra volume mounts to make available to sidecar injector pod containers | `[]` |
| `global.dnsSuffix`                        | Kubernetes DNS suffix                                                   | `.cluster.local`        |
| `global.daprControlPlaneOs` | Operating System for Dapr control plane | `linux` |
| `global.daprControlPlaneArch` | CPU Architecture for Dapr control plane | `amd64` |
| `global.nodeSelector`                     | Pods will be scheduled onto a node whose labels match the nodeSelector  | `{}`                    |
| `global.tolerations` | Pods will be allowed to schedule onto a node whose taints match the tolerations | `[]` |
| `global.labels` | Custom pod labels | `{}` |
| `global.k8sLabels` | Custom metadata labels | `{}` |
| `global.issuerFilenames.ca` | Custom name of the file containing the root CA certificate inside the container | `ca.crt` |
| `global.issuerFilenames.cert` | Custom name of the file containing the leaf certificate inside the container | `issuer.crt` |
| `global.issuerFilenames.key` | Custom name of the file containing the leaf certificate's key inside the container | `issuer.key` |
| `global.actors.enabled` | Enables the Dapr actors building block. When "false", the Dapr Placement service is not installed, and attempting to use Dapr actors will fail. | `true` |
| `global.actors.serviceName` | Name of the service that provides actor placement services. | `placement` |
| `global.reminders.serviceName` | Name of the service that provides reminders functionality. If empty (the default), uses the built-in reminders capabilities in Dapr sidecars. | |
| `global.seccompProfile` | SeccompProfile for Dapr control plane services | `""` |
| `global.rbac.namespaced` | Removes cluster wide permissions where applicable | `false` |
| `global.argoRolloutServiceReconciler.enabled` | Enable the service reconciler for Dapr-enabled Argo Rollouts | `false` |
| `global.priorityClassName` | Adds `priorityClassName` to Dapr pods | `""` |
### Dapr Operator options:
| Parameter | Description | Default |
|--------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------|
| `dapr_operator.replicaCount` | Number of replicas | `1` |
| `dapr_operator.logLevel` | Log level | `info` |
| `dapr_operator.watchInterval` | Interval for polling pods' state (e.g. `2m`). Set to `0` to disable, or `once` to only run once when the operator starts | `0` |
| `dapr_operator.maxPodRestartsPerMinute` | Maximum number of pods in an invalid state that can be restarted per minute | `20` |
| `dapr_operator.image.name` | Docker image name (`global.registry/dapr_operator.image.name`) | `dapr` |
| `dapr_operator.runAsNonRoot` | Boolean value for `securityContext.runAsNonRoot`. You may have to set this to `false` when running in Minikube | `true` |
| `dapr_operator.resources` | Value of `resources` attribute. Can be used to set memory/cpu resources/limits. See the section "Resource configuration" above. Defaults to empty | `{}` |
| `dapr_operator.debug.enabled` | Boolean value for enabling debug mode | `{}` |
| `dapr_operator.serviceReconciler.enabled` | If false, disables the reconciler that creates Services for Dapr-enabled Deployments and StatefulSets.<br>Note: disabling this reconciler could prevent Dapr service invocation from working. | `true` |
| `dapr_operator.watchNamespace` | The namespace to watch for annotated Dapr resources in | `""` |
| `dapr_operator.deploymentAnnotations` | Custom annotations for Dapr Operator Deployment | `{}` |
| `dapr_operator.apiService.annotations` | Custom annotations for "dapr-operator" Service resource | `{}` |
| `dapr_operator.apiService.type` | Type for "dapr-operator" Service resource (e.g. `ClusterIP`, `LoadBalancer`, etc) | `ClusterIP` |
| `dapr_operator.webhookService.annotations` | Custom annotations for "dapr-webhook" Service resource | `{}` |
| `dapr_operator.webhookService.type` | Type for "dapr-webhook" Service resource (e.g. `ClusterIP`, `LoadBalancer`, etc) | `ClusterIP` |
| `dapr_operator.extraEnvVars` | Map of (name, value) tuples to use as extra environment variables (e.g. `my-env-var: "my-val"`, etc) | `{}` |
### Dapr Placement options:
| Parameter | Description | Default |
|---|---|---|
| `dapr_placement.ha`| If set to true, deploys the Placement service with 3 nodes regardless of the value of `global.ha.enabled` | `false` |
| `dapr_placement.replicationFactor` | Number of consistent hashing virtual node | `100`|
| `dapr_placement.logLevel` | Service Log level | `info`|
| `dapr_placement.image.name` | Service docker image name (`global.registry/dapr_placement.image.name`) | `dapr` |
| `dapr_placement.cluster.forceInMemoryLog` | Use in-memory log store and disable volume attach when HA is true | `false` |
| `dapr_placement.cluster.logStorePath` | Mount path for persistent volume for log store in unix-like system when HA is true | `/var/run/dapr/raft-log` |
| `dapr_placement.cluster.logStoreWinPath` | Mount path for persistent volume for log store in windows when HA is true | `C:\\raft-log` |
| `dapr_placement.volumeclaims.storageSize` | Attached volume size | `1Gi` |
| `dapr_placement.volumeclaims.storageClassName` | Storage class name ||
| `dapr_placement.maxActorApiLevel` | Sets the `max-api-level` flag which prevents the Actor API level from going above this value. The Placement service reports to all connected hosts the Actor API level as the minimum value observed in all actor hosts in the cluster. Actor hosts with a lower API level than the current API level in the cluster will not be able to connect to Placement. Setting a cap helps making sure that older versions of Dapr can connect to Placement as actor hosts, but may limit the capabilities of the actor subsystem. The default value of -1 means no cap. | `-1` |
| `dapr_placement.minActorApiLevel` | Sets the `min-api-level` flag, which enforces a minimum value for the Actor API level in the cluster. | `0` |
| `dapr_placement.scaleZero` | If true, the StatefulSet is deployed with a zero scale, regardless of the values of `global.ha.enabled` or `dapr_placement.ha` | `false` |
| `dapr_placement.runAsNonRoot` | Boolean value for `securityContext.runAsNonRoot`. Does not apply unless `forceInMemoryLog` is set to `true`. You may have to set this to `false` when running in Minikube | `false` |
| `dapr_placement.resources` | Value of `resources` attribute. Can be used to set memory/cpu resources/limits. See the section "Resource configuration" above. Defaults to empty | `{}` |
| `dapr_placement.debug.enabled` | Boolean value for enabling debug mode | `{}` |
| `dapr_placement.metadataEnabled` | Boolean value for enabling placement tables metadata HTTP API | `false` |
| `dapr_placement.statefulsetAnnotations` | Custom annotations for Dapr Placement Statefulset | `{}` |
| `dapr_placement.service.annotations` | Custom annotations for "dapr-placement-server" Service resource | `{}` |
| `dapr_placement.extraEnvVars` | Dictionary (key: value pairs) to use as extra environment variables in the injected sidecar containers (e.g. `my-env-var: "my-val"`, etc) | `{}` |
### Dapr RBAC options:
| Parameter | Description | Default |
|---|---|---|
| `dapr_rbac.secretReader.enabled` | Deploys a default secret reader Role and RoleBinding | `true` |
| `dapr_rbac.secretReader.namespace` | Namespace for the default secret reader | `default` |
### Dapr Sentry options:
| Parameter | Description | Default |
|---|---|---|
| `dapr_sentry.replicaCount` | Number of replicas | `1` |
| `dapr_sentry.logLevel` | Log level | `info` |
| `dapr_sentry.image.name` | Docker image name (`global.registry/dapr_sentry.image.name`) | `dapr` |
| `dapr_sentry.tls.issuer.certPEM` | Issuer Certificate cert | `""` |
| `dapr_sentry.tls.issuer.keyPEM` | Issuer Private Key cert | `""` |
| `dapr_sentry.tls.root.certPEM` | Root Certificate cert | `""` |
| `dapr_sentry.runAsNonRoot` | Boolean value for `securityContext.runAsNonRoot`. You may have to set this to `false` when running in Minikube | `true` |
| `dapr_sentry.resources` | Value of `resources` attribute. Can be used to set memory/cpu resources/limits. See the section "Resource configuration" above. Defaults to empty | `{}` |
| `dapr_sentry.debug.enabled` | Boolean value for enabling debug mode | `{}` |
| `dapr_sentry.deploymentAnnotations` | Custom annotations for Dapr Sentry Deployment | `{}` |
| `dapr_sentry.service.annotations` | Custom annotations for "dapr-sentry" Service resource | `{}` |
| `dapr_sentry.service.type` | Type for "dapr-sentry" Service resource (e.g. `ClusterIP`, `LoadBalancer`, etc) | `ClusterIP` |
| `dapr_sentry.extraEnvVars` | Map of (name, value) tuples to use as extra environment variables (e.g. `my-env-var: "my-val"`, etc) | `{}` |
### Dapr Sidecar Injector options:
| Parameter | Description | Default |
|-----------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------|
| `dapr_sidecar_injector.enabled` | Enable the sidecar injector | `true` |
| `dapr_sidecar_injector.sidecarImagePullPolicy` | Dapr sidecar image pull policy | `IfNotPresent` |
| `dapr_sidecar_injector.replicaCount` | Number of replicas | `1` |
| `dapr_sidecar_injector.logLevel` | Log level | `info` |
| `dapr_sidecar_injector.image.name` | Docker image name for Dapr runtime sidecar to inject into an application (`global.registry/dapr_sidecar_injector.image.name`) | `daprd`|
| `dapr_sidecar_injector.injectorImage.name` | Docker image name for sidecar injector service (`global.registry/dapr_sidecar_injector.injectorImage.name`) | `dapr`|
| `dapr_sidecar_injector.webhookFailurePolicy` | Failure policy for the sidecar injector | `Ignore` |
| `dapr_sidecar_injector.runAsNonRoot` | Boolean value for `securityContext.runAsNonRoot` for the Sidecar Injector container itself. You may have to set this to `false` when running in Minikube | `true` |
| `dapr_sidecar_injector.sidecarRunAsNonRoot` | When this boolean value is true (the default), the injected sidecar containers have `runAsNonRoot: true`. You may have to set this to `false` when running Minikube | `true` |
| `dapr_sidecar_injector.sidecarReadOnlyRootFilesystem` | When this boolean value is true (the default), the injected sidecar containers have `readOnlyRootFilesystem: true` | `true` |
| `dapr_sidecar_injector.enableK8sDownwardAPIs` | When set to true, uses the Kubernetes downward projection APIs to inject certain environmental variables (such as pod IP) into the daprd container. (default: `false`) | `true` |
| `dapr_sidecar_injector.sidecarDropALLCapabilities` | When this boolean value is true, the injected sidecar containers have `securityContext.capabilities.drop: ["ALL"]` | `false` |
| `dapr_sidecar_injector.allowedServiceAccounts` | String value for extra allowed service accounts in the format of `namespace1:serviceAccount1,namespace2:serviceAccount2` | `""` |
| `dapr_sidecar_injector.allowedServiceAccountsPrefixNames` | Comma-separated list of extra allowed service accounts. Each item in the list should be in the format of namespace:serviceaccount. To match service accounts by a common prefix, you can add an asterisk (`*`) at the end of the prefix. For instance, ns1*:sa2* will match any service account that starts with sa2, whose namespace starts with ns1. For example, it will match service accounts like sa21 and sa2223 in namespaces such as ns1, ns1dapr, and so on. | `""` |
| `dapr_sidecar_injector.resources` | Value of `resources` attribute. Can be used to set memory/cpu resources/limits. See the section "Resource configuration" above. Defaults to empty | `{}` |
| `dapr_sidecar_injector.debug.enabled` | Boolean value for enabling debug mode | `{}` |
| `dapr_sidecar_injector.kubeClusterDomain` | Domain for this kubernetes cluster. If not set, will auto-detect the cluster domain through the `/etc/resolv.conf` file `search domains` content. | `cluster.local` |
| `dapr_sidecar_injector.ignoreEntrypointTolerations` | JSON array of Kubernetes tolerations. If pod contains any of these tolerations, it will ignore the Docker image ENTRYPOINT for Dapr sidecar. | `[{\"effect\":\"NoSchedule\",\"key\":\"alibabacloud.com/eci\"},{\"effect\":\"NoSchedule\",\"key\":\"azure.com/aci\"},{\"effect\":\"NoSchedule\",\"key\":\"aws\"},{\"effect\":\"NoSchedule\",\"key\":\"huawei.com/cci\"}]` |
| `dapr_sidecar_injector.hostNetwork` | Enable hostNetwork mode. This is helpful when working with overlay networks such as Calico CNI and admission webhooks fail | `false` |
| `dapr_sidecar_injector.healthzPort` | The port used for health checks. Helpful in combination with hostNetwork to avoid port collisions | `8080` |
| `dapr_sidecar_injector.deploymentAnnotations` | Custom annotations for Dapr Sidecar Injector Deployment | `{}` |
| `dapr_sidecar_injector.service.annotations` | Custom annotations for "dapr-sidecar-injector" Service resource | `{}` |
| `dapr_sidecar_injector.service.type` | Type for "dapr-sidecar-injector" Service resource (e.g. `ClusterIP`, `LoadBalancer`, etc) | `ClusterIP` |
| `dapr_sidecar_injector.extraEnvVars` | Map of (name, value) tuples to use as extra environment variables (e.g. `my-env-var: "my-val"`, etc) | `{}` |
## Example of highly available configuration of the control plane
This command creates three replicas of each control plane pod for an HA deployment (with the exception of the Placement pod) in the dapr-system namespace:
```
helm install dapr dapr/dapr --namespace dapr-system --create-namespace --set global.ha.enabled=true --wait
```
## Example of installing edge version of Dapr
This command deploys the latest `edge` version of Dapr to `dapr-system` namespace. This is useful if you want to deploy the latest version of Dapr to test a feature or some capability in your Kubernetes cluster.
```
helm install dapr dapr/dapr --namespace dapr-system --create-namespace --set-string global.tag=edge --wait
```
## Example of installing dapr on Minikube
Configure a values file with these options:
```yaml
dapr_dashboard:
runAsNonRoot: false
logLevel: DEBUG
serviceType: NodePort # Allows retrieving the dashboard url by running the command "minikube service list"
dapr_placement:
runAsNonRoot: false
logLevel: DEBUG
dapr_operator:
runAsNonRoot: false
logLevel: DEBUG
dapr_sentry:
runAsNonRoot: false
logLevel: DEBUG
dapr_sidecar_injector:
runAsNonRoot: false
logLevel: DEBUG
global:
logAsJson: true
```
Install dapr:
```bash
helm install dapr dapr/dapr --namespace dapr-system --create-namespace --values values.yml --wait
```
## Example of debugging dapr
Rebuild dapr binaries and docker images:
```bash
make release GOOS=linux GOARCH=amd64 DEBUG=1
export DAPR_TAG=dev
export DAPR_REGISTRY=<your docker.io id>
docker login
make docker-push DEBUG=1
```
Taking dapr_operator as an example, configure the corresponding `debug.enabled` option in a values file:
```yaml
global:
registry: docker.io/<your docker.io id>
tag: "dev-linux-amd64"
dapr_operator:
debug:
enabled: true
```
Step into dapr project, and install dapr:
```bash
helm install dapr charts/dapr --namespace dapr-system --values values.yml --wait
```
Find the target dapr-operator pod:
```bash
kubectl get pods -n dapr-system -o wide
```
Port forward the debugging port so that it's visible to your IDE:
```bash
kubectl port-forward dapr-operator-5c99475ffc-m9z9f 40000:40000 -n dapr-system
```
## Example of using nodeSelector option
```bash
helm install dapr dapr/dapr --namespace dapr-system --create-namespace --set global.nodeSelector.myLabel=myValue --wait
```
|
mikeee/dapr
|
charts/dapr/README.md
|
Markdown
|
mit
| 34,270 |
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
|
mikeee/dapr
|
charts/dapr/charts/dapr_config/.helmignore
|
none
|
mit
| 342 |
apiVersion: v1
appVersion: "1.0"
description: A Helm chart for Dapr configuration
name: dapr_config
version: '0.0.0'
|
mikeee/dapr
|
charts/dapr/charts/dapr_config/Chart.yaml
|
YAML
|
mit
| 116 |
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "dapr_config.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "dapr_config.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "dapr_config.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
|
mikeee/dapr
|
charts/dapr/charts/dapr_config/templates/_helpers.tpl
|
tpl
|
mit
| 1,057 |
{{- if .Values.dapr_config_chart_included }}
apiVersion: dapr.io/v1alpha1
kind: Configuration
metadata:
name: {{ .Values.dapr_default_system_config_name }}
namespace: {{ .Release.Namespace }}
labels:
{{- range $key, $value := .Values.global.k8sLabels }}
{{ $key }}: {{ tpl $value $ }}
{{- end }}
spec:
mtls:
enabled: {{ .Values.global.mtls.enabled }}
workloadCertTTL: {{ .Values.global.mtls.workloadCertTTL }}
allowedClockSkew: {{ .Values.global.mtls.allowedClockSkew }}
controlPlaneTrustDomain: {{ .Values.global.mtls.controlPlaneTrustDomain }}
sentryAddress: {{ if .Values.global.mtls.sentryAddress }}{{ .Values.global.mtls.sentryAddress }}{{ else }}dapr-sentry.{{ .Release.Namespace }}.svc.cluster.local:443{{ end }}
{{- end }}
|
mikeee/dapr
|
charts/dapr/charts/dapr_config/templates/dapr_default_config.yaml
|
YAML
|
mit
| 771 |
# Determines whether this chart is rendered. For example, when generating a plain manifest file for Dapr, this chart should be excluded, which is achieved by overriding this value to false.
dapr_config_chart_included: true
dapr_default_system_config_name: "daprsystem"
component: config
|
mikeee/dapr
|
charts/dapr/charts/dapr_config/values.yaml
|
YAML
|
mit
| 326 |
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
|
mikeee/dapr
|
charts/dapr/charts/dapr_operator/.helmignore
|
none
|
mit
| 342 |
apiVersion: v1
appVersion: "1.0"
description: A Helm chart for Dapr Kubernetes Operator
name: dapr_operator
version: '0.0.0'
|
mikeee/dapr
|
charts/dapr/charts/dapr_operator/Chart.yaml
|
YAML
|
mit
| 125 |
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "dapr_operator.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "dapr_operator.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "dapr_operator.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
|
mikeee/dapr
|
charts/dapr/charts/dapr_operator/templates/_helpers.tpl
|
tpl
|
mit
| 1,063 |
apiVersion: apps/v1
kind: Deployment
metadata:
name: dapr-operator
namespace: {{ .Release.Namespace }}
labels:
app: dapr-operator
{{- range $key, $value := .Values.global.k8sLabels }}
{{ $key }}: {{ tpl $value $ }}
{{- end }}
spec:
{{- if eq .Values.global.ha.enabled true }}
replicas: {{ .Values.global.ha.replicaCount }}
{{- else }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
app: dapr-operator
template:
metadata:
labels:
app: dapr-operator
{{- range $key, $value := .Values.global.k8sLabels }}
{{ $key }}: {{ tpl $value $ }}
{{- end }}
{{- with .Values.global.labels }}
{{- toYaml . | nindent 8 }}
{{- end }}
annotations:
dapr.io/control-plane: operator
{{- if eq .Values.global.prometheus.enabled true }}
prometheus.io/scrape: "{{ .Values.global.prometheus.enabled }}"
prometheus.io/port: "{{ .Values.global.prometheus.port }}"
prometheus.io/path: "/"
{{- end }}
{{- with .Values.deploymentAnnotations }}
{{ toYaml . | indent 8 }}
{{- end }}
spec:
containers:
- name: dapr-operator
livenessProbe:
httpGet:
path: /healthz
port: 8080
{{- if eq .Values.debug.enabled false }}
initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
{{- else }}
initialDelaySeconds: {{ .Values.debug.initialDelaySeconds }}
{{- end }}
periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
readinessProbe:
httpGet:
path: /healthz
port: 8080
{{- if eq .Values.debug.enabled false }}
initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
{{- else }}
initialDelaySeconds: {{ .Values.debug.initialDelaySeconds }}
{{- end }}
periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
{{- if contains "/" .Values.image.name }}
image: "{{ .Values.image.name }}"
{{- else }}
image: "{{ .Values.global.registry }}/{{ .Values.image.name }}:{{ .Values.global.tag }}"
{{- end }}
imagePullPolicy: {{ .Values.global.imagePullPolicy }}
{{- if eq .Values.global.daprControlPlaneOs "linux" }}
securityContext:
runAsNonRoot: {{ .Values.runAsNonRoot }}
{{- if eq .Values.debug.enabled true }}
capabilities:
add: ["SYS_PTRACE"]
{{- else }}
readOnlyRootFilesystem: true
capabilities:
drop: ["ALL"]
{{- end }}
{{- if .Values.global.seccompProfile }}
seccompProfile:
type: {{ .Values.global.seccompProfile }}
{{- end }}
{{- end }}
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- range $name, $value := .Values.extraEnvVars }}
- name: "{{ $name }}"
value: "{{ $value }}"
{{- end }}
ports:
- containerPort: 6500
{{- if eq .Values.global.prometheus.enabled true }}
- name: metrics
containerPort: {{ .Values.global.prometheus.port }}
protocol: TCP
{{- end }}
{{- if eq .Values.debug.enabled true }}
- name: debug
containerPort: {{ .Values.debug.port }}
protocol: TCP
{{- end }}
resources:
{{ toYaml .Values.resources | indent 10 }}
volumeMounts:
- name: dapr-trust-bundle
mountPath: /var/run/secrets/dapr.io/tls
readOnly: true
{{- if .Values.global.mtls.mountSentryVolume }}
- name: dapr-identity-token
mountPath: /var/run/secrets/dapr.io/sentrytoken
readOnly: true
{{- end }}
{{- if eq .Values.debug.enabled false }}
# This is not needed in debug mode because the root FS is writable
- name: dapr-operator-tmp
mountPath: /tmp
{{- end }}
{{- with .Values.global.extraVolumeMounts.operator }}
{{- toYaml . | nindent 8 }}
{{- end }}
command:
{{- if eq .Values.debug.enabled false }}
- "/operator"
{{- else }}
- "/dlv"
{{- end }}
args:
{{- if eq .Values.debug.enabled true }}
- "--listen=:{{ .Values.debug.port }}"
- "--accept-multiclient"
- "--headless=true"
- "--log"
- "--api-version=2"
- "exec"
- "/operator"
- "--"
{{- end }}
- "--watch-interval"
- "{{ .Values.watchInterval }}"
- "--max-pod-restarts-per-minute"
- "{{ .Values.maxPodRestartsPerMinute }}"
- "--log-level"
- "{{ .Values.logLevel }}"
- "--trust-anchors-file"
- "/var/run/secrets/dapr.io/tls/ca.crt"
{{- if eq .Values.global.logAsJson true }}
- "--log-as-json"
{{- end }}
{{- if eq .Values.global.prometheus.enabled true }}
- "--enable-metrics"
- "--metrics-port"
- "{{ .Values.global.prometheus.port }}"
{{- else }}
- "--enable-metrics=false"
{{- end }}
{{- if .Values.watchNamespace }}
- "--watch-namespace"
- "{{ .Values.watchNamespace }}"
{{- end }}
{{- if not .Values.serviceReconciler.enabled }}
- "--disable-service-reconciler"
{{- end }}
{{- if .Values.global.argoRolloutServiceReconciler.enabled }}
- "--enable-argo-rollout-service-reconciler"
{{- end }}
{{- if .Values.global.operator.watchdogCanPatchPodLabels }}
- "--watchdog-can-patch-pod-labels"
{{- end }}
serviceAccountName: dapr-operator
volumes:
- name: dapr-operator-tmp
emptyDir:
sizeLimit: 2Mi
medium: Memory
- name: dapr-trust-bundle
configMap:
name: dapr-trust-bundle
{{- if .Values.global.mtls.mountSentryVolume }}
- name: dapr-identity-token
projected:
sources:
- serviceAccountToken:
path: token
expirationSeconds: 600
audience: "spiffe://{{ .Values.global.mtls.controlPlaneTrustDomain }}/ns/{{ .Release.Namespace }}/dapr-sentry"
{{- end }}
{{- with .Values.global.extraVolumes.operator }}
{{- toYaml . | nindent 8 }}
{{- end }}
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- {{ .Values.global.daprControlPlaneOs }}
{{- if .Values.global.daprControlPlaneArch }}
- key: kubernetes.io/arch
operator: In
values:
- {{ .Values.global.daprControlPlaneArch }}
{{- end }}
{{- if .Values.global.ha.enabled }}
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- dapr-operator
topologyKey: topology.kubernetes.io/zone
{{- end }}
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- include "dapr.imagePullSecrets" (dict "imagePullSecrets" .Values.global.imagePullSecrets) | nindent 8 -}}
{{- end }}
{{- if .Values.global.nodeSelector }}
nodeSelector:
{{ toYaml .Values.global.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.global.tolerations }}
tolerations:
{{ toYaml .Values.global.tolerations | indent 8 }}
{{- end }}
{{- if .Values.global.priorityClassName }}
priorityClassName:
{{ toYaml .Values.global.priorityClassName | indent 8 }}
{{- end }}
|
mikeee/dapr
|
charts/dapr/charts/dapr_operator/templates/dapr_operator_deployment.yaml
|
YAML
|
mit
| 7,882 |
{{- if eq .Values.global.ha.enabled true }}
{{- if .Capabilities.APIVersions.Has "policy/v1" }}
apiVersion: policy/v1
{{- else }}
apiVersion: policy/v1beta1
{{- end }}
kind: PodDisruptionBudget
metadata:
name: dapr-operator-disruption-budget
namespace: {{ .Release.Namespace }}
labels:
app: dapr-operator
{{- range $key, $value := .Values.global.k8sLabels }}
{{ $key }}: {{ tpl $value $ }}
{{- end }}
spec:
{{- if .Values.global.ha.disruption.minimumAvailable }}
minAvailable: {{ .Values.global.ha.disruption.minimumAvailable }}
{{- end }}
{{- if .Values.global.ha.disruption.maximumUnavailable }}
maxUnavailable: {{ .Values.global.ha.disruption.maximumUnavailable }}
{{- end }}
selector:
matchLabels:
app: dapr-operator
{{- range $key, $value := .Values.global.k8sLabels }}
{{ $key }}: {{ tpl $value $ }}
{{- end }}
{{- with .Values.global.labels }}
{{- toYaml . | nindent 6 }}
{{- end }}
{{- end }}
|
mikeee/dapr
|
charts/dapr/charts/dapr_operator/templates/dapr_operator_poddisruptionbudget.yaml
|
YAML
|
mit
| 977 |
kind: Service
apiVersion: v1
metadata:
name: dapr-api
namespace: {{ .Release.Namespace }}
labels:
{{- range $key, $value := .Values.global.k8sLabels }}
{{ $key }}: {{ tpl $value $ }}
{{- end }}
{{- if .Values.apiService.annotations }}
{{ toYaml .Values.apiService.annotations | indent 4}}
{{- end }}
spec:
selector:
app: dapr-operator
type: {{ .Values.apiService.type }}
ports:
- protocol: TCP
port: {{ .Values.ports.port }}
targetPort: {{ .Values.ports.targetPort }}
name: grpc
# Added for backwards compatibility where previous clients will attempt to
# connect on port 80.
  # TODO: @joshvanl: remove in v1.14
{{ if (ne (int .Values.ports.port) 80) }}
- protocol: TCP
port: 80
targetPort: {{ .Values.ports.targetPort }}
name: legacy
{{ end }}
---
apiVersion: v1
kind: Service
metadata:
name: dapr-webhook
namespace: {{ .Release.Namespace }}
labels:
{{- range $key, $value := .Values.global.k8sLabels }}
{{ $key }}: {{ tpl $value $ }}
{{- end }}
{{- if .Values.webhookService.annotations }}
{{ toYaml .Values.webhookService.annotations | indent 4}}
{{- end }}
spec:
type: {{ .Values.webhookService.type }}
ports:
- port: 443
targetPort: 19443
protocol: TCP
selector:
app: dapr-operator
|
mikeee/dapr
|
charts/dapr/charts/dapr_operator/templates/dapr_operator_service.yaml
|
YAML
|
mit
| 1,280 |
replicaCount: 1
logLevel: info
watchInterval: "0"
watchNamespace: ""
maxPodRestartsPerMinute: 20
component: operator
# Override this to use a custom operator service image.
# If the image name contains a "/", it is assumed to be a full docker image name, including the registry url and tag.
# Otherwise, the helm chart will use {{ .Values.global.registry }}/{{ .Values.image.name }}:{{ .Values.global.tag }}
image:
name: "operator"
nameOverride: ""
fullnameOverride: ""
deploymentAnnotations: {}
apiService:
type: ClusterIP
annotations: {}
webhookService:
type: ClusterIP
annotations: {}
runAsNonRoot: true
serviceReconciler:
enabled: true
ports:
protocol: TCP
port: 443
targetPort: 6500
resources: {}
extraEnvVars: {}
livenessProbe:
initialDelaySeconds: 3
periodSeconds: 3
failureThreshold: 5
readinessProbe:
initialDelaySeconds: 3
periodSeconds: 3
failureThreshold: 5
debug:
enabled: false
port: 40000
initialDelaySeconds: 30000
|
mikeee/dapr
|
charts/dapr/charts/dapr_operator/values.yaml
|
YAML
|
mit
| 978 |
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
|
mikeee/dapr
|
charts/dapr/charts/dapr_placement/.helmignore
|
none
|
mit
| 342 |
apiVersion: v1
appVersion: "1.0"
description: A Helm chart for Dapr Kubernetes placement
name: dapr_placement
version: '0.0.0'
|
mikeee/dapr
|
charts/dapr/charts/dapr_placement/Chart.yaml
|
YAML
|
mit
| 128 |
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "dapr_placement.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "dapr_placement.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "dapr_placement.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create initial cluster peer list.
*/}}
{{- define "dapr_placement.initialcluster" -}}
{{- print "dapr-placement-server-0=dapr-placement-server-0.dapr-placement-server." .Release.Namespace ".svc" .Values.global.dnsSuffix ":" .Values.ports.raftRPCPort ",dapr-placement-server-1=dapr-placement-server-1.dapr-placement-server." .Release.Namespace ".svc" .Values.global.dnsSuffix ":" .Values.ports.raftRPCPort ",dapr-placement-server-2=dapr-placement-server-2.dapr-placement-server." .Release.Namespace ".svc" .Values.global.dnsSuffix ":" .Values.ports.raftRPCPort -}}
{{- end -}}
|
mikeee/dapr
|
charts/dapr/charts/dapr_placement/templates/_helpers.tpl
|
tpl
|
mit
| 1,648 |
{{- if and (eq .Values.global.ha.enabled true) (eq .Values.global.actors.enabled true) (eq .Values.global.actors.serviceName "placement") }}
{{- if .Capabilities.APIVersions.Has "policy/v1" }}
apiVersion: policy/v1
{{- else }}
apiVersion: policy/v1beta1
{{- end }}
kind: PodDisruptionBudget
metadata:
name: dapr-placement-server-disruption-budget
namespace: {{ .Release.Namespace }}
labels:
app: dapr-placement-server
{{- range $key, $value := .Values.global.k8sLabels }}
{{ $key }}: {{ tpl $value $ }}
{{- end }}
spec:
{{- if .Values.global.ha.disruption.minimumAvailable }}
minAvailable: {{ .Values.global.ha.disruption.minimumAvailable }}
{{- end }}
{{- if .Values.global.ha.disruption.maximumUnavailable }}
maxUnavailable: {{ .Values.global.ha.disruption.maximumUnavailable }}
{{- end }}
selector:
matchLabels:
app: dapr-placement-server
{{- range $key, $value := .Values.global.k8sLabels }}
{{ $key }}: {{ tpl $value $ }}
{{- end }}
{{- with .Values.global.labels }}
{{- toYaml . | nindent 6 }}
{{- end }}
{{- end }}
|
mikeee/dapr
|
charts/dapr/charts/dapr_placement/templates/dapr_placement_poddisruptionbudget.yaml
|
YAML
|
mit
| 1,098 |
{{- if and (eq .Values.global.actors.enabled true) (eq .Values.global.actors.serviceName "placement") }}
kind: Service
apiVersion: v1
metadata:
name: dapr-placement-server
namespace: {{ .Release.Namespace }}
labels:
app: dapr-placement-server
{{- range $key, $value := .Values.global.k8sLabels }}
{{ $key }}: {{ tpl $value $ }}
{{- end }}
{{- if .Values.service.annotations }}
{{ toYaml .Values.service.annotations | indent 4}}
{{- end }}
spec:
selector:
app: dapr-placement-server
  # The placement service must be able to resolve pod addresses in order to join
  # the initial cluster peers before the pod is ready
publishNotReadyAddresses: true
ports:
- name: api
port: {{ .Values.ports.apiPort }}
- name: raft-node
port: {{ .Values.ports.raftRPCPort }}
clusterIP: None
{{- end }}
|
mikeee/dapr
|
charts/dapr/charts/dapr_placement/templates/dapr_placement_service.yaml
|
YAML
|
mit
| 798 |
{{- if and (eq .Values.global.actors.enabled true) (eq .Values.global.actors.serviceName "placement") }}
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: dapr-placement-server
namespace: {{ .Release.Namespace }}
labels:
app: dapr-placement-server
{{- range $key, $value := .Values.global.k8sLabels }}
{{ $key }}: {{ tpl $value $ }}
{{- end }}
spec:
{{- if eq .Values.scaleZero true }}
replicas: 0
{{- else if or (eq .Values.global.ha.enabled true) (eq .Values.ha true) }}
replicas: 3
{{- else }}
replicas: 1
{{- end }}
serviceName: dapr-placement-server
podManagementPolicy: Parallel
selector:
matchLabels:
app: dapr-placement-server
template:
metadata:
labels:
app: dapr-placement-server
{{- range $key, $value := .Values.global.k8sLabels }}
{{ $key }}: {{ tpl $value $ }}
{{- end }}
{{- with .Values.global.labels }}
{{- toYaml . | nindent 8 }}
{{- end }}
annotations:
dapr.io/control-plane: placement
{{- if eq .Values.global.prometheus.enabled true }}
prometheus.io/scrape: "{{ .Values.global.prometheus.enabled }}"
prometheus.io/port: "{{ .Values.global.prometheus.port }}"
prometheus.io/path: "/"
{{- end }}
{{- with .Values.statefulsetAnnotations }}
{{ toYaml . | indent 8 }}
{{- end }}
spec:
containers:
- name: dapr-placement-server
livenessProbe:
httpGet:
path: /healthz
port: 8080
{{- if eq .Values.debug.enabled false }}
initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
{{- else }}
initialDelaySeconds: {{ .Values.debug.initialDelaySeconds }}
{{- end }}
periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
readinessProbe:
httpGet:
path: /healthz
port: 8080
{{- if eq .Values.debug.enabled false }}
initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
{{- else }}
initialDelaySeconds: {{ .Values.debug.initialDelaySeconds }}
{{- end }}
periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
{{- if contains "/" .Values.image.name }}
image: "{{ .Values.image.name }}"
{{- else }}
image: "{{ .Values.global.registry }}/{{ .Values.image.name }}:{{ .Values.global.tag }}"
{{- end }}
imagePullPolicy: {{ .Values.global.imagePullPolicy }}
resources:
{{ toYaml .Values.resources | indent 10 }}
volumeMounts:
- name: dapr-trust-bundle
mountPath: /var/run/secrets/dapr.io/tls
readOnly: true
{{- if .Values.global.mtls.mountSentryVolume }}
- name: dapr-identity-token
mountPath: /var/run/secrets/dapr.io/sentrytoken
{{- end }}
{{- with .Values.global.extraVolumeMounts.placement }}
{{- toYaml . | nindent 10 }}
{{- end }}
{{- if or (eq .Values.global.ha.enabled true) (eq .Values.ha true) }}
{{- if eq .Values.cluster.forceInMemoryLog false }}
- name: raft-log
{{- if eq .Values.global.daprControlPlaneOs "windows" }}
mountPath: {{ .Values.cluster.logStoreWinPath }}
{{- else }}
mountPath: {{ .Values.cluster.logStorePath }}
{{- end }}
{{- end }}
{{- end }}
ports:
- containerPort: {{ .Values.ports.apiPort }}
name: api
- containerPort: {{ .Values.ports.raftRPCPort }}
name: raft-node
{{- if eq .Values.global.prometheus.enabled true }}
- name: metrics
containerPort: {{ .Values.global.prometheus.port }}
protocol: TCP
{{- end }}
command:
{{- if eq .Values.debug.enabled false }}
- "/placement"
{{- else }}
- "/dlv"
{{- end }}
args:
{{- if eq .Values.debug.enabled true }}
- "--listen=:{{ .Values.debug.port }}"
- "--accept-multiclient"
- "--headless=true"
- "--log"
- "--api-version=2"
- "exec"
- "/placement"
- "--"
{{- end }}
{{- if or (eq .Values.global.ha.enabled true) (eq .Values.ha true) }}
- "--id"
- "$(PLACEMENT_ID)"
- "--initial-cluster"
- {{ template "dapr_placement.initialcluster" . }}
{{- if eq .Values.cluster.forceInMemoryLog false }}
- "--raft-logstore-path"
{{- if eq .Values.global.daprControlPlaneOs "windows" }}
- "{{ .Values.cluster.logStoreWinPath }}\\cluster-v2-$(PLACEMENT_ID)"
{{- else }}
- "{{ .Values.cluster.logStorePath }}/cluster-v2-$(PLACEMENT_ID)"
{{- end }}
{{- end }}
{{- end }}
- "--log-level"
- {{ .Values.logLevel }}
{{- if eq .Values.global.logAsJson true }}
- "--log-as-json"
{{- end }}
{{- if eq .Values.metadataEnabled true }}
- "--metadata-enabled"
{{- end }}
{{- if eq .Values.global.prometheus.enabled true }}
- "--enable-metrics"
- "--replicationFactor"
- "{{ .Values.replicationFactor }}"
- "--max-api-level"
- "{{ .Values.maxActorApiLevel }}"
- "--min-api-level"
- "{{ .Values.minActorApiLevel }}"
- "--metrics-port"
- "{{ .Values.global.prometheus.port }}"
{{- else }}
- "--enable-metrics=false"
{{- end }}
- "--tls-enabled"
- "--trust-domain={{ .Values.global.mtls.controlPlaneTrustDomain }}"
- "--trust-anchors-file=/var/run/secrets/dapr.io/tls/ca.crt"
- "--sentry-address={{ if .Values.global.mtls.sentryAddress }}{{ .Values.global.mtls.sentryAddress }}{{ else }}dapr-sentry.{{ .Release.Namespace }}.svc.cluster.local:443{{ end }}"
- "--mode=kubernetes"
{{- if eq .Values.global.daprControlPlaneOs "linux" }}
securityContext:
{{- if eq .Values.cluster.forceInMemoryLog true }}
runAsNonRoot: {{ .Values.runAsNonRoot }}
{{- else }}
runAsUser: 0
{{- end }}
{{- if eq .Values.debug.enabled true }}
capabilities:
add: ["SYS_PTRACE"]
{{- else }}
readOnlyRootFilesystem: true
capabilities:
drop: ["ALL"]
{{- end }}
{{- if .Values.global.seccompProfile }}
seccompProfile:
type: {{ .Values.global.seccompProfile }}
{{- end }}
{{- end }}
env:
- name: PLACEMENT_ID
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- range $name, $value := .Values.extraEnvVars }}
- name: "{{ $name }}"
value: "{{ $value }}"
{{- end }}
serviceAccountName: dapr-placement
volumes:
- name: dapr-trust-bundle
configMap:
name: dapr-trust-bundle
{{- if .Values.global.mtls.mountSentryVolume }}
- name: dapr-identity-token
projected:
sources:
- serviceAccountToken:
path: token
expirationSeconds: 600
audience: "spiffe://{{ .Values.global.mtls.controlPlaneTrustDomain }}/ns/{{ .Release.Namespace }}/dapr-sentry"
{{- end }}
{{- with .Values.global.extraVolumes.placement }}
{{- toYaml . | nindent 6 }}
{{- end }}
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- {{ .Values.global.daprControlPlaneOs }}
{{- if .Values.global.daprControlPlaneArch }}
- key: kubernetes.io/arch
operator: In
values:
- {{ .Values.global.daprControlPlaneArch }}
{{- end }}
{{- if .Values.global.ha.enabled }}
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- dapr-placement-server
topologyKey: topology.kubernetes.io/zone
{{- end }}
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- include "dapr.imagePullSecrets" (dict "imagePullSecrets" .Values.global.imagePullSecrets) | nindent 8 -}}
{{- end }}
{{- if .Values.global.nodeSelector }}
nodeSelector:
{{ toYaml .Values.global.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.global.tolerations }}
tolerations:
{{ toYaml .Values.global.tolerations | indent 8 }}
{{- end }}
{{- if or (eq .Values.global.ha.enabled true) (eq .Values.ha true) }}
{{- if eq .Values.cluster.forceInMemoryLog false }}
volumeClaimTemplates:
- metadata:
name: raft-log
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ .Values.volumeclaims.storageSize }}
{{- if .Values.volumeclaims.storageClassName }}
storageClassName: {{ .Values.volumeclaims.storageClassName }}
{{- end }}
{{- end }}
{{- end }}
{{- if .Values.global.priorityClassName }}
priorityClassName:
{{ toYaml .Values.global.priorityClassName | indent 8 }}
{{- end }}
{{- end }}
|
mikeee/dapr
|
charts/dapr/charts/dapr_placement/templates/dapr_placement_statefulset.yaml
|
YAML
|
mit
| 9,433 |
logLevel: info
component: placement
# Override this to use a custom placement service image.
# If the image name contains a "/", it is assumed to be a full docker image name, including the registry url and tag.
# Otherwise, the helm chart will use {{ .Values.global.registry }}/{{ .Values.image.name }}:{{ .Values.global.tag }}
image:
name: "placement"
nameOverride: ""
fullnameOverride: ""
statefulsetAnnotations: {}
service:
annotations: {}
ports:
protocol: TCP
apiPort: 50005
raftRPCPort: 8201
scaleZero: false
ha: false
maxActorApiLevel: 10
minActorApiLevel: 0
cluster:
forceInMemoryLog: false
logStorePath: /var/run/dapr/raft-log
logStoreWinPath: C:\\raft-log
volumeclaims:
storageSize: 1Gi
storageClassName:
replicationFactor: 100
metadataEnabled: false
livenessProbe:
initialDelaySeconds: 10
periodSeconds: 3
failureThreshold: 5
readinessProbe:
initialDelaySeconds: 3
periodSeconds: 3
failureThreshold: 5
debug:
enabled: false
port: 40000
initialDelaySeconds: 30000
runAsNonRoot: true
resources: {}
extraEnvVars: {}
|
mikeee/dapr
|
charts/dapr/charts/dapr_placement/values.yaml
|
YAML
|
mit
| 1,077 |
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
|
mikeee/dapr
|
charts/dapr/charts/dapr_rbac/.helmignore
|
none
|
mit
| 342 |
apiVersion: v1
appVersion: "1.0"
description: A Helm chart for Dapr Kubernetes RBAC components
name: dapr_rbac
version: '0.0.0'
|
mikeee/dapr
|
charts/dapr/charts/dapr_rbac/Chart.yaml
|
YAML
|
mit
| 129 |
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "dapr_rbac.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "dapr_rbac.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "dapr_rbac.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
|
mikeee/dapr
|
charts/dapr/charts/dapr_rbac/templates/_helpers.tpl
|
tpl
|
mit
| 1,051 |
{{/*
ServiceAccount and RBAC for the Dapr sidecar injector.
The ClusterRole is always created (the injector needs to patch its own
cluster-scoped MutatingWebhookConfiguration); read access to Dapr
components moves into the namespaced Role when
.Values.global.rbac.namespaced is true.
*/}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: dapr-injector
  namespace: {{ .Release.Namespace }}
  labels:
    {{- range $key, $value := .Values.global.k8sLabels }}
    {{ $key }}: {{ tpl $value $ }}
    {{- end }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: dapr-injector
  labels:
    {{- range $key, $value := .Values.global.k8sLabels }}
    {{ $key }}: {{ tpl $value $ }}
    {{- end }}
rules:
- apiGroups: [""]
  resources: ["serviceaccounts"]
  verbs: ["get", "list"]
# Allows patching only the injector's own MutatingWebhookConfiguration
# (scoped via resourceNames).
- apiGroups: ["admissionregistration.k8s.io"]
  resources: ["mutatingwebhookconfigurations"]
  verbs: ["patch"]
  resourceNames: ["dapr-sidecar-injector"]
{{- if not .Values.global.rbac.namespaced }}
- apiGroups: ["dapr.io"]
  resources: ["components"]
  verbs: [ "get", "list"]
{{- end }}
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: dapr-injector
  labels:
    {{- range $key, $value := .Values.global.k8sLabels }}
    {{ $key }}: {{ tpl $value $ }}
    {{- end }}
subjects:
- kind: ServiceAccount
  name: dapr-injector
  namespace: {{ .Release.Namespace }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: dapr-injector
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: dapr-injector
  namespace: {{ .Release.Namespace }}
  labels:
    {{- range $key, $value := .Values.global.k8sLabels }}
    {{ $key }}: {{ tpl $value $ }}
    {{- end }}
rules:
# Read access is restricted to the dapr-trust-bundle secret only.
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get"]
  resourceNames: ["dapr-trust-bundle"]
{{- if eq .Values.global.rbac.namespaced true }}
- apiGroups: ["dapr.io"]
  resources: ["components"]
  verbs: [ "get", "list"]
{{- end }}
- apiGroups: ["dapr.io"]
  resources: ["configurations"]
  verbs: [ "get" ]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: dapr-injector
  labels:
    {{- range $key, $value := .Values.global.k8sLabels }}
    {{ $key }}: {{ tpl $value $ }}
    {{- end }}
subjects:
- kind: ServiceAccount
  name: dapr-injector
  namespace: {{ .Release.Namespace }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: dapr-injector
|
mikeee/dapr
|
charts/dapr/charts/dapr_rbac/templates/injector.yaml
|
YAML
|
mit
| 2,205 |
{{/*
ServiceAccount and RBAC for the Dapr operator.
Cluster-scoped permissions are only created when
.Values.global.rbac.namespaced is false; in namespaced mode the
equivalent permissions are granted through the Role below instead.
*/}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: dapr-operator
  namespace: {{ .Release.Namespace }}
  labels:
    {{- range $key, $value := .Values.global.k8sLabels }}
    {{ $key }}: {{ tpl $value $ }}
    {{- end }}
---
{{- if not .Values.global.rbac.namespaced }}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: dapr-operator-admin
  labels:
    {{- range $key, $value := .Values.global.k8sLabels }}
    {{ $key }}: {{ tpl $value $ }}
    {{- end }}
rules:
- apiGroups: ["apiextensions.k8s.io"]
  resources: ["customresourcedefinitions"]
  verbs: ["get", "patch"]
- apiGroups: ["apps"]
  resources: ["deployments", "deployments/finalizers"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["apps"]
  resources: ["deployments/finalizers"]
  verbs: ["update"]
- apiGroups: ["apps"]
  resources: ["statefulsets", "statefulsets/finalizers"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["apps"]
  resources: ["statefulsets/finalizers"]
  verbs: ["update"]
- apiGroups: [""]
  resources: ["pods"]
  {{- if .Values.global.operator.watchdogCanPatchPodLabels }}
  verbs: ["get", "list", "delete", "watch", "patch"]
  {{- else }}
  verbs: ["get", "list", "delete", "watch"]
  {{- end }}
- apiGroups: [""]
  resources: ["services","services/finalizers"]
  verbs: ["get", "list", "watch", "update", "create"]
- apiGroups: [""]
  resources: ["services"]
  verbs: ["delete"]
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["dapr.io"]
  resources: ["components", "configurations", "subscriptions", "resiliencies", "httpendpoints"]
  verbs: [ "get", "list", "watch"]
{{- /* BUGFIX: this Argo block previously sat OUTSIDE the namespaced guard,
so with global.rbac.namespaced=true and the reconciler enabled it rendered
bare rule entries into a document with no Kind (an invalid manifest).
The namespaced Role below carries its own copy of these rules. */}}
{{- if .Values.global.argoRolloutServiceReconciler.enabled }}
- apiGroups: ["argoproj.io"]
  resources: ["rollouts"]
  verbs: ["get", "list", "watch", "delete"]
- apiGroups: ["argoproj.io"]
  resources: ["rollouts/finalizers"]
  verbs: ["update"]
{{- end }}
{{- end }}
---
{{- if not .Values.global.rbac.namespaced }}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: dapr-operator-admin
  labels:
    {{- range $key, $value := .Values.global.k8sLabels }}
    {{ $key }}: {{ tpl $value $ }}
    {{- end }}
subjects:
- kind: ServiceAccount
  name: dapr-operator
  namespace: {{ .Release.Namespace }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: dapr-operator-admin
{{- end }}
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: dapr-operator
  namespace: {{ .Release.Namespace }}
  labels:
    {{- range $key, $value := .Values.global.k8sLabels }}
    {{ $key }}: {{ tpl $value $ }}
    {{- end }}
rules:
# Leader-election leases and config, scoped to the operator's own objects.
- apiGroups: ["coordination.k8s.io"]
  resources: ["leases"]
  verbs: ["get", "list", "watch", "update", "create"]
  resourceNames: ["operator.dapr.io", "webhooks.dapr.io"]
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get", "list", "watch", "update", "create"]
  resourceNames: ["operator.dapr.io", "webhooks.dapr.io"]
# We cannot use resourceNames for create because Kubernetes doesn't necessarily
# know resource names at authorization time.
- apiGroups: ["coordination.k8s.io"]
  resources: ["leases"]
  verbs: ["create"]
- apiGroups: [""]
  resources: ["configmaps", "events"]
  verbs: ["create"]
{{- if eq .Values.global.rbac.namespaced true }}
- apiGroups: ["apiextensions.k8s.io"]
  resources: ["customresourcedefinitions"]
  verbs: ["get", "patch"]
- apiGroups: ["apps"]
  resources: ["deployments", "deployments/finalizers"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["apps"]
  resources: ["deployments/finalizers"]
  verbs: ["update"]
- apiGroups: ["apps"]
  resources: ["statefulsets", "statefulsets/finalizers"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["apps"]
  resources: ["statefulsets/finalizers"]
  verbs: ["update"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "list", "delete"]
- apiGroups: [""]
  resources: ["services","services/finalizers"]
  verbs: ["get", "list", "watch", "update", "create"]
- apiGroups: [""]
  resources: ["services"]
  verbs: ["delete"]
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["dapr.io"]
  resources: ["components", "configurations", "subscriptions", "resiliencies", "httpendpoints"]
  verbs: [ "get", "list", "watch"]
{{- end }}
{{- if .Values.global.argoRolloutServiceReconciler.enabled }}
- apiGroups: ["argoproj.io"]
  resources: ["rollouts"]
  verbs: ["get", "list", "watch", "delete"]
- apiGroups: ["argoproj.io"]
  resources: ["rollouts/finalizers"]
  verbs: ["update"]
{{- end }}
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: dapr-operator
  namespace: {{ .Release.Namespace }}
  labels:
    {{- range $key, $value := .Values.global.k8sLabels }}
    {{ $key }}: {{ tpl $value $ }}
    {{- end }}
subjects:
- kind: ServiceAccount
  name: dapr-operator
  namespace: {{ .Release.Namespace }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: dapr-operator
|
mikeee/dapr
|
charts/dapr/charts/dapr_rbac/templates/operator.yaml
|
YAML
|
mit
| 5,135 |
{{/*
ServiceAccount and (Cluster)Role/(Cluster)RoleBinding for the Placement
service. Rendered only when actors are enabled and the actors service is
"placement". The role currently grants no permissions (rules: []).
Role vs ClusterRole is selected by .Values.global.rbac.namespaced.
*/}}
{{- if and (eq .Values.global.actors.enabled true) (eq .Values.global.actors.serviceName "placement") }}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: dapr-placement
  namespace: {{ .Release.Namespace }}
  labels:
    {{- range $key, $value := .Values.global.k8sLabels }}
    {{ $key }}: {{ tpl $value $ }}
    {{- end }}
{{- end }}
{{/* NOTE(review): the "---" separators below sit outside the guards, so
disabling placement still renders empty documents; Helm tolerates this. */}}
---
{{- if and (eq .Values.global.actors.enabled true) (eq .Values.global.actors.serviceName "placement") }}
{{- if eq .Values.global.rbac.namespaced true }}
kind: Role
{{- else }}
kind: ClusterRole
{{- end }}
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: dapr-placement
  labels:
    {{- range $key, $value := .Values.global.k8sLabels }}
    {{ $key }}: {{ tpl $value $ }}
    {{- end }}
rules: []
{{- end }}
---
{{- if and (eq .Values.global.actors.enabled true) (eq .Values.global.actors.serviceName "placement") }}
{{- if eq .Values.global.rbac.namespaced true }}
kind: RoleBinding
{{- else }}
kind: ClusterRoleBinding
{{- end }}
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: dapr-placement
  labels:
    {{- range $key, $value := .Values.global.k8sLabels }}
    {{ $key }}: {{ tpl $value $ }}
    {{- end }}
subjects:
- kind: ServiceAccount
  name: dapr-placement
  namespace: {{ .Release.Namespace }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  {{- if eq .Values.global.rbac.namespaced true }}
  kind: Role
  {{- else }}
  kind: ClusterRole
  {{- end }}
  name: dapr-placement
{{- end }}
|
mikeee/dapr
|
charts/dapr/charts/dapr_rbac/templates/placement.yaml
|
YAML
|
mit
| 1,460 |
{{/* Allows to create a ResourceQuota for the priority class if it is set to system-node-critical or system-cluster-critical
this is required in some cases to ensure that the priority class is allowed in the namespace
https://kubernetes.io/docs/concepts/policy/resource-quotas/#limit-priority-class-consumption-by-default */}}
{{- if or (eq .Values.global.priorityClassName "system-node-critical") (eq .Values.global.priorityClassName "system-cluster-critical") }}
apiVersion: v1
kind: ResourceQuota
metadata:
  name: system-critical-quota
  namespace: {{ .Release.Namespace }}
  labels:
    {{- range $key, $value := .Values.global.k8sLabels }}
    {{ $key }}: {{ tpl $value $ }}
    {{- end }}
spec:
  # No hard limits: the quota only needs to exist for the scope to be
  # permitted in this namespace.
  scopeSelector:
    matchExpressions:
      # FIX: removed the space before the colon ("operator :"), and quote
      # the templated value so the rendered scalar is always a string.
      - operator: In
        scopeName: PriorityClass
        values: [{{ .Values.global.priorityClassName | quote }}]
{{- end }}
|
mikeee/dapr
|
charts/dapr/charts/dapr_rbac/templates/priorityclassresourcequota.yaml
|
YAML
|
mit
| 870 |
{{/*
Optional Role/RoleBinding letting the "default" ServiceAccount read
secrets in .Values.secretReader.namespace. Controlled by
.Values.secretReader.enabled.
*/}}
{{- if .Values.secretReader.enabled }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: secret-reader
  namespace: {{ .Values.secretReader.namespace }}
  labels:
    {{- range $key, $value := .Values.global.k8sLabels }}
    {{ $key }}: {{ tpl $value $ }}
    {{- end }}
rules:
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: dapr-secret-reader
  namespace: {{ .Values.secretReader.namespace }}
  labels:
    {{- range $key, $value := .Values.global.k8sLabels }}
    {{ $key }}: {{ tpl $value $ }}
    {{- end }}
subjects:
# NOTE(review): the ServiceAccount subject carries no explicit namespace;
# verify the RBAC API accepts this and that it resolves to the intended
# namespace (secretReader.namespace) rather than the release namespace.
- kind: ServiceAccount
  name: default
roleRef:
  kind: Role
  name: secret-reader
  apiGroup: rbac.authorization.k8s.io
{{- end }}
|
mikeee/dapr
|
charts/dapr/charts/dapr_rbac/templates/secret-reader.yaml
|
YAML
|
mit
| 772 |
{{/*
ServiceAccount and RBAC for Dapr Sentry (the certificate authority).
The ClusterRole always allows creating TokenReview objects; wider read
access moves into the namespaced Role when
.Values.global.rbac.namespaced is true.
*/}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: dapr-sentry
  namespace: {{ .Release.Namespace }}
  labels:
    {{- range $key, $value := .Values.global.k8sLabels }}
    {{ $key }}: {{ tpl $value $ }}
    {{- end }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: dapr-sentry
  labels:
    {{- range $key, $value := .Values.global.k8sLabels }}
    {{ $key }}: {{ tpl $value $ }}
    {{- end }}
rules:
# TokenReview creation — presumably used to validate workload service
# account tokens before issuing certificates; confirm against the sentry code.
- apiGroups: ["authentication.k8s.io"]
  resources: ["tokenreviews"]
  verbs: ["create"]
{{- if not .Values.global.rbac.namespaced }}
- apiGroups: ["dapr.io"]
  resources: ["configurations"]
  verbs: ["list", "get", "watch"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["list", "get", "watch"]
{{- end }}
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: dapr-sentry
  labels:
    {{- range $key, $value := .Values.global.k8sLabels }}
    {{ $key }}: {{ tpl $value $ }}
    {{- end }}
subjects:
- kind: ServiceAccount
  name: dapr-sentry
  namespace: {{ .Release.Namespace }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: dapr-sentry
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: dapr-sentry
  namespace: {{ .Release.Namespace }}
  labels:
    {{- range $key, $value := .Values.global.k8sLabels }}
    {{ $key }}: {{ tpl $value $ }}
    {{- end }}
rules:
# Write access is scoped to the dapr-trust-bundle Secret/ConfigMap only.
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get", "update","delete"]
  resourceNames: ["dapr-trust-bundle"]
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get", "update", "watch", "list"]
  resourceNames: ["dapr-trust-bundle"]
- apiGroups: ["dapr.io"]
  resources: ["configurations"]
  verbs: ["list", "get", "watch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: dapr-sentry
  namespace: {{ .Release.Namespace }}
  labels:
    {{- range $key, $value := .Values.global.k8sLabels }}
    {{ $key }}: {{ tpl $value $ }}
    {{- end }}
subjects:
- kind: ServiceAccount
  name: dapr-sentry
  namespace: {{ .Release.Namespace }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: dapr-sentry
|
mikeee/dapr
|
charts/dapr/charts/dapr_rbac/templates/sentry.yaml
|
YAML
|
mit
| 2,193 |
# Controls the optional Role/RoleBinding that lets the "default"
# ServiceAccount read secrets (see templates/secret-reader.yaml).
secretReader:
  enabled: true
  # Namespace the Role/RoleBinding are created in.
  namespace: default
component: rbac
|
mikeee/dapr
|
charts/dapr/charts/dapr_rbac/values.yaml
|
YAML
|
mit
| 71 |
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
|
mikeee/dapr
|
charts/dapr/charts/dapr_sentry/.helmignore
|
none
|
mit
| 342 |
apiVersion: v1
appVersion: "1.0"
description: A Helm chart for Dapr Sentry
name: dapr_sentry
version: '0.0.0'
|
mikeee/dapr
|
charts/dapr/charts/dapr_sentry/Chart.yaml
|
YAML
|
mit
| 110 |
{{/* vim: set filetype=mustache: */}}
{{/*
Chart name, optionally overridden by .Values.nameOverride, truncated to
the 63-character Kubernetes name limit.
*/}}
{{- define "dapr_sentry.name" -}}
{{- $base := default .Chart.Name .Values.nameOverride -}}
{{- $base | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Fully qualified app name, truncated to 63 characters (DNS name limit).
Precedence: fullnameOverride, then the release name alone when it already
contains the chart name, then "<release>-<chart>".
*/}}
{{- define "dapr_sentry.fullname" -}}
{{- $base := default .Chart.Name .Values.nameOverride -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else if contains $base .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $base | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{/*
Chart name and version as used by the chart label; "+" is not a valid
label character, so it is replaced with "_".
*/}}
{{- define "dapr_sentry.chart" -}}
{{- $nameVersion := printf "%s-%s" .Chart.Name .Chart.Version -}}
{{- $nameVersion | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
|
mikeee/dapr
|
charts/dapr/charts/dapr_sentry/templates/_helpers.tpl
|
tpl
|
mit
| 1,057 |
{{/*
Sentry control plane: the dapr-trust-bundle Secret and ConfigMap
(populated only when certificates are supplied in values) and the
dapr-sentry Deployment.
*/}}
apiVersion: v1
kind: Secret
metadata:
  name: dapr-trust-bundle
  namespace: {{ .Release.Namespace }}
  labels:
    app: dapr-sentry
    {{- range $key, $value := .Values.global.k8sLabels }}
    {{ $key }}: {{ tpl $value $ }}
    {{- end }}
{{/* The Secret data is rendered only when all three PEMs are provided;
otherwise the Secret is created empty — presumably filled by Sentry at
runtime (TODO confirm). */}}
{{ if and .Values.tls.issuer.certPEM (and .Values.tls.issuer.keyPEM .Values.tls.root.certPEM) }}
data:
  issuer.crt: {{ b64enc .Values.tls.issuer.certPEM | trim }}
  issuer.key: {{ b64enc .Values.tls.issuer.keyPEM | trim }}
  ca.crt: {{ b64enc .Values.tls.root.certPEM | trim }}
{{ end }}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: dapr-trust-bundle
  namespace: {{ .Release.Namespace }}
  labels:
    app: dapr-sentry
    {{- range $key, $value := .Values.global.k8sLabels }}
    {{ $key }}: {{ tpl $value $ }}
    {{- end }}
{{ if .Values.tls.root.certPEM }}
data:
  ca.crt: {{- .Values.tls.root.certPEM | toYaml | indent 1}}
{{end}}
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dapr-sentry
  namespace: {{ .Release.Namespace }}
  labels:
    app: dapr-sentry
    {{- range $key, $value := .Values.global.k8sLabels }}
    {{ $key }}: {{ tpl $value $ }}
    {{- end }}
spec:
  # HA mode uses the global replica count; otherwise the chart-local one.
  {{- if eq .Values.global.ha.enabled true }}
  replicas: {{ .Values.global.ha.replicaCount }}
  {{- else }}
  replicas: {{ .Values.replicaCount }}
  {{- end }}
  selector:
    matchLabels:
      app: dapr-sentry
  template:
    metadata:
      labels:
        app: dapr-sentry
        {{- range $key, $value := .Values.global.k8sLabels }}
        {{ $key }}: {{ tpl $value $ }}
        {{- end }}
        {{- with .Values.global.labels }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      annotations:
        dapr.io/control-plane: sentry
        {{- if eq .Values.global.prometheus.enabled true }}
        prometheus.io/scrape: "{{ .Values.global.prometheus.enabled }}"
        prometheus.io/port: "{{ .Values.global.prometheus.port }}"
        prometheus.io/path: "/"
        {{- end }}
        {{- with .Values.deploymentAnnotations }}
        {{ toYaml . | indent 8 }}
        {{- end }}
    spec:
      containers:
      - name: dapr-sentry
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
          # Debug mode (dlv) needs a much longer initial delay.
          {{- if eq .Values.debug.enabled false }}
          initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
          {{- else }}
          initialDelaySeconds: {{ .Values.debug.initialDelaySeconds }}
          {{- end }}
          periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
          failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
        readinessProbe:
          httpGet:
            path: /healthz
            port: 8080
          {{- if eq .Values.debug.enabled false }}
          initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
          {{- else }}
          initialDelaySeconds: {{ .Values.debug.initialDelaySeconds }}
          {{- end }}
          periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
          failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
        # An image name containing "/" is treated as a full reference;
        # otherwise it is composed from the global registry and tag.
        {{- if contains "/" .Values.image.name }}
        image: "{{ .Values.image.name }}"
        {{- else }}
        image: "{{ .Values.global.registry }}/{{ .Values.image.name }}:{{ .Values.global.tag }}"
        {{- end }}
        imagePullPolicy: {{ .Values.global.imagePullPolicy }}
        {{- if eq .Values.global.daprControlPlaneOs "linux" }}
        securityContext:
          runAsNonRoot: {{ .Values.runAsNonRoot }}
          # Debugging requires ptrace; otherwise lock the container down.
          {{- if eq .Values.debug.enabled true }}
          capabilities:
            add: ["SYS_PTRACE"]
          {{- else }}
          readOnlyRootFilesystem: true
          capabilities:
            drop: ["ALL"]
          {{- end }}
          {{- if .Values.global.seccompProfile }}
          seccompProfile:
            type: {{ .Values.global.seccompProfile }}
          {{- end }}
        {{- end }}
        env:
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        {{- range $name, $value := .Values.extraEnvVars }}
        - name: "{{ $name }}"
          value: "{{ $value }}"
        {{- end }}
        ports:
        - containerPort: 50001
        {{- if eq .Values.global.prometheus.enabled true }}
        - name: metrics
          containerPort: {{ .Values.global.prometheus.port }}
          protocol: TCP
        {{- end }}
        {{- if eq .Values.debug.enabled true }}
        - name: debug
          containerPort: {{ .Values.debug.port }}
          protocol: TCP
        {{- end }}
        resources:
{{ toYaml .Values.resources | indent 10 }}
        volumeMounts:
          - name: credentials
            mountPath: /var/run/secrets/dapr.io/credentials
            readOnly: true
        {{- with .Values.global.extraVolumeMounts.sentry }}
          {{- toYaml . | nindent 10 }}
        {{- end }}
        # Debug mode wraps the sentry binary in the Delve debugger.
        command:
        {{- if eq .Values.debug.enabled false }}
        - "/sentry"
        {{- else }}
        - "/dlv"
        {{- end }}
        args:
        {{- if eq .Values.debug.enabled true }}
        - "--listen=:{{ .Values.debug.port }}"
        - "--accept-multiclient"
        - "--headless=true"
        - "--log"
        - "--api-version=2"
        - "exec"
        - "/sentry"
        - "--"
        {{- end }}
        - "--log-level"
        - {{ .Values.logLevel }}
        {{- if eq .Values.global.logAsJson true }}
        - "--log-as-json"
        {{- end }}
        {{- if eq .Values.global.prometheus.enabled true }}
        - "--enable-metrics"
        - "--metrics-port"
        - "{{ .Values.global.prometheus.port }}"
        {{- else }}
        - "--enable-metrics=false"
        {{- end }}
        - "--trust-domain"
        - {{ .Values.global.mtls.controlPlaneTrustDomain }}
        {{- with .Values.global.issuerFilenames }}
        - "--issuer-ca-filename"
        - "{{ .ca }}"
        - "--issuer-certificate-filename"
        - "{{ .cert }}"
        - "--issuer-key-filename"
        - "{{ .key }}"
        {{- end }}
      serviceAccountName: dapr-sentry
      volumes:
        - name: credentials
          secret:
            secretName: dapr-trust-bundle
      {{- with .Values.global.extraVolumes.sentry }}
        {{- toYaml . | nindent 8 }}
      {{- end }}
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - {{ .Values.global.daprControlPlaneOs }}
              {{- if .Values.global.daprControlPlaneArch }}
              - key: kubernetes.io/arch
                operator: In
                values:
                - {{ .Values.global.daprControlPlaneArch }}
              {{- end }}
        # In HA mode, prefer spreading replicas across zones.
        {{- if .Values.global.ha.enabled }}
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              podAffinityTerm:
                labelSelector:
                  matchExpressions:
                  - key: app
                    operator: In
                    values:
                    - dapr-sentry
                topologyKey: topology.kubernetes.io/zone
        {{- end }}
      {{- if .Values.global.imagePullSecrets }}
      imagePullSecrets:
        {{- include "dapr.imagePullSecrets" (dict "imagePullSecrets" .Values.global.imagePullSecrets) | nindent 8 -}}
      {{- end }}
      {{- if .Values.global.nodeSelector }}
      nodeSelector:
{{ toYaml .Values.global.nodeSelector | indent 8 }}
      {{- end }}
      {{- if .Values.global.tolerations }}
      tolerations:
{{ toYaml .Values.global.tolerations | indent 8 }}
      {{- end }}
      {{- if .Values.global.priorityClassName }}
      priorityClassName:
{{ toYaml .Values.global.priorityClassName | indent 8 }}
      {{- end }}
|
mikeee/dapr
|
charts/dapr/charts/dapr_sentry/templates/dapr_sentry_deployment.yaml
|
YAML
|
mit
| 7,570 |
{{/*
PodDisruptionBudget for Sentry, created only in HA mode.
Uses policy/v1 when the cluster supports it, falling back to
policy/v1beta1 for older clusters.
*/}}
{{- if eq .Values.global.ha.enabled true }}
{{- if .Capabilities.APIVersions.Has "policy/v1" }}
apiVersion: policy/v1
{{- else }}
apiVersion: policy/v1beta1
{{- end }}
kind: PodDisruptionBudget
metadata:
  name: dapr-sentry-budget
  namespace: {{ .Release.Namespace }}
  labels:
    app: dapr-sentry
    {{- range $key, $value := .Values.global.k8sLabels }}
    {{ $key }}: {{ tpl $value $ }}
    {{- end }}
spec:
  {{- if .Values.global.ha.disruption.minimumAvailable }}
  minAvailable: {{ .Values.global.ha.disruption.minimumAvailable }}
  {{- end }}
  {{- if .Values.global.ha.disruption.maximumUnavailable }}
  maxUnavailable: {{ .Values.global.ha.disruption.maximumUnavailable }}
  {{- end }}
  selector:
    matchLabels:
      app: dapr-sentry
      {{- range $key, $value := .Values.global.k8sLabels }}
      {{ $key }}: {{ tpl $value $ }}
      {{- end }}
      {{- with .Values.global.labels }}
      {{- toYaml . | nindent 6 }}
      {{- end }}
{{- end }}
|
mikeee/dapr
|
charts/dapr/charts/dapr_sentry/templates/dapr_sentry_poddisruptionbudget.yaml
|
YAML
|
mit
| 960 |
{{/*
Service exposing Sentry's gRPC port to the cluster.
*/}}
kind: Service
apiVersion: v1
metadata:
  name: dapr-sentry
  namespace: {{ .Release.Namespace }}
  labels:
    {{- range $key, $value := .Values.global.k8sLabels }}
    {{ $key }}: {{ tpl $value $ }}
    {{- end }}
{{- /* BUGFIX: the annotations map was previously rendered at indent 4
without an "annotations:" key, merging its entries into metadata.labels. */}}
{{- if .Values.service.annotations }}
  annotations:
{{ toYaml .Values.service.annotations | indent 4 }}
{{- end }}
spec:
  selector:
    app: dapr-sentry
  type: {{ .Values.service.type }}
  ports:
  - protocol: TCP
    port: {{ .Values.ports.port }}
    targetPort: {{ .Values.ports.targetPort }}
    name: grpc
|
mikeee/dapr
|
charts/dapr/charts/dapr_sentry/templates/dapr_sentry_service.yaml
|
YAML
|
mit
| 513 |
# Replica count used when global.ha.enabled is false.
replicaCount: 1
logLevel: info
component: sentry
# Override this to use a custom sentry service image.
# If the image name contains a "/", it is assumed to be a full docker image name, including the registry url and tag.
# Otherwise, the helm chart will use {{ .Values.global.registry }}/{{ .Values.image.name }}:{{ .Values.global.tag }}
image:
  name: "sentry"
nameOverride: ""
fullnameOverride: ""
deploymentAnnotations: {}
service:
  type: ClusterIP
  annotations: {}
# Service port mapping; targetPort 50001 matches the container port in the
# deployment template. NOTE(review): "protocol" here appears unused by the
# service template (it hardcodes TCP) — confirm before relying on it.
ports:
  protocol: TCP
  port: 443
  targetPort: 50001
# PEM-encoded certificates; when issuer cert+key and root cert are all set
# they are written into the dapr-trust-bundle Secret at install time.
tls:
  issuer:
    certPEM: ""
    keyPEM: ""
  root:
    certPEM: ""
trustDomain: cluster.local
livenessProbe:
  initialDelaySeconds: 3
  periodSeconds: 3
  failureThreshold: 5
readinessProbe:
  initialDelaySeconds: 3
  periodSeconds: 3
  failureThreshold: 5
# Debug mode runs sentry under the Delve debugger (see deployment template).
debug:
  enabled: false
  port: 40000
  initialDelaySeconds: 30000
runAsNonRoot: true
resources: {}
extraEnvVars: {}
|
mikeee/dapr
|
charts/dapr/charts/dapr_sentry/values.yaml
|
YAML
|
mit
| 916 |
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
|
mikeee/dapr
|
charts/dapr/charts/dapr_sidecar_injector/.helmignore
|
none
|
mit
| 342 |
apiVersion: v1
appVersion: "1.0"
description: A Helm chart for the Dapr sidecar injector
name: dapr_sidecar_injector
version: '0.0.0'
|
mikeee/dapr
|
charts/dapr/charts/dapr_sidecar_injector/Chart.yaml
|
YAML
|
mit
| 134 |
{{/*
Deployment for the Dapr sidecar injector (mutating-webhook backend).
Rendered only when .Values.enabled is true.
*/}}
{{- if eq .Values.enabled true }}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dapr-sidecar-injector
  namespace: {{ .Release.Namespace }}
  labels:
    app: dapr-sidecar-injector
    {{- range $key, $value := .Values.global.k8sLabels }}
    {{ $key }}: {{ tpl $value $ }}
    {{- end }}
spec:
  # HA mode uses the global replica count; otherwise the chart-local one.
  {{- if eq .Values.global.ha.enabled true }}
  replicas: {{ .Values.global.ha.replicaCount }}
  {{- else }}
  replicas: {{ .Values.replicaCount }}
  {{- end }}
  selector:
    matchLabels:
      app: dapr-sidecar-injector
  template:
    metadata:
      labels:
        app: dapr-sidecar-injector
        {{- range $key, $value := .Values.global.k8sLabels }}
        {{ $key }}: {{ tpl $value $ }}
        {{- end }}
        {{- with .Values.global.labels }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      annotations:
        dapr.io/control-plane: injector
        {{- if eq .Values.global.prometheus.enabled true }}
        prometheus.io/scrape: "{{ .Values.global.prometheus.enabled }}"
        prometheus.io/port: "{{ .Values.global.prometheus.port }}"
        prometheus.io/path: "/"
        {{- end }}
        {{- with .Values.deploymentAnnotations }}
        {{ toYaml . | indent 8 }}
        {{- end }}
    spec:
      {{- if .Values.hostNetwork }}
      hostNetwork: true
      {{- end }}
      serviceAccountName: dapr-injector
      containers:
      - name: dapr-sidecar-injector
        livenessProbe:
          httpGet:
            path: /healthz
            port: {{ .Values.healthzPort }}
          # Debug mode (dlv) needs a much longer initial delay.
          {{- if eq .Values.debug.enabled false }}
          initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
          {{- else }}
          initialDelaySeconds: {{ .Values.debug.initialDelaySeconds }}
          {{- end }}
          periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
          failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
        readinessProbe:
          httpGet:
            path: /healthz
            port: {{ .Values.healthzPort }}
          {{- if eq .Values.debug.enabled false }}
          initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
          {{- else }}
          initialDelaySeconds: {{ .Values.debug.initialDelaySeconds }}
          {{- end }}
          periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
          failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
        # An image name containing "/" is treated as a full reference;
        # otherwise it is composed from the global registry and tag.
        {{- if contains "/" .Values.injectorImage.name }}
        image: "{{ .Values.injectorImage.name }}"
        {{- else }}
        image: "{{ .Values.global.registry }}/{{ .Values.injectorImage.name }}:{{ .Values.global.tag }}"
        {{- end }}
        imagePullPolicy: {{ .Values.global.imagePullPolicy }}
        {{- if eq .Values.global.daprControlPlaneOs "linux" }}
        securityContext:
          {{- if eq .Values.runAsNonRoot true }}
          runAsNonRoot: {{ .Values.runAsNonRoot }}
          {{- else }}
          runAsUser: 1000
          {{- end }}
          # Debugging requires ptrace; otherwise lock the container down.
          {{- if eq .Values.debug.enabled true }}
          capabilities:
            add: ["SYS_PTRACE"]
          {{- else }}
          readOnlyRootFilesystem: true
          capabilities:
            drop: ["ALL"]
          {{- end }}
          {{- if .Values.global.seccompProfile }}
          seccompProfile:
            type: {{ .Values.global.seccompProfile }}
          {{- end }}
        {{- end }}
        # Debug mode wraps the injector binary in the Delve debugger.
        command:
        {{- if eq .Values.debug.enabled false }}
        - "/injector"
        {{- else }}
        - "/dlv"
        {{- end }}
        args:
        {{- if eq .Values.debug.enabled true }}
        - "--listen=:{{ .Values.debug.port }}"
        - "--accept-multiclient"
        - "--headless=true"
        - "--log"
        - "--api-version=2"
        - "exec"
        - "/injector"
        - "--"
        {{- end }}
        - "--log-level"
        - {{ .Values.logLevel }}
        {{- if eq .Values.global.logAsJson true }}
        - "--log-as-json"
        {{- end }}
        {{- if eq .Values.global.prometheus.enabled true }}
        - "--enable-metrics"
        - "--metrics-port"
        - "{{ .Values.global.prometheus.port }}"
        {{- else }}
        - "--enable-metrics=false"
        {{- end }}
        - "--healthz-port"
        - {{ .Values.healthzPort | toString | toYaml }}
        env:
        - name: DAPR_TRUST_ANCHORS_FILE
          value: /var/run/secrets/dapr.io/tls/ca.crt
        - name: DAPR_CONTROL_PLANE_TRUST_DOMAIN
          value: {{ .Values.global.mtls.controlPlaneTrustDomain | toYaml }}
        # Defaults to the in-cluster sentry service when no address override is set.
        - name: DAPR_SENTRY_ADDRESS
          value: {{ with .Values.global.mtls.sentryAddress }}{{ . }}{{ else }}dapr-sentry.{{ .Release.Namespace }}.svc.cluster.local:443{{ end }}
        {{- range $name, $value := .Values.extraEnvVars }}
        - name: {{ $name | toYaml }}
          value: {{ $value | toString | toYaml }}
        {{- end }}
        {{- if .Values.kubeClusterDomain }}
        - name: KUBE_CLUSTER_DOMAIN
          value: {{ .Values.kubeClusterDomain | toYaml }}
        {{- end }}
        - name: SIDECAR_IMAGE
          {{- if contains "/" .Values.image.name }}
          value: {{ .Values.image.name | toYaml }}
          {{- else }}
          value: "{{ .Values.global.registry }}/{{ .Values.image.name }}:{{ .Values.global.tag }}"
          {{- end }}
        - name: SIDECAR_IMAGE_PULL_POLICY
          value: {{ .Values.sidecarImagePullPolicy | toYaml }}
        # Configuration for injected sidecars
        - name: SIDECAR_RUN_AS_NON_ROOT
          value: {{ .Values.sidecarRunAsNonRoot | toString | toYaml }}
        - name: ENABLE_K8S_DOWNWARD_APIS
          value: {{ .Values.enableK8sDownwardAPIs | toString | toYaml }}
        - name: SIDECAR_DROP_ALL_CAPABILITIES
          value: {{ .Values.sidecarDropALLCapabilities | toString | toYaml }}
        - name: SIDECAR_READ_ONLY_ROOT_FILESYSTEM
          value: {{ .Values.sidecarReadOnlyRootFilesystem | toString | toYaml }}
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        {{- if .Values.ignoreEntrypointTolerations }}
        - name: IGNORE_ENTRYPOINT_TOLERATIONS
          value: {{ .Values.ignoreEntrypointTolerations | toYaml }}
        {{- end }}
        # Configuration for actors and reminders
        - name: ACTORS_ENABLED
          value: {{ .Values.global.actors.enabled | toString | toYaml }}
        - name: ACTORS_SERVICE_NAME
          value: {{ .Values.global.actors.serviceName | toString | toYaml }}
        # Resolves a dynamically-named template ("address.<serviceName>")
        # defined elsewhere in the chart — not visible in this file.
        - name: ACTORS_SERVICE_ADDRESS
          value: {{ include (print "address." .Values.global.actors.serviceName) . | toString | toYaml }}
        {{- with .Values.global.reminders.serviceName }}
        - name: REMINDERS_SERVICE_NAME
          value: {{ . | toString | toYaml }}
        - name: REMINDERS_SERVICE_ADDRESS
          value: {{ include (print "address." .) . | toString | toYaml }}
        {{- end }}
        {{- if .Values.allowedServiceAccounts }}
        - name: ALLOWED_SERVICE_ACCOUNTS
          value: {{ .Values.allowedServiceAccounts | toYaml }}
        {{- end }}
        {{- if .Values.allowedServiceAccountsPrefixNames }}
        - name: ALLOWED_SERVICE_ACCOUNTS_PREFIX_NAMES
          value: {{ .Values.allowedServiceAccountsPrefixNames | toYaml }}
        {{- end }}
        ports:
        - name: https
          containerPort: 4000
          protocol: TCP
        {{- if eq .Values.global.prometheus.enabled true }}
        - name: metrics
          containerPort: {{ .Values.global.prometheus.port }}
          protocol: TCP
        {{- end }}
        {{- if eq .Values.debug.enabled true }}
        - name: debug
          containerPort: {{ .Values.debug.port }}
          protocol: TCP
        {{- end }}
        resources:
{{ toYaml .Values.resources | indent 10 }}
        volumeMounts:
        - name: dapr-trust-bundle
          mountPath: /var/run/secrets/dapr.io/tls
          readOnly: true
        {{- if .Values.global.mtls.mountSentryVolume }}
        - name: dapr-identity-token
          mountPath: /var/run/secrets/dapr.io/sentrytoken
          readOnly: true
        {{- end }}
        {{- with .Values.global.extraVolumeMounts.injector }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      volumes:
      - name: dapr-trust-bundle
        configMap:
          name: dapr-trust-bundle
      {{- if .Values.global.mtls.mountSentryVolume }}
      # Short-lived projected SA token with a SPIFFE-style audience for sentry.
      - name: dapr-identity-token
        projected:
          sources:
          - serviceAccountToken:
              path: token
              expirationSeconds: 600
              audience: "spiffe://{{ .Values.global.mtls.controlPlaneTrustDomain }}/ns/{{ .Release.Namespace }}/dapr-sentry"
      {{- end }}
      {{- with .Values.global.extraVolumes.injector }}
      {{- toYaml . | nindent 6 }}
      {{- end }}
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - {{ .Values.global.daprControlPlaneOs }}
              {{- if .Values.global.daprControlPlaneArch }}
              - key: kubernetes.io/arch
                operator: In
                values:
                - {{ .Values.global.daprControlPlaneArch }}
              {{- end }}
        # In HA mode, prefer spreading replicas across zones.
        {{- if .Values.global.ha.enabled }}
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              podAffinityTerm:
                labelSelector:
                  matchExpressions:
                  - key: app
                    operator: In
                    values:
                    - dapr-sidecar-injector
                topologyKey: topology.kubernetes.io/zone
        {{- end }}
      {{- if .Values.global.imagePullSecrets }}
      imagePullSecrets:
        {{- include "dapr.imagePullSecrets" (dict "imagePullSecrets" .Values.global.imagePullSecrets) | nindent 8 -}}
      {{- end }}
      {{- if .Values.global.nodeSelector }}
      nodeSelector:
{{ toYaml .Values.global.nodeSelector | indent 8 }}
      {{- end }}
      {{- if .Values.global.tolerations }}
      tolerations:
{{ toYaml .Values.global.tolerations | indent 8 }}
      {{- end }}
      {{- if .Values.global.priorityClassName }}
      priorityClassName:
{{ toYaml .Values.global.priorityClassName | indent 8 }}
      {{- end }}
{{- end }}
|
mikeee/dapr
|
charts/dapr/charts/dapr_sidecar_injector/templates/dapr_sidecar_injector_deployment.yaml
|
YAML
|
mit
| 10,006 |
{{/*
PodDisruptionBudget for the sidecar injector, created only when the
injector is enabled AND HA mode is on. Uses policy/v1 when available,
falling back to policy/v1beta1 for older clusters.
*/}}
{{- if eq .Values.enabled true }}
{{- if eq .Values.global.ha.enabled true }}
{{- if .Capabilities.APIVersions.Has "policy/v1" }}
apiVersion: policy/v1
{{- else }}
apiVersion: policy/v1beta1
{{- end }}
kind: PodDisruptionBudget
metadata:
  name: dapr-sidecar-injector-disruption-budget
  namespace: {{ .Release.Namespace }}
  labels:
    app: dapr-sidecar-injector
    {{- range $key, $value := .Values.global.k8sLabels }}
    {{ $key }}: {{ tpl $value $ }}
    {{- end }}
spec:
  {{- if .Values.global.ha.disruption.minimumAvailable }}
  minAvailable: {{ .Values.global.ha.disruption.minimumAvailable }}
  {{- end }}
  {{- if .Values.global.ha.disruption.maximumUnavailable }}
  maxUnavailable: {{ .Values.global.ha.disruption.maximumUnavailable }}
  {{- end }}
  selector:
    matchLabels:
      app: dapr-sidecar-injector
      {{- range $key, $value := .Values.global.k8sLabels }}
      {{ $key }}: {{ tpl $value $ }}
      {{- end }}
      {{- with .Values.global.labels }}
      {{- toYaml . | nindent 6 }}
      {{- end }}
{{- end }}
{{- end }}
|
mikeee/dapr
|
charts/dapr/charts/dapr_sidecar_injector/templates/dapr_sidecar_injector_poddisruptionbudget.yaml
|
YAML
|
mit
| 1,046 |
{{/*
Service fronting the injector's HTTPS webhook port.
*/}}
{{- if eq .Values.enabled true }}
apiVersion: v1
kind: Service
metadata:
  name: dapr-sidecar-injector
  namespace: {{ .Release.Namespace }}
  labels:
    {{- range $key, $value := .Values.global.k8sLabels }}
    {{ $key }}: {{ tpl $value $ }}
    {{- end }}
{{- /* BUGFIX: the annotations map was previously rendered at indent 4
without an "annotations:" key, merging its entries into metadata.labels. */}}
{{- if .Values.service.annotations }}
  annotations:
{{ toYaml .Values.service.annotations | indent 4 }}
{{- end }}
spec:
  selector:
    app: dapr-sidecar-injector
  type: {{ .Values.service.type }}
  ports:
  - port: 443
    targetPort: https
    protocol: TCP
    name: https
{{- end }}
|
mikeee/dapr
|
charts/dapr/charts/dapr_sidecar_injector/templates/dapr_sidecar_injector_service.yaml
|
YAML
|
mit
| 533 |
{{- if eq .Values.enabled true }}
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
name: dapr-sidecar-injector
labels:
app: dapr-sidecar-injector
{{- range $key, $value := .Values.global.k8sLabels }}
{{ $key }}: {{ tpl $value $ }}
{{- end }}
webhooks:
- name: sidecar-injector.dapr.io
reinvocationPolicy: IfNeeded
clientConfig:
service:
namespace: {{ .Release.Namespace }}
name: dapr-sidecar-injector
path: "/mutate"
rules:
- apiGroups:
- ""
apiVersions:
- v1
resources:
- pods
operations:
- CREATE
failurePolicy: {{ .Values.webhookFailurePolicy}}
sideEffects: None
admissionReviewVersions: ["v1", "v1beta1"]
{{- end }}
|
mikeee/dapr
|
charts/dapr/charts/dapr_sidecar_injector/templates/dapr_sidecar_injector_webhook_config.yaml
|
YAML
|
mit
| 744 |
enabled: true
replicaCount: 1
logLevel: info
component: sidecar-injector
# Override this to use a custom sidecar image.
# If the image name contains a "/", it is assumed to be a full docker image name, including the registry url and tag.
# Otherwise, the helm chart will use {{ .Values.global.registry }}/{{ .Values.image.name }}:{{ .Values.global.tag }}
image:
name: "daprd"
# Override this to use a custom injector service image.
# If the image name contains a "/", it is assumed to be a full docker image name, including the registry url and tag.
# Otherwise, the helm chart will use {{ .Values.global.registry }}/{{ .Values.injectorImage.name }}:{{ .Values.global.tag }}
injectorImage:
name: "injector"
deploymentAnnotations: {}
service:
type: ClusterIP
annotations: {}
nameOverride: ""
fullnameOverride: ""
webhookFailurePolicy: Ignore
sidecarImagePullPolicy: IfNotPresent
runAsNonRoot: true
sidecarRunAsNonRoot: true
sidecarReadOnlyRootFilesystem: true
sidecarDropALLCapabilities: false
enableK8sDownwardAPIs: false
allowedServiceAccounts: ""
allowedServiceAccountsPrefixNames: ""
resources: {}
kubeClusterDomain: cluster.local
ignoreEntrypointTolerations: "[{\\\"effect\\\":\\\"NoSchedule\\\",\\\"key\\\":\\\"alibabacloud.com/eci\\\"},{\\\"effect\\\":\\\"NoSchedule\\\",\\\"key\\\":\\\"azure.com/aci\\\"},{\\\"effect\\\":\\\"NoSchedule\\\",\\\"key\\\":\\\"aws\\\"},{\\\"effect\\\":\\\"NoSchedule\\\",\\\"key\\\":\\\"huawei.com/cci\\\"}]"
hostNetwork: false
healthzPort: 8080
livenessProbe:
initialDelaySeconds: 3
periodSeconds: 3
failureThreshold: 5
readinessProbe:
initialDelaySeconds: 3
periodSeconds: 3
failureThreshold: 5
debug:
enabled: false
port: 40000
initialDelaySeconds: 30000
extraEnvVars: {}
|
mikeee/dapr
|
charts/dapr/charts/dapr_sidecar_injector/values.yaml
|
YAML
|
mit
| 1,745 |
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: components.dapr.io
labels:
app.kubernetes.io/part-of: "dapr"
spec:
group: dapr.io
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: Component describes an Dapr component type.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
auth:
description: Auth represents authentication details for the component.
properties:
secretStore:
type: string
required:
- secretStore
type: object
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
scopes:
items:
type: string
type: array
spec:
description: ComponentSpec is the spec for a component.
properties:
ignoreErrors:
type: boolean
initTimeout:
type: string
metadata:
items:
description: NameValuePair is a name/value pair.
properties:
envRef:
description: EnvRef is the name of an environmental variable
to read the value from.
type: string
name:
description: Name of the property.
type: string
secretKeyRef:
description: SecretKeyRef is the reference of a value in a secret
store component.
properties:
key:
description: Field in the secret.
type: string
name:
description: Secret name.
type: string
required:
- name
type: object
value:
description: Value of the property, in plaintext.
x-kubernetes-preserve-unknown-fields: true
required:
- name
type: object
type: array
type:
type: string
version:
type: string
required:
- metadata
- type
- version
type: object
type: object
served: true
storage: true
names:
kind: Component
plural: components
singular: component
categories:
- all
- dapr
scope: Namespaced
|
mikeee/dapr
|
charts/dapr/crds/components.yaml
|
YAML
|
mit
| 3,392 |
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.11.3
creationTimestamp: null
name: configurations.dapr.io
labels:
app.kubernetes.io/part-of: "dapr"
spec:
group: dapr.io
names:
kind: Configuration
listKind: ConfigurationList
plural: configurations
singular: configuration
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: Configuration describes an Dapr configuration setting.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: ConfigurationSpec is the spec for an configuration.
properties:
accessControl:
description: AccessControlSpec is the spec object in ConfigurationSpec.
properties:
defaultAction:
type: string
policies:
items:
description: AppPolicySpec defines the policy data structure
for each app.
properties:
appId:
type: string
defaultAction:
type: string
namespace:
type: string
operations:
items:
description: AppOperationAction defines the data structure
for each app operation.
properties:
action:
type: string
httpVerb:
items:
type: string
type: array
name:
type: string
required:
- action
- name
type: object
type: array
trustDomain:
type: string
required:
- appId
type: object
type: array
trustDomain:
type: string
type: object
api:
description: APISpec describes the configuration for Dapr APIs.
properties:
allowed:
description: List of allowed APIs. Can be used in conjunction with denied.
items:
description: APIAccessRule describes an access rule for allowing or denying a Dapr API.
properties:
name:
type: string
protocol:
type: string
version:
type: string
required:
- name
- version
type: object
type: array
denied:
description: List of denied APIs. Can be used in conjunction with allowed.
items:
description: APIAccessRule describes an access rule for allowing or denying a Dapr API.
properties:
name:
type: string
protocol:
type: string
version:
type: string
required:
- name
- version
type: object
type: array
type: object
appHttpPipeline:
description: PipelineSpec defines the middleware pipeline.
properties:
handlers:
items:
description: HandlerSpec defines a request handlers.
properties:
name:
type: string
selector:
description: SelectorSpec selects target services to which
the handler is to be applied.
properties:
fields:
items:
description: SelectorField defines a selector fields.
properties:
field:
type: string
value:
type: string
required:
- field
- value
type: object
type: array
required:
- fields
type: object
type:
type: string
required:
- name
- type
type: object
type: array
required:
- handlers
type: object
components:
description: ComponentsSpec describes the configuration for Dapr components
properties:
deny:
description: Denylist of component types that cannot be instantiated
items:
type: string
type: array
type: object
features:
items:
description: FeatureSpec defines the features that are enabled/disabled.
properties:
enabled:
type: boolean
name:
type: string
required:
- enabled
- name
type: object
type: array
httpPipeline:
description: PipelineSpec defines the middleware pipeline.
properties:
handlers:
items:
description: HandlerSpec defines a request handlers.
properties:
name:
type: string
selector:
description: SelectorSpec selects target services to which
the handler is to be applied.
properties:
fields:
items:
description: SelectorField defines a selector fields.
properties:
field:
type: string
value:
type: string
required:
- field
- value
type: object
type: array
required:
- fields
type: object
type:
type: string
required:
- name
- type
type: object
type: array
required:
- handlers
type: object
logging:
description: LoggingSpec defines the configuration for logging.
properties:
apiLogging:
description: Configure API logging.
properties:
enabled:
description: Default value for enabling API logging. Sidecars
can always override this by setting `--enable-api-logging`
to true or false explicitly. The default value is false.
type: boolean
obfuscateURLs:
description: 'When enabled, obfuscates the values of URLs
in HTTP API logs, logging the route name rather than the
full path being invoked, which could contain PII. Default:
false. This option has no effect if API logging is disabled.'
type: boolean
omitHealthChecks:
description: 'If true, health checks are not reported in API
logs. Default: false. This option has no effect if API logging
is disabled.'
type: boolean
type: object
type: object
metric:
default:
enabled: true
description: MetricSpec defines metrics configuration.
properties:
enabled:
type: boolean
http:
description: MetricHTTP defines configuration for metrics for
the HTTP server
properties:
increasedCardinality:
description: 'If true, metrics for the HTTP server are collected
with increased cardinality. The default is true in Dapr 1.13,
but will be changed to false in 1.15+'
type: boolean
pathMatching:
description: PathMatching defines the path matching configuration for HTTP server metrics.
properties:
ingress:
type: array
items:
type: string
egress:
type: array
items:
type: string
type: object
type: object
rules:
items:
description: MetricsRule defines configuration options for a
metric.
properties:
labels:
items:
description: MetricsLabel defines an object that allows
to set regex expressions for a label.
properties:
name:
type: string
regex:
additionalProperties:
type: string
type: object
required:
- name
- regex
type: object
type: array
name:
type: string
required:
- labels
- name
type: object
type: array
required:
- enabled
type: object
metrics:
default:
enabled: true
description: MetricSpec defines metrics configuration.
properties:
enabled:
type: boolean
http:
description: MetricHTTP defines configuration for metrics for
the HTTP server
properties:
increasedCardinality:
description: 'If true, metrics for the HTTP server are collected
with increased cardinality. The default is true in Dapr 1.13,
but will be changed to false in 1.14+'
type: boolean
pathMatching:
description: PathMatching defines the path matching configuration for HTTP server metrics.
properties:
ingress:
type: array
items:
type: string
egress:
type: array
items:
type: string
type: object
type: object
rules:
items:
description: MetricsRule defines configuration options for a
metric.
properties:
labels:
items:
description: MetricsLabel defines an object that allows
to set regex expressions for a label.
properties:
name:
type: string
regex:
additionalProperties:
type: string
type: object
required:
- name
- regex
type: object
type: array
name:
type: string
required:
- labels
- name
type: object
type: array
required:
- enabled
type: object
mtls:
description: MTLSSpec defines mTLS configuration.
properties:
allowedClockSkew:
type: string
controlPlaneTrustDomain:
type: string
enabled:
type: boolean
sentryAddress:
type: string
tokenValidators:
description: Additional token validators to use. When Dapr is
running in Kubernetes mode, this is in addition to the built-in
"kubernetes" validator. In self-hosted mode, enabling a custom
validator will disable the built-in "insecure" validator.
items:
description: ValidatorSpec contains additional token validators
to use.
properties:
name:
description: Name of the validator
enum:
- jwks
type: string
options:
description: Options for the validator, if any
type: object
x-kubernetes-preserve-unknown-fields: true
required:
- name
type: object
type: array
workloadCertTTL:
type: string
required:
- enabled
type: object
nameResolution:
description: NameResolutionSpec is the spec for name resolution configuration.
properties:
component:
type: string
configuration:
description: DynamicValue is a dynamic value struct for the component.metadata
pair value.
type: object
x-kubernetes-preserve-unknown-fields: true
version:
type: string
required:
- component
- configuration
- version
type: object
secrets:
description: SecretsSpec is the spec for secrets configuration.
properties:
scopes:
items:
description: SecretsScope defines the scope for secrets.
properties:
allowedSecrets:
items:
type: string
type: array
defaultAccess:
type: string
deniedSecrets:
items:
type: string
type: array
storeName:
type: string
required:
- storeName
type: object
type: array
required:
- scopes
type: object
tracing:
description: TracingSpec defines distributed tracing configuration.
properties:
otel:
description: OtelSpec defines Otel exporter configurations.
properties:
endpointAddress:
type: string
isSecure:
type: boolean
protocol:
type: string
required:
- endpointAddress
- isSecure
- protocol
type: object
samplingRate:
type: string
stdout:
type: boolean
zipkin:
description: ZipkinSpec defines Zipkin trace configurations.
properties:
endpointAddress:
type: string
required:
- endpointAddress
type: object
required:
- samplingRate
type: object
wasm:
description: WasmSpec describes the security profile for all Dapr Wasm components.
properties:
strictSandbox:
type: boolean
type: object
type: object
type: object
served: true
storage: true
|
mikeee/dapr
|
charts/dapr/crds/configuration.yaml
|
YAML
|
mit
| 19,861 |
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: httpendpoints.dapr.io
labels:
app.kubernetes.io/part-of: "dapr"
spec:
group: dapr.io
names:
kind: HTTPEndpoint
listKind: HTTPEndpointList
plural: httpendpoints
singular: httpendpoint
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: HTTPEndpoint describes a Dapr HTTPEndpoint type for external
service invocation. This endpoint can be external to Dapr, or external to
the environment.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
auth:
description: Auth represents authentication details for the component.
properties:
secretStore:
type: string
required:
- secretStore
type: object
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
scopes:
items:
type: string
type: array
spec:
description: HTTPEndpointSpec describes an access specification for allowing
external service invocations.
properties:
baseUrl:
type: string
clientTLS:
description: TLS describes how to build client or server TLS configurations.
properties:
certificate:
description: TLSDocument describes and in-line or pointer to a
document to build a TLS configuration.
properties:
secretKeyRef:
description: SecretKeyRef is the reference of a value in a
secret store component.
properties:
key:
description: Field in the secret.
type: string
name:
description: Secret name.
type: string
required:
- name
type: object
value:
description: Value of the property, in plaintext.
x-kubernetes-preserve-unknown-fields: true
type: object
privateKey:
description: TLSDocument describes and in-line or pointer to a
document to build a TLS configuration.
properties:
secretKeyRef:
description: SecretKeyRef is the reference of a value in a
secret store component.
properties:
key:
description: Field in the secret.
type: string
name:
description: Secret name.
type: string
required:
- name
type: object
value:
description: Value of the property, in plaintext.
x-kubernetes-preserve-unknown-fields: true
type: object
renegotiation:
default: Never
description: Renegotiation sets the underlying tls negotiation
strategy for an http channel.
enum:
- Never
- OnceAsClient
- FreelyAsClient
type: string
rootCA:
description: TLSDocument describes and in-line or pointer to a
document to build a TLS configuration.
properties:
secretKeyRef:
description: SecretKeyRef is the reference of a value in a
secret store component.
properties:
key:
description: Field in the secret.
type: string
name:
description: Secret name.
type: string
required:
- name
type: object
value:
description: Value of the property, in plaintext.
x-kubernetes-preserve-unknown-fields: true
type: object
type: object
headers:
items:
description: NameValuePair is a name/value pair.
properties:
envRef:
description: EnvRef is the name of an environmental variable
to read the value from.
type: string
name:
description: Name of the property.
type: string
secretKeyRef:
description: SecretKeyRef is the reference of a value in a secret
store component.
properties:
key:
description: Field in the secret.
type: string
name:
description: Secret name.
type: string
required:
- name
type: object
value:
description: Value of the property, in plaintext.
x-kubernetes-preserve-unknown-fields: true
required:
- name
type: object
type: array
required:
- baseUrl
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
|
mikeee/dapr
|
charts/dapr/crds/httpendpoints.yaml
|
YAML
|
mit
| 6,940 |
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.5.0
creationTimestamp: null
name: resiliencies.dapr.io
labels:
app.kubernetes.io/part-of: "dapr"
spec:
group: dapr.io
names:
kind: Resiliency
listKind: ResiliencyList
plural: resiliencies
singular: resiliency
categories:
- dapr
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
scopes:
items:
type: string
type: array
spec:
properties:
policies:
properties:
circuitBreakers:
additionalProperties:
properties:
interval:
type: string
maxRequests:
type: integer
timeout:
type: string
trip:
type: string
type: object
type: object
retries:
additionalProperties:
properties:
duration:
type: string
maxInterval:
type: string
maxRetries:
type: integer
policy:
type: string
type: object
type: object
timeouts:
additionalProperties:
type: string
type: object
type: object
targets:
properties:
actors:
additionalProperties:
properties:
circuitBreaker:
type: string
circuitBreakerCacheSize:
type: integer
circuitBreakerScope:
type: string
retry:
type: string
timeout:
type: string
type: object
type: object
apps:
additionalProperties:
properties:
circuitBreaker:
type: string
circuitBreakerCacheSize:
type: integer
retry:
type: string
timeout:
type: string
type: object
type: object
components:
additionalProperties:
properties:
inbound:
properties:
circuitBreaker:
type: string
retry:
type: string
timeout:
type: string
type: object
outbound:
properties:
circuitBreaker:
type: string
retry:
type: string
timeout:
type: string
type: object
type: object
type: object
type: object
required:
- policies
- targets
type: object
type: object
served: true
storage: true
|
mikeee/dapr
|
charts/dapr/crds/resiliency.yaml
|
YAML
|
mit
| 4,723 |
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: subscriptions.dapr.io
labels:
app.kubernetes.io/part-of: "dapr"
spec:
group: dapr.io
conversion:
strategy: Webhook
webhook:
clientConfig:
service:
namespace: replaceme # Patched by post-install webhook
name: dapr-webhook
path: /convert
#caBundle: Patched by post-install webhook
conversionReviewVersions:
- v1
- v2alpha1
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: Subscription describes an pub/sub event subscription.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
scopes:
items:
type: string
type: array
spec:
description: SubscriptionSpec is the spec for an event subscription.
properties:
pubsubname:
type: string
route:
type: string
topic:
type: string
deadLetterTopic:
type: string
bulkSubscribe:
description: Represents bulk subscribe properties
properties:
enabled:
type: boolean
maxMessagesCount:
type: integer
maxAwaitDurationMs:
type: integer
required:
- enabled
type: object
metadata:
additionalProperties:
type: string
type: object
required:
- pubsubname
- route
- topic
type: object
type: object
served: true
storage: false
- name: v2alpha1
schema:
openAPIV3Schema:
description: Subscription describes an pub/sub event subscription.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
scopes:
items:
type: string
type: array
spec:
description: SubscriptionSpec is the spec for an event subscription.
properties:
metadata:
additionalProperties:
type: string
description: The optional metadata to provide the subscription.
type: object
pubsubname:
description: The PubSub component name.
type: string
routes:
description: The Routes configuration for this topic.
properties:
default:
type: string
rules:
description: The list of rules for this topic.
items:
description: Rule is used to specify the condition for sending
a message to a specific path.
properties:
match:
description: The optional CEL expression used to match the
event. If the match is not specified, then the route is
considered the default. The rules are tested in the order
specified, so they should be define from most-to-least
specific. The default route should appear last in the
list.
type: string
path:
description: The path for events that match this rule.
type: string
required:
- match
- path
type: object
type: array
type: object
topic:
description: The topic name to subscribe to.
type: string
deadLetterTopic:
description: The optional dead letter queue for this topic to send events to.
type: string
bulkSubscribe:
description: Represents bulk subscribe properties
properties:
enabled:
type: boolean
maxMessagesCount:
type: integer
maxAwaitDurationMs:
type: integer
required:
- enabled
type: object
required:
- pubsubname
- routes
- topic
type: object
type: object
served: true
storage: true
names:
kind: Subscription
listKind: SubscriptionList
plural: subscriptions
singular: subscription
categories:
- all
- dapr
scope: Namespaced
|
mikeee/dapr
|
charts/dapr/crds/subscription.yaml
|
YAML
|
mit
| 6,452 |
Thank you for installing Dapr: High-performance, lightweight serverless runtime for cloud and edge
Your release is named {{ .Release.Name }}.
To get started with Dapr, we recommend using our quickstarts:
https://github.com/dapr/quickstarts
For more information on running Dapr, visit:
https://dapr.io
|
mikeee/dapr
|
charts/dapr/templates/NOTES.txt
|
Text
|
mit
| 304 |
{{/*
Returns the address and port of the placement service
The returned value is a string in the format "<name>:<port>"
*/}}
{{- define "address.placement" -}}
{{- "dapr-placement-server:50005" }}
{{- end -}}
|
mikeee/dapr
|
charts/dapr/templates/_address_placement.tpl
|
tpl
|
mit
| 208 |
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "k8s_operator.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "k8s_operator.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "k8s_operator.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Formats imagePullSecrets. Input is dict( "imagePullSecrets" .{specific imagePullSecrets}).
*/}}
{{- define "dapr.imagePullSecrets" -}}
{{- if eq (typeOf .imagePullSecrets) "string" -}} {{- /* Single string value */ -}}
- name: {{ .imagePullSecrets }}
{{- else -}} {{- /* Not a string value, iterate */ -}}
{{- range .imagePullSecrets -}}
{{- if eq (typeOf .) "map[string]interface {}" -}} {{- /* k8s style */ -}}
- {{ toYaml (dict "name" .name) | trim }}
{{ else }} {{- /* helm style */ -}}
- name: {{ . }}
{{ end }} {{- /* End of inner if */ -}}
{{- end -}}
{{- end -}}
{{- end -}}
|
mikeee/dapr
|
charts/dapr/templates/_helpers.tpl
|
tpl
|
mit
| 1,649 |
global:
registry: ghcr.io/dapr
tag: 'edge'
dnsSuffix: ".cluster.local"
logAsJson: false
imagePullPolicy: IfNotPresent
# To help compatibility with other charts which use global.imagePullSecrets.
# Allow either a string with single imagepullsecret or an array of {name: pullSecret} maps (k8s-style) or an array of strings (more common helm-style).
# global:
# imagePullSecrets: "pullSecret"
# or
# global:
# imagePullSecrets:
# - name: pullSecret1
# - name: pullSecret2
# or
# global:
# imagePullSecrets:
# - pullSecret1
# - pullSecret2
imagePullSecrets: ""
priorityClassName: ""
nodeSelector: {}
tolerations: []
rbac:
namespaced: false
ha:
enabled: false
replicaCount: 3
disruption:
minimumAvailable: ""
maximumUnavailable: "25%"
prometheus:
enabled: true
port: 9090
mtls:
enabled: true
workloadCertTTL: 24h
allowedClockSkew: 15m
controlPlaneTrustDomain: "cluster.local"
# If set to true, a bound service account token will be mounted and used to
# authenticate to Sentry.
mountSentryVolume: true
# Used to override `dapr-sentry.{{ .Release.Namespace }}.svc.cluster.local:443`
#sentryAddress:
# extraVolumes and extraVolumeMounts are used to mount additional volumes to
# the Dapr control plane pods. Useful for using alternative authentication
# credentials to sentry.
extraVolumes: {}
# sentry:
# placement:
# operator:
# injector:
extraVolumeMounts: {}
# sentry:
# placement:
# operator:
# injector:
actors:
# Enables actor functionality in the cluster
enabled: true
# Name of the service that provides actor placement services
serviceName: "placement"
reminders:
# Name of the service that provides reminders
# If empty, uses the built-in reminders capabilities in Dapr sidecars
serviceName: ""
daprControlPlaneOs: linux
labels: {}
seccompProfile: ""
k8sLabels:
app.kubernetes.io/name: "{{ .Release.Name }}"
app.kubernetes.io/version: "{{ .Values.global.tag }}"
app.kubernetes.io/part-of: "dapr"
app.kubernetes.io/managed-by: "helm"
app.kubernetes.io/component: "{{ .Values.component }}" # Should be set in each subchart
issuerFilenames: {}
## the issuerFilenames dictionary, if setup, have to contain 3 keys: ca,cert,key
# issuerFilenames:
# ca: "ca.crt"
# cert: "tls.crt"
# key: "tls.key"
argoRolloutServiceReconciler:
enabled: false
operator:
watchdogCanPatchPodLabels: false
|
mikeee/dapr
|
charts/dapr/values.yaml
|
YAML
|
mit
| 2,554 |
# Dapr daprd documentation
Please see the [Dapr daprd documentation](https://docs.dapr.io/concepts/dapr-services/sidecar/) for more information.
|
mikeee/dapr
|
cmd/daprd/README.md
|
Markdown
|
mit
| 145 |
/*
Copyright 2023 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"context"
"fmt"
"os"
"go.uber.org/automaxprocs/maxprocs"
// Register all components
_ "github.com/dapr/dapr/cmd/daprd/components"
"github.com/dapr/dapr/cmd/daprd/options"
"github.com/dapr/dapr/pkg/buildinfo"
bindingsLoader "github.com/dapr/dapr/pkg/components/bindings"
configurationLoader "github.com/dapr/dapr/pkg/components/configuration"
cryptoLoader "github.com/dapr/dapr/pkg/components/crypto"
lockLoader "github.com/dapr/dapr/pkg/components/lock"
httpMiddlewareLoader "github.com/dapr/dapr/pkg/components/middleware/http"
nrLoader "github.com/dapr/dapr/pkg/components/nameresolution"
pubsubLoader "github.com/dapr/dapr/pkg/components/pubsub"
secretstoresLoader "github.com/dapr/dapr/pkg/components/secretstores"
stateLoader "github.com/dapr/dapr/pkg/components/state"
wfbeLoader "github.com/dapr/dapr/pkg/components/wfbackend"
workflowsLoader "github.com/dapr/dapr/pkg/components/workflows"
"github.com/dapr/dapr/pkg/modes"
"github.com/dapr/dapr/pkg/runtime/registry"
"github.com/dapr/dapr/pkg/security"
"github.com/dapr/kit/concurrency"
"github.com/dapr/kit/signals"
"github.com/dapr/dapr/pkg/runtime"
"github.com/dapr/kit/logger"
)
// Package-level loggers: "dapr.runtime" for the runtime itself,
// "dapr.contrib" for the contrib component registries.
var (
	log        = logger.NewLogger("dapr.runtime")
	logContrib = logger.NewLogger("dapr.contrib")
)

// Run is the daprd entrypoint. It parses CLI flags, handles the
// short-circuit modes (version, build-info, outbound-ready wait),
// configures logging, wires every component registry into a single
// registry.Options, starts the security (Sentry/mTLS) provider, and then
// runs the Dapr runtime until the signal context is cancelled.
func Run() {
	// set GOMAXPROCS (honors container CPU quotas via automaxprocs)
	_, _ = maxprocs.Set()

	opts, err := options.New(os.Args[1:])
	if err != nil {
		log.Fatalf("Failed to parse flags: %v", err)
	}

	// Version flag: print the runtime version and exit immediately.
	if opts.RuntimeVersion {
		//nolint:forbidigo
		fmt.Println(buildinfo.Version())
		os.Exit(0)
	}

	// Build-info flag: print extended build metadata and exit immediately.
	if opts.BuildInfo {
		//nolint:forbidigo
		fmt.Printf("Version: %s\nGit Commit: %s\nGit Version: %s\n", buildinfo.Version(), buildinfo.Commit(), buildinfo.GitVersion())
		os.Exit(0)
	}

	// Wait mode: block until the sidecar's outbound HTTP port is ready,
	// then exit without starting a runtime.
	if opts.WaitCommand {
		runtime.WaitUntilDaprOutboundReady(opts.DaprHTTPPort)
		os.Exit(0)
	}

	// Apply options to all loggers.
	opts.Logger.SetAppID(opts.AppID)
	err = logger.ApplyOptionsToLoggers(&opts.Logger)
	if err != nil {
		log.Fatal(err)
	}

	log.Infof("Starting Dapr Runtime -- version %s -- commit %s", buildinfo.Version(), buildinfo.Commit())
	log.Infof("Log level set to: %s", opts.Logger.OutputLevel)

	// Route every contrib component registry's logging through the
	// dedicated contrib logger.
	secretstoresLoader.DefaultRegistry.Logger = logContrib
	stateLoader.DefaultRegistry.Logger = logContrib
	cryptoLoader.DefaultRegistry.Logger = logContrib
	configurationLoader.DefaultRegistry.Logger = logContrib
	lockLoader.DefaultRegistry.Logger = logContrib
	pubsubLoader.DefaultRegistry.Logger = logContrib
	nrLoader.DefaultRegistry.Logger = logContrib
	bindingsLoader.DefaultRegistry.Logger = logContrib
	workflowsLoader.DefaultRegistry.Logger = logContrib
	wfbeLoader.DefaultRegistry.Logger = logContrib
	httpMiddlewareLoader.DefaultRegistry.Logger = log // Note this uses log on purpose

	// Aggregate every default component registry into the single options
	// object handed to the runtime.
	reg := registry.NewOptions().
		WithSecretStores(secretstoresLoader.DefaultRegistry).
		WithStateStores(stateLoader.DefaultRegistry).
		WithConfigurations(configurationLoader.DefaultRegistry).
		WithLocks(lockLoader.DefaultRegistry).
		WithPubSubs(pubsubLoader.DefaultRegistry).
		WithNameResolutions(nrLoader.DefaultRegistry).
		WithBindings(bindingsLoader.DefaultRegistry).
		WithCryptoProviders(cryptoLoader.DefaultRegistry).
		WithHTTPMiddlewares(httpMiddlewareLoader.DefaultRegistry).
		WithWorkflows(workflowsLoader.DefaultRegistry).
		WithWorkflowBackends(wfbeLoader.DefaultRegistry)

	// Context cancelled on SIGINT/SIGTERM; drives graceful shutdown.
	ctx := signals.Context()
	secProvider, err := security.New(ctx, security.Options{
		SentryAddress:           opts.SentryAddress,
		ControlPlaneTrustDomain: opts.ControlPlaneTrustDomain,
		ControlPlaneNamespace:   opts.ControlPlaneNamespace,
		TrustAnchors:            opts.TrustAnchors,
		AppID:                   opts.AppID,
		MTLSEnabled:             opts.EnableMTLS,
		Mode:                    modes.DaprMode(opts.Mode),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Run the security provider and the runtime concurrently; the manager
	// returns once both complete (or either fails).
	err = concurrency.NewRunnerManager(
		secProvider.Run,
		func(ctx context.Context) error {
			// Handler blocks until the security provider is ready.
			sec, serr := secProvider.Handler(ctx)
			if serr != nil {
				return serr
			}
			// Translate parsed CLI options into the runtime configuration.
			rt, rerr := runtime.FromConfig(ctx, &runtime.Config{
				AppID:                         opts.AppID,
				ActorsService:                 opts.ActorsService,
				RemindersService:              opts.RemindersService,
				AllowedOrigins:                opts.AllowedOrigins,
				ResourcesPath:                 opts.ResourcesPath,
				ControlPlaneAddress:           opts.ControlPlaneAddress,
				AppProtocol:                   opts.AppProtocol,
				Mode:                          opts.Mode,
				DaprHTTPPort:                  opts.DaprHTTPPort,
				DaprInternalGRPCPort:          opts.DaprInternalGRPCPort,
				DaprInternalGRPCListenAddress: opts.DaprInternalGRPCListenAddress,
				DaprAPIGRPCPort:               opts.DaprAPIGRPCPort,
				DaprAPIListenAddresses:        opts.DaprAPIListenAddresses,
				DaprPublicPort:                opts.DaprPublicPort,
				DaprPublicListenAddress:       opts.DaprPublicListenAddress,
				ApplicationPort:               opts.AppPort,
				ProfilePort:                   opts.ProfilePort,
				EnableProfiling:               opts.EnableProfiling,
				AppMaxConcurrency:             opts.AppMaxConcurrency,
				EnableMTLS:                    opts.EnableMTLS,
				SentryAddress:                 opts.SentryAddress,
				MaxRequestSize:                opts.MaxRequestSize,
				ReadBufferSize:                opts.ReadBufferSize,
				UnixDomainSocket:              opts.UnixDomainSocket,
				DaprGracefulShutdownSeconds:   opts.DaprGracefulShutdownSeconds,
				DaprBlockShutdownDuration:     opts.DaprBlockShutdownDuration,
				DisableBuiltinK8sSecretStore:  opts.DisableBuiltinK8sSecretStore,
				EnableAppHealthCheck:          opts.EnableAppHealthCheck,
				AppHealthCheckPath:            opts.AppHealthCheckPath,
				AppHealthProbeInterval:        opts.AppHealthProbeInterval,
				AppHealthProbeTimeout:         opts.AppHealthProbeTimeout,
				AppHealthThreshold:            opts.AppHealthThreshold,
				AppChannelAddress:             opts.AppChannelAddress,
				EnableAPILogging:              opts.EnableAPILogging,
				Config:                        opts.Config,
				Metrics:                       opts.Metrics,
				AppSSL:                        opts.AppSSL,
				ComponentsPath:                opts.ComponentsPath,
				Registry:                      reg,
				Security:                      sec,
			})
			if rerr != nil {
				return rerr
			}
			// Blocks until ctx is cancelled or the runtime errors.
			return rt.Run(ctx)
		},
	).Run(ctx)
	if err != nil {
		log.Fatalf("Fatal error from runtime: %s", err)
	}
	log.Info("Daprd shutdown gracefully")
}
|
mikeee/dapr
|
cmd/daprd/app/app.go
|
GO
|
mit
| 7,076 |
//go:build allcomponents
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package components
import (
"github.com/dapr/components-contrib/bindings/alicloud/oss"
bindingsLoader "github.com/dapr/dapr/pkg/components/bindings"
)
// init registers the AliCloud OSS output binding with the default
// bindings registry under the component name "alicloud.oss".
func init() {
	bindingsLoader.DefaultRegistry.RegisterOutputBinding(oss.NewAliCloudOSS, "alicloud.oss")
}
|
mikeee/dapr
|
cmd/daprd/components/binding_alicloud_oss.go
|
GO
|
mit
| 848 |
//go:build allcomponents
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package components
import (
"github.com/dapr/components-contrib/bindings/alicloud/sls"
bindingsLoader "github.com/dapr/dapr/pkg/components/bindings"
)
// init registers the AliCloud SLS Logstorage output binding under the
// component name "alicloud.sls".
func init() {
	bindingsLoader.DefaultRegistry.RegisterOutputBinding(sls.NewAliCloudSlsLogstorage, "alicloud.sls")
}
|
mikeee/dapr
|
cmd/daprd/components/binding_alicloud_sls.go
|
GO
|
mit
| 858 |
//go:build allcomponents
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package components
import (
"github.com/dapr/components-contrib/bindings/alicloud/tablestore"
bindingsLoader "github.com/dapr/dapr/pkg/components/bindings"
)
// init registers the AliCloud Tablestore output binding under the
// component name "alicloud.tablestore".
func init() {
	bindingsLoader.DefaultRegistry.RegisterOutputBinding(tablestore.NewAliCloudTableStore, "alicloud.tablestore")
}
|
mikeee/dapr
|
cmd/daprd/components/binding_alicloud_tablestore.go
|
GO
|
mit
| 876 |
//go:build allcomponents
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package components
import (
"github.com/dapr/components-contrib/bindings/apns"
bindingsLoader "github.com/dapr/dapr/pkg/components/bindings"
)
// init registers the Apple Push Notification Service output binding
// under the component name "apns".
func init() {
	bindingsLoader.DefaultRegistry.RegisterOutputBinding(apns.NewAPNS, "apns")
}
|
mikeee/dapr
|
cmd/daprd/components/binding_apns.go
|
GO
|
mit
| 826 |
//go:build allcomponents
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package components
import (
"github.com/dapr/components-contrib/bindings/aws/dynamodb"
bindingsLoader "github.com/dapr/dapr/pkg/components/bindings"
)
// init registers the AWS DynamoDB output binding under the component
// name "aws.dynamodb".
func init() {
	bindingsLoader.DefaultRegistry.RegisterOutputBinding(dynamodb.NewDynamoDB, "aws.dynamodb")
}
|
mikeee/dapr
|
cmd/daprd/components/binding_aws_dynamodb.go
|
GO
|
mit
| 850 |
//go:build allcomponents
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package components
import (
"github.com/dapr/components-contrib/bindings"
"github.com/dapr/components-contrib/bindings/aws/kinesis"
bindingsLoader "github.com/dapr/dapr/pkg/components/bindings"
"github.com/dapr/kit/logger"
)
// init registers AWS Kinesis as both an input and an output binding under
// the component name "aws.kinesis". The closures adapt the concrete
// constructor to the interface-returning factory signatures the registry
// expects.
func init() {
	bindingsLoader.DefaultRegistry.RegisterInputBinding(func(l logger.Logger) bindings.InputBinding {
		return kinesis.NewAWSKinesis(l)
	}, "aws.kinesis")
	bindingsLoader.DefaultRegistry.RegisterOutputBinding(func(l logger.Logger) bindings.OutputBinding {
		return kinesis.NewAWSKinesis(l)
	}, "aws.kinesis")
}
|
mikeee/dapr
|
cmd/daprd/components/binding_aws_kinesis.go
|
GO
|
mit
| 1,140 |
//go:build allcomponents || stablecomponents
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package components
import (
"github.com/dapr/components-contrib/bindings/aws/s3"
bindingsLoader "github.com/dapr/dapr/pkg/components/bindings"
)
// init registers the AWS S3 output binding under the component name
// "aws.s3".
func init() {
	bindingsLoader.DefaultRegistry.RegisterOutputBinding(s3.NewAWSS3, "aws.s3")
}
|
mikeee/dapr
|
cmd/daprd/components/binding_aws_s3.go
|
GO
|
mit
| 849 |
//go:build allcomponents
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package components
import (
"github.com/dapr/components-contrib/bindings/aws/ses"
bindingsLoader "github.com/dapr/dapr/pkg/components/bindings"
)
// init registers the AWS SES output binding under the component name
// "aws.ses".
func init() {
	bindingsLoader.DefaultRegistry.RegisterOutputBinding(ses.NewAWSSES, "aws.ses")
}
|
mikeee/dapr
|
cmd/daprd/components/binding_aws_ses.go
|
GO
|
mit
| 833 |
//go:build allcomponents
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package components
import (
"github.com/dapr/components-contrib/bindings/aws/sns"
bindingsLoader "github.com/dapr/dapr/pkg/components/bindings"
)
// init registers the AWS SNS output binding under the component name
// "aws.sns".
func init() {
	bindingsLoader.DefaultRegistry.RegisterOutputBinding(sns.NewAWSSNS, "aws.sns")
}
|
mikeee/dapr
|
cmd/daprd/components/binding_aws_sns.go
|
GO
|
mit
| 833 |
//go:build allcomponents
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package components
import (
"github.com/dapr/components-contrib/bindings"
"github.com/dapr/components-contrib/bindings/aws/sqs"
bindingsLoader "github.com/dapr/dapr/pkg/components/bindings"
"github.com/dapr/kit/logger"
)
// init registers AWS SQS as both an input and an output binding under the
// component name "aws.sqs", adapting the concrete constructor to the
// registry's interface-returning factory signatures.
func init() {
	bindingsLoader.DefaultRegistry.RegisterInputBinding(func(l logger.Logger) bindings.InputBinding {
		return sqs.NewAWSSQS(l)
	}, "aws.sqs")
	bindingsLoader.DefaultRegistry.RegisterOutputBinding(func(l logger.Logger) bindings.OutputBinding {
		return sqs.NewAWSSQS(l)
	}, "aws.sqs")
}
|
mikeee/dapr
|
cmd/daprd/components/binding_aws_sqs.go
|
GO
|
mit
| 1,112 |
//go:build allcomponents || stablecomponents
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package components
import (
"github.com/dapr/components-contrib/bindings/azure/blobstorage"
bindingsLoader "github.com/dapr/dapr/pkg/components/bindings"
)
// init registers the Azure Blob Storage output binding under the
// component name "azure.blobstorage".
func init() {
	bindingsLoader.DefaultRegistry.RegisterOutputBinding(blobstorage.NewAzureBlobStorage, "azure.blobstorage")
}
|
mikeee/dapr
|
cmd/daprd/components/binding_azure_blobstorage.go
|
GO
|
mit
| 891 |
//go:build allcomponents || stablecomponents
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package components
import (
"github.com/dapr/components-contrib/bindings/azure/cosmosdb"
bindingsLoader "github.com/dapr/dapr/pkg/components/bindings"
)
// init registers the Azure Cosmos DB output binding under the component
// name "azure.cosmosdb".
func init() {
	bindingsLoader.DefaultRegistry.RegisterOutputBinding(cosmosdb.NewCosmosDB, "azure.cosmosdb")
}
|
mikeee/dapr
|
cmd/daprd/components/binding_azure_cosmosdb.go
|
GO
|
mit
| 874 |
//go:build allcomponents
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package components
import (
"github.com/dapr/components-contrib/bindings/azure/cosmosdbgremlinapi"
bindingsLoader "github.com/dapr/dapr/pkg/components/bindings"
)
// init registers the Azure Cosmos DB Gremlin API output binding under the
// component name "azure.cosmosdb.gremlinapi".
func init() {
	bindingsLoader.DefaultRegistry.RegisterOutputBinding(cosmosdbgremlinapi.NewCosmosDBGremlinAPI, "azure.cosmosdb.gremlinapi")
}
|
mikeee/dapr
|
cmd/daprd/components/binding_azure_cosmosdb_gremlinapi.go
|
GO
|
mit
| 895 |
//go:build allcomponents
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package components
import (
"github.com/dapr/components-contrib/bindings"
"github.com/dapr/components-contrib/bindings/azure/eventgrid"
bindingsLoader "github.com/dapr/dapr/pkg/components/bindings"
"github.com/dapr/kit/logger"
)
// init registers Azure Event Grid as both an input and an output binding
// under the component name "azure.eventgrid", adapting the concrete
// constructor to the registry's factory signatures.
func init() {
	bindingsLoader.DefaultRegistry.RegisterInputBinding(func(l logger.Logger) bindings.InputBinding {
		return eventgrid.NewAzureEventGrid(l)
	}, "azure.eventgrid")
	bindingsLoader.DefaultRegistry.RegisterOutputBinding(func(l logger.Logger) bindings.OutputBinding {
		return eventgrid.NewAzureEventGrid(l)
	}, "azure.eventgrid")
}
|
mikeee/dapr
|
cmd/daprd/components/binding_azure_eventgrid.go
|
GO
|
mit
| 1,164 |
//go:build allcomponents || stablecomponents
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package components
import (
"github.com/dapr/components-contrib/bindings"
"github.com/dapr/components-contrib/bindings/azure/eventhubs"
bindingsLoader "github.com/dapr/dapr/pkg/components/bindings"
"github.com/dapr/kit/logger"
)
// init registers Azure Event Hubs as both an input and an output binding
// under the component name "azure.eventhubs", adapting the concrete
// constructor to the registry's factory signatures.
func init() {
	bindingsLoader.DefaultRegistry.RegisterInputBinding(func(l logger.Logger) bindings.InputBinding {
		return eventhubs.NewAzureEventHubs(l)
	}, "azure.eventhubs")
	bindingsLoader.DefaultRegistry.RegisterOutputBinding(func(l logger.Logger) bindings.OutputBinding {
		return eventhubs.NewAzureEventHubs(l)
	}, "azure.eventhubs")
}
|
mikeee/dapr
|
cmd/daprd/components/binding_azure_eventhubs.go
|
GO
|
mit
| 1,184 |
//go:build allcomponents
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package components
import (
"github.com/dapr/components-contrib/bindings/azure/openai"
bindingsLoader "github.com/dapr/dapr/pkg/components/bindings"
)
// init registers the Azure OpenAI output binding under the component name
// "azure.openai".
func init() {
	bindingsLoader.DefaultRegistry.RegisterOutputBinding(openai.NewOpenAI, "azure.openai")
}
|
mikeee/dapr
|
cmd/daprd/components/binding_azure_openai.go
|
GO
|
mit
| 846 |
//go:build allcomponents || stablecomponents
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package components
import (
"github.com/dapr/components-contrib/bindings"
"github.com/dapr/components-contrib/bindings/azure/servicebusqueues"
bindingsLoader "github.com/dapr/dapr/pkg/components/bindings"
"github.com/dapr/kit/logger"
)
// init registers Azure Service Bus Queues as both an input and an output
// binding. Two component names are registered: the canonical
// "azure.servicebus.queues" and the legacy alias "azure.servicebusqueues".
func init() {
	bindingsLoader.DefaultRegistry.RegisterInputBinding(func(l logger.Logger) bindings.InputBinding {
		return servicebusqueues.NewAzureServiceBusQueues(l)
	}, "azure.servicebus.queues", "azure.servicebusqueues")
	bindingsLoader.DefaultRegistry.RegisterOutputBinding(func(l logger.Logger) bindings.OutputBinding {
		return servicebusqueues.NewAzureServiceBusQueues(l)
	}, "azure.servicebus.queues", "azure.servicebusqueues")
}
|
mikeee/dapr
|
cmd/daprd/components/binding_azure_servicebus_queues.go
|
GO
|
mit
| 1,287 |
//go:build allcomponents
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package components
import (
"github.com/dapr/components-contrib/bindings/azure/signalr"
bindingsLoader "github.com/dapr/dapr/pkg/components/bindings"
)
// init registers the Azure SignalR output binding under the component
// name "azure.signalr".
func init() {
	bindingsLoader.DefaultRegistry.RegisterOutputBinding(signalr.NewSignalR, "azure.signalr")
}
|
mikeee/dapr
|
cmd/daprd/components/binding_azure_signalr.go
|
GO
|
mit
| 850 |
//go:build allcomponents || stablecomponents
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package components
import (
"github.com/dapr/components-contrib/bindings"
"github.com/dapr/components-contrib/bindings/azure/storagequeues"
bindingsLoader "github.com/dapr/dapr/pkg/components/bindings"
"github.com/dapr/kit/logger"
)
// init registers Azure Storage Queues as both an input and an output
// binding under the component name "azure.storagequeues", adapting the
// concrete constructor to the registry's factory signatures.
func init() {
	bindingsLoader.DefaultRegistry.RegisterInputBinding(func(l logger.Logger) bindings.InputBinding {
		return storagequeues.NewAzureStorageQueues(l)
	}, "azure.storagequeues")
	bindingsLoader.DefaultRegistry.RegisterOutputBinding(func(l logger.Logger) bindings.OutputBinding {
		return storagequeues.NewAzureStorageQueues(l)
	}, "azure.storagequeues")
}
|
mikeee/dapr
|
cmd/daprd/components/binding_azure_storagequeues.go
|
GO
|
mit
| 1,212 |
//go:build allcomponents
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package components
import (
cfqueues "github.com/dapr/components-contrib/bindings/cloudflare/queues"
bindingsLoader "github.com/dapr/dapr/pkg/components/bindings"
)
// init registers the Cloudflare Queues output binding under the component
// name "cloudflare.queues".
func init() {
	bindingsLoader.DefaultRegistry.RegisterOutputBinding(cfqueues.NewCFQueues, "cloudflare.queues")
}
|
mikeee/dapr
|
cmd/daprd/components/binding_cloudflare_queues.go
|
GO
|
mit
| 869 |
//go:build allcomponents
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package components
import (
"github.com/dapr/components-contrib/bindings/commercetools"
bindingsLoader "github.com/dapr/dapr/pkg/components/bindings"
)
// init registers the commercetools output binding under the component
// name "commercetools".
func init() {
	bindingsLoader.DefaultRegistry.RegisterOutputBinding(commercetools.NewCommercetools, "commercetools")
}
|
mikeee/dapr
|
cmd/daprd/components/binding_commercetools.go
|
GO
|
mit
| 862 |
//go:build allcomponents || stablecomponents
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package components
import (
"github.com/dapr/components-contrib/bindings/cron"
bindingsLoader "github.com/dapr/dapr/pkg/components/bindings"
)
// init registers the cron scheduler as an input binding (input-only)
// under the component name "cron".
func init() {
	bindingsLoader.DefaultRegistry.RegisterInputBinding(cron.NewCron, "cron")
}
|
mikeee/dapr
|
cmd/daprd/components/binding_cron.go
|
GO
|
mit
| 845 |
//go:build allcomponents
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package components
import (
"github.com/dapr/components-contrib/bindings"
"github.com/dapr/components-contrib/bindings/alicloud/dingtalk/webhook"
bindingsLoader "github.com/dapr/dapr/pkg/components/bindings"
"github.com/dapr/kit/logger"
)
// init registers the DingTalk webhook as both an input and an output
// binding under the component name "dingtalk.webhook", adapting the
// concrete constructor to the registry's factory signatures.
func init() {
	bindingsLoader.DefaultRegistry.RegisterInputBinding(func(l logger.Logger) bindings.InputBinding {
		return webhook.NewDingTalkWebhook(l)
	}, "dingtalk.webhook")
	bindingsLoader.DefaultRegistry.RegisterOutputBinding(func(l logger.Logger) bindings.OutputBinding {
		return webhook.NewDingTalkWebhook(l)
	}, "dingtalk.webhook")
}
|
mikeee/dapr
|
cmd/daprd/components/binding_dingtalk_webhook.go
|
GO
|
mit
| 1,174 |
//go:build allcomponents || stablecomponents
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package components
import (
"github.com/dapr/components-contrib/bindings/dubbo"
bindingsLoader "github.com/dapr/dapr/pkg/components/bindings"
)
// init registers the Dubbo output binding under two component names: the
// canonical "dubbo" and the legacy alias "alicloud.dubbo".
func init() {
	bindingsLoader.DefaultRegistry.RegisterOutputBinding(dubbo.NewDubboOutput, "dubbo", "alicloud.dubbo")
}
|
mikeee/dapr
|
cmd/daprd/components/binding_dubbo.go
|
GO
|
mit
| 874 |
//go:build allcomponents
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package components
import (
"github.com/dapr/components-contrib/bindings/gcp/bucket"
bindingsLoader "github.com/dapr/dapr/pkg/components/bindings"
)
// init registers the GCP Cloud Storage bucket output binding under the
// component name "gcp.bucket".
func init() {
	bindingsLoader.DefaultRegistry.RegisterOutputBinding(bucket.NewGCPStorage, "gcp.bucket")
}
|
mikeee/dapr
|
cmd/daprd/components/binding_gcp_bucket.go
|
GO
|
mit
| 846 |
//go:build allcomponents
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package components
import (
"github.com/dapr/components-contrib/bindings"
"github.com/dapr/components-contrib/bindings/gcp/pubsub"
bindingsLoader "github.com/dapr/dapr/pkg/components/bindings"
"github.com/dapr/kit/logger"
)
// init registers GCP Pub/Sub as both an input and an output binding under
// the component name "gcp.pubsub", adapting the concrete constructor to
// the registry's factory signatures.
func init() {
	bindingsLoader.DefaultRegistry.RegisterInputBinding(func(l logger.Logger) bindings.InputBinding {
		return pubsub.NewGCPPubSub(l)
	}, "gcp.pubsub")
	bindingsLoader.DefaultRegistry.RegisterOutputBinding(func(l logger.Logger) bindings.OutputBinding {
		return pubsub.NewGCPPubSub(l)
	}, "gcp.pubsub")
}
|
mikeee/dapr
|
cmd/daprd/components/binding_gcp_pubsub.go
|
GO
|
mit
| 1,133 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.