| code (stringlengths 0-56.1M) | repo_name (stringclasses 515 values) | path (stringlengths 2-147) | language (stringclasses 447 values) | license (stringclasses 7 values) | size (int64 0-56.8M) |
|---|---|---|---|---|---|
import crypto from "crypto";
import type { OpenAIKey, SerializedKey } from "../index";
import { KeySerializerBase } from "../key-serializer-base";
const SERIALIZABLE_FIELDS: (keyof OpenAIKey)[] = [
"key",
"service",
"hash",
"organizationId",
"promptCount",
"gpt4Tokens",
"gpt4-32kTokens",
"turboTokens",
];
export type SerializedOpenAIKey = SerializedKey &
Partial<Pick<OpenAIKey, (typeof SERIALIZABLE_FIELDS)[number]>>;
export class OpenAIKeySerializer extends KeySerializerBase<OpenAIKey> {
constructor() {
super(SERIALIZABLE_FIELDS);
}
deserialize({ key, ...rest }: SerializedOpenAIKey): OpenAIKey {
return {
key,
service: "openai",
modelFamilies: ["turbo" as const, "gpt4" as const],
isTrial: false,
isDisabled: false,
isRevoked: false,
isOverQuota: false,
lastUsed: 0,
lastChecked: 0,
promptCount: 0,
hash: `oai-${crypto
.createHash("sha256")
.update(key)
.digest("hex")
.slice(0, 8)}`,
rateLimitedAt: 0,
rateLimitRequestsReset: 0,
rateLimitTokensReset: 0,
turboTokens: 0,
gpt4Tokens: 0,
"gpt4-32kTokens": 0,
...rest,
};
}
}
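// A minimal usage sketch for the serializer above; the key literal and the hash
// shown in comments are illustrative, not real values.
const exampleSerializer = new OpenAIKeySerializer();
// deserialize() fills in defaults for every field missing from the payload, so
// a bare { key } loaded from the environment becomes a complete OpenAIKey.
const exampleKey = exampleSerializer.deserialize({ key: "sk-example-not-real" });
console.log(exampleKey.hash);          // e.g. "oai-1a2b3c4d" (first 8 hex chars of SHA-256)
console.log(exampleKey.modelFamilies); // ["turbo", "gpt4"]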
| JJNeverkry/jj | src/shared/key-management/openai/serializer.ts | TypeScript | unknown | 1,211 |
import { logger } from "../../../logger";
import type { GooglePalmModelFamily } from "../../models";
import { KeyProviderBase } from "../key-provider-base";
import { Key } from "../types";
const RATE_LIMIT_LOCKOUT = 2000;
const KEY_REUSE_DELAY = 500;
// https://developers.generativeai.google.com/models/language
export const GOOGLE_PALM_SUPPORTED_MODELS = ["text-bison-001"] as const;
export type GooglePalmModel = (typeof GOOGLE_PALM_SUPPORTED_MODELS)[number];
type GooglePalmKeyUsage = {
[K in GooglePalmModelFamily as `${K}Tokens`]: number;
};
export interface GooglePalmKey extends Key, GooglePalmKeyUsage {
readonly service: "google-palm";
readonly modelFamilies: GooglePalmModelFamily[];
/** The time at which this key was last rate limited. */
rateLimitedAt: number;
/** The time until which this key is rate limited. */
rateLimitedUntil: number;
}
export class GooglePalmKeyProvider extends KeyProviderBase<GooglePalmKey> {
readonly service = "google-palm";
protected keys: GooglePalmKey[] = [];
protected log = logger.child({ module: "key-provider", service: this.service });
public async init() {
const storeName = this.store.constructor.name;
const loadedKeys = await this.store.load();
if (loadedKeys.length === 0) {
return this.log.warn({ via: storeName }, "No Google PaLM keys found.");
}
this.keys.push(...loadedKeys);
this.log.info(
{ count: this.keys.length, via: storeName },
"Loaded PaLM keys."
);
}
public get(_model: GooglePalmModel) {
const availableKeys = this.keys.filter((k) => !k.isDisabled);
if (availableKeys.length === 0) {
throw new Error("No Google PaLM keys available");
}
// (largely copied from the OpenAI provider, without trial key support)
// Select a key, from highest priority to lowest priority:
// 1. Keys which are not rate limited
// a. If all keys were rate limited recently, select the least-recently
// rate limited key.
// 2. Keys which have not been used in the longest time
const now = Date.now();
const keysByPriority = availableKeys.sort((a, b) => {
const aRateLimited = now - a.rateLimitedAt < RATE_LIMIT_LOCKOUT;
const bRateLimited = now - b.rateLimitedAt < RATE_LIMIT_LOCKOUT;
if (aRateLimited && !bRateLimited) return 1;
if (!aRateLimited && bRateLimited) return -1;
if (aRateLimited && bRateLimited) {
return a.rateLimitedAt - b.rateLimitedAt;
}
return a.lastUsed - b.lastUsed;
});
const selectedKey = keysByPriority[0];
selectedKey.lastUsed = now;
selectedKey.rateLimitedAt = now;
// Intended to throttle the queue processor as otherwise it will just
// flood the API with requests and we want to wait a sec to see if we're
// going to get a rate limit error on this key.
selectedKey.rateLimitedUntil = now + KEY_REUSE_DELAY;
return { ...selectedKey };
}
public incrementUsage(hash: string, _model: string, tokens: number) {
const key = this.keys.find((k) => k.hash === hash);
if (!key) return;
key.promptCount++;
key.bisonTokens += tokens;
}
public getLockoutPeriod(_model: GooglePalmModel) {
const activeKeys = this.keys.filter((k) => !k.isDisabled);
// Don't lock out if there are no keys available or the queue will stall.
// Just let it through so the add-key middleware can throw an error.
if (activeKeys.length === 0) return 0;
const now = Date.now();
const rateLimitedKeys = activeKeys.filter((k) => now < k.rateLimitedUntil);
const anyNotRateLimited = rateLimitedKeys.length < activeKeys.length;
if (anyNotRateLimited) return 0;
// If all keys are rate-limited, return the time until the first key is
// ready.
return Math.min(...activeKeys.map((k) => k.rateLimitedUntil - now));
}
/**
* This is called when we receive a 429, which means there are already five
* concurrent requests running on this key. We don't have any information on
* when these requests will resolve, so all we can do is wait a bit and try
* again. We will lock the key for 2 seconds after getting a 429 before
* retrying in order to give the other requests a chance to finish.
*/
public markRateLimited(keyHash: string) {
this.log.debug({ key: keyHash }, "Key rate limited");
const key = this.keys.find((k) => k.hash === keyHash)!;
const now = Date.now();
key.rateLimitedAt = now;
key.rateLimitedUntil = now + RATE_LIMIT_LOCKOUT;
}
public recheck() {}
}
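// To make the selection order in get() concrete, a small self-contained sketch
// with hypothetical keys and timestamps showing how the comparator ranks them:
const exampleNow = Date.now();
const exampleKeys = [
  { hash: "a", rateLimitedAt: exampleNow - 500, lastUsed: exampleNow - 10_000 },    // rate limited just now
  { hash: "b", rateLimitedAt: exampleNow - 60_000, lastUsed: exampleNow - 5_000 },  // free, used recently
  { hash: "c", rateLimitedAt: exampleNow - 60_000, lastUsed: exampleNow - 30_000 }, // free, idle the longest
];
const exampleRanking = [...exampleKeys].sort((a, b) => {
  const aLimited = exampleNow - a.rateLimitedAt < RATE_LIMIT_LOCKOUT;
  const bLimited = exampleNow - b.rateLimitedAt < RATE_LIMIT_LOCKOUT;
  if (aLimited && !bLimited) return 1;
  if (!aLimited && bLimited) return -1;
  if (aLimited && bLimited) return a.rateLimitedAt - b.rateLimitedAt;
  return a.lastUsed - b.lastUsed;
});
console.log(exampleRanking.map((k) => k.hash)); // ["c", "b", "a"]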
| JJNeverkry/jj | src/shared/key-management/palm/provider.ts | TypeScript | unknown | 4,550 |
import crypto from "crypto";
import type { GooglePalmKey, SerializedKey } from "../index";
import { KeySerializerBase } from "../key-serializer-base";
const SERIALIZABLE_FIELDS: (keyof GooglePalmKey)[] = [
"key",
"service",
"hash",
"promptCount",
"bisonTokens",
];
export type SerializedGooglePalmKey = SerializedKey &
Partial<Pick<GooglePalmKey, (typeof SERIALIZABLE_FIELDS)[number]>>;
export class GooglePalmKeySerializer extends KeySerializerBase<GooglePalmKey> {
constructor() {
super(SERIALIZABLE_FIELDS);
}
deserialize(serializedKey: SerializedGooglePalmKey): GooglePalmKey {
const { key, ...rest } = serializedKey;
return {
key,
service: "google-palm" as const,
modelFamilies: ["bison"],
isDisabled: false,
isRevoked: false,
promptCount: 0,
lastUsed: 0,
rateLimitedAt: 0,
rateLimitedUntil: 0,
hash: `plm-${crypto
.createHash("sha256")
.update(key)
.digest("hex")
.slice(0, 8)}`,
lastChecked: 0,
bisonTokens: 0,
...rest,
};
}
}
| JJNeverkry/jj | src/shared/key-management/palm/serializer.ts | TypeScript | unknown | 1,083 |
import { assertNever } from "../utils";
import {
Key,
KeySerializer,
LLMService,
SerializedKey,
ServiceToKey,
} from "./types";
import { OpenAIKeySerializer } from "./openai/serializer";
import { AnthropicKeySerializer } from "./anthropic/serializer";
import { GooglePalmKeySerializer } from "./palm/serializer";
import { AwsBedrockKeySerializer } from "./aws/serializer";
export function assertSerializedKey(k: any): asserts k is SerializedKey {
if (typeof k !== "object" || !k || typeof (k as any).key !== "string") {
throw new Error("Invalid serialized key data");
}
}
export function getSerializer<S extends LLMService>(
service: S
): KeySerializer<ServiceToKey[S]>;
export function getSerializer(service: LLMService): KeySerializer<Key> {
switch (service) {
case "openai":
return new OpenAIKeySerializer();
case "anthropic":
return new AnthropicKeySerializer();
case "google-palm":
return new GooglePalmKeySerializer();
case "aws":
return new AwsBedrockKeySerializer();
default:
assertNever(service);
}
}
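// Sketch of the intended call pattern; the overload above narrows the return
// type by service, so no cast is needed downstream. The key value is made up.
const exampleOpenAISerializer = getSerializer("openai");
// Typed as OpenAIKey (via ServiceToKey["openai"]) rather than the base Key.
const exampleOpenAIKey = exampleOpenAISerializer.deserialize({ key: "sk-example" });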
| JJNeverkry/jj | src/shared/key-management/serializers.ts | TypeScript | unknown | 1,086 |
import firebase from "firebase-admin";
import { config, getFirebaseApp } from "../../../config";
import { logger } from "../../../logger";
import { assertSerializedKey } from "../serializers";
import type {
Key,
KeySerializer,
KeyStore,
LLMService,
SerializedKey,
} from "../types";
import { MemoryKeyStore } from "./index";
export class FirebaseKeyStore<K extends Key> implements KeyStore<K> {
private readonly db: firebase.database.Database;
private readonly log: typeof logger;
private readonly pendingUpdates: Map<string, Partial<SerializedKey>>;
private readonly root: string;
private readonly serializer: KeySerializer<K>;
private readonly service: LLMService;
private flushInterval: NodeJS.Timeout | null = null;
private keysRef: firebase.database.Reference | null = null;
constructor(
service: LLMService,
serializer: KeySerializer<K>,
app = getFirebaseApp()
) {
this.db = firebase.database(app);
this.log = logger.child({ module: "firebase-key-store", service });
this.root = `keys/${config.firebaseRtdbRoot.toLowerCase()}/${service}`;
this.serializer = serializer;
this.service = service;
this.pendingUpdates = new Map();
this.scheduleFlush();
}
public async load(isMigrating = false): Promise<K[]> {
const keysRef = this.db.ref(this.root);
const snapshot = await keysRef.once("value");
const keys = snapshot.val();
this.keysRef = keysRef;
if (!keys) {
if (isMigrating) return [];
this.log.warn("No keys found in Firebase. Migrating from environment.");
await this.migrate();
return this.load(true);
}
return Object.values(keys).map((k) => {
assertSerializedKey(k);
return this.serializer.deserialize(k);
});
}
public add(key: K) {
const serialized = this.serializer.serialize(key);
this.pendingUpdates.set(key.hash, serialized);
this.forceFlush();
}
public update(id: string, update: Partial<K>, force = false) {
const existing = this.pendingUpdates.get(id) ?? {};
Object.assign(existing, this.serializer.partialSerialize(id, update));
this.pendingUpdates.set(id, existing);
if (force) this.forceFlush();
}
private forceFlush() {
if (this.flushInterval) clearInterval(this.flushInterval);
this.flushInterval = setTimeout(() => this.flush(), 0);
}
private scheduleFlush() {
if (this.flushInterval) clearInterval(this.flushInterval);
this.flushInterval = setInterval(() => this.flush(), 1000 * 60 * 5);
}
private async flush() {
if (!this.keysRef) {
this.log.warn(
{ pendingUpdates: this.pendingUpdates.size },
"Database not loaded yet. Skipping flush."
);
return this.scheduleFlush();
}
if (this.pendingUpdates.size === 0) {
this.log.debug("No pending key updates to flush.");
return this.scheduleFlush();
}
const updates: Record<string, Partial<SerializedKey>> = {};
this.pendingUpdates.forEach((v, k) => (updates[k] = v));
this.pendingUpdates.clear();
await this.keysRef.update(updates);
this.log.debug(
{ count: Object.keys(updates).length },
"Flushed pending key updates."
);
this.scheduleFlush();
}
private async migrate(): Promise<SerializedKey[]> {
const keysRef = this.db.ref(this.root);
const envStore = new MemoryKeyStore<K>(this.service, this.serializer);
const keys = await envStore.load();
if (keys.length === 0) {
this.log.warn("No keys found in environment or Firebase.");
return [];
}
const updates: Record<string, SerializedKey> = {};
keys.forEach((k) => (updates[k.hash] = this.serializer.serialize(k)));
await keysRef.update(updates);
this.log.info({ count: keys.length }, "Migrated keys from environment.");
return Object.values(updates);
}
}
| JJNeverkry/jj | src/shared/key-management/stores/firebase.ts | TypeScript | unknown | 3,883 |
export { FirebaseKeyStore } from "./firebase";
export { MemoryKeyStore } from "./memory";
| JJNeverkry/jj | src/shared/key-management/stores/index.ts | TypeScript | unknown | 90 |
import { assertNever } from "../../utils";
import { Key, KeySerializer, KeyStore, LLMService } from "../types";
export class MemoryKeyStore<K extends Key> implements KeyStore<K> {
private readonly env: string;
private readonly serializer: KeySerializer<K>;
constructor(service: LLMService, serializer: KeySerializer<K>) {
switch (service) {
case "anthropic":
this.env = "ANTHROPIC_KEY";
break;
case "openai":
this.env = "OPENAI_KEY";
break;
case "google-palm":
this.env = "GOOGLE_PALM_KEY";
break;
case "aws":
this.env = "AWS_CREDENTIALS";
break;
default:
assertNever(service);
}
this.serializer = serializer;
}
public async load() {
let envKeys: string[];
envKeys = [
...new Set(process.env[this.env]?.split(",").map((k) => k.trim())),
];
return envKeys
.filter((k) => k)
.map((k) => this.serializer.deserialize({ key: k }));
}
public add() {}
public update() {}
}
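// Usage sketch: keys are read from a comma-separated environment variable for
// the given service. The key values below are made up for illustration.
import { OpenAIKeySerializer } from "../openai/serializer";

process.env.OPENAI_KEY = "sk-first-example, sk-second-example";
const exampleStore = new MemoryKeyStore("openai", new OpenAIKeySerializer());
// load() splits on commas, trims whitespace, dedupes, and deserializes each key.
exampleStore.load().then((keys) => console.log(keys.length)); // 2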
| JJNeverkry/jj | src/shared/key-management/stores/memory.ts | TypeScript | unknown | 1,035 |
import type { OpenAIKey, OpenAIModel } from "./openai/provider";
import type { AnthropicKey, AnthropicModel } from "./anthropic/provider";
import type { GooglePalmKey, GooglePalmModel } from "./palm/provider";
import type { AwsBedrockKey, AwsBedrockModel } from "./aws/provider";
import type { ModelFamily } from "../models";
/** The request and response format used by a model's API. */
export type APIFormat = "openai" | "anthropic" | "google-palm" | "openai-text";
/**
* The service that a model is hosted on; distinct because services like AWS
* provide APIs from other service providers, but have their own authentication
* and key management.
*/
export type LLMService = "openai" | "anthropic" | "google-palm" | "aws";
export type Model =
| OpenAIModel
| AnthropicModel
| GooglePalmModel
| AwsBedrockModel;
type AllKeys = OpenAIKey | AnthropicKey | GooglePalmKey | AwsBedrockKey;
export type ServiceToKey = {
[K in AllKeys["service"]]: Extract<AllKeys, { service: K }>;
};
export type SerializedKey = { key: string };
export interface Key {
/** The API key itself. Never log this, use `hash` instead. */
readonly key: string;
/** The service that this key is for. */
service: LLMService;
/** The model families that this key has access to. */
modelFamilies: ModelFamily[];
/** Whether this key is currently disabled for some reason. */
isDisabled: boolean;
/**
* Whether this key specifically has been revoked. This is different from
* `isDisabled` because a key can be disabled for other reasons, such as
* exceeding its quota. A revoked key is assumed to be permanently disabled,
* and KeyStore implementations should not return it when loading keys.
*/
isRevoked: boolean;
/** The number of prompts that have been sent with this key. */
promptCount: number;
/** The time at which this key was last used. */
lastUsed: number;
/** The time at which this key was last checked. */
lastChecked: number;
/** Hash of the key, for logging and to find the key in the pool. */
hash: string;
}
export interface KeySerializer<K> {
serialize(keyObj: K): SerializedKey;
deserialize(serializedKey: SerializedKey): K;
partialSerialize(key: string, update: Partial<K>): Partial<SerializedKey>;
}
export interface KeyStore<K extends Key> {
load(): Promise<K[]>;
add(key: K): void;
update(id: string, update: Partial<K>, force?: boolean): void;
}
| JJNeverkry/jj | src/shared/key-management/types.ts | TypeScript | unknown | 2,420 |
import { logger } from "../logger";
export type OpenAIModelFamily = "turbo" | "gpt4" | "gpt4-32k";
export type AnthropicModelFamily = "claude";
export type GooglePalmModelFamily = "bison";
export type AwsBedrockModelFamily = "aws-claude";
export type ModelFamily =
| OpenAIModelFamily
| AnthropicModelFamily
| GooglePalmModelFamily
| AwsBedrockModelFamily;
export const MODEL_FAMILIES = (<A extends readonly ModelFamily[]>(
arr: A & ([ModelFamily] extends [A[number]] ? unknown : never)
) => arr)([
"turbo",
"gpt4",
"gpt4-32k",
"claude",
"bison",
"aws-claude",
] as const);
export const OPENAI_MODEL_FAMILY_MAP: { [regex: string]: OpenAIModelFamily } = {
"^gpt-4-32k-\\d{4}$": "gpt4-32k",
"^gpt-4-32k$": "gpt4-32k",
"^gpt-4-\\d{4}$": "gpt4",
"^gpt-4$": "gpt4",
"^gpt-3.5-turbo": "turbo",
"^text-embedding-ada-002$": "turbo",
};
export function getOpenAIModelFamily(model: string): OpenAIModelFamily {
for (const [regex, family] of Object.entries(OPENAI_MODEL_FAMILY_MAP)) {
if (model.match(regex)) return family;
}
const stack = new Error().stack;
logger.warn({ model, stack }, "Unmapped model family");
return "gpt4";
}
export function getClaudeModelFamily(_model: string): ModelFamily {
return "claude";
}
export function getGooglePalmModelFamily(model: string): ModelFamily {
if (model.match(/^\w+-bison-\d{3}$/)) return "bison";
const stack = new Error().stack;
logger.warn({ model, stack }, "Unmapped PaLM model family");
return "bison";
}
export function getAwsBedrockModelFamily(_model: string): ModelFamily {
return "aws-claude";
}
export function assertIsKnownModelFamily(
modelFamily: string
): asserts modelFamily is ModelFamily {
if (!MODEL_FAMILIES.includes(modelFamily as ModelFamily)) {
throw new Error(`Unknown model family: ${modelFamily}`);
}
}
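// A few concrete mappings as resolved by the regex table above; the model
// names are examples only.
console.log(getOpenAIModelFamily("gpt-4-0613"));         // "gpt4"
console.log(getOpenAIModelFamily("gpt-4-32k-0613"));     // "gpt4-32k"
console.log(getOpenAIModelFamily("gpt-3.5-turbo-16k"));  // "turbo"
console.log(getGooglePalmModelFamily("text-bison-001")); // "bison"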
| JJNeverkry/jj | src/shared/models.ts | TypeScript | unknown | 1,841 |
export * as sheets from "./sheets";
| JJNeverkry/jj | src/shared/prompt-logging/backends/index.ts | TypeScript | unknown | 36 |
/* Google Sheets backend for prompt logger. Upon every flush, this backend
writes the batch to a Sheets spreadsheet. If the sheet becomes too large, it
will create a new sheet and continue writing there.
This is essentially a really shitty ORM for Sheets. Absolutely no concurrency
support because it relies on local state to match up with the remote state. */
import { google, sheets_v4 } from "googleapis";
import type { CredentialBody } from "google-auth-library";
import type { GaxiosResponse } from "googleapis-common";
import { config } from "../../../config";
import { logger } from "../../../logger";
import { PromptLogEntry } from "..";
// There is always a sheet called __index__ which contains a list of all the
// other sheets. We use this rather than iterating over all the sheets in case
// the user needs to manually work with the spreadsheet.
// If no __index__ sheet exists, we will assume that the spreadsheet is empty
// and create one.
type IndexSheetModel = {
/**
* Stored in cell B2. Set on startup; if it changes, we assume that another
* instance of the proxy is writing to the spreadsheet and stop.
*/
lockId: string;
/**
* Data starts at row 4. Rows 1-3 are headers.
*/
rows: { logSheetName: string; createdAt: string; rowCount: number }[];
};
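// Illustrative layout of the remote __index__ sheet (example values only),
// matching what loadIndexSheet() and writeIndexSheet() below expect:
//
//        A                                             B              C
//   1    Don't edit this sheet while the server...     (blank)        (blank)
//   2    Lock ID                                       x7q2k9p4       (blank)
//   3    logSheetName                                  createdAt      rowCount
//   4    Log_20230901_120000                           2023-09-01T... 1250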
type LogSheetModel = {
sheetName: string;
rows: {
model: string;
endpoint: string;
promptRaw: string;
promptFlattened: string;
response: string;
}[];
};
const MAX_ROWS_PER_SHEET = 2000;
const log = logger.child({ module: "sheets" });
let sheetsClient: sheets_v4.Sheets | null = null;
/** Called when log backend aborts to tell the log queue to stop. */
let stopCallback: (() => void) | null = null;
/** Lock/synchronization ID for this session. */
let lockId = Math.random().toString(36).substring(2, 15);
/** In-memory cache of the index sheet. */
let indexSheet: IndexSheetModel | null = null;
/** In-memory cache of the active log sheet. */
let activeLogSheet: LogSheetModel | null = null;
/**
* Loads the __index__ sheet into memory. By default, asserts that the lock ID
* has not changed since the start of the session.
*/
const loadIndexSheet = async (assertLockId = true) => {
const client = sheetsClient!;
const spreadsheetId = config.googleSheetsSpreadsheetId!;
log.info({ assertLockId }, "Loading __index__ sheet.");
const res = await client.spreadsheets.values.get({
spreadsheetId: spreadsheetId,
range: "__index__!A1:D",
majorDimension: "ROWS",
});
const data = assertData(res);
if (!data.values || data.values[2][0] !== "logSheetName") {
log.error({ values: data.values }, "Unexpected format for __index__ sheet");
throw new Error("Unexpected format for __index__ sheet");
}
if (assertLockId) {
const lockIdCell = data.values[1][1];
if (lockIdCell !== lockId) {
log.error(
{ receivedLock: lockIdCell, expectedLock: lockId },
"Another instance of the proxy is writing to the spreadsheet; stopping."
);
stop();
throw new Error(`Lock ID assertion failed`);
}
}
const rows = data.values.slice(3).map((row) => {
return {
logSheetName: row[0],
createdAt: row[1],
rowCount: row[2],
};
});
indexSheet = { lockId, rows };
};
/** Creates empty __index__ sheet for a new spreadsheet. */
const createIndexSheet = async () => {
const client = sheetsClient!;
const spreadsheetId = config.googleSheetsSpreadsheetId!;
log.info("Creating empty __index__ sheet.");
const res = await client.spreadsheets.batchUpdate({
spreadsheetId: spreadsheetId,
requestBody: {
requests: [
{
addSheet: {
properties: {
title: "__index__",
gridProperties: { rowCount: 1, columnCount: 3 },
},
},
},
],
},
});
assertData(res);
indexSheet = { lockId, rows: [] };
await writeIndexSheet();
};
/** Writes contents of in-memory indexSheet to the remote __index__ sheet. */
const writeIndexSheet = async () => {
const client = sheetsClient!;
const spreadsheetId = config.googleSheetsSpreadsheetId!;
const headerRows = [
["Don't edit this sheet while the server is running.", "", ""],
["Lock ID", lockId, ""],
["logSheetName", "createdAt", "rowCount"],
];
const contentRows = indexSheet!.rows.map((row) => {
return [row.logSheetName, row.createdAt, row.rowCount];
});
log.info("Persisting __index__ sheet.");
await client.spreadsheets.values.batchUpdate({
spreadsheetId: spreadsheetId,
requestBody: {
valueInputOption: "RAW",
data: [
{ range: "__index__!A1:D", values: [...headerRows, ...contentRows] },
],
},
});
};
/** Creates a new log sheet, adds it to the index, and sets it as active. */
const createLogSheet = async () => {
const client = sheetsClient!;
const spreadsheetId = config.googleSheetsSpreadsheetId!;
// Sheet name format is Log_YYYYMMDD_HHMMSS
const sheetName = `Log_${new Date()
.toISOString()
// YYYY-MM-DDTHH:MM:SS.sssZ -> YYYYMMDD_HHMMSS
.replace(/[-:.]/g, "")
.replace(/T/, "_")
.substring(0, 15)}`;
log.info({ sheetName }, "Creating new log sheet.");
const res = await client.spreadsheets.batchUpdate({
spreadsheetId: spreadsheetId,
requestBody: {
requests: [
{
addSheet: {
properties: {
title: sheetName,
gridProperties: { rowCount: MAX_ROWS_PER_SHEET, columnCount: 5 },
},
},
},
],
},
});
assertData(res);
// Increase row/column size and wrap text for readability.
const sheetId = res.data.replies![0].addSheet!.properties!.sheetId;
await client.spreadsheets.batchUpdate({
spreadsheetId: spreadsheetId,
requestBody: {
requests: [
{
repeatCell: {
range: { sheetId },
cell: {
userEnteredFormat: {
wrapStrategy: "WRAP",
verticalAlignment: "TOP",
},
},
fields: "*",
},
},
{
updateDimensionProperties: {
range: {
sheetId,
dimension: "COLUMNS",
startIndex: 3,
endIndex: 5,
},
properties: { pixelSize: 500 },
fields: "pixelSize",
},
},
{
updateDimensionProperties: {
range: {
sheetId,
dimension: "ROWS",
startIndex: 1,
},
properties: { pixelSize: 200 },
fields: "pixelSize",
},
},
],
},
});
await client.spreadsheets.values.batchUpdate({
spreadsheetId: spreadsheetId,
requestBody: {
valueInputOption: "RAW",
data: [
{
range: `${sheetName}!A1:E`,
values: [
["model", "endpoint", "prompt json", "prompt string", "response"],
],
},
],
},
});
indexSheet!.rows.push({
logSheetName: sheetName,
createdAt: new Date().toISOString(),
rowCount: 0,
});
await writeIndexSheet();
activeLogSheet = { sheetName, rows: [] };
};
export const appendBatch = async (batch: PromptLogEntry[]) => {
if (!activeLogSheet) {
// Create a new log sheet if we don't have one yet.
await createLogSheet();
} else {
// Check lock to ensure we're the only instance writing to the spreadsheet.
await loadIndexSheet(true);
}
const client = sheetsClient!;
const spreadsheetId = config.googleSheetsSpreadsheetId!;
const sheetName = activeLogSheet!.sheetName;
const newRows = batch.map((entry) => {
return [
entry.model,
entry.endpoint,
entry.promptRaw.slice(0, 50000),
entry.promptFlattened.slice(0, 50000),
entry.response.slice(0, 50000),
];
});
log.info({ sheetName, rowCount: newRows.length }, "Appending log batch.");
const data = await client.spreadsheets.values.append({
spreadsheetId: spreadsheetId,
range: `${sheetName}!A1:D`,
valueInputOption: "RAW",
requestBody: { values: newRows, majorDimension: "ROWS" },
});
assertData(data);
if (data.data.updates && data.data.updates.updatedRows) {
const newRowCount = data.data.updates.updatedRows;
log.info({ sheetName, rowCount: newRowCount }, "Successfully appended.");
activeLogSheet!.rows = activeLogSheet!.rows.concat(
newRows.map((row) => ({
model: row[0],
endpoint: row[1],
promptRaw: row[2],
promptFlattened: row[3],
response: row[4],
}))
);
} else {
// We didn't receive an error but we didn't get any updates either.
// We may need to create a new sheet and throw to make the queue retry the
// batch.
log.warn(
{ sheetName, rowCount: newRows.length },
"No updates received from append. Creating new sheet and retrying."
);
await createLogSheet();
throw new Error("No updates received from append.");
}
await finalizeBatch();
};
const finalizeBatch = async () => {
const sheetName = activeLogSheet!.sheetName;
const rowCount = activeLogSheet!.rows.length;
const indexRow = indexSheet!.rows.find(
({ logSheetName }) => logSheetName === sheetName
)!;
indexRow.rowCount = rowCount;
if (rowCount >= MAX_ROWS_PER_SHEET) {
await createLogSheet(); // Also updates index sheet
} else {
await writeIndexSheet();
}
log.info({ sheetName, rowCount }, "Batch finalized.");
};
type LoadLogSheetArgs = {
sheetName: string;
/** The starting row to load. If omitted, loads all rows (expensive). */
fromRow?: number;
};
/** Not currently used. */
export const loadLogSheet = async ({
sheetName,
fromRow = 2, // omit header row
}: LoadLogSheetArgs) => {
const client = sheetsClient!;
const spreadsheetId = config.googleSheetsSpreadsheetId!;
const range = `${sheetName}!A${fromRow}:E`;
const res = await client.spreadsheets.values.get({
spreadsheetId: spreadsheetId,
range,
});
const data = assertData(res);
const values = data.values || [];
const rows = values.slice(1).map((row) => {
return {
model: row[0],
endpoint: row[1],
promptRaw: row[2],
promptFlattened: row[3],
response: row[4],
};
});
activeLogSheet = { sheetName, rows };
};
export const init = async (onStop: () => void) => {
if (sheetsClient) {
return;
}
if (!config.googleSheetsKey || !config.googleSheetsSpreadsheetId) {
throw new Error(
"Missing required Google Sheets config. Refer to documentation for setup instructions."
);
}
log.info("Initializing Google Sheets backend.");
const encodedCreds = config.googleSheetsKey;
// encodedCreds is a base64-encoded JSON key from the GCP console.
const creds: CredentialBody = JSON.parse(
Buffer.from(encodedCreds, "base64").toString("utf8").trim()
);
const auth = new google.auth.GoogleAuth({
scopes: ["https://www.googleapis.com/auth/spreadsheets"],
credentials: creds,
});
sheetsClient = google.sheets({ version: "v4", auth });
stopCallback = onStop;
const sheetId = config.googleSheetsSpreadsheetId;
const res = await sheetsClient.spreadsheets.get({
spreadsheetId: sheetId,
});
if (!res.data) {
const { status, statusText, headers } = res;
log.error(
{
res: { status, statusText, headers },
creds: {
client_email: creds.client_email?.slice(0, 5) + "********",
private_key: creds.private_key?.slice(0, 5) + "********",
},
sheetId: config.googleSheetsSpreadsheetId,
},
"Could not connect to Google Sheets."
);
stop();
throw new Error("Could not connect to Google Sheets.");
} else {
const sheetTitle = res.data.properties?.title;
log.info({ sheetId, sheetTitle }, "Connected to Google Sheets.");
}
// Load or create the index sheet and write the lockId to it.
try {
log.info("Loading index sheet.");
await loadIndexSheet(false);
await writeIndexSheet();
} catch (e) {
log.info("Creating new index sheet.");
await createIndexSheet();
}
};
/** Called during some unrecoverable error to tell the log queue to stop. */
function stop() {
log.warn("Stopping Google Sheets backend.");
if (stopCallback) {
stopCallback();
}
sheetsClient = null;
}
function assertData<T = sheets_v4.Schema$ValueRange>(res: GaxiosResponse<T>) {
if (!res.data) {
const { status, statusText, headers } = res;
log.error(
{ res: { status, statusText, headers } },
"Unexpected response from Google Sheets API."
);
}
return res.data!;
}
| JJNeverkry/jj | src/shared/prompt-logging/backends/sheets.ts | TypeScript | unknown | 12,739 |
/* Logs prompts and model responses to a persistent storage backend, if enabled.
Since the proxy is generally deployed to free-tier services, our options for
persistent storage are pretty limited. We'll use Google Sheets as a makeshift
database for now.
Due to the limitations of Google Sheets, we'll queue up log entries and flush
them to the API periodically. */
export interface PromptLogEntry {
model: string;
endpoint: string;
/** JSON prompt passed to the model */
promptRaw: string;
/** Prompt with user and assistant messages flattened into a single string */
promptFlattened: string;
response: string;
// TODO: temperature, top_p, top_k, etc.
}
export * as logQueue from "./log-queue";
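// Sketch of the intended flow; the entry fields below are hypothetical values.
// start() requires the Google Sheets backend to be configured, otherwise it
// logs an error and enqueued entries are discarded with a warning.
import * as exampleLogQueue from "./log-queue";

// Called once at application startup.
async function exampleStartPromptLogging() {
  await exampleLogQueue.start();
}

// Called after each proxied request completes.
function exampleLogPrompt(response: string) {
  const entry: PromptLogEntry = {
    model: "gpt-4",
    endpoint: "/v1/chat/completions",
    promptRaw: JSON.stringify([{ role: "user", content: "Hi" }]),
    promptFlattened: "User: Hi",
    response,
  };
  exampleLogQueue.enqueue(entry);
}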
| JJNeverkry/jj | src/shared/prompt-logging/index.ts | TypeScript | unknown | 715 |
/* Queues incoming prompts/responses and periodically flushes them to configured
* logging backend. */
import { logger } from "../../logger";
import { PromptLogEntry } from ".";
import { sheets } from "./backends";
const FLUSH_INTERVAL = 1000 * 10;
const MAX_BATCH_SIZE = 25;
const queue: PromptLogEntry[] = [];
const log = logger.child({ module: "log-queue" });
let started = false;
let timeoutId: NodeJS.Timeout | null = null;
let retrying = false;
let consecutiveFailedBatches = 0;
export const enqueue = (payload: PromptLogEntry) => {
if (!started) {
log.warn("Log queue not started, discarding incoming log entry.");
return;
}
queue.push(payload);
};
export const flush = async () => {
if (!started) {
return;
}
if (queue.length > 0) {
const batchSize = Math.min(MAX_BATCH_SIZE, queue.length);
const nextBatch = queue.splice(0, batchSize);
log.info({ size: nextBatch.length }, "Submitting new batch.");
try {
await sheets.appendBatch(nextBatch);
retrying = false;
consecutiveFailedBatches = 0;
} catch (e: any) {
if (retrying) {
log.error(
{ message: e.message, stack: e.stack },
"Failed twice to flush batch, discarding."
);
retrying = false;
consecutiveFailedBatches++;
} else {
// Put the batch back at the front of the queue and try again
log.warn(
{ message: e.message, stack: e.stack },
"Failed to flush batch. Retrying."
);
queue.unshift(...nextBatch);
retrying = true;
setImmediate(() => flush());
return;
}
}
}
const useHalfInterval = queue.length > MAX_BATCH_SIZE / 2;
scheduleFlush(useHalfInterval);
};
export const start = async () => {
try {
await sheets.init(() => stop());
log.info("Logging backend initialized.");
started = true;
} catch (e) {
log.error(e, "Could not initialize logging backend.");
return;
}
scheduleFlush();
};
export const stop = () => {
if (timeoutId) {
clearTimeout(timeoutId);
}
log.info("Stopping log queue.");
started = false;
};
const scheduleFlush = (halfInterval = false) => {
if (consecutiveFailedBatches > 3) {
// TODO: may cause memory issues on busy servers, though if we crash that
// may actually fix the problem with logs randomly not being flushed.
const oneMinute = 60 * 1000;
const maxBackoff = 10 * oneMinute;
const backoff = Math.min(consecutiveFailedBatches * oneMinute, maxBackoff);
timeoutId = setTimeout(() => {
flush();
}, backoff);
log.warn(
{ consecutiveFailedBatches, backoffMs: backoff },
"Failed to flush 3 batches in a row, pausing for a few minutes."
);
return;
}
if (halfInterval) {
log.warn(
{ queueSize: queue.length },
"Queue is falling behind, switching to faster flush interval."
);
}
timeoutId = setTimeout(
() => {
flush();
},
halfInterval ? FLUSH_INTERVAL / 2 : FLUSH_INTERVAL
);
};
| JJNeverkry/jj | src/shared/prompt-logging/log-queue.ts | TypeScript | unknown | 3,044 |
import { ModelFamily } from "./models";
// technically slightly underestimates, because completion tokens cost more
// than prompt tokens but we don't track those separately right now
export function getTokenCostUsd(model: ModelFamily, tokens: number) {
let cost = 0;
switch (model) {
case "gpt4-32k":
cost = 0.00006;
break;
case "gpt4":
cost = 0.00003;
break;
case "turbo":
cost = 0.0000015;
break;
case "aws-claude":
case "claude":
cost = 0.00001102;
break;
}
return cost * Math.max(0, tokens);
}
export function prettyTokens(tokens: number): string {
const absTokens = Math.abs(tokens);
if (absTokens < 1000) {
return tokens.toString();
} else if (absTokens < 1000000) {
return (tokens / 1000).toFixed(1) + "k";
} else if (absTokens < 1000000000) {
return (tokens / 1000000).toFixed(2) + "m";
} else {
return (tokens / 1000000000).toFixed(2) + "b";
}
}
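// Worked examples for the helpers above, using the per-token rates hard-coded
// in getTokenCostUsd():
console.log(getTokenCostUsd("turbo", 1_000_000)); // 1.5  (1M tokens at $0.0000015)
console.log(getTokenCostUsd("gpt4", 10_000));     // 0.3  (10k tokens at $0.00003)
console.log(prettyTokens(1_234_567));             // "1.23m"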
| JJNeverkry/jj | src/shared/stats.ts | TypeScript | unknown | 962 |
import { Request, Response } from "express";
import { IncomingMessage } from "http";
import { assertNever } from "./utils";
export function initializeSseStream(res: Response) {
res.statusCode = 200;
res.setHeader("Content-Type", "text/event-stream; charset=utf-8");
res.setHeader("Cache-Control", "no-cache");
res.setHeader("Connection", "keep-alive");
res.setHeader("X-Accel-Buffering", "no"); // nginx-specific fix
res.flushHeaders();
}
/**
* Copies headers received from upstream API to the SSE response, excluding
* ones we need to set ourselves for SSE to work.
*/
export function copySseResponseHeaders(
proxyRes: IncomingMessage,
res: Response
) {
const toOmit = [
"content-length",
"content-encoding",
"transfer-encoding",
"content-type",
"connection",
"cache-control",
];
for (const [key, value] of Object.entries(proxyRes.headers)) {
if (!toOmit.includes(key) && value) {
res.setHeader(key, value);
}
}
}
/**
* Returns an SSE message that looks like a completion event for the service
* that the request is being proxied to. Used to send error messages to the
* client in the middle of a streaming request.
*/
export function buildFakeSse(
type: string,
string: string,
req: Request
) {
let fakeEvent;
const content = `\`\`\`\n[${type}: ${string}]\n\`\`\`\n`;
switch (req.inboundApi) {
case "openai":
fakeEvent = {
id: "chatcmpl-" + req.id,
object: "chat.completion.chunk",
created: Date.now(),
model: req.body?.model,
choices: [{ delta: { content }, index: 0, finish_reason: type }]
};
break;
case "openai-text":
fakeEvent = {
id: "cmpl-" + req.id,
object: "text_completion",
created: Date.now(),
choices: [
{ text: content, index: 0, logprobs: null, finish_reason: type }
],
model: req.body?.model
};
break;
case "anthropic":
fakeEvent = {
completion: content,
stop_reason: type,
truncated: false, // I've never seen this be true
stop: null,
model: req.body?.model,
log_id: "proxy-req-" + req.id
};
break;
case "google-palm":
throw new Error("PaLM not supported as an inbound API format");
default:
assertNever(req.inboundApi);
}
if (req.inboundApi === "anthropic") {
return [
"event: completion",
`data: ${JSON.stringify(fakeEvent)}`,
].join("\n") + "\n\n";
}
return `data: ${JSON.stringify(fakeEvent)}\n\n`;
}
| JJNeverkry/jj | src/shared/streaming.ts | TypeScript | unknown | 2,570 |
import { getTokenizer } from "@anthropic-ai/tokenizer";
import { Tiktoken } from "tiktoken/lite";
let encoder: Tiktoken;
export function init() {
// they export a `countTokens` function too but it instantiates a new
// tokenizer every single time and it is not fast...
encoder = getTokenizer();
return true;
}
export function getTokenCount(prompt: string, _model: string) {
// Don't try tokenizing if the prompt is massive to prevent DoS.
// 500k characters should be sufficient for all supported models.
if (prompt.length > 500000) {
return {
tokenizer: "length fallback",
token_count: 100000,
};
}
return {
tokenizer: "@anthropic-ai/tokenizer",
token_count: encoder.encode(prompt.normalize("NFKC"), "all").length,
};
}
| JJNeverkry/jj | src/shared/tokenization/claude.ts | TypeScript | unknown | 773 |
export { OpenAIPromptMessage } from "./openai";
export { init, countTokens } from "./tokenizer";
| JJNeverkry/jj | src/shared/tokenization/index.ts | TypeScript | unknown | 97 |
import { Tiktoken } from "tiktoken/lite";
import cl100k_base from "tiktoken/encoders/cl100k_base.json";
let encoder: Tiktoken;
export function init() {
encoder = new Tiktoken(
cl100k_base.bpe_ranks,
cl100k_base.special_tokens,
cl100k_base.pat_str
);
return true;
}
// Tested against:
// https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
export function getTokenCount(
prompt: string | OpenAIPromptMessage[],
model: string
) {
if (typeof prompt === "string") {
return getTextTokenCount(prompt);
}
const gpt4 = model.startsWith("gpt-4");
const tokensPerMessage = gpt4 ? 3 : 4;
const tokensPerName = gpt4 ? 1 : -1; // turbo omits role if name is present
let numTokens = 0;
for (const message of prompt) {
numTokens += tokensPerMessage;
for (const key of Object.keys(message)) {
{
const value = message[key as keyof OpenAIPromptMessage];
if (!value || typeof value !== "string") continue;
// Break if we get a huge message or exceed the token limit to prevent
// DoS.
// 100k tokens allows for future 100k GPT-4 models and 500k characters
// is just a sanity check
if (value.length > 500000 || numTokens > 100000) {
numTokens = 100000;
return {
tokenizer: "tiktoken (prompt length limit exceeded)",
token_count: numTokens,
};
}
numTokens += encoder.encode(value).length;
if (key === "name") {
numTokens += tokensPerName;
}
}
}
}
numTokens += 3; // every reply is primed with <|start|>assistant<|message|>
return { tokenizer: "tiktoken", token_count: numTokens };
}
function getTextTokenCount(prompt: string) {
if (prompt.length > 500000) {
return {
tokenizer: "length fallback",
token_count: 100000,
};
}
return {
tokenizer: "tiktoken",
token_count: encoder.encode(prompt).length,
};
}
export type OpenAIPromptMessage = {
name?: string;
content: string;
role: string;
};
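// Usage sketch: init() must run once before counting so the cl100k_base encoder
// is built and reused. The messages below are illustrative.
init();
const exampleMessages: OpenAIPromptMessage[] = [
  { role: "system", content: "You are a helpful assistant." },
  { role: "user", content: "Hello!" },
];
// getTokenCount() encodes each message's string fields and adds the per-message
// overhead (3 tokens per message for gpt-4, plus 3 for the primed reply).
const exampleResult = getTokenCount(exampleMessages, "gpt-4");
// -> { tokenizer: "tiktoken", token_count: <content tokens + overhead> }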
| JJNeverkry/jj | src/shared/tokenization/openai.ts | TypeScript | unknown | 2,094 |
import { Request } from "express";
import { assertNever } from "../utils";
import {
init as initClaude,
getTokenCount as getClaudeTokenCount,
} from "./claude";
import {
init as initOpenAi,
getTokenCount as getOpenAITokenCount,
OpenAIPromptMessage,
} from "./openai";
import { APIFormat } from "../key-management";
export async function init() {
initClaude();
initOpenAi();
}
/** Tagged union via `service` field of the different types of requests that can
* be made to the tokenization service, for both prompts and completions */
type TokenCountRequest = { req: Request } & (
| { prompt: OpenAIPromptMessage[]; completion?: never; service: "openai" }
| {
prompt: string;
completion?: never;
service: "openai-text" | "anthropic" | "google-palm";
}
| { prompt?: never; completion: string; service: APIFormat }
);
type TokenCountResult = {
token_count: number;
tokenizer: string;
tokenization_duration_ms: number;
};
export async function countTokens({
req,
service,
prompt,
completion,
}: TokenCountRequest): Promise<TokenCountResult> {
const time = process.hrtime();
switch (service) {
case "anthropic":
return {
...getClaudeTokenCount(prompt ?? completion, req.body.model),
tokenization_duration_ms: getElapsedMs(time),
};
case "openai":
case "openai-text":
return {
...getOpenAITokenCount(prompt ?? completion, req.body.model),
tokenization_duration_ms: getElapsedMs(time),
};
case "google-palm":
// TODO: Can't find a tokenization library for PaLM. There is an API
// endpoint for it but it adds significant latency to the request.
return {
...getOpenAITokenCount(prompt ?? completion, req.body.model),
tokenization_duration_ms: getElapsedMs(time),
};
default:
assertNever(service);
}
}
function getElapsedMs(time: [number, number]) {
const diff = process.hrtime(time);
return diff[0] * 1000 + diff[1] / 1e6;
}
| JJNeverkry/jj | src/shared/tokenization/tokenizer.ts | TypeScript | unknown | 2,009 |
import { ZodType, z } from "zod";
import type { ModelFamily } from "../models";
import { makeOptionalPropsNullable } from "../utils";
export const tokenCountsSchema: ZodType<UserTokenCounts> = z.object({
turbo: z.number().optional().default(0),
gpt4: z.number().optional().default(0),
"gpt4-32k": z.number().optional().default(0),
claude: z.number().optional().default(0),
bison: z.number().optional().default(0),
"aws-claude": z.number().optional().default(0),
});
export const UserSchema = z
.object({
/** User's personal access token. */
token: z.string(),
/** IP addresses the user has connected from. */
ip: z.array(z.string()),
/** User's nickname. */
nickname: z.string().max(80).optional(),
/**
* The user's privilege level.
* - `normal`: Default role. Subject to usual rate limits and quotas.
* - `special`: Special role. Higher quotas and exempt from
* auto-ban/lockout.
* - `temporary`: Time-limited token; automatically disabled once its expiry
* date passes and deleted shortly afterwards.
**/
type: z.enum(["normal", "special", "temporary"]),
/** Number of prompts the user has made. */
promptCount: z.number(),
/**
* @deprecated Use `tokenCounts` instead.
* Never used; retained for backwards compatibility.
*/
tokenCount: z.any().optional(),
/** Number of tokens the user has consumed, by model family. */
tokenCounts: tokenCountsSchema,
/** Maximum number of tokens the user can consume, by model family. */
tokenLimits: tokenCountsSchema,
/** Time at which the user was created. */
createdAt: z.number(),
/** Time at which the user last connected. */
lastUsedAt: z.number().optional(),
/** Time at which the user was disabled, if applicable. */
disabledAt: z.number().optional(),
/** Reason for which the user was disabled, if applicable. */
disabledReason: z.string().optional(),
/** Time at which the user will expire and be disabled (for temp users). */
expiresAt: z.number().optional(),
/** The user's maximum number of IP addresses; supersedes global max. */
maxIps: z.coerce.number().int().min(0).optional(),
/** Private note about the user. */
adminNote: z.string().optional(),
})
.strict();
/**
* Variant of `UserSchema` which allows for partial updates, and makes any
* optional properties on the base schema nullable. Null values are used to
* indicate that the property should be deleted from the user object.
*/
export const UserPartialSchema = makeOptionalPropsNullable(UserSchema)
.partial()
.extend({ token: z.string() });
export type UserTokenCounts = {
[K in ModelFamily]?: number;
};
export type User = z.infer<typeof UserSchema>;
export type UserUpdate = z.infer<typeof UserPartialSchema>;
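// Sketch of a partial update as accepted by the admin API; the token value is
// hypothetical. A null value means "delete this property from the user".
const exampleUpdate: UserUpdate = UserPartialSchema.parse({
  token: "550e8400-e29b-41d4-a716-446655440000",
  nickname: null, // clears the user's nickname
  maxIps: 5,      // per-user override of the global IP limit
});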
| JJNeverkry/jj | src/shared/users/schema.ts | TypeScript | unknown | 2,699 |
/**
* Basic user management. Handles creation and tracking of proxy users, personal
* access tokens, and quota management. Supports in-memory and Firebase Realtime
* Database persistence stores.
*
* Users are identified solely by their personal access token. The token is
* used to authenticate the user for all proxied requests.
*/
import admin from "firebase-admin";
import schedule from "node-schedule";
import { v4 as uuid } from "uuid";
import { config, getFirebaseApp } from "../../config";
import { MODEL_FAMILIES, ModelFamily } from "../models";
import { logger } from "../../logger";
import { User, UserTokenCounts, UserUpdate } from "./schema";
const log = logger.child({ module: "users" });
const INITIAL_TOKENS: Required<UserTokenCounts> = {
turbo: 0,
gpt4: 0,
"gpt4-32k": 0,
claude: 0,
bison: 0,
"aws-claude": 0,
};
const users: Map<string, User> = new Map();
const usersToFlush = new Set<string>();
let quotaRefreshJob: schedule.Job | null = null;
let userCleanupJob: schedule.Job | null = null;
export async function init() {
log.info({ store: config.persistenceProvider }, "Initializing user store...");
if (config.persistenceProvider === "firebase_rtdb") {
await initFirebase();
}
if (config.quotaRefreshPeriod) {
const crontab = getRefreshCrontab();
quotaRefreshJob = schedule.scheduleJob(crontab, refreshAllQuotas);
if (!quotaRefreshJob) {
throw new Error(
"Unable to schedule quota refresh. Is QUOTA_REFRESH_PERIOD set correctly?"
);
}
log.debug(
{ nextRefresh: quotaRefreshJob.nextInvocation() },
"Scheduled token quota refresh."
);
}
userCleanupJob = schedule.scheduleJob("* * * * *", cleanupExpiredTokens);
log.info("User store initialized.");
}
/**
* Creates a new user and returns their token. Optionally accepts parameters
* for setting an expiry date and/or token limits for temporary users.
**/
export function createUser(createOptions?: {
type?: User["type"];
expiresAt?: number;
tokenLimits?: User["tokenLimits"];
}) {
const token = uuid();
const newUser: User = {
token,
ip: [],
type: "normal",
promptCount: 0,
tokenCounts: { ...INITIAL_TOKENS },
tokenLimits: createOptions?.tokenLimits ?? { ...config.tokenQuota },
createdAt: Date.now(),
};
if (createOptions?.type === "temporary") {
Object.assign(newUser, {
type: "temporary",
expiresAt: createOptions.expiresAt,
});
} else {
Object.assign(newUser, { type: createOptions?.type ?? "normal" });
}
users.set(token, newUser);
usersToFlush.add(token);
return token;
}
/** Returns the user with the given token if they exist. */
export function getUser(token: string) {
return users.get(token);
}
/** Returns a list of all users. */
export function getUsers() {
return Array.from(users.values()).map((user) => ({ ...user }));
}
/**
* Upserts the given user. Intended for use with the /admin API for updating
* arbitrary fields on a user; use the other functions in this module for
* specific use cases. `undefined` values are left unchanged. `null` will delete
* the property from the user.
*
* Returns the upserted user.
*/
export function upsertUser(user: UserUpdate) {
const existing: User = users.get(user.token) ?? {
token: user.token,
ip: [],
type: "normal",
promptCount: 0,
tokenCounts: { ...INITIAL_TOKENS },
tokenLimits: { ...config.tokenQuota },
createdAt: Date.now(),
};
const updates: Partial<User> = {};
for (const field of Object.entries(user)) {
const [key, value] = field as [keyof User, any]; // already validated by zod
if (value === undefined || key === "token") continue;
if (value === null) {
delete existing[key];
} else {
updates[key] = value;
}
}
// TODO: Write firebase migration to backfill new fields
if (updates.tokenCounts) {
for (const family of MODEL_FAMILIES) {
updates.tokenCounts[family] ??= 0;
}
}
if (updates.tokenLimits) {
for (const family of MODEL_FAMILIES) {
updates.tokenLimits[family] ??= 0;
}
}
users.set(user.token, Object.assign(existing, updates));
usersToFlush.add(user.token);
// Immediately schedule a flush to the database if we're using Firebase.
if (config.persistenceProvider === "firebase_rtdb") {
setImmediate(flushUsers);
}
return users.get(user.token);
}
/** Increments the prompt count for the given user. */
export function incrementPromptCount(token: string) {
const user = users.get(token);
if (!user) return;
user.promptCount++;
usersToFlush.add(token);
}
/** Increments token consumption for the given user and model. */
export function incrementTokenCount(
token: string,
model: string,
consumption: number
) {
const user = users.get(token);
if (!user) return;
const modelFamily = getModelFamilyForQuotaUsage(model);
const existing = user.tokenCounts[modelFamily] ?? 0;
user.tokenCounts[modelFamily] = existing + consumption;
usersToFlush.add(token);
}
/**
* Given a user's token and IP address, authenticates the user and adds the IP
* to the user's list of IPs. Returns the user if they exist and are not
* disabled, otherwise returns undefined.
*/
export function authenticate(token: string, ip: string) {
const user = users.get(token);
if (!user || user.disabledAt) return;
if (!user.ip.includes(ip)) user.ip.push(ip);
const configIpLimit = user.maxIps ?? config.maxIpsPerUser;
const ipLimit =
user.type === "special" || !configIpLimit ? Infinity : configIpLimit;
if (user.ip.length > ipLimit) {
disableUser(token, "IP address limit exceeded.");
return;
}
user.lastUsedAt = Date.now();
usersToFlush.add(token);
return user;
}
export function hasAvailableQuota(
token: string,
model: string,
requested: number
) {
const user = users.get(token);
if (!user) return false;
if (user.type === "special") return true;
const modelFamily = getModelFamilyForQuotaUsage(model);
const { tokenCounts, tokenLimits } = user;
const tokenLimit = tokenLimits[modelFamily];
if (!tokenLimit) return true;
const tokensConsumed = (tokenCounts[modelFamily] ?? 0) + requested;
return tokensConsumed < tokenLimit;
}
export function refreshQuota(token: string) {
const user = users.get(token);
if (!user) return;
const { tokenCounts, tokenLimits } = user;
const quotas = Object.entries(config.tokenQuota) as [ModelFamily, number][];
quotas
// If a quota is not configured, don't touch any existing limits a user may
// already have been assigned manually.
.filter(([, quota]) => quota > 0)
.forEach(
([model, quota]) =>
(tokenLimits[model] = (tokenCounts[model] ?? 0) + quota)
);
usersToFlush.add(token);
}
export function resetUsage(token: string) {
const user = users.get(token);
if (!user) return;
const { tokenCounts } = user;
const counts = Object.entries(tokenCounts) as [ModelFamily, number][];
counts.forEach(([model]) => (tokenCounts[model] = 0));
usersToFlush.add(token);
}
/** Disables the given user, optionally providing a reason. */
export function disableUser(token: string, reason?: string) {
const user = users.get(token);
if (!user) return;
user.disabledAt = Date.now();
user.disabledReason = reason;
usersToFlush.add(token);
}
export function getNextQuotaRefresh() {
if (!quotaRefreshJob) return "never (manual refresh only)";
return quotaRefreshJob.nextInvocation().getTime();
}
/**
* Cleans up expired temporary tokens by disabling tokens past their access
* expiry date and permanently deleting tokens three days after their access
* expiry date.
*/
function cleanupExpiredTokens() {
const now = Date.now();
let disabled = 0;
let deleted = 0;
for (const user of users.values()) {
if (user.type !== "temporary") continue;
if (user.expiresAt && user.expiresAt < now && !user.disabledAt) {
disableUser(user.token, "Temporary token expired.");
disabled++;
}
if (user.disabledAt && user.disabledAt + 72 * 60 * 60 * 1000 < now) {
users.delete(user.token);
usersToFlush.add(user.token);
deleted++;
}
}
log.trace({ disabled, deleted }, "Expired tokens cleaned up.");
}
function refreshAllQuotas() {
let count = 0;
for (const user of users.values()) {
if (user.type === "temporary") continue;
refreshQuota(user.token);
count++;
}
log.info(
{ refreshed: count, nextRefresh: quotaRefreshJob!.nextInvocation() },
"Token quotas refreshed."
);
}
// TODO: Firebase persistence is pretend right now and just polls the in-memory
// store to sync it with Firebase when it changes. Will refactor to abstract
// persistence layer later so we can support multiple stores.
let firebaseTimeout: NodeJS.Timeout | undefined;
async function initFirebase() {
log.info("Connecting to Firebase...");
const app = getFirebaseApp();
const db = admin.database(app);
const usersRef = db.ref("users");
const snapshot = await usersRef.once("value");
const users: Record<string, User> | null = snapshot.val();
firebaseTimeout = setInterval(flushUsers, 20 * 1000);
if (!users) {
log.info("No users found in Firebase.");
return;
}
for (const token in users) {
upsertUser(users[token]);
}
usersToFlush.clear();
const numUsers = Object.keys(users).length;
log.info({ users: numUsers }, "Loaded users from Firebase");
}
async function flushUsers() {
const app = getFirebaseApp();
const db = admin.database(app);
const usersRef = db.ref("users");
const updates: Record<string, User> = {};
const deletions = [];
for (const token of usersToFlush) {
const user = users.get(token);
if (!user) {
deletions.push(token);
continue;
}
updates[token] = user;
}
usersToFlush.clear();
const numUpdates = Object.keys(updates).length + deletions.length;
if (numUpdates === 0) {
return;
}
await usersRef.update(updates);
await Promise.all(deletions.map((token) => usersRef.child(token).remove()));
log.info(
{ users: Object.keys(updates).length, deletions: deletions.length },
"Flushed changes to Firebase"
);
}
// TODO: use key-management/models.ts for family mapping
function getModelFamilyForQuotaUsage(model: string): ModelFamily {
if (model.includes("32k")) {
return "gpt4-32k";
}
if (model.startsWith("gpt-4")) {
return "gpt4";
}
if (model.startsWith("gpt-3.5")) {
return "turbo";
}
if (model.includes("bison")) {
return "bison";
}
if (model.startsWith("claude")) {
return "claude";
}
if (model.startsWith("anthropic.claude")) {
return "aws-claude";
}
throw new Error(`Unknown quota model family for model ${model}`);
}
function getRefreshCrontab() {
switch (config.quotaRefreshPeriod!) {
case "hourly":
return "0 * * * *";
case "daily":
return "0 0 * * *";
default:
return config.quotaRefreshPeriod ?? "0 0 * * *";
}
}
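// Sketch of the temporary-token flow; durations and token counts are
// illustrative, and init() is assumed to have run during application startup.
function exampleCreateTemporaryUser() {
  const token = createUser({
    type: "temporary",
    expiresAt: Date.now() + 24 * 60 * 60 * 1000, // expires in 24 hours
  });
  incrementPromptCount(token);
  incrementTokenCount(token, "gpt-3.5-turbo", 1200); // counted against "turbo"
  return getUser(token); // { promptCount: 1, tokenCounts: { turbo: 1200, ... }, ... }
}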
| JJNeverkry/jj | src/shared/users/user-store.ts | TypeScript | unknown | 11,008 |
import { Query } from "express-serve-static-core";
import sanitize from "sanitize-html";
import { z } from "zod";
export function parseSort(sort: Query["sort"]) {
if (!sort) return null;
if (typeof sort === "string") return sort.split(",");
if (Array.isArray(sort)) return sort.splice(3) as string[];
return null;
}
export function sortBy(fields: string[], asc = true) {
return (a: any, b: any) => {
for (const field of fields) {
if (a[field] !== b[field]) {
// always sort nulls to the end
if (a[field] == null) return 1;
if (b[field] == null) return -1;
const valA = Array.isArray(a[field]) ? a[field].length : a[field];
const valB = Array.isArray(b[field]) ? b[field].length : b[field];
const result = valA < valB ? -1 : 1;
return asc ? result : -result;
}
}
return 0;
};
}
export function paginate(set: unknown[], page: number, pageSize: number = 20) {
const p = Math.max(1, Math.min(page, Math.ceil(set.length / pageSize)));
return {
page: p,
items: set.slice((p - 1) * pageSize, p * pageSize),
pageSize,
pageCount: Math.ceil(set.length / pageSize),
totalCount: set.length,
nextPage: p * pageSize < set.length ? p + 1 : null,
prevPage: p > 1 ? p - 1 : null,
};
}
export function sanitizeAndTrim(
input?: string | null,
options: sanitize.IOptions = {
allowedTags: [],
allowedAttributes: {},
}
) {
return sanitize((input ?? "").trim(), options);
}
// https://github.com/colinhacks/zod/discussions/2050#discussioncomment-5018870
export function makeOptionalPropsNullable<Schema extends z.AnyZodObject>(
schema: Schema
) {
const entries = Object.entries(schema.shape) as [
keyof Schema["shape"],
z.ZodTypeAny
][];
const newProps = entries.reduce(
(acc, [key, value]) => {
acc[key] =
value instanceof z.ZodOptional ? value.unwrap().nullable() : value;
return acc;
},
{} as {
[key in keyof Schema["shape"]]: Schema["shape"][key] extends z.ZodOptional<
infer T
>
? z.ZodNullable<T>
: Schema["shape"][key];
}
);
return z.object(newProps);
}
export function redactIp(ip: string) {
const ipv6 = ip.includes(":");
return ipv6 ? "redacted:ipv6" : ip.replace(/\.\d+\.\d+$/, ".xxx.xxx");
}
export function assertNever(x: never): never {
throw new Error(`Called assertNever with argument ${x}.`);
}
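// Small worked example for sortBy() and paginate(); the records are made up.
const exampleRows = [
  { name: "a", promptCount: 5 },
  { name: "b", promptCount: 12 },
  { name: "c", promptCount: 12 },
];
// Descending sort on promptCount (name breaks ties), then take page 1 of size 2.
const exampleSorted = [...exampleRows].sort(sortBy(["promptCount", "name"], false));
const examplePage = paginate(exampleSorted, 1, 2);
console.log(examplePage.items.map((r) => r.name)); // ["c", "b"]
console.log(examplePage.nextPage);                 // 2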
| JJNeverkry/jj | src/shared/utils.ts | TypeScript | unknown | 2,438 |
<% if (flashData) {
let flashStyle = { title: "", style: "" };
switch (flashData.type) {
case "success":
flashStyle.title = "β
Success:";
flashStyle.style = "color: green; background-color: #ddffee; padding: 1em";
break;
case "error":
flashStyle.title = "β οΈ Error:";
flashStyle.style = "color: red; background-color: #eedddd; padding: 1em";
break;
case "warning":
flashStyle.title = "β οΈ Alert:";
flashStyle.style = "color: darkorange; background-color: #ffeecc; padding: 1em";
break;
case "info":
flashStyle.title = "βΉοΈ Notice:";
flashStyle.style = "color: blue; background-color: #ddeeff; padding: 1em";
break;
}
%>
<p style="<%= flashStyle.style %>">
<strong><%= flashStyle.title %></strong> <%= flashData.message %>
</p>
<% } %>
| JJNeverkry/jj | src/shared/views/partials/shared_flash.ejs | ejs | unknown | 842 |
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta name="csrf-token" content="<%= csrfToken %>">
<title><%= title %></title>
<style>
a:hover {
background-color: #e0e6f6;
}
a:visited:hover {
background-color: #e7e0f6;
}
.pagination {
list-style-type: none;
padding: 0;
}
.pagination li {
display: inline-block;
}
.pagination li a {
display: block;
padding: 0.5em 1em;
text-decoration: none;
}
.pagination li.active a {
background-color: #58739c;
color: #fff;
}
table {
border-collapse: collapse;
border: 1px solid #ccc;
}
table.striped tr:nth-child(even) {
background-color: #eaeaea
}
table td, table th {
border: 1px solid #ccc;
padding: 0.25em 0.5em;
}
th.active {
background-color: #e0e6f6;
}
td.actions {
padding: 0;
width: 0;
text-align: center;
}
td.actions a {
text-decoration: none;
background-color: transparent;
padding: 0.5em;
height: 100%;
width: 100%;
}
td.actions:hover {
background-color: #e0e6f6;
}
@media (max-width: 600px) {
table {
width: 100%;
}
table td, table th {
display: block;
width: 100%;
}
}
</style>
</head>
<body style="font-family: sans-serif; background-color: #f0f0f0; padding: 1em;">
<%- include("partials/shared_flash", { flashData: flash }) %>
| JJNeverkry/jj | src/shared/views/partials/shared_header.ejs | ejs | unknown | 1,688 |
<div>
<label for="pageSize">Page Size</label>
<select id="pageSize" onchange="setPageSize(this.value)" style="margin-bottom: 1rem;">
<option value="10" <% if (pageSize === 10) { %>selected<% } %>>10</option>
<option value="20" <% if (pageSize === 20) { %>selected<% } %>>20</option>
<option value="50" <% if (pageSize === 50) { %>selected<% } %>>50</option>
<option value="100" <% if (pageSize === 100) { %>selected<% } %>>100</option>
<option value="200" <% if (pageSize === 200) { %>selected<% } %>>200</option>
</select>
</div>
<script>
function getPageSize() {
var match = window.location.search.match(/perPage=(\d+)/);
if (match) return parseInt(match[1]); else return document.cookie.match(/perPage=(\d+)/)?.[1] ?? 10;
}
function setPageSize(size) {
document.cookie = "perPage=" + size + "; path=/admin";
window.location.reload();
}
document.getElementById("pageSize").value = getPageSize();
</script>
|
JJNeverkry/jj
|
src/shared/views/partials/shared_pagination.ejs
|
ejs
|
unknown
| 962 |
<p>Next refresh: <time><%- nextQuotaRefresh %></time></p>
<table>
<thead>
<tr>
<th scope="col">Model Family</th>
<th scope="col">Usage</th>
<% if (showTokenCosts) { %>
<th scope="col">Cost</th>
<% } %>
<th scope="col">Limit</th>
<th scope="col">Remaining</th>
<th scope="col">Refresh Amount</th>
</tr>
</thead>
<tbody>
<% Object.entries(quota).forEach(([key, limit]) => { %>
<tr>
<th scope="row"><%- key %></th>
<td><%- prettyTokens(user.tokenCounts[key]) %></td>
<% if (showTokenCosts) { %>
<td>$<%- tokenCost(key, user.tokenCounts[key]).toFixed(2) %></td>
<% } %>
<% if (!user.tokenLimits[key]) { %>
<td colspan="2" style="text-align: center">unlimited</td>
<% } else { %>
<td><%- prettyTokens(user.tokenLimits[key]) %></td>
<td><%- prettyTokens(user.tokenLimits[key] - user.tokenCounts[key]) %></td>
<% } %>
<% if (user.type === "temporary") { %>
<td>N/A</td>
<% } else { %>
<td><%- prettyTokens(quota[key]) %></td>
<% } %>
</tr>
<% }) %>
</tbody>
</table>
|
JJNeverkry/jj
|
src/shared/views/partials/shared_quota-info.ejs
|
ejs
|
unknown
| 1,142 |
<a href="#" id="ip-list-toggle">Show all (<%- user.ip.length %>)</a>
<ol id="ip-list" style="display: none; padding-left: 1em; margin: 0">
<% user.ip.forEach((ip) => { %>
<li><code><%- shouldRedact ? redactIp(ip) : ip %></code></li>
<% }) %>
</ol>
<script>
document.getElementById("ip-list-toggle").addEventListener("click", (e) => {
e.preventDefault();
document.getElementById("ip-list").style.display = "block";
document.getElementById("ip-list-toggle").style.display = "none";
});
</script>
|
JJNeverkry/jj
|
src/shared/views/partials/shared_user_ip_list.ejs
|
ejs
|
unknown
| 517 |
import cookieParser from "cookie-parser";
import expressSession from "express-session";
import MemoryStore from "memorystore";
import { COOKIE_SECRET } from "../config";
const ONE_WEEK = 1000 * 60 * 60 * 24 * 7;
const cookieParserMiddleware = cookieParser(COOKIE_SECRET);
const sessionMiddleware = expressSession({
secret: COOKIE_SECRET,
resave: false,
saveUninitialized: false,
store: new (MemoryStore(expressSession))({ checkPeriod: ONE_WEEK }),
cookie: { sameSite: "strict", maxAge: ONE_WEEK, signed: true },
});
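// Applied together: signed-cookie parsing first, then the in-memory session middleware.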
const withSession = [cookieParserMiddleware, sessionMiddleware];
export { withSession };
|
JJNeverkry/jj
|
src/shared/with-session.ts
|
TypeScript
|
unknown
| 620 |
import type { HttpRequest } from "@smithy/types";
import { Express } from "express-serve-static-core";
import { APIFormat, Key, LLMService } from "../shared/key-management";
import { User } from "../shared/users/schema";
declare global {
namespace Express {
interface Request {
key?: Key;
service?: LLMService;
/** Denotes the format of the user's submitted request. */
inboundApi: APIFormat;
/** Denotes the format of the request being proxied to the API. */
outboundApi: APIFormat;
/** If the request comes from a RisuAI.xyz user, this is their token. */
risuToken?: string;
user?: User;
isStreaming?: boolean;
startTime: number;
retryCount: number;
queueOutTime?: number;
onAborted?: () => void;
proceed: () => void;
heartbeatInterval?: NodeJS.Timeout;
promptTokens?: number;
outputTokens?: number;
// TODO: remove later
debug: Record<string, any>;
signedRequest: HttpRequest;
}
}
}
declare module "express-session" {
interface SessionData {
adminToken?: string;
userToken?: string;
csrf?: string;
flash?: { type: string; message: string };
}
}
|
JJNeverkry/jj
|
src/types/custom.d.ts
|
TypeScript
|
unknown
| 1,208 |
import express, { Router } from "express";
import { injectCsrfToken, checkCsrfToken } from "../shared/inject-csrf";
import { selfServiceRouter } from "./web/self-service";
import { injectLocals } from "../shared/inject-locals";
import { withSession } from "../shared/with-session";
const userRouter = Router();
userRouter.use(
express.json({ limit: "1mb" }),
express.urlencoded({ extended: true, limit: "1mb" })
);
userRouter.use(withSession);
userRouter.use(injectCsrfToken, checkCsrfToken);
userRouter.use(injectLocals);
userRouter.use(selfServiceRouter);
userRouter.use(
(
err: Error,
_req: express.Request,
res: express.Response,
_next: express.NextFunction
) => {
const data: any = { message: err.message, stack: err.stack, status: 500 };
res.status(500).render("user_error", { ...data, flash: null });
}
);
export { userRouter };
|
JJNeverkry/jj
|
src/user/routes.ts
|
TypeScript
|
unknown
| 875 |
import { Router } from "express";
import { UserPartialSchema } from "../../shared/users/schema";
import * as userStore from "../../shared/users/user-store";
import { ForbiddenError, UserInputError } from "../../shared/errors";
import { sanitizeAndTrim } from "../../shared/utils";
import { config } from "../../config";
const router = Router();
router.use((req, res, next) => {
if (req.session.userToken) {
res.locals.currentSelfServiceUser =
userStore.getUser(req.session.userToken) || null;
}
next();
});
router.get("/", (_req, res) => {
res.redirect("/");
});
router.get("/lookup", (_req, res) => {
const ipLimit =
(res.locals.currentSelfServiceUser?.maxIps ?? config.maxIpsPerUser) || 0;
res.render("user_lookup", {
user: res.locals.currentSelfServiceUser,
ipLimit,
});
});
router.post("/lookup", (req, res) => {
const token = req.body.token;
const user = userStore.getUser(token);
if (!user) {
req.session.flash = { type: "error", message: "Invalid user token." };
return res.redirect("/user/lookup");
}
req.session.userToken = user.token;
return res.redirect("/user/lookup");
});
router.post("/edit-nickname", (req, res) => {
const existing = res.locals.currentSelfServiceUser;
if (!existing) {
throw new ForbiddenError("Not logged in.");
} else if (!config.allowNicknameChanges || existing.disabledAt) {
throw new ForbiddenError("Nickname changes are not allowed.");
}
const schema = UserPartialSchema.pick({ nickname: true })
.strict()
.transform((v) => ({ nickname: sanitizeAndTrim(v.nickname) }));
const result = schema.safeParse(req.body);
if (!result.success) {
throw new UserInputError(result.error.message);
}
const newNickname = result.data.nickname || null;
userStore.upsertUser({ token: existing.token, nickname: newNickname });
req.session.flash = { type: "success", message: "Nickname updated." };
return res.redirect("/user/lookup");
});
export { router as selfServiceRouter };
|
JJNeverkry/jj
|
src/user/web/self-service.ts
|
TypeScript
|
unknown
| 2,009 |
<hr />
<footer>
<a href="/user">Index</a>
</footer>
<script>
document.querySelectorAll("td,time").forEach(function(td) {
if (td.innerText.match(/^\d{13}$/)) {
// Convert epoch-millisecond values to readable dates (a zero value never matches the pattern above).
const date = new Date(parseInt(td.innerText));
td.innerText = date.toString();
}
});
</script>
</body>
</html>
|
JJNeverkry/jj
|
src/user/web/views/partials/user_footer.ejs
|
ejs
|
unknown
| 341 |
<%- include("partials/shared_header", { title: "Error" }) %>
<div id="error-content" style="color: red; background-color: #eedddd; padding: 1em">
<p><strong>⚠️ Error <%= status %>:</strong> <%= message %></p>
<pre><%= stack %></pre>
<a href="#" onclick="window.history.back()">Go Back</a>
</div>
</body>
</html>
|
JJNeverkry/jj
|
src/user/web/views/user_error.ejs
|
ejs
|
unknown
| 344 |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="robots" content="noindex" />
<title><%= title %></title>
</head>
<body style="font-family: sans-serif; background-color: #f0f0f0; padding: 1em;">
<%= pageHeader %>
<hr />
<h2>Service Info</h2>
<pre><%= JSON.stringify(serviceInfo, null, 2) %></pre>
</body>
</html>
|
JJNeverkry/jj
|
src/user/web/views/user_index.ejs
|
ejs
|
unknown
| 365 |
<%- include("partials/shared_header", { title: "User Token Lookup" }) %>
<h1>User Token Lookup</h1>
<p>Provide your user token to check your usage and quota information.</p>
<form action="/user/lookup" method="post">
<input type="hidden" name="_csrf" value="<%= csrfToken %>" />
<label for="token">User Token</label>
<input type="password" name="token" value="<%= user?.token %>" />
<input type="submit" value="Lookup" />
</form>
<% if (user) { %>
<hr />
<% if (user.type === "temporary" && Boolean(user.disabledAt)) { %>
<%- include("partials/shared_flash", { flashData: {
type: "info",
message: "This temporary user token has expired and is no longer usable. These records will be deleted soon.",
} }) %>
<% } else if (user.disabledAt) { %>
<%- include("partials/shared_flash", { flashData: {
type: "warning",
message: "This user token has been disabled." + (user.disabledReason ? ` Reason: ${user.disabledReason}` : ""),
} }) %>
<% } %>
<table class="striped">
<tbody>
<tr>
<th scope="row">User Token</th>
<td colspan="2"><code> <%- "..." + user.token.slice(-5) %> </code></td>
</tr>
<tr>
<th scope="row">Nickname</th>
<td><%- user.nickname ?? "none" %></td>
<td class="actions">
<a title="Edit" id="edit-nickname" href="#" onclick="updateNickname()">βοΈ</a>
</td>
</tr>
<tr>
<th scope="row">Type</th>
<td colspan="2"><%- user.type %></td>
</tr>
<tr>
<th scope="row">Prompts</th>
<td colspan="2"><%- user.promptCount %></td>
</tr>
<tr>
<th scope="row">Created At</th>
<td colspan="2"><%- user.createdAt %></td>
</tr>
<tr>
<th scope="row">Last Used At</th>
<td colspan="2"><%- user.lastUsedAt || "never" %></td>
</tr>
<tr>
<th scope="row">IPs<%- ipLimit ? ` (max ${ipLimit})` : "" %></th>
<td colspan="2"><%- include("partials/shared_user_ip_list", { user, shouldRedact: true }) %></td>
</tr>
<% if (user.type === "temporary") { %>
<tr>
<th scope="row">Expires At</th>
<td colspan="2"><%- user.expiresAt %></td>
</tr>
<% } %>
</tbody>
</table>
<h3>Quota Information</h3>
<%- include("partials/shared_quota-info", { quota, user }) %>
<form id="edit-nickname-form" style="display: none" action="/user/edit-nickname" method="post">
<input type="hidden" name="_csrf" value="<%= csrfToken %>" />
<input type="hidden" name="nickname" value="<%= user.nickname %>" />
</form>
<script>
function updateNickname() {
const form = document.getElementById("edit-nickname-form");
const existing = form.nickname.value;
const value = prompt("Enter a new nickname", existing);
if (value !== null) {
form.nickname.value = value;
form.submit();
}
}
</script>
<% } %> <%- include("partials/user_footer") %>
|
JJNeverkry/jj
|
src/user/web/views/user_lookup.ejs
|
ejs
|
unknown
| 2,868 |
{
"compilerOptions": {
"strict": true,
"target": "ES2020",
"module": "CommonJS",
"moduleResolution": "node",
"esModuleInterop": true,
"forceConsistentCasingInFileNames": true,
"skipLibCheck": true,
"skipDefaultLibCheck": true,
"outDir": "build",
"sourceMap": true,
"resolveJsonModule": true,
"useUnknownInCatchVariables": false
},
"include": ["src"],
"exclude": ["node_modules"],
"files": ["src/types/custom.d.ts"]
}
|
JJNeverkry/jj
|
tsconfig.json
|
JSON
|
unknown
| 475 |
# Finance Assignment Help Service
## Getting started
To make it easy for you to get started with GitLab, here's a list of recommended next steps.
Already a pro? Just edit this README.md and make it your own. Want to make it easy? [Use the template at the bottom](#editing-this-readme)!
## Add your files
- [ ] [Create](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#create-a-file) or [upload](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#upload-a-file) files
- [ ] [Add files using the command line](https://docs.gitlab.com/topics/git/add_files/#add-files-to-a-git-repository) or push an existing Git repository with the following command:
```
cd existing_repo
git remote add origin https://gitgud.io/angelikawartina12/finance-assignment-help-service.git
git branch -M master
git push -uf origin master
```
## Integrate with your tools
- [ ] [Set up project integrations](https://gitgud.io/angelikawartina12/finance-assignment-help-service/-/settings/integrations)
## Collaborate with your team
- [ ] [Invite team members and collaborators](https://docs.gitlab.com/ee/user/project/members/)
- [ ] [Create a new merge request](https://docs.gitlab.com/ee/user/project/merge_requests/creating_merge_requests.html)
- [ ] [Automatically close issues from merge requests](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#closing-issues-automatically)
- [ ] [Enable merge request approvals](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/)
- [ ] [Set auto-merge](https://docs.gitlab.com/user/project/merge_requests/auto_merge/)
## Test and Deploy
Use the built-in continuous integration in GitLab.
- [ ] [Get started with GitLab CI/CD](https://docs.gitlab.com/ee/ci/quick_start/)
- [ ] [Analyze your code for known vulnerabilities with Static Application Security Testing (SAST)](https://docs.gitlab.com/ee/user/application_security/sast/)
- [ ] [Deploy to Kubernetes, Amazon EC2, or Amazon ECS using Auto Deploy](https://docs.gitlab.com/ee/topics/autodevops/requirements.html)
- [ ] [Use pull-based deployments for improved Kubernetes management](https://docs.gitlab.com/ee/user/clusters/agent/)
- [ ] [Set up protected environments](https://docs.gitlab.com/ee/ci/environments/protected_environments.html)
***
# Editing this README
When you're ready to make this README your own, just edit this file and use the handy template below (or feel free to structure it however you want - this is just a starting point!). Thanks to [makeareadme.com](https://www.makeareadme.com/) for this template.
## Suggestions for a good README
Every project is different, so consider which of these sections apply to yours. The sections used in the template are suggestions for most open source projects. Also keep in mind that while a README can be too long and detailed, too long is better than too short. If you think your README is too long, consider utilizing another form of documentation rather than cutting out information.
## Name
Choose a self-explaining name for your project.
## Description
Let people know what your project can do specifically. Provide context and add a link to any reference visitors might be unfamiliar with. A list of Features or a Background subsection can also be added here. If there are alternatives to your project, this is a good place to list differentiating factors.
## Badges
On some READMEs, you may see small images that convey metadata, such as whether or not all the tests are passing for the project. You can use Shields to add some to your README. Many services also have instructions for adding a badge.
## Visuals
Depending on what you are making, it can be a good idea to include screenshots or even a video (you'll frequently see GIFs rather than actual videos). Tools like ttygif can help, but check out Asciinema for a more sophisticated method.
## Installation
Within a particular ecosystem, there may be a common way of installing things, such as using Yarn, NuGet, or Homebrew. However, consider the possibility that whoever is reading your README is a novice and would like more guidance. Listing specific steps helps remove ambiguity and gets people using your project as quickly as possible. If it only runs in a specific context like a particular programming language version or operating system or has dependencies that have to be installed manually, also add a Requirements subsection.
## Usage
Use examples liberally, and show the expected output if you can. It's helpful to have inline the smallest example of usage that you can demonstrate, while providing links to more sophisticated examples if they are too long to reasonably include in the README.
## Support
Tell people where they can go to for help. It can be any combination of an issue tracker, a chat room, an email address, etc.
## Roadmap
If you have ideas for releases in the future, it is a good idea to list them in the README.
## Contributing
State if you are open to contributions and what your requirements are for accepting them.
For people who want to make changes to your project, it's helpful to have some documentation on how to get started. Perhaps there is a script that they should run or some environment variables that they need to set. Make these steps explicit. These instructions could also be useful to your future self.
You can also document commands to lint the code or run tests. These steps help to ensure high code quality and reduce the likelihood that the changes inadvertently break something. Having instructions for running tests is especially helpful if it requires external setup, such as starting a Selenium server for testing in a browser.
## Authors and acknowledgment
Show your appreciation to those who have contributed to the project.
## License
For open source projects, say how it is licensed.
## Project status
If you have run out of energy or time for your project, put a note at the top of the README saying that development has slowed down or stopped completely. Someone may choose to fork your project or volunteer to step in as a maintainer or owner, allowing your project to keep going. You can also make an explicit request for maintainers.
|
angelikawartina12/finance
|
README.md
|
Markdown
|
unknown
| 6,207 |
# Compiled files
build-tools
build-tools.exe
|
mikeee/dapr
|
.build-tools/.gitignore
|
Git
|
mit
| 45 |
# Dapr build-tools CLI
This folder contains a CLI that implements a number of build tools used by Dapr.
The CLI is written in Go and based on the [cobra](https://github.com/spf13/cobra) framework. In order to use it, Go (1.18+) must be installed on your system.
## Running the CLI
You have two ways to run the CLI:
1. Within the `build-tools` folder, you can run the CLI directly with `go run .`. For example, `go run . help` shows the help page. Make sure that `GOOS` and `GOARCH` are set to the correct values for your system.
2. You can build a pre-compiled binary by running `make compile-build-tools` in the root folder of this repository. This will build an executable called `build-tools` (or `build-tools.exe` on Windows) in the `build-tools` folder. You can then run the command directly, for example `./build-tools help`. A minimal example session is sketched below.
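A minimal example session, assuming a Unix-like shell (on Windows the binary is named `build-tools.exe`):
```sh
# Option 1: run the CLI from source, inside the build-tools folder
go run . help

# Option 2: from the repository root, build the binary once, then run it from the build-tools folder
make compile-build-tools
./build-tools help
```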
## Available commands
The list of available commands in this CLI is dynamic and is subject to change at any time. Each command, including the "root" one (no sub-command), are self-documented in the CLI, and you can read the help page by adding `--help`.
For example, `./build-tools --help` shows the full list of commands the CLI offers. `./build-tools e2e --help` shows the help page for the `e2e` sub-command.
|
mikeee/dapr
|
.build-tools/README.md
|
Markdown
|
mit
| 1,253 |
package cmd
import (
"errors"
"fmt"
"os"
"os/exec"
"regexp"
"github.com/spf13/cobra"
"golang.org/x/mod/semver"
"gopkg.in/yaml.v3"
)
var (
ErrVersionNotSupported = errors.New("version not supported")
ErrVersionNotFound = errors.New("version not found")
)
type GHWorkflow struct {
Jobs struct {
Lint struct {
Env struct {
GOVER string `yaml:"GOVER"`
GOLANGCILINTVER string `yaml:"GOLANGCILINT_VER"`
} `yaml:"env"`
} `yaml:"lint"`
} `yaml:"jobs"`
}
func parseWorkflowVersionFromFile(path string) (string, error) {
var ghWorkflow GHWorkflow
raw, err := os.ReadFile(path)
if err != nil {
return "", err
}
err = yaml.Unmarshal(raw, &ghWorkflow)
if err != nil {
return "", err
}
return ghWorkflow.Jobs.Lint.Env.GOLANGCILINTVER, err
}
func getCurrentVersion() (string, error) {
out, err := exec.Command("golangci-lint", "--version").Output()
if err != nil {
return "", err
}
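// Illustrative example: output such as "golangci-lint has version 1.55.2 built from ..."
// is normalized to "v1.55.2" below.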
regex, err := regexp.Compile(`golangci-lint\shas\sversion\sv?([\d+.]+[\d])`)
if err != nil {
return "", err
}
matches := regex.FindStringSubmatch(string(out))
if matches == nil {
return "", fmt.Errorf("no version found: %v", string(out))
}
return fmt.Sprintf("v%s", matches[1]), err
}
func isVersionValid(workflowVersion, currentVersion string) bool {
res := semver.MajorMinor(workflowVersion) == semver.MajorMinor(currentVersion)
return res
}
func compareVersions(path string) (string, error) {
workflowVersion, err := parseWorkflowVersionFromFile(path)
if err != nil {
return fmt.Sprintf("Error parsing workflow version: %v", err), ErrVersionNotFound
}
currentVersion, err := getCurrentVersion()
if err != nil {
return fmt.Sprintf("Error getting current version: %v", err), ErrVersionNotFound
}
validVersion := isVersionValid(workflowVersion, currentVersion)
if !validVersion {
return fmt.Sprintf("Invalid version, expected: %s, current: %s ", workflowVersion, currentVersion), ErrVersionNotSupported
}
return fmt.Sprintf("Linter version is valid (MajorMinor): %s", currentVersion), nil
}
func getCmdCheckLint(cmdType string) *cobra.Command {
// Base command
cmd := &cobra.Command{
Use: cmdType,
Short: "Compare local golangci-lint version against workflow version",
Run: func(cmd *cobra.Command, args []string) {
path := cmd.Flag("path").Value.String()
res, err := compareVersions(path)
fmt.Println(res)
if err != nil {
fmt.Println("Please install the correct version using the guide - https://golangci-lint.run/welcome/install/")
if err == ErrVersionNotSupported {
fmt.Println("Alternatively review the golangci-lint version in the workflow file at .github/workflows/dapr.yml")
}
os.Exit(1)
}
},
}
cmd.PersistentFlags().String("path", "../.github/workflows/dapr.yml", "Path to workflow file")
return cmd
}
func init() {
// checkLintCmd represents the checkLint command
checkLintCmd := getCmdCheckLint("check-linter")
rootCmd.AddCommand(checkLintCmd)
}
|
mikeee/dapr
|
.build-tools/cmd/check-lint-version.go
|
GO
|
mit
| 2,981 |
package cmd
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestParseWorkflow(t *testing.T) {
t.Run("parse invalid workflow file", func(t *testing.T) {
parsedVersion, err := parseWorkflowVersionFromFile("../testdata/check-lint-version/invalid.yml")
assert.Equal(t, "", parsedVersion)
assert.Error(t, err)
})
t.Run("parse workflow file with a missing key", func(t *testing.T) {
parsedVersion, err := parseWorkflowVersionFromFile("../testdata/check-lint-version/invalid-test.yml")
assert.Equal(t, "", parsedVersion)
assert.NoError(t, err)
})
t.Run("parse an invalid workflow file", func(t *testing.T) {
parsedVersion, err := parseWorkflowVersionFromFile("../testdata/check-lint-version/invalid-yaml.yml")
assert.Equal(t, "", parsedVersion)
assert.Error(t, err)
})
t.Run("parse testing workflow file", func(t *testing.T) {
parsedVersion, err := parseWorkflowVersionFromFile("../testdata/check-lint-version/valid-test.yml")
assert.Equal(t, "123.123.123", parsedVersion)
assert.NoError(t, err)
})
}
func TestGetCurrentVersion(t *testing.T) {
t.Run("get current version from system", func(t *testing.T) {
currentVersion, err := getCurrentVersion()
assert.Equal(t, "v1.55.2", currentVersion)
assert.NoError(t, err)
})
// TODO: test failure to detect current version
// TODO: test failure to compile regex expression
// TODO: test failure finding matches
}
func TestIsVersionValid(t *testing.T) {
t.Run("compare versions - exactly equal to", func(t *testing.T) {
assert.Equal(t, true, isVersionValid("v1.54.2", "v1.54.2"))
})
t.Run("compare versions - patch version greater (workflow)", func(t *testing.T) {
assert.Equal(t, true, isVersionValid("v1.54.3", "v1.54.2"))
})
t.Run("compare versions - patch version greater (installed)", func(t *testing.T) {
assert.Equal(t, true, isVersionValid("v1.54.2", "v1.54.3"))
})
t.Run("compare versions - invalid (installed)", func(t *testing.T) {
assert.Equal(t, false, isVersionValid("v1.54.2", "v1.52.2"))
})
t.Run("compare versions - invalid (workflow)", func(t *testing.T) {
assert.Equal(t, false, isVersionValid("v1.52.2", "v1.54.2"))
})
}
func TestCompareVersions(t *testing.T) {
t.Run("Valid comparison", func(t *testing.T) {
res, err := compareVersions("../../.github/workflows/dapr.yml")
assert.Contains(t, res, "Linter version is valid")
assert.NoError(t, err)
})
t.Run("Invalid comparison", func(t *testing.T) {
res, err := compareVersions("../testdata/check-lint-version/invalid-test.yml")
assert.Contains(t, res, "Invalid version")
assert.Error(t, err)
})
// TODO: test function for failure to get the current version using getCurrentVersion()
t.Run("Invalid path for comparison", func(t *testing.T) {
res, err := compareVersions("../testdata/check-lint-version/invalid-test-incorrect-path.yml")
assert.Contains(t, res, "Error parsing workflow")
assert.Error(t, err)
})
}
|
mikeee/dapr
|
.build-tools/cmd/check-lint-version_test.go
|
GO
|
mit
| 2,940 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
func init() {
// e2eCmd represents the e2e command
e2eCmd := getCmdE2EPerf("e2e")
rootCmd.AddCommand(e2eCmd)
}
|
mikeee/dapr
|
.build-tools/cmd/e2e.go
|
GO
|
mit
| 688 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
func init() {
// perfCmd represents the perf command
perfCmd := getCmdE2EPerf("perf")
rootCmd.AddCommand(perfCmd)
}
|
mikeee/dapr
|
.build-tools/cmd/perf.go
|
GO
|
mit
| 693 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"os"
"github.com/spf13/cobra"
)
// rootCmd represents the base command when called without any subcommands
var rootCmd = &cobra.Command{
Use: "dapr-build-tools",
Short: "Build tools for Dapr",
Long: `A collection of commands and tools used to build and package Dapr`,
}
// Execute adds all child commands to the root command and sets flags appropriately.
func Execute() {
err := rootCmd.Execute()
if err != nil {
os.Exit(1)
}
}
|
mikeee/dapr
|
.build-tools/cmd/root.go
|
GO
|
mit
| 1,027 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"io/fs"
"os"
"os/exec"
"path/filepath"
"runtime"
"sort"
"strings"
"github.com/google/go-containerregistry/pkg/crane"
gitignore "github.com/sabhiram/go-gitignore"
"github.com/spf13/cobra"
)
// Manages the commands for e2e and perf
type cmdE2EPerf struct {
cmdType string
flags *cmdE2EPerfFlags
}
// Flags for the e2e/perf commands
type cmdE2EPerfFlags struct {
AppDir string
DestRegistry string
DestTag string
CacheRegistry string
Dockerfile string
TargetOS string
TargetArch string
IgnoreFile string
CacheIncludeFile string
Name string
WindowsVersion string
}
// Returns the command for e2e or perf
func getCmdE2EPerf(cmdType string) *cobra.Command {
// Object
obj := &cmdE2EPerf{
cmdType: cmdType,
flags: &cmdE2EPerfFlags{},
}
// Base command
cmd := &cobra.Command{
Use: cmdType,
Short: fmt.Sprintf("Build and push %s test apps", cmdType),
Long: fmt.Sprintf(`Tools to build %s test apps, including building and pushing Docker containers.`, cmdType),
}
// "build" sub-command
buildCmd := &cobra.Command{
Use: "build",
Short: "Build Docker image locally",
Long: fmt.Sprintf(`Build a %s test app and its Docker images.
If the image is available in the cache and is up-to-date, it will be pulled from the cache instead.
If the "--cache-registry" option is set, it will be pushed to the cache too.
`, cmdType),
RunE: obj.buildCmd,
}
buildCmd.Flags().StringVarP(&obj.flags.Name, "name", "n", "", "Name of the app")
buildCmd.MarkFlagRequired("name")
buildCmd.Flags().StringVarP(&obj.flags.AppDir, "appdir", "d", "", "Directory where the test apps are stored")
buildCmd.MarkFlagRequired("appdir")
buildCmd.MarkFlagDirname("appdir")
buildCmd.Flags().StringVar(&obj.flags.DestRegistry, "dest-registry", "", "Registry for the Docker image")
buildCmd.MarkFlagRequired("dest-registry")
buildCmd.Flags().StringVar(&obj.flags.DestTag, "dest-tag", "", "Tag to apply to the Docker image")
buildCmd.MarkFlagRequired("dest-tag")
buildCmd.Flags().StringVar(&obj.flags.CacheRegistry, "cache-registry", "", "Cache container registry (optional)")
buildCmd.Flags().StringVar(&obj.flags.Dockerfile, "dockerfile", "Dockerfile", "Dockerfile to use")
buildCmd.Flags().StringVar(&obj.flags.TargetOS, "target-os", runtime.GOOS, "Target OS")
buildCmd.Flags().StringVar(&obj.flags.TargetArch, "target-arch", runtime.GOARCH, "Target architecture")
buildCmd.Flags().StringVar(&obj.flags.IgnoreFile, "ignore-file", ".gitignore", "Name of the file with files to exclude (in the format of .gitignore)")
buildCmd.Flags().StringVar(&obj.flags.CacheIncludeFile, "cache-include-file", ".cache-include", "Name of the file inside the app folder with additional files to include in checksumming (in the format of .gitignore)")
buildCmd.Flags().StringVar(&obj.flags.WindowsVersion, "windows-version", "", "Windows version to use for Windows containers")
// "push" sub-command
pushCmd := &cobra.Command{
Use: "push",
Short: "push Docker image",
Long: fmt.Sprintf(`Pushes the pre-built Docker image for a %s test app.
The image must have been already built using the "build" sub-command.
`, cmdType),
RunE: obj.pushCmd,
}
pushCmd.Flags().StringVarP(&obj.flags.Name, "name", "n", "", "Name of the app")
pushCmd.MarkFlagRequired("name")
pushCmd.Flags().StringVar(&obj.flags.DestRegistry, "dest-registry", "", "Registry for the Docker image")
pushCmd.MarkFlagRequired("dest-registry")
pushCmd.Flags().StringVar(&obj.flags.DestTag, "dest-tag", "", "Tag to apply to the Docker image")
pushCmd.MarkFlagRequired("dest-tag")
// "build-and-push" sub-command
buildAndPushCmd := &cobra.Command{
Use: "build-and-push",
Short: "Build and push Docker image",
Long: fmt.Sprintf(`Single command to build and push the Docker image for a %s test app.
If the "--cahce-registry" option is set and the image exists in the cache, it will be copied directly from the cache, without pulling it locally first.
`, cmdType),
RunE: obj.buildAndPushCmd,
}
buildAndPushCmd.Flags().StringVarP(&obj.flags.Name, "name", "n", "", "Name of the app")
buildAndPushCmd.MarkFlagRequired("name")
buildAndPushCmd.Flags().StringVarP(&obj.flags.AppDir, "appdir", "d", "", "Directory where the test apps are stored")
buildAndPushCmd.MarkFlagRequired("appdir")
buildAndPushCmd.MarkFlagDirname("appdir")
buildAndPushCmd.Flags().StringVar(&obj.flags.DestRegistry, "dest-registry", "", "Registry for the Docker image")
buildAndPushCmd.MarkFlagRequired("dest-registry")
buildAndPushCmd.Flags().StringVar(&obj.flags.DestTag, "dest-tag", "", "Tag to apply to the Docker image")
buildAndPushCmd.MarkFlagRequired("dest-tag")
buildAndPushCmd.Flags().StringVar(&obj.flags.CacheRegistry, "cache-registry", "", "Cache container registry (optional)")
buildAndPushCmd.Flags().StringVar(&obj.flags.Dockerfile, "dockerfile", "Dockerfile", "Dockerfile to use")
buildAndPushCmd.Flags().StringVar(&obj.flags.TargetOS, "target-os", runtime.GOOS, "Target OS")
buildAndPushCmd.Flags().StringVar(&obj.flags.TargetArch, "target-arch", runtime.GOARCH, "Target architecture")
buildAndPushCmd.Flags().StringVar(&obj.flags.IgnoreFile, "ignore-file", ".gitignore", "Name of the file with files to exclude (in the format of .gitignore)")
buildAndPushCmd.Flags().StringVar(&obj.flags.CacheIncludeFile, "cache-include-file", ".cache-include", "Name of the file inside the app folder with additional files to include in checksumming (in the format of .gitignore)")
buildAndPushCmd.Flags().StringVar(&obj.flags.WindowsVersion, "windows-version", "", "Windows version to use for Windows containers")
// Register the commands
cmd.AddCommand(buildCmd)
cmd.AddCommand(pushCmd)
cmd.AddCommand(buildAndPushCmd)
return cmd
}
// Handler for the "build" sub-command
func (c *cmdE2EPerf) buildCmd(cmd *cobra.Command, args []string) error {
// Target image and tag
destImage := c.getDestImage()
// Compute the hash of the folder with the app, then returns the full Docker image name (including the tag, which is based on the hash)
cachedImage, err := c.getCachedImage()
if err != nil {
return err
}
// If cache is enabled, try pulling from cache first
if c.flags.CacheRegistry != "" {
fmt.Printf("Looking for image %s in cacheβ¦\n", cachedImage)
// If there's no error, the image was pulled from the cache
// Just tag the image and we're done
if exec.Command("docker", "pull", cachedImage).Run() == nil {
fmt.Printf("Image pulled from the cache: %s\n", cachedImage)
err = exec.Command("docker", "tag", cachedImage, destImage).Run()
// Return, whether we have an error or not, as we're done
return err
} else {
fmt.Printf("Image not found in cache; it will be built: %s\n", cachedImage)
}
}
// Build the image
// It also pushes to the cache registry if needed
err = c.buildDockerImage(cachedImage)
if err != nil {
return err
}
return nil
}
// Handler for the "push" sub-command
func (c *cmdE2EPerf) pushCmd(cmd *cobra.Command, args []string) error {
// Target image and tag
destImage := c.getDestImage()
// Push the image
err := c.pushDockerImage(destImage)
if err != nil {
return err
}
return nil
}
// Handler for the "build-and-push" sub-command
func (c *cmdE2EPerf) buildAndPushCmd(cmd *cobra.Command, args []string) error {
// Target image and tag
destImage := c.getDestImage()
// Cached image and tag
cachedImage, err := c.getCachedImage()
if err != nil {
return err
}
// Try to copy the image between the cache and the target directly, without pulling first
// This will fail if the image is not in cache, and that's ok
if c.flags.CacheRegistry != "" {
fmt.Printf("Trying to copy image %s directly from cache %s\n", destImage, cachedImage)
err = crane.Copy(cachedImage, destImage)
if err == nil {
// If there's no error, we're done
fmt.Println("Image copied from cache directly! You're all set.")
return nil
}
// Copying the image failed, so we'll resort to build + push
fmt.Printf("Copying image directly from cache failed with error '%s'. Will build image.\n", err)
} else {
fmt.Println("Cache registry not set: will not use cache")
}
// Build the image
// It also pushes to the cache registry if needed
err = c.buildDockerImage(cachedImage)
if err != nil {
return err
}
// Push the image
err = c.pushDockerImage(destImage)
if err != nil {
return err
}
return nil
}
// Returns the cached image name and tag
func (c *cmdE2EPerf) getCachedImage() (string, error) {
// Get the hash of the files in the directory
hashDir, err := c.getHashDir()
if err != nil {
return "", err
}
tag := fmt.Sprintf("%s-%s-%s", c.flags.TargetOS, c.flags.TargetArch, hashDir)
if c.flags.WindowsVersion != "" {
tag = fmt.Sprintf("%s-%s-%s-%s", c.flags.TargetOS, c.flags.WindowsVersion, c.flags.TargetArch, hashDir)
}
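// Illustrative example: with cache registry "myregistry", command type "e2e", app name "app"
// and hash "0123456789", this yields "myregistry/e2e-app:linux-amd64-0123456789" on linux/amd64.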
cachedImage := fmt.Sprintf("%s/%s-%s:%s", c.flags.CacheRegistry, c.cmdType, c.flags.Name, tag)
return cachedImage, nil
}
// Returns the target image name and tag
func (c *cmdE2EPerf) getDestImage() string {
return fmt.Sprintf("%s/%s-%s:%s", c.flags.DestRegistry, c.cmdType, c.flags.Name, c.flags.DestTag)
}
// Returns the directory where the app is stored
func (c *cmdE2EPerf) getAppDir() string {
if c.cmdType == "perf" {
return filepath.Join(c.flags.AppDir, "perf")
} else {
return c.flags.AppDir
}
}
// Builds a Docker image for the app
// It also pushes it to the cache registry if that's enabled
func (c *cmdE2EPerf) buildDockerImage(cachedImage string) error {
destImage := c.getDestImage()
appDir := c.getAppDir()
// First, check if the image has its own Dockerfile
dockerfile := filepath.Join(appDir, c.flags.Name, c.flags.Dockerfile)
_, err := os.Stat(dockerfile)
if err != nil {
// App doesn't have a Dockerfile
// First, compile the Go app
ext := ""
if c.flags.TargetOS == "windows" {
ext = ".exe"
}
e := exec.Command("go",
"build",
"-o", "app"+ext,
".",
)
e.Env = os.Environ()
e.Env = append(
e.Env,
"CGO_ENABLED=0",
"GOOS="+c.flags.TargetOS,
"GOARCH="+c.flags.TargetArch,
)
e.Dir = filepath.Join(appDir, c.flags.Name)
e.Stdout = os.Stdout
e.Stderr = os.Stderr
err = e.Run()
if err != nil {
fmt.Println("'go build' returned an error:", err)
return err
}
// Use the "shared" Dockerfile
if c.cmdType == "perf" {
dockerfile = filepath.Join(appDir, "..", "Dockerfile")
} else {
dockerfile = filepath.Join(appDir, c.flags.Dockerfile)
}
}
// Build the Docker image
fmt.Printf("Building Docker image: %s\n", destImage)
args := []string{
"build",
"-f", dockerfile,
"-t", destImage,
filepath.Join(appDir, c.flags.Name, "."),
}
switch c.flags.TargetArch {
case "arm64":
args = append(args, "--platform", c.flags.TargetOS+"/arm64/v8")
case "amd64":
args = append(args, "--platform", c.flags.TargetOS+"/amd64")
default:
args = append(args, "--platform", c.flags.TargetOS+"/amd64")
}
if c.flags.WindowsVersion != "" {
args = append(args, "--build-arg", "WINDOWS_VERSION="+c.flags.WindowsVersion)
}
fmt.Printf("Running 'docker %s'\n", strings.Join(args, " "))
e := exec.Command("docker", args...)
e.Stdout = os.Stdout
e.Stderr = os.Stderr
err = e.Run()
if err != nil {
fmt.Println("'docker build' returned an error:", err)
return err
}
// Push to the cache, if needed
if c.flags.CacheRegistry != "" {
fmt.Printf("Pushing image %s to cacheβ¦\n", cachedImage)
err = exec.Command("docker", "tag", destImage, cachedImage).Run()
if err != nil {
fmt.Println("'docker tag' returned an error:", err)
return err
}
e := exec.Command("docker", "push", cachedImage)
e.Stdout = os.Stdout
e.Stderr = os.Stderr
err = e.Run()
// If there's an error, we probably didn't have permissions to push to the registry, so we can just ignore that
if err != nil {
fmt.Println("Failed to push to the cache registry; ignored")
}
}
return nil
}
// Pushes the pre-built Docker image to the target registry
func (c *cmdE2EPerf) pushDockerImage(destImage string) error {
fmt.Println("Pushing image", destImage)
e := exec.Command("docker", "push", destImage)
e.Stdout = os.Stdout
e.Stderr = os.Stderr
err := e.Run()
if err != nil {
fmt.Println("'docker push' returned an error:", err)
return err
}
return nil
}
// Loads the ".gitignore" (or whatever the value of ignoreFile is) in the appDir and in the appDir/name folders
func (c *cmdE2EPerf) getIgnores() *gitignore.GitIgnore {
appDir := c.getAppDir()
files := []string{
filepath.Join(appDir, c.flags.IgnoreFile),
// Add the ".gitignore" inside the app's folder too if it exists
filepath.Join(appDir, c.flags.Name, c.flags.IgnoreFile),
}
lines := []string{}
for _, f := range files {
read, err := os.ReadFile(f)
if err != nil {
continue
}
lines = append(lines, strings.Split(string(read), "\n")...)
}
if len(lines) == 0 {
return nil
}
return gitignore.CompileIgnoreLines(lines...)
}
// Loads the ".cache-include" (or whatever the value of "includeFile" is) in the appDir/name folder
func (c *cmdE2EPerf) getIncludes() []string {
appDir := c.getAppDir()
read, err := os.ReadFile(
filepath.Join(appDir, c.flags.Name, c.flags.CacheIncludeFile),
)
if err != nil || len(read) == 0 {
// Just ignore errors
return nil
}
return strings.Split(string(read), "\n")
}
func hashFilesInDir(basePath string, ignores *gitignore.GitIgnore) ([]string, error) {
files := []string{}
err := filepath.WalkDir(basePath, func(path string, d fs.DirEntry, _ error) error {
// Check if the file is ignored
relPath, err := filepath.Rel(basePath, path)
if err != nil {
return err
}
// Skip the folders and ignored files
if relPath == "." || d.IsDir() || (ignores != nil && ignores.MatchesPath(path)) {
return nil
}
// Add the hash of the file
checksum, err := hashEntryForFile(path, relPath)
if err != nil {
return err
}
files = append(files, checksum)
return nil
})
if err != nil {
return nil, err
}
return files, nil
}
func hashEntryForFile(path string, relPath string) (string, error) {
// Compute the sha256 of the file
checksum, err := checksumFile(path)
if err != nil {
return "", err
}
// Convert all slashes to / so the hash is the same on Windows and Linux
relPath = filepath.ToSlash(relPath)
return relPath + " " + checksum, nil
}
// Returns the checksum of the files in the directory
func (c *cmdE2EPerf) getHashDir() (string, error) {
basePath := filepath.Join(c.getAppDir(), c.flags.Name)
_, err := os.Stat(basePath)
if err != nil {
fmt.Printf("could not find app %s\n", basePath)
return "", err
}
// Load the files to exclude
ignores := c.getIgnores()
// Load additional paths to include
includes := c.getIncludes()
// Compute the hash of the app's files
files, err := hashFilesInDir(basePath, ignores)
if err != nil {
return "", err
}
if len(files) == 0 {
return "", fmt.Errorf("no file found in the folder")
}
// Include other files or folders as needed
for _, pattern := range includes {
if !filepath.IsAbs(pattern) {
pattern = filepath.Join(basePath, pattern)
}
matches, err := filepath.Glob(pattern)
if err != nil || len(matches) == 0 {
continue
}
for _, match := range matches {
if match == "" {
continue
}
info, err := os.Stat(match)
if err != nil {
continue
}
if info.IsDir() {
// Note: we are not passing "ignores" here because .gitignore files are usually specific for the folder they live in, while "match" is often outside of the app's folder.
// Best is to make sure that include paths are specific, such as ending with `*.go` or `*.proto`, rather than including the entire folder.
addFiles, err := hashFilesInDir(match, nil)
if err != nil || len(addFiles) == 0 {
continue
}
files = append(files, addFiles...)
} else {
relPath, err := filepath.Rel(basePath, match)
if err != nil {
continue
}
checksum, err := hashEntryForFile(match, relPath)
if err != nil {
continue
}
files = append(files, checksum)
}
}
}
// Sort files to have a consistent order, then compute the checksum of that slice (getting the first 10 chars only)
sort.Strings(files)
fileList := strings.Join(files, "\n")
hashDir := checksumString(fileList)[0:10]
return hashDir, nil
}
// Calculates the checksum of a file
func checksumFile(path string) (string, error) {
f, err := os.Open(path)
if err != nil {
fmt.Printf("failed to open file %s for hashing: %v\n", path, err)
return "", err
}
defer f.Close()
h := sha256.New()
_, err = io.Copy(h, f)
if err != nil {
fmt.Printf("failed to copy file %s into hasher: %v\n", path, err)
return "", err
}
res := hex.EncodeToString(h.Sum(nil))
return res, nil
}
// Calculates the checksum of a string
func checksumString(str string) string {
h := sha256.New()
h.Write([]byte(str))
return hex.EncodeToString(h.Sum(nil))
}
|
mikeee/dapr
|
.build-tools/cmd/zz-e2e-perf.go
|
GO
|
mit
| 17,652 |
module build-tools
go 1.22.3
require (
github.com/google/go-containerregistry v0.11.1-0.20220802162123-c1f9836a4fa9
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06
github.com/spf13/cobra v1.7.0
github.com/stretchr/testify v1.7.0
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4
gopkg.in/yaml.v3 v3.0.1
)
require (
github.com/containerd/stargz-snapshotter/estargz v0.12.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/cli v20.10.17+incompatible // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/docker/docker v20.10.17+incompatible // indirect
github.com/docker/docker-credential-helpers v0.6.4 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/klauspost/compress v1.15.8 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.3-0.20220729202839-6ad7100eb087 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/vbatts/tar-split v0.11.2 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.10.0 // indirect
)
|
mikeee/dapr
|
.build-tools/go.mod
|
mod
|
mit
| 1,369 |
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/containerd/stargz-snapshotter/estargz v0.12.0 h1:idtwRTLjk2erqiYhPWy2L844By8NRFYEwYHcXhoIWPM=
github.com/containerd/stargz-snapshotter/estargz v0.12.0/go.mod h1:AIQ59TewBFJ4GOPEQXujcrJ/EKxh5xXZegW1rkR1P/M=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docker/cli v20.10.17+incompatible h1:eO2KS7ZFeov5UJeaDmIs1NFEDRf32PaqRpvoEkKBy5M=
github.com/docker/cli v20.10.17+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE=
github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o=
github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-containerregistry v0.11.1-0.20220802162123-c1f9836a4fa9 h1:CUtt5NgbyWugcBKsuZ2MqU9sWtvnn9GlF7E0wCVkN2A=
github.com/google/go-containerregistry v0.11.1-0.20220802162123-c1f9836a4fa9/go.mod h1:jKEnAxppqUcGiMBChcvD0yTlWQwbZBitVvUeOaYUUTE=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/compress v1.15.8 h1:JahtItbkWjf2jzm/T+qgMxkP9EMHsqEUA6vCMGmXvhA=
github.com/klauspost/compress v1.15.8/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.3-0.20220729202839-6ad7100eb087 h1:vm7/Jb0eH7oibgUngG/ljkvHBxF+mHlekCvVFyLGOc8=
github.com/opencontainers/image-spec v1.0.3-0.20220729202839-6ad7100eb087/go.mod h1:K/JAU0m27RFhDRX4PcFdIKntROP6y5Ed6O91aZYDQfs=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 h1:OkMGxebDjyw0ULyrTYWeN0UNCCkmCWfjPnIA2W6oviI=
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06/go.mod h1:+ePHsJ1keEjQtpvf9HHw0f4ZeJ0TLRsxhunSI2hYJSs=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME=
github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
|
mikeee/dapr
|
.build-tools/go.sum
|
sum
|
mit
| 8,516 |
/*
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"build-tools/cmd"
)
func main() {
cmd.Execute()
}
|
mikeee/dapr
|
.build-tools/main.go
|
GO
|
mit
| 637 |
name: Test
on:
push:
pull_request:
branches:
- main
jobs:
build:
env:
NOGOLANGCILINT_VER: "123.123.123"
|
mikeee/dapr
|
.build-tools/testdata/check-lint-version/invalid-test.yml
|
YAML
|
mit
| 132 |
testfile
|
mikeee/dapr
|
.build-tools/testdata/check-lint-version/invalid-yaml.yml
|
YAML
|
mit
| 8 |
name: Test
on:
push:
pull_request:
branches:
- main
jobs:
lint:
env:
GOLANGCILINT_VER: "123.123.123"
|
mikeee/dapr
|
.build-tools/testdata/check-lint-version/valid-test.yml
|
YAML
|
mit
| 129 |
coverage:
# Commit status https://docs.codecov.io/docs/commit-status are used
# to block PR based on coverage threshold.
status:
project:
default:
target: auto
threshold: 0%
patch:
default:
informational: true
ignore:
# Configure what to ignore.
- "**/zz_generated*.go" # - Generated files.
- "pkg/apis" # - CRD related files including generated ones.
- "pkg/proto" # - GRPC Protobuf client for dapr.
- "pkg/testing" # - Testing mock.
|
mikeee/dapr
|
.codecov.yaml
|
YAML
|
mit
| 525 |
{
"name": "Dapr Dev Environment",
// Update the container version when you publish dev-container
"image": "ghcr.io/dapr/dapr-dev:latest",
// Replace with uncommented line below to build your own local copy of the image
//"dockerFile": "../docker/Dockerfile-dev",
"containerEnv": {
// Uncomment to overwrite devcontainer .kube/config and .minikube certs with the localhost versions
// each time the devcontainer starts, if the respective .kube-localhost/config and .minikube-localhost
// folders respectively are bind mounted to the devcontainer.
// "SYNC_LOCALHOST_KUBECONFIG": "true",
// Uncomment to disable docker-in-docker and automatically proxy default /var/run/docker.sock to
// the localhost bind-mount /var/run/docker-host.sock.
// "BIND_LOCALHOST_DOCKER": "true",
// Necessary for components-contrib's certification tests
"GOLANG_PROTOBUF_REGISTRATION_CONFLICT": "ignore"
},
"features": {
"ghcr.io/devcontainers/features/sshd:1": {},
"ghcr.io/devcontainers/features/github-cli:1": {},
"ghcr.io/devcontainers/features/azure-cli:1": {}
},
"mounts": [
// Mount docker-in-docker library volume
"type=volume,source=dind-var-lib-docker,target=/var/lib/docker",
// Bind mount docker socket under an alias to support docker-from-docker
"type=bind,source=/var/run/docker.sock,target=/var/run/docker-host.sock"
// Uncomment to clone local .kube/config into devcontainer
// "type=bind,source=${env:HOME}${env:USERPROFILE}/.kube,target=/home/dapr/.kube-localhost",
// Uncomment to additionally clone minikube certs into devcontainer for use with .kube/config
// "type=bind,source=${env:HOME}${env:USERPROFILE}/.minikube,target=/home/dapr/.minikube-localhost"
],
// Always run image-defined default command
"overrideCommand": false,
// On Linux, this will prevent new files getting created as root, but you
// may need to update the USER_UID and USER_GID in docker/Dockerfile-dev
// to match your user if not 1000.
"remoteUser": "dapr",
"runArgs": [
// Enable ptrace-based debugging for go
"--cap-add=SYS_PTRACE",
"--security-opt",
"seccomp=unconfined",
// Uncomment to bind to host network for local devcontainer; this is necessary if using the
// bind-mounted /var/run/docker-host.sock directly.
// "--net=host",
// Enable docker-in-docker configuration. Comment out if not using for better security.
"--privileged",
// Run the entrypoint defined in container image.
"--init"
],
"customizations": {
"vscode": {
"extensions": [
"davidanson.vscode-markdownlint",
"golang.go",
"ms-azuretools.vscode-dapr",
"ms-azuretools.vscode-docker",
"ms-kubernetes-tools.vscode-kubernetes-tools"
],
"settings": {
"go.toolsManagement.checkForUpdates": "local",
"go.useLanguageServer": true,
"go.gopath": "/go",
"go.buildTags": "e2e,perf,conftests,unit,integration_test,certtests,allcomponents",
"git.alwaysSignOff": true,
"terminal.integrated.env.linux": {
"GOLANG_PROTOBUF_REGISTRATION_CONFLICT": "ignore"
}
}
}
},
"workspaceFolder": "/workspaces/dapr",
"workspaceMount": "type=bind,source=${localWorkspaceFolder},target=/workspaces/dapr"
}
|
mikeee/dapr
|
.devcontainer/devcontainer.json
|
JSON
|
mit
| 3,648 |
*.go text eol=lf
|
mikeee/dapr
|
.gitattributes
|
Git
|
mit
| 16 |
---
name: Bug report
about: Report a bug in Dapr
title: ''
labels: kind/bug
assignees: ''
---
<!-- If you need to report a security issue please visit https://docs.dapr.io/operations/support/support-security-issues -->
## In what area(s)?
<!-- Remove the '> ' to select -->
> /area runtime
> /area operator
> /area placement
> /area docs
> /area test-and-release
## What version of Dapr?
<!-- Delete all but your choice -->
> 1.1.x
> 1.0.x
> edge: output of `git describe --dirty`
## Expected Behavior
<!-- Briefly describe what you expect to happen -->
## Actual Behavior
<!-- Briefly describe what is actually happening -->
## Steps to Reproduce the Problem
<!-- How can a maintainer reproduce this issue (be detailed) -->
## Release Note
<!-- How should the fix for this issue be communicated in our release notes? It can be populated later. -->
<!-- Keep it as a single line. Examples: -->
<!-- RELEASE NOTE: **ADD** New feature in Dapr. -->
<!-- RELEASE NOTE: **FIX** Bug in runtime. -->
<!-- RELEASE NOTE: **UPDATE** Runtime dependency. -->
RELEASE NOTE:
|
mikeee/dapr
|
.github/ISSUE_TEMPLATE/bug_report.md
|
Markdown
|
mit
| 1,085 |
blank_issues_enabled: true
contact_links:
- name: Dapr Community Repo
url: https://github.com/dapr/community
about: Please see our community docs here.
- name: Dapr Discord Chat
url: https://aka.ms/dapr-discord
about: Please ask questions or troubleshooting help here.
- name: Report a security issue
url: https://docs.dapr.io/operations/support/support-security-issues
about: Please report security vulnerabilities using these instructions.
|
mikeee/dapr
|
.github/ISSUE_TEMPLATE/config.yml
|
YAML
|
mit
| 472 |
---
name: Discussion
about: Start a discussion for Dapr
title: ''
labels: kind/discussion
assignees: ''
---
<!-- If you need to report a security issue please visit https://docs.dapr.io/operations/support/support-security-issues -->
<!-- Please visit https://aka.ms/dapr-discord to ask questions and troubleshoot. For all other design discussions please continue. -->
|
mikeee/dapr
|
.github/ISSUE_TEMPLATE/discussion.md
|
Markdown
|
mit
| 368 |
---
name: Feature Request
about: Create a Feature Request for Dapr
title: ''
labels: kind/enhancement
assignees: ''
---
<!-- If you need to report a security issue please visit https://docs.dapr.io/operations/support/support-security-issues -->
## In what area(s)?
<!-- Remove the '> ' to select -->
> /area runtime
> /area operator
> /area placement
> /area docs
> /area test-and-release
## Describe the feature
<!-- Please also discuss possible business value -->
## Release Note
<!-- How should this new feature be announced in our release notes? It can be populated later. -->
<!-- Keep it as a single line. Examples: -->
<!-- RELEASE NOTE: **ADD** New feature in Dapr. -->
<!-- RELEASE NOTE: **FIX** Bug in runtime. -->
<!-- RELEASE NOTE: **UPDATE** Runtime dependency. -->
RELEASE NOTE:
|
mikeee/dapr
|
.github/ISSUE_TEMPLATE/feature_request.md
|
Markdown
|
mit
| 805 |
---
name: Minor Patch Release
about: Minor Patch Release
title: 'Minor Patch Release Checklist'
labels: kind/bug
assignees: ''
---
- [ ] Dapr maintainer(s) communicate the discovery of issues that require a patch release, and the reason why, to the Discord Maintainers and Release channels. Patch releases are made for one of the categories below:
- [ ] Security vulnerability
- <>
- [ ] Regression that does not have a workaround:
- <>
- [ ] Broken mainline scenario that has a missing test case:
- <>
- [ ] Create Tag
- [ ] <>
- [ ] Performance tests passing
- [ ] End to End tests passing on Linux and Windows
- [ ] New test case written to catch future occurrences
- [ ] Notify users to try RC (Announce on Discord Announcements channel)
- [ ] <>
- [ ] Update the longhaul tests to use RC
- [ ] <>
- [ ] Write release notes
- [ ] <>
- [ ] Review release notes [@dapr/maintainers-dapr]
- [ ] Create Tag [@dapr/maintainers-dapr]
- [ ] Backport fixes into master branch [@dapr/maintainers-dapr]
- [ ] Update the documentation: Latest version & versions in supported releases
- [ ] Push new tag in installer-bundle repo
- [ ] Announce the patch release on Discord Announcements channel
|
mikeee/dapr
|
.github/ISSUE_TEMPLATE/minor-patch-release.md
|
Markdown
|
mit
| 1,248 |
---
name: Proposal
about: Create a technical proposal for Dapr
title: ''
labels: kind/proposal
assignees: ''
---
<!-- If you need to report a security issue please visit https://docs.dapr.io/operations/support/support-security-issues -->
## In what area(s)?
<!-- Remove the '> ' to select -->
> /area runtime
> /area operator
> /area placement
> /area docs
> /area test-and-release
## Describe the proposal
<!-- Please use this for a concrete design proposal for functionality. -->
<!-- If you just want to request a new feature and discuss the possible business value, create a Feature Request. -->
|
mikeee/dapr
|
.github/ISSUE_TEMPLATE/proposal.md
|
Markdown
|
mit
| 607 |
---
name: Question
about: Ask a question about Dapr
title: ''
labels: kind/question
assignees: ''
---
<!-- If you need to report a security issue please visit https://docs.dapr.io/operations/support/support-security-issues -->
**Note:** If you have a general support question and are looking for a quicker response, please check out our Discord channel for answers from the community:
https://aka.ms/dapr-discord
## In what area(s)?
<!-- Remove the '> ' to select -->
> /area runtime
> /area operator
> /area placement
> /area docs
> /area test-and-release
## Ask your question here
|
mikeee/dapr
|
.github/ISSUE_TEMPLATE/question.md
|
Markdown
|
mit
| 594 |
organization: dapr
defaultSticker: clrqh1xny39170fl75cawk0h5
stickers:
-
id: clrqh1xny39170fl75cawk0h5
alias: runtime-badge
|
mikeee/dapr
|
.github/holopin.yml
|
YAML
|
mit
| 134 |
# Description
<!--
Please explain the changes you've made.
-->
## Issue reference
<!--
We strive to have all PRs opened based on an issue, where the problem or feature has been discussed prior to implementation.
-->
Please reference the issue this PR will close: #_[issue number]_
## Checklist
Please make sure you've completed the relevant tasks for this PR, out of the following list:
* [ ] Code compiles correctly
* [ ] Created/updated tests
* [ ] Unit tests passing
* [ ] End-to-end tests passing
* [ ] Extended the documentation / Created issue in the https://github.com/dapr/docs/ repo: dapr/docs#_[issue number]_
* [ ] Specification has been updated / Created issue in the https://github.com/dapr/docs/ repo: dapr/docs#_[issue number]_
* [ ] Provided sample for the feature / Created issue in the https://github.com/dapr/docs/ repo: dapr/docs#_[issue number]_
|
mikeee/dapr
|
.github/pull_request_template.md
|
Markdown
|
mit
| 881 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script automerges PRs in Dapr.
import os
from github import Github
g = Github(os.getenv("GITHUB_TOKEN"))
repo = g.get_repo(os.getenv("GITHUB_REPOSITORY"))
maintainers = [m.strip() for m in os.getenv("MAINTAINERS").split(',')]
def fetch_pulls(mergeable_state, labels = {'automerge'}):
return [pr for pr in repo.get_pulls(state='open', sort='created') \
if (not pr.draft) and pr.mergeable_state == mergeable_state and \
(not labels or len(labels.intersection({l.name for l in pr.labels})) > 0)]
def is_approved(pr):
approvers = [r.user.login for r in pr.get_reviews() if r.state == 'APPROVED' and r.user.login in maintainers]
return len([a for a in approvers if repo.get_collaborator_permission(a) in ['admin', 'write']]) > 0
# First, find a PR that can be merged
pulls = fetch_pulls('clean')
print(f"Detected {len(pulls)} open pull requests in {repo.name} to be automerged.")
merged = False
for pr in pulls:
if is_approved(pr):
# Merge only one PR per run.
print(f"Merging PR {pr.html_url}")
try:
pr.merge(merge_method='squash')
merged = True
break
except:
print(f"Failed to merge PR {pr.html_url}")
if len(pulls) > 0 and not merged:
print("No PR was automerged.")
# Now, update all PRs that are behind, regardless of automerge label.
pulls = fetch_pulls('behind', {'automerge', 'autoupdate'})
print(f"Detected {len(pulls)} open pull requests in {repo.name} to be updated.")
for pr in pulls:
print(f"Updating PR {pr.html_url}")
try:
pr.update_branch()
except:
print(f"Failed to update PR {pr.html_url}")
pulls = fetch_pulls('dirty')
print(f"Detected {len(pulls)} open pull requests in {repo.name} to be automerged but are in dirty state.")
for pr in pulls:
print(f"PR is in dirty state: {pr.html_url}")
print("Done.")
|
mikeee/dapr
|
.github/scripts/automerge.py
|
Python
|
mit
| 2,466 |
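The automerge script above is driven entirely by environment variables (GITHUB_TOKEN, GITHUB_REPOSITORY, and a comma-separated MAINTAINERS list), so it can also be exercised outside of CI. A minimal sketch of such a local run, where the token source and the maintainer subset are illustrative placeholders:

# A minimal sketch of running the automerge script locally; the token source,
# repository, and maintainer list below are illustrative placeholders.
import os
import subprocess

env = dict(os.environ)
env.update({
    "GITHUB_TOKEN": os.environ["GH_TOKEN"],   # assumes a PAT exported as GH_TOKEN
    "GITHUB_REPOSITORY": "dapr/dapr",
    "MAINTAINERS": "artursouza,yaron2",       # example subset, comma-separated
})
subprocess.run(["python", ".github/scripts/automerge.py"], env=env, check=True)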
#!/usr/bin/env node
import { readFile } from 'node:fs/promises'
const match = `// Uncomment for local development for testing with changes in the components-contrib && kit repositories.
// Don't commit with this uncommented!
//
// replace github.com/dapr/components-contrib => ../components-contrib
// replace github.com/dapr/kit => ../kit
//
// Then, run \`make modtidy-all\` in this repository.
// This ensures that go.mod and go.sum are up-to-date for each go.mod file.`
const read = await readFile('go.mod', { encoding: 'utf8' })
if (!read.includes(match)) {
console.log(
'File go.mod was committed with a change in the block around "replace github.com/dapr/components-contrib"'
)
process.exit(1)
}
console.log('go.mod is ok')
|
mikeee/dapr
|
.github/scripts/check_go_mod.mjs
|
JavaScript
|
mit
| 754 |
#!/usr/bin/env bash
#
# Copyright 2023 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ue
# Thanks to https://ihateregex.io/expr/semver/
SEMVER_REGEX='^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$'
REL_VERSION=`echo $1 | sed -r 's/^[vV]?([0-9].+)$/\1/'`
if [ `echo $REL_VERSION | pcre2grep "$SEMVER_REGEX"` ]; then
echo "$REL_VERSION is a valid semantic version."
else
echo "$REL_VERSION is not a valid semantic version."
exit 1
fi
MAJOR_MINOR_VERSION=`echo $REL_VERSION | cut -d. -f1,2`
RELEASE_BRANCH="release-$MAJOR_MINOR_VERSION"
RELEASE_TAG="v$REL_VERSION"
if [ `git rev-parse --verify origin/$RELEASE_BRANCH 2>/dev/null` ]; then
echo "$RELEASE_BRANCH branch already exists, checking it out ..."
git checkout $RELEASE_BRANCH
else
echo "$RELEASE_BRANCH does not exist, creating ..."
git checkout -b $RELEASE_BRANCH
git push origin $RELEASE_BRANCH
fi
echo "$RELEASE_BRANCH branch is ready."
if [ `git rev-parse --verify $RELEASE_TAG 2>/dev/null` ]; then
echo "$RELEASE_TAG tag already exists, aborting ..."
exit 2
fi
echo "Tagging $RELEASE_TAG ..."
git tag $RELEASE_TAG
echo "$RELEASE_TAG is tagged."
echo "Pushing $RELEASE_TAG tag ..."
git push origin $RELEASE_TAG
echo "$RELEASE_TAG tag is pushed."
|
mikeee/dapr
|
.github/scripts/create-release.sh
|
Shell
|
mit
| 1,881 |
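create-release.sh strips an optional leading "v" off the requested version and validates the result against a semantic-version pattern before creating the release branch and tag. A small Python sketch of that normalization and check, using illustrative sample versions:

# Sketch of the version normalization and semver check performed by
# create-release.sh; the sample inputs are illustrative only.
import re

SEMVER_REGEX = r'^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$'

for raw in ("v1.13.0", "1.13.0-rc.2", "not-a-version"):
    rel_version = re.sub(r'^[vV]?([0-9].+)$', r'\1', raw)  # strip optional leading v
    ok = re.match(SEMVER_REGEX, rel_version) is not None
    print(rel_version, "valid" if ok else "invalid")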
// List of owner who can control dapr-bot workflow
// IMPORTANT: Make sure usernames are lower-cased
// TODO: Read owners from OWNERS file.
const owners = [
'addjuarez',
'artursouza',
'ashiquemd',
'berndverst',
'cicoyle',
'daixiang0',
'deepanshua',
'halspang',
'italypaleale',
'johnewart',
'joshvanl',
'kaibocai',
'mcandeia',
'mikeee',
'msfussell',
'mukundansundar',
'pkedy',
'pruthvidhodda',
'rabollin',
'robertojrojas',
'ryanlettieri',
'shivamkm07',
'shubham1172',
'skyao',
'taction',
'tanvigour',
'yaron2'
]
const SDKs = [
'dotnet-sdk',
'go-sdk',
'java-sdk',
'js-sdk',
'python-sdk',
'php-sdk',
]
const docsIssueBodyTpl = (
issueNumber
) => `This issue was automatically created by \
[Dapr Bot](https://github.com/dapr/dapr/blob/master/.github/workflows/dapr-bot.yml) because a \"docs-needed\" label \
was added to dapr/dapr#${issueNumber}. \n\n\
TODO: Add more details as per [this template](.github/ISSUE_TEMPLATE/new-content-needed.md).`
const sdkIssueBodyTpl = (
issueNumber
) => `This issue was automatically created by \
[Dapr Bot](https://github.com/dapr/dapr/blob/master/.github/workflows/dapr-bot.yml) because a \"sdk-needed\" label \
was added to dapr/dapr#${issueNumber}. \n\n\
TODO: Add more details.`
module.exports = async ({ github, context }) => {
if (
context.eventName == 'issue_comment' &&
context.payload.action == 'created'
) {
await handleIssueCommentCreate({ github, context })
} else if (
context.eventName == 'issues' &&
context.payload.action == 'labeled'
) {
await handleIssueLabeled({ github, context })
} else {
console.log(`[main] event ${context.eventName} not supported, exiting.`)
}
}
/**
* Handle issue comment create event.
*/
async function handleIssueCommentCreate({ github, context }) {
const payload = context.payload
const issue = context.issue
const username = context.actor.toLowerCase()
const isFromPulls = !!payload.issue.pull_request
const commentBody = ((payload.comment.body || '') + '').trim()
if (!commentBody || !commentBody.startsWith('/')) {
// Not a command
return
}
const commandParts = commentBody.split(/\s+/)
const command = commandParts.shift()
// Commands that can be executed by anyone.
if (command == '/assign') {
await cmdAssign(github, issue, username, isFromPulls)
return
}
// Commands that can only be executed by owners.
if (!owners.includes(username)) {
console.log(
`[handleIssueCommentCreate] user ${username} is not an owner, exiting.`
)
await commentUserNotAllowed(github, issue, username)
return
}
switch (command) {
case '/make-me-laugh':
await cmdMakeMeLaugh(github, issue)
break
case '/ok-to-test':
await cmdOkToTest(github, issue, isFromPulls)
break
case '/ok-to-perf':
await cmdOkToPerf(
github,
issue,
isFromPulls,
commandParts.join(' ')
)
break
case '/ok-to-perf-components':
await cmdOkToPerfComponents(
github,
issue,
isFromPulls,
commandParts.join(' ')
)
break
case '/test-sdk-all':
case '/test-sdk-java':
case '/test-sdk-python':
case '/test-sdk-js':
case '/test-sdk-go':
await cmdTestSDK(
github,
issue,
isFromPulls,
command,
commandParts.join(' ')
)
break
case '/test-version-skew':
const previousVersion =
commandParts.length > 0 ? commandParts.shift() : null
await cmdTestVersionSkew(
github,
issue,
isFromPulls,
command,
previousVersion,
commandParts.join(' ')
)
break
default:
console.log(
`[handleIssueCommentCreate] command ${command} not found, exiting.`
)
break
}
}
/**
* Handle issue labeled event.
*/
async function handleIssueLabeled({ github, context }) {
const payload = context.payload
const label = payload.label.name
const issueNumber = payload.issue.number
// This should not run in forks.
if (context.repo.owner !== 'dapr') {
console.log('[handleIssueLabeled] not running in dapr repo, exiting.')
return
}
// Authorization is not required here because it's triggered by an issue label event.
// Only authorized users can add labels to issues.
if (label == 'docs-needed') {
// Open a new issue
await github.rest.issues.create({
owner: 'dapr',
repo: 'docs',
title: `New content needed for dapr/dapr#${issueNumber}`,
labels: ['content/missing-information', 'created-by/dapr-bot'],
body: docsIssueBodyTpl(issueNumber),
})
} else if (label == 'sdk-needed') {
// Open an issue in all SDK repos.
for (const sdk of SDKs) {
await github.rest.issues.create({
owner: 'dapr',
repo: sdk,
title: `Add support for dapr/dapr#${issueNumber}`,
labels: ['enhancement', 'created-by/dapr-bot'],
body: sdkIssueBodyTpl(issueNumber),
})
}
} else {
console.log(
`[handleIssueLabeled] label ${label} not supported, exiting.`
)
}
}
/**
* Assign the issue to the user who commented.
* @param {*} github GitHub object reference
* @param {*} issue GitHub issue object
* @param {string} username GitHub user who commented
* @param {boolean} isFromPulls is the workflow triggered by a pull request?
*/
async function cmdAssign(github, issue, username, isFromPulls) {
if (isFromPulls) {
console.log(
'[cmdAssign] pull requests unsupported, skipping command execution.'
)
return
} else if (issue.assignees && issue.assignees.length !== 0) {
console.log(
'[cmdAssign] issue already has assignees, skipping command execution.'
)
return
}
await github.rest.issues.addAssignees({
owner: issue.owner,
repo: issue.repo,
issue_number: issue.number,
assignees: [username],
})
}
/**
* Comment a funny joke.
* @param {*} github GitHub object reference
* @param {*} issue GitHub issue object
*/
async function cmdMakeMeLaugh(github, issue) {
const result = await github.request(
'https://official-joke-api.appspot.com/random_joke'
)
    let jokedata = result.data
    let joke = 'I have a bad feeling about this.'
if (jokedata && jokedata.setup && jokedata.punchline) {
joke = `${jokedata.setup} - ${jokedata.punchline}`
}
await github.rest.issues.createComment({
owner: issue.owner,
repo: issue.repo,
issue_number: issue.number,
body: joke,
})
}
/**
* Trigger e2e test for the pull request.
* @param {*} github GitHub object reference
* @param {*} issue GitHub issue object
* @param {boolean} isFromPulls is the workflow triggered by a pull request?
*/
async function cmdOkToTest(github, issue, isFromPulls) {
if (!isFromPulls) {
console.log(
'[cmdOkToTest] only pull requests supported, skipping command execution.'
)
return
}
// Get pull request
const pull = await github.rest.pulls.get({
owner: issue.owner,
repo: issue.repo,
pull_number: issue.number,
})
if (pull && pull.data) {
// Get commit id and repo from pull head
const testPayload = {
pull_head_ref: pull.data.head.sha,
pull_head_repo: pull.data.head.repo.full_name,
command: 'ok-to-test',
issue: issue,
}
// Fire repository_dispatch event to trigger e2e test
await github.rest.repos.createDispatchEvent({
owner: issue.owner,
repo: issue.repo,
event_type: 'e2e-test',
client_payload: testPayload,
})
console.log(
`[cmdOkToTest] triggered E2E test for ${JSON.stringify(
testPayload
)}`
)
}
}
/**
* Trigger performance test for the pull request.
* @param {*} github GitHub object reference
* @param {*} issue GitHub issue object
* @param {boolean} isFromPulls is the workflow triggered by a pull request?
*/
async function cmdOkToPerf(github, issue, isFromPulls, args) {
if (!isFromPulls) {
console.log(
'[cmdOkToPerf] only pull requests supported, skipping command execution.'
)
return
}
// Get pull request
const pull = await github.rest.pulls.get({
owner: issue.owner,
repo: issue.repo,
pull_number: issue.number,
})
if (pull && pull.data) {
// Get commit id and repo from pull head
const perfPayload = {
pull_head_ref: pull.data.head.sha,
pull_head_repo: pull.data.head.repo.full_name,
command: 'ok-to-perf',
args,
issue: issue,
}
// Fire repository_dispatch event to trigger e2e test
await github.rest.repos.createDispatchEvent({
owner: issue.owner,
repo: issue.repo,
event_type: 'perf-test',
client_payload: perfPayload,
})
console.log(
`[cmdOkToPerf] triggered perf test for ${JSON.stringify(
perfPayload
)}`
)
}
}
/**
* Trigger components performance test for the pull request.
* @param {*} github GitHub object reference
* @param {*} issue GitHub issue object
* @param {boolean} isFromPulls is the workflow triggered by a pull request?
*/
async function cmdOkToPerfComponents(github, issue, isFromPulls, args) {
if (!isFromPulls) {
console.log(
'[cmdOkToPerfComponents] only pull requests supported, skipping command execution.'
)
return
}
// Get pull request
const pull = await github.rest.pulls.get({
owner: issue.owner,
repo: issue.repo,
pull_number: issue.number,
})
if (pull && pull.data) {
// Get commit id and repo from pull head
const perfPayload = {
pull_head_ref: pull.data.head.sha,
pull_head_repo: pull.data.head.repo.full_name,
command: 'ok-to-perf-components',
args,
issue: issue,
}
// Fire repository_dispatch event to trigger e2e test
await github.rest.repos.createDispatchEvent({
owner: issue.owner,
repo: issue.repo,
event_type: 'components-perf-test',
client_payload: perfPayload,
})
console.log(
`[cmdOkToPerfComponents] triggered perf test for ${JSON.stringify(
perfPayload
)}`
)
}
}
/**
* Trigger SDK test(s) for the pull request.
* @param {*} github GitHub object reference
* @param {*} issue GitHub issue object
* @param {boolean} isFromPulls is the workflow triggered by a pull request?
* @param {string} command which was used
*/
async function cmdTestSDK(github, issue, isFromPulls, command, args) {
if (!isFromPulls) {
console.log(
'[cmdTestSDK] only pull requests supported, skipping command execution.'
)
return
}
// Get pull request
const pull = await github.rest.pulls.get({
owner: issue.owner,
repo: issue.repo,
pull_number: issue.number,
})
if (pull && pull.data) {
// Get commit id and repo from pull head
const testSDKPayload = {
pull_head_ref: pull.data.head.sha,
pull_head_repo: pull.data.head.repo.full_name,
command: command.substring(1),
args,
issue: issue,
}
// Fire repository_dispatch event to trigger e2e test
await github.rest.repos.createDispatchEvent({
owner: issue.owner,
repo: issue.repo,
event_type: command.substring(1),
client_payload: testSDKPayload,
})
console.log(
`[cmdTestSDK] triggered SDK test for ${JSON.stringify(
testSDKPayload
)}`
)
}
}
/**
* Sends a comment when the user who tried triggering the bot action is not allowed to do so.
* @param {*} github GitHub object reference
* @param {*} issue GitHub issue object
* @param {string} username GitHub user who commented
*/
async function commentUserNotAllowed(github, issue, username) {
await github.rest.issues.createComment({
owner: issue.owner,
repo: issue.repo,
issue_number: issue.number,
body: `π @${username}, my apologies but I can't perform this action for you because your username is not in the allowlist in the file ${'`.github/scripts/dapr_bot.js`'}.`,
})
}
/**
* Trigger Version Skew tests for the pull request.
* @param {*} github GitHub object reference
* @param {*} issue GitHub issue object
* @param {boolean} isFromPulls is the workflow triggered by a pull request?
* @param {string} command which was used
* @param {string} previousVersion previous version to test against
*/
async function cmdTestVersionSkew(
github,
issue,
isFromPulls,
command,
previousVersion,
args
) {
if (!isFromPulls) {
console.log(
'[cmdTestVersionSkew] only pull requests supported, skipping command execution.'
)
return
}
// Get pull request
const pull = await github.rest.pulls.get({
owner: issue.owner,
repo: issue.repo,
pull_number: issue.number,
})
if (pull && pull.data) {
// Get commit id and repo from pull head
const testVersionSkewPayload = {
pull_head_ref: pull.data.head.sha,
pull_head_repo: pull.data.head.repo.full_name,
command: command.substring(1),
previous_version: previousVersion,
args,
issue: issue,
}
// Fire repository_dispatch event to trigger e2e test
await github.rest.repos.createDispatchEvent({
owner: issue.owner,
repo: issue.repo,
event_type: command.substring(1),
client_payload: testVersionSkewPayload,
})
console.log(
`[cmdTestVersionSkew] triggered Version Skew test for ${JSON.stringify(
testVersionSkewPayload
)}`
)
}
}
|
mikeee/dapr
|
.github/scripts/dapr_bot.js
|
JavaScript
|
mit
| 15,073 |
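Bot commands are ordinary issue comments beginning with a slash: the first whitespace-separated token selects the command and, for /test-version-skew, the next token is treated as the previous version to test against. A rough Python sketch of that parsing, with an invented comment body:

# Sketch of how the bot splits a command comment into command, optional
# previous version, and remaining args; the comment text is invented.
comment_body = "/test-version-skew 1.12.4 extra-flags"

parts = comment_body.strip().split()
command = parts.pop(0)                               # "/test-version-skew"
previous_version = parts.pop(0) if parts else None   # "1.12.4"
args = " ".join(parts)                               # "extra-flags"
print(command, previous_version, args)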
const fs = require('fs')
module.exports = async ({ glob, core }) => {
const globber = await glob.create(
process.env['TEST_OUTPUT_FILE_PREFIX'] + '_summary_table_*.json'
)
for await (const file of globber.globGenerator()) {
const testSummary = JSON.parse(fs.readFileSync(file, 'utf8'))
await core.summary
.addHeading(testSummary.test)
.addTable(testSummary.data)
.write()
}
}
|
mikeee/dapr
|
.github/scripts/dapr_tests_summary.js
|
JavaScript
|
mit
| 453 |
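The summary script globs for files named <TEST_OUTPUT_FILE_PREFIX>_summary_table_*.json and renders each one as a heading plus a table in the workflow summary. A hypothetical example of producing one such file in Python; the prefix, suite name, and table rows are invented, and the exact cell shape accepted by core.summary.addTable may be richer than plain strings:

# Hypothetical example of one summary file consumed by dapr_tests_summary.js;
# the file prefix, suite name, and table rows are invented for illustration.
import json

summary = {
    "test": "E2E tests (linux/amd64)",  # rendered as the heading
    "data": [                           # rendered via core.summary.addTable
        ["Test", "Result"],
        ["actor_reminder", "passed"],
    ],
}
with open("test_report_summary_table_e2e.json", "w") as f:
    json.dump(summary, f)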
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script finds all release notes from issues in the milestone project.
import os
import re
import sys
from datetime import date
from string import Template
from github import Github
releaseIssueRegex = "^v(.*) Release Planning$"
releaseNoteRegex = "^RELEASE NOTE:(.*)$"
dashboardReleaseVersionRegex = r"v([0-9\.]+)-?.*"
majorReleaseRegex = r"^([0-9]+\.[0-9]+)\.[0-9]+.*$"
milestoneRegex = "https://github.com/dapr/(.+)/milestone/([0-9]+)"
githubToken = os.getenv("GITHUB_TOKEN")
top_repos=[
'dapr',
'cli',
'components-contrib',
'dashboard',
'dotnet-sdk',
'go-sdk',
'java-sdk',
'python-sdk',
'php-sdk',
'rust-sdk',
'cpp-sdk',
'js-sdk',
'workflows',
'docs',
'quickstarts',
'samples']
subtitles={
"dapr": "Dapr Runtime",
"cli": "Dapr CLI",
"dashboard": "Dashboard",
"components-contrib": "Components",
"java-sdk": "Java SDK",
"dotnet-sdk": ".NET SDK",
"go-sdk": "Go SDK",
"python-sdk": "Python SDK",
"php-sdk": "PHP SDK",
"js-sdk": "JavaScript SDK",
"rust-sdk": "Rust SDK",
"cpp-sdk": "C++ SDK",
"docs": "Documentation",
"test-infra": "Test Infrastructure"
}
text_substitutions=[
(re.compile(re.escape("**ADD**"), re.IGNORECASE), "**ADDED**"),
(re.compile(re.escape("**SOLVE**"), re.IGNORECASE), "**SOLVED**"),
(re.compile(re.escape("**FIX**"), re.IGNORECASE), "**FIXED**"),
(re.compile(re.escape("**RESOLVE**"), re.IGNORECASE), "**RESOLVED**"),
(re.compile(re.escape("**REMOVE**"), re.IGNORECASE), "**REMOVED**"),
(re.compile(re.escape("**UPDATE**"), re.IGNORECASE), "**UPDATED**"),
(re.compile(re.escape("**DOCUMENT**"), re.IGNORECASE), "**DOCUMENTED**"),
]
changes=[]
def get_repo_subtitle(name):
if name in subtitles:
return subtitles[name]
return name.capitalize()
def get_repo_priority(name):
if name in top_repos:
return top_repos.index(name)
return len(top_repos)
# using an access token
g = Github(githubToken)
# discover milestone project
issues = [i for i in g.get_repo("dapr/dapr").get_issues(state='open') if re.search(releaseIssueRegex, i.title)]
issues = sorted(issues, key=lambda i:i.id)
if len(issues) == 0:
print("FATAL: could not find issue for release.")
sys.exit(0)
if len(issues) > 1:
print("WARNING: found more than one issue for release, so first issue created will be picked: {}".format(
[i.title for i in issues]))
issue = issues[0]
print("Found issue: {}".format(issue.title))
# get release version from project name
releaseVersion = re.search(releaseIssueRegex, issue.title).group(1)
print("Generating release notes for Dapr {}...".format(releaseVersion))
# Set REL_VERSION.
if os.getenv("GITHUB_ENV"):
with open(os.getenv("GITHUB_ENV"), "a") as githubEnv:
githubEnv.write("REL_VERSION={}\n".format(releaseVersion))
githubEnv.write("REL_BRANCH=release-{}\n".format(
re.search(majorReleaseRegex, releaseVersion).group(1)))
# get dashboard release version
releases = sorted([r for r in g.get_repo("dapr/dashboard").get_releases()], key=lambda r: r.created_at, reverse=True)
dashboardReleaseVersion = re.search(dashboardReleaseVersionRegex, releases[0].tag_name).group(1)
print("Detected Dapr Dashboard version {}".format(dashboardReleaseVersion))
releaseNotePath="docs/release_notes/v{}.md".format(releaseVersion)
# get all issues previously released to avoid adding issues in previous release candidates.
# GitHub API does not have an easy way to list all projects for an issue or PR.
# So, we extract all issues references in previous release notes.
issuesOrPRsPreviouslyReleased = {}
for filename in os.listdir(os.path.join(os.getcwd(), 'docs/release_notes')):
filepath = os.path.join('docs/release_notes', filename)
if releaseNotePath == filepath:
continue
with open(filepath, 'r') as f:
for line in f:
for m in re.findall(r'\((https://github.com/\S+)\)', line):
issuesOrPRsPreviouslyReleased[m] = True
# get all milestones
repoMilestonePairs = re.findall(milestoneRegex, issue.body)
issuesOrPRs = []
for repoMilestonePair in repoMilestonePairs:
repo = g.get_repo(f"dapr/{repoMilestonePair[0]}")
milestone = repo.get_milestone(int(repoMilestonePair[1]))
# PRs are also returned as `issue`
issues = [i for i in repo.get_issues(milestone, state='all')]
print(f"Detected milestone {milestone.title} for repo {repoMilestonePair[0]} with {len(issues)} issues or pull requests")
issuesOrPRs = issuesOrPRs + issues
print("Detected {} issues or pull requests.".format(len(issuesOrPRs)))
contributors = set()
# generate changes and add contributors to set with or without release notes.
for issueOrPR in issuesOrPRs:
url = issueOrPR.html_url
if url in issuesOrPRsPreviouslyReleased:
# Issue was previously released, ignoring.
continue
try:
# only a PR can be converted to a PR object, otherwise will throw error.
pr = issueOrPR.as_pull_request()
contributors.add("@" + str(pr.user.login))
except:
a = [l.login for l in issueOrPR.assignees]
if len(a) == 0:
print("Issue is unassigned: {}".format(url))
for c in a:
contributors.add("@" + str(c))
repo = issueOrPR.repository
if repo == "docs":
# Do not add this to the list of changes (but add to contributors).
continue
hasNote = False
if not (issueOrPR.body is None):
match = re.search(releaseNoteRegex, issueOrPR.body, re.M)
if match:
note = match.group(1).strip()
if note:
if note.upper() not in ["NOT APPLICABLE", "N/A"]:
for text_substitution in text_substitutions:
note = text_substitution[0].sub(text_substitution[1], note)
changes.append((repo, issueOrPR, note, contributors, url))
hasNote = True
if not hasNote:
# Issue or PR has no release note.
# Auto-generate a release note as fallback.
note = '**RESOLVED** ' + issueOrPR.title
changes.append((repo, issueOrPR, note, contributors, url))
assignee = 'nobody'
if issueOrPR.assignee:
assignee = issueOrPR.assignee.login
warnings=[]
changeLines=[]
lastSubtitle=""
breakingChangeLines=[]
lastBreakingChangeSubtitle=""
deprecationNoticeLines=[]
lastDeprecationNoticeSubtitle=""
# generate changes for release notes (only issues/pr that have release notes)
for change in sorted(changes, key=lambda c: (get_repo_priority(c[0].name), c[0].stargazers_count * -1, c[0].id, c[1].id)):
breakingChange='breaking-change' in [l.name for l in change[1].labels]
deprecationNotice='deprecation' in [l.name for l in change[1].labels]
subtitle=get_repo_subtitle(change[0].name)
if lastSubtitle != subtitle:
lastSubtitle = subtitle
changeLines.append("### " + subtitle)
# set issue url
changeUrl = " [" + str(change[1].number) + "](" + change[4] + ")"
changeLines.append("- " + change[2] + changeUrl)
if breakingChange:
if lastBreakingChangeSubtitle != subtitle:
lastBreakingChangeSubtitle = subtitle
breakingChangeLines.append("### " + subtitle)
breakingChangeLines.append("- " + change[2] + changeUrl)
if deprecationNotice:
if lastDeprecationNoticeSubtitle != subtitle:
lastDeprecationNoticeSubtitle = subtitle
deprecationNoticeLines.append("### " + subtitle)
deprecationNoticeLines.append("- " + change[2] + changeUrl)
if len(breakingChangeLines) > 0:
warnings.append("> **Note: This release contains a few [breaking changes](#breaking-changes).**")
# generate release notes from template
template=''
releaseNoteTemplatePath="docs/release_notes/template.md"
with open(releaseNoteTemplatePath, 'r') as file:
template = file.read()
changesText='\n'.join(changeLines)
breakingChangesText='None.'
if len(breakingChangeLines) > 0:
breakingChangesText='\n'.join(breakingChangeLines)
deprecationNoticesText='None.'
if len(deprecationNoticeLines) > 0:
deprecationNoticesText='\n'.join(deprecationNoticeLines)
warningsText=''
if len(warnings) > 0:
warningsText='\n\n'.join(warnings)
with open(releaseNotePath, 'w') as file:
file.write(Template(template).safe_substitute(
dapr_version=releaseVersion,
dapr_dashboard_version=dashboardReleaseVersion,
dapr_changes=changesText,
dapr_breaking_changes=breakingChangesText,
dapr_deprecation_notices=deprecationNoticesText,
warnings=warningsText,
dapr_contributors=", ".join(sorted(list(contributors), key=str.casefold)),
today=date.today().strftime("%Y-%m-%d")))
print("Done.")
|
mikeee/dapr
|
.github/scripts/generate_release_notes.py
|
Python
|
mit
| 9,408 |
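Release-note text is lifted from issue and PR bodies via the RELEASE NOTE: marker and then normalized with the verb substitutions defined above. A compact sketch of that extraction step, using an invented issue body:

# Sketch of the release-note extraction and verb substitution performed by
# generate_release_notes.py; the issue body below is invented for illustration.
import re

release_note_regex = "^RELEASE NOTE:(.*)$"
body = "Some description...\nRELEASE NOTE: **FIX** panic in placement service."

match = re.search(release_note_regex, body, re.M)
note = match.group(1).strip() if match else None
if note and note.upper() not in ["NOT APPLICABLE", "N/A"]:
    note = re.sub(re.escape("**FIX**"), "**FIXED**", note, flags=re.IGNORECASE)
print(note)  # **FIXED** panic in placement service.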
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script parses the release version from the Git tag and sets the parsed version in the
# environment variable REL_VERSION. If the tag is the final version, it sets
# LATEST_RELEASE to true to add 'latest' tag to docker image.
import os
import sys
import datetime
gitRef = os.getenv("GITHUB_REF")
tagRefPrefix = "refs/tags/v"
with open(os.getenv("GITHUB_ENV"), "a") as githubEnv:
if "schedule" in sys.argv:
dateTag = datetime.datetime.utcnow().strftime("%Y-%m-%d")
githubEnv.write("REL_VERSION=nightly-{}\n".format(dateTag))
print ("Nightly release build nightly-{}".format(dateTag))
sys.exit(0)
if gitRef is None or not gitRef.startswith(tagRefPrefix):
githubEnv.write("REL_VERSION=edge\n")
print ("This is daily build from {}...".format(gitRef))
sys.exit(0)
releaseVersion = gitRef[len(tagRefPrefix):]
releaseNotePath="docs/release_notes/v{}.md".format(releaseVersion)
if gitRef.find("-rc.") > 0:
print ("Release Candidate build from {}...".format(gitRef))
else:
print ("Checking if {} exists".format(releaseNotePath))
if os.path.exists(releaseNotePath):
print ("Found {}".format(releaseNotePath))
# Set LATEST_RELEASE to true
githubEnv.write("LATEST_RELEASE=true\n")
else:
print ("{} is not found".format(releaseNotePath))
sys.exit(1)
print ("Release build from {}...".format(gitRef))
githubEnv.write("REL_VERSION={}\n".format(releaseVersion))
|
mikeee/dapr
|
.github/scripts/get_release_version.py
|
Python
|
mit
| 2,111 |
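The net effect of the tag-parsing script is a mapping from the triggering ref to REL_VERSION: branch builds become edge, tag builds take the version after refs/tags/v, and scheduled builds get a nightly date tag. A sketch of the non-scheduled cases, with illustrative refs:

# Sketch of how get_release_version.py derives REL_VERSION from GITHUB_REF;
# the example refs are illustrative.
TAG_REF_PREFIX = "refs/tags/v"

def rel_version(git_ref):
    if git_ref is None or not git_ref.startswith(TAG_REF_PREFIX):
        return "edge"                      # daily build from a branch
    return git_ref[len(TAG_REF_PREFIX):]   # e.g. "1.13.0" or "1.13.0-rc.1"

for ref in ("refs/heads/master", "refs/tags/v1.13.0-rc.1", "refs/tags/v1.13.0"):
    print(ref, "->", rel_version(ref))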
#!/usr/bin/env bash
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
replace_all() {
SUBSTRING=$1
CONTENT=$2
FILES=`grep -Hrl $SUBSTRING charts`
for file in $FILES; do
echo "Replacing \"$SUBSTRING\" with \"$CONTENT\" in $file ..."
if [[ "$OSTYPE" == "darwin"* ]]; then
# Mac OSX
sed -i '' -e "s/$SUBSTRING/$CONTENT/" "$file"
else
# Linux
sed -e "s/$SUBSTRING/$CONTENT/" -i "$file"
fi
done
}
if [ -z $REL_VERSION ]; then
echo "REL_VERSION is not set. Exiting ..."
exit 1
fi
DAPR_VERSION_HELM="${REL_VERSION}"
DAPR_VERSION_TAG="${REL_VERSION}"
if [[ "$REL_VERSION" == "edge" || "$REL_VERSION" == "nightly"* ]]; then
DAPR_VERSION_HELM="0.0.0"
fi
replace_all "'edge'" "'$DAPR_VERSION_TAG'"
replace_all "'0.0.0'" "'$DAPR_VERSION_HELM'"
|
mikeee/dapr
|
.github/scripts/set_helm_dapr_version.sh
|
Shell
|
mit
| 1,327 |
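For edge and nightly builds the Helm chart version is pinned to 0.0.0 while the image tag keeps the raw REL_VERSION; released versions use the same value for both. A tiny sketch of that mapping, with illustrative inputs:

# Sketch of the version mapping performed by set_helm_dapr_version.sh;
# the sample REL_VERSION values are illustrative.
def helm_and_tag(rel_version):
    is_dev = rel_version == "edge" or rel_version.startswith("nightly")
    return ("0.0.0" if is_dev else rel_version), rel_version

for v in ("edge", "nightly-2024-01-15", "1.13.0"):
    print(v, "->", helm_and_tag(v))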
#!/usr/bin/env python3
#
# Copyright 2024 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script validates resource utilization of a Dapr sidecar.
import re
import subprocess
import time
import os
import numpy as np
import psutil
import requests
from pathlib import Path
from scipy.stats import ttest_ind
def get_binary_size(binary_path):
try:
size = os.path.getsize(Path(binary_path).expanduser()) // 1024 # in kilobytes
return size
except FileNotFoundError:
print(f"Could not find file size for {binary_path}")
return None
def run_process_background(args):
process = subprocess.Popen(args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
return process
def kill_process(process):
process.terminate()
def get_memory_info(process):
try:
process_info = psutil.Process(process.pid)
resident_memory = process_info.memory_info().rss / (1024 * 1024) # in megabytes
return resident_memory
except psutil.NoSuchProcess:
return None
def get_goroutine_count():
try:
response = requests.get('http://localhost:9090/metrics')
metrics = response.text
match = re.search(r'go_goroutines (\d+)', metrics)
if match:
goroutine_count = int(match.group(1))
return goroutine_count
else:
raise ValueError("Failed to extract Goroutine count from metrics.")
except (requests.ConnectionError, IndexError, ValueError):
return None
def run_sidecar(executable, app_id):
print(f"Running {executable} ...")
expanded_executable=Path(executable).expanduser()
args = [expanded_executable, f"--app-id", f"{app_id}"]
# Run the process in the background
background_process = run_process_background(args)
memory_data = []
goroutine_data = []
# Initial wait to remove any noise from initialization.
time.sleep(10)
# Collect resident memory every second for X seconds
cycles = int(getenv("SECONDS_FOR_PROCESS_TO_RUN", 5))
for _ in range(cycles):
time.sleep(1)
memory = get_memory_info(background_process)
goroutine_count = get_goroutine_count()
if memory is not None:
memory_data.append(memory)
if goroutine_count is not None:
goroutine_data.append(goroutine_count)
# Kill the process
kill_process(background_process)
if len(memory_data) == 0 or len(goroutine_data) == 0:
raise Exception(f"Could not collect data for {executable}: {( memory_data, goroutine_data)}")
print(f"Collected metrics for {executable}.")
return memory_data, goroutine_data
def test_diff(arr_old, arr_new, label, test='ttest'):
# Output mean and median for memory utilization
p25_new, p50_new, p75_new = np.percentile(arr_new, [25, 50, 75])
p25_old, p50_old, p75_old = np.percentile(arr_old, [25, 50, 75])
print(f"Mean for {label} (new): {np.mean(arr_new):.2f}")
print(f"25th percentile for {label} (new): {p25_new:.2f}")
print(f"50th percentile (median) for {label} (new): {p50_new:.2f}")
print(f"75th percentile for {label} (new): {p75_new:.2f}")
print(f"Mean for {label} (old): {np.mean(arr_old):.2f}")
print(f"25th percentile for {label} (old): {p25_old:.2f}")
print(f"50th percentile (median) for {label} (old): {p50_old:.2f}")
print(f"75th percentile for {label} (old): {p75_old:.2f}")
if test == 'ttest':
# Perform t-test to invalidate a > b.
t_statistic, p_value = ttest_ind(a=arr_new, b=arr_old, alternative="greater", trim=.2)
print(f"T-Statistic ({label}): {t_statistic}")
print(f"P-Value ({label}): {p_value}")
if p_value < 0.05:
print(f"Warning! Found statistically significant increase in {label}.")
return True
print(f"Passed! Did not find statistically significant increase in {label}.")
elif test == 'tp75_plus_10percent':
# Memory measurement has enough variation that the t-test is too strict.
# So, we created this custom comparison to avoid false positives.
# Picking 10% as a good enough margin observed by various runs with the same binary.
if p75_new > p75_old * 1.10:
print(f"Warning! Found significant increase in {label}.")
return True
print(f"Passed! Did not find significant increase in {label}.")
return False
def size_diff(old_binary, new_binary):
max_diff = int(getenv("LIMIT_DELTA_BINARY_SIZE", 7168)) # in KB, default is 7 MB.
old_size = get_binary_size(old_binary)
new_size = get_binary_size(new_binary)
if new_size > old_size + max_diff:
print(f"Warning! Significant increase in file size: was {old_size} KB, now {new_size} KB.")
return True
print(f"Passed! Did not find significant increase in file size: was {old_size} KB, now {new_size} KB.")
return False
def getenv(key, default):
v = os.getenv(key)
if not v or v == "":
return default
return v
if __name__ == "__main__":
goos=getenv("GOOS", "linux")
goarch=getenv("GOARCH", "amd64")
new_binary = f"./dist/{goos}_{goarch}/release/daprd"
old_binary = "~/.dapr/bin/daprd"
binary_size_diff = size_diff(old_binary, new_binary)
memory_data_new, goroutine_data_new = run_sidecar(new_binary, "treatment")
memory_data_old, goroutine_data_old = run_sidecar(old_binary, "control")
memory_diff = test_diff(memory_data_old, memory_data_new, "memory utilization (in MB)", "tp75_plus_10percent")
goroutine_diff = test_diff(goroutine_data_old, goroutine_data_new, "number of go routines", "ttest")
if binary_size_diff or memory_diff or goroutine_diff:
raise Exception("Found significant differences.")
|
mikeee/dapr
|
.github/scripts/validate_sidecar_resources.py
|
Python
|
mit
| 6,320 |
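Memory utilization is compared with the tp75_plus_10percent rule rather than the t-test, because memory samples are noisy enough to trigger false positives. A toy illustration of that check with made-up samples:

# Toy illustration of the tp75_plus_10percent comparison used for memory
# utilization in validate_sidecar_resources.py; the samples are made up.
import numpy as np

memory_old = [52.1, 53.0, 52.8, 53.4, 52.6]  # MB, control binary
memory_new = [55.9, 56.3, 56.1, 55.7, 56.0]  # MB, new binary

p75_old = np.percentile(memory_old, 75)
p75_new = np.percentile(memory_new, 75)
regression = p75_new > p75_old * 1.10
print(f"p75 old={p75_old:.2f} new={p75_new:.2f} regression={regression}")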
diff --git a/tests/e2e/actor_reminder/actor_reminder_test.go b/tests/e2e/actor_reminder/actor_reminder_test.go
index 45047f423..fbcfd8321 100644
--- a/tests/e2e/actor_reminder/actor_reminder_test.go
+++ b/tests/e2e/actor_reminder/actor_reminder_test.go
@@ -375,6 +375,8 @@ func TestActorReminder(t *testing.T) {
})
t.Run("Actor reminder rename should succeed.", func(t *testing.T) {
+ t.Skip("Actor reminder rename is no longer supported in 1.13")
+
var wg sync.WaitGroup
for iteration := 1; iteration <= numIterations; iteration++ {
wg.Add(1)
|
mikeee/dapr
|
.github/scripts/version-skew-test-patches/e2e/release-1.12/dapr-sidecar-master/0001-remove-actor-reminder-rename.patch
|
patch
|
mit
| 561 |
diff --git a/tests/e2e/service_invocation/service_invocation_test.go b/tests/e2e/service_invocation/service_invocation_test.go
index 6c1a455ec0..53545b4752 100644
--- a/tests/e2e/service_invocation/service_invocation_test.go
+++ b/tests/e2e/service_invocation/service_invocation_test.go
@@ -1493,8 +1493,7 @@ func TestNegativeCases(t *testing.T) {
// TODO: This doesn't return as an error, it should be handled more gracefully in dapr
require.False(t, testResults.MainCallSuccessful)
require.Equal(t, 500, status)
- require.Contains(t, string(testResults.RawBody), "failed to invoke")
- require.Contains(t, string(testResults.RawBody), "missing-service-0")
+ require.Contains(t, string(testResults.RawBody), "failed to resolve address for 'missing-service-0-dapr.dapr-tests.svc.cluster.local'")
require.Nil(t, err)
})
@@ -1517,8 +1516,7 @@ func TestNegativeCases(t *testing.T) {
require.Nil(t, testResults.RawBody)
require.Nil(t, err)
require.NotNil(t, testResults.RawError)
- require.Contains(t, testResults.RawError, "failed to invoke")
- require.Contains(t, testResults.RawError, "missing-service-0")
+ require.Contains(t, testResults.RawError, "failed to resolve address for 'missing-service-0-dapr.dapr-tests.svc.cluster.local'")
})
t.Run("service_timeout_http", func(t *testing.T) {
|
mikeee/dapr
|
.github/scripts/version-skew-test-patches/e2e/release-1.12/dapr-sidecar-master/0001-service-invocation-e2e-error-condition.patch
|
patch
|
mit
| 1,358 |
From 01ae6f45a75608c0322f9ac556b73d53fb3d8983 Mon Sep 17 00:00:00 2001
From: Shivam Kumar <shivamkm07@gmail.com>
Date: Tue, 19 Dec 2023 06:28:51 +0530
Subject: Remove Deprecated RenameReminder API from tests
Signed-off-by: Shivam Kumar <shivamkm07@gmail.com>
---
tests/apps/actorfeatures/app.go | 1 -
1 file changed, 1 deletion(-)
diff --git a/tests/apps/actorfeatures/app.go b/tests/apps/actorfeatures/app.go
index 9344f204..62e8c935 100644
--- a/tests/apps/actorfeatures/app.go
+++ b/tests/apps/actorfeatures/app.go
@@ -621,7 +621,6 @@ func nonHostedTestHandler(w http.ResponseWriter, r *http.Request) {
"GetReminder": {"GET", nil},
"CreateReminder": {"PUT", struct{}{}},
"DeleteReminder": {"DELETE", struct{}{}},
- "RenameReminder": {"PATCH", struct{}{}},
}
for op, t := range tests {
body, err := httpCall(t.Method, url, t.Body, http.StatusForbidden)
--
2.34.1
|
mikeee/dapr
|
.github/scripts/version-skew-test-patches/e2e/release-1.12/dapr-sidecar-master/0002-Remove-Deprecated-RenameReminder-API-from-tests.patch
|
patch
|
mit
| 891 |
commit 6dea5b910e90647ad3bffc1458a24fceb94a6044
Author: Yaron Schneider <schneider.yaron@live.com>
Date: Wed Feb 14 10:43:04 2024 -0800
* update tests
Signed-off-by: yaron2 <schneider.yaron@live.com>
---------
Signed-off-by: yaron2 <schneider.yaron@live.com>
diff --git a/tests/e2e/pubsub/pubsub_test.go b/tests/e2e/pubsub/pubsub_test.go
index c9b548143..dedd528c3 100644
--- a/tests/e2e/pubsub/pubsub_test.go
+++ b/tests/e2e/pubsub/pubsub_test.go
@@ -278,12 +278,7 @@ func testPublish(t *testing.T, publisherExternalURL string, protocol string) rec
require.NoError(t, err)
offset += numberOfMessagesToPublish + 1
- // Test bug where content-length metadata conflict makes message undeliverable in grpc subscriber.
- // We set an arbitrarily large number that it is unlikely to match the size of the payload daprd delivers.
- metadataContentLengthConflict := map[string]string{
- "content-length": "9999999",
- }
- sentTopicAMessages, err := sendToPublisher(t, publisherExternalURL, "pubsub-a-topic", protocol, metadataContentLengthConflict, "")
+ sentTopicAMessages, err := sendToPublisher(t, publisherExternalURL, "pubsub-a-topic", protocol, nil, "")
require.NoError(t, err)
offset += numberOfMessagesToPublish + 1
@@ -295,10 +290,10 @@ func testPublish(t *testing.T, publisherExternalURL string, protocol string) rec
require.NoError(t, err)
offset += numberOfMessagesToPublish + 1
- metadataRawPayload := map[string]string{
+ metadata := map[string]string{
"rawPayload": "true",
}
- sentTopicRawMessages, err := sendToPublisher(t, publisherExternalURL, "pubsub-raw-topic", protocol, metadataRawPayload, "")
+ sentTopicRawMessages, err := sendToPublisher(t, publisherExternalURL, "pubsub-raw-topic", protocol, metadata, "")
require.NoError(t, err)
offset += numberOfMessagesToPublish + 1
|
mikeee/dapr
|
.github/scripts/version-skew-test-patches/e2e/release-1.12/dapr-sidecar-master/0003-remote-content-length-test.patch
|
patch
|
mit
| 1,855 |
diff --git a/tests/e2e/workflows/workflow_test.go b/tests/e2e/workflows/workflow_test.go
index 3194acf7a..af94a2e96 100644
--- a/tests/e2e/workflows/workflow_test.go
+++ b/tests/e2e/workflows/workflow_test.go
@@ -35,7 +35,7 @@ import (
var (
tr *runner.TestRunner
- backends = []string{"actors", "sqlite"}
+ backends = []string{"actors"}
appNamePrefix = "workflowsapp"
)
@@ -55,16 +55,7 @@ func TestMain(m *testing.M) {
testApps = append(testApps, getTestApp(backend))
}
- comps := []kube.ComponentDescription{
- {
- Name: "sqlitebackend",
- TypeName: "workflowbackend.sqlite",
- MetaData: map[string]kube.MetadataValue{
- "connectionString": {Raw: `""`},
- },
- Scopes: []string{appNamePrefix + "-sqlite"},
- },
- }
+ comps := []kube.ComponentDescription{}
tr = runner.NewTestRunner("workflowsapp", testApps, comps, nil)
os.Exit(tr.Start(m))
|
mikeee/dapr
|
.github/scripts/version-skew-test-patches/e2e/release-1.13/control-plane-master/0001-remove-sqlite-backend.patch
|
patch
|
mit
| 921 |
diff --git a/tests/e2e/workflows/workflow_test.go b/tests/e2e/workflows/workflow_test.go
index 3194acf7a..af94a2e96 100644
--- a/tests/e2e/workflows/workflow_test.go
+++ b/tests/e2e/workflows/workflow_test.go
@@ -35,7 +35,7 @@ import (
var (
tr *runner.TestRunner
- backends = []string{"actors", "sqlite"}
+ backends = []string{"actors"}
appNamePrefix = "workflowsapp"
)
@@ -55,16 +55,7 @@ func TestMain(m *testing.M) {
testApps = append(testApps, getTestApp(backend))
}
- comps := []kube.ComponentDescription{
- {
- Name: "sqlitebackend",
- TypeName: "workflowbackend.sqlite",
- MetaData: map[string]kube.MetadataValue{
- "connectionString": {Raw: `""`},
- },
- Scopes: []string{appNamePrefix + "-sqlite"},
- },
- }
+ comps := []kube.ComponentDescription{}
tr = runner.NewTestRunner("workflowsapp", testApps, comps, nil)
os.Exit(tr.Start(m))
|
mikeee/dapr
|
.github/scripts/version-skew-test-patches/e2e/release-1.13/dapr-sidecar-master/0001-remove-sqlite-backend.patch
|
patch
|
mit
| 921 |
diff --git a/tests/integration/suite/daprd/serviceinvocation/http/httpendpoints.go b/tests/integration/suite/daprd/serviceinvocation/http/httpendpoints.go
index 1bb56944f..18bb9c63a 100644
--- a/tests/integration/suite/daprd/serviceinvocation/http/httpendpoints.go
+++ b/tests/integration/suite/daprd/serviceinvocation/http/httpendpoints.go
@@ -209,7 +209,7 @@ func (h *httpendpoints) Run(t *testing.T, ctx context.Context) {
t.Run("bad PKI", func(t *testing.T) {
invokeTests(t, http.StatusInternalServerError, func(t *testing.T, body string) {
assert.Contains(t, body, `"errorCode":"ERR_DIRECT_INVOKE"`)
- assert.Contains(t, body, "tls: bad certificate")
+ assert.Contains(t, body, "remote error: tls: unknown certificate authority")
}, h.daprd2)
})
}
|
mikeee/dapr
|
.github/scripts/version-skew-test-patches/integration/release-1.12/0001-go-verison-tls-error-message.patch
|
patch
|
mit
| 772 |
diff --git a/tests/integration/suite/daprd/metadata/metadata.go b/tests/integration/suite/daprd/metadata/metadata.go
index 5e5a86452..0cccd8bd8 100644
--- a/tests/integration/suite/daprd/metadata/metadata.go
+++ b/tests/integration/suite/daprd/metadata/metadata.go
@@ -19,6 +19,7 @@ import (
"fmt"
"io"
"net/http"
+ "strings"
"testing"
"time"
@@ -82,11 +83,13 @@ func validateResponse(t *testing.T, appID string, appPort int, body io.Reader) {
require.NoError(t, err)
require.Equal(t, appID, bodyMap["id"])
- require.Equal(t, "edge", bodyMap["runtimeVersion"])
+ require.True(t, "edge" == bodyMap["runtimeVersion"].(string) ||
+ strings.HasPrefix(bodyMap["runtimeVersion"].(string), "1.12."))
extended, ok := bodyMap["extended"].(map[string]interface{})
require.True(t, ok)
- require.Equal(t, "edge", extended["daprRuntimeVersion"])
+ require.True(t, "edge" == extended["daprRuntimeVersion"].(string) ||
+ strings.HasPrefix(extended["daprRuntimeVersion"].(string), "1.12."))
appConnectionProperties, ok := bodyMap["appConnectionProperties"].(map[string]interface{})
require.True(t, ok)
|
mikeee/dapr
|
.github/scripts/version-skew-test-patches/integration/release-1.12/0002-match-metadata-verison-for-1.12.patch
|
patch
|
mit
| 1,118 |
diff --git a/tests/integration/suite/actors/healthz/healthz.go b/tests/integration/suite/actors/healthz/healthz.go
index 00142601d..528e91117 100644
--- a/tests/integration/suite/actors/healthz/healthz.go
+++ b/tests/integration/suite/actors/healthz/healthz.go
@@ -39,15 +39,17 @@ func init() {
// initerror tests that Daprd will block actor calls until actors have been
// initialized.
type initerror struct {
- daprd *daprd.Daprd
- place *placement.Placement
- configCalled chan struct{}
- blockConfig chan struct{}
+ daprd *daprd.Daprd
+ place *placement.Placement
+ configCalled chan struct{}
+ blockConfig chan struct{}
+ healthzCalled chan struct{}
}
func (i *initerror) Setup(t *testing.T) []framework.Option {
i.configCalled = make(chan struct{})
i.blockConfig = make(chan struct{})
+ i.healthzCalled = make(chan struct{})
handler := http.NewServeMux()
handler.HandleFunc("/dapr/config", func(w http.ResponseWriter, r *http.Request) {
@@ -55,6 +57,10 @@ func (i *initerror) Setup(t *testing.T) []framework.Option {
<-i.blockConfig
w.Write([]byte(`{"entities": ["myactortype"]}`))
})
+ handler.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ close(i.healthzCalled)
+ })
handler.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte(`OK`))
})
@@ -119,6 +125,12 @@ func (i *initerror) Run(t *testing.T, ctx context.Context) {
close(i.blockConfig)
+ select {
+ case <-i.healthzCalled:
+ case <-time.After(time.Second * 15):
+ t.Fatal("timed out waiting for healthz call")
+ }
+
req, err = http.NewRequestWithContext(ctx, http.MethodPost, daprdURL, nil)
require.NoError(t, err)
resp, err = client.Do(req)
diff --git a/tests/integration/suite/actors/http/ttl.go b/tests/integration/suite/actors/http/ttl.go
index 47dbd8ff6..5135d424c 100644
--- a/tests/integration/suite/actors/http/ttl.go
+++ b/tests/integration/suite/actors/http/ttl.go
@@ -21,6 +21,7 @@ import (
"path/filepath"
"strconv"
"strings"
+ "sync"
"testing"
"time"
@@ -40,11 +41,13 @@ func init() {
}
type ttl struct {
- daprd *daprd.Daprd
- place *placement.Placement
+ daprd *daprd.Daprd
+ place *placement.Placement
+ healthzCalled chan struct{}
}
func (l *ttl) Setup(t *testing.T) []framework.Option {
+ l.healthzCalled = make(chan struct{})
configFile := filepath.Join(t.TempDir(), "config.yaml")
require.NoError(t, os.WriteFile(configFile, []byte(`
apiVersion: dapr.io/v1alpha1
@@ -61,6 +64,13 @@ spec:
handler.HandleFunc("/dapr/config", func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte(`{"entities": ["myactortype"]}`))
})
+ var once sync.Once
+ handler.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ once.Do(func() {
+ close(l.healthzCalled)
+ })
+ })
handler.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte(`OK`))
})
@@ -93,6 +103,12 @@ func (l *ttl) Run(t *testing.T, ctx context.Context) {
l.place.WaitUntilRunning(t, ctx)
l.daprd.WaitUntilRunning(t, ctx)
+ select {
+ case <-l.healthzCalled:
+ case <-time.After(time.Second * 15):
+ t.Fatal("timed out waiting for healthz call")
+ }
+
client := util.HTTPClient(t)
daprdURL := "http://localhost:" + strconv.Itoa(l.daprd.HTTPPort())
diff --git a/tests/integration/suite/actors/reminders/rebalancing.go b/tests/integration/suite/actors/reminders/rebalancing.go
index d64e73457..53f8e4fa6 100644
--- a/tests/integration/suite/actors/reminders/rebalancing.go
+++ b/tests/integration/suite/actors/reminders/rebalancing.go
@@ -394,6 +394,7 @@ func (i *rebalancing) reportStatusToPlacement(ctx context.Context, stream placem
Port: 1234,
Entities: entities,
Id: "invalidapp",
+ ApiLevel: 10,
})
if err != nil {
return fmt.Errorf("failed to send message: %w", err)
diff --git a/tests/integration/suite/placement/quorum/insecure.go b/tests/integration/suite/placement/quorum/insecure.go
index d0531bed4..df526cb29 100644
--- a/tests/integration/suite/placement/quorum/insecure.go
+++ b/tests/integration/suite/placement/quorum/insecure.go
@@ -124,7 +124,10 @@ func (i *insecure) Run(t *testing.T, ctx context.Context) {
if err != nil {
return false
}
- err = stream.Send(new(v1pb.Host))
+ err = stream.Send(&v1pb.Host{
+ Id: "app-1",
+ ApiLevel: 10,
+ })
if err != nil {
return false
}
@@ -133,7 +136,7 @@ func (i *insecure) Run(t *testing.T, ctx context.Context) {
return false
}
return true
- }, time.Second*10, time.Millisecond*100)
+ }, time.Second*30, time.Millisecond*100)
err = stream.Send(&v1pb.Host{
Name: "app-1",
diff --git a/tests/integration/suite/placement/quorum/jwks.go b/tests/integration/suite/placement/quorum/jwks.go
index 2d555299c..4ef55eb5c 100644
--- a/tests/integration/suite/placement/quorum/jwks.go
+++ b/tests/integration/suite/placement/quorum/jwks.go
@@ -169,7 +169,10 @@ func (j *jwks) Run(t *testing.T, ctx context.Context) {
if err != nil {
return false
}
- err = stream.Send(new(v1pb.Host))
+ err = stream.Send(&v1pb.Host{
+ Id: "app-1",
+ ApiLevel: 10,
+ })
if err != nil {
return false
}
|
mikeee/dapr
|
.github/scripts/version-skew-test-patches/integration/release-1.12/0003-fix-actor-backwards-compat.patch
|
patch
|
mit
| 5,286 |
diff --git a/tests/integration/suite/daprd/workflow/backend/singular/sqlite.go b/tests/integration/suite/daprd/workflow/backend/singular/sqlite.go
deleted file mode 100644
index f08890f25..000000000
--- a/tests/integration/suite/daprd/workflow/backend/singular/sqlite.go
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
-Copyright 2023 The Dapr Authors
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package singular
-
-import (
- "context"
- "testing"
-
- "github.com/stretchr/testify/require"
-
- "github.com/dapr/dapr/tests/integration/framework"
- "github.com/dapr/dapr/tests/integration/framework/process/daprd"
- "github.com/dapr/dapr/tests/integration/framework/process/exec"
- "github.com/dapr/dapr/tests/integration/framework/process/logline"
- "github.com/dapr/dapr/tests/integration/suite"
-)
-
-func init() {
- suite.Register(new(sqlite))
-}
-
-// sqlite ensures that 2 sqlite workflow backends cannot be loaded at the same time.
-type sqlite struct {
- logline *logline.LogLine
- daprd *daprd.Daprd
-}
-
-func (s *sqlite) Setup(t *testing.T) []framework.Option {
- s.logline = logline.New(t,
- logline.WithStdoutLineContains(
- "Fatal error from runtime: process component wfbackend2 error: [INIT_COMPONENT_FAILURE]: initialization error occurred for wfbackend2 (workflowbackend.sqlite/v1): cannot create more than one workflow backend component",
- ),
- )
-
- s.daprd = daprd.New(t,
- daprd.WithResourceFiles(`
-apiVersion: dapr.io/v1alpha1
-kind: Component
-metadata:
- name: wfbackend1
-spec:
- type: workflowbackend.sqlite
- version: v1
----
-apiVersion: dapr.io/v1alpha1
-kind: Component
-metadata:
- name: wfbackend2
-spec:
- type: workflowbackend.sqlite
- version: v1
-`),
- daprd.WithExecOptions(
- exec.WithExitCode(1),
- exec.WithRunError(func(t *testing.T, err error) {
- require.ErrorContains(t, err, "exit status 1")
- }),
- exec.WithStdout(s.logline.Stdout()),
- ),
- )
-
- return []framework.Option{
- framework.WithProcesses(s.logline, s.daprd),
- }
-}
-
-func (s *sqlite) Run(t *testing.T, ctx context.Context) {
- s.logline.EventuallyFoundAll(t)
-}
diff --git a/tests/integration/suite/daprd/workflow/backend/singular/sqliteactors.go b/tests/integration/suite/daprd/workflow/backend/singular/sqliteactors.go
deleted file mode 100644
index 608429f32..000000000
--- a/tests/integration/suite/daprd/workflow/backend/singular/sqliteactors.go
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
-Copyright 2023 The Dapr Authors
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package singular
-
-import (
- "context"
- "testing"
-
- "github.com/stretchr/testify/require"
-
- "github.com/dapr/dapr/tests/integration/framework"
- "github.com/dapr/dapr/tests/integration/framework/process/daprd"
- "github.com/dapr/dapr/tests/integration/framework/process/exec"
- "github.com/dapr/dapr/tests/integration/framework/process/logline"
- "github.com/dapr/dapr/tests/integration/suite"
-)
-
-func init() {
- suite.Register(new(sqliteactors))
-}
-
-// sqliteactors ensures that 2 workflow backends of different type cannot be
-// loaded at the same time.
-type sqliteactors struct {
- logline *logline.LogLine
- daprd *daprd.Daprd
-}
-
-func (s *sqliteactors) Setup(t *testing.T) []framework.Option {
- s.logline = logline.New(t,
- logline.WithStdoutLineContains(
- "Fatal error from runtime: process component wfbackend2 error: [INIT_COMPONENT_FAILURE]: initialization error occurred for wfbackend2 (workflowbackend.actors/v1): cannot create more than one workflow backend component",
- ),
- )
-
- s.daprd = daprd.New(t,
- daprd.WithResourceFiles(`
-apiVersion: dapr.io/v1alpha1
-kind: Component
-metadata:
- name: wfbackend1
-spec:
- type: workflowbackend.sqlite
- version: v1
----
-apiVersion: dapr.io/v1alpha1
-kind: Component
-metadata:
- name: wfbackend2
-spec:
- type: workflowbackend.actors
- version: v1
-`),
- daprd.WithExecOptions(
- exec.WithExitCode(1),
- exec.WithRunError(func(t *testing.T, err error) {
- require.ErrorContains(t, err, "exit status 1")
- }),
- exec.WithStdout(s.logline.Stdout()),
- ),
- )
-
- return []framework.Option{
- framework.WithProcesses(s.logline, s.daprd),
- }
-}
-
-func (s *sqliteactors) Run(t *testing.T, ctx context.Context) {
- s.logline.EventuallyFoundAll(t)
-}
diff --git a/tests/integration/suite/daprd/workflow/backend/sqlite.go b/tests/integration/suite/daprd/workflow/backend/sqlite.go
deleted file mode 100644
index a8d800b4a..000000000
--- a/tests/integration/suite/daprd/workflow/backend/sqlite.go
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
-Copyright 2024 The Dapr Authors
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package backend
-
-import (
- "context"
- "fmt"
- "path/filepath"
- "testing"
-
- "github.com/microsoft/durabletask-go/api"
- "github.com/microsoft/durabletask-go/backend"
- "github.com/microsoft/durabletask-go/client"
- "github.com/microsoft/durabletask-go/task"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- rtv1 "github.com/dapr/dapr/pkg/proto/runtime/v1"
- "github.com/dapr/dapr/tests/integration/framework"
- "github.com/dapr/dapr/tests/integration/framework/process/daprd"
- "github.com/dapr/dapr/tests/integration/framework/util"
- "github.com/dapr/dapr/tests/integration/suite"
-)
-
-func init() {
- suite.Register(new(sqlite))
-}
-
-type sqlite struct {
- daprd *daprd.Daprd
- dir string
-}
-
-func (s *sqlite) Setup(t *testing.T) []framework.Option {
- s.dir = filepath.Join(t.TempDir(), "wf.db")
- s.daprd = daprd.New(t,
- daprd.WithResourceFiles(fmt.Sprintf(`
-apiVersion: dapr.io/v1alpha1
-kind: Component
-metadata:
- name: wfbackend
-spec:
- type: workflowbackend.sqlite
- version: v1
- metadata:
- - name: connectionString
- value: %s
-`, s.dir)),
- )
-
- return []framework.Option{
- framework.WithProcesses(s.daprd),
- }
-}
-
-func (s *sqlite) Run(t *testing.T, ctx context.Context) {
- s.daprd.WaitUntilRunning(t, ctx)
-
- comps := util.GetMetaComponents(t, ctx, util.HTTPClient(t), s.daprd.HTTPPort())
- require.ElementsMatch(t, []*rtv1.RegisteredComponents{
- {Name: "wfbackend", Type: "workflowbackend.sqlite", Version: "v1"},
- }, comps)
-
- r := task.NewTaskRegistry()
- r.AddOrchestratorN("SingleActivity", func(ctx *task.OrchestrationContext) (any, error) {
- var input string
- if err := ctx.GetInput(&input); err != nil {
- return nil, err
- }
- var output string
- err := ctx.CallActivity("SayHello", task.WithActivityInput(input)).Await(&output)
- return output, err
- })
- r.AddActivityN("SayHello", func(ctx task.ActivityContext) (any, error) {
- var name string
- if err := ctx.GetInput(&name); err != nil {
- return nil, err
- }
- return fmt.Sprintf("Hello, %s!", name), nil
- })
- backendClient := client.NewTaskHubGrpcClient(s.daprd.GRPCConn(t, ctx), backend.DefaultLogger())
- require.NoError(t, backendClient.StartWorkItemListener(ctx, r))
-
- resp, err := s.daprd.GRPCClient(t, ctx).
- StartWorkflowBeta1(ctx, &rtv1.StartWorkflowRequest{
- WorkflowComponent: "dapr",
- WorkflowName: "SingleActivity",
- Input: []byte(`"Dapr"`),
- InstanceId: "myinstance",
- })
- require.NoError(t, err)
-
- id := api.InstanceID(resp.GetInstanceId())
- metadata, err := backendClient.WaitForOrchestrationCompletion(ctx, id, api.WithFetchPayloads(true))
- require.NoError(t, err)
- assert.True(t, metadata.IsComplete())
- assert.Equal(t, `"Hello, Dapr!"`, metadata.SerializedOutput)
-}
|
mikeee/dapr
|
.github/scripts/version-skew-test-patches/integration/release-1.13/control-plane-master/0001-remove-workflowbacked-sqlite.patch
|
patch
|
mit
| 8,869 |
diff --git a/tests/integration/suite/daprd/metadata/metadata.go b/tests/integration/suite/daprd/metadata/metadata.go
index 419a7ceed..d8605df75 100644
--- a/tests/integration/suite/daprd/metadata/metadata.go
+++ b/tests/integration/suite/daprd/metadata/metadata.go
@@ -100,11 +100,11 @@ func validateResponse(t *testing.T, appID string, appPort int, body io.Reader) {
require.NoError(t, err)
require.Equal(t, appID, bodyMap["id"])
- require.Equal(t, "edge", bodyMap["runtimeVersion"])
+ require.Equal(t, "1.13.0", bodyMap["runtimeVersion"])
extended, ok := bodyMap["extended"].(map[string]interface{})
require.True(t, ok)
- require.Equal(t, "edge", extended["daprRuntimeVersion"])
+ require.Equal(t, "1.13.0", extended["daprRuntimeVersion"])
appConnectionProperties, ok := bodyMap["appConnectionProperties"].(map[string]interface{})
require.True(t, ok)
|
mikeee/dapr
|
.github/scripts/version-skew-test-patches/integration/release-1.13/control-plane-master/0002-daprd-metadata-version.patch
|
patch
|
mit
| 871 |
diff --git a/tests/integration/framework/process/daprd/daprd.go b/tests/integration/framework/process/daprd/daprd.go
index 8e6960f7c..06c9c9185 100644
--- a/tests/integration/framework/process/daprd/daprd.go
+++ b/tests/integration/framework/process/daprd/daprd.go
@@ -140,6 +140,9 @@ func New(t *testing.T, fopts ...Option) *Daprd {
if opts.blockShutdownDuration != nil {
args = append(args, "--dapr-block-shutdown-duration="+*opts.blockShutdownDuration)
}
+ if opts.controlPlaneTrustDomain != nil {
+ args = append(args, "--control-plane-trust-domain="+*opts.controlPlaneTrustDomain)
+ }
ns := "default"
if opts.namespace != nil {
diff --git a/tests/integration/framework/process/daprd/options.go b/tests/integration/framework/process/daprd/options.go
index d6b39a535..739d0481c 100644
--- a/tests/integration/framework/process/daprd/options.go
+++ b/tests/integration/framework/process/daprd/options.go
@@ -55,6 +55,7 @@ type options struct {
disableK8sSecretStore *bool
gracefulShutdownSeconds *int
blockShutdownDuration *string
+ controlPlaneTrustDomain *string
}
func WithExecOptions(execOptions ...exec.Option) Option {
@@ -246,3 +247,9 @@ func WithDaprBlockShutdownDuration(duration string) Option {
o.blockShutdownDuration = &duration
}
}
+
+func WithControlPlaneTrustDomain(trustDomain string) Option {
+ return func(o *options) {
+ o.controlPlaneTrustDomain = &trustDomain
+ }
+}
diff --git a/tests/integration/suite/daprd/hotreload/operator/informer.go b/tests/integration/suite/daprd/hotreload/operator/informer.go
index 1af786e19..b8b13652f 100644
--- a/tests/integration/suite/daprd/hotreload/operator/informer.go
+++ b/tests/integration/suite/daprd/hotreload/operator/informer.go
@@ -105,6 +105,7 @@ func (i *informer) Setup(t *testing.T) []framework.Option {
daprd.WithExecOptions(exec.WithEnvVars(t,
"DAPR_TRUST_ANCHORS", string(sentry.CABundle().TrustAnchors),
)),
+ daprd.WithControlPlaneTrustDomain("integration.test.dapr.io"),
)
return []framework.Option{
diff --git a/tests/integration/suite/healthz/operator.go b/tests/integration/suite/healthz/operator.go
index bcb188e10..7daf69f9e 100644
--- a/tests/integration/suite/healthz/operator.go
+++ b/tests/integration/suite/healthz/operator.go
@@ -43,7 +43,10 @@ type operator struct {
}
func (o *operator) Setup(t *testing.T) []framework.Option {
- o.sentry = procsentry.New(t, procsentry.WithTrustDomain("integration.test.dapr.io"))
+ o.sentry = procsentry.New(t,
+ procsentry.WithTrustDomain("integration.test.dapr.io"),
+ procsentry.WithNamespace("dapr-system"),
+ )
kubeAPI := kubernetes.New(t, kubernetes.WithBaseOperatorAPI(t,
spiffeid.RequireTrustDomainFromString("integration.test.dapr.io"),
diff --git a/tests/integration/suite/ports/operator.go b/tests/integration/suite/ports/operator.go
index 093fe0bd0..a9643eee0 100644
--- a/tests/integration/suite/ports/operator.go
+++ b/tests/integration/suite/ports/operator.go
@@ -40,7 +40,10 @@ type operator struct {
}
func (o *operator) Setup(t *testing.T) []framework.Option {
- sentry := procsentry.New(t, procsentry.WithTrustDomain("integration.test.dapr.io"))
+ sentry := procsentry.New(t,
+ procsentry.WithTrustDomain("integration.test.dapr.io"),
+ procsentry.WithNamespace("dapr-system"),
+ )
kubeAPI := kubernetes.New(t, kubernetes.WithBaseOperatorAPI(t,
spiffeid.RequireTrustDomainFromString("integration.test.dapr.io"),
diff --git a/tests/integration/suite/sentry/validator/insecure/insecure.go b/tests/integration/suite/sentry/validator/insecure/insecure.go
index 53152d78b..0b4334805 100644
--- a/tests/integration/suite/sentry/validator/insecure/insecure.go
+++ b/tests/integration/suite/sentry/validator/insecure/insecure.go
@@ -59,7 +59,6 @@ func (m *insecure) Run(t *testing.T, parentCtx context.Context) {
defaultNamespace = "default"
)
defaultAppSPIFFEID := fmt.Sprintf("spiffe://public/ns/%s/%s", defaultNamespace, defaultAppID)
- defaultAppDNSName := fmt.Sprintf("%s.%s.svc.cluster.local", defaultAppID, defaultNamespace)
m.proc.WaitUntilRunning(t, parentCtx)
@@ -102,7 +101,7 @@ func (m *insecure) Run(t *testing.T, parentCtx context.Context) {
require.NoError(t, err)
require.NotEmpty(t, res.GetWorkloadCertificate())
- validateCertificateResponse(t, res, m.proc.CABundle(), defaultAppSPIFFEID, defaultAppDNSName)
+ validateCertificateResponse(t, res, m.proc.CABundle(), defaultAppSPIFFEID)
})
t.Run("insecure validator is the default", func(t *testing.T) {
@@ -117,7 +116,7 @@ func (m *insecure) Run(t *testing.T, parentCtx context.Context) {
require.NoError(t, err)
require.NotEmpty(t, res.GetWorkloadCertificate())
- validateCertificateResponse(t, res, m.proc.CABundle(), defaultAppSPIFFEID, defaultAppDNSName)
+ validateCertificateResponse(t, res, m.proc.CABundle(), defaultAppSPIFFEID)
})
t.Run("fails with missing CSR", func(t *testing.T) {
@@ -172,7 +171,7 @@ func (m *insecure) Run(t *testing.T, parentCtx context.Context) {
})
}
-func validateCertificateResponse(t *testing.T, res *sentrypbv1.SignCertificateResponse, sentryBundle ca.Bundle, expectSPIFFEID, expectDNSName string) {
+func validateCertificateResponse(t *testing.T, res *sentrypbv1.SignCertificateResponse, sentryBundle ca.Bundle, expectSPIFFEID string) {
t.Helper()
require.NotEmpty(t, res.GetWorkloadCertificate())
@@ -193,7 +192,7 @@ func validateCertificateResponse(t *testing.T, res *sentrypbv1.SignCertificateRe
certURIs[i] = v.String()
}
assert.Equal(t, []string{expectSPIFFEID}, certURIs)
- assert.Equal(t, []string{expectDNSName}, cert.DNSNames)
+ assert.Empty(t, cert.DNSNames)
assert.Contains(t, cert.ExtKeyUsage, x509.ExtKeyUsageServerAuth)
assert.Contains(t, cert.ExtKeyUsage, x509.ExtKeyUsageClientAuth)
diff --git a/tests/integration/suite/sentry/validator/jwks/shared.go b/tests/integration/suite/sentry/validator/jwks/shared.go
index 0a5c4e65e..2d355fc17 100644
--- a/tests/integration/suite/sentry/validator/jwks/shared.go
+++ b/tests/integration/suite/sentry/validator/jwks/shared.go
@@ -42,7 +42,6 @@ func (s shared) Run(t *testing.T, parentCtx context.Context) {
defaultNamespace = "default"
)
defaultAppSPIFFEID := fmt.Sprintf("spiffe://public/ns/%s/%s", defaultNamespace, defaultAppID)
- defaultAppDNSName := fmt.Sprintf("%s.%s.svc.cluster.local", defaultAppID, defaultNamespace)
s.proc.WaitUntilRunning(t, parentCtx)
@@ -124,7 +123,7 @@ func (s shared) Run(t *testing.T, parentCtx context.Context) {
require.NoError(t, err)
require.NotEmpty(t, res.GetWorkloadCertificate())
- validateCertificateResponse(t, res, s.proc.CABundle(), defaultAppSPIFFEID, defaultAppDNSName)
+ validateCertificateResponse(t, res, s.proc.CABundle(), defaultAppSPIFFEID)
})
testWithTokenError := func(fn func(builder *jwt.Builder), assertErr func(t *testing.T, grpcStatus *status.Status)) func(t *testing.T) {
diff --git a/tests/integration/suite/sentry/validator/jwks/utils.go b/tests/integration/suite/sentry/validator/jwks/utils.go
index 74eb4a09b..14114a56e 100644
--- a/tests/integration/suite/sentry/validator/jwks/utils.go
+++ b/tests/integration/suite/sentry/validator/jwks/utils.go
@@ -106,7 +106,7 @@ func signJWT(builder *jwt.Builder) ([]byte, error) {
return jwt.Sign(token, jwt.WithKey(jwa.ES256, jwtSigningKeyPriv))
}
-func validateCertificateResponse(t *testing.T, res *sentrypbv1.SignCertificateResponse, sentryBundle ca.Bundle, expectSPIFFEID, expectDNSName string) {
+func validateCertificateResponse(t *testing.T, res *sentrypbv1.SignCertificateResponse, sentryBundle ca.Bundle, expectSPIFFEID string) {
t.Helper()
require.NotEmpty(t, res.GetWorkloadCertificate())
@@ -128,7 +128,7 @@ func validateCertificateResponse(t *testing.T, res *sentrypbv1.SignCertificateRe
certURIs[i] = v.String()
}
assert.Equal(t, []string{expectSPIFFEID}, certURIs)
- assert.Equal(t, []string{expectDNSName}, cert.DNSNames)
+ assert.Empty(t, cert.DNSNames)
assert.Contains(t, cert.ExtKeyUsage, x509.ExtKeyUsageServerAuth)
assert.Contains(t, cert.ExtKeyUsage, x509.ExtKeyUsageClientAuth)
}
diff --git a/tests/integration/suite/sentry/validator/kubernetes/common.go b/tests/integration/suite/sentry/validator/kubernetes/common.go
index e93e2f4dd..4e0f7dd6a 100644
--- a/tests/integration/suite/sentry/validator/kubernetes/common.go
+++ b/tests/integration/suite/sentry/validator/kubernetes/common.go
@@ -30,7 +30,14 @@ import (
prockube "github.com/dapr/dapr/tests/integration/framework/process/kubernetes"
)
-func kubeAPI(t *testing.T, bundle ca.Bundle, namespace, serviceaccount string) *prockube.Kubernetes {
+type kubeAPIOptions struct {
+ bundle ca.Bundle
+ namespace string
+ serviceAccount string
+ appID string
+}
+
+func kubeAPI(t *testing.T, opts kubeAPIOptions) *prockube.Kubernetes {
t.Helper()
return prockube.New(t,
@@ -46,15 +53,15 @@ func kubeAPI(t *testing.T, bundle ca.Bundle, namespace, serviceaccount string) *
TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "Secret"},
ObjectMeta: metav1.ObjectMeta{Namespace: "sentrynamespace", Name: "dapr-trust-bundle"},
Data: map[string][]byte{
- "ca.crt": bundle.TrustAnchors,
- "issuer.crt": bundle.IssChainPEM,
- "issuer.key": bundle.IssKeyPEM,
+ "ca.crt": opts.bundle.TrustAnchors,
+ "issuer.crt": opts.bundle.IssChainPEM,
+ "issuer.key": opts.bundle.IssKeyPEM,
},
}),
prockube.WithConfigMapGet(t, &corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
ObjectMeta: metav1.ObjectMeta{Namespace: "sentrynamespace", Name: "dapr-trust-bundle"},
- Data: map[string]string{"ca.crt": string(bundle.TrustAnchors)},
+ Data: map[string]string{"ca.crt": string(opts.bundle.TrustAnchors)},
}),
prockube.WithClusterPodList(t, &corev1.PodList{
TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "PodList"},
@@ -62,10 +69,10 @@ func kubeAPI(t *testing.T, bundle ca.Bundle, namespace, serviceaccount string) *
{
TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "Pod"},
ObjectMeta: metav1.ObjectMeta{
- Namespace: namespace, Name: "mypod",
- Annotations: map[string]string{"dapr.io/app-id": "myappid"},
+ Namespace: opts.namespace, Name: "mypod",
+ Annotations: map[string]string{"dapr.io/app-id": opts.appID},
},
- Spec: corev1.PodSpec{ServiceAccountName: serviceaccount},
+ Spec: corev1.PodSpec{ServiceAccountName: opts.serviceAccount},
},
},
}),
@@ -81,7 +88,7 @@ func kubeAPI(t *testing.T, bundle ca.Bundle, namespace, serviceaccount string) *
resp, err := json.Marshal(&authapi.TokenReview{
Status: authapi.TokenReviewStatus{
Authenticated: true,
- User: authapi.UserInfo{Username: fmt.Sprintf("system:serviceaccount:%s:%s", namespace, serviceaccount)},
+ User: authapi.UserInfo{Username: fmt.Sprintf("system:serviceaccount:%s:%s", opts.namespace, opts.serviceAccount)},
},
})
require.NoError(t, err)
diff --git a/tests/integration/suite/sentry/validator/kubernetes/kubernetes.go b/tests/integration/suite/sentry/validator/kubernetes/kubernetes.go
index ce272e4ea..fa917a424 100644
--- a/tests/integration/suite/sentry/validator/kubernetes/kubernetes.go
+++ b/tests/integration/suite/sentry/validator/kubernetes/kubernetes.go
@@ -50,7 +50,12 @@ func (k *kubernetes) Setup(t *testing.T) []framework.Option {
bundle, err := ca.GenerateBundle(rootKey, "integration.test.dapr.io", time.Second*5, nil)
require.NoError(t, err)
- kubeAPI := kubeAPI(t, bundle, "mynamespace", "myserviceaccount")
+ kubeAPI := kubeAPI(t, kubeAPIOptions{
+ bundle: bundle,
+ namespace: "mynamespace",
+ serviceAccount: "myserviceaccount",
+ appID: "myappid",
+ })
k.sentry = sentry.New(t,
sentry.WithWriteConfig(false),
diff --git a/tests/integration/suite/sentry/validator/kubernetes/longname.go b/tests/integration/suite/sentry/validator/kubernetes/longname.go
index 3a4c4f180..37f449e16 100644
--- a/tests/integration/suite/sentry/validator/kubernetes/longname.go
+++ b/tests/integration/suite/sentry/validator/kubernetes/longname.go
@@ -24,12 +24,16 @@ import (
"testing"
"time"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
sentrypbv1 "github.com/dapr/dapr/pkg/proto/sentry/v1"
"github.com/dapr/dapr/pkg/sentry/server/ca"
"github.com/dapr/dapr/tests/integration/framework"
"github.com/dapr/dapr/tests/integration/framework/process/exec"
+ prockube "github.com/dapr/dapr/tests/integration/framework/process/kubernetes"
"github.com/dapr/dapr/tests/integration/framework/process/sentry"
"github.com/dapr/dapr/tests/integration/suite"
)
@@ -38,10 +42,13 @@ func init() {
suite.Register(new(longname))
}
-// longname tests that sentry with authenticate requests with legacy identities
-// that use namespace + serviceaccount names longer than 253 characters.
+// longname tests that sentry will _not_ authenticate requests with legacy
+// identities that use namespace + serviceaccount names longer than 253
+// characters, or app IDs longer than 64 characters.
type longname struct {
- sentry *sentry.Sentry
+ sentry1 *sentry.Sentry
+ sentry2 *sentry.Sentry
+ sentry3 *sentry.Sentry
}
func (l *longname) Setup(t *testing.T) []framework.Option {
@@ -50,43 +57,98 @@ func (l *longname) Setup(t *testing.T) []framework.Option {
bundle, err := ca.GenerateBundle(rootKey, "integration.test.dapr.io", time.Second*5, nil)
require.NoError(t, err)
- kubeAPI := kubeAPI(t, bundle, strings.Repeat("n", 253), strings.Repeat("s", 253))
+ kubeAPI1 := kubeAPI(t, kubeAPIOptions{
+ bundle: bundle,
+ namespace: strings.Repeat("n", 253),
+ serviceAccount: strings.Repeat("s", 253),
+ appID: "myapp",
+ })
+
+ kubeAPI2 := kubeAPI(t, kubeAPIOptions{
+ bundle: bundle,
+ namespace: strings.Repeat("n", 253),
+ serviceAccount: strings.Repeat("s", 253),
+ appID: strings.Repeat("a", 65),
+ })
- l.sentry = sentry.New(t,
- sentry.WithWriteConfig(false),
- sentry.WithKubeconfig(kubeAPI.KubeconfigPath(t)),
- sentry.WithNamespace("sentrynamespace"),
- sentry.WithExecOptions(
- // Enable Kubernetes validator.
- exec.WithEnvVars(t, "KUBERNETES_SERVICE_HOST", "anything"),
- ),
- sentry.WithCABundle(bundle),
- sentry.WithTrustDomain("integration.test.dapr.io"),
- )
+ kubeAPI3 := kubeAPI(t, kubeAPIOptions{
+ bundle: bundle,
+ namespace: strings.Repeat("n", 253),
+ serviceAccount: strings.Repeat("s", 253),
+ appID: strings.Repeat("a", 64),
+ })
+
+ sentryOpts := func(kubeAPI *prockube.Kubernetes) *sentry.Sentry {
+ return sentry.New(t,
+ sentry.WithWriteConfig(false),
+ sentry.WithKubeconfig(kubeAPI.KubeconfigPath(t)),
+ sentry.WithExecOptions(
+ // Enable Kubernetes validator.
+ exec.WithEnvVars(t, "KUBERNETES_SERVICE_HOST", "anything"),
+ exec.WithEnvVars(t, "NAMESPACE", "sentrynamespace"),
+ ),
+ sentry.WithCABundle(bundle),
+ sentry.WithTrustDomain("integration.test.dapr.io"),
+ )
+ }
+
+ l.sentry1 = sentryOpts(kubeAPI1)
+ l.sentry2 = sentryOpts(kubeAPI2)
+ l.sentry3 = sentryOpts(kubeAPI3)
return []framework.Option{
- framework.WithProcesses(l.sentry, kubeAPI),
+ framework.WithProcesses(kubeAPI1, kubeAPI2, kubeAPI3, l.sentry1, l.sentry2, l.sentry3),
}
}
func (l *longname) Run(t *testing.T, ctx context.Context) {
- l.sentry.WaitUntilRunning(t, ctx)
+ l.sentry1.WaitUntilRunning(t, ctx)
+ l.sentry2.WaitUntilRunning(t, ctx)
+ l.sentry3.WaitUntilRunning(t, ctx)
- conn := l.sentry.DialGRPC(t, ctx, "spiffe://integration.test.dapr.io/ns/sentrynamespace/dapr-sentry")
- client := sentrypbv1.NewCAClient(conn)
+ conn1 := l.sentry1.DialGRPC(t, ctx, "spiffe://integration.test.dapr.io/ns/sentrynamespace/dapr-sentry")
+ client1 := sentrypbv1.NewCAClient(conn1)
pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
require.NoError(t, err)
csrDer, err := x509.CreateCertificateRequest(rand.Reader, new(x509.CertificateRequest), pk)
require.NoError(t, err)
- resp, err := client.SignCertificate(ctx, &sentrypbv1.SignCertificateRequest{
+ resp, err := client1.SignCertificate(ctx, &sentrypbv1.SignCertificateRequest{
Id: strings.Repeat("n", 253) + ":" + strings.Repeat("s", 253),
Namespace: strings.Repeat("n", 253),
CertificateSigningRequest: pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE REQUEST", Bytes: csrDer}),
TokenValidator: sentrypbv1.SignCertificateRequest_KUBERNETES,
Token: `{"kubernetes.io":{"pod":{"name":"mypod"}}}`,
})
+ assert.Nil(t, resp)
+ require.ErrorContains(t, err, "app ID must be 64 characters or less")
+ assert.Equal(t, codes.PermissionDenied, status.Code(err))
+
+ conn2 := l.sentry2.DialGRPC(t, ctx, "spiffe://integration.test.dapr.io/ns/sentrynamespace/dapr-sentry")
+ client2 := sentrypbv1.NewCAClient(conn2)
+
+ resp, err = client2.SignCertificate(ctx, &sentrypbv1.SignCertificateRequest{
+ Id: strings.Repeat("a", 65),
+ Namespace: strings.Repeat("n", 253),
+ CertificateSigningRequest: pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE REQUEST", Bytes: csrDer}),
+ TokenValidator: sentrypbv1.SignCertificateRequest_KUBERNETES,
+ Token: `{"kubernetes.io":{"pod":{"name":"mypod"}}}`,
+ })
+ assert.Nil(t, resp)
+ require.ErrorContains(t, err, "app ID must be 64 characters or less")
+ assert.Equal(t, codes.PermissionDenied, status.Code(err))
+
+ conn3 := l.sentry3.DialGRPC(t, ctx, "spiffe://integration.test.dapr.io/ns/sentrynamespace/dapr-sentry")
+ client3 := sentrypbv1.NewCAClient(conn3)
+
+ resp, err = client3.SignCertificate(ctx, &sentrypbv1.SignCertificateRequest{
+ Id: strings.Repeat("a", 64),
+ Namespace: strings.Repeat("n", 253),
+ CertificateSigningRequest: pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE REQUEST", Bytes: csrDer}),
+ TokenValidator: sentrypbv1.SignCertificateRequest_KUBERNETES,
+ Token: `{"kubernetes.io":{"pod":{"name":"mypod"}}}`,
+ })
require.NoError(t, err)
require.NotEmpty(t, resp.GetWorkloadCertificate())
}
|
mikeee/dapr
|
.github/scripts/version-skew-test-patches/integration/release-1.13/control-plane-master/0003-daprd-control-plane-trust-domain.patch
|
patch
|
mit
| 18,349 |
diff --git a/tests/integration/suite/daprd/metrics/httpserver_defaultcardinality.go b/tests/integration/suite/daprd/metrics/httpserver_defaultcardinality.go
index e72854917..33c9bcdc7 100644
--- a/tests/integration/suite/daprd/metrics/httpserver_defaultcardinality.go
+++ b/tests/integration/suite/daprd/metrics/httpserver_defaultcardinality.go
@@ -56,7 +56,7 @@ func (m *httpServerDefaultCardinality) Run(t *testing.T, ctx context.Context) {
// Verify metrics
metrics := m.getMetrics(t, ctx)
- assert.Equal(t, 1, int(metrics["dapr_http_server_request_count|app_id:myapp|method:GET|path:/v1.0/invoke/myapp/method/hi|status:200"]))
+ assert.Equal(t, 1, int(metrics["dapr_http_server_request_count|app_id:myapp|method:InvokeService/myapp|status:200"]))
})
t.Run("state stores", func(t *testing.T) {
@@ -77,7 +77,7 @@ func (m *httpServerDefaultCardinality) Run(t *testing.T, ctx context.Context) {
// Verify metrics
metrics := m.getMetrics(t, ctx)
- assert.Equal(t, 1, int(metrics["dapr_http_server_request_count|app_id:myapp|method:POST|path:/v1.0/state/mystore|status:204"]))
- assert.Equal(t, 1, int(metrics["dapr_http_server_request_count|app_id:myapp|method:GET|path:/v1.0/state/mystore|status:200"]))
+ assert.Equal(t, 1, int(metrics["dapr_http_server_request_count|app_id:myapp|method:SaveState|status:204"]))
+ assert.Equal(t, 1, int(metrics["dapr_http_server_request_count|app_id:myapp|method:GetState|status:200"]))
})
}
|
mikeee/dapr
|
.github/scripts/version-skew-test-patches/integration/release-1.13/dapr-sidecar-master/0001-default-cardinality-switch.patch
|
patch
|
mit
| 1,459 |
diff --git a/tests/integration/suite/daprd/workflow/backend/singular/sqlite.go b/tests/integration/suite/daprd/workflow/backend/singular/sqlite.go
deleted file mode 100644
index f08890f25..000000000
--- a/tests/integration/suite/daprd/workflow/backend/singular/sqlite.go
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
-Copyright 2023 The Dapr Authors
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package singular
-
-import (
- "context"
- "testing"
-
- "github.com/stretchr/testify/require"
-
- "github.com/dapr/dapr/tests/integration/framework"
- "github.com/dapr/dapr/tests/integration/framework/process/daprd"
- "github.com/dapr/dapr/tests/integration/framework/process/exec"
- "github.com/dapr/dapr/tests/integration/framework/process/logline"
- "github.com/dapr/dapr/tests/integration/suite"
-)
-
-func init() {
- suite.Register(new(sqlite))
-}
-
-// sqlite ensures that 2 sqlite workflow backends cannot be loaded at the same time.
-type sqlite struct {
- logline *logline.LogLine
- daprd *daprd.Daprd
-}
-
-func (s *sqlite) Setup(t *testing.T) []framework.Option {
- s.logline = logline.New(t,
- logline.WithStdoutLineContains(
- "Fatal error from runtime: process component wfbackend2 error: [INIT_COMPONENT_FAILURE]: initialization error occurred for wfbackend2 (workflowbackend.sqlite/v1): cannot create more than one workflow backend component",
- ),
- )
-
- s.daprd = daprd.New(t,
- daprd.WithResourceFiles(`
-apiVersion: dapr.io/v1alpha1
-kind: Component
-metadata:
- name: wfbackend1
-spec:
- type: workflowbackend.sqlite
- version: v1
----
-apiVersion: dapr.io/v1alpha1
-kind: Component
-metadata:
- name: wfbackend2
-spec:
- type: workflowbackend.sqlite
- version: v1
-`),
- daprd.WithExecOptions(
- exec.WithExitCode(1),
- exec.WithRunError(func(t *testing.T, err error) {
- require.ErrorContains(t, err, "exit status 1")
- }),
- exec.WithStdout(s.logline.Stdout()),
- ),
- )
-
- return []framework.Option{
- framework.WithProcesses(s.logline, s.daprd),
- }
-}
-
-func (s *sqlite) Run(t *testing.T, ctx context.Context) {
- s.logline.EventuallyFoundAll(t)
-}
diff --git a/tests/integration/suite/daprd/workflow/backend/singular/sqliteactors.go b/tests/integration/suite/daprd/workflow/backend/singular/sqliteactors.go
deleted file mode 100644
index 608429f32..000000000
--- a/tests/integration/suite/daprd/workflow/backend/singular/sqliteactors.go
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
-Copyright 2023 The Dapr Authors
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package singular
-
-import (
- "context"
- "testing"
-
- "github.com/stretchr/testify/require"
-
- "github.com/dapr/dapr/tests/integration/framework"
- "github.com/dapr/dapr/tests/integration/framework/process/daprd"
- "github.com/dapr/dapr/tests/integration/framework/process/exec"
- "github.com/dapr/dapr/tests/integration/framework/process/logline"
- "github.com/dapr/dapr/tests/integration/suite"
-)
-
-func init() {
- suite.Register(new(sqliteactors))
-}
-
-// sqliteactors ensures that 2 workflow backends of different type cannot be
-// loaded at the same time.
-type sqliteactors struct {
- logline *logline.LogLine
- daprd *daprd.Daprd
-}
-
-func (s *sqliteactors) Setup(t *testing.T) []framework.Option {
- s.logline = logline.New(t,
- logline.WithStdoutLineContains(
- "Fatal error from runtime: process component wfbackend2 error: [INIT_COMPONENT_FAILURE]: initialization error occurred for wfbackend2 (workflowbackend.actors/v1): cannot create more than one workflow backend component",
- ),
- )
-
- s.daprd = daprd.New(t,
- daprd.WithResourceFiles(`
-apiVersion: dapr.io/v1alpha1
-kind: Component
-metadata:
- name: wfbackend1
-spec:
- type: workflowbackend.sqlite
- version: v1
----
-apiVersion: dapr.io/v1alpha1
-kind: Component
-metadata:
- name: wfbackend2
-spec:
- type: workflowbackend.actors
- version: v1
-`),
- daprd.WithExecOptions(
- exec.WithExitCode(1),
- exec.WithRunError(func(t *testing.T, err error) {
- require.ErrorContains(t, err, "exit status 1")
- }),
- exec.WithStdout(s.logline.Stdout()),
- ),
- )
-
- return []framework.Option{
- framework.WithProcesses(s.logline, s.daprd),
- }
-}
-
-func (s *sqliteactors) Run(t *testing.T, ctx context.Context) {
- s.logline.EventuallyFoundAll(t)
-}
diff --git a/tests/integration/suite/daprd/workflow/backend/sqlite.go b/tests/integration/suite/daprd/workflow/backend/sqlite.go
deleted file mode 100644
index a8d800b4a..000000000
--- a/tests/integration/suite/daprd/workflow/backend/sqlite.go
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
-Copyright 2024 The Dapr Authors
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package backend
-
-import (
- "context"
- "fmt"
- "path/filepath"
- "testing"
-
- "github.com/microsoft/durabletask-go/api"
- "github.com/microsoft/durabletask-go/backend"
- "github.com/microsoft/durabletask-go/client"
- "github.com/microsoft/durabletask-go/task"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- rtv1 "github.com/dapr/dapr/pkg/proto/runtime/v1"
- "github.com/dapr/dapr/tests/integration/framework"
- "github.com/dapr/dapr/tests/integration/framework/process/daprd"
- "github.com/dapr/dapr/tests/integration/framework/util"
- "github.com/dapr/dapr/tests/integration/suite"
-)
-
-func init() {
- suite.Register(new(sqlite))
-}
-
-type sqlite struct {
- daprd *daprd.Daprd
- dir string
-}
-
-func (s *sqlite) Setup(t *testing.T) []framework.Option {
- s.dir = filepath.Join(t.TempDir(), "wf.db")
- s.daprd = daprd.New(t,
- daprd.WithResourceFiles(fmt.Sprintf(`
-apiVersion: dapr.io/v1alpha1
-kind: Component
-metadata:
- name: wfbackend
-spec:
- type: workflowbackend.sqlite
- version: v1
- metadata:
- - name: connectionString
- value: %s
-`, s.dir)),
- )
-
- return []framework.Option{
- framework.WithProcesses(s.daprd),
- }
-}
-
-func (s *sqlite) Run(t *testing.T, ctx context.Context) {
- s.daprd.WaitUntilRunning(t, ctx)
-
- comps := util.GetMetaComponents(t, ctx, util.HTTPClient(t), s.daprd.HTTPPort())
- require.ElementsMatch(t, []*rtv1.RegisteredComponents{
- {Name: "wfbackend", Type: "workflowbackend.sqlite", Version: "v1"},
- }, comps)
-
- r := task.NewTaskRegistry()
- r.AddOrchestratorN("SingleActivity", func(ctx *task.OrchestrationContext) (any, error) {
- var input string
- if err := ctx.GetInput(&input); err != nil {
- return nil, err
- }
- var output string
- err := ctx.CallActivity("SayHello", task.WithActivityInput(input)).Await(&output)
- return output, err
- })
- r.AddActivityN("SayHello", func(ctx task.ActivityContext) (any, error) {
- var name string
- if err := ctx.GetInput(&name); err != nil {
- return nil, err
- }
- return fmt.Sprintf("Hello, %s!", name), nil
- })
- backendClient := client.NewTaskHubGrpcClient(s.daprd.GRPCConn(t, ctx), backend.DefaultLogger())
- require.NoError(t, backendClient.StartWorkItemListener(ctx, r))
-
- resp, err := s.daprd.GRPCClient(t, ctx).
- StartWorkflowBeta1(ctx, &rtv1.StartWorkflowRequest{
- WorkflowComponent: "dapr",
- WorkflowName: "SingleActivity",
- Input: []byte(`"Dapr"`),
- InstanceId: "myinstance",
- })
- require.NoError(t, err)
-
- id := api.InstanceID(resp.GetInstanceId())
- metadata, err := backendClient.WaitForOrchestrationCompletion(ctx, id, api.WithFetchPayloads(true))
- require.NoError(t, err)
- assert.True(t, metadata.IsComplete())
- assert.Equal(t, `"Hello, Dapr!"`, metadata.SerializedOutput)
-}
|
mikeee/dapr
|
.github/scripts/version-skew-test-patches/integration/release-1.13/dapr-sidecar-master/0002-remove-workflowbacked-sqlite.patch
|
patch
|
mit
| 8,869 |
diff --git a/tests/integration/framework/process/daprd/daprd.go b/tests/integration/framework/process/daprd/daprd.go
index 8e6960f7c..06c9c9185 100644
--- a/tests/integration/framework/process/daprd/daprd.go
+++ b/tests/integration/framework/process/daprd/daprd.go
@@ -140,6 +140,9 @@ func New(t *testing.T, fopts ...Option) *Daprd {
if opts.blockShutdownDuration != nil {
args = append(args, "--dapr-block-shutdown-duration="+*opts.blockShutdownDuration)
}
+ if opts.controlPlaneTrustDomain != nil {
+ args = append(args, "--control-plane-trust-domain="+*opts.controlPlaneTrustDomain)
+ }
ns := "default"
if opts.namespace != nil {
diff --git a/tests/integration/framework/process/daprd/options.go b/tests/integration/framework/process/daprd/options.go
index d6b39a535..739d0481c 100644
--- a/tests/integration/framework/process/daprd/options.go
+++ b/tests/integration/framework/process/daprd/options.go
@@ -55,6 +55,7 @@ type options struct {
disableK8sSecretStore *bool
gracefulShutdownSeconds *int
blockShutdownDuration *string
+ controlPlaneTrustDomain *string
}
func WithExecOptions(execOptions ...exec.Option) Option {
@@ -246,3 +247,9 @@ func WithDaprBlockShutdownDuration(duration string) Option {
o.blockShutdownDuration = &duration
}
}
+
+func WithControlPlaneTrustDomain(trustDomain string) Option {
+ return func(o *options) {
+ o.controlPlaneTrustDomain = &trustDomain
+ }
+}
diff --git a/tests/integration/suite/daprd/hotreload/operator/informer.go b/tests/integration/suite/daprd/hotreload/operator/informer.go
index 1af786e19..b8b13652f 100644
--- a/tests/integration/suite/daprd/hotreload/operator/informer.go
+++ b/tests/integration/suite/daprd/hotreload/operator/informer.go
@@ -105,6 +105,7 @@ func (i *informer) Setup(t *testing.T) []framework.Option {
daprd.WithExecOptions(exec.WithEnvVars(t,
"DAPR_TRUST_ANCHORS", string(sentry.CABundle().TrustAnchors),
)),
+ daprd.WithControlPlaneTrustDomain("integration.test.dapr.io"),
)
return []framework.Option{
|
mikeee/dapr
|
.github/scripts/version-skew-test-patches/integration/release-1.13/dapr-sidecar-master/0003-daprd-control-plane-trust-domain.patch
|
patch
|
mit
| 2,030 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
name: Create a release
on:
workflow_dispatch:
inputs:
rel_version:
description: 'Release version (examples: 1.9.0-rc.1, 1.9.1)'
required: true
type: string
jobs:
create-release:
name: Creates release branch and tag
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Install required packages
run: |
sudo apt-get update
sudo apt-get install pcre2-utils
- name: Create release branch and tag
env:
GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
run: |
git config user.email "daprweb@microsoft.com"
git config user.name "Dapr Bot"
# Update origin with token
git remote set-url origin https://x-access-token:${GITHUB_TOKEN}@github.com/${GITHUB_REPOSITORY}.git
./.github/scripts/create-release.sh ${{ inputs.rel_version }}
trigger:
name: Triggers the Dapr runtime build
runs-on: ubuntu-latest
needs: create-release
steps:
- name: Triggers the build
env:
GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
run: |
gh workflow run dapr.yml --repo dapr/dapr --ref v$(echo '${{ inputs.rel_version }}' | sed -r 's/^[vV]?([0-9].+)$/\1/')
|
mikeee/dapr
|
.github/workflows/create-release.yaml
|
YAML
|
mit
| 1,911 |
name: dapr-mirror-images
on:
schedule:
- cron: '0 0 1 * *' # trigger on the 1st of every month at midnight
workflow_dispatch:
jobs:
mirror:
runs-on: ubuntu-latest
steps:
-
name: Checkout
uses: actions/checkout@v4
- # ghcr logins for pushing image after testing
name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
-
name: Login to DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_REGISTRY_ID }}
password: ${{ secrets.DOCKER_REGISTRY_PASS }}
- # copy 3rd party zookeeper image from dockerhub to ghcr
name: Push Image (zookeeper) to target registries
uses: akhilerm/tag-push-action@v2.0.0
with:
src: wurstmeister/zookeeper:latest
dst: |
ghcr.io/${{ github.repository_owner }}/3rdparty/zookeeper:latest
- # copy 3rd party kafka image from dockerhub to ghcr
name: Push Image (kafka) to target registries
uses: akhilerm/tag-push-action@v2.0.0
with:
src: wurstmeister/kafka:latest
dst: |
ghcr.io/${{ github.repository_owner }}/3rdparty/kafka:latest
- # copy 3rd party redis image from dockerhub to ghcr
name: Push Image (redis) to target registries
uses: akhilerm/tag-push-action@v2.0.0
with:
src: redis:latest
dst: |
ghcr.io/${{ github.repository_owner }}/3rdparty/redis:latest
- # copy 3rd party redis v6 image from dockerhub to ghcr
name: Push Image (redis) to target registries
uses: akhilerm/tag-push-action@v2.0.0
with:
src: redis:6
dst: |
ghcr.io/${{ github.repository_owner }}/3rdparty/redis:6
- # copy 3rd party rejson image from dockerhub to ghcr
name: Push Image (rejson) to target registries
uses: akhilerm/tag-push-action@v2.0.0
with:
src: redislabs/rejson:latest
dst: |
ghcr.io/${{ github.repository_owner }}/3rdparty/rejson:latest
- # copy 3rd party zipkin image from dockerhub to ghcr
name: Push Image (zipkin) to target registries
uses: akhilerm/tag-push-action@v2.0.0
with:
src: openzipkin/zipkin:latest
dst: |
ghcr.io/${{ github.repository_owner }}/3rdparty/zipkin:latest
- # copy 3rd party cassandra image from dockerhub to ghcr
name: Push Image (cassandra) to target registries
uses: akhilerm/tag-push-action@v2.0.0
with:
src: cassandra:3.11.3
dst: |
ghcr.io/${{ github.repository_owner }}/3rdparty/cassandra:3.11.3
|
mikeee/dapr
|
.github/workflows/dapr-3rdparty-images.yaml
|
YAML
|
mit
| 2,863 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
name: dapr-base-containers
on:
# Manual trigger
workflow_dispatch:
# Dispatch on external events
repository_dispatch:
types: [windows-base]
jobs:
build:
strategy:
matrix:
include:
- os: "windows-2019"
windows-version: "1809"
- os: "windows-2022"
windows-version: "ltsc2022"
name: Build base Windows images
runs-on: ${{ matrix.os }}
env:
WINDOWS_VERSION: ${{ matrix.windows-version }}
TARGET_OS: windows
steps:
- name: Set up for manual runs
if: github.event_name != 'repository_dispatch'
run: |
echo "CHECKOUT_REPO=${{ github.repository }}" >> $GITHUB_ENV
echo "CHECKOUT_REF=refs/heads/master" >> $GITHUB_ENV
shell: bash
- name: Parse event payload
if: github.event_name == 'repository_dispatch'
uses: actions/github-script@0.3.0
with:
github-token: ${{secrets.DAPR_BOT_TOKEN}}
script: |
const testPayload = context.payload.client_payload;
if (testPayload && testPayload.command == "windows-base") {
var fs = require('fs');
// Set environment variables
fs.appendFileSync(process.env.GITHUB_ENV,
`CHECKOUT_REPO=${testPayload.pull_head_repo}\n`+
`CHECKOUT_REF=${testPayload.pull_head_ref}`);
}
- name: Check out dapr
if: env.CHECKOUT_REPO != ''
uses: actions/checkout@v4
with:
repository: ${{ env.CHECKOUT_REPO }}
ref: ${{ env.CHECKOUT_REF }}
- name: Set REPO_OWNER
shell: bash
run: |
REPO_OWNER=${{ github.repository_owner }}
# Lowercase the value
echo "REPO_OWNER=${REPO_OWNER,,}" >>${GITHUB_ENV}
- name: Docker Hub login
if: env.CHECKOUT_REPO != '' && env.DAPR_REGISTRY != ''
env:
DAPR_REGISTRY: ${{ secrets.DOCKER_REGISTRY }}
run: |
docker login -u ${{ secrets.DOCKER_REGISTRY_ID }} -p ${{ secrets.DOCKER_REGISTRY_PASS }}
- name: GitHub container registry login
if: env.CHECKOUT_REPO != ''
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build Windows base images for Docker Hub
if: env.CHECKOUT_REPO != '' && env.DAPR_REGISTRY != ''
env:
DAPR_REGISTRY: ${{ secrets.DOCKER_REGISTRY }}
run: |
echo "Build docker image..."
make docker-windows-base-build
- name: Push Windows base images to Docker Hub
if: env.CHECKOUT_REPO != '' && env.DAPR_REGISTRY != ''
env:
DAPR_REGISTRY: ${{ secrets.DOCKER_REGISTRY }}
run: |
echo "Push docker image..."
make docker-windows-base-push
# If the images were built for Docker Hub, this step is cached
- name: Build Windows base images for GHCR
if: env.CHECKOUT_REPO != ''
env:
DAPR_REGISTRY: ghcr.io/${{ env.REPO_OWNER }}
run: |
echo "Build docker image..."
make docker-windows-base-build
- name: Push Windows base images to GHCR
if: env.CHECKOUT_REPO != ''
env:
DAPR_REGISTRY: ghcr.io/${{ env.REPO_OWNER }}
run: |
echo "Push docker image..."
make docker-windows-base-push
|
mikeee/dapr
|
.github/workflows/dapr-base-containers.yaml
|
YAML
|
mit
| 4,051 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
name: dapr-bot-schedule
on:
schedule:
- cron: '*/10 * * * *'
workflow_dispatch:
jobs:
automerge:
if: github.repository_owner == 'dapr'
name: Automerge and update PRs.
runs-on: ubuntu-latest
steps:
- name: Checkout repo
uses: actions/checkout@v4
- name: Install dependencies
run: pip install PyGithub
- name: Automerge and update
env:
MAINTAINERS: artursouza,mukundansundar,yaron2
GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
run: python ./.github/scripts/automerge.py
prune_stale:
name: Prune Stale
runs-on: ubuntu-latest
steps:
- name: Prune Stale
uses: actions/stale@v7.0.0
with:
repo-token: ${{ secrets.DAPR_BOT_TOKEN }}
days-before-pr-stale: 60
days-before-pr-close: 7
days-before-issue-stale: 60
days-before-issue-close: 7
stale-issue-message: >
This issue has been automatically marked as stale because it has not had activity in the
last 60 days. It will be closed in the next 7 days unless it is tagged (pinned, good first issue, help wanted or triaged/resolved) or other activity
occurs. Thank you for your contributions.
close-issue-message: >
This issue has been automatically closed because it has not had activity in the
last 67 days. If this issue is still valid, please ping a maintainer and ask them to label it as pinned, good first issue, help wanted or triaged/resolved.
Thank you for your contributions.
stale-pr-message: >
This pull request has been automatically marked as stale because it has not had
activity in the last 60 days. It will be closed in 7 days if no further activity occurs. Please
feel free to give a status update now, ping for review, or re-open when it's ready.
Thank you for your contributions!
close-pr-message: >
This pull request has been automatically closed because it has not had
activity in the last 67 days. Please feel free to give a status update now, ping for review, or re-open when it's ready.
Thank you for your contributions!
stale-issue-label: 'stale'
exempt-issue-labels: 'pinned,good first issue,help wanted,triaged/resolved'
stale-pr-label: 'stale'
exempt-pr-labels: 'pinned'
operations-per-run: 500
ascending: true
|
mikeee/dapr
|
.github/workflows/dapr-bot-schedule.yml
|
YAML
|
mit
| 3,033 |
name: dapr-bot
on:
issue_comment:
types: [created]
issues:
types: [labeled]
jobs:
daprbot:
name: bot-processor
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4 # required to make the script available for next step
- name: Comment analyzer
uses: actions/github-script@v6
with:
github-token: ${{secrets.DAPR_BOT_TOKEN}}
script: |
const script = require('./.github/scripts/dapr_bot.js')
await script({github, context})
|
mikeee/dapr
|
.github/workflows/dapr-bot.yml
|
YAML
|
mit
| 553 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
name: dapr-dev-container
on:
workflow_dispatch:
schedule:
# Run weekly on Tuesdays
- cron: "22 03 * * 2"
jobs:
build:
name: build dev container
runs-on: ubuntu-22.04
env:
DOCKER_REGISTRY: ${{ secrets.DOCKER_REGISTRY }}
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v4
- name: Set REPO_OWNER
shell: bash
run: |
REPO_OWNER=${{ github.repository_owner }}
# Lowercase the value
echo "REPO_OWNER=${REPO_OWNER,,}" >>${GITHUB_ENV}
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
with:
image: tonistiigi/binfmt:latest
platforms: arm64
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Docker Hub login
if: github.event_name != 'pull_request'
run: |
docker login -u ${{ secrets.DOCKER_REGISTRY_ID }} -p ${{ secrets.DOCKER_REGISTRY_PASS }}
- name: GitHub Container Registry login
if: github.event_name != 'pull_request'
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build multi-arch dev container
run: make build-dev-container-all-arch
- name: Push multi-arch dev container to Docker Hub
if: github.event_name != 'pull_request'
run: make push-dev-container-all-arch DAPR_REGISTRY=${{ env.DOCKER_REGISTRY }}
# If the images were built for Docker Hub, this step is cached
- name: Push multi-arch dev container to GitHub Container Registry
if: github.event_name != 'pull_request'
run: make push-dev-container-all-arch DAPR_REGISTRY="ghcr.io/${{ env.REPO_OWNER }}"
|
mikeee/dapr
|
.github/workflows/dapr-dev-container.yml
|
YAML
|
mit
| 2,405 |
#
# Copyright 2023 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Required secrets:
# - AZURE_CREDENTIALS: JSON object containing the Azure service principal credentials. Docs: https://github.com/Azure/login#configure-a-service-principal-with-a-secret
# - PERF_AZURE_STORAGE_ACCOUNT and PERF_AZURE_STORAGE_KEY: Credentials for the Storage Account where to store the result of perf tests
# - DAPR_BOT_TOKEN: Token for the Dapr bot
#
# Optional secrets:
# - AZURE_DIAG_LOG_ANALYTICS_WORKSPACE_ID: Resource ID of the Log Analytics Workspace where to store certain diagnostic logs (e.g. `/subscriptions/<subscription>/resourcegroups/<resource group>/providers/Microsoft.OperationalInsights/workspaces/<workspace name>`)
# - AZURE_DIAG_STORAGE_ID: Resource ID of the Azure Storage account where to store certain diagnostic logs (e.g. `/subscriptions/<subscription>/resourcegroups/<resource group>/providers/Microsoft.Storage/storageAccounts/<storage account name>`)
name: dapr-perf-components
on:
# Run every Saturday at 4.34am UTC
schedule:
- cron: "34 4 * * 6" # Manual trigger
workflow_dispatch:
# Dispatch on external events
repository_dispatch:
types: [components-perf-test]
env:
# Configure proxy for Go modules
GOPROXY: https://proxy.golang.org
# Version of kubectl
KUBECTLVER: "v1.27.6"
# Version of Helm
HELMVER: "v3.10.0"
# Kubernetes namespace to use
DAPR_NAMESPACE: "dapr-tests"
# Timeout for tests
MAX_TEST_TIMEOUT: 5400
  # Space-separated list of supported Azure regions: one will be picked randomly for each cluster
AZURE_REGIONS: "westus3"
# Container registry where to cache perf test images
DAPR_CACHE_REGISTRY: "dapre2eacr.azurecr.io"
jobs:
deploy-infrastructure:
name: Deploy test infrastructure
runs-on: ubuntu-latest
steps:
- name: Set up for scheduled test
if: github.event_name != 'repository_dispatch'
run: |
echo "CHECKOUT_REPO=${{ github.repository }}" >> $GITHUB_ENV
echo "CHECKOUT_REF=refs/heads/master" >> $GITHUB_ENV
shell: bash
- name: Parse test payload
if: github.event_name == 'repository_dispatch'
uses: actions/github-script@v6.2.0
with:
github-token: ${{secrets.DAPR_BOT_TOKEN}}
script: |
const testPayload = context.payload.client_payload;
if (testPayload && testPayload.command == "ok-to-perf-components") {
var fs = require('fs');
// Set environment variables
fs.appendFileSync(process.env.GITHUB_ENV,
`CHECKOUT_REPO=${testPayload.pull_head_repo}\n`+
`CHECKOUT_REF=${testPayload.pull_head_ref}\n`+
`PR_NUMBER=${testPayload.issue.number}`
);
}
- name: Create PR comment
if: env.PR_NUMBER != ''
uses: artursouza/sticky-pull-request-comment@v2.2.0
with:
header: ${{ github.run_id }}
number: ${{ env.PR_NUMBER }}
hide: true
hide_classify: OUTDATED
GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
message: |
# Dapr perf test
          🔗 **[Link to Action run](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})**
Commit ref: ${{ env.CHECKOUT_REF }}
- name: Check out code
if: env.CHECKOUT_REPO != ''
uses: actions/checkout@v4
with:
repository: ${{ env.CHECKOUT_REPO }}
ref: ${{ env.CHECKOUT_REF }}
- name: Login to Azure
if: env.CHECKOUT_REPO != ''
uses: azure/login@v1
with:
creds: ${{ secrets.AZURE_CREDENTIALS }}
- name: Build test prefix
if: env.CHECKOUT_REPO != ''
run: |
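        # The prefix is a deterministic hash of the commit and run metadata, so every job in
        # this workflow run derives the same value independently.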
BASE_STR="PERF|${GITHUB_SHA}|${GITHUB_SERVER_URL}|${GITHUB_REPOSITORY}|${GITHUB_RUN_ID}|${GITHUB_RUN_ATTEMPT}"
echo "Base string is ${BASE_STR}"
SUFFIX=$(echo $BASE_STR | sha1sum | head -c 10)
echo "Suffix is ${SUFFIX}"
TEST_PREFIX="daprprf${SUFFIX}"
echo "Test prefix is ${TEST_PREFIX}"
echo "TEST_PREFIX=${TEST_PREFIX}" >> $GITHUB_ENV
echo "TEST_RESOURCE_GROUP=Dapr-Perf-${TEST_PREFIX}" >> $GITHUB_ENV
shell: bash
- name: Deploy the test cluster
if: env.TEST_PREFIX != ''
run: |
        # Select a random Azure region
REGIONS=(${{ env.AZURE_REGIONS }})
REGIONS_SIZE=${#REGIONS[@]}
REGIONS_IDX=$(($RANDOM % $REGIONS_SIZE))
REGION=${REGIONS[$REGIONS_IDX]}
echo "AZURE_REGION=${REGION}" >> $GITHUB_ENV
echo "Deploying to Azure region: ${REGION}"
# Tags
TAGS="date=$(date --iso-8601=seconds)"
echo "Tags: ${TAGS}"
# Create a resource group
az group create \
--resource-group "${{ env.TEST_RESOURCE_GROUP }}" \
--location ${REGION} \
--tags "${TAGS}"
# Deploy the test cluster, deploying AKS only
# Retry the deployment twice in case of transient failures (such as capacity constraints)
success=false
for i in 1 2 3; do
az deployment group create \
--resource-group "${{ env.TEST_RESOURCE_GROUP }}" \
--template-file ./tests/test-infra/azure-aks.bicep \
--parameters \
namePrefix="${{ env.TEST_PREFIX }}" \
location=${REGION} \
linuxVMSize=Standard_D8s_v4 \
diagLogAnalyticsWorkspaceResourceId="${{ secrets.AZURE_DIAG_LOG_ANALYTICS_WORKSPACE_ID }}" \
diagStorageResourceId="${{ secrets.AZURE_DIAG_STORAGE_ID }}" \
&& success=true \
&& break \
|| sleep 120
done
# Exit with error if failed
$success || exit 1
shell: bash
- name: Update PR comment for success
if: success() && env.PR_NUMBER != ''
uses: artursouza/sticky-pull-request-comment@v2.2.0
with:
header: ${{ github.run_id }}
number: ${{ env.PR_NUMBER }}
append: true
GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
message: |
          ## ✅ Infrastructure deployed
- Resource group name: `Dapr-Perf-${{ env.TEST_PREFIX }}`
- Azure region: ${{ env.AZURE_REGION }}
- name: Update PR comment for failure
if: failure() && env.PR_NUMBER != ''
uses: artursouza/sticky-pull-request-comment@v2.2.0
with:
header: ${{ github.run_id }}
number: ${{ env.PR_NUMBER }}
append: true
GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
message: |
          ## ❌ Infrastructure deployment failed
- Resource group name: `Dapr-Perf-${{ env.TEST_PREFIX }}`
- Azure region: ${{ env.AZURE_REGION }}
Please check the logs for details on the failure.
build:
name: Build
runs-on: ubuntu-latest
env:
GOOS: linux
GOARCH: amd64
TARGET_OS: linux
TARGET_ARCH: amd64
steps:
- name: Set up for scheduled test
if: github.event_name != 'repository_dispatch'
run: |
echo "CHECKOUT_REPO=${{ github.repository }}" >> $GITHUB_ENV
echo "CHECKOUT_REF=refs/heads/master" >> $GITHUB_ENV
shell: bash
- name: Parse test payload
if: github.event_name == 'repository_dispatch'
uses: actions/github-script@v6.2.0
with:
github-token: ${{secrets.DAPR_BOT_TOKEN}}
script: |
const testPayload = context.payload.client_payload;
if (testPayload && testPayload.command == "ok-to-perf-components") {
var fs = require('fs');
// Set environment variables
fs.appendFileSync(process.env.GITHUB_ENV,
`CHECKOUT_REPO=${testPayload.pull_head_repo}\n`+
`CHECKOUT_REF=${testPayload.pull_head_ref}\n`+
`PR_NUMBER=${testPayload.issue.number}`
);
}
- name: Check out code
if: env.CHECKOUT_REPO != ''
uses: actions/checkout@v4
with:
repository: ${{ env.CHECKOUT_REPO }}
ref: ${{ env.CHECKOUT_REF }}
- name: Set up Go
id: setup-go
uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
- name: Login to Azure
if: env.CHECKOUT_REPO != ''
uses: azure/login@v1
with:
creds: ${{ secrets.AZURE_CREDENTIALS }}
- name: Login to cache registry
if: env.CHECKOUT_REPO != '' && env.DAPR_CACHE_REGISTRY != ''
run: |
az acr login --name ${{ env.DAPR_CACHE_REGISTRY }}
shell: bash
- name: Build test prefix and set env vars
if: env.CHECKOUT_REPO != ''
run: |
BASE_STR="PERF|${GITHUB_SHA}|${GITHUB_SERVER_URL}|${GITHUB_REPOSITORY}|${GITHUB_RUN_ID}|${GITHUB_RUN_ATTEMPT}"
echo "Base string is ${BASE_STR}"
SUFFIX=$(echo $BASE_STR | sha1sum | head -c 10)
echo "Suffix is ${SUFFIX}"
TEST_PREFIX="daprprf${SUFFIX}"
echo "Test prefix is ${TEST_PREFIX}"
echo "TEST_PREFIX=${TEST_PREFIX}" >> $GITHUB_ENV
echo "DAPR_REGISTRY=${TEST_PREFIX}acr.azurecr.io" >> $GITHUB_ENV
echo "TEST_CLUSTER=${TEST_PREFIX}-aks" >> $GITHUB_ENV
echo "DAPR_TAG=${TEST_PREFIX}" >> $GITHUB_ENV
echo "DAPR_TEST_TAG=${TEST_PREFIX}" >> $GITHUB_ENV
echo "TEST_RESOURCE_GROUP=Dapr-Perf-${TEST_PREFIX}" >> $GITHUB_ENV
echo "DAPR_GO_BUILD_TAGS=wfbackendsqlite" >> $GITHUB_ENV
shell: bash
- name: Build dapr and its docker image
if: env.TEST_PREFIX != ''
run: |
make build
make docker-build
shell: bash
- name: Wait for Azure Container Registry deployment
timeout-minutes: 30
if: env.TEST_PREFIX != ''
run: |
until az acr show --name ${{ env.TEST_PREFIX }}acr --query "id"
do
echo "Azure Container Registry not ready yet: sleeping for 20 seconds"
sleep 20
done
shell: bash
- name: Login to Azure Container Registry
if: env.TEST_PREFIX != ''
run: |
az acr login --name ${{ env.TEST_PREFIX }}acr
shell: bash
- name: Push Dapr container images
if: env.TEST_PREFIX != ''
run: |
make docker-push
shell: bash
- name: Build and push perf test apps
if: env.TEST_PREFIX != ''
run: |
make build-push-perf-app-all
shell: bash
- name: Update PR comment for success
if: success() && env.PR_NUMBER != ''
uses: artursouza/sticky-pull-request-comment@v2.2.0
with:
header: ${{ github.run_id }}
number: ${{ env.PR_NUMBER }}
append: true
GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
message: |
          ## ✅ Build succeeded
- Image tag: `${{ env.DAPR_TAG }}`
- Test image tag: `${{ env.DAPR_TEST_TAG }}`
- name: Update PR comment for failure
if: failure() && env.PR_NUMBER != ''
uses: artursouza/sticky-pull-request-comment@v2.2.0
with:
header: ${{ github.run_id }}
number: ${{ env.PR_NUMBER }}
append: true
GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
message: |
          ## ❌ Build failed
Please check the logs for details on the error.
test-perf:
name: Perf tests
needs:
- build
- deploy-infrastructure
runs-on: ubuntu-latest
env:
GOOS: linux
GOARCH: amd64
AZURE_STORAGE_ACCOUNT: ${{ secrets.PERF_AZURE_STORAGE_ACCOUNT }}
AZURE_STORAGE_ACCESS_KEY: ${{ secrets.PERF_AZURE_STORAGE_KEY }}
PULL_POLICY: IfNotPresent
steps:
- name: Setup test output
run: |
export TEST_OUTPUT_FILE_PREFIX=$GITHUB_WORKSPACE/test_report
echo "TEST_OUTPUT_FILE_PREFIX=$TEST_OUTPUT_FILE_PREFIX" >> $GITHUB_ENV
shell: bash
- name: Set up log paths
run: |
echo "DAPR_CONTAINER_LOG_PATH=$GITHUB_WORKSPACE/container_logs/perf_tests" >> $GITHUB_ENV
echo "DAPR_TEST_LOG_PATH=$GITHUB_WORKSPACE/test_logs/perf_tests" >> $GITHUB_ENV
shell: bash
- name: Set up for scheduled test
if: github.event_name != 'repository_dispatch'
run: |
echo "CHECKOUT_REPO=${{ github.repository }}" >> $GITHUB_ENV
echo "CHECKOUT_REF=refs/heads/master" >> $GITHUB_ENV
shell: bash
- name: Parse test payload
if: github.event_name == 'repository_dispatch'
uses: actions/github-script@v6.2.0
with:
github-token: ${{secrets.DAPR_BOT_TOKEN}}
script: |
const testPayload = context.payload.client_payload;
if (testPayload && testPayload.command == "ok-to-perf-components") {
let selectedTestsEnvVar = ""
if (testPayload?.args) {
selectedTestsEnvVar = `DAPR_PERF_TEST=${testPayload.args}\n`
}
var fs = require('fs');
// Set environment variables
fs.appendFileSync(process.env.GITHUB_ENV,
`${selectedTestsEnvVar}`+
`CHECKOUT_REPO=${testPayload.pull_head_repo}\n`+
`CHECKOUT_REF=${testPayload.pull_head_ref}\n`+
`PR_NUMBER=${testPayload.issue.number}`
);
}
- name: Check out code
if: env.CHECKOUT_REPO != ''
uses: actions/checkout@v4
with:
repository: ${{ env.CHECKOUT_REPO }}
ref: ${{ env.CHECKOUT_REF }}
- name: Set up Go
id: setup-go
uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
- uses: azure/setup-kubectl@v3
with:
version: ${{ env.KUBECTLVER }}
id: install
- name: Set up Helm ${{ env.HELMVER }}
uses: azure/setup-helm@v3
with:
version: ${{ env.HELMVER }}
- name: Login to Azure
if: env.CHECKOUT_REPO != ''
uses: azure/login@v1
with:
creds: ${{ secrets.AZURE_CREDENTIALS }}
- name: Build test prefix and set env vars
if: env.CHECKOUT_REPO != ''
run: |
BASE_STR="PERF|${GITHUB_SHA}|${GITHUB_SERVER_URL}|${GITHUB_REPOSITORY}|${GITHUB_RUN_ID}|${GITHUB_RUN_ATTEMPT}"
echo "Base string is ${BASE_STR}"
SUFFIX=$(echo $BASE_STR | sha1sum | head -c 10)
echo "Suffix is ${SUFFIX}"
TEST_PREFIX="daprprf${SUFFIX}"
echo "Test prefix is ${TEST_PREFIX}"
echo "TEST_PREFIX=${TEST_PREFIX}" >> $GITHUB_ENV
echo "DAPR_REGISTRY=${TEST_PREFIX}acr.azurecr.io" >> $GITHUB_ENV
echo "TEST_CLUSTER=${TEST_PREFIX}-aks" >> $GITHUB_ENV
echo "DAPR_TAG=${TEST_PREFIX}" >> $GITHUB_ENV
echo "DAPR_TEST_TAG=${TEST_PREFIX}" >> $GITHUB_ENV
echo "TEST_RESOURCE_GROUP=Dapr-Perf-${TEST_PREFIX}" >> $GITHUB_ENV
shell: bash
- name: Connect to Kubernetes
if: env.TEST_PREFIX != ''
run: |
az aks get-credentials -n "${{ env.TEST_CLUSTER }}" -g "${{ env.TEST_RESOURCE_GROUP }}"
kubectl create namespace ${{ env.DAPR_NAMESPACE }}
shell: bash
- name: Preparing AKS cluster for test
if: env.TEST_PREFIX != ''
run: |
make setup-helm-init
make setup-test-env
make setup-pubsub-subs-perf-test-components
kubectl get pods -n ${{ env.DAPR_NAMESPACE }}
- name: Deploy dapr to AKS cluster
if: env.TEST_PREFIX != ''
env:
ADDITIONAL_HELM_SET: "dapr_operator.logLevel=debug,dapr_operator.watchInterval=20s,dapr_dashboard.enabled=false"
run: make docker-deploy-k8s
- name: Deploy test components
if: env.TEST_PREFIX != ''
run: |
make setup-test-components
make setup-components-perf-test
- name: Run pub-sub subscribe http components performance tests
if: env.TEST_PREFIX != ''
run: make test-perf-pubsub-subscribe-http-components
- name: Save control plane logs
if: always() && env.TEST_PREFIX != ''
run: |
make save-dapr-control-plane-k8s-logs
- name: Upload container logs
if: always() && env.TEST_PREFIX != ''
uses: actions/upload-artifact@v4
with:
name: perf_container_logs
path: ${{ env.DAPR_CONTAINER_LOG_PATH }}
- name: Upload test logs
if: always()
uses: actions/upload-artifact@v4
with:
name: perf_test_logs
path: ${{ env.DAPR_TEST_LOG_PATH }}
- name: Upload test results
if: always()
uses: actions/upload-artifact@v4
with:
#TODO: .json suffix can be removed from artifact name after test analytics scripts are updated
name: test_perf.json
path: ${{ env.TEST_OUTPUT_FILE_PREFIX }}_perf*.*
- name: Add job test summary
if: always()
uses: test-summary/action@v2
with:
paths: ${{ env.TEST_OUTPUT_FILE_PREFIX }}_perf*.xml
- name: Add job test outputs
if: always()
uses: actions/github-script@v6
with:
script: |
const script = require('./.github/scripts/dapr_tests_summary.js')
await script({core, glob})
- name: Update PR comment for success
if: success() && env.PR_NUMBER != ''
uses: artursouza/sticky-pull-request-comment@v2.2.0
with:
header: ${{ github.run_id }}
number: ${{ env.PR_NUMBER }}
append: true
GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
message: |
          ## ✅ Perf tests succeeded
- Image tag: `${{ env.DAPR_TAG }}`
- Test image tag: `${{ env.DAPR_TEST_TAG }}`
- name: Update PR comment for failure
if: failure() && env.PR_NUMBER != ''
uses: artursouza/sticky-pull-request-comment@v2.2.0
with:
header: ${{ github.run_id }}
number: ${{ env.PR_NUMBER }}
append: true
GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
message: |
          ## ❌ Perf tests failed
Please check the logs for details on the error.
- name: Update PR comment for cancellation
if: cancelled() && env.PR_NUMBER != ''
uses: artursouza/sticky-pull-request-comment@v2.2.0
with:
header: ${{ github.run_id }}
number: ${{ env.PR_NUMBER }}
append: true
GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
message: |
          ## ⚠️ Perf tests cancelled
The Action has been canceled
cleanup:
name: Clean up Azure resources
runs-on: ubuntu-latest
needs:
- test-perf
if: always()
steps:
- name: Login to Azure
uses: azure/login@v1
with:
creds: ${{ secrets.AZURE_CREDENTIALS }}
- name: Build test prefix
run: |
BASE_STR="PERF|${GITHUB_SHA}|${GITHUB_SERVER_URL}|${GITHUB_REPOSITORY}|${GITHUB_RUN_ID}|${GITHUB_RUN_ATTEMPT}"
echo "Base string is ${BASE_STR}"
SUFFIX=$(echo $BASE_STR | sha1sum | head -c 10)
echo "Suffix is ${SUFFIX}"
TEST_PREFIX="daprprf${SUFFIX}"
echo "Test prefix is ${TEST_PREFIX}"
echo "TEST_PREFIX=${TEST_PREFIX}" >> $GITHUB_ENV
shell: bash
- name: Delete cluster
run: |
# We are not waiting for these commands to complete, and we're ignoring errors
echo "Starting removal of resource group Dapr-Perf-${TEST_PREFIX}"
az group delete --no-wait --yes --name "Dapr-Perf-${TEST_PREFIX}" || true
shell: bash
| mikeee/dapr | .github/workflows/dapr-perf-components.yml | YAML | mit | 20,540 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Required secrets:
# - AZURE_CREDENTIALS: JSON object containing the Azure service principal credentials. Docs: https://github.com/Azure/login#configure-a-service-principal-with-a-secret
# - PERF_AZURE_STORAGE_ACCOUNT and PERF_AZURE_STORAGE_KEY: Credentials for the Storage Account where to store the result of perf tests
# - DAPR_BOT_TOKEN: Token for the Dapr bot
#
# Optional secrets:
# - AZURE_DIAG_LOG_ANALYTICS_WORKSPACE_ID: Resource ID of the Log Analytics Workspace where to store certain diagnostic logs (e.g. `/subscriptions/<subscription>/resourcegroups/<resource group>/providers/Microsoft.OperationalInsights/workspaces/<workspace name>`)
# - AZURE_DIAG_STORAGE_ID: Resource ID of the Azure Storage account where to store certain diagnostic logs (e.g. `/subscriptions/<subscription>/resourcegroups/<resource group>/providers/Microsoft.Storage/storageAccounts/<storage account name>`)
name: dapr-perf
on:
# Run at minute 44 past hour 6, 14, and 22 on every day-of-week from Monday through Friday.
# Run at 23:44 on Sunday and Saturday.
schedule:
- cron: "44 6,14,22 * * 1-5"
- cron: "44 23 * * 0,6"
# Manual trigger
workflow_dispatch:
inputs:
tests:
description: "Space-separated list of perf tests to run only"
required: false
type: string
# Dispatch on external events
repository_dispatch:
types: [perf-test]
env:
# Configure proxy for Go modules
GOPROXY: https://proxy.golang.org
# Version of kubectl
KUBECTLVER: "v1.27.6"
# Version of Helm
HELMVER: "v3.10.0"
# Kubernetes namespace to use
DAPR_NAMESPACE: "dapr-tests"
# Timeout for tests
MAX_TEST_TIMEOUT: 5400
  # Space-separated list of supported Azure regions: one will be picked randomly for each cluster
AZURE_REGIONS: "westus3"
# Container registry where to cache perf test images
DAPR_CACHE_REGISTRY: "dapre2eacr.azurecr.io"
jobs:
deploy-infrastructure:
name: Deploy test infrastructure
runs-on: ubuntu-latest
steps:
- name: Set up for manual runs
if: github.event_name == 'workflow_dispatch'
run: |
echo "CHECKOUT_REPO=${{ github.repository }}" >> $GITHUB_ENV
echo "CHECKOUT_REF=${{ github.ref }}" >> $GITHUB_ENV
shell: bash
- name: Set up for scheduled test
if: github.event_name == 'schedule'
run: |
echo "CHECKOUT_REPO=${{ github.repository }}" >> $GITHUB_ENV
echo "CHECKOUT_REF=refs/heads/master" >> $GITHUB_ENV
shell: bash
- name: Set up for dispatched events
if: github.event_name == 'repository_dispatch'
uses: actions/github-script@v6.2.0
with:
github-token: ${{secrets.DAPR_BOT_TOKEN}}
script: |
const testPayload = context.payload.client_payload;
if (testPayload && testPayload.command == "ok-to-perf") {
var fs = require('fs');
// Set environment variables
fs.appendFileSync(process.env.GITHUB_ENV,
`CHECKOUT_REPO=${testPayload.pull_head_repo}\n`+
`CHECKOUT_REF=${testPayload.pull_head_ref}\n`+
`PR_NUMBER=${testPayload.issue.number}`
);
}
- name: Create PR comment
if: env.PR_NUMBER != ''
uses: artursouza/sticky-pull-request-comment@v2.2.0
with:
header: ${{ github.run_id }}
number: ${{ env.PR_NUMBER }}
hide: true
hide_classify: OUTDATED
GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
message: |
# Dapr perf test
          🔗 **[Link to Action run](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})**
Commit ref: ${{ env.CHECKOUT_REF }}
- name: Check out code
if: env.CHECKOUT_REPO != ''
uses: actions/checkout@v4
with:
repository: ${{ env.CHECKOUT_REPO }}
ref: ${{ env.CHECKOUT_REF }}
- name: Login to Azure
if: env.CHECKOUT_REPO != ''
uses: azure/login@v1
with:
creds: ${{ secrets.AZURE_CREDENTIALS }}
- name: Build test prefix
if: env.CHECKOUT_REPO != ''
run: |
BASE_STR="PERF|${GITHUB_SHA}|${GITHUB_SERVER_URL}|${GITHUB_REPOSITORY}|${GITHUB_RUN_ID}|${GITHUB_RUN_ATTEMPT}"
echo "Base string is ${BASE_STR}"
SUFFIX=$(echo $BASE_STR | sha1sum | head -c 10)
echo "Suffix is ${SUFFIX}"
TEST_PREFIX="daprprf${SUFFIX}"
echo "Test prefix is ${TEST_PREFIX}"
echo "TEST_PREFIX=${TEST_PREFIX}" >> $GITHUB_ENV
echo "TEST_RESOURCE_GROUP=Dapr-Perf-${TEST_PREFIX}" >> $GITHUB_ENV
shell: bash
- name: Deploy the test cluster
if: env.TEST_PREFIX != ''
run: |
        # Select a random Azure region
REGIONS=(${{ env.AZURE_REGIONS }})
REGIONS_SIZE=${#REGIONS[@]}
REGIONS_IDX=$(($RANDOM % $REGIONS_SIZE))
REGION=${REGIONS[$REGIONS_IDX]}
echo "AZURE_REGION=${REGION}" >> $GITHUB_ENV
echo "Deploying to Azure region: ${REGION}"
# Tags
TAGS="date=$(date --iso-8601=seconds)"
echo "Tags: ${TAGS}"
# Create a resource group
az group create \
--resource-group "${{ env.TEST_RESOURCE_GROUP }}" \
--location ${REGION} \
--tags "${TAGS}"
# Deploy the test cluster, deploying AKS only
# Retry the deployment twice in case of transient failures (such as capacity constraints)
success=false
for i in 1 2 3; do
az deployment group create \
--resource-group "${{ env.TEST_RESOURCE_GROUP }}" \
--template-file ./tests/test-infra/azure-aks.bicep \
--parameters \
namePrefix="${{ env.TEST_PREFIX }}" \
location=${REGION} \
linuxVMSize=Standard_D8s_v4 \
diagLogAnalyticsWorkspaceResourceId="${{ secrets.AZURE_DIAG_LOG_ANALYTICS_WORKSPACE_ID }}" \
diagStorageResourceId="${{ secrets.AZURE_DIAG_STORAGE_ID }}" \
&& success=true \
&& break \
|| sleep 120
done
# Exit with error if failed
$success || exit 1
shell: bash
- name: Update PR comment for success
if: success() && env.PR_NUMBER != ''
uses: artursouza/sticky-pull-request-comment@v2.2.0
with:
header: ${{ github.run_id }}
number: ${{ env.PR_NUMBER }}
append: true
GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
message: |
          ## ✅ Infrastructure deployed
- Resource group name: `Dapr-Perf-${{ env.TEST_PREFIX }}`
- Azure region: ${{ env.AZURE_REGION }}
- name: Update PR comment for failure
if: failure() && env.PR_NUMBER != ''
uses: artursouza/sticky-pull-request-comment@v2.2.0
with:
header: ${{ github.run_id }}
number: ${{ env.PR_NUMBER }}
append: true
GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
message: |
          ## ❌ Infrastructure deployment failed
- Resource group name: `Dapr-Perf-${{ env.TEST_PREFIX }}`
- Azure region: ${{ env.AZURE_REGION }}
Please check the logs for details on the failure.
build:
name: Build
runs-on: ubuntu-latest
env:
GOOS: linux
GOARCH: amd64
TARGET_OS: linux
TARGET_ARCH: amd64
steps:
- name: Set up for manual runs
if: github.event_name == 'workflow_dispatch'
run: |
echo "CHECKOUT_REPO=${{ github.repository }}" >> $GITHUB_ENV
echo "CHECKOUT_REF=${{ github.ref }}" >> $GITHUB_ENV
shell: bash
- name: Set up for scheduled test
if: github.event_name == 'schedule'
run: |
echo "CHECKOUT_REPO=${{ github.repository }}" >> $GITHUB_ENV
echo "CHECKOUT_REF=refs/heads/master" >> $GITHUB_ENV
shell: bash
- name: Set up for dispatched events
if: github.event_name == 'repository_dispatch'
uses: actions/github-script@v6.2.0
with:
github-token: ${{secrets.DAPR_BOT_TOKEN}}
script: |
const testPayload = context.payload.client_payload;
if (testPayload && testPayload.command == "ok-to-perf") {
var fs = require('fs');
// Set environment variables
fs.appendFileSync(process.env.GITHUB_ENV,
`CHECKOUT_REPO=${testPayload.pull_head_repo}\n`+
`CHECKOUT_REF=${testPayload.pull_head_ref}\n`+
`PR_NUMBER=${testPayload.issue.number}`
);
}
- name: Check out code
if: env.CHECKOUT_REPO != ''
uses: actions/checkout@v4
with:
repository: ${{ env.CHECKOUT_REPO }}
ref: ${{ env.CHECKOUT_REF }}
- name: Set up Go
id: setup-go
uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
- name: Login to Azure
if: env.CHECKOUT_REPO != ''
uses: azure/login@v1
with:
creds: ${{ secrets.AZURE_CREDENTIALS }}
- name: Login to cache registry
if: env.CHECKOUT_REPO != '' && env.DAPR_CACHE_REGISTRY != ''
run: |
az acr login --name ${{ env.DAPR_CACHE_REGISTRY }}
shell: bash
- name: Build test prefix and set env vars
if: env.CHECKOUT_REPO != ''
run: |
BASE_STR="PERF|${GITHUB_SHA}|${GITHUB_SERVER_URL}|${GITHUB_REPOSITORY}|${GITHUB_RUN_ID}|${GITHUB_RUN_ATTEMPT}"
echo "Base string is ${BASE_STR}"
SUFFIX=$(echo $BASE_STR | sha1sum | head -c 10)
echo "Suffix is ${SUFFIX}"
TEST_PREFIX="daprprf${SUFFIX}"
echo "Test prefix is ${TEST_PREFIX}"
echo "TEST_PREFIX=${TEST_PREFIX}" >> $GITHUB_ENV
echo "DAPR_REGISTRY=${TEST_PREFIX}acr.azurecr.io" >> $GITHUB_ENV
echo "TEST_CLUSTER=${TEST_PREFIX}-aks" >> $GITHUB_ENV
echo "DAPR_TAG=${TEST_PREFIX}" >> $GITHUB_ENV
echo "DAPR_TEST_TAG=${TEST_PREFIX}" >> $GITHUB_ENV
echo "TEST_RESOURCE_GROUP=Dapr-Perf-${TEST_PREFIX}" >> $GITHUB_ENV
echo "DAPR_GO_BUILD_TAGS=wfbackendsqlite" >> $GITHUB_ENV
shell: bash
- name: Build dapr and its docker image
if: env.TEST_PREFIX != ''
run: |
make build
make docker-build
shell: bash
- name: Wait for Azure Container Registry deployment
timeout-minutes: 30
if: env.TEST_PREFIX != ''
run: |
until az acr show --name ${{ env.TEST_PREFIX }}acr --query "id"
do
echo "Azure Container Registry not ready yet: sleeping for 20 seconds"
sleep 20
done
shell: bash
- name: Login to Azure Container Registry
if: env.TEST_PREFIX != ''
run: |
az acr login --name ${{ env.TEST_PREFIX }}acr
shell: bash
- name: Push Dapr container images
if: env.TEST_PREFIX != ''
run: |
make docker-push
shell: bash
- name: Build and push perf test apps
if: env.TEST_PREFIX != ''
run: |
make build-push-perf-app-all
shell: bash
- name: Update PR comment for success
if: success() && env.PR_NUMBER != ''
uses: artursouza/sticky-pull-request-comment@v2.2.0
with:
header: ${{ github.run_id }}
number: ${{ env.PR_NUMBER }}
append: true
GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
message: |
          ## ✅ Build succeeded
- Image tag: `${{ env.DAPR_TAG }}`
- Test image tag: `${{ env.DAPR_TEST_TAG }}`
- name: Update PR comment for failure
if: failure() && env.PR_NUMBER != ''
uses: artursouza/sticky-pull-request-comment@v2.2.0
with:
header: ${{ github.run_id }}
number: ${{ env.PR_NUMBER }}
append: true
GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
message: |
          ## ❌ Build failed
Please check the logs for details on the error.
test-perf:
name: Perf tests
needs:
- build
- deploy-infrastructure
runs-on: ubuntu-latest
env:
GOOS: linux
GOARCH: amd64
AZURE_STORAGE_ACCOUNT: ${{ secrets.PERF_AZURE_STORAGE_ACCOUNT }}
AZURE_STORAGE_ACCESS_KEY: ${{ secrets.PERF_AZURE_STORAGE_KEY }}
DAPR_PERF_METRICS_PROMETHEUS_PUSHGATEWAY_URL: ${{ secrets.DAPR_PERF_METRICS_PROMETHEUS_PUSHGATEWAY_URL }}
DAPR_PERF_METRICS_PROMETHEUS_PUSHGATEWAY_USERNAME: ${{ secrets.DAPR_PERF_METRICS_PROMETHEUS_PUSHGATEWAY_USERNAME }}
DAPR_PERF_METRICS_PROMETHEUS_PUSHGATEWAY_PASSWORD: ${{ secrets.DAPR_PERF_METRICS_PROMETHEUS_PUSHGATEWAY_PASSWORD }}
PULL_POLICY: IfNotPresent
steps:
- name: Setup test output
run: |
export TEST_OUTPUT_FILE_PREFIX=$GITHUB_WORKSPACE/test_report
echo "TEST_OUTPUT_FILE_PREFIX=$TEST_OUTPUT_FILE_PREFIX" >> $GITHUB_ENV
shell: bash
- name: Set up log paths
run: |
echo "DAPR_CONTAINER_LOG_PATH=$GITHUB_WORKSPACE/container_logs/perf_tests" >> $GITHUB_ENV
echo "DAPR_TEST_LOG_PATH=$GITHUB_WORKSPACE/test_logs/perf_tests" >> $GITHUB_ENV
shell: bash
- name: Set up for manual runs
if: github.event_name == 'workflow_dispatch'
run: |
echo "CHECKOUT_REPO=${{ github.repository }}" >> $GITHUB_ENV
echo "CHECKOUT_REF=${{ github.ref }}" >> $GITHUB_ENV
if [ "${{ inputs.tests }}" != "" ] ; then
echo "DAPR_PERF_TEST=${{ inputs.tests }}" >> $GITHUB_ENV
fi
shell: bash
- name: Set up for scheduled test
if: github.event_name == 'schedule'
run: |
echo "CHECKOUT_REPO=${{ github.repository }}" >> $GITHUB_ENV
echo "CHECKOUT_REF=refs/heads/master" >> $GITHUB_ENV
shell: bash
- name: Set up for dispatched events
if: github.event_name == 'repository_dispatch'
uses: actions/github-script@v6.2.0
with:
github-token: ${{secrets.DAPR_BOT_TOKEN}}
script: |
const testPayload = context.payload.client_payload;
if (testPayload && testPayload.command == "ok-to-perf") {
let selectedTestsEnvVar = ""
if (testPayload?.args) {
selectedTestsEnvVar = `DAPR_PERF_TEST=${testPayload.args}\n`
}
var fs = require('fs');
// Set environment variables
fs.appendFileSync(process.env.GITHUB_ENV,
`${selectedTestsEnvVar}`+
`CHECKOUT_REPO=${testPayload.pull_head_repo}\n`+
`CHECKOUT_REF=${testPayload.pull_head_ref}\n`+
`PR_NUMBER=${testPayload.issue.number}`
);
}
- name: Check out code
if: env.CHECKOUT_REPO != ''
uses: actions/checkout@v4
with:
repository: ${{ env.CHECKOUT_REPO }}
ref: ${{ env.CHECKOUT_REF }}
- name: Set up Go
id: setup-go
uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
- uses: azure/setup-kubectl@v3
with:
version: ${{ env.KUBECTLVER }}
id: install
- name: Set up Helm ${{ env.HELMVER }}
uses: azure/setup-helm@v3
with:
version: ${{ env.HELMVER }}
- name: Login to Azure
if: env.CHECKOUT_REPO != ''
uses: azure/login@v1
with:
creds: ${{ secrets.AZURE_CREDENTIALS }}
- name: Build test prefix and set env vars
if: env.CHECKOUT_REPO != ''
run: |
BASE_STR="PERF|${GITHUB_SHA}|${GITHUB_SERVER_URL}|${GITHUB_REPOSITORY}|${GITHUB_RUN_ID}|${GITHUB_RUN_ATTEMPT}"
echo "Base string is ${BASE_STR}"
SUFFIX=$(echo $BASE_STR | sha1sum | head -c 10)
echo "Suffix is ${SUFFIX}"
TEST_PREFIX="daprprf${SUFFIX}"
echo "Test prefix is ${TEST_PREFIX}"
echo "TEST_PREFIX=${TEST_PREFIX}" >> $GITHUB_ENV
echo "DAPR_REGISTRY=${TEST_PREFIX}acr.azurecr.io" >> $GITHUB_ENV
echo "TEST_CLUSTER=${TEST_PREFIX}-aks" >> $GITHUB_ENV
echo "DAPR_TAG=${TEST_PREFIX}" >> $GITHUB_ENV
echo "DAPR_TEST_TAG=${TEST_PREFIX}" >> $GITHUB_ENV
echo "TEST_RESOURCE_GROUP=Dapr-Perf-${TEST_PREFIX}" >> $GITHUB_ENV
shell: bash
- name: Connect to Kubernetes
if: env.TEST_PREFIX != ''
run: |
az aks get-credentials -n "${{ env.TEST_CLUSTER }}" -g "${{ env.TEST_RESOURCE_GROUP }}"
kubectl create namespace ${{ env.DAPR_NAMESPACE }}
shell: bash
- name: Preparing AKS cluster for test
if: env.TEST_PREFIX != ''
run: |
make setup-helm-init
make setup-test-env
kubectl get pods -n ${{ env.DAPR_NAMESPACE }}
- name: Deploy dapr to AKS cluster
if: env.TEST_PREFIX != ''
env:
ADDITIONAL_HELM_SET: "dapr_operator.logLevel=debug,dapr_operator.watchInterval=20s,dapr_dashboard.enabled=false"
run: make docker-deploy-k8s
- name: Deploy test components
if: env.TEST_PREFIX != ''
run: make setup-test-components
- name: Run all performance tests
if: env.TEST_PREFIX != ''
run: make test-perf-all
- name: Save control plane logs
if: always() && env.TEST_PREFIX != ''
run: |
make save-dapr-control-plane-k8s-logs
- name: Upload container logs
if: always() && env.TEST_PREFIX != ''
uses: actions/upload-artifact@v4
with:
name: perf_container_logs
path: ${{ env.DAPR_CONTAINER_LOG_PATH }}
- name: Upload test logs
if: always()
uses: actions/upload-artifact@v4
with:
name: perf_test_logs
path: ${{ env.DAPR_TEST_LOG_PATH }}
- name: Upload test results
if: always()
uses: actions/upload-artifact@v4
with:
#TODO: .json suffix can be removed from artifact name after test analytics scripts are updated
name: test_perf.json
path: ${{ env.TEST_OUTPUT_FILE_PREFIX }}_perf*.*
- name: Add job test summary
if: always()
uses: test-summary/action@v2
with:
paths: ${{ env.TEST_OUTPUT_FILE_PREFIX }}_perf*.xml
- name: Add job test outputs
if: always()
uses: actions/github-script@v6
with:
script: |
const script = require('./.github/scripts/dapr_tests_summary.js')
await script({core, glob})
- name: Update PR comment for success
if: success() && env.PR_NUMBER != ''
uses: artursouza/sticky-pull-request-comment@v2.2.0
with:
header: ${{ github.run_id }}
number: ${{ env.PR_NUMBER }}
append: true
GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
message: |
          ## ✅ Perf tests succeeded
- Image tag: `${{ env.DAPR_TAG }}`
- Test image tag: `${{ env.DAPR_TEST_TAG }}`
- name: Update PR comment for failure
if: failure() && env.PR_NUMBER != ''
uses: artursouza/sticky-pull-request-comment@v2.2.0
with:
header: ${{ github.run_id }}
number: ${{ env.PR_NUMBER }}
append: true
GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
message: |
          ## ❌ Perf tests failed
Please check the logs for details on the error.
- name: Update PR comment for cancellation
if: cancelled() && env.PR_NUMBER != ''
uses: artursouza/sticky-pull-request-comment@v2.2.0
with:
header: ${{ github.run_id }}
number: ${{ env.PR_NUMBER }}
append: true
GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
message: |
          ## ⚠️ Perf tests cancelled
The Action has been canceled
cleanup:
name: Clean up Azure resources
runs-on: ubuntu-latest
needs:
- test-perf
if: always()
steps:
- name: Login to Azure
uses: azure/login@v1
with:
creds: ${{ secrets.AZURE_CREDENTIALS }}
- name: Build test prefix
run: |
BASE_STR="PERF|${GITHUB_SHA}|${GITHUB_SERVER_URL}|${GITHUB_REPOSITORY}|${GITHUB_RUN_ID}|${GITHUB_RUN_ATTEMPT}"
echo "Base string is ${BASE_STR}"
SUFFIX=$(echo $BASE_STR | sha1sum | head -c 10)
echo "Suffix is ${SUFFIX}"
TEST_PREFIX="daprprf${SUFFIX}"
echo "Test prefix is ${TEST_PREFIX}"
echo "TEST_PREFIX=${TEST_PREFIX}" >> $GITHUB_ENV
shell: bash
- name: Delete cluster
run: |
# We are not waiting for these commands to complete, and we're ignoring errors
echo "Starting removal of resource group Dapr-Perf-${TEST_PREFIX}"
az group delete --no-wait --yes --name "Dapr-Perf-${TEST_PREFIX}" || true
shell: bash
| mikeee/dapr | .github/workflows/dapr-perf.yml | YAML | mit | 21,859 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
name: dapr-release-notes
on:
workflow_dispatch:
jobs:
build:
name: Generate release notes
runs-on: ubuntu-latest
steps:
- name: Checkout repo
uses: actions/checkout@v4
- name: Install dependencies
run: pip install PyGithub
- name: Generate release notes
env:
GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
run: python ./.github/scripts/generate_release_notes.py
- name: Commit and push to branch
env:
GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
COMMIT_MSG: |
Generating Dapr release notes.
skip-checks: true
run: |
        git config user.email "daprweb@microsoft.com"
        git config user.name "Dapr Bot"
# Update origin with token
git remote set-url origin https://x-access-token:${GITHUB_TOKEN}@github.com/${GITHUB_REPOSITORY}.git
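        # Note: REL_BRANCH and REL_VERSION are expected to be provided by the environment
        # (e.g. by the release tooling); they are not defined in this workflow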
git checkout ${REL_BRANCH} || git checkout master
echo "BASE_BRANCH=$(git branch --show-current)" >> $GITHUB_ENV
git checkout -b pr-release-notes-${REL_VERSION}
git add .
# Only commit and push if we have changes
git diff --quiet && git diff --staged --quiet || (git commit -m "${COMMIT_MSG}"; git push --force origin pr-release-notes-${REL_VERSION})
- name: Create pull request
env:
GITHUB_TOKEN: ${{ secrets.DAPR_BOT_TOKEN }}
run: |
gh pr create --title "Create release notes for ${{ env.REL_VERSION }}." --body "Release notes." --base ${{ env.BASE_BRANCH }}
| mikeee/dapr | .github/workflows/dapr-release-notes.yml | YAML | mit | 2,143 |
#
# Copyright 2023 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
name: dapr-standalone-validation
on:
# Manual trigger
workflow_dispatch:
push:
branches:
- master
- release-*
- feature/*
tags:
- v*
pull_request:
branches:
- master
- release-*
- feature/*
jobs:
validate-virt-mem:
name: Standalone validations
runs-on: ubuntu-latest
env:
GOOS: linux
GOARCH: amd64
DAPR_INSTALL_URL: https://raw.githubusercontent.com/dapr/cli/master/install/install.sh
steps:
- name: Install required packages
run: pip3 install scipy psutil
      # This is a case where we always want to compare against the latest stable release.
# Pinning to a given Dapr version does not add any value in this workflow.
- name: Install latest Dapr CLI
run: wget -q ${{ env.DAPR_INSTALL_URL }} -O - | /bin/bash
- name: Initialize with latest Dapr runtime
run: dapr init --slim
- name: Check out code
uses: actions/checkout@v4
with:
repository: ${{ github.repository }}
ref: ${{ github.ref }}
- name: Set up Go
id: setup-go
uses: actions/setup-go@v5
with:
go-version-file: "go.mod"
- name: Build Dapr's sidecar
run: |
git status
make ./dist/${GOOS}_${GOARCH}/release/daprd
- name: "Check out code at ${{ github.base_ref }} for PR validation"
if: github.event_name == 'pull_request'
uses: actions/checkout@v4
with:
repository: ${{ github.repository }}
ref: ${{ github.base_ref }}
path: .baseline
- name: "Build and override daprd from ${{ github.base_ref }}"
if: github.event_name == 'pull_request'
run: |
cd .baseline
git status
make ./dist/${GOOS}_${GOARCH}/release/daprd
mkdir -p $HOME/.dapr/bin/
cp dist/${GOOS}_${GOARCH}/release/daprd $HOME/.dapr/bin/daprd
cd ..
- name: Validate sidecar's basic resource utilization
env:
SECONDS_FOR_PROCESS_TO_RUN: 30
LIMIT_DELTA_BINARY_SIZE: 7168 # KB (7 MB)
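      # The script compares the daprd built from this ref against the baseline copied to
      # $HOME/.dapr/bin in the previous steps, using the env values above as limits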
run: ./.github/scripts/validate_sidecar_resources.py
| mikeee/dapr | .github/workflows/dapr-standalone-validation.yml | YAML | mit | 2,788 |
#
# Copyright 2022 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
name: dapr-sync-feature-branches
on:
workflow_dispatch:
push:
branches:
- master
jobs:
merge:
name: Merge master into feature branches
runs-on: ubuntu-latest
strategy:
matrix:
branch: [feature/workflows]
steps:
- name: Merge master -> ${{ matrix.branch }}
# artursouza/merge-branch is a fork of devmasx/merge-branch
# to minimize risk of a 3rd party repo running arbitrary code with our creds
uses: artursouza/merge-branch@v1.4.0
with:
type: now
from_branch: master
target_branch: ${{ matrix.branch }}
github_token: ${{ secrets.DAPR_BOT_TOKEN }}
| mikeee/dapr | .github/workflows/dapr-sync-feature-branches.yml | YAML | mit | 1,253 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Required secrets:
# - AZURE_CREDENTIALS: JSON object containing the Azure service principal credentials. Docs: https://github.com/Azure/login#configure-a-service-principal-with-a-secret
name: Cleanup Azure test resources
on:
# Run every 6 hours
schedule:
- cron: '30 */6 * * *'
# Manual trigger
workflow_dispatch:
jobs:
cleanup:
runs-on: ubuntu-latest
steps:
- name: Login to Azure
uses: azure/login@v1
with:
creds: ${{ secrets.AZURE_CREDENTIALS }}
- name: "Run cleanup script"
run: |
DELETE_BEFORE_DATE=$(date --date '4 hours ago' +'%s')
QUERY="[?(starts_with(name, 'Dapr-Perf-') || starts_with(name, 'Dapr-E2E-') && properties.provisioningState != 'Deleting' && tags.date != '')].{name: name, id: id, date: tags.date}"
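        # Note: in JMESPath '&&' binds tighter than '||', so the provisioningState and tag
        # filters only constrain the 'Dapr-E2E-' branch of the expression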
LINES=$(az group list --query "$QUERY" | jq -c ".[]")
(
# Run in a sub-shell so we can modify IFS safely
IFS=$'\n'
for line in $(az group list --query "$QUERY" | jq -c '.[]')
do
RG_DATE_STR=$(echo "$line" | jq -r ".date")
if [ "$RG_DATE_STR" != "null" ]; then
RG_DATE=$(date --date "$RG_DATE_STR" +'%s')
RG_NAME=$(echo "$line" | jq -r ".name")
if [ $RG_DATE -lt $DELETE_BEFORE_DATE ] ; then
echo "DELETING Resource Group ${RG_NAME} that was deployed on ${RG_DATE_STR}"
az group delete --no-wait --yes --name "${RG_NAME}" || true
else
echo "Keeping Resource Group ${RG_NAME} that was deployed on ${RG_DATE_STR}"
fi
fi
done
)
shell: bash
| mikeee/dapr | .github/workflows/dapr-test-azure-cleanup.yml | YAML | mit | 2,309 |
#
# Copyright 2021 The Dapr Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This workflow is triggered manually and deletes all images in the E2E image cache
#
# Required secrets:
# - AZURE_CREDENTIALS: JSON object containing the Azure service principal credentials. Docs: https://github.com/Azure/login#configure-a-service-principal-with-a-secret
name: dapr-test-cache-clean
on:
# Manual trigger
workflow_dispatch:
jobs:
cleanup:
runs-on: ubuntu-22.04
steps:
- name: Login to Azure
uses: azure/login@v1
with:
creds: ${{ secrets.AZURE_CREDENTIALS }}
- name: "Clean E2E image cache"
run: |
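        # Enumerate every repository in the dapre2eacr cache registry and delete each one,
        # removing all cached E2E test images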
for repo in $(az acr repository list -n dapre2eacr -o tsv); do
echo "Deleting: $repo"
az acr repository delete --repository "$repo" -n dapre2eacr --yes
done
shell: bash
| mikeee/dapr | .github/workflows/dapr-test-cache-clean.yml | YAML | mit | 1,381 |