id (string, 4–10 chars) | text (string, 4–2.14M chars) | source (2 classes) | created (timestamp[s], 2001-05-16 21:05:09 to 2025-01-01 03:38:30) | added (string date, 2025-04-01 04:05:38 to 2025-04-01 07:14:06) | metadata (dict)
---|---|---|---|---|---|
2393235436
|
Unable to access trace information using the opentelemetry-js SDK + AWS Lambda layer
What happened?
We are unable to access the trace context as shown here: https://opentelemetry.io/docs/languages/js/propagation/#generic-example
We want to propagate trace context manually to other services such as SNS, SQS, and Kafka.
We can even see traces working correctly in X-Ray, since the example accesses another service (S3) and the trace is visualized.
Steps to Reproduce
Use only the OTel API
Use the AWS Lambda layer => "arn:aws:lambda:us-east-1:901920570463:layer:aws-otel-nodejs-amd64-ver-1-18-1:1"
Execute the Lambda code and access trace info using the OTel SDK API.
Expected Result
Access to the current trace information using the OTel SDK API
Actual Result
Empty trace information returned by the OTel SDK API
Additional Details
IaC code
resource "aws_iam_role" "lambda_role" {
name = "CreateFnRole"
assume_role_policy = jsonencode(
{
"Version" : "2012-10-17",
"Statement" : [
{
"Action" : "sts:AssumeRole",
"Principal" : {
"Service" : "lambda.amazonaws.com"
},
"Effect" : "Allow",
"Sid" : ""
}
]
})
managed_policy_arns = [
"arn:aws:iam::aws:policy/AmazonS3FullAccess",
"arn:aws:iam::aws:policy/AWSLambda_FullAccess",
"arn:aws:iam::aws:policy/CloudWatchFullAccess",
"arn:aws:iam::aws:policy/AmazonDynamoDBFullAccess",
"arn:aws:iam::aws:policy/AWSXrayFullAccess"
]
inline_policy {
name = "CreateFnPolicy"
policy = jsonencode(
{
"Version" : "2012-10-17",
"Statement" : [
{
"Action" : [
"appconfig:*",
"kms:*"
],
"Resource" : "*",
"Effect" : "Allow"
}
]
},
)
}
}
variable "service_name" {}
variable "env" {}
data "archive_file" "zip" {
source_dir = "../app/dist/functions/create"
output_path = "../app/dist/create.zip"
type = "zip"
}
resource "aws_lambda_function" "create_fnc" {
function_name = "${var.env}-${var.service_name}_create"
runtime = "nodejs18.x"
filename = data.archive_file.zip.output_path
source_code_hash = data.archive_file.zip.output_base64sha256
handler = "handler.main"
role = aws_iam_role.lambda_role.arn
memory_size = 256
tracing_config {
mode = "Active"
}
environment {
variables = {
AWS_LAMBDA_EXEC_WRAPPER : "/opt/otel-handler"
SERVICE_NAME = var.service_name
ENV = var.env
OTEL_TRACES_EXPORTER = "oltp"
OTEL_METRICS_EXPORTER = "oltp"
OTEL_LOG_LEVEL = "ERROR"
OTEL_TRACES_SAMPLER = "xray"
OTEL_PROPAGATORS = "tracecontext, baggage, xray-lambda"
OTEL_SERVICE_NAME = var.service_name
}
}
layers = [
"arn:aws:lambda:us-east-1:901920570463:layer:aws-otel-nodejs-amd64-ver-1-18-1:1"
]
}
output "create_fn_arn" {
value = aws_lambda_function.create_fnc.arn
}
OpenTelemetry Setup Code
import "reflect-metadata"
import {Context} from 'aws-lambda';
import {Logger} from '@aws-lambda-powertools/logger';
import {LambdaInterface} from '@aws-lambda-powertools/commons/types';
import middy from '@middy/core';
import {ListBucketsCommand, S3Client} from '@aws-sdk/client-s3';
import {inject, injectable, singleton} from "tsyringe";
import api, {context, propagation} from '@opentelemetry/api';
const logger = new Logger();
const s3 = new S3Client({});
@injectable()
@singleton()
class Handler implements LambdaInterface {
constructor() {
}
public async handler(_event: any, _context: Context): Promise<any> {
logger.info(`==> currentTrace: ${currentSpan()} | ${JSON.stringify(_event)}`);
const result = await s3.send(new ListBucketsCommand({}));
logger.info(`context => ${JSON.stringify(api.context.active())} || ${JSON.stringify(currentContext())}`);
if (api.context.active()) {
const span = api.trace.getSpan(api.context.active());
logger.info(`span => ${span}`);
if (span) {
logger.info(`WHAT HAPPENED??? ==> ${JSON.stringify(span)}`);
}
}
return {
result: 'hello world => ' + result.Buckets.length,
};
}
}
const main = middy()
.handler(new Handler().handler)
module.exports = {main};
export function currentContext(): Record<string, any> {
const output = {};
propagation.inject(context.active(), output);
return output;
}
export function currentSpan(): Record<string, string> | null {
if (!api.context.active() || !api.trace.getSpan(api.context.active())) {
return null;
}
let currentSpan = api.trace.getSpan(api.context.active());
return {
// @ts-ignore
traceId: currentSpan.spanContext().traceId,
// @ts-ignore
spanId: currentSpan.spanContext().spanId,
// @ts-ignore
traceFlags: currentSpan.spanContext().traceFlags
}
}
package.json
{
"name": "hello_world",
"version": "1.0.0",
"description": "hello world sample for NodeJS",
"main": "app.js",
"repository": "https://github.com/aws/aws-sam-cli-app-templates/tree/master/nodejs18.x/hello-ts-pt",
"author": "SAM CLI",
"license": "MIT",
"dependencies": {
"@aws-lambda-powertools/commons": "^2.1.1",
"@aws-lambda-powertools/logger": "^2.0.3",
"@aws-lambda-powertools/metrics": "^2.0.3",
"@aws-lambda-powertools/parameters": "^2.2.0",
"@aws-lambda-powertools/tracer": "^2.0.3",
"@aws-sdk/client-s3": "^3.592.0",
"@middy/appconfig": "^5.4.0",
"@opentelemetry/api": "^1.9.0",
"esbuild": "^0.17.6",
"reflect-metadata": "^0.1.13",
"tiny-glob": "^0.2.9",
"tsyringe": "4.8.0"
},
"scripts": {
"unit": "jest",
"lint": "eslint '*.ts' --quiet --fix",
"compile": "tsc",
"test": "npm run compile && npm run unit",
"build": "esbuild src/functions/**/*.ts --bundle --minify --outdir=dist --outbase=src --sourcemap=inline --platform=node --target=node18.16.1 "
},
"devDependencies": {
"@aws-sdk/client-appconfigdata": "^3.598.0",
"@jest/globals": "^29.4.0",
"@middy/core": "^5.4.0",
"@middy/util": "^5.4.0",
"@types/aws-lambda": "^8.10.109",
"@types/jest": "^29.4.0",
"@types/node": "^18.13.0",
"@typescript-eslint/eslint-plugin": "^5.46.1",
"@typescript-eslint/parser": "^5.46.1",
"eslint": "^8.30.0",
"eslint-config-prettier": "^8.3.0",
"eslint-plugin-prettier": "^4.0.0",
"jest": "^29.3.1",
"prettier": "^2.5.1",
"ts-jest": "^29.0.5",
"ts-node": "^10.9.1",
"typescript": "^4.9.4"
}
}
Relevant log output
{
"level": "INFO",
"message": "==> 10 currentTrace: null | {\"go\":1}",
"sampling_rate": 0,
"service": "service_undefined",
"timestamp": "2024-07-05T23:26:10.195Z",
"xray_trace_id": "1-6688810f-a224dfbcdd26280c3fd8f8c6"
}
{
"level": "INFO",
"message": "context => {\"_currentContext\":{}} || {}",
"sampling_rate": 0,
"service": "service_undefined",
"timestamp": "2024-07-05T23:26:10.358Z",
"xray_trace_id": "1-6688810f-a224dfbcdd26280c3fd8f8c6"
}
@mbrevda
It is not the same case. As I mentioned, we can visualize traces in X-Ray using the OTel layer.
Our main problem is that we cannot access information about the current trace using the OTel SDK API.
We require that information for other purposes, such as logging and manual propagation,
so we are stuck with this issue.
Regards
|
gharchive/issue
| 2024-07-05T23:35:05 |
2025-04-01T04:35:19.264747
|
{
"authors": [
"jarpz"
],
"repo": "open-telemetry/opentelemetry-js",
"url": "https://github.com/open-telemetry/opentelemetry-js/issues/4851",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
469486372
|
Skeleton: Add automatic-tracer package
Updates https://github.com/open-telemetry/opentelemetry-js/issues/91
Codecov Report
Merging #107 into master will not change coverage.
The diff coverage is n/a.
@@ Coverage Diff @@
## master #107 +/- ##
=======================================
Coverage 98.67% 98.67%
=======================================
Files 21 21
Lines 1511 1511
Branches 173 173
=======================================
Hits 1491 1491
Misses 20 20
|
gharchive/pull-request
| 2019-07-17T22:52:59 |
2025-04-01T04:35:19.267917
|
{
"authors": [
"codecov-io",
"mayurkale22"
],
"repo": "open-telemetry/opentelemetry-js",
"url": "https://github.com/open-telemetry/opentelemetry-js/pull/107",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
2565826424
|
Add documentation for scope manager omission (ot-shim: document the omission of scope manager #2700)
Which problem is this PR solving?
This PR addresses the missing documentation related to the omission of the "Scope Manager" in the OpenTelemetry shim for OpenTracing. The scope manager was never implemented, and this PR adds a note about that omission to inform developers.
Fixes #2700.
Short description of the changes
Added a new documentation file (SCOPE_MANAGER_OMISSION.md) that describes the omission of the scope manager in the OpenTelemetry shim for OpenTracing.
The documentation informs developers that there is no support for a scope manager and provides context to avoid confusion.
Type of change
[x] This change requires a documentation update
How Has This Been Tested?
This is a documentation-only change, so no tests were required.
[x] No code changes to test
Checklist:
[x] Followed the style guidelines of this project
[ ] Unit tests have been added (Not applicable since it's a documentation update)
[x] Documentation has been updated
@Asheklm6 thanks for opening this PR. 🙂
Would you mind adding this documentation to packages/opentelemetry-shim-opentracing/README.md instead?
I think there it could be more visible to users.
|
gharchive/pull-request
| 2024-10-04T08:49:58 |
2025-04-01T04:35:19.272146
|
{
"authors": [
"Asheklm6",
"pichlermarc"
],
"repo": "open-telemetry/opentelemetry-js",
"url": "https://github.com/open-telemetry/opentelemetry-js/pull/5039",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
1774850563
|
fixing API refs for beta10
cleaning up the last couple of breaks not included in #171
Monolog tests are failing
Fixed by https://github.com/open-telemetry/opentelemetry-php/pull/1057, which I need to tag. Will do that and confirm monolog tests go green before I merge
|
gharchive/pull-request
| 2023-06-26T13:56:42 |
2025-04-01T04:35:19.273982
|
{
"authors": [
"brettmc",
"pdelewski"
],
"repo": "open-telemetry/opentelemetry-php-contrib",
"url": "https://github.com/open-telemetry/opentelemetry-php-contrib/pull/173",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
806814803
|
need more control over packet_count of a flow
FYI, I have not tested this in snappi; the issue was found with tgen, assuming the implementation is the same.
If you have a traffic item with 16 flow groups and configure the tgen.flow to run until 10k packets are sent (duration=Duration(FixedPackets(packets=packet_count))), the traffic item is actually set to a 'Fixed Packet Count' of 625 (10k / 16 flow groups). This is fine for the scenario where the user wants a total of 10k packets sent no matter the flows/ports.
But we need to consider the scenario where the user actually wants to know how many packets will be sent per port (which I believe will be more common). This user would expect that when setting packet_count=10k, they will see 10k per traffic item (and from that they could determine the per-port distribution). Right now this user would have to do the reverse math: if I want to send 10k per flow group, then I need to configure tgen to send 10k x flow groups...
So to overcome this, instead of having tgen silently do the math, it should take the packet_count as is and expose a setting for frame count distribution (and let the user decide how to split this count among flow groups).
The intent of the models is that a user specifies a packet count per flow. This means that if there is one flow and a packet count of 100 packets, that is exactly what will be sent out; it is an aggregate.
There is no concept of traffic items, flow groups etc as those are implementation specific.
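To make the intended semantics concrete, here is a minimal snappi sketch (the controller location and flow name are illustrative, not from this issue):
import snappi

# packet_count is an aggregate for the flow: exactly 10k packets total,
# however an implementation fans the flow out into flow groups internally.
api = snappi.api(location="https://localhost:8443")
cfg = api.config()
flow = cfg.flows.flow(name="f1")[-1]
flow.duration.fixed_packets.packets = 10000
api.set_config(cfg)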
|
gharchive/issue
| 2021-02-11T23:06:39 |
2025-04-01T04:35:19.315612
|
{
"authors": [
"ajbalogh",
"dgalan-xxia"
],
"repo": "open-traffic-generator/snappi",
"url": "https://github.com/open-traffic-generator/snappi/issues/48",
"license": "mit",
"license_type": "permissive",
"license_source": "bigquery"
}
|
1027418462
|
PWG: Voice Privacy Guidelines 1.0 - Iteration 2 (OVON friends and family)
Closing to add new 2022 deliverables + milestones.
|
gharchive/issue
| 2021-10-15T13:00:43 |
2025-04-01T04:35:19.316601
|
{
"authors": [
"oitacoleman"
],
"repo": "open-voice-network/docs",
"url": "https://github.com/open-voice-network/docs/issues/274",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
459529739
|
[Testing] a11y suite for testing
Fixes #512 - accessibility tests for web components.
This PR adds a11y tests via accessibility-developer-tools. It creates a new helper function, a11ySuite, used to perform a11y tests. It accepts 3 arguments: a test name, a template string or element instance to test, and an optional list of rules to ignore.
Example use:
import { a11ySuite } from '@open-wc/testing';
describe('<my-component>', () => {
a11ySuite('Suite name', `Template string or element instance`, ['list of tests to ignore']);
});
Thank you for your submission, we really appreciate it. Like many open source projects, we ask that you sign our Contributor License Agreement before we can accept your contribution. You have signed the CLA already but the status is still pending? Let us recheck it.
Hi @jarrodek, thanks for this submission. After some conversation, we'd prefer that a11y testing move forward with the help of axe-core rather than accessibility-developer-tools. If you are interested in updating this PR with that preference, I'd be happy to review what you come up with. If not the conversation in #512 will be the basis for future additions in this area and we'd love to have your input on what we're planning there. Thanks, again!
Hey,
Sorry for the late response. I will take a look at axe-core. I am already using a similar module with my components, and I am trying to standardize on @open-wc (or at least something based on it) in my organization. I'll be happy to help with development.
I have updated the PR as suggested in https://github.com/open-wc/open-wc/issues/512#issuecomment-513746605
Now the a11y testing is a plugin used by @open-wc/testing. It provides an interface for the Assert and Should libraries to test for accessibility. The API is documented in packages/testing-a11y.
Please, let me know what you think.
uh that is super awesome 💪
there were just some outdated files, and master-to-master syncing is always annoying, so I moved it into a separate branch 👍
I also renamed it to chai-a11y-axe as it's more general than just for web components 👍
it is still under your name so please take a look at https://github.com/open-wc/open-wc/pull/620 and say if that is ok :)
also feel free to fork this branch and create your own branch and make an extra PR if there are more changes you wanna do 💪
very nice work - thaaankkk you 🤗
PS: closing this in favour of https://github.com/open-wc/open-wc/pull/620
|
gharchive/pull-request
| 2019-06-23T02:40:10 |
2025-04-01T04:35:19.326383
|
{
"authors": [
"CLAassistant",
"Westbrook",
"daKmoR",
"jarrodek"
],
"repo": "open-wc/open-wc",
"url": "https://github.com/open-wc/open-wc/pull/513",
"license": "MIT",
"license_type": "permissive",
"license_source": "github-api"
}
|
1337506387
|
List for translating fake IP addresses into the TCP/IP addresses of the SDN laboratory
The CONFIGfile contains all data necessary to establish the connections required for properly operating the application.
TCP/IP addresses, operation names, and the application release number might depend on the environment (e.g. SDN test laboratory or live network) in which the application gets instantiated.
As a consequence, the CONFIGfile has to be adapted to the environment before instantiation.
This adaptation shall be automated.
To support the automation, fake IP addresses have to be put into the CONFIGfile during specification.
Fake IP addresses will be replaced by environment-specific addresses while creating the container.
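A minimal sketch of the kind of replacement step described above (the file path and mapping are hypothetical):
# Hypothetical translation list: fake IP address -> SDN laboratory address.
TRANSLATIONS = {
    "1.2.3.4:3000": "10.0.0.17:3000",
}

def translate_configfile(path: str) -> None:
    """Replace fake IP addresses in the CONFIGfile with lab-specific ones."""
    with open(path) as f:
        text = f.read()
    for fake, real in TRANSLATIONS.items():
        text = text.replace(fake, real)
    with open(path, "w") as f:
        f.write(text)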
[x] List for translating fake IP addresses into the TCP/IP addresses of the SDN laboratory to be created and to be maintained.
[x] List has to cover current release numbers and operations, too.
[x] Responsibility for updating the list has to be defined and documented.
Assign to vanithavalluripalli9
“A list that consists of the application-name, release-number, original-ip-address, original-port, and original-protocol for the lab environment is maintained in the CICD pipeline to change the config file details”
It is implemented as per the requirement and is being used in the current environment.
|
gharchive/issue
| 2022-08-12T17:21:48 |
2025-04-01T04:35:19.340035
|
{
"authors": [
"Rajithapitta",
"openBackhaul"
],
"repo": "openBackhaul/ApplicationPattern",
"url": "https://github.com/openBackhaul/ApplicationPattern/issues/245",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
2376911823
|
show measurements should return retention policy information
Is your feature request related to a problem? Please describe.
Currently, the show measurements statement returns measurements from all retention policies without the retention policy information. This results in two issues:
it is hard to understand which retention policy a measurement belongs to;
if measurements with the same name exist in different retention policies, only one will be listed.
Describe the solution you'd like
include retention policy information in the output of show measurements
Describe alternatives you've considered
No response
Additional context
No response
I propose that the show measurements command return more information, such as column definitions, shard keys (I found there's an undocumented show measurement schema, but it seems unusable), etc.
Currently, users need to merge the output of two or more commands to get this information, which is not user friendly and also makes these commands less useful.
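For reference, the current behavior can be observed over the InfluxDB-compatible HTTP API; a minimal sketch (host, port, and database name are assumptions):
import requests

# Today this returns measurement names only, with no retention policy column.
resp = requests.get(
    "http://localhost:8086/query",
    params={"db": "mydb", "q": "show measurements"},
)
print(resp.json())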
This does not affect InfluxDB compatibility; the change would be made to the show measurements command itself.
Hi @localvar, thanks for proposing this issue. Considering compatibility with InfluxDB, I think it would be better not to change the return value of SHOW MEASUREMENTS. Instead, we could use a new command that returns more detailed schema information. I've proposed a possible implementation in discussion https://github.com/openGemini/openGemini/discussions/706; maybe you could take a look and give us advice there?
|
gharchive/issue
| 2024-06-27T03:41:59 |
2025-04-01T04:35:19.350822
|
{
"authors": [
"StepY1aoZz",
"localvar",
"xiangyu5632"
],
"repo": "openGemini/openGemini",
"url": "https://github.com/openGemini/openGemini/issues/640",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
2382907386
|
API output is inconsistent
Describe the bug
When using the HTTP API, the type of the replicaN field is a string in the show databases detail command, while it is an integer in the show retention policies command. This is not a big issue, but making them consistent would be better.
output of show databases detail:
{
    "results": [
        {
            "statement_id": 0,
            "series": [
                {
                    "name": "databases",
                    "columns": [
                        "name",
                        "ReplicaN",
                        "Tag Attribute"
                    ],
                    "values": [
                        [
                            "NOAA_water_database",
                            "1",
                            "default"
                        ]
                    ]
                }
            ]
        }
    ]
}
output of show retention policies:
{
    "results": [
        {
            "statement_id": 0,
            "series": [
                {
                    "columns": [
                        "name",
                        "duration",
                        "shardGroupDuration",
                        "hot duration",
                        "warm duration",
                        "index duration",
                        "replicaN",
                        "default"
                    ],
                    "values": [
                        [
                            "autogen",
                            "0s",
                            "168h0m0s",
                            "0s",
                            "0s",
                            "168h0m0s",
                            1,
                            true
                        ]
                    ]
                }
            ]
        }
    ]
}
To Reproduce
No response
Expected behavior
The type of the replicaN field should always be an integer.
Screenshots
No response
Logs
No response
Additional context
No response
By the way, the column name style of the output is really diverse:
lower camel case: name, shardGroupDuration, replicaN, ...
upper camel case: ReplicaN
space case: hot duration, ...
upper space case: Tag Attribute
Also, shardGroupDuration is called shard duration in the create retention policy command.
These inconsistencies could cause a lot of confusion for users.
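Until the types and names are unified, clients have to normalize the field themselves; a minimal workaround sketch (helper names are ours):
def column_value(series: dict, column: str):
    """Return the first value of a named column in an InfluxDB-style series."""
    idx = series["columns"].index(column)
    return series["values"][0][idx]

def replican_as_int(series: dict) -> int:
    # Accept either column spelling ("ReplicaN"/"replicaN") and either
    # value type (the string "1" or the integer 1).
    column = "ReplicaN" if "ReplicaN" in series["columns"] else "replicaN"
    return int(column_value(series, column))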
|
gharchive/issue
| 2024-07-01T05:58:53 |
2025-04-01T04:35:19.356659
|
{
"authors": [
"localvar"
],
"repo": "openGemini/openGemini",
"url": "https://github.com/openGemini/openGemini/issues/641",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
2252667669
|
The codespell GitHub action seems to ignore the .codespellrc file
so we need to modify the action config file
Thanks for making the adjustments! Could you also include the errors from PR #43? I went through those as well, and all of them should also be ignored (they are the abbreviations of the brain regions from the atlas).
ok, I've added those cases. Please go ahead and merge if you're happy with it.
|
gharchive/pull-request
| 2024-04-19T10:44:52 |
2025-04-01T04:35:19.374399
|
{
"authors": [
"UlrikeS91",
"apdavison"
],
"repo": "openMetadataInitiative/openMINDS_instances",
"url": "https://github.com/openMetadataInitiative/openMINDS_instances/pull/48",
"license": "MIT",
"license_type": "permissive",
"license_source": "github-api"
}
|
1928825704
|
Generalize Record Definition
There is a desire to generalize the record definition to allow:
adding a sub-directory that can contain "heavy" meta-data, such as non-uniform grid-spacing (of regular meshes)
adding mesh-refinement levels
...
I like the non-uniform grid-spacing idea
|
gharchive/issue
| 2023-10-05T18:22:25 |
2025-04-01T04:35:19.379837
|
{
"authors": [
"ax3l",
"pordyna"
],
"repo": "openPMD/openPMD-standard",
"url": "https://github.com/openPMD/openPMD-standard/issues/283",
"license": "CC-BY-4.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
368747372
|
Link suggestion for address points of us/ga/clayon (old link failed)
In order to replace us/ga/clayon, which has failed several times, I want to contribute a candidate link that might be the right one.
Please check out:
https://weba.co.clayton.ga.us:5443/server/rest/services/Reference/AddressPoints/MapServer/0
thanks a ton for finding a replacement source! We depend heavily on folks having a few seconds to go out and find new sources, as they occasionally get moved around.
I've created a PR here to update this source: https://github.com/openaddresses/openaddresses/pull/4232
Provided it passes we will include the new data in our next run.
|
gharchive/issue
| 2018-10-10T16:23:48 |
2025-04-01T04:35:19.437917
|
{
"authors": [
"eugeneYWang",
"ingalls"
],
"repo": "openaddresses/openaddresses",
"url": "https://github.com/openaddresses/openaddresses/issues/4230",
"license": "BSD-3-Clause",
"license_type": "permissive",
"license_source": "github-api"
}
|
1524712783
|
DMs
Can I modify it to support DMs?
You can; in the permission check you can remove the guild requirement.
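A hedged sketch of the shape of such a check (names are illustrative, not the repo's actual code):
import discord

def is_allowed(interaction: discord.Interaction) -> bool:
    # The bot only responds inside a guild; removing this
    # check would let DMs through.
    if interaction.guild is None:
        return False
    return True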
|
gharchive/issue
| 2023-01-08T22:43:06 |
2025-04-01T04:35:19.438799
|
{
"authors": [
"YufeiG",
"mohamedmoez123"
],
"repo": "openai/gpt-discord-bot",
"url": "https://github.com/openai/gpt-discord-bot/issues/12",
"license": "MIT",
"license_type": "permissive",
"license_source": "github-api"
}
|
2580419602
|
Adds GPT action cookbook about connecting to Google Ads real-time reporting data
Summary
This PR aims to expand the cookbook library on GPT actions by adding a new one about Google Ads. This cookbook explains how to connect Google Ads reporting data to ChatGPT via Adzviser, a 3rd-party middleware that retrieves and transforms the Google Ads data for the user.
Motivation
There are many marketers who use ChatGPT on a daily basis today. Some upload reporting data files downloaded from the Google Ads UI to ChatGPT so it can analyze them more deeply and brainstorm optimization ideas. This cookbook gives a tutorial on how to connect Google Ads reporting data directly to ChatGPT through GPT actions, so that marketers don't have to download and upload files each time. It will save them time and overhead.
For new content
When contributing new content, read through our contribution guidelines, and mark the following action items as completed:
[x] I have added a new entry in registry.yaml (and, optionally, in authors.yaml) so that my content renders on the cookbook website.
[x] I have conducted a self-review of my content based on the contribution guidelines:
[x] Relevance: This content is related to building with OpenAI technologies and is useful to others.
[x] Uniqueness: I have searched for related examples in the OpenAI Cookbook, and verified that my content offers new insights or unique information compared to existing documentation.
[x] Spelling and Grammar: I have checked for spelling or grammatical mistakes.
[x] Clarity: I have done a final read-through and verified that my submission is well-organized and easy to understand.
[x] Correctness: The information I include is correct and all of my code executes successfully.
[x] Completeness: I have explained everything fully, including all necessary references and citations.
We will rate each of these areas on a scale from 1 to 4, and will only accept contributions that score 3 or higher on all areas. Refer to our contribution guidelines for more details.
@pap-openai, do you mind reviewing this PR so that we could add tutorials about the Google Ads connector to the Cookbook? Thanks in advance. This entire cookbook library has been instrumental for us.
@pap-openai, @aaronwilkowitz-openai, @joecasson-openai, @msingh-openai
Hey! Sorry for the follow-up, but could you review this PR when you get a chance? I'd love to make this my first contribution to the cookbook! 🙇 Thanks so much!
@pap-openai, @aaronwilkowitz-openai, @joecasson-openai, @msingh-openai
Gentle ping again. Thanks in advance guys.
@pap-openai, @aaronwilkowitz-openai, @joecasson-openai, @msingh-openai
Gentle ping. Thanks in advance guys.
Gentle ping. Trying to see if this PR could get a review before the Thanksgiving holidays.
Gentle ping again. @pap-openai, @aaronwilkowitz-openai, @joecasson-openai, @msingh-openai
Trying again to see if anyone could take a look at this PR. This PR gives instructions on how marketers could retrieve real-time Google Ads data directly to ChatGPT.
@pap-openai, @aaronwilkowitz-openai, @joecasson-openai, @msingh-openai
Hope you guys had a great weekend. Gentle ping on this PR again. It would benefit PPC marketers to get their real-time Google Ads data to GPTs.
|
gharchive/pull-request
| 2024-10-11T04:43:06 |
2025-04-01T04:35:19.453856
|
{
"authors": [
"GoooGu"
],
"repo": "openai/openai-cookbook",
"url": "https://github.com/openai/openai-cookbook/pull/1462",
"license": "MIT",
"license_type": "permissive",
"license_source": "github-api"
}
|
2404284500
|
Update long-running operation APIs to use LRO subclient pattern - streaming LRO subclient inherits from polling LRO subclient
Currently a work-in-progress prototype.
Uses SCM LRO types introduced in https://github.com/Azure/azure-sdk-for-net/pull/44728
Closing in favor of https://github.com/openai/openai-dotnet/pull/156
|
gharchive/pull-request
| 2024-07-11T22:40:33 |
2025-04-01T04:35:19.455808
|
{
"authors": [
"annelo-msft"
],
"repo": "openai/openai-dotnet",
"url": "https://github.com/openai/openai-dotnet/pull/118",
"license": "MIT",
"license_type": "permissive",
"license_source": "github-api"
}
|
1791529723
|
Model properties that are both string and enum break C# client generation
For example:
model:
  description: ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model with this endpoint.
  type: string
  example: "text-davinci-edit-001"
  anyOf:
    - type: string
    - type: string
      enum: ["text-davinci-edit-001", "code-davinci-edit-001"]
This example also has an additional type: string, which breaks RicoSuter/NSwag.
Could it be changed to:
model:
  description: ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model with this endpoint.
  example: "text-davinci-edit-001"
  type: string
  enum: ["text-davinci-edit-001", "code-davinci-edit-001"]
without breaking compatibility with other language client generators?
The anyOf type is done on purpose: customers sometimes need to pass fine-tuned models, which are arbitrary strings, into the SDKs. The SDK generator will need to be updated to support this.
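For illustration, the intent of that anyOf is "one of the documented names, or any arbitrary string"; roughly this shape in a Python/pydantic sketch (type names are ours, not the spec's):
from typing import Literal, Union

from pydantic import BaseModel

# "model" accepts the documented names or any fine-tuned model string.
KnownEditModel = Literal["text-davinci-edit-001", "code-davinci-edit-001"]

class EditRequest(BaseModel):
    model: Union[KnownEditModel, str]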
|
gharchive/issue
| 2023-07-06T13:07:50 |
2025-04-01T04:35:19.458039
|
{
"authors": [
"bobend",
"schnerd"
],
"repo": "openai/openai-openapi",
"url": "https://github.com/openai/openai-openapi/issues/56",
"license": "MIT",
"license_type": "permissive",
"license_source": "github-api"
}
|
1670317178
|
Since openai does not provide a Java version of the method for calculating gpt-3.5 tokens, can I use Java to run Python code?
Can I use Java to run "npm install tiktoken"?
OpenAI does not currently provide a Java version of tiktoken or a JavaScript version of tiktoken. So npm install tiktoken will not get you code made by OpenAI.
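For reference, the Python usage under discussion is only a few lines:
import tiktoken

# Count gpt-3.5 tokens with OpenAI's Python tiktoken package.
enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
print(len(enc.encode("Hello, world!")))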
|
gharchive/issue
| 2023-04-17T03:01:22 |
2025-04-01T04:35:19.459141
|
{
"authors": [
"TreeFireMen",
"hauntsaninja"
],
"repo": "openai/tiktoken",
"url": "https://github.com/openai/tiktoken/issues/110",
"license": "MIT",
"license_type": "permissive",
"license_source": "github-api"
}
|
1657407813
|
Illegal memory access for large enough shapes on 4D tensors GEMM
Hi, I doubt this is a duplicate of https://github.com/openai/triton/issues/1058 because I am not overflowing on the program ids (largest PID should be cdiv(8192, 16) * cdiv(8192, 32) = 131072 in my case).
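That bound follows from the launch-grid arithmetic; a quick check of the quoted number:
# With M = N = 8192 and the smallest autotuned tile (BLOCK_M=16, BLOCK_N=32),
# the axis-0 program count is cdiv(8192, 16) * cdiv(8192, 32) = 512 * 256.
cdiv = lambda a, b: (a + b - 1) // b
print(cdiv(8192, 16) * cdiv(8192, 32))  # 131072, far below 2**31 - 1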
I extended https://github.com/openai/triton/blob/main/python/triton/ops/matmul.py to support 4D tensors, e.g. of shapes (2, 8, 1024, 1024) and (2, 8, 1024, 512).
For large enough values in dimension 1 (while keeping all other dims equal), a CUDA error: an illegal memory access was encountered is raised:
✅ Triton and Torch match
[M,N,K=8192, bs=1, n_head=30] triton TFLOPS: 190.21, ms: 173.412354
[M,N,K=8192, bs=1, n_head=30] cublas TFLOPS: 130.25, ms: 253.250565
[M,N,K=8192, bs=1, n_head=32] triton TFLOPS: 181.86, ms: 193.468414
[M,N,K=8192, bs=1, n_head=32] cublas TFLOPS: 127.34, ms: 276.293640
Traceback (most recent call last):
File "/home/felix/test_triton/batched_gemm.py", line 227, in <module>
benchmark(val, val, val, bs, n_head, provider)
File "/home/felix/test_triton/batched_gemm.py", line 214, in benchmark
ms, min_ms, max_ms = triton.testing.do_bench(lambda: matmul(a, b))
File "/home/felix/triton/python/triton/testing.py", line 44, in do_bench
torch.cuda.synchronize()
File "/home/felix/miniconda3/envs/fx/lib/python3.9/site-packages/torch/cuda/__init__.py", line 688, in synchronize
return torch._C._cuda_synchronize()
RuntimeError: CUDA error: an illegal memory access was encountered
CUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.
For debugging consider passing CUDA_LAUNCH_BLOCKING=1.
Compile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.
Is it an issue in my implementation or expected at first glance? I have no issues on smaller shapes. This is on A100-SXM4-80GB on triton main.
Here is my kernel. You can see that the only change is grabbing a pid_batch and pid_dim1, and changing the offsets to account for the many GEMMs we have with 4D tensors.
import torch
import triton
import triton.language as tl
from matmul_perf_model import early_config_prune, estimate_matmul_time


def init_to_zero(name):
    return lambda nargs: nargs[name].zero_()


def get_configs_io_bound():
    configs = []
    for num_stages in [2, 3, 4, 5, 6]:
        for block_m in [16, 32]:
            for block_k in [32, 64]:
                for block_n in [32, 64, 128, 256]:
                    num_warps = 2 if block_n <= 64 else 4
                    configs.append(
                        triton.Config({'BLOCK_M': block_m, 'BLOCK_N': block_n, 'BLOCK_K': block_k, 'SPLIT_K': 1},
                                      num_stages=num_stages, num_warps=num_warps))
                    # split_k
                    for split_k in [2, 4, 8, 16]:
                        configs.append(triton.Config({'BLOCK_M': block_m, 'BLOCK_N': block_n, 'BLOCK_K': block_k, 'SPLIT_K': split_k},
                                                     num_stages=num_stages, num_warps=num_warps, pre_hook=init_to_zero('C')))
    return configs


@triton.autotune(
    configs=[
        # basic configs for compute-bound matmuls
        triton.Config({'BLOCK_M': 128, 'BLOCK_N': 256, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=3, num_warps=8),
        triton.Config({'BLOCK_M': 256, 'BLOCK_N': 128, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=3, num_warps=8),
        triton.Config({'BLOCK_M': 256, 'BLOCK_N': 64, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_M': 64, 'BLOCK_N': 256, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_M': 128, 'BLOCK_N': 128, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_M': 64, 'BLOCK_N': 128, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_M': 128, 'BLOCK_N': 32, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_M': 64, 'BLOCK_N': 32, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=5, num_warps=2),
        # good for int8
        triton.Config({'BLOCK_M': 128, 'BLOCK_N': 256, 'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=3, num_warps=8),
        triton.Config({'BLOCK_M': 256, 'BLOCK_N': 128, 'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=3, num_warps=8),
        triton.Config({'BLOCK_M': 256, 'BLOCK_N': 64, 'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_M': 64, 'BLOCK_N': 256, 'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_M': 128, 'BLOCK_N': 128, 'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_M': 64, 'BLOCK_N': 128, 'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_M': 128, 'BLOCK_N': 32, 'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_M': 64, 'BLOCK_N': 32, 'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=5, num_warps=2),
    ] + get_configs_io_bound(),
    key=['M', 'N', 'K'],
    prune_configs_by={
        'early_config_prune': early_config_prune,
        'perf_model': estimate_matmul_time,
        'top_k': 10
    },
)
@triton.heuristics({
    'EVEN_K': lambda args: args['K'] % (args['BLOCK_K'] * args['SPLIT_K']) == 0,
})
@triton.jit
def matmul_kernel(
    # Pointers to matrices
    A,
    B,
    C,
    # Matrix dimensions
    M,
    N,
    K,
    # The stride variables represent how much to increase the ptr by when moving by 1
    # element in a particular dimension. E.g. stride_am is how much to increase a_ptr
    # by to get the element one row down (A has M rows)
    dim1,
    stride_batch_a,
    stride_batch_b,
    stride_batch_c,
    stride_dim1_a,
    stride_dim1_b,
    stride_dim1_c,
    stride_am,
    stride_ak,
    stride_bk,
    stride_bn,
    stride_cm,
    stride_cn,
    # Meta-parameters
    BLOCK_M: tl.constexpr,
    BLOCK_N: tl.constexpr,
    BLOCK_K: tl.constexpr,
    GROUP_M: tl.constexpr,
    SPLIT_K: tl.constexpr,
    EVEN_K: tl.constexpr,
):
    # matrix multiplication
    pid = tl.program_id(0)
    pid_z = tl.program_id(1)
    pid_first_dims = tl.program_id(axis=2)
    # pid_dim1 = tl.program_id(axis=3)
    grid_m = (M + BLOCK_M - 1) // BLOCK_M
    grid_n = (N + BLOCK_N - 1) // BLOCK_N
    # re-order program ID for better L2 performance
    width = GROUP_M * grid_n
    group_id = pid // width
    group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
    pid_m = group_id * GROUP_M + (pid % group_size)
    pid_n = (pid % width) // (group_size)
    # do matrix multiplication
    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
    rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
    rk = pid_z * BLOCK_K + tl.arange(0, BLOCK_K)
    # pointer to the i-th matrix
    pid_batch = pid_first_dims // dim1  # (pid_first_dims + dim1 - 1) // dim1
    pid_dim1 = pid_first_dims % dim1
    a_ith_ptr = pid_batch * stride_batch_a + pid_dim1 * stride_dim1_a
    b_ith_ptr = pid_batch * stride_batch_b + pid_dim1 * stride_dim1_b
    c_ith_ptr = pid_batch * stride_batch_c + pid_dim1 * stride_dim1_c
    # pointers
    A = A + a_ith_ptr + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
    B = B + b_ith_ptr + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)
    acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32)
    for k in range(K, 0, -BLOCK_K * SPLIT_K):
        if EVEN_K:
            a = tl.load(A)
            b = tl.load(B)
        else:
            a = tl.load(A, mask=rk[None, :] < k, other=0.)
            b = tl.load(B, mask=rk[:, None] < k, other=0.)
        acc += tl.dot(a, b)
        A += BLOCK_K * SPLIT_K * stride_ak
        B += BLOCK_K * SPLIT_K * stride_bk
    acc = acc.to(C.dtype.element_ty)
    # rematerialize rm and rn to save registers
    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    C = C + c_ith_ptr + (rm[:, None] * stride_cm + rn[None, :] * stride_cn)
    mask = (rm < M)[:, None] & (rn < N)[None, :]
    # handles write-back with reduction-splitting
    if SPLIT_K == 1:
        tl.store(C, acc, mask=mask)
    else:
        tl.atomic_add(C, acc, mask=mask)


def matmul(a, b):
    # checks constraints
    assert a.shape[-1] == b.shape[-2], "incompatible dimensions"
    assert a.is_contiguous(), "matrix A must be contiguous"
    assert b.is_contiguous(), "matrix B must be contiguous"
    assert len(a.shape) == 4, "4D kernel"
    assert len(b.shape) == 4, "4D kernel"
    batch_size_a, dim1_a, M, K = a.shape
    batch_size_b, dim1_b, K, N = b.shape
    assert (
        K % 32 == 0
    ), "We don't check memory-out-of-bounds with K so K must be divisible by BLOCK_K"
    # allocates output
    assert batch_size_a == batch_size_b, "only same batch size supported"
    assert dim1_a == dim1_b, "only same dim 1 is supported"
    c = torch.empty((batch_size_a, dim1_a, M, N), device=a.device, dtype=a.dtype)
    # 1D launch kernel where each block gets its own program.
    grid = lambda META: (
        triton.cdiv(M, META['BLOCK_M']) * triton.cdiv(N, META['BLOCK_N']),
        META['SPLIT_K'],
        batch_size_a * dim1_a,
        # dim1_a,
    )
    matmul_kernel[grid](
        a, b, c,
        M, N, K,
        dim1_a,
        a.stride(0), b.stride(0), c.stride(0),
        a.stride(1), b.stride(1), c.stride(1),
        a.stride(2), a.stride(3),
        b.stride(2), b.stride(3),
        c.stride(2), c.stride(3),
        GROUP_M=8
    )
    return c


# a = torch.rand((1, 16, 2048, 2048), dtype=torch.float16, device='cuda')
# b = torch.rand((1, 16, 2048, 2048), dtype=torch.float16, device='cuda')
a = torch.rand((2, 4, 1024, 1024), dtype=torch.float16, device='cuda')
b = torch.rand((2, 4, 1024, 1024), dtype=torch.float16, device='cuda')
triton_output = matmul(a, b)
torch_output = torch.matmul(a, b)
if torch.allclose(triton_output, torch_output):
    print("✅ Triton and Torch match")
else:
    print("❌ Triton and Torch differ")
    print(f"  Maxdiff: {torch.abs(torch_output - triton_output).max()}")
    num_diff = torch.sum(torch.abs(torch_output - triton_output) > 1e-5)
    total_items = torch.numel(torch_output)
    print(f"  Num diff: {num_diff} ({num_diff / total_items * 100:.2f} %)")


def benchmark(M, N, K, bs, n_head, provider):
    a = torch.randn((bs, n_head, M, K), device='cuda', dtype=torch.float16)
    b = torch.randn((bs, n_head, K, N), device='cuda', dtype=torch.float16)
    if provider == 'cublas':
        ms, min_ms, max_ms = triton.testing.do_bench(lambda: torch.matmul(a, b))
    if provider == 'triton':
        ms, min_ms, max_ms = triton.testing.do_bench(lambda: matmul(a, b))
    perf = lambda ms: bs * n_head * 2 * M * N * K * 1e-12 / (ms * 1e-3)
    print(f"[M,N,K={val}, bs={bs}, n_head={n_head}] {provider} TFLOPS: {perf(ms):.2f}, ms: {ms:.6f}")


# for bs in [1, 4, 8]:
#     for n_head in [16, 32, 48]:
#         for val in [2048, 4096, 8192]:
for bs in [1]:
    for n_head in [30, 32, 34, 36]:
        for val in [8192]:
            for provider in ["triton", "cublas"]:
                benchmark(val, val, val, bs, n_head, provider)
Thank you!
import torch
import triton
import triton.language as tl
from matmul_perf_model import early_config_prune


def init_to_zero(nargs):
    return nargs.zero_()


def get_configs_io_bound():
    for num_stages in range(2, 7):
        for block_m in [16, 32]:
            for block_k in [32, 64]:
                for block_n in [32, 64, 128, 256]:
                    num_warps = 2 if block_n <= 64 else 4
                    yield triton.Config({'BLOCK_M': block_m, 'BLOCK_N': block_n, 'BLOCK_K': block_k, 'SPLIT_K': 1},
                                        num_stages=num_stages, num_warps=num_warps)
                    # split_k
                    for split_k in [2, 4, 8, 16]:
                        yield triton.Config({'BLOCK_M': block_m, 'BLOCK_N': block_n, 'BLOCK_K': block_k, 'SPLIT_K': split_k},
                                            num_stages=num_stages, num_warps=num_warps, pre_hook=init_to_zero)


@triton.autotune(
    configs=[
        # basic configs for compute-bound matmuls
        triton.Config({'BLOCK_M': 128, 'BLOCK_N': 256, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=3, num_warps=8),
        triton.Config({'BLOCK_M': 256, 'BLOCK_N': 128, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=3, num_warps=8),
        triton.Config({'BLOCK_M': 256, 'BLOCK_N': 64, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_M': 64, 'BLOCK_N': 256, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_M': 128, 'BLOCK_N': 128, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_M': 128, 'BLOCK
|
gharchive/issue
| 2023-04-06T13:14:16 |
2025-04-01T04:35:19.471641
|
{
"authors": [
"Profesor09",
"fxmarty"
],
"repo": "openai/triton",
"url": "https://github.com/openai/triton/issues/1483",
"license": "MIT",
"license_type": "permissive",
"license_source": "github-api"
}
|
1386614955
|
a bug with a double loop
Hi,
I found that when there is a double loop in Triton code where the first (outer) for-loop takes a non-constant in its range, it gives incorrect results (i.e., numerical values that differ from the reference).
Here is an example of the failing case:
@triton.jit
def qmatmul_kernel(
    a_ptr, b_ptr, c_ptr,
    M, N, K, Q,
    stride_am, stride_ak,
    stride_bk, stride_bn,
    stride_cm, stride_cn,
    BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE_M: tl.constexpr, BLOCK_SIZE_Q: tl.constexpr,
):
    pid = tl.program_id(axis=0)
    num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
    num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
    num_pid_in_group = GROUP_SIZE_M * num_pid_n
    group_id = pid // num_pid_in_group
    first_pid_m = group_id * GROUP_SIZE_M
    group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
    pid_m = first_pid_m + (pid % group_size_m)
    pid_n = (pid % num_pid_in_group) // group_size_m
    offs_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    offs_bn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
    offs_k = tl.arange(0, BLOCK_SIZE_K)
    a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)
    b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)
    offs_sam = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    sa_ptrs = sa_ptr + offs_sam[:, None] * stride_sam
    acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.int32)
    for k in range(0, K, BLOCK_SIZE_Q):  # works with constant
        for j in range(0, BLOCK_SIZE_Q, BLOCK_SIZE_K):
            a = tl.load(a_ptrs)
            b = tl.load(b_ptrs)
            acc += tl.dot(a, b)
            a_ptrs += BLOCK_SIZE_K * stride_ak
            b_ptrs += BLOCK_SIZE_K * stride_bk
    c = acc.to(tl.bfloat16)
    offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
    c_ptrs = c_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]
    c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
    tl.store(c_ptrs, c, mask=c_mask)
If I replace the "K" in the first range in the loop with a constant like 256, it gives the correct result, but if I pass 256 to K as an argument, it fails.
For example, the following works if I fix K to 256 in the code instead:
for k in range(0, 256, BLOCK_SIZE_Q):  # works with constant
    for j in range(0, BLOCK_SIZE_Q, BLOCK_SIZE_K):
        a = tl.load(a_ptrs)
        b = tl.load(b_ptrs)
        acc += tl.dot(a, b)
        a_ptrs += BLOCK_SIZE_K * stride_ak
        b_ptrs += BLOCK_SIZE_K * stride_bk
Or if I make it a single loop, it works too:
for k in range(0, K, BLOCK_SIZE_K):
    a = tl.load(a_ptrs)
    b = tl.load(b_ptrs)
    acc += tl.dot(a, b)
    a_ptrs += BLOCK_SIZE_K * stride_ak
    b_ptrs += BLOCK_SIZE_K * stride_bk
Is this a known issue?
Thanks
seems fixed :)
|
gharchive/issue
| 2022-09-26T19:42:31 |
2025-04-01T04:35:19.475347
|
{
"authors": [
"ptillet",
"stephen-youn"
],
"repo": "openai/triton",
"url": "https://github.com/openai/triton/issues/713",
"license": "MIT",
"license_type": "permissive",
"license_source": "github-api"
}
|
2511065580
|
Issues upgrading to 3.1.1 from 3.0.2
We are running in Azure using managed K8s. We've been running shinyproxy:3.0.2 + shinyproxy-operator:2.0.0 without issues. I've since tried upgrading by simply deploying into a new namespace using the sp-namespaced-apps setup.
One of the issues I ran into is that the sp-shinyproxy-rs-... pods don't appear to cycle after configuration changes. The second issue is that two sp-shinyproxy-rs pods are spun up, one with -rs-0 and the other with -rs-null.
When configuration changes occur, the new sp-shinyproxy-rs-1 attempts to run and then fails. Here's the log for that too:
. ____ _ __ _ _
/\\ / ___'_ __ _ _(_)_ __ __ _ \ \ \ \
( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \
\\/ ___)| |_)| | | | | || (_| | ) ) ) )
' |____| .__|_| |_|_| |_\__, | / / / /
=========|_|==============|___/=/_/_/_/
:: Spring Boot :: (v3.2.6)
2024-09-06T19:11:42.293Z INFO 1 --- [ main] .s.d.r.c.RepositoryConfigurationDelegate : Multiple Spring Data modules found, entering strict repository configuration mode
2024-09-06T19:11:42.298Z INFO 1 --- [ main] .s.d.r.c.RepositoryConfigurationDelegate : Bootstrapping Spring Data Redis repositories in DEFAULT mode.
2024-09-06T19:11:42.409Z INFO 1 --- [ main] .s.d.r.c.RepositoryConfigurationDelegate : Finished Spring Data repository scanning in 91 ms. Found 0 Redis repository interfaces.
2024-09-06T19:11:42.599Z INFO 1 --- [ main] faultConfiguringBeanFactoryPostProcessor : No bean named 'errorChannel' has been explicitly defined. Therefore, a default PublishSubscribeChannel will be created.
2024-09-06T19:11:42.606Z INFO 1 --- [ main] faultConfiguringBeanFactoryPostProcessor : No bean named 'integrationHeaderChannelRegistry' has been explicitly defined. Therefore, a default DefaultHeaderChannelRegistry will be created.
2024-09-06T19:11:43.775Z INFO 1 --- [ main] e.o.c.stat.StatCollectorFactory : Enabled. Sending usage statistics to micrometer.
2024-09-06T19:11:45.219Z WARN 1 --- [ main] io.undertow.websockets.jsr : UT026010: Buffer pool was not set on WebSocketDeploymentInfo, the default pool will be used
2024-09-06T19:11:45.294Z INFO 1 --- [ main] io.undertow.servlet : Initializing Spring embedded WebApplicationContext
2024-09-06T19:11:45.295Z INFO 1 --- [ main] w.s.c.ServletWebServerApplicationContext : Root WebApplicationContext: initialization completed in 6289 ms
2024-09-06T19:11:45.516Z INFO 1 --- [ main] e.o.c.service.IdentifierService : ShinyProxy runtimeId: 8lpg
2024-09-06T19:11:45.688Z INFO 1 --- [ main] e.o.c.service.IdentifierService : ShinyProxy instanceID (hash of config): 9989d0cce455f0153effe8235a0865c6d2ba4e68
2024-09-06T19:11:45.689Z INFO 1 --- [ main] e.o.c.service.IdentifierService : ShinyProxy realmId: shinyproxy-test-shinyproxy
2024-09-06T19:11:45.690Z INFO 1 --- [ main] e.o.c.service.IdentifierService : ShinyProxy version: 1725649893637
SNIP
2024-09-06T19:11:51.679Z INFO 1 --- [ main] o.s.b.a.w.s.WelcomePageHandlerMapping : Adding welcome page template: index
2024-09-06T19:11:53.086Z INFO 1 --- [ main] o.s.s.web.DefaultSecurityFilterChain : Will secure any request with [org.springframework.security.web.session.DisableEncodeUrlFilter@517fb2c3, org.springframework.security.web.session.ForceEagerSessionCreationFilter@6c289070, org.springframework.security.web.context.request.async.WebAsyncManagerIntegrationFilter@69e58566, org.springframework.security.web.context.SecurityContextHolderFilter@6bbff652, org.springframework.security.web.header.HeaderWriterFilter@7ae97a58, org.springframework.web.filter.CorsFilter@2e41e0c, org.springframework.security.web.csrf.CsrfFilter@59da4992, org.springframework.security.web.authentication.logout.LogoutFilter@5a35efac, org.springframework.security.web.authentication.UsernamePasswordAuthenticationFilter@130bd00e, org.springframework.security.web.authentication.www.BasicAuthenticationFilter@32aa27a7, eu.openanalytics.containerproxy.util.AppRecoveryFilter@7cc842b0, eu.openanalytics.containerproxy.security.UserAgentFilter@2e214d39, eu.openanalytics.containerproxy.security.FixedRequestCacheAwareFilter@622e39d, org.springframework.security.web.servletapi.SecurityContextHolderAwareRequestFilter@6a9c848, org.springframework.security.web.authentication.AnonymousAuthenticationFilter@43a9988a, org.springframework.security.web.session.SessionManagementFilter@552b7481, org.springframework.security.web.access.ExceptionTranslationFilter@63661fc7, eu.openanalytics.shinyproxy.AuthenticationRequiredFilter@611ffa8d, org.springframework.security.web.access.intercept.AuthorizationFilter@4b02dc4e]
2024-09-06T19:11:53.886Z INFO 1 --- [ main] o.s.l.c.support.AbstractContextSource : Property 'userDn' not set - anonymous context will be used for read-only operations
2024-09-06T19:11:55.481Z INFO 1 --- [ main] o.s.i.endpoint.EventDrivenConsumer : Adding {logging-channel-adapter:_org.springframework.integration.errorLogger} as a subscriber to the 'errorChannel' channel
2024-09-06T19:11:55.482Z INFO 1 --- [ main] o.s.i.channel.PublishSubscribeChannel : Channel 'ContainerProxy.errorChannel' has 1 subscriber(s).
2024-09-06T19:11:55.482Z INFO 1 --- [ main] o.s.i.endpoint.EventDrivenConsumer : started bean '_org.springframework.integration.errorLogger'
2024-09-06T19:11:55.488Z INFO 1 --- [ main] io.undertow : starting server: Undertow - 2.3.13.Final
2024-09-06T19:11:55.495Z INFO 1 --- [ main] org.xnio : XNIO version 3.8.8.Final
2024-09-06T19:11:55.503Z INFO 1 --- [ main] org.xnio.nio : XNIO NIO Implementation Version 3.8.8.Final
2024-09-06T19:11:55.611Z INFO 1 --- [ main] org.jboss.threads : JBoss Threads version 3.5.0.Final
2024-09-06T19:11:55.717Z INFO 1 --- [ main] o.s.b.w.e.undertow.UndertowWebServer : Undertow started on port 8080 (http)
2024-09-06T19:11:55.902Z INFO 1 --- [ main] io.undertow.servlet : Initializing Spring embedded WebApplicationContext
2024-09-06T19:11:55.902Z INFO 1 --- [ main] w.s.c.ServletWebServerApplicationContext : Root WebApplicationContext: initialization completed in 180 ms
2024-09-06T19:11:55.912Z INFO 1 --- [ main] o.s.b.a.e.web.EndpointLinksResolver : Exposing 2 endpoint(s) beneath base path '/actuator'
2024-09-06T19:11:56.003Z INFO 1 --- [ main] io.undertow : starting server: Undertow - 2.3.13.Final
2024-09-06T19:11:56.005Z INFO 1 --- [ main] o.s.b.w.e.undertow.UndertowWebServer : Undertow started on port 9090 (http)
2024-09-06T19:11:56.212Z INFO 1 --- [ main] e.o.c.service.AppRecoveryService : Recovery of running apps disabled
2024-09-06T19:11:56.213Z INFO 1 --- [ main] e.o.c.util.StartupEventListener : Started ShinyProxy 3.1.1 (ContainerProxy 1.1.1)
2024-09-06T19:11:56.383Z INFO 1 --- [ taskExecutor-1] .o.c.s.l.r.RedisCheckLatestConfigService : This server is running the latest configuration (instanceId: 9989d0cce455f0153effe8235a0865c6d2ba4e68, version: 1725649893637), taking part in leader election.
2024-09-06T19:11:56.480Z INFO 1 --- [ck-leadership-1] e.o.c.s.leader.redis.RedisLeaderService : This server (runtimeId: 8lpg) is now the leader.
2024-09-06T19:12:07.214Z INFO 1 --- [ionShutdownHook] io.undertow : stopping server: Undertow - 2.3.13.Final
2024-09-06T19:12:07.224Z INFO 1 --- [ionShutdownHook] io.undertow : stopping server: Undertow - 2.3.13.Final
2024-09-06T19:12:07.227Z INFO 1 --- [ck-leadership-1] e.o.c.s.leader.redis.RedisLeaderService : This server (runtimeId: 8lpg) is no longer the leader.
2024-09-06T19:12:07.333Z INFO 1 --- [ionShutdownHook] o.s.i.endpoint.EventDrivenConsumer : Removing {logging-channel-adapter:_org.springframework.integration.errorLogger} as a subscriber to the 'errorChannel' channel
2024-09-06T19:12:07.333Z INFO 1 --- [ionShutdownHook] o.s.i.channel.PublishSubscribeChannel : Channel 'ContainerProxy.errorChannel' has 0 subscriber(s).
2024-09-06T19:12:07.333Z INFO 1 --- [ionShutdownHook] o.s.i.endpoint.EventDrivenConsumer : stopped bean '_org.springframework.integration.errorLogger'
operator log
. ____ _ __ _ _
/\\ / ___'_ __ _ _(_)_ __ __ _ \ \ \ \
( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \
\\/ ___)| |_)| | | | | || (_| | ) ) ) )
' |____| .__|_| |_|_| |_\__, | / / / /
=========|_|==============|___/=/_/_/_/
:: Spring Boot :: (v3.2.6)
2024-09-06T18:07:35.995Z INFO 1 --- [ main] .s.d.r.c.RepositoryConfigurationDelegate : Multiple Spring Data modules found, entering strict repository configuration mode
2024-09-06T18:07:35.999Z INFO 1 --- [ main] .s.d.r.c.RepositoryConfigurationDelegate : Bootstrapping Spring Data Redis repositories in DEFAULT mode.
2024-09-06T18:07:36.090Z INFO 1 --- [ main] .s.d.r.c.RepositoryConfigurationDelegate : Finished Spring Data repository scanning in 79 ms. Found 0 Redis repository interfaces.
2024-09-06T18:07:36.222Z INFO 1 --- [ main] faultConfiguringBeanFactoryPostProcessor : No bean named 'errorChannel' has been explicitly defined. Therefore, a default PublishSubscribeChannel will be created.
2024-09-06T18:07:36.288Z INFO 1 --- [ main] faultConfiguringBeanFactoryPostProcessor : No bean named 'integrationHeaderChannelRegistry' has been explicitly defined. Therefore, a default DefaultHeaderChannelRegistry will be created.
2024-09-06T18:07:37.409Z INFO 1 --- [ main] e.o.c.stat.StatCollectorFactory : Enabled. Sending usage statistics to micrometer.
2024-09-06T18:07:39.124Z WARN 1 --- [ main] io.undertow.websockets.jsr : UT026010: Buffer pool was not set on WebSocketDeploymentInfo, the default pool will be used
2024-09-06T18:07:39.199Z INFO 1 --- [ main] io.undertow.servlet : Initializing Spring embedded WebApplicationContext
2024-09-06T18:07:39.200Z INFO 1 --- [ main] w.s.c.ServletWebServerApplicationContext : Root WebApplicationContext: initialization completed in 6708 ms
2024-09-06T18:07:39.514Z INFO 1 --- [ main] e.o.c.service.IdentifierService : ShinyProxy runtimeId: lbgh
2024-09-06T18:07:39.715Z INFO 1 --- [ main] e.o.c.service.IdentifierService : ShinyProxy instanceID (hash of config): 55ba1652b483cd33b2433ebb35136b980289f0ee
2024-09-06T18:07:39.717Z INFO 1 --- [ main] e.o.c.service.IdentifierService : ShinyProxy realmId: shinyproxy-test-shinyproxy
2024-09-06T18:07:39.717Z INFO 1 --- [ main] e.o.c.service.IdentifierService : ShinyProxy version: 1725646039375
2024-09-06T18:07:42.604Z INFO 1 --- [ main] e.o.c.b.k.KubernetesManifestsRemover : Kubernetes additional manifests is supported for resource [Group: '', Version: 'v1', Kind: 'Pod'] in namespace: test-APP
2024-09-06T18:07:42.610Z INFO 1 --- [ main] e.o.c.b.k.KubernetesManifestsRemover : Kubernetes additional manifests is supported for resource [Group: '', Version: 'v1', Kind: 'Pod'] in namespace: test-APP2
2024-09-06T18:07:42.620Z INFO 1 --- [ main] e.o.c.b.k.KubernetesManifestsRemover : Kubernetes additional manifests is supported for resource [Group: '', Version: 'v1', Kind: 'Pod'] in namespace: test-shinyproxy
2024-09-06T18:07:43.224Z INFO 1 --- [ main] e.o.c.b.k.KubernetesManifestsRemover : Kubernetes additional manifests is supported for resource [Group: '', Version: 'v1', Kind: 'Event'] in namespace: test-APP
2024-09-06T18:07:43.227Z INFO 1 --- [ main] e.o.c.b.k.KubernetesManifestsRemover : Kubernetes additional manifests is supported for resource [Group: '', Version: 'v1', Kind: 'Event'] in namespace: test-APP2
2024-09-06T18:07:43.240Z INFO 1 --- [ main] e.o.c.b.k.KubernetesManifestsRemover : Kubernetes additional manifests is supported for resource [Group: '', Version: 'v1', Kind: 'Event'] in namespace: test-shinyproxy
2024-09-06T18:07:45.412Z INFO 1 --- [ main] o.s.boot.web.servlet.RegistrationBean : Filter registration2 was not registered (disabled)
2024-09-06T18:07:45.711Z INFO 1 --- [ main] o.s.b.a.w.s.WelcomePageHandlerMapping : Adding welcome page template: index
2024-09-06T18:07:47.194Z INFO 1 --- [ main] o.s.s.web.DefaultSecurityFilterChain : Will secure any request with [org.springframework.security.web.session.DisableEncodeUrlFilter@39adf4e6, org.springframework.security.web.session.ForceEagerSessionCreationFilter@73299197, org.springframework.security.web.context.request.async.WebAsyncManagerIntegrationFilter@4ef7145f, org.springframework.security.web.context.SecurityContextHolderFilter@55854382, org.springframework.security.web.header.HeaderWriterFilter@7bf3bf2e, org.springframework.web.filter.CorsFilter@1611ce1c, org.springframework.security.web.csrf.CsrfFilter@3f13720f, org.springframework.security.web.authentication.logout.LogoutFilter@1142843c, org.springframework.security.web.authentication.UsernamePasswordAuthenticationFilter@5d7f4cbb, org.springframework.security.web.authentication.www.BasicAuthenticationFilter@726882da, eu.openanalytics.containerproxy.util.AppRecoveryFilter@106ac5f4, eu.openanalytics.containerproxy.security.UserAgentFilter@40cd02fc, eu.openanalytics.containerproxy.security.FixedRequestCacheAwareFilter@7fd7965b, org.springframework.security.web.servletapi.SecurityContextHolderAwareRequestFilter@77d0a492, org.springframework.security.web.authentication.AnonymousAuthenticationFilter@9742012, org.springframework.security.web.session.SessionManagementFilter@7a458c73, org.springframework.security.web.access.ExceptionTranslationFilter@672c4e24, eu.openanalytics.shinyproxy.AuthenticationRequiredFilter@118d7e0e, org.springframework.security.web.access.intercept.AuthorizationFilter@33f059ad]
2024-09-06T18:07:48.103Z INFO 1 --- [ main] o.s.l.c.support.AbstractContextSource : Property 'userDn' not set - anonymous context will be used for read-only operations
2024-09-06T18:07:49.827Z INFO 1 --- [ main] o.s.i.endpoint.EventDrivenConsumer : Adding {logging-channel-adapter:_org.springframework.integration.errorLogger} as a subscriber to the 'errorChannel' channel
2024-09-06T18:07:49.828Z INFO 1 --- [ main] o.s.i.channel.PublishSubscribeChannel : Channel 'ContainerProxy.errorChannel' has 1 subscriber(s).
2024-09-06T18:07:49.828Z INFO 1 --- [ main] o.s.i.endpoint.EventDrivenConsumer : started bean '_org.springframework.integration.errorLogger'
2024-09-06T18:07:49.834Z INFO 1 --- [ main] io.undertow : starting server: Undertow - 2.3.13.Final
2024-09-06T18:07:49.898Z INFO 1 --- [ main] org.xnio : XNIO version 3.8.8.Final
2024-09-06T18:07:49.908Z INFO 1 --- [ main] org.xnio.nio : XNIO NIO Implementation Version 3.8.8.Final
2024-09-06T18:07:50.021Z INFO 1 --- [ main] org.jboss.threads : JBoss Threads version 3.5.0.Final
2024-09-06T18:07:50.108Z INFO 1 --- [ main] o.s.b.w.e.undertow.UndertowWebServer : Undertow started on port 8080 (http)
2024-09-06T18:07:50.301Z INFO 1 --- [ main] io.undertow.servlet : Initializing Spring embedded WebApplicationContext
2024-09-06T18:07:50.301Z INFO 1 --- [ main] w.s.c.ServletWebServerApplicationContext : Root WebApplicationContext: initialization completed in 188 ms
2024-09-06T18:07:50.309Z INFO 1 --- [ main] o.s.b.a.e.web.EndpointLinksResolver : Exposing 3 endpoint(s) beneath base path '/actuator'
2024-09-06T18:07:50.400Z INFO 1 --- [ main] io.undertow : starting server: Undertow - 2.3.13.Final
2024-09-06T18:07:50.402Z INFO 1 --- [ main] o.s.b.w.e.undertow.UndertowWebServer : Undertow started on port 9090 (http)
2024-09-06T18:07:50.606Z INFO 1 --- [ main] e.o.c.service.AppRecoveryService : Recovery of running apps disabled
2024-09-06T18:07:50.606Z INFO 1 --- [ main] e.o.c.util.StartupEventListener : Started ShinyProxy 3.1.1 (ContainerProxy 1.1.1)
2024-09-06T18:07:50.790Z INFO 1 --- [ taskExecutor-1] .o.c.s.l.r.RedisCheckLatestConfigService : This server is running the latest configuration (instanceId: 55ba1652b483cd33b2433ebb35136b980289f0ee, version: 1725646039375), taking part in leader election.
2024-09-06T18:07:50.903Z INFO 1 --- [ck-leadership-1] e.o.c.s.leader.redis.RedisLeaderService : This server (runtimeId: lbgh) is now the leader.
2024-09-06T18:08:29.805Z INFO 1 --- [ XNIO-2 task-2] io.undertow.servlet : Initializing Spring DispatcherServlet 'dispatcherServletRegistration'
2024-09-06T18:08:29.805Z INFO 1 --- [ XNIO-2 task-2] o.s.web.servlet.DispatcherServlet : Initializing Servlet 'dispatcherServletRegistration'
2024-09-06T18:08:29.806Z INFO 1 --- [ XNIO-2 task-2] o.s.web.servlet.DispatcherServlet : Completed initialization in 1 ms
2024-09-06T18:09:10.632Z INFO 1 --- [GlobalEventLoop] .o.c.s.l.r.RedisCheckLatestConfigService : This server is no longer running the latest configuration (instanceId: 55ba1652b483cd33b2433ebb35136b980289f0ee, version: 1725646039375), no longer taking part in leader election.
2024-09-06T18:09:35.634Z INFO 1 --- [ck-leadership-1] e.o.c.s.leader.redis.RedisLeaderService : This server (runtimeId: lbgh) is no longer the leader.
There are two pods, rs-null and rs-0:
sp-rs-null
. ____ _ __ _ _
/\\ / ___'_ __ _ _(_)_ __ __ _ \ \ \ \
( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \
\\/ ___)| |_)| | | | | || (_| | ) ) ) )
' |____| .__|_| |_|_| |_\__, | / / / /
=========|_|==============|___/=/_/_/_/
:: Spring Boot :: (v3.2.6)
2024-09-06T18:07:35.995Z INFO 1 --- [ main] .s.d.r.c.RepositoryConfigurationDelegate : Multiple Spring Data modules found, entering strict repository configuration mode
2024-09-06T18:07:35.999Z INFO 1 --- [ main] .s.d.r.c.RepositoryConfigurationDelegate : Bootstrapping Spring Data Redis repositories in DEFAULT mode.
2024-09-06T18:07:36.090Z INFO 1 --- [ main] .s.d.r.c.RepositoryConfigurationDelegate : Finished Spring Data repository scanning in 79 ms. Found 0 Redis repository interfaces.
2024-09-06T18:07:36.222Z INFO 1 --- [ main] faultConfiguringBeanFactoryPostProcessor : No bean named 'errorChannel' has been explicitly defined. Therefore, a default PublishSubscribeChannel will be created.
2024-09-06T18:07:36.288Z INFO 1 --- [ main] faultConfiguringBeanFactoryPostProcessor : No bean named 'integrationHeaderChannelRegistry' has been explicitly defined. Therefore, a default DefaultHeaderChannelRegistry will be created.
2024-09-06T18:07:37.409Z INFO 1 --- [ main] e.o.c.stat.StatCollectorFactory : Enabled. Sending usage statistics to micrometer.
2024-09-06T18:07:39.124Z WARN 1 --- [ main] io.undertow.websockets.jsr : UT026010: Buffer pool was not set on WebSocketDeploymentInfo, the default pool will be used
2024-09-06T18:07:39.199Z INFO 1 --- [ main] io.undertow.servlet : Initializing Spring embedded WebApplicationContext
2024-09-06T18:07:39.200Z INFO 1 --- [ main] w.s.c.ServletWebServerApplicationContext : Root WebApplicationContext: initialization completed in 6708 ms
2024-09-06T18:07:39.514Z INFO 1 --- [ main] e.o.c.service.IdentifierService : ShinyProxy runtimeId: lbgh
2024-09-06T18:07:39.715Z INFO 1 --- [ main] e.o.c.service.IdentifierService : ShinyProxy instanceID (hash of config): 55ba1652b483cd33b2433ebb35136b980289f0ee
2024-09-06T18:07:39.717Z INFO 1 --- [ main] e.o.c.service.IdentifierService : ShinyProxy realmId: shinyproxy-test-shinyproxy
2024-09-06T18:07:39.717Z INFO 1 --- [ main] e.o.c.service.IdentifierService : ShinyProxy version: 1725646039375
SNIP
2024-09-06T18:07:45.412Z INFO 1 --- [ main] o.s.boot.web.servlet.RegistrationBean : Filter registration2 was not registered (disabled)
2024-09-06T18:07:45.711Z INFO 1 --- [ main] o.s.b.a.w.s.WelcomePageHandlerMapping : Adding welcome page template: index
boot-3.2.6.jar!/:3.2.6]
at org.springframework.boot.SpringApplication.refresh(SpringApplication.java:754) ~[spring-boot-3.2.6.jar!/:3.2.6]
at org.springframework.boot.SpringApplication.refreshContext(SpringApplication.java:456) ~[spring-boot-3.2.6.jar!/:3.2.6]
at org.springframework.boot.SpringApplication.run(SpringApplication.java:335) ~[spring-boot-3.2.6.jar!/:3.2.6]
at eu.openanalytics.containerproxy.ContainerProxyApplication.main(ContainerProxyApplication.java:135) ~[containerproxy-1.1.1.jar!/:1.1.1]
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[na:na]
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[na:na]
at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[na:na]
at java.base/java.lang.reflect.Method.invoke(Method.java:568) ~[na:na]
at org.springframework.boot.loader.launch.Launcher.launch(Launcher.java:91) ~[shinyproxy.jar:3.1.1]
at org.springframework.boot.loader.launch.Launcher.launch(Launcher.java:53) ~[shinyproxy.jar:3.1.1]
at org.springframework.boot.loader.launch.JarLauncher.main(JarLauncher.java:58) ~[shinyproxy.jar:3.1.1]
2024-09-06T18:07:47.194Z INFO 1 --- [ main] o.s.s.web.DefaultSecurityFilterChain : Will secure any request with [org.springframework.security.web.session.DisableEncodeUrlFilter@39adf4e6, org.springframework.security.web.session.ForceEagerSessionCreationFilter@73299197, org.springframework.security.web.context.request.async.WebAsyncManagerIntegrationFilter@4ef7145f, org.springframework.security.web.context.SecurityContextHolderFilter@55854382, org.springframework.security.web.header.HeaderWriterFilter@7bf3bf2e, org.springframework.web.filter.CorsFilter@1611ce1c, org.springframework.security.web.csrf.CsrfFilter@3f13720f, org.springframework.security.web.authentication.logout.LogoutFilter@1142843c, org.springframework.security.web.authentication.UsernamePasswordAuthenticationFilter@5d7f4cbb, org.springframework.security.web.authentication.www.BasicAuthenticationFilter@726882da, eu.openanalytics.containerproxy.util.AppRecoveryFilter@106ac5f4, eu.openanalytics.containerproxy.security.UserAgentFilter@40cd02fc, eu.openanalytics.containerproxy.security.FixedRequestCacheAwareFilter@7fd7965b, org.springframework.security.web.servletapi.SecurityContextHolderAwareRequestFilter@77d0a492, org.springframework.security.web.authentication.AnonymousAuthenticationFilter@9742012, org.springframework.security.web.session.SessionManagementFilter@7a458c73, org.springframework.security.web.access.ExceptionTranslationFilter@672c4e24, eu.openanalytics.shinyproxy.AuthenticationRequiredFilter@118d7e0e, org.springframework.security.web.access.intercept.AuthorizationFilter@33f059ad]
2024-09-06T18:07:48.103Z INFO 1 --- [ main] o.s.l.c.support.AbstractContextSource : Property 'userDn' not set - anonymous context will be used for read-only operations
2024-09-06T18:07:49.827Z INFO 1 --- [ main] o.s.i.endpoint.EventDrivenConsumer : Adding {logging-channel-adapter:_org.springframework.integration.errorLogger} as a subscriber to the 'errorChannel' channel
2024-09-06T18:07:49.828Z INFO 1 --- [ main] o.s.i.channel.PublishSubscribeChannel : Channel 'ContainerProxy.errorChannel' has 1 subscriber(s).
2024-09-06T18:07:49.828Z INFO 1 --- [ main] o.s.i.endpoint.EventDrivenConsumer : started bean '_org.springframework.integration.errorLogger'
2024-09-06T18:07:49.834Z INFO 1 --- [ main] io.undertow : starting server: Undertow - 2.3.13.Final
2024-09-06T18:07:49.898Z INFO 1 --- [ main] org.xnio : XNIO version 3.8.8.Final
2024-09-06T18:07:49.908Z INFO 1 --- [ main] org.xnio.nio : XNIO NIO Implementation Version 3.8.8.Final
2024-09-06T18:07:50.021Z INFO 1 --- [ main] org.jboss.threads : JBoss Threads version 3.5.0.Final
2024-09-06T18:07:50.108Z INFO 1 --- [ main] o.s.b.w.e.undertow.UndertowWebServer : Undertow started on port 8080 (http)
2024-09-06T18:07:50.301Z INFO 1 --- [ main] io.undertow.servlet : Initializing Spring embedded WebApplicationContext
2024-09-06T18:07:50.301Z INFO 1 --- [ main] w.s.c.ServletWebServerApplicationContext : Root WebApplicationContext: initialization completed in 188 ms
2024-09-06T18:07:50.309Z INFO 1 --- [ main] o.s.b.a.e.web.EndpointLinksResolver : Exposing 3 endpoint(s) beneath base path '/actuator'
2024-09-06T18:07:50.400Z INFO 1 --- [ main] io.undertow : starting server: Undertow - 2.3.13.Final
2024-09-06T18:07:50.402Z INFO 1 --- [ main] o.s.b.w.e.undertow.UndertowWebServer : Undertow started on port 9090 (http)
2024-09-06T18:07:50.606Z INFO 1 --- [ main] e.o.c.service.AppRecoveryService : Recovery of running apps disabled
2024-09-06T18:07:50.606Z INFO 1 --- [ main] e.o.c.util.StartupEventListener : Started ShinyProxy 3.1.1 (ContainerProxy 1.1.1)
2024-09-06T18:07:50.790Z INFO 1 --- [ taskExecutor-1] .o.c.s.l.r.RedisCheckLatestConfigService : This server is running the latest configuration (instanceId: 55ba1652b483cd33b2433ebb35136b980289f0ee, version: 1725646039375), taking part in leader election.
2024-09-06T18:07:50.903Z INFO 1 --- [ck-leadership-1] e.o.c.s.leader.redis.RedisLeaderService : This server (runtimeId: lbgh) is now the leader.
2024-09-06T18:08:29.805Z INFO 1 --- [ XNIO-2 task-2] io.undertow.servlet : Initializing Spring DispatcherServlet 'dispatcherServletRegistration'
2024-09-06T18:08:29.805Z INFO 1 --- [ XNIO-2 task-2] o.s.web.servlet.DispatcherServlet : Initializing Servlet 'dispatcherServletRegistration'
2024-09-06T18:08:29.806Z INFO 1 --- [ XNIO-2 task-2] o.s.web.servlet.DispatcherServlet : Completed initialization in 1 ms
2024-09-06T18:09:10.632Z INFO 1 --- [GlobalEventLoop] .o.c.s.l.r.RedisCheckLatestConfigService : This server is no longer running the latest configuration (instanceId: 55ba1652b483cd33b2433ebb35136b980289f0ee, version: 1725646039375), no longer taking part in leader election.
2024-09-06T18:09:35.634Z INFO 1 --- [ck-leadership-1] e.o.c.s.leader.redis.RedisLeaderService : This server (runtimeId: lbgh) is no longer the leader.
rs-0 has a massive log; I can provide it if needed. It is likely similar to the others, just with errors from the actuator:
2024-09-06T19:21:28.532Z ERROR 1 --- [ XNIO-2 task-6] io.undertow.request : UT005023: Exception handling request to /actuator/health/readiness
java.lang.NullPointerException: null
I've spent a decent bit of time trying to resolve this but cannot figure out a solution. Besides the odd rs-null pod and the actuator errors, the apps seem to be running fine. Though I previously had issues with SAML, I am using simple authentication now and will debug SAML at a later time.
Hi @Lukeesec
Can you provide the logs of the ShinyProxy operator? It seems you posted the logs of ShinyProxy itself instead of the operator.
|
gharchive/issue
| 2024-09-06T19:23:49 |
2025-04-01T04:35:19.485980
|
{
"authors": [
"LEDfan",
"Lukeesec"
],
"repo": "openanalytics/shinyproxy-operator",
"url": "https://github.com/openanalytics/shinyproxy-operator/issues/54",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
1692142388
|
description will not be used as javadoc when having custom type mapping
having a custom type mapping breaks javadoc generation
having:
openapi-processor-mapping: v3
options:
package-name: xxx
bean-validation: true
javadoc: true
map:
types:
- type: string:date-time => java.time.OffsetDateTime
and
title: DatumGesendet
x-stoplight:
id: nzlj4zrmsiwoh
type: object
description: 'Zeitpunkt, zu dem die Nachricht vom Benutzer an das System übergeben wurde.'
properties:
datumGesendet:
type: string
format: date-time
description: 'Zeitpunkt, zu dem die Nachricht vom Benutzer an das System übergeben wurde.'
required:
- datumGesendet
will generate:
@NotNull
@JsonProperty("datumGesendet")
private OffsetDateTime datumGesendet;
without the string:date-time => java.time.OffsetDateTime mapping, it generates:
/** Zeitpunkt, zu dem die Nachricht vom Benutzer an das System übergeben wurde. */
@NotNull
@JsonProperty("datumGesendet")
private OffsetDateTime datumGesendet;
oh, that's a "funny" bug. I wonder how that happened.
Thanks for reporting :-)
|
gharchive/issue
| 2023-05-02T10:08:47 |
2025-04-01T04:35:19.494185
|
{
"authors": [
"Snap252",
"hauner"
],
"repo": "openapi-processor/openapi-processor-spring",
"url": "https://github.com/openapi-processor/openapi-processor-spring/issues/160",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
435114854
|
Design Interface: debug unresponsive host
https://github.com/ibm-openbmc/dev/issues/457 provides the background details.
Issue scope:
Design d-bus interface for SBE sreset chip-op to issue processor reset.
Proposal for the Redfish-based external interface:
https://gerrit.openbmc-project.xyz/#/c/openbmc/docs/+/21772/
All the review comments have been addressed; yet to be integrated.
The design proposal has been integrated.
|
gharchive/issue
| 2019-04-19T09:32:31 |
2025-04-01T04:35:19.536195
|
{
"authors": [
"lkammath"
],
"repo": "openbmc/openbmc-test-automation",
"url": "https://github.com/openbmc/openbmc-test-automation/issues/1746",
"license": "apache-2.0",
"license_type": "permissive",
"license_source": "bigquery"
}
|
439422562
|
Redfish date time end to end automation
[ ] Test plan
[ ] Automation
Automation completed. Thanks @sivassrr
|
gharchive/issue
| 2019-05-02T04:26:20 |
2025-04-01T04:35:19.537279
|
{
"authors": [
"gkeishin"
],
"repo": "openbmc/openbmc-test-automation",
"url": "https://github.com/openbmc/openbmc-test-automation/issues/1760",
"license": "apache-2.0",
"license_type": "permissive",
"license_source": "bigquery"
}
|
1188138461
|
Termination service persists maintenance
Fixes #3602 Termination State Services leaves nodes in maintenance mode
Check on startup that the host is not in maintenance due to state left over from a previous instance.
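A minimal sketch of that check, with invented helper names (the actual Opencast service-registry API differs in detail):

// Hypothetical sketch: on activation, clear maintenance state persisted
// by a previous instance of this host.
public void activate() {
    if (serviceRegistry.isInMaintenance(hostUrl)) {  // invented helper
        serviceRegistry.setMaintenanceStatus(hostUrl, false);
    }
}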
I'm out of practice, against wrong branch
|
gharchive/pull-request
| 2022-03-31T14:05:31 |
2025-04-01T04:35:19.561129
|
{
"authors": [
"JamesUoM"
],
"repo": "opencast/opencast",
"url": "https://github.com/opencast/opencast/pull/3603",
"license": "ECL-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
2589745965
|
Upgrade CDK editor to remove warning
The following alert is being displayed on the maintenance report editing page:
Find the references such as in https://github.com/opencdms/surface/blob/main/api/wx/templates/wx/maintenance_reports/new_report.html#L2C1-L3C1 and change the version number to 4.25.0 as suggested.
CKEditor 4 LTS (4.23.0 and above) requires a Software License Agreement; similar issues can be seen in the django-ckeditor, ckeditor4, and ckeditor4-vue repositories.
Fixes have been deployed to the cloud server
|
gharchive/issue
| 2024-10-15T19:56:28 |
2025-04-01T04:35:19.566487
|
{
"authors": [
"AbnerBissolli",
"fabiosato"
],
"repo": "opencdms/surface",
"url": "https://github.com/opencdms/surface/issues/159",
"license": "MIT",
"license_type": "permissive",
"license_source": "github-api"
}
|
275646273
|
Fix: Add infection as phony target
This PR
[x] adds infection as a phony target to the Makefile (see the sketch below)
Follows #606.
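For reference, such a target typically looks like this (a sketch; the prerequisite and recipe are assumptions, not necessarily the repository's actual Makefile; note the recipe line must be tab-indented):

# Declare "infection" phony so make never skips it because a file or
# directory named "infection" happens to exist.
.PHONY: infection

infection: vendor
	vendor/bin/infection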
Thank you, @chartjes!
|
gharchive/pull-request
| 2017-11-21T09:47:51 |
2025-04-01T04:35:19.567801
|
{
"authors": [
"localheinz"
],
"repo": "opencfp/opencfp",
"url": "https://github.com/opencfp/opencfp/pull/625",
"license": "mit",
"license_type": "permissive",
"license_source": "bigquery"
}
|
198812468
|
I can't see the UI after running the commands
Hi all,
I am trying to host my own wallet on a CentOS Linux machine.
I have followed the steps below; let me know if anything else is needed.
git clone https://github.com/openchain/wallet
cd wallet
bower install
http-server www -p 8090 -a 10...* -o
Ensure you have nodejs and http-server npm module installed.
Yes, I have installed both :) yet I see nothing hosted.
What error do you get?
|
gharchive/issue
| 2017-01-04T21:14:17 |
2025-04-01T04:35:19.570098
|
{
"authors": [
"Flavien",
"hackeys",
"jayrulez"
],
"repo": "openchain/wallet",
"url": "https://github.com/openchain/wallet/issues/4",
"license": "apache-2.0",
"license_type": "permissive",
"license_source": "bigquery"
}
|
1742172421
|
Fix broken init time attempt
Instead of crashing when some NWP variables are missing, we reindex the dimension, effectively adding NaN for the missing variables.
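A minimal sketch of the idea, assuming the NWP data is an xarray DataArray with a "variable" dimension (names are illustrative, not the actual repo code):

import numpy as np
import xarray as xr

def select_variables(data: xr.DataArray, expected: list) -> xr.DataArray:
    # reindex() keeps the expected order and inserts NaN-filled slices
    # for any missing variable, instead of raising a KeyError.
    return data.reindex(variable=expected)

da = xr.DataArray(
    np.zeros((2, 3)),
    coords={"variable": ["t2m", "dswrf"], "time": range(3)},
    dims=("variable", "time"),
)
out = select_variables(da, ["t2m", "dswrf", "prate"])  # "prate" is all NaN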
Opened by mistake!
|
gharchive/pull-request
| 2023-06-05T16:57:55 |
2025-04-01T04:35:19.574343
|
{
"authors": [
"simlmx"
],
"repo": "openclimatefix/nwp",
"url": "https://github.com/openclimatefix/nwp/pull/27",
"license": "MIT",
"license_type": "permissive",
"license_source": "github-api"
}
|
1569868244
|
Get the center coordinates of GSPs
Pull Request
Description
This PR adds the method that derives the geographical center of each GSP.
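A rough sketch of such a method using geopandas (illustrative only; the function name, column handling, and the EPSG:27700 projection for GB boundaries are assumptions):

import geopandas as gpd

def get_gsp_centres(gsp_boundaries: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
    # Take centroids in a projected (metric) CRS to avoid the distortion
    # of computing them directly in latitude/longitude.
    projected = gsp_boundaries.to_crs(epsg=27700)
    centres = projected.geometry.centroid.to_crs(epsg=4326)
    result = gsp_boundaries.copy()
    result["centre"] = centres
    return result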
Fixes #
How Has This Been Tested?
Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce.
Please also list any relevant details for your test configuration
[ ] Yes
If your changes affect data processing, have you plotted any changes? i.e. have you done a quick sanity check?
[ ] Yes
Checklist:
[x] My code follows OCF's coding style guidelines
[x] I have performed a self-review of my own code
[ ] I have made corresponding changes to the documentation
[x] I have added tests that prove my fix is effective or that my feature works
[x] I have checked my code and corrected any misspellings
Codecov Report
:exclamation: No coverage uploaded for pull request base (main@23c21ba). Click here to learn what that means.
The diff coverage is n/a.
:mega: This organization is not using Codecov’s GitHub App Integration. We recommend you install it so Codecov can continue to function properly for your repositories. Learn more
@@ Coverage Diff @@
## main #16 +/- ##
=======================================
Coverage ? 67.25%
=======================================
Files ? 4
Lines ? 113
Branches ? 0
=======================================
Hits ? 76
Misses ? 37
Partials ? 0
:mega: We’re building smart automated test selection to slash your CI/CD build times. Learn more
|
gharchive/pull-request
| 2023-02-03T13:46:16 |
2025-04-01T04:35:19.580839
|
{
"authors": [
"codecov-commenter",
"vrym2"
],
"repo": "openclimatefix/pv-solar-farm-forecasting",
"url": "https://github.com/openclimatefix/pv-solar-farm-forecasting/pull/16",
"license": "MIT",
"license_type": "permissive",
"license_source": "github-api"
}
|
1384368478
|
Add support for root action flags
Related to https://github.com/opencollective/opencollective/issues/5374
Related Front-end PR: https://github.com/opencollective/opencollective-frontend/pull/8222
Should we put deleting out of scope for now? It seems it's making the issue over complex.
Should we put deleting/deleted out of scope for now? It seems it's making the issue over complex.
I agree. And I'd like to better understand the use case for it: @SudharakaP explained to me that Shannon was currently using the "Search & Destroy" tool to delete accounts, but I don't get why support needs that; people can delete their accounts themselves from the interface.
Should we put deleting/deleted out of scope for now? It seems it's making the issue over complex.
I agree. And I'd like to better understand the use case for it: @SudharakaP explained to me that Shannon was currently using the "Search & Destroy" tool to delete accounts, but I don't get why support needs that; people can delete their accounts themselves from the interface.
Yeah I've removed the deleting and banning for now from this PR. Since if we are supporting this we need to also need to think about restoring or un-banning which I think is too complex for this PR.
@Betree : As for why Search & Destroy was used, for example this support ticket; https://opencollective.freshdesk.com/a/tickets/119533 the user has lost access to one of their accounts and needs to delete this duplicate account. So they cannot do it themselves. I am wondering how we should handle cases like these?
So they cannot do it themselves. I am wondering how we should handle cases like these?
@SudharakaP For now, they should escalate to engineering. Not only because "Ban accounts" is not the right tool, but also because there are some security aspects that we need to consider when someone who's not able to login asks us to delete the account (how can we be sure they own it?).
If we get many requests like that, then maybe we want to introduce a new dedicated action.
|
gharchive/pull-request
| 2022-09-23T22:24:21 |
2025-04-01T04:35:19.593487
|
{
"authors": [
"Betree",
"SudharakaP",
"znarf"
],
"repo": "opencollective/opencollective-api",
"url": "https://github.com/opencollective/opencollective-api/pull/7981",
"license": "MIT",
"license_type": "permissive",
"license_source": "github-api"
}
|
415611379
|
Tax: for events, tax should be based on the country
See https://ec.europa.eu/taxation_customs/business/vat/eu-vat-rules-topic/where-tax_en#supply_services
We need to adapt the way we store taxes to acknowledge this change.
https://github.com/opencollective/opencollective/issues/1728 should be done first.
Resolved by https://github.com/opencollective/opencollective-api/pull/1817
|
gharchive/issue
| 2019-02-28T12:58:38 |
2025-04-01T04:35:19.595577
|
{
"authors": [
"Betree"
],
"repo": "opencollective/opencollective",
"url": "https://github.com/opencollective/opencollective/issues/1771",
"license": "MIT",
"license_type": "permissive",
"license_source": "github-api"
}
|
995325149
|
Parent Collective Update should go to Project & Event contributors too
Collectives think of Projects as inside their Collective. They don't want to have to send multiple Updates to reach their community of contributors. Project Updates going only to the Project contributors makes sense, but parent Collective Updates should go to everyone who has contributed to that Collective, its Projects, Events, Tiers, etc.
MVP: Parent Updates go to all contributors
Best solution: the Update sender can select which Project/Event/Tiers/etc contributors an Update goes to, with all being the default.
Sounds good to me. Let's remember that, by default, the behavior we apply for projects will apply to events as well.
the behavior we apply for projects will apply to events as well.
I think it works. Event Updates should go only to Event attendees/contributors, while parent Updates should go to all Project and Event contributors too.
Taking this issue if no objections. 👍🏽
@SudharakaP was this completed?
@SudharakaP was this completed?
We are working on this one. I have a pull request for this and we are at the review stage. I'll try to get this done this week. 👍🏽
Hi @SudharakaP did this end up getting done? I need to clarify if this is live for a user support ticket.
Hi @SudharakaP did this end up getting done? I need to clarify if this is live for a user support ticket.
Sorry, we are having a small issue, and I have a call with Ben this week about this one. I am pretty confident we can finalize it this week. Sorry about the wait. 😉
Making myself available if we need design support for the UI visualization of choosing updates for global, events and projects
@SudharakaP @Betree
Making myself available if we need design support for the UI visualization of choosing updates for global, events and projects @SudharakaP @Betree
Currently we just send... Something that we could, I think, prioritize for the next sprint. Since Alanna also mentioned this as the best solution, I am adding it to the next sprint plan to discuss.
|
gharchive/issue
| 2021-09-13T20:58:35 |
2025-04-01T04:35:19.600588
|
{
"authors": [
"Betree",
"Memo-Es",
"SudharakaP",
"alanna"
],
"repo": "opencollective/opencollective",
"url": "https://github.com/opencollective/opencollective/issues/4673",
"license": "MIT",
"license_type": "permissive",
"license_source": "github-api"
}
|
268601898
|
Feedback: Send notification to added members of a collective
Feedback from Danny from La Conexión Collective:
I think it would be great at a minimum for people to get notified they’ve been added as members of a collective- if not also given basic onboarding instructions, too!
Duplicate of #566
Please check if there is no related issue before and if so just +1 it.
Actually, just realized that both our issues were sent the same minute and probably crossed each other ¯_(ツ)_/¯
No worries, good coincidence!
|
gharchive/issue
| 2017-10-26T01:36:01 |
2025-04-01T04:35:19.602767
|
{
"authors": [
"nicobarretoOC",
"xdamman"
],
"repo": "opencollective/opencollective",
"url": "https://github.com/opencollective/opencollective/issues/565",
"license": "MIT",
"license_type": "permissive",
"license_source": "github-api"
}
|
2134937997
|
feat: teach omega that 2^(n.succ) = 2^n + 2^n
2^(n.succ) is a common expression when dealing with bitvectors of width greater than zero.
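Concretely, the kind of goal this would let omega close (a minimal sketch; it assumes omega abstracts 2 ^ n as an atom once Nat.pow_succ has been rewritten):

example (n : Nat) : 2 ^ (n + 1) = 2 ^ n + 2 ^ n := by
  rw [Nat.pow_succ]
  -- goal is now 2 ^ n * 2 = 2 ^ n + 2 ^ n, which is linear in the atom 2 ^ n
  omega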
I just noticed the following relevant issue: https://github.com/leanprover/std4/issues/500.
Might want to mix in the discussion there; it seems to suggest that dealing with powers is out of scope for omega (this was a statement by Eric Rodriguez, not an Std maintainer, but Scott did not contradict the statement).
Yes, I've seen this. I feel the correct approach is to add the special case first, to show that we understand the code, and then add the hook.
Makes sense, just wanted to make sure you were aware of it
Superseded by #33
|
gharchive/pull-request
| 2024-02-14T18:32:25 |
2025-04-01T04:35:19.604985
|
{
"authors": [
"alexkeizer",
"bollu"
],
"repo": "opencompl/std4",
"url": "https://github.com/opencompl/std4/pull/21",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
2636506816
|
Added defer function to clean up FIB table
Currently, after a test completes, OTG retains its config; as a result, subsequent tests fail because the FIB is full and cannot install any more routes. Added a defer function to stop the BGP protocol, which will clear all injected routes.
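A minimal sketch of the pattern, assuming the usual ondatra bindings (not the actual TE-9.1 test code):

package te9_test

import (
	"testing"

	"github.com/openconfig/ondatra"
)

func TestWithFIBCleanup(t *testing.T) {
	ate := ondatra.ATE(t, "ate")
	ate.OTG().StartProtocols(t)
	// Stop the protocols when the test returns so the BGP-injected routes
	// are withdrawn and the DUT FIB is left clean for subsequent tests.
	defer ate.OTG().StopProtocols(t)
	// ... rest of the test ...
}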
"This code is a Contribution to the OpenConfig Feature Profiles project ("Work") made under the Google Software Grant and Corporate Contributor License Agreement ("CLA") and governed by the Apache License 2.0. No other rights or licenses in or to any of Nokia's intellectual property are granted for any other purpose. This code is provided on an "as is" basis without any warranties of any kind."
Pull Request Functional Test Report for #3561 / f5e4dba46e6c9daf32acdb9117cf48f663591910
Virtual Devices (all ran TE-9.1: FIB FAILURE DUE TO HARDWARE RESOURCE EXHAUST): Arista cEOS, Cisco 8000E, Cisco XRd, Juniper ncPTX, Nokia SR Linux, Openconfig Lemming
Hardware Devices (all ran TE-9.1: FIB FAILURE DUE TO HARDWARE RESOURCE EXHAUST): Arista 7808, Cisco 8808, Juniper PTX10008, Nokia 7250 IXR-10e
Pull Request Test Coverage Report for Build 11692813101
0 of 0 changed or added relevant lines in 0 files are covered.
No unchanged relevant lines lost coverage.
Overall coverage remained the same at 55.268%.
Totals (change from base Build 11683343273): 0.0%; Covered Lines: 1983; Relevant Lines: 3588.
💛 - Coveralls
|
gharchive/pull-request
| 2024-11-05T21:28:09 |
2025-04-01T04:35:19.643207
|
{
"authors": [
"OpenConfigBot",
"bkreddy143",
"coveralls"
],
"repo": "openconfig/featureprofiles",
"url": "https://github.com/openconfig/featureprofiles/pull/3561",
"license": "apache-2.0",
"license_type": "permissive",
"license_source": "bigquery"
}
|
1466579633
|
OTG Test - DP-1.4: QoS Interface Output Queue Counters
OTG conversion of DP-1.4: QoS Interface Output Queue Counters
Fixes #796
Output log here
Pull Request Test Coverage Report for Build 3565967363
0 of 0 changed or added relevant lines in 0 files are covered.
No unchanged relevant lines lost coverage.
Overall coverage remained the same at 69.405%
Totals (change from base Build 3534504596): 0.0%; Covered Lines: 1656; Relevant Lines: 2386.
💛 - Coveralls
/gcbrun
This is experimental code with the default QoS policy config.
I am going to add several QoS traffic tests to cover the strict priority and WRR. The queue counters will be checked in each test.
I will merge this experimental PR.
Please note, a new test will replace this one.
I will merge this experimental PR.
Please note, a new test will replace this one.
Sure, thanks. Noted.
|
gharchive/pull-request
| 2022-11-28T15:01:10 |
2025-04-01T04:35:19.649797
|
{
"authors": [
"ANISH-GOTTAPU",
"coveralls",
"sezhang2"
],
"repo": "openconfig/featureprofiles",
"url": "https://github.com/openconfig/featureprofiles/pull/816",
"license": "apache-2.0",
"license_type": "permissive",
"license_source": "bigquery"
}
|
2582472019
|
README: update runtime-spec links to use main branch
Updated several links in README.md, conversion.md, image-layout.md, and spec.md to point to the 'main' branch of the runtime-spec repository.
Replaced outdated 'master' and 'v1.0.0' references with 'main' for consistency with the latest branch structure.
In general this LGTM with one minor nit. I think image-spec should consider whether we should bind these links to the current version of an external spec when we generate a release (similar to how we modify https://github.com/opencontainers/image-spec/blob/main/specs-go/version.go), but that's separate from this PR.
Thank you for the review! I agree that the branch of the external spec should be updated during the release stage.
I agree, but I disagree that doing so is separate from this PR; because we have these links currently pointing to a version, they're suitable to release as-is, and this PR changes that, so it needs to come with some way to make sure we don't forget to update them back at release time.
Because that's complicated to verify, my own preference for external specs would be that we continue to link to the latest release unless/until we can figure out a way to make that harder to forget and less error prone during release.
(So, soft NACK from me for now. ❤️)
I'd disagree that removing the current version pins is a bad thing. They are pointing to the same content from different parts of the spec, and none of those references to old versions appear to be for version specific content.
My thought for how to adjust the release process is a script that does a find and replace for all of the "main" references, replacing them with the current tag of the external spec, and after the release commit is made, reverting those changes for the next dev commit. That would allow any permanent version pins to persist through the release process. So for me, having everything say "main" in our dev branch would be helpful.
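Something along these lines, purely as a sketch (the real script in the follow-up PR may differ):

#!/bin/sh
# Hypothetical release helper: pin "main" runtime-spec links to the released
# tag before tagging; the release process reverts this for the next dev cycle.
RUNTIME_SPEC_TAG="v1.1.0"  # example value
grep -rl 'runtime-spec/blob/main' -- *.md | xargs sed -i \
  "s|runtime-spec/blob/main|runtime-spec/blob/${RUNTIME_SPEC_TAG}|g"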
I agree with all that, but I think those things should come together (the
swap to main and the script/process updates).
They are pointing to the same content from different parts of the spec, and none of those references to old versions appear to be for version specific content.
@tianon Thanks for the comments. I understand your point. However, I think these are two tasks that should be handled in separate PRs to better manage the branch version of the external links.
Current PR: keep the dev branches of external spec links in our dev branch.
Future PR: Update all external spec links to their latest release versions when releasing.
WDYT?
Without automation or process changes, I'm concerned about our ability to remember to do the latter half, which is why I think unless or until we address that somehow, the best we can or should do is link to the latest release, or even the latest revision explicitly if we want something newer/unreleased.
@ChengyuZhu6 thanks for fixing the linter error, I think you've done everything needed for this PR.
@tianon that's certainly a bad habit of ours. I'll see how fast I can put together a script, raise that as a separate PR, and then we can merge both.
That wasn't too bad. #1208 has been opened with a script and release instructions to pin our references on a release.
Ok, I'm convinced now; pending https://github.com/opencontainers/image-spec/pull/1208, this looks good to me (but I do think we have to do them together) :heart:
|
gharchive/pull-request
| 2024-10-12T03:57:54 |
2025-04-01T04:35:19.658788
|
{
"authors": [
"ChengyuZhu6",
"sudo-bmitch",
"tianon"
],
"repo": "opencontainers/image-spec",
"url": "https://github.com/opencontainers/image-spec/pull/1207",
"license": "apache-2.0",
"license_type": "permissive",
"license_source": "bigquery"
}
|
1910190360
|
Could not create the task (Open the Browser Console to get details)
My actions before raising this issue
Installing CVAT with the Installation Guide under Windows 10 (https://opencv.github.io/cvat/docs/administration/basics/installation/)
Install the Semi-Automatic and Automatic Annotation (https://opencv.github.io/cvat/docs/administration/advanced/installation_automatic_annotation/)
Steps to Reproduce (for bugs)
Open localhost:8080
Log in and create a project
Paste my labels in the required format from a text file
Create a task with a specific name and the labels from the project (no new labels created)
Press Upload and wait until the error message occurs
Expected Behaviour
That everything works fine; the health checks of the containers passed successfully.
I try to upload around ~6300 HQ .png images, but every time I try, the upload fails. The other issues with the same title didn't help me. Which specific logs should I provide for more detailed information?
Maybe the RAM is a limiting factor (I have 11.9 GB), and I checked whether any image of the dataset is broken.
Please provide logs from the cvat_server container.
@azhavoro solved it by providing more memory for WSL. It seems that every time I uploaded images, memory usage hit the maximum, which led to these failures.
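For anyone hitting the same limit: the WSL2 memory cap can be raised in %UserProfile%\.wslconfig on the Windows host (the values below are only an example):

[wsl2]
# Let WSL2, and therefore the Docker containers, use more RAM and swap.
memory=10GB
swap=8GB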
|
gharchive/issue
| 2023-09-24T10:48:41 |
2025-04-01T04:35:19.679091
|
{
"authors": [
"Petros626",
"azhavoro"
],
"repo": "opencv/cvat",
"url": "https://github.com/opencv/cvat/issues/6894",
"license": "MIT",
"license_type": "permissive",
"license_source": "github-api"
}
|
1842007880
|
Sync release-v0.11 with upstream
What this PR does / why we need it:
Pull changes from upstream release-0.11 branch into ODH release-v0.11 branch
Which issue(s) this PR fixes (optional, in fixes #<issue number>(, fixes #<issue_number>, ...) format, will close the issue(s) when PR gets merged):
Related #58
Release note:
NONE
/test e2e-slow
|
gharchive/pull-request
| 2023-08-08T20:04:49 |
2025-04-01T04:35:22.170201
|
{
"authors": [
"israel-hdez"
],
"repo": "opendatahub-io/kserve",
"url": "https://github.com/opendatahub-io/kserve/pull/59",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
1796498949
|
[Feature Request]: Add support for DataScienceCluster CR in dashboard.
Feature description
Related to #1475
Currently the dashboard uses KfDef CRs in the code base to enable Application CRs. As we are moving away from KfDef CRDs, as part of the migration, add support for the new operator CR, DataScienceCluster.
Describe alternatives you've considered
Possible Solutions
Update Dashboard code to watch for DataScienceCluster CR.
Allow operator to update ODHApplication CR when a component is enabled/disabled
Anything else?
No response
We will migrate away from the KfDefApplications and thus don't need to follow through with the CR reading.
|
gharchive/issue
| 2023-07-10T10:51:47 |
2025-04-01T04:35:22.173209
|
{
"authors": [
"VaishnaviHire",
"andrewballantyne"
],
"repo": "opendatahub-io/odh-dashboard",
"url": "https://github.com/opendatahub-io/odh-dashboard/issues/1491",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
1247000392
|
[DS Project UI] - Add feature flag to set the visibility of data science projects page
Like other feature flags in the ODH configs, we should also add a feature flag to decide whether to show the data science projects page or not.
Completed with the Data Science Projects feature.
|
gharchive/issue
| 2022-05-24T18:54:52 |
2025-04-01T04:35:22.174309
|
{
"authors": [
"DaoDaoNoCode",
"andrewballantyne"
],
"repo": "opendatahub-io/odh-dashboard",
"url": "https://github.com/opendatahub-io/odh-dashboard/issues/230",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
2694687247
|
Add NIM flag logic
Description
This will get the NIM flag state of removed or managed from params.env and add it to the environment variables for odh-model-controller to utilize.
How Has This Been Tested?
Unit tests
[ ] The commits are squashed in a cohesive manner and have meaningful messages.
[ ] Testing instructions have been added in the PR body (for PRs involving changes that are not immediately obvious).
[ ] The developer has manually tested the changes and verified that the changes work
@spolti do you know if I need to change something for the image tag failure?
/ok-to-test
|
gharchive/pull-request
| 2024-11-26T13:30:48 |
2025-04-01T04:35:22.176596
|
{
"authors": [
"israel-hdez",
"trujillm"
],
"repo": "opendatahub-io/odh-model-controller",
"url": "https://github.com/opendatahub-io/odh-model-controller/pull/312",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
1125618385
|
feature(xjx): filter topic before parsing the message
I added a topic to the beginning of the message, so we don't need to call pickle.loads when the current process is not subscribed to the topic.
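A minimal sketch of the idea (illustrative only, not the actual DI-engine wire format):

import pickle

SEP = b"::"

def encode(topic: bytes, obj) -> bytes:
    # Put the plain-bytes topic in front of the pickled payload.
    return topic + SEP + pickle.dumps(obj)

def decode(message: bytes, subscribed_topics: set):
    topic, _, payload = message.partition(SEP)
    if topic not in subscribed_topics:
        return None  # skip the expensive pickle.loads entirely
    return pickle.loads(payload)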
Move to another branch
|
gharchive/pull-request
| 2022-02-07T07:36:08 |
2025-04-01T04:35:22.202285
|
{
"authors": [
"sailxjx"
],
"repo": "opendilab/DI-engine",
"url": "https://github.com/opendilab/DI-engine/pull/203",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
691865126
|
SSL certificate hot reloading compares two identical certificates
Hello!
This issue relates to #238.
Background: I'm running Elasticsearch in Docker (the amazon/opendistro-for-elasticsearch:1.8.0 image).
Part of my elasticsearch.yml:
opendistro_security.ssl.transport.pemcert_filepath: fullchain.pem
opendistro_security.ssl.transport.pemkey_filepath: privkey.pem
opendistro_security.ssl.transport.pemtrustedcas_filepath: fullchain.pem
opendistro_security.ssl.transport.enforce_hostname_verification: false
opendistro_security.ssl.http.enabled: true
opendistro_security.ssl.http.pemcert_filepath: fullchain.pem
opendistro_security.ssl.http.pemkey_filepath: privkey.pem
opendistro_security.ssl.http.pemtrustedcas_filepath: fullchain.pem
opendistro_security.allow_unsafe_democertificates: false
opendistro_security.ssl_cert_reload_enabled: true
SSL certificates are mounted into container, my docker-compose.yml:
volumes:
# ...
- ./privkey.pem:/usr/share/elasticsearch/config/privkey.pem
- ./fullchain.pem:/usr/share/elasticsearch/config/fullchain.pem
# ...
My issue: I can't hot reload SSL certificates.
What have I tried exactly:
Copy new certificate to host
The new certificate is mounted into the container instantly.
Calling _opendistro/_security/api/ssl/http/reloadcerts fails with
{"error":"ElasticsearchSecurityException[Error while initializing http SSL layer from PEM: java.lang.Exception: New certificates should not expire before the current ones.]; nested: Exception[New certificates should not expire before the current ones.];"}
I don't know Java well enough, but it seems like the "old" certificate is being retrieved from the file as well as the "new" certificate. So, in my case, two identical certificates (which were read from the same file) were compared, because the new certificate replaced the old one (see steps 1 and 2 above).
I used to refresh SSL certificates only with a node (container) restart.
I think that only the "new" certificate should be taken from the filesystem, and the "old" one should be taken from the running server:
openssl s_client -showcerts -connect my-node.example.com:9200 </dev/null | openssl x509 -text | grep Validity -A 2
Or am I missing something? Thank you in advance!
Hi @Tarasovych,
Hot reload assumes the following -
The new certificate you use for hot reload will be in the same filepath as the old certificate.
It will not allow you to replace your old certificate with an identical one that expires on the same day. We check if the new certificate has a later expiry date than the previously used certificate.
We don't retrieve the old certificate from the file system. On initializing your cluster we store details of the certificate used. https://github.com/opendistro-for-elasticsearch/security/blame/master/src/main/java/com/amazon/opendistroforelasticsearch/security/ssl/DefaultOpenDistroSecurityKeyStore.java#L478. When you're trying to replace the certificates with a new one we check the current certificate details from this object, if the new certificate has the same IssuerDN, SubjectDN and later expiry date only then do we hot reload.
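In code, the validation described above is roughly the following (a sketch, not the actual plugin source):

import java.security.cert.X509Certificate;

static boolean canHotReload(X509Certificate current, X509Certificate candidate) {
    // Same issuer and subject, and the candidate must expire later.
    return candidate.getIssuerDN().equals(current.getIssuerDN())
        && candidate.getSubjectDN().equals(current.getSubjectDN())
        && candidate.getNotAfter().after(current.getNotAfter());
}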
Hope this answers your questions. Thanks for your interest in opendistro.
|
gharchive/issue
| 2020-09-03T10:54:58 |
2025-04-01T04:35:22.208898
|
{
"authors": [
"Tarasovych",
"debjanibnrj"
],
"repo": "opendistro-for-elasticsearch/security",
"url": "https://github.com/opendistro-for-elasticsearch/security/issues/689",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
588539484
|
Rename pool state enums to avoid conflicts with other state enums
This is a preparation commit for converting the string states of nexus and nexus child to enum states (in the gRPC proto file). Other changes:
"faulty" was changed to the more correct "faulted"
improved mapping between enum names and integer values in the JS code
For reference I'm including the planned changes to the proto file that will eventually follow this commit:
diff --git a/rpc/proto/mayastor.proto b/rpc/proto/mayastor.proto
index cb06a67..f47de76 100644
--- a/rpc/proto/mayastor.proto
+++ b/rpc/proto/mayastor.proto
@@ -140,17 +140,31 @@ message CreateNexusRequest {
repeated string children = 3; // uris to the targets we connect to
}
+// State of the nexus child.
+enum ChildState {
+ CHILD_ONLINE = 0; // healthy and contains the latest bits
+ CHILD_REBUILD = 1; // rebuild of the child is in progress
+ CHILD_FAULTED = 2; // unrecoverable error
+}
+
// represents a child device part of a nexus
message Child {
string uri = 1; // uri of the child device
- string state = 2; // TODO: enum
+ ChildState state = 2; // state of the child
+}
+
+// State of the nexus (terminology inspired by ZFS).
+enum NexusState {
+ NEXUS_ONLINE = 0; // healthy and working
+ NEXUS_DEGRADED = 1; // not healthy but is able to serve IO (i.e. rebuild is in progress)
+ NEXUS_FAULTED = 2; // broken and unable to serve IO
}
// represents a nexus device
message Nexus {
string uuid = 1; // name of the nexus
uint64 size = 2; // size of the volume in bytes
- string state = 3; // current state of the nexus (TODO: enum)
+ NexusState state = 3; // current state of the nexus
repeated Child children = 4; // array of children
// Path to device file for the volume (missing if not published).
// Missing property and empty string are treated the same.
Blaise, thanks for reviewing the changes 👍
|
gharchive/pull-request
| 2020-03-26T16:09:57 |
2025-04-01T04:35:22.211351
|
{
"authors": [
"jkryl"
],
"repo": "openebs/Mayastor",
"url": "https://github.com/openebs/Mayastor/pull/147",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
984117131
|
Add StorageCohort structure
What does this PR do?
This PR adds the initial structure as per the schema for the StorageCohort
Enhancements to be taken up
Decide the subfields for some structs
Add more verbose documentation
Refine the constants to our use case
This comment is on the finalizer for the storage cohort CR.
openebs.io/pool-protection
=> This finalizer makes sure that no pool is present at the time of storage cohort CR deletion.
Where do we add these finalizers in the CRD spec? Are they part of the structure, or do we define them in some methods of this CR?
Any reason capabilities need to be specified as part of the cohort? Is it used for matching storage pool provisioning?
Yes, in a way. This represents the aggregate capabilities of all the pools, which are updated by pool provisioners after pool provisioning/deprovisioning. It also helps the volume scheduler narrow down the pool selection for a volume.
|
gharchive/pull-request
| 2021-08-31T17:38:08 |
2025-04-01T04:35:22.222160
|
{
"authors": [
"Ab-hishek",
"shovanmaity"
],
"repo": "openebs/device-localpv",
"url": "https://github.com/openebs/device-localpv/pull/45",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
1467563655
|
Failed to create 1000 disk for testing in provision
Describe the bug
Failed to create 1000 disk for testing in provision, it exists blocking bug.
E1129 06:29:34.290652 1 controller.go:981] error syncing claim "9d89ff6c-a86d-4dad-9c5f-5a869ede4ed9": failed to provision volume with StorageClass "mayastor-1": rpc error: code = Internal desc = Operation failed: GenericOperation("error in response: status code '500 Internal Server Error', content: 'RestJsonError { details: \"Failed to reply back to message id 'v0/getVolumes' through the message bus\", kind: Internal }'")
To Reproduce
I want to test attaching/detaching 1000 disks in Mayastor. I use 10 nodes and create 10 MSPs. However, the number of disks that can be created in provisioning tops out at about 600. The log error syncing claim "cc04711e-dba1-4c00-92aa-7f8a183ef65a": failed to provision volume with StorageClass "mayastor-1": rpc error: code = DeadlineExceeded desc = context deadline exceeded appears, and provisioning is very slow, so I changed the CSI deployment to set a long timeout, as shown below.
containers:
- args:
- --v=2
- --timeout=180s
- --csi-address=$(ADDRESS)
- --feature-gates=Topology=true
- --strict-topology=false
- --default-fstype=ext4
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
image: k8s.gcr.io/sig-storage/csi-provisioner:v2.2.1
imagePullPolicy: IfNotPresent
name: csi-provisioner
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- args:
- --v=2
- --timeout=2400s
- --csi-address=$(ADDRESS)
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
image: k8s.gcr.io/sig-storage/csi-attacher:v3.2.1
imagePullPolicy: IfNotPresent
name: csi-attacher
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
However, another error appears in the csi-provisioner container of the csi-controller pod:
W1129 06:29:34.069974 1 controller.go:958] Retrying syncing claim "448f4006-d573-4c21-910a-96d1ef5498c1", failure 20
E1129 06:29:34.069997 1 controller.go:981] error syncing claim "448f4006-d573-4c21-910a-96d1ef5498c1": failed to provision volume with StorageClass "mayastor-1": rpc error: code = Internal desc = Operation failed: GenericOperation("error in response: status code '500 Internal Server Error', content: 'RestJsonError { details: \"Failed to reply back to message id 'v0/getVolumes' through the message bus\", kind: Internal }'")
I1129 06:29:34.070032 1 controller.go:1332] provision "test/persistent-storage-statefulset-local-788" class "mayastor-1": started
I1129 06:29:34.070875 1 event.go:282] Event(v1.ObjectReference{Kind:"PersistentVolumeClaim", Namespace:"test", Name:"persistent-storage-statefulset-local-704", UID:"448f4006-d573-4c21-910a-96d1ef5498c1", APIVersion:"v1", ResourceVersion:"18189", FieldPath:""}): type: 'Warning' reason: 'ProvisioningFailed' failed to provision volume with StorageClass "mayastor-1": rpc error: code = Internal desc = Operation failed: GenericOperation("error in response: status code '500 Internal Server Error', content: 'RestJsonError { details: \"Failed to reply back to message id 'v0/getVolumes' through the message bus\", kind: Internal }'")
I1129 06:29:34.070904 1 event.go:282] Event(v1.ObjectReference{Kind:"PersistentVolumeClaim", Namespace:"test", Name:"persistent-storage-statefulset-local-788", UID:"2243e9d8-5f2f-4bae-89a6-78d8e71dbb80", APIVersion:"v1", ResourceVersion:"18593", FieldPath:""}): type: 'Normal' reason: 'Provisioning' External provisioner is provisioning volume for claim "test/persistent-storage-statefulset-local-788"
I1129 06:29:34.290614 1 controller.go:1099] Final error received, removing PVC 9d89ff6c-a86d-4dad-9c5f-5a869ede4ed9 from claims in progress
W1129 06:29:34.290633 1 controller.go:958] Retrying syncing claim "9d89ff6c-a86d-4dad-9c5f-5a869ede4ed9", failure 19
Additional context
msp info is below.
$ k get msp -n mayastor
WARNING: version difference between client (1.25) and server (1.23) exceeds the supported minor version skew of +/-1
NAME NODE STATUS CAPACITY USED AVAILABLE
aks-storagepool-26798083-vmss000000 aks-storagepool-26798083-vmss000000 Online 1098433691648 0 1098433691648
aks-storagepool-26798083-vmss000001 aks-storagepool-26798083-vmss000001 Online 34321989632 0 34321989632
aks-storagepool-26798083-vmss000002 aks-storagepool-26798083-vmss000002 Online 1098433691648 0 1098433691648
aks-storagepool-26798083-vmss000003 aks-storagepool-26798083-vmss000003 Online 34321989632 0 34321989632
aks-storagepool-26798083-vmss000004 aks-storagepool-26798083-vmss000004 Online 34321989632 0 34321989632
aks-storagepool-26798083-vmss000005 aks-storagepool-26798083-vmss000005 Online 1098433691648 0 1098433691648
aks-storagepool-26798083-vmss000006 aks-storagepool-26798083-vmss000006 Online 34321989632 0 34321989632
aks-storagepool-26798083-vmss000007 aks-storagepool-26798083-vmss000007 Online 34321989632 0 34321989632
aks-storagepool-26798083-vmss000008 aks-storagepool-26798083-vmss000008 Online 34321989632 0 34321989632
aks-storagepool-26798083-vmss000009 aks-storagepool-26798083-vmss000009 Online 34321989632 0 34321989632
Which mayastor version is this on?
Do the volumes end up being provisioned or are you left with 400 unprovisioned volumes?
I used v1.0. I was left with 400 unprovisioned volumes.
I wonder if you're hitting the max default packet size on the nats message bus.
Try adding "max_payload: 4mb" on the nats configmap.
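For example, something like this in the NATS server config held by the ConfigMap (names are illustrative; adjust to your deployment):

apiVersion: v1
kind: ConfigMap
metadata:
  name: nats-config        # hypothetical name
  namespace: mayastor
data:
  nats.conf: |
    # The nats-server default max_payload is 1MB; raise it so large
    # replies (e.g. listing hundreds of volumes) fit in one message.
    max_payload: 4mb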
Great, thank you for letting us know! Btw, did you try the current develop version? https://openebs.github.io/mayastor-extensions/
No, I only test the released version.
|
gharchive/issue
| 2022-11-29T07:09:17 |
2025-04-01T04:35:22.232652
|
{
"authors": [
"tiagolobocastro",
"umagnus"
],
"repo": "openebs/mayastor",
"url": "https://github.com/openebs/mayastor/issues/1263",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
267429530
|
Remove too many CONTRIBUTING docs
BUG REPORT
Too many CONTRIBUTING-*.md docs, with same content.
What you expected to happen:
One CONTRIBUTING.md
https://help.github.com/articles/setting-guidelines-for-repository-contributors/
Environment:
kubectl get nodes
kubectl get pods --all-namespaces
kubectl get services
kubectl get sc
kubectl get pv
kubectl get pvc
OS (e.g. from /etc/os-release):
Kernel (e.g. uname -a):
Install tools:
Others:
We will need to merge all the content into this or link them from this single page. https://github.com/openebs/openebs/blob/master/CONTRIBUTING.md
I'll do it, thanks kmova.
@kmova Do you need us to merge the content of CODE_OF_CONDUCT.md and NOTICE.md into CONTRIBUTING.md and remove the two files?
@nvzard -- Nope. CODE_OF_CONDUCT and NOTICE are both ok. no change to that.
Can you review the content in https://github.com/openebs/openebs/blob/master/CONTRIBUTING.md
Check to see if it is helpful for new contributors - to find the areas they would like to contribute to and the process of contributing. The different areas to contribute are:
Raising Issues
User Documentation
Developer Documentation
Adding new examples showing the usage of openebs
Enhancing the End-to-End testing
Contributing to the source code
@kmova What are you expecting from contributors to fix this issue?
@NisantaSahoo I am taking this up; it seems simple enough. I have worked on improving documentation before.
Need clarification on the contribute directory:
Design -
General - A .gitkeep file is for empty directories; this directory isn't empty, and I don't know why a PDF is there.
Hack - I don't get the use of these shell scripts; a contributor should know how to use git, or at least learn it while contributing, so why have shell scripts to do it?
@prateekpandey14 @utkarshmani1997 please assign me to this issue and answer the questions.
|
gharchive/issue
| 2017-10-22T02:21:30 |
2025-04-01T04:35:22.242629
|
{
"authors": [
"Rupeshiya",
"ananya7",
"kmova",
"nvzard",
"vipulgupta2048"
],
"repo": "openebs/openebs",
"url": "https://github.com/openebs/openebs/issues/685",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
1957101173
|
What's new
Added what's new screen
Video:
https://github.com/openedx/openedx-app-android/assets/141041606/37399f05-7e0b-4762-8e44-d3dee4e8fabf
Thanks for the pull request, @PavloNetrebchuk! Please note that it may take us up to several weeks or months to complete a review and merge your PR.
Feel free to add as much of the following information to the ticket as you can:
supporting documentation
Open edX discussion forum threads
timeline information ("this must be merged by XX date", and why that is)
partner information ("this is a course on edx.org")
any other information that can help Product understand the context for the PR
All technical communication about the code itself will be done via the GitHub pull request interface. As a reminder, our process documentation is here.
Please let us know once your PR is ready for our review and all tests are green.
Minor note / question from me - is the background color of the bottom bar something that can be changed? I noticed that for both light and dark modes the color differs from the screen background. If it is possible to match the same color that would be great.
Minor note / question from me - is the background color of the bottom bar something that can be changed? I noticed that for both light and dark modes the color differs from the screen background. If it is possible to match the same color that would be great.
@marcotuts
It looks like we can implement that behavior, but it might affect all screens in the application. Is that alright?
More info: https://developer.android.com/develop/ui/views/layout/edge-to-edge.
Thank you, these screens look really great!! I only see a few very small potential changes (these are really specific, I don't think they need to be changed to move forward):
When the title is close to the top cutout (camera punch in this case) in portrait mode, is it possible to shift the title down by 8px so the text isn't right up next to the cutout? This would also apply to the other titlebars, like in the Discover tab.
In the Figma designs, update text is vertically snapped to the bottom above the buttons, rather than the top underneath the title. I think it looks good either way here, so I just updated the Figma designs to match this.
As the "Previous" button appears, its icon spins around a bit. Can the animation only be an opacity fade in, without the icon moving?
I don't think these are important enough to spend a lot of time updating, everything looks good to me! Cross-posting a similar comment here https://github.com/openedx/openedx-app-ios/pull/131
I think having this be transparent would be great for all screens, if this causes issues elsewhere we can reverse course but that seems like it'll work
Hello, @sdaitzman
The emulator does not work well with the cutout, but we don't have that problem on real devices.
Thank you.
I have resolved the issue.
https://github.com/openedx/openedx-app-android/assets/141041606/6532711c-2348-4887-acb7-893fd773ff45
Done. On most screens, the behavior will be the same.
Thanks @PavloNetrebchuk, this all looks great to me!
@PavloNetrebchuk 🎉 Your pull request was merged! Please take a moment to answer a two question survey so we can improve your experience in the future.
|
gharchive/pull-request
| 2023-10-23T12:54:18 |
2025-04-01T04:35:22.326831
|
{
"authors": [
"PavloNetrebchuk",
"marcotuts",
"openedx-webhooks",
"sdaitzman",
"volodymyr-chekyrta"
],
"repo": "openedx/openedx-app-android",
"url": "https://github.com/openedx/openedx-app-android/pull/77",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
1954250783
|
[sanity check] PropTypes.oneOfType([PropTypes.element, PropTypes.func]) replaced with PropTypes.elementType
Example component where issue is seen:
DataTable supports a columns prop that allows consumers to customize their table's column configuration. For each column, consumers may override the Cell property within react-table to customize the display and bring in custom UI elements / formatting.
Currently, the Cell property accepts a prop type similar to PropTypes.oneOfType([PropTypes.element, PropTypes.func]). However, this does not support passing an element type, i.e. just the component name.
<DataTable
  columns={[
    {
      id: 'testColumn',
      Cell: TestTableColumnCell,
    },
  ]}
/>
where TestTableColumnCell might be the following, for example:
function TestTableColumnCell({ row }) {
  return `${row.original.firstName} ${row.original.lastName}`;
}
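For reference, a minimal sketch of the prop type change the issue title describes; the column shape below is assumed from the example above, not copied from Paragon's source:
import PropTypes from 'prop-types';

// Before: a plain component reference like `Cell: TestTableColumnCell` fails validation.
// Cell: PropTypes.oneOfType([PropTypes.element, PropTypes.func]),

// After: PropTypes.elementType accepts component types (function or class components).
const columnShape = PropTypes.shape({
  id: PropTypes.string,
  Cell: PropTypes.elementType,
});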
https://github.com/openedx/paragon/pull/2749
@PKulkoRaccoonGang please check other components for the same issue
|
gharchive/issue
| 2023-10-20T13:02:42 |
2025-04-01T04:35:22.330124
|
{
"authors": [
"adamstankiewicz",
"monteri",
"viktorrusakov"
],
"repo": "openedx/paragon",
"url": "https://github.com/openedx/paragon/issues/2737",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
1068590494
|
[BD-46] feat: add new subcomponents to Card
add Section, Divider, Footer, ImageCap components to Card
add horizontal variant according to Figma spec
update documentation for Card component
Preview: https://deploy-preview-914--paragon-edx.netlify.app/components/card/
JIRA:
PAR-308 (Whole Card is clickable)
PAR-309 (Card component technical design)
PAR-311 (ImageCap)
PAR-312 (Section)
PAR-313 (Divider)
PAR-314 (Footer)
PAR-316 (Update Card documentation)
PAR-569(Add horizontal variant)
Thanks for the pull request, @viktorrusakov! I've created BLENDED-1029 to keep track of it in Jira. More details are on the BD-46 project page.
When this pull request is ready, tag your edX technical lead.
Answered your comments and fixed all the nits also, thank you for pointing them out!
For some reason the edx/cla check does not pass anymore 🤔.
I believe I've addressed all the issues, should be good to merge now hopefully 🙂
@adamstankiewicz I've fixed the image sizing, please take a look
@viktorrusakov 🎉 Your pull request was merged! Please take a moment to answer a two question survey so we can improve your experience in the future.
:tada: This PR is included in version 19.0.0 :tada:
The release is available on:
npm package (@latest dist-tag)
GitHub release
Your semantic-release bot :package::rocket:
|
gharchive/pull-request
| 2021-12-01T16:05:39 |
2025-04-01T04:35:22.339443
|
{
"authors": [
"edx-semantic-release",
"edx-webhook",
"viktorrusakov"
],
"repo": "openedx/paragon",
"url": "https://github.com/openedx/paragon/pull/914",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
663408301
|
Refine control: Last modified date selector
#1306
Checklist
[x] the [contributor license agreement][] is signed
[x] commit message follows [commit guidelines][]
[x] tests are included
[ ] screenshots are included showing significant UI changes
[ ] documentation is changed or added
Description of change
Created another refine control: Last modified date selector. This control shows a Dropdown of Quick date options or two custom Date pickers, depending on the value of the Quick mode switch, which is also included in this control. This control is also compatible with the "back to Search page" feature.
Luxon is used to deal with date formats and calculations.
In the Search module, added some utils to help transform data between a date range and a Quick date option. However, I wonder if we should move these utils to a new module dedicated to the Last modified date.
Lastly, added a new Storybook for this control.
One more side note: during development I found that the material-ui picker dependency we are using is very old. I tried to update it to the latest version, which resulted in some errors on other pages due to significant changes, so I did not proceed.
I also noted that in the screenshot for two Date pickers, the space between them is quite large. This is because each of them is wrapped in a MUI Grid item and the Grid takes full width of Storybook's Canvas area. So when it comes to the Search page refine panel, the space will look fine.
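As a rough illustration of the Quick-option-to-date-range transform described above, a helper built on Luxon might look like the sketch below; the option names and function shape are assumptions, not the actual openEQUELLA utils:
import { DateTime } from 'luxon';

// Hypothetical helper: map a Quick date option to a [start, end] range.
function quickOptionToRange(option) {
  const now = DateTime.local();
  switch (option) {
    case 'Today':
      return [now.startOf('day'), now];
    case 'Last seven days':
      return [now.minus({ days: 7 }), now];
    case 'Last month':
      return [now.minus({ months: 1 }), now];
    default:
      return [undefined, now];
  }
}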
Updates for Storybook screenshots.
Here are two updated storybook screenshots. I removed some unnecessary knobs.
|
gharchive/pull-request
| 2020-07-22T01:34:13 |
2025-04-01T04:35:22.365257
|
{
"authors": [
"PenghaiZhang"
],
"repo": "openequella/openEQUELLA",
"url": "https://github.com/openequella/openEQUELLA/pull/2087",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
253008984
|
[agenda] make speaker links on public talk pages bold, as planned
The agenda scss contains a selector a.speaker to make speaker links on public talk pages bold. But there is no <a class="speaker"> on that page. Only a <div class="speaker"><a>. This PR reflects the actual HTML structure in CSS.
oh. nvm. m)
|
gharchive/pull-request
| 2017-08-25T20:25:01 |
2025-04-01T04:35:22.366849
|
{
"authors": [
"luto"
],
"repo": "openeventstack/pretalx",
"url": "https://github.com/openeventstack/pretalx/pull/75",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
2189331491
|
Update security-self-assessment.md
Basic Threat Landscape
Secure Development Practices improved with status badges
all-around fixes
Description
References
Review Checklist
[ ] I have clicked on "allow edits by maintainers".
[ ] I have added documentation for new/changed functionality in this PR or in a PR to openfga.dev [Provide a link to any relevant PRs in the references section above]
[ ] The correct base branch is being used, if not main
[ ] I have added tests to validate that the change in functionality is working as expected
does this supersede https://github.com/openfga/openfga/pull/1206 ?
does this supersede #1206 ?
It does. I have incorporated suggestions. The source of truth for self-assessments seems to be the cncf/tag-security repository.
Hmmm, did I mess something up by closing https://github.com/openfga/openfga/pull/1206?
BTW, we should delete this one instead of updating it here, right? the assessment will live here https://github.com/cncf/tag-security/pull/1235
We should close this PR :) we are handling it here https://github.com/cncf/tag-security/pull/1235
|
gharchive/pull-request
| 2024-03-15T19:28:43 |
2025-04-01T04:35:22.409116
|
{
"authors": [
"aaguiarz",
"lj365",
"miparnisari"
],
"repo": "openfga/openfga",
"url": "https://github.com/openfga/openfga/pull/1456",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
199100745
|
Building a project with two or more fonts is causing the Haxe 3.4 compiler to hang
From @jgranick on January 5, 2017 21:19
This is very strange, and very recent. Let's figure out what's going on 😦
http://community.openfl.org/t/can-t-compile-more-than-1-font-on-asset-folder/8611/2
Copied from original issue: openfl/openfl#1418
I just changed to another system and it does not hang. Both are running Windows 10 and the same version of the Haxe compiler, though one is not using an SSD drive. Still investigating
EDIT: Nevermind, the other system had Haxe 3.2.1, not the Haxe 3.4 RC. This is a regression in Haxe 3.4
|
gharchive/issue
| 2017-01-06T01:09:16 |
2025-04-01T04:35:22.415286
|
{
"authors": [
"jgranick"
],
"repo": "openfl/lime",
"url": "https://github.com/openfl/lime/issues/874",
"license": "mit",
"license_type": "permissive",
"license_source": "bigquery"
}
|
450766828
|
[html5] TextField htmlText <font color=... bug
TextField.htmlText incorrectly displays text with a font color tag:
class App extends Sprite {
  constructor() {
    super();
    var t = new TextField();
    var s = "<p><font color='#ff0000'>foo</font></p>";
    s += "<p><font color='#00ff00'>bar</font></p>";
    s += "<p><font color='#0000ff'>foobar</font></p>";
    t.htmlText = s;
    this.addChild(t);
  }
}
see
https://codesandbox.io/s/orgopenflsamplessandboxes6-0li0u?fontsize=14
Does it work if you replace the p tags with \n? If so, I know where the problem is...
Double checked and fixed it, without breaking the original intended fix
|
gharchive/issue
| 2019-05-31T12:07:14 |
2025-04-01T04:35:22.417845
|
{
"authors": [
"Klug76",
"MSGhero"
],
"repo": "openfl/openfl",
"url": "https://github.com/openfl/openfl/issues/2195",
"license": "mit",
"license_type": "permissive",
"license_source": "bigquery"
}
|
52230471
|
Line stroke improperly drawn on iOS or Mac
I think I ran into a bug on Mac and iOS targets. I've tested it on HTML5 and Flash as well but both works as expected. Basically I'm drawing a rectangle with a thick border and the bottom right corner is for some reason not drawn straight.
It's easy to replicate with the following code:
var background:Sprite;
background = new Sprite();
background.x = 0;
background.y = 0;
background.graphics.lineStyle(20, 0x37B89E);
background.graphics.beginFill(0x47C8AE, 1.0);
background.graphics.drawRect(0, 0, 960, 640);
background.graphics.endFill();
addChild(background);
If I set LineScaleMode.NONE it draws the line straight, though, so perhaps it's something related to that.
I'm using:
hxcpp 3.1.39
openfl 2.1.7
openfl-native 1.4.0
lime 2.0.1
iOS
OSX
Flash
HTML5
Merging with #1104
|
gharchive/issue
| 2014-12-17T11:41:13 |
2025-04-01T04:35:22.421560
|
{
"authors": [
"ibilon",
"orignaux"
],
"repo": "openfl/openfl",
"url": "https://github.com/openfl/openfl/issues/428",
"license": "mit",
"license_type": "permissive",
"license_source": "bigquery"
}
|
298374261
|
Loading toast in login screen is not properly placed.
Summary:
When a user signs in to the app, the Loading toast appears half outside the screen; it should instead be in the bottom middle of the screen.
Steps to reproduce:
Login in the app
Expected behavior:
The Loading toast should not be half outside the screen; it should be in the bottom middle of the screen
Observed behavior:
I Would like to work on this issue
You can also take a look at #430
@huzaifaiftikhar is someone working on that ?
Thanks for logging this but this is already being worked on in #739 :) I am going to close this issue for the time being!
@Arshilgenius You can post a comment there and ask if anyone is working on it or not.
|
gharchive/issue
| 2018-02-19T19:00:27 |
2025-04-01T04:35:22.424874
|
{
"authors": [
"Arshilgenius",
"Karljoones",
"huzaifaiftikhar"
],
"repo": "openfoodfacts/openfoodfacts-androidapp",
"url": "https://github.com/openfoodfacts/openfoodfacts-androidapp/issues/817",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
1786964671
|
Stuck on splash screen
What
The app keeps crashing upon trying to open it and I can't uninstall the app.
Steps to reproduce the behavior
Open the app
Try to uninstall the app after it fails to open
Cry and smash phone against the wall after the app won't uninstall
Delete F-Droid and never use it again
Take a shower while listening to BFK10 on loop
Shove a 10 pound lobster up your ass
I am able to uninstall it, but it doesn't solve anything. It still shows a totally gray screen with no logo or text whatsoever until Android shows an error message saying the app is not responding.
Is it the same as #4261?
Same issue. I can't uninstall it. App installer error when trying to uninstall. Around the same time, my phone logged out of an unknown Twitter account. Weird, never seen that before. Security scan is clear and the firewall is now enabled.
Did you install the app via FDroid?
If so, there's a known bug and we wait them to release a new build
I close this issue, as it's a duplicate of #4261
|
gharchive/issue
| 2023-07-04T00:36:34 |
2025-04-01T04:35:22.445569
|
{
"authors": [
"J053Fabi0",
"StarCtrLH",
"UberPolice",
"g123k"
],
"repo": "openfoodfacts/smooth-app",
"url": "https://github.com/openfoodfacts/smooth-app/issues/4276",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
1837666972
|
feat: App review in the "scan card"
Hi everyone,
We clearly need to improve our ratings in stores. As the release is approaching, this is just a baby step to better ask users to review the app.
A few things:
We now count the number of scans (will be useful later)
The review thing will have a 1/10 chance to appear after a valid scan
I have created a better way to migrate user preferences (previously we only added a boolean stating whether it was the first scan)
With the "no" answer, we redirect to the form
The code to check if the user still uses "Smoothie" is removed
The layout is tested on small devices
A video: Demo.webm
Some screenshots:
Codecov Report
Merging #4450 (3bb4675) into develop (1c3921e) will increase coverage by 0.09%.
The diff coverage is 21.01%.
@@ Coverage Diff @@
## develop #4450 +/- ##
===========================================
+ Coverage 10.17% 10.26% +0.09%
===========================================
Files 295 296 +1
Lines 15087 15148 +61
===========================================
+ Hits 1535 1555 +20
- Misses 13552 13593 +41
Files Changed
Coverage Δ
...cards/product_cards/smooth_product_card_found.dart
0.00% <ø> (ø)
...kages/smooth_app/lib/helpers/analytics_helper.dart
6.45% <ø> (ø)
...smooth_app/lib/helpers/haptic_feedback_helper.dart
0.00% <ø> (ø)
...e_panel/knowledge_panels/knowledge_panel_card.dart
0.00% <ø> (ø)
...l/knowledge_panels/knowledge_panel_title_card.dart
0.00% <ø> (ø)
.../lib/knowledge_panel/knowledge_panels_builder.dart
0.00% <ø> (ø)
packages/smooth_app/lib/main.dart
12.90% <ø> (ø)
packages/smooth_app/lib/pages/image_crop_page.dart
0.99% <ø> (ø)
.../smooth_app/lib/pages/navigator/app_navigator.dart
0.00% <ø> (ø)
...p/lib/pages/onboarding/consent_analytics_page.dart
0.00% <ø> (ø)
... and 31 more
:mega: We’re building smart automated test selection to slash your CI/CD build times. Learn more
You don't like (our>>the) app
This is intended, but we can revert it if necessary.
The initial call to action is generic ("the app"), but when the user opts for negative feedback, it's a way to minimize their anger and get a better comment than a generic "it's just shit, it doesn't work, blah blah".
Fair enough, let's see if that pans out
|
gharchive/pull-request
| 2023-08-05T09:30:26 |
2025-04-01T04:35:22.548027
|
{
"authors": [
"codecov-commenter",
"g123k",
"teolemon"
],
"repo": "openfoodfacts/smooth-app",
"url": "https://github.com/openfoodfacts/smooth-app/pull/4450",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
2034389731
|
feat: 4889 - new simplified nutriscore widget
What
This PR adds a simplified version of the nutriscore display conforming to the big redesign, on top of the KP and only if the "reordering KP" flag is set in DEV mode.
While coding, several questions:
Should we try to be a11y compliant? At least I tried with colors AND icons. Can easily be turned off back to "just the color"
We don't have any localizations for "salt", and I don't know if we should expect them from the server or if we should localize them in the app
I had to extract the data from the server (e.g. "1.2%") - again, should the server provide us with that raw data?
The simplified nutriscore is about 50% less tall than the current version. That goes in the right direction.
Screenshots
test 1
test 2
Fixes bug(s)
Fixes: #4889
Files
New files:
evaluation_extension.dart: a11y helper for Evaluation.
knowledge_panel_enum.dart: Helper around knowledge panel ids.
knowledge_panel_simplified_row.dart: Row that displays two widgets on same width columns (half max width).
knowledge_panel_simplified_title.dart: Full width title for a simplified knowledge panel.
knowledge_panel_simplified_widget.dart: Simplified widget for knowledge panel.
nutriscore_simplified_widget.dart: Simplified nutriscore widget, with nutriscore and 4 other attributes.
Impacted files:
knowledge_panel_title_card.dart: moved code to new class EvaluationExtension
new_product_page.dart: added simplified nutriscore widget triggered by dev mode only
reorderable_knowledge_panel_page.dart: minor refactoring using new class KnowledgePanelEnum
Codecov Report
Attention: 123 lines in your changes are missing coverage. Please review.
Comparison is base (5ffdca3) 9.68% compared to head (c12d035) 9.62%.
Files
Patch %
Lines
...ig_redesign/knowledge_panel_simplified_widget.dart
0.00%
54 Missing :warning:
...big_redesign/knowledge_panel_simplified_title.dart
0.00%
24 Missing :warning:
...uct/big_redesign/nutriscore_simplified_widget.dart
0.00%
17 Missing :warning:
...ges/product/big_redesign/evaluation_extension.dart
0.00%
14 Missing :warning:
...t/big_redesign/knowledge_panel_simplified_row.dart
0.00%
8 Missing :warning:
...l/knowledge_panels/knowledge_panel_title_card.dart
0.00%
2 Missing :warning:
...smooth_app/lib/pages/product/new_product_page.dart
0.00%
2 Missing :warning:
...ages/product/reorderable_knowledge_panel_page.dart
0.00%
2 Missing :warning:
Additional details and impacted files
@@ Coverage Diff @@
## develop #4890 +/- ##
==========================================
- Coverage 9.68% 9.62% -0.06%
==========================================
Files 318 323 +5
Lines 16121 16218 +97
==========================================
Hits 1561 1561
- Misses 14560 14657 +97
:umbrella: View full report in Codecov by Sentry.
:loudspeaker: Have feedback on the report? Share it here.
Hey @monsieurtanuki :-)
the PR is a good start
as was mentioned in the tracker issue for the PoC implementation, it should eventually rely on server side evolutions (issue there: https://github.com/openfoodfacts/openfoodfacts-server/issues/9368 mentioned in https://github.com/openfoodfacts/smooth-app/issues/4842 )
So we can expect some further refactoring so that the server can trigger this layout, including for the environment page
there are some other issues that rely on server side changes (server side issues are listed). Please ensure to ping @stephanegigandet or @alexgarel before moving
@g123k @monsieurtanuki We discussed the square layout yesterday in the Product Opener call. John is going to work on it, we made the necessary decision, and hopefully it will arrive soon.
We also felt it was necessary to uncouple the a11y decision, although we discussed it along the way. There are several options (smileys, thumbs up and down, or simply unambiguous text) and there's no agreement on it. We also want to bring the website on the same level.
Closing as stale.
|
gharchive/pull-request
| 2023-12-10T14:17:17 |
2025-04-01T04:35:22.572483
|
{
"authors": [
"codecov-commenter",
"monsieurtanuki",
"teolemon"
],
"repo": "openfoodfacts/smooth-app",
"url": "https://github.com/openfoodfacts/smooth-app/pull/4890",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
712325755
|
England 1998/1999 games all one day offset
Hi all,
Looks like in all of the 1998-99 files for England, the games are 1 day too early. For example, see:
https://github.com/openfootball/england/blob/master/archive/1990s/1998-99/1-premierleague.txt
This file tells us that:
Matchday 1
[Fri Aug/14]
Wimbledon FC 3-1 Tottenham
But actually it was played on Sat Aug/15:
https://www.premierleague.com/match/2534
Can I go and correct all these files?
Make it online to play with friends
Maybe make it easily rankable.
Welcome to football.db. Yes, of course - your fixes are more than welcome. If you're interested you are welcome to join the github org(anization) so can change the dataset without going through a pull request. Just let me know. Keep it up. Cheers. Prost.
|
gharchive/issue
| 2020-09-30T22:35:51 |
2025-04-01T04:35:22.622430
|
{
"authors": [
"geraldb",
"sjwarner",
"vivektamore"
],
"repo": "openfootball/england",
"url": "https://github.com/openfootball/england/issues/38",
"license": "CC0-1.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
2263917051
|
Add CHANGELOG to rendered docs
I was looking for changes in the docs and they're not included. I (weakly) prefer everything being in one place since that's how our other projects track changes and I've developed a habit of expecting it there.
Changes made in this Pull Request:
Render CHANGELOG in docs
Slightly pretty-ify some of the CHANGELOG
PR Checklist
[ ] Tests?
[ ] Docs?
[ ] CHANGELOG updated?
[ ] Issue raised/referenced?
Codecov Report
All modified and coverable lines are covered by tests :white_check_mark:
Project coverage is 66.34%. Comparing base (e0fc989) to head (db596b5).
Additional details and impacted files
https://openff-docs--116.org.readthedocs.build/projects/nagl/en/116/CHANGELOG.html#v0-3-8-2024-04-11
Pulling the trigger on this one just so I don't forget about it for a year :)
|
gharchive/pull-request
| 2024-04-25T15:35:39 |
2025-04-01T04:35:22.627811
|
{
"authors": [
"codecov-commenter",
"mattwthompson"
],
"repo": "openforcefield/openff-nagl",
"url": "https://github.com/openforcefield/openff-nagl/pull/116",
"license": "MIT",
"license_type": "permissive",
"license_source": "github-api"
}
|
429509967
|
Add tests for constraints and multiple molecule parameterization
Fixes #152 and #180.
I've ended up testing the parametrization of multiple molecules by mixing the AlkEthOH AMBER files with ParmEd and comparing those parameters with the multi-molecule System generated by ForceField.
I found an issue with mixing AMBER files with ParmEd while adding the test (see ParmEd/ParmEd#1045), but I think I found a workaround. It's good to keep that in mind though.
Thanks for the review!
|
gharchive/pull-request
| 2019-04-04T22:29:03 |
2025-04-01T04:35:22.637310
|
{
"authors": [
"andrrizzi"
],
"repo": "openforcefield/openforcefield",
"url": "https://github.com/openforcefield/openforcefield/pull/227",
"license": "MIT",
"license_type": "permissive",
"license_source": "github-api"
}
|
544034152
|
Coverage Offering
The API-Coverages draft returns a Coverage Offering from /collections/{coverageid}/coverage. The schema for the resource this URL returns is mostly empty. What should be included in a Coverage Offering?
Coverages SWG call: Latest discussions and draft updates have resolved that issue (e.g. 6094940d8d2fed741f9bf7d40e548a7b372b58a3).
|
gharchive/issue
| 2019-12-30T22:49:22 |
2025-04-01T04:35:22.655872
|
{
"authors": [
"Schpidi",
"cmheazel"
],
"repo": "opengeospatial/ogc_api_coverages",
"url": "https://github.com/opengeospatial/ogc_api_coverages/issues/40",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
579048989
|
Make multiple terrain providers support
Separate each provider visualization by its bounds and zoom level.
This is partially addressed by the last terrain provider update. Only the handling of a few terrain data sources is left.
|
gharchive/issue
| 2020-03-11T06:57:57 |
2025-04-01T04:35:22.700232
|
{
"authors": [
"Zemledelec"
],
"repo": "openglobus/openglobus",
"url": "https://github.com/openglobus/openglobus/issues/232",
"license": "MIT",
"license_type": "permissive",
"license_source": "github-api"
}
|
1091202514
|
Mordhau class missing useful server info
Hi,
I am leveraging opengsq to build a public server monitoring frontend for Mordhau. For this purpose I could use as much information as is available through a2s.
https://github.com/opengsq/opengsq-python/blob/4f2b57a8defa159bf74435ec1f78f70f72b19f36/opengsq/servers/mordhau.py#L13
Would it be possible to extend this referenced part with VAC boolean, passworded server boolean, platform string and the keywords string?
hi @pblaas,
Please notice that the latest version of opengsq is v0.1.1, mordhau.py has been removed.
You may use the script below to get the full server query data.
import asyncio
from opengsq.protocols import Source

async def main():
    source = Source(address='78.108.216.207', query_port=27015)
    info = await source.get_info()
    print(info)

asyncio.run(main())
{
    'Protocol': 17,
    'Name': 'Duke of York - EU - Duel/Roleplay - Rp Feitoria/Noria♛',
    'Map': 'Feitoria_RP',
    'Folder': 'mordhau',
    'Game': 'Mordhau',
    'ID': 0,
    'Players': 0,
    'MaxPlayers': 65,
    'Bots': 0,
    'ServerType': 'd',
    'Environment': 'w',
    'Visibility': 0,
    'VAC': 0,
    'Version': '502402952',
    'EDF': 177,
    'GamePort': 7500,
    'SteamID': 90154808672368644,
    'Keywords': 'C:4107,B:19,N:Deathmatch',
    'GameID': 629760
}
ok thanks.
sorry @pblaas, I haven't published the latest release yet 😅, I will release v1.0.0 soon, thank you.
thanks, btw I don't think the Source implementation returned the proper player count. This was fixed in the Mordhau module. I currently use an RCON implementation. Not ideal, but it does the job.
Thank you for building open source. Regards, Patrick
Mordhau doesn't return the player count in the Players field, but you can get the player count from the B value in Keywords
'Keywords': 'C:4107,B:19,N:Deathmatch',
See the fix implemented: https://github.com/opengsq/opengsq-python/blob/4f2b57a8defa159bf74435ec1f78f70f72b19f36/opengsq/servers/mordhau.py#L23
aaah thanks! I didn't notice.
v1.0.0 has been released, thank you for using opengsq, thank you.
|
gharchive/issue
| 2021-12-30T16:36:49 |
2025-04-01T04:35:22.727712
|
{
"authors": [
"BattlefieldDuck",
"pblaas"
],
"repo": "opengsq/opengsq-python",
"url": "https://github.com/opengsq/opengsq-python/issues/2",
"license": "MIT",
"license_type": "permissive",
"license_source": "github-api"
}
|
1170408894
|
add metric server on lma
Add metric server installation
It seems better to put metric-server where the CSI is installed when a cluster is created. Also, don't we need the kubelet-insecure-tls option? It isn't in decapod-site.
When this was ticketed, we were told that running helm without any options would be enough. If there are options that need to be added, please let me know.
Moved to the tks-cluster app
|
gharchive/pull-request
| 2022-03-16T00:45:43 |
2025-04-01T04:35:23.041954
|
{
"authors": [
"intelliguy",
"seungkyua"
],
"repo": "openinfradev/decapod-base-yaml",
"url": "https://github.com/openinfradev/decapod-base-yaml/pull/151",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
143659971
|
Remove goog.string
Added a utils.js file which has one function in it at the moment (padNumber) - or would this be better suited to a string.js file?
Also not sure how to tackle compareVersions; could we have a very simplified case and only deal with numeric types? i.e. not have to deal with "1.2b"
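For context, here is a sketch of what a goog.string-free padNumber and a numeric-only compareVersions could look like; these are assumed shapes, not necessarily the PR's exact code:
// Pad a number with leading zeros to the given width, e.g. padNumber(7, 3) -> '007'.
function padNumber(number, width) {
  const str = String(number);
  return str.length >= width ? str : '0'.repeat(width - str.length) + str;
}

// Numeric-only version comparison: returns <0, 0 or >0, e.g. compareVersions('1.10', '1.9') > 0.
function compareVersions(v1, v2) {
  const parts1 = String(v1).split('.').map(Number);
  const parts2 = String(v2).split('.').map(Number);
  for (let i = 0; i < Math.max(parts1.length, parts2.length); i++) {
    const diff = (parts1[i] || 0) - (parts2[i] || 0);
    if (diff !== 0) {
      return diff;
    }
  }
  return 0;
}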
Looks good. +1 on only handling numeric versions. Just one thing: could you rename utils to string? Also the file name.
Done :) not sure about the numeric versions. I put tests in for it and they seem to pass :) So ready for review
This looks good @nicholas-l. Thanks for the work on it. A few comments above about the tests. Any failures in ol.string.compareVersions() assertions aren't currently being caught.
@tschaub changed according to comments
@nicholas-l can you please squash all your commits into one? (there a good explanation on how to do that here: https://github.com/openlayers/ol3/pull/4868#issuecomment-187785482)
@fredj squashed
Thanks @nicholas-l
|
gharchive/pull-request
| 2016-03-26T03:36:18 |
2025-04-01T04:35:23.430861
|
{
"authors": [
"ahocevar",
"fredj",
"nicholas-l",
"tschaub"
],
"repo": "openlayers/ol3",
"url": "https://github.com/openlayers/ol3/pull/5132",
"license": "BSD-2-Clause",
"license_type": "permissive",
"license_source": "github-api"
}
|
792524622
|
Call Full Screen control of Map Programatically
I am using an OpenLayers map with the full screen control added, which I use to display the map in full screen mode. I want to invoke that control programmatically.
Describe the solution you'd like
I need a way to trigger the full screen control of an OpenLayers map programmatically.
map.getTargetElement().requestFullscreen(); will put the map in full screen, but browsers may prohibit this as a security measure if it is run outside an event handler for a user-generated event.
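A minimal sketch of wiring it to a user-generated event so browsers allow the request (the button id is an assumption):
// Browsers generally only grant fullscreen from inside a user-gesture handler.
document.getElementById('fullscreen-button').addEventListener('click', function () {
  map.getTargetElement().requestFullscreen();
});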
|
gharchive/issue
| 2021-01-23T10:20:38 |
2025-04-01T04:35:23.432729
|
{
"authors": [
"MoonE",
"spandanparikh"
],
"repo": "openlayers/openlayers",
"url": "https://github.com/openlayers/openlayers/issues/11940",
"license": "BSD-2-Clause",
"license_type": "permissive",
"license_source": "github-api"
}
|
2694510
|
CSS-based tile animation
This pull request suggests adding CSS-based animations when displaying tiles. To enable tile animations the tileAnimation property should be set to true in the map instance (default is true). Some CSS rule defining transitions on the opacity property should also be present. This pull request adds the following rule to the default theme's style.css file:
.olMapTileAnim .olTileImage {
    -webkit-transition: opacity 0.2s linear;
    -moz-transition: opacity 0.2s linear;
    -o-transition: opacity 0.2s linear;
    transition: opacity 0.2s linear;
}
People not relying on OpenLayers' default theme will need to add some CSS rule of this sort, or they'll need to disable tile animations by setting tileAnimation to false in the map options. (I'll document this in the 2.12 release notes if my commits are merged into master.)
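For those cases, a minimal sketch of opting out at map construction time, per the tileAnimation option described above:
// OpenLayers 2.x style: disable CSS tile fade-in for maps without the default theme.
var map = new OpenLayers.Map('map', {
    tileAnimation: false
});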
All tests pass in FF8, Chromium 15, and IE8.
One thing I'm not sure about is how tile animations look on Android. Any testing would be much appreciated.
FWIW, I'd also like to add CSS-based zoom animations in the future. I've been thinking about redesigning our backbuffer implementation to work at the layer container level instead of at the layer level. I've also been imagining introducing a zoomAnimation option at the map level, which would be consistent with the tileAnimation option introduced by this pull request.
I'd like to change the implementation a bit. I want to not rely on the transitionend event. Relying on the transitionend event causes problems if tileAnimation is true and no CSS transition rule is defined. There are certainly lots of people not using OpenLayers' default theme out there. For those, because tileAnimation defaults to true (which I think we want), their tiled layers won't behave correctly because loadend will never get triggered and the layer's backbuffer will never be removed. People using the loading panel add-in will also have issues.
This is a really nice enhancement. I'm working on some changes to handle the transition end events.
With eb924c8f we no longer use transitionend. We prevent flashes by delaying the removal of the back buffer with a simple setTimeout call. This is to avoid problems when no CSS transition rule is defined, which is the case for people not using OpenLayers' default theme (style.css). And there's another benefit: the code is simpler.
This is really looking good. I'm still seeing some flashes with the 800ms timeout - they get much less frequent at 2500ms for me. I also think we can get rid of the added map property. These changes are in https://github.com/elemoine/openlayers/pull/3.
Please merge if you agree. Thanks for this great enhancement.
The problem with large timeout values is that we re-use the old back buffer while we have new tiles, and, therefore, a better potential back buffer at our disposal. Was 2500 the smallest value you came up with, or can we lower it a bit?
This looks great!
@ahocevar I'm interested in a review of the new "set opacity" code from you.
I see frequent flickers in Chrome when zooming in/out in the transition.html example with the 4th layer set as base layer.
@ahocevar flickers? Like flashes?
The transition.html example is a good one to test this new code. When using the 1st layer as base layer, I see the whole map disappear frequently after dragging. I think this is related to the flicker issues. Maybe having the backbuffer fade out instead of using setTimeout to remove it would improve things?
@elemoine With flickers, I meant the whole layer or parts of it disappearing for a moment.
Thanks for testing @ahocevar. The problem is related to the delayed removal of the back buffer. There's no problem if one goes very slow and always pans after the back buffer has been removed. I haven't looked at how to fix that for now.
@ahocevar, and can you confirm that you observe these "flickers" only with the single tile layer?
@elemoine the "flickers" are related to the backbuffer. So they appear after zoom changes also in tiled layers. I think the best way to fix this would be to let the backbuffer fade out with an animation before removing it. The root of the problem is that many browsers fire the loadend event for images really after it finished loading, but before it is displayed. This is why we previously removed the tile based back buffer on the layer's loadend event, and not on the tile's loadend.
@ahocevar then I don't understand how/why the problem relates to tile animation.
@elemoine You're right, it's not related. Scheduling the removal of the backbuffer and then clearing the timeout when a new buffer has to be created causes the issue - I see the disappearing tiles only when I wait less than 2.5 sec before dragging (for singleTile layers) or zooming (for other layers) again.
@ahocevar moveTo removes the current back buffer only in the (zoom changed, no resize transition, single tile) case, but without clearing the timer. That may be one problem. I still don't understand what's going on for non single tile layers.
I may know what the issue is now: a back buffer is being reused while it is scheduled for removal (from a previous zoom). In that case the back buffer can be removed during the transition, i.e. while new images are loaded.
To solve the problem we may just need to clear the timer when a back buffer is about to be (re-)used (in applyBackBuffer).
For what it's worth, I didn't see flashes when I was waiting to remove the back buffer until the newly loaded tiles' transitionend (and similarly named) event fired. It wouldn't be too hard to reproduce that work if other approaches don't work.
Two options at the time moveTo is called and the back buffer has been scheduled for removal:
Clear the timer and remove the back buffer immediately. This may cause flashes as we remove the back buffer earlier than planned (tile animation may still be going on). This may be mitigated by the fact that we use a large timeout value.
Clear the timer and keep the back buffer. This prevents flashes, but results in the reuse of the old back buffer while we may have new tiles.
I'd favor option 2, to avoid flashes. But both options should be tried out.
Some pseudo-code for option 2 (mostly for myself):
moveTo:
    ...
    if zoom changed and
       no resize transition and
       single tile:
        remove back buffer
    ...

applyBackBuffer:
    if no back buffer:
        create back buffer
        if no back buffer:
            return
    else if removal is scheduled:
        clear timer
    position and scale back buffer

tile loadend listener:
    if no more loading tiles:
        if back buffer:
            schedule removal (set timer)

remove back buffer:
    if back buffer:
        if back buffer has parent:
            remove back buffer from parent
        set back buffer to null (no back buffer)
    if removal is scheduled:
        clear timer
39f2ddc implements option 1, as option 2 didn't yield anything good.
@ahocevar I'd appreciate if you could test the new code, and see if you can still reproduce the issues you've seen.
I'll still need to add tests for the new code.
Hey, wonderful patch, works great.
The only thing that bothers me is that my transparent PNGs will have fuzzy black pixels in IE7/8, i.e. the transparency no longer works correctly. This surely is an IE bug and is due to setting the filter="alpha(opacity=X)" property.
What would be the nicest way to solve this problem?
Set the opacity filter only when the viewport div has the olMapTileAnim class?
Basically, a way to disable tile animation would be nice.
Why was the previously introduced property "tileAnimation" removed again?
@lordi, both the current code and my code make use of the opacity filter, so it is a bit strange that you don't have these fuzzy black pixels with the current code. And I don't seem to be able to reproduce the issue using the layer-opacity.html example. (Also, I'm observing that this example doesn't work properly in IE6, but I'm not yet sure this is because of my patch.)
@ahocevar, have you had a chance to test the latest changes? I hope I got it right this time and I can merge this patch. Thank you.
Eric, this is such a huge user experience improvement! Thanks for all the work on it. I've tested it in full-screen maps with multiple layers and transition effects. Everything looks great. Though I'm sure there could be additional items exposed, I think it is worth getting this into master and letting more people have a chance to test it out.
@elemoine: I think this is good to merge. I did some heavy panning and zooming with both untiled and tiled layers, and only saw a minor glitch (a single tile disappearing for a fraction of a second) every now and then with tiled layers. Thanks for your efforts on this.
In Firefox 10 the fading doesn't work well: the transition starts with a black background.
@vrifino thanks for reporting this. Does the same problem exist in Leaflet?
I've just tested Firefox 10 on OSX and the fade transitions work well. Got a report of the same working on Windows.
I've tested Firefox 10 on Windows 7 and the transition starts with a black background. It seems a Firefox 10 bug: the same problem exists in Leaflet. Firefox 8 works well.
|
gharchive/issue
| 2011-12-31T13:26:20 |
2025-04-01T04:35:23.450215
|
{
"authors": [
"ahocevar",
"elemoine",
"lordi",
"tschaub",
"vrifino"
],
"repo": "openlayers/openlayers",
"url": "https://github.com/openlayers/openlayers/issues/127",
"license": "BSD-2-Clause",
"license_type": "permissive",
"license_source": "github-api"
}
|
2636040644
|
Use lower tile resolution
Currently, when I'm at a zoom level, let's say 6, I'm using the tiles from resolution 6.
It would be great if TileGrid could easily use the tiles from a higher level.
For example, zoom 5 instead of 6; this is useful when playing a timeline, which requires requesting a lot of tiles.
PS: I think there is already a way to do this but I couldn't figure it out
If the served tile size is 256 pixels, specify tileSize: 512 and tilePixelRatio: 0.5.
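In source options that could look like the sketch below (the URL is a placeholder):
import XYZ from 'ol/source/XYZ';

// The server really serves 256px tiles; declaring 512px tiles with a 0.5 pixel
// ratio makes each rendered tile come from one zoom level lower.
const source = new XYZ({
  url: 'https://example.com/tiles/{z}/{x}/{y}.png',
  tileSize: 512,
  tilePixelRatio: 0.5,
});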
thanks @mike-000, I probably wasn't clear in my description; the feature I'm looking for is something like an array-to-array translation, for example:
zoom = [0,1, 2,3, 4,5,6]
optimizedZoom = [0,0, 1,1, 2,2,2]
I just achieved it with a certain degree of control using:
import TileGrid from 'ol/tilegrid/TileGrid';

// minZoomTileAvailable / maxZoomTileAvailable come from the tile service's metadata.
const resolutions = []
const tileSizes = []
const zoomFactor = 2 // can be changed; increasing it lowers the quality
for (let i = minZoomTileAvailable; i < maxZoomTileAvailable; i++) {
  const zoom = Math.min(i, maxZoomTileAvailable)
  const resolution = this.map.getView().getResolutionForZoom(zoom)
  resolutions.push(resolution / zoomFactor)
  tileSizes.push(256 * zoomFactor)
}
const tileGrid = new TileGrid({
  extent: this.map.getView().getProjection().getExtent(),
  resolutions,
  tileSizes,
})
|
gharchive/issue
| 2024-11-05T17:17:16 |
2025-04-01T04:35:23.454308
|
{
"authors": [
"Makio64",
"mike-000"
],
"repo": "openlayers/openlayers",
"url": "https://github.com/openlayers/openlayers/issues/16346",
"license": "BSD-2-Clause",
"license_type": "permissive",
"license_source": "github-api"
}
|
171874312
|
Mouse over with transform scale
Hello
I need to use transform: scale for my page, but mouse navigation does not work correctly on the map. See the screenshot and http://jsfiddle.net/balkarov/bfpep2uv/
How can I fix it?
It would be good to understand why you need the document level transform
We use transform: scale in our project. The built-in zoom does not suit us.
Can somebody help me?
What do you mean by "it does not suit us zoom"? You can customize the appearance of all controls with css, and you have full control over the resolutions your map will displayed in.
No. We need this approach, using transform: scale.
How can this problem be fixed?
Look, I found the same problem in Highcharts:
https://github.com/highcharts/highcharts/issues/2405
But someone managed to solve this problem:
Before hack: http://jsfiddle.net/kzoon/v3aa8/
After hack: http://jsfiddle.net/f4z8pay9/
Did you fix it in highcharts?
|
gharchive/issue
| 2016-08-18T10:58:27 |
2025-04-01T04:35:23.459295
|
{
"authors": [
"ahocevar",
"balkarov",
"bill-chadwick"
],
"repo": "openlayers/openlayers",
"url": "https://github.com/openlayers/openlayers/issues/5747",
"license": "BSD-2-Clause",
"license_type": "permissive",
"license_source": "github-api"
}
|
344085042
|
Observable.on does not respect external context.
Can you please provide the reason why you removed the opt_this argument?
https://github.com/openlayers/openlayers/commit/d7c48314b88b3dfd598265c220f0c40781d07bb5#diff-a3c20f84c3e2fa5a948c6c57621b4e49L100
TL;DR: There is no need for opt_this. Instead of
myObject.on('change', function() {
  console.log(this.getProperties())
}, myObject);
you now say
myObject.on('change', function() {
  console.log(this.getProperties())
}.bind(myObject));
Ok, thanks for response.
|
gharchive/issue
| 2018-07-24T15:18:21 |
2025-04-01T04:35:23.461401
|
{
"authors": [
"Vasiliy13",
"ahocevar"
],
"repo": "openlayers/openlayers",
"url": "https://github.com/openlayers/openlayers/issues/8431",
"license": "BSD-2-Clause",
"license_type": "permissive",
"license_source": "github-api"
}
|
1407047096
|
fix(reshape)
Fix a bug in the reshape operator caused by multiple calibration batches.
One business model did not pass CI.
|
gharchive/pull-request
| 2022-10-13T02:38:44 |
2025-04-01T04:35:23.669855
|
{
"authors": [
"Jzz24",
"Lenan22"
],
"repo": "openppl-public/ppq",
"url": "https://github.com/openppl-public/ppq/pull/267",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
330168704
|
Fix make_aliases_function
Before, if a plugin key was None, the application always crashed; now we just show a Logger.warning message.
This change is
Coverage increased (+0.03%) to 74.12% when pulling 5a5d72a24b49dfe104110fba3434d2cdc1c39770 on MichaelYusko:bugfix/fix_make_aliases_function into 2487c4d7c4abc36f26581d82dd1bf80eb95f549e on openprocurement:ea_core_master.
|
gharchive/pull-request
| 2018-06-07T08:30:01 |
2025-04-01T04:35:23.674022
|
{
"authors": [
"MichaelYusko",
"coveralls"
],
"repo": "openprocurement/openprocurement.api",
"url": "https://github.com/openprocurement/openprocurement.api/pull/345",
"license": "apache-2.0",
"license_type": "permissive",
"license_source": "bigquery"
}
|
641633479
|
Declarative rules aren't executing their visitors
CompositeSourceVisitor delegates for default but not for visit.
Wasn't properly fixed in 1.0.1 for visitors that contain pipelines (chained visitors with andThen). Fixed in 1.0.2.
|
gharchive/issue
| 2020-06-19T00:59:58 |
2025-04-01T04:35:23.753557
|
{
"authors": [
"jkschneider"
],
"repo": "openrewrite/rewrite",
"url": "https://github.com/openrewrite/rewrite/issues/3",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
324325662
|
mod the API definition of Extend Volume
What this PR does / why we need it:
Which issue this PR fixes (optional, in fixes #<issue number>(, fixes #<issue_number>, ...) format, will close that issue when PR gets merged): fixes #
design spec #11
Special notes for your reviewer:
Release note:
Coverage increased (+0.09%) to 45.099% when pulling 32bf54875d21424c1059dbe79150d2163e0d420a on BaiHuoYu:Resize_515 into c0267ad64dc40bedbd8df57bd6186333e1c6a96f on opensds:master.
|
gharchive/pull-request
| 2018-05-18T08:56:57 |
2025-04-01T04:35:23.789481
|
{
"authors": [
"BaiHuoYu",
"coveralls"
],
"repo": "opensds/opensds",
"url": "https://github.com/opensds/opensds/pull/390",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
444773079
|
Update orchestration install document
What this PR does / why we need it:
This PR updates the Orchestration installation document with updated code
Codecov Report
Merging #27 into master will not change coverage.
The diff coverage is n/a.
@@ Coverage Diff @@
## master #27 +/- ##
======================================
Coverage 42.5% 42.5%
======================================
Files 13 13
Lines 614 614
Branches 65 65
======================================
Hits 261 261
Misses 343 343
Partials 10 10
|
gharchive/pull-request
| 2019-05-16T06:18:51 |
2025-04-01T04:35:23.792004
|
{
"authors": [
"codecov-io",
"joseph-v"
],
"repo": "opensds/orchestration",
"url": "https://github.com/opensds/orchestration/pull/27",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
1537727896
|
[BUG] [Dev Setup] Assertion failure while deleting index with remote_store.enabled=true
Describe the bug
While deleting a remote store backed index in developer setup, following exception is thrown.
java.lang.AssertionError: shard [my-index-1][0] is not locked
at __randomizedtesting.SeedInfo.seed([5CA33FB52BB9420E]:0)
at org.opensearch.env.NodeEnvironment.deleteShardDirectoryUnderLock(NodeEnvironment.java:561)
at org.opensearch.indices.IndicesService.deleteShardStore(IndicesService.java:1208)
at org.opensearch.index.IndexService.onShardClose(IndexService.java:638)
at org.opensearch.index.IndexService$StoreCloseListener.accept(IndexService.java:761)
at org.opensearch.index.IndexService$StoreCloseListener.accept(IndexService.java:748)
at org.opensearch.index.store.Store.closeInternal(Store.java:549)
at org.opensearch.index.store.Store$1.closeInternal(Store.java:189)
at org.opensearch.common.util.concurrent.AbstractRefCounted.decRef(AbstractRefCounted.java:76)
at org.opensearch.index.store.Store.decRef(Store.java:522)
at org.opensearch.index.store.Store.close(Store.java:529)
at org.opensearch.index.IndexService.closeShard(IndexService.java:619)
at org.opensearch.index.IndexService.removeShard(IndexService.java:587)
at org.opensearch.index.IndexService.close(IndexService.java:373)
at org.opensearch.indices.IndicesService.removeIndex(IndicesService.java:1044)
at org.opensearch.indices.cluster.IndicesClusterStateService.deleteIndices(IndicesClusterStateService.java:350)
at org.opensearch.indices.cluster.IndicesClusterStateService.applyClusterState(IndicesClusterStateService.java:280)
at org.opensearch.cluster.service.ClusterApplierService.callClusterStateAppliers(ClusterApplierService.java:606)
at org.opensearch.cluster.service.ClusterApplierService.callClusterStateAppliers(ClusterApplierService.java:593)
at org.opensearch.cluster.service.ClusterApplierService.applyChanges(ClusterApplierService.java:561)
at org.opensearch.cluster.service.ClusterApplierService.runTask(ClusterApplierService.java:484)
at org.opensearch.cluster.service.ClusterApplierService$UpdateTask.run(ClusterApplierService.java:186)
at org.opensearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:747)
at org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedOpenSearchThreadPoolExecutor.java:282)
at org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedOpenSearchThreadPoolExecutor.java:245)
at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
at java.base/java.lang.Thread.run(Thread.java:829)
Fixed in https://github.com/opensearch-project/OpenSearch/pull/5918
|
gharchive/issue
| 2023-01-18T10:09:30 |
2025-04-01T04:35:23.815563
|
{
"authors": [
"sachinpkale"
],
"repo": "opensearch-project/OpenSearch",
"url": "https://github.com/opensearch-project/OpenSearch/issues/5919",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
1950288319
|
[Backport 2.x] Upload global cluster state to remote store (#10404)
Description
Backport of PR: Upload global cluster state to remote store #10404
Changes made on top of the original PR for backporting:
Conflicts in CHANGELOG
Changed version checks from V_3_0_0 to V_2_12_0.
Related Issues
Resolves #10526
Check List
[ ] New functionality includes testing.
[ ] All tests pass
[ ] New functionality has been documented.
[ ] New functionality has javadoc added
[ ] Commits are signed per the DCO using --signoff
[ ] Commit changes are listed out in CHANGELOG.md file (See: Changelog)
[ ] Public documentation issue/PR created
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
For more information on following Developer Certificate of Origin and signing off your commits, please check here.
Test failure unrelated to this change: org.opensearch.search.aggregations.bucket.DiversifiedSamplerIT.testNestedSamples
|
gharchive/pull-request
| 2023-10-18T17:29:50 |
2025-04-01T04:35:23.820802
|
{
"authors": [
"dhwanilpatel"
],
"repo": "opensearch-project/OpenSearch",
"url": "https://github.com/opensearch-project/OpenSearch/pull/10711",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
2067935503
|
Update skip version to match branches
Description
Since we merged #11209 we would need to fix the skip version to match the BWC tests
Related Issues
Resolves #[Issue number to be closed when this PR is merged]
Check List
[x] New functionality includes testing.
[x] All tests pass
[x] New functionality has been documented.
[x] New functionality has javadoc added
[x] Failing checks are inspected and point to the corresponding known issue(s) (See: Troubleshooting Failing Builds)
[x] Commits are signed per the DCO using --signoff
[x] Commit changes are listed out in CHANGELOG.md file (See: Changelog)
[x] Public documentation issue/PR created
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
For more information on following Developer Certificate of Origin and signing off your commits, please check here.
@harshavamsi should we bring back 340_keyword_doc_values.yml to run tests for BWC versions prior to 2.12.0? It seems like we have removed the test completely
|
gharchive/pull-request
| 2024-01-05T19:35:21 |
2025-04-01T04:35:23.826246
|
{
"authors": [
"harshavamsi",
"reta"
],
"repo": "opensearch-project/OpenSearch",
"url": "https://github.com/opensearch-project/OpenSearch/pull/11776",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
962473793
|
support historical analysis for multi-category HC
Signed-off-by: Yaliang Wu ylwu@amazon.com
Description
Support historical analysis of multi-category HC
Delete anomaly results when deleting a detector, and add this to the hourly cron job
Fix HC detector query in historical analysis
Check List
[x] Commits are signed per the DCO using --signoff
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
For more information on following Developer Certificate of Origin and signing off your commits, please check here.
Codecov Report
Merging #159 (f0e265e) into main (6843b29) will decrease coverage by 0.46%.
The diff coverage is 21.75%.
@@ Coverage Diff @@
## main #159 +/- ##
============================================
- Coverage 70.30% 69.83% -0.47%
- Complexity 2970 2982 +12
============================================
Files 268 268
Lines 14051 14183 +132
Branches 1409 1419 +10
============================================
+ Hits 9878 9905 +27
- Misses 3554 3656 +102
- Partials 619 622 +3
Flag
Coverage Δ
plugin
69.83% <21.75%> (-0.47%)
:arrow_down:
Flags with carried forward coverage won't be shown. Click here to find out more.
Impacted Files
Coverage Δ
.../java/org/opensearch/ad/AnomalyDetectorPlugin.java
94.73% <ø> (ø)
src/main/java/org/opensearch/ad/model/ADTask.java
91.25% <0.00%> (-0.24%)
:arrow_down:
.../java/org/opensearch/ad/model/AnomalyDetector.java
96.13% <0.00%> (-0.47%)
:arrow_down:
src/main/java/org/opensearch/ad/model/Entity.java
72.89% <ø> (-0.26%)
:arrow_down:
...ensearch/ad/rest/RestGetAnomalyDetectorAction.java
38.46% <0.00%> (ø)
...pensearch/ad/settings/AnomalyDetectorSettings.java
100.00% <ø> (ø)
...arch/ad/transport/EntityResultTransportAction.java
73.50% <0.00%> (+1.23%)
:arrow_up:
...c/main/java/org/opensearch/ad/util/ParseUtils.java
62.13% <0.00%> (-1.31%)
:arrow_down:
...java/org/opensearch/ad/task/ADBatchTaskRunner.java
50.41% <6.08%> (-3.63%)
:arrow_down:
...ava/org/opensearch/ad/task/ADTaskCacheManager.java
88.00% <10.00%> (-3.63%)
:arrow_down:
... and 11 more
@ylwu-amzn Are your changes in? Please don't force push as it is hard for reviewers to see the diff. Also, the PR build failed now.
Thanks for reminding me. I pushed changes accidentally and have fixed the failed build. BTW, you can stop reviewing if the build failed.
Just trying to push you so that I can approve the PR.
|
gharchive/pull-request
| 2021-08-06T07:23:20 |
2025-04-01T04:35:23.845969
|
{
"authors": [
"codecov-commenter",
"kaituo",
"ylwu-amzn"
],
"repo": "opensearch-project/anomaly-detection",
"url": "https://github.com/opensearch-project/anomaly-detection/pull/159",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
1238213524
|
fix code coverage badge
Signed-off-by: Subhobrata Dey sbcd90@gmail.com
Description
[Describe what this change achieves]
Issues Resolved
[List any issues this PR will resolve]
Check List
[ ] New functionality includes testing.
[ ] All tests pass
[ ] New functionality has been documented.
[ ] New functionality has javadoc added
[ ] Commits are signed per the DCO using --signoff
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
For more information on following Developer Certificate of Origin and signing off your commits, please check here.
Codecov Report
Merging #176 (f68fda1) into main (1d0ee34) will not change coverage.
The diff coverage is n/a.
@@ Coverage Diff @@
## main #176 +/- ##
=========================================
Coverage 83.76% 83.76%
Complexity 434 434
=========================================
Files 66 66
Lines 2205 2205
Branches 254 254
=========================================
Hits 1847 1847
Misses 261 261
Partials 97 97
Δ = absolute <relative> (impact), ø = not affected, ? = missing data
|
gharchive/pull-request
| 2022-05-17T07:42:26 |
2025-04-01T04:35:23.854381
|
{
"authors": [
"codecov-commenter",
"sbcd90"
],
"repo": "opensearch-project/common-utils",
"url": "https://github.com/opensearch-project/common-utils/pull/176",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
1820945707
|
[FEATURE] Acceleration Wizard UI
As part of the UI for flint, we want a way for users to configure acceleration on specific queries/tables/materialized views from the UI. This can be done either via configuration (data sources management), or in the context of a given query/table. This Issue is to create the acceleration wizard UI in these two situations to allow users to easily configure acceleration methods.
Can you share details on the user experience here?
Closing since it moved to query workbench
|
gharchive/issue
| 2023-07-25T18:51:35 |
2025-04-01T04:35:23.855839
|
{
"authors": [
"YANG-DB",
"derek-ho"
],
"repo": "opensearch-project/dashboards-observability",
"url": "https://github.com/opensearch-project/dashboards-observability/issues/748",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
2051450287
|
[Backport main] Bugfix/metric-loading-loop (#1309)
Description
Backport #1309 "Metric Loading Loop fix" to main
Issues Resolved
[List any issues this PR will resolve]
Check List
[ ] New functionality includes testing.
[ ] All tests pass, including unit test, integration test and doctest
[ ] New functionality has been documented.
[ ] New functionality has javadoc added
[ ] New functionality has user manual doc added
[ ] Commits are signed per the DCO using --signoff
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
For more information on following Developer Certificate of Origin and signing off your commits, please check here.
Lint CI is failing here due to a configuration change pending in PR: https://github.com/opensearch-project/dashboards-observability/pull/1317
|
gharchive/pull-request
| 2023-12-20T23:32:26 |
2025-04-01T04:35:23.859857
|
{
"authors": [
"pjfitzgibbons",
"ps48"
],
"repo": "opensearch-project/dashboards-observability",
"url": "https://github.com/opensearch-project/dashboards-observability/pull/1318",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
1656014355
|
Fix imports for AppendAggregateAction
Description
Fixes star imports from https://github.com/opensearch-project/data-prepper/pull/2230#pullrequestreview-1373053058
Issues Resolved
Check List
[x] New functionality includes testing.
[x] New functionality has been documented.
[x] New functionality has javadoc added
[x] Commits are signed with a real name per the DCO
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
For more information on following Developer Certificate of Origin and signing off your commits, please check here.
Let's merge this once the build passes.
Codecov Report
Merging #2452 (fe42586) into main (738293e) will not change coverage.
The diff coverage is n/a.
@@ Coverage Diff @@
## main #2452 +/- ##
=========================================
Coverage 93.78% 93.78%
Complexity 2082 2082
=========================================
Files 246 246
Lines 5807 5807
Branches 470 470
=========================================
Hits 5446 5446
Misses 245 245
Partials 116 116
|
gharchive/pull-request
| 2023-04-05T17:22:04 |
2025-04-01T04:35:23.866296
|
{
"authors": [
"codecov-commenter",
"dlvenable",
"graytaylor0"
],
"repo": "opensearch-project/data-prepper",
"url": "https://github.com/opensearch-project/data-prepper/pull/2452",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
2049670878
|
Update MAINTAINERS.md
Removed Chris, Anan, and Sean.
Description
Removes Chris and Sean who have recently left Amazon.
Issues Resolved
n/a
Checklist
[x] By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license and subject to the Developers Certificate of Origin.
For more information on following Developer Certificate of Origin and signing off your commits, please check here.
@hdhalter Do you also need to update CODEOWNERS?
Yes, thanks! https://github.com/opensearch-project/documentation-website/pull/5920
|
gharchive/pull-request
| 2023-12-20T01:34:56 |
2025-04-01T04:35:23.869872
|
{
"authors": [
"hdhalter",
"kolchfa-aws"
],
"repo": "opensearch-project/documentation-website",
"url": "https://github.com/opensearch-project/documentation-website/pull/5917",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
1703433803
|
Add notification settings page & runtime notifications
Description
[Describe what this change achieves]
Issues Resolved
[List any issues this PR will resolve]
Check List
[x] Commits are signed per the DCO using --signoff
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
For more information on following Developer Certificate of Origin and signing off your commits, please check here.
Codecov Report
Merging #731 (ca37317) into main (e658c76) will increase coverage by 1.20%.
The diff coverage is n/a.
:exclamation: Current head ca37317 differs from pull request most recent head 692bf09. Consider uploading reports for the commit 692bf09 to get more accurate results
@@ Coverage Diff @@
## main #731 +/- ##
==========================================
+ Coverage 58.87% 60.08% +1.20%
==========================================
Files 318 330 +12
Lines 10576 11035 +459
Branches 1902 2007 +105
==========================================
+ Hits 6227 6630 +403
+ Misses 3832 3831 -1
- Partials 517 574 +57
see 34 files with indirect coverage changes
|
gharchive/pull-request
| 2023-05-10T08:57:09 |
2025-04-01T04:35:23.875833
|
{
"authors": [
"SuZhou-Joe",
"codecov-commenter"
],
"repo": "opensearch-project/index-management-dashboards-plugin",
"url": "https://github.com/opensearch-project/index-management-dashboards-plugin/pull/731",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
2194261458
|
adding xinyual as maintainer
Description
adding xinyual as maintainer
Issues Resolved
[List any issues this PR will resolve]
Check List
[ ] New functionality includes testing.
[ ] All tests pass
[ ] New functionality has been documented.
[ ] New functionality has javadoc added
[ ] Commits are signed per the DCO using --signoff
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
For more information on following Developer Certificate of Origin and signing off your commits, please check here.
DCO unsigned.
|
gharchive/pull-request
| 2024-03-19T07:47:09 |
2025-04-01T04:35:23.879128
|
{
"authors": [
"model-collapse",
"peterzhuamazon"
],
"repo": "opensearch-project/ml-commons",
"url": "https://github.com/opensearch-project/ml-commons/pull/2222",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
1845833655
|
[Benchmark test] Support enabling telemetry device 'segment-replication-stats' to collect statistic about Segment Replication
Is your feature request related to a problem? Please describe
It's possible to capture the metrics from the CAT Segment Replication Stats API in opensearch-benchmark once the issue https://github.com/opensearch-project/opensearch-benchmark/issues/339 is resolved.
I'd like to have the ability to enable the new telemetry device "segment-replication-stats" to collect metrics in the benchmark test script.
Describe the solution you'd like
Modify the code around this line https://github.com/opensearch-project/opensearch-build/blob/2.9.0/src/test_workflow/benchmark_test/benchmark_test_suite.py#L52 to make it possible to append new values to the parameter --telemetry, so that users can assign multiple telemetry devices, such as --telemetry=node-stats,segment-replication-stats (a short sketch follows at the end of this section)
The existing code to enable node-stats telemetry device is introduced in the commit https://github.com/opensearch-project/opensearch-build/commit/346ea4982b1477678eb7732d54b85edee32f8f1b
Modify the jenkinsfile of https://github.com/opensearch-project/opensearch-build/blob/2.9.0/jenkins/opensearch/benchmark-test.jenkinsfile.
a) Add a parameter CAPTURE_SEGMENT_REPLICATION_STAT like the CAPTURE_NODE_STAT https://github.com/opensearch-project/opensearch-build/blame/2.9.0/jenkins/opensearch/benchmark-test.jenkinsfile#L192-L196
b) Set true for the new parameter for all benchmark runs with segrep:enabled and segrep:enabled-with-remote-store
The modification of jenkinsfile should wait until the docker image of opensearch-benchmark version 1.1.0 has been updated.
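To make the first step concrete, here is a minimal sketch in Python of joining the requested devices into a single --telemetry argument; the flag names capture_node_stat and capture_segment_replication_stat are hypothetical stand-ins for illustration, not the actual benchmark_test_suite.py code:

def build_telemetry_arg(capture_node_stat: bool, capture_segment_replication_stat: bool) -> str:
    """Join every requested telemetry device into one --telemetry flag."""
    devices = []
    if capture_node_stat:
        devices.append("node-stats")
    if capture_segment_replication_stat:
        devices.append("segment-replication-stats")
    return f"--telemetry={','.join(devices)}" if devices else ""

print(build_telemetry_arg(True, True))  # --telemetry=node-stats,segment-replication-stats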
Describe alternatives you've considered
No response
Additional context
No response
@rishabh6788 I can make the code change for it.
|
gharchive/issue
| 2023-08-10T20:15:20 |
2025-04-01T04:35:23.885003
|
{
"authors": [
"tlfeng"
],
"repo": "opensearch-project/opensearch-build",
"url": "https://github.com/opensearch-project/opensearch-build/issues/3865",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
1353208701
|
Added exclude patterns to the version increment workflow.
Signed-off-by: prudhvigodithi pgodithi@amazon.com
Description
Added exclude patterns to the version increment workflow.
Issues Resolved
List any issues this PR will resolve, e.g. Closes [...].
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
For more information on following Developer Certificate of Origin and signing off your commits, please check here.
Codecov Report
Merging #2520 (e9b8fc4) into main (cc1091b) will increase coverage by 5.60%.
The diff coverage is n/a.
@@ Coverage Diff @@
## main #2520 +/- ##
=============================================
+ Coverage 94.39% 100.00% +5.60%
=============================================
Files 219 6 -213
Lines 4462 105 -4357
Branches 29 19 -10
=============================================
- Hits 4212 105 -4107
+ Misses 244 0 -244
+ Partials 6 0 -6
Impacted Files
Coverage Δ
..._workflow/integ_test/service_termination_result.py
tests/jenkins/jobs/Messages_Jenkinsfile
src/ci_workflow/ci_check_manifest_component.py
tests/jenkins/jobs/CreateGithubIssue_Jenkinsfile
...workflow/opensearch/build_artifact_check_plugin.py
src/sign_workflow/signer_pgp.py
src/sign_workflow/sign_artifacts.py
src/build_workflow/builder.py
src/manifests/build/build_manifest_1_1.py
...w/bwc_test/bwc_test_start_properties_opensearch.py
... and 203 more
What is the reason to exclude those? (Add to the PR description).
Sure @dblock, thanks; updated the description and will create an issue with more details. The reason I marked this as a draft is that I'm still looking for a better solution: right now branches like 1.3 and 2.2 are hardcoded and will be added/removed based on future releases. I'm thinking about how to add this dynamically, or as an input for workflow execution, so that the execution only runs for the right version at the right time, rather than always running the version increment for all the listed branches (see the sketch below).
Let's go with this PR; it's fine and in one way good, keeping the version increment consistent for all release branches. For example, if the release is 1.3.5, when this workflow is executed it will auto-increment not only 1.3 but also the 2.2 branch.
@dblock @gaiksaya @peterzhuamazon @bb
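As a minimal sketch of the dynamic approach floated above (the function name and behavior are illustrative assumptions, not the actual workflow code):

def branches_to_increment(release_version: str) -> list[str]:
    """Derive the release branch to bump from the version being released,
    instead of hardcoding branches like 1.3 and 2.2 in the workflow."""
    major, minor, *_ = release_version.split(".")
    return [f"{major}.{minor}"]

print(branches_to_increment("1.3.5"))  # ['1.3'], only the matching branch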
|
gharchive/pull-request
| 2022-08-28T00:36:29 |
2025-04-01T04:35:23.902230
|
{
"authors": [
"codecov-commenter",
"dblock",
"prudhvigodithi"
],
"repo": "opensearch-project/opensearch-build",
"url": "https://github.com/opensearch-project/opensearch-build/pull/2520",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
1989527412
|
Fixed typo in CHANGELOG from merged PR #576
Description
Fixes typo in CHANGELOG.md that was accidentally introduced in PR #576
Issues Resolved
A typo in the CHANGELOG suggests we removed leftover support for Python 3.6, which was not part of the PR. Just a typo, should be removed from CHANGELOG.
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
For more information on following Developer Certificate of Origin and signing off your commits, please check here.
@dblock I'm sorry, I just noticed I had a typo in my PR 576 in the CHANGELOG that suggests I removed EOL Python 3.6, but that was a mistake on my part. Just a typo that can safely be removed from the CHANGELOG.
|
gharchive/pull-request
| 2023-11-12T19:01:07 |
2025-04-01T04:35:23.905650
|
{
"authors": [
"Djcarrillo6"
],
"repo": "opensearch-project/opensearch-py",
"url": "https://github.com/opensearch-project/opensearch-py/pull/577",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
1469797279
|
[BUG] OpenApi at version '1.0.0' on 1.x branch vs '1.0.1' for other gems
Not really a bug, but a tiny annoyance when you update your gems.
Should OpenSearch::API::VERSION not be '1.0.1' or don't you keep the different gems in sync for the published versions?
See:
.../1.x/opensearch-api/lib/opensearch/api/version.rb
vs
.../1.x/opensearch-transport/lib/opensearch/transport/version.rb
Related, we're trying to centralize API spec and publish OpenAPI in https://github.com/opensearch-project/opensearch-api-specification/issues/34.
Hi @woidda,
We're not syncing up the versions of these gems. The fact that the versions appeared in sync was a coincidence as we used to do releases for these gems at the same time. We're now able to do releases much more often, and we're planning to deploy a new version of each gem as soon as it's viable.
If you install the opensearch-ruby or opensearch-aws-sigv4 gem, the appropriate versions of api and transport gems will be installed as dependencies.
I use these two:
gem "opensearch-ruby", "~>1.0"
gem "opensearch-transport", "~>1.0"
So I can replace this by including only opensearch-ruby, correct?
OK, this makes it easier. I was not sure about the semantics you use for versioning, as the elasticsearch gems, as far as I know, used matching version numbers for corresponding parts.
Thanks for your reply and your work. I am looking forward to when opensearch becomes more mainstream and the confusion around naming things and what libs are available in what programming language disappears...
@woidda Thanks for the kind words and for understanding. And yes we have plans to improve these clients, esp in regard to Opensearch's unique features :)
|
gharchive/issue
| 2022-11-30T15:11:01 |
2025-04-01T04:35:23.910867
|
{
"authors": [
"dblock",
"nhtruong",
"woidda"
],
"repo": "opensearch-project/opensearch-ruby",
"url": "https://github.com/opensearch-project/opensearch-ruby/issues/122",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
1560620981
|
(OUI Docs Site) Homepage
[ ] Remove graphic
[ ] Remove entire card for "Charts"
[ ] Change logoElastic to logoOpenSearch
Abby will do the third one, Change logoElastic to logoOpenSearch, in #248
|
gharchive/issue
| 2023-01-28T01:21:38 |
2025-04-01T04:35:23.913624
|
{
"authors": [
"KrooshalUX",
"ananzh"
],
"repo": "opensearch-project/oui",
"url": "https://github.com/opensearch-project/oui/issues/255",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
1787705115
|
[FEATURE] Use actual password validation message as help text
Description
The help text used for the inputs in the Create internal user page is hard-coded, so it doesn't match the actual requirements if these are changed using the corresponding options in the backend plugin:
plugins.security.restapi.password_validation_regex: '(?=.*[A-Z])(?=.*[^a-zA-Z\d])(?=.*[0-9])(?=.*[a-z]).{5,}'
plugins.security.restapi.password_validation_error_message: "Password must be minimum 5 characters long and must contain at least one uppercase letter, one lowercase letter, one digit, and one special character."
I assume that this is happening for other inputs as well, not only for the password one.
It would be great if the plugins.security.restapi.password_validation_error_message string were used for the help text, instead of the hard-coded string.
https://github.com/opensearch-project/security-dashboards-plugin/blob/32c02c8c18a509884613517fac38b684338d5273/public/apps/apps-constants.tsx#L20-L22
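As a side note, the configured regex really does encode the rules stated in the error message; a quick standalone check (Python is used purely for illustration here, since the lookahead syntax behaves the same as in the Java-based backend plugin):

import re

# Regex copied from plugins.security.restapi.password_validation_regex above.
PATTERN = re.compile(r'(?=.*[A-Z])(?=.*[^a-zA-Z\d])(?=.*[0-9])(?=.*[a-z]).{5,}')

for password in ["Ab1!x", "short", "NoDigits!", "nouppercase1!"]:
    # Each lookahead scans the whole string from position 0, so one
    # match() call checks all four character-class rules plus the length.
    print(password, "->", bool(PATTERN.match(password)))
# Only "Ab1!x" passes; each of the other samples misses at least one rule.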
[Triage] This issue will be addressed with the completion of #1503.
Included in 2.x/2.9 branches
|
gharchive/issue
| 2023-07-04T11:01:59 |
2025-04-01T04:35:23.916452
|
{
"authors": [
"AlexRuiz7",
"davidlago",
"scrawfor99"
],
"repo": "opensearch-project/security-dashboards-plugin",
"url": "https://github.com/opensearch-project/security-dashboards-plugin/issues/1501",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
933157853
|
Build on OpenSearch 1.0.0
opensearch-security pull request intake form
Please provide as much details as possible to get feedback/acceptance on your PR quickly
Category: (Enhancement, New feature, Bug fix, Test fix, Refactoring, Maintenance, Documentation)
Github Issue # or road-map entry, if available:
Description of changes:
Why these changes are required?
What is the old behavior before changes and new behavior after changes? (Please add any example/logs/screen-shot if available)
Testing done: (Please provide details of testing done: Unit testing, integration testing and manual testing)
TO-DOs, if any: (Please describe pending items and provide Github issues# for each of them)
Is it backport from main branch? (If yes, please add backport PR # and commits #)
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
For more information on following Developer Certificate of Origin and signing off your commits, please check here.
Codecov Report
Merging #1304 (a861b62) into main (1083d99) will increase coverage by 0.01%.
The diff coverage is n/a.
:exclamation: Current head a861b62 differs from pull request most recent head 2fe070d. Consider uploading reports for the commit 2fe070d to get more accurate results
@@ Coverage Diff @@
## main #1304 +/- ##
============================================
+ Coverage 64.73% 64.75% +0.01%
+ Complexity 3195 3194 -1
============================================
Files 247 247
Lines 17230 17230
Branches 3045 3045
============================================
+ Hits 11154 11157 +3
+ Misses 4526 4523 -3
Partials 1550 1550
Impacted Files  Coverage Δ
...ecurity/configuration/ConfigurationRepository.java  73.07% <0.00%> (-2.20%) :arrow_down:
...security/configuration/DlsFlsFilterLeafReader.java  60.47% <0.00%> (+0.70%) :arrow_up:
...nsearch/security/dlic/rest/api/AuditApiAction.java  68.08% <0.00%> (+4.25%) :arrow_up:
...ecurity/configuration/StaticResourceException.java  25.00% <0.00%> (+25.00%) :arrow_up:
Δ = absolute <relative> (impact), ø = not affected, ? = missing data
|
gharchive/pull-request
| 2021-06-29T22:42:22 |
2025-04-01T04:35:23.933969
|
{
"authors": [
"cliu123",
"codecov-commenter"
],
"repo": "opensearch-project/security",
"url": "https://github.com/opensearch-project/security/pull/1304",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
1228049250
|
Add suppression for all removal warnings
Description
Use of the SecurityManager and AccessController has been deprecated, and
they will be removed in Java versions after 17. While this is an issue, it's
also one that will take a concerted effort to resolve. These warning
messages make discovering build errors and other warnings more
difficult; hence we are adding this suppression logic.
For tracking the effort to replace these components look into https://github.com/opensearch-project/OpenSearch/issues/1687
Issues Resolved
Related to https://github.com/opensearch-project/OpenSearch/issues/1687
Check List
[ ] ~New functionality includes testing~
[ ] ~New functionality has been documented~
[x] Commits are signed per the DCO using --signoff
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
For more information on following Developer Certificate of Origin and signing off your commits, please check here.
Codecov Report
Merging #1828 (05d1556) into main (e189b7a) will decrease coverage by 0.02%.
The diff coverage is n/a.
@@ Coverage Diff @@
## main #1828 +/- ##
============================================
- Coverage 60.81% 60.78% -0.03%
+ Complexity 3187 3185 -2
============================================
Files 253 253
Lines 17931 17930 -1
Branches 3204 3204
============================================
- Hits 10904 10899 -5
- Misses 5450 5454 +4
Partials 1577 1577
Impacted Files  Coverage Δ
...ic/auth/http/jwt/AbstractHTTPJwtAuthenticator.java  55.81% <ø> (ø)
...mazon/dlic/auth/http/jwt/HTTPJwtAuthenticator.java  84.90% <ø> (ø)
...ic/auth/http/kerberos/HTTPSpnegoAuthenticator.java  0.00% <ø> (ø)
...dlic/auth/http/saml/AuthTokenProcessorHandler.java  47.28% <ø> (ø)
...zon/dlic/auth/http/saml/HTTPSamlAuthenticator.java  69.72% <ø> (ø)
...zon/dlic/auth/http/saml/Saml2SettingsProvider.java  61.53% <ø> (ø)
...auth/http/saml/SamlFilesystemMetadataResolver.java  0.00% <ø> (ø)
.../dlic/auth/http/saml/SamlHTTPMetadataResolver.java  62.96% <ø> (ø)
...ic/auth/ldap/backend/LDAPAuthorizationBackend.java  57.57% <ø> (ø)
...ava/com/amazon/dlic/auth/ldap/util/LdapHelper.java  61.53% <ø> (ø)
... and 24 more
Δ = absolute <relative> (impact), ø = not affected, ? = missing data
Deprecation messages are useful if we were adding a component that we did not realize was deprecated and wanted to gate that dependency.
Exactly, JSM being deprecated should be warned about in this case, shouldn't it? OpenSearch core doesn't suppress these warnings either.
@cliu123 This pull request doesn't eliminate deprecation messages, it suppresses them on components that we have no plans to fix. It is confusing for contributors or even maintainers such as myself to see 'warnings' in our build output that we should actively ignore, which is why I am suppressing warnings on these components.
If a new warning was to be added in a pull request we would have no mechanism to detect it. By suppressing these messages and failing builds on any new messages this puts the codebase in a better posture to prevent new dependencies on deprecated/removed features.
Is there a reason to keep the existing warnings as they are today?
This pull request doesn't eliminate deprecation messages, it suppresses them on components that we have no plans to fix.
I have a couple of questions here:
Will the deprecation messages still show? Could you elaborate more in the PR description:
What deprecation messages exactly are these changes suppressing? Is it the following?
WARNING: System::setSecurityManager will be removed in a future release
Given that JSM is deprecated and will be removed, why shouldn't the security plugin warn users about this?
@reta @nknize @dblock might have more context on this. How are these deprecation warnings handled in OpenSearch core where JSM is also being used?
I see. Thanks for the clarification! @peternied @reta
|
gharchive/pull-request
| 2022-05-06T16:08:07 |
2025-04-01T04:35:23.956719
|
{
"authors": [
"cliu123",
"codecov-commenter",
"peternied"
],
"repo": "opensearch-project/security",
"url": "https://github.com/opensearch-project/security/pull/1828",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|
1303210014
|
2.1
I am setting up a v2.1 cluster with SAML authentication. All the configuration seems to be correct; however, when I try to log in, I find that the SAML AuthnRequest still builds the ACS URL with _opendistro/_security/saml/acs instead of /_plugins/_security/saml/acs.
Below is what we extracted from the SAML tracer plugin.
<samlp:AuthnRequest xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol"
xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion"
ID="ONELOGIN_XYZ"
Version="2.0"
IssueInstant="2022-07-13T08:55:10Z"
Destination="https://domain/trust/saml2/http-redirect/sso/"
ProtocolBinding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST"
AssertionConsumerServiceURL="https://domain/_opendistro/_security/saml/acs"
>
<saml:Issuer>opensearch-saml</saml:Issuer>
<samlp:NameIDPolicy Format="urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified"
AllowCreate="true"
/>
</samlp:AuthnRequest>
Seems like after version 2.1.0 both endpoints, /_opendistro/_security/saml/acs and /_plugins/_security/saml/acs, stop working.
If using the first one as the IdP ACS URL, we get {"statusCode":404,"error":"Not Found","message":"Not Found"}, which is probably expected after https://github.com/opensearch-project/security-dashboards-plugin/pull/895
But when switching to the second one, /_plugins/_security/saml/acs, we get {"statusCode":500,"error":"Internal Server Error","message":"Internal Error"}
Rolling back Kibana to version 2.0.1 while leaving OpenSearch at version 2.1.0 helps the issue a bit, as /_opendistro/_security/saml/acs starts to work again
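A quick way to reproduce the mismatch is to probe both ACS paths directly and compare status codes; a minimal sketch, assuming a locally reachable Dashboards instance (the base URL is a placeholder, and the empty unauthenticated POST is for diagnosis only):

import requests  # third-party: pip install requests

BASE = "https://localhost:5601"  # placeholder Dashboards URL

for path in ("/_opendistro/_security/saml/acs", "/_plugins/_security/saml/acs"):
    try:
        # An empty POST is enough to tell a missing route (404) apart
        # from a route that exists but errors out (500).
        response = requests.post(BASE + path, verify=False, timeout=5)
        print(path, "->", response.status_code)
    except requests.RequestException as exc:
        print(path, "-> request failed:", exc)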
Description
[Describe what this change achieves]
Category (Enhancement, New feature, Bug fix, Test fix, Refactoring, Maintenance, Documentation)
Bug Fix
Why these changes are required?
Login to Dashboards v2.1 with SAML authentication
What is the old behavior before changes and new behavior after changes?
The old behavior doesn't allow Dashboards v2.1 to log in with SAML authentication
Issues Resolved
[List any issues this PR will resolve]
Is this a backport? If so, please add backport PR # and/or commits #
Testing
[Please provide details of testing done: unit testing, integration testing and manual testing]
Check List
[ ] New functionality includes testing
[ ] New functionality has been documented
[ ] Commits are signed per the DCO using --signoff
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
For more information on following Developer Certificate of Origin and signing off your commits, please check here.
@hpkuppuraj Thanks for this pull request, could you please see about fixing the DCO errors, link?
Note: I see that this change was set against the 2.1 branch; let's merge this against main and then the maintainers will backport the change to the next release version
@hpkuppuraj Thank you so much for the finding and the PR!
This would be a breaking change, since it changes things like the ACS URL, even though it would align the behavior with the documentation. If an old-version cluster without this change gets upgraded to a new version with this change, the SAML authN would be broken. So IMHO, this will be a great change for the 3.0.0 release.
@peternied, thanks for highlighting the sign-off part, which I had missed in my commits. I think I have signed the commits that I made. Looks like there is still a DCO error.
Codecov Report
Merging #1936 (3251bc7) into main (8393c71) will decrease coverage by 0.02%.
The diff coverage is 50.00%.
@@ Coverage Diff @@
## main #1936 +/- ##
============================================
- Coverage 61.04% 61.01% -0.03%
Complexity 3234 3234
============================================
Files 256 256
Lines 18088 18088
Branches 3224 3224
============================================
- Hits 11041 11037 -4
Misses 5467 5467
- Partials 1580 1584 +4
Impacted Files  Coverage Δ
...rch/security/dlic/rest/api/WhitelistApiAction.java  100.00% <ø> (ø)
...ch/security/securityconf/DynamicConfigFactory.java  56.05% <ø> (ø)
...rg/opensearch/security/ssl/util/CertFileProps.java  100.00% <ø> (ø)
...org/opensearch/security/ssl/util/CertFromFile.java  100.00% <ø> (ø)
...opensearch/security/ssl/util/CertFromKeystore.java  73.91% <ø> (ø)
...ensearch/security/ssl/util/CertFromTruststore.java  87.87% <ø> (ø)
...search/security/ssl/util/CertificateValidator.java  69.41% <ø> (ø)
...rg/opensearch/security/ssl/util/KeystoreProps.java  72.72% <ø> (ø)
...zon/dlic/auth/http/saml/Saml2SettingsProvider.java  61.53% <50.00%> (ø)
...urity/ssl/transport/SecuritySSLNettyTransport.java  62.36% <0.00%> (-4.31%) :arrow_down:
... and 5 more
Δ = absolute <relative> (impact), ø = not affected, ? = missing data
@hpkuppuraj I see there are 93 file changes here and some of them are from other PRs. Can you please rebase your branch against main so that the changes you intended to make are the only ones included.
Maybe better to make both endpoints work? It doesn't seem to be much work, and then it would not be a breaking change; in version 3.0 we could remove the _opendistro one.
I would prefer this approach as suggested by @JustinasKO. @hpkuppuraj is this something you'd be able to pick up or would you like to take this on?
Note; there is another change like this in flight - https://github.com/opensearch-project/security/pull/1949
Note; there is another change like this in flight - #1949
Yes, sure, let me explore this and keep both endpoints working.
@hpkuppuraj I see there are 93 file changes here and some of them are from other PRs. Can you please rebase your branch against main so that the changes you intended to make are the only ones included.
@hpkuppuraj Would you please follow up with this comment?
@cliu123, I am trying to figure out how the PR got changes from other commits, since my local branch has just my changes. Could you please help with this?
@hpkuppuraj just rebase your branch onto the main branch and only your changes will remain.
@cliu123, could you please confirm the changes? I believe only one file change is shown now. Also, I am now seeing a DCO author-signature mismatch.
I can confirm that I see only 1 file change now. You can fix the DCO error by executing this command: git rebase --signoff HEAD~2 where 2 is the number of commits from current HEAD. Looking at your commit history, you've signed-off your latest commit with Signed-off-by: Hari Prasad Kuppuraj <51482940+hpkuppuraj@users.noreply.github.com> which is not correct. It should be done with your Github user (which is, I believe, what you did in the commit prior to the latest one).
@DarshitChanpura, I don't have access to the (company) account I used to commit the first change, hence I switched to my personal GitHub account. That is why there is a discrepancy in the signature.
@hpkuppuraj Thanks for your hard work on this pull request, I've cleaned up the commit history and fixed the DCO issues and pushed an update onto your fork, as soon as CI passes this can be merged.
Should this PR be closed if merging this one? How was this change tested?
@hpkuppuraj Can you please update on the requested changes?
@cliu123 Can you shepherd this change along with the frontend to make sure everything is in alignment?
Hi, apologies, I am outstation and will be back at my workstation tomorrow. I will resolve the code conflicts.
@opensearch-project/security this is a breaking change we've been considering for a while; how do you feel about us taking it? I'm leaning towards no: while it would be nice to have consistent URLs, I would prefer we don't break folks in a hard-to-detect way.
Please vote 👍 / 👎 or comment on this issue; let's revisit this during our next triage.
I'm closing this pull request out as we aren't taking steps to get this merged. Thanks again for the contribution; sorry it didn't work out on this change.
Pity. Then opensearch-dashboards should be patched, because now we have to run the following every time we update it:
sed -i 's/_plugins/_opendistro/g' /usr/share/opensearch-dashboards/plugins/securityDashboards/server/auth/types/saml/routes.js
@nerijus from your comment it sounds like SAML is broken in security-dashboards-plugin without this change? Can you create an issue?
Of course it is broken, that's why this PR was created. I'd still suggest applying this PR instead of changing dashboards.
@cliu123 Could you shepherd this PR and determine how to follow up? This seems very bad (tm) and I'm out of the loop on the state of the SAML configuration in dashboards.
@cliu123 As per the discussion, the endpoint /_plugins/_security/saml/acs is also used by the Dashboards Security Plugin. The endpoint change should be planned for both the front-end and back-end plugins together.
This is a documentation issue that I'm creating. Users follow the documentation to configure _plugins/_security/saml/acs in IDP, but in plugins, it still uses /_opendistro/_security/saml/acs.
@cliu123 The documentation website was reverted to reflect this: https://github.com/opensearch-project/documentation-website/pull/877
There was a breaking change to SAML introduced in 2.2 that was reverted in 2.3 with this PR: https://github.com/opensearch-project/security-dashboards-plugin/pull/895
The frontend switched to using /_plugins while this backend change was not applied.
We reverted to using /_opendistro/_security/saml/acs for the 2.x line, but it was my understanding that we are still planning this change for 3.0.0. Now that our main branch is building 3.0.0, what is preventing us from merging this change?
|
gharchive/pull-request
| 2022-07-13T10:08:06 |
2025-04-01T04:35:24.001159
|
{
"authors": [
"DarshitChanpura",
"JustinasKO",
"aoguan1990",
"cliu123",
"codecov-commenter",
"cwperks",
"hpkuppuraj",
"nerijus",
"peternied"
],
"repo": "opensearch-project/security",
"url": "https://github.com/opensearch-project/security/pull/1936",
"license": "Apache-2.0",
"license_type": "permissive",
"license_source": "github-api"
}
|