code
stringlengths 3
1.05M
| repo_name
stringlengths 4
116
| path
stringlengths 4
991
| language
stringclasses 9
values | license
stringclasses 15
values | size
int32 3
1.05M
|
---|---|---|---|---|---|
using Microsoft.AspNetCore.Identity;
using Microsoft.AspNetCore.Mvc;
using SaanSoft.AspNet.Identity3.MongoDB;
using Web.MongoDBIdentitySample.Models;
// For more information on enabling MVC for empty projects, visit http://go.microsoft.com/fwlink/?LinkID=397860
namespace Web.MongoDBIdentitySample.Controllers
{
/// <summary>
/// Diagnostic controller that verifies dependency-injection wiring: the
/// constructor requests every identity-related service abstraction registered
/// by the MongoDB identity setup, so resolving this controller fails fast if
/// any registration is missing.
/// </summary>
public class DiTestController : Controller
{
    // The Microsoft.AspNetCore.Identity manager classes.
    private readonly RoleManager<IdentityRole> _roleManager;
    private readonly UserManager<ApplicationUser> _userManager;

    // The raw identity database context.
    private readonly IIdentityDatabaseContext<ApplicationUser, IdentityRole, string> _identityDatabaseContext;

    // Store interfaces all implemented by UserStore. When following the
    // Interface Segregation Principle, depend on only the one(s) you need.
    private readonly IUserStore<ApplicationUser> _userStore;
    private readonly IUserLoginStore<ApplicationUser> _userLoginStore;
    private readonly IUserRoleStore<ApplicationUser> _userRoleStore;
    private readonly IUserClaimStore<ApplicationUser> _userClaimStore;
    private readonly IUserPasswordStore<ApplicationUser> _userPasswordStore;
    private readonly IUserSecurityStampStore<ApplicationUser> _userSecurityStampStore;
    private readonly IUserEmailStore<ApplicationUser> _userEmailStore;
    private readonly IUserLockoutStore<ApplicationUser> _userLockoutStore;
    private readonly IUserPhoneNumberStore<ApplicationUser> _userPhoneNumberStore;
    private readonly IUserTwoFactorStore<ApplicationUser> _userTwoFactorStore;
    private readonly IQueryableUserStore<ApplicationUser> _queryableUserStore;

    // Store interfaces all implemented by RoleStore.
    private readonly IRoleStore<IdentityRole> _roleStore;
    private readonly IRoleClaimStore<IdentityRole> _roleClaimStore;
    private readonly IQueryableRoleStore<IdentityRole> _queryableRoleStore;

    public DiTestController(
        RoleManager<IdentityRole> roleManager,
        UserManager<ApplicationUser> userManager,
        IIdentityDatabaseContext<ApplicationUser, IdentityRole, string> identityDatabaseContext,
        IUserStore<ApplicationUser> userStore,
        IUserLoginStore<ApplicationUser> userLoginStore,
        IUserRoleStore<ApplicationUser> userRoleStore,
        IUserClaimStore<ApplicationUser> userClaimStore,
        IUserPasswordStore<ApplicationUser> userPasswordStore,
        IUserSecurityStampStore<ApplicationUser> userSecurityStampStore,
        IUserEmailStore<ApplicationUser> userEmailStore,
        IUserLockoutStore<ApplicationUser> userLockoutStore,
        IUserPhoneNumberStore<ApplicationUser> userPhoneNumberStore,
        IUserTwoFactorStore<ApplicationUser> userTwoFactorStore,
        IQueryableUserStore<ApplicationUser> queryableUserStore,
        IRoleStore<IdentityRole> roleStore,
        IRoleClaimStore<IdentityRole> roleClaimStore,
        IQueryableRoleStore<IdentityRole> queryableRoleStore
    )
    {
        // Plain field captures; no validation on purpose -- a missing
        // registration should surface as a container resolution error.
        _roleManager = roleManager;
        _userManager = userManager;
        _identityDatabaseContext = identityDatabaseContext;
        _userStore = userStore;
        _userLoginStore = userLoginStore;
        _userRoleStore = userRoleStore;
        _userClaimStore = userClaimStore;
        _userPasswordStore = userPasswordStore;
        _userSecurityStampStore = userSecurityStampStore;
        _userEmailStore = userEmailStore;
        _userLockoutStore = userLockoutStore;
        _userPhoneNumberStore = userPhoneNumberStore;
        _userTwoFactorStore = userTwoFactorStore;
        _queryableUserStore = queryableUserStore;
        _roleStore = roleStore;
        _roleClaimStore = roleClaimStore;
        _queryableRoleStore = queryableRoleStore;
    }

    // GET: /<controller>/
    public IActionResult Index() => View();
}
}
|
Amevacorp/SaanSoft.AspNet.Identity3.MongoDB
|
samples/Web.MongoDBIdentitySample/Controllers/DiTestController.cs
|
C#
|
apache-2.0
| 3,643 |
/*
* Copyright 2014,2016 agwlvssainokuni
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cherry.elemental.util;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.util.List;
import java.util.Map;
import org.junit.Test;
public class ToMapUtilTest { // CAUTION: tests below assert exact source line numbers of `new Exception(...)` calls (":36", ":71"); inserting or deleting lines in this file breaks them.
    @Test
    public void testFromThrowable_00() { // depth = Integer.MAX_VALUE: full stack trace, no cause
        Exception ex = new Exception("MESSAGE");
        Map<String, Object> map = ToMapUtil.fromThrowable(ex, Integer.MAX_VALUE);
        assertEquals("MESSAGE", map.get("message")); // exception message is copied through
        assertNotNull(map.get("stackTrace"));
        assertTrue(map.get("stackTrace") instanceof List);
        @SuppressWarnings("unchecked")
        List<String> list = (List<String>) map.get("stackTrace");
        assertFalse(list.isEmpty());
        assertEquals("cherry.elemental.util.ToMapUtilTest.testFromThrowable_00(ToMapUtilTest.java:36)", list.get(0)); // first frame = the `new Exception` line above (stack captured at construction)
        assertNull(map.get("cause")); // no cause was set
    }
    @Test
    public void testFromThrowable_01() { // depth = 0: trace collapses straight to the "..." truncation marker
        Exception ex = new Exception("MESSAGE");
        Map<String, Object> map = ToMapUtil.fromThrowable(ex, 0);
        assertEquals("MESSAGE", map.get("message"));
        assertNotNull(map.get("stackTrace"));
        assertTrue(map.get("stackTrace") instanceof List);
        @SuppressWarnings("unchecked")
        List<String> list = (List<String>) map.get("stackTrace");
        assertFalse(list.isEmpty());
        assertEquals("...", list.get(0)); // truncated immediately at depth 0
        assertNull(map.get("cause"));
    }
    @Test
    public void testFromThrowable_02() { // depth = 1: one real frame then truncation; cause chain mapped recursively
        Exception cause = new Exception("CAUSE");
        Exception ex = new Exception("MESSAGE", cause);
        Map<String, Object> map = ToMapUtil.fromThrowable(ex, 1);
        assertEquals("MESSAGE", map.get("message"));
        assertNotNull(map.get("stackTrace"));
        assertTrue(map.get("stackTrace") instanceof List);
        @SuppressWarnings("unchecked")
        List<String> list = (List<String>) map.get("stackTrace");
        assertFalse(list.isEmpty());
        assertEquals("cherry.elemental.util.ToMapUtilTest.testFromThrowable_02(ToMapUtilTest.java:71)", list.get(0)); // first frame = the `new Exception("MESSAGE", cause)` line above
        assertNotNull(map.get("cause")); // cause is converted into a nested map
        @SuppressWarnings("unchecked")
        Map<String, Object> map2 = (Map<String, Object>) map.get("cause");
        assertEquals("CAUSE", map2.get("message"));
        assertNotNull(map2.get("stackTrace"));
        assertTrue(map2.get("stackTrace") instanceof List);
        @SuppressWarnings("unchecked")
        List<String> list2 = (List<String>) map2.get("stackTrace");
        assertFalse(list2.isEmpty());
        assertEquals("...", list2.get(0)); // nested trace is truncated by the same depth limit
    }
    @Test
    public void testInstantiate() { // the utility class ctor must be invocable without throwing (coverage)
        try {
            new ToMapUtil();
        } catch (Exception ex) {
            fail("Exception must not be thrown");
        }
    }
}
|
agwlvssainokuni/springapp2
|
corelib/elemental/src/test/java/cherry/elemental/util/ToMapUtilTest.java
|
Java
|
apache-2.0
| 3,335 |
/**
* Copyright 2015 The AMP HTML Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var gulp = require('gulp-help')(require('gulp'));
var path = require('path');
var srcGlobs = require('../config').presubmitGlobs;
var util = require('gulp-util');
// File types whose sources must carry their own dedicated copyright note
// (consumed by `requiredTerms` near the bottom of this file).
var dedicatedCopyrightNoteSources = /(\.js|\.css|\.go)$/;
// Shared fix messages referenced by several forbidden-term entries below.
var es6polyfill = 'Not available because we do not currently' +
    ' ship with a needed ES6 polyfill.';
var requiresReviewPrivacy =
    'Usage of this API requires dedicated review due to ' +
    'being privacy sensitive. Please file an issue asking for permission' +
    ' to use if you have not yet done so.';
var privateServiceFactory = 'This service should only be installed in ' +
    'the whitelisted files. Other modules should use a public function ' +
    'typically called serviceNameFor.';
var shouldNeverBeUsed =
    'Usage of this API is not allowed - only for internal purposes.';
// Terms that must not appear in our source files.
var forbiddenTerms = {
'DO NOT SUBMIT': '',
'describe\\.only': '',
'it\\.only': '',
'sinon\\.(spy|stub|mock)\\(\\w[^)]*\\)': {
message: 'Use a sandbox instead to avoid repeated `#restore` calls'
},
'(\\w*([sS]py|[sS]tub|[mM]ock|clock).restore)': {
message: 'Use a sandbox instead to avoid repeated `#restore` calls'
},
'sinon\\.useFake\\w+': {
message: 'Use a sandbox instead to avoid repeated `#restore` calls'
},
'console\\.\\w+\\(': {
message: 'If you run against this, use console/*OK*/.log to ' +
'whitelist a legit case.',
// TODO: temporary, remove when validator is up to date
whitelist: [
'validator/validator.js',
'validator/parse-css.js',
'validator/validator-in-browser.js',
]
},
'iframePing': {
message: 'This is only available in vendor config for ' +
'temporary workarounds.',
whitelist: [
'extensions/amp-analytics/0.1/amp-analytics.js',
],
},
// Service factories that should only be installed once.
'installActionService': {
message: privateServiceFactory,
whitelist: [
'src/service/action-impl.js',
'src/service/standard-actions-impl.js',
'src/amp-core-service.js',
],
},
'installActionHandler': {
message: privateServiceFactory,
whitelist: [
'src/service/action-impl.js',
'extensions/amp-access/0.1/amp-access.js',
],
},
'installActivityService': {
message: privateServiceFactory,
whitelist: [
'src/service/activity-impl.js',
'extensions/amp-analytics/0.1/amp-analytics.js'
]
},
'installCidService': {
message: privateServiceFactory,
whitelist: [
'src/service/cid-impl.js',
'extensions/amp-access/0.1/amp-access.js',
'extensions/amp-analytics/0.1/amp-analytics.js',
],
},
'installStorageService': {
message: privateServiceFactory,
whitelist: [
'extensions/amp-analytics/0.1/amp-analytics.js',
'src/service/storage-impl.js',
],
},
'installViewerService': {
message: privateServiceFactory,
whitelist: [
'src/amp-core-service.js',
'src/service/history-impl.js',
'src/service/resources-impl.js',
'src/service/viewer-impl.js',
'src/service/viewport-impl.js',
'src/service/vsync-impl.js',
],
},
'installViewportService': {
message: privateServiceFactory,
whitelist: [
'src/amp-core-service.js',
'src/service/resources-impl.js',
'src/service/viewport-impl.js',
],
},
'installVsyncService': {
message: privateServiceFactory,
whitelist: [
'src/amp-core-service.js',
'src/service/resources-impl.js',
'src/service/viewport-impl.js',
'src/service/vsync-impl.js',
],
},
'installResourcesService': {
message: privateServiceFactory,
whitelist: [
'src/amp-core-service.js',
'src/service/resources-impl.js',
'src/service/standard-actions-impl.js',
],
},
'sendMessage': {
message: privateServiceFactory,
whitelist: [
'src/service/viewer-impl.js',
'src/service/storage-impl.js',
'examples/viewer-integr-messaging.js',
'extensions/amp-access/0.1/login-dialog.js',
],
},
// Privacy sensitive
'cidFor': {
message: requiresReviewPrivacy,
whitelist: [
'builtins/amp-ad.js',
'src/cid.js',
'src/service/cid-impl.js',
'src/url-replacements.js',
'extensions/amp-access/0.1/amp-access.js',
'extensions/amp-user-notification/0.1/amp-user-notification.js',
],
},
'getBaseCid': {
message: requiresReviewPrivacy,
whitelist: [
'src/service/cid-impl.js',
'src/service/viewer-impl.js',
],
},
'cookie\\W': {
message: requiresReviewPrivacy,
whitelist: [
'src/cookies.js',
'src/service/cid-impl.js',
],
},
'getCookie\\W': {
message: requiresReviewPrivacy,
whitelist: [
'src/service/cid-impl.js',
'src/cookies.js',
'src/experiments.js',
'tools/experiments/experiments.js',
]
},
'setCookie\\W': {
message: requiresReviewPrivacy,
whitelist: [
'src/service/cid-impl.js',
'src/cookies.js',
'src/experiments.js',
'tools/experiments/experiments.js',
]
},
'isDevChannel\\W': {
message: requiresReviewPrivacy,
whitelist: [
'extensions/amp-access/0.1/amp-access.js',
'extensions/amp-user-notification/0.1/amp-user-notification.js',
'src/3p-frame.js',
'src/experiments.js',
'src/service/storage-impl.js',
'src/service/viewport-impl.js',
'tools/experiments/experiments.js',
]
},
'isDevChannelVersionDoNotUse_\\W': {
message: shouldNeverBeUsed,
whitelist: [
'src/experiments.js',
]
},
'isTrusted': {
message: requiresReviewPrivacy,
whitelist: [
'src/service/viewer-impl.js',
]
},
'eval\\(': '',
'storageFor': {
message: requiresReviewPrivacy,
whitelist: [
'src/storage.js',
'extensions/amp-user-notification/0.1/amp-user-notification.js',
],
},
'localStorage': {
message: requiresReviewPrivacy,
whitelist: [
'src/service/cid-impl.js',
'src/service/storage-impl.js',
],
},
'sessionStorage': requiresReviewPrivacy,
'indexedDB': requiresReviewPrivacy,
'openDatabase': requiresReviewPrivacy,
'requestFileSystem': requiresReviewPrivacy,
'webkitRequestFileSystem': requiresReviewPrivacy,
'getAccessReaderId': {
message: requiresReviewPrivacy,
whitelist: [
'extensions/amp-access/0.1/amp-access.js',
'src/url-replacements.js',
]
},
'getAuthdataField': {
message: requiresReviewPrivacy,
whitelist: [
'extensions/amp-access/0.1/amp-access.js',
'src/url-replacements.js',
]
},
'debugger': '',
// ES6. These are only the most commonly used.
'Array\\.of': es6polyfill,
// These currently depend on core-js/modules/web.dom.iterable which
// we don't want. That decision could be reconsidered.
'\\.startsWith': {
message: es6polyfill,
whitelist: [
'validator/tokenize-css.js',
'validator/validator.js'
]
},
'\\.endsWith': {
message: es6polyfill,
whitelist: [
// .endsWith occurs in babel generated code.
'dist.3p/current/integration.js',
],
},
// TODO: (erwinm) rewrite the destructure and spread warnings as
// eslint rules (takes more time than this quick regex fix).
// No destructuring allowed since we dont ship with Array polyfills.
'^\\s*(?:let|const|var) *(?:\\[[^\\]]+\\]|{[^}]+}) *=': es6polyfill,
// No spread (eg. test(...args) allowed since we dont ship with Array
// polyfills except `arguments` spread as babel does not polyfill
// it since it can assume that it can `slice` w/o the use of helpers.
'\\.\\.\\.(?!arguments\\))[_$A-Za-z0-9]*(?:\\)|])': {
message: es6polyfill,
whitelist: [
'extensions/amp-access/0.1/access-expr-impl.js',
],
},
// Overridden APIs.
'(doc.*)\\.referrer': {
message: 'Use Viewer.getReferrerUrl() instead.',
whitelist: [
'3p/integration.js',
'dist.3p/current/integration.js',
'src/service/viewer-impl.js',
'src/error.js',
],
},
'(doc[^.]*)\\.contains': {
message: 'Use dom.documentContains API.',
whitelist: [
'src/dom.js',
],
},
'\\sdocument(?![a-zA-Z0-9_])': {
message: 'Use `window.document` or similar to access document, the global' +
'`document` is forbidden',
whitelist: [
'validator/validator.js',
'testing/iframe.js',
'testing/screenshots/make-screenshot.js',
'tools/experiments/experiments.js',
'examples/viewer-integr.js',
],
},
'getUnconfirmedReferrerUrl': {
message: 'Use Viewer.getReferrerUrl() instead.',
whitelist: [
'extensions/amp-dynamic-css-classes/0.1/amp-dynamic-css-classes.js',
'src/3p-frame.js',
'src/service/viewer-impl.js',
],
},
'setTimeout.*throw': {
message: 'Use dev.error or user.error instead.',
whitelist: [
'src/log.js',
],
},
};
// Explanation attached to every term that is banned in 3p bootstrap code.
var ThreePTermsMessage = 'The 3p bootstrap iframe has no polyfills loaded and' +
    ' can thus not use most modern web APIs.';
// Terms that must not appear in 3p (ads/bootstrap) source files; checked only
// for files that `hasAnyTerms` classifies as 3p.
var forbidden3pTerms = {
  // We need to forbid promise usage because we don't have our own polyfill
  // available. This whitelisting of callNext is a major hack to allow one
  // usage in babel's external helpers that is in a code path that we do
  // not use.
  '\\.then\\((?!callNext)': ThreePTermsMessage,
  'Math\\.sign' : ThreePTermsMessage,
};
// Long-form help text shared by most `forbiddenTermsSrcInclusive` entries;
// explains the /*OK*/ and /*REVIEW*/ escape hatches.
var bannedTermsHelpString = 'Please review viewport.js for a helper method ' +
    'or mark with `/*OK*/` or `/*REVIEW*/` and consult the AMP team. ' +
    'Most of the forbidden property/method access banned on the ' +
    '`forbiddenTermsSrcInclusive` object can be found in ' +
    '[What forces layout / reflow gist by Paul Irish]' +
    '(https://gist.github.com/paulirish/5d52fb081b3570c81e3a). ' +
    'These properties/methods when read/used require the browser ' +
    'to have the up-to-date value to return which might possibly be an ' +
    'expensive computation and could also be triggered multiple times ' +
    'if we are not careful. Please mark the call with ' +
    '`object./*OK*/property` if you explicitly need to read or update the ' +
    'forbidden property/method or mark it with `object./*REVIEW*/property` ' +
    'if you are unsure and so that it stands out in code reviews.';
// Terms banned in all non-test source files. Most entries are layout-forcing
// DOM properties; see `bannedTermsHelpString` for the rationale.
// The trailing `(?!_)` lookahead keeps the pattern from matching identifiers
// that continue with an underscore after the banned name.
// Fix: `clientLeft` previously carried a duplicated `(?!_)(?!_)` lookahead
// (copy/paste artifact); a single lookahead is semantically identical.
var forbiddenTermsSrcInclusive = {
  '\\.innerHTML(?!_)': bannedTermsHelpString,
  '\\.outerHTML(?!_)': bannedTermsHelpString,
  '\\.postMessage(?!_)': bannedTermsHelpString,
  '\\.offsetLeft(?!_)': bannedTermsHelpString,
  '\\.offsetTop(?!_)': bannedTermsHelpString,
  '\\.offsetWidth(?!_)': bannedTermsHelpString,
  '\\.offsetHeight(?!_)': bannedTermsHelpString,
  '\\.offsetParent(?!_)': bannedTermsHelpString,
  '\\.clientLeft(?!_)': bannedTermsHelpString,
  '\\.clientTop(?!_)': bannedTermsHelpString,
  '\\.clientWidth(?!_)': bannedTermsHelpString,
  '\\.clientHeight(?!_)': bannedTermsHelpString,
  '\\.getClientRects(?!_)': bannedTermsHelpString,
  '\\.getBoundingClientRect(?!_)': bannedTermsHelpString,
  '\\.scrollBy(?!_)': bannedTermsHelpString,
  '\\.scrollTo(?!_|p|p_)': bannedTermsHelpString,
  '\\.scrollIntoView(?!_)': bannedTermsHelpString,
  '\\.scrollIntoViewIfNeeded(?!_)': bannedTermsHelpString,
  '\\.scrollWidth(?!_)': 'please use `getScrollWidth()` from viewport',
  '\\.scrollHeight(?!_)': bannedTermsHelpString,
  '\\.scrollTop(?!_)': bannedTermsHelpString,
  '\\.scrollLeft(?!_)': bannedTermsHelpString,
  '\\.focus(?!_)': bannedTermsHelpString,
  '\\.computedRole(?!_)': bannedTermsHelpString,
  '\\.computedName(?!_)': bannedTermsHelpString,
  '\\.innerText(?!_)': bannedTermsHelpString,
  '\\.getComputedStyle(?!_)': bannedTermsHelpString,
  '\\.scrollX(?!_)': bannedTermsHelpString,
  '\\.scrollY(?!_)': bannedTermsHelpString,
  '\\.pageXOffset(?!_)': bannedTermsHelpString,
  '\\.pageYOffset(?!_)': bannedTermsHelpString,
  '\\.innerWidth(?!_)': bannedTermsHelpString,
  '\\.innerHeight(?!_)': bannedTermsHelpString,
  '\\.getMatchedCSSRules(?!_)': bannedTermsHelpString,
  '\\.scrollingElement(?!_)': bannedTermsHelpString,
  '\\.computeCTM(?!_)': bannedTermsHelpString,
  '\\.getBBox(?!_)': bannedTermsHelpString,
  '\\.webkitConvertPointFromNodeToPage(?!_)': bannedTermsHelpString,
  '\\.webkitConvertPointFromPageToNode(?!_)': bannedTermsHelpString,
  '\\.changeHeight(?!_)': bannedTermsHelpString,
  '\\.changeSize(?!_)': bannedTermsHelpString,
  'reject\\(\\)': {
    message: 'Always supply a reason in rejections. ' +
        'error.cancellation() may be applicable.',
    whitelist: [
      'extensions/amp-access/0.1/access-expr-impl.js',
    ],
  }
};
// Terms that must appear in a source file. Each entry maps a required regex
// to a path filter selecting which files the requirement applies to
// (currently: the dedicated-copyright file types).
var requiredTerms = {
  'Copyright 20(15|16) The AMP HTML Authors\\.':
      dedicatedCopyrightNoteSources,
  'Licensed under the Apache License, Version 2\\.0':
      dedicatedCopyrightNoteSources,
  'http\\://www\\.apache\\.org/licenses/LICENSE-2\\.0':
      dedicatedCopyrightNoteSources,
};
/**
 * Check if the root of a relative path is `test/` or the file's immediate
 * parent directory is named `test`.
 *
 * @param {string} filePath relative, `/`-separated path to a file
 * @return {boolean} true when the file lives under a test folder
 */
function isInTestFolder(filePath) {
  // Fix: the parameter used to be named `path`, shadowing the `path` module
  // required at the top of this file.
  var dirs = filePath.split('/');
  var parent = dirs[dirs.length - 2];
  return filePath.startsWith('test/') || parent == 'test';
}
/**
 * Remove comments from source text before it is scanned for forbidden terms,
 * so that commented-out occurrences are not reported.
 *
 * @param {string} contents raw file contents
 * @return {string} contents with comments stripped
 */
function stripComments(contents) {
  // In order: multi-line comments (the lookahead skips `/*` openings that
  // have a `*/` later on the same line), then single-line comments with only
  // leading whitespace, then trailing `//` comments that follow a space,
  // semi-colon, or closing brace.
  return contents
      .replace(/\/\*(?!.*\*\/)(.|\n)*?\*\//g, '')
      .replace(/\n\s*\/\/.*/g, '')
      .replace(/( |\}|;)\s*\/\/.*/g, '$1');
}
/**
 * Logs any issues found in the contents of file based on terms (regex
 * patterns), and provides any possible fix information for matched terms if
 * possible.
 *
 * @param {!File} file a vinyl file object to scan for term matches
 * @param {!Object<string, (string|{message: string, whitelist: Array<string>})>}
 *     terms pairs of regex patterns and possible fix messages
 * @return {boolean} true if any of the terms match the file content,
 *     false otherwise
 */
function matchTerms(file, terms) {
  // Fix: removed the unused local `pathname` (file.path was captured but
  // never read in this function).
  var contents = stripComments(file.contents.toString());
  var relative = file.relative;
  // `map` (rather than a bare `some`) is deliberate: every term must be
  // evaluated so that ALL violations in the file get logged, not just the
  // first one found.
  return Object.keys(terms).map(function(term) {
    var fix;
    var whitelist = terms[term].whitelist;
    // NOTE: we could do a glob test instead of exact check in the future
    // if needed but that might be too permissive.
    if (Array.isArray(whitelist) && (whitelist.indexOf(relative) != -1 ||
        isInTestFolder(relative))) {
      return false;
    }
    // we can't optimize building the `RegExp` objects early unless we build
    // another mapping of term -> regexp object to be able to get back to the
    // original term to get the possible fix value. This is ok as the
    // presubmit doesn't have to be blazing fast and this is most likely
    // negligible.
    var matches = contents.match(new RegExp(term, 'gm'));
    if (matches) {
      util.log(util.colors.red('Found forbidden: "' + matches[0] +
          '" in ' + relative));
      if (typeof terms[term] == 'string') {
        fix = terms[term];
      } else {
        fix = terms[term].message;
      }
      // log the possible fix information if provided for the term.
      if (fix) {
        util.log(util.colors.blue(fix));
      }
      util.log(util.colors.blue('=========='));
      return true;
    }
    return false;
  }).some(function(hasAnyTerm) {
    return hasAnyTerm;
  });
}
/**
 * Test if a file's contents match any of the forbidden terms.
 *
 * @param {!File} file file is a vinyl file object
 * @return {boolean} true if any of the terms match the file content,
 *     false otherwise
 */
function hasAnyTerms(file) {
  var pathname = file.path;
  var basename = path.basename(pathname);
  // Test-only files skip the src-inclusive and 3p term checks entirely.
  var isTestFile = /^test-/.test(basename) || /^_init_tests/.test(basename);
  // 3p/ads code gets the extra no-polyfill term checks.
  var is3pFile = /3p|ads/.test(pathname) ||
      basename == '3p.js' ||
      basename == 'style.js';
  var hasTerms = matchTerms(file, forbiddenTerms);
  var hasSrcInclusiveTerms =
      !isTestFile && matchTerms(file, forbiddenTermsSrcInclusive);
  var has3pTerms =
      is3pFile && !isTestFile && matchTerms(file, forbidden3pTerms);
  return hasTerms || hasSrcInclusiveTerms || has3pTerms;
}
/**
 * Test if a file's contents fail to match any of the required terms and log
 * any missing terms.
 *
 * @param {!File} file file is a vinyl file object
 * @return {boolean} true if any of the terms are not matched in the file
 *     content, false otherwise
 */
function isMissingTerms(file) {
  var contents = file.contents.toString();
  // Collect (and log) every required term that applies to this file but is
  // absent from its contents.
  var missing = Object.keys(requiredTerms).filter(function(term) {
    var pathFilter = requiredTerms[term];
    if (!pathFilter.test(file.path)) {
      // This requirement does not apply to this file type.
      return false;
    }
    if (contents.match(new RegExp(term))) {
      return false;
    }
    util.log(util.colors.red('Did not find required: "' + term +
        '" in ' + file.relative));
    util.log(util.colors.blue('=========='));
    return true;
  });
  return missing.length > 0;
}
/**
 * Check a file for all the required terms and
 * any forbidden terms and log any errors found.
 */
function checkForbiddenAndRequiredTerms() {
  // These flags are mutated inside the buffered stream callback below and
  // read once the stream ends; the plumbing is order-dependent.
  var forbiddenFound = false;
  var missingRequirements = false;
  return gulp.src(srcGlobs)
      .pipe(util.buffer(function(err, files) {
        // `map` before `some` so every file is scanned and its violations
        // logged, not just the first offending file.
        forbiddenFound = files.map(hasAnyTerms).some(function(errorFound) {
          return errorFound;
        });
        missingRequirements = files.map(isMissingTerms).some(
            function(errorFound) {
              return errorFound;
            });
      }))
      .on('end', function() {
        if (forbiddenFound) {
          util.log(util.colors.blue(
              'Please remove these usages or consult with the AMP team.'));
        }
        if (missingRequirements) {
          util.log(util.colors.blue(
              'Adding these terms (e.g. by adding a required LICENSE ' +
              'to the file)'));
        }
        // Fail the presubmit task when any problem was reported above.
        if (forbiddenFound || missingRequirements) {
          process.exit(1);
        }
      });
}
// Register the `gulp presubmit` task (the second argument is the help text
// shown by gulp-help).
gulp.task('presubmit', 'Run validation against files to check for forbidden ' +
    'and required terms', checkForbiddenAndRequiredTerms);
|
nekodo/amphtml
|
build-system/tasks/presubmit-checks.js
|
JavaScript
|
apache-2.0
| 18,988 |
/*
* Copyright 2011-2012 Gregory P. Moyer
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.syphr.mythtv.ws.backend.impl;
import java.io.IOException;
import java.util.Calendar;
import javax.xml.ws.BindingProvider;
import org.syphr.mythtv.data.Program;
import org.syphr.mythtv.ws.ServiceVersionException;
import org.syphr.mythtv.ws.backend.GuideService;
import org.syphr.mythtv.ws.backend.impl._0_25.guide.Guide;
import org.syphr.mythtv.ws.backend.impl._0_25.guide.GuideServices;
import org.syphr.mythtv.ws.backend.impl._0_25.guide.ProgramGuide;
import org.syphr.mythtv.ws.impl.AbstractService;
/**
 * {@link GuideService} implementation backed by the MythTV 0.25 "Guide" web
 * service, delegating all calls to the generated JAX-WS stubs in
 * {@code org.syphr.mythtv.ws.backend.impl._0_25.guide}.
 */
public class GuideService0_25 extends AbstractService implements GuideService
{
    // Service identity reported to AbstractService for version verification.
    private static final String NAME = "Guide";
    private static final String VERSION = "1.0";

    // Generated JAX-WS port that every method delegates to.
    private final Guide service;

    /**
     * Creates the service, binds it to the given backend host/port, and
     * verifies that the backend exposes a compatible Guide service version.
     *
     * @param host backend host name
     * @param port backend port
     * @throws ServiceVersionException if the backend service version is incompatible
     * @throws IOException if the backend cannot be contacted
     */
    public GuideService0_25(String host, int port) throws ServiceVersionException, IOException
    {
        GuideServices locator = new GuideServices();
        service = locator.getBasicHttpBindingGuide();
        configureAndVerify(host, port, (BindingProvider)service);
    }

    @Override
    protected String getName()
    {
        return NAME;
    }

    @Override
    protected String getVersion()
    {
        return VERSION;
    }

    @Override
    public String getChannelIcon(Integer chanId, Integer width, Integer height)
    {
        return service.getChannelIcon(chanId, width, height);
    }

    @Override
    public Program getProgramDetails(Integer chanId, Calendar startTime)
    {
        // TODO not yet implemented: the delegate call is commented out, so this
        // always returns null. Presumably the WS result still needs conversion
        // to org.syphr.mythtv.data.Program -- verify before enabling.
        return null;//service.getProgramDetails(chanId, startTime);
    }

    @Override
    public ProgramGuide getProgramGuide(Calendar startTime,
                                        Calendar endTime,
                                        Integer startChanId,
                                        Integer numChannels,
                                        Boolean details)
    {
        return service.getProgramGuide(startTime, endTime, startChanId, numChannels, details);
    }
}
|
syphr42/libmythtv-java
|
ws/src/main/java/org/syphr/mythtv/ws/backend/impl/GuideService0_25.java
|
Java
|
apache-2.0
| 2,555 |
package com.g10.ssm.service;
import java.util.List;
import com.g10.ssm.po.LearningTaskCoursewareKey;
/**
 * Service operations for the association between learning tasks and
 * courseware ({@link LearningTaskCoursewareKey} is the composite key linking
 * the two).
 */
public interface LearningTaskCoursewareService {
    /**
     * @return all learning-task/courseware associations
     * @throws Exception on any data-access failure
     */
    public List<LearningTaskCoursewareKey> queryLearningTaskCourseware() throws Exception;
    /**
     * @param learningTaskId id of the learning task
     * @return the URLs of all courseware attached to the given learning task
     * @throws Exception on any data-access failure
     */
    public String[] queryAllCoursewareUrl(Integer learningTaskId) throws Exception;
    /**
     * @param learningTaskId id of the learning task
     * @return the ids of all courseware attached to the given learning task
     * @throws Exception on any data-access failure
     */
    public Integer[] getAllCoursewareId(Integer learningTaskId) throws Exception;
    /*
     * public int updateLearningTaskCourseware(LearningTaskCoursewareKey
     * testTable) throws Exception;
     */
    /**
     * Persists a new learning-task/courseware association.
     *
     * @return affected row count -- presumably 1 on success; verify against the mapper
     * @throws Exception on any data-access failure
     */
    public int saveLearningTaskCourseware(LearningTaskCoursewareKey learningTaskCourseware) throws Exception;
    /**
     * Deletes the association identified by the given composite key.
     *
     * @return affected row count
     * @throws Exception on any data-access failure
     */
    public int deleteLearningTaskCoursewareByPrimaryKey(LearningTaskCoursewareKey learningTaskCourseware)
            throws Exception;
    /**
     * Deletes all courseware associations of the given learning task.
     *
     * @return affected row count
     * @throws Exception on any data-access failure
     */
    public int deleteLearningTaskCourseware(Integer learningTaskId) throws Exception;
}
|
scaug10/NETESP
|
src/main/java/com/g10/ssm/service/LearningTaskCoursewareService.java
|
Java
|
apache-2.0
| 835 |
import { Component, HostListener, Inject } from '@angular/core';
import { MAT_DIALOG_DATA, MatDialogRef } from '@angular/material';
@Component({
  selector: 'cs-volume-delete-dialog',
  templateUrl: './volume-delete-dialog.component.html',
  styleUrls: ['./volume-delete-dialog.component.scss'],
})
export class VolumeDeleteDialogComponent {
  /** Whether the user opted to also delete the volume's snapshots. */
  public deleteSnapshots = false;

  constructor(
    public dialogRef: MatDialogRef<VolumeDeleteDialogComponent>,
    @Inject(MAT_DIALOG_DATA) public hasSnapshots: boolean,
  ) {}

  /** Closes the dialog, reporting that destruction was confirmed. */
  public confirmDestroy(): void {
    // The flag is only present when set, so callers can distinguish
    // "delete snapshots too" from a plain confirmation.
    const result: { deleteSnapshots?: boolean } =
      this.deleteSnapshots ? { deleteSnapshots: true } : {};
    this.dialogRef.close(result);
  }

  /** Dismisses the dialog (no result) when the user presses Escape. */
  @HostListener('keydown.esc')
  public onEsc(): void {
    this.dialogRef.close();
  }
}
|
bwsw/cloudstack-ui
|
src/app/shared/actions/volume-actions/volume-delete/volume-delete-dialog.component.ts
|
TypeScript
|
apache-2.0
| 817 |
/* Copyright (c) The m-m-m Team, Licensed under the Apache License, Version 2.0
* http://www.apache.org/licenses/LICENSE-2.0 */
package net.sf.mmm.util.lang.api;
import net.sf.mmm.util.lang.api.attribute.AttributeReadValue;
/**
* This enum contains the available values for the orientation.
*
* @see Alignment
*
* @author Joerg Hohwiller (hohwille at users.sourceforge.net)
* @since 3.0.0
*/
public enum Orientation implements AttributeReadValue<String> {

  /**
   * A horizontal orientation means that objects are ordered from the left to the right.
   */
  HORIZONTAL("-", "horizontal"),

  /**
   * A vertical orientation means that objects are ordered from the top to the bottom.
   */
  VERTICAL("|", "vertical");

  /** The raw value (ascii symbol) of this orientation. */
  private final String value;

  /** The display title returned by {@link #toString()}. */
  private final String title;

  /**
   * The constructor.
   *
   * @param value is the {@link #getValue() raw value} (symbol).
   * @param title is the {@link #toString() string representation}.
   */
  private Orientation(String value, String title) {
    this.value = value;
    this.title = title;
  }

  /**
   * @return the ascii symbol.
   */
  @Override
  public String getValue() {
    return this.value;
  }

  @Override
  public String toString() {
    return this.title;
  }

  /**
   * This method gets the {@link Orientation} with the given {@link #getValue() value}.
   *
   * @param value is the {@link #getValue() value} of the requested {@link Orientation}.
   * @return the requested {@link Orientation} or {@code null} if no constant has that value.
   */
  public static Orientation fromValue(String value) {
    // Fix: loop variable renamed from "alignment" (a copy/paste leftover from
    // the Alignment enum) to "orientation".
    for (Orientation orientation : values()) {
      if (orientation.value.equals(value)) {
        return orientation;
      }
    }
    return null;
  }

  /**
   * This method gets the inverse orientation.
   *
   * @return {@link #VERTICAL} if this orientation is {@link #HORIZONTAL} and vice versa.
   */
  public Orientation getMirrored() {
    return (this == HORIZONTAL) ? VERTICAL : HORIZONTAL;
  }
}
|
m-m-m/util
|
lang/src/main/java/net/sf/mmm/util/lang/api/Orientation.java
|
Java
|
apache-2.0
| 1,997 |
package io.github.mapstream;
import java.util.Comparator;
import java.util.Iterator;
import java.util.Optional;
import java.util.Spliterator;
import java.util.function.*;
import java.util.stream.*;
class PairEntryStreamImpl<K, V> implements PairEntryStream<K, V> {
private Stream<PairEntry<K, V>> delegate;
    /**
     * Creates a pair-entry stream that delegates every {@link Stream}
     * operation to the given backing stream.
     *
     * @param delegate the stream of pair entries to wrap
     */
    PairEntryStreamImpl(Stream<PairEntry<K, V>> delegate) {
        this.delegate = delegate;
    }
    /**
     * Re-wraps the underlying entry stream as a {@link MapStream}; the only
     * method in this class that is not a plain delegation.
     */
    @Override
    public MapStream<K, V> mapStream() {
        return MapStream.from(delegate);
    }
/* EVERYTHING DELEGATED */
@Override
public Stream<PairEntry<K, V>> filter(Predicate<? super PairEntry<K, V>> predicate) {
return delegate.filter(predicate);
}
@Override
public <R> Stream<R> map(Function<? super PairEntry<K, V>, ? extends R> mapper) {
return delegate.map(mapper);
}
@Override
public IntStream mapToInt(ToIntFunction<? super PairEntry<K, V>> mapper) {
return delegate.mapToInt(mapper);
}
@Override
public LongStream mapToLong(ToLongFunction<? super PairEntry<K, V>> mapper) {
return delegate.mapToLong(mapper);
}
@Override
public DoubleStream mapToDouble(ToDoubleFunction<? super PairEntry<K, V>> mapper) {
return delegate.mapToDouble(mapper);
}
@Override
public <R> Stream<R> flatMap(Function<? super PairEntry<K, V>, ? extends Stream<? extends R>> mapper) {
return delegate.flatMap(mapper);
}
@Override
public IntStream flatMapToInt(Function<? super PairEntry<K, V>, ? extends IntStream> mapper) {
return delegate.flatMapToInt(mapper);
}
@Override
public LongStream flatMapToLong(Function<? super PairEntry<K, V>, ? extends LongStream> mapper) {
return delegate.flatMapToLong(mapper);
}
@Override
public DoubleStream flatMapToDouble(Function<? super PairEntry<K, V>, ? extends DoubleStream> mapper) {
return delegate.flatMapToDouble(mapper);
}
@Override
public Stream<PairEntry<K, V>> distinct() {
return delegate.distinct();
}
@Override
public Stream<PairEntry<K, V>> sorted() {
return delegate.sorted();
}
@Override
public Stream<PairEntry<K, V>> sorted(Comparator<? super PairEntry<K, V>> comparator) {
return delegate.sorted(comparator);
}
@Override
public Stream<PairEntry<K, V>> peek(Consumer<? super PairEntry<K, V>> action) {
return delegate.peek(action);
}
@Override
public Stream<PairEntry<K, V>> limit(long maxSize) {
return delegate.limit(maxSize);
}
@Override
public Stream<PairEntry<K, V>> skip(long n) {
return delegate.skip(n);
}
@Override
public void forEach(Consumer<? super PairEntry<K, V>> action) {
delegate.forEach(action);
}
@Override
public void forEachOrdered(Consumer<? super PairEntry<K, V>> action) {
delegate.forEachOrdered(action);
}
@Override
public Object[] toArray() {
return delegate.toArray();
}
@Override
public <A> A[] toArray(IntFunction<A[]> generator) {
return delegate.toArray(generator);
}
@Override
public PairEntry<K, V> reduce(PairEntry<K, V> identity, BinaryOperator<PairEntry<K, V>> accumulator) {
return delegate.reduce(identity, accumulator);
}
@Override
public Optional<PairEntry<K, V>> reduce(BinaryOperator<PairEntry<K, V>> accumulator) {
return delegate.reduce(accumulator);
}
@Override
public <U> U reduce(U identity, BiFunction<U, ? super PairEntry<K, V>, U> accumulator, BinaryOperator<U> combiner) {
return delegate.reduce(identity, accumulator, combiner);
}
@Override
public <R> R collect(Supplier<R> supplier, BiConsumer<R, ? super PairEntry<K, V>> accumulator, BiConsumer<R, R> combiner) {
return delegate.collect(supplier, accumulator, combiner);
}
@Override
public <R, A> R collect(Collector<? super PairEntry<K, V>, A, R> collector) {
return delegate.collect(collector);
}
@Override
public Optional<PairEntry<K, V>> min(Comparator<? super PairEntry<K, V>> comparator) {
return delegate.min(comparator);
}
@Override
public Optional<PairEntry<K, V>> max(Comparator<? super PairEntry<K, V>> comparator) {
return delegate.max(comparator);
}
@Override
public long count() {
return delegate.count();
}
@Override
public boolean anyMatch(Predicate<? super PairEntry<K, V>> predicate) {
return delegate.anyMatch(predicate);
}
@Override
public boolean allMatch(Predicate<? super PairEntry<K, V>> predicate) {
return delegate.allMatch(predicate);
}
@Override
public boolean noneMatch(Predicate<? super PairEntry<K, V>> predicate) {
return delegate.noneMatch(predicate);
}
@Override
public Optional<PairEntry<K, V>> findFirst() {
return delegate.findFirst();
}
@Override
public Optional<PairEntry<K, V>> findAny() {
return delegate.findAny();
}
@Override
public Iterator<PairEntry<K, V>> iterator() {
return delegate.iterator();
}
@Override
public Spliterator<PairEntry<K, V>> spliterator() {
return delegate.spliterator();
}
@Override
public boolean isParallel() {
return delegate.isParallel();
}
@Override
public Stream<PairEntry<K, V>> sequential() {
return delegate.sequential();
}
@Override
public Stream<PairEntry<K, V>> parallel() {
return delegate.parallel();
}
@Override
public Stream<PairEntry<K, V>> unordered() {
return delegate.unordered();
}
@Override
public Stream<PairEntry<K, V>> onClose(Runnable closeHandler) {
return delegate.onClose(closeHandler);
}
@Override
public void close() {
delegate.close();
}
@Override
public String toString() {
return "PairEntryStream{" +
"delegate=" + delegate +
'}';
}
}
|
mapstream/mapstream
|
src/main/java/io/github/mapstream/PairEntryStreamImpl.java
|
Java
|
apache-2.0
| 6,109 |
package com.github.bingoohuang.excel2beans.annotations;
/**
 * Strategy used when merging cells.
 */
public enum MergeType {
    /**
     * Merge directly (unconditionally).
     */
    Direct,
    /**
     * Merge cells that hold the same value.
     */
    SameValue,
}
|
bingoohuang/excel2javabeans
|
src/main/java/com/github/bingoohuang/excel2beans/annotations/MergeType.java
|
Java
|
apache-2.0
| 194 |
/*******************************************************************************
* Copyright (C) 2014 Philipp B. Costa
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package br.ufc.mdcc.mpos.net.profile;
import java.io.IOException;
import java.util.Random;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.Semaphore;
import android.util.Log;
import br.ufc.mdcc.mpos.net.Protocol;
import br.ufc.mdcc.mpos.net.core.ClientAbstract;
import br.ufc.mdcc.mpos.net.core.FactoryClient;
import br.ufc.mdcc.mpos.net.core.ReceiveDataEvent;
import br.ufc.mdcc.mpos.net.endpoint.ServerContent;
import br.ufc.mdcc.mpos.net.exceptions.MissedEventException;
import br.ufc.mdcc.mpos.net.profile.model.Network;
import br.ufc.mdcc.mpos.util.TaskResult;
import br.ufc.mdcc.mpos.util.Util;
/**
* This implement a full profile client.
*
* @author Philipp B. Costa
*/
public final class ProfileNetworkFull extends ProfileNetworkTask {

    // 32 KiB payload buffer used for the upload bandwidth test.
    private byte data[] = new byte[32 * 1024];

    // Result object populated step by step in doInBackground.
    private Network network;

    // Set by the receive callback once the upload result ("end_session")
    // arrives; also checked by the timeout timer.
    private boolean bandwidthDone = false;

    /**
     * Creates the full network profiling task against the given endpoint.
     *
     * @param result callback receiving the finished {@link Network} profile
     * @param server endpoint the tests run against
     * @throws MissedEventException propagated from the task infrastructure
     */
    public ProfileNetworkFull(TaskResult<Network> result, ServerContent server) throws MissedEventException {
        super(server, result, ProfileNetworkFull.class, "ProfileFull Started on endpoint: " + server.getIp());
        // Fill the upload payload with random bytes.
        new Random().nextBytes(data);
    }

    /**
     * Runs the whole profile. Progress feedback codes:
     * 15 -> Finished Ping TCP Test
     * 30 -> Finished Ping UDP Test
     * 35 -> Finished Ping Test with packet loss
     * 50 -> Finished Jitter Calculation
     * 55 -> Start Download Test
     * 75 -> Start Upload Test
     * 100 -> Finished Connection Test
     */
    @Override
    protected Network doInBackground(Void... params) {
        network = new Network();
        try {
            Log.i(clsName, "ping tcp");
            long[] pings = pingService(Protocol.TCP_EVENT);
            network.setResultPingTcp(pings);
            publishProgress(15);
            Log.i(clsName, "ping udp");
            pings = pingService(Protocol.UDP_EVENT);
            network.setResultPingUdp(pings);
            publishProgress(30);
            Log.i(clsName, "loss packet udp");
            // Count the lost UDP packets.
            if (halted) {
                return null;
            }
            network.setLossPacket(lossPacketCalculation(network));
            publishProgress(35);
            Log.i(clsName, "jitter calculation");
            jitterCalculation();
            if (halted) {
                return null;
            }
            retrieveJitterResult();
            publishProgress(50);
            Log.i(clsName, "bandwidth calculation");
            boolean finish = bandwidthCalculation();
            publishProgress(100);
            // The task was cancelled or stopped by the timeout timer.
            if (halted || !finish) {
                return null;
            }
            Log.d(clsName, "ProfileFull Finished");
            return network;
        } catch (InterruptedException e) {
            Log.w(clsName, e);
        } catch (IOException e) {
            Log.e(clsName, e.getMessage(), e);
        } catch (MissedEventException e) {
            Log.e(clsName, e.getMessage(), e);
        }
        publishProgress(100);
        return null;
    }

    /**
     * Sends a constant-rate UDP packet train for the server-side jitter
     * measurement.
     * <p>
     * RFC 4689 defines jitter as "the absolute value of the difference
     * between the Forwarding Delay of two consecutive received packets
     * belonging to the same stream". Jitter matters in real-time
     * communication (e.g. VoIP) where delay variation degrades quality.
     * Reference: http://tools.ietf.org/html/rfc4689#section-3.2.5
     * <p>
     * In short, the server computes the arrival interval between
     * consecutive packets that were sent at a constant rate:
     * interval = arrival_time - previous_time, and
     * jitter = current_interval - previous_interval (average, maximum and
     * minimum can be derived). Rule of thumb for VoIP: below 5 ms is
     * excellent, about 15 ms is fair, above 15 ms is poor
     * (see http://www.onsip.com/tools/voip-test).
     *
     * @throws MissedEventException
     * @throws IOException
     * @throws InterruptedException
     */
    private void jitterCalculation() throws IOException, MissedEventException, InterruptedException {
        ClientAbstract client = FactoryClient.getInstance(Protocol.UDP_EVENT);
        client.setReceiveDataEvent(new ReceiveDataEvent() {
            @Override
            public void receive(byte[] data, int offset, int read) {
                Log.d(clsName, "Jitter Finish");
            }
        });
        client.connect(server.getIp(), server.getJitterTestPort());
        for (int i = 0; i < 21; i++) {
            client.send(("jitter").getBytes());
            // Wait 250 ms between packets
            // because UDP has no flow control.
            Thread.sleep(250);
        }
        client.close();
    }

    /**
     * Fetches the jitter value computed by the server (over TCP) and stores
     * it in {@link #network}. Blocks on a semaphore until the response
     * callback has run.
     */
    private void retrieveJitterResult() throws IOException, MissedEventException, InterruptedException {
        Thread.sleep(1500);
        final Semaphore mutex = new Semaphore(0);
        ClientAbstract client = FactoryClient.getInstance(Protocol.TCP_EVENT);
        client.setReceiveDataEvent(new ReceiveDataEvent() {
            @Override
            public void receive(byte[] data, int offset, int read) {
                Log.d(clsName, "Retrieve data from server for Jitter calcule");
                network.setJitter(Integer.parseInt(new String(data, offset, read)));
                // System.out.println(results.getJitter());
                mutex.release();
            }
        });
        client.connect(server.getIp(), server.getJitterRetrieveResultPort());
        client.send("get".getBytes());
        mutex.acquire();
        client.close();
    }

    /**
     * Runs the download then the upload bandwidth test over a single TCP
     * connection, guarded by a 120 s timeout timer.
     *
     * @return true when the upload result ("end_session") arrived in time,
     *         false when the task was halted or the timeout fired
     */
    private boolean bandwidthCalculation() throws IOException, MissedEventException, InterruptedException {
        final Semaphore mutex = new Semaphore(0);
        // begin download
        publishProgress(55);
        ClientAbstract client = FactoryClient.getInstance(Protocol.TCP_EVENT);
        client.setReceiveDataEvent(new ReceiveDataEvent() {
            private long countBytes = 0L;
            private byte endDown[] = "end_down".getBytes();
            private byte endSession[] = "end_session".getBytes();
            @Override
            public void receive(byte[] data, int offset, int read) {
                countBytes += (long) read;
                if (Util.containsArrays(data, endDown)) {
                    // System.out.println("Bytes: "+countBytes);
                    // bytes * 8bits / 7s * 1E+6 = X Mbits
                    double bandwidth = ((double) (countBytes * 8L) / (double) (7.0 * 1E+6));
                    network.setBandwidthDownload(String.valueOf(bandwidth));
                    countBytes = 0L;
                    mutex.release();
                } else if (Util.containsArrays(data, endSession)) {
                    bandwidthDone = true;
                    String dataBlock = new String(data, offset, read);
                    String res[] = dataBlock.split(":");
                    network.setBandwidthUpload(res[1]);
                    mutex.release();
                }
            }
        });
        // Safety timer: if the test never finishes, unblock both waits below.
        Timer timeout = new Timer("Timeout Bandwidth");
        timeout.schedule(new TimerTask() {
            @Override
            public void run() {
                if (!bandwidthDone) {
                    // Make sure no semaphore acquire stays blocked.
                    mutex.release();
                    mutex.release();
                    Log.i(clsName, "Bandwidth Timeout...");
                }
            }
        }, 120000);// 120 s timeout
        Log.i(clsName, "bandwidth (download)");
        client.connect(server.getIp(), server.getBandwidthPort());
        client.send("down".getBytes());
        // Wait for the download phase to finish.
        mutex.acquire();
        // begin upload
        publishProgress(75);
        if (halted) {
            timeout.cancel();
            return false;
        }
        Log.i(clsName, "bandwidth (upload)");
        client.send("up".getBytes());
        // Upload random data for ~11 seconds.
        long timeExit = System.currentTimeMillis() + 11000;
        while (System.currentTimeMillis() < timeExit) {
            client.send(data);
        }
        client.send("end_up".getBytes());
        Log.i(clsName, "bandwidth (ended upload)");
        mutex.acquire();
        client.close();
        // Cancel the timeout timer.
        timeout.cancel();
        return bandwidthDone;
    }
}
|
ufc-great/mpos
|
android/MpOS API/src/br/ufc/mdcc/mpos/net/profile/ProfileNetworkFull.java
|
Java
|
apache-2.0
| 8,129 |
//
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Warning: This code was generated by a tool.
//
// Changes to this file may cause incorrect behavior and will be lost if the
// code is regenerated.
using Microsoft.Azure.Commands.Compute.Automation.Models;
using Microsoft.Azure.Management.Compute;
using Microsoft.Azure.Management.Compute.Models;
using Microsoft.WindowsAzure.Commands.Utilities.Common;
using System;
using System.Collections;
using System.Collections.Generic;
using System.Linq;
using System.Management.Automation;
namespace Microsoft.Azure.Commands.Compute.Automation
{
    public partial class InvokeAzureComputeMethodCmdlet : ComputeAutomationBaseCmdlet
    {
        // NOTE: generated code (see file header) — these comments will be
        // lost if the file is regenerated.
        // Builds the dynamic parameter set for the Snapshot.Delete method:
        // ResourceGroupName and SnapshotName (positional, mandatory) plus an
        // ArgumentList for the static-parameter invocation path.
        protected object CreateSnapshotDeleteDynamicParameters()
        {
            dynamicParameters = new RuntimeDefinedParameterDictionary();
            var pResourceGroupName = new RuntimeDefinedParameter();
            pResourceGroupName.Name = "ResourceGroupName";
            pResourceGroupName.ParameterType = typeof(string);
            pResourceGroupName.Attributes.Add(new ParameterAttribute
            {
                ParameterSetName = "InvokeByDynamicParameters",
                Position = 1,
                Mandatory = true
            });
            pResourceGroupName.Attributes.Add(new AllowNullAttribute());
            dynamicParameters.Add("ResourceGroupName", pResourceGroupName);
            var pSnapshotName = new RuntimeDefinedParameter();
            pSnapshotName.Name = "SnapshotName";
            pSnapshotName.ParameterType = typeof(string);
            pSnapshotName.Attributes.Add(new ParameterAttribute
            {
                ParameterSetName = "InvokeByDynamicParameters",
                Position = 2,
                Mandatory = true
            });
            pSnapshotName.Attributes.Add(new AllowNullAttribute());
            dynamicParameters.Add("SnapshotName", pSnapshotName);
            var pArgumentList = new RuntimeDefinedParameter();
            pArgumentList.Name = "ArgumentList";
            pArgumentList.ParameterType = typeof(object[]);
            pArgumentList.Attributes.Add(new ParameterAttribute
            {
                ParameterSetName = "InvokeByStaticParameters",
                Position = 3,
                Mandatory = true
            });
            pArgumentList.Attributes.Add(new AllowNullAttribute());
            dynamicParameters.Add("ArgumentList", pArgumentList);
            return dynamicParameters;
        }

        // Unpacks the two parameters and performs the snapshot deletion.
        protected void ExecuteSnapshotDeleteMethod(object[] invokeMethodInputParameters)
        {
            string resourceGroupName = (string)ParseParameter(invokeMethodInputParameters[0]);
            string snapshotName = (string)ParseParameter(invokeMethodInputParameters[1]);
            var result = SnapshotsClient.Delete(resourceGroupName, snapshotName);
            WriteObject(result);
        }
    }
    public partial class NewAzureComputeArgumentListCmdlet : ComputeAutomationBaseCmdlet
    {
        // Generated code: produces an empty argument template for the
        // Snapshot.Delete invocation (ResourceGroupName, SnapshotName).
        protected PSArgument[] CreateSnapshotDeleteParameters()
        {
            string resourceGroupName = string.Empty;
            string snapshotName = string.Empty;
            return ConvertFromObjectsToArguments(
                 new string[] { "ResourceGroupName", "SnapshotName" },
                 new object[] { resourceGroupName, snapshotName });
        }
    }
    // Generated cmdlet: Remove-AzureRmSnapshot — deletes a managed snapshot.
    [Cmdlet(VerbsCommon.Remove, "AzureRmSnapshot", DefaultParameterSetName = "DefaultParameter", SupportsShouldProcess = true)]
    [OutputType(typeof(PSOperationStatusResponse))]
    public partial class RemoveAzureRmSnapshot : ComputeAutomationBaseCmdlet
    {
        public override void ExecuteCmdlet()
        {
            ExecuteClientAction(() =>
            {
                // Honour -WhatIf/-Confirm; -Force skips the extra
                // confirmation prompt.
                if (ShouldProcess(this.SnapshotName, VerbsCommon.Remove)
                    && (this.Force.IsPresent ||
                        this.ShouldContinue(Properties.Resources.ResourceRemovalConfirmation,
                                            "Remove-AzureRmSnapshot operation")))
                {
                    string resourceGroupName = this.ResourceGroupName;
                    string snapshotName = this.SnapshotName;
                    var result = SnapshotsClient.Delete(resourceGroupName, snapshotName);
                    var psObject = new PSOperationStatusResponse();
                    ComputeAutomationAutoMapperProfile.Mapper.Map<Azure.Management.Compute.Models.OperationStatusResponse, PSOperationStatusResponse>(result, psObject);
                    WriteObject(psObject);
                }
            });
        }

        [Parameter(
            ParameterSetName = "DefaultParameter",
            Position = 1,
            Mandatory = true,
            ValueFromPipelineByPropertyName = true,
            ValueFromPipeline = false)]
        [AllowNull]
        [ResourceManager.Common.ArgumentCompleters.ResourceGroupCompleter()]
        public string ResourceGroupName { get; set; }

        [Parameter(
            ParameterSetName = "DefaultParameter",
            Position = 2,
            Mandatory = true,
            ValueFromPipelineByPropertyName = true,
            ValueFromPipeline = false)]
        [Alias("Name")]
        [AllowNull]
        public string SnapshotName { get; set; }

        // Suppresses the interactive removal confirmation.
        [Parameter(
            ParameterSetName = "DefaultParameter",
            Mandatory = false)]
        [AllowNull]
        public SwitchParameter Force { get; set; }

        [Parameter(Mandatory = false, HelpMessage = "Run cmdlet in the background")]
        public SwitchParameter AsJob { get; set; }
    }
}
|
naveedaz/azure-powershell
|
src/ResourceManager/Compute/Commands.Compute/Generated/Snapshot/SnapshotDeleteMethod.cs
|
C#
|
apache-2.0
| 6,217 |
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import re
import json
import copy
import time
from collections import defaultdict, OrderedDict
import requests
import _jsonnet # pylint: disable=import-error
# Legacy AWS price feeds used for deprecated / previous-generation types.
LINUX_PRICING_URLS = [
    # Deprecated instances (JSON format)
    'https://aws.amazon.com/ec2/pricing/json/linux-od.json',
    # Previous generation instances (JavaScript file)
    'https://a0.awsstatic.com/pricing/1/ec2/previous-generation/linux-od.min.js',
    # New generation instances (JavaScript file)
    # Using other endpoint atm
    # 'https://a0.awsstatic.com/pricing/1/ec2/linux-od.min.js'
]

# Regions queried against the calculator.aws per-region index.
EC2_REGIONS = [
    'us-east-1',
    'us-east-2',
    'us-west-1',
    'us-west-2',
    'us-gov-west-1',
    'eu-west-1',
    'eu-west-2',
    'eu-west-3',
    'eu-north-1',
    'eu-central-1',
    'ca-central-1',
    'ap-southeast-1',
    'ap-southeast-2',
    'ap-northeast-1',
    'ap-northeast-2',
    'ap-south-1',
    'sa-east-1',
    'cn-north-1',
]

# Known EC2 instance type names.
EC2_INSTANCE_TYPES = [
    't1.micro',
    'm1.small',
    'm1.medium',
    'm1.large',
    'm1.xlarge',
    'm2.xlarge',
    'm2.2xlarge',
    'm2.4xlarge',
    'm3.medium',
    'm3.large',
    'm3.xlarge',
    'm3.2xlarge',
    'c1.medium',
    'c1.xlarge',
    'cc1.4xlarge',
    'cc2.8xlarge',
    'c3.large',
    'c3.xlarge',
    'c3.2xlarge',
    'c3.4xlarge',
    'c3.8xlarge',
    'd2.xlarge',
    'd2.2xlarge',
    'd2.4xlarge',
    'd2.8xlarge',
    'cg1.4xlarge',
    'g2.2xlarge',
    'g2.8xlarge',
    'cr1.8xlarge',
    'hs1.4xlarge',
    'hs1.8xlarge',
    'i2.xlarge',
    'i2.2xlarge',
    'i2.4xlarge',
    'i2.8xlarge',
    'i3.large',
    'i3.xlarge',
    'i3.2xlarge',
    'i3.4xlarge',
    'i3.8xlarge',
    'i3.16large',  # NOTE(review): looks like a typo of 'i3.16xlarge' -- confirm before fixing
    'r3.large',
    'r3.xlarge',
    'r3.2xlarge',
    'r3.4xlarge',
    'r3.8xlarge',
    'r4.large',
    'r4.xlarge',
    'r4.2xlarge',
    'r4.4xlarge',
    'r4.8xlarge',
    'r4.16xlarge',
    't2.micro',
    't2.small',
    't2.medium',
    't2.large',
    'x1.32xlarge'
]

# Maps EC2 region name to region name used in the pricing file
REGION_NAME_MAP = {
    'us-east': 'ec2_us_east',
    'us-east-1': 'ec2_us_east',
    'us-east-2': 'ec2_us_east_ohio',
    'us-west': 'ec2_us_west',
    'us-west-1': 'ec2_us_west',
    'us-west-2': 'ec2_us_west_oregon',
    'eu-west-1': 'ec2_eu_west',
    'eu-west-2': 'ec2_eu_west_london',
    'eu-west-3': 'ec2_eu_west_3',
    'eu-ireland': 'ec2_eu_west',
    'eu-central-1': 'ec2_eu_central',
    'ca-central-1': 'ec2_ca_central_1',
    'apac-sin': 'ec2_ap_southeast',
    'ap-southeast-1': 'ec2_ap_southeast',
    'apac-syd': 'ec2_ap_southeast_2',
    'ap-southeast-2': 'ec2_ap_southeast_2',
    'apac-tokyo': 'ec2_ap_northeast',
    'ap-northeast-1': 'ec2_ap_northeast',
    'ap-northeast-2': 'ec2_ap_northeast',
    'ap-south-1': 'ec2_ap_south_1',
    'sa-east-1': 'ec2_sa_east',
    'us-gov-west-1': 'ec2_us_govwest',
    'cn-north-1': 'ec2_cn_north',
}

# Canonical ordering of size suffixes, consumed by sort_key_by_numeric_other.
INSTANCE_SIZES = [
    'micro',
    'small',
    'medium',
    'large',
    'xlarge',
    'x-large',
    'extra-large'
]

# Splits a string into runs of digits, runs of [-A-Z_a-z], and anything else.
RE_NUMERIC_OTHER = re.compile(r'(?:([0-9]+)|([-A-Z_a-z]+)|([^-0-9A-Z_a-z]+))')

# Absolute path of libcloud's bundled pricing database.
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
PRICING_FILE_PATH = os.path.join(BASE_PATH, '../libcloud/data/pricing.json')
PRICING_FILE_PATH = os.path.abspath(PRICING_FILE_PATH)
def scrape_ec2_pricing():
    """Scrape AWS EC2 on-demand pricing and return it keyed by driver name.

    Two sources are combined:

    1. The legacy ``LINUX_PRICING_URLS`` feeds (deprecated and
       previous-generation instance types, JSON or JSONP-wrapped JS).
    2. The ``calculator.aws`` per-region index for current Linux and
       Windows prices.

    :return: mapping like ``{'ec2_linux': {size: {region: price}},
             'ec2_windows': {...}}`` with prices as floats.
    """
    result = defaultdict(OrderedDict)
    os_map = {'linux': 'ec2_linux', 'windows-std': 'ec2_windows'}
    for item in os_map.values():
        result[item] = {}
    for url in LINUX_PRICING_URLS:
        response = requests.get(url)
        if re.match(r'.*?\.json$', url):
            data = response.json()
            print("Sample response: %s..." % (str(data)[:100]))
        elif re.match(r'.*?\.js$', url):
            data = response.content.decode('utf-8')
            print("Sample response: %s..." % (data[:100]))
            # Strip the JSONP "callback(...)" wrapper to get the payload.
            match = re.match(r'^.*callback\((.*?)\);?$', data,
                             re.MULTILINE | re.DOTALL)
            data = match.group(1)
            # NOTE: We used to use demjson, but it's not working under Python 3 and new version of
            # setuptools anymore so we use jsonnet
            # demjson supports non-strict mode and can parse unquoted objects
            data = json.loads(_jsonnet.evaluate_snippet('snippet', data))
        regions = data['config']['regions']
        for region_data in regions:
            region_name = region_data['region']
            instance_types = region_data['instanceTypes']
            for instance_type in instance_types:
                sizes = instance_type['sizes']
                for size in sizes:
                    if not result['ec2_linux'].get(size['size'], False):
                        result['ec2_linux'][size['size']] = {}
                    price = size['valueColumns'][0]['prices']['USD']
                    if str(price).lower() == 'n/a':
                        # Price not available
                        continue
                    result['ec2_linux'][size['size']][
                        region_name] = float(price)
    # Second source: calculator.aws, collected per OS and region first.
    res = defaultdict(OrderedDict)
    url = ('https://calculator.aws/pricing/1.0/'
           'ec2/region/{}/ondemand/{}/index.json')
    instances = set()
    for OS in ['linux', 'windows-std']:
        res[os_map[OS]] = {}
        for region in EC2_REGIONS:
            res[os_map[OS]][region] = {}
            full_url = url.format(region, OS)
            response = requests.get(full_url)
            if response.status_code != 200:
                print("Skipping URL %s which returned non 200-status code (%s)" %
                      (full_url, response.status_code))
                continue
            data = response.json()
            for entry in data['prices']:
                instance_type = entry['attributes'].get(
                    'aws:ec2:instanceType', "")
                instances.add(instance_type)
                price = entry['price'].get('USD', 0)
                res[os_map[OS]][region][instance_type] = price
    # Merge the calculator.aws data into the combined result.
    for item in os_map.values():
        for instance in instances:
            if not result[item].get(instance, False):
                result[item][instance] = {}
            for region in EC2_REGIONS:
                if res[item][region].get(instance, False):
                    result[item][instance][region] = float(res[
                        item][region][instance])
    return result
def update_pricing_file(pricing_file_path, pricing_data):
    """Merge freshly scraped prices into the pricing JSON file on disk.

    The ``compute`` section is updated in place; when nothing actually
    changed, the file is left untouched (so the ``updated`` timestamp only
    moves on real changes). Keys are recursively sorted and trailing
    whitespace stripped before writing.

    :param pricing_file_path: Path of the pricing JSON file.
    :param pricing_data: Mapping of driver name -> pricing dict to merge.
    """
    with open(pricing_file_path, 'r') as fp:
        data = json.loads(fp.read())

    snapshot = copy.deepcopy(data)
    data['compute'].update(pricing_data)
    if data == snapshot:
        # Bail out early and keep the "updated" attribute unchanged.
        print("Nothing has changed, skipping update.")
        return

    data['updated'] = int(time.time())
    # Always sort the pricing info before serializing.
    rendered = json.dumps(sort_nested_dict(data), indent=4)
    cleaned = '\n'.join(line.rstrip() for line in rendered.splitlines())

    with open(pricing_file_path, 'w') as fp:
        fp.write(cleaned)
def sort_nested_dict(value):
    """Recursively sort a nested dict.

    Keys are ordered using ``sort_key_by_numeric_other`` so numeric
    fragments compare numerically and known instance-size names keep their
    canonical order.

    :param value: Mapping to sort (not mutated).
    :return: New ``OrderedDict`` with the same content, recursively sorted.
    """
    result = OrderedDict()
    # Use a distinct loop variable: the original rebound the ``value``
    # parameter here, which obscured what the recursion operated on.
    for key, child in sorted(value.items(), key=sort_key_by_numeric_other):
        if isinstance(child, (dict, OrderedDict)):
            result[key] = sort_nested_dict(child)
        else:
            result[key] = child
    return result
def sort_key_by_numeric_other(key_value):
    """Build a sort key that splits a string into numeric / alpha / other runs.

    Digit runs compare as integers (missing -> -1); alpha runs that are
    known instance sizes compare by their ``INSTANCE_SIZES`` position
    (stringified); everything else compares as-is.

    :param key_value: ``(key, value)`` pair as produced by ``dict.items()``;
        only the key is inspected.
    :return: Tuple of ``(numeric, alpha, other)`` triples.
    """
    parts = []
    for numeric, alpha, other in RE_NUMERIC_OTHER.findall(key_value[0]):
        num = int(numeric) if numeric else -1
        if alpha in INSTANCE_SIZES:
            alpha = INSTANCE_SIZES.index(alpha)
        parts.append((num, str(alpha), other))
    return tuple(parts)
def main():
    """Entry point: scrape EC2 prices and update the bundled pricing file."""
    print('Scraping EC2 pricing data (this may take up to 2 minutes)....')
    pricing_data = scrape_ec2_pricing()
    update_pricing_file(pricing_file_path=PRICING_FILE_PATH,
                        pricing_data=pricing_data)
    print('Pricing data updated')
if __name__ == '__main__':
main()
|
Kami/libcloud
|
contrib/scrape-ec2-prices.py
|
Python
|
apache-2.0
| 9,128 |
package com.sap.mlt.xliff12.impl.attribute;
import com.sap.mlt.xliff12.api.attribute.PropType;
import com.sap.mlt.xliff12.impl.base.XliffAttributeImpl;
/**
* @deprecated
*/
public class PropTypeImpl extends XliffAttributeImpl implements PropType {

    /**
     * Creates the XLIFF {@code prop-type} attribute with the given value;
     * the attribute name is the {@code NAME} constant inherited from
     * {@link PropType}.
     *
     * @param propType the attribute value
     */
    public PropTypeImpl(String propType) {
        super(NAME, propType);
    }
}
|
SAP/xliff-1-2
|
com.sap.mlt.xliff12.impl/src/main/java/com/sap/mlt/xliff12/impl/attribute/PropTypeImpl.java
|
Java
|
apache-2.0
| 340 |
package com.inktomi.cirrus.forecast;
import org.simpleframework.xml.Element;
@Element
public enum TimeCoordinate {

    UTC("UTC"),
    LOCAL("local");

    /** XML value this constant serializes to / from. */
    private final String value;

    TimeCoordinate(String v) {
        value = v;
    }

    /** Returns the XML representation of this constant. */
    public String value() {
        return value;
    }

    /**
     * Looks up the constant whose XML value equals {@code v}.
     *
     * @throws IllegalArgumentException if no constant matches
     */
    public static TimeCoordinate fromValue(String v) {
        for (TimeCoordinate candidate : values()) {
            if (candidate.value.equals(v)) {
                return candidate;
            }
        }
        throw new IllegalArgumentException(v);
    }
}
|
inktomi/cirrus
|
cirrus-library/src/main/java/com/inktomi/cirrus/forecast/TimeCoordinate.java
|
Java
|
apache-2.0
| 558 |
/*
* Copyright (C) 2009 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.aqnote.app.barcode.history;
import android.database.sqlite.SQLiteOpenHelper;
import android.database.sqlite.SQLiteDatabase;
import android.content.Context;
/**
* @author Sean Owen
*/
final class DBHelper extends SQLiteOpenHelper {

  // Bump when the schema below changes; onUpgrade drops and recreates.
  private static final int DB_VERSION = 5;
  private static final String DB_NAME = "barcode_scanner_history.db";

  // Table and column names shared with the history DAO code.
  static final String TABLE_NAME = "history";
  static final String ID_COL = "id";
  static final String TEXT_COL = "text";
  static final String FORMAT_COL = "format";
  static final String DISPLAY_COL = "display";
  static final String TIMESTAMP_COL = "timestamp";
  static final String DETAILS_COL = "details";

  DBHelper(Context context) {
    super(context, DB_NAME, null, DB_VERSION);
  }

  /** Creates the single scan-history table. */
  @Override
  public void onCreate(SQLiteDatabase sqLiteDatabase) {
    sqLiteDatabase.execSQL(
        "CREATE TABLE " + TABLE_NAME + " (" +
        ID_COL + " INTEGER PRIMARY KEY, " +
        TEXT_COL + " TEXT, " +
        FORMAT_COL + " TEXT, " +
        DISPLAY_COL + " TEXT, " +
        TIMESTAMP_COL + " INTEGER, " +
        DETAILS_COL + " TEXT);");
  }

  /** Destructive upgrade: history is dropped and the table recreated. */
  @Override
  public void onUpgrade(SQLiteDatabase sqLiteDatabase, int oldVersion, int newVersion) {
    sqLiteDatabase.execSQL("DROP TABLE IF EXISTS " + TABLE_NAME);
    onCreate(sqLiteDatabase);
  }
}
|
aqnote/AndroidTest
|
app-barcode/src/main/java/com/aqnote/app/barcode/history/DBHelper.java
|
Java
|
apache-2.0
| 1,955 |
/*
* Copyright © 2013-2019 camunda services GmbH and various authors (info@camunda.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.camunda.bpm.engine.spring.test.transaction.modification;
import org.apache.commons.lang3.time.DateUtils;
import org.camunda.bpm.engine.delegate.DelegateExecution;
import java.util.Date;
public class CalculateTimerDate {

  /**
   * Stores the current time in the {@code createDate} process variable and
   * returns a timer due date one day from now.
   *
   * @param execution current process execution
   * @return the current time plus one day
   */
  public Date execute(DelegateExecution execution) {
    execution.setVariable("createDate", new Date());
    return DateUtils.addDays(new Date(), 1);
  }
}
|
xasx/camunda-bpm-platform
|
engine-spring/src/test/java/org/camunda/bpm/engine/spring/test/transaction/modification/CalculateTimerDate.java
|
Java
|
apache-2.0
| 1,058 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.glaf.mail.mapper;
import java.util.*;
import org.springframework.stereotype.Component;
import com.glaf.mail.domain.*;
import com.glaf.mail.query.*;
@Component
public interface MailStorageMapper {

	/** Deletes every mail storage matched by {@code query}. */
	void deleteMailStorages(MailStorageQuery query);

	/** Deletes the mail storage with the given id. */
	void deleteMailStorageById(String id);

	/** Returns the mail storage with the given id, or {@code null} if absent. */
	MailStorage getMailStorageById(String id);

	/** Returns the mail storage whose data table name matches, or {@code null}. */
	MailStorage getMailStorageByDataTable(String dataTable);

	/** Counts the mail storages matched by {@code query}. */
	int getMailStorageCount(MailStorageQuery query);

	/** Lists the mail storages matched by {@code query}. */
	List<MailStorage> getMailStorages(MailStorageQuery query);

	/** Inserts a new mail storage record. */
	void insertMailStorage(MailStorage model);

	/** Updates an existing mail storage record. */
	void updateMailStorage(MailStorage model);
}
|
jior/glaf
|
workspace/glaf-mail/src/main/java/com/glaf/mail/mapper/MailStorageMapper.java
|
Java
|
apache-2.0
| 1,402 |
/***
* ASM: a very small and fast Java bytecode manipulation framework
* Copyright (c) 2000-2011 INRIA, France Telecom
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
package holophonor.org.objectweb.asm.commons;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import holophonor.org.objectweb.asm.Handle;
import holophonor.org.objectweb.asm.Label;
import holophonor.org.objectweb.asm.MethodVisitor;
import holophonor.org.objectweb.asm.Opcodes;
import holophonor.org.objectweb.asm.Type;
/**
* A {@link holophonor.org.objectweb.asm.MethodVisitor} to insert before, after and around
* advices in methods and constructors.
* <p>
* The behavior for constructors is like this:
* <ol>
*
* <li>as long as the INVOKESPECIAL for the object initialization has not been
* reached, every bytecode instruction is dispatched in the ctor code visitor</li>
*
* <li>when this one is reached, it is only added in the ctor code visitor and a
* JP invoke is added</li>
*
* <li>after that, only the other code visitor receives the instructions</li>
*
* </ol>
*
* @author Eugene Kuleshov
* @author Eric Bruneton
*/
public abstract class AdviceAdapter extends GeneratorAdapter implements Opcodes {

    // Sentinel pushed on the simulated operand stack for the uninitialized
    // 'this' reference loaded by ALOAD 0 in a constructor.
    private static final Object THIS = new Object();

    // Sentinel for every other simulated stack value; the simulation only
    // needs to distinguish THIS from not-THIS.
    private static final Object OTHER = new Object();

    // The access flags of the visited method (see Opcodes).
    protected int methodAccess;

    // The descriptor of the visited method.
    protected String methodDesc;

    // True while visiting a constructor whose super/this initializer call has
    // not yet been reached; cleared once that INVOKESPECIAL is seen.
    private boolean constructor;

    // True once the super class constructor has been called (immediately true
    // for non-constructor methods).
    private boolean superInitialized;

    // Simulated operand stack (list of THIS/OTHER sentinels), maintained only
    // while 'constructor' is true.
    private List<Object> stackFrame;

    // Maps each forward-branch target to the simulated stack state at the
    // branch instruction, so the simulation can resume at the target label.
    private Map<Label, List<Object>> branches;

    /**
     * Creates a new {@link AdviceAdapter}.
     *
     * @param api
     *            the ASM API version implemented by this visitor. Must be one
     *            of {@link Opcodes#ASM4}.
     * @param mv
     *            the method visitor to which this adapter delegates calls.
     * @param access
     *            the method's access flags (see {@link Opcodes}).
     * @param name
     *            the method's name.
     * @param desc
     *            the method's descriptor (see {@link Type Type}).
     */
    protected AdviceAdapter(final int api, final MethodVisitor mv,
            final int access, final String name, final String desc) {
        super(api, mv, access, name, desc);
        methodAccess = access;
        methodDesc = desc;
        constructor = "<init>".equals(name);
    }

    @Override
    public void visitCode() {
        mv.visitCode();
        if (constructor) {
            // Start stack simulation; onMethodEnter is deferred until the
            // super constructor call is found.
            stackFrame = new ArrayList<Object>();
            branches = new HashMap<Label, List<Object>>();
        } else {
            // Plain methods are "initialized" from the start.
            superInitialized = true;
            onMethodEnter();
        }
    }

    @Override
    public void visitLabel(final Label label) {
        mv.visitLabel(label);
        // If this label was a recorded branch target, restore the simulated
        // stack that was captured at the branch instruction.
        if (constructor && branches != null) {
            List<Object> frame = branches.get(label);
            if (frame != null) {
                stackFrame = frame;
                branches.remove(label);
            }
        }
    }

    @Override
    public void visitInsn(final int opcode) {
        if (constructor) {
            // Update the simulated stack according to the opcode's net effect
            // (the inline comments note before/after stack sizes).
            int s;
            switch (opcode) {
            case RETURN: // empty stack
                onMethodExit(opcode);
                break;
            case IRETURN: // 1 before n/a after
            case FRETURN: // 1 before n/a after
            case ARETURN: // 1 before n/a after
            case ATHROW: // 1 before n/a after
                popValue();
                onMethodExit(opcode);
                break;
            case LRETURN: // 2 before n/a after
            case DRETURN: // 2 before n/a after
                popValue();
                popValue();
                onMethodExit(opcode);
                break;
            case NOP:
            case LALOAD: // remove 2 add 2
            case DALOAD: // remove 2 add 2
            case LNEG:
            case DNEG:
            case FNEG:
            case INEG:
            case L2D:
            case D2L:
            case F2I:
            case I2B:
            case I2C:
            case I2S:
            case I2F:
            case ARRAYLENGTH:
                break;
            case ACONST_NULL:
            case ICONST_M1:
            case ICONST_0:
            case ICONST_1:
            case ICONST_2:
            case ICONST_3:
            case ICONST_4:
            case ICONST_5:
            case FCONST_0:
            case FCONST_1:
            case FCONST_2:
            case F2L: // 1 before 2 after
            case F2D:
            case I2L:
            case I2D:
                pushValue(OTHER);
                break;
            case LCONST_0:
            case LCONST_1:
            case DCONST_0:
            case DCONST_1:
                pushValue(OTHER);
                pushValue(OTHER);
                break;
            case IALOAD: // remove 2 add 1
            case FALOAD: // remove 2 add 1
            case AALOAD: // remove 2 add 1
            case BALOAD: // remove 2 add 1
            case CALOAD: // remove 2 add 1
            case SALOAD: // remove 2 add 1
            case POP:
            case IADD:
            case FADD:
            case ISUB:
            case LSHL: // 3 before 2 after
            case LSHR: // 3 before 2 after
            case LUSHR: // 3 before 2 after
            case L2I: // 2 before 1 after
            case L2F: // 2 before 1 after
            case D2I: // 2 before 1 after
            case D2F: // 2 before 1 after
            case FSUB:
            case FMUL:
            case FDIV:
            case FREM:
            case FCMPL: // 2 before 1 after
            case FCMPG: // 2 before 1 after
            case IMUL:
            case IDIV:
            case IREM:
            case ISHL:
            case ISHR:
            case IUSHR:
            case IAND:
            case IOR:
            case IXOR:
            case MONITORENTER:
            case MONITOREXIT:
                popValue();
                break;
            case POP2:
            case LSUB:
            case LMUL:
            case LDIV:
            case LREM:
            case LADD:
            case LAND:
            case LOR:
            case LXOR:
            case DADD:
            case DMUL:
            case DSUB:
            case DDIV:
            case DREM:
                popValue();
                popValue();
                break;
            case IASTORE:
            case FASTORE:
            case AASTORE:
            case BASTORE:
            case CASTORE:
            case SASTORE:
            case LCMP: // 4 before 1 after
            case DCMPL:
            case DCMPG:
                popValue();
                popValue();
                popValue();
                break;
            case LASTORE:
            case DASTORE:
                popValue();
                popValue();
                popValue();
                popValue();
                break;
            case DUP:
                pushValue(peekValue());
                break;
            case DUP_X1:
                // Insert a copy of the top value below the second value.
                s = stackFrame.size();
                stackFrame.add(s - 2, stackFrame.get(s - 1));
                break;
            case DUP_X2:
                s = stackFrame.size();
                stackFrame.add(s - 3, stackFrame.get(s - 1));
                break;
            case DUP2:
                s = stackFrame.size();
                stackFrame.add(s - 2, stackFrame.get(s - 1));
                stackFrame.add(s - 2, stackFrame.get(s - 1));
                break;
            case DUP2_X1:
                s = stackFrame.size();
                stackFrame.add(s - 3, stackFrame.get(s - 1));
                stackFrame.add(s - 3, stackFrame.get(s - 1));
                break;
            case DUP2_X2:
                s = stackFrame.size();
                stackFrame.add(s - 4, stackFrame.get(s - 1));
                stackFrame.add(s - 4, stackFrame.get(s - 1));
                break;
            case SWAP:
                // Duplicate top below the second value, then drop the
                // original top: net effect is a swap.
                s = stackFrame.size();
                stackFrame.add(s - 2, stackFrame.get(s - 1));
                stackFrame.remove(s);
                break;
            }
        } else {
            // No stack tracking needed; only hook method exits.
            switch (opcode) {
            case RETURN:
            case IRETURN:
            case FRETURN:
            case ARETURN:
            case LRETURN:
            case DRETURN:
            case ATHROW:
                onMethodExit(opcode);
                break;
            }
        }
        mv.visitInsn(opcode);
    }

    @Override
    public void visitVarInsn(final int opcode, final int var) {
        super.visitVarInsn(opcode, var);
        if (constructor) {
            switch (opcode) {
            case ILOAD:
            case FLOAD:
                pushValue(OTHER);
                break;
            case LLOAD:
            case DLOAD:
                // Category-2 values occupy two simulated slots.
                pushValue(OTHER);
                pushValue(OTHER);
                break;
            case ALOAD:
                // Local 0 of an instance constructor is the uninitialized
                // 'this' reference we are tracking.
                pushValue(var == 0 ? THIS : OTHER);
                break;
            case ASTORE:
            case ISTORE:
            case FSTORE:
                popValue();
                break;
            case LSTORE:
            case DSTORE:
                popValue();
                popValue();
                break;
            }
        }
    }

    @Override
    public void visitFieldInsn(final int opcode, final String owner,
            final String name, final String desc) {
        mv.visitFieldInsn(opcode, owner, name, desc);
        if (constructor) {
            char c = desc.charAt(0);
            boolean longOrDouble = c == 'J' || c == 'D';
            switch (opcode) {
            case GETSTATIC:
                pushValue(OTHER);
                if (longOrDouble) {
                    pushValue(OTHER);
                }
                break;
            case PUTSTATIC:
                popValue();
                if (longOrDouble) {
                    popValue();
                }
                break;
            case PUTFIELD:
                popValue();
                if (longOrDouble) {
                    popValue();
                    popValue();
                }
                break;
            // case GETFIELD:
            default:
                // GETFIELD pops the objectref and pushes the value; net
                // effect is zero for category-1, +1 slot for long/double.
                if (longOrDouble) {
                    pushValue(OTHER);
                }
            }
        }
    }

    @Override
    public void visitIntInsn(final int opcode, final int operand) {
        mv.visitIntInsn(opcode, operand);
        // BIPUSH/SIPUSH push one value; NEWARRAY pops a length and pushes an
        // array reference, so its net effect is zero.
        if (constructor && opcode != NEWARRAY) {
            pushValue(OTHER);
        }
    }

    @Override
    public void visitLdcInsn(final Object cst) {
        mv.visitLdcInsn(cst);
        if (constructor) {
            pushValue(OTHER);
            // Long/double constants take two simulated slots.
            if (cst instanceof Double || cst instanceof Long) {
                pushValue(OTHER);
            }
        }
    }

    @Override
    public void visitMultiANewArrayInsn(final String desc, final int dims) {
        mv.visitMultiANewArrayInsn(desc, dims);
        if (constructor) {
            // Pops one length per dimension, pushes the array reference.
            for (int i = 0; i < dims; i++) {
                popValue();
            }
            pushValue(OTHER);
        }
    }

    @Override
    public void visitTypeInsn(final int opcode, final String type) {
        mv.visitTypeInsn(opcode, type);
        // ANEWARRAY, CHECKCAST or INSTANCEOF don't change stack
        if (constructor && opcode == NEW) {
            pushValue(OTHER);
        }
    }

    @Override
    public void visitMethodInsn(final int opcode, final String owner,
            final String name, final String desc) {
        mv.visitMethodInsn(opcode, owner, name, desc);
        if (constructor) {
            // Pop the arguments according to the descriptor.
            Type[] types = Type.getArgumentTypes(desc);
            for (int i = 0; i < types.length; i++) {
                popValue();
                if (types[i].getSize() == 2) {
                    popValue();
                }
            }
            switch (opcode) {
            // case INVOKESTATIC:
            // break;
            case INVOKEINTERFACE:
            case INVOKEVIRTUAL:
                popValue(); // objectref
                break;
            case INVOKESPECIAL:
                Object type = popValue(); // objectref
                if (type == THIS && !superInitialized) {
                    // This is the super/this constructor call: 'this' is now
                    // initialized, so fire onMethodEnter and stop simulating.
                    onMethodEnter();
                    superInitialized = true;
                    // once super has been initialized it is no longer
                    // necessary to keep track of stack state
                    constructor = false;
                }
                break;
            }
            Type returnType = Type.getReturnType(desc);
            if (returnType != Type.VOID_TYPE) {
                pushValue(OTHER);
                if (returnType.getSize() == 2) {
                    pushValue(OTHER);
                }
            }
        }
    }

    @Override
    public void visitInvokeDynamicInsn(String name, String desc, Handle bsm,
            Object... bsmArgs) {
        mv.visitInvokeDynamicInsn(name, desc, bsm, bsmArgs);
        if (constructor) {
            // Same argument/return stack bookkeeping as visitMethodInsn,
            // minus the objectref (invokedynamic has no receiver).
            Type[] types = Type.getArgumentTypes(desc);
            for (int i = 0; i < types.length; i++) {
                popValue();
                if (types[i].getSize() == 2) {
                    popValue();
                }
            }
            Type returnType = Type.getReturnType(desc);
            if (returnType != Type.VOID_TYPE) {
                pushValue(OTHER);
                if (returnType.getSize() == 2) {
                    pushValue(OTHER);
                }
            }
        }
    }

    @Override
    public void visitJumpInsn(final int opcode, final Label label) {
        mv.visitJumpInsn(opcode, label);
        if (constructor) {
            switch (opcode) {
            case IFEQ:
            case IFNE:
            case IFLT:
            case IFGE:
            case IFGT:
            case IFLE:
            case IFNULL:
            case IFNONNULL:
                popValue();
                break;
            case IF_ICMPEQ:
            case IF_ICMPNE:
            case IF_ICMPLT:
            case IF_ICMPGE:
            case IF_ICMPGT:
            case IF_ICMPLE:
            case IF_ACMPEQ:
            case IF_ACMPNE:
                popValue();
                popValue();
                break;
            case JSR:
                // JSR pushes the return address.
                pushValue(OTHER);
                break;
            }
            // Capture the simulated stack for the branch target.
            addBranch(label);
        }
    }

    @Override
    public void visitLookupSwitchInsn(final Label dflt, final int[] keys,
            final Label[] labels) {
        mv.visitLookupSwitchInsn(dflt, keys, labels);
        if (constructor) {
            popValue();
            addBranches(dflt, labels);
        }
    }

    @Override
    public void visitTableSwitchInsn(final int min, final int max,
            final Label dflt, final Label... labels) {
        mv.visitTableSwitchInsn(min, max, dflt, labels);
        if (constructor) {
            popValue();
            addBranches(dflt, labels);
        }
    }

    @Override
    public void visitTryCatchBlock(Label start, Label end, Label handler,
            String type) {
        super.visitTryCatchBlock(start, end, handler, type);
        // At a handler entry the stack contains exactly one value: the
        // thrown exception.
        if (constructor && !branches.containsKey(handler)) {
            List<Object> stackFrame = new ArrayList<Object>();
            stackFrame.add(OTHER);
            branches.put(handler, stackFrame);
        }
    }

    // Records the current simulated stack for the default label and every
    // switch case label.
    private void addBranches(final Label dflt, final Label[] labels) {
        addBranch(dflt);
        for (int i = 0; i < labels.length; i++) {
            addBranch(labels[i]);
        }
    }

    // Records a copy of the current simulated stack for the given branch
    // target, unless one was already recorded.
    private void addBranch(final Label label) {
        if (branches.containsKey(label)) {
            return;
        }
        branches.put(label, new ArrayList<Object>(stackFrame));
    }

    // Pops and returns the top simulated stack value.
    private Object popValue() {
        return stackFrame.remove(stackFrame.size() - 1);
    }

    // Returns the top simulated stack value without popping it.
    private Object peekValue() {
        return stackFrame.get(stackFrame.size() - 1);
    }

    // Pushes a sentinel value on the simulated stack.
    private void pushValue(final Object o) {
        stackFrame.add(o);
    }

    /**
     * Called at the beginning of the method or after super class class call in
     * the constructor. <br>
     * <br>
     *
     * <i>Custom code can use or change all the local variables, but should not
     * change state of the stack.</i>
     */
    protected void onMethodEnter() {
    }

    /**
     * Called before explicit exit from the method using either return or throw.
     * Top element on the stack contains the return value or exception instance.
     * For example:
     *
     * <pre>
     *   public void onMethodExit(int opcode) {
     *     if(opcode==RETURN) {
     *         visitInsn(ACONST_NULL);
     *     } else if(opcode==ARETURN || opcode==ATHROW) {
     *         dup();
     *     } else {
     *         if(opcode==LRETURN || opcode==DRETURN) {
     *             dup2();
     *         } else {
     *             dup();
     *         }
     *         box(Type.getReturnType(this.methodDesc));
     *     }
     *     visitIntInsn(SIPUSH, opcode);
     *     visitMethodInsn(INVOKESTATIC, owner, "onExit", "(Ljava/lang/Object;I)V");
     *   }
     *
     *   // an actual call back method
     *   public static void onExit(Object param, int opcode) {
     *     ...
     * </pre>
     *
     * <br>
     * <br>
     *
     * <i>Custom code can use or change all the local variables, but should not
     * change state of the stack.</i>
     *
     * @param opcode
     *            one of the RETURN, IRETURN, FRETURN, ARETURN, LRETURN, DRETURN
     *            or ATHROW
     *
     */
    protected void onMethodExit(int opcode) {
    }

    // TODO onException, onMethodCall
}
|
rikf/Holophonor
|
src/main/java/holophonor/org/objectweb/asm/commons/AdviceAdapter.java
|
Java
|
apache-2.0
| 19,221 |
package gnode
import (
"fmt"
"io"
"sync"
"time"
)
// ReceiverManager dispatches incoming messages: either synchronously through
// a single embedded receiver (when nbReceiver <= 0) or through a buffer
// drained by a pool of MessageReceiver goroutines.
type ReceiverManager struct {
	usage        int                    // total number of messages received so far
	gnode        *GNode                 // owning node
	buffer       MessageBuffer          // intermediate buffer feeding the receiver pool
	receiverList []*MessageReceiver     // pool of receiver goroutines
	ioChan       chan *AntMes           // channel consumed by the receivers
	nbReceiver   int                    // pool size; <= 0 means synchronous dispatch
	receiver     MessageReceiver        // synchronous receiver used when the pool is disabled
	answerMap    map[string]*AntMes     // received answers keyed by origin message id
	getChan      chan string            // wakes waitForAnswer (message id or "timeout")
	lockClient   sync.RWMutex           // serializes client-stream registration
	functionMap  map[string]interface{} // message function name -> handler
}
// loadFunctions populates the dispatch table mapping message function names
// to their handlers on the node, gnode, entry and tree managers.
func (m *ReceiverManager) loadFunctions() {
	m.functionMap = make(map[string]interface{})
	//node Functions
	m.functionMap["ping"] = m.gnode.nodeFunctions.ping
	m.functionMap["pingFromTo"] = m.gnode.nodeFunctions.pingFromTo
	m.functionMap["setLogLevel"] = m.gnode.nodeFunctions.setLogLevel
	m.functionMap["killNode"] = m.gnode.nodeFunctions.killNode
	m.functionMap["updateGrid"] = m.gnode.nodeFunctions.updateGrid
	m.functionMap["writeStatsInLog"] = m.gnode.nodeFunctions.writeStatsInLog
	m.functionMap["clear"] = m.gnode.nodeFunctions.clear
	m.functionMap["forceGC"] = m.gnode.nodeFunctions.forceGCMes
	m.functionMap["getConnections"] = m.gnode.nodeFunctions.getConnections
	m.functionMap["getNodeInfo"] = m.gnode.nodeFunctions.getNodeInfo
	m.functionMap["createUser"] = m.gnode.nodeFunctions.createUser
	m.functionMap["createNodeUser"] = m.gnode.nodeFunctions.createNodeUser
	m.functionMap["removeUser"] = m.gnode.nodeFunctions.removeUser
	m.functionMap["removeNodeUser"] = m.gnode.nodeFunctions.removeNodeUser
	m.functionMap["setNodePublicKey"] = m.gnode.nodeFunctions.setNodePublicKey
	m.functionMap["isReady"] = m.gnode.nodeFunctions.isReady
	//gnode Function
	m.functionMap["sendBackEvent"] = m.gnode.sendBackEvent
	//EntryManager function
	m.functionMap["addEntry"] = m.gnode.entryManager.addEntry
	m.functionMap["addBranch"] = m.gnode.entryManager.addBranch
	m.functionMap["getTree"] = m.gnode.treeManager.getTree
}
// start initializes the manager's buffer, channels and dispatch table, then
// spawns maxGoRoutine receiver goroutines plus one pump goroutine that moves
// messages from the buffer into ioChan. When maxGoRoutine <= 0 no goroutines
// are started and messages are executed synchronously by m.receiver.
func (m *ReceiverManager) start(gnode *GNode, bufferSize int, maxGoRoutine int) {
	m.gnode = gnode
	m.loadFunctions()
	m.lockClient = sync.RWMutex{}
	m.nbReceiver = maxGoRoutine
	m.buffer.init(bufferSize)
	m.ioChan = make(chan *AntMes)
	m.getChan = make(chan string)
	m.answerMap = make(map[string]*AntMes)
	m.receiverList = []*MessageReceiver{}
	if maxGoRoutine <= 0 {
		// Synchronous mode: no pool, no pump.
		m.receiver.gnode = gnode
		return
	}
	for i := 0; i < maxGoRoutine; i++ {
		routine := &MessageReceiver{
			id:              i,
			gnode:           m.gnode,
			receiverManager: m,
		}
		m.receiverList = append(m.receiverList, routine)
		routine.start()
	}
	// Pump: forwards buffered messages to the receiver pool.
	// NOTE(review): when buffer.get(true) returns !ok this loop spins —
	// presumably get(true) blocks; confirm against MessageBuffer.
	go func() {
		for {
			mes, ok := m.buffer.get(true)
			//logf.info("Receive message ok=%t %v\n", ok, mes.toString())
			if ok && mes != nil {
				m.ioChan <- mes
			}
		}
	}()
}
// waitForAnswer blocks until the answer for the message with origin id `id`
// has been stored in answerMap, or until timeoutSecond seconds have elapsed,
// in which case a timeout error is returned.
// Each signal on getChan triggers a re-check of answerMap.
// NOTE(review): answerMap is read here without holding a lock; confirm that
// all writers are confined to the goroutine that signals getChan.
func (m *ReceiverManager) waitForAnswer(id string, timeoutSecond int) (*AntMes, error) {
	// Fast path: the answer may already have arrived.
	if mes, ok := m.answerMap[id]; ok {
		return mes, nil
	}
	timer := time.AfterFunc(time.Second*time.Duration(timeoutSecond), func() {
		m.getChan <- "timeout"
	})
	logf.info("Waiting for answer originId=%s\n", id)
	for {
		retId := <-m.getChan
		if retId == "timeout" {
			// Fixed typo in the error message ("wiating" -> "waiting").
			return nil, fmt.Errorf("Timeout waiting for message answer id=%s", id)
		}
		if mes, ok := m.answerMap[id]; ok {
			logf.info("Found answer originId=%s\n", id)
			timer.Stop()
			return mes, nil
		}
	}
}
// receiveMessage accepts an incoming message. In synchronous mode it is
// executed immediately and true is returned; otherwise it is queued in the
// buffer, and false is returned when the buffer refuses it (caller retries).
func (m *ReceiverManager) receiveMessage(mes *AntMes) bool {
	m.usage++
	// Fixed typo in the debug message ("recceive" -> "receive").
	logf.debugMes(mes, "receive message: %s\n", mes.toString())
	if m.nbReceiver <= 0 {
		m.receiver.executeMessage(mes)
		return true
	}
	if m.buffer.put(mes) {
		//logf.info("receive message function=%s duplicate=%d order=%d ok\n", mes.Function, mes.Duplicate, mes.Order)
		return true
	}
	return false
}
// stats prints the total message count, the buffer high-water mark, and the
// per-receiver usage counters to stdout.
func (m *ReceiverManager) stats() {
	fmt.Printf("Receiver: nb=%d maxbuf=%d\n", m.usage, m.buffer.max)
	usages := ""
	for _, rec := range m.receiverList {
		usages = fmt.Sprintf("%s %d", usages, rec.usage)
	}
	fmt.Printf("Receivers: %s\n", usages)
}
// startClientReader registers a new client stream under a generated unique
// name, acknowledges it, then reads messages from the stream until EOF or a
// receive error, dispatching each message into the receiver pipeline.
func (m *ReceiverManager) startClientReader(stream GNodeService_GetClientStreamServer) {
	m.lockClient.Lock()
	clientName := fmt.Sprintf("client-%d-%d", time.Now().UnixNano(), m.gnode.clientMap.len()+1)
	m.gnode.clientMap.set(clientName, &gnodeClient{
		name:   clientName,
		stream: stream,
	})
	stream.Send(&AntMes{
		Function:   "ClientAck",
		FromClient: clientName,
	})
	logf.info("Client stream open: %s\n", clientName)
	m.lockClient.Unlock() //unlock far to be sure to have several nano
	for {
		mes, err := stream.Recv()
		if err != nil {
			// Deduplicated cleanup: EOF and other receive errors only differ
			// in the log line; both unregister the client and force a GC.
			if err == io.EOF {
				logf.error("Client reader %s: EOF\n", clientName)
			} else {
				logf.error("Client reader %s: Failed to receive message: %v\n", clientName, err)
			}
			m.gnode.clientMap.del(clientName)
			m.gnode.removeEventListener(clientName)
			m.gnode.nodeFunctions.forceGC()
			return
		}
		if mes.Function == "setEventListener" {
			m.gnode.setEventListener(mes.Args[0], mes.Args[1], mes.UserName, clientName)
		} else {
			// Stamp the message with a fresh id and its origin before
			// handing it to the receiver pipeline.
			mes.Id = m.gnode.getNewId(false)
			mes.Origin = m.gnode.name
			mes.FromClient = clientName
			m.gnode.idMap.Add(mes.Id)
			if mes.Debug {
				logf.debugMes(mes, "-------------------------------------------------------------------------------------------------------------\n")
				logf.debugMes(mes, "Receive mes from client %s : %v\n", clientName, mes)
			}
			// Retry until the buffer accepts the message.
			for {
				if m.gnode.receiverManager.receiveMessage(mes) {
					break
				}
				time.Sleep(1 * time.Second)
			}
		}
	}
}
|
freignat91/blockchain
|
server/gnode/receiverManager.go
|
GO
|
apache-2.0
| 5,407 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.openshift;
import com.openshift.client.IApplication;
import org.apache.camel.Component;
import org.apache.camel.Consumer;
import org.apache.camel.Exchange;
import org.apache.camel.Processor;
import org.apache.camel.Producer;
import org.apache.camel.impl.DefaultExchange;
import org.apache.camel.impl.ScheduledPollEndpoint;
import org.apache.camel.spi.Metadata;
import org.apache.camel.spi.UriEndpoint;
import org.apache.camel.spi.UriParam;
import org.apache.camel.spi.UriPath;
import org.apache.camel.util.ObjectHelper;
@UriEndpoint(scheme = "openshift", syntax = "openshift:clientId", consumerClass = OpenShiftConsumer.class, label = "cloud")
public class OpenShiftEndpoint extends ScheduledPollEndpoint {

    // Required URI path options: credentials identifying the OpenShift account.
    @UriPath @Metadata(required = "true")
    private String clientId;
    @UriPath @Metadata(required = "true")
    private String username;
    @UriPath @Metadata(required = "true")
    private String password;
    // Optional query options.
    @UriParam
    private String domain;
    @UriParam
    private String server;
    @UriParam(enums = "list,start,stop,restart,state")
    private String operation;
    @UriParam
    private String application;
    @UriParam
    private String mode;

    public OpenShiftEndpoint(String endpointUri, Component component) {
        super(endpointUri, component);
    }

    /**
     * Creates a producer after validating that the mandatory credentials
     * (clientId, username, password) are set.
     */
    @Override
    public Producer createProducer() throws Exception {
        ObjectHelper.notEmpty(clientId, "clientId", this);
        ObjectHelper.notEmpty(username, "username", this);
        ObjectHelper.notEmpty(password, "password", this);
        return new OpenShiftProducer(this);
    }

    /**
     * Creates a polling consumer after validating the mandatory credentials.
     */
    @Override
    public Consumer createConsumer(Processor processor) throws Exception {
        ObjectHelper.notEmpty(clientId, "clientId", this);
        ObjectHelper.notEmpty(username, "username", this);
        ObjectHelper.notEmpty(password, "password", this);
        Consumer consumer = new OpenShiftConsumer(this, processor);
        configureConsumer(consumer);
        return consumer;
    }

    /**
     * Creates an exchange whose IN body is the given OpenShift application.
     */
    public Exchange createExchange(IApplication application) {
        Exchange exchange = new DefaultExchange(this);
        exchange.getIn().setBody(application);
        return exchange;
    }

    @Override
    public boolean isSingleton() {
        return true;
    }

    public String getUsername() {
        return username;
    }

    public void setUsername(String username) {
        this.username = username;
    }

    public String getPassword() {
        return password;
    }

    public void setPassword(String password) {
        this.password = password;
    }

    public String getClientId() {
        return clientId;
    }

    public void setClientId(String clientId) {
        this.clientId = clientId;
    }

    public String getDomain() {
        return domain;
    }

    public void setDomain(String domain) {
        this.domain = domain;
    }

    public String getServer() {
        return server;
    }

    public void setServer(String server) {
        this.server = server;
    }

    public String getOperation() {
        return operation;
    }

    public void setOperation(String operation) {
        this.operation = operation;
    }

    // Convenience overload accepting the typed enum; stored as its name.
    public void setOperation(OpenShiftOperation operation) {
        this.operation = operation.name();
    }

    public String getApplication() {
        return application;
    }

    public void setApplication(String application) {
        this.application = application;
    }

    public String getMode() {
        return mode;
    }

    public void setMode(String mode) {
        this.mode = mode;
    }
}
|
ramonmaruko/camel
|
components/camel-openshift/src/main/java/org/apache/camel/component/openshift/OpenShiftEndpoint.java
|
Java
|
apache-2.0
| 4,430 |
/*
* Copyright 2016-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.designpattern.manage.base.creationalpatterns.prototype;
/**
 * Prototype pattern base class: new instances are created by copying an
 * existing one via {@link Object#clone()} (a shallow copy).
 *
 * @author kay
 * @version 1.0
 */
public class Prototype implements Cloneable {

    /**
     * Returns a shallow copy of this object.
     *
     * <p>The original implementation caught {@code CloneNotSupportedException},
     * printed the stack trace and returned {@code null}. Since this class
     * implements {@link Cloneable}, that exception can never occur, so it is
     * now rethrown as an {@link AssertionError} instead of silently returning
     * {@code null} (Effective Java, Item 13).
     *
     * @return a new {@code Prototype} with the same state as this one
     */
    @Override
    public Prototype clone() {
        try {
            return (Prototype) super.clone();
        } catch (CloneNotSupportedException e) {
            // Cannot happen: this class implements Cloneable.
            throw new AssertionError(e);
        }
    }
}
|
Minato262/Design-Pattern
|
src/java/main/org/designpattern/manage/base/creationalpatterns/prototype/Prototype.java
|
Java
|
apache-2.0
| 1,057 |
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package validation
import (
"context"
"reflect"
"regexp"
"sync"
"time"
"github.com/golang/protobuf/ptypes"
"google.golang.org/genproto/googleapis/devtools/cloudtrace/v2"
genprotoStatus "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
const (
	// These restrictions can be found at
	// https://cloud.google.com/trace/docs/reference/v2/rpc/google.devtools.cloudtrace.v2
	maxAnnotationAttributes = 4
	maxAnnotationBytes      = 256
	maxAttributes           = 32
	maxAttributeKeyBytes    = 128
	maxAttributeValueBytes  = 256
	maxDisplayNameBytes     = 128
	maxLinks                = 128
	maxTimeEvents           = 32
	// Attribute keys identifying the exporting agent; one of them must be
	// present on every span.
	agent          = "g.co/agent"
	shortenedAgent = "agent"
)

var (
	// The exporter is responsible for mapping these special attributes to the correct
	// canonical Cloud Trace attributes (/http/method, /http/route, etc.)
	specialAttributes = map[string]struct{}{
		"http.method":      {},
		"http.route":       {},
		"http.status_code": {},
	}
	// Span fields that must be non-zero for a span to be accepted.
	requiredFields = []string{"Name", "SpanId", "DisplayName", "StartTime", "EndTime"}
	// projects/{project_id}/traces/{32-hex trace_id}/spans/{16-hex span_id}
	spanNameRegex = regexp.MustCompile("^projects/[^/]+/traces/[a-fA-F0-9]{32}/spans/[a-fA-F0-9]{16}$")
	// opentelemetry-<language> <version>; google-cloud-trace-exporter <version>
	agentRegex = regexp.MustCompile(`^opentelemetry-[a-zA-Z]+ \d+(?:\.\d+)+; google-cloud-trace-exporter \d+(?:\.\d+)+$`)
)
// SpanData wraps all the span data on the server into a struct.
type SpanData struct {
	// If a batch has a bad span, we don't write batch to memory, but still want
	// info on them for summary, so need SpansSummary
	SpansSummary      []*cloudtrace.Span
	UploadedSpanNames map[string]struct{} // names of spans already written, for duplicate detection
	UploadedSpans     []*cloudtrace.Span  // spans accepted and written to memory
	Mutex             sync.RWMutex        // guards the fields above across concurrent requests
}
// ValidateSpans checks that the spans conform to the API requirements.
// That is, required fields are present, and optional fields are of the correct form.
// If any violations are detected, the errors will be added to the result table.
// The returned error is the last validation failure observed across all spans,
// or nil when every span passed.
// NOTE(review): a span failing several checks is appended to SpansSummary once
// per failed check — confirm whether duplicate summary entries are intended.
func ValidateSpans(requestName string, spanData *SpanData, spans ...*cloudtrace.Span) error {
	var overallError error
	currentRequestSpanNames := make(map[string]struct{})
	for _, span := range spans {
		var currentError error
		// Validate required fields are present and semantically make sense.
		if err := CheckForRequiredFields(requiredFields, reflect.ValueOf(span), requestName); err != nil {
			addSpanToSummary(&spanData.SpansSummary, span, err)
			currentError = err
		}
		if err := validateName(span.Name, spanData.UploadedSpanNames, currentRequestSpanNames); err != nil {
			addSpanToSummary(&spanData.SpansSummary, span, err)
			currentError = err
		}
		if err := validateTimeStamps(span); err != nil {
			addSpanToSummary(&spanData.SpansSummary, span, err)
			currentError = err
		}
		if err := validateDisplayName(span.DisplayName); err != nil {
			addSpanToSummary(&spanData.SpansSummary, span, err)
			currentError = err
		}
		// Validate that if optional fields are present, they conform to the API.
		if err := validateAttributes(span.Attributes, maxAttributes); err != nil {
			addSpanToSummary(&spanData.SpansSummary, span, err)
			currentError = err
		}
		if err := validateTimeEvents(span.TimeEvents); err != nil {
			addSpanToSummary(&spanData.SpansSummary, span, err)
			currentError = err
		}
		if err := validateLinks(span.Links); err != nil {
			addSpanToSummary(&spanData.SpansSummary, span, err)
			currentError = err
		}
		if currentError == nil {
			// Record the span with an OK status.
			addSpanToSummary(&spanData.SpansSummary, span, nil)
		} else {
			overallError = currentError
		}
	}
	// overallError is nil when no span failed; the original's redundant
	// nil-check before returning has been collapsed.
	return overallError
}
// addSpanToSummary sets the span's status and adds it to the summary slice.
// A nil err marks the span OK; otherwise the gRPC status of err is recorded.
func addSpanToSummary(spanSummary *[]*cloudtrace.Span, span *cloudtrace.Span, err error) {
	setSpanStatus(span, err)
	*spanSummary = append(*spanSummary, span)
}
// setSpanStatus stamps the span with an OK status when err is nil, otherwise
// with the gRPC code and message extracted from err.
func setSpanStatus(span *cloudtrace.Span, err error) {
	if err != nil {
		st := status.Convert(err)
		span.Status = &genprotoStatus.Status{
			Code:    int32(st.Code()),
			Message: st.Message(),
		}
		return
	}
	span.Status = &genprotoStatus.Status{
		Code:    int32(codes.OK),
		Message: "OK",
	}
}
// AddSpans adds the given spans to the list of uploaded spans.
// Each span's name is also recorded so later uploads can detect duplicates.
func AddSpans(spanData *SpanData, spans ...*cloudtrace.Span) {
	for _, span := range spans {
		spanData.UploadedSpans = append(spanData.UploadedSpans, span)
		spanData.UploadedSpanNames[span.Name] = struct{}{}
	}
}
// Delay will block for the specified amount of time.
// Used to delay writing spans to memory.
func Delay(ctx context.Context, delay time.Duration) error {
select {
case <-ctx.Done():
return ctx.Err()
case <-time.After(delay):
return nil
}
}
// AccessSpan returns the span at the given index if it is in range.
// If it is not in range, nil is returned.
func AccessSpan(index int, uploadedSpans []*cloudtrace.Span) *cloudtrace.Span {
	if 0 <= index && index < len(uploadedSpans) {
		return uploadedSpans[index]
	}
	return nil
}
// validateDisplayName verifies that the display name has at most 128 bytes.
func validateDisplayName(displayName *cloudtrace.TruncatableString) error {
	if len(displayName.Value) <= maxDisplayNameBytes {
		return nil
	}
	return statusInvalidDisplayName
}
// validateName verifies that the span name is not a duplicate — neither of a
// previously uploaded span nor of another span in this request — and is of
// the form:
// projects/{project_id}/traces/{trace_id}/spans/{span_id}
// where trace_id is a 32-char hex encoding, and span_id is a 16-char hex encoding.
// On success the name is recorded in currentRequestSpanNames.
func validateName(name string, spanNames map[string]struct{}, currentRequestSpanNames map[string]struct{}) error {
	_, uploadedBefore := spanNames[name]
	_, seenThisRequest := currentRequestSpanNames[name]
	if uploadedBefore || seenThisRequest {
		return statusDuplicateSpanName
	}
	if !spanNameRegex.MatchString(name) {
		return statusInvalidSpanName
	}
	currentRequestSpanNames[name] = struct{}{}
	return nil
}
// validateTimeStamps verifies that both timestamps parse and that the start
// time of the span is strictly before its end time.
func validateTimeStamps(span *cloudtrace.Span) error {
	start, startErr := ptypes.Timestamp(span.StartTime)
	end, endErr := ptypes.Timestamp(span.EndTime)
	if startErr != nil || endErr != nil {
		return statusMalformedTimestamp
	}
	if start.Before(end) {
		return nil
	}
	return statusInvalidInterval
}
// validateAttributes verifies that a span has at most 32 attributes, where each attribute is a dictionary.
// The key is a string with max length of 128 bytes, and the value can be a string, int64 or bool.
// If the value is a string, it has a max length of 256 bytes.
// It also rejects untranslated special attributes (http.method etc.) and
// requires that exactly the agent attribute ("g.co/agent" or "agent") be
// present with a well-formed value.
func validateAttributes(attributes *cloudtrace.Span_Attributes, maxAttributes int) error {
	// No attribute map at all is acceptable for annotations; the missing
	// agent check below only applies when an attribute map exists.
	if attributes == nil {
		return nil
	}
	if len(attributes.AttributeMap) > maxAttributes {
		return statusTooManyAttributes
	}
	containsAgent := false
	for k, v := range attributes.AttributeMap {
		if len(k) > maxAttributeKeyBytes {
			return statusInvalidAttributeKey
		}
		// Ensure that the special attributes have been translated properly.
		if _, ok := specialAttributes[k]; ok {
			return statusUnmappedSpecialAttribute
		}
		if val, ok := v.Value.(*cloudtrace.AttributeValue_StringValue); ok {
			if len(val.StringValue.Value) > maxAttributeValueBytes {
				return statusInvalidAttributeValue
			}
			// The span must contain the attribute "g.co/agent" or "agent".
			if k == agent || k == shortenedAgent {
				containsAgent = true
				if err := validateAgent(val.StringValue.Value); err != nil {
					return err
				}
			}
		}
	}
	if !containsAgent {
		return statusMissingAgentAttribute
	}
	return nil
}
// validateAgent checks that the g.co/agent or agent attribute is of the form
// opentelemetry-<language_code> <ot_version>; google-cloud-trace-exporter <exporter_version>
func validateAgent(agent string) error {
	if agentRegex.MatchString(agent) {
		return nil
	}
	return statusInvalidAgentAttribute
}
// validateTimeEvents verifies that a span has at most 32 TimeEvents.
// A TimeEvent consists of a TimeStamp, and either an Annotation or a MessageEvent.
// An Annotation is a dictionary that maps a string description to a list of attributes.
// A MessageEvent describes messages sent between spans and must contain an ID and size.
func validateTimeEvents(events *cloudtrace.Span_TimeEvents) error {
	// Time events are optional.
	if events == nil {
		return nil
	}
	if len(events.TimeEvent) > maxTimeEvents {
		return statusTooManyTimeEvents
	}
	for _, event := range events.TimeEvent {
		// Every event must carry a timestamp.
		if event.Time == nil {
			return statusTimeEventMissingTime
		}
		switch e := event.Value.(type) {
		case *cloudtrace.Span_TimeEvent_Annotation_:
			// Annotation descriptions are capped at 256 bytes and may carry
			// at most 4 attributes.
			if len(e.Annotation.Description.Value) > maxAnnotationBytes {
				return statusInvalidAnnotation
			}
			if err := validateAttributes(e.Annotation.Attributes, maxAnnotationAttributes); err != nil {
				return err
			}
		case *cloudtrace.Span_TimeEvent_MessageEvent_:
			// Message events must have a positive id and uncompressed size.
			if e.MessageEvent.Id <= 0 || e.MessageEvent.UncompressedSizeBytes <= 0 {
				return statusInvalidMessageEvent
			}
		}
	}
	return nil
}
// validateLinks verifies that a span has at most 128 links, which are used to link the span to another span.
// A link contains a traceId, spanId, the type of the span, and at most 32 attributes.
func validateLinks(links *cloudtrace.Span_Links) error {
	if links == nil {
		return nil
	}
	if len(links.Link) > maxLinks {
		return statusTooManyLinks
	}
	for _, l := range links.Link {
		if l.TraceId == "" || l.SpanId == "" {
			return statusInvalidLink
		}
		if err := validateAttributes(l.Attributes, maxAttributes); err != nil {
			return err
		}
	}
	return nil
}
|
googleinterns/cloud-operations-api-mock
|
internal/validation/mock_trace_validation.go
|
GO
|
apache-2.0
| 10,158 |
package org.cnodejs.android.md.ui.jsbridge;
import android.webkit.JavascriptInterface;
import org.cnodejs.android.md.util.FormatUtils;
import org.joda.time.DateTime;
/**
 * JavaScript bridge exposing time-formatting helpers to WebView content under
 * the bridge name {@link #NAME}.
 */
public final class FormatJavascriptInterface {

    // Name under which this bridge is registered via addJavascriptInterface.
    public static final String NAME = "formatBridge";

    /**
     * Returns a human-readable relative time span for the given time.
     *
     * @param time a date-time string parseable by Joda-Time's {@code DateTime(Object)}
     * @return the relative time span text produced by {@code FormatUtils}
     */
    @JavascriptInterface
    public String getRelativeTimeSpanString(String time) {
        return FormatUtils.getRelativeTimeSpanString(new DateTime(time));
    }
}
|
TakWolf/CNode-Material-Design
|
app/src/main/java/org/cnodejs/android/md/ui/jsbridge/FormatJavascriptInterface.java
|
Java
|
apache-2.0
| 439 |
package com.lingju.assistant.view;
import android.app.Dialog;
import android.content.Context;
import android.os.Bundle;
import android.view.View;
import android.widget.RadioButton;
import android.widget.RadioGroup;
import com.lingju.assistant.R;
import java.util.HashMap;
import java.util.Map;
import butterknife.BindView;
import butterknife.ButterKnife;
import butterknife.OnClick;
/**
* Created by Ken on 2017/2/18.
*/
/**
 * Dialog presenting three radio buttons from which the user selects an alarm
 * item. The currently configured item is pre-checked; picking any button
 * notifies the listener and dismisses the dialog.
 */
public class AlarmItemDialog extends Dialog {

    @BindView(R.id.alarm_item_btns)
    RadioGroup mAlarmItemBtns;
    @BindView(R.id.alarm_item_rb1)
    RadioButton mAlarmItemRb1;
    @BindView(R.id.alarm_item_rb2)
    RadioButton mAlarmItemRb2;
    @BindView(R.id.alarm_item_rb3)
    RadioButton mAlarmItemRb3;

    // Label of the currently selected item.
    private String mAlarmItem;
    private OnItemSelectedListener mSelectedListener;
    // Maps radio-button label -> view id; filled after the layout is inflated.
    private Map<String, Integer> itemMaps = new HashMap<>();

    /**
     * @param context  host context
     * @param item     label of the item to pre-select (may not match any button)
     * @param listener callback invoked when the user picks an item; may be null
     */
    public AlarmItemDialog(Context context, String item, OnItemSelectedListener listener) {
        super(context, R.style.lingju_commond_dialog);
        mAlarmItem = item;
        mSelectedListener = listener;
    }

    protected AlarmItemDialog(Context context, int themeResId) {
        super(context, themeResId);
    }

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.dialog_alarm_item);
        ButterKnife.bind(this);
        fillMap();
        // Fix: itemMaps.get() returns null when mAlarmItem matches none of the
        // three button labels, and auto-unboxing that null Integer would throw
        // a NullPointerException. Only pre-check when a mapping exists.
        Integer checkedId = itemMaps.get(mAlarmItem);
        if (checkedId != null) {
            mAlarmItemBtns.check(checkedId);
        }
        mAlarmItemBtns.setOnCheckedChangeListener(new RadioGroup.OnCheckedChangeListener() {
            @Override
            public void onCheckedChanged(RadioGroup group, int checkedId) {
                RadioButton selectedBtn = (RadioButton) findViewById(checkedId);
                mAlarmItem = selectedBtn.getText().toString();
                if (mSelectedListener != null)
                    mSelectedListener.onSelected(mAlarmItem);
                dismiss();
            }
        });
    }

    // Builds the label -> view-id lookup from the inflated radio buttons.
    private void fillMap() {
        itemMaps.put(mAlarmItemRb1.getText().toString(), R.id.alarm_item_rb1);
        itemMaps.put(mAlarmItemRb2.getText().toString(), R.id.alarm_item_rb2);
        itemMaps.put(mAlarmItemRb3.getText().toString(), R.id.alarm_item_rb3);
    }

    @OnClick({R.id.tv_cancel})
    public void onClick(View view) {
        switch (view.getId()) {
            case R.id.tv_cancel:
                dismiss();
                break;
        }
    }

    /** Callback for the user's selection. */
    public interface OnItemSelectedListener {
        void onSelected(String item);
    }
}
|
LingjuAI/AssistantBySDK
|
app/src/main/java/com/lingju/assistant/view/AlarmItemDialog.java
|
Java
|
apache-2.0
| 2,902 |
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.example.android.sunshine.app.data;
import android.annotation.TargetApi;
import android.content.ContentProvider;
import android.content.ContentValues;
import android.content.UriMatcher;
import android.database.Cursor;
import android.database.sqlite.SQLiteDatabase;
import android.database.sqlite.SQLiteQueryBuilder;
import android.net.Uri;
public class WeatherProvider extends ContentProvider {
// The URI Matcher used by this content provider.
private static final UriMatcher sUriMatcher = buildUriMatcher();
private WeatherDbHelper mOpenHelper;
static final int WEATHER = 100;
static final int WEATHER_WITH_LOCATION = 101;
static final int WEATHER_WITH_LOCATION_AND_DATE = 102;
static final int LOCATION = 300;
private static final SQLiteQueryBuilder sWeatherByLocationSettingQueryBuilder;
static{
sWeatherByLocationSettingQueryBuilder = new SQLiteQueryBuilder();
//This is an inner join which looks like
//weather INNER JOIN location ON weather.location_id = location._id
sWeatherByLocationSettingQueryBuilder.setTables(
WeatherContract.WeatherEntry.TABLE_NAME + " INNER JOIN " +
WeatherContract.LocationEntry.TABLE_NAME +
" ON " + WeatherContract.WeatherEntry.TABLE_NAME +
"." + WeatherContract.WeatherEntry.COLUMN_LOC_KEY +
" = " + WeatherContract.LocationEntry.TABLE_NAME +
"." + WeatherContract.LocationEntry._ID);
}
//location.location_setting = ?
private static final String sLocationSettingSelection =
WeatherContract.LocationEntry.TABLE_NAME+
"." + WeatherContract.LocationEntry.COLUMN_LOCATION_SETTING + " = ? ";
//location.location_setting = ? AND date >= ?
private static final String sLocationSettingWithStartDateSelection =
WeatherContract.LocationEntry.TABLE_NAME+
"." + WeatherContract.LocationEntry.COLUMN_LOCATION_SETTING + " = ? AND " +
WeatherContract.WeatherEntry.COLUMN_DATE + " >= ? ";
//location.location_setting = ? AND date = ?
private static final String sLocationSettingAndDaySelection =
WeatherContract.LocationEntry.TABLE_NAME +
"." + WeatherContract.LocationEntry.COLUMN_LOCATION_SETTING + " = ? AND " +
WeatherContract.WeatherEntry.COLUMN_DATE + " = ? ";
private Cursor getWeatherByLocationSetting(Uri uri, String[] projection, String sortOrder) {
String locationSetting = WeatherContract.WeatherEntry.getLocationSettingFromUri(uri);
long startDate = WeatherContract.WeatherEntry.getStartDateFromUri(uri);
String[] selectionArgs;
String selection;
if (startDate == 0) {
selection = sLocationSettingSelection;
selectionArgs = new String[]{locationSetting};
} else {
selectionArgs = new String[]{locationSetting, Long.toString(startDate)};
selection = sLocationSettingWithStartDateSelection;
}
return sWeatherByLocationSettingQueryBuilder.query(mOpenHelper.getReadableDatabase(),
projection,
selection,
selectionArgs,
null,
null,
sortOrder
);
}
private Cursor getWeatherByLocationSettingAndDate(
Uri uri, String[] projection, String sortOrder) {
String locationSetting = WeatherContract.WeatherEntry.getLocationSettingFromUri(uri);
long date = WeatherContract.WeatherEntry.getDateFromUri(uri);
return sWeatherByLocationSettingQueryBuilder.query(mOpenHelper.getReadableDatabase(),
projection,
sLocationSettingAndDaySelection,
new String[]{locationSetting, Long.toString(date)},
null,
null,
sortOrder
);
}
/*
Students: Here is where you need to create the UriMatcher. This UriMatcher will
match each URI to the WEATHER, WEATHER_WITH_LOCATION, WEATHER_WITH_LOCATION_AND_DATE,
and LOCATION integer constants defined above. You can test this by uncommenting the
testUriMatcher test within TestUriMatcher.
*/
static UriMatcher buildUriMatcher() {
// 1) The code passed into the constructor represents the code to return for the root
// URI. It's common to use NO_MATCH as the code for this case. Add the constructor below.
UriMatcher aURIMatcher = new UriMatcher(UriMatcher.NO_MATCH);
// 2) Use the addURI function to match each of the types. Use the constants from
// WeatherContract to help define the types to the UriMatcher.
aURIMatcher.addURI(WeatherContract.CONTENT_AUTHORITY, WeatherContract.PATH_WEATHER, WEATHER);
aURIMatcher.addURI(WeatherContract.CONTENT_AUTHORITY, WeatherContract.PATH_WEATHER+"/*", WEATHER_WITH_LOCATION);
aURIMatcher.addURI(WeatherContract.CONTENT_AUTHORITY, WeatherContract.PATH_WEATHER+"/*/#", WEATHER_WITH_LOCATION_AND_DATE);
aURIMatcher.addURI(WeatherContract.CONTENT_AUTHORITY, WeatherContract.PATH_LOCATION, LOCATION);
// 3) Return the new matcher!
return aURIMatcher;
}
/*
Students: We've coded this for you. We just create a new WeatherDbHelper for later use
here.
*/
@Override
public boolean onCreate() {
mOpenHelper = new WeatherDbHelper(getContext());
return true;
}
/*
Students: Here's where you'll code the getType function that uses the UriMatcher. You can
test this by uncommenting testGetType in TestProvider.
*/
@Override
public String getType(Uri uri) {
// Use the Uri Matcher to determine what kind of URI this is.
final int match = sUriMatcher.match(uri);
switch (match) {
// Student: Uncomment and fill out these two cases
case WEATHER_WITH_LOCATION_AND_DATE:
return WeatherContract.WeatherEntry.CONTENT_ITEM_TYPE;
case WEATHER_WITH_LOCATION:
return WeatherContract.WeatherEntry.CONTENT_TYPE;
case WEATHER:
return WeatherContract.WeatherEntry.CONTENT_TYPE;
case LOCATION:
return WeatherContract.LocationEntry.CONTENT_TYPE;
default:
throw new UnsupportedOperationException("Unknown uri: " + uri);
}
}
@Override
public Cursor query(Uri uri, String[] projection, String selection, String[] selectionArgs,
String sortOrder) {
// Here's the switch statement that, given a URI, will determine what kind of request it is,
// and query the database accordingly.
Cursor retCursor;
switch (sUriMatcher.match(uri)) {
// "weather/*/*"
case WEATHER_WITH_LOCATION_AND_DATE:
{
retCursor = getWeatherByLocationSettingAndDate(uri, projection, sortOrder);
break;
}
// "weather/*"
case WEATHER_WITH_LOCATION: {
retCursor = getWeatherByLocationSetting(uri, projection, sortOrder);
break;
}
// "weather"
case WEATHER: {
retCursor = mOpenHelper.getReadableDatabase().query(
WeatherContract.WeatherEntry.TABLE_NAME,
projection,
selection,
selectionArgs,
null,
null,
null);
break;
}
// "location"
case LOCATION: {
retCursor = mOpenHelper.getReadableDatabase().query(
WeatherContract.LocationEntry.TABLE_NAME,
projection,
selection,
selectionArgs,
null,
null,
null);
break;
}
default:
throw new UnsupportedOperationException("Unknown uri: " + uri);
}
retCursor.setNotificationUri(getContext().getContentResolver(), uri);
return retCursor;
}
/*
Student: Add the ability to insert Locations to the implementation of this function.
*/
@Override
public Uri insert(Uri uri, ContentValues values) {
final SQLiteDatabase db = mOpenHelper.getWritableDatabase();
final int match = sUriMatcher.match(uri);
Uri returnUri;
switch (match) {
case WEATHER: {
normalizeDate(values);
long _id = db.insert(WeatherContract.WeatherEntry.TABLE_NAME, null, values);
if ( _id > 0 )
returnUri = WeatherContract.WeatherEntry.buildWeatherUri(_id);
else
throw new android.database.SQLException("Failed to insert row into " + uri);
break;
}
case LOCATION: {
long _id = db.insert(WeatherContract.LocationEntry.TABLE_NAME, null, values);
if ( _id > 0 )
returnUri = WeatherContract.LocationEntry.buildLocationUri(_id);
else
throw new android.database.SQLException("Failed to insert row into " + uri);
break;
}
default:
throw new UnsupportedOperationException("Unknown uri: " + uri);
}
getContext().getContentResolver().notifyChange(uri, null);
db.close();
return returnUri;
}
@Override
public int delete(Uri uri, String selection, String[] selectionArgs) {
// Student: Start by getting a writable database
// Student: Use the uriMatcher to match the WEATHER and LOCATION URI's we are going to
// handle. If it doesn't match these, throw an UnsupportedOperationException.
// Student: A null value deletes all rows. In my implementation of this, I only notified
// the uri listeners (using the content resolver) if the rowsDeleted != 0 or the selection
// is null.
// Oh, and you should notify the listeners here.
// Student: return the actual rows deleted
final SQLiteDatabase db = mOpenHelper.getWritableDatabase();
final int match = sUriMatcher.match(uri);
Uri returnUri;
int nbDeletedRows=0;
if (null == selection) selection = "1";
switch (match) {
case WEATHER: {
nbDeletedRows = db.delete(WeatherContract.WeatherEntry.TABLE_NAME, selection, selectionArgs);
break;
}
case LOCATION: {
nbDeletedRows = db.delete(WeatherContract.LocationEntry.TABLE_NAME, selection, selectionArgs);
break;
}
default:
throw new UnsupportedOperationException("Unknown uri: " + uri);
}
if (nbDeletedRows != 0)
{
getContext().getContentResolver().notifyChange(uri, null);
}
db.close();
return nbDeletedRows;
}
private void normalizeDate(ContentValues values) {
// normalize the date value
if (values.containsKey(WeatherContract.WeatherEntry.COLUMN_DATE)) {
long dateValue = values.getAsLong(WeatherContract.WeatherEntry.COLUMN_DATE);
values.put(WeatherContract.WeatherEntry.COLUMN_DATE, WeatherContract.normalizeDate(dateValue));
}
}
@Override
public int update(
Uri uri, ContentValues values, String selection, String[] selectionArgs) {
// Student: This is a lot like the delete function. We return the number of rows impacted
// by the update.
final SQLiteDatabase db = mOpenHelper.getWritableDatabase();
final int match = sUriMatcher.match(uri);
Uri returnUri;
int nbUpdatedRows=0;
switch (match) {
case WEATHER: {
nbUpdatedRows = db.update(WeatherContract.WeatherEntry.TABLE_NAME, values, selection, selectionArgs);
break;
}
case LOCATION: {
nbUpdatedRows = db.update(WeatherContract.LocationEntry.TABLE_NAME, values, selection, selectionArgs);
break;
}
default:
throw new UnsupportedOperationException("Unknown uri: " + uri);
}
if (nbUpdatedRows != 0) getContext().getContentResolver().notifyChange(uri, null);
db.close();
return nbUpdatedRows;
}
@Override
public int bulkInsert(Uri uri, ContentValues[] values) {
final SQLiteDatabase db = mOpenHelper.getWritableDatabase();
final int match = sUriMatcher.match(uri);
switch (match) {
case WEATHER:
db.beginTransaction();
int returnCount = 0;
try {
for (ContentValues value : values) {
normalizeDate(value);
long _id = db.insert(WeatherContract.WeatherEntry.TABLE_NAME, null, value);
if (_id != -1) {
returnCount++;
}
}
db.setTransactionSuccessful();
} finally {
db.endTransaction();
}
getContext().getContentResolver().notifyChange(uri, null);
return returnCount;
default:
return super.bulkInsert(uri, values);
}
}
// You do not need to call this method. This is a method specifically to assist the testing
// framework in running smoothly. You can read more at:
// http://developer.android.com/reference/android/content/ContentProvider.html#shutdown()
@Override
@TargetApi(11)
public void shutdown() {
mOpenHelper.close();
super.shutdown();
}
}
|
aboukaram/sunshine-version-chadi
|
app/src/main/java/com/example/android/sunshine/app/data/WeatherProvider.java
|
Java
|
apache-2.0
| 14,859 |
/*
* Copyright 2010-2011 Nabeel Mukhtar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.google.code.linkedinapi.schema;
/**
* <p>Java class for anonymous complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType>
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <sequence>
* <element ref="{}connect-type"/>
* <element ref="{}authorization"/>
* </sequence>
* </restriction>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
public interface InvitationRequest
    extends SchemaEntity
{

    /**
     * Gets the value of the connectType property.
     *
     * @return
     *     possible object is
     *     {@link InviteConnectType }
     *
     */
    InviteConnectType getConnectType();

    /**
     * Sets the value of the connectType property.
     *
     * @param value
     *     allowed object is
     *     {@link InviteConnectType }
     *
     */
    void setConnectType(InviteConnectType value);

    /**
     * Gets the value of the authorization property.
     *
     * @return
     *     possible object is
     *     {@link Authorization }
     *
     */
    Authorization getAuthorization();

    /**
     * Sets the value of the authorization property.
     *
     * @param value
     *     allowed object is
     *     {@link Authorization }
     *
     */
    void setAuthorization(Authorization value);

    // NOTE(review): this interface mirrors an XML schema fragment requiring
    // both connect-type and authorization elements; nullability of the getters
    // is not stated here — presumably null until set. TODO confirm against the
    // concrete implementation.
}
|
shisoft/LinkedIn-J
|
core/src/main/java/com/google/code/linkedinapi/schema/InvitationRequest.java
|
Java
|
apache-2.0
| 2,192 |
/**
 * A node in the documentation tree: a section carries `children`,
 * a leaf carries a `url` pointing at the rendered HTML page.
 */
export interface DocItem {
  title: string
  url?: string
  children?: DocItem[]
}
/**
 * Static table of contents for the in-app documentation.
 * Each top-level entry is a section whose children link to HTML pages
 * shipped under assets/docs/.
 */
export const DOCS: DocItem[] = [
  {
    title: 'Projects',
    children: [
      {
        title: 'How to create a project?',
        url: 'assets/docs/projects/how_to_create_project.html'
      },
      {
        title: 'How to open, save or import a project?',
        url: 'assets/docs/projects/How_to_open_save_or_import_a_project.html'
      }
    ]
  },
  {
    title: 'Plugins',
    children: [
      {
        title: 'What is a plugin and how does it work?',
        url: 'assets/docs/plugins/How_does_a_plugin_work.html'
      }
    ]
  },
  {
    // Fix: user-facing typo — was "Execution encironments".
    title: 'Execution environments',
    children: [
      {
        title: 'What is an entrypoint?',
        url: 'assets/docs/exec_env/What_is_an_entrypoint.html'
      },
      {
        title: 'How to change run commands and/or entrypoint?',
        url: 'assets/docs/exec_env/How_to_change_run_commands_andor_entrypoint.html'
      },
      {
        title: 'How to change editor themes?',
        url: 'assets/docs/exec_env/How_to_change_editor_themes.html'
      },
      {
        title: 'How to manage files and folders in my project?',
        url: 'assets/docs/exec_env/How_to_manage_files_and_folders_in_my_project.html'
      }
    ]
  },
  {
    title: 'Marketplace',
    children: [
      {
        title: 'How to install a plugin?',
        url: 'assets/docs/marketplace/How_to_install_a_plugin.html'
      },
      {
        title: 'Can I install more than one plugin?',
        url: 'assets/docs/marketplace/Can_I_install_more_than_one_plugin.html'
      },
      {
        title: 'How to create a plugin?',
        url: 'assets/docs/marketplace/How_to_create_a_plugin.html'
      },
      {
        title: 'How to become a developer?',
        url: 'assets/docs/marketplace/How_to_become_a_developer.html'
      }
    ]
  },
  {
    title: 'Miscellaneous',
    children: [
      {
        title: 'What are Identicons?',
        url: 'assets/docs/miscellaneous/What_are_Identicons.html'
      },
      {
        title: 'What are different user permissions?',
        url: 'assets/docs/miscellaneous/What_are_different_user_permissions.html'
      },
      {
        title: 'How to share code snippets from my project?',
        url: 'assets/docs/miscellaneous/How_to_share_code_snippets_from_my_project.html'
      }
    ]
  }
]
|
yashdsaraf/reimagined-eureka
|
frontend/src/app/components/docs/docs-content.ts
|
TypeScript
|
apache-2.0
| 2,385 |
package com.wuyin.supermarket.manager;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
/**
* Created by wuyin on 2016/5/2.
* 管理线程池
*/
/**
 * Singleton that hands out a lazily created thread-pool proxy.
 *
 * Note: {@link #createLongPool()} and {@link #createShortPool(int, int, long)}
 * share the single {@code poolProxy} field, so whichever is called first fixes
 * the pool's configuration for the whole process (preserved behavior).
 */
public class ThreadManager {

    ThreadPoolProxy poolProxy;
    private static ThreadManager instance = new ThreadManager();

    private ThreadManager() {
    }

    /** @return the process-wide ThreadManager instance */
    public static ThreadManager getInstance() {
        return instance;
    }

    /**
     * Thin proxy around a {@link ThreadPoolExecutor} that creates the executor
     * on first use.
     */
    public class ThreadPoolProxy {

        ThreadPoolExecutor mPoolExecutor;
        // Pool configuration, captured at construction and applied lazily.
        private int corePoolSize;
        private int maximunPoolSize;
        private long aliveTime;

        /**
         * @param corePoolSize    number of core threads kept in the pool
         * @param maximunPoolSize maximum threads opened when the work queue is full
         * @param time            idle thread keep-alive time in milliseconds
         */
        public ThreadPoolProxy(int corePoolSize, int maximunPoolSize, long time) {
            this.corePoolSize = corePoolSize;
            this.maximunPoolSize = maximunPoolSize;
            this.aliveTime = time;
        }

        /**
         * Submits a task, creating the executor on first call. Synchronized so
         * concurrent first calls cannot race and build two executors.
         *
         * @param runnable task to run asynchronously
         */
        public synchronized void execute(Runnable runnable) {
            if (mPoolExecutor == null) {
                // Overflow tasks beyond the queue capacity (10) trigger the
                // extra (maximum) threads; idle threads die after aliveTime ms.
                mPoolExecutor = new ThreadPoolExecutor(
                        corePoolSize, maximunPoolSize, aliveTime, TimeUnit.MILLISECONDS,
                        new LinkedBlockingQueue<Runnable>(10)
                );
            }
            mPoolExecutor.execute(runnable);
        }

        /**
         * Removes a not-yet-started task from the work queue.
         *
         * @param runnable the task to cancel
         */
        public void cancel(Runnable runnable) {
            // Fix: the original condition required isTerminated() to be true,
            // but a terminated pool has an empty queue, so remove() could never
            // do anything. Remove while the pool is still live instead.
            if (mPoolExecutor != null && !mPoolExecutor.isShutdown() && !mPoolExecutor.isTerminated()) {
                mPoolExecutor.remove(runnable);
            }
        }
    }

    /**
     * Returns the shared pool proxy (5 core / 5 max threads, 5 s keep-alive if
     * it has not been configured yet).
     */
    public synchronized ThreadPoolProxy createLongPool() {
        return createShortPool(5, 5, 5000);
    }

    /**
     * Returns the shared pool proxy, creating it with the given configuration
     * on first call; later calls ignore the arguments (preserved behavior).
     *
     * @param size    pool size
     * @param aliSize extra threads opened when the queue is full
     * @param time    keep-alive time in milliseconds
     */
    public synchronized ThreadPoolProxy createShortPool(int size, int aliSize, long time) {
        if (poolProxy == null) {
            poolProxy = new ThreadPoolProxy(size, aliSize, time);
        }
        return poolProxy;
    }
}
|
wuyinlei/SuperMarket
|
src/main/java/com/wuyin/supermarket/manager/ThreadManager.java
|
Java
|
apache-2.0
| 3,126 |
#
# Author:: Joshua Timberman (<joshua@getchef.com>)
# Copyright (c) 2014, Chef Software, Inc. <legal@getchef.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'spec_helper'
# Unit specs for the HomebrewPackage resource: a freshly constructed resource
# must be the right class, declare the :homebrew_package resource_name, and
# default to the Homebrew package provider.
describe Chef::Resource::HomebrewPackage, 'initialize' do
  # Resource under test, built with an arbitrary package name.
  let(:resource) { Chef::Resource::HomebrewPackage.new('emacs') }

  it 'returns a Chef::Resource::HomebrewPackage' do
    expect(resource).to be_a_kind_of(Chef::Resource::HomebrewPackage)
  end

  it 'sets the resource_name to :homebrew_package' do
    expect(resource.resource_name).to eql(:homebrew_package)
  end

  it 'sets the provider to Chef::Provider::Package::Homebrew' do
    expect(resource.provider).to eql(Chef::Provider::Package::Homebrew)
  end
end
|
jordane/chef
|
spec/unit/resource/homebrew_package_spec.rb
|
Ruby
|
apache-2.0
| 1,212 |
/*
* Copyright 2015 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.hawkular.client.android.backend.model;
import android.os.Parcel;
import android.os.Parcelable;
import android.support.annotation.NonNull;
import com.google.gson.annotations.SerializedName;
/**
 * Parcelable model for a Hawkular environment, identified by its id.
 */
public final class Environment implements Parcelable {

    @SerializedName("id")
    private String id;

    public Environment(@NonNull String id) {
        this.id = id;
    }

    public String getId() {
        return id;
    }

    // Fix: the Parcelable contract requires a field named CREATOR that is
    // public static final; the original omitted "final".
    public static final Creator<Environment> CREATOR = new Creator<Environment>() {
        @Override
        public Environment createFromParcel(Parcel parcel) {
            return new Environment(parcel);
        }

        @Override
        public Environment[] newArray(int size) {
            return new Environment[size];
        }
    };

    // Reconstructs the model from a parcel; field order must match writeToParcel.
    private Environment(Parcel parcel) {
        this.id = parcel.readString();
    }

    @Override
    public void writeToParcel(Parcel parcel, int flags) {
        parcel.writeString(id);
    }

    @Override
    public int describeContents() {
        // No file descriptors in the marshalled representation.
        return 0;
    }
}
|
pilhuhn/hawkular-android-client
|
src/main/java/org/hawkular/client/android/backend/model/Environment.java
|
Java
|
apache-2.0
| 1,729 |
/*
* Autopsy Forensic Browser
*
* Copyright 2014 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.autopsy.modules.photoreccarver;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.logging.Level;
import org.sleuthkit.autopsy.casemodule.Case;
import org.sleuthkit.autopsy.casemodule.services.FileManager;
import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.autopsy.coreutils.XMLUtil;
import org.sleuthkit.datamodel.AbstractFile;
import org.sleuthkit.datamodel.LayoutFile;
import org.sleuthkit.datamodel.CarvedFileContainer;
import org.sleuthkit.datamodel.TskCoreException;
import org.sleuthkit.datamodel.TskFileRange;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;
/**
* This class parses the xml output from PhotoRec, and creates a list of entries to add back in to be processed.
*/
/**
 * Parses the XML report produced by PhotoRec and converts each carved file
 * entry into a CarvedFileContainer that is added to the case database.
 */
class PhotoRecCarverOutputParser {

    // Base directory of the PhotoRec output; used to strip path prefixes
    // from carved file names.
    private final Path basePath;
    private static final Logger logger = Logger.getLogger(PhotoRecCarverFileIngestModule.class.getName());

    PhotoRecCarverOutputParser(Path base) {
        basePath = base;
    }

    /**
     * Parses the given report.xml file, creating a List<LayoutFile> to return. Uses FileManager to add all carved files
     * that it finds to the TSK database as $CarvedFiles under the passed-in parent id.
     *
     * NOTE(review): this method returns null for an unreadable/empty report but
     * an empty list on parse errors — callers must handle both; consider
     * unifying on an empty list. Left as-is here (documentation-only change).
     *
     * @param xmlInputFile The XML file we are trying to read and parse
     * @param id The parent id of the unallocated space we are parsing.
     * @param af The AbstractFile representing the unallocated space we are parsing.
     * @return A List<LayoutFile> containing all the files added into the database
     * @throws FileNotFoundException
     * @throws IOException
     */
    List<LayoutFile> parse(File xmlInputFile, long id, AbstractFile af) throws FileNotFoundException, IOException {
        try {
            final Document doc = XMLUtil.loadDoc(PhotoRecCarverOutputParser.class, xmlInputFile.toString());
            if (doc == null) {
                return null;
            }

            Element root = doc.getDocumentElement();
            if (root == null) {
                logger.log(Level.SEVERE, "Error loading config file: invalid file format (bad root)."); //NON-NLS
                return null;
            }

            NodeList fileObjects = root.getElementsByTagName("fileobject"); //NON-NLS
            final int numberOfFiles = fileObjects.getLength();
            if (numberOfFiles == 0) {
                return null;
            }

            String fileName;
            Long fileSize;
            NodeList fileNames;
            NodeList fileSizes;
            NodeList fileRanges;
            Element entry;
            Path filePath;
            FileManager fileManager = Case.getCurrentCase().getServices().getFileManager();

            // create and initialize the list to put into the database
            List<CarvedFileContainer> carvedFileContainer = new ArrayList<>();

            for (int fileIndex = 0; fileIndex < numberOfFiles; ++fileIndex) {
                entry = (Element) fileObjects.item(fileIndex);
                fileNames = entry.getElementsByTagName("filename"); //NON-NLS
                fileSizes = entry.getElementsByTagName("filesize"); //NON-NLS
                fileRanges = entry.getElementsByTagName("byte_run"); //NON-NLS

                fileSize = Long.parseLong(fileSizes.item(0).getTextContent());
                fileName = fileNames.item(0).getTextContent();
                filePath = Paths.get(fileName);
                // Strip the PhotoRec output directory prefix so only the bare
                // carved-file name is stored.
                if (filePath.startsWith(basePath)) {
                    fileName = filePath.getFileName().toString();
                }

                List<TskFileRange> tskRanges = new ArrayList<>();
                for (int rangeIndex = 0; rangeIndex < fileRanges.getLength(); ++rangeIndex) {

                    // byte_run attributes are offsets within the unallocated
                    // block (af); convert to an image offset for the database.
                    Long img_offset = Long.parseLong(((Element) fileRanges.item(rangeIndex)).getAttribute("img_offset")); //NON-NLS
                    Long len = Long.parseLong(((Element) fileRanges.item(rangeIndex)).getAttribute("len")); //NON-NLS

                    // Verify PhotoRec's output
                    long fileByteStart = af.convertToImgOffset(img_offset);
                    if (fileByteStart == -1) {
                        // This better never happen... Data for this file is corrupted. Skip it.
                        logger.log(Level.INFO, "Error while parsing PhotoRec output for file {0}", fileName); //NON-NLS
                        continue;
                    }

                    // check that carved file is within unalloc block
                    long fileByteEnd = img_offset + len;
                    if (fileByteEnd > af.getSize()) {
                        // Trim the reported file size by however far the last
                        // run overshoots the unallocated block; drop the run
                        // entirely if the overshoot exceeds the file size.
                        long overshoot = fileByteEnd - af.getSize();
                        if (fileSize > overshoot) {
                            fileSize = fileSize - overshoot;
                        } else {
                            // This better never happen... Data for this file is corrupted. Skip it.
                            continue;
                        }
                    }

                    tskRanges.add(new TskFileRange(fileByteStart, len, rangeIndex));
                }

                if (!tskRanges.isEmpty()) {
                    carvedFileContainer.add(new CarvedFileContainer(fileName, fileSize, id, tskRanges));
                }
            }
            return fileManager.addCarvedFiles(carvedFileContainer);
        }
        catch (NumberFormatException | TskCoreException ex) {
            logger.log(Level.SEVERE, "Error parsing PhotoRec output and inserting it into the database: {0}", ex); //NON_NLS
        }

        List<LayoutFile> empty = Collections.emptyList();
        return empty;
    }
}
|
sidheshenator/autopsy
|
Core/src/org/sleuthkit/autopsy/modules/photoreccarver/PhotoRecCarverOutputParser.java
|
Java
|
apache-2.0
| 6,641 |
/*
* gucefVFS: GUCEF module implementing a Virtual File System
* Copyright (C) 2002 - 2007. Dinand Vanvelzen
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*-------------------------------------------------------------------------//
// //
// INCLUDES //
// //
//-------------------------------------------------------------------------*/
#include "gucefVFS_CVFSHandle.h"
/*-------------------------------------------------------------------------//
// //
// NAMESPACE //
// //
//-------------------------------------------------------------------------*/
namespace GUCEF {
namespace VFS {
/*-------------------------------------------------------------------------//
// //
// UTILITIES //
// //
//-------------------------------------------------------------------------*/
// Constructs a handle for a file accessed directly through the given
// IO access object (no in-memory buffer backing).
CVFSHandle::CVFSHandle( CORE::CIOAccess* fileAccess ,
                        const CORE::CString& filename ,
                        const CORE::CString& filePath )
    : m_fileAccess( fileAccess ) ,
      m_filename( filename )     ,
      m_bufferPtr()              ,
      m_filePath( filePath )
{GUCEF_TRACE;

}

/*-------------------------------------------------------------------------*/

// Constructs a handle whose contents are backed by an in-memory buffer;
// the shared buffer pointer keeps the data alive for the handle's lifetime.
CVFSHandle::CVFSHandle( CORE::CIOAccess* fileAccess ,
                        const CORE::CString& filename ,
                        const CORE::CString& filePath ,
                        TDynamicBufferPtr& bufferPtr )
    : m_fileAccess( fileAccess ) ,
      m_filename( filename )     ,
      m_bufferPtr( bufferPtr )   ,
      m_filePath( filePath )
{GUCEF_TRACE;

}

/*-------------------------------------------------------------------------*/

// Note: intentionally does not release m_fileAccess — this class only
// stores/encapsulates; ownership lies with the VFS that created the handle.
CVFSHandle::~CVFSHandle()
{GUCEF_TRACE;

    /* dont do anything here, this is just a storage / encapsulation class */
}

/*-------------------------------------------------------------------------*/

// Returns the file's name as registered with the VFS.
const CORE::CString&
CVFSHandle::GetFilename( void ) const
{GUCEF_TRACE;

    return m_filename;
}

/*-------------------------------------------------------------------------*/

// Returns the file's path within the VFS.
const CORE::CString&
CVFSHandle::GetFilePath( void ) const
{GUCEF_TRACE;

    return m_filePath;
}

/*-------------------------------------------------------------------------*/

// Returns the IO access object used to read/write the file; may be used
// directly by callers. Ownership is not transferred.
CORE::CIOAccess*
CVFSHandle::GetAccess( void )
{GUCEF_TRACE;

    return m_fileAccess;
}

/*-------------------------------------------------------------------------*/

// True when the handle is backed by an in-memory buffer (second constructor).
bool
CVFSHandle::IsLoadedInMemory( void ) const
{GUCEF_TRACE;

    return !m_bufferPtr.IsNULL();
}
/*-------------------------------------------------------------------------//
// //
// NAMESPACE //
// //
//-------------------------------------------------------------------------*/
}; /* namespace VFS */
}; /* namespace GUCEF */
/*-------------------------------------------------------------------------*/
|
LiberatorUSA/GUCEF
|
platform/gucefVFS/src/gucefVFS_CVFSHandle.cpp
|
C++
|
apache-2.0
| 4,430 |
// ============================================================================
//
// Copyright (C) 2006-2018 Talend Inc. - www.talend.com
//
// This source code is available under agreement available at
// https://github.com/Talend/data-prep/blob/master/LICENSE
//
// You should have received a copy of the agreement
// along with this program; if not, write to Talend SA
// 9 rue Pages 92150 Suresnes, France
//
// ============================================================================
package org.talend.dataprep.api.service.command.preparation;
import static org.springframework.beans.factory.config.BeanDefinition.SCOPE_PROTOTYPE;
import static org.talend.dataprep.command.Defaults.asNull;
import java.net.URISyntaxException;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.client.methods.HttpRequestBase;
import org.apache.http.client.utils.URIBuilder;
import org.springframework.context.annotation.Scope;
import org.springframework.http.HttpStatus;
import org.springframework.stereotype.Component;
import org.talend.dataprep.command.GenericCommand;
import org.talend.dataprep.exception.TDPException;
import org.talend.dataprep.exception.error.CommonErrorCodes;
/**
* Command used to move a preparation.
*/
@Component
@Scope(SCOPE_PROTOTYPE)
public class PreparationMove extends GenericCommand<Void> {

    /**
     * Default constructor.
     *
     * @param id The preparation id to move.
     * @param folder where to find the preparation.
     * @param destination Where to move the preparation to.
     * @param newName Optional new preparation name.
     */
    // private constructor to ensure the IoC
    private PreparationMove(String id, String folder, String destination, String newName) {
        super(GenericCommand.DATASET_GROUP);
        execute(() -> onExecute(id, folder, destination, newName));
        on(HttpStatus.OK).then(asNull());
    }

    /**
     * Builds the HTTP PUT request that asks the preparation service to move the preparation.
     *
     * @param id the preparation id.
     * @param folder source folder (skipped when blank).
     * @param destination target folder (skipped when blank).
     * @param newName optional new name (skipped when blank).
     * @return the request to execute.
     */
    private HttpRequestBase onExecute(String id, String folder, String destination, String newName) {
        final String moveUrl = preparationServiceUrl + "/preparations/" + id + "/move";
        try {
            final URIBuilder builder = new URIBuilder(moveUrl);
            appendIfNotBlank(builder, "folder", folder);
            appendIfNotBlank(builder, "destination", destination);
            appendIfNotBlank(builder, "newName", newName);
            return new HttpPut(builder.build());
        } catch (URISyntaxException e) {
            throw new TDPException(CommonErrorCodes.UNEXPECTED_EXCEPTION, e);
        }
    }

    /** Adds a query parameter only when its value carries actual text. */
    private static void appendIfNotBlank(URIBuilder builder, String name, String value) {
        if (StringUtils.isNotBlank(value)) {
            builder.addParameter(name, value);
        }
    }
}
|
Talend/data-prep
|
dataprep-api/src/main/java/org/talend/dataprep/api/service/command/preparation/PreparationMove.java
|
Java
|
apache-2.0
| 2,751 |
/**
* Copyright 2014 Eediom Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.araqne.logdb.query.expr;
import static org.junit.Assert.*;
import java.util.Arrays;
import java.util.List;
import org.junit.Test;
public class ArrayTest {

    @Test
    public void testManual() {
        // There is no way to express null without using field yet.
        // assertArrayEquals(new Object[] {null}, (Object[])FunctionUtil.parseExpr("array(null)").eval(null));
        assertListEquals(Arrays.asList(new Object[] { 1 }), toList(FunctionUtil.parseExpr("array(1)").eval(null)));
        assertListEquals(Arrays.asList(new Object[] { "hello", "world" }),
                toList(FunctionUtil.parseExpr("array(\"hello\", \"world\")").eval(null)));
        assertListEquals(Arrays.asList(new Object[] { 42L, "the answer to life, the universe, and everything" }),
                toList(FunctionUtil.parseExpr("array(21 * 2, \"the answer to life, the universe, and everything\")").eval(null)));
    }

    /** Unchecked view of an evaluation result as a list. */
    @SuppressWarnings("unchecked")
    static List<Object> toList(Object t) {
        return (List<Object>) t;
    }

    /**
     * Asserts element-wise equality of two lists, treating {@code null} expected
     * as "actual must be null too".
     */
    static void assertListEquals(List<Object> expected, List<Object> actual) {
        if (expected == null) {
            assertNull(actual);
            // FIX: must stop here - falling through would dereference 'expected'
            // below and throw NullPointerException instead of a clean assertion.
            return;
        }
        assertNotNull(actual);
        assertEquals(expected.size(), actual.size());
        for (int i = 0, n = expected.size(); i < n; ++i) {
            assertEquals(expected.get(i), actual.get(i));
        }
    }
}
|
araqne/logdb
|
araqne-logdb/src/test/java/org/araqne/logdb/query/expr/ArrayTest.java
|
Java
|
apache-2.0
| 1,892 |
using dBosque.Stub.Interfaces;
using dBosque.Stub.Services;
using dBosque.Stub.Services.Extensions;
using dBosque.Stub.Services.Types;
using dBosque.Stub.Server.Soap.Interface;
using Microsoft.AspNetCore.Http;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Net;
using System.Runtime.Serialization;
using System.ServiceModel.Channels;
using System.Text;
using System.Xml;
using System.Xml.Linq;
namespace dBosque.Stub.Server.Soap.Types
{
public class SoapStubMessage : StubMessage<Message>
{
    /// <summary>SOAP version of the incoming message; reused when creating replies.</summary>
    private MessageVersion Version { get; set; }

    /// <summary>Re-creatable copy of the incoming WCF message.</summary>
    private Message Message { get; set; }

    /// <summary>Buffered copy of the original message (WCF messages are forward-only).</summary>
    private MessageBuffer Buffer { get; set; }

    /// <summary>
    /// Create a new StubMessage class based on a WCF message
    /// </summary>
    /// <param name="msg">The incoming WCF message.</param>
    /// <param name="context">HTTP context, used to record the remote sender endpoint.</param>
    /// <param name="tenant">Tenant this stub message belongs to.</param>
    public SoapStubMessage(Message msg, HttpContext context, string tenant )
        : base(tenant, 200, ContentTypes.ApplicationXml)
    {
        var connectionInfo = context?.Connection;
        Sender = $"{connectionInfo?.RemoteIpAddress}:{connectionInfo?.RemotePort}";
        // Buffer the one-shot WCF message so it can be re-created later (e.g. for Relay).
        Buffer = msg.CreateBufferedCopy(8192);
        Message = Buffer.CreateMessage();
        ParseMessage();
    }

    /// <summary>
    /// Extracts action, root namespace/node and the raw request body from the buffered message.
    /// </summary>
    private void ParseMessage()
    {
        var xrdr = Message.GetReaderAtBodyContents();
        Action = Message.Headers.Action;
        RootNameSpace = xrdr.NamespaceURI;
        RootNode = xrdr.LocalName;
        var body = Request = RawRequest = xrdr.ReadOuterXml();
        // Message.ToString() renders the envelope with a "... stream ..." placeholder
        // for the body; splice the actual body back in to get the full request.
        Request = Message.ToString();
        Request = Request.Replace("... stream ...", body);
        Version = Message.Version;
    }

    ///<summary>
    ///Return a valid UnAuthorized message
    ///</summary>
    ///<returns></returns>
    public override Message AsUnauthorized()
    {
        throw new StubErrorException(System.Net.HttpStatusCode.Unauthorized);
    }

    ///<summary>
    ///Return a valid result message
    ///</summary>
    ///<returns></returns>
    public override Message AsResult()
    {
        string messageStream = string.IsNullOrEmpty(Response)?"<empty/>":Response;
        // No match and no pass-through: answer with a fault describing what went wrong.
        if (!HasMatch && !IsPassTrough)
            messageStream = SerializeToString(AsFault());
        var memoryStream = new MemoryStream(Encoding.UTF8.GetBytes(messageStream));
        // DTD processing disabled to prevent XXE on stubbed responses.
        var reader = XmlReader.Create(memoryStream, new XmlReaderSettings() { XmlResolver = null, DtdProcessing = DtdProcessing.Prohibit });
        if (HttpStatusCode >= 300)
            throw new StubErrorException(Convert(), Message.CreateMessage(Version, Action, reader));
        return Message.CreateMessage(Version, Action, reader);
    }

    /// <summary>
    /// Generate a MatchList based on all matches
    /// </summary>
    /// <returns></returns>
    private List<Match> ToMatchList()
    {
        var matches = new List<Match>();
        foreach (var m in Matches)
        {
            var match = new Match(m.Description);
            m.Items.ForEach(x => match.XPath.Add(new XPath(x.Expression, x.Value)));
            matches.Add(match);
        }
        return matches;
    }

    /// <summary>Parses an XML string and returns its document element.</summary>
    private static XmlElement GetElement(string xml)
    {
        XmlDocument doc = new XmlDocument();
        doc.LoadXml(xml);
        return doc.DocumentElement;
    }

    /// <summary>
    /// Create a faultcontract
    /// </summary>
    /// <returns></returns>
    private StubFaultContract AsFault()
    {
        StubFaultContract result = null;
        if (!HasMatch && Matches.Count == 0)
            result = new StubFaultContract()
            {
                Request = Request == null ? null : GetElement(Request),
                Message = Matches.Error
            };
        else if (!HasMatch && Matches.Count > 0)
            result = new StubFaultContract()
            {
                Message = "Multiple matches found.",
                Request = GetElement(Request),
                Matches = ToMatchList()
            };
        else if (HasMatch)
            result = new StubFaultContract()
            {
                Message = "One match found.",
                Request = GetElement(Request),
                Matches = ToMatchList()
            };
        return result;
    }

    ///<summary>
    ///Localize a xpath
    ///</summary>
    ///<param name="xpath"></param>
    ///<returns></returns>
    public override string LocalizeXpath(string xpath)
    {
        return xpath.AppendSoapEnvelope();
    }

    /// <summary>
    /// Serializes <paramref name="obj"/> with a <see cref="DataContractSerializer"/>,
    /// optionally substituting the nil Request element with the original request XML.
    /// </summary>
    private string SerializeToString<T>(T obj, string request = null)
    {
        DataContractSerializer serializer = new DataContractSerializer(typeof(T));
        MemoryStream memoryStream = new MemoryStream();
        serializer.WriteObject(memoryStream, obj);
        memoryStream.Position = 0;
        StreamReader r = new StreamReader(memoryStream);
        string reader = r.ReadToEnd();
        if (request != null)
            // FIX: closing tag was previously emitted as "</Request" (missing '>'),
            // which produced malformed XML.
            reader = reader.Replace("<Request i:nil=\"true\"/>", $"<Request>{request}</Request>");
        return reader;
    }

    ///<summary>
    ///Relay the message to the specific uri.
    ///</summary>
    ///<param name="uri"></param>
    public override void Relay(string uri)
    {
        Message = Buffer.CreateMessage();
        // Remove all headers.
        Message.Headers.ToList().ForEach(a => Message.Headers.RemoveAll(a.Name, a.Namespace));
        Message.Headers.RemoveAll("To", "http://schemas.microsoft.com/ws/2005/05/addressing/none");
        ParseMessage();
        HttpWebRequest webRequest = (HttpWebRequest)WebRequest.Create(uri);
        UpdateHeaderProperties(webRequest);
        webRequest.Method = "POST";
        webRequest.AutomaticDecompression = DecompressionMethods.GZip | DecompressionMethods.Deflate;
        XmlDocument soapEnvelopeXml = new XmlDocument();
        soapEnvelopeXml.LoadXml(Request);
        using (Stream stream = webRequest.GetRequestStream())
        {
            soapEnvelopeXml.Save(stream);
        }
        try
        {
            using (WebResponse response = webRequest.GetResponse())
            {
                Response = ParseResponse(response);
            }
        }
        catch (WebException ex)
        {
            // Error responses (4xx/5xx) still carry a SOAP body we want to relay back.
            Response = ParseResponse(ex.Response);
        }
        Matches = new StubMatchList();
    }

    /// <summary>
    /// Reads the SOAP response and returns the first node inside the envelope Body.
    /// </summary>
    private string ParseResponse(WebResponse response)
    {
        using (StreamReader rd = new StreamReader(response.GetResponseStream()))
        {
            string soapResult = rd.ReadToEnd();
            XDocument xDoc = XDocument.Load(new StringReader(soapResult));
            var unwrappedResponse = xDoc.Descendants((XNamespace)"http://schemas.xmlsoap.org/soap/envelope/" + "Body")
                .First()
                .FirstNode;
            return unwrappedResponse.ToString();
        }
    }

    /// <summary>Returns the header value for <paramref name="key"/> or <paramref name="def"/> when absent.</summary>
    private string GetHeaderValue(WebHeaderCollection headers, string key, string def = null)
    {
        return headers.AllKeys.Contains(key) ? headers[key] : def;
    }

    /// <summary>
    /// Copies the HTTP headers of the incoming message onto the outgoing relay request.
    /// </summary>
    private void UpdateHeaderProperties(HttpWebRequest request)
    {
        WebHeaderCollection headers = new WebHeaderCollection();
        if (Message.Properties.ContainsKey(HttpRequestMessageProperty.Name))
        {
            headers = (Message.Properties[HttpRequestMessageProperty.Name] as HttpRequestMessageProperty).Headers;
            foreach (var header in headers.AllKeys)
            {
                try
                {
                    request.Headers.Add(header, headers[header]);
                }
                catch
                {
                    // Restricted headers (Content-Type, Host, ...) throw and are set explicitly below.
                }
            }
        }
        request.ContentType = GetHeaderValue(headers, "ContentType", $"{ContentTypes.TextXml};charset=\"utf-8\"");
        request.Accept = GetHeaderValue(headers, "Accept", ContentTypes.TextXml);
    }
}
}
|
dbosque/Stub
|
dBosque.Stub.Server.Soap/Types/SoapStubMessage.cs
|
C#
|
apache-2.0
| 8,596 |
package com.hantsylabs.example.spring.jpa.spec;
import java.util.Date;
import com.hantsylabs.example.spring.model.QConference;
import com.querydsl.core.BooleanBuilder;
import com.querydsl.core.types.Predicate;
public class QueryDslPredicates {
public static Predicate inProgressConferences() {
QConference conf = QConference.conference;
final Date now = new Date();
BooleanBuilder builder = new BooleanBuilder();
return builder.and(conf.startedDate.before(now))
.and(conf.endedDate.after(now)).getValue();
}
public static Predicate pastConferences(Date _past) {
QConference conf = QConference.conference;
final Date now = new Date();
BooleanBuilder builder = new BooleanBuilder();
builder.and(conf.endedDate.before(now));
if (_past != null) {
builder.and(conf.startedDate.after(_past));
}
return builder.getValue();
}
public static Predicate upcomingConferences() {
QConference conf = QConference.conference;
final Date now = new Date();
return conf.startedDate.after(now);
}
}
|
hantsy/spring4-sandbox
|
data-jpa/src/main/java/com/hantsylabs/example/spring/jpa/spec/QueryDslPredicates.java
|
Java
|
apache-2.0
| 1,031 |
// Copyright (c) 2016 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package encap
// Mode names an encapsulation setting for a pool.
type Mode string

const (
	// Undefined means no mode has been configured.
	Undefined Mode = ""
	// NOTE(review): Always and CrossSubnet are untyped string constants rather
	// than Mode (no type is repeated on these lines) - confirm whether callers
	// rely on the implicit assignability before giving them an explicit type.
	Always      = "always"
	CrossSubnet = "cross-subnet"
)

// DefaultMode is the mode assumed when none is specified.
const DefaultMode = Always
|
projectcalico/calico
|
libcalico-go/lib/backend/encap/ipip.go
|
GO
|
apache-2.0
| 771 |
package com.mjrichardson.teamCity.buildTriggers.Fakes;
import com.mjrichardson.teamCity.buildTriggers.CacheManager;
import java.net.URI;
import java.util.HashMap;
import java.util.UUID;
/**
 * In-memory {@link CacheManager} test double backed by a plain map.
 * Entries are keyed by cache name and URI; correlation ids are ignored.
 */
public class FakeCacheManager implements CacheManager {

    private final HashMap<String, String> cache = new HashMap<>();

    @Override
    public String getFromCache(CacheNames cacheName, URI uri, UUID correlationId) {
        // HashMap.get already yields null for absent keys, matching the contract.
        return cache.get(keyFor(cacheName, uri));
    }

    @Override
    public void addToCache(CacheNames cacheName, URI uri, String body, UUID correlationId) {
        cache.put(keyFor(cacheName, uri), body);
    }

    /** Builds the composite cache key from the cache name and request URI. */
    private static String keyFor(CacheNames cacheName, URI uri) {
        return cacheName.name() + "|" + uri.toString();
    }
}
|
matt-richardson/teamcity-octopus-build-trigger-plugin
|
octopus-build-trigger-server/src/test/java/com/mjrichardson/teamCity/buildTriggers/Fakes/FakeCacheManager.java
|
Java
|
apache-2.0
| 773 |
package javaselast.examples.csv;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.Reader;
import java.lang.reflect.Field;
/**
 * Reader decorator intended to materialize CSV rows into objects of a given type.
 * NOTE(review): the CSV-to-field mapping is not implemented yet; {@link #readNext}
 * currently always returns {@code null} - confirm intended behavior with the author.
 */
public class CsvReader extends Reader {

    private final BufferedReader bufferedReader;

    /**
     * Wraps the given reader with buffering for line-oriented CSV access.
     *
     * @param reader the underlying character source.
     */
    public CsvReader(Reader reader) {
        bufferedReader = new BufferedReader(reader);
    }

    /**
     * Instantiates {@code type} via its no-arg constructor in preparation for
     * populating it from the next CSV line.
     *
     * @param type the target row type; must expose a no-arg constructor.
     * @return always {@code null} for now (mapping not implemented).
     */
    public <T> T readNext(Class<T> type) {
        try {
            // Replaces deprecated Class.newInstance(): this variant does not
            // silently rethrow checked constructor exceptions.
            T instance = type.getDeclaredConstructor().newInstance();
            // TODO: read the next CSV line and populate 'instance' fields reflectively.
        } catch (ReflectiveOperationException e) {
            e.printStackTrace();
        }
        return null;
    }

    /** Delegates raw character reads to the buffered underlying reader. */
    @Override
    public int read(char[] cbuf, int off, int len) throws IOException {
        return bufferedReader.read(cbuf, off, len);
    }

    /** Closes the underlying reader. */
    @Override
    public void close() throws IOException {
        bufferedReader.close();
    }
}
|
nesterione/JavaTrainings
|
src/javaselast/examples/csv/CsvReader.java
|
Java
|
apache-2.0
| 945 |
import logging
import pika
import sys
if sys.version[0] == '2':
import Queue as queue
else:
import queue as queue
from .. import rabbitutils
import esgfpid.defaults as defaults
from esgfpid.utils import loginfo, logdebug, logtrace, logerror, logwarn, log_every_x_times
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.NullHandler())
'''
The RabbitFeeder is responsible for publishing messages to RabbitMQ.
It is very simple. Basically the only method it exposes
(except for some simple getter/setter which is rarely ever used)
is publish_message(), which is called from the main thread.
'''
class RabbitFeeder(object):
    """Publishes queued messages to RabbitMQ, one per publish trigger."""

    def __init__(self, thread, statemachine, nodemanager):
        # Owning rabbit thread: source of unpublished messages, the channel,
        # and the stores of unconfirmed deliveries.
        self.thread = thread

        '''
        Read-only.
        Before publishing a message, we check the state, and we log
        the state. '''
        self.statemachine = statemachine

        # Provides publish properties and routing-key adaptation.
        self.nodemanager = nodemanager

        '''
        The deliver_number is important. It defines the number of the message
        that is used to identify it between this client and the RabbitMQ
        server (e.g. so the correct messages are deleted upon confirmation).
        It makes sure that rabbit server and this client talk about the
        same message.
        NEVER EVER INCREMENT OR OTHERWISE MODIFY THIS!
        From the RabbitMQ docs:
        "The delivery tag is valid only within the channel from which
        the message was received. I.e. a client MUST NOT receive a
        message on one channel and then acknowledge it on another."
        Source: https://www.rabbitmq.com/amqp-0-9-1-reference.html '''
        self.__delivery_number = 1

        # Logging
        self.__first_publication_trigger = True
        self.__logcounter_success = 0 # counts successful publishes!
        self.__logcounter_trigger = 0 # counts triggers!
        self.__LOGFREQUENCY = 10
        self.__have_not_warned_about_connection_fail_yet = True
        self.__have_not_warned_about_force_close_yet = True

    '''
    Triggers the publication of one message to RabbitMQ, if the
    state machine currently allows this.
    The message is fetched from the Queue of unpublished messages.
    So far, whenever the library wants to publish messages, it
    fires as many of these "publish_message" events as messages
    were published (and some extra, to be sure).
    If some of these triggers cannot be acted upon, as the module
    is not in a state where it is allowed to publish, the triggers
    should be fired as soon as the module is in available state
    again.
    # TODO: Find better way to make sure enough publish events are fired.
    Are we sure there is not ever a way to have some messages
    in the unpublished Queue that could be sent, but aren't, because
    no event was fired for them? For example, if an exception occurs
    during publish, and the message was put back - will there ever
    be an event to trigger its publication? I don't think so.
    Interim solution (hack):
    (a) At the moment, for every message that the publisher hands
    over, I fire two events (rabbitthread).
    (b) During the close-down algorithm, if there is unpublished
    messages, I fire publish events, to make sure they are
    published (thread_shutter).
    '''
    def publish_message(self):
        # Thin wrapper: log-and-reraise so failures surface in the rabbit log.
        try:
            return self.__publish_message()
        except Exception as e:
            logwarn(LOGGER, 'Error in feeder.publish_message(): %s: %s', e.__class__.__name__, repr(e))
            raise e

    def __publish_message(self):
        self.__logcounter_trigger += 1
        # Only the AVAILABLE states may actually publish; other states just log why not.
        if self.statemachine.is_NOT_STARTED_YET() or self.statemachine.is_WAITING_TO_BE_AVAILABLE():
            log_every_x_times(LOGGER, self.__logcounter_trigger, self.__LOGFREQUENCY, 'Received early trigger for feeding the rabbit (trigger %i).', self.__logcounter_trigger)
            self.__log_why_cannot_feed_the_rabbit_now()
        elif self.statemachine.is_AVAILABLE() or self.statemachine.is_AVAILABLE_BUT_WANTS_TO_STOP():
            log_every_x_times(LOGGER, self.__logcounter_trigger, self.__LOGFREQUENCY, 'Received trigger for publishing message to RabbitMQ (trigger %i).', self.__logcounter_trigger)
            self.__log_publication_trigger()
            self.__publish_message_to_channel()
        elif self.statemachine.is_PERMANENTLY_UNAVAILABLE() or self.statemachine.is_FORCE_FINISHED():
            log_every_x_times(LOGGER, self.__logcounter_trigger, self.__LOGFREQUENCY, 'Received late trigger for feeding the rabbit (trigger %i).', self.__logcounter_trigger)
            self.__log_why_cannot_feed_the_rabbit_now()

    ''' This method only logs. '''
    def __log_publication_trigger(self):
        if self.__first_publication_trigger:
            logdebug(LOGGER, 'Received first trigger for publishing message to RabbitMQ.')
            self.__first_publication_trigger = False
        logtrace(LOGGER, 'Received trigger for publishing message to RabbitMQ, and module is ready to accept it.')

    ''' This method only logs, depending on the state machine's state.'''
    def __log_why_cannot_feed_the_rabbit_now(self):
        log_every_x_times(LOGGER, self.__logcounter_trigger, self.__LOGFREQUENCY, 'Cannot publish message to RabbitMQ (trigger no. %i).', self.__logcounter_trigger)
        if self.statemachine.is_WAITING_TO_BE_AVAILABLE():
            logdebug(LOGGER, 'Cannot publish message to RabbitMQ yet, as the connection is not ready.')
        elif self.statemachine.is_NOT_STARTED_YET():
            logerror(LOGGER, 'Cannot publish message to RabbitMQ, as the thread is not running yet.')
        elif self.statemachine.is_PERMANENTLY_UNAVAILABLE() or self.statemachine.is_FORCE_FINISHED():
            if self.statemachine.detail_could_not_connect:
                logtrace(LOGGER, 'Could not publish message to RabbitMQ, as the connection failed.')
                # Warn only once to avoid flooding the log.
                if self.__have_not_warned_about_connection_fail_yet:
                    logwarn(LOGGER, 'Could not publish message(s) to RabbitMQ. The connection failed definitively.')
                    self.__have_not_warned_about_connection_fail_yet = False
            elif self.statemachine.get_detail_closed_by_publisher():
                logtrace(LOGGER, 'Cannot publish message to RabbitMQ, as the connection was closed by the user.')
                # Warn only once to avoid flooding the log.
                if self.__have_not_warned_about_force_close_yet:
                    logwarn(LOGGER, 'Could not publish message(s) to RabbitMQ. The sender was closed by the user.')
                    self.__have_not_warned_about_force_close_yet = False
            else:
                if self.thread._channel is None:
                    logerror(LOGGER, 'Very unexpected. Could not publish message(s) to RabbitMQ. There is no channel.')

    '''
    Retrieves a message from stack and tries to publish it
    to RabbitMQ.
    In case of failure, it is put back. In case of success,
    it is handed on to the confirm module that is responsible
    for waiting for RabbitMQ's confirmation.
    Note: The publish may cause an error if the Channel was closed.
    A closed Channel should be handled in the on_channel_close()
    callback, but we catch it here in case the clean up was not quick enough.
    '''
    def __publish_message_to_channel(self):
        # Find a message to publish.
        # If no messages left, well, nothing to publish!
        try:
            message = self.__get_message_from_stack()
        except queue.Empty as e:
            logtrace(LOGGER, 'Queue empty. No more messages to be published.')
            return

        # Now try to publish it.
        # If anything goes wrong, you need to put it back to
        # the stack of unpublished messages!
        try:
            success = self.__try_publishing_otherwise_put_back_to_stack(message)
            if success:
                self.__postparations_after_successful_feeding(message)

        # Treat various errors that may occur during publishing:
        except pika.exceptions.ChannelClosed as e:
            logwarn(LOGGER, 'Cannot publish message %i to RabbitMQ because the Channel is closed (%s)', self.__delivery_number+1, repr(e))
        except AttributeError as e:
            if self.thread._channel is None:
                logwarn(LOGGER, 'Cannot publish message %i to RabbitMQ because there is no channel.', self.__delivery_number+1)
            else:
                logwarn(LOGGER, 'Cannot publish message %i to RabbitMQ (unexpected error %s:%s)', self.__delivery_number+1, e.__class__.__name__, repr(e))
        except AssertionError as e:
            logwarn(LOGGER, 'Cannot publish message to RabbitMQ %i because of AssertionError: "%s"', self.__delivery_number+1,e)
            if 'A non-string value was supplied for self.exchange' in repr(e):
                exch = self.thread.get_exchange_name()
                logwarn(LOGGER, 'Exchange was "%s" (type %s)', exch, type(exch))

    '''
    Retrieve an unpublished message from stack.
    Note: May block for up to 2 seconds.
    :return: A message from the stack of unpublished messages.
    :raises: queue.Empty.
    '''
    def __get_message_from_stack(self, seconds=0):
        message = self.thread.get_message_from_unpublished_stack(seconds)
        logtrace(LOGGER, 'Found message to be published. Now left in queue to be published: %i messages.', self.thread.get_num_unpublished())
        return message

    '''
    This tries to publish the message and puts it back into the
    Queue if it failed.
    :param message: Message to be sent.
    :raises: pika.exceptions.ChannelClosed, if the Channel is closed.
    '''
    def __try_publishing_otherwise_put_back_to_stack(self, message):
        try:
            # Getting message info:
            properties = self.nodemanager.get_properties_for_message_publications()
            routing_key, msg_string = rabbitutils.get_routing_key_and_string_message_from_message_if_possible(message)
            routing_key = self.nodemanager.adapt_routing_key_for_untrusted(routing_key)

            # Logging
            logtrace(LOGGER, 'Publishing message %i (key %s) (body %s)...', self.__delivery_number+1, routing_key, msg_string) # +1 because it will be incremented after the publish.
            log_every_x_times(LOGGER, self.__logcounter_trigger, self.__LOGFREQUENCY, 'Trying actual publish... (trigger no. %i).', self.__logcounter_trigger)
            logtrace(LOGGER, '(Publish to channel no. %i).', self.thread._channel.channel_number)

            # Actual publish to exchange
            self.thread._channel.basic_publish(
                exchange=self.thread.get_exchange_name(),
                routing_key=routing_key,
                body=msg_string,
                properties=properties,
                mandatory=defaults.RABBIT_MANDATORY_DELIVERY
            )
            return True

        # If anything went wrong, put it back into the stack of
        # unpublished messages before re-raising the exception
        # for further handling:
        except Exception as e:
            success = False
            logwarn(LOGGER, 'Message was not published. Putting back to queue. Reason: %s: "%s"',e.__class__.__name__, repr(e))
            self.thread.put_one_message_into_queue_of_unsent_messages(message)
            logtrace(LOGGER, 'Now (after putting back) left in queue to be published: %i messages.', self.thread.get_num_unpublished())
            raise e

    '''
    If a publish was successful, pass it to the confirmer module
    and in increment delivery_number for the next message.
    '''
    def __postparations_after_successful_feeding(self, msg):
        # Pass the successfully published message and its delivery_number
        # to the confirmer module, to wait for its confirmation.
        # Increase the delivery number for the next message.
        self.thread.put_to_unconfirmed_delivery_tags(self.__delivery_number)
        self.thread.put_to_unconfirmed_messages_dict(self.__delivery_number, msg)
        self.__delivery_number += 1

        # Logging
        self.__logcounter_success += 1
        log_every_x_times(LOGGER, self.__logcounter_success, self.__LOGFREQUENCY, 'Actual publish to channel done (trigger no. %i, publish no. %i).', self.__logcounter_trigger, self.__logcounter_success)
        logtrace(LOGGER, 'Publishing messages %i to RabbitMQ... done.', self.__delivery_number-1)
        if (self.__delivery_number-1 == 1):
            loginfo(LOGGER, 'First message published to RabbitMQ.')
        logdebug(LOGGER, 'Message published (no. %i)', self.__delivery_number-1)

    '''
    Reset the delivery_number for the messages.
    This must be called on a reconnection / channel reopen!
    And may not be called during any other situation!
    The number is not sent along to the RabbitMQ server, but
    the server keeps track of the delivery number
    separately on its side.
    That's why it is important to make sure it is incremented
    and reset exactly the same way (incremented at each successfully
    published message, and reset to one at channel reopen).
    (called by the builder during reconnection / channel reopen).
    '''
    def reset_delivery_number(self):
        self.__delivery_number = 1
|
IS-ENES-Data/esgf-pid
|
esgfpid/rabbit/asynchronous/thread_feeder.py
|
Python
|
apache-2.0
| 13,289 |
/*
* Copyright (C) 2017 exzogeni.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package alchemy;
import alchemy.query.AlchemyDelete;
import alchemy.query.AlchemyInsert;
import alchemy.query.AlchemyUpdate;
import alchemy.query.AlchemyWhere;
import java.util.Collection;
import java.util.Collections;
/**
 * Fluent entry point that turns a {@link DataSource} into query, insert,
 * update and delete builders.
 */
public class Alchemy {

    private final DataSource dataSource;

    /**
     * @param source the data source all operations are executed against.
     */
    public Alchemy(DataSource source) {
        dataSource = source;
    }

    /** Starts a filtered query over entities of the given type. */
    public <T> AlchemyWhere<T> where(Class<T> clazz) {
        return new OpWhere<>(dataSource, dataSource.where(clazz));
    }

    /** Inserts a single object (convenience for the collection variant). */
    public <T> AlchemyInsert<T> insert(T object) {
        return insert(Collections.singletonList(object));
    }

    /** Inserts a collection of objects. */
    public <T> AlchemyInsert<T> insert(Collection<T> objects) {
        return new OpInsert<>(dataSource.insert(objects));
    }

    /** Updates a single object (convenience for the collection variant). */
    public <T> AlchemyUpdate<T> update(T object) {
        return update(Collections.singletonList(object));
    }

    /** Updates a collection of objects. */
    public <T> AlchemyUpdate<T> update(Collection<T> objects) {
        return new OpUpdate<>(dataSource.update(objects));
    }

    /** Deletes a single object (convenience for the collection variant). */
    public <T> AlchemyDelete delete(T object) {
        return delete(Collections.singletonList(object));
    }

    /** Deletes a collection of objects. */
    public <T> AlchemyDelete delete(Collection<T> objects) {
        return new OpDelete(dataSource.delete(objects));
    }
}
|
DanielSerdyukov/rxsqlite
|
alchemy/src/main/java/alchemy/Alchemy.java
|
Java
|
apache-2.0
| 1,801 |
package net.community.chest.jmx;
import java.io.IOException;
import java.util.Collection;
import net.community.chest.dom.DOMUtils;
import net.community.chest.io.EOLStyle;
import net.community.chest.jmx.dom.MBeanEntryDescriptor;
/**
* <P>Copyright 2008 as per GPLv2</P>
*
* <P>Definitions of the proprietary JMX protocol over HTTP</P>
* @author Lyor G.
* @since Jan 7, 2008 12:23:28 PM
*/
public final class JMXProtocol {
    private JMXProtocol ()
    {
        // no instance
    }

    /** Root element name used when rendering a list of MBeans as an XML document. */
    public static final String DEFAULT_MBEANS_LIST_ROOT_ELEM_NAME="mbeans";

    // various request parameters
    public static final String ATTRIBUTES_PARAM="attributes", // true=list MBean attributes (default=false)
                               VALUES_PARAM="values", // true=fetch MBean attributes values (default=false)
                               OPERATIONS_PARAM="operations", // true=fetch MBean operations (default=false)
                               PARAMS_PARAM="params", // true=fetch MBean operation parameters (default=false)
                               NULLS_PARAM="null", // true=include null values (default=false)
                               NAME_PARAM="name",
                               DOMAIN_PARAM="domain",
                               OPNAME_PARAM="opname", // if non-nulll then overrides the operation name in the XML descriptor
                               UNIQUE_PARAM="unique", // true=invoked operation name is unique (default=true)
                               PARAMLESS_PARAM="paramless"; // true=no need to read operation XML descriptor since operation has no parameters

    // format: http://somehost:port/servlet?req=list[&attributes=true/false][&values=true/false][&name='...'][&null=true/false][operations=true/false][params=true/false]
    // format: http://somehost:port/servlet?req=get[&null=true/false]
    public static final String REQ_PARAM="req",
                               ALL_REQ="all", // equivalent to req=list&attributes=true&values=true&null=true&operations=true&params=true
                               LIST_REQ="list",
                               AGENTS_REQ="agents",
                               GET_REQ="get", // only the 'null' option is valid
                               INVOKE_REQ="invoke",
                               WHEREAMI_REQ="whereami",
                               SYSPROPS_REQ="sysprops",
                               ENV_REQ="env",
                               CONTEXT_REQ="context", // servlet context
                               CONFIG_REQ="config", // servlet configuration
                               VERINFO_REQ="verinfo"; // community chest version information

    /**
     * Appends the XML representation of each descriptor to the target,
     * one per line (CRLF-terminated), skipping null/empty descriptors.
     *
     * @param sb target {@link Appendable} - may not be null
     * @param mbl descriptors to render - may be null or empty (no-op)
     * @return the same {@link Appendable} instance for call chaining
     * @throws IOException if no target instance was supplied or appending fails
     */
    public static final <A extends Appendable> A appendDescriptors (A sb, Collection<? extends MBeanEntryDescriptor> mbl) throws IOException
    {
        final int numMBeans=(null == mbl) ? 0 : mbl.size();
        if (null == sb)
            throw new IOException("appendDescriptors(" + numMBeans + ") no " + Appendable.class.getSimpleName() + " instance");

        if (numMBeans > 0)
        {
            for (final MBeanEntryDescriptor mbe : mbl)
            {
                final String mbString=(null == mbe) ? null : mbe.toString();
                if ((null == mbString) || (mbString.length() <= 0))
                    continue; // nothing renderable for this descriptor

                sb.append(mbString);
                EOLStyle.CRLF.appendEOL(sb);
            }
        }

        return sb;
    }

    /**
     * Builds a complete XML document (with XML declaration) whose root is
     * {@link #DEFAULT_MBEANS_LIST_ROOT_ELEM_NAME} and whose children are the
     * rendered descriptors.
     *
     * @param mbl descriptors to render - may be null or empty (empty document body)
     * @return the XML document as a string
     * @throws IOException if rendering the descriptors fails
     */
    public static final String buildDescriptorsDocument (Collection<? extends MBeanEntryDescriptor> mbl) throws IOException
    {
        final int numMBeans=(null == mbl) ? 0 : mbl.size();
        // pre-size the builder: rough estimate of 128 chars per descriptor plus envelope
        StringBuilder sb=new StringBuilder(Math.max(numMBeans, 1) * 128 + 64)
            .append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>")
            .append(EOLStyle.CRLF.getStyleChars())
            .append(DOMUtils.XML_ELEM_START_DELIM)
            .append(DEFAULT_MBEANS_LIST_ROOT_ELEM_NAME)
            .append(DOMUtils.XML_ELEM_END_DELIM)
            .append(EOLStyle.CRLF.getStyleChars())
            ;
        sb = appendDescriptors(sb, mbl);
        sb.append(DOMUtils.XML_ELEM_START_DELIM)
          .append(DOMUtils.XML_ELEM_CLOSURE_DELIM)
          .append(DEFAULT_MBEANS_LIST_ROOT_ELEM_NAME)
          .append(DOMUtils.XML_ELEM_END_DELIM)
          ;
        return sb.toString();
    }
}
|
lgoldstein/communitychest
|
chest/base-utils/jmx/src/main/java/net/community/chest/jmx/JMXProtocol.java
|
Java
|
apache-2.0
| 4,647 |
from typing import ClassVar, FrozenSet
from ..config import Config
from .dependency import IngressClassesDependency, SecretDependency, ServiceDependency
from .k8sobject import KubernetesGVK, KubernetesObject
from .k8sprocessor import ManagedKubernetesProcessor
from .resource import NormalizedResource, ResourceManager
class IngressClassProcessor (ManagedKubernetesProcessor):
    """Collects the names of IngressClasses managed by this controller.

    Matching class names are recorded on the shared IngressClassesDependency
    so that Ingress processing can later decide which Ingresses to handle.
    """

    # spec.controller value that marks an IngressClass as belonging to us.
    CONTROLLER: ClassVar[str] = 'getambassador.io/ingress-controller'

    ingress_classes_dep: IngressClassesDependency

    def __init__(self, manager: ResourceManager) -> None:
        super().__init__(manager)
        # This processor *provides* the ingress-classes dependency for others to consume.
        self.ingress_classes_dep = self.deps.provide(IngressClassesDependency)

    def kinds(self) -> FrozenSet[KubernetesGVK]:
        """Kubernetes kinds this processor handles (both IngressClass API versions)."""
        return frozenset([
            KubernetesGVK('networking.k8s.io/v1beta1', 'IngressClass'),
            KubernetesGVK('networking.k8s.io/v1', 'IngressClass'),
        ])

    def _process(self, obj: KubernetesObject) -> None:
        """Record obj's name if it is an IngressClass managed by this Ambassador."""
        # We only want to deal with IngressClasses that belong to "spec.controller: getambassador.io/ingress-controller"
        if obj.spec.get('controller', '').lower() != self.CONTROLLER:
            self.logger.debug(f'ignoring IngressClass {obj.name} without controller - getambassador.io/ingress-controller')
            return

        if obj.ambassador_id != Config.ambassador_id:
            self.logger.debug(f'IngressClass {obj.name} does not have Ambassador ID {Config.ambassador_id}, ignoring...')
            return

        # TODO: Do we intend to use this parameter in any way?
        # `parameters` is of type TypedLocalObjectReference,
        # meaning it links to another k8s resource in the same namespace.
        # https://godoc.org/k8s.io/api/core/v1#TypedLocalObjectReference
        #
        # In this case, the resource referenced by TypedLocalObjectReference
        # should not be namespaced, as IngressClass is a non-namespaced resource.
        #
        # It was designed to reference a CRD for this specific ingress-controller
        # implementation... although usage is optional and not prescribed.
        ingress_parameters = obj.spec.get('parameters', {})

        self.logger.debug(f'Handling IngressClass {obj.name} with parameters {ingress_parameters}...')
        self.aconf.incr_count('k8s_ingress_class')

        # Don't emit this directly. We use it when we handle ingresses below. If
        # we want to use the parameters, we should add them to this dependency
        # type.
        self.ingress_classes_dep.ingress_classes.add(obj.name)
class IngressProcessor (ManagedKubernetesProcessor):
    """Translates Kubernetes Ingress resources into Ambassador resources.

    For each accepted Ingress this emits: one Host per (TLS secret, host) pair,
    one Mapping for the default backend (if any), one Mapping per rule path,
    and finally schedules a status update mirroring the Ambassador service's
    load-balancer status onto the Ingress.
    """

    service_dep: ServiceDependency
    ingress_classes_dep: IngressClassesDependency

    def __init__(self, manager: ResourceManager) -> None:
        super().__init__(manager)
        self.deps.want(SecretDependency)
        self.service_dep = self.deps.want(ServiceDependency)
        self.ingress_classes_dep = self.deps.want(IngressClassesDependency)

    def kinds(self) -> FrozenSet[KubernetesGVK]:
        # All Ingress API versions we accept, including the legacy extensions group.
        return frozenset([
            KubernetesGVK('extensions/v1beta1', 'Ingress'),
            KubernetesGVK('networking.k8s.io/v1beta1', 'Ingress'),
            KubernetesGVK('networking.k8s.io/v1', 'Ingress'),
        ])

    def _update_status(self, obj: KubernetesObject) -> None:
        """Queue a status update copying the Ambassador service's status onto this Ingress."""
        service_status = None

        if not self.service_dep.ambassador_service or not self.service_dep.ambassador_service.name:
            self.logger.error(f"Unable to set Ingress {obj.name}'s load balancer, could not find Ambassador service")
        else:
            service_status = self.service_dep.ambassador_service.status

        if obj.status != service_status:
            # Only queue an update when we actually have a status to copy.
            if service_status:
                status_update = (obj.gvk.kind, obj.namespace, service_status)
                self.logger.debug(f"Updating Ingress {obj.name} status to {status_update}")
                self.aconf.k8s_status_updates[f'{obj.name}.{obj.namespace}'] = status_update
        else:
            self.logger.debug(f"Not reconciling Ingress {obj.name}: observed and current statuses are in sync")

    def _process(self, obj: KubernetesObject) -> None:
        """Emit Hosts and Mappings for one Ingress, then schedule its status update."""
        ingress_class_name = obj.spec.get('ingressClassName', '')

        has_ingress_class = ingress_class_name in self.ingress_classes_dep.ingress_classes
        has_ambassador_ingress_class_annotation = obj.annotations.get('kubernetes.io/ingress.class', '').lower() == 'ambassador'

        # check the Ingress resource has either:
        #  - a `kubernetes.io/ingress.class: "ambassador"` annotation
        #  - a `spec.ingressClassName` that references an IngressClass with
        #    `spec.controller: getambassador.io/ingress-controller`
        #
        # also worth noting, the kube-apiserver might assign the `spec.ingressClassName` if unspecified
        # and only 1 IngressClass has the following annotation:
        #   annotations:
        #     ingressclass.kubernetes.io/is-default-class: "true"
        if not (has_ingress_class or has_ambassador_ingress_class_annotation):
            self.logger.debug(f'ignoring Ingress {obj.name} without annotation (kubernetes.io/ingress.class: "ambassador") or IngressClass controller (getambassador.io/ingress-controller)')
            return

        # We don't want to deal with non-matching Ambassador IDs
        if obj.ambassador_id != Config.ambassador_id:
            self.logger.debug(f"Ingress {obj.name} does not have Ambassador ID {Config.ambassador_id}, ignoring...")
            return

        self.logger.debug(f"Handling Ingress {obj.name}...")
        self.aconf.incr_count('k8s_ingress')

        # Emit one Host per (TLS entry, host) pair, routing insecure traffic
        # and pointing at the entry's TLS secret.
        ingress_tls = obj.spec.get('tls', [])
        for tls_count, tls in enumerate(ingress_tls):
            tls_secret = tls.get('secretName', None)

            if tls_secret is not None:
                for host_count, host in enumerate(tls.get('hosts', ['*'])):
                    tls_unique_identifier = f"{obj.name}-{tls_count}-{host_count}"

                    spec = {
                        'ambassador_id': [obj.ambassador_id],
                        'hostname': host,
                        'acmeProvider': {
                            'authority': 'none'
                        },
                        'tlsSecret': {
                            'name': tls_secret
                        },
                        'requestPolicy': {
                            'insecure': {
                                'action': 'Route'
                            }
                        }
                    }

                    ingress_host = NormalizedResource.from_data(
                        'Host',
                        tls_unique_identifier,
                        namespace=obj.namespace,
                        labels=obj.labels,
                        spec=spec,
                    )

                    self.logger.debug(f"Generated Host from ingress {obj.name}: {ingress_host}")
                    self.manager.emit(ingress_host)

        # parse ingress.spec.defaultBackend
        # using ingress.spec.backend as a fallback, for older versions of the Ingress resource.
        default_backend = obj.spec.get('defaultBackend', obj.spec.get('backend', {}))
        db_service_name = default_backend.get('serviceName', None)
        db_service_port = default_backend.get('servicePort', None)

        if db_service_name is not None and db_service_port is not None:
            # Catch-all Mapping on '/' for the default backend.
            db_mapping_identifier = f"{obj.name}-default-backend"

            default_backend_mapping = NormalizedResource.from_data(
                'Mapping',
                db_mapping_identifier,
                namespace=obj.namespace,
                labels=obj.labels,
                spec={
                    'ambassador_id': obj.ambassador_id,
                    'prefix': '/',
                    'service': f'{db_service_name}.{obj.namespace}:{db_service_port}'
                },
            )

            self.logger.debug(f"Generated mapping from Ingress {obj.name}: {default_backend_mapping}")
            self.manager.emit(default_backend_mapping)

        # parse ingress.spec.rules
        ingress_rules = obj.spec.get('rules', [])
        for rule_count, rule in enumerate(ingress_rules):
            rule_http = rule.get('http', {})
            rule_host = rule.get('host', None)

            http_paths = rule_http.get('paths', [])
            for path_count, path in enumerate(http_paths):
                path_backend = path.get('backend', {})
                path_type = path.get('pathType', 'ImplementationSpecific')

                service_name = path_backend.get('serviceName', None)
                service_port = path_backend.get('servicePort', None)
                path_location = path.get('path', '/')

                # Skip paths with no usable backend.
                if not service_name or not service_port or not path_location:
                    continue

                unique_suffix = f"{rule_count}-{path_count}"
                mapping_identifier = f"{obj.name}-{unique_suffix}"

                # For cases where `pathType: Exact`,
                # otherwise `Prefix` and `ImplementationSpecific` are handled as regular Mapping prefixes
                is_exact_prefix = True if path_type == 'Exact' else False

                spec = {
                    'ambassador_id': obj.ambassador_id,
                    'prefix': path_location,
                    'prefix_exact': is_exact_prefix,
                    'precedence': 1 if is_exact_prefix else 0,  # Make sure exact paths are evaluated before prefix
                    'service': f'{service_name}.{obj.namespace}:{service_port}'
                }

                if rule_host is not None:
                    if rule_host.startswith('*.'):
                        # Ingress allow specifying hosts with a single wildcard as the first label in the hostname.
                        # Transform the rule_host into a host_regex:
                        # *.star.com becomes ^[a-z0-9]([-a-z0-9]*[a-z0-9])?\.star\.com$
                        spec['host'] = rule_host\
                            .replace('.', '\\.')\
                            .replace('*', '^[a-z0-9]([-a-z0-9]*[a-z0-9])?', 1) + '$'
                        spec['host_regex'] = True
                    else:
                        spec['host'] = rule_host

                path_mapping = NormalizedResource.from_data(
                    'Mapping',
                    mapping_identifier,
                    namespace=obj.namespace,
                    labels=obj.labels,
                    spec=spec,
                )

                self.logger.debug(f"Generated mapping from Ingress {obj.name}: {path_mapping}")
                self.manager.emit(path_mapping)

        # let's make arrangements to update Ingress' status now
        self._update_status(obj)

        # Let's see if our Ingress resource has Ambassador annotations on it
        self.manager.emit_annotated(NormalizedResource.from_kubernetes_object_annotation(obj))
|
datawire/ambassador
|
python/ambassador/fetch/ingress.py
|
Python
|
apache-2.0
| 11,005 |
package ca.uhn.fhir.rest.server.interceptor.auth;
/*
* #%L
* HAPI FHIR - Server Framework
* %%
* Copyright (C) 2014 - 2017 University Health Network
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
import static org.apache.commons.lang3.StringUtils.defaultString;
import java.util.*;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.lang3.Validate;
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.commons.lang3.builder.ToStringStyle;
import org.hl7.fhir.instance.model.api.*;
import ca.uhn.fhir.context.FhirContext;
import ca.uhn.fhir.model.api.TagList;
import ca.uhn.fhir.rest.api.RestOperationTypeEnum;
import ca.uhn.fhir.rest.api.server.RequestDetails;
import ca.uhn.fhir.rest.server.exceptions.AuthenticationException;
import ca.uhn.fhir.rest.server.exceptions.ForbiddenOperationException;
import ca.uhn.fhir.rest.server.interceptor.ServerOperationInterceptorAdapter;
import ca.uhn.fhir.util.CoverageIgnore;
/**
* This class is a base class for interceptors which can be used to
* inspect requests and responses to determine whether the calling user
* has permission to perform the given action.
* <p>
* See the HAPI FHIR
* <a href="http://jamesagnew.github.io/hapi-fhir/doc_rest_server_security.html">Documentation on Server Security</a>
* for information on how to use this interceptor.
* </p>
*/
public class AuthorizationInterceptor extends ServerOperationInterceptorAdapter implements IRuleApplier {
private static final org.slf4j.Logger ourLog = org.slf4j.LoggerFactory.getLogger(AuthorizationInterceptor.class);
private PolicyEnum myDefaultPolicy = PolicyEnum.DENY;
/**
 * Constructor. Leaves the default policy at {@link PolicyEnum#DENY} (the field default).
 */
public AuthorizationInterceptor() {
	super();
}
/**
 * Constructor
 *
 * @param theDefaultPolicy
 *           The default policy if no rules apply (must not be null)
 */
public AuthorizationInterceptor(PolicyEnum theDefaultPolicy) {
	this();
	setDefaultPolicy(theDefaultPolicy);
}
/**
 * Runs the rule chain for the given operation and, unless the resulting verdict
 * is {@link PolicyEnum#ALLOW}, rejects the request via {@link #handleDeny(Verdict)}.
 */
private void applyRulesAndFailIfDeny(RestOperationTypeEnum theOperation, RequestDetails theRequestDetails, IBaseResource theInputResource, IIdType theInputResourceId,
		IBaseResource theOutputResource) {
	Verdict decision = applyRulesAndReturnDecision(theOperation, theRequestDetails, theInputResource, theInputResourceId, theOutputResource);
	if (decision.getDecision() == PolicyEnum.ALLOW) {
		return;
	}
	handleDeny(decision);
}
/**
 * Evaluates the rules supplied by {@link #buildRuleList(RequestDetails)} in order
 * and returns the first non-null verdict. When no rule renders a decision, a
 * verdict carrying the default policy (and no deciding rule) is returned.
 */
@Override
public Verdict applyRulesAndReturnDecision(RestOperationTypeEnum theOperation, RequestDetails theRequestDetails, IBaseResource theInputResource, IIdType theInputResourceId,
		IBaseResource theOutputResource) {
	List<IAuthRule> rules = buildRuleList(theRequestDetails);
	ourLog.trace("Applying {} rules to render an auth decision for operation {}", rules.size(), theOperation);

	for (IAuthRule nextRule : rules) {
		Verdict verdict = nextRule.applyRule(theOperation, theRequestDetails, theInputResource, theInputResourceId, theOutputResource, this);
		if (verdict != null) {
			// First rule with an opinion wins
			ourLog.trace("Rule {} returned decision {}", nextRule, verdict.getDecision());
			return verdict;
		}
	}

	ourLog.trace("No rules returned a decision, applying default {}", myDefaultPolicy);
	return new Verdict(myDefaultPolicy, null);
}
/**
 * Supplies the set of rules to be applied to this individual request. The base
 * implementation returns an empty (mutable) list, meaning the default policy
 * decides every request.
 * <p>
 * Typically subclasses override this by examining <code>theRequestDetails</code>
 * to find out who the current user is and then using a {@link RuleBuilder} to
 * create an appropriate rule chain.
 * </p>
 *
 * @param theRequestDetails
 *           The individual request currently being applied
 */
public List<IAuthRule> buildRuleList(RequestDetails theRequestDetails) {
	return new ArrayList<>();
}
/**
 * Maps a REST operation type to which side of the exchange (request resource,
 * response resource, both, or neither) the rule chain needs to examine.
 */
private OperationExamineDirection determineOperationDirection(RestOperationTypeEnum theOperation, IBaseResource theRequestResource) {
	switch (theOperation) {
	case ADD_TAGS:
	case DELETE_TAGS:
	case GET_TAGS:
		// These are DSTU1 operations and not relevant
		return OperationExamineDirection.NONE;
	case EXTENDED_OPERATION_INSTANCE:
	case EXTENDED_OPERATION_SERVER:
	case EXTENDED_OPERATION_TYPE:
		return OperationExamineDirection.BOTH;
	case METADATA:
		// Security does not apply to these operations
		// NOTE(review): the comment above says security does not apply, yet IN is
		// returned rather than NONE - confirm whether METADATA is meant to be examined
		return OperationExamineDirection.IN;
	case DELETE:
		// Delete is a special case
		return OperationExamineDirection.NONE;
	case CREATE:
	case UPDATE:
	case PATCH:
		// Writes are authorized against the incoming resource only
		// if (theRequestResource != null) {
		// if (theRequestResource.getIdElement() != null) {
		// if (theRequestResource.getIdElement().hasIdPart() == false) {
		// return OperationExamineDirection.IN_UNCATEGORIZED;
		// }
		// }
		// }
		return OperationExamineDirection.IN;
	case META:
	case META_ADD:
	case META_DELETE:
		// meta operations do not apply yet
		return OperationExamineDirection.NONE;
	case GET_PAGE:
	case HISTORY_INSTANCE:
	case HISTORY_SYSTEM:
	case HISTORY_TYPE:
	case READ:
	case SEARCH_SYSTEM:
	case SEARCH_TYPE:
	case VREAD:
		// Reads are authorized against what is being returned
		return OperationExamineDirection.OUT;
	case TRANSACTION:
		return OperationExamineDirection.BOTH;
	case VALIDATE:
		// Nothing yet
		return OperationExamineDirection.NONE;
	default:
		// Should not happen
		throw new IllegalStateException("Unable to apply security to event of type " + theOperation);
	}
}
/**
 * The default policy if no rules have been found to apply. Default value for this setting is {@link PolicyEnum#DENY}
 *
 * @return the policy applied when no rule renders a decision
 */
public PolicyEnum getDefaultPolicy() {
	return myDefaultPolicy;
}
/**
 * Handle an access control verdict of {@link PolicyEnum#DENY}.
 * <p>
 * Subclasses may override to implement specific behaviour, but default is to
 * throw {@link ForbiddenOperationException} (HTTP 403) with error message citing the
 * rule name which triggered failure
 * </p>
 */
protected void handleDeny(Verdict decision) {
	// A null deciding rule means the denial came from the default policy
	if (decision.getDecidingRule() != null) {
		String ruleName = defaultString(decision.getDecidingRule().getName(), "(unnamed rule)");
		throw new ForbiddenOperationException("Access denied by rule: " + ruleName);
	}
	throw new ForbiddenOperationException("Access denied by default policy (no applicable rules)");
}
/**
 * Authorizes a resource-level user operation (create/update/delete callbacks),
 * using the resource itself as the input side of the rule evaluation.
 */
private void handleUserOperation(RequestDetails theRequest, IBaseResource theResource, RestOperationTypeEnum operation) {
	applyRulesAndFailIfDeny(operation, theRequest, theResource, theResource.getIdElement(), null);
}
/**
 * Authorizes the incoming request before it is handled. The input resource/ID
 * are populated according to which side of the exchange the operation examines;
 * operations whose direction is NONE are not checked here.
 */
@Override
public void incomingRequestPreHandled(RestOperationTypeEnum theOperation, ActionRequestDetails theProcessedRequest) {
	IBaseResource inputResource = null;
	IIdType inputResourceId = null;

	switch (determineOperationDirection(theOperation, theProcessedRequest.getResource())) {
	case IN:
	case BOTH:
		inputResource = theProcessedRequest.getResource();
		inputResourceId = theProcessedRequest.getId();
		break;
	case OUT:
		// For OUT operations only the ID is known at this point; the resource
		// is checked later in outgoingResponse(..)
		// inputResource = null;
		inputResourceId = theProcessedRequest.getId();
		break;
	case NONE:
		return;
	}

	RequestDetails requestDetails = theProcessedRequest.getRequestDetails();
	applyRulesAndFailIfDeny(theOperation, requestDetails, inputResource, inputResourceId, null);
}
/**
 * Authorizes the outgoing response. For container-style responses (bundles,
 * parameters) every contained resource is checked individually; for plain
 * responses the single resource is checked. Operations whose direction is
 * IN or NONE are allowed through without inspection.
 */
@Override
public boolean outgoingResponse(RequestDetails theRequestDetails, IBaseResource theResponseObject) {
	switch (determineOperationDirection(theRequestDetails.getRestOperationType(), null)) {
	case IN:
	case NONE:
		return true;
	case BOTH:
	case OUT:
		break;
	}

	FhirContext fhirContext = theRequestDetails.getServer().getFhirContext();
	List<IBaseResource> resources = Collections.emptyList();

	switch (theRequestDetails.getRestOperationType()) {
	case SEARCH_SYSTEM:
	case SEARCH_TYPE:
	case HISTORY_INSTANCE:
	case HISTORY_SYSTEM:
	case HISTORY_TYPE:
	case TRANSACTION:
	case GET_PAGE:
	case EXTENDED_OPERATION_SERVER:
	case EXTENDED_OPERATION_TYPE:
	case EXTENDED_OPERATION_INSTANCE: {
		// Container responses: unpack the contained resources and audit each one
		if (theResponseObject != null) {
			if (theResponseObject instanceof IBaseBundle) {
				resources = toListOfResourcesAndExcludeContainer(theResponseObject, fhirContext);
			} else if (theResponseObject instanceof IBaseParameters) {
				resources = toListOfResourcesAndExcludeContainer(theResponseObject, fhirContext);
			}
		}
		break;
	}
	default: {
		// Non-container response: audit the resource itself
		if (theResponseObject != null) {
			resources = Collections.singletonList(theResponseObject);
		}
		break;
	}
	}

	for (IBaseResource nextResponse : resources) {
		applyRulesAndFailIfDeny(theRequestDetails.getRestOperationType(), theRequestDetails, null, null, nextResponse);
	}

	return true;
}
// DSTU1 TagList responses are not supported by this interceptor
@CoverageIgnore
@Override
public boolean outgoingResponse(RequestDetails theRequestDetails, TagList theResponseObject) {
	throw failForDstu1();
}
// DSTU1 TagList responses are not supported by this interceptor
@CoverageIgnore
@Override
public boolean outgoingResponse(RequestDetails theRequestDetails, TagList theResponseObject, HttpServletRequest theServletRequest, HttpServletResponse theServletResponse)
		throws AuthenticationException {
	throw failForDstu1();
}
/** Audits a resource creation as a CREATE operation against the new resource. */
@Override
public void resourceCreated(RequestDetails theRequest, IBaseResource theResource) {
	handleUserOperation(theRequest, theResource, RestOperationTypeEnum.CREATE);
}
/** Audits a resource deletion as a DELETE operation against the deleted resource. */
@Override
public void resourceDeleted(RequestDetails theRequest, IBaseResource theResource) {
	handleUserOperation(theRequest, theResource, RestOperationTypeEnum.DELETE);
}
/**
 * Audits a resource update as an UPDATE operation. Both the previous version
 * (when available) and the new version must pass the rule chain.
 */
@Override
public void resourceUpdated(RequestDetails theRequest, IBaseResource theOldResource, IBaseResource theNewResource) {
	if (theOldResource != null) {
		handleUserOperation(theRequest, theOldResource, RestOperationTypeEnum.UPDATE);
	}
	handleUserOperation(theRequest, theNewResource, RestOperationTypeEnum.UPDATE);
}
/**
 * The default policy if no rules have been found to apply. Default value for this setting is {@link PolicyEnum#DENY}
 *
 * @param theDefaultPolicy
 *           The policy (must not be <code>null</code>)
 */
public void setDefaultPolicy(PolicyEnum theDefaultPolicy) {
	Validate.notNull(theDefaultPolicy, "theDefaultPolicy must not be null");
	myDefaultPolicy = theDefaultPolicy;
}
/**
 * Collects every populated resource inside a container response (bundle or
 * parameters), dropping the container itself when the terser returns it as the
 * first element.
 */
private List<IBaseResource> toListOfResourcesAndExcludeContainer(IBaseResource theResponseObject, FhirContext fhirContext) {
	List<IBaseResource> resources = fhirContext.newTerser().getAllPopulatedChildElementsOfType(theResponseObject, IBaseResource.class);
	boolean firstIsContainer = !resources.isEmpty() && resources.get(0) == theResponseObject;
	return firstIsContainer ? resources.subList(1, resources.size()) : resources;
}
/**
 * Builds the exception thrown by the DSTU1 {@code TagList} entry points, which
 * this interceptor does not support.
 */
private static UnsupportedOperationException failForDstu1() {
	// Fixed typo in the message ("supportd" -> "supported")
	return new UnsupportedOperationException("Use of this interceptor on DSTU1 servers is not supported");
}
// Which side(s) of a request/response exchange the rule chain must examine
private enum OperationExamineDirection {
	BOTH,
	IN,
	NONE,
	OUT,
}
/**
 * A single access-control decision: the policy outcome plus the rule (if any)
 * that produced it. Instances are immutable.
 */
public static class Verdict {

	private final IAuthRule myDecidingRule;
	private final PolicyEnum myDecision;

	/**
	 * @param theDecision the policy outcome
	 * @param theDecidingRule the rule that rendered the decision, or <code>null</code>
	 *           when the verdict comes from the interceptor's default policy
	 */
	public Verdict(PolicyEnum theDecision, IAuthRule theDecidingRule) {
		myDecision = theDecision;
		myDecidingRule = theDecidingRule;
	}

	public IAuthRule getDecidingRule() {
		return myDecidingRule;
	}

	public PolicyEnum getDecision() {
		return myDecision;
	}

	@Override
	public String toString() {
		ToStringBuilder b = new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE);
		// myDecidingRule is null for default-policy verdicts (see
		// applyRulesAndReturnDecision), so guard against an NPE here
		b.append("rule", myDecidingRule != null ? myDecidingRule.getName() : null);
		b.append("decision", myDecision.name());
		return b.build();
	}

}
}
|
eug48/hapi-fhir
|
hapi-fhir-server/src/main/java/ca/uhn/fhir/rest/server/interceptor/auth/AuthorizationInterceptor.java
|
Java
|
apache-2.0
| 12,075 |
/**
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is
* distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See
* the License for the specific language governing permissions and limitations under the License.
*/
package io.reactivex.internal.observers;
import java.util.concurrent.atomic.AtomicInteger;
import io.reactivex.internal.fuseable.QueueDisposable;
/**
 * An abstract QueueDisposable implementation, extending an AtomicInteger,
 * that defaults all unnecessary Queue methods to throw UnsupportedOperationException.
 * @param <T> the output value type
 */
public abstract class BasicIntQueueDisposable<T>
extends AtomicInteger
implements QueueDisposable<T> {

	private static final long serialVersionUID = -1001730202384742097L;

	// Queue insertion is not part of this type's contract: both offer overloads
	// unconditionally throw.
	@Override
	public final boolean offer(T e) {
		throw new UnsupportedOperationException("Should not be called");
	}

	@Override
	public final boolean offer(T v1, T v2) {
		throw new UnsupportedOperationException("Should not be called");
	}
}
|
benjchristensen/RxJava
|
src/main/java/io/reactivex/internal/observers/BasicIntQueueDisposable.java
|
Java
|
apache-2.0
| 1,391 |
package com.facepp.demo.mediacodec;
import android.opengl.EGL14;
import android.opengl.GLSurfaceView;
import android.opengl.Matrix;
import java.io.IOException;
/**
* Created by xiejiantao on 2017/10/27.
*/
public class MediaHelper {
    /**
     * muxer for audio/video recording
     */
    private MediaMuxerWrapper mMuxer;
    // camera frame dimensions; swapped when landscape so they describe the video
    private int mCameraWidth,mCameraHeight;// be video
    // surface whose GL thread is used to hand the EGL context to the encoder
    private GLSurfaceView mSurfaceView;
    // GL texture id of the camera frame, captured at startRecording()
    private int mTextureId;
    private MediaVideoEncoder mMediaVideoEncode;
    // model-view-projection matrix applied to recorded frames (identity rotated 270 degrees)
    private final float[] mMvpMatrix = new float[16];

    /**
     *
     * @param cameraWidth camera frame width
     * @param cameraHeight camera frame height
     * @param isLand landscape orientation (original comment: "横屏")
     */
    public MediaHelper(int cameraWidth, int cameraHeight, boolean isLand, GLSurfaceView surfaceView){
        if (!isLand) {
            mCameraWidth = cameraWidth;
            mCameraHeight = cameraHeight;
        } else {
            // landscape: swap width/height
            mCameraWidth = cameraHeight;
            mCameraHeight = cameraWidth;
        }
        mSurfaceView=surfaceView;
        Matrix.setIdentityM(mMvpMatrix,0);
        Matrix.rotateM(mMvpMatrix,0,270,0,0,1);
        // TODO: 2017/10/27
    }

    /**
     * start resorcing
     * This is a sample project and call this on UI thread to avoid being complicated
     * but basically this should be called on private thread because prepareing
     * of encoder is heavy work
     */
    public void startRecording(int textureId) {
        mTextureId=textureId;
        try {
            mMuxer = new MediaMuxerWrapper(".mp4");	// if you record audio only, ".m4a" is also OK.
            if (true) {
                // for video capturing
                // NOTE(review): constructed instance is not assigned - presumably the
                // encoder registers itself with the muxer in its constructor; confirm
                new MediaVideoEncoder(mMuxer, mMediaEncoderListener, mCameraWidth, mCameraHeight);
            }
            // if (true) {
            // // for audio capturing
            // new MediaAudioEncoder(mMuxer, mMediaEncoderListener);
            // }
            mMuxer.prepare();
            mMuxer.startRecording();
        } catch (final IOException e) {
            // NOTE(review): IOException is swallowed silently - recording fails
            // with no feedback; consider logging or surfacing the error
        }
    }

    /**
     * request stop recording
     */
    public void stopRecording() {
        if (mMuxer != null) {
            mMuxer.stopRecording();
            mMuxer = null;
            // you should not wait here
        }
    }

    /**
     * callback methods from encoder
     */
    private final MediaEncoder.MediaEncoderListener mMediaEncoderListener = new MediaEncoder.MediaEncoderListener() {
        @Override
        public void onPrepared(final MediaEncoder encoder) {
            if (encoder instanceof MediaVideoEncoder)
                setVideoEncoder((MediaVideoEncoder)encoder);
        }

        @Override
        public void onStopped(final MediaEncoder encoder) {
            if (encoder instanceof MediaVideoEncoder)
                setVideoEncoder(null);
        }
    };

    // Hands the GL context and texture to the encoder on the GL thread; the
    // encoder reference is only stored when non-null (clearing happens via the
    // null branch being a no-op for mMediaVideoEncode)
    private void setVideoEncoder(final MediaVideoEncoder encoder) {
        mSurfaceView.queueEvent(new Runnable() {
            @Override
            public void run() {
                synchronized (mSurfaceView) {
                    if (encoder != null) {
                        encoder.setEglContext(EGL14.eglGetCurrentContext(), mTextureId);
                        mMediaVideoEncode=encoder;
                    }
                }
            }
        });
    }

    // Notify the capture thread with caller-supplied texture and MVP matrices
    public void frameAvailable(float[] mStMatrix,float [] mMvpMatrix){
        if (mMediaVideoEncode != null) {
            // notify to capturing thread that the camera frame is available.
            // mVideoEncoder.frameAvailableSoon(mStMatrix);
            mMediaVideoEncode.frameAvailableSoon(mStMatrix, mMvpMatrix);
        }
    }

    // Notify with caller texture matrix and this helper's default MVP matrix
    public void frameAvailable(float[] mStMatrix){
        if (mMediaVideoEncode != null) {
            // notify to capturing thread that the camera frame is available.
            // mVideoEncoder.frameAvailableSoon(mStMatrix);
            mMediaVideoEncode.frameAvailableSoon(mStMatrix,mMvpMatrix);
        }
    }

    // Notify using the encoder's own matrices
    public void frameAvailable(){
        if (mMediaVideoEncode != null) {
            // notify to capturing thread that the camera frame is available.
            // mVideoEncoder.frameAvailableSoon(mStMatrix);
            mMediaVideoEncode.frameAvailableSoon();
        }
    }
}
|
FacePlusPlus/MegviiFacepp-Android-SDK
|
faceppdemo/src/main/java/com/facepp/demo/mediacodec/MediaHelper.java
|
Java
|
apache-2.0
| 4,231 |
#include <ap_int.h>
#include "spbits.h"
#include "deltas.h"
#define bw_num 2
// Finds the smallest theta delta among seg_ch*seg_ch candidates using a
// three-stage comparator tree (HLS-pipelined, II=1), returning the winning
// delta, its sign, its index, and a validity flag.
void delta::best_delta_seg_ch(
		ap_uint<bw_th> dth [seg_ch*seg_ch], //change this when nseg value is changed
		ap_uint<seg_ch*seg_ch> sth,//change this when nseg value is changed
		ap_uint<seg_ch*seg_ch> dvl,//change this when nseg value is changed
		ap_uint<bw_th> *bth, // smallest delta
		ap_uint<1> *bsg, // sign of bth
		ap_uint<1> *bvl, // valid flag
		ap_uint<2> *bnm // winner number//change this when bnum value is changed
)
{
#pragma HLS INTERFACE ap_ctrl_none port=return
#pragma HLS PIPELINE II=1
#pragma HLS ARRAY_PARTITION variable=dth complete dim=1

	const int nseg = seg_ch * seg_ch;
	ap_uint<1> one_val;
	int i;

	// stage-1 winners (pairwise minimum of adjacent inputs)
	ap_uint<bw_th> cmp1 [nseg/2];
#pragma HLS ARRAY_PARTITION variable=cmp1 complete dim=1
	// stage-2 winners
	ap_uint<bw_th> cmp2 [nseg/4];
#pragma HLS ARRAY_PARTITION variable=cmp2 complete dim=1
	// signs carried along with each stage's winners
	ap_uint<nseg/2> sig1;
	ap_uint<nseg/4> sig2;
	// original input indices carried along with each stage's winners
	ap_uint<bw_num> num1 [nseg/2];
#pragma HLS ARRAY_PARTITION variable=num1 complete dim=1
	ap_uint<bw_num> num2 [nseg/4];
#pragma HLS ARRAY_PARTITION variable=num2 complete dim=1

	ap_uint<bw_th> a_bth; // smallest delta
	ap_uint<1> a_bsg; // sign of bth
	ap_uint<1> a_bvl; // valid flag
	ap_uint<bw_num> a_bnm; // winner number
	ap_uint<seg_ch * seg_ch> a_dvl;

	a_dvl=dvl;

	// first comparator stage
	for (i = 0; i < nseg/2; i = i+1){
		// no valid flag analysis here
		// we need to take all thetas into account
		// differences from invalid thetas are set to max value, so they will not pass sorting
		if (dth[i*2] < dth[i*2+1]){
			cmp1[i] = dth[i*2];
			sig1[i] = sth[i*2];
			num1[i] = i*2;
		}
		else
		{
			cmp1[i] = dth[i*2+1];
			sig1[i] = sth[i*2+1];
			num1[i] = i*2+1;
		}
	}

	// second comparator stage
	for (i = 0; i < nseg/4; i = i+1){
		if (cmp1[i*2] < cmp1[i*2+1]){
			cmp2[i] = cmp1[i*2];
			sig2[i] = sig1[i*2];
			num2[i] = num1[i*2];
		}
		else
		{
			cmp2[i] = cmp1[i*2+1];
			sig2[i] = sig1[i*2+1];
			num2[i] = num1[i*2+1];
		}
	}

	// third comparator stage if needed (compile-time condition on nseg)
	if (nseg/4 > 1){
		if (cmp2[0] < cmp2[1]){
			a_bth = cmp2[0];
			a_bsg = sig2[0];
			a_bnm = num2[0];
		}
		else
		{
			a_bth = cmp2[1];
			a_bsg = sig2[1];
			a_bnm = num2[1];
		}
	}
	else
	{
		a_bth = cmp2[0];
		a_bsg = sig2[0];
		a_bnm = num2[0];
	}

	// output valid if one or more inputs are valid
	a_bvl= a_dvl.or_reduce();

	*bth=a_bth;
	*bsg=a_bsg;
	*bnm=a_bnm;
	*bvl=a_bvl;
}
|
nikhilghanathe/HLS-for-EMTF
|
sources_sim/best_delta.cpp
|
C++
|
apache-2.0
| 2,443 |
'use strict';
// Report overall code coverage from Istanbul coverage files.
// Implemented in ES5 for now
/* eslint no-var: 0 */
var _ = require('underscore');
var path = require('path');
var fs = require('fs');
var util = require('util');
var tty = require('tty');
var istanbul = require('istanbul');
var map = _.map;
var filter = _.filter;
var pairs = _.pairs;
var object = _.object;
var clone = _.clone;
var extend = _.extend;
var values = _.values;
var flatten = _.flatten;
var reduce = _.reduce;
var identity = _.identity;
var memoize = _.memoize;
/* eslint no-process-exit: 1 */
// Return the path of the Abacus module dir containing a file
var moddir = function(file) {
if(file === '.' || file === '/') return undefined;
if(/cf-abacus.*/.test(path.basename(file))) return file;
return moddir(path.dirname(file));
};
// Convert the covered file paths in the given coverage info to relative paths
// to the original source files. `root` is the root package.json object (its
// dependencies/devDependencies map module names to source dirs); `cov` is an
// Istanbul coverage object keyed by covered file path.
var sources = function(root, cov) {
  return object(filter(map(pairs(cov), function(file) {
    // Determine the build path and the name of the module containing each
    // covered file
    var mdir = moddir(file[0]);
    var mod = path.basename(mdir);

    // Determine the path to the module source directory
    var sdir = root.dependencies[mod] || root.devDependencies[mod];
    if(!sdir)
      // Unknown module: keep the covered path as-is
      return [file[0], file[1]];

    // Return a covered object with a relative path to the original source
    // of the covered file; maps the compiled lib/ path back to src/
    var lib = path.join(sdir, file[0].substr(mdir.length + 1)).split(':').reverse()[0].split('/');
    var l = lib.lastIndexOf('lib');
    var src = lib.slice(0, l).concat(['src']).concat(lib.slice(l + 1)).join('/');
    return [src, extend(clone(file[1]), { path: src })];

  }), function(file) { return file[1]; }));
};
// Return a list of all the individual json coverage files for our modules:
// the top-level .coverage/coverage.json plus one per installed module,
// keeping only the files that actually exist. Always calls back with
// undefined as the error (a missing node_modules dir is tolerated).
var covfiles = function(cb) {
  fs.readdir('node_modules', function(err, files) {
    cb(undefined, filter([path.join('.coverage', 'coverage.json')].concat(err ? [] :
      map(files, function(file) {
        return path.join('node_modules', file, '.coverage', 'coverage.json');
      })), fs.existsSync));
  });
};
// Return a coverage collector loaded with all the given files
var collect = function(root, cb) {
  covfiles(function(err, files) {
    // Propagate the error and stop: without the return we would fall
    // through and try to iterate over an undefined file list
    if(err) return cb(err);
    var collector = new istanbul.Collector();
    map(files, function(file) {
      collector.add(sources(root, JSON.parse(fs.readFileSync(file))));
    });
    cb(undefined, collector);
  });
};
// Compute overall line and statement coverage percentages from a combined
// Istanbul coverage object. Returns { l, s } percentages in [0, 100].
var percentages = function(coverage) {
  // Count overall covered and totals of lines, statements and branches
  var t = reduce(values(coverage), function(a, cov) {
    var l = values(cov.l);
    var s = values(cov.s);
    var b = flatten(values(cov.b));
    return {
      l: { covered: a.l.covered + filter(l, identity).length, total: a.l.total + l.length },
      s: { covered: a.s.covered + filter(s, identity).length, total: a.s.total + s.length },
      b: { covered: a.b.covered + filter(b, identity).length, total: a.b.total + b.length }};
  }, { l: { covered: 0, total: 0 }, s: { covered: 0, total: 0 }, b: { covered: 0, total: 0 }});

  // Return the coverage percentages; the `|| 1` guards divide-by-zero for
  // empty coverage, and branch counts are deliberately excluded (commented out)
  return { l: t.l.covered / (t.l.total || 1) * 100, s: (t.s.covered + /*t.b.covered*/ 0) / (t.s.total + /*t.b.total*/ 0 || 1) * 100 };
};
// Colorify the report on a tty or when the command line says --colors,
// or when env variable COVERAGE_COLORS is configured
var colors = memoize(function() {
  var enabled = function(c) { return c !== undefined && c !== '0' && c !== 'false' && c !== 'disabled' && c !== 'no'; };
  // tty.isatty expects a file descriptor, not a stream object; passing
  // process.stdout always yielded false, so use stdout's fd (1) instead
  return tty.isatty(1) || _.contains(process.argv, '--colors') || enabled(process.env.COVERAGE_COLORS);
});
// Report a failure message on stderr and exit the process with status 1
var fail = function(msg) {
  process.stderr.write(msg);
  process.exit(1);
};
// Report overall code coverage from Istanbul coverage files: combines the
// per-module coverage reports, writes LCOV + JSON outputs under .coverage,
// and prints the overall line/statement percentages (green at 100%, red below).
var runCLI = function() {
  // Load the root package.json from the current directory
  var root = JSON.parse(fs.readFileSync('package.json'));

  // Collect all the individual json coverage reports for our modules
  collect(root, function(err, collector) {
    if(err) fail(util.format('Couldn\'t collect coverage files', err));

    // Combine all the individual reports and write overall coverage
    // reports in LCOV and JSON formats
    var reporter = new istanbul.Reporter(undefined, '.coverage');
    reporter.addAll(['lcovonly', 'json']);
    reporter.write(collector, false, function(err) {
      if(err) fail(util.format('Couldn\'t write coverage reports', err, '\n'));

      // Compute and report overall line and statement coverage
      var percent = percentages(collector.getFinalCoverage());
      var fullcov = percent.l === 100 && percent.s === 100;

      // Print overall code coverage percentages in green for 100%
      // coverage and red under 100%
      var color = colors() ? fullcov ? '\u001b[32m' : '\u001b[31m' : '';
      var reset = colors() ? '\u001b[0m' : '';
      process.stdout.write(util.format('\n%sOverall coverage lines %d\% statements %d\%%s\n\n', color, percent.l.toFixed(2), percent.s.toFixed(2), reset));
      process.exit(0);
    });
  });
};
// Export our public functions
module.exports.runCLI = runCLI;
|
stefanschneider/cf-abacus
|
tools/coverage/src/index.js
|
JavaScript
|
apache-2.0
| 5,626 |
// Copyright 2007-2015 Chris Patterson, Dru Sellers, Travis Smith, et. al.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
namespace MassTransit.AzureServiceBusTransport.Contexts
{
using System;
using System.Threading.Tasks;
using Microsoft.ServiceBus;
using Microsoft.ServiceBus.Messaging;
    public interface ConnectionContext :
        PipeContext
    {
        /// <summary>
        /// The messaging factory initialized for the service bus
        /// </summary>
        Task<MessagingFactory> MessagingFactory { get; }
        /// <summary>
        /// The messaging factory used for session-based messaging.
        /// (NOTE(review): the summary here was a copy-paste of
        /// <see cref="MessagingFactory"/>'s; this wording is inferred from the
        /// member name -- confirm against the implementation.)
        /// </summary>
        Task<MessagingFactory> SessionMessagingFactory { get; }
        /// <summary>
        /// The namespace manager for the service bus
        /// </summary>
        Task<NamespaceManager> NamespaceManager { get; }
        /// <summary>
        /// The namespace manager at the root of the namespace
        /// </summary>
        Task<NamespaceManager> RootNamespaceManager { get; }
        /// <summary>
        /// Return the address for the specified queue
        /// </summary>
        /// <param name="queueDescription">The queue description</param>
        /// <returns>The address of the queue</returns>
        Uri GetQueueAddress(QueueDescription queueDescription);
        /// <summary>
        /// Return the path of the queue for this connection
        /// </summary>
        /// <param name="queueDescription">The queue description</param>
        /// <returns>The queue path</returns>
        string GetQueuePath(QueueDescription queueDescription);
    }
}
|
D3-LucaPiombino/MassTransit
|
src/MassTransit.AzureServiceBusTransport/Contexts/ConnectionContext.cs
|
C#
|
apache-2.0
| 2,155 |
package in.notwork.calculator;
import java.io.Serializable;
import java.util.Objects;
/**
 * A mutable latitude/longitude pair in decimal degrees.
 * <p>
 * Values are stored as plain doubles and are not range-validated here.
 * NOTE(review): the class implements Serializable without declaring a
 * serialVersionUID, so the default computed UID changes whenever the class
 * changes -- consider declaring one explicitly.
 *
 * @author rishabh.
 */
public class GeoPoint implements Serializable {
    private double latitude;
    private double longitude;
    /** Creates a point at (0.0, 0.0). */
    public GeoPoint() {
        super();
    }
    /**
     * Creates a point with the given coordinates.
     *
     * @param latitude  latitude in decimal degrees
     * @param longitude longitude in decimal degrees
     */
    public GeoPoint(double latitude, double longitude) {
        this();
        this.latitude = latitude;
        this.longitude = longitude;
    }
    public double getLatitude() {
        return latitude;
    }
    public void setLatitude(double latitude) {
        this.latitude = latitude;
    }
    public double getLongitude() {
        return longitude;
    }
    public void setLongitude(double longitude) {
        this.longitude = longitude;
    }
    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (!(o instanceof GeoPoint)) return false;
        GeoPoint geoPoint = (GeoPoint) o;
        // Double.compare preserves the semantics of the previous boxed
        // Objects.equals comparison (NaN equals NaN, +0.0 != -0.0) while
        // avoiding Double autoboxing on every call.
        return Double.compare(latitude, geoPoint.latitude) == 0 &&
                Double.compare(longitude, geoPoint.longitude) == 0;
    }
    @Override
    public int hashCode() {
        return Objects.hash(latitude, longitude);
    }
    @Override
    public String toString() {
        return "GeoPoint{" +
                "latitude=" + latitude +
                ", longitude=" + longitude +
                '}';
    }
}
|
rishabh9/distance-calculator
|
src/main/java/in/notwork/calculator/GeoPoint.java
|
Java
|
apache-2.0
| 1,339 |
/**
* Copyright 2015-2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ch.rasc.bsoncodec.model;
import javax.annotation.Nullable;
import javax.lang.model.element.VariableElement;
import org.immutables.value.Value;
import ch.rasc.bsoncodec.codegen.CodeGen;
@Value.Immutable
public abstract class FieldModel implements Comparable<FieldModel> {
	/** The annotated field element this codec model was derived from. */
	public abstract VariableElement varEl();
	/** Document key name; defaults to the Java field's simple name. */
	@Value.Default
	public String name() {
		return this.varEl().getSimpleName().toString();
	}
	/** Encode/decode position of the field (see {@link #compareTo}). */
	public abstract int order();
	/** Fully qualified name of a custom codec, or null to use the generated one. */
	public abstract @Nullable String customCodecName();
	/** Code generator that emits the encode/decode statements for this field. */
	public abstract CodeGen codeGen();
	/** Whether a null field value is written to the document (default: omitted). */
	@Value.Default
	public boolean storeNullValue() {
		return false;
	}
	/** Whether an empty collection is written to the document (default: omitted). */
	@Value.Default
	public boolean storeEmptyCollection() {
		return false;
	}
	/** Skip the encode-side null check; safe for the id field and primitives. */
	@Value.Default
	public boolean disableEncodeNullCheck() {
		return this.idModel() != null || this.varEl().asType().getKind().isPrimitive();
	}
	/** Skip the decode-side null check; safe for the id field. */
	@Value.Default
	public boolean disableDecodeNullCheck() {
		return this.idModel() != null;
	}
	/** Suppress the explicit set-to-null statement on decode (default: true). */
	@Value.Default
	public boolean disableSetNullStatement() {
		return true;
	}
	// NOTE(review): presumably a fixed length for array-typed fields
	// (0 = not fixed) -- confirm against the annotation processor.
	@Value.Default
	public int fixedArray() {
		return 0;
	}
	/** Id metadata when this field is the document id, otherwise null. */
	public abstract @Nullable IdModel idModel();
	/** Setter method name used by generated decode code. */
	public abstract String methodNameSet();
	/** Getter method name used by generated encode code. */
	public abstract String methodNameGet();
	/** Orders fields by their declared {@link #order()} for stable output. */
	@Override
	public int compareTo(FieldModel o) {
		return Integer.compare(order(), o.order());
	}
}
|
ralscha/bsoncodec-apt
|
src/main/java/ch/rasc/bsoncodec/model/FieldModel.java
|
Java
|
apache-2.0
| 1,920 |
/**
 * Trim leading and trailing whitespace.
 * @return {String} Returns the trimmed string
 */
String.prototype.trim = function() {
    // Single-pass form: one alternation handles both ends of the string.
    return this.replace(/^\s+|\s+$/g, '');
};
/**
* Creates a new string utilizing placeholders defined in the source string
* @param {Object} values Array or object whose indices or properties correspond to placeholder names
* @exception {KeyNotFoundError} Key or property not found
* @exception {FormatError} Format was invalid
* @return {String} Returns formatted results
* @remarks Placeholders are defined by placing text inside curly brackets. To insert literal curly brackets, simply use 2 consecutive curly brackets.
* The text inside the curly brackets represents a property or index to obtain from the 'values' parameter.
* @example var values = { 1: "First", 2: "Second" };
* return "One is {1} and {{Two}} is {{{2}}}".toFormattedString(values); // results in "One is First and {Two} is {Second}"
*/
String.prototype.toFormattedString = function(values) {
var formatStr = String(this);
var result = '';
var re = /^([^{}]*)(\{+|\}+)(.*?)$/;
var rr = re.exec(formatStr);
var isInPlaceholder = false;
var placeHolderKey = '';
var position = 0;
while (rr != null) {
formatStr = rr[3];
var placeHolderLen = rr[2].length % 2;
if (isInPlaceholder) {
if (placeHolderLen == 1) {
if (rr[2].substr(0, 1) == '{')
throw new FormatError(undefined, "Unexpected opening brace", String(this), position + rr[1].length);
isInPlaceholder = false;
placeHolderKey += rr[1];
if (values === undefined || values === null)
throw new KeyNotFoundError(undefined, "values were not defined", placeHolderKey, String(this), position + rr[1].length);
var v;
try {
v = values[placeHolderKey];
} catch (err) {
throw new KeyNotFoundError(undefined, undefined, placeHolderKey, String(this), position + rr[1].length, err)
}
if (v === undefined)
throw new KeyNotFoundError(undefined, undefined, placeHolderKey, String(this), position + rr[1].length);
result += ((v === null) ? "" : String(v)) + rr[2].substr(0, (rr[2].length - placeHolderLen) / 2);
} else
placeHolderKey += rr[1] + rr[2].substr(0, (rr[2].length - placeHolderLen) / 2);
} else {
result += rr[1] + rr[2].substr(0, (rr[2].length - placeHolderLen) / 2);
if (placeHolderLen == 1) {
if (rr[2].substr(0, 1) == '}')
throw new FormatError(undefined, "Unexpected closing brace", String(this), position + rr[1].length);
isInPlaceholder = true;
}
}
position += r[1].length + r[2].length;
rr = re.exec(formatStr);
}
if (isInPlaceholder)
throw new FormatError(undefined, "Closing brace not found", String(this), position);
return result + formatStr;
}
|
lerwine/JSCookbook
|
src/TypeExtensions/StringExtensions.js
|
JavaScript
|
apache-2.0
| 2,780 |
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
using Google.Cloud.EntityFrameworkCore.Spanner.Extensions;
using Google.Cloud.EntityFrameworkCore.Spanner.Extensions.Internal;
using Google.Cloud.EntityFrameworkCore.Spanner.Infrastructure;
using Google.Cloud.EntityFrameworkCore.Spanner.Tests.MigrationTests.Models;
using Grpc.Core;
using Microsoft.EntityFrameworkCore;
using Xunit;
namespace Google.Cloud.EntityFrameworkCore.Spanner.Tests.MigrationTests
{
    /// <summary>
    /// Sample db context wired to a fake Spanner connection string so the DDL
    /// script can be generated without connecting to a real database.
    /// </summary>
    internal class MockMigrationSampleDbContext : SpannerMigrationSampleDbContext
    {
        private readonly string _connectionString;
        public MockMigrationSampleDbContext() : this("Data Source=projects/p1/instances/i1/databases/d1;")
        {
        }
        public MockMigrationSampleDbContext(string connectionString)
        {
            _connectionString = connectionString;
        }
        protected override void OnConfiguring(DbContextOptionsBuilder optionsBuilder)
        {
            if (!optionsBuilder.IsConfigured)
            {
                // Model validation is disabled and insecure credentials are used
                // because no Spanner server is available in this offline test.
                optionsBuilder
#pragma warning disable EF1001
                    .UseSpanner(_connectionString, _ => SpannerModelValidationConnectionProvider.Instance.EnableDatabaseModelValidation(false), ChannelCredentials.Insecure)
#pragma warning restore EF1001
                    .UseMutations(MutationUsage.Never)
                    .UseLazyLoadingProxies();
            }
        }
    }
    /// <summary>
    /// Verifies that the EF Core model for the migration sample produces the
    /// expected Spanner DDL by comparing the generated create script against a
    /// golden copy.
    /// </summary>
    public class GenerateCreateScriptTest
    {
        [Fact]
        public void Generate_Create_Script()
        {
            using var db = new MockMigrationSampleDbContext();
            var generatedScript = db.Database.GenerateCreateScript();
            // Golden script: compared byte-for-byte (including whitespace and
            // statement order), so any model change must be mirrored here verbatim.
            var script = @"CREATE TABLE `Singers` (
    `SingerId` INT64 NOT NULL,
    `FirstName` STRING(200),
    `LastName` STRING(200) NOT NULL,
    `FullName` STRING(400) NOT NULL AS (COALESCE(FirstName || ' ', '') || LastName) STORED,
    `BirthDate` DATE,
    `Picture` BYTES(MAX)
)PRIMARY KEY (`SingerId`)
CREATE TABLE `TableWithAllColumnTypes` (
    `ColInt64` INT64 NOT NULL,
    `ColFloat64` FLOAT64,
    `ColNumeric` NUMERIC,
    `ColBool` BOOL,
    `ColString` STRING(100),
    `ColStringMax` STRING(MAX),
    `ColChar` STRING(1),
    `ColBytes` BYTES(100),
    `ColBytesMax` BYTES(MAX),
    `ColDate` DATE,
    `ColTimestamp` TIMESTAMP,
    `ColCommitTS` TIMESTAMP OPTIONS (allow_commit_timestamp=true) ,
    `ColInt64Array` ARRAY<INT64>,
    `ColFloat64Array` ARRAY<FLOAT64>,
    `ColNumericArray` ARRAY<NUMERIC>,
    `ColBoolArray` ARRAY<BOOL>,
    `ColStringArray` ARRAY<STRING(100)>,
    `ColStringMaxArray` ARRAY<STRING(MAX)>,
    `ColBytesArray` ARRAY<BYTES(100)>,
    `ColBytesMaxArray` ARRAY<BYTES(MAX)>,
    `ColDateArray` ARRAY<DATE>,
    `ColTimestampArray` ARRAY<TIMESTAMP>,
    `ColGuid` STRING(36),
    `ColComputed` STRING(MAX) AS (ARRAY_TO_STRING(ColStringArray, ',')) STORED
)PRIMARY KEY (`ColInt64`)
CREATE TABLE `Venues` (
    `Code` STRING(10) NOT NULL,
    `Name` STRING(100),
    `Active` BOOL NOT NULL,
    `Capacity` INT64,
    `Ratings` ARRAY<FLOAT64>
)PRIMARY KEY (`Code`)
CREATE TABLE `Albums` (
    `AlbumId` INT64 NOT NULL,
    `Title` STRING(100) NOT NULL,
    `ReleaseDate` DATE,
    `SingerId` INT64 NOT NULL,
    `MarketingBudget` INT64,
    CONSTRAINT `FK_Albums_Singers` FOREIGN KEY (`SingerId`) REFERENCES `Singers` (`SingerId`),
)PRIMARY KEY (`AlbumId`)
CREATE TABLE `Concerts` (
    `VenueCode` STRING(10) NOT NULL,
    `StartTime` TIMESTAMP NOT NULL,
    `SingerId` INT64 NOT NULL,
    `Title` STRING(200),
    CONSTRAINT `FK_Concerts_Singers` FOREIGN KEY (`SingerId`) REFERENCES `Singers` (`SingerId`),
    CONSTRAINT `FK_Concerts_Venues` FOREIGN KEY (`VenueCode`) REFERENCES `Venues` (`Code`),
)PRIMARY KEY (`VenueCode`, `StartTime`, `SingerId`)
CREATE TABLE `Tracks` (
    `AlbumId` INT64 NOT NULL,
    `TrackId` INT64 NOT NULL,
    `Title` STRING(200) NOT NULL,
    `Duration` NUMERIC,
    `LyricsLanguages` ARRAY<STRING(2)>,
    `Lyrics` ARRAY<STRING(MAX)>,
    CONSTRAINT `Chk_Languages_Lyrics_Length_Equal` CHECK (ARRAY_LENGTH(LyricsLanguages) = ARRAY_LENGTH(Lyrics)),
)PRIMARY KEY (`AlbumId`, `TrackId`),
INTERLEAVE IN PARENT `Albums` ON DELETE NO ACTION
CREATE TABLE `Performances` (
    `VenueCode` STRING(10) NOT NULL,
    `SingerId` INT64 NOT NULL,
    `StartTime` TIMESTAMP NOT NULL,
    `ConcertStartTime` TIMESTAMP NOT NULL,
    `AlbumId` INT64 NOT NULL,
    `TrackId` INT64 NOT NULL,
    `Rating` FLOAT64,
    CONSTRAINT `FK_Performances_Singers` FOREIGN KEY (`SingerId`) REFERENCES `Singers` (`SingerId`),
    CONSTRAINT `FK_Performances_Tracks` FOREIGN KEY (`AlbumId`, `TrackId`) REFERENCES `Tracks` (`AlbumId`, `TrackId`),
    CONSTRAINT `FK_Performances_Concerts` FOREIGN KEY (`VenueCode`, `ConcertStartTime`, `SingerId`) REFERENCES `Concerts` (`VenueCode`, `StartTime`, `SingerId`),
)PRIMARY KEY (`VenueCode`, `SingerId`, `StartTime`)
CREATE INDEX `AlbumsByAlbumTitle2` ON `Albums` (`Title`) STORING (`MarketingBudget`, `ReleaseDate`)
CREATE INDEX `Idx_Singers_FullName` ON `Singers` (`FullName`)
CREATE NULL_FILTERED INDEX `IDX_TableWithAllColumnTypes_ColDate_ColCommitTS` ON `TableWithAllColumnTypes` (`ColDate`, `ColCommitTS`)
CREATE UNIQUE INDEX `Idx_Tracks_AlbumId_Title` ON `Tracks` (`TrackId`, `Title`)
";
            Assert.Equal(script, generatedScript);
        }
    }
}
|
googleapis/dotnet-spanner-entity-framework
|
Google.Cloud.EntityFrameworkCore.Spanner.Tests/MigrationTests/GenerateCreateScriptTest.cs
|
C#
|
apache-2.0
| 5,850 |
/**
* <copyright>
*
* Copyright (c) 2010 SAP AG.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Reiner Hille-Doering (SAP AG) - initial API and implementation and/or initial documentation
*
* </copyright>
*/
package org.eclipse.bpmn2.provider;
import java.util.Collection;
import java.util.List;
import org.eclipse.bpmn2.Bpmn2Package;
import org.eclipse.bpmn2.FormValue;
import org.eclipse.emf.common.notify.AdapterFactory;
import org.eclipse.emf.common.notify.Notification;
import org.eclipse.emf.edit.provider.ComposeableAdapterFactory;
import org.eclipse.emf.edit.provider.IEditingDomainItemProvider;
import org.eclipse.emf.edit.provider.IItemLabelProvider;
import org.eclipse.emf.edit.provider.IItemPropertyDescriptor;
import org.eclipse.emf.edit.provider.IItemPropertySource;
import org.eclipse.emf.edit.provider.IStructuredItemContentProvider;
import org.eclipse.emf.edit.provider.ITreeItemContentProvider;
import org.eclipse.emf.edit.provider.ItemPropertyDescriptor;
import org.eclipse.emf.edit.provider.ViewerNotification;
/**
* This is the item provider adapter for a {@link org.eclipse.bpmn2.FormValue} object.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
public class FormValueItemProvider extends BaseElementItemProvider implements
		IEditingDomainItemProvider, IStructuredItemContentProvider,
		ITreeItemContentProvider, IItemLabelProvider, IItemPropertySource {
	// NOTE: EMF-generated item provider (@generated). Prefer regenerating from
	// the model over hand edits; hand edits are lost on regeneration unless
	// the method is marked "@generated NOT".
	/**
	 * This constructs an instance from a factory and a notifier.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public FormValueItemProvider(AdapterFactory adapterFactory) {
		super(adapterFactory);
	}
	/**
	 * This returns the property descriptors for the adapted class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public List<IItemPropertyDescriptor> getPropertyDescriptors(Object object) {
		// Lazily built once and cached; the superclass fills in the common
		// BaseElement descriptors before the FormValue-specific ones.
		if (itemPropertyDescriptors == null) {
			super.getPropertyDescriptors(object);
			addValueIdPropertyDescriptor(object);
			addValueNamePropertyDescriptor(object);
		}
		return itemPropertyDescriptors;
	}
	/**
	 * This adds a property descriptor for the Value Id feature.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	protected void addValueIdPropertyDescriptor(Object object) {
		itemPropertyDescriptors.add(createItemPropertyDescriptor(
				((ComposeableAdapterFactory) adapterFactory)
						.getRootAdapterFactory(),
				getResourceLocator(),
				getString("_UI_FormValue_valueId_feature"),
				getString("_UI_PropertyDescriptor_description",
						"_UI_FormValue_valueId_feature", "_UI_FormValue_type"),
				Bpmn2Package.Literals.FORM_VALUE__VALUE_ID, true, false, false,
				ItemPropertyDescriptor.GENERIC_VALUE_IMAGE, null, null));
	}
	/**
	 * This adds a property descriptor for the Value Name feature.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	protected void addValueNamePropertyDescriptor(Object object) {
		itemPropertyDescriptors
				.add(createItemPropertyDescriptor(
						((ComposeableAdapterFactory) adapterFactory)
								.getRootAdapterFactory(),
						getResourceLocator(),
						getString("_UI_FormValue_valueName_feature"),
						getString("_UI_PropertyDescriptor_description",
								"_UI_FormValue_valueName_feature",
								"_UI_FormValue_type"),
						Bpmn2Package.Literals.FORM_VALUE__VALUE_NAME, true,
						false, false,
						ItemPropertyDescriptor.GENERIC_VALUE_IMAGE, null, null));
	}
	/**
	 * This returns FormValue.gif.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public Object getImage(Object object) {
		return overlayImage(object,
				getResourceLocator().getImage("full/obj16/FormValue"));
	}
	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	protected boolean shouldComposeCreationImage() {
		return true;
	}
	/**
	 * This returns the label text for the adapted class.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public String getText(Object object) {
		// Falls back to the bare type name when the element has no id.
		String label = ((FormValue) object).getId();
		return label == null || label.length() == 0 ? getString("_UI_FormValue_type")
				: getString("_UI_FormValue_type") + " " + label;
	}
	/**
	 * This handles model notifications by calling {@link #updateChildren} to update any cached
	 * children and by creating a viewer notification, which it passes to {@link #fireNotifyChanged}.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public void notifyChanged(Notification notification) {
		updateChildren(notification);
		// valueId/valueName changes only require a label refresh
		// (ViewerNotification(..., contentRefresh=false, labelUpdate=true)).
		switch (notification.getFeatureID(FormValue.class)) {
		case Bpmn2Package.FORM_VALUE__VALUE_ID:
		case Bpmn2Package.FORM_VALUE__VALUE_NAME:
			fireNotifyChanged(new ViewerNotification(notification,
					notification.getNotifier(), false, true));
			return;
		}
		super.notifyChanged(notification);
	}
	/**
	 * This adds {@link org.eclipse.emf.edit.command.CommandParameter}s describing the children
	 * that can be created under this object.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	protected void collectNewChildDescriptors(
			Collection<Object> newChildDescriptors, Object object) {
		super.collectNewChildDescriptors(newChildDescriptors, object);
	}
}
|
adbrucker/SecureBPMN
|
designer/src/org.activiti.designer.model.edit/src/org/eclipse/bpmn2/provider/FormValueItemProvider.java
|
Java
|
apache-2.0
| 5,555 |
///done, not debug
// Symbol (matrix) size in modules for Micro QR versions M1..M4; index 0 unused.
__g_qrmCmatsize__ = [
    undefined,
    11, 13, 15, 17
];
///
// Data codewords per version (index 1..4) for each error-correction level.
// 0 marks level/version combinations that do not exist; index 0 unused.
__g_qrmCdatacodewords__ = {
    L:[
        undefined,
        3, 5, 11, 16
    ],
    M:[
        undefined,
        0, 4, 9, 14
    ],
    Q:[
        undefined,
        0, 0, 0, 10
    ]
};
///
// Placeholder: total codewords per version/level (not yet populated).
__g_qrmCtotalcodewords__ = {
    L:[
        undefined,
    ],
    M:[
        undefined,
    ],
    Q:[
        undefined,
    ]
};
///
// Placeholder: error-correction codewords per version/level (not yet populated).
__g_qrmCeccodewords__ = {
    L:[
        undefined,
    ],
    M:[
        undefined,
    ],
    Q:[
        undefined,
    ]
};
///notyet
// Placeholder: maximum character capacity per version/level for the four
// encoding modes; all zeros until filled in.
__g_qrmCdatalen__ = {
    // number of characters [num,alnum,8bit,kanji]
    L:[
        undefined,
        [ 0, 0, 0, 0],// 1
        [ 0, 0, 0, 0],// 2
        [ 0, 0, 0, 0],// 3
        [ 0, 0, 0, 0] // 4
    ],
    M:[
        undefined,
        [ 0, 0, 0, 0],// 1
        [ 0, 0, 0, 0],// 2
        [ 0, 0, 0, 0],// 3
        [ 0, 0, 0, 0] // 4
    ],
    Q:[
        undefined,
        [ 0, 0, 0, 0],// 1
        [ 0, 0, 0, 0],// 2
        [ 0, 0, 0, 0],// 3
        [ 0, 0, 0, 0] // 4
    ]
};
///
// Error-correction block structure per version, keyed by level.
__g_qrmCsegments__ = [
    //[[repeat,totalCodewords,dataCodewords,correctableCodewords],...]
    undefined,
    { L:[[ 1, 5, 3, 0]] },//1
    { L:[[ 1, 10, 5, 1]],
      M:[[ 1, 10, 4, 2]] },//2
    { L:[[ 1, 17, 11, 2]],
      M:[[ 1, 17, 9, 4]] },//3
    { L:[[ 1, 24, 16, 3]],
      M:[[ 1, 24, 14, 5]],
      Q:[[ 1, 24, 10, 7]] } //4
];
///
// Type number per (level, version); undefined where the combination is
// unsupported by the Micro QR specification.
__g_qrmCtypenumbers__ = {
    L:[
        undefined,
        0, 1, 3, 5
    ],
    M:[
        undefined,
        undefined, 2, 4, 6
    ],
    Q:[
        undefined,
        undefined, undefined, undefined, 7
    ]
};
///
// Symbol metadata for a Micro QR code of the given version (1..4) and
// error-correction level ('L', 'M' or 'Q'). Defaults: version 1, level 'M'.
QrMCSymbolInfo = __extends(Object,
    // constructor
    function(version, eclevel) {
        __constructSuper(this);
        if (version == undefined) { version = 1; }
        if (eclevel == undefined) { eclevel = "M"; }
        this.version = version;
        // NOTE(review): the trailing comment claims L<M<Q but the mapping
        // gives M the smallest code -- presumably these are format-info bit
        // patterns rather than an ordering; confirm before relying on it.
        this.eclevel = {L:1,M:0,Q:3}[eclevel]; // L<M<Q
        this.matrixSize = __g_qrmCmatsize__ [version];
        this.dataCodewords = __g_qrmCdatacodewords__[eclevel][version];
        this.segments = __g_qrmCsegments__ [version][eclevel];
        this.typeNumber = __g_qrmCtypenumbers__ [eclevel][version];
    },
    // methods
    function(__this__) {
        // Highest Micro QR version supported by this implementation.
        __this__.MAXVER = 4;
    });
|
qnq777/matrixcode.js-legacy
|
public_html/js/qrcode/qrmcdata.js
|
JavaScript
|
apache-2.0
| 2,139 |
<?php
/*
* Copyright 2014 Stefan Lorenz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
namespace stlrnz\json\converter\exception;
/**
* ConverterException
*
* @author Stefan Lorenz
* @license Apache License 2.0
* @license http://www.apache.org/licenses/LICENSE-2.0
*
* @package stlrnz\json\converter\exception
*/
class ConverterException extends \Exception
{
    // Intentionally empty: exists only so converter failures can be caught
    // separately from generic \Exception instances.
}
|
stlrnz/PHPJsonConverter
|
lib/stlrnz/json/converter/exception/ConverterException.php
|
PHP
|
apache-2.0
| 886 |
/*
* Licensed to the Ted Dunning under one or more contributor license
* agreements. See the NOTICE file that may be
* distributed with this work for additional information
* regarding copyright ownership. Ted Dunning licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.mapr.anomaly;
import com.google.common.base.Preconditions;
import java.io.BufferedReader;
import java.io.IOException;
import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Objects;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Represents an event from a log.
*/
public class Event implements Comparable<Event> {
private static final DateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSXXX");
private static final Pattern format = Pattern.compile("\\[(.*)] /(.+)[?&]user=(.*) (.*)\\.(.*)\\.(.*)\\.(.*)");
private final int uid;
private final long time;
private final int ip;
private final String op;
private Event(int uid, long time, int ip, String op) {
Preconditions.checkNotNull(op);
this.uid = uid;
this.time = time;
this.ip = ip;
this.op = op;
}
public static Event read(BufferedReader in) throws IOException {
in.mark(1000);
String line = in.readLine();
if (line == null) {
return null;
}
try {
Matcher m = format.matcher(line);
if (m.matches()) {
int i = 1;
Date d = df.parse(m.group(i++));
String op = m.group(i++);
int uid = Integer.parseInt(m.group(i++), 16);
int ip = Integer.parseInt(m.group(i++)) << 24;
ip += Integer.parseInt(m.group(i++)) << 16;
ip += Integer.parseInt(m.group(i++)) << 8;
ip += Integer.parseInt(m.group(i));
return new Event(uid, d.getTime(), ip, op);
} else {
in.reset();
return null;
}
} catch (ParseException | NumberFormatException e) {
in.reset();
return null;
}
}
public int getIp() {
return ip;
}
public long getTime() {
return time;
}
@SuppressWarnings("WeakerAccess")
public int getUid() {
return uid;
}
public String getOp() {
return op;
}
@Override
public int compareTo(Event o) {
int r = Integer.compare(uid, o.uid);
if (r != 0) {
return r;
}
r = Long.compare(time, o.time);
if (r != 0) {
return r;
}
r = Integer.compare(ip, o.ip);
return r;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof Event)) return false;
Event event = (Event) o;
if (ip != event.ip) return false;
if (time != event.time) return false;
if (uid != event.uid) return false;
return op.equals(event.op);
}
@Override
public int hashCode() {
int result = uid;
result = 31 * result + (int) (time ^ (time >>> 32));
result = 31 * result + ip;
result = 31 * result + op.hashCode();
return result;
}
static class EventFormatException extends Throwable {
@SuppressWarnings("unused")
public EventFormatException(String line) {
super(String.format("Invalid event format found: \"%s\"", line));
}
}
}
|
tdunning/log-synth
|
src/main/java/com/mapr/anomaly/Event.java
|
Java
|
apache-2.0
| 4,105 |
# -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
#########################################################################
## This is a samples controller
## - index is the default action of any application
## - user is required for authentication and authorization
## - download is for downloading files uploaded in the db (does streaming)
## - call exposes all registered services (none by default)
#########################################################################
from gluon.contrib.user_agent_parser import mobilize
import os,sys,types
import string,operator
from collections import OrderedDict
import numpy as np
import pandas as pd
import matplotlib as mpl
import bokeh
#from bokeh.plotting import *
# Base paths: the user's home and the per-application folder holding results.
home = os.path.expanduser("~")
datapath = os.path.join(request.folder,'static/results')
from applications.epitopemap.modules.mhcpredict import base, sequtils, tepitope
# Prediction methods exposed in the UI ('threading'/'iedbmhc2' currently disabled).
methods = ['tepitope','netmhciipan','iedbmhc1','bcell']#,'threading'] #'iedbmhc2'
iedbmethods = ['IEDB_recommended','consensus','ann','smm','arb','netmhcpan']
bcellmethods = ['Chou-Fasman', 'Emini', 'Karplus-Schulz',
                'Kolaskar-Tongaonkar', 'Parker', 'Bepipred']
# Per-method plot color and matplotlib colormap name used by the track plots.
colors = {'tepitope':'green','netmhciipan':'orange',
            'iedbmhc1':'blue','iedbmhc2':'pink','threading':'purple'}
colormaps={'tepitope':'Greens','netmhciipan':'Oranges','iedbmhc2':'Pinks',
            'threading':'Purples','iedbmhc1':'Blues'}
def index():
    """
    Default landing page: renders the menu and a quick search form.
    Rendered by views/default/index.html (or the mobile variant for
    mobile user agents).
    """
    if request.user_agent().is_mobile:
        # str.replace returns a NEW string; the previous code discarded the
        # return value, so the view was never actually switched and mobile
        # clients got the desktop template. Assign it back.
        response.view = response.view.replace('.html','.mobile.html')
    form = quicksearch()
    return dict(message=T('Menu'),searchform=form)
def register():
    """Render the standalone user registration form."""
    return dict(form=auth.register())
def user():
    """
    exposes:
    http://..../[app]/default/user/login
    http://..../[app]/default/user/logout
    http://..../[app]/default/user/register
    http://..../[app]/default/user/profile
    http://..../[app]/default/user/retrieve_password
    http://..../[app]/default/user/change_password
    use @auth.requires_login()
        @auth.requires_membership('group name')
        @auth.requires_permission('read','table name',record_id)
    to decorate functions that need access control
    """
    # New accounts need admin approval before they can log in.
    auth.settings.registration_requires_approval = True
    adminmail = 'damien.farrell@ucd.ie'
    # Email the admin whenever a registration is accepted.
    auth.settings.register_onaccept = lambda form: mail.send(to=adminmail,
        subject='New user registered for %s application' % (request.application),
        message="new user email is %s" % (form.vars.email))
    return dict(form=auth())
def download():
    """
    allows downloading of uploaded files
    http://..../[app]/default/download/[filename]
    """
    # Streams db-stored uploads; web2py handles range requests internally.
    return response.download(request,db)
def call():
    """
    exposes services. for example:
    http://..../[app]/default/call/jsonrpc
    decorate with @services.jsonrpc the functions to expose
    supports xml, json, xmlrpc, jsonrpc, amfrpc, rss, csv
    """
    return service()
@auth.requires_membership("editor_group")
def list_users():
    """Admin page: table of all users with an Edit link per row."""
    # Virtual field rendering an Edit button linking to manage_user.
    # NOTE(review): web2py's Field.Virtual normally takes (name, lambda);
    # the single-argument form relies on legacy behavior -- confirm.
    btn = lambda row: A("Edit", _href=URL('manage_user', args=row.auth_user.id))
    db.auth_user.edit = Field.Virtual(btn)
    rows = db(db.auth_user).select()
    headers = ["ID", "Name", "Last Name", "Email","registration", "Edit"]
    fields = ['id', 'first_name', 'last_name', "email", "registration_key", "edit"]
    table = TABLE(THEAD(TR(*[B(header) for header in headers])),
                  TBODY(*[TR(*[TD(row[field]) for field in fields]) \
                        for row in rows]))
    table["_class"] = "table table-striped table-bordered table-condensed"
    return dict(table=table)
@auth.requires_membership("editor_group")
def manage_user():
    """Admin page: edit a single user plus their group memberships."""
    user_id = request.args(0) or redirect(URL('list_users'))
    form = SQLFORM(db.auth_user, user_id).process()
    # Memberships are loaded into the page via an ajax component.
    membership_panel = LOAD(request.controller,
                            'manage_membership.html',
                            args=[user_id],
                            ajax=True)
    return dict(form=form,membership_panel=membership_panel)
@auth.requires_membership("editor_group")
def manage_membership():
    """Ajax component: grid of group memberships for one user (add only)."""
    user_id = request.args(0) or redirect(URL('list_users'))
    # Pin the grid to the selected user and stop the field being edited.
    db.auth_membership.user_id.default = int(user_id)
    db.auth_membership.user_id.writable = False
    form = SQLFORM.grid(db.auth_membership.user_id == user_id,
                       args=[user_id],
                       searchable=False,
                       deletable=False,
                       details=False,
                       selectable=False,
                       csv=False,
                       user_signature=True)  # change to True in production
    return form
@auth.requires_signature()
def data():
    """Expose generic CRUD access to the db (digitally signed URLs only)."""
    return dict(form=crud())
def mpld3Plot(fig, objects=None):
    """Return an mpld3 html snippet for a matplotlib figure.

    NOTE(review): the tooltip branch references MyToolTip and plugins,
    neither of which is imported or defined in this controller, so passing
    a non-empty 'objects' raises NameError -- confirm and fix or remove
    the dead branch. 'htmllabels' is also unused.
    """
    import mpld3
    html = mpld3.fig_to_html(fig)
    htmllabels = []
    if objects!=None and len(objects)>0:
        bars,labels = zip(*objects)
        tooltip = MyToolTip(bars, labels)
        plugins.connect(fig, tooltip)
    return html
def mplPlot(fig):
    """Convert a matplotlib figure to a bokeh plot object."""
    from bokeh import mpl
    plot = mpl.to_bokeh(fig)
    return plot
def embedPlot_old(plot):
    """Embed plot method for older versions of bokeh.

    Writes the plot's js to static/temp/<id>.js and returns (js, script tag)
    from bokeh's autoload_static, pointing at the app's bundled bokeh assets.
    """
    from bokeh.resources import Resources
    from bokeh.embed import autoload_static
    fp = os.path.join(request.folder,'static/temp/')
    fp = os.path.join(fp, plot._id+".js")
    res = Resources("relative")
    res.js_files = ["../static/js/bokeh.min.js"]
    res.css_files = ["../static/css/bokeh.min.css"]
    jspath = os.path.join('../static/temp/', plot._id+".js")
    js,tag = autoload_static(plot, res, jspath)
    with open(fp, "w") as f:
        f.write(js)
    # NOTE(review): stray bare Python-2 'print' (emits a blank line) -- looks
    # like leftover debugging.
    print
    return js,tag
def embedPlot(plot):
    """Embed plot method for new version of bokeh (tested on 0.11).

    Returns (script, div) from bokeh.embed.components and registers the
    bokeh js/css assets on the web2py response so the view includes them.
    """
    from bokeh.embed import components
    script, div = components(plot)
    #inject the required bokeh js and css files
    response.files.append(URL('static','css/bokeh.min.css'))
    response.files.append(URL('static','js/bokeh.min.js'))
    response.include_files()
    return script, div
def plotRegions(plot, regions=None):
    """Overlay hard-coded regions of interest as colored rects on a bokeh plot.

    NOTE(review): the 'regions' parameter is ignored and only the rv3584
    dict is plotted; rv0655/rv3676/rv0757/rv3390 are dead data -- presumably
    switched by hand during analysis. Confirm before relying on this.
    """
    h=27
    y=.5+h/2.0
    w=20
    colors = {'negative':'#FF3333', 'positive':'#0099FF'}
    rv0655 = {'negative':[66,77,171,198,251], 'positive':[231]}
    rv3676 = {'negative':[197], 'positive':[42,117,204]}
    rv0757 = {'negative':[73,175], 'positive':[125,210]}
    rv3584 = {'negative':[72], 'positive':[43,49]}
    rv3390 = {'positive':[178,185]}
    reg = rv3584
    for r in reg:
        x = reg[r]
        # shift so each rect spans [pos, pos+w] (rect x is the center)
        x = [i+w/2 for i in x]
        plot.rect(x,y, width=w, height=h,color=colors[r],
            line_color='black',alpha=0.4,legend=r)
    plot.legend.label_text_font_size = '15pt'
    return
def plotAnnotations(plot,annotation):
    """Draw sequence annotation tracks (signalp/tmhmm/pfam27) on a bokeh plot.

    'annotation' is a dict; keys present are drawn as rects near the x axis:
    signalp -> cleavage positions, tmhmm -> (start, end) spans,
    pfam27 -> (name, start, end) labelled domains.
    """
    #print annotation
    h=1.8
    y=.4+h/2.0
    if 'signalp' in annotation:
        x = annotation['signalp'].values()
        #source = ColumnDataSource(data=dict(x=x,y=y))
        plot.rect(x,y, width=.5, height=h,color='purple',line_color='red',alpha=0.7,legend='signalp')
    if 'tmhmm' in annotation:
        vals = annotation['tmhmm']
        # rect centers and widths from (start, end) spans
        x=[i[0]+(i[1]-i[0])/2.0 for i in vals]
        w=[i[1]-i[0] for i in vals]
        #print x,w,y
        plot.rect(x,y, width=w, height=h,color='blue',line_color='blue',alpha=0.6,legend='tmhmm')
    if 'pfam27' in annotation:
        vals = annotation['pfam27']
        #print vals
        text = [i[0] for i in vals]
        x=[i[1]+(i[2]-i[1])/2.0 for i in vals]
        w=[i[2]-i[1] for i in vals]
        # NOTE(review): leftover Python-2 debug print
        print x,w,y
        plot.rect(x,y, width=w, height=h,color='white',line_color='black',alpha=0.6)
        plot.text(x,y, text=text, text_font_size='9pt', angle=0, text_alpha=.8,
                text_baseline='middle',text_align='center')
    return
def plotBCell(plot,pred,height):
    """Line plot of b cell predictions - no allele stuff.

    Scores are shifted to be non-negative and rescaled to fit inside the
    given track height before being drawn as a red line.
    """
    x = pred.data.Position
    #print pred.data[:20]
    #source = ColumnDataSource(data=dict(x=x,y=y))
    y=pred.data.Score
    h=height
    y = y+abs(min(y))
    y = y*(h/max(y))+3
    plot.line(x, y, line_color="red", line_width=2, alpha=0.6,legend='bcell')
    return
def plotTracks(preds,tag,n=3,title=None,width=820,height=None,
               seqdepot=None,bcell=None,exp=None):
    """Plot epitopes as parallel tracks, one row per allele per predictor.

    preds: dict of predictor name -> predictor object
    tag: locus tag, used as default title
    n: min number of alleles for promiscuous binders
    seqdepot/bcell/exp: optional annotation, b-cell and experimental overlays
    Returns (script, div) of the embedded bokeh plot.
    """
    from bokeh.models import Range1d,HoverTool,FactorRange,Grid,GridPlot,ColumnDataSource
    from bokeh.plotting import Figure
    # total rows: one per allele over all predictors (plus margin)
    alls=1
    if title == None:
        title=tag
    for m in preds:
        alls += len(preds[m].data.groupby('allele'))
    if height==None:
        height = 130+10*alls
    yrange = Range1d(start=0, end=alls+3)
    plot = Figure(title=title,title_text_font_size="11pt",plot_width=width,
                  plot_height=height, y_range=yrange,
                  y_axis_label='allele',
                  tools="xpan, xwheel_zoom, resize, hover, reset, save",
                  background_fill="#FAFAFA",
                  toolbar_location="below")
    # h tracks the current row; overlays occupy the bottom of the plot
    h=3
    if bcell != None:
        plotBCell(plot, bcell, alls)
    if seqdepot != None:
        plotAnnotations(plot,seqdepot)
    if exp is not None:
        plotExp(plot, exp)
    # accumulate rect/hover data across all predictors so we plot in one call
    x=[];y=[];allele=[];widths=[];clrs=[];peptide=[]
    predictor=[];position=[];score=[];leg=[]
    l=80
    for m in preds:
        pred = preds[m]
        cmap = mpl.cm.get_cmap(colormaps[m])
        df = pred.data
        sckey = pred.scorekey
        pb = pred.getPromiscuousBinders(data=df,n=n)
        if len(pb) == 0:
            continue
        l = pred.getLength()
        grps = df.groupby('allele')
        alleles = grps.groups.keys()
        # NOTE(review): this duplicate check is unreachable after the continue above
        if len(pb)==0:
            continue
        c=colors[m]
        leg.append(m)
        for a,g in grps:
            b = pred.getBinders(data=g)
            b = b[b.pos.isin(pb.pos)] #only promiscuous
            b.sort('pos',inplace=True)
            scores = b[sckey].values
            score.extend(scores)
            pos = b['pos'].values
            position.extend(pos)
            x.extend(pos+(l/2.0)) #offset as coords are rect centers
            widths.extend([l for i in scores])
            clrs.extend([c for i in scores])
            y.extend([h+0.5 for i in scores])
            # NOTE(review): `alls` is reused here, clobbering the row count above
            alls = [a for i in scores]
            allele.extend(alls)
            peptide.extend(list(b.peptide.values))
            predictor.extend([m for i in scores])
            h+=1
    source = ColumnDataSource(data=dict(x=x,y=y,allele=allele,peptide=peptide,
                                        predictor=predictor,position=position,score=score))
    plot.rect(x,y, width=widths, height=0.8,
              color=clrs,line_color='gray',alpha=0.7,source=source)
    # hover tooltips are fed from the ColumnDataSource columns
    hover = plot.select(dict(type=HoverTool))
    hover.tooltips = OrderedDict([
        ("allele", "@allele"),
        ("position", "@position"),
        ("peptide", "@peptide"),
        ("score", "@score"),
        ("predictor", "@predictor"),
    ])
    # NOTE(review): pred/l refer to whichever predictor was iterated last
    seqlen = pred.data.pos.max()+l
    plot.set(x_range=Range1d(start=0, end=seqlen+1))
    plot.xaxis.major_label_text_font_size = "8pt"
    plot.xaxis.major_label_text_font_style = "bold"
    plot.ygrid.grid_line_color = None
    plot.yaxis.major_label_text_font_size = '0pt'
    plot.xaxis.major_label_orientation = np.pi/4
    script, div = embedPlot(plot)
    return script, div
def plotEmpty(width=850):
    """Plot an empty placeholder plot and return its embedded html.

    Fix: the original called the bare names `figure` and `rect`, which are not
    imported in this scope; use bokeh Figure and plot.rect like the other
    plotting helpers in this module. The stray debug print was removed.
    """
    from bokeh.models import Range1d
    from bokeh.plotting import Figure
    plot = Figure(title='', plot_width=width, plot_height=10,
                  y_range=Range1d(start=1, end=100),
                  tools="xpan, xwheel_zoom, resize, hover, reset",
                  background_fill="white")
    x = range(100); y = 2
    # draw invisible (white) rects just to give the plot some content
    plot.rect(x, y, width=1, height=0.8, color='white')
    js, html = embedPlot(plot)
    return html
def plots():
"""Use as component to plot predictions for given request"""
print 'plot request'
print request.vars
label = request.vars.label
#if we have no data
if label == 'dummy':
figure = plotEmpty()
return dict(figure=figure)
g = request.vars.genome
tag = request.vars.tag
gene = request.vars.gene
title=None
if gene != None:
t = getTagbyGene(g,gene) #override tag with gene name if provided
if t != None:
tag = t
title = tag+' / '+gene
if request.vars.width == None:
width = 820
else:
width = int(request.vars.width)
if request.vars.height != None:
height = int(request.vars.height)
else:
height = None
if request.vars.n == None:
n=3
else:
n = int(request.vars.n)
if request.vars.perccutoff != None:
perccutoff=float(request.vars.perccutoff)
else:
perccutoff=0.96
preds,bcell,cutoffs = getPredictions(label,g,tag,perccutoff)
if len(preds)==0 or preds==None:
return dict(error=True)
sd=None
if request.vars.annotation == 'on':
feat, fastafmt, previous, next = getFeature(g,tag)
seq = feat['translation']
sd = getSeqDepot(seq)['t']
script, div = plotTracks(preds,tag,n=n,title=title,
width=width,height=height,seqdepot=sd,bcell=bcell)
return dict(script=script,div=div,preds=preds,error=False)
def scoredistplots(preds):
    """Score distribution plots: one histogram per predictor, gridded.

    Returns the embedded html of the grid plot.
    """
    from bokeh.models import Range1d,GridPlot
    from bokeh.plotting import Figure
    plots=[]
    for p in preds:
        pred=preds[p]
        key=pred.scorekey
        data = pred.data[key]
        hist, edges = np.histogram(data, density=True, bins=30)
        # p is reused as the Figure object here (shadows the dict key)
        p = Figure(title=p,plot_height=250,tools='')
        p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:],
               fill_color="#036564", line_color="#033649")
        p.xgrid.grid_line_color = None
        p.ygrid.grid_line_color = None
        plots.append(p)
    plot = GridPlot(children=[plots],title='test')
    js,html = embedPlot(plot)
    return html
def scoreCorrelations(preds):
    """Scatter-matrix of per-allele scores for each predictor.

    Returns a list of matplotlib figures, one per predictor.
    """
    figs=[]
    for p in preds:
        pred=preds[p]
        df=pred.data
        # peptide x allele matrix of scores
        x = df.pivot_table(index='peptide', columns='allele', values=pred.scorekey)
        f=plt.figure()
        ax=f.add_subplot(111)
        pd.scatter_matrix(x, alpha=0.2, figsize=(12,12), diagonal='hist',ax=ax)
        figs.append(f)
    return figs
def results():
    """Component to show predictions for all peptides for each predictor """
    label = request.vars.label
    g = request.vars.genome
    tag = request.vars.tag
    preds,bcell,cutoffs = getPredictions(label,g,tag)
    # NOTE(review): summary is computed but never returned
    summary = summaryhtml(preds)
    data = {}
    for p in preds:
        # reshape each predictor's results for display
        data[p] = preds[p].reshape()
    data = dict(data)
    return dict(data=data)
def binders():
    """Component for top binder tables"""
    label = request.vars.label
    g = request.vars.genome
    tag = request.vars.tag
    n = int(request.vars.n)
    preds,bcell,cutoffs = getPredictions(label,g,tag)
    summary = summaryhtml(preds)
    b = base.getBinders(preds,n=n)
    kys = b.keys()
    # binders shared by both MHC-II methods, when both were run
    if 'tepitope' in kys and 'netmhciipan' in kys:
        shared = pd.merge(b['tepitope'],b['netmhciipan'],
                          on=['peptide','name','pos','core'],
                          copy=False).sort('pos')
    else:
        shared=''
    return dict(b=b,summary=summary,shared=shared,n=n)
def showSequence(seq,preds):
    """Get html display of binders on sequences.

    Highlights binder positions per allele with each predictor's colour.
    Returns a web2py TABLE helper.
    """
    colors = {'tepitope':'#70E2AA','netmhciipan':'orange',
              'iedbmhc1':'#9DCEFF','iedbmhc2':'pink','threading':'#BCA9F5'}
    l=9 #need to get this from predictors
    seqs=[]
    tabledata=[]
    tabledata.append((TR(TH('allele'),TH('sequence'))))
    for p in preds:
        b = preds[p].getBinders()
        clr = colors[p]
        grps = b.groupby('allele')
        for a,g in grps:
            # expand each binder start into the residue positions it covers
            pos=[]
            for i in g.pos: pos.extend(np.arange(i,i+l))
            seqhtml=[]
            for i in range(len(seq)):
                if i in pos:
                    seqhtml.append(SPAN(seq[i],_style="background-color:%s" %clr))
                else:
                    seqhtml.append(SPAN(seq[i],_style="color: gray"))
            tabledata.append((TR(TH(a),TD(*seqhtml))))
    table = TABLE(*tabledata,_class="seqtable")
    return table
def sequence():
    """Component to highlight epitopes on sequence"""
    label = request.vars.label
    g = request.vars.genome
    tag = request.vars.tag
    n = int(request.vars.n)
    feat, fastafmt, previous, next = getFeature(g,tag)
    # feature not found: render nothing
    if feat==None:
        return dict(table=None)
    seq = feat.qualifiers['translation'][0]
    preds,bcell,c = getPredictions(label,g,tag)
    table = showSequence(seq,preds)
    return dict(table=table)
def feature():
    """Component showing gene annotation"""
    g = request.vars.genome
    tag = request.vars.tag
    items = getFeature(g,tag)
    if items != None:
        feat, fastafmt, previous, next = items
        return dict(fastafmt=fastafmt,feat=feat,
                    previous=previous,next=next)
    # unknown feature - view gets an empty dict
    return dict()
def iedb():
    """Remote iedb tools predictions for a protein, rendered as an html table."""
    g = request.vars.genome
    tag = request.vars.tag
    feature, fastafmt, previous, next = getFeature(g,tag)
    seq = feature.qualifiers['translation'][0]
    df = base.getIEDBRequest(seq)
    result = XML(df.to_html(classes='mytable'))
    return dict(result=result)
def seqdepot(result):
    """Seqdepot data table format.

    NOTE(review): `sd`, `tag`, `aseqid` and `seq` are not defined in this
    function - they must be module globals set elsewhere, or this is broken;
    confirm before relying on it.
    """
    kys = result['t'].keys()
    tables = {}
    for k in kys:
        # one table per seqdepot tool, header row from the tool's field names
        fieldnames = [TH(i) for i in sd.toolFields(k)]
        rows = [TR(i) for i in result['t'][k]]
        rows.insert(0,TR(*fieldnames))
        tables[k] = TABLE(*rows,_class="tinytable")
    fp = os.path.join(request.folder,'static/temp/')
    filename = os.path.join(fp,tag+'.png')
    sd.saveImage(aseqid, filename, {'format':'png'})
    imgurl = IMG(_src=URL(r=request,c='static',f='temp/%s' %os.path.basename(filename)))
    # tabbed layout: one tab per tool table
    links = [LI(A(k,_href="#%s" %k)) for k in tables]
    tablinks = UL(*links,_class="small-tab-links")
    divs=[DIV(tables[k],_id=k,_class="tab") for k in tables]
    content = DIV(*divs,_class="tab-content")
    tabbedcontent = DIV(tablinks, content,_class="tabs")
    return dict(result=result,seq=seq,imgurl=imgurl,tables=tables,
                tabbedcontent=tabbedcontent)
def protein():
    """Display protein info from a fixed URL: /protein/<label>/<genome>/<tag>.

    Fix: removed the unreachable `return` that followed `raise HTTP(404, ...)`.
    """
    label = request.args[0]
    g = request.args[1]
    tag = request.args[2]
    n = 3
    if g == 'other':
        # no genome record available; use empty placeholders
        items = (None, None, '', '')
    else:
        items = getFeature(g, tag)
    if items != None:
        feature, fastafmt, previous, next = items
    else:
        raise HTTP(404, "No such feature %s available in genome %s" %(tag,g))
    result = dict(label=label, tag=tag, genome=g, n=n,
                  previous=previous, next=next)
    return result
@auth.requires_login()
def sequences():
    """Allow user to add fasta sequences instead.

    Shows an upload form plus a grid of previously uploaded sequence sets.
    """
    uploadform = FORM(
        TABLE(TR(TD(LABEL('Identifier:',_for='name')),
                 TD(INPUT(_name='name',_type='string',_required=True))),
              TR(TD(LABEL('Fasta file:')),TD(INPUT(_name='fastafile',_type='file'))),
              TR(TD(LABEL('Description:',_for='description')),
                 TD(INPUT(_name='description',_type='string',_required=False,
                          _style="width:400px;"))),
              TR(TD(),TD(INPUT(_name='submit',_type='submit',_value='Submit'))),
              _class="smalltable"), _id="myform")
    if uploadform.accepts(request.vars,formname='upload_form'):
        fname = request.vars.fastafile.filename
        uploadform.vars.filename = fname
        # store the upload in the sequences table
        id = db.sequences.insert(name=uploadform.vars.name,
                                 description=uploadform.vars.description,
                                 file=uploadform.vars.fastafile,
                                 filename=uploadform.vars.filename)
    db.sequences.id.readable=False
    query=((db.sequences.id>0))
    default_sort_order=[db.sequences.id]
    links=[lambda row: A('browse',_href=URL('fastaview', args=row.name))]
    grid = SQLFORM.grid(query=query, orderby=default_sort_order,
                        create=False, deletable=True, maxtextlength=64, paginate=35,
                        details=True, csv=False, ondelete=myondelete,
                        editable=auth.has_membership('editor_group'),links=links)
    return dict(grid=grid,form=uploadform)
@auth.requires_login()
def genomes():
    """Display available genomes and allow upload"""
    formats = ['genbank']
    uploadform = FORM(
        TABLE(TR(TD(LABEL('Identifier:',_for='name')),
                 TD(INPUT(_name='name',_type='string',_required=True))),
              TR(TD(LABEL('Format:',_for='format')),
                 TD(SELECT(formats,_name='format',_type='string',_required=True))),
              TR(TD(LABEL('file to upload')),TD(INPUT(_name='gfile',_type='file'))),
              TR(TD(),TD(INPUT(_name='submit',_type='submit',_value='Submit'))),
              _class="smalltable"), _id="myform")
    if uploadform.accepts(request.vars,formname='upload_form'):
        fname = request.vars.gfile.filename
        uploadform.vars.filename = fname
        # store the uploaded genome file
        id = db.genomes.insert(name=uploadform.vars.name,
                               file=uploadform.vars.gfile,
                               filename=uploadform.vars.filename,
                               format=uploadform.vars.format)
    db.genomes.id.readable=False
    query=((db.genomes.id>0))
    default_sort_order=[db.genomes.id]
    links=[lambda row: A('browse',_href=URL('genomeview', args=row.name))]
    grid = SQLFORM.grid(query=query, orderby=default_sort_order,
                        create=False, deletable=True, maxtextlength=350, paginate=35,
                        details=True, csv=False, ondelete=myondelete,
                        editable=auth.has_membership('editor_group'),links=links)
    return dict(grid=grid,form=uploadform)
def genomeview():
    """Summary page for genome"""
    g = request.args[0]
    if len(request.args) == 1:
        gfile = getGenome(g)
        data = sequtils.genbank2Dataframe(gfile)
        summary = sequtils.genbankSummary(data)
        # show only coding sequences, without bookkeeping columns
        data = data[data.type=='CDS']
        data = data.drop(['type','pseudo'],1)
        return dict(genome=g,data=data,summary=summary)
    else:
        return dict()
def fastaview():
    """Summary of fasta contents"""
    f = request.args[0]
    if len(request.args) == 1:
        ffile,desc = getFasta(f)
        # wide columns so sequences are not truncated in the view
        pd.set_option('max_colwidth', 800)
        data = sequtils.fasta2Dataframe(ffile)
        return dict(fastafile=f,data=data,desc=desc)
    else:
        return dict()
@auth.requires_login()
def presets():
    """Preset alleles form.

    Upload csv files of allele presets plus a grid of existing presets.
    """
    uploadform = FORM(
        TABLE(TR(TD(LABEL('Name:',_for='name')),
                 TD(INPUT(_name='name',_type='string',_required=True))),
              TR(TD(LABEL('CSV file:')),TD(INPUT(_name='csvfile',_type='file'))),
              TR(TD(LABEL('Description:',_for='description')),
                 TD(INPUT(_name='description',_type='string',_required=False,
                          _style="width:400px;"))),
              TR(TD(),TD(INPUT(_name='submit',_type='submit',_value='Submit'))),
              _class="smalltable"), _id="myform")
    if uploadform.accepts(request.vars,formname='upload_form'):
        fname = request.vars.csvfile.filename
        uploadform.vars.filename = fname
        id = db.allelepresets.insert(name=uploadform.vars.name,
                                     description=uploadform.vars.description,
                                     file=uploadform.vars.csvfile,
                                     filename=uploadform.vars.filename)
    db.allelepresets.id.readable=False
    query=((db.allelepresets.id>0))
    default_sort_order=[db.allelepresets.id]
    grid = SQLFORM.grid(query=query, orderby=default_sort_order,
                        create=False, deletable=True, maxtextlength=64, paginate=35,
                        details=True, csv=False, ondelete=myondelete,
                        editable=auth.has_membership('editor_group'))
    return dict(grid=grid,form=uploadform)
@auth.requires_login()
def predictions():
    """Parse results folder to show the actual data existing on file system
    might not sync with the results ids."""
    vals=[]
    # leaf directories under datapath are laid out as <predid>/<genome>/<method>
    for root, subdirs, files in os.walk(datapath):
        if not subdirs:
            p1,method = os.path.split(root)
            p2,genome = os.path.split(p1)
            predid = os.path.basename(p2)
            vals.append((predid, genome, method, len(files)))
    df = pd.DataFrame(vals,columns=['identifier','genome','method','sequences'])
    db.predictions.id.readable=False
    query=((db.predictions.id>0))
    default_sort_order=[db.predictions.id]
    grid = SQLFORM.grid(query=query, orderby=default_sort_order,
                        create=True, maxtextlength=350,
                        paginate=20,details=True, csv=False,
                        deletable=auth.has_membership('editor_group'),
                        editable=auth.has_membership('editor_group'))
    return dict(results=df, grid=grid)
def myondelete(table, id):
    """Grid ondelete callback: asks for confirmation; actual delete disabled."""
    form = FORM.confirm('Are you sure?')
    print form
    if form.accepted:
        response.flash = "I don't like your submission"
        print table, id
        #db(db.predictions.id==id).delete()
    return form
def summaryhtml(predictors):
    """Summary table of predictions: name, cutoff and binder count per predictor."""
    rows=[]
    rows.append(TR(TH('name'),TH('cutoff'),TH('binders')))
    for p in predictors:
        pred=predictors[p]
        # promiscuous binders in at least 2 alleles
        b = pred.getPromiscuousBinders(n=2)
        rows.append(TR(pred.name, pred.cutoff, len(b)))
    return TABLE(*rows,_class='tinytable')
def download():
    """Download all predictions for a protein as csv text."""
    import StringIO
    label = request.args[0]
    g = request.args[1]
    t = request.args[2]
    preds,bcell,c = getPredictions(label,g,t)
    # concatenate every predictor's dataframe into one csv
    data = [preds[p].data for p in preds]
    df = pd.concat(data)
    output = StringIO.StringIO()
    df.to_csv(output,float_format='%.2f')
    csvdata = output.getvalue()
    return dict(csvdata=csvdata)
def clusterResults():
    """Cluster results loaded from fixed csv files under datapath."""
    results = {}
    files = ['topclusters_MTB-H37Rv.csv','topsinglebinders.csv']
    for f in files:
        # NOTE(review): f is reassigned to the full path, so `results` is keyed
        # by path rather than the bare filename - confirm this is intended
        f = os.path.join(datapath, f)
        r = pd.read_csv(f, index_col=0)
        r.reset_index(inplace=True,drop=True)
        r.sort('name',inplace=True)
        results[f] = r
    return dict(results=results)
def quicksearch():
    """Non DB search just using paths.

    Returns a small form selecting prediction id, genome, tag and gene.
    """
    form = SQLFORM.factory(
        Field('label',requires=IS_IN_DB(db, 'predictions.identifier',zero=None,
              multiple=False),default=1,label='id'),
        Field('genome',requires=IS_IN_DB(db, 'genomes.name', zero=None,
              multiple=False),default=1,label='genome'),
        Field('tag', 'string', label='locus tag',default='',length=10),
        Field('gene', 'string', label='gene',default='',length=10),
        hidden=dict(width=550,height=250,n=2),
        formstyle="table3cols",_id='myform')
    form.element('input[name=tag]')['_style'] = 'width:210px;'
    form.element('input[name=gene]')['_style'] = 'width:210px;'
    return form
def selectionForm():
    """Quick view form: prediction/genome/tag selection plus display options."""
    form = SQLFORM.factory(
        Field('label',requires=IS_IN_DB(db, 'predictions.identifier',zero=None,
              multiple=False),default=1,label='id'),
        Field('genome',requires=IS_IN_DB(db, 'genomes.name', zero=None,
              multiple=False),default=1,label='genome'),
        Field('tag', 'string', label='locus tag',default=''),
        Field('gene', 'string', label='gene',default=''),
        Field('n', 'string', label='min alleles',default=3),
        Field('globalcutoff', 'boolean', label='global cutoff',default=True),
        Field('perccutoff', 'string', label='perc. cutoff',default=.96),
        Field('annotation', 'boolean', label='annotation',default=False),
        submit_button="Update",
        formstyle='table3cols',_id='myform',_class='myform')
    form.element('select[name=genome]').insert(0,'other') #always add other
    form.element('input[name=n]')['_style'] = 'width:50px;'
    form.element('input[name=perccutoff]')['_style'] = 'width:50px;'
    form.element('input[name=tag]')['_style'] = 'width:130px;'
    form.element('input[name=gene]')['_style'] = 'width:130px;'
    return form
@auth.requires_login()
def quickview():
    """Quickview page: selection form plus search form."""
    defaultid = 'results_bovine'
    form = selectionForm()
    searchform = findForm()
    return dict(label=defaultid,form=form,searchform=searchform)
def show():
    """Quickview all results in one - faster.

    Builds the tracks plot, summary, full result tables, top binders and the
    highlighted sequence for one protein in a single request.
    """
    print request.vars
    label = request.vars.label
    g = request.vars.genome
    tag = request.vars.tag
    n = int(request.vars.n)
    cutoff = float(request.vars.perccutoff)
    gene = request.vars.gene
    title=None
    # override tag with gene name if a gene was supplied
    if gene != '':
        t = getTagbyGene(g,gene)
        if t != None:
            tag = t
            title = tag+' / '+gene
    # NOTE(review): cutoff was already read unconditionally above; this
    # re-read with a default is redundant (and the first read can raise)
    if request.vars.perccutoff == None:
        cutoff = 0.96
    else:
        cutoff = float(request.vars.perccutoff)
    if request.vars.width == None:
        width = 820
    else:
        width = int(request.vars.width)
    annot = request.vars.annotation
    # NOTE(review): figure is assigned but never used or returned
    if label == 'dummy':
        figure = plotEmpty()
    preds,bcell,cutoffs = getPredictions(label,g,tag,cutoff)
    if len(preds) == 0:
        redirect(URL('error'))
    if g == 'other':
        #no genome stuff
        feat = None; fastafmt=''; previous=''; next=''
        seq = '' #get the fasta seq
        sd=None
    else:
        feat = None; fastafmt=None
        feat, fastafmt, previous, next = getFeature(g,tag)
        seq = feat['translation']
        sd=None
        if request.vars.annotation == 'on':
            sd = getSeqDepot(seq)['t']
    script, div = plotTracks(preds,tag,n=n,title=title,width=width,seqdepot=sd,bcell=bcell)
    summary = summaryhtml(preds)
    # all results reshaped into per-predictor tables
    data = {}
    for p in preds:
        data[p] = preds[p].reshape()
    data = dict(data)
    # top binders; merge the two MHC-II methods when both present
    b = base.getBinders(preds,n=n)
    kys = b.keys()
    if 'tepitope' in kys and 'netmhciipan' in kys:
        shared = pd.merge(b['tepitope'],b['netmhciipan'],
                          on=['peptide','name','pos','core'],
                          copy=False).sort('pos')
    else:
        shared=''
    seqtable = showSequence(seq,preds)
    # info: where results live and each predictor's peptide length
    path = os.path.join(datapath, label)
    found = [(m,preds[m].getLength()) for m in preds]
    info = TABLE(*found,_class='tinytable')
    return dict(script=script,div=div,feat=feat,fastafmt=fastafmt,data=data,
                b=b,summary=summary,shared=shared,n=n,seqtable=seqtable,cutoffs=cutoffs,
                genome=g,tag=tag,label=label,info=info,path=path)
def error():
    """Generic error page; the view needs no data."""
    return {}
def formerror():
    """Show a form error message passed via request vars."""
    return dict(msg=request.vars.msg)
@auth.requires_login()
def genomeanalysis():
    """Genome wide analysis of epitope predictions.

    Renders the selection form; the actual analysis runs in analysegenome().
    """
    defaultid = 'results_test'
    predids = [p.identifier for p in db().select(db.predictions.ALL)]
    opts1 = [OPTION(i,value=i) for i in predids]
    genomes = [p.name for p in db().select(db.genomes.ALL)]
    opts2 = [OPTION(i,value=i) for i in genomes]
    form = FORM(TABLE(
        TR(TD(LABEL('id:',_for='genome')),
           TD(SELECT(*opts1,_name='label',
                     value=defaultid, _style="width:150px;"))),
        TR(TD(LABEL('genome:',_for='genome')),
           TD(SELECT(*opts2,_name='genome',value='',_style="width:150px;"))),
        TR(TD(LABEL('method:',_for='method')),
           TD(SELECT(*methods,_name='method',value='tepitope',_style="width:150px;"))),
        TR(TD(LABEL('min alleles:',_for='n')),
           TD(INPUT(_name='n',_type='text',value=3,_style="width:50px;"))),
        TR(TD(LABEL('perc cutoff:',_for='perccutoff')),
           TD(INPUT(_name='perccutoff',_type='text',value='0.96',_style="width:50px;"))),
        TR(TD(),TD(INPUT(_name='submit',_type='submit',_value='Analyse'))),
        _class="smalltable"), _id="myform")
    return dict(form=form)
@auth.requires_login()
def analysegenome():
    """Analyse genome predictions for one method with given cutoffs."""
    pd.set_option('max_colwidth', 800)
    gname = request.vars.genome
    label = request.vars.label
    method = request.vars.method
    # defaults when the options were not supplied
    if request.vars.n != None:
        n = int(request.vars.n)
    else:
        n = 3
    if request.vars.perccutoff != None:
        cutoff = float(request.vars.perccutoff)
    else:
        cutoff = 0.96
    b,res,top,cl,fig = genomeAnalysis(label, gname, method, n, cutoff)
    # plot embedding currently disabled
    plothtml=''
    link = A('download binder list',_href=URL('default','analysegenome.csv',extension='',vars=request.vars))
    summary = 'Found %s binders in >=%s alleles from %s proteins' %(len(b),n,len(res))
    return dict(genome=gname,method=method,cutoff=cutoff,res=res,top=top,cl=cl,
                summary=summary, link=link, plothtml=plothtml)
def zip_dataframes(data, filename):
    """Zip dataframes as csv.

    Currently a no-op stub returning None; the intended zipfile-based
    implementation is kept below as commented reference code.
    """
    # import cStringIO, zipfile
    # stream = cStringIO.StringIO()
    # zip_file = zipfile.ZipFile(stream, "w", zipfile.ZIP_DEFLATED, False)
    # for df in data:
    #     zip_file.writestr(filename, df.to_csv(None, encoding='utf-8', index=False))
    return None
def compare():
    """Correlate predictions from 2 methods: renders the selection form."""
    form = SQLFORM.factory(
        Field('label',requires=IS_IN_DB(db, 'predictions.identifier',zero=None,
              multiple=False),default=1,label='id'),
        Field('genome',requires=IS_IN_DB(db, 'genomes.name', zero=None,
              multiple=False),default=1,label='genome'),
        Field('method1',requires=IS_IN_SET(methods,multiple=False,zero=None),label='method 1'),
        Field('method2',requires=IS_IN_SET(methods,multiple=False,zero=None),label='method 2'),
        Field('n', 'string', label='min alleles',default=3),
        hidden=dict(perccutoff=.98),
        formstyle="table3cols",_id='myform',_class='myform')
    form.element('input[name=n]')['_style'] = 'width:50px;'
    return dict(form=form)
def correlationanalysis():
fig=''
msg=None
if request.vars.method1 == request.vars.method2:
return dict(res=None,msg='2 methods are the same!')
print request.vars
res = correlation(**request.vars)
if res is None:
msg = 'no such predictions'
fig = plotCorrelation(res)
return dict(fig=fig,res=res,msg=msg)
def plotCorrelation(res):
    """Scatter plot of per-protein binder percentages for two methods.

    res: dataframe with perc_x/perc_y columns and locus_tag; returns html.
    """
    from bokeh.models import HoverTool,ColumnDataSource
    from bokeh.plotting import Figure
    width=600
    height=600
    plot = Figure(title='',title_text_font_size="11pt",
                  plot_width=width, plot_height=height,
                  x_axis_label='method1',y_axis_label='method2',
                  tools="pan, wheel_zoom, resize, hover, reset, save",
                  background_fill="#FAFAFA")
    x=res['perc_x']
    y=res['perc_y']
    source = ColumnDataSource(data=dict(x=x,y=y, protein=res.locus_tag))
    plot.circle(x,y, color='blue', line_color='gray',fill_alpha=0.5, size=10, source=source)
    # hover shows both percentages and the protein tag
    hover = plot.select(dict(type=HoverTool))
    hover.tooltips = OrderedDict([
        ("binders1", "@x"),
        ("binders2", "@y"),
        ("protein", "@protein"),
    ])
    js,html = embedPlot(plot)
    return html
def conservationAnalysisForm(defaultid='test'):
    """Form selecting prediction/genome/tag/method plus BLAST options."""
    defaultg = 'MTB-H37Rv'
    predids = [p.identifier for p in db().select(db.predictions.ALL)]
    opts1 = [OPTION(i,value=i) for i in predids]
    genomes = [p.name for p in db().select(db.genomes.ALL)]
    genomes.insert(0,'other')
    opts2 = [OPTION(i,value=i) for i in genomes]
    form = FORM(TABLE(
        TR(TD(LABEL('id:',_for='genome')),
           TD(SELECT(*opts1,_name='label',
                     value=defaultid, _style="width:150px;"))),
        TR(TD(LABEL('genome:',_for='genome')),
           TD(SELECT(*opts2,_name='genome',value=defaultg,_style="width:150px;"))),
        TR(TD(LABEL('locus tag:',_for='tag')),
           TD(INPUT(_name='tag',_type='text',value="Rv0001",_style="width:150px;"))),
        TR(TD(LABEL('method:',_for='method')),
           TD(SELECT(*methods,_name='method',value='tepitope',_style="width:150px;"))),
        TR(TD(LABEL('min alleles:',_for='n')),
           TD(INPUT(_name='n',_type='text',value=3,_style="width:50px;"))),
        TR(TD(LABEL('min identity:',_for='identity')),
           TD(INPUT(_name='identity',value=70,_style="width:50px;"))),
        TR(TD(),TD('BLAST options')),
        TR(TD(LABEL('entrez query:',_for='entrezquery')),
           TD(TEXTAREA(_name='entrezquery',value='',_style="height:100px;width:150px;"))),
        TR(TD(),TD(INPUT(_name='submit',_type='submit',_value='Submit'))),
        _class="smalltable"), _id="myform", hidden=dict(width=850))
    return form
@auth.requires_login()
def conservation():
    """Analysis of epitope conservation: renders the form only.

    The scheduler-based submission below is disabled (kept as a string for
    reference); the analysis runs in conservationanalysis().
    """
    form = conservationAnalysisForm()
    '''if form.process().accepted:
        session.flash = 'form accepted'
        pvars = {'seq':seq,'hitlist_size':400,'equery':equery}
        task = scheduler.queue_task('doTask', #pvars=request.vars,
            immediate=True, timeout=300)
        print task.id
        status = scheduler.task_status(task.id, output=True)
        result = status.result
        print status'''
    return dict(form=form)
@auth.requires_login()
def conservationanalysis():
    """Analysis of epitope conservation.

    conservationAnalysis returns integer error codes (1: no predictions,
    2: no BLAST hits) or a 4-tuple of results on success.
    """
    pd.set_option('max_colwidth', 3000)
    label = request.vars.label
    gname = request.vars.genome
    method = request.vars.method
    n=int(request.vars.n)
    tag = request.vars.tag
    identity = int(request.vars.identity)
    equery = request.vars.entrezquery
    retval = conservationAnalysis(**request.vars)
    msg=''
    if retval == 1:
        msg = 'No predictions found for %s with method %s with n=%s.' %(tag,method,n)
        return dict(res=None,msg=msg)
    elif retval == 2:
        msg = 'No BLAST results at >%s%% sequence identity.' %identity
        return dict(res=None,msg=msg)
    else:
        res, alnrows, summary, fig = retval
        alnrows = analysis.getAlignedBlastResults(alnrows)
        alnrows = analysis.setBlastLink(alnrows)
        plothtml = mpld3Plot(fig)
        # permalink reproducing this analysis with the same parameters
        url = A('direct link to these results', _href=URL('default','conservationanalysis.load',
                vars={'label':label,'genome':gname,'tag':tag,'method':method,'n':n,
                      'identity':identity,'equery':equery},extension=''))
        return dict(res=res,alnrows=alnrows,summary=summary,plothtml=plothtml,
                    msg=msg,permlink=url)
def submissionForm():
    """Form for job submission.

    Left column: label/genome/sequence/method options. Right column: allele
    selections for MHC-I and MHC-II, or a saved preset.
    """
    applySettings() #so that paths to predictors work
    predids = [p.identifier for p in db().select(db.predictions.ALL)]
    opts1 = [OPTION(i,value=i) for i in predids]
    genomes = [p.name for p in db().select(db.genomes.ALL)]
    genomes.insert(0,'')
    opts2 = [OPTION(i,value=i) for i in genomes]
    seqs = [p.name for p in db().select(db.sequences.ALL)]
    seqs.insert(0,'')
    opts3 = [OPTION(i,value=i) for i in seqs]
    # allele lists come from the predictor backends
    p1 = base.getPredictor('iedbmhc1')
    mhc1alleles = p1.getMHCIList()
    p2 = base.getPredictor('netmhciipan')
    mhc2alleles = p2.getAlleleList()
    drballeles = base.getDRBList(mhc2alleles)
    dqpalleles = base.getDQPList(mhc2alleles)
    tepitopealleles = tepitope.getAlleles()
    #get all possible alleles for both MHCII methods
    drballeles = sorted(list(set(drballeles+tepitopealleles)))
    lengths = [9,11,13,15]
    presets = [p.name for p in db().select(db.allelepresets.ALL)]
    presets.insert(0,'')
    user = session.auth.user['first_name']
    form = FORM(DIV(
        TABLE(
            TR(TD(LABEL('current labels:',_for='genome')),
               TD(SELECT(*opts1,_name='label',
                         value='', _style="width:200px;"))),
            TR(TD(LABEL('OR new label:',_for='genome')),
               TD(INPUT(_name='newlabel',_type='text',value="",_style="width:200px;"))),
            TR(TD(LABEL('genome:',_for='genome')),
               TD(SELECT(*opts2,_name='genome',value='',_style="width:200px;"))),
            TR(TD(LABEL('locus tags:',_for='names')),
               TD(INPUT(_name='names',_type='text',value="",_style="width:200px;"))),
            TR(TD(LABEL('fasta seqs:',_for='fasta')),
               TD(SELECT(*opts3,_name='fasta',value='',_style="width:200px;"))),
            TR(TD(LABEL('methods:',_for='methods')),
               TD(SELECT(*methods,_name='methods',value='tepitope',_size=4,_style="width:200px;",
                         _multiple=True))),
            TR(TD(LABEL('mhc1 method:',_for='iedbmethod')),
               TD(SELECT(*iedbmethods,_name='iedbmethod',value='IEDB_recommended',_size=1,
                         _style="width:200px;"))),
            TR(TD(LABEL('bcell method:',_for='bcellmethod')),
               TD(SELECT(*bcellmethods,_name='bcellmethod',value='Bepipred',_size=1,
                         _style="width:200px;"))),
            TR(TD(LABEL('length:',_for='length')),
               TD(SELECT(*lengths,_name='length',value=11,_size=1,_style="width:70px;"))),
            TR(TD(),TD(INPUT(_name='submit',_type='submit',_value='Submit Job'))),
            _class="smalltable"),_style='float: left'),
        DIV(TABLE(
            TR(TD(LABEL('MHC-I alleles:',_for='alleles')),
               TD(SELECT(*mhc1alleles,_name='mhc1alleles',value='HLA-A*01:01-10',_size=6,_style="width:200px;",
                         _multiple=True))),
            TR(TD(LABEL('MHC-II DRB:',_for='alleles')),
               TD(SELECT(*drballeles,_name='drballeles',value='HLA-DRB1*0101',_size=8,_style="width:200px;",
                         _multiple=True))),
            TR(TD(LABEL('MHC-II DQ/P:',_for='alleles')),
               TD(SELECT(*dqpalleles,_name='dqpalleles',value='',_size=6,_style="width:200px;",
                         _multiple=True))),
            TR(TD(LABEL('OR Use Preset:',_for='preset')),
               TD(SELECT(*presets,_name='preset',value="",_style="width:200px;"))),
            _class="smalltable"),_style='float: left'),
        _id="myform", hidden=dict(user=user))
    return form
@auth.requires_login()
def submit():
    """Process job for submission and queue job"""
    form = submissionForm()
    if form.process().accepted:
        # must have either a genome or a fasta sequence set to run on
        if form.vars.genome == '' and form.vars.fasta == '':
            msg = 'provide a genome OR a sequence'
            redirect(URL('formerror',vars={'msg':msg}))
        session.flash = 'form accepted'
        # queue the long-running prediction task (3 day timeout)
        task = scheduler.queue_task('runPredictors', pvars=request.vars,
                                    immediate=True, timeout=259200)
        redirect(URL('jobsubmitted', vars={'id':task.id}))
    elif form.errors:
        response.flash = 'form has errors'
    return dict(form=form)
@auth.requires_login()
def jobsubmitted():
    """Get details of a submitted job from the scheduler."""
    taskid = int(request.vars['id'])
    status = scheduler.task_status(taskid, output=True)
    return dict(taskid=taskid,status=status)
def findForm():
    """Find form: search proteins by genome, gene name or description."""
    result={}
    form = SQLFORM.factory(
        Field('genome',requires=IS_IN_DB(db, 'genomes.name', zero=None,
              multiple=False),default=1,label='genome'),
        Field('gene', 'string', label='gene',default=''),
        Field('description', 'string', label='description',default=''),
        submit_button="Search",
        _id='findform',_class='myform')
    form.element('input[name=gene]')['_style'] = 'height:30px;'
    form.element('input[name=description]')['_style'] = 'height:30px;'
    return form
def search():
    """Search page: renders the find form."""
    return dict(form=findForm())
def find():
    """Show search results"""
    msg = T(" ")
    results=pd.DataFrame()
    # never truncate description columns in the results view
    pd.set_option('display.max_colwidth', -1)
    gene = request.vars.gene
    desc = request.vars.description
    genome = request.vars.genome
    results = doSearch(genome, gene, desc)
    msg = 'found %s proteins' %len(results)
    link = A('download results',_href=URL('default','find.csv',extension='',vars=request.vars))
    return dict(msg=msg,link=link,results=results)
def iedbForm():
    """Form for searching external epitope data sources (via pepdata)."""
    dbs = ['iedb','hpv','imma2','hiv_frahm','tcga','tantigen']
    types = ['mhc','tcell']
    form = SQLFORM.factory(
        Field('database', requires=IS_IN_SET(dbs,multiple=False,zero=None),label='database'),
        Field('type', requires=IS_IN_SET(types,multiple=False,zero=None),label='type'),
        Field('mhc_class', requires=IS_IN_SET([1,2],multiple=False,zero=None), label='mhc_class',default=2),
        Field('epitope', 'string', label='epitope'),
        submit_button="Search",
        _id='iedbform',_class='iedbform')
    return form
def datasourcesearch():
    """search IEDB page"""
    return dict(form=iedbForm())
def datasource():
"""Use pepdata to fetch and search IEDB epitopes"""
print request.vars
db = request.vars.database
epitope = request.vars.epitope
from pepdata import iedb, hpv, imma2, hiv_frahm, tcga, tantigen
if db == 'iedb':
df = iedb.mhc.load_dataframe(mhc_class=2,human=False)
df.columns = df.columns.get_level_values(1)
df = df[df.columns[5:18]]
#df = iedb.tcell.load_dataframe()
#if epitope != '':
# df = df[df['Description'].str.contains(epitope)]
#print df
elif db == 'hpv':
df = hpv.load_mhc()
#df = hpv.load_tcell()
elif db == 'IMMA2':
df, non = imma2.load_classes()
elif db == 'hiv_frahm':
df = hiv_frahm.load_dataframe()
elif db == 'tcga':
df = tcga.load_dataframe(cancer_type='paad')
df = df[:50]
elif db == 'tantigen':
df = tantigen.load_mhc()
#df = tantigen.load_tcell()
if len(df) > 5000:
df = df[:5000]
print df
return dict(results=df)
@auth.requires_login()
def test():
    """Dev test page: plot tracks for a fixed protein with experimental data."""
    l='human' #'results_emida'
    g='MTB-H37Rv'
    tag='Rv3874'
    feat, fastafmt, previous, next = getFeature(g,tag)
    seq = feat['translation']
    preds,bcell,c = getPredictions(l,g,tag)
    exp = pd.read_csv(os.path.join(home, 'epitopedata/cfp10_regions.csv'))
    exp = exp[exp.mean_sfc>0.0]
    # NOTE(review): plotTracks returns (script, div); these names are misleading
    plot,figure = plotTracks(preds,tag,n=3,title='test',exp=exp)
    return dict(figure=figure,exp=exp)
def plotExp(plot, data):
    """Overlay experimental values as semi-transparent bars on a plot.

    data must expose `pos` (window start positions) and `mean_sfc`
    (values, vector-like e.g. a pandas Series or numpy array). Bars are
    centred on each 15-wide window; heights are shifted to be
    non-negative, rescaled to a maximum of 40 and offset by 3 so that
    zero values remain visible.
    """
    width = 15
    max_height = 40
    centres = [p + width/2.0 for p in data.pos]
    vals = data.mean_sfc
    shifted = vals + abs(min(vals))
    heights = shifted*(max_height/max(shifted)) + 3
    #plot.line(centres, heights, line_color="red", line_width=3, alpha=0.6,legend='exp')
    plot.rect(x=centres, y=1, width=width, height=heights, color="blue", alpha=0.3)
    return
def bokehtest():
    """Bokeh test: a 3-row grid of identical random scatter plots with a
    hover tool showing each point's radius."""
    from bokeh.models import Range1d, HoverTool, GridPlot, ColumnDataSource
    from bokeh.plotting import Figure
    #from bokeh.layouts import gridplot
    N = 100
    x = np.random.random(size=N) * 100
    y = np.random.random(size=N) * 100
    radii = np.random.random(size=N) * 3
    colors = ["#%02x%02x%02x" % (r, g, 150) for r, g in zip(np.floor(50+2*x), np.floor(30+2*y))]
    source = ColumnDataSource(data=dict(x=x,y=y,radius=radii))
    def build():
        fig = Figure(plot_width=800, plot_height=200,tools="hover,pan",title=None)
        fig.scatter(x, y, radius=radii,
                    fill_color=colors, fill_alpha=0.6,
                    line_color='gray', source=source)
        hover = fig.select(dict(type=HoverTool))
        hover.tooltips = OrderedDict([
            ("radius", "@radius")])
        # hide the grid lines on both axes
        fig.xgrid.grid_line_color = None
        fig.ygrid.grid_line_color = None
        return fig
    # three stacked copies of the same plot, one per grid row
    rows = [[build()] for _ in range(3)]
    grid = GridPlot(children=rows)
    script, div = embedPlot(grid)
    return dict(div=div,script=script)
@auth.requires_login()
def admin():
    """Settings page: edit the [base] section of the app config file.

    Shows the current options in a dict form; on submit, writes every
    value back to the config file and redirects to itself.
    """
    parser,conffile = getConfig()
    options = dict(parser.items('base'))
    form = SQLFORM.dictform(options)
    if form.process().accepted:
        # copy every submitted value back into the [base] section
        for key in dict(parser.items('base')):
            parser.set('base', key, form.vars[key])
        parser.write(open(conffile,'w'))
        # use session.flash so the message survives the redirect
        # (response.flash is discarded when the request is redirected)
        session.flash = 'Saved'
        redirect(URL('default','admin'))
    return dict(form=form)
def about():
    """About page; the view renders the message."""
    #fp = os.path.join(request.folder,'static/docs','about.txt')
    return dict(msg='About this page')
def citation():
    """Citation page; all content lives in the view."""
    return {}
def help():
    """Help page; msg is an (empty) translatable string."""
    return dict(msg=T(''))
|
dmnfarrell/epitopemap
|
controllers/default.py
|
Python
|
apache-2.0
| 49,804 |
package apimanagement
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0
// Changes may cause incorrect behavior and will be lost if the code is
// regenerated.
import (
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"net/http"
)
// LoggersClient is the use these REST APIs for performing operations on
// entities like API, Product, and Subscription associated with your Azure API
// Management deployment.
type LoggersClient struct {
	ManagementClient // embedded base client: credentials, BaseURI, SubscriptionID, APIVersion
}
// NewLoggersClient creates an instance of the LoggersClient client.
func NewLoggersClient(subscriptionID string) LoggersClient {
	// Uses the default (public Azure) endpoint.
	return NewLoggersClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewLoggersClientWithBaseURI creates an instance of the LoggersClient client.
func NewLoggersClientWithBaseURI(baseURI string, subscriptionID string) LoggersClient {
	// Explicit base URI variant, e.g. for sovereign clouds or test stacks.
	return LoggersClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// CreateOrUpdate creates or Updates a logger.
//
// resourceGroupName is the name of the resource group. serviceName is the name
// of the API Management service. loggerid is identifier of the logger.
// parameters is create parameters.
func (client LoggersClient) CreateOrUpdate(resourceGroupName string, serviceName string, loggerid string, parameters LoggerCreateParameters) (result autorest.Response, err error) {
	// Client-side validation mirrors the service's constraints so obviously
	// bad input fails fast without a network round trip.
	if err := validation.Validate([]validation.Validation{
		{TargetValue: serviceName,
			Constraints: []validation.Constraint{{Target: "serviceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
				{Target: "serviceName", Name: validation.MinLength, Rule: 1, Chain: nil},
				{Target: "serviceName", Name: validation.Pattern, Rule: `^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$`, Chain: nil}}},
		{TargetValue: loggerid,
			Constraints: []validation.Constraint{{Target: "loggerid", Name: validation.MaxLength, Rule: 256, Chain: nil},
				{Target: "loggerid", Name: validation.Pattern, Rule: `^[^*#&+:<>?]+$`, Chain: nil}}},
		{TargetValue: parameters,
			Constraints: []validation.Constraint{{Target: "parameters.Type", Name: validation.Null, Rule: true, Chain: nil},
				{Target: "parameters.Credentials", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
		return result, validation.NewErrorWithValidationError(err, "apimanagement.LoggersClient", "CreateOrUpdate")
	}
	// Standard autorest pipeline: prepare the request, send it, unpack the response.
	req, err := client.CreateOrUpdatePreparer(resourceGroupName, serviceName, loggerid, parameters)
	if err != nil {
		return result, autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "CreateOrUpdate", nil, "Failure preparing request")
	}
	resp, err := client.CreateOrUpdateSender(req)
	if err != nil {
		result.Response = resp
		return result, autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "CreateOrUpdate", resp, "Failure sending request")
	}
	result, err = client.CreateOrUpdateResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "CreateOrUpdate", resp, "Failure responding to request")
	}
	return
}
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client LoggersClient) CreateOrUpdatePreparer(resourceGroupName string, serviceName string, loggerid string, parameters LoggerCreateParameters) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"loggerid":          autorest.Encode("path", loggerid),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"serviceName":       autorest.Encode("path", serviceName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}
	queryParameters := map[string]interface{}{
		"api-version": client.APIVersion,
	}
	// PUT with a JSON body creates the logger or replaces an existing one.
	preparer := autorest.CreatePreparer(
		autorest.AsJSON(),
		autorest.AsPut(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/loggers/{loggerid}", pathParameters),
		autorest.WithJSON(parameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare(&http.Request{})
}
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client LoggersClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
	// SendWithSender applies the client's configured decorators (retry, logging, ...).
	return autorest.SendWithSender(client, req)
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client LoggersClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		// 200/201/204 are all valid outcomes for a create-or-update.
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusNoContent),
		autorest.ByClosing())
	result.Response = resp
	return
}
// Delete deletes the specified logger.
//
// resourceGroupName is the name of the resource group. serviceName is the name
// of the API Management service. loggerid is identifier of the logger. ifMatch
// is the entity state (Etag) version of the logger to delete. A value of "*"
// can be used for If-Match to unconditionally apply the operation.
func (client LoggersClient) Delete(resourceGroupName string, serviceName string, loggerid string, ifMatch string) (result autorest.Response, err error) {
	// Validate the service name locally before making any request.
	if err := validation.Validate([]validation.Validation{
		{TargetValue: serviceName,
			Constraints: []validation.Constraint{{Target: "serviceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
				{Target: "serviceName", Name: validation.MinLength, Rule: 1, Chain: nil},
				{Target: "serviceName", Name: validation.Pattern, Rule: `^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$`, Chain: nil}}}}); err != nil {
		return result, validation.NewErrorWithValidationError(err, "apimanagement.LoggersClient", "Delete")
	}
	// Standard autorest pipeline: prepare, send, respond.
	req, err := client.DeletePreparer(resourceGroupName, serviceName, loggerid, ifMatch)
	if err != nil {
		return result, autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "Delete", nil, "Failure preparing request")
	}
	resp, err := client.DeleteSender(req)
	if err != nil {
		result.Response = resp
		return result, autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "Delete", resp, "Failure sending request")
	}
	result, err = client.DeleteResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "Delete", resp, "Failure responding to request")
	}
	return
}
// DeletePreparer prepares the Delete request.
func (client LoggersClient) DeletePreparer(resourceGroupName string, serviceName string, loggerid string, ifMatch string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"loggerid":          autorest.Encode("path", loggerid),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"serviceName":       autorest.Encode("path", serviceName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}
	queryParameters := map[string]interface{}{
		"api-version": client.APIVersion,
	}
	// DELETE guarded by an If-Match header ("*" applies unconditionally).
	preparer := autorest.CreatePreparer(
		autorest.AsDelete(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/loggers/{loggerid}", pathParameters),
		autorest.WithQueryParameters(queryParameters),
		autorest.WithHeader("If-Match", autorest.String(ifMatch)))
	return preparer.Prepare(&http.Request{})
}
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client LoggersClient) DeleteSender(req *http.Request) (*http.Response, error) {
	// SendWithSender applies the client's configured decorators (retry, logging, ...).
	return autorest.SendWithSender(client, req)
}
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client LoggersClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		// 200 and 204 are both valid outcomes for a delete.
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
		autorest.ByClosing())
	result.Response = resp
	return
}
// Get gets the details of the logger specified by its identifier.
//
// resourceGroupName is the name of the resource group. serviceName is the name
// of the API Management service. loggerid is identifier of the logger.
func (client LoggersClient) Get(resourceGroupName string, serviceName string, loggerid string) (result LoggerResponse, err error) {
	// Validate the service name locally before making any request.
	if err := validation.Validate([]validation.Validation{
		{TargetValue: serviceName,
			Constraints: []validation.Constraint{{Target: "serviceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
				{Target: "serviceName", Name: validation.MinLength, Rule: 1, Chain: nil},
				{Target: "serviceName", Name: validation.Pattern, Rule: `^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$`, Chain: nil}}}}); err != nil {
		return result, validation.NewErrorWithValidationError(err, "apimanagement.LoggersClient", "Get")
	}
	// Standard autorest pipeline: prepare, send, respond.
	req, err := client.GetPreparer(resourceGroupName, serviceName, loggerid)
	if err != nil {
		return result, autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "Get", nil, "Failure preparing request")
	}
	resp, err := client.GetSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "Get", resp, "Failure sending request")
	}
	result, err = client.GetResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "Get", resp, "Failure responding to request")
	}
	return
}
// GetPreparer prepares the Get request.
func (client LoggersClient) GetPreparer(resourceGroupName string, serviceName string, loggerid string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"loggerid":          autorest.Encode("path", loggerid),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"serviceName":       autorest.Encode("path", serviceName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}
	queryParameters := map[string]interface{}{
		"api-version": client.APIVersion,
	}
	// Plain GET of a single logger resource.
	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/loggers/{loggerid}", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare(&http.Request{})
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client LoggersClient) GetSender(req *http.Request) (*http.Response, error) {
	// SendWithSender applies the client's configured decorators (retry, logging, ...).
	return autorest.SendWithSender(client, req)
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client LoggersClient) GetResponder(resp *http.Response) (result LoggerResponse, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		// Decode the JSON payload into the typed result before closing the body.
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}
// ListByService lists a collection of loggers in the specified service
// instance.
//
// resourceGroupName is the name of the resource group. serviceName is the name
// of the API Management service. filter is | Field | Supported operators |
// Supported functions |
// |-------|------------------------|---------------------------------------------|
// | id | ge, le, eq, ne, gt, lt | substringof, contains, startswith,
// endswith |
// | type | eq |
// | top is number of records to return. skip is number of records to skip.
func (client LoggersClient) ListByService(resourceGroupName string, serviceName string, filter string, top *int32, skip *int32) (result LoggerCollection, err error) {
	// Validate name and paging parameters locally (top >= 1, skip >= 0)
	// before making any request.
	if err := validation.Validate([]validation.Validation{
		{TargetValue: serviceName,
			Constraints: []validation.Constraint{{Target: "serviceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
				{Target: "serviceName", Name: validation.MinLength, Rule: 1, Chain: nil},
				{Target: "serviceName", Name: validation.Pattern, Rule: `^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$`, Chain: nil}}},
		{TargetValue: top,
			Constraints: []validation.Constraint{{Target: "top", Name: validation.Null, Rule: false,
				Chain: []validation.Constraint{{Target: "top", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}}}}},
		{TargetValue: skip,
			Constraints: []validation.Constraint{{Target: "skip", Name: validation.Null, Rule: false,
				Chain: []validation.Constraint{{Target: "skip", Name: validation.InclusiveMinimum, Rule: 0, Chain: nil}}}}}}); err != nil {
		return result, validation.NewErrorWithValidationError(err, "apimanagement.LoggersClient", "ListByService")
	}
	// Standard autorest pipeline: prepare, send, respond.
	req, err := client.ListByServicePreparer(resourceGroupName, serviceName, filter, top, skip)
	if err != nil {
		return result, autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "ListByService", nil, "Failure preparing request")
	}
	resp, err := client.ListByServiceSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "ListByService", resp, "Failure sending request")
	}
	result, err = client.ListByServiceResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "ListByService", resp, "Failure responding to request")
	}
	return
}
// ListByServicePreparer prepares the ListByService request.
func (client LoggersClient) ListByServicePreparer(resourceGroupName string, serviceName string, filter string, top *int32, skip *int32) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"serviceName":       autorest.Encode("path", serviceName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}
	queryParameters := map[string]interface{}{
		"api-version": client.APIVersion,
	}
	// OData-style options are optional; only add the ones the caller supplied.
	if len(filter) > 0 {
		queryParameters["$filter"] = autorest.Encode("query", filter)
	}
	if top != nil {
		queryParameters["$top"] = autorest.Encode("query", *top)
	}
	if skip != nil {
		queryParameters["$skip"] = autorest.Encode("query", *skip)
	}
	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/loggers", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare(&http.Request{})
}
// ListByServiceSender sends the ListByService request. The method will close the
// http.Response Body if it receives an error.
func (client LoggersClient) ListByServiceSender(req *http.Request) (*http.Response, error) {
	// SendWithSender applies the client's configured decorators (retry, logging, ...).
	return autorest.SendWithSender(client, req)
}
// ListByServiceResponder handles the response to the ListByService request. The method always
// closes the http.Response Body.
func (client LoggersClient) ListByServiceResponder(resp *http.Response) (result LoggerCollection, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		// Decode the JSON page into the typed collection before closing the body.
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}
// ListByServiceNextResults retrieves the next set of results, if any.
// ListByServiceNextResults retrieves the next set of results, if any.
func (client LoggersClient) ListByServiceNextResults(lastResults LoggerCollection) (result LoggerCollection, err error) {
	req, err := lastResults.LoggerCollectionPreparer()
	if err != nil {
		return result, autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "ListByService", nil, "Failure preparing next results request")
	}
	// A nil request means the previous page carried no next-link: we are done.
	if req == nil {
		return
	}
	resp, err := client.ListByServiceSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "ListByService", resp, "Failure sending next results request")
	}
	result, err = client.ListByServiceResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "ListByService", resp, "Failure responding to next results request")
	}
	return
}
// Update updates an existing logger.
//
// resourceGroupName is the name of the resource group. serviceName is the name
// of the API Management service. loggerid is identifier of the logger.
// parameters is update parameters. ifMatch is the entity state (Etag) version
// of the logger to update. A value of "*" can be used for If-Match to
// unconditionally apply the operation.
func (client LoggersClient) Update(resourceGroupName string, serviceName string, loggerid string, parameters LoggerUpdateParameters, ifMatch string) (result autorest.Response, err error) {
	// Validate the service name locally before making any request.
	if err := validation.Validate([]validation.Validation{
		{TargetValue: serviceName,
			Constraints: []validation.Constraint{{Target: "serviceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
				{Target: "serviceName", Name: validation.MinLength, Rule: 1, Chain: nil},
				{Target: "serviceName", Name: validation.Pattern, Rule: `^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$`, Chain: nil}}}}); err != nil {
		return result, validation.NewErrorWithValidationError(err, "apimanagement.LoggersClient", "Update")
	}
	// Standard autorest pipeline: prepare, send, respond.
	req, err := client.UpdatePreparer(resourceGroupName, serviceName, loggerid, parameters, ifMatch)
	if err != nil {
		return result, autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "Update", nil, "Failure preparing request")
	}
	resp, err := client.UpdateSender(req)
	if err != nil {
		result.Response = resp
		return result, autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "Update", resp, "Failure sending request")
	}
	result, err = client.UpdateResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "Update", resp, "Failure responding to request")
	}
	return
}
// UpdatePreparer prepares the Update request.
func (client LoggersClient) UpdatePreparer(resourceGroupName string, serviceName string, loggerid string, parameters LoggerUpdateParameters, ifMatch string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"loggerid":          autorest.Encode("path", loggerid),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"serviceName":       autorest.Encode("path", serviceName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}
	queryParameters := map[string]interface{}{
		"api-version": client.APIVersion,
	}
	// PATCH with a JSON body, guarded by If-Match ("*" applies unconditionally).
	preparer := autorest.CreatePreparer(
		autorest.AsJSON(),
		autorest.AsPatch(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/loggers/{loggerid}", pathParameters),
		autorest.WithJSON(parameters),
		autorest.WithQueryParameters(queryParameters),
		autorest.WithHeader("If-Match", autorest.String(ifMatch)))
	return preparer.Prepare(&http.Request{})
}
// UpdateSender sends the Update request. The method will close the
// http.Response Body if it receives an error.
func (client LoggersClient) UpdateSender(req *http.Request) (*http.Response, error) {
	// SendWithSender applies the client's configured decorators (retry, logging, ...).
	return autorest.SendWithSender(client, req)
}
// UpdateResponder handles the response to the Update request. The method always
// closes the http.Response Body.
func (client LoggersClient) UpdateResponder(resp *http.Response) (result autorest.Response, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		// 200 and 204 are both valid outcomes for an update.
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
		autorest.ByClosing())
	result.Response = resp
	return
}
|
stubey/azure-sdk-for-go
|
arm/apimanagement/loggers.go
|
GO
|
apache-2.0
| 20,774 |
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kubernetes
import (
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"sync"
"time"
"github.com/prometheus/common/log"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/util/httputil"
"github.com/prometheus/prometheus/util/strutil"
)
const (
	sourceServicePrefix = "services"

	// kubernetesMetaLabelPrefix is the meta prefix used for all meta labels.
	// in this discovery.
	metaLabelPrefix = model.MetaLabelPrefix + "kubernetes_"
	// serviceNamespaceLabel is the name for the label containing a target's service namespace.
	serviceNamespaceLabel = metaLabelPrefix + "service_namespace"
	// serviceNameLabel is the name for the label containing a target's service name.
	serviceNameLabel = metaLabelPrefix + "service_name"
	// nodeLabelPrefix is the prefix for the node labels.
	nodeLabelPrefix = metaLabelPrefix + "node_label_"
	// serviceLabelPrefix is the prefix for the service labels.
	serviceLabelPrefix = metaLabelPrefix + "service_label_"
	// serviceAnnotationPrefix is the prefix for the service annotations.
	serviceAnnotationPrefix = metaLabelPrefix + "service_annotation_"
	// nodesTargetGroupName is the name given to the target group for nodes.
	nodesTargetGroupName = "nodes"
	// apiServersTargetGroupName is the name given to the target group for API servers.
	apiServersTargetGroupName = "apiServers"
	// roleLabel is the name for the label containing a target's role.
	roleLabel = metaLabelPrefix + "role"

	// Well-known service-account credential paths mounted into every pod.
	serviceAccountToken  = "/var/run/secrets/kubernetes.io/serviceaccount/token"
	serviceAccountCACert = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"

	// REST paths of the Kubernetes API (v1) used by this discovery.
	apiVersion          = "v1"
	apiPrefix           = "/api/" + apiVersion
	nodesURL            = apiPrefix + "/nodes"
	servicesURL         = apiPrefix + "/services"
	endpointsURL        = apiPrefix + "/endpoints"
	serviceEndpointsURL = apiPrefix + "/namespaces/%s/endpoints/%s"
)
// Discovery implements a TargetProvider for Kubernetes services.
type Discovery struct {
	client *http.Client
	Conf   *config.KubernetesSDConfig

	apiServers   []config.URL
	apiServersMu sync.RWMutex // guards apiServers (head is rotated on request failure)
	nodes        map[string]*Node
	services     map[string]map[string]*Service // namespace -> service name -> service
	nodesMu      sync.RWMutex                   // guards nodes
	servicesMu   sync.RWMutex                   // guards services
	runDone      chan struct{}
}
// Initialize sets up the discovery for usage.
func (kd *Discovery) Initialize() error {
	// Build the (possibly TLS/token-authenticated) API client first; nothing
	// else is set if that fails.
	httpClient, err := newKubernetesHTTPClient(kd.Conf)
	if err != nil {
		return err
	}
	kd.client = httpClient
	kd.apiServers = kd.Conf.APIServers
	kd.runDone = make(chan struct{})
	return nil
}
// Sources implements the TargetProvider interface.
func (kd *Discovery) Sources() []string {
	// One source per API server, plus one per discovered node and service.
	sourceNames := make([]string, 0, len(kd.apiServers))
	for _, apiServer := range kd.apiServers {
		sourceNames = append(sourceNames, apiServersTargetGroupName+":"+apiServer.Host)
	}
	nodes, _, err := kd.getNodes()
	if err != nil {
		// If we can't list nodes then we can't watch them. Assume this is a misconfiguration
		// & log & return empty.
		log.Errorf("Unable to initialize Kubernetes nodes: %s", err)
		return []string{}
	}
	sourceNames = append(sourceNames, kd.nodeSources(nodes)...)
	services, _, err := kd.getServices()
	if err != nil {
		// If we can't list services then we can't watch them. Assume this is a misconfiguration
		// & log & return empty.
		log.Errorf("Unable to initialize Kubernetes services: %s", err)
		return []string{}
	}
	sourceNames = append(sourceNames, kd.serviceSources(services)...)
	return sourceNames
}
// nodeSources derives one source name per known node, prefixed with the
// nodes target-group name.
func (kd *Discovery) nodeSources(nodes map[string]*Node) []string {
	var names []string
	for nodeName := range nodes {
		names = append(names, nodesTargetGroupName+":"+nodeName)
	}
	return names
}
// serviceSources flattens the namespace -> name -> service map into one
// source name per service.
func (kd *Discovery) serviceSources(services map[string]map[string]*Service) []string {
	var names []string
	for _, byName := range services {
		for _, svc := range byName {
			names = append(names, serviceSource(svc))
		}
	}
	return names
}
// Run implements the TargetProvider interface.
func (kd *Discovery) Run(ch chan<- config.TargetGroup, done <-chan struct{}) {
	defer close(ch)

	// Emit the API-server target group once up front.
	if tg := kd.updateAPIServersTargetGroup(); tg != nil {
		select {
		case ch <- *tg:
		case <-done:
			return
		}
	}

	retryInterval := time.Duration(kd.Conf.RetryInterval)
	// Buffered so the watch goroutines can make some progress even while we
	// are blocked sending a target group downstream.
	update := make(chan interface{}, 10)
	go kd.watchNodes(update, done, retryInterval)
	go kd.startServiceWatch(update, done, retryInterval)

	var tg *config.TargetGroup
	for {
		select {
		case <-done:
			return
		case event := <-update:
			// Translate the watch event into a refreshed target group.
			switch obj := event.(type) {
			case *nodeEvent:
				kd.updateNode(obj.Node, obj.EventType)
				tg = kd.updateNodesTargetGroup()
			case *serviceEvent:
				tg = kd.updateService(obj.Service, obj.EventType)
			case *endpointsEvent:
				tg = kd.updateServiceEndpoints(obj.Endpoints, obj.EventType)
			}
		}
		// Some events produce no change worth forwarding.
		if tg == nil {
			continue
		}
		select {
		case ch <- *tg:
		case <-done:
			return
		}
	}
}
// queryAPIServerPath issues a GET for the given API path through the
// failover logic in queryAPIServerReq.
func (kd *Discovery) queryAPIServerPath(path string) (*http.Response, error) {
	req, err := http.NewRequest("GET", path, nil)
	if err != nil {
		return nil, err
	}
	return kd.queryAPIServerReq(req)
}
// queryAPIServerReq sends req to the current API server, failing over to the
// other configured servers on error.
func (kd *Discovery) queryAPIServerReq(req *http.Request) (*http.Response, error) {
	// Lock in case we need to rotate API servers to request.
	kd.apiServersMu.Lock()
	defer kd.apiServersMu.Unlock()
	var lastErr error
	// Try each configured server at most once. Index 0 is always the
	// "current" server because rotateAPIServers shifts the slice on failure.
	for i := 0; i < len(kd.apiServers); i++ {
		cloneReq := *req // shallow copy; only the URL host/scheme are overwritten
		cloneReq.URL.Host = kd.apiServers[0].Host
		cloneReq.URL.Scheme = kd.apiServers[0].Scheme
		res, err := kd.client.Do(&cloneReq)
		if err == nil {
			return res, nil
		}
		lastErr = err
		kd.rotateAPIServers()
	}
	return nil, fmt.Errorf("Unable to query any API servers: %v", lastErr)
}
// rotateAPIServers moves the failing head server to the back of the list.
// Callers must hold apiServersMu. No-op with a single server.
func (kd *Discovery) rotateAPIServers() {
	if len(kd.apiServers) > 1 {
		kd.apiServers = append(kd.apiServers[1:], kd.apiServers[0])
	}
}
// updateAPIServersTargetGroup builds the static target group containing all
// configured API servers, labelled with role "apiserver".
func (kd *Discovery) updateAPIServersTargetGroup() *config.TargetGroup {
	tg := &config.TargetGroup{
		Source: apiServersTargetGroupName,
		Labels: model.LabelSet{
			roleLabel: model.LabelValue("apiserver"),
		},
	}
	for _, apiServer := range kd.apiServers {
		apiServerAddress := apiServer.Host
		_, _, err := net.SplitHostPort(apiServerAddress)
		// If error then no port is specified - use default for scheme.
		if err != nil {
			switch apiServer.Scheme {
			case "http":
				apiServerAddress = net.JoinHostPort(apiServerAddress, "80")
			case "https":
				apiServerAddress = net.JoinHostPort(apiServerAddress, "443")
			}
		}
		t := model.LabelSet{
			model.AddressLabel: model.LabelValue(apiServerAddress),
			model.SchemeLabel:  model.LabelValue(apiServer.Scheme),
		}
		tg.Targets = append(tg.Targets, t)
	}
	return tg
}
// updateNodesTargetGroup builds a fresh target group from the cached nodes,
// one target per node addressed at its first reported address and the
// configured kubelet port.
func (kd *Discovery) updateNodesTargetGroup() *config.TargetGroup {
	kd.nodesMu.RLock()
	defer kd.nodesMu.RUnlock()
	tg := &config.TargetGroup{
		Source: nodesTargetGroupName,
		Labels: model.LabelSet{
			roleLabel: model.LabelValue("node"),
		},
	}
	// Now let's loop through the nodes & add them to the target group with appropriate labels.
	for nodeName, node := range kd.nodes {
		// NOTE(review): assumes Status.Addresses is non-empty for every
		// cached node — would panic otherwise; confirm the API guarantees this.
		address := fmt.Sprintf("%s:%d", node.Status.Addresses[0].Address, kd.Conf.KubeletPort)
		t := model.LabelSet{
			model.AddressLabel:  model.LabelValue(address),
			model.InstanceLabel: model.LabelValue(nodeName),
		}
		// Node labels are re-exported with a sanitized meta-label prefix.
		for k, v := range node.ObjectMeta.Labels {
			labelName := strutil.SanitizeLabelName(nodeLabelPrefix + k)
			t[model.LabelName(labelName)] = model.LabelValue(v)
		}
		tg.Targets = append(tg.Targets, t)
	}
	return tg
}
// updateNode applies a single watch event to the local node cache.
func (kd *Discovery) updateNode(node *Node, eventType EventType) {
	kd.nodesMu.Lock()
	defer kd.nodesMu.Unlock()
	name := node.ObjectMeta.Name
	switch eventType {
	case deleted:
		// Forget the node entirely.
		delete(kd.nodes, name)
	case added, modified:
		// Insert or refresh the cached node; other event types are ignored.
		kd.nodes[name] = node
	}
}
// getNodes lists all cluster nodes, returning them keyed by name together
// with the list's resourceVersion (used to start a watch) and any error.
func (kd *Discovery) getNodes() (map[string]*Node, string, error) {
	res, err := kd.queryAPIServerPath(nodesURL)
	if err != nil {
		// If we can't list nodes then we can't watch them. Assume this is a misconfiguration
		// & return error.
		return nil, "", fmt.Errorf("Unable to list Kubernetes nodes: %s", err)
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return nil, "", fmt.Errorf("Unable to list Kubernetes nodes. Unexpected response: %d %s", res.StatusCode, res.Status)
	}
	// Read the whole body first so a decode failure can report the complete
	// payload. The previous streaming decoder had already consumed part of
	// the body by the time ReadAll ran, yielding a truncated error message.
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return nil, "", fmt.Errorf("Unable to list Kubernetes nodes. Failed reading response body: %s", err)
	}
	var nodes NodeList
	if err := json.Unmarshal(body, &nodes); err != nil {
		return nil, "", fmt.Errorf("Unable to list Kubernetes nodes. Unexpected response body: %s", string(body))
	}
	nodeMap := make(map[string]*Node, len(nodes.Items))
	for idx, node := range nodes.Items {
		// Take the address of the slice element, not the loop variable, so
		// the pointer remains valid after the loop.
		nodeMap[node.ObjectMeta.Name] = &nodes.Items[idx]
	}
	return nodeMap, nodes.ResourceVersion, nil
}
// getServices lists all services grouped by namespace, returning the map,
// the list's resourceVersion (used to start a watch) and any error.
func (kd *Discovery) getServices() (map[string]map[string]*Service, string, error) {
	res, err := kd.queryAPIServerPath(servicesURL)
	if err != nil {
		// If we can't list services then we can't watch them. Assume this is a misconfiguration
		// & return error.
		return nil, "", fmt.Errorf("Unable to list Kubernetes services: %s", err)
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return nil, "", fmt.Errorf("Unable to list Kubernetes services. Unexpected response: %d %s", res.StatusCode, res.Status)
	}
	// Read the whole body first so a decode failure can report the complete
	// payload. The previous streaming decoder had already consumed part of
	// the body by the time ReadAll ran, yielding a truncated error message.
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return nil, "", fmt.Errorf("Unable to list Kubernetes services. Failed reading response body: %s", err)
	}
	var services ServiceList
	if err := json.Unmarshal(body, &services); err != nil {
		return nil, "", fmt.Errorf("Unable to list Kubernetes services. Unexpected response body: %s", string(body))
	}
	serviceMap := map[string]map[string]*Service{}
	for idx, service := range services.Items {
		namespace, ok := serviceMap[service.ObjectMeta.Namespace]
		if !ok {
			namespace = map[string]*Service{}
			serviceMap[service.ObjectMeta.Namespace] = namespace
		}
		// Take the address of the slice element, not the loop variable, so
		// the pointer remains valid after the loop.
		namespace[service.ObjectMeta.Name] = &services.Items[idx]
	}
	return serviceMap, services.ResourceVersion, nil
}
// watchNodes lists all nodes, emits an "added" event per node, then opens a
// watch on the node list and forwards each change event. The whole sequence
// is retried every retryInterval (via until) when the stream breaks.
func (kd *Discovery) watchNodes(events chan interface{}, done <-chan struct{}, retryInterval time.Duration) {
	until(func() {
		nodes, resourceVersion, err := kd.getNodes()
		if err != nil {
			log.Errorf("Cannot initialize nodes collection: %s", err)
			return
		}
		// Reset the known nodes.
		kd.nodes = map[string]*Node{}
		for _, node := range nodes {
			events <- &nodeEvent{added, node}
		}
		req, err := http.NewRequest("GET", nodesURL, nil)
		if err != nil {
			log.Errorf("Cannot create nodes request: %s", err)
			return
		}
		// Turn the list endpoint into a watch starting at the listed version.
		values := req.URL.Query()
		values.Add("watch", "true")
		values.Add("resourceVersion", resourceVersion)
		req.URL.RawQuery = values.Encode()
		res, err := kd.queryAPIServerReq(req)
		if err != nil {
			log.Errorf("Failed to watch nodes: %s", err)
			return
		}
		defer res.Body.Close()
		if res.StatusCode != http.StatusOK {
			log.Errorf("Failed to watch nodes: %d", res.StatusCode)
			return
		}
		d := json.NewDecoder(res.Body)
		for {
			var event nodeEvent
			if err := d.Decode(&event); err != nil {
				log.Errorf("Watch nodes unexpectedly closed: %s", err)
				return
			}
			select {
			case events <- &event:
			case <-done:
				// Fix: previously this case fell through and the loop kept
				// decoding after shutdown was requested; stop like
				// watchServices does.
				return
			}
		}
	}, retryInterval, done)
}
// startServiceWatch lists all services, reconciles against the previously
// known state (emitting deletions for services that disappeared while we were
// disconnected and additions for everything currently present), then starts
// the service and endpoint watch goroutines. Retried via until on failure.
func (kd *Discovery) startServiceWatch(events chan<- interface{}, done <-chan struct{}, retryInterval time.Duration) {
	until(func() {
		// We use separate target groups for each discovered service so we'll need to clean up any if they've been deleted
		// in Kubernetes while we couldn't connect - small chance of this, but worth dealing with.
		existingServices := kd.services
		// Reset the known services.
		kd.services = map[string]map[string]*Service{}
		services, resourceVersion, err := kd.getServices()
		if err != nil {
			log.Errorf("Cannot initialize services collection: %s", err)
			return
		}
		// Now let's loop through the old services & see if they still exist in here
		for oldNSName, oldNS := range existingServices {
			if ns, ok := services[oldNSName]; !ok {
				// Whole namespace is gone; delete every service it held.
				// (Use the already-bound oldNS rather than re-indexing the map.)
				for _, service := range oldNS {
					events <- &serviceEvent{deleted, service}
				}
			} else {
				for oldServiceName, oldService := range oldNS {
					if _, ok := ns[oldServiceName]; !ok {
						events <- &serviceEvent{deleted, oldService}
					}
				}
			}
		}
		// Discard the existing services map for GC.
		existingServices = nil
		for _, ns := range services {
			for _, service := range ns {
				events <- &serviceEvent{added, service}
			}
		}
		// Run both watches concurrently; when either returns (stream broken
		// or done closed) we wait for the other before until() retries.
		var wg sync.WaitGroup
		wg.Add(2)
		go func() {
			kd.watchServices(resourceVersion, events, done)
			wg.Done()
		}()
		go func() {
			kd.watchServiceEndpoints(resourceVersion, events, done)
			wg.Done()
		}()
		wg.Wait()
	}, retryInterval, done)
}
// watchServices streams service change events from the API server into the
// events channel until the stream breaks or done is closed.
func (kd *Discovery) watchServices(resourceVersion string, events chan<- interface{}, done <-chan struct{}) {
	req, err := http.NewRequest("GET", servicesURL, nil)
	if err != nil {
		log.Errorf("Failed to create services request: %s", err)
		return
	}

	// Turn the list endpoint into a watch starting at the given version.
	query := req.URL.Query()
	query.Add("watch", "true")
	query.Add("resourceVersion", resourceVersion)
	req.URL.RawQuery = query.Encode()

	res, err := kd.queryAPIServerReq(req)
	if err != nil {
		log.Errorf("Failed to watch services: %s", err)
		return
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		log.Errorf("Failed to watch services: %d", res.StatusCode)
		return
	}

	dec := json.NewDecoder(res.Body)
	for {
		var event serviceEvent
		if err := dec.Decode(&event); err != nil {
			log.Errorf("Watch services unexpectedly closed: %s", err)
			return
		}
		select {
		case <-done:
			return
		case events <- &event:
		}
	}
}
// watchServiceEndpoints streams endpoint change events from the API server
// into the events channel until the stream breaks or done is closed.
func (kd *Discovery) watchServiceEndpoints(resourceVersion string, events chan<- interface{}, done <-chan struct{}) {
	req, err := http.NewRequest("GET", endpointsURL, nil)
	if err != nil {
		log.Errorf("Failed to create service endpoints request: %s", err)
		return
	}
	// Turn the list endpoint into a watch starting at the given version.
	values := req.URL.Query()
	values.Add("watch", "true")
	values.Add("resourceVersion", resourceVersion)
	req.URL.RawQuery = values.Encode()
	res, err := kd.queryAPIServerReq(req)
	if err != nil {
		log.Errorf("Failed to watch service endpoints: %s", err)
		return
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		log.Errorf("Failed to watch service endpoints: %d", res.StatusCode)
		return
	}
	d := json.NewDecoder(res.Body)
	for {
		var event endpointsEvent
		if err := d.Decode(&event); err != nil {
			log.Errorf("Watch service endpoints unexpectedly closed: %s", err)
			return
		}
		select {
		case events <- &event:
		case <-done:
			// Fix: previously this case fell through and the loop kept
			// decoding after shutdown; exit like watchServices does.
			return
		}
	}
}
// updateService applies a single service event to the service cache under the
// lock and returns the target group to publish (nil for unknown event types).
func (kd *Discovery) updateService(service *Service, eventType EventType) *config.TargetGroup {
	kd.servicesMu.Lock()
	defer kd.servicesMu.Unlock()

	if eventType == deleted {
		return kd.deleteService(service)
	}
	if eventType == added || eventType == modified {
		return kd.addService(service)
	}
	return nil
}
// deleteService removes the service from the cache and returns an empty
// target group for its source so downstream consumers drop it.
func (kd *Discovery) deleteService(service *Service) *config.TargetGroup {
	ns, name := service.ObjectMeta.Namespace, service.ObjectMeta.Name
	delete(kd.services[ns], name)
	// Drop the namespace entry entirely once its last service is gone.
	if len(kd.services[ns]) == 0 {
		delete(kd.services, ns)
	}
	return &config.TargetGroup{Source: serviceSource(service)}
}
// addService registers the service in the cache, fetches its endpoints from
// the API server, and returns the resulting target group (nil on failure).
func (kd *Discovery) addService(service *Service) *config.TargetGroup {
	nsName := service.ObjectMeta.Namespace
	nsServices, ok := kd.services[nsName]
	if !ok {
		nsServices = map[string]*Service{}
		kd.services[nsName] = nsServices
	}
	nsServices[service.ObjectMeta.Name] = service

	endpointURL := fmt.Sprintf(serviceEndpointsURL, nsName, service.ObjectMeta.Name)
	res, err := kd.queryAPIServerPath(endpointURL)
	if err != nil {
		log.Errorf("Error getting service endpoints: %s", err)
		return nil
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		log.Errorf("Failed to get service endpoints: %d", res.StatusCode)
		return nil
	}

	var eps Endpoints
	if err := json.NewDecoder(res.Body).Decode(&eps); err != nil {
		log.Errorf("Error getting service endpoints: %s", err)
		return nil
	}
	return kd.updateServiceTargetGroup(service, &eps)
}
// updateServiceTargetGroup builds the target group for a service: one target
// for the service's cluster-internal DNS name plus one target per endpoint
// address, with the service's labels/annotations mapped to Prometheus labels.
func (kd *Discovery) updateServiceTargetGroup(service *Service, eps *Endpoints) *config.TargetGroup {
	tg := &config.TargetGroup{
		Source: serviceSource(service),
		Labels: model.LabelSet{
			serviceNamespaceLabel: model.LabelValue(service.ObjectMeta.Namespace),
			serviceNameLabel:      model.LabelValue(service.ObjectMeta.Name),
		},
	}
	// Expose Kubernetes labels and annotations as (sanitized) target labels.
	for k, v := range service.ObjectMeta.Labels {
		labelName := strutil.SanitizeLabelName(serviceLabelPrefix + k)
		tg.Labels[model.LabelName(labelName)] = model.LabelValue(v)
	}
	for k, v := range service.ObjectMeta.Annotations {
		labelName := strutil.SanitizeLabelName(serviceAnnotationPrefix + k)
		tg.Labels[model.LabelName(labelName)] = model.LabelValue(v)
	}
	serviceAddress := service.ObjectMeta.Name + "." + service.ObjectMeta.Namespace + ".svc"
	// Append the first TCP service port if one exists.
	for _, port := range service.Spec.Ports {
		if port.Protocol == ProtocolTCP {
			serviceAddress += fmt.Sprintf(":%d", port.Port)
			break
		}
	}
	t := model.LabelSet{
		model.AddressLabel: model.LabelValue(serviceAddress),
		roleLabel:          model.LabelValue("service"),
	}
	tg.Targets = append(tg.Targets, t)
	// Now let's loop through the endpoints & add them to the target group with appropriate labels.
	for _, ss := range eps.Subsets {
		// Guard: a subset without ports would make Ports[0] panic.
		if len(ss.Ports) == 0 {
			continue
		}
		epPort := ss.Ports[0].Port
		for _, addr := range ss.Addresses {
			ipAddr := addr.IP
			// Bracket IPv6 literals so "host:port" joining stays parseable.
			// Fix: the previous check (len(ipAddr) == net.IPv6len) compared
			// the textual length against the 16-byte binary length and so
			// misclassified addresses; parse the IP instead.
			if ip := net.ParseIP(ipAddr); ip != nil && ip.To4() == nil {
				ipAddr = "[" + ipAddr + "]"
			}
			address := fmt.Sprintf("%s:%d", ipAddr, epPort)
			t := model.LabelSet{
				model.AddressLabel: model.LabelValue(address),
				roleLabel:          model.LabelValue("endpoint"),
			}
			tg.Targets = append(tg.Targets, t)
		}
	}
	return tg
}
// updateServiceEndpoints rebuilds the target group for the service that owns
// the given endpoints object; returns nil when the service is not cached.
func (kd *Discovery) updateServiceEndpoints(endpoints *Endpoints, eventType EventType) *config.TargetGroup {
	kd.servicesMu.Lock()
	defer kd.servicesMu.Unlock()

	service, ok := kd.services[endpoints.ObjectMeta.Namespace][endpoints.ObjectMeta.Name]
	if !ok {
		// Endpoints for a service we don't know about: nothing to update.
		return nil
	}
	return kd.updateServiceTargetGroup(service, endpoints)
}
// newKubernetesHTTPClient builds the HTTP client used to talk to the
// Kubernetes API server: TLS settings from the config (with in-cluster
// service-account fallbacks), a dial timeout, and optional bearer-token and
// basic-auth round trippers layered on top of the transport.
func newKubernetesHTTPClient(conf *config.KubernetesSDConfig) (*http.Client, error) {
	bearerTokenFile := conf.BearerTokenFile
	caFile := conf.TLSConfig.CAFile
	if conf.InCluster {
		// Fall back to the mounted service-account credentials when running
		// inside the cluster and no explicit files were configured.
		if len(bearerTokenFile) == 0 {
			bearerTokenFile = serviceAccountToken
		}
		if len(caFile) == 0 {
			// With recent versions, the CA certificate is mounted as a secret
			// but we need to handle older versions too. In this case, don't
			// set the CAFile & the configuration will have to use InsecureSkipVerify.
			if _, err := os.Stat(serviceAccountCACert); err == nil {
				caFile = serviceAccountCACert
			}
		}
	}
	tlsOpts := httputil.TLSOptions{
		InsecureSkipVerify: conf.TLSConfig.InsecureSkipVerify,
		CAFile:             caFile,
		CertFile:           conf.TLSConfig.CertFile,
		KeyFile:            conf.TLSConfig.KeyFile,
	}
	tlsConfig, err := httputil.NewTLSConfig(tlsOpts)
	if err != nil {
		return nil, err
	}
	// Base transport: apply the configured request timeout to dialing only.
	var rt http.RoundTripper = &http.Transport{
		Dial: func(netw, addr string) (c net.Conn, err error) {
			c, err = net.DialTimeout(netw, addr, time.Duration(conf.RequestTimeout))
			return
		},
		TLSClientConfig: tlsConfig,
	}
	// If a bearer token is provided, create a round tripper that will set the
	// Authorization header correctly on each request.
	bearerToken := conf.BearerToken
	if len(bearerToken) == 0 && len(bearerTokenFile) > 0 {
		b, err := ioutil.ReadFile(bearerTokenFile)
		if err != nil {
			return nil, fmt.Errorf("unable to read bearer token file %s: %s", bearerTokenFile, err)
		}
		bearerToken = string(b)
	}
	if len(bearerToken) > 0 {
		rt = httputil.NewBearerAuthRoundTripper(bearerToken, rt)
	}
	// Basic auth wraps last, so (when both are configured) its header is
	// applied on top of the bearer-auth round tripper.
	if conf.BasicAuth != nil {
		rt = httputil.NewBasicAuthRoundTripper(conf.BasicAuth.Username, conf.BasicAuth.Password, rt)
	}
	return &http.Client{
		Transport: rt,
	}, nil
}
// serviceSource derives the unique target-group source ID for a service,
// of the form "<prefix>:<namespace>/<name>".
func serviceSource(service *Service) string {
	return fmt.Sprintf("%s:%s/%s", sourceServicePrefix, service.ObjectMeta.Namespace, service.ObjectMeta.Name)
}
// Until loops until stop channel is closed, running f every period.
// f may not be invoked if stop channel is already closed.
func until(f func(), period time.Duration, stopCh <-chan struct{}) {
select {
case <-stopCh:
return
default:
f()
}
for {
select {
case <-stopCh:
return
case <-time.After(period):
f()
}
}
}
|
wu8685/prometheus
|
retrieval/discovery/kubernetes/discovery.go
|
GO
|
apache-2.0
| 21,262 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.commons.imaging.common;
import java.util.ArrayList;
import java.util.List;
/**
 * Default {@link IImageMetadata} implementation: an ordered list of
 * keyword/text metadata items with a hierarchical, tab-indented string form.
 */
public class ImageMetadata implements IImageMetadata {
    private static final String NEWLINE = System.getProperty("line.separator");

    /** Backing list of metadata entries, in insertion order. */
    private final List<IImageMetadataItem> items = new ArrayList<IImageMetadataItem>();

    /** Convenience overload: wraps the keyword/text pair in an {@link Item}. */
    public void add(final String keyword, final String text) {
        add(new Item(keyword, text));
    }

    public void add(final IImageMetadataItem item) {
        items.add(item);
    }

    /** Returns a defensive copy so callers cannot mutate internal state. */
    public List<? extends IImageMetadataItem> getItems() {
        return new ArrayList<IImageMetadataItem>(items);
    }

    @Override
    public String toString() {
        return toString(null);
    }

    /**
     * Renders all items, one per line, each indented one tab level deeper
     * than the given prefix ({@code null} is treated as the empty prefix).
     */
    public String toString(String prefix) {
        if (null == prefix) {
            prefix = "";
        }
        final StringBuilder result = new StringBuilder();
        boolean first = true;
        for (final IImageMetadataItem item : items) {
            if (!first) {
                result.append(NEWLINE);
            }
            first = false;
            result.append(item.toString(prefix + "\t"));
        }
        return result.toString();
    }

    /** Simple immutable keyword/text metadata entry. */
    public static class Item implements IImageMetadataItem {
        private final String keyword;
        private final String text;

        public Item(final String keyword, final String text) {
            this.keyword = keyword;
            this.text = text;
        }

        public String getKeyword() {
            return keyword;
        }

        public String getText() {
            return text;
        }

        @Override
        public String toString() {
            return toString(null);
        }

        /** Renders as {@code keyword: text}, prepending the prefix if given. */
        public String toString(final String prefix) {
            final String result = keyword + ": " + text;
            return (prefix != null) ? prefix + result : result;
        }
    }
}
|
windwardadmin/android-awt
|
src/main/java/org/apache/commons/imaging/common/ImageMetadata.java
|
Java
|
apache-2.0
| 2,989 |
<?php
// Logout: destroy the current session before redirecting to the login page.
// The original script only redirected, which left the user's session data
// (and therefore their authenticated state) intact.
session_start();
session_unset();
session_destroy();
header("Location: Login.php");
// Stop execution; code after a redirect header would otherwise still run.
exit;
?>
|
ThanakornDN/WorkShopI
|
Logout.php
|
PHP
|
apache-2.0
| 38 |
package io.intercom.api;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.List;
/**
 * A paginated collection of {@link Note} objects.
 *
 * <p>Thin typed wrapper over {@code TypedDataCollection}: it binds the JSON
 * page to the {@code "notes"} property and types {@link #nextPage()} so
 * callers can iterate pages without casting.
 */
@SuppressWarnings("UnusedDeclaration")
@JsonIgnoreProperties(ignoreUnknown = true)
public class NoteCollection extends TypedDataCollection<Note> {
    public NoteCollection() {
    }

    /** Fetches the next page of notes, typed as a {@code NoteCollection}. */
    @Override
    public NoteCollection nextPage() {
        return fetchNextPage(NoteCollection.class);
    }

    // Override exists only to attach the @JsonProperty("notes") binding;
    // behavior is inherited unchanged from the superclass.
    @SuppressWarnings("EmptyMethod")
    @JsonProperty("notes")
    @Override
    public List<Note> getPage() {
        return super.getPage();
    }
}
|
intercom/intercom-java
|
intercom-java/src/main/java/io/intercom/api/NoteCollection.java
|
Java
|
apache-2.0
| 617 |
/*
* Copyright (C) 2015 The Gravitee team (http://gravitee.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {AfterViewInit, Component, ElementRef, Inject, Input, OnInit, ViewChild} from "@angular/core";
import { ActivatedRoute, Router } from "@angular/router";
import { SnackbarService } from "../../../../services/snackbar.service";
import { DialogService } from "../../../../services/dialog.service";
import { EmailService } from "../../../../services/email.service";
import { NgForm } from "@angular/forms";
import { MAT_DIALOG_DATA, MatDialog, MatDialogRef } from "@angular/material/dialog";
/** Payload passed to EmailInfoDialog via MAT_DIALOG_DATA. */
export interface DialogData {
  // Template identifier as received in the 'template' query parameter.
  rawTemplate: string;
  // Lower-cased, space-separated variant of rawTemplate used for display.
  template: string;
}
@Component({
  selector: 'app-email',
  templateUrl: './email.component.html',
  styleUrls: ['./email.component.scss']
})
/**
 * Editor for a single email template: loads the email (if any) from the
 * resolved route data, lets the user edit its content in a CodeMirror editor,
 * previews it in an iframe, and supports create/update/delete depending on
 * the *Mode inputs.
 */
export class EmailComponent implements OnInit, AfterViewInit {
  private domainId: string;
  private appId: string;
  // Placeholder content used when no email exists yet (or after deletion).
  private defaultEmailContent = `// Custom email...`;
  template: string;
  rawTemplate: string;
  email: any;
  emailName: string;
  // (' ' + s).slice(1) forces a copy of the string value.
  emailContent: string = (' ' + this.defaultEmailContent).slice(1);
  // Snapshot used to detect whether the editor content actually changed.
  originalEmailContent: string = (' ' + this.emailContent).slice(1);
  emailFound = false;
  formChanged = false;
  config: any = { lineNumbers: true, readOnly: true};
  @ViewChild('editor', { static: true }) editor: any;
  @ViewChild('preview', { static: true }) preview: ElementRef;
  @ViewChild('emailForm', { static: true }) public emailForm: NgForm;
  @Input('createMode') createMode: boolean;
  @Input('editMode') editMode: boolean;
  @Input('deleteMode') deleteMode: boolean;

  constructor(private router: Router,
              private route: ActivatedRoute,
              private emailService: EmailService,
              private snackbarService: SnackbarService,
              private dialogService: DialogService,
              public dialog: MatDialog) { }

  // Initializes state from the resolved route data and query parameters.
  ngOnInit() {
    this.domainId = this.route.snapshot.data['domain']?.id;
    this.appId = this.route.snapshot.params['appId'];
    this.rawTemplate = this.route.snapshot.queryParams['template'];
    this.email = this.route.snapshot.data['email']
    if (this.email && this.email.content) {
      // Existing email: use its content as both current and original.
      this.emailContent = this.email.content;
      this.originalEmailContent = (' ' + this.emailContent).slice(1);
      this.emailFound = true;
    } else {
      // No email yet: seed a new object from the query-string template.
      this.email = {};
      this.email.template = this.rawTemplate
      this.email.expiresAfter = (this.email.template === 'MFA_CHALLENGE' ? 600 : 86400);
    }
    // Human-readable name, e.g. "MFA_CHALLENGE" -> "Mfa challenge".
    this.template = this.rawTemplate.toLowerCase().replace(/_/g, ' ');
    this.emailName = this.template.charAt(0).toUpperCase() + this.template.slice(1);
  }

  // Editor read-only state depends on the email's enabled flag, which is
  // only known once the view (and the CodeMirror instance) exists.
  ngAfterViewInit(): void {
    this.enableCodeMirror();
  }

  isEnabled() {
    return this.email && this.email.enabled;
  }

  // Toggle handler: flips enabled state and unlocks/locks the editor.
  enableEmail(event) {
    this.email.enabled = event.checked;
    this.enableCodeMirror();
    this.formChanged = true;
  }

  // Re-render the preview when its tab (index 1) is selected.
  onTabSelectedChanged(e) {
    if (e.index === 1) {
      this.refreshPreview();
    }
  }

  // Writes the current content into the preview iframe's document.
  refreshPreview() {
    let doc = this.preview.nativeElement.contentDocument || this.preview.nativeElement.contentWindow;
    doc.open();
    doc.write(this.emailContent);
    doc.close();
  }

  // Marks the form dirty once the editor content diverges from the original.
  onContentChanges(e) {
    if (e !== this.originalEmailContent) {
      this.formChanged = true;
    }
  }

  // Grows the preview iframe to fit its rendered document.
  resizeIframe() {
    this.preview.nativeElement.style.height = this.preview.nativeElement.contentWindow.document.body.scrollHeight + 'px';
  }

  // Editing an existing email requires editMode; creating requires createMode.
  canEdit(): boolean {
    return this.emailFound ? this.editMode : this.createMode;
  }

  // Dispatches to create or update depending on whether the email exists.
  save() {
    if (!this.emailFound) {
      this.create();
    } else {
      this.update();
    }
  }

  create() {
    this.email['content'] = this.emailContent;
    this.emailService.create(this.domainId, this.appId, this.email).subscribe(data => {
      this.snackbarService.open('Email created');
      this.emailFound = true;
      this.email = data;
      this.formChanged = false;
      // Reset the form's pristine/dirty state to the saved email.
      this.emailForm.reset(this.email);
    })
  }

  update() {
    this.email['content'] = this.emailContent;
    this.emailService.update(this.domainId, this.appId, this.email.id, this.email).subscribe(data => {
      this.snackbarService.open('Email updated');
      this.emailFound = true;
      this.email = data;
      this.formChanged = false;
      this.emailForm.reset(this.email);
    })
  }

  // Confirms, deletes on the server, then resets local state to "new email".
  delete(event) {
    event.preventDefault();
    this.dialogService
      .confirm('Delete email', 'Are you sure you want to delete this email ?')
      .subscribe(res => {
        if (res) {
          this.emailService.delete(this.domainId, this.appId, this.email.id).subscribe(response => {
            this.snackbarService.open('Email deleted');
            this.email = {};
            this.email.template = this.route.snapshot.queryParams['template'];
            this.email.expiresAfter = 86400;
            this.emailContent = (' ' + this.defaultEmailContent).slice(1);
            this.originalEmailContent = (' ' + this.emailContent).slice(1);
            this.emailFound = false;
            this.formChanged = false;
            this.enableCodeMirror();
          });
        }
      });
  }

  // Opens the informational dialog describing the current template.
  openDialog() {
    this.dialog.open(EmailInfoDialog, {
      data: {rawTemplate: this.rawTemplate, template: this.template}
    });
  }

  isFormInvalid() {
    return (this.emailForm.pristine || !this.emailForm.valid) && !this.formChanged;
  }

  // CodeMirror is read-only whenever the email is disabled.
  private enableCodeMirror(): void {
    this.editor.instance.setOption('readOnly', !this.email.enabled);
  }
}
@Component({
  selector: 'email-info-dialog',
  templateUrl: './dialog/email-info.component.html',
})
/** Read-only dialog showing template information; data comes in via MAT_DIALOG_DATA. */
export class EmailInfoDialog {
  constructor(public dialogRef: MatDialogRef<EmailInfoDialog>, @Inject(MAT_DIALOG_DATA) public data: DialogData) {}
}
|
gravitee-io/graviteeio-access-management
|
gravitee-am-ui/src/app/domain/components/emails/email/email.component.ts
|
TypeScript
|
apache-2.0
| 6,286 |
/*
* $Header$
* $Revision: 1228 $
* $Date: 2006-11-08 08:00:22 -0800 (Wed, 08 Nov 2006) $
*
* ====================================================================
*
* Copyright 2000-2002 bob mcwhirter & James Strachan.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* * Neither the name of the Jaxen Project nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
* IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ====================================================================
* This software consists of voluntary contributions made by many
* individuals on behalf of the Jaxen Project and was originally
* created by bob mcwhirter <bob@werken.com> and
* James Strachan <jstrachan@apache.org>. For more information on the
* Jaxen Project, please see <http://www.jaxen.org/>.
*
* $Id: UnionExpr.java 1228 2006-11-08 16:00:22Z elharo $
*/
package net.arnx.xmlic.internal.org.jaxen.expr;
/**
 * Represents an XPath union expression. This is production 18 in the
 * <a href="http://www.w3.org/TR/xpath#NT-UnionExpr">XPath 1.0 specification</a>:
 *
 * <table><tr valign="baseline">
 * <td><a name="NT-UnionExpr"></a>[18] </td><td>UnionExpr</td><td> ::= </td><td><a href="http://www.w3.org/TR/xpath#NT-PathExpr">PathExpr</a></td><td></td>
 * </tr><tr valign="baseline">
 * <td></td><td></td><td></td><td>| <a href="http://www.w3.org/TR/xpath#NT-UnionExpr">UnionExpr</a> '|' <a href="http://www.w3.org/TR/xpath#NT-PathExpr">PathExpr</a>
 * </tr></table>
 *
 */
public interface UnionExpr extends BinaryExpr
{
    // Marker interface: a union expression declares no operations beyond
    // those inherited from BinaryExpr.
}
|
hidekatsu-izuno/xmlic
|
src/main/java/net/arnx/xmlic/internal/org/jaxen/expr/UnionExpr.java
|
Java
|
apache-2.0
| 3,019 |
/*
Copyright 2019 Esri
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
using System;
using System.Collections.Generic;
using System.Text;
using ESRI.ArcGIS.Framework;
using Microsoft.Win32;
namespace RecentFilesCommandsCS
{
    /// <summary>
    /// Helper class to process recent file lists stored in the registry
    /// </summary>
    class RecentFilesRegistryHelper
    {
        // Format placeholders: {0} = ArcGIS runtime version, {1} = application name.
        const string RecentFileRegistryKeyPath = @"Software\ESRI\Desktop{0}\{1}\Recent File List";

        /// <summary>
        /// Reads the recent-file list for the given application from HKCU.
        /// Returns an empty array when the registry key does not exist or
        /// contains no non-empty values.
        /// </summary>
        public static string[] GetRecentFiles(IApplication app)
        {
            List<string> recentFilePaths = new List<string>();

            //Read the registry to get the recent file list
            string version = ESRI.ArcGIS.RuntimeManager.ActiveRuntime.Version;
            string openKey = string.Format(RecentFileRegistryKeyPath, version, app.Name);

            // RegistryKey is IDisposable; the original leaked the handle.
            using (RegistryKey recentListKey = Registry.CurrentUser.OpenSubKey(openKey))
            {
                if (recentListKey != null)
                {
                    string[] listNames = recentListKey.GetValueNames();
                    foreach (string name in listNames)
                    {
                        string fileName = recentListKey.GetValue(name, string.Empty).ToString();
                        if (!string.IsNullOrEmpty(fileName))
                            recentFilePaths.Add(fileName);
                    }
                }
            }
            return recentFilePaths.ToArray();
        }
    }
}
|
Esri/arcobjects-sdk-community-samples
|
Net/Framework/RecentFilesCommands/CSharp/RecentFilesRegistryHelper.cs
|
C#
|
apache-2.0
| 1,930 |
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Program stackdriver-reverse-proxy provides a Stackdriver reverse
// proxy that creates traces for the incoming requests, logs request
// details, and reports errors.
package main
import (
"flag"
"fmt"
"log"
"net/http"
"net/http/httputil"
"net/url"
"os"
"go.opencensus.io/exporter/stackdriver"
"go.opencensus.io/exporter/stackdriver/propagation"
"go.opencensus.io/plugin/ochttp"
"go.opencensus.io/stats/view"
"go.opencensus.io/trace"
)
var (
projectID string
listen string
target string
tlsCert string
tlsKey string
traceFrac float64
disableMonitoring bool
monitoringPeriod string
)
const usage = `stackdriver-reverse-proxy [opts...] -target=<host:port>
For example, to start at localhost:6996 to proxy requests to localhost:6060,
$ stackdriver-reverse-proxy -target=http://localhost:6060
Options:
-http hostname:port to start the proxy server, by default localhost:6996.
-target hostname:port where the app server is running.
-project Google Cloud Platform project ID if running outside of GCP.
Tracing options:
-trace-sampling Tracing sampling fraction, between 0 and 1.0.
HTTPS options:
-tls-cert TLS cert file to start an HTTPS proxy.
-tls-key TLS key file to start an HTTPS proxy.
`
// main wires up the flags, registers the Stackdriver trace/stats exporters,
// and starts the reverse proxy (over TLS when both cert and key are given).
func main() {
	flag.Usage = func() {
		fmt.Println(usage)
	}
	flag.StringVar(&projectID, "project", "", "")
	flag.StringVar(&listen, "http", ":6996", "host:port proxy listens")
	flag.StringVar(&target, "target", "", "target server")
	flag.Float64Var(&traceFrac, "trace-sampling", 1, "sampling fraction for tracing")
	flag.StringVar(&tlsCert, "tls-cert", "", "TLS cert file to start an HTTPS proxy")
	flag.StringVar(&tlsKey, "tls-key", "", "TLS key file to start an HTTPS proxy")
	flag.Parse()

	// -target is mandatory; everything else has a usable default.
	if target == "" {
		usageExit()
	}
	exporter, err := stackdriver.NewExporter(stackdriver.Options{
		ProjectID: projectID,
	})
	if err != nil {
		log.Fatal(err)
	}
	view.RegisterExporter(exporter)
	trace.RegisterExporter(exporter)
	view.Subscribe(ochttp.DefaultViews...)
	trace.SetDefaultSampler(trace.ProbabilitySampler(traceFrac))

	// Renamed from "url" to avoid shadowing the net/url package.
	targetURL, err := url.Parse(target)
	if err != nil {
		log.Fatalf("Cannot URL parse -target: %v", err)
	}
	// Proxy to the target, propagating trace context on outgoing requests.
	proxy := httputil.NewSingleHostReverseProxy(targetURL)
	proxy.Transport = &ochttp.Transport{
		Propagation: &propagation.HTTPFormat{},
	}
	if tlsCert != "" && tlsKey != "" {
		log.Fatal(http.ListenAndServeTLS(listen, tlsCert, tlsKey, proxy))
	} else {
		log.Fatal(http.ListenAndServe(listen, proxy))
	}
}
// usageExit prints the usage text and terminates with a non-zero status.
func usageExit() {
	flag.Usage()
	os.Exit(1)
}
|
GoogleCloudPlatform/stackdriver-reverse-proxy
|
cmd/stackdriver-reverse-proxy/main.go
|
GO
|
apache-2.0
| 3,169 |
/*
* Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.simplesystemsmanagement.model.transform;
import javax.annotation.Generated;
import com.amazonaws.SdkClientException;
import com.amazonaws.Request;
import com.amazonaws.http.HttpMethodName;
import com.amazonaws.services.simplesystemsmanagement.model.*;
import com.amazonaws.transform.Marshaller;
import com.amazonaws.protocol.*;
import com.amazonaws.protocol.Protocol;
import com.amazonaws.annotation.SdkInternalApi;
/**
 * DescribeInventoryDeletionsRequest Marshaller.
 *
 * <p>Generated code (see {@code @Generated}) — converts a
 * {@link DescribeInventoryDeletionsRequest} into an AWS-JSON HTTP request
 * targeting the {@code AmazonSSM.DescribeInventoryDeletions} operation.
 * Do not hand-edit behavior; changes should come from the code generator.
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
@SdkInternalApi
public class DescribeInventoryDeletionsRequestProtocolMarshaller implements
        Marshaller<Request<DescribeInventoryDeletionsRequest>, DescribeInventoryDeletionsRequest> {

    // Static description of the wire operation: AWS-JSON protocol, POST to "/",
    // with the operation identifier carried in the request payload/headers.
    private static final OperationInfo SDK_OPERATION_BINDING = OperationInfo.builder().protocol(Protocol.AWS_JSON).requestUri("/")
            .httpMethodName(HttpMethodName.POST).hasExplicitPayloadMember(false).hasPayloadMembers(true)
            .operationIdentifier("AmazonSSM.DescribeInventoryDeletions").serviceName("AWSSimpleSystemsManagement").build();

    private final com.amazonaws.protocol.json.SdkJsonProtocolFactory protocolFactory;

    public DescribeInventoryDeletionsRequestProtocolMarshaller(com.amazonaws.protocol.json.SdkJsonProtocolFactory protocolFactory) {
        this.protocolFactory = protocolFactory;
    }

    /**
     * Marshalls the request into an HTTP {@link Request}.
     *
     * @throws SdkClientException if the request is null or marshalling fails
     *         (the original exception is preserved as the cause).
     */
    public Request<DescribeInventoryDeletionsRequest> marshall(DescribeInventoryDeletionsRequest describeInventoryDeletionsRequest) {
        if (describeInventoryDeletionsRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            final ProtocolRequestMarshaller<DescribeInventoryDeletionsRequest> protocolMarshaller = protocolFactory.createProtocolMarshaller(
                    SDK_OPERATION_BINDING, describeInventoryDeletionsRequest);
            protocolMarshaller.startMarshalling();
            DescribeInventoryDeletionsRequestMarshaller.getInstance().marshall(describeInventoryDeletionsRequest, protocolMarshaller);
            return protocolMarshaller.finishMarshalling();
        } catch (Exception e) {
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
|
jentfoo/aws-sdk-java
|
aws-java-sdk-ssm/src/main/java/com/amazonaws/services/simplesystemsmanagement/model/transform/DescribeInventoryDeletionsRequestProtocolMarshaller.java
|
Java
|
apache-2.0
| 2,897 |
/**
* Copyright 2011-2021 Asakusa Framework Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asakusafw.runtime.flow;
import java.util.List;
/**
 * A List interface for reusing instances of element objects.
 *
 * <p>Typical usage (generics escaped for javadoc HTML):
 <pre><code>
 Iterator&lt;Hoge&gt; iter = ...;
 ListBuffer&lt;Hoge&gt; hoges = ...;
 ...
 hoges.begin();
 while (iter.hasNext()) {
     Hoge hoge = iter.next();
     if (hoges.isExpandRequired()) {
         hoges.expand(new Hoge());
     }
     hoges.advance().set(hoge);
 }
 hoges.end();
 // use hoges as List
 hoges.shrink();
 </code></pre>
 * @param <E> the element type
 */
public interface ListBuffer<E> extends List<E> {

    /**
     * Begins changing the list buffer.
     * Initially, the internal cursor is on the head of this buffer, and clients can move it to the next element
     * by invoking {@link #advance()}.
     * After changing the buffer, then clients must invoke {@link #end()} and the buffer can be used as the
     * unmodifiable list.
     * @see #advance()
     * @throws BufferException if failed to prepare buffer
     */
    void begin();

    /**
     * Ends changing the list buffer.
     * After this, clients should not change the buffer contents.
     * If clients want to change the buffer, must invoke {@link #begin()} once more.
     * @throws BufferException if failed to prepare buffer
     */
    void end();

    /**
     * Returns whether a new element object is required for the buffer or not.
     * If it required, clients must use {@link #expand(Object)} to add a new object before invoke {@link #advance()}.
     * This method must be invoked between {@link #begin()} and {@link #end()}.
     * @return {@code true} if a new element object is required, otherwise {@code false}
     * @see #expand(Object)
     * @throws BufferException if failed to prepare buffer
     */
    boolean isExpandRequired();

    /**
     * Adds a new element object into the tail of this buffer.
     * This method must be invoked between {@link #begin()} and {@link #end()}.
     * @param value the object
     * @throws IndexOutOfBoundsException if expand is not required (optional operation)
     * @throws BufferException if failed to prepare buffer
     * @see #isExpandRequired()
     */
    void expand(E value);

    /**
     * Returns the next element object on the internal cursor, and then move the cursor to the next element.
     * This method must be invoked between {@link #begin()} and {@link #end()}.
     * @return the next element object
     * @throws IndexOutOfBoundsException if the next element object is not prepared
     * @throws BufferException if failed to prepare buffer
     * @see #isExpandRequired()
     * @see #expand(Object)
     */
    E advance();

    /**
     * Shrinks this buffer, releasing capacity held beyond the current contents.
     */
    void shrink();
}
|
asakusafw/asakusafw
|
core-project/asakusa-runtime/src/main/java/com/asakusafw/runtime/flow/ListBuffer.java
|
Java
|
apache-2.0
| 3,339 |
/**
* Copyright (c) 2012-2013, Michael Yang 杨福海 (www.yangfuhai.com).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.tsz.afinal.bitmap.core;
import net.tsz.afinal.utils.Utils;
import android.graphics.Bitmap;
/**
 * In-memory bitmap cache backed by an LRU map keyed by string.
 * Entries are charged by bitmap byte size (via {@link Utils#getBitmapSize})
 * rather than by entry count.
 */
public class BaseMemoryCacheImpl implements IMemoryCache {
    // Underlying LRU store; evicts least-recently-used bitmaps when full.
    private final LruMemoryCache<String, Bitmap> mMemoryCache;

    /**
     * @param size cache capacity passed to the LRU map; interpreted together
     *             with sizeOf below (presumably a byte budget — TODO confirm
     *             against LruMemoryCache)
     */
    public BaseMemoryCacheImpl(int size) {
        mMemoryCache = new LruMemoryCache<String, Bitmap>(size) {
            // Measure each entry by the bitmap's byte size.
            protected int sizeOf(String key, Bitmap bitmap) {
                return Utils.getBitmapSize(bitmap);
            }
        };
    }

    /** Stores a bitmap under the given key. */
    public void put(String key, Bitmap bitmap) {
        mMemoryCache.put(key, bitmap);
    }

    /** Returns the cached bitmap for the key, or null if absent/evicted. */
    public Bitmap get(String key) {
        return mMemoryCache.get(key);
    }

    /** Removes every entry from the cache. */
    public void evictAll() {
        mMemoryCache.evictAll();
    }

    /** Removes the entry for the given key, if present. */
    public void remove(String key) {
        mMemoryCache.remove(key);
    }
}
|
luoqii/osgi_android_demos_from_network
|
apkplug/apkplugDemo/bundle/BundleDemoApkplugService/src/net/tsz/afinal/bitmap/core/BaseMemoryCacheImpl.java
|
Java
|
apache-2.0
| 1,471 |
using UnityEngine;
using System.Collections;
// Unity behaviour that A*-pathfinds a "monkey" hero across a 14x10 tile map
// (level 7) and walks it node-by-node, driving directional sprite animations.
public class monkey7 : MonoBehaviour {
    public float speed = 100f;// hero movement speed
    public Vector2 current_position;
    Animator animator;
    public Grid grid = new Grid(14,10);// pathfinding grid, 14 x 10 cells
    public AStar astar;
    public float time = 0;
    public bool finish = false;// whether the walk has been completed
    public bool walk = false;// whether walking has started
    private bool findroad = false;// whether a path has been found
    private int hasdone; // count of path nodes already traversed (index of next target)
    public background7 bg;// map data taken from the background7 script
    // Use this for initialization
    void Start () {
        GameObject backgd = GameObject.Find ("background"); // locate the object carrying the background7 map script
        bg = (background7)backgd.GetComponent (typeof(background7));
    }
    public void InitGame()// initialize the pathfinding grid
    {
        hasdone = 1;
        astar = new AStar();
        animator = GetComponent<Animator> ();
        for (int i =0; i< 14; i++) {// mark each map cell walkable or blocked
            for (int j =0; j< 10; j++) {
                if(bg.level7.map [i, j]!= 0)
                    grid.SetWalkbale(i,j,false);
                else
                    grid.SetWalkbale(i,j,true);
            }
        }
        grid.SetStartNode (10,6);
        grid.SetEndNode (7,4);
    }
    void FixedUpdate()
    {
        if(bg.stspt.click == true && walk == false) {// the start button was pressed
            InitGame();
            walk = true;
            if(astar.findPath(grid) == true)
            {
                findroad = true;
                //print(astar._path.Count);
            }
        }
        else if (walk == true && hasdone < astar._path.Count) {
            time += Time.deltaTime;
            Vector2 des;// world-space destination of the current path node
            // 64 px per tile, plus a fixed offset to the sprite anchor.
            des.x = astar._path[hasdone].x*64 + 32;
            des.y = astar._path[hasdone].y*64 + 50;
            if(WalkTo (des) == true)
            {
                hasdone++;
            }
        }
        if (astar!=null && hasdone >= astar._path.Count)
        {
            // Path exhausted: stop walking and hide the sprite.
            finish = true;
            walk = false;
            bg.stspt.click = false;
            //Destroy(gameObject);
            transform.localScale = new Vector3(0,0,0);
        }
    }
    void TurnRight()// enable transitions into the right-facing animation and disable those leaving it
    {
        //animator.SetInteger ("etorr", 1);
        animator.SetInteger ("rbtorr", 1);
        animator.SetInteger ("rftorr", 1);
        animator.SetInteger ("rrtorb", 0);
        animator.SetInteger ("rrtorf", 0);
    }
    void TurnLeft()// enable transitions into the left-facing animation and disable those leaving it
    {
        //animator.SetInteger ("etorl", 1);
        animator.SetInteger ("rbtorl", 1);
        animator.SetInteger ("rftorl", 1);
        animator.SetInteger ("rltorb", 0);
        animator.SetInteger ("rltorf", 0);
    }
    void TurnBack()// face away (upward) by toggling the animator transition flags
    {
        //animator.SetInteger ("etorb", 1);
        animator.SetInteger ("rftorb", 1);
        animator.SetInteger ("rrtorb", 1);
        animator.SetInteger ("rltorb", 1);
        animator.SetInteger ("rbtorl", 0);
        animator.SetInteger ("rbtorr", 0);
        animator.SetInteger ("rbtorf", 0);
    }
    void TurnFront()// face forward (downward) by toggling the animator transition flags
    {
        //animator.SetInteger ("etorf", 1);
        animator.SetInteger ("rbtorf", 1);
        animator.SetInteger ("rrtorf", 1);
        animator.SetInteger ("rltorf", 1);
        animator.SetInteger ("rftorr", 0);
        animator.SetInteger ("rftorl", 0);
        animator.SetInteger ("rftorb", 0);
    }
    void GoRight(Vector2 CurPos)// step right while playing the right-facing animation
    {
        TurnRight ();
        Vector2 target = Vector2.right*speed+ CurPos;
        transform.position = Vector2.Lerp( CurPos, target, Time.deltaTime );
    }
    void GoLeft(Vector2 CurPos)// step left while playing the left-facing animation
    {
        TurnLeft ();
        Vector2 target = -Vector2.right*speed + CurPos;
        transform.position = Vector2.Lerp( CurPos, target, Time.deltaTime );
    }
    void GoBack(Vector2 CurPos)// step up while playing the back-facing animation
    {
        TurnBack ();
        Vector2 target = Vector2.up*speed + CurPos;
        transform.position = Vector2.Lerp( CurPos, target, Time.deltaTime );
    }
    void GoFront(Vector2 CurPos)// step down while playing the front-facing animation
    {
        TurnFront ();
        Vector2 target = -Vector2.up*speed + CurPos;
        transform.position = Vector2.Lerp( CurPos, target, Time.deltaTime );
    }
    bool WalkTo(Vector2 target)// pick a direction toward target; returns true once within 5 units on both axes
    {
        current_position = transform.position;
        if (current_position.x < target.x && Mathf.Abs(current_position.x - target.x) >5 ) {
            GoRight (current_position);
            return false;
        }
        else if (current_position.x > target.x && Mathf.Abs(current_position.x - target.x) >5) {
            GoLeft (current_position);
            return false;
        }
        else if (current_position.y < target.y && Mathf.Abs(current_position.y - target.y) >5) {
            GoBack (current_position);
            return false;
        }
        else if (current_position.y > target.y && Mathf.Abs(current_position.y - target.y) >5) {
            GoFront(current_position);
            return false;
        }
        return true;
    }
}
|
renmaoting/maze-storm
|
maze storm/Assets/script/level7/monkey7.cs
|
C#
|
apache-2.0
| 4,277 |
package org.develnext.jphp.swing.loader.support.propertyreaders;
import org.develnext.jphp.swing.loader.support.PropertyReader;
import org.develnext.jphp.swing.loader.support.Value;
import javax.swing.*;
import java.util.HashMap;
import java.util.Map;
/**
 * Maps XML attribute names ("floatable", "vertical", "rollover") to
 * {@link PropertyReader} handlers that apply the corresponding property
 * to a {@link JToolBar}.
 */
public class JToolBarPropertyReaders extends PropertyReaders<JToolBar> {
    // Plain map populated in an instance initializer instead of the original
    // double-brace initialization, which created an extra anonymous subclass
    // of HashMap for every instance.
    protected final Map<String, PropertyReader<JToolBar>> register =
            new HashMap<String, PropertyReader<JToolBar>>();

    {
        register.put("floatable", FLOATABLE);
        register.put("vertical", VERTICAL);
        register.put("rollover", ROLLOVER);
    }

    @Override
    protected Map<String, PropertyReader<JToolBar>> getRegister() {
        return register;
    }

    @Override
    public Class<JToolBar> getRegisterClass() {
        return JToolBar.class;
    }

    /** Applies the boolean value via {@link JToolBar#setFloatable(boolean)}. */
    public final static PropertyReader<JToolBar> FLOATABLE = new PropertyReader<JToolBar>() {
        @Override
        public void read(JToolBar component, Value value) {
            component.setFloatable(value.asBoolean());
        }
    };

    /** Sets vertical orientation when true, horizontal when false. */
    public final static PropertyReader<JToolBar> VERTICAL = new PropertyReader<JToolBar>() {
        @Override
        public void read(JToolBar component, Value value) {
            component.setOrientation(value.asBoolean() ? SwingConstants.VERTICAL : SwingConstants.HORIZONTAL);
        }
    };

    /** Applies the boolean value via {@link JToolBar#setRollover(boolean)}. */
    public final static PropertyReader<JToolBar> ROLLOVER = new PropertyReader<JToolBar>() {
        @Override
        public void read(JToolBar component, Value value) {
            component.setRollover(value.asBoolean());
        }
    };
}
|
livingvirus/jphp
|
jphp-swing-ext/src/org/develnext/jphp/swing/loader/support/propertyreaders/JToolBarPropertyReaders.java
|
Java
|
apache-2.0
| 1,566 |
package android.nfc;
/*
* #%L
* Matos
* $Id:$
* $HeadURL:$
* %%
* Copyright (C) 2010 - 2014 Orange SA
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
/**
 * Stub of {@code android.nfc.NfcEvent} used by the Matos analysis profile:
 * fields carry placeholder values and the constructor is a no-op, so the
 * class exists only to satisfy static analysis, not to run.
 */
public final class NfcEvent
{
    // Fields
    // Placeholder value; in the real framework this is the dispatching adapter.
    public final NfcAdapter nfcAdapter = (NfcAdapter) null;
    // Constructors
    NfcEvent(NfcAdapter arg1){
    }
}
|
Orange-OpenSource/matos-profiles
|
matos-android/src/main/java/android/nfc/NfcEvent.java
|
Java
|
apache-2.0
| 842 |
define(function (require, exports) {
    var ko = require('knockout');

    // Register every cohort-definition viewer component with knockout.
    // Each require() call keeps a literal module id so AMD build tools
    // (r.js et al.) can still statically trace the dependencies.
    ko.components.register('cohort-expression-viewer', require('./components/CohortExpressionViewer'));
    ko.components.register('criteria-group-viewer', require('./components/CriteriaGroup'));
    ko.components.register('condition-occurrence-criteria-viewer', require('./components/ConditionOccurrence'));
    ko.components.register('condition-era-criteria-viewer', require('./components/ConditionEra'));
    ko.components.register('drug-exposure-criteria-viewer', require('./components/DrugExposure'));
    ko.components.register('drug-era-criteria-viewer', require('./components/DrugEra'));
    ko.components.register('dose-era-criteria-viewer', require('./components/DoseEra'));
    ko.components.register('procedure-occurrence-criteria-viewer', require('./components/ProcedureOccurrence'));
    ko.components.register('observation-criteria-viewer', require('./components/Observation'));
    ko.components.register('visit-occurrence-criteria-viewer', require('./components/VisitOccurrence'));
    ko.components.register('device-exposure-criteria-viewer', require('./components/DeviceExposure'));
    ko.components.register('measurement-criteria-viewer', require('./components/Measurement'));
    ko.components.register('observation-period-criteria-viewer', require('./components/ObservationPeriod'));
    ko.components.register('specimen-criteria-viewer', require('./components/Specimen'));
    ko.components.register('death-criteria-viewer', require('./components/Death'));
    ko.components.register('numeric-range-viewer', require('./components/NumericRange'));
    ko.components.register('date-range-viewer', require('./components/DateRange'));
    ko.components.register('window-input-viewer', require('./components/WindowInput'));
    ko.components.register('text-filter-viewer', require('./components/TextFilter'));
    ko.components.register('concept-list-viewer', require('./components/ConceptList'));
    ko.components.register('conceptset-reference', require('./components/ConceptSetReference'));
    ko.components.register('conceptset-viewer', require('./components/ConceptSetViewer'));
});
|
OHDSI/Circe
|
js/modules/cohortdefinitionviewer/main.js
|
JavaScript
|
apache-2.0
| 2,914 |
/*
* Copyright 2010 John Hopper
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.jps.lioc.context;
/**
 * Continuation of the context's registration API: returned after an element
 * is registered so the caller can give that registration an alias.
 *
 * @author zinic
 */
public interface ContextReferenceAliaser {
    /**
     * Creates a context sensitive reference alias for the registered element.
     *
     * @param referenceAlias the alias under which the registered element can
     *        later be referenced
     */
    void as(String referenceAlias);
}
|
zinic/jliocc
|
src/main/java/net/jps/lioc/context/ContextReferenceAliaser.java
|
Java
|
apache-2.0
| 860 |
package com.sushe.service;
import com.sushe.entity.School;
import java.util.List;
/**
 * Service-layer lookups for {@code School} entities.
 *
 * Created by jiangbin on 15/4/23.
 */
public interface SchoolService {
    /**
     * Lists all schools.
     *
     * @return every school; may be empty
     */
    List<School> selectAll();

    /**
     * Finds a school by its name.
     *
     * @param name the school name to match
     * @return the matching school (presumably {@code null} when none matches —
     *         TODO confirm against the implementation)
     */
    School selectByName(String name);
}
|
Guitjerry/schoolApp
|
src/com/sushe/service/SchoolService.java
|
Java
|
apache-2.0
| 296 |
/*
* Copyright (C) 2011 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.sop4j.base.google.common.util.concurrent;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import javax.annotation.Nullable;
/**
 * A callback for accepting the results of a {@link java.util.concurrent.Future}
 * computation asynchronously.
 *
 * <p>To attach to a {@link ListenableFuture} use {@link Futures#addCallback}.
 *
 * @author Anthony Zana
 * @since 10.0
 */
public interface FutureCallback<V> {
    /**
     * Invoked with the result of the {@code Future} computation when it is
     * successful.
     *
     * @param result the computed value; may be {@code null}
     */
    void onSuccess(@Nullable V result);
    /**
     * Invoked when a {@code Future} computation fails or is canceled.
     *
     * <p>If the future's {@link Future#get() get} method throws an {@link
     * ExecutionException}, then the cause is passed to this method. Any other
     * thrown object is passed unaltered.
     *
     * @param t the failure cause, passed as described above
     */
    void onFailure(Throwable t);
}
|
wspeirs/sop4j-base
|
src/main/java/com/sop4j/base/google/common/util/concurrent/FutureCallback.java
|
Java
|
apache-2.0
| 1,509 |
/**
* Copyright 2012 GroupDocs.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.groupdocs.sdk.common;
import java.io.InputStream;
import java.net.MalformedURLException;
import java.net.URL;
import java.text.ParseException;
import javax.ws.rs.core.MultivaluedMap;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.core.header.ContentDisposition;
/**
 * Wraps a downloaded file: its content stream plus metadata (file name, size,
 * content type) derived from an HTTP response.
 */
public class FileStream {
    private InputStream inputStream;
    private String contentType;
    private String fileName;
    // Size in bytes; -1 means unknown.
    private long size = -1;

    public FileStream(InputStream inputStream) {
        this.inputStream = inputStream;
    }

    /**
     * Builds a FileStream from a Jersey response. File name and size come from
     * the Content-Disposition header (RFC 2183); when the header is absent or
     * malformed we fall back to the last URL path segment and Content-Length.
     * The original code passed a possibly-null header straight into
     * ContentDisposition (NPE risk) and left fileName/size unset on parse
     * failure.
     *
     * @param requestUri the requested URL, used as a file-name fallback
     * @param response the HTTP response to wrap
     */
    public FileStream(String requestUri, ClientResponse response) {
        this.inputStream = response.getEntityInputStream();
        if (response.getType() != null) {
            this.contentType = response.getType().toString();
        }
        MultivaluedMap<String, String> headers = response.getHeaders();
        String disposition = headers == null ? null : headers.getFirst("Content-Disposition");
        if (disposition == null) {
            fileName = getFileNameFromUrl(requestUri);
            size = response.getLength();
            return;
        }
        try {
            // http://www.ietf.org/rfc/rfc2183.txt
            ContentDisposition cd = new ContentDisposition(disposition);
            fileName = cd.getFileName() == null ? getFileNameFromUrl(requestUri) : cd.getFileName();
            size = cd.getSize() == 0 ? response.getLength() : cd.getSize();
        } catch (ParseException e) {
            // Malformed header: best-effort fallback instead of leaving
            // fileName null and size -1.
            fileName = getFileNameFromUrl(requestUri);
            size = response.getLength();
        }
    }

    /** Returns the last path segment of the URL, or null if the URL is invalid. */
    private String getFileNameFromUrl(String requestUri) {
        try {
            URL url = new URL(requestUri);
            String path = url.getPath();
            return path.substring(path.lastIndexOf('/') + 1);
        } catch (MalformedURLException e) {
            return null;
        }
    }

    public InputStream getInputStream() {
        return inputStream;
    }

    public void setInputStream(InputStream inputStream) {
        this.inputStream = inputStream;
    }

    public String getFileName() {
        return fileName;
    }

    public void setFileName(String fileName) {
        this.fileName = fileName;
    }

    public long getSize() {
        return size;
    }

    public void setSize(long size) {
        this.size = size;
    }

    public String getContentType() {
        return contentType;
    }

    public void setContentType(String contentType) {
        this.contentType = contentType;
    }
}
|
liosha2007/temporary-groupdocs-java-sdk
|
src/main/java/com/groupdocs/sdk/common/FileStream.java
|
Java
|
apache-2.0
| 2,541 |
<?php
// This file is part of Moodle - http://moodle.org/
//
// Moodle is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Moodle is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Moodle. If not, see <http://www.gnu.org/licenses/>.
/**
* Strings for component 'book', language 'az', branch 'MOODLE_22_STABLE'
*
* @package book
* @copyright 1999 onwards Martin Dougiamas {@link http://moodle.com}
* @license http://www.gnu.org/copyleft/gpl.html GNU GPL v3 or later
*/
defined('MOODLE_INTERNAL') || die();
// Azerbaijani translation strings for the Book module, keyed by identifier.
// String values are user-facing runtime data and must not be altered here.
$string['addafter'] = 'Yeni fəsil əlavə edin';
$string['book:edit'] = 'Kitabın fəsillərini redaktə edin';
$string['book:read'] = 'Kitabı oxuyun';
$string['book:viewhiddenchapters'] = 'Kitabın gizli fəsillərinə baxış keçirilsin';
$string['chapterscount'] = 'Fəsillərin sayı';
$string['chaptertitle'] = 'Fəsillərin adı';
$string['confchapterdelete'] = 'Siz bu fəsili pozmağa əminsiniz?';
$string['confchapterdeleteall'] = 'Siz bu fəsili və onun məzmununu silməkdə əminsiniz?';
$string['content'] = 'Məzmun';
$string['customtitles'] = 'Qeyri-standart başlıqlar';
$string['customtitles_help'] = 'Fəsillərin qeyd olunmuş adları yalnız başlıqlarda göstərilmişdir';
$string['editingchapter'] = 'Fəslin redaktəsi';
$string['errorchapter'] = 'Kitabın fəsiləsinin oxunması zamanı səhv baş vermişdir.';
$string['faq'] = 'FAQ kitabçası (tez-tez verilən suallar)';
$string['faq_help'] = '* Nə üçün yalnız iki səviyyə? *
Adətən iki səviyyə bütün kitablar üçün kifayətdir. Üç səviyyə sənədin strukturunun korlanmasına səbəb ola bilər. Kitab modulu kiçik həcmli çoxsəhifəli tədris vəsaitinin yaradılması üçün nəzərdə tutulmuşdur. Daha böyük həcmli sənədlər üçün PDF formatından istifadə etmək məsləhət görülür. PDF fayllarının yaradılmasının ən asan yolu virtual printerlərdən istifadə etməkdir (bax. <a href="http://sector7g.wurzel6.de/pdfcreator/index_en.htm" target="_blank">PDFCreator</a>, <a href="http://fineprint.com/products/pdffactory/index.html" target="_blank">PDFFactory</a>, <a href="http://www.adobe.com/products/acrobatstd/main.html" target="_blank">Adobe Acrobat</a>, və s.).
* Tələbə kitabı redaktə edə bilərmi? *
Kitabları yalnız müəllimlər yarada və redaktə edə bilər. Hələ ki, tələbələrə bu imkanı vermək planlaşdırılmayıb. Bunun əsas səbəbi Kitab modulunun mümkün dərəcədə sadə saxlanılmasıdır.
* Mən kitab daxilində axtarış edə bilərəmmi? *
Hal-hazırda yalnız bir üsul mövcuddur - "çap üçün" səhifəsindən, brauzerin imkanlarından istifadə edərək axtarış aparmaq olar. Qobal axtarış hələ ki, yalnız Moodle sisteminin forumlarında mümkündür. Kitab modulu da daxil olmaqla bütün resurslarda axtarış aparmaq imkanı da olsaydı pis olmazdı. Könnüllü vardırmı?
* Fəsilin adı bir Başlıq sətirinə yerləşmir *
Adı qısaltmağa çalışın və ya administratordan Başlıq sətirini genişləndirməyi xahiş edin.O yalnız modul konfiqurasiyası səhifəsindəki bütün kitablar üçün qlobal olaraq təyin olunur.';
$string['modulename'] = 'Kitab';
$string['modulename_help'] = 'Kitab sadə çoxsəhifəli tədris vəsaitidir.';
$string['modulenameplural'] = 'Kitablar';
$string['navexit'] = 'Kitabdan çıxış';
$string['navnext'] = 'Nəvbəti';
$string['navprev'] = 'Əvvəlki';
$string['numbering'] = 'Fəsillərin nömrələnməsi';
$string['numbering0'] = 'Yoxdur';
$string['numbering1'] = 'Nömrələr';
$string['numbering2'] = 'Markerlər';
$string['numbering3'] = 'Boşluq';
$string['numbering_help'] = '*Yoxdur - fəsillər və altbölmələrə nömrələmə və formatlama şamil olunmur. Sizin fəsilələrinizin adlarında artıq nömrələnmə vardırsa bu üsuldan istifadə edin. Məsələn, "1. Birinci fəsil", "1.a Birinci mövzu", ... .
*Nömrələr - fəsillər və altböllmələr rəqəmlərlə nömrələnir (1, 1.1, 2, 2,...).
*Boşluq - altbölmələr boşluqlarla əks olunur.';
$string['numberingoptions'] = 'Mümkün nömrələmə parametrləri';
$string['numberingoptions_help'] = 'Yeni kitablar yaradılarkən əlyetərli olmalı nömrələmə parametrlərini seçin.';
$string['pluginadministration'] = 'Kitab administrasiyası';
$string['pluginname'] = 'Kitab';
$string['subchapter'] = 'Altfəsilələr';
$string['toc'] = 'Mündəricat';
$string['top'] = 'yuxar';
|
carnegiespeech/translations
|
az/book.php
|
PHP
|
apache-2.0
| 4,904 |
package com.coderli.shurnim.storage.plugin;
import java.io.File;
/**
* 插件资源<br>
* 定义了插件的描述信息以及对应的处理器信息(例如: 文件解析器。{@code PluginParser})
*
* @author OneCoder
* @date 2014年4月20日 下午8:53:17
* @website http://www.coderli.com
*/
public interface PluginResource {
/**
* 获取对应的配置文件解析器
*
* @return
* @author OneCoder
* @date 2014年4月20日 下午8:56:32
*/
FileParser getFileParser();
/**
* 获取实际的文件资源
*
* @return
* @author OneCoder
* @date 2014年4月20日 下午8:58:37
*/
File getConfigFile();
}
|
lihongzheshuai/shurnim-storage
|
src/main/java/com/coderli/shurnim/storage/plugin/PluginResource.java
|
Java
|
apache-2.0
| 648 |
package migrations

import (
	"github.com/fnproject/fn/api/datastore/sql/migratex"
)

// Migrations is the list of fn specific sql migrations to run.
// NOTE(review): this file only declares the slice; it is presumably appended
// to by the individual migration files in this package — confirm.
var Migrations []migratex.Migration

// vfunc captures a constant version number in a closure, producing the
// func() int64 version accessor shape used by migratex migrations.
func vfunc(v int64) func() int64 { return func() int64 { return v } }
|
fnproject/fn
|
api/datastore/sql/migrations/migs.go
|
GO
|
apache-2.0
| 256 |
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Diagnostics;
using System.Linq;
// using System.ServiceProcess;
using System.Text;
using System.Threading;
using System.Security.Cryptography.X509Certificates;
using System.ServiceModel;
using System.ServiceModel.Description;
using System.Xml;
using Microsoft.Win32;
using dp2Library;
using DigitalPlatform;
using DigitalPlatform.IO;
using DigitalPlatform.Text;
namespace dp2LibraryXE
{
public class LibraryHost : HostBase
{
// Default multi-protocol listen URLs (http + named pipe + REST) for the
// embedded dp2library instance.
public static string default_miniserver_urls = "http://localhost:8001/dp2library/xe;net.pipe://localhost/dp2library/xe;rest.http://localhost/dp2library/xe/rest";
// Default single named-pipe listen URL.
public static string default_single_url = "net.pipe://localhost/dp2library/xe";
// ServiceHost _host = null;
// All WCF service hosts currently opened by this instance.
List<ServiceHost> m_hosts = new List<ServiceHost>();
// Semicolon-separated list of URLs this host binds endpoints to.
public string HostUrl = default_single_url; // "net.pipe://localhost/dp2library/xe";
// Worker-thread entry point: starts the service host(s), records any start
// error, signals the "started" event, then idles until _running is cleared;
// finally closes the hosts and signals the "closed" event.
public override void ThreadMethod()
{
    string strError = "";
    _running = true;
    int nRet = Start(this.DataDir, out strError);
    if (nRet == -1)
    {
        // Startup failed: keep the message and drop any partially-built hosts.
        this.ErrorInfo = strError;
        // this._host = null;
        this.m_hosts.Clear();
    }
    this._eventStarted.Set();
    while (_running)
    {
        Thread.Sleep(100);
    }
    this.CloseHosts();
    this._thread = null;
    this._eventClosed.Set();
}
#if NO
void CloseHosts()
{
if (this._host != null)
{
HostInfo info = _host.Extensions.Find<HostInfo>();
if (info != null)
{
info.Dispose();
_host.Extensions.Remove(info);
}
_host.Close();
_host = null;
}
}
#endif
/// <summary>
/// Detaches and disposes each host's HostInfo extension, closes every open
/// ServiceHost, and empties the host list.
/// </summary>
public override void CloseHosts()
{
    foreach (ServiceHost serviceHost in this.m_hosts)
    {
        HostInfo extension = serviceHost.Extensions.Find<HostInfo>();
        if (extension != null)
        {
            serviceHost.Extensions.Remove(extension);
            extension.Dispose();
        }
        serviceHost.Close();
    }
    this.m_hosts.Clear();
}
// Builds one WCF ServiceHost, binds an endpoint for every URL listed in
// HostUrl (net.pipe / net.tcp / http / rest.http), configures metadata,
// certificate, throttling and debug behaviors, then opens the host.
// Returns 0 on success, -1 on failure (strError carries the message).
int Start(string strDataDir,
    out string strError)
{
    strError = "";
    CloseHosts();
    List<string> urls = StringUtil.SplitList(this.HostUrl, ';');
    ServiceHost host = new ServiceHost(typeof(LibraryService));
    this.m_hosts.Add(host);
    HostInfo info = new HostInfo();
    info.DataDir = strDataDir;
    host.Extensions.Add(info);
    bool bHasWsHttp = false;
    int i = 0;
    foreach (string strTempUrl in urls)
    {
        string strUrl = strTempUrl.Trim();
        if (string.IsNullOrEmpty(strUrl) == true)
            continue;
        // Bind an endpoint for this protocol URL.
        Uri uri = null;
        try
        {
            uri = new Uri(strUrl);
        }
        catch (Exception ex)
        {
            strError = "dp2Library OnStart() 警告:发现不正确的协议URL '" + strUrl + "' (异常信息: " + ex.Message + ")。该URL已被放弃绑定。";
            return -1;
        }
        if (uri.Scheme.ToLower() == "net.pipe")
        {
            host.AddServiceEndpoint(typeof(ILibraryService),
                CreateNamedpipeBinding0(),
                strUrl);
        }
        else if (uri.Scheme.ToLower() == "net.tcp")
        {
            host.AddServiceEndpoint(typeof(ILibraryService),
                CreateNetTcpBinding0(),
                strUrl);
        }
        else if (uri.Scheme.ToLower() == "http")
        {
            ServiceEndpoint endpoint = host.AddServiceEndpoint(typeof(ILibraryService),
                CreateWsHttpBinding1(),
                strUrl);
            bHasWsHttp = true;
        }
        else if (uri.Scheme.ToLower() == "rest.http")
        {
            ServiceEndpoint endpoint = host.AddServiceEndpoint(typeof(ILibraryServiceREST),
                CreateWebHttpBinding1(),
                strUrl.Substring(5)); // strip the leading "rest." prefix
            if (endpoint.Behaviors.Find<WebHttpBehavior>() == null)
            {
                WebHttpBehavior behavior = new WebHttpBehavior();
                behavior.DefaultBodyStyle = System.ServiceModel.Web.WebMessageBodyStyle.Wrapped;
                behavior.DefaultOutgoingResponseFormat = System.ServiceModel.Web.WebMessageFormat.Json;
                behavior.AutomaticFormatSelectionEnabled = true;
                behavior.HelpEnabled = true;
                endpoint.Behaviors.Add(behavior);
            }
        }
        else
        {
            // Unsupported protocol scheme: abort startup.
            strError = "dp2Library OnStart() 警告:发现不能支持的协议类型 '" + strUrl + "'";
            return -1;
        }
        info.Protocol = uri.Scheme.ToLower();
        // Only the http endpoint publishes service metadata (and only once).
        if (// i == 0 //
            uri.Scheme.ToLower() == "http"
            && host.Description.Behaviors.Find<ServiceMetadataBehavior>() == null)
        {
            string strMetadataUrl = strUrl; // "http://localhost:8001/dp2library/xe/";
            if (strMetadataUrl[strMetadataUrl.Length - 1] != '/')
                strMetadataUrl += "/";
            strMetadataUrl += "metadata";
            ServiceMetadataBehavior behavior = new ServiceMetadataBehavior();
            behavior.HttpGetEnabled = true;
            behavior.HttpGetUrl = new Uri(strMetadataUrl);
            host.Description.Behaviors.Add(behavior);
            this.MetadataUrl = strMetadataUrl;
        }
        i++;
    }
    // Install a service certificate only when a ws-http binding is present.
    if (bHasWsHttp == true)
    {
        try
        {
            string strCertSN = "";
            X509Certificate2 cert = GetCertificate(strCertSN,
                out strError);
            if (cert == null)
            {
                strError = "dp2Library OnStart() 准备证书 时发生错误: " + strError;
                return -1;
            }
            else
                host.Credentials.ServiceCertificate.Certificate = cert;
        }
        catch (Exception ex)
        {
            strError = "dp2Library OnStart() 获取证书时发生错误: " + ExceptionUtil.GetExceptionMessage(ex);
            return -1;
        }
    }
    if (host.Description.Behaviors.Find<ServiceThrottlingBehavior>() == null)
    {
        ServiceThrottlingBehavior behavior = new ServiceThrottlingBehavior();
        behavior.MaxConcurrentCalls = 50;
        behavior.MaxConcurrentInstances = 1000;
        behavior.MaxConcurrentSessions = 1000;
        host.Description.Behaviors.Add(behavior);
    }
    // IncludeExceptionDetailInFaults
    ServiceDebugBehavior debug_behavior = host.Description.Behaviors.Find<ServiceDebugBehavior>();
    if (debug_behavior == null)
    {
        host.Description.Behaviors.Add(new ServiceDebugBehavior() { IncludeExceptionDetailInFaults = true });
    }
    else
    {
        if (debug_behavior.IncludeExceptionDetailInFaults == false)
            debug_behavior.IncludeExceptionDetailInFaults = true;
    }
    host.Opening += new EventHandler(host_Opening);
    host.Closing += new EventHandler(m_host_Closing);
    try
    {
        host.Open();
    }
    catch (Exception ex)
    {
        string strInstanceName = "";
        strError = "dp2Library OnStart() host.Open() 时发生错误: instancename=[" + strInstanceName + "]:" + ExceptionUtil.GetExceptionMessage(ex);
        return -1;
    }
#if NO
    strError = "test error";
    return -1;
#endif
    return 0;
}
// ServiceHost.Opening event handler; intentionally empty (hook point only).
void host_Opening(object sender, EventArgs e)
{
}
// ServiceHost.Closing event handler: disposes and detaches the HostInfo
// extension of every host before shutdown completes.
void m_host_Closing(object sender, EventArgs e)
{
#if NO
    if (this._host != null)
    {
        HostInfo info = _host.Extensions.Find<HostInfo>();
        if (info != null)
        {
            info.Dispose();
            _host.Extensions.Remove(info);
        }
    }
#endif
    foreach (ServiceHost host in this.m_hosts)
    {
        HostInfo info = host.Extensions.Find<HostInfo>();
        if (info != null)
        {
            info.Dispose();
            host.Extensions.Remove(info);
        }
    }
}
/// <summary>
/// Propagates the test-mode flag to every host's HostInfo extension and,
/// when present, its running application instance.
/// </summary>
public void SetTestMode(bool bTestMode)
{
    foreach (ServiceHost serviceHost in this.m_hosts)
    {
        HostInfo hostInfo = serviceHost.Extensions.Find<HostInfo>();
        if (hostInfo == null)
            continue;
        hostInfo.TestMode = bTestMode;
        if (hostInfo.App != null)
            hostInfo.App.TestMode = bTestMode;
    }
}
/// <summary>
/// Propagates the client-connection limit to every host's HostInfo
/// extension and, when present, its running application instance.
/// </summary>
public void SetMaxClients(int nMaxClients)
{
    foreach (ServiceHost serviceHost in this.m_hosts)
    {
        HostInfo hostInfo = serviceHost.Extensions.Find<HostInfo>();
        if (hostInfo == null)
            continue;
        hostInfo.MaxClients = nMaxClients;
        if (hostInfo.App != null)
            hostInfo.App.MaxClients = nMaxClients;
    }
}
/// <summary>
/// Propagates the license type to every host's HostInfo extension and,
/// when present, its running application instance.
/// </summary>
public void SetLicenseType(string strLicenseType)
{
    foreach (ServiceHost serviceHost in this.m_hosts)
    {
        HostInfo hostInfo = serviceHost.Extensions.Find<HostInfo>();
        if (hostInfo == null)
            continue;
        hostInfo.LicenseType = strLicenseType;
        if (hostInfo.App != null)
            hostInfo.App.LicenseType = strLicenseType;
    }
}
/// <summary>
/// Propagates the function setting to every host's HostInfo extension and,
/// when present, its running application instance.
/// </summary>
public void SetFunction(string strFunction)
{
    foreach (ServiceHost serviceHost in this.m_hosts)
    {
        HostInfo hostInfo = serviceHost.Extensions.Find<HostInfo>();
        if (hostInfo == null)
            continue;
        hostInfo.Function = strFunction;
        if (hostInfo.App != null)
            hostInfo.App.Function = strFunction;
    }
}
// Applies a uniform 20-minute limit to all four channel timeouts on the binding.
static void SetTimeout(System.ServiceModel.Channels.Binding binding)
{
    TimeSpan twentyMinutes = new TimeSpan(0, 20, 0);
    binding.SendTimeout = twentyMinutes;
    binding.ReceiveTimeout = twentyMinutes; // governs session lifetime
    binding.CloseTimeout = twentyMinutes;
    binding.OpenTimeout = twentyMinutes;
}
// np0: namedpipe
// Builds the unsecured named-pipe binding: 1 MB message/reader quotas and
// the shared 20-minute timeouts.
System.ServiceModel.Channels.Binding CreateNamedpipeBinding0()
{
    NetNamedPipeBinding binding = new NetNamedPipeBinding();
    binding.Namespace = "http://dp2003.com/dp2library/";
    binding.Security.Mode = NetNamedPipeSecurityMode.None;
    binding.MaxReceivedMessageSize = 1024 * 1024;
    // binding.MessageEncoding = WSMessageEncoding.Mtom;
    XmlDictionaryReaderQuotas quotas = new XmlDictionaryReaderQuotas();
    quotas.MaxArrayLength = 1024 * 1024;
    quotas.MaxStringContentLength = 1024 * 1024;
    binding.ReaderQuotas = quotas;
    SetTimeout(binding);
    // binding.ReliableSession.Enabled = false;
    return binding;
}
// nt0: net.tcp
// Builds the unsecured TCP binding: 1 MB message/reader quotas, shared
// 20-minute timeouts, and a 20-minute reliable-session inactivity window.
System.ServiceModel.Channels.Binding CreateNetTcpBinding0()
{
    NetTcpBinding binding = new NetTcpBinding();
    binding.Namespace = "http://dp2003.com/dp2library/";
    binding.Security.Mode = SecurityMode.None;
    binding.MaxReceivedMessageSize = 1024 * 1024;
    // binding.MessageEncoding = WSMessageEncoding.Mtom;
    XmlDictionaryReaderQuotas quotas = new XmlDictionaryReaderQuotas();
    quotas.MaxArrayLength = 1024 * 1024;
    quotas.MaxStringContentLength = 1024 * 1024;
    binding.ReaderQuotas = quotas;
    SetTimeout(binding);
    binding.ReliableSession.InactivityTimeout = new TimeSpan(0, 20, 0);
    // binding.ReliableSession.Enabled = false;
    return binding;
}
// ws1: anonymous -- ClientCredentialType = None
// Builds the ws-http binding with message security, MTOM encoding, 1 MB
// message/reader quotas, shared 20-minute timeouts, and a 20-minute
// reliable-session inactivity window.
System.ServiceModel.Channels.Binding CreateWsHttpBinding1()
{
    WSHttpBinding binding = new WSHttpBinding();
    binding.Namespace = "http://dp2003.com/dp2library/";
    binding.Security.Mode = SecurityMode.Message;
#if !USERNAME
    binding.Security.Message.ClientCredentialType = MessageCredentialType.None;
#else
    binding.Security.Message.ClientCredentialType = MessageCredentialType.UserName;
#endif
    binding.MaxReceivedMessageSize = 1024 * 1024;
    binding.MessageEncoding = WSMessageEncoding.Mtom;
    XmlDictionaryReaderQuotas quotas = new XmlDictionaryReaderQuotas();
    quotas.MaxArrayLength = 1024 * 1024;
    quotas.MaxStringContentLength = 1024 * 1024;
    binding.ReaderQuotas = quotas;
    SetTimeout(binding);
    // Assigned once; the original repeated this assignment three times.
    binding.ReliableSession.InactivityTimeout = new TimeSpan(0, 20, 0);
    // binding.ReliableSession.Enabled = false;
    return binding;
}
// Build the REST-style WebHttp binding: no security,
// 1 MB message/quota limits and 20-minute timeouts.
System.ServiceModel.Channels.Binding CreateWebHttpBinding1()
{
    WebHttpBinding binding = new WebHttpBinding();
    binding.Namespace = "http://dp2003.com/dp2library/";
    binding.Security.Mode = WebHttpSecurityMode.None;
    // binding.Security.Message.ClientCredentialType = MessageCredentialType.None;
    binding.MaxReceivedMessageSize = 1024 * 1024;
    // binding.MessageEncoding = WSMessageEncoding.Mtom;
    XmlDictionaryReaderQuotas quotas = new XmlDictionaryReaderQuotas();
    quotas.MaxArrayLength = 1024 * 1024;
    quotas.MaxStringContentLength = 1024 * 1024;
    binding.ReaderQuotas = quotas;
    SetTimeout(binding);
    // binding.ReliableSession.InactivityTimeout = new TimeSpan(0, 20, 0);
    // binding.ReliableSession.InactivityTimeout = new TimeSpan(0, 20, 0);
    return binding;
}
// Search a certificate store for the first certificate matching the given
// criteria (no validity check). Returns null when nothing matches.
static X509Certificate2 FindCertificate(
    StoreLocation location, StoreName name,
    X509FindType findType, string findValue)
{
    X509Store store = new X509Store(name, location);
    try
    {
        // Open for read-only access; throws if the store is unavailable.
        store.Open(OpenFlags.ReadOnly);
        X509Certificate2Collection matches = store.Certificates.Find(
            findType, findValue, false);
        return matches.Count > 0 ? matches[0] : null;
    }
    finally
    {
        // Always release the store handle, even when Open or Find throws.
        store.Close();
    }
}
// Locate the service certificate. When a serial number is supplied the
// certificate is looked up in LocalMachine/Root; otherwise the default
// "digitalplatform.pfx" file next to the executable is loaded.
// Returns null (with strError set) when a requested serial number is not found.
X509Certificate2 GetCertificate(
    string strCertSN,
    out string strError)
{
    strError = "";
    /*
    string strCertSN = GetProductString(
    "dp2Library",
    "cert_sn");
    * */
    if (string.IsNullOrEmpty(strCertSN) == false)
    {
        X509Certificate2 cert = FindCertificate(
            StoreLocation.LocalMachine,
            StoreName.Root,
            X509FindType.FindBySerialNumber,
            strCertSN);
        if (cert == null)
        {
            // NOTE(review): the message mentions CurrentUser but only
            // LocalMachine is searched above -- confirm intended scope.
            strError = "序列号为 '" + strCertSN + "' 的证书在 StoreLocation.LocalMachine | StoreLocation.CurrentUser / StoreName.Root 中不存在。";
            return null;
        }
        return cert;
    }
    // Fall back to the default certificate (SubjectName "DigitalPlatform")
    // shipped as a .pfx file in the executable's directory.
    string strCurrentDir = System.Reflection.Assembly.GetExecutingAssembly().Location; // Environment.CurrentDirectory;
    strCurrentDir = PathUtil.PathPart(strCurrentDir);
    string strCerFilename = PathUtil.MergePath(strCurrentDir, "digitalplatform.pfx");
    return new X509Certificate2(strCerFilename, "setupdp2");
}
}
}
|
renyh1013/dp2
|
dp2LibraryXE/LibraryHost.cs
|
C#
|
apache-2.0
| 17,502 |
/*
* Copyright 2013 Netherlands eScience Center
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.esciencecenter.xenon.filesystems;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import nl.esciencecenter.xenon.InvalidCredentialException;
import nl.esciencecenter.xenon.InvalidLocationException;
import nl.esciencecenter.xenon.InvalidPropertyException;
import nl.esciencecenter.xenon.UnknownAdaptorException;
import nl.esciencecenter.xenon.UnknownPropertyException;
import nl.esciencecenter.xenon.UnsupportedOperationException;
import nl.esciencecenter.xenon.XenonException;
import nl.esciencecenter.xenon.adaptors.AdaptorLoader;
import nl.esciencecenter.xenon.adaptors.NotConnectedException;
import nl.esciencecenter.xenon.adaptors.XenonProperties;
import nl.esciencecenter.xenon.adaptors.filesystems.FileAdaptor;
import nl.esciencecenter.xenon.credentials.Credential;
import nl.esciencecenter.xenon.credentials.DefaultCredential;
import nl.esciencecenter.xenon.utils.DaemonThreadFactory;
/**
* FileSystem represent a (possibly remote) file system that can be used to access data.
*/
public abstract class FileSystem implements AutoCloseable {
/**
 * Resolve the {@link FileAdaptor} registered under the given name.
 *
 * @param adaptorName
 *            the name of the adaptor (e.g. "sftp" or "webdav")
 * @return the registered adaptor
 * @throws UnknownAdaptorException
 *             if no file adaptor with this name is registered.
 */
private static FileAdaptor getAdaptorByName(String adaptorName) throws UnknownAdaptorException {
    return AdaptorLoader.getFileAdaptor(adaptorName);
}
/**
 * Gives the names of all available file system adaptors.
 *
 * @return the adaptor names
 */
public static String[] getAdaptorNames() {
    return AdaptorLoader.getFileAdaptorNames();
}
/**
 * Gives the description of the adaptor with the given name.
 *
 * @param adaptorName
 *            the type of file system to connect to (e.g. "sftp" or "webdav")
 * @return the description
 * @throws UnknownAdaptorException
 *             If the adaptor name is absent in {@link #getAdaptorNames()}.
 */
public static FileSystemAdaptorDescription getAdaptorDescription(String adaptorName) throws UnknownAdaptorException {
    return getAdaptorByName(adaptorName);
}
/**
 * Gives the descriptions of all available file system adaptors.
 *
 * @return the descriptions
 */
public static FileSystemAdaptorDescription[] getAdaptorDescriptions() {
    return AdaptorLoader.getFileAdaptorDescriptions();
}
/**
 * Immutable snapshot of the status of a single copy operation: its identifier, state, byte counts, and the exception that terminated it (if any).
 */
static class CopyStatusImplementation implements CopyStatus {

    private final String copyIdentifier;
    private final String state;
    private final XenonException exception;
    private final long bytesToCopy;
    private final long bytesCopied;

    public CopyStatusImplementation(String copyIdentifier, String state, long bytesToCopy, long bytesCopied, XenonException exception) {
        this.copyIdentifier = copyIdentifier;
        this.state = state;
        this.bytesToCopy = bytesToCopy;
        this.bytesCopied = bytesCopied;
        this.exception = exception;
    }

    @Override
    public String getCopyIdentifier() {
        return copyIdentifier;
    }

    @Override
    public String getState() {
        return state;
    }

    @Override
    public XenonException getException() {
        return exception;
    }

    @Override
    public void maybeThrowException() throws XenonException {
        // Only rethrow when the copy actually failed with an exception.
        XenonException e = getException();
        if (e != null) {
            throw e;
        }
    }

    @Override
    public boolean isRunning() {
        return "RUNNING".equals(state);
    }

    @Override
    public boolean isDone() {
        // A copy is done once it has either completed or failed.
        return "DONE".equals(state) || "FAILED".equals(state);
    }

    @Override
    public boolean hasException() {
        return exception != null;
    }

    @Override
    public long bytesToCopy() {
        return bytesToCopy;
    }

    @Override
    public long bytesCopied() {
        return bytesCopied;
    }

    @Override
    public String toString() {
        return "CopyStatus [copyIdentifier=" + copyIdentifier + ", state=" + state + ", exception=" + exception + ", bytesToCopy=" + bytesToCopy
                + ", bytesCopied=" + bytesCopied + "]";
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        CopyStatusImplementation other = (CopyStatusImplementation) o;
        return bytesToCopy == other.bytesToCopy
                && bytesCopied == other.bytesCopied
                && Objects.equals(copyIdentifier, other.copyIdentifier)
                && Objects.equals(state, other.state)
                && Objects.equals(exception, other.exception);
    }

    @Override
    public int hashCode() {
        return Objects.hash(copyIdentifier, state, exception, bytesToCopy, bytesCopied);
    }
}
/**
 * Create a new FileSystem using the <code>adaptor</code> that connects to a data store at <code>location</code> using the <code>credentials</code> to get
 * access. Use <code>properties</code> to (optionally) configure the FileSystem when it is created.
 *
 * Make sure to always close {@code FileSystem} instances by calling {@code close(FileSystem)} when you no longer need them, otherwise their associated
 * resources remain allocated.
 *
 * @see <a href="../../../../overview-summary.html#filesystems">Documentation on the supported adaptors and locations.</a>
 *
 * @param adaptor
 *            the type of file system to connect to (e.g. "sftp" or "webdav")
 * @param location
 *            the location of the FileSystem.
 * @param credential
 *            the Credentials to use to get access to the FileSystem.
 * @param properties
 *            optional properties to use when creating the FileSystem.
 *
 * @return the new FileSystem.
 *
 * @throws UnknownPropertyException
 *             If an unknown property was provided.
 * @throws InvalidPropertyException
 *             If a known property was provided with an invalid value.
 * @throws UnknownAdaptorException
 *             If the adaptor was invalid.
 * @throws InvalidLocationException
 *             If the location was invalid.
 * @throws InvalidCredentialException
 *             If the credential is invalid to access the location.
 *
 * @throws XenonException
 *             If the creation of the FileSystem failed.
 * @throws IllegalArgumentException
 *             If adaptor is null.
 */
public static FileSystem create(String adaptor, String location, Credential credential, Map<String, String> properties) throws XenonException {
    return getAdaptorByName(adaptor).createFileSystem(location, credential, properties);
}
/**
 * Create a new FileSystem using the <code>adaptor</code> that connects to a data store at <code>location</code> using the <code>credentials</code> to get
 * access. No additional properties are set.
 *
 * Make sure to always close {@code FileSystem} instances by calling {@code close(FileSystem)} when you no longer need them, otherwise their associated
 * resources remain allocated.
 *
 * @see <a href="../../../../overview-summary.html#filesystems">Documentation on the supported adaptors and locations.</a>
 *
 * @param adaptor
 *            the type of file system to connect to (e.g. "sftp" or "webdav")
 * @param location
 *            the location of the FileSystem.
 * @param credential
 *            the Credentials to use to get access to the FileSystem.
 *
 * @return the new FileSystem.
 *
 * @throws UnknownPropertyException
 *             If an unknown property was provided.
 * @throws InvalidPropertyException
 *             If a known property was provided with an invalid value.
 * @throws UnknownAdaptorException
 *             If the adaptor was invalid.
 * @throws InvalidLocationException
 *             If the location was invalid.
 * @throws InvalidCredentialException
 *             If the credential is invalid to access the location.
 * @throws XenonException
 *             If the creation of the FileSystem failed.
 * @throws IllegalArgumentException
 *             If adaptor is null.
 */
public static FileSystem create(String adaptor, String location, Credential credential) throws XenonException {
    return create(adaptor, location, credential, new HashMap<>(0));
}
/**
 * Create a new FileSystem using the <code>adaptor</code> that connects to a data store at <code>location</code> using the default credentials to get
 * access.
 *
 * Make sure to always close {@code FileSystem} instances by calling {@code close(FileSystem)} when you no longer need them, otherwise their associated
 * resources remain allocated.
 *
 * @see <a href="../../../../overview-summary.html#filesystems">Documentation on the supported adaptors and locations.</a>
 *
 * @param adaptor
 *            the type of file system to connect to (e.g. "sftp" or "webdav")
 * @param location
 *            the location of the FileSystem.
 *
 * @return the new FileSystem.
 *
 * @throws UnknownPropertyException
 *             If an unknown property was provided.
 * @throws InvalidPropertyException
 *             If a known property was provided with an invalid value.
 * @throws UnknownAdaptorException
 *             If the adaptor was invalid.
 * @throws InvalidLocationException
 *             If the location was invalid.
 * @throws InvalidCredentialException
 *             If the credential is invalid to access the location.
 *
 * @throws XenonException
 *             If the creation of the FileSystem failed.
 * @throws IllegalArgumentException
 *             If adaptor is null.
 */
public static FileSystem create(String adaptor, String location) throws XenonException {
    return create(adaptor, location, new DefaultCredential());
}
/**
 * Create a new FileSystem using the <code>adaptor</code> that connects to a data store at the default location using the default credentials to get access.
 *
 * Note that there are very few filesystem adaptors that support a default location. The local filesystem adaptor is the prime example.
 *
 * Make sure to always close {@code FileSystem} instances by calling {@code close(FileSystem)} when you no longer need them, otherwise their associated
 * resources remain allocated.
 *
 * @see <a href="overview-summary.html#filesystems">Documentation on the supported adaptors and locations.</a>
 *
 * @param adaptor
 *            the type of file system to connect to (e.g. "sftp" or "webdav")
 *
 * @return the new FileSystem.
 *
 * @throws UnknownPropertyException
 *             If an unknown property was provided.
 * @throws InvalidPropertyException
 *             If a known property was provided with an invalid value.
 * @throws UnknownAdaptorException
 *             If the adaptor was invalid.
 * @throws InvalidLocationException
 *             If the location was invalid.
 * @throws InvalidCredentialException
 *             If the credential is invalid to access the location.
 *
 * @throws XenonException
 *             If the creation of the FileSystem failed.
 * @throws IllegalArgumentException
 *             If adaptor is null.
 */
public static FileSystem create(String adaptor) throws XenonException {
    return create(adaptor, null);
}
/**
 * Tracks the progress and cancellation state of a single copy operation. All accessors are synchronized so the copy thread and status pollers can safely
 * share one instance.
 */
class CopyCallback {

    private long bytesToCopy = 0;
    private long bytesCopied = 0;
    private boolean started = false;
    private boolean cancelled = false;

    /** Record the total number of bytes to copy; only the first call has any effect. */
    synchronized void start(long total) {
        if (started) {
            return;
        }
        started = true;
        bytesToCopy = total;
    }

    /** @return whether {@link #start(long)} has been called. */
    synchronized boolean isStarted() {
        return started;
    }

    /** @return the number of bytes copied so far. */
    synchronized long getBytesCopied() {
        return bytesCopied;
    }

    /** @return the total number of bytes to copy, as recorded by {@link #start(long)}. */
    synchronized long getBytesToCopy() {
        return bytesToCopy;
    }

    /** Add <code>bytes</code> to the running total of copied bytes. */
    synchronized void addBytesCopied(long bytes) {
        bytesCopied += bytes;
    }

    /** Request cancellation of the copy; the copy loop polls {@link #isCancelled()}. */
    synchronized void cancel() {
        cancelled = true;
    }

    /** @return whether cancellation has been requested. */
    synchronized boolean isCancelled() {
        return cancelled;
    }
}
/**
 * Pairs the {@link Future} executing an asynchronous copy with the {@link CopyCallback} used to monitor and cancel it.
 */
private class PendingCopy {
    Future<Void> future; // the running copy task
    CopyCallback callback; // progress/cancellation hook shared with the copy task
    public PendingCopy(Future<Void> future, CopyCallback callback) {
        super();
        this.future = future;
        this.callback = callback;
    }
}
private final String uniqueID; // unique identifier of this FileSystem instance
private final String adaptor; // name of the adaptor that created this FileSystem
private final String location; // location this FileSystem is connected to
private final Credential credential; // credential used to create this FileSystem
private final XenonProperties properties; // properties used to create this FileSystem
private final ExecutorService pool; // single-threaded executor running asynchronous copies
private Path workingDirectory; // current working directory; relative paths resolve against it
private long nextCopyID = 0; // sequence number used to generate unique copy identifiers
private int bufferSize; // buffer size (in bytes) used when streaming data during copies
private final HashMap<String, PendingCopy> pendingCopies = new HashMap<>(); // in-progress copies, keyed by copy identifier
/**
 * Create a FileSystem.
 *
 * @param uniqueID
 *            unique identifier of this FileSystem instance (may not be null).
 * @param adaptor
 *            name of the adaptor that created this FileSystem (may not be null).
 * @param location
 *            location this FileSystem connects to (may not be null).
 * @param credential
 *            credential used to access the location (may not be null).
 * @param workDirectory
 *            initial working directory (may not be null).
 * @param bufferSize
 *            buffer size (in bytes) used when copying data; must be positive.
 * @param properties
 *            properties used to create this FileSystem.
 * @throws IllegalArgumentException
 *             if any reference argument is null or bufferSize is not positive.
 */
protected FileSystem(String uniqueID, String adaptor, String location, Credential credential, Path workDirectory, int bufferSize,
        XenonProperties properties) {

    if (uniqueID == null) {
        throw new IllegalArgumentException("Identifier may not be null!");
    }

    if (adaptor == null) {
        throw new IllegalArgumentException("Adaptor may not be null!");
    }

    if (location == null) {
        throw new IllegalArgumentException("Location may not be null!");
    }

    if (credential == null) {
        throw new IllegalArgumentException("Credential may not be null!");
    }

    if (workDirectory == null) {
        // Fix: message previously said "EntryPath", which matches no parameter name.
        throw new IllegalArgumentException("WorkDirectory may not be null!");
    }

    if (bufferSize <= 0) {
        throw new IllegalArgumentException("Buffer size may not be 0 or smaller!");
    }

    // NOTE(review): properties is the only argument not null-checked here;
    // a null value would surface later in getProperties() -- confirm whether
    // adaptors may legitimately pass null before adding a check.
    this.uniqueID = uniqueID;
    this.adaptor = adaptor;
    this.location = location;
    this.credential = credential;
    this.workingDirectory = workDirectory;
    this.properties = properties;
    this.bufferSize = bufferSize;
    // Single daemon thread: copies for one FileSystem run sequentially.
    this.pool = Executors.newFixedThreadPool(1, new DaemonThreadFactory("CopyThread." + uniqueID));
}
/**
 * Get the buffer size (in bytes) used when streaming data during copy operations.
 *
 * @return the buffer size in bytes.
 */
protected int getBufferSize() {
    return bufferSize;
}
/**
 * Generate the next unique identifier for a copy operation on this FileSystem.
 *
 * @return a fresh identifier of the form <code>COPY-&lt;adaptor&gt;-&lt;sequence&gt;</code>.
 */
private synchronized String getNextCopyID() {
    long sequence = nextCopyID;
    nextCopyID = sequence + 1;
    return String.format("COPY-%s-%d", getAdaptorName(), sequence);
}
/**
 * Get the name of the adaptor that created this FileSystem.
 *
 * @return the name of the adaptor (e.g. "sftp" or "webdav").
 */
public String getAdaptorName() {
    return adaptor;
}
/**
 * Get the location this FileSystem is connected to.
 *
 * @return the location of the FileSystem.
 */
public String getLocation() {
    return location;
}
/**
 * Get the credential that this FileSystem is using.
 *
 * @return the credential this FileSystem is using.
 */
public Credential getCredential() {
    return credential;
}
/**
 * Get the properties used to create this FileSystem, as a plain map.
 *
 * @return the properties used to create this FileSystem.
 */
public Map<String, String> getProperties() {
    return properties.toMap();
}
/**
 * Get the current working directory of this file system.
 *
 * All relative paths provided to FileSystem methods are resolved against this current working directory.
 *
 * The current working directory is set when a FileSystem is created using the path specified in the location. If no path is specified in the location, an
 * adaptor specific default path is used, for example <code>"/home/username"</code>.
 *
 * @return the current working directory of this file system.
 */
public Path getWorkingDirectory() {
    return workingDirectory;
}
/**
 * Get the path separator used by this file system.
 *
 * The separator is determined by the working directory established when this FileSystem was created.
 *
 * @return the path separator used by this file system.
 */
public String getPathSeparator() {
    return String.valueOf(workingDirectory.getSeparator());
}
/**
 * Set the current working directory of this file system to <code>directory</code>.
 *
 * The provided <code>directory</code> must exist and be a directory. Both an absolute or relative path may be provided. In the latter case, the path will
 * be resolved against the current working directory.
 *
 * @param directory
 *            a path to which the current working directory must be set.
 * @throws NoSuchPathException
 *             if the <code>directory</code> does not exist
 * @throws InvalidPathException
 *             if <code>directory</code> is not a directory
 * @throws NotConnectedException
 *             if file system is closed.
 * @throws IllegalArgumentException
 *             if the argument is null.
 * @throws XenonException
 *             if an I/O error occurred
 */
public void setWorkingDirectory(Path directory) throws XenonException {
    // Resolve against the current working directory and validate before switching.
    Path wd = toAbsolutePath(directory);
    assertDirectoryExists(wd);
    workingDirectory = wd;
}
/**
 * Close this FileSystem and stop its internal copy thread pool. If the adaptor does not support closing this is a no-op.
 *
 * @throws XenonException
 *             If the FileSystem failed to close or if an I/O error occurred.
 */
public void close() throws XenonException {
    try {
        // Stop the copy thread; any queued copies are abandoned.
        pool.shutdownNow();
    } catch (Exception e) {
        // Fix: propagate the underlying cause instead of discarding it.
        throw new XenonException(getAdaptorName(), "Failed to cleanly shutdown copy thread pool", e);
    }
}
/**
 * Return if the connection to the FileSystem is open. An adaptor which does not support closing is always open.
 *
 * @throws XenonException
 *             if the test failed or an I/O error occurred.
 * @return if the connection to the FileSystem is open.
 */
public abstract boolean isOpen() throws XenonException;
/**
 * Rename an existing source path to a non-existing target path (optional operation).
 * <p>
 *
 * This method only implements a <em>rename</em> operation, not a <em>move</em> operation. Hence, this method will not copy files and should return (almost)
 * instantaneously.
 *
 * The parent of the target path (e.g. <code>target.getParent</code>) must exist.
 *
 * If the target is equal to the source this method has no effect.
 *
 * If the source is a link, the link itself will be renamed, not the path to which it refers.
 *
 * If the source is a directory, it will be renamed to the target. This implies that moving a directory between physical locations may fail.
 * </p>
 *
 * @param source
 *            the existing source path.
 * @param target
 *            the non existing target path.
 *
 * @throws UnsupportedOperationException
 *             If the adaptor does not support renaming.
 * @throws NoSuchPathException
 *             If the source file does not exist or the target parent directory does not exist.
 * @throws PathAlreadyExistsException
 *             If the target file already exists.
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             If the move failed.
 * @throws IllegalArgumentException
 *             If one or both of the arguments are null.
 */
public abstract void rename(Path source, Path target) throws XenonException;
/**
 * Creates a new directory, failing if the directory already exists. All nonexistent parent directories are also created.
 *
 * @param dir
 *            the directory to create.
 *
 * @throws PathAlreadyExistsException
 *             If the directory already exists or if a parent directory could not be created because a file with the same name already exists.
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             If an I/O error occurred.
 * @throws IllegalArgumentException
 *             If one or both of the arguments are null.
 */
public void createDirectories(Path dir) throws XenonException {

    Path absolute = toAbsolutePath(dir);

    // Collect the missing ancestors, nearest-first, then create them top-down.
    List<Path> missing = new ArrayList<>();

    Path parent = absolute.getParent();

    while (parent != null && !exists(parent)) {
        missing.add(parent);
        parent = parent.getParent();
    }

    for (int i = missing.size() - 1; i >= 0; i--) {
        createDirectory(missing.get(i));
    }

    // The target itself is always created; an existing target fails here.
    createDirectory(absolute);
}
/**
 * Creates a new directory, failing if the directory already exists.
 *
 * The parent directory of the new directory must already exist.
 *
 * @param dir
 *            the directory to create.
 *
 * @throws PathAlreadyExistsException
 *             If the directory already exists.
 * @throws NoSuchPathException
 *             If the parent directory does not exist.
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             If an I/O error occurred.
 * @throws IllegalArgumentException
 *             If the argument is null.
 *
 */
public abstract void createDirectory(Path dir) throws XenonException;
/**
 * Creates a new empty file, failing if the file already exists.
 *
 * The parent directory of the file must already exist.
 *
 * @param file
 *            a path referring to the file to create.
 *
 * @throws PathAlreadyExistsException
 *             If the file already exists.
 * @throws NoSuchPathException
 *             If the parent directory does not exist.
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             If an I/O error occurred.
 * @throws IllegalArgumentException
 *             If one or both of the arguments are null.
 */
public abstract void createFile(Path file) throws XenonException;
/**
 * Creates a new symbolic link, failing if the link already exists (optional operation).
 *
 * The target is taken as is. It may be an absolute or relative path, possibly non-normalized, and may or may not exist.
 *
 * @param link
 *            the symbolic link to create.
 * @param target
 *            the target the symbolic link should refer to.
 *
 * @throws PathAlreadyExistsException
 *             If the link already exists.
 * @throws NoSuchPathException
 *             If the target or parent directory of link does not exist
 * @throws InvalidPathException
 *             If parent of link is not a directory
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             If an I/O error occurred.
 * @throws IllegalArgumentException
 *             If one or both of the arguments are null.
 */
public abstract void createSymbolicLink(Path link, Path target) throws XenonException;
/**
 * Deletes an existing path.
 *
 * If path is a symbolic link the symbolic link is removed and the symbolic link's target is not deleted.
 *
 * If the path is a directory and <code>recursive</code> is set to true, the contents of the directory will also be deleted. If <code>recursive</code> is
 * set to <code>false</code>, a directory will only be removed if it is empty.
 *
 * @param path
 *            the path to delete.
 * @param recursive
 *            if the delete must be done recursively
 * @throws DirectoryNotEmptyException
 *             if the directory was not empty (and the delete was not recursive).
 * @throws NoSuchPathException
 *             if the provided path does not exist.
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             if an I/O error occurred.
 * @throws IllegalArgumentException
 *             If path is null.
 */
public void delete(Path path, boolean recursive) throws XenonException {

    Path absPath = toAbsolutePath(path);
    assertPathExists(absPath);

    // Regular files (and links) are removed directly.
    if (!getAttributes(absPath).isDirectory()) {
        deleteFile(absPath);
        return;
    }

    Iterable<PathAttributes> entries = list(absPath, false);

    if (recursive) {
        // Depth-first: remove every entry before the directory itself.
        for (PathAttributes entry : entries) {
            delete(entry.getPath(), true);
        }
    } else if (entries.iterator().hasNext()) {
        throw new DirectoryNotEmptyException(getAdaptorName(), "Directory not empty: " + absPath.toString());
    }

    deleteDirectory(absPath);
}
/**
 * Tests if a path exists.
 *
 * @param path
 *            the path to test.
 *
 * @return If the path exists.
 *
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             if an I/O error occurred.
 * @throws IllegalArgumentException
 *             If path is null.
 */
public abstract boolean exists(Path path) throws XenonException;
/**
 * List all entries in the directory <code>dir</code>.
 *
 * All entries in the directory are returned, but subdirectories will not be traversed by default. Set <code>recursive</code> to <code>true</code> to
 * include the listing of all subdirectories.
 *
 * Symbolic links are not followed.
 *
 * @param dir
 *            the target directory.
 * @param recursive
 *            should the list recursively traverse the subdirectories ?
 *
 * @return a {@link List} of {@link PathAttributes} that iterates over all entries in the directory <code>dir</code>.
 *
 * @throws NoSuchPathException
 *             If a directory does not exists.
 * @throws InvalidPathException
 *             If <code>dir</code> is not a directory.
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             if an I/O error occurred.
 * @throws IllegalArgumentException
 *             If path is null.
 */
public Iterable<PathAttributes> list(Path dir, boolean recursive) throws XenonException {

    Path absolute = toAbsolutePath(dir);

    // Fix: validate the resolved absolute path, not the raw (possibly relative)
    // argument, consistent with setWorkingDirectory and delete.
    assertDirectoryExists(absolute);

    ArrayList<PathAttributes> result = new ArrayList<>();
    list(absolute, result, recursive);
    return result;
}
/**
 * Open an existing file and return an {@link InputStream} to read from this file.
 *
 * @param file
 *            the file to read.
 *
 * @return the {@link InputStream} to read from the file.
 *
 * @throws NoSuchPathException
 *             If the file does not exist.
 * @throws InvalidPathException
 *             If the file is not a regular file.
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             if an I/O error occurred.
 * @throws IllegalArgumentException
 *             If path is null.
 */
public abstract InputStream readFromFile(Path file) throws XenonException;
/**
 * Open a file and return an {@link OutputStream} to write to this file.
 * <p>
 *
 * The size of the file (once all data has been written) must be specified using the <code>size</code> parameter. This is required by some implementations
 * (typically blob-stores).
 *
 * </p>
 *
 * @param path
 *            the target file for the OutputStream.
 * @param size
 *            the size of the file once fully written.
 *
 * @return the {@link OutputStream} to write to the file.
 *
 * @throws PathAlreadyExistsException
 *             If the target existed.
 * @throws NoSuchPathException
 *             if a parent directory does not exist.
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             if an I/O error occurred.
 * @throws IllegalArgumentException
 *             If path is null.
 */
public abstract OutputStream writeToFile(Path path, long size) throws XenonException;
/**
 * Open a file and return an {@link OutputStream} to write to this file. (optional operation)
 * <p>
 * If the file already exists it will be replaced and its data will be lost.
 *
 * The amount of data that will be written to the file is not specified in advance. This operation may not be supported by all implementations.
 *
 * </p>
 *
 * @param file
 *            the target file for the OutputStream.
 *
 * @return the {@link OutputStream} to write to the file.
 *
 *
 * @throws PathAlreadyExistsException
 *             If the target existed. NOTE(review): this tag seems inconsistent with the replace semantics described above -- confirm against the
 *             implementations.
 * @throws NoSuchPathException
 *             if a parent directory does not exist.
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             if an I/O error occurred.
 * @throws IllegalArgumentException
 *             If path is null.
 */
public abstract OutputStream writeToFile(Path file) throws XenonException;
/**
 * Open an existing file and return an {@link OutputStream} to append data to this file. (optional operation)
 * <p>
 * If the file does not exist, an exception will be thrown.
 *
 * This operation may not be supported by all implementations.
 *
 * </p>
 *
 * @param file
 *            the target file for the OutputStream.
 *
 * @return the {@link OutputStream} to write to the file.
 *
 * @throws PathAlreadyExistsException
 *             If the target existed. NOTE(review): appending requires the file to exist -- confirm whether this tag applies here.
 * @throws NoSuchPathException
 *             if a parent directory does not exist.
 * @throws InvalidPathException
 *             if not a regular file
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             if an I/O error occurred.
 * @throws IllegalArgumentException
 *             If path is null.
 * @throws UnsupportedOperationException
 *             if the adaptor does not support appending
 */
public abstract OutputStream appendToFile(Path file) throws XenonException;
/**
 * Get the {@link PathAttributes} of an existing path.
 *
 * @param path
 *            the existing path.
 *
 * @return the FileAttributes of the path.
 *
 * @throws NoSuchPathException
 *             If the file does not exist.
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             if an I/O error occurred.
 * @throws IllegalArgumentException
 *             If path is null.
 */
public abstract PathAttributes getAttributes(Path path) throws XenonException;
/**
 * Reads the target of a symbolic link (optional operation).
 *
 * @param link
 *            the link to read.
 *
 * @return a Path representing the target of the link.
 *
 * @throws NoSuchPathException
 *             If the link does not exist.
 * @throws InvalidPathException
 *             If the source is not a link.
 * @throws UnsupportedOperationException
 *             If this FileSystem does not support symbolic links.
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             if an I/O error occurred.
 * @throws IllegalArgumentException
 *             If path is null.
 */
public abstract Path readSymbolicLink(Path link) throws XenonException;
/**
 * Sets the POSIX permissions of a path (optional operation).
 *
 * @param path
 *            the target path.
 * @param permissions
 *            the permissions to set.
 *
 * @throws NoSuchPathException
 *             If the target path does not exist.
 * @throws UnsupportedOperationException
 *             If this FileSystem does not support setting POSIX permissions.
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             if an I/O error occurred.
 * @throws IllegalArgumentException
 *             If path is null.
 */
public abstract void setPosixFilePermissions(Path path, Set<PosixFilePermission> permissions) throws XenonException;
/**
 * Convert the provided path to an absolute path by (if necessary) resolving a relative path against the working directory of this FileSystem. The resulting
 * path is also normalized.
 *
 * @param path
 *            the path to convert
 * @throws IllegalArgumentException
 *             if path is null.
 * @return an absolute path
 */
protected Path toAbsolutePath(Path path) {
    assertNotNull(path);
    // Relative paths are resolved against the working directory first.
    Path resolved = path.isAbsolute() ? path : workingDirectory.resolve(path);
    return resolved.normalize();
}
/**
 * Copy data from <code>in</code> to <code>out</code> using a buffer size of <code>buffersize</code>.
 *
 * After each <code>buffersize</code> block of data, <code>callback.addBytesCopied</code> will be invoked to report the number of bytes copied and
 * <code>callback.isCancelled</code> will be invoked to determine if the copy should continue.
 *
 * @param in
 *            the stream to copy the data from.
 * @param out
 *            the stream to copy the data to.
 * @param buffersize
 *            the buffer size to use for copying.
 * @param callback
 *            the callback to report bytes copied to and check cancellation from.
 * @throws IOException
 *             if an I/O exception occurred.
 * @throws CopyCancelledException
 *             if the copy was cancelled by the user.
 */
protected void streamCopy(InputStream in, OutputStream out, int buffersize, CopyCallback callback) throws IOException, CopyCancelledException {

    byte[] buffer = new byte[buffersize];

    for (int bytesRead = in.read(buffer); bytesRead > 0; bytesRead = in.read(buffer)) {
        out.write(buffer, 0, bytesRead);
        callback.addBytesCopied(bytesRead);

        // Honor a user cancellation between blocks.
        if (callback.isCancelled()) {
            throw new CopyCancelledException(getAdaptorName(), "Copy cancelled by user");
        }
    }

    // Flush the output to ensure all data is written when this method returns.
    out.flush();
}
/**
 * Copy a symbolic link to another file system (optional operation).
 *
 * This is a blocking copy operation. It only returns once the link has been copied or the copy has failed.
 *
 * This operation may be re-implemented by the various implementations of FileSystem.
 *
 * This default implementation is based on a creating a new link on the destination filesystem. Note that the file the link is referring to is not copied.
 * Only the link itself is copied.
 *
 * @param source
 *            the link to copy.
 * @param destinationFS
 *            the destination {@link FileSystem} to copy to.
 * @param destination
 *            the destination link on the destination file system.
 * @param mode
 *            selects what should happen if the destination link already exists
 * @param callback
 *            a {@link CopyCallback} used to update the status of the copy, or cancel it while in progress.
 *
 * @throws InvalidPathException
 *             if the provide source is not a link.
 * @throws NoSuchPathException
 *             if the source link does not exist or the destination parent directory does not exist.
 * @throws PathAlreadyExistsException
 *             if the destination link already exists.
 * @throws UnsupportedOperationException
 *             if the destination FileSystem does not support symbolic links.
 * @throws XenonException
 *             if the link could not be copied.
 */
protected void copySymbolicLink(Path source, FileSystem destinationFS, Path destination, CopyMode mode, CopyCallback callback) throws XenonException {

    PathAttributes attributes = getAttributes(source);

    if (!attributes.isSymbolicLink()) {
        // BUGFIX: the message previously said "not a regular file", copied from copyFile.
        throw new InvalidPathException(getAdaptorName(), "Source is not a symbolic link: " + source);
    }

    destinationFS.assertParentDirectoryExists(destination);

    if (destinationFS.exists(destination)) {
        switch (mode) {
        case CREATE:
            throw new PathAlreadyExistsException(getAdaptorName(), "Destination path already exists: " + destination);
        case IGNORE:
            return;
        case REPLACE:
            // continue
            break;
        }
    }

    // Recreate the link on the destination; the link target itself is deliberately not copied.
    Path target = readSymbolicLink(source);
    destinationFS.createSymbolicLink(destination, target);
}
/**
 * Copy a single file to another file system.
 *
 * This is a blocking copy operation. It only returns once the file has been copied or the copy has failed.
 *
 * This operation may be re-implemented by the various implementations of FileSystem. This default implementation is based on a simple stream based copy.
 *
 * @param source
 *            the file to copy.
 * @param destinationFS
 *            the destination {@link FileSystem} to copy to.
 * @param destination
 *            the destination file on the destination file system.
 * @param mode
 *            selects what should happen if the destination file already exists
 * @param callback
 *            a {@link CopyCallback} used to update the status of the copy, or cancel it while in progress.
 *
 * @throws InvalidPathException
 *             if the provide source is not a regular file.
 * @throws NoSuchPathException
 *             if the source file does not exist or the destination parent directory does not exist.
 * @throws PathAlreadyExistsException
 *             if the destination file already exists.
 * @throws XenonException
 *             If the file could not be copied.
 */
protected void copyFile(Path source, FileSystem destinationFS, Path destination, CopyMode mode, CopyCallback callback) throws XenonException {

    PathAttributes attributes = getAttributes(source);

    if (!attributes.isRegular()) {
        throw new InvalidPathException(getAdaptorName(), "Source is not a regular file: " + source);
    }

    destinationFS.assertParentDirectoryExists(destination);

    if (destinationFS.exists(destination)) {
        switch (mode) {
        case CREATE:
            throw new PathAlreadyExistsException(getAdaptorName(), "Destination path already exists: " + destination);
        case IGNORE:
            return;
        case REPLACE:
            // Remove the existing destination before overwriting it.
            destinationFS.delete(destination, true);
            // continue
            break;
        }
    }

    if (callback.isCancelled()) {
        throw new CopyCancelledException(getAdaptorName(), "Copy cancelled by user");
    }

    try (InputStream in = readFromFile(source); OutputStream out = destinationFS.writeToFile(destination, attributes.getSize())) {
        streamCopy(in, out, bufferSize, callback);
    } catch (XenonException e) {
        // BUGFIX: rethrow Xenon-specific failures as-is. Previously a CopyCancelledException thrown
        // by streamCopy was swallowed by the generic catch below and reported as "Stream copy failed",
        // losing the cancellation semantics.
        throw e;
    } catch (Exception e) {
        throw new XenonException(getAdaptorName(), "Stream copy failed", e);
    }
}
/**
 * Perform a (possibly) recursive copy from a path on this filesystem to a path on <code>destinationFS</code>.
 *
 * @param source
 *            the source path on this FileSystem.
 * @param destinationFS
 *            the destination FileSystem.
 * @param destination
 *            the destination path.
 * @param mode
 *            the copy mode that determines how to react if the destination already exists.
 * @param recursive
 *            should the copy be performed recursively ?
 * @param callback
 *            a {@link CopyCallback} used to return status information on the copy.
 * @throws XenonException
 *             if an error occurred.
 */
protected void performCopy(Path source, FileSystem destinationFS, Path destination, CopyMode mode, boolean recursive, CopyCallback callback)
        throws XenonException {

    if (!exists(source)) {
        throw new NoSuchPathException(getAdaptorName(), "No such file " + source.toString());
    }

    PathAttributes attributes = getAttributes(source);

    // Regular files and symbolic links are copied directly, without recursion.
    if (attributes.isRegular()) {
        copyFile(source, destinationFS, destination, mode, callback);
        return;
    }

    if (attributes.isSymbolicLink()) {
        copySymbolicLink(source, destinationFS, destination, mode, callback);
        return;
    }

    if (!attributes.isDirectory()) {
        throw new InvalidPathException(getAdaptorName(), "Source path is not a file, link or directory: " + source);
    }

    if (!recursive) {
        throw new InvalidPathException(getAdaptorName(), "Source path is a directory: " + source);
    }

    // From here on we know the source is a directory. We should also check the destination type.
    if (destinationFS.exists(destination)) {
        switch (mode) {
        case CREATE:
            throw new PathAlreadyExistsException(getAdaptorName(), "Destination path already exists: " + destination);
        case IGNORE:
            return;
        case REPLACE:
            // continue
            break;
        }

        attributes = destinationFS.getAttributes(destination);

        if (attributes.isRegular() || attributes.isSymbolicLink()) {
            // Replace an existing file or link with a directory.
            destinationFS.delete(destination, false);
            destinationFS.createDirectory(destination);
        } else if (!attributes.isDirectory()) {
            // BUGFIX: report the offending destination path; the original message printed the source.
            throw new InvalidPathException(getAdaptorName(), "Existing destination is not a file, link or directory: " + destination);
        }
    } else {
        destinationFS.createDirectory(destination);
    }

    // We are now sure the target directory exists.
    copyRecursive(source, destinationFS, destination, mode, callback);
}
/**
 * Recursively copy the contents of directory <code>source</code> into the (already existing) directory <code>destination</code> on
 * <code>destinationFS</code>.
 *
 * Two passes over a recursive listing of the source tree: the first pass mirrors the directory structure on the destination and sums the sizes of all
 * regular files (reported to the callback via <code>start</code>); the second pass copies the regular files themselves.
 */
private void copyRecursive(Path source, FileSystem destinationFS, Path destination, CopyMode mode, CopyCallback callback) throws XenonException {
long bytesToCopy = 0;
// Recursive listing of the whole source tree; "." and ".." entries are skipped below.
Iterable<PathAttributes> listing = list(source, true);
// Pass 1: create destination directories and compute the total byte count.
for (PathAttributes p : listing) {
if (callback.isCancelled()) {
throw new CopyCancelledException(getAdaptorName(), "Copy cancelled by user");
}
if (p.isDirectory() && !isDotDot(p.getPath())) {
// Map the source-relative location of this directory onto the destination tree.
Path rel = source.relativize(p.getPath());
Path dst = destination.resolve(rel);
if (destinationFS.exists(dst)) {
if (destinationFS.getAttributes(dst).isDirectory()) {
switch (mode) {
case CREATE:
throw new PathAlreadyExistsException(getAdaptorName(), "Directory already exists: " + dst);
case REPLACE:
break; // leave directory
case IGNORE:
// NOTE(review): this returns from the *entire* recursive copy, not just this subdirectory.
// The original comment said "ignore subdir", so a "continue" may have been intended — confirm.
return;
}
} else {
// Destination exists but is a file/link: remove it so a directory can take its place.
destinationFS.delete(dst, true);
}
} else {
destinationFS.createDirectories(dst);
}
} else if (p.isRegular()) {
bytesToCopy += p.getSize();
}
}
// Report the total amount of work before starting the actual data transfer.
callback.start(bytesToCopy);
// Pass 2: copy the regular files.
for (PathAttributes p : listing) {
if (callback.isCancelled()) {
throw new CopyCancelledException(getAdaptorName(), "Copy cancelled by user");
}
if (p.isRegular()) {
Path rel = source.relativize(p.getPath());
Path dst = destination.resolve(rel);
copyFile(p.getPath(), destinationFS, dst, mode, callback);
}
}
}
/**
 * Delete a single file. This is only ever called on existing files; callers perform the existence check.
 *
 * This operation must be implemented by the various implementations of FileSystem.
 *
 * @param file
 *            the file to remove
 * @throws InvalidPathException
 *             if the provided path is not a file.
 * @throws NoSuchPathException
 *             if the provided file does not exist.
 * @throws XenonException
 *             If the file could not be removed.
 */
protected abstract void deleteFile(Path file) throws XenonException;
/**
 * Delete an empty directory. This is only ever called on empty directories; callers remove the contents first.
 *
 * This operation can only delete empty directories (analogous to <code>rmdir</code> in Linux).
 *
 * This operation must be implemented by the various implementations of FileSystem.
 *
 * @param path
 *            the directory to remove
 * @throws InvalidPathException
 *             if the provided path is not a directory.
 * @throws NoSuchPathException
 *             if the provided path does not exist.
 * @throws XenonException
 *             If the directory could not be removed.
 */
protected abstract void deleteDirectory(Path path) throws XenonException;
/**
 * Return the list of entries in a directory.
 *
 * This operation is non-recursive; any subdirectories in <code>dir</code> will be returned as part of the list, but they will not be listed themselves.
 *
 * This operation must be implemented by the various implementations of FileSystem.
 *
 * @param dir
 *            the directory to list
 * @return a {@link Iterable} that iterates over all entries in <code>dir</code> (implementations may include "." and ".." entries; callers filter them)
 * @throws XenonException
 *             If the list could not be retrieved.
 */
protected abstract Iterable<PathAttributes> listDirectory(Path dir) throws XenonException;
/**
 * Returns an (optionally recursive) listing of the entries in a directory <code>dir</code>.
 *
 * This is a generic implementation which relies on <code>listDirectory</code> to provide listings of individual directories.
 *
 * @param dir
 *            the directory to list.
 * @param list
 *            the list to which the directory entries will be added.
 * @param recursive
 *            if the listing should be done recursively.
 * @throws XenonException
 *             If the list could not be retrieved.
 */
protected void list(Path dir, ArrayList<PathAttributes> list, boolean recursive) throws XenonException {

    // Collect subdirectories during the first (and now only) pass, so the Iterable returned by
    // listDirectory is traversed exactly once. The previous implementation iterated it twice,
    // which breaks if an implementation returns a one-shot (e.g. stream-backed) Iterable.
    ArrayList<PathAttributes> subdirs = new ArrayList<>();

    for (PathAttributes p : listDirectory(dir)) {
        if (isDotDot(p.getPath())) {
            continue; // skip "." and ".." entries
        }
        list.add(p);
        if (recursive && p.isDirectory()) {
            subdirs.add(p);
        }
    }

    if (recursive) {
        // Recurse after all direct entries have been added, preserving the original ordering.
        for (PathAttributes current : subdirs) {
            list(dir.resolve(current.getPath().getFileNameAsString()), list, true);
        }
    }
}
/**
 * Asynchronously copy an existing source path to a target path on a different file system.
 *
 * If the source path is a file, it will be copied to the destination file on the target file system.
 *
 * If the source path is a directory, it will only be copied if <code>recursive</code> is set to <code>true</code>. Otherwise, an exception will be thrown.
 * When copying recursively, the directory and its content (both files and subdirectories with content), will be copied to <code>destination</code>.
 *
 * Exceptions that occur during copying will not be thrown by this function, but instead are contained in a {@link CopyStatus} object which can be obtained
 * with {@link FileSystem#getStatus(String)}
 *
 * @param source
 *            the source path (on this filesystem) to copy from.
 * @param destinationFS
 *            the destination filesystem to copy to.
 * @param destination
 *            the destination path (on the destination filesystem) to copy to.
 * @param mode
 *            how to react if the destination already exists.
 * @param recursive
 *            if the copy should be recursive.
 *
 * @return a {@link String} that identifies this copy and be used to inspect its progress.
 *
 * @throws IllegalArgumentException
 *             If source, destinationFS, destination or mode is null.
 */
public synchronized String copy(final Path source, final FileSystem destinationFS, final Path destination, final CopyMode mode, final boolean recursive) {

    // Validate all arguments up front; the actual copy runs asynchronously.
    if (source == null) {
        throw new IllegalArgumentException("Source path is null");
    }
    if (destinationFS == null) {
        throw new IllegalArgumentException("Destination filesystem is null");
    }
    if (destination == null) {
        throw new IllegalArgumentException("Destination path is null");
    }
    if (mode == null) {
        throw new IllegalArgumentException("Copy mode is null!");
    }

    final String id = getNextCopyID();
    final CopyCallback cb = new CopyCallback();

    // Submit the copy to the pool; progress and cancellation are mediated by the callback.
    Future<Void> future = pool.submit(() -> {
        if (Thread.currentThread().isInterrupted()) {
            throw new CopyCancelledException(getAdaptorName(), "Copy cancelled by user");
        }
        performCopy(toAbsolutePath(source), destinationFS, toAbsolutePath(destination), mode, recursive, cb);
        return null;
    });

    pendingCopies.put(id, new PendingCopy(future, cb));
    return id;
}
/**
 * Cancel a copy operation. Afterwards, the copy is forgotten and subsequent queries with this copy string will lead to {@link NoSuchCopyException}
 *
 * @param copyIdentifier
 *            the identifier of the copy operation which to cancel.
 *
 * @return a {@link CopyStatus} containing the status of the copy.
 *
 * @throws NoSuchCopyException
 *             If the copy is not known.
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             if an I/O error occurred.
 * @throws IllegalArgumentException
 *             If the copyIdentifier is null.
 */
public synchronized CopyStatus cancel(String copyIdentifier) throws XenonException {

    if (copyIdentifier == null) {
        throw new IllegalArgumentException("Copy identifier may not be null");
    }

    PendingCopy copy = pendingCopies.remove(copyIdentifier);

    if (copy == null) {
        throw new NoSuchCopyException(getAdaptorName(), "Copy not found: " + copyIdentifier);
    }

    // Signal cancellation both cooperatively (via the callback) and via thread interruption.
    copy.callback.cancel();
    copy.future.cancel(true);

    XenonException ex = null;
    String state = "DONE";

    try {
        copy.future.get();
    } catch (ExecutionException ee) {
        // CONSISTENCY FIX: unwrap the real failure like waitUntilDone does. A XenonException thrown
        // by the copy task is reported as-is instead of being wrapped in a generic XenonException.
        Throwable cause = ee.getCause();
        if (cause instanceof XenonException) {
            ex = (XenonException) cause;
        } else {
            ex = new XenonException(getAdaptorName(), cause.getMessage(), cause);
        }
        state = "FAILED";
    } catch (CancellationException ce) {
        ex = new CopyCancelledException(getAdaptorName(), "Copy cancelled by user");
        state = "FAILED";
    } catch (InterruptedException e) {
        ex = new CopyCancelledException(getAdaptorName(), "Copy interrupted by user");
        state = "FAILED";
        // Restore the interrupt flag for callers higher up the stack.
        Thread.currentThread().interrupt();
    }

    return new CopyStatusImplementation(copyIdentifier, state, copy.callback.getBytesToCopy(), copy.callback.getBytesCopied(), ex);
}
/**
 * Wait until a copy operation is done or until a timeout expires.
 * <p>
 * This method will wait until a copy operation is done (either gracefully or by producing an error), or until the timeout expires, whichever comes first.
 * If the timeout expires, the copy operation will continue to run.
 * </p>
 * <p>
 * The timeout is in milliseconds and must be &gt;= 0. When timeout is 0, it will be ignored and this method will wait until the copy operation is done.
 * </p>
 * After this operation, the copy is forgotten and subsequent queries with this copy string will lead to {@link NoSuchCopyException}
 * <p>
 * A {@link CopyStatus} is returned that can be used to determine why the call returned.
 * </p>
 *
 * @param copyIdentifier
 *            the identifier of the copy operation to wait for.
 * @param timeout
 *            the maximum time to wait for the copy operation in milliseconds.
 *
 * @return a {@link CopyStatus} containing the status of the copy.
 *
 * @throws NoSuchCopyException
 *             If the copy handle is not known.
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             if an I/O error occurred.
 * @throws IllegalArgumentException
 *             If the copyIdentifier is null or if the value of timeout is negative.
 */
public CopyStatus waitUntilDone(String copyIdentifier, long timeout) throws XenonException {

    if (copyIdentifier == null) {
        throw new IllegalArgumentException("Copy identifier may not be null");
    }

    if (timeout < 0) {
        // BUGFIX: the javadoc promises an IllegalArgumentException for negative timeouts; previously
        // a negative value was passed to Future.get and behaved like an immediate timeout.
        throw new IllegalArgumentException("Timeout may not be negative");
    }

    PendingCopy copy = pendingCopies.get(copyIdentifier);

    if (copy == null) {
        throw new NoSuchCopyException(getAdaptorName(), "Copy not found: " + copyIdentifier);
    }

    XenonException ex = null;
    String state = "DONE";

    try {
        if (timeout == 0) {
            // BUGFIX: the javadoc promises that a timeout of 0 waits until the copy is done; previously
            // Future.get(0, MILLISECONDS) was called, which times out immediately.
            copy.future.get();
        } else {
            copy.future.get(timeout, TimeUnit.MILLISECONDS);
        }
    } catch (TimeoutException e) {
        state = "RUNNING";
    } catch (ExecutionException ee) {
        // Report the underlying failure, preserving Xenon-specific exception types.
        Throwable cause = ee.getCause();
        if (cause instanceof XenonException) {
            ex = (XenonException) cause;
        } else {
            ex = new XenonException(getAdaptorName(), cause.getMessage(), cause);
        }
        state = "FAILED";
    } catch (CancellationException ce) {
        ex = new CopyCancelledException(getAdaptorName(), "Copy cancelled by user");
        state = "FAILED";
    } catch (InterruptedException ie) {
        ex = new CopyCancelledException(getAdaptorName(), "Copy interrupted by user");
        state = "FAILED";
        // Restore the interrupt flag for callers higher up the stack.
        Thread.currentThread().interrupt();
    }

    // Completed copies (successful or not) are forgotten.
    if (copy.future.isDone()) {
        pendingCopies.remove(copyIdentifier);
    }

    return new CopyStatusImplementation(copyIdentifier, state, copy.callback.getBytesToCopy(), copy.callback.getBytesCopied(), ex);
}
/**
 * Retrieve the status of an copy. After obtaining the status of a completed copy, the copy is forgotten and subsequent queries with this copy string will
 * lead to {@link NoSuchCopyException}.
 *
 * @param copyIdentifier
 *            the identifier of the copy for which to retrieve the status.
 *
 * @return a {@link CopyStatus} containing the status of the asynchronous copy.
 *
 * @throws NoSuchCopyException
 *             If the copy is not known.
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             if an I/O error occurred.
 * @throws IllegalArgumentException
 *             If the copyIdentifier is null.
 */
public CopyStatus getStatus(String copyIdentifier) throws XenonException {

    if (copyIdentifier == null) {
        throw new IllegalArgumentException("Copy identifier may not be null");
    }

    PendingCopy copy = pendingCopies.get(copyIdentifier);

    if (copy == null) {
        throw new NoSuchCopyException(getAdaptorName(), "Copy not found: " + copyIdentifier);
    }

    XenonException ex = null;
    String state = "PENDING";

    if (copy.future.isDone()) {
        // Completed copies (successful or not) are forgotten once their status is observed.
        pendingCopies.remove(copyIdentifier);

        // We have either finished, crashed, or cancelled
        try {
            copy.future.get();
            state = "DONE";
        } catch (ExecutionException ee) {
            // CONSISTENCY FIX: unwrap the real failure like waitUntilDone does. A XenonException
            // thrown by the copy task is reported as-is instead of wrapped in a generic one.
            Throwable cause = ee.getCause();
            if (cause instanceof XenonException) {
                ex = (XenonException) cause;
            } else {
                ex = new XenonException(getAdaptorName(), cause.getMessage(), cause);
            }
            state = "FAILED";
        } catch (CancellationException ce) {
            ex = new CopyCancelledException(getAdaptorName(), "Copy cancelled by user");
            state = "FAILED";
        } catch (InterruptedException ie) {
            ex = new CopyCancelledException(getAdaptorName(), "Copy interrupted by user");
            state = "FAILED";
            // Restore the interrupt flag for callers higher up the stack.
            Thread.currentThread().interrupt();
        }
    } else if (copy.callback.isStarted()) {
        state = "RUNNING";
    }

    return new CopyStatusImplementation(copyIdentifier, state, copy.callback.getBytesToCopy(), copy.callback.getBytesCopied(), ex);
}
/** Throws an {@link IllegalArgumentException} if the given path is <code>null</code>. */
protected void assertNotNull(Path path) {
    if (path != null) {
        return;
    }
    throw new IllegalArgumentException("Path is null");
}
/** Throws {@link NoSuchPathException} if the given (non-null) path does not exist. */
protected void assertPathExists(Path path) throws XenonException {
    assertNotNull(path);
    boolean present = exists(path);
    if (!present) {
        throw new NoSuchPathException(getAdaptorName(), "Path does not exist: " + path);
    }
}
/** Throws {@link PathAlreadyExistsException} if the given (non-null) path already exists. */
protected void assertPathNotExists(Path path) throws XenonException {
    assertNotNull(path);
    boolean present = exists(path);
    if (present) {
        throw new PathAlreadyExistsException(getAdaptorName(), "Path already exists: " + path);
    }
}
/** Throws {@link InvalidPathException} if the given (non-null) path exists and is a directory. */
protected void assertPathIsNotDirectory(Path path) throws XenonException {
    assertNotNull(path);
    if (!exists(path)) {
        return; // nothing to check for non-existing paths
    }
    if (getAttributes(path).isDirectory()) {
        throw new InvalidPathException(getAdaptorName(), "Was expecting a regular file, but got a directory: " + path.toString());
    }
}
/** Throws {@link InvalidPathException} if the given (non-null) path is not a regular file. */
protected void assertPathIsFile(Path path) throws XenonException {
    assertNotNull(path);
    PathAttributes attributes = getAttributes(path);
    if (!attributes.isRegular()) {
        throw new InvalidPathException(getAdaptorName(), "Path is not a file: " + path);
    }
}
/** Throws {@link InvalidPathException} if the given (non-null) path is not a directory. */
protected void assertPathIsDirectory(Path path) throws XenonException {
    assertNotNull(path);
    PathAttributes attributes = getAttributes(path);
    // Guard against implementations that return null instead of throwing.
    if (attributes == null) {
        throw new InvalidPathException(getAdaptorName(), "Path failed to produce attributes: " + path);
    }
    if (!attributes.isDirectory()) {
        throw new InvalidPathException(getAdaptorName(), "Path is not a directory: " + path);
    }
}
/** Asserts that <code>file</code> exists and is a regular file. */
protected void assertFileExists(Path file) throws XenonException {
assertPathExists(file);
assertPathIsFile(file);
}
/** Asserts that <code>dir</code> exists and is a directory. */
protected void assertDirectoryExists(Path dir) throws XenonException {
assertPathExists(dir);
assertPathIsDirectory(dir);
}
/** Asserts that the parent of <code>path</code> (if it has one) exists and is a directory. */
protected void assertParentDirectoryExists(Path path) throws XenonException {
    assertNotNull(path);
    Path parent = path.getParent();
    if (parent == null) {
        return; // a path without a parent has nothing to check
    }
    assertDirectoryExists(parent);
}
/** Asserts that <code>link</code> exists and is a symbolic link. */
protected void assertFileIsSymbolicLink(Path link) throws XenonException {
    assertNotNull(link);
    assertPathExists(link);
    PathAttributes attributes = getAttributes(link);
    if (!attributes.isSymbolicLink()) {
        throw new InvalidPathException(getAdaptorName(), "Not a symbolic link: " + link);
    }
}
/** Throws {@link NotConnectedException} if this file system has been closed. */
protected void assertIsOpen() throws XenonException {
    if (isOpen()) {
        return;
    }
    throw new NotConnectedException(getAdaptorName(), "Connection is closed");
}
// Expects two non-null, normalized absolute paths.
// Returns true iff both refer to the same location (plain equality on the normalized paths).
protected boolean areSamePaths(Path source, Path target) {
return source.equals(target);
}
/** Returns <code>true</code> iff the (non-null) path's file name is "." or "..". */
protected boolean isDotDot(Path path) {
    assertNotNull(path);
    // The "yoda" comparisons keep this null-safe if the path has no file name.
    String name = path.getFileNameAsString();
    boolean isDot = ".".equals(name);
    boolean isDotDot = "..".equals(name);
    return isDot || isDotDot;
}
/** Two FileSystem instances are equal iff they share the same unique identifier and runtime class. */
@Override
public boolean equals(Object o) {
    if (o == this) {
        return true;
    }
    // Exact class comparison (not instanceof): different adaptor subclasses are never equal.
    if (o == null || o.getClass() != getClass()) {
        return false;
    }
    return Objects.equals(uniqueID, ((FileSystem) o).uniqueID);
}
// Hashes the same field used by equals(), preserving the equals/hashCode contract.
@Override
public int hashCode() {
return Objects.hash(uniqueID);
}
}
|
NLeSC/Xenon
|
src/main/java/nl/esciencecenter/xenon/filesystems/FileSystem.java
|
Java
|
apache-2.0
| 63,280 |
maintainer 'Wesleyan University'
maintainer_email 'software@wesleyan.edu'
# Use the SPDX identifier: plain 'Apache 2.0' is deprecated by Chef (13+) and
# produces a metadata warning at cookbook load time.
license 'Apache-2.0'
description 'Manages file type associations'
long_description IO.read(File.join(File.dirname(__FILE__), 'README.md'))
version '0.2.0'
depends 'common'
depends 'homebrew'
|
wesleyan/chef-cookbooks
|
cookbooks/launch_association/metadata.rb
|
Ruby
|
apache-2.0
| 311 |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using UnityEngine;
namespace Assets.NVR.Interfaces.Elements
{
/// <summary>
/// A unit that can additionally be recolored and repositioned.
/// </summary>
public interface IElement : IUnit
{
/// <summary>Sets the element's color. The expected string format (name vs. hex code)
/// is not visible here — confirm against implementations.</summary>
void SetColor(string color);
/// <summary>Sets the element's position along a single axis; which axis and which units
/// apply is implementation-defined — confirm against implementations.</summary>
void SetPosition(float pos);
}
}
|
tsvetie/nativescript-cli
|
resources/vr/Assets/NVR/Interfaces/Elements/IElement.cs
|
C#
|
apache-2.0
| 278 |
package org.infinispan.protostream;
/**
 * An interface to be implemented by marshaller objects of type {@link MessageMarshaller} that are able to handle
 * unknown fields by storing them into an {@link UnknownFieldSet}.
 *
 * @author anistor@redhat.com
 * @since 3.0
 */
public interface UnknownFieldSetHandler<T> {

/**
 * Extracts the {@link UnknownFieldSet} attached to a message instance.
 * Presumably returns {@code null} when no unknown fields were stored — confirm against implementations.
 *
 * @param message the message instance to read from
 * @return the stored unknown field set
 */
UnknownFieldSet getUnknownFieldSet(T message);

/**
 * Attaches a set of unrecognized fields to the given message instance.
 *
 * @param message the message instance to store into
 * @param unknownFieldSet the fields that were not recognized during unmarshalling
 */
void setUnknownFieldSet(T message, UnknownFieldSet unknownFieldSet);
}
|
jmarkos/protostream
|
core/src/main/java/org/infinispan/protostream/UnknownFieldSetHandler.java
|
Java
|
apache-2.0
| 445 |
// Appears to be a resolve-test fixture (per its test-resources location): a value type
// declaring a user-defined subtraction operator. The operator's existence, not its result,
// is what matters here, so the body just returns a default value.
public struct SomeValue
{
public static SomeValue operator-(SomeValue someValue, SomeValue someValue2)
{
return new SomeValue();
}
}
public class Program
{
public static void Main()
{
// "nullable -= someValue" binds to the lifted form of SomeValue's user-defined
// operator- (null-propagating compound assignment on a Nullable<SomeValue>).
SomeValue? nullable = new SomeValue();
SomeValue someValue = new SomeValue();
nullable -= someValue;
}
}
|
consulo/consulo-csharp
|
csharp-impl/src/test/resources/resolve/other/Issue424.cs
|
C#
|
apache-2.0
| 306 |
package com.fasterxml.jackson.annotation;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Annotation that can be used to define one or more alternative names for
 * a property, accepted during deserialization as alternative to the official
 * name. Alias information is also exposed during POJO introspection, but has
 * no effect during serialization where primary name is always used.
 *<p>
 * Examples:
 *<pre>
 *public class Info {
 *  &#64;JsonAlias({ "n", "Name" })
 *  public String name;
 *}
 *</pre>
 *
 * @since 2.9
 */
@Target({ElementType.ANNOTATION_TYPE, // for combo-annotations
ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER// for properties (field, setter, ctor param)
})
@Retention(RetentionPolicy.RUNTIME)
@JacksonAnnotation
public @interface JsonAlias
{
/**
 * One or more secondary names to accept as aliases to the official name.
 *
 * @return Zero or more aliases to associate with property annotated
 */
public String[] value() default { };
}
|
FasterXML/jackson-annotations
|
src/main/java/com/fasterxml/jackson/annotation/JsonAlias.java
|
Java
|
apache-2.0
| 1,130 |
package com.example.linxj.tool;
/**
 * Listener used to report the outcome of an asynchronous network request.
 * Implementations receive one of the EVENT_* codes below, plus an optional payload.
 * (Original comments translated from Chinese.)
 */
public interface RequestListener {
public static final int EVENT_BASE = 0x100;
/**
 * No network connection is available.
 * */
public static final int EVENT_NOT_NETWORD = EVENT_BASE + 1;
/**
 * A network error occurred.
 * */
public static final int EVENT_NETWORD_EEEOR = EVENT_BASE + 2;
/**
 * Failed to fetch data from the network.
 * */
public static final int EVENT_GET_DATA_EEEOR = EVENT_BASE + 3;
/**
 * Successfully fetched data from the network.
 * */
public static final int EVENT_GET_DATA_SUCCESS = EVENT_BASE + 4;
/**
 * The socket/connection was closed.
 * (NOTE(review): the original comment duplicated "data fetched successfully" — presumably a
 * copy-paste slip; the constant name suggests socket closure. Confirm with callers.)
 * */
public static final int EVENT_CLOSE_SOCKET = EVENT_BASE + 5;
/**
 * Invoked when the request completes.
 *
 * @param actionCode one of the EVENT_* codes above
 * @param object optional payload accompanying the event
 */
public void action(int actionCode, Object object);
}
|
lab702-flyme/sweo_android
|
app/src/main/java/com/example/linxj/tool/RequestListener.java
|
Java
|
apache-2.0
| 787 |
package net.joelinn.riot.staticdata.dto;
/**
 * Joe Linn
 * 2/1/14
 */
// DTO for an entry of the Riot static-data API.
// NOTE(review): fields are public and presumably populated by name-based JSON mapping, which
// would explain the non-camelCase "isrune" — do not rename without checking the mapper config.
public class RuneType {
// presumably true when the entry is a rune (field name mirrors the JSON key) — confirm
public boolean isrune;
// rune tier; exact value domain not visible here
public String tier;
// rune type/category; exact value domain not visible here
public String type;
}
|
jlinn/riot-api-java
|
src/main/java/net/joelinn/riot/staticdata/dto/RuneType.java
|
Java
|
apache-2.0
| 173 |
package com.example.mohit.tpomnnit.tpo;
import android.app.Dialog;
import android.content.DialogInterface;
import android.content.Intent;
import android.content.SharedPreferences;
import android.database.sqlite.SQLiteDatabase;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.os.Bundle;
import android.preference.PreferenceManager;
import android.support.annotation.NonNull;
import android.support.design.widget.FloatingActionButton;
import android.support.v7.app.AlertDialog;
import android.view.View;
import android.support.design.widget.NavigationView;
import android.support.v4.view.GravityCompat;
import android.support.v4.widget.DrawerLayout;
import android.support.v7.app.ActionBarDrawerToggle;
import android.support.v7.app.AppCompatActivity;
import android.support.v7.widget.Toolbar;
import android.view.Menu;
import android.view.MenuItem;
import android.view.Window;
import android.widget.AdapterView;
import android.widget.ArrayAdapter;
import android.widget.Button;
import android.widget.EditText;
import android.widget.ImageView;
import android.widget.Spinner;
import android.widget.TextView;
import android.widget.Toast;
import com.example.mohit.tpomnnit.Landing;
import com.example.mohit.tpomnnit.R;
import com.example.mohit.tpomnnit.contactUs;
import com.example.mohit.tpomnnit.login_signup.TpoLogin;
import com.example.mohit.tpomnnit.student.StudentProfile;
import com.example.mohit.tpomnnit.student.profile.UserData;
import com.google.android.gms.tasks.OnFailureListener;
import com.google.android.gms.tasks.OnSuccessListener;
import com.google.firebase.database.DataSnapshot;
import com.google.firebase.database.DatabaseError;
import com.google.firebase.database.DatabaseReference;
import com.google.firebase.database.FirebaseDatabase;
import com.google.firebase.database.ValueEventListener;
import com.google.firebase.storage.FileDownloadTask;
import com.google.firebase.storage.FirebaseStorage;
import com.google.firebase.storage.StorageReference;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
public class TpoHome extends AppCompatActivity
implements NavigationView.OnNavigationItemSelectedListener {
private EditText name,regnum,branch,course,regno;
private String registrationnum,userId;
private DatabaseReference mDatabase;
private StorageReference storage,imageref;
private ImageView imageview,verified;
String nameuser;
Spinner spinnerbranch,spinnercourse;
String branchselected,courseselected;
@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_tpo_home);

    Toolbar toolbar = (Toolbar) findViewById(R.id.toolbar);
    setSupportActionBar(toolbar);

    // FAB opens the "add company" form.
    FloatingActionButton fab = (FloatingActionButton) findViewById(R.id.fab);
    fab.setOnClickListener(new View.OnClickListener() {
        @Override
        public void onClick(View view) {
            Intent i = new Intent(TpoHome.this, AddCompany.class);
            startActivity(i);
        }
    });

    // Standard navigation-drawer wiring.
    DrawerLayout drawer = (DrawerLayout) findViewById(R.id.drawer_layout);
    ActionBarDrawerToggle toggle = new ActionBarDrawerToggle(
            this, drawer, toolbar, R.string.navigation_drawer_open, R.string.navigation_drawer_close);
    drawer.setDrawerListener(toggle);
    toggle.syncState();

    final NavigationView navigationView = (NavigationView) findViewById(R.id.nav_view);
    navigationView.setNavigationItemSelectedListener(this);

    mDatabase = FirebaseDatabase.getInstance().getReference("tpouserdata");
    userId = mDatabase.push().getKey();

    name = (EditText) findViewById(R.id.name);
    regnum = (EditText) findViewById(R.id.regnum);
    branch = (EditText) findViewById(R.id.branch);
    course = (EditText) findViewById(R.id.course);
    verified = (ImageView) findViewById(R.id.verified);
    imageview = (ImageView) findViewById(R.id.imageView3);

    // NOTE(review): assumes the launching intent always carries the "reg" extra; if it can be
    // missing, registrationnum is null and the listener below will NPE — confirm all callers.
    registrationnum = getIntent().getStringExtra("reg");

    // Download the profile image for this registration number into a temp file.
    storage = FirebaseStorage.getInstance().getReference("userimage/" + registrationnum + ".jpg");
    imageref = storage;

    File localFile = null;
    try {
        localFile = File.createTempFile("images", "jpg");
    } catch (IOException e) {
        e.printStackTrace();
    }
    final File finalLocalFile = localFile;

    imageref.getFile(localFile).addOnSuccessListener(new OnSuccessListener<FileDownloadTask.TaskSnapshot>() {
        @Override
        public void onSuccess(FileDownloadTask.TaskSnapshot taskSnapshot) {
            // Local temp file has been created.
            // BUGFIX: the Toast was built but never displayed (missing .show()).
            Toast.makeText(getApplicationContext(), "File Download", Toast.LENGTH_LONG).show();
            Bitmap bitmap = BitmapFactory.decodeFile(finalLocalFile.getAbsolutePath());
            imageview.setImageBitmap(bitmap);
        }
    }).addOnFailureListener(new OnFailureListener() {
        @Override
        public void onFailure(@NonNull Exception exception) {
            Toast.makeText(TpoHome.this, "Image not found", Toast.LENGTH_LONG).show();
        }
    });

    // Populate the form fields and the navigation-drawer header with this user's record.
    ValueEventListener vel = new ValueEventListener() {
        @Override
        public void onDataChange(DataSnapshot dataSnapshot) {
            for (DataSnapshot userDetails : dataSnapshot.getChildren()) {
                if (registrationnum.equals(userDetails.child("regno").getValue().toString())) {
                    name.setText(userDetails.child("name").getValue().toString());

                    View h1 = navigationView.getHeaderView(0);
                    TextView nav_user = h1.findViewById(R.id.name);
                    TextView nav_email = h1.findViewById(R.id.email);
                    nav_user.setText("\t " + userDetails.child("name").getValue().toString());
                    nav_email.setText("\t " + userDetails.child("email").getValue().toString());

                    course.setText(userDetails.child("course").getValue().toString());
                    branch.setText(userDetails.child("branch").getValue().toString());
                    regnum.setText(registrationnum);
                }
            }
        }

        @Override
        public void onCancelled(DatabaseError databaseError) {
            // A failed read simply leaves the form empty; no user-visible error is shown.
        }
    };
    mDatabase.addListenerForSingleValueEvent(vel);
}
// Pressing back navigates to the landing screen rather than finishing this activity.
// NOTE(review): super.onBackPressed() is intentionally not called and finish() is not invoked,
// so this activity remains on the back stack — confirm this is the desired navigation behavior.
@Override
public void onBackPressed()
{
Intent i=new Intent(TpoHome.this,Landing.class);
startActivity(i);
}
@Override
public boolean onCreateOptionsMenu(Menu menu) {
// Inflate the menu; this adds items to the action bar if it is present.
getMenuInflater().inflate(R.menu.tpo_home, menu);
// Returning true tells Android to display the menu.
return true;
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
// Handle action bar item clicks here. The action bar will
// automatically handle clicks on the Home/Up button, so long
// as you specify a parent activity in AndroidManifest.xml.
int id = item.getItemId();
//noinspection SimplifiableIfStatement
if (id == R.id.action_settings) {
}
return super.onOptionsItemSelected(item);
}
    @SuppressWarnings("StatementWithEmptyBody")
    @Override
    public boolean onNavigationItemSelected(MenuItem item) {
        // Handle navigation drawer item clicks; each branch launches the
        // matching screen and the drawer is closed before returning.
        int id = item.getItemId();
        if (id == R.id.verifyuser) {
            // User verification screen; flag 0 selects the default mode.
            Intent i = new Intent(TpoHome.this, VerifyUser.class);
            i.putExtra("flag", 0);
            startActivity(i);
            // Handle the camera action
        } else if (id == R.id.manage_student) {
            // Show an inline filter dialog (course / branch / reg. number)
            // before launching the student-management screen.
            final Dialog dialog = new Dialog(TpoHome.this);
            dialog.requestWindowFeature(Window.FEATURE_NO_TITLE);
            dialog.setCancelable(false);
            dialog.setContentView(R.layout.activity_student_filter);
            dialog.setCanceledOnTouchOutside(true);
            // NOTE(review): calling onBackPressed() on a freshly-built dialog
            // before show() looks unintentional — confirm this line is needed.
            dialog.onBackPressed();
            spinnerbranch = (Spinner) dialog.findViewById(R.id.spinnerbranch);
            spinnercourse = (Spinner) dialog.findViewById(R.id.spinnercourse);
            regno = (EditText) dialog.findViewById(R.id.regno);
            Button find = (Button) dialog.findViewById(R.id.find);
            // Populate the two spinners and their selection listeners.
            branchspinner();
            coursespinner();
            find.setOnClickListener(new View.OnClickListener() {
                @Override
                public void onClick(View view) {
                    // Forward the chosen filters to ManageStudents.
                    dialog.dismiss();
                    Intent intent = new Intent(TpoHome.this, ManageStudents.class);
                    intent.putExtra("flag", 1);
                    intent.putExtra("course", courseselected);
                    intent.putExtra("branch", branchselected);
                    intent.putExtra("regno", regno.getText().toString().trim());
                    startActivity(intent);
                }
            });
            dialog.show();
            //Intent i = new Intent(TpoHome.this,StudentFilter.class);
            //startActivity(i);
        } else if (id == R.id.update_company) {
            Intent intent = new Intent(TpoHome.this, UpdateCompany.class);
            startActivity(intent);
        } else if (id == R.id.nav_manage) {
            // No action wired up for this entry yet.
        } else if (id == R.id.nav_share) {
            Intent i = new Intent(TpoHome.this, contactUs.class);
            startActivity(i);
        } else if (id == R.id.nav_send) {
            // No action wired up for this entry yet.
        } else if (id == R.id.logout) {
            // Confirm, then drop the locally cached login and return to the
            // TPO login screen.
            new AlertDialog.Builder(this)
                    .setIcon(android.R.drawable.ic_dialog_alert)
                    .setTitle("Logging Off")
                    .setMessage("Are you sure you want to logout?")
                    .setPositiveButton("Yes", new DialogInterface.OnClickListener()
                    {
                        @Override
                        public void onClick(DialogInterface dialog, int which) {
                            // Dropping the "tpo" table is what logs the user out.
                            SQLiteDatabase data = openOrCreateDatabase("login", MODE_PRIVATE, null);
                            data.execSQL("drop table if exists tpo");
                            Intent i = new Intent(TpoHome.this, TpoLogin.class);
                            startActivity(i);
                            finish();
                        }
                    })
                    .setNegativeButton("No", null)
                    .show();
        }
        DrawerLayout drawer = (DrawerLayout) findViewById(R.id.drawer_layout);
        drawer.closeDrawer(GravityCompat.START);
        return true;
    }
private void branchspinner()
{
// Spinner click listener
List<String> branches = new ArrayList<String>();
branches.add("ALL");
branches.add("CSE");
branches.add("IT");
branches.add("ECE");
branches.add("EE");
branches.add("ME");
branches.add("PIE");
branches.add("CHE");
branches.add("BIO");
branches.add("CIVIL");
branches.add("MCA");
ArrayAdapter<String> dataAdapterbranch;
dataAdapterbranch = new ArrayAdapter<String>(this, android.R.layout.simple_spinner_item,branches);
// Drop down layout style - list view with radio button
dataAdapterbranch.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item);
// attaching data adapter to spinner
spinnerbranch.setAdapter(dataAdapterbranch);
spinnerbranch.setOnItemSelectedListener(new AdapterView.OnItemSelectedListener() {
@Override
public void onItemSelected(AdapterView<?> parent, View view,
int position, long id) {
branchselected = parent.getItemAtPosition(position).toString();
if(branchselected.equals("ALL"))
branchselected="";
// TODO Auto-generated method stub
}
@Override
public void onNothingSelected(AdapterView<?> parent) {
// TODO Auto-generated method stub
}
});
}
private void coursespinner()
{
// Spinner click listener
List<String> courses = new ArrayList<String>();
courses.add("ALL");
courses.add("BTech");
courses.add("MTech");
courses.add("MCA");
courses.add("PhD");
courses.add("MBA");
ArrayAdapter<String> dataAdapterbranch;
dataAdapterbranch = new ArrayAdapter<String>(this, android.R.layout.simple_spinner_item,courses);
// Drop down layout style - list view with radio button
dataAdapterbranch.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item);
// attaching data adapter to spinner
spinnercourse.setAdapter(dataAdapterbranch);
spinnercourse.setOnItemSelectedListener(new AdapterView.OnItemSelectedListener() {
@Override
public void onItemSelected(AdapterView<?> parent, View view,
int position, long id) {
courseselected = parent.getItemAtPosition(position).toString();
if(courseselected.equals("ALL"))
courseselected="";
// TODO Auto-generated method stub
}
@Override
public void onNothingSelected(AdapterView<?> parent) {
// TODO Auto-generated method stub
}
});
}
}
|
mkfeuhrer/TPO-MNNIT
|
app/src/main/java/com/example/mohit/tpomnnit/tpo/TpoHome.java
|
Java
|
apache-2.0
| 15,187 |
namespace Husky
{
	/// <summary>
	/// Two-state switch value. Each member carries a localized display label
	/// ("关闭" = off, "开启" = on) and a Bootstrap text-color CSS class via the
	/// <c>Label</c> attribute.
	/// </summary>
	public enum OnOff
	{
		[Label("关闭", CssClass = "text-danger")]
		Off,
		[Label("开启", CssClass = "text-success")]
		On
	}
}
|
cwx521/Husky
|
src/Husky.Helpers/Enums/OnOff.cs
|
C#
|
apache-2.0
| 154 |
package com.packt.sfjd.ch11;
import java.io.Serializable;
import scala.runtime.AbstractFunction2;
public class AbsFunc2 extends AbstractFunction2<Object, String, Object> implements Serializable{
	// Two-argument Scala function usable from Java code that interops with
	// Spark/Scala APIs. It ignores both arguments and always yields the
	// boxed boolean true.
	@Override
	public Object apply(Object arg0, String arg1) {
		// Arguments are intentionally unused: this is a constant "true" function.
		return true;
	}
}
|
kumarsumit1/learning
|
src/main/java/com/packt/sfjd/ch11/AbsFunc2.java
|
Java
|
apache-2.0
| 283 |
// Copyright 2006 Konrad Twardowski
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.makagiga.commons.swing;
import java.awt.BorderLayout;
import java.awt.ComponentOrientation;
import java.awt.Insets;
import java.awt.Window;
import javax.swing.JComponent;
import javax.swing.JWindow;
import org.makagiga.commons.MApplication;
import org.makagiga.commons.UI;
/**
* @since 4.0 (org.makagiga.commons.swing package)
*/
public class MWindow extends JWindow implements MBorderLayout {

	// private

	// When true, screen insets (task bars, docks) are subtracted from the
	// screen size when positioning the window at an edge.
	private boolean respectScreenInsets = true;
	private UI.HorizontalPosition horizontalPosition;
	private UI.VerticalPosition verticalPosition;

	// public

	public MWindow() {
		this(null);
	}

	public MWindow(final Window owner) {
		super(owner);
	}

	public UI.HorizontalPosition getHorizontalPosition() { return horizontalPosition; }

	// Moves the window horizontally to match the new position; the vertical
	// coordinate is left unchanged.
	public void setHorizontalPosition(final UI.HorizontalPosition value) {
		horizontalPosition = value;
		setLocation(getNewXPosition(), getLocation().y);
	}

	/**
	 * Returns whether edge positioning accounts for screen insets.
	 *
	 * @since 4.8
	 */
	public boolean getRespectScreenInsets() { return respectScreenInsets; }

	/**
	 * Sets whether edge positioning accounts for screen insets.
	 *
	 * @since 4.8
	 */
	public void setRespectScreenInsets(final boolean value) { respectScreenInsets = value; }

	public UI.VerticalPosition getVerticalPosition() { return verticalPosition; }

	// Moves the window vertically to match the new position; the horizontal
	// coordinate is left unchanged.
	public void setVerticalPosition(final UI.VerticalPosition value) {
		verticalPosition = value;
		setLocation(getLocation().x, getNewYPosition());
	}

	@Override
	public void setVisible(final boolean value) {
		// Apply right-to-left component orientation once, just before the
		// window is first shown, when the application forces RTL layout.
		if (value && MApplication.getForceRTL() && !isVisible())
			applyComponentOrientation(ComponentOrientation.RIGHT_TO_LEFT);
		super.setVisible(value);
	}

	public void showAtPosition(final UI.VerticalPosition v, final UI.HorizontalPosition h) {
		showAtPosition(v, h, true);
	}

	/**
	 * Optionally packs the window, moves it to the requested screen corner,
	 * and shows it.
	 *
	 * @since 4.8
	 */
	public void showAtPosition(final UI.VerticalPosition v, final UI.HorizontalPosition h, final boolean pack) {
		horizontalPosition = h;
		verticalPosition = v;
		// pack() must run before positioning so getWidth()/getHeight()
		// reflect the final size.
		if (pack)
			pack();
		setLocation(getNewXPosition(), getNewYPosition());
		setVisible(true);
	}

	// MBorderLayout

	/**
	 * Adds {@code component} to the center of the BorderLayout.
	 *
	 * @since 1.2
	 */
	@Override
	public void addCenter(final JComponent component) {
		UI.addCenter(this, component);
	}

	/**
	 * Adds {@code component} to the trailing (line-end) edge.
	 *
	 * @since 1.2
	 */
	@Override
	public void addEast(final JComponent component) {
		add(component, BorderLayout.LINE_END);
	}

	/**
	 * Adds {@code component} to the top (page-start) edge.
	 *
	 * @since 1.2
	 */
	@Override
	public void addNorth(final JComponent component) {
		add(component, BorderLayout.PAGE_START);
	}

	/**
	 * Adds {@code component} to the bottom (page-end) edge.
	 *
	 * @since 1.2
	 */
	@Override
	public void addSouth(final JComponent component) {
		add(component, BorderLayout.PAGE_END);
	}

	/**
	 * Adds {@code component} to the leading (line-start) edge.
	 *
	 * @since 1.2
	 */
	@Override
	public void addWest(final JComponent component) {
		add(component, BorderLayout.LINE_START);
	}

	// private

	// X coordinate that places the window flush against the left or right
	// screen edge (any other horizontalPosition value is treated as right).
	private int getNewXPosition() {
		Insets i = getScreenInsets();
		return
			(horizontalPosition == UI.HorizontalPosition.LEFT)
			? i.left // left
			: UI.getScreenSize().width - getWidth() - i.right; // right
	}

	// Y coordinate that places the window flush against the top or bottom
	// screen edge (any other verticalPosition value is treated as top).
	private int getNewYPosition() {
		Insets i = getScreenInsets();
		return
			(verticalPosition == UI.VerticalPosition.BOTTOM)
			? UI.getScreenSize().height - getHeight() - i.bottom // bottom
			: i.top; // top
	}

	// Screen insets, or zero insets when respectScreenInsets is disabled.
	private Insets getScreenInsets() {
		if (respectScreenInsets)
			return UI.getScreenInsets();
		return new Insets(0, 0, 0, 0);
	}
}
|
stuffer2325/Makagiga
|
src/org/makagiga/commons/swing/MWindow.java
|
Java
|
apache-2.0
| 3,865 |
export default class Shared {
  /**
   * Returns a human-friendly relative-time string for `time`.
   *
   * `time` is a date string such as "2020-01-31 12:00:00" or an ISO-ish
   * string; '-' is rewritten to '/' and 'T'/'Z' to spaces before parsing,
   * so the value is interpreted in local time.
   *
   * Dates from today yield "just now" / "N minutes ago" / "N hours ago";
   * anything else (older days, future dates, unparseable input) yields a
   * short "D Mon" date string — matching the original behavior, where the
   * "Yesterday" / "N days ago" / "N weeks ago" branches were unreachable
   * dead code (any day_diff !== 0 already took the date branch). Those
   * dead branches have been removed.
   */
  getPrettyDate(time) {
    const date = new Date((time || '').replace(/-/g, '/').replace(/[TZ]/g, ' '));
    const diff = ((new Date()).getTime() - date.getTime()) / 1000;
    const dayDiff = Math.floor(diff / 86400);

    // Invalid dates (NaN), future dates, and anything at least a day old
    // all render as a short date, e.g. "31 Jan".
    if (isNaN(dayDiff) || dayDiff !== 0) {
      return date.getDate() + ' ' + date.toDateString().split(' ')[1];
    }

    // Same-day timestamps get a relative description.
    if (diff < 60) return 'just now';
    if (diff < 120) return '1 minute ago';
    if (diff < 3600) return Math.floor(diff / 60) + ' minutes ago';
    if (diff < 7200) return '1 hour ago';
    return Math.floor(diff / 3600) + ' hours ago';
  }
}
|
beckettkev/react-chat-back
|
src/utils/shared.js
|
JavaScript
|
apache-2.0
| 867 |
package com.amazonaws.fps.model;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlType;
/**
* <p>Java class for anonymous complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType>
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <sequence>
* <element name="TransactionId" type="{http://www.w3.org/2001/XMLSchema}string"/>
* <element name="TransactionStatus" type="{http://fps.amazonaws.com/doc/2008-09-17/}TransactionStatus"/>
* </sequence>
* </restriction>
* </complexContent>
* </complexType>
* </pre>
* Generated by AWS Code Generator
* <p/>
* Tue Sep 29 03:25:23 PDT 2009
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "", propOrder = {
"transactionId",
"transactionStatus"
})
@XmlRootElement(name = "WriteOffDebtResult")
public class WriteOffDebtResult {
@XmlElement(name = "TransactionId", required = true)
protected String transactionId;
@XmlElement(name = "TransactionStatus", required = true)
protected TransactionStatus transactionStatus;
/**
* Default constructor
*
*/
public WriteOffDebtResult() {
super();
}
/**
* Value constructor
*
*/
public WriteOffDebtResult(final String transactionId, final TransactionStatus transactionStatus) {
this.transactionId = transactionId;
this.transactionStatus = transactionStatus;
}
/**
* Gets the value of the transactionId property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getTransactionId() {
return transactionId;
}
/**
* Sets the value of the transactionId property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setTransactionId(String value) {
this.transactionId = value;
}
public boolean isSetTransactionId() {
return (this.transactionId!= null);
}
/**
* Gets the value of the transactionStatus property.
*
* @return
* possible object is
* {@link TransactionStatus }
*
*/
public TransactionStatus getTransactionStatus() {
return transactionStatus;
}
/**
* Sets the value of the transactionStatus property.
*
* @param value
* allowed object is
* {@link TransactionStatus }
*
*/
public void setTransactionStatus(TransactionStatus value) {
this.transactionStatus = value;
}
public boolean isSetTransactionStatus() {
return (this.transactionStatus!= null);
}
/**
* Sets the value of the TransactionId property.
*
* @param value
* @return
* this instance
*/
public WriteOffDebtResult withTransactionId(String value) {
setTransactionId(value);
return this;
}
/**
* Sets the value of the TransactionStatus property.
*
* @param value
* @return
* this instance
*/
public WriteOffDebtResult withTransactionStatus(TransactionStatus value) {
setTransactionStatus(value);
return this;
}
/**
*
* XML fragment representation of this object
*
* @return XML fragment for this object. Name for outer
* tag expected to be set by calling method. This fragment
* returns inner properties representation only
*/
protected String toXMLFragment() {
StringBuffer xml = new StringBuffer();
if (isSetTransactionId()) {
xml.append("<TransactionId>");
xml.append(escapeXML(getTransactionId()));
xml.append("</TransactionId>");
}
if (isSetTransactionStatus()) {
xml.append("<TransactionStatus>");
xml.append(getTransactionStatus().value());
xml.append("</TransactionStatus>");
}
return xml.toString();
}
/**
*
* Escape XML special characters
*/
private String escapeXML(String string) {
StringBuffer sb = new StringBuffer();
int length = string.length();
for (int i = 0; i < length; ++i) {
char c = string.charAt(i);
switch (c) {
case '&':
sb.append("&");
break;
case '<':
sb.append("<");
break;
case '>':
sb.append(">");
break;
case '\'':
sb.append("'");
break;
case '"':
sb.append(""");
break;
default:
sb.append(c);
}
}
return sb.toString();
}
/**
*
* JSON fragment representation of this object
*
* @return JSON fragment for this object. Name for outer
* object expected to be set by calling method. This fragment
* returns inner properties representation only
*
*/
protected String toJSONFragment() {
StringBuffer json = new StringBuffer();
boolean first = true;
if (isSetTransactionId()) {
if (!first) json.append(", ");
json.append(quoteJSON("TransactionId"));
json.append(" : ");
json.append(quoteJSON(getTransactionId()));
first = false;
}
if (isSetTransactionStatus()) {
if (!first) json.append(", ");
json.append(quoteJSON("TransactionStatus"));
json.append(" : ");
json.append(quoteJSON(getTransactionStatus().value()));
first = false;
}
return json.toString();
}
/**
*
* Quote JSON string
*/
private String quoteJSON(String string) {
StringBuffer sb = new StringBuffer();
sb.append("\"");
int length = string.length();
for (int i = 0; i < length; ++i) {
char c = string.charAt(i);
switch (c) {
case '"':
sb.append("\\\"");
break;
case '\\':
sb.append("\\\\");
break;
case '/':
sb.append("\\/");
break;
case '\b':
sb.append("\\b");
break;
case '\f':
sb.append("\\f");
break;
case '\n':
sb.append("\\n");
break;
case '\r':
sb.append("\\r");
break;
case '\t':
sb.append("\\t");
break;
default:
if (c < ' ') {
sb.append("\\u" + String.format("%03x", Integer.valueOf(c)));
} else {
sb.append(c);
}
}
}
sb.append("\"");
return sb.toString();
}
}
|
DomDerrien/amazon-fps-gaej
|
src/com/amazonaws/fps/model/WriteOffDebtResult.java
|
Java
|
apache-2.0
| 7,331 |
/*
Copyright 2010 Zhengmao HU (James)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package net.sf.jabb.util.db.impl;
import java.util.Properties;
import javax.sql.DataSource;
import net.sf.jabb.util.db.ConnectionUtility;
import net.sf.jabb.util.db.DataSourceProvider;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* 一个一个尝试。
* @author Zhengmao HU (James)
*
*/
public class TryDataSourceProvider implements DataSourceProvider {
	private static final Log log = LogFactory.getLog(TryDataSourceProvider.class);

	@Override
	public DataSource createDataSource(String source, Properties configurationProperties, String config) {
		// This provider has no use for extra properties; fall through to the
		// configuration-string variant.
		log.warn("Properties argument ignored for: " + source);
		return createDataSource(source, config);
	}

	@Override
	public DataSource createDataSource(String source, String config) {
		// Try each configured candidate in order; the first that resolves to
		// a usable data source wins.
		final String[] candidates = config.split(ConnectionUtility.DELIMITORS);
		for (String candidate : candidates) {
			final DataSource found = ConnectionUtility.getDataSource(candidate);
			if (found == null) {
				continue;
			}
			log.debug("Data source '" + candidate + "' will be used for data source '" + source + "'.");
			return found;
		}
		log.error("No usable data source found for '" + source + "'.");
		return null;
	}

	@Override
	public boolean destroyDataSource(DataSource dataSource) {
		// Nothing to destroy here: the underlying data sources are owned by
		// their original providers.
		return false;
	}
}
|
james-hu/jabb-core
|
src/main/java/net/sf/jabb/util/db/impl/TryDataSourceProvider.java
|
Java
|
apache-2.0
| 1,868 |
package org.jboss.resteasy.reactive.server.vertx.test.customproviders;
import javax.ws.rs.core.Response;
import javax.ws.rs.ext.ExceptionMapper;
import javax.ws.rs.ext.Provider;
@Provider
public class UniExceptionMapper implements ExceptionMapper<UniException> {
    // Maps a UniException to an HTTP 202 (Accepted) response whose entity is
    // the exception's input value.
    @Override
    public Response toResponse(UniException exception) {
        return Response.accepted(exception.getInput()).build();
    }
}
|
quarkusio/quarkus
|
independent-projects/resteasy-reactive/server/vertx/src/test/java/org/jboss/resteasy/reactive/server/vertx/test/customproviders/UniExceptionMapper.java
|
Java
|
apache-2.0
| 409 |
package utils
import (
"math/rand"
"time"
)
// letterBytes is the alphabet used for random strings: a-z, A-Z, 0-9 (62 symbols).
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
const (
	letterIdxBits = 6                    // 6 bits to represent a letter index
	letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
	letterIdxMax  = 63 / letterIdxBits   // # of letter indices fitting in 63 bits
)

// src is the package-level PRNG source, seeded once at startup.
// NOTE(review): rand.Source is not safe for concurrent use — confirm callers
// are single-goroutine.
var src = rand.NewSource(time.Now().UnixNano())
// RandomString returns a string of n characters drawn from letterBytes.
// Each 63-bit value from the package PRNG is sliced into 6-bit chunks, so a
// single rand call yields up to letterIdxMax candidate indices; chunks that
// fall outside the alphabet are discarded (rejection sampling).
func RandomString(n int) string {
	out := make([]byte, n)
	pos := n - 1
	bits, left := src.Int63(), letterIdxMax
	for pos >= 0 {
		if left == 0 {
			// Exhausted the current random word; draw a fresh one.
			bits, left = src.Int63(), letterIdxMax
		}
		if idx := int(bits & letterIdxMask); idx < len(letterBytes) {
			out[pos] = letterBytes[idx]
			pos--
		}
		bits >>= letterIdxBits
		left--
	}
	return string(out)
}
// LetterByIndex returns the single-character string at position idx in
// letterBytes, wrapping modulo the alphabet size. Negative indices are
// normalized into range (e.g. -1 maps to the last symbol): Go's % operator
// yields a negative remainder for negative operands, and the previous
// version panicked on slicing with such an index.
func LetterByIndex(idx int) string {
	idx = ((idx % len(letterBytes)) + len(letterBytes)) % len(letterBytes)
	return letterBytes[idx : idx+1]
}
|
fiatjaf/summadb
|
utils/random.go
|
GO
|
apache-2.0
| 964 |
# Copyright 2011 OpenStack Foundation
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import itertools
import time
import eventlet
import greenlet
from oslo.config import cfg
import six
from logcollector.openstack.common import excutils
from logcollector.openstack.common.gettextutils import _
from logcollector.openstack.common import importutils
from logcollector.openstack.common import jsonutils
from logcollector.openstack.common import log as logging
from logcollector.openstack.common.rpc import amqp as rpc_amqp
from logcollector.openstack.common.rpc import common as rpc_common
# Optional qpid client modules: each name is None when the corresponding
# module is unavailable (Connection raises ImportError if qpid_messaging is
# missing).
qpid_codec = importutils.try_import("qpid.codec010")
qpid_messaging = importutils.try_import("qpid.messaging")
qpid_exceptions = importutils.try_import("qpid.messaging.exceptions")

LOG = logging.getLogger(__name__)

# Broker-connection options, registered on the global CONF object below.
qpid_opts = [
    cfg.StrOpt('qpid_hostname',
               default='localhost',
               help='Qpid broker hostname'),
    cfg.IntOpt('qpid_port',
               default=5672,
               help='Qpid broker port'),
    cfg.ListOpt('qpid_hosts',
                default=['$qpid_hostname:$qpid_port'],
                help='Qpid HA cluster host:port pairs'),
    cfg.StrOpt('qpid_username',
               default='',
               help='Username for qpid connection'),
    cfg.StrOpt('qpid_password',
               default='',
               help='Password for qpid connection',
               secret=True),
    cfg.StrOpt('qpid_sasl_mechanisms',
               default='',
               help='Space separated list of SASL mechanisms to use for auth'),
    cfg.IntOpt('qpid_heartbeat',
               default=60,
               help='Seconds between connection keepalive heartbeats'),
    cfg.StrOpt('qpid_protocol',
               default='tcp',
               help="Transport to use, either 'tcp' or 'ssl'"),
    cfg.BoolOpt('qpid_tcp_nodelay',
                default=True,
                help='Disable Nagle algorithm'),
    # NOTE(russellb) If any additional versions are added (beyond 1 and 2),
    # this file could probably use some additional refactoring so that the
    # differences between each version are split into different classes.
    cfg.IntOpt('qpid_topology_version',
               default=1,
               help="The qpid topology version to use. Version 1 is what "
                    "was originally used by impl_qpid. Version 2 includes "
                    "some backwards-incompatible changes that allow broker "
                    "federation to work. Users should update to version 2 "
                    "when they are able to take everything down, as it "
                    "requires a clean break."),
]

cfg.CONF.register_opts(qpid_opts)

# Content type marking payloads that were JSON-serialized because qpid could
# not encode them natively (see Publisher._pack_json_msg /
# ConsumerBase._unpack_json_msg).
JSON_CONTENT_TYPE = 'application/json; charset=utf8'
def raise_invalid_topology_version(conf=None):
    """Log and raise for an unsupported ``qpid_topology_version``.

    ``conf`` is optional because every call site in this module invokes this
    helper with no arguments; previously that raised a ``TypeError`` and
    masked the real configuration error. When ``conf`` is omitted the
    message simply does not include the offending value.

    :param conf: optional config object exposing ``qpid_topology_version``
    :raises Exception: always
    """
    if conf is not None:
        msg = (_("Invalid value for qpid_topology_version: %d") %
               conf.qpid_topology_version)
    else:
        msg = _("Invalid value for qpid_topology_version")
    LOG.error(msg)
    raise Exception(msg)
class ConsumerBase(object):
    """Consumer base class."""

    def __init__(self, conf, session, callback, node_name, node_opts,
                 link_name, link_opts):
        """Declare a queue on an amqp session.

        'session' is the amqp session to use
        'callback' is the callback to call when messages are received
        'node_name' is the first part of the Qpid address string, before ';'
        'node_opts' will be applied to the "x-declare" section of "node"
                    in the address string.
        'link_name' goes into the "name" field of the "link" in the address
                    string
        'link_opts' will be applied to the "x-declare" section of "link"
                    in the address string.
        """
        self.callback = callback
        self.receiver = None
        self.session = None
        # Build the qpid address-string options; the two topology versions
        # produce structurally different addresses.
        if conf.qpid_topology_version == 1:
            addr_opts = {
                "create": "always",
                "node": {
                    "type": "topic",
                    "x-declare": {
                        "durable": True,
                        "auto-delete": True,
                    },
                },
                "link": {
                    "durable": True,
                    "x-declare": {
                        "durable": False,
                        "auto-delete": True,
                        "exclusive": False,
                    },
                },
            }
            addr_opts["node"]["x-declare"].update(node_opts)
        elif conf.qpid_topology_version == 2:
            addr_opts = {
                "link": {
                    "x-declare": {
                        "auto-delete": True,
                        "exclusive": False,
                    },
                },
            }
        else:
            # NOTE(review): called without the conf argument the helper's
            # signature requires — as written this raises TypeError rather
            # than the intended error message; confirm against
            # raise_invalid_topology_version().
            raise_invalid_topology_version()
        addr_opts["link"]["x-declare"].update(link_opts)
        if link_name:
            addr_opts["link"]["name"] = link_name
        # Full qpid address: "<node_name> ; <json options>".
        self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
        self.connect(session)

    def connect(self, session):
        """Declare the receiver on connect."""
        self._declare_receiver(session)

    def reconnect(self, session):
        """Re-declare the receiver after a qpid reconnect."""
        self._declare_receiver(session)

    def _declare_receiver(self, session):
        # Capacity of 1 keeps prefetch minimal: at most one unacknowledged
        # message in flight per receiver.
        self.session = session
        self.receiver = session.receiver(self.address)
        self.receiver.capacity = 1

    def _unpack_json_msg(self, msg):
        """Load the JSON data in msg if msg.content_type indicates that it
           is necessary.  Put the loaded data back into msg.content and
           update msg.content_type appropriately.

        A Qpid Message containing a dict will have a content_type of
        'amqp/map', whereas one containing a string that needs to be converted
        back from JSON will have a content_type of JSON_CONTENT_TYPE.

        :param msg: a Qpid Message object
        :returns: None
        """
        if msg.content_type == JSON_CONTENT_TYPE:
            msg.content = jsonutils.loads(msg.content)
            msg.content_type = 'amqp/map'

    def consume(self):
        """Fetch the message and pass it to the callback object."""
        message = self.receiver.fetch()
        try:
            self._unpack_json_msg(message)
            msg = rpc_common.deserialize_msg(message.content)
            self.callback(msg)
        except Exception:
            # Best-effort: a bad message is logged and skipped, not redelivered.
            LOG.exception(_("Failed to process message... skipping it."))
        finally:
            # TODO(sandy): Need support for optional ack_on_error.
            self.session.acknowledge(message)

    def get_receiver(self):
        return self.receiver

    def get_node_name(self):
        # The node name is everything before the ';' in the address string.
        return self.address.split(';')[0]
class DirectConsumer(ConsumerBase):
    """Queue/consumer class for 'direct'."""

    def __init__(self, conf, session, msg_id, callback):
        """Create a consumer for direct (reply) messages.

        `session` is the amqp session to use, `msg_id` the address to listen
        on, and `callback` is invoked for each received message.
        """
        link_opts = {
            "auto-delete": conf.amqp_auto_delete,
            "exclusive": True,
            "durable": conf.amqp_durable_queues,
        }
        version = conf.qpid_topology_version
        if version == 1:
            node_name, node_opts = "%s/%s" % (msg_id, msg_id), {"type": "direct"}
            link_name = msg_id
        elif version == 2:
            node_name, node_opts = "amq.direct/%s" % msg_id, {}
            link_name = None
        else:
            raise_invalid_topology_version()
        super(DirectConsumer, self).__init__(conf, session, callback,
                                             node_name, node_opts, link_name,
                                             link_opts)
class TopicConsumer(ConsumerBase):
    """Consumer class for 'topic'."""

    def __init__(self, conf, session, topic, callback, name=None,
                 exchange_name=None):
        """Create a consumer bound to `topic` on the control exchange.

        :param session: the amqp session to use
        :param topic: topic to listen on
        :paramtype topic: str
        :param callback: invoked for each received message
        :param name: optional queue name, defaults to topic
        """
        exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
        link_opts = {
            "auto-delete": conf.amqp_auto_delete,
            "durable": conf.amqp_durable_queues,
        }
        version = conf.qpid_topology_version
        if version == 1:
            node_name = "%s/%s" % (exchange_name, topic)
        elif version == 2:
            node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
        else:
            raise_invalid_topology_version()
        super(TopicConsumer, self).__init__(conf, session, callback, node_name,
                                            {}, name or topic, link_opts)
class FanoutConsumer(ConsumerBase):
    """Consumer class for 'fanout'."""

    def __init__(self, conf, session, topic, callback):
        """Create a fanout consumer on `session` for `topic`.

        Every message published to the fanout address is delivered to
        `callback`.
        """
        self.conf = conf
        link_opts = {"exclusive": True}
        version = conf.qpid_topology_version
        if version == 1:
            node_name = "%s_fanout" % topic
            node_opts = {"durable": False, "type": "fanout"}
        elif version == 2:
            node_name, node_opts = "amq.topic/fanout/%s" % topic, {}
        else:
            raise_invalid_topology_version()
        super(FanoutConsumer, self).__init__(conf, session, callback,
                                             node_name, node_opts, None,
                                             link_opts)
class Publisher(object):
    """Base Publisher class."""

    def __init__(self, conf, session, node_name, node_opts=None):
        """Init the Publisher class with the exchange_name, routing_key,
        and other options
        """
        self.sender = None
        self.session = session
        # Topology v1 encodes declare options in the address string; v2 uses
        # the bare node name (pre-declared amq.* exchanges).
        if conf.qpid_topology_version == 1:
            addr_opts = {
                "create": "always",
                "node": {
                    "type": "topic",
                    "x-declare": {
                        "durable": False,
                        # auto-delete isn't implemented for exchanges in qpid,
                        # but put in here anyway
                        "auto-delete": True,
                    },
                },
            }
            if node_opts:
                addr_opts["node"]["x-declare"].update(node_opts)
            self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
        elif conf.qpid_topology_version == 2:
            self.address = node_name
        else:
            # NOTE(review): called without the conf argument the helper's
            # signature requires — raises TypeError as written; see
            # raise_invalid_topology_version().
            raise_invalid_topology_version()
        self.reconnect(session)

    def reconnect(self, session):
        """Re-establish the Sender after a reconnection."""
        self.sender = session.sender(self.address)

    def _pack_json_msg(self, msg):
        """Qpid cannot serialize dicts containing strings longer than 65535
           characters.  This function dumps the message content to a JSON
           string, which Qpid is able to handle.

        :param msg: May be either a Qpid Message object or a bare dict.
        :returns: A Qpid Message with its content field JSON encoded.
        """
        try:
            msg.content = jsonutils.dumps(msg.content)
        except AttributeError:
            # Need to have a Qpid message so we can set the content_type.
            msg = qpid_messaging.Message(jsonutils.dumps(msg))
        msg.content_type = JSON_CONTENT_TYPE
        return msg

    def send(self, msg):
        """Send a message."""
        try:
            # Check if Qpid can encode the message
            check_msg = msg
            if not hasattr(check_msg, 'content_type'):
                check_msg = qpid_messaging.Message(msg)
            content_type = check_msg.content_type
            enc, dec = qpid_messaging.message.get_codec(content_type)
            enc(check_msg.content)
        except qpid_codec.CodecException:
            # This means the message couldn't be serialized as a dict.
            # Fall back to a JSON-encoded payload (see _pack_json_msg).
            msg = self._pack_json_msg(msg)
        self.sender.send(msg)
class DirectPublisher(Publisher):
    """Publisher class for 'direct'."""

    def __init__(self, conf, session, msg_id):
        """Create a publisher that sends directly to `msg_id`."""
        version = conf.qpid_topology_version
        if version == 1:
            node_name, node_opts = msg_id, {"type": "direct"}
        elif version == 2:
            node_name, node_opts = "amq.direct/%s" % msg_id, {}
        else:
            raise_invalid_topology_version()
        super(DirectPublisher, self).__init__(conf, session, node_name,
                                              node_opts)
class TopicPublisher(Publisher):
    """Publisher class for 'topic'."""

    def __init__(self, conf, session, topic):
        """Create a publisher for `topic` on the control exchange."""
        exchange_name = rpc_amqp.get_control_exchange(conf)
        version = conf.qpid_topology_version
        if version == 1:
            node_name = "%s/%s" % (exchange_name, topic)
        elif version == 2:
            node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
        else:
            raise_invalid_topology_version()
        super(TopicPublisher, self).__init__(conf, session, node_name)
class FanoutPublisher(Publisher):
    """Publisher class for 'fanout'."""

    def __init__(self, conf, session, topic):
        """Create a publisher that fans out to every consumer of `topic`."""
        version = conf.qpid_topology_version
        if version == 1:
            node_name, node_opts = "%s_fanout" % topic, {"type": "fanout"}
        elif version == 2:
            node_name, node_opts = "amq.topic/fanout/%s" % topic, {}
        else:
            raise_invalid_topology_version()
        super(FanoutPublisher, self).__init__(conf, session, node_name,
                                              node_opts)
class NotifyPublisher(Publisher):
    """Publisher class for notifications."""

    def __init__(self, conf, session, topic):
        """Create a durable publisher for notification messages on `topic`."""
        exchange_name = rpc_amqp.get_control_exchange(conf)
        node_opts = {"durable": True}
        version = conf.qpid_topology_version
        if version == 1:
            node_name = "%s/%s" % (exchange_name, topic)
        elif version == 2:
            node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
        else:
            raise_invalid_topology_version()
        super(NotifyPublisher, self).__init__(conf, session, node_name,
                                              node_opts)
class Connection(object):
    """Connection object.

    Manages a single connection to the qpid broker(s), the AMQP session
    on top of it, and the consumers registered on that session.  Handles
    reconnecting with exponential backoff and re-establishing consumers
    after a reconnect.
    """

    # Class-level connection pool, populated by rpc_amqp.get_connection_pool().
    pool = None

    def __init__(self, conf, server_params=None):
        """Create and open a connection.

        :param conf: configuration object providing qpid_* options
        :param server_params: optional dict overriding broker host/port and
            credentials (used by cast_to_server)
        :raises ImportError: if qpid.messaging could not be imported
        """
        if not qpid_messaging:
            raise ImportError("Failed to import qpid.messaging")

        self.session = None
        self.consumers = {}
        self.consumer_thread = None
        self.proxy_callbacks = []
        self.conf = conf

        if server_params and 'hostname' in server_params:
            # NOTE(russellb) This enables support for cast_to_server.
            server_params['qpid_hosts'] = [
                '%s:%d' % (server_params['hostname'],
                           server_params.get('port', 5672))
            ]

        params = {
            'qpid_hosts': self.conf.qpid_hosts,
            'username': self.conf.qpid_username,
            'password': self.conf.qpid_password,
        }
        # server_params (if given) take precedence over the configured values.
        params.update(server_params or {})

        self.brokers = params['qpid_hosts']
        self.username = params['username']
        self.password = params['password']
        self.connection_create(self.brokers[0])
        self.reconnect()

    def connection_create(self, broker):
        """Create (but do not open) a qpid connection to the given broker."""
        # Create the connection - this does not open the connection
        self.connection = qpid_messaging.Connection(broker)

        # Check if flags are set and if so set them for the connection
        # before we call open
        self.connection.username = self.username
        self.connection.password = self.password

        self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms
        # Reconnection is done by self.reconnect()
        self.connection.reconnect = False
        self.connection.heartbeat = self.conf.qpid_heartbeat
        self.connection.transport = self.conf.qpid_protocol
        self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay

    def _register_consumer(self, consumer):
        # Consumers are keyed by the string form of their receiver so they
        # can be looked up from session.next_receiver() results.
        self.consumers[str(consumer.get_receiver())] = consumer

    def _lookup_consumer(self, receiver):
        return self.consumers[str(receiver)]

    def reconnect(self):
        """Handles reconnecting and re-establishing sessions and queues."""
        attempt = 0
        delay = 1
        while True:
            # Close the session if necessary
            if self.connection.opened():
                try:
                    self.connection.close()
                except qpid_exceptions.ConnectionError:
                    pass

            # Round-robin over the configured brokers.
            broker = self.brokers[attempt % len(self.brokers)]
            attempt += 1

            try:
                self.connection_create(broker)
                self.connection.open()
            except qpid_exceptions.ConnectionError as e:
                msg_dict = dict(e=e, delay=delay)
                msg = _("Unable to connect to AMQP server: %(e)s. "
                        "Sleeping %(delay)s seconds") % msg_dict
                LOG.error(msg)
                time.sleep(delay)
                # Exponential backoff, capped at 60 seconds.
                delay = min(2 * delay, 60)
            else:
                LOG.info(_('Connected to AMQP server on %s'), broker)
                break

        self.session = self.connection.session()

        if self.consumers:
            consumers = self.consumers
            self.consumers = {}

            # Re-attach every previously registered consumer to the new
            # session (their receivers change, so re-register them).
            for consumer in six.itervalues(consumers):
                consumer.reconnect(self.session)
                self._register_consumer(consumer)

            LOG.debug(_("Re-established AMQP queues"))

    def ensure(self, error_callback, method, *args, **kwargs):
        """Run method(), reconnecting and retrying on connection errors."""
        while True:
            try:
                return method(*args, **kwargs)
            except (qpid_exceptions.Empty,
                    qpid_exceptions.ConnectionError) as e:
                if error_callback:
                    error_callback(e)
                self.reconnect()

    def close(self):
        """Close/release this connection."""
        self.cancel_consumer_thread()
        self.wait_on_proxy_callbacks()
        try:
            self.connection.close()
        except Exception:
            # NOTE(dripton) Logging exceptions that happen during cleanup just
            # causes confusion; there's really nothing useful we can do with
            # them.
            pass
        self.connection = None

    def reset(self):
        """Reset a connection so it can be used again."""
        self.cancel_consumer_thread()
        self.wait_on_proxy_callbacks()
        self.session.close()
        self.session = self.connection.session()
        self.consumers = {}

    def declare_consumer(self, consumer_cls, topic, callback):
        """Create a Consumer using the class that was passed in and
        add it to our list of consumers
        """
        def _connect_error(exc):
            log_info = {'topic': topic, 'err_str': str(exc)}
            LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
                      "%(err_str)s") % log_info)

        def _declare_consumer():
            consumer = consumer_cls(self.conf, self.session, topic, callback)
            self._register_consumer(consumer)
            return consumer

        return self.ensure(_connect_error, _declare_consumer)

    def iterconsume(self, limit=None, timeout=None):
        """Return an iterator that will consume from all queues/consumers.

        :param limit: stop after this many messages (None/0 = unlimited)
        :param timeout: per-message receive timeout passed to the session
        """
        def _error_callback(exc):
            if isinstance(exc, qpid_exceptions.Empty):
                LOG.debug(_('Timed out waiting for RPC response: %s') %
                          str(exc))
                raise rpc_common.Timeout()
            else:
                LOG.exception(_('Failed to consume message from queue: %s') %
                              str(exc))

        def _consume():
            nxt_receiver = self.session.next_receiver(timeout=timeout)
            try:
                self._lookup_consumer(nxt_receiver).consume()
            except Exception:
                LOG.exception(_("Error processing message.  Skipping it."))

        for iteration in itertools.count(0):
            if limit and iteration >= limit:
                # NOTE: 'raise StopIteration' inside a generator is a
                # RuntimeError under PEP 479 (Python 3.7+); a plain
                # 'return' is the correct way to finish the generator.
                return
            yield self.ensure(_error_callback, _consume)

    def cancel_consumer_thread(self):
        """Cancel a consumer thread."""
        if self.consumer_thread is not None:
            self.consumer_thread.kill()
            try:
                self.consumer_thread.wait()
            except greenlet.GreenletExit:
                pass
            self.consumer_thread = None

    def wait_on_proxy_callbacks(self):
        """Wait for all proxy callback threads to exit."""
        for proxy_cb in self.proxy_callbacks:
            proxy_cb.wait()

    def publisher_send(self, cls, topic, msg):
        """Send to a publisher based on the publisher class."""
        def _connect_error(exc):
            log_info = {'topic': topic, 'err_str': str(exc)}
            LOG.exception(_("Failed to publish message to topic "
                          "'%(topic)s': %(err_str)s") % log_info)

        def _publisher_send():
            publisher = cls(self.conf, self.session, topic)
            publisher.send(msg)

        return self.ensure(_connect_error, _publisher_send)

    def declare_direct_consumer(self, topic, callback):
        """Create a 'direct' queue.
        In nova's use, this is generally a msg_id queue used for
        responses for call/multicall
        """
        self.declare_consumer(DirectConsumer, topic, callback)

    def declare_topic_consumer(self, topic, callback=None, queue_name=None,
                               exchange_name=None):
        """Create a 'topic' consumer."""
        self.declare_consumer(functools.partial(TopicConsumer,
                                                name=queue_name,
                                                exchange_name=exchange_name,
                                                ),
                              topic, callback)

    def declare_fanout_consumer(self, topic, callback):
        """Create a 'fanout' consumer."""
        self.declare_consumer(FanoutConsumer, topic, callback)

    def direct_send(self, msg_id, msg):
        """Send a 'direct' message."""
        self.publisher_send(DirectPublisher, msg_id, msg)

    def topic_send(self, topic, msg, timeout=None):
        """Send a 'topic' message."""
        #
        # We want to create a message with attributes, e.g. a TTL. We
        # don't really need to keep 'msg' in its JSON format any longer
        # so let's create an actual qpid message here and get some
        # value-add on the go.
        #
        # WARNING: Request timeout happens to be in the same units as
        # qpid's TTL (seconds). If this changes in the future, then this
        # will need to be altered accordingly.
        #
        qpid_message = qpid_messaging.Message(content=msg, ttl=timeout)
        self.publisher_send(TopicPublisher, topic, qpid_message)

    def fanout_send(self, topic, msg):
        """Send a 'fanout' message."""
        self.publisher_send(FanoutPublisher, topic, msg)

    def notify_send(self, topic, msg, **kwargs):
        """Send a notify message on a topic."""
        self.publisher_send(NotifyPublisher, topic, msg)

    def consume(self, limit=None):
        """Consume from all queues/consumers."""
        it = self.iterconsume(limit=limit)
        while True:
            try:
                six.next(it)
            except StopIteration:
                return

    def consume_in_thread(self):
        """Consumer from all queues/consumers in a greenthread."""
        @excutils.forever_retry_uncaught_exceptions
        def _consumer_thread():
            try:
                self.consume()
            except greenlet.GreenletExit:
                return
        if self.consumer_thread is None:
            self.consumer_thread = eventlet.spawn(_consumer_thread)
        return self.consumer_thread

    def create_consumer(self, topic, proxy, fanout=False):
        """Create a consumer that calls a method in a proxy object."""
        proxy_cb = rpc_amqp.ProxyCallback(
            self.conf, proxy,
            rpc_amqp.get_connection_pool(self.conf, Connection))
        self.proxy_callbacks.append(proxy_cb)

        if fanout:
            consumer = FanoutConsumer(self.conf, self.session, topic, proxy_cb)
        else:
            consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb)

        self._register_consumer(consumer)

        return consumer

    def create_worker(self, topic, proxy, pool_name):
        """Create a worker that calls a method in a proxy object."""
        proxy_cb = rpc_amqp.ProxyCallback(
            self.conf, proxy,
            rpc_amqp.get_connection_pool(self.conf, Connection))
        self.proxy_callbacks.append(proxy_cb)

        consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb,
                                 name=pool_name)

        self._register_consumer(consumer)

        return consumer

    def join_consumer_pool(self, callback, pool_name, topic,
                           exchange_name=None, ack_on_error=True):
        """Register as a member of a group of consumers for a given topic from
        the specified exchange.

        Exactly one member of a given pool will receive each message.

        A message will be delivered to multiple pools, if more than
        one is created.
        """
        callback_wrapper = rpc_amqp.CallbackWrapper(
            conf=self.conf,
            callback=callback,
            connection_pool=rpc_amqp.get_connection_pool(self.conf,
                                                         Connection),
            wait_for_consumers=not ack_on_error
        )
        self.proxy_callbacks.append(callback_wrapper)

        consumer = TopicConsumer(conf=self.conf,
                                 session=self.session,
                                 topic=topic,
                                 callback=callback_wrapper,
                                 name=pool_name,
                                 exchange_name=exchange_name)

        self._register_consumer(consumer)
        return consumer
def create_connection(conf, new=True):
    """Create a connection, drawing from the shared pool unless new=True."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.create_connection(conf, new, pool)
def multicall(conf, context, topic, msg, timeout=None):
    """Make a call that returns multiple times."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.multicall(conf, context, topic, msg, timeout, pool)
def call(conf, context, topic, msg, timeout=None):
    """Sends a message on a topic and wait for a response."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.call(conf, context, topic, msg, timeout, pool)
def cast(conf, context, topic, msg):
    """Sends a message on a topic without waiting for a response."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.cast(conf, context, topic, msg, pool)
def fanout_cast(conf, context, topic, msg):
    """Sends a message on a fanout exchange without waiting for a response."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.fanout_cast(conf, context, topic, msg, pool)
def cast_to_server(conf, context, server_params, topic, msg):
    """Sends a message on a topic to a specific server."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.cast_to_server(conf, context, server_params, topic, msg,
                                   pool)
def fanout_cast_to_server(conf, context, server_params, topic, msg):
    """Sends a message on a fanout exchange to a specific server."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.fanout_cast_to_server(conf, context, server_params, topic,
                                          msg, pool)
def notify(conf, context, topic, msg, envelope):
    """Sends a notification event on a topic."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.notify(conf, context, topic, msg, pool, envelope)
def cleanup():
    # Release all connections held in the module-wide connection pool.
    return rpc_amqp.cleanup(Connection.pool)
|
redhat-cip/openstack-logcollector
|
openstack-logcollector/openstack/common/rpc/impl_qpid.py
|
Python
|
apache-2.0
| 29,688 |