| file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–12.1k) | suffix (large_string, lengths 0–12k) | middle (large_string, lengths 0–7.51k) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
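The rows below are raw previews: each holds a `file_name`, the `prefix`/`suffix` code context, the held-out `middle` span, and a `fim_type` label (the values seen here are `random_line_split`, `conditional_block`, `identifier_name`, and `identifier_body`). Long cells are truncated and the holes inside the code are intentional. As a minimal sketch (not part of this card), assuming the data is published in a form loadable with the Hugging Face `datasets` library and that the dataset id and FIM sentinel tokens below are placeholders, one way to turn a row into a fill-in-the-middle training prompt looks like this:

```python
# Minimal sketch only: the dataset id and the <fim_*> sentinel tokens are
# placeholders, not values taken from this card; use whatever your tokenizer expects.
from datasets import load_dataset

ds = load_dataset("org/this-fim-dataset", split="train")  # hypothetical dataset id

def to_fim_prompt(row: dict) -> str:
    # PSM ordering: prefix and suffix as context, then the middle the model must fill in.
    return (
        "<fim_prefix>" + row["prefix"]
        + "<fim_suffix>" + row["suffix"]
        + "<fim_middle>" + row["middle"]
    )

# Inspect a few rows: file name, hole type, and prompt length.
for row in ds.select(range(3)):
    print(row["file_name"], row["fim_type"], len(to_fim_prompt(row)))
```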
client.ts
|
Attempts) {
attempts++
try {
const res = await new Promise<request.Response>((resolve, reject) => {
request(options, (error: any, response: request.Response, _body: any) => {
if (error != null) {
reject(error)
} else {
resolve(response)
}
})
})
return Object.assign(res, {
attempts,
originalUrl: options.url as string,
originalMethod: options.method as string,
})
} catch (e) {
err = e
if (!retryErrorCodes.includes(e.code)) {
break
}
await delay(retryDelay)
}
}
throw Object.assign(err, {
attempts,
originalUrl: options.url,
originalMethod: options.method,
})
}
private _options: ClientOptions
private _host: string
private _headers: Payload
private _query: Payload
private _requestOptions: RequestOptions
constructor (options: ClientOptions & RetryOptions) {
if (!MONGO_REG.test(options.appId)) {
throw new Error(`appId: ${options.appId} is not a valid mongo object id`)
}
if (!Array.isArray(options.appSecrets) || options.appSecrets.length === 0) {
throw new Error(`appSecrets required`)
}
if (typeof options.host !== 'string' || options.host === '') {
throw new Error(`host required`)
}
options.timeout = options.timeout == null ? 3000 : options.timeout
options.pool = options.pool == null ?
{ maxSockets: options.maxSockets == null ? 100 : options.maxSockets } : options.pool
options.strictSSL = options.strictSSL === true
options.retryDelay = options.retryDelay == null ? 2000 : options.retryDelay
options.maxAttempts = options.maxAttempts == null ? 3 : options.maxAttempts
this._options = options
this._host = options.host
this._headers = { 'User-Agent': UA }
this._query = {}
this._requestOptions = {
json: true,
forever: true,
strictSSL: options.strictSSL,
timeout: options.timeout,
cert: options.certChain,
key: options.privateKey,
ca: options.rootCert,
pool: options.pool,
time: options.time,
retryDelay: options.retryDelay,
maxAttempts: options.maxAttempts,
retryErrorCodes: options.retryErrorCodes,
useQuerystring: options.useQuerystring,
} as RequestOptions
}
/**
* @returns User-Agent on the client.
*/
get UA (): string {
const ua = this._headers['User-Agent']
return ua == null ? '' : ua
}
/**
* Set User-Agent to the client.
* @param ua User-Agent string.
*/
set UA (ua: string) {
this._headers['User-Agent'] = ua
}
/**
* @returns host on the client.
*/
get host () {
return this._host
}
/**
* @returns preset headers on the client.
*/
get headers () {
return this._headers
}
/**
* @returns preset query on the client.
*/
get query () {
return this._query
}
/**
* @returns preset request options on the client.
*/
get requestOptions () {
return this._requestOptions
}
/**
* Creates (by Object.create) a **new client** instance with given service methods.
* @param serviceMethod service methods that will be mounted to the client.
* @param servicehost service host for the new client.
* @returns a **new client** with the given service methods.
*/
withService<T> (serviceMethod: T, servicehost: string = ''): this & T {
const srv = Object.assign<this, T>(Object.create(this), serviceMethod)
if (servicehost !== '') {
srv._host = servicehost
}
return srv
}
/**
* Creates (by Object.create) a **new client** instance with given request options.
* @param options request options that will be copied into the client.
* @returns a **new client** with the given request options.
*/
withOptions (options: RequestOptions): this {
return Object.assign(Object.create(this), {
_requestOptions: Object.assign({}, this._requestOptions, options),
})
}
/**
* Creates (by Object.create) a **new client** instance with given headers.
* @param headers headers that will be copied into the client.
* @returns a **new client** with the given headers.
*/
withHeaders (headers: Payload): this {
return Object.assign(Object.create(this), {
_headers: Object.assign({}, this._headers, headers),
})
}
/**
* Creates (by Object.create) a **new client** instance with headers copied from the request.
* @param req IncomingMessage object that headers are read from.
* @param headers headers that will be copied into the client.
* @returns a **new client** with the given headers.
*/
forwardHeaders (req: IncomingMessage | any, ...headers: string[]): this {
if (req.req != null && req.req.headers != null) {
req = req.req
}
if (headers.length === 0) {
headers = FORWARD_HEADERS
}
const forwardHeaders: { [key: string]: string | string[] } = {}
for (const header of headers) {
if (req.headers[header] != null) {
forwardHeaders[header] = req.headers[header]
}
}
return this.withHeaders(forwardHeaders)
}
/**
* Creates (by Object.create) a **new client** instance with given query.
* @param query query that will be copied into the client.
* @returns a **new client** with the given query.
*/
withQuery (query: Payload): this {
return Object.assign(Object.create(this), {
_query: Object.assign({}, this._query, query),
})
}
/**
* Creates (by withHeaders) a **new client** instance with given `X-Tenant-Id` and `X-Tenant-Type`.
* @param tenantId that will be added to header as `X-Tenant-Id`.
* @param tenantType that will be added to header as `X-Tenant-Type`.
* @returns a **new client** with the given headers.
*/
withTenant (tenantId: string, tenantType = 'organization') {
return this.withHeaders({
'X-Tenant-Id': tenantId,
'X-Tenant-Type': tenantType,
})
}
/**
* Creates (by withHeaders) a **new client** instance with given `X-Operator-ID`.
* @param operatorId that will be added to header as `X-Operator-ID`.
* @returns a **new client** with the given headers.
*/
withOperator (operatorId: string) {
return this.withHeaders({
'X-Operator-ID': operatorId,
})
}
/**
* Creates a JWT token string with given payload and client's appSecrets.
* @param payload Payload to sign, should be a literal object.
* @param options some JWT sign options.
* @returns a token string.
*/
signToken (payload: Payload, options?: jwt.SignOptions) {
return jwt.sign(payload, this._options.appSecrets[0], options)
}
/**
* Creates a periodically changing JWT token string with appId and appSecrets.
* @param periodical period in seconds, defaults to 3600s.
* @param options some JWT sign options.
* @returns a token string.
*/
signAppToken (periodical: number = 3600, options?: jwt.SignOptions) {
const iat = Math.floor(Date.now() / (1000 * periodical)) * periodical
const payload = {
iat,
exp: iat + Math.floor(1.1 * periodical),
_appId: this._options.appId,
}
// the token changes once per period (default: hourly), optimizing for server-side caching.
return this.signToken(payload, options)
}
/**
* Decode a JWT token string to a literal object payload.
* @param token token to decode.
* @param options some JWT decode options.
* @returns a literal object.
*/
decodeToken (token: string, options?: jwt.DecodeOptions): Payload {
return jwt.decode(token, options) as Payload
}
/**
* Decode and verify a JWT token string to a literal object payload.
* If verification fails, a 401 error is thrown (created by the 'http-errors' module).
* @param token token to decode.
* @param options some JWT verify options.
* @returns a literal object.
*/
verifyToken (token: string, options?: jwt.VerifyOptions): Payload {
let error = null
for (const secret of this._options.appSecrets) {
try {
return jwt.verify(token, secret, options) as Payload
} catch (err) {
error = err
}
}
throw createError(401, error)
}
|
/**
|
random_line_split
|
|
client.ts
|
? Math.floor(options.retryDelay) : 2000
const maxAttempts = options.maxAttempts != null ? Math.floor(options.maxAttempts) : 3
const retryErrorCodes = Array.isArray(options.retryErrorCodes) ? options.retryErrorCodes : RETRIABLE_ERRORS
// default to `false`
options.followRedirect = options.followRedirect === true
let err = null
let attempts = 0
while (attempts < maxAttempts) {
attempts++
try {
const res = await new Promise<request.Response>((resolve, reject) => {
request(options, (error: any, response: request.Response, _body: any) => {
if (error != null) {
reject(error)
} else {
resolve(response)
}
})
})
return Object.assign(res, {
attempts,
originalUrl: options.url as string,
originalMethod: options.method as string,
})
} catch (e) {
err = e
if (!retryErrorCodes.includes(e.code)) {
break
}
await delay(retryDelay)
}
}
throw Object.assign(err, {
attempts,
originalUrl: options.url,
originalMethod: options.method,
})
}
private _options: ClientOptions
private _host: string
private _headers: Payload
private _query: Payload
private _requestOptions: RequestOptions
constructor (options: ClientOptions & RetryOptions) {
if (!MONGO_REG.test(options.appId)) {
throw new Error(`appId: ${options.appId} is not a valid mongo object id`)
}
if (!Array.isArray(options.appSecrets) || options.appSecrets.length === 0) {
throw new Error(`appSecrets required`)
}
if (typeof options.host !== 'string' || options.host === '') {
throw new Error(`host required`)
}
options.timeout = options.timeout == null ? 3000 : options.timeout
options.pool = options.pool == null ?
{ maxSockets: options.maxSockets == null ? 100 : options.maxSockets } : options.pool
options.strictSSL = options.strictSSL === true
options.retryDelay = options.retryDelay == null ? 2000 : options.retryDelay
options.maxAttempts = options.maxAttempts == null ? 3 : options.maxAttempts
this._options = options
this._host = options.host
this._headers = { 'User-Agent': UA }
this._query = {}
this._requestOptions = {
json: true,
forever: true,
strictSSL: options.strictSSL,
timeout: options.timeout,
cert: options.certChain,
key: options.privateKey,
ca: options.rootCert,
pool: options.pool,
time: options.time,
retryDelay: options.retryDelay,
maxAttempts: options.maxAttempts,
retryErrorCodes: options.retryErrorCodes,
useQuerystring: options.useQuerystring,
} as RequestOptions
}
/**
* @returns User-Agent on the client.
*/
get UA (): string {
const ua = this._headers['User-Agent']
return ua == null ? '' : ua
}
/**
* Set User-Agent to the client.
* @param ua User-Agent string.
*/
set UA (ua: string) {
this._headers['User-Agent'] = ua
}
/**
* @returns host on the client.
*/
get host () {
return this._host
}
/**
* @returns preset headers on the client.
*/
get headers () {
return this._headers
}
/**
* @returns preset query on the client.
*/
get query () {
return this._query
}
/**
* @returns preset request options on the client.
*/
get requestOptions () {
return this._requestOptions
}
/**
* Creates (by Object.create) a **new client** instance with given service methods.
* @param serviceMethod service methods that will be mounted to the client.
* @param servicehost service host for the new client.
* @returns a **new client** with the given service methods.
*/
withService<T> (serviceMethod: T, servicehost: string = ''): this & T {
const srv = Object.assign<this, T>(Object.create(this), serviceMethod)
if (servicehost !== '') {
srv._host = servicehost
}
return srv
}
/**
* Creates (by Object.create) a **new client** instance with given request options.
* @param options request options that will be copied into the client.
* @returns a **new client** with the given request options.
*/
withOptions (options: RequestOptions): this {
return Object.assign(Object.create(this), {
_requestOptions: Object.assign({}, this._requestOptions, options),
})
}
/**
* Creates (by Object.create) a **new client** instance with given headers.
* @param headers headers that will be copied into the client.
* @returns a **new client** with the given headers.
*/
withHeaders (headers: Payload): this {
return Object.assign(Object.create(this), {
_headers: Object.assign({}, this._headers, headers),
})
}
/**
* Creates (by Object.create) a **new client** instance with headers copied from the request.
* @param req IncomingMessage object that headers are read from.
* @param headers headers that will be copied into the client.
* @returns a **new client** with the given headers.
*/
forwardHeaders (req: IncomingMessage | any, ...headers: string[]): this {
if (req.req != null && req.req.headers != null) {
req = req.req
}
if (headers.length === 0)
|
const forwardHeaders: { [key: string]: string | string[] } = {}
for (const header of headers) {
if (req.headers[header] != null) {
forwardHeaders[header] = req.headers[header]
}
}
return this.withHeaders(forwardHeaders)
}
/**
* Creates (by Object.create) a **new client** instance with given query.
* @param query query that will be copied into the client.
* @returns a **new client** with the given query.
*/
withQuery (query: Payload): this {
return Object.assign(Object.create(this), {
_query: Object.assign({}, this._query, query),
})
}
/**
* Creates (by withHeaders) a **new client** instance with given `X-Tenant-Id` and `X-Tenant-Type`.
* @param tenantId that will be added to header as `X-Tenant-Id`.
* @param tenantType that will be added to header as `X-Tenant-Type`.
* @returns a **new client** with the given headers.
*/
withTenant (tenantId: string, tenantType = 'organization') {
return this.withHeaders({
'X-Tenant-Id': tenantId,
'X-Tenant-Type': tenantType,
})
}
/**
* Creates (by withHeaders) a **new client** instance with given `X-Operator-ID`.
* @param operatorId that will be added to header as `X-Operator-ID`.
* @returns a **new client** with the given headers.
*/
withOperator (operatorId: string) {
return this.withHeaders({
'X-Operator-ID': operatorId,
})
}
/**
* Creates a JWT token string with given payload and client's appSecrets.
* @param payload Payload to sign, should be a literal object.
* @param options some JWT sign options.
* @returns a token string.
*/
signToken (payload: Payload, options?: jwt.SignOptions) {
return jwt.sign(payload, this._options.appSecrets[0], options)
}
/**
* Creates a periodically changing JWT token string with appId and appSecrets.
* @param periodical period in seconds, defaults to 3600s.
* @param options some JWT sign options.
* @returns a token string.
*/
signAppToken (periodical: number = 3600, options?: jwt.SignOptions) {
const iat = Math.floor(Date.now() / (1000 * periodical)) * periodical
const payload = {
iat,
exp: iat + Math.floor(1.1 * periodical),
_appId: this._options.appId,
}
// the token changes once per period (default: hourly), optimizing for server-side caching.
return this.signToken(payload, options)
}
/**
* Decode a JWT token string to a literal object payload.
* @param token token to decode.
* @param options some JWT decode options.
* @returns a literal object.
*/
decodeToken (token: string, options?: jwt.DecodeOptions): Payload {
return jwt.decode(token, options) as Payload
}
/**
* Decode and verify a JWT token string to a literal object payload.
* If verification fails, a 401 error is thrown (created by the 'http-errors' module).
* @param token token to decode.
* @param options
|
{
headers = FORWARD_HEADERS
}
|
conditional_block
|
cluster.go
|
use the same node labels as the primary
spec.UserLabels[config.LABEL_NODE_LABEL_KEY] = ""
spec.UserLabels[config.LABEL_NODE_LABEL_VALUE] = ""
labels := make(map[string]string)
labels[config.LABEL_PG_CLUSTER] = cl.Spec.Name
spec.ClusterName = cl.Spec.Name
uniqueName := util.RandStringBytesRmndr(4)
labels[config.LABEL_NAME] = cl.Spec.Name + "-" + uniqueName
spec.Name = labels[config.LABEL_NAME]
newInstance := &crv1.Pgreplica{
ObjectMeta: metav1.ObjectMeta{
Name: labels[config.LABEL_NAME],
Labels: labels,
},
Spec: spec,
Status: crv1.PgreplicaStatus{
State: crv1.PgreplicaStateCreated,
Message: "Created, not processed yet",
},
}
_, err = clientset.CrunchydataV1().Pgreplicas(namespace).Create(newInstance)
if err != nil {
log.Error(" in creating Pgreplica instance" + err.Error())
publishClusterCreateFailure(cl, err.Error())
}
}
}
}
// AddClusterBootstrap creates the resources needed to bootstrap a new cluster from an existing
// data source. Specifically, this function creates the bootstrap job that will be run to
// bootstrap the cluster, along with supporting resources (e.g. ConfigMaps and volumes).
func AddClusterBootstrap(clientset kubeapi.Interface, cluster *crv1.Pgcluster) error {
namespace := cluster.GetNamespace()
if err := operator.CreatePGHAConfigMap(clientset, cluster, namespace); err != nil &&
!kerrors.IsAlreadyExists(err) {
publishClusterCreateFailure(cluster, err.Error())
return err
}
dataVolume, walVolume, tablespaceVolumes, err := pvc.CreateMissingPostgreSQLVolumes(
clientset, cluster, namespace,
cluster.Annotations[config.ANNOTATION_CURRENT_PRIMARY], cluster.Spec.PrimaryStorage)
if err != nil {
publishClusterCreateFailure(cluster, err.Error())
return err
}
if err := addClusterBootstrapJob(clientset, cluster, namespace, dataVolume,
walVolume, tablespaceVolumes); err != nil && !kerrors.IsAlreadyExists(err) {
publishClusterCreateFailure(cluster, err.Error())
return err
}
patch, err := json.Marshal(map[string]interface{}{
"status": crv1.PgclusterStatus{
State: crv1.PgclusterStateBootstrapping,
Message: "Bootstapping cluster from an existing data source",
},
})
if err == nil {
_, err = clientset.CrunchydataV1().Pgclusters(namespace).Patch(cluster.Name, types.MergePatchType, patch)
}
if err != nil {
return err
}
return nil
}
// AddBootstrapRepo creates a pgBackRest repository and associated service to use when
// bootstrapping a cluster from an existing data source. If an existing repo is detected
// and is being used to bootstrap another cluster, then an error is returned. If an existing
// repo is detected and is not associated with a bootstrap job (but rather an active cluster),
// then no action is taken and the function returns. Also, in addition to returning an error
// in the event an error is encountered, the function also returns a 'repoCreated' bool that
// specifies whether or not a repo was actually created.
func AddBootstrapRepo(clientset kubernetes.Interface, cluster *crv1.Pgcluster) (repoCreated bool, err error) {
restoreClusterName := cluster.Spec.PGDataSource.RestoreFrom
repoName := fmt.Sprintf(util.BackrestRepoServiceName, restoreClusterName)
found := true
repoDeployment, err := clientset.AppsV1().Deployments(cluster.GetNamespace()).Get(
repoName, metav1.GetOptions{})
if err != nil {
if !kerrors.IsNotFound(err) {
return
}
found = false
}
if !found {
if err = backrest.CreateRepoDeployment(clientset, cluster, false, true, 1); err != nil {
return
}
repoCreated = true
} else if _, ok := repoDeployment.GetLabels()[config.LABEL_PGHA_BOOTSTRAP]; ok {
err = fmt.Errorf("Unable to create bootstrap repo %s to bootstrap cluster %s "+
"(namespace %s) because it is already running to bootstrap another cluster",
repoName, cluster.GetName(), cluster.GetNamespace())
return
}
return
}
// DeleteClusterBase ...
func DeleteClusterBase(clientset kubernetes.Interface, cl *crv1.Pgcluster, namespace string) {
DeleteCluster(clientset, cl, namespace)
//delete any existing configmaps
if err := deleteConfigMaps(clientset, cl.Spec.Name, namespace); err != nil {
log.Error(err)
}
//delete any existing pgtasks ???
//publish delete cluster event
topics := make([]string, 1)
topics[0] = events.EventTopicCluster
f := events.EventDeleteClusterFormat{
EventHeader: events.EventHeader{
Namespace: namespace,
Username: cl.ObjectMeta.Labels[config.LABEL_PGOUSER],
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventDeleteCluster,
},
Clustername: cl.Spec.Name,
}
if err := events.Publish(f); err != nil {
log.Error(err)
}
}
// ScaleBase ...
func ScaleBase(clientset kubeapi.Interface, replica *crv1.Pgreplica, namespace string) {
if replica.Spec.Status == crv1.CompletedStatus {
log.Warn("crv1 pgreplica " + replica.Spec.Name + " is already marked complete, will not recreate")
return
}
//get the pgcluster CRD to base the replica off of
cluster, err := clientset.CrunchydataV1().Pgclusters(namespace).Get(replica.Spec.ClusterName, metav1.GetOptions{})
if err != nil {
return
}
dataVolume, walVolume, tablespaceVolumes, err := pvc.CreateMissingPostgreSQLVolumes(
clientset, cluster, namespace, replica.Spec.Name, replica.Spec.ReplicaStorage)
if err != nil {
log.Error(err)
publishScaleError(namespace, replica.ObjectMeta.Labels[config.LABEL_PGOUSER], cluster)
return
}
//update the replica CRD pvcname
err = util.Patch(clientset.Discovery().RESTClient(), "/spec/replicastorage/name", dataVolume.PersistentVolumeClaimName, crv1.PgreplicaResourcePlural, replica.Spec.Name, namespace)
if err != nil {
log.Error("error in pvcname patch " + err.Error())
}
//create the replica service if it doesn't exist
if err = scaleReplicaCreateMissingService(clientset, replica, cluster, namespace); err != nil {
log.Error(err)
publishScaleError(namespace, replica.ObjectMeta.Labels[config.LABEL_PGOUSER], cluster)
return
}
//instantiate the replica
if err = scaleReplicaCreateDeployment(clientset, replica, cluster, namespace, dataVolume, walVolume, tablespaceVolumes); err != nil {
publishScaleError(namespace, replica.ObjectMeta.Labels[config.LABEL_PGOUSER], cluster)
return
}
//update the replica CRD status
err = util.Patch(clientset.Discovery().RESTClient(), "/spec/status", crv1.CompletedStatus, crv1.PgreplicaResourcePlural, replica.Spec.Name, namespace)
if err != nil {
log.Error("error in status patch " + err.Error())
}
//publish event for replica creation
topics := make([]string, 1)
topics[0] = events.EventTopicCluster
f := events.EventScaleClusterFormat{
EventHeader: events.EventHeader{
Namespace: namespace,
Username: replica.ObjectMeta.Labels[config.LABEL_PGOUSER],
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventScaleCluster,
},
Clustername: cluster.Spec.UserLabels[config.LABEL_PG_CLUSTER],
Replicaname: cluster.Spec.UserLabels[config.LABEL_REPLICA_NAME],
}
if err = events.Publish(f); err != nil {
log.Error(err.Error())
}
}
// ScaleDownBase ...
func ScaleDownBase(clientset kubeapi.Interface, replica *crv1.Pgreplica, namespace string) {
//get the pgcluster CRD for this replica
_, err := clientset.CrunchydataV1().Pgclusters(namespace).Get(replica.Spec.ClusterName, metav1.GetOptions{})
if err != nil {
return
}
DeleteReplica(clientset, replica, namespace)
//publish event for scale down
topics := make([]string, 1)
topics[0] = events.EventTopicCluster
f := events.EventScaleDownClusterFormat{
EventHeader: events.EventHeader{
Namespace: namespace,
Username: replica.ObjectMeta.Labels[config.LABEL_PGOUSER],
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventScaleDownCluster,
},
Clustername: replica.Spec.ClusterName,
}
err = events.Publish(f)
if err != nil {
log.Error(err.Error())
return
}
}
|
random_line_split
|
||
cluster.go
|
{}
// now, simply deep copy the values from the CRD
if cluster.Spec.Resources != nil {
deployment.Spec.Template.Spec.Containers[0].Resources.Requests = cluster.Spec.Resources.DeepCopy()
}
if cluster.Spec.Limits != nil {
deployment.Spec.Template.Spec.Containers[0].Resources.Limits = cluster.Spec.Limits.DeepCopy()
}
// Before applying the update, we want to explicitly stop PostgreSQL on each
// instance. This prevents PostgreSQL from having to boot up in crash
// recovery mode.
//
// If an error is returned, we only issue a warning
if err := stopPostgreSQLInstance(clientset, restConfig, deployment); err != nil {
log.Warn(err)
}
// update the deployment with the new values
if _, err := clientset.AppsV1().Deployments(deployment.Namespace).Update(&deployment); err != nil {
return err
}
}
return nil
}
// UpdateTablespaces updates the PostgreSQL instance Deployments to update
// what tablespaces are mounted.
// Though any new tablespaces are already present in the CRD, to do less work
// this function takes a map of only the tablespaces that are being added, so we
// only have to check and create the PVCs that are being mounted at this time.
//
// To do this, iterate through the tablespace mount map that is present in the
// new cluster.
func UpdateTablespaces(clientset kubernetes.Interface, restConfig *rest.Config,
cluster *crv1.Pgcluster, newTablespaces map[string]crv1.PgStorageSpec) error {
// first, get a list of all of the instance deployments for the cluster
deployments, err := operator.GetInstanceDeployments(clientset, cluster)
if err != nil {
return err
}
tablespaceVolumes := make([]map[string]operator.StorageResult, len(deployments.Items))
// now we can start creating the new tablespaces! First, create the new
// PVCs. The PVCs are created for each **instance** in the cluster, as every
// instance needs to have a distinct PVC for each tablespace
for i, deployment := range deployments.Items {
tablespaceVolumes[i] = make(map[string]operator.StorageResult)
for tablespaceName, storageSpec := range newTablespaces {
// get the name of the tablespace PVC for that instance
tablespacePVCName := operator.GetTablespacePVCName(deployment.Name, tablespaceName)
log.Debugf("creating tablespace PVC [%s] for [%s]", tablespacePVCName, deployment.Name)
// and now create it! If it errors, we just need to return, which
// potentially leaves things in an inconsistent state, but at this point
// only PVC objects have been created
tablespaceVolumes[i][tablespaceName], err = pvc.CreateIfNotExists(clientset,
storageSpec, tablespacePVCName, cluster.Name, cluster.Namespace)
if err != nil {
return err
}
}
}
// now the fun step: update each deployment with the new volumes
for i, deployment := range deployments.Items {
log.Debugf("attach tablespace volumes to [%s]", deployment.Name)
// iterate through each table space and prepare the Volume and
// VolumeMount clause for each instance
for tablespaceName := range newTablespaces {
// this is the volume to be added for the tablespace
volume := v1.Volume{
Name: operator.GetTablespaceVolumeName(tablespaceName),
VolumeSource: tablespaceVolumes[i][tablespaceName].VolumeSource(),
}
// add the volume to the list of volumes
deployment.Spec.Template.Spec.Volumes = append(deployment.Spec.Template.Spec.Volumes, volume)
// now add the volume mount point to that of the database container
volumeMount := v1.VolumeMount{
MountPath: fmt.Sprintf("%s%s", config.VOLUME_TABLESPACE_PATH_PREFIX, tablespaceName),
Name: operator.GetTablespaceVolumeName(tablespaceName),
}
// we can do this as we always know that the "database" container is the
// first container in the list
deployment.Spec.Template.Spec.Containers[0].VolumeMounts = append(
deployment.Spec.Template.Spec.Containers[0].VolumeMounts, volumeMount)
// add any supplemental groups specified in storage configuration.
// SecurityContext is always initialized because we use fsGroup.
deployment.Spec.Template.Spec.SecurityContext.SupplementalGroups = append(
deployment.Spec.Template.Spec.SecurityContext.SupplementalGroups,
tablespaceVolumes[i][tablespaceName].SupplementalGroups...)
}
// find the "PGHA_TABLESPACES" value and update it with the new tablespace
// name list
ok := false
for i, envVar := range deployment.Spec.Template.Spec.Containers[0].Env {
// yup, it's an old fashioned linear time lookup
if envVar.Name == "PGHA_TABLESPACES" {
deployment.Spec.Template.Spec.Containers[0].Env[i].Value = operator.GetTablespaceNames(
cluster.Spec.TablespaceMounts)
ok = true
}
}
// if it's not found, we need to add it to the env
if !ok {
envVar := v1.EnvVar{
Name: "PGHA_TABLESPACES",
Value: operator.GetTablespaceNames(cluster.Spec.TablespaceMounts),
}
deployment.Spec.Template.Spec.Containers[0].Env = append(deployment.Spec.Template.Spec.Containers[0].Env, envVar)
}
// Before applying the update, we want to explicitly stop PostgreSQL on each
// instance. This prevents PostgreSQL from having to boot up in crash
// recovery mode.
//
// If an error is returned, we only issue a warning
if err := stopPostgreSQLInstance(clientset, restConfig, deployment); err != nil {
log.Warn(err)
}
// finally, update the Deployment. Potential to put things into an
// inconsistent state if any of these updates fail
if _, err := clientset.AppsV1().Deployments(deployment.Namespace).Update(&deployment); err != nil {
return err
}
}
return nil
}
// annotateBackrestSecret annotates the pgBackRest repository secret with relevant cluster
// configuration as needed to support bootstrapping from the repository after the cluster
// has been deleted
func annotateBackrestSecret(clientset kubernetes.Interface, cluster *crv1.Pgcluster) error {
clusterName := cluster.GetName()
namespace := cluster.GetNamespace()
// simple helper that takes two config options, returning the first if populated, and
// if not, returning the second (which also might not be populated)
cfg := func(cl, op string) string {
if cl != "" {
return cl
}
return op
}
cl := cluster.Spec
op := operator.Pgo.Cluster
values := map[string]string{
config.ANNOTATION_PG_PORT: cluster.Spec.Port,
config.ANNOTATION_REPO_PATH: util.GetPGBackRestRepoPath(*cluster),
config.ANNOTATION_S3_BUCKET: cfg(cl.BackrestS3Bucket, op.BackrestS3Bucket),
config.ANNOTATION_S3_ENDPOINT: cfg(cl.BackrestS3Endpoint, op.BackrestS3Endpoint),
config.ANNOTATION_S3_REGION: cfg(cl.BackrestS3Region, op.BackrestS3Region),
config.ANNOTATION_SSHD_PORT: strconv.Itoa(operator.Pgo.Cluster.BackrestPort),
config.ANNOTATION_SUPPLEMENTAL_GROUPS: cluster.Spec.BackrestStorage.SupplementalGroups,
config.ANNOTATION_S3_URI_STYLE: cfg(cl.BackrestS3URIStyle, op.BackrestS3URIStyle),
config.ANNOTATION_S3_VERIFY_TLS: cfg(cl.BackrestS3VerifyTLS, op.BackrestS3VerifyTLS),
}
valuesJSON, err := json.Marshal(values)
if err != nil {
return err
}
secretName := fmt.Sprintf(util.BackrestRepoSecretName, clusterName)
patchString := fmt.Sprintf(`{"metadata":{"annotations":%s}}`, string(valuesJSON))
log.Debugf("About to patch secret %s (namespace %s) using:\n%s", secretName, namespace,
patchString)
if _, err := clientset.CoreV1().Secrets(namespace).Patch(secretName, types.MergePatchType,
[]byte(patchString)); err != nil {
return err
}
return nil
}
func deleteConfigMaps(clientset kubernetes.Interface, clusterName, ns string) error {
label := fmt.Sprintf("pg-cluster=%s", clusterName)
list, err := clientset.CoreV1().ConfigMaps(ns).List(metav1.ListOptions{LabelSelector: label})
if err != nil {
return fmt.Errorf("No configMaps found for selector: %s", label)
}
for _, configmap := range list.Items {
err := clientset.CoreV1().ConfigMaps(ns).Delete(configmap.Name, &metav1.DeleteOptions{})
if err != nil {
return err
}
}
return nil
}
func
|
publishClusterCreateFailure
|
identifier_name
|
|
cluster.go
|
through the tablespace mount map that is present in the
// new cluster.
func UpdateTablespaces(clientset kubernetes.Interface, restConfig *rest.Config,
cluster *crv1.Pgcluster, newTablespaces map[string]crv1.PgStorageSpec) error {
// first, get a list of all of the instance deployments for the cluster
deployments, err := operator.GetInstanceDeployments(clientset, cluster)
if err != nil {
return err
}
tablespaceVolumes := make([]map[string]operator.StorageResult, len(deployments.Items))
// now we can start creating the new tablespaces! First, create the new
// PVCs. The PVCs are created for each **instance** in the cluster, as every
// instance needs to have a distinct PVC for each tablespace
for i, deployment := range deployments.Items {
tablespaceVolumes[i] = make(map[string]operator.StorageResult)
for tablespaceName, storageSpec := range newTablespaces {
// get the name of the tablespace PVC for that instance
tablespacePVCName := operator.GetTablespacePVCName(deployment.Name, tablespaceName)
log.Debugf("creating tablespace PVC [%s] for [%s]", tablespacePVCName, deployment.Name)
// and now create it! If it errors, we just need to return, which
// potentially leaves things in an inconsistent state, but at this point
// only PVC objects have been created
tablespaceVolumes[i][tablespaceName], err = pvc.CreateIfNotExists(clientset,
storageSpec, tablespacePVCName, cluster.Name, cluster.Namespace)
if err != nil {
return err
}
}
}
// now the fun step: update each deployment with the new volumes
for i, deployment := range deployments.Items {
log.Debugf("attach tablespace volumes to [%s]", deployment.Name)
// iterate through each table space and prepare the Volume and
// VolumeMount clause for each instance
for tablespaceName := range newTablespaces {
// this is the volume to be added for the tablespace
volume := v1.Volume{
Name: operator.GetTablespaceVolumeName(tablespaceName),
VolumeSource: tablespaceVolumes[i][tablespaceName].VolumeSource(),
}
// add the volume to the list of volumes
deployment.Spec.Template.Spec.Volumes = append(deployment.Spec.Template.Spec.Volumes, volume)
// now add the volume mount point to that of the database container
volumeMount := v1.VolumeMount{
MountPath: fmt.Sprintf("%s%s", config.VOLUME_TABLESPACE_PATH_PREFIX, tablespaceName),
Name: operator.GetTablespaceVolumeName(tablespaceName),
}
// we can do this as we always know that the "database" container is the
// first container in the list
deployment.Spec.Template.Spec.Containers[0].VolumeMounts = append(
deployment.Spec.Template.Spec.Containers[0].VolumeMounts, volumeMount)
// add any supplemental groups specified in storage configuration.
// SecurityContext is always initialized because we use fsGroup.
deployment.Spec.Template.Spec.SecurityContext.SupplementalGroups = append(
deployment.Spec.Template.Spec.SecurityContext.SupplementalGroups,
tablespaceVolumes[i][tablespaceName].SupplementalGroups...)
}
// find the "PGHA_TABLESPACES" value and update it with the new tablespace
// name list
ok := false
for i, envVar := range deployment.Spec.Template.Spec.Containers[0].Env {
// yup, it's an old fashioned linear time lookup
if envVar.Name == "PGHA_TABLESPACES" {
deployment.Spec.Template.Spec.Containers[0].Env[i].Value = operator.GetTablespaceNames(
cluster.Spec.TablespaceMounts)
ok = true
}
}
// if it's not found, we need to add it to the env
if !ok {
envVar := v1.EnvVar{
Name: "PGHA_TABLESPACES",
Value: operator.GetTablespaceNames(cluster.Spec.TablespaceMounts),
}
deployment.Spec.Template.Spec.Containers[0].Env = append(deployment.Spec.Template.Spec.Containers[0].Env, envVar)
}
// Before applying the update, we want to explicitly stop PostgreSQL on each
// instance. This prevents PostgreSQL from having to boot up in crash
// recovery mode.
//
// If an error is returned, we only issue a warning
if err := stopPostgreSQLInstance(clientset, restConfig, deployment); err != nil {
log.Warn(err)
}
// finally, update the Deployment. Potential to put things into an
// inconsistent state if any of these updates fail
if _, err := clientset.AppsV1().Deployments(deployment.Namespace).Update(&deployment); err != nil {
return err
}
}
return nil
}
// annotateBackrestSecret annotates the pgBackRest repository secret with relevant cluster
// configuration as needed to support bootstrapping from the repository after the cluster
// has been deleted
func annotateBackrestSecret(clientset kubernetes.Interface, cluster *crv1.Pgcluster) error {
clusterName := cluster.GetName()
namespace := cluster.GetNamespace()
// simple helper that takes two config options, returning the first if populated, and
// if not, returning the second (which also might not be populated)
cfg := func(cl, op string) string {
if cl != "" {
return cl
}
return op
}
cl := cluster.Spec
op := operator.Pgo.Cluster
values := map[string]string{
config.ANNOTATION_PG_PORT: cluster.Spec.Port,
config.ANNOTATION_REPO_PATH: util.GetPGBackRestRepoPath(*cluster),
config.ANNOTATION_S3_BUCKET: cfg(cl.BackrestS3Bucket, op.BackrestS3Bucket),
config.ANNOTATION_S3_ENDPOINT: cfg(cl.BackrestS3Endpoint, op.BackrestS3Endpoint),
config.ANNOTATION_S3_REGION: cfg(cl.BackrestS3Region, op.BackrestS3Region),
config.ANNOTATION_SSHD_PORT: strconv.Itoa(operator.Pgo.Cluster.BackrestPort),
config.ANNOTATION_SUPPLEMENTAL_GROUPS: cluster.Spec.BackrestStorage.SupplementalGroups,
config.ANNOTATION_S3_URI_STYLE: cfg(cl.BackrestS3URIStyle, op.BackrestS3URIStyle),
config.ANNOTATION_S3_VERIFY_TLS: cfg(cl.BackrestS3VerifyTLS, op.BackrestS3VerifyTLS),
}
valuesJSON, err := json.Marshal(values)
if err != nil {
return err
}
secretName := fmt.Sprintf(util.BackrestRepoSecretName, clusterName)
patchString := fmt.Sprintf(`{"metadata":{"annotations":%s}}`, string(valuesJSON))
log.Debugf("About to patch secret %s (namespace %s) using:\n%s", secretName, namespace,
patchString)
if _, err := clientset.CoreV1().Secrets(namespace).Patch(secretName, types.MergePatchType,
[]byte(patchString)); err != nil {
return err
}
return nil
}
func deleteConfigMaps(clientset kubernetes.Interface, clusterName, ns string) error {
label := fmt.Sprintf("pg-cluster=%s", clusterName)
list, err := clientset.CoreV1().ConfigMaps(ns).List(metav1.ListOptions{LabelSelector: label})
if err != nil {
return fmt.Errorf("No configMaps found for selector: %s", label)
}
for _, configmap := range list.Items {
err := clientset.CoreV1().ConfigMaps(ns).Delete(configmap.Name, &metav1.DeleteOptions{})
if err != nil {
return err
}
}
return nil
}
func publishClusterCreateFailure(cl *crv1.Pgcluster, errorMsg string) {
pgouser := cl.ObjectMeta.Labels[config.LABEL_PGOUSER]
topics := make([]string, 1)
topics[0] = events.EventTopicCluster
f := events.EventCreateClusterFailureFormat{
EventHeader: events.EventHeader{
Namespace: cl.ObjectMeta.Namespace,
Username: pgouser,
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventCreateClusterFailure,
},
Clustername: cl.ObjectMeta.Name,
ErrorMessage: errorMsg,
WorkflowID: cl.ObjectMeta.Labels[config.LABEL_WORKFLOW_ID],
}
err := events.Publish(f)
if err != nil {
log.Error(err.Error())
}
}
func publishClusterShutdown(cluster crv1.Pgcluster) error
|
{
clusterName := cluster.Name
//capture the cluster creation event
topics := make([]string, 1)
topics[0] = events.EventTopicCluster
f := events.EventShutdownClusterFormat{
EventHeader: events.EventHeader{
Namespace: cluster.Namespace,
Username: cluster.Spec.UserLabels[config.LABEL_PGOUSER],
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventShutdownCluster,
},
Clustername: clusterName,
}
if err := events.Publish(f); err != nil {
|
identifier_body
|
|
cluster.go
|
,
WorkflowID: cl.ObjectMeta.Labels[config.LABEL_WORKFLOW_ID],
}
err = events.Publish(f)
if err != nil {
log.Error(err.Error())
}
//add replicas if requested
if cl.Spec.Replicas != "" {
replicaCount, err := strconv.Atoi(cl.Spec.Replicas)
if err != nil {
log.Error("error in replicas value " + err.Error())
publishClusterCreateFailure(cl, err.Error())
return
}
//create a CRD for each replica
for i := 0; i < replicaCount; i++ {
spec := crv1.PgreplicaSpec{}
//get the storage config
spec.ReplicaStorage = cl.Spec.ReplicaStorage
spec.UserLabels = cl.Spec.UserLabels
//the replica should not use the same node labels as the primary
spec.UserLabels[config.LABEL_NODE_LABEL_KEY] = ""
spec.UserLabels[config.LABEL_NODE_LABEL_VALUE] = ""
labels := make(map[string]string)
labels[config.LABEL_PG_CLUSTER] = cl.Spec.Name
spec.ClusterName = cl.Spec.Name
uniqueName := util.RandStringBytesRmndr(4)
labels[config.LABEL_NAME] = cl.Spec.Name + "-" + uniqueName
spec.Name = labels[config.LABEL_NAME]
newInstance := &crv1.Pgreplica{
ObjectMeta: metav1.ObjectMeta{
Name: labels[config.LABEL_NAME],
Labels: labels,
},
Spec: spec,
Status: crv1.PgreplicaStatus{
State: crv1.PgreplicaStateCreated,
Message: "Created, not processed yet",
},
}
_, err = clientset.CrunchydataV1().Pgreplicas(namespace).Create(newInstance)
if err != nil {
log.Error(" in creating Pgreplica instance" + err.Error())
publishClusterCreateFailure(cl, err.Error())
}
}
}
}
// AddClusterBootstrap creates the resources needed to bootstrap a new cluster from an existing
// data source. Specifically, this function creates the bootstrap job that will be run to
// bootstrap the cluster, along with supporting resources (e.g. ConfigMaps and volumes).
func AddClusterBootstrap(clientset kubeapi.Interface, cluster *crv1.Pgcluster) error {
namespace := cluster.GetNamespace()
if err := operator.CreatePGHAConfigMap(clientset, cluster, namespace); err != nil &&
!kerrors.IsAlreadyExists(err) {
publishClusterCreateFailure(cluster, err.Error())
return err
}
dataVolume, walVolume, tablespaceVolumes, err := pvc.CreateMissingPostgreSQLVolumes(
clientset, cluster, namespace,
cluster.Annotations[config.ANNOTATION_CURRENT_PRIMARY], cluster.Spec.PrimaryStorage)
if err != nil {
publishClusterCreateFailure(cluster, err.Error())
return err
}
if err := addClusterBootstrapJob(clientset, cluster, namespace, dataVolume,
walVolume, tablespaceVolumes); err != nil && !kerrors.IsAlreadyExists(err) {
publishClusterCreateFailure(cluster, err.Error())
return err
}
patch, err := json.Marshal(map[string]interface{}{
"status": crv1.PgclusterStatus{
State: crv1.PgclusterStateBootstrapping,
Message: "Bootstapping cluster from an existing data source",
},
})
if err == nil {
_, err = clientset.CrunchydataV1().Pgclusters(namespace).Patch(cluster.Name, types.MergePatchType, patch)
}
if err != nil {
return err
}
return nil
}
// AddBootstrapRepo creates a pgBackRest repository and associated service to use when
// bootstrapping a cluster from an existing data source. If an existing repo is detected
// and is being used to bootstrap another cluster, then an error is returned. If an existing
// repo is detected and is not associated with a bootstrap job (but rather an active cluster),
// then no action is taken and the function returns. Also, in addition to returning an error
// in the event an error is encountered, the function also returns a 'repoCreated' bool that
// specifies whether or not a repo was actually created.
func AddBootstrapRepo(clientset kubernetes.Interface, cluster *crv1.Pgcluster) (repoCreated bool, err error) {
restoreClusterName := cluster.Spec.PGDataSource.RestoreFrom
repoName := fmt.Sprintf(util.BackrestRepoServiceName, restoreClusterName)
found := true
repoDeployment, err := clientset.AppsV1().Deployments(cluster.GetNamespace()).Get(
repoName, metav1.GetOptions{})
if err != nil {
if !kerrors.IsNotFound(err) {
return
}
found = false
}
if !found {
if err = backrest.CreateRepoDeployment(clientset, cluster, false, true, 1); err != nil {
return
}
repoCreated = true
} else if _, ok := repoDeployment.GetLabels()[config.LABEL_PGHA_BOOTSTRAP]; ok {
err = fmt.Errorf("Unable to create bootstrap repo %s to bootstrap cluster %s "+
"(namespace %s) because it is already running to bootstrap another cluster",
repoName, cluster.GetName(), cluster.GetNamespace())
return
}
return
}
// DeleteClusterBase ...
func DeleteClusterBase(clientset kubernetes.Interface, cl *crv1.Pgcluster, namespace string) {
DeleteCluster(clientset, cl, namespace)
//delete any existing configmaps
if err := deleteConfigMaps(clientset, cl.Spec.Name, namespace); err != nil {
log.Error(err)
}
//delete any existing pgtasks ???
//publish delete cluster event
topics := make([]string, 1)
topics[0] = events.EventTopicCluster
f := events.EventDeleteClusterFormat{
EventHeader: events.EventHeader{
Namespace: namespace,
Username: cl.ObjectMeta.Labels[config.LABEL_PGOUSER],
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventDeleteCluster,
},
Clustername: cl.Spec.Name,
}
if err := events.Publish(f); err != nil {
log.Error(err)
}
}
// ScaleBase ...
func ScaleBase(clientset kubeapi.Interface, replica *crv1.Pgreplica, namespace string) {
if replica.Spec.Status == crv1.CompletedStatus {
log.Warn("crv1 pgreplica " + replica.Spec.Name + " is already marked complete, will not recreate")
return
}
//get the pgcluster CRD to base the replica off of
cluster, err := clientset.CrunchydataV1().Pgclusters(namespace).Get(replica.Spec.ClusterName, metav1.GetOptions{})
if err != nil
|
dataVolume, walVolume, tablespaceVolumes, err := pvc.CreateMissingPostgreSQLVolumes(
clientset, cluster, namespace, replica.Spec.Name, replica.Spec.ReplicaStorage)
if err != nil {
log.Error(err)
publishScaleError(namespace, replica.ObjectMeta.Labels[config.LABEL_PGOUSER], cluster)
return
}
//update the replica CRD pvcname
err = util.Patch(clientset.Discovery().RESTClient(), "/spec/replicastorage/name", dataVolume.PersistentVolumeClaimName, crv1.PgreplicaResourcePlural, replica.Spec.Name, namespace)
if err != nil {
log.Error("error in pvcname patch " + err.Error())
}
//create the replica service if it doesn't exist
if err = scaleReplicaCreateMissingService(clientset, replica, cluster, namespace); err != nil {
log.Error(err)
publishScaleError(namespace, replica.ObjectMeta.Labels[config.LABEL_PGOUSER], cluster)
return
}
//instantiate the replica
if err = scaleReplicaCreateDeployment(clientset, replica, cluster, namespace, dataVolume, walVolume, tablespaceVolumes); err != nil {
publishScaleError(namespace, replica.ObjectMeta.Labels[config.LABEL_PGOUSER], cluster)
return
}
//update the replica CRD status
err = util.Patch(clientset.Discovery().RESTClient(), "/spec/status", crv1.CompletedStatus, crv1.PgreplicaResourcePlural, replica.Spec.Name, namespace)
if err != nil {
log.Error("error in status patch " + err.Error())
}
//publish event for replica creation
topics := make([]string, 1)
topics[0] = events.EventTopicCluster
f := events.EventScaleClusterFormat{
EventHeader: events.EventHeader{
Namespace: namespace,
Username: replica.ObjectMeta.Labels[config.LABEL_PGOUSER],
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventScaleCluster,
},
Clustername: cluster.Spec.UserLabels[config.LABEL_PG_CLUSTER],
Replicaname: cluster.Spec.UserLabels[config.LABEL_REPLICA_NAME],
}
if err = events.Publish(f); err != nil {
log.Error(err.Error())
}
}
// ScaleDownBase ...
func ScaleDownBase(clientset kubeapi.Interface, replica *crv1.Pgreplica, namespace string) {
//get the pgcluster CR
|
{
return
}
|
conditional_block
|
train.py
|
': False,
'random_crop': False,
'random_brightness': False,
'repeat_dataset': None,
# model params
'model_name': 'ganomaly',
'latent_size': 100,
'intermediate_size': 0, # only valid for cvae
'n_filters': 64,
'n_extra_layers': 0,
'w_adv': 1, # only valid for GANomaly
'w_rec': 50, # only valid for GANomaly
'w_enc': 1, # only valid for GANomaly
# debugging params
'train_steps': None,
'eval_steps': None,
'log_level': 'info',
'debug': False,
# input/output dir params
'data_dir': './trainig/data',
'model_dir': './trainig/model',
'output_data_dir': './trainig/output'
}
def build_model(args) -> tf.keras.Model:
image_shape = (args.image_size, args.image_size, args.image_channels)
def build_default(model_class, **kwargs):
return model_class(
input_shape=image_shape,
latent_size=args.latent_size,
n_filters=args.n_filters,
n_extra_layers=args.n_extra_layers,
**kwargs
)
def compile_default(model):
model.compile(
optimizer=tf.keras.optimizers.Adam(
learning_rate=args.learning_rate),
loss=tf.keras.losses.MeanSquaredError(),
metrics=[
tf.keras.losses.MeanAbsoluteError(),
tf.keras.losses.BinaryCrossentropy()
]
)
return model
def build_ganomaly():
model = build_default(GANomaly)
model.compile(
loss={
'adv': tf.keras.losses.MeanSquaredError(),
'rec': tf.keras.losses.MeanAbsoluteError(),
'enc': tf.keras.losses.MeanSquaredError(),
'dis': tf.keras.losses.BinaryCrossentropy()
},
loss_weights={
'adv': args.w_adv,
'rec': args.w_rec,
'enc': args.w_enc
},
optimizer={
'gen': tf.keras.optimizers.Adam(
learning_rate=args.learning_rate,
beta_1=0.5, beta_2=0.999),
'dis': tf.keras.optimizers.Adam(
learning_rate=args.learning_rate,
beta_1=0.5, beta_2=0.999)
}
)
return model
def switcher_default():
warning("Unknown model_name, using 'ganomaly' as default!")
return build_ganomaly()
switcher = {
'ganomaly': build_ganomaly,
'cae': lambda: compile_default(build_default(CAE)),
'cnae': lambda: compile_default(build_default(CNAE)),
'cvae': lambda: compile_default(build_default(CVAE,
intermediate_size=args.intermediate_size))
}
model = switcher.get(args.model_name, switcher_default)()
model.build((None, *image_shape))
return model
def get_prepared_datasets(args):
# get dataset by name with simple trial and error
try:
train_ds = get_labeled_dataset(
category=args.dataset_name, split='train', image_channels=args.image_channels, binary_labels=True)
test_ds = get_labeled_dataset(
category=args.dataset_name, split='test', image_channels=args.image_channels, binary_labels=True)
args.image_channels = 3 if args.image_channels == 0 else args.image_channels
except ValueError:
try:
(train_images, train_labels), (test_images, test_labels) = create_anomaly_dataset(
dataset=get_dataset(args.dataset_name), abnormal_class=args.abnormal_class)
args.dataset_name += str(args.abnormal_class)
train_ds = tf.data.Dataset.from_tensor_slices(
(train_images, train_labels))
test_ds = tf.data.Dataset.from_tensor_slices(
(test_images, test_labels))
args.image_channels = train_images.shape[-1]
except ValueError:
raise ValueError(
"{} isn't a valid dataset".format(args.dataset_name))
def resize_image(image, label):
image = tf.image.resize(image, (args.image_size, args.image_size))
return image, label
train_ds = train_ds.map(
resize_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
test_ds = test_ds.map(
resize_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
if args.cache_path:
cache_dir = os.path.join(args.cache_path, 'tfdata_cache_{}_{}_{}'.format(
args.dataset_name, args.image_size, args.image_channels))
os.makedirs(cache_dir, exist_ok=True)
train_ds = train_ds.cache(os.path.join(cache_dir, 'train'))
test_ds = test_ds.cache(os.path.join(cache_dir, 'test'))
if args.repeat_dataset:
train_ds = train_ds.repeat(args.repeat_dataset)
if args.random_flip or args.random_crop or args.random_brightness:
|
if args.random_flip:
image = tf.image.random_flip_left_right(image)
image = tf.image.random_flip_up_down(image)
if args.random_crop:
image_shape = (args.image_size, args.image_size,
args.image_channels)
image = tf.image.resize_with_crop_or_pad(
image, image_shape[-3] + 6, image_shape[-2] + 6)
image = tf.image.random_crop(image, size=image_shape)
if args.random_brightness:
image = tf.image.random_brightness(image, max_delta=0.5)
image = tf.clip_by_value(image, 0, 1)
return image, label
train_ds = train_ds.map(
augment_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
if args.shuffle:
train_ds = train_ds.shuffle(args.buffer_size)
if args.prefetch:
train_ds = train_ds.prefetch(args.buffer_size)
test_ds = test_ds.prefetch(args.buffer_size)
return train_ds, test_ds
def main(args):
train_ds, test_ds = get_prepared_datasets(args)
train_count = tf.data.experimental.cardinality(train_ds).numpy()
test_count = tf.data.experimental.cardinality(test_ds).numpy()
info("dataset: train_count: {}, test_count: {}".format(train_count, test_count))
model = build_model(args)
model.summary(print_fn=info)
#model.net_gen.summary(print_fn=info) # TODO call it from summary() of GANomaly
#model.net_dis.summary(print_fn=info)
#model.load_weights('./no/valid/path')
adme = ADModelEvaluator(
test_count=test_count if args.eval_steps is None else args.eval_steps * args.batch_size,
model_dir=args.sm_model_dir or args.model_dir,
early_stopping_patience=args.early_stopping_patience,
reduce_lr_patience=args.reduce_lr_patience
)
results = model.fit(
x=train_ds.batch(args.batch_size),
validation_data=test_ds.batch(args.batch_size),
callbacks=[adme],
epochs=args.epochs,
steps_per_epoch=args.train_steps,
validation_steps=args.eval_steps,
verbose=2
)
# remove the useless per image losses and labels and add test results
del results.history['val_losses']
del results.history['val_labels']
results.history['val_auroc'] = adme.test_results
# https://stackoverflow.com/questions/23613426/write-dictionary-of-lists-to-a-csv-file
info("results: {}".format(json.dumps(
results.history, indent=4, sort_keys=True, default=str)))
critical("END OF SCRIPT REACHED")
def parse_args():
"""
https://docs.python.org/3.6/library/argparse.html
https://sagemaker.readthedocs.io/en/stable/using_tf.html#prepare-a-script-mode-training-script
https://github.com/aws/sagemaker-containers#list-of-provided-environment-variables-by-sagemaker-containers
"""
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def str2logging(v):
return {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL
}.get(v, logging.INFO)
def str2posint(v):
v = int(v)
return v if v > 0 else None
parser = argparse.ArgumentParser()
# training params
parser.add_argument('--epochs', type=int, default=default_args['epochs'])
parser.add_argument('--batch_size', type=int,
default=default_args['batch_size'])
parser.add_argument('--learning_rate', type=float,
default=default_args['learning_rate'])
parser.add_argument('--early_stopping_patience', '--early_stopping', type=int,
default=default_args['early_stopping_patience'])
parser.add_argument('--reduce_lr_patience', type=int,
default=default_args['reduce_lr_patience'])
# tf.data pipeline options
parser.add_argument('--dataset_name', type=str,
default=default_args['dataset_name'])
parser.add_argument
|
def augment_image(image, label):
|
random_line_split
|
train.py
|
False,
'random_crop': False,
'random_brightness': False,
'repeat_dataset': None,
# model params
'model_name': 'ganomaly',
'latent_size': 100,
'intermediate_size': 0, # only valid for cvae
'n_filters': 64,
'n_extra_layers': 0,
'w_adv': 1, # only valid for GANomaly
'w_rec': 50, # only valid for GANomaly
'w_enc': 1, # only valid for GANomaly
# debugging params
'train_steps': None,
'eval_steps': None,
'log_level': 'info',
'debug': False,
# input/output dir params
'data_dir': './trainig/data',
'model_dir': './trainig/model',
'output_data_dir': './trainig/output'
}
def build_model(args) -> tf.keras.Model:
image_shape = (args.image_size, args.image_size, args.image_channels)
def build_default(model_class, **kwargs):
return model_class(
input_shape=image_shape,
latent_size=args.latent_size,
n_filters=args.n_filters,
n_extra_layers=args.n_extra_layers,
**kwargs
)
def compile_default(model):
model.compile(
optimizer=tf.keras.optimizers.Adam(
learning_rate=args.learning_rate),
loss=tf.keras.losses.MeanSquaredError(),
metrics=[
tf.keras.losses.MeanAbsoluteError(),
tf.keras.losses.BinaryCrossentropy()
]
)
return model
def build_ganomaly():
model = build_default(GANomaly)
model.compile(
loss={
'adv': tf.keras.losses.MeanSquaredError(),
'rec': tf.keras.losses.MeanAbsoluteError(),
'enc': tf.keras.losses.MeanSquaredError(),
'dis': tf.keras.losses.BinaryCrossentropy()
},
loss_weights={
'adv': args.w_adv,
'rec': args.w_rec,
'enc': args.w_enc
},
optimizer={
'gen': tf.keras.optimizers.Adam(
learning_rate=args.learning_rate,
beta_1=0.5, beta_2=0.999),
'dis': tf.keras.optimizers.Adam(
learning_rate=args.learning_rate,
beta_1=0.5, beta_2=0.999)
}
)
return model
def switcher_default():
warning("Unknown model_name, using 'ganomaly' as default!")
return build_ganomaly()
switcher = {
'ganomaly': build_ganomaly,
'cae': lambda: compile_default(build_default(CAE)),
'cnae': lambda: compile_default(build_default(CNAE)),
'cvae': lambda: compile_default(build_default(CVAE,
intermediate_size=args.intermediate_size))
}
model = switcher.get(args.model_name, switcher_default)()
model.build((None, *image_shape))
return model
def get_prepared_datasets(args):
# get dataset by name with simple trial and error
try:
train_ds = get_labeled_dataset(
category=args.dataset_name, split='train', image_channels=args.image_channels, binary_labels=True)
test_ds = get_labeled_dataset(
category=args.dataset_name, split='test', image_channels=args.image_channels, binary_labels=True)
args.image_channels = 3 if args.image_channels == 0 else args.image_channels
except ValueError:
try:
(train_images, train_labels), (test_images, test_labels) = create_anomaly_dataset(
dataset=get_dataset(args.dataset_name), abnormal_class=args.abnormal_class)
args.dataset_name += str(args.abnormal_class)
train_ds = tf.data.Dataset.from_tensor_slices(
(train_images, train_labels))
test_ds = tf.data.Dataset.from_tensor_slices(
(test_images, test_labels))
args.image_channels = train_images.shape[-1]
except ValueError:
raise ValueError(
"{} isn't a valid dataset".format(args.dataset_name))
def resize_image(image, label):
image = tf.image.resize(image, (args.image_size, args.image_size))
return image, label
train_ds = train_ds.map(
resize_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
test_ds = test_ds.map(
resize_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
if args.cache_path:
cache_dir = os.path.join(args.cache_path, 'tfdata_cache_{}_{}_{}'.format(
args.dataset_name, args.image_size, args.image_channels))
os.makedirs(cache_dir, exist_ok=True)
train_ds = train_ds.cache(os.path.join(cache_dir, 'train'))
test_ds = test_ds.cache(os.path.join(cache_dir, 'test'))
if args.repeat_dataset:
train_ds = train_ds.repeat(args.repeat_dataset)
if args.random_flip or args.random_crop or args.random_brightness:
def augment_image(image, label):
if args.random_flip:
image = tf.image.random_flip_left_right(image)
image = tf.image.random_flip_up_down(image)
if args.random_crop:
|
if args.random_brightness:
image = tf.image.random_brightness(image, max_delta=0.5)
image = tf.clip_by_value(image, 0, 1)
return image, label
train_ds = train_ds.map(
augment_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
if args.shuffle:
train_ds = train_ds.shuffle(args.buffer_size)
if args.prefetch:
train_ds = train_ds.prefetch(args.buffer_size)
test_ds = test_ds.prefetch(args.buffer_size)
return train_ds, test_ds
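# --- Editor's illustration (not part of the original training script) -----
# get_prepared_datasets() chains the tf.data stages in the order
# map(resize) -> cache -> repeat -> map(augment) -> shuffle -> prefetch.
# Below is a minimal, self-contained sketch of that ordering on synthetic
# tensors; the shapes, the flip augmentation and the buffer sizes are
# placeholders, not the script's real settings. It is never called by the
# training flow.
def _example_input_pipeline(batch_size=8):
    images = tf.random.uniform((32, 16, 16, 3))
    labels = tf.zeros((32,), dtype=tf.int32)
    ds = tf.data.Dataset.from_tensor_slices((images, labels))
    def _resize(image, label):
        return tf.image.resize(image, (32, 32)), label
    def _augment(image, label):
        return tf.image.random_flip_left_right(image), label
    ds = (ds.map(_resize, num_parallel_calls=tf.data.experimental.AUTOTUNE)
            .cache()
            .repeat(2)
            .map(_augment, num_parallel_calls=tf.data.experimental.AUTOTUNE)
            .shuffle(64)
            .prefetch(tf.data.experimental.AUTOTUNE))
    return ds.batch(batch_size)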
def main(args):
train_ds, test_ds = get_prepared_datasets(args)
train_count = tf.data.experimental.cardinality(train_ds).numpy()
test_count = tf.data.experimental.cardinality(test_ds).numpy()
info("dataset: train_count: {}, test_count: {}".format(train_count, test_count))
model = build_model(args)
model.summary(print_fn=info)
#model.net_gen.summary(print_fn=info) # TODO call it from summary() of GANomaly
#model.net_dis.summary(print_fn=info)
#model.load_weights('./no/valid/path')
adme = ADModelEvaluator(
test_count=test_count if args.eval_steps is None else args.eval_steps * args.batch_size,
model_dir=args.sm_model_dir or args.model_dir,
early_stopping_patience=args.early_stopping_patience,
reduce_lr_patience=args.reduce_lr_patience
)
results = model.fit(
x=train_ds.batch(args.batch_size),
validation_data=test_ds.batch(args.batch_size),
callbacks=[adme],
epochs=args.epochs,
steps_per_epoch=args.train_steps,
validation_steps=args.eval_steps,
verbose=2
)
# remove the useless per image losses and labels and add test results
del results.history['val_losses']
del results.history['val_labels']
results.history['val_auroc'] = adme.test_results
# https://stackoverflow.com/questions/23613426/write-dictionary-of-lists-to-a-csv-file
info("results: {}".format(json.dumps(
results.history, indent=4, sort_keys=True, default=str)))
critical("END OF SCRIPT REACHED")
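# --- Editor's illustration (not part of the original training script) -----
# The link in main() above describes writing a dictionary of lists to a CSV
# file. A minimal, self-contained sketch of that idea with the standard
# `csv` module; the history keys and values are placeholders, not the
# script's actual metric names. It is never called by the training flow.
def _example_history_to_csv(path='history.csv'):
    import csv
    history = {'loss': [0.9, 0.5, 0.3], 'val_auroc': [0.6, 0.7, 0.8]}
    with open(path, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(history.keys())           # header row: metric names
        writer.writerows(zip(*history.values()))  # one row per epoch
    return path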
def parse_args():
"""
https://docs.python.org/3.6/library/argparse.html
https://sagemaker.readthedocs.io/en/stable/using_tf.html#prepare-a-script-mode-training-script
https://github.com/aws/sagemaker-containers#list-of-provided-environment-variables-by-sagemaker-containers
"""
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def str2logging(v):
return {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL
}.get(v, logging.INFO)
def str2posint(v):
v = int(v)
return v if v > 0 else None
parser = argparse.ArgumentParser()
# training params
parser.add_argument('--epochs', type=int, default=default_args['epochs'])
parser.add_argument('--batch_size', type=int,
default=default_args['batch_size'])
parser.add_argument('--learning_rate', type=float,
default=default_args['learning_rate'])
parser.add_argument('--early_stopping_patience', '--early_stopping', type=int,
default=default_args['early_stopping_patience'])
parser.add_argument('--reduce_lr_patience', type=int,
default=default_args['reduce_lr_patience'])
    # tf.data pipeline options
parser.add_argument('--dataset_name', type=str,
default=default_args['dataset_name'])
parser.add_argument
|
image_shape = (args.image_size, args.image_size,
args.image_channels)
image = tf.image.resize_with_crop_or_pad(
image, image_shape[-3] + 6, image_shape[-2] + 6)
image = tf.image.random_crop(image, size=image_shape)
|
conditional_block
|
train.py
|
False,
'random_crop': False,
'random_brightness': False,
'repeat_dataset': None,
# model params
'model_name': 'ganomaly',
'latent_size': 100,
'intermediate_size': 0, # only valid for cvae
'n_filters': 64,
'n_extra_layers': 0,
'w_adv': 1, # only valid for GANomaly
'w_rec': 50, # only valid for GANomaly
'w_enc': 1, # only valid for GANomaly
# debugging params
'train_steps': None,
'eval_steps': None,
'log_level': 'info',
'debug': False,
# input/output dir params
    'data_dir': './training/data',
    'model_dir': './training/model',
    'output_data_dir': './training/output'
}
def build_model(args) -> tf.keras.Model:
image_shape = (args.image_size, args.image_size, args.image_channels)
def build_default(model_class, **kwargs):
|
def compile_default(model):
model.compile(
optimizer=tf.keras.optimizers.Adam(
learning_rate=args.learning_rate),
loss=tf.keras.losses.MeanSquaredError(),
metrics=[
tf.keras.losses.MeanAbsoluteError(),
tf.keras.losses.BinaryCrossentropy()
]
)
return model
def build_ganomaly():
model = build_default(GANomaly)
model.compile(
loss={
'adv': tf.keras.losses.MeanSquaredError(),
'rec': tf.keras.losses.MeanAbsoluteError(),
'enc': tf.keras.losses.MeanSquaredError(),
'dis': tf.keras.losses.BinaryCrossentropy()
},
loss_weights={
'adv': args.w_adv,
'rec': args.w_rec,
'enc': args.w_enc
},
optimizer={
'gen': tf.keras.optimizers.Adam(
learning_rate=args.learning_rate,
beta_1=0.5, beta_2=0.999),
'dis': tf.keras.optimizers.Adam(
learning_rate=args.learning_rate,
beta_1=0.5, beta_2=0.999)
}
)
return model
def switcher_default():
warning("Unknown model_name, using 'ganomaly' as default!")
return build_ganomaly()
switcher = {
'ganomaly': build_ganomaly,
'cae': lambda: compile_default(build_default(CAE)),
'cnae': lambda: compile_default(build_default(CNAE)),
'cvae': lambda: compile_default(build_default(CVAE,
intermediate_size=args.intermediate_size))
}
model = switcher.get(args.model_name, switcher_default)()
model.build((None, *image_shape))
return model
def get_prepared_datasets(args):
    # get dataset by name with simple trial and error
try:
train_ds = get_labeled_dataset(
category=args.dataset_name, split='train', image_channels=args.image_channels, binary_labels=True)
test_ds = get_labeled_dataset(
category=args.dataset_name, split='test', image_channels=args.image_channels, binary_labels=True)
args.image_channels = 3 if args.image_channels == 0 else args.image_channels
except ValueError:
try:
(train_images, train_labels), (test_images, test_labels) = create_anomaly_dataset(
dataset=get_dataset(args.dataset_name), abnormal_class=args.abnormal_class)
args.dataset_name += str(args.abnormal_class)
train_ds = tf.data.Dataset.from_tensor_slices(
(train_images, train_labels))
test_ds = tf.data.Dataset.from_tensor_slices(
(test_images, test_labels))
args.image_channels = train_images.shape[-1]
except ValueError:
raise ValueError(
"{} isn't a valid dataset".format(args.dataset_name))
def resize_image(image, label):
image = tf.image.resize(image, (args.image_size, args.image_size))
return image, label
train_ds = train_ds.map(
resize_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
test_ds = test_ds.map(
resize_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
if args.cache_path:
cache_dir = os.path.join(args.cache_path, 'tfdata_cache_{}_{}_{}'.format(
args.dataset_name, args.image_size, args.image_channels))
os.makedirs(cache_dir, exist_ok=True)
train_ds = train_ds.cache(os.path.join(cache_dir, 'train'))
test_ds = test_ds.cache(os.path.join(cache_dir, 'test'))
if args.repeat_dataset:
train_ds = train_ds.repeat(args.repeat_dataset)
if args.random_flip or args.random_crop or args.random_brightness:
def augment_image(image, label):
if args.random_flip:
image = tf.image.random_flip_left_right(image)
image = tf.image.random_flip_up_down(image)
if args.random_crop:
image_shape = (args.image_size, args.image_size,
args.image_channels)
image = tf.image.resize_with_crop_or_pad(
image, image_shape[-3] + 6, image_shape[-2] + 6)
image = tf.image.random_crop(image, size=image_shape)
if args.random_brightness:
image = tf.image.random_brightness(image, max_delta=0.5)
image = tf.clip_by_value(image, 0, 1)
return image, label
train_ds = train_ds.map(
augment_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
if args.shuffle:
train_ds = train_ds.shuffle(args.buffer_size)
if args.prefetch:
train_ds = train_ds.prefetch(args.buffer_size)
test_ds = test_ds.prefetch(args.buffer_size)
return train_ds, test_ds
def main(args):
train_ds, test_ds = get_prepared_datasets(args)
train_count = tf.data.experimental.cardinality(train_ds).numpy()
test_count = tf.data.experimental.cardinality(test_ds).numpy()
info("dataset: train_count: {}, test_count: {}".format(train_count, test_count))
model = build_model(args)
model.summary(print_fn=info)
#model.net_gen.summary(print_fn=info) # TODO call it from summary() of GANomaly
#model.net_dis.summary(print_fn=info)
#model.load_weights('./no/valid/path')
adme = ADModelEvaluator(
test_count=test_count if args.eval_steps is None else args.eval_steps * args.batch_size,
model_dir=args.sm_model_dir or args.model_dir,
early_stopping_patience=args.early_stopping_patience,
reduce_lr_patience=args.reduce_lr_patience
)
results = model.fit(
x=train_ds.batch(args.batch_size),
validation_data=test_ds.batch(args.batch_size),
callbacks=[adme],
epochs=args.epochs,
steps_per_epoch=args.train_steps,
validation_steps=args.eval_steps,
verbose=2
)
# remove the useless per image losses and labels and add test results
del results.history['val_losses']
del results.history['val_labels']
results.history['val_auroc'] = adme.test_results
# https://stackoverflow.com/questions/23613426/write-dictionary-of-lists-to-a-csv-file
info("results: {}".format(json.dumps(
results.history, indent=4, sort_keys=True, default=str)))
critical("END OF SCRIPT REACHED")
def parse_args():
"""
https://docs.python.org/3.6/library/argparse.html
https://sagemaker.readthedocs.io/en/stable/using_tf.html#prepare-a-script-mode-training-script
https://github.com/aws/sagemaker-containers#list-of-provided-environment-variables-by-sagemaker-containers
"""
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def str2logging(v):
return {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL
}.get(v, logging.INFO)
def str2posint(v):
v = int(v)
return v if v > 0 else None
parser = argparse.ArgumentParser()
# training params
parser.add_argument('--epochs', type=int, default=default_args['epochs'])
parser.add_argument('--batch_size', type=int,
default=default_args['batch_size'])
parser.add_argument('--learning_rate', type=float,
default=default_args['learning_rate'])
parser.add_argument('--early_stopping_patience', '--early_stopping', type=int,
default=default_args['early_stopping_patience'])
parser.add_argument('--reduce_lr_patience', type=int,
default=default_args['reduce_lr_patience'])
    # tf.data pipeline options
parser.add_argument('--dataset_name', type=str,
default=default_args['dataset_name'])
parser.add_argument
|
return model_class(
input_shape=image_shape,
latent_size=args.latent_size,
n_filters=args.n_filters,
n_extra_layers=args.n_extra_layers,
**kwargs
)
|
identifier_body
|
train.py
|
False,
'random_crop': False,
'random_brightness': False,
'repeat_dataset': None,
# model params
'model_name': 'ganomaly',
'latent_size': 100,
'intermediate_size': 0, # only valid for cvae
'n_filters': 64,
'n_extra_layers': 0,
'w_adv': 1, # only valid for GANomaly
'w_rec': 50, # only valid for GANomaly
'w_enc': 1, # only valid for GANomaly
# debugging params
'train_steps': None,
'eval_steps': None,
'log_level': 'info',
'debug': False,
# input/output dir params
    'data_dir': './training/data',
    'model_dir': './training/model',
    'output_data_dir': './training/output'
}
def
|
(args) -> tf.keras.Model:
image_shape = (args.image_size, args.image_size, args.image_channels)
def build_default(model_class, **kwargs):
return model_class(
input_shape=image_shape,
latent_size=args.latent_size,
n_filters=args.n_filters,
n_extra_layers=args.n_extra_layers,
**kwargs
)
def compile_default(model):
model.compile(
optimizer=tf.keras.optimizers.Adam(
learning_rate=args.learning_rate),
loss=tf.keras.losses.MeanSquaredError(),
metrics=[
tf.keras.losses.MeanAbsoluteError(),
tf.keras.losses.BinaryCrossentropy()
]
)
return model
def build_ganomaly():
model = build_default(GANomaly)
model.compile(
loss={
'adv': tf.keras.losses.MeanSquaredError(),
'rec': tf.keras.losses.MeanAbsoluteError(),
'enc': tf.keras.losses.MeanSquaredError(),
'dis': tf.keras.losses.BinaryCrossentropy()
},
loss_weights={
'adv': args.w_adv,
'rec': args.w_rec,
'enc': args.w_enc
},
optimizer={
'gen': tf.keras.optimizers.Adam(
learning_rate=args.learning_rate,
beta_1=0.5, beta_2=0.999),
'dis': tf.keras.optimizers.Adam(
learning_rate=args.learning_rate,
beta_1=0.5, beta_2=0.999)
}
)
return model
def switcher_default():
warning("Unknown model_name, using 'ganomaly' as default!")
return build_ganomaly()
switcher = {
'ganomaly': build_ganomaly,
'cae': lambda: compile_default(build_default(CAE)),
'cnae': lambda: compile_default(build_default(CNAE)),
'cvae': lambda: compile_default(build_default(CVAE,
intermediate_size=args.intermediate_size))
}
model = switcher.get(args.model_name, switcher_default)()
model.build((None, *image_shape))
return model
def get_prepared_datasets(args):
    # get dataset by name with simple trial and error
try:
train_ds = get_labeled_dataset(
category=args.dataset_name, split='train', image_channels=args.image_channels, binary_labels=True)
test_ds = get_labeled_dataset(
category=args.dataset_name, split='test', image_channels=args.image_channels, binary_labels=True)
args.image_channels = 3 if args.image_channels == 0 else args.image_channels
except ValueError:
try:
(train_images, train_labels), (test_images, test_labels) = create_anomaly_dataset(
dataset=get_dataset(args.dataset_name), abnormal_class=args.abnormal_class)
args.dataset_name += str(args.abnormal_class)
train_ds = tf.data.Dataset.from_tensor_slices(
(train_images, train_labels))
test_ds = tf.data.Dataset.from_tensor_slices(
(test_images, test_labels))
args.image_channels = train_images.shape[-1]
except ValueError:
raise ValueError(
"{} isn't a valid dataset".format(args.dataset_name))
def resize_image(image, label):
image = tf.image.resize(image, (args.image_size, args.image_size))
return image, label
train_ds = train_ds.map(
resize_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
test_ds = test_ds.map(
resize_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
if args.cache_path:
cache_dir = os.path.join(args.cache_path, 'tfdata_cache_{}_{}_{}'.format(
args.dataset_name, args.image_size, args.image_channels))
os.makedirs(cache_dir, exist_ok=True)
train_ds = train_ds.cache(os.path.join(cache_dir, 'train'))
test_ds = test_ds.cache(os.path.join(cache_dir, 'test'))
if args.repeat_dataset:
train_ds = train_ds.repeat(args.repeat_dataset)
if args.random_flip or args.random_crop or args.random_brightness:
def augment_image(image, label):
if args.random_flip:
image = tf.image.random_flip_left_right(image)
image = tf.image.random_flip_up_down(image)
if args.random_crop:
image_shape = (args.image_size, args.image_size,
args.image_channels)
image = tf.image.resize_with_crop_or_pad(
image, image_shape[-3] + 6, image_shape[-2] + 6)
image = tf.image.random_crop(image, size=image_shape)
if args.random_brightness:
image = tf.image.random_brightness(image, max_delta=0.5)
image = tf.clip_by_value(image, 0, 1)
return image, label
train_ds = train_ds.map(
augment_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
if args.shuffle:
train_ds = train_ds.shuffle(args.buffer_size)
if args.prefetch:
train_ds = train_ds.prefetch(args.buffer_size)
test_ds = test_ds.prefetch(args.buffer_size)
return train_ds, test_ds
def main(args):
train_ds, test_ds = get_prepared_datasets(args)
train_count = tf.data.experimental.cardinality(train_ds).numpy()
test_count = tf.data.experimental.cardinality(test_ds).numpy()
info("dataset: train_count: {}, test_count: {}".format(train_count, test_count))
model = build_model(args)
model.summary(print_fn=info)
#model.net_gen.summary(print_fn=info) # TODO call it from summary() of GANomaly
#model.net_dis.summary(print_fn=info)
#model.load_weights('./no/valid/path')
adme = ADModelEvaluator(
test_count=test_count if args.eval_steps is None else args.eval_steps * args.batch_size,
model_dir=args.sm_model_dir or args.model_dir,
early_stopping_patience=args.early_stopping_patience,
reduce_lr_patience=args.reduce_lr_patience
)
results = model.fit(
x=train_ds.batch(args.batch_size),
validation_data=test_ds.batch(args.batch_size),
callbacks=[adme],
epochs=args.epochs,
steps_per_epoch=args.train_steps,
validation_steps=args.eval_steps,
verbose=2
)
# remove the useless per image losses and labels and add test results
del results.history['val_losses']
del results.history['val_labels']
results.history['val_auroc'] = adme.test_results
# https://stackoverflow.com/questions/23613426/write-dictionary-of-lists-to-a-csv-file
info("results: {}".format(json.dumps(
results.history, indent=4, sort_keys=True, default=str)))
critical("END OF SCRIPT REACHED")
def parse_args():
"""
https://docs.python.org/3.6/library/argparse.html
https://sagemaker.readthedocs.io/en/stable/using_tf.html#prepare-a-script-mode-training-script
https://github.com/aws/sagemaker-containers#list-of-provided-environment-variables-by-sagemaker-containers
"""
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def str2logging(v):
return {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL
}.get(v, logging.INFO)
def str2posint(v):
v = int(v)
return v if v > 0 else None
parser = argparse.ArgumentParser()
# training params
parser.add_argument('--epochs', type=int, default=default_args['epochs'])
parser.add_argument('--batch_size', type=int,
default=default_args['batch_size'])
parser.add_argument('--learning_rate', type=float,
default=default_args['learning_rate'])
parser.add_argument('--early_stopping_patience', '--early_stopping', type=int,
default=default_args['early_stopping_patience'])
parser.add_argument('--reduce_lr_patience', type=int,
default=default_args['reduce_lr_patience'])
    # tf.data pipeline options
parser.add_argument('--dataset_name', type=str,
default=default_args['dataset_name'])
parser.add_argument
|
build_model
|
identifier_name
|
lib.rs
|
.
* **arbitrary** -
Enabling this feature introduces a public dependency on the
[`arbitrary`](https://crates.io/crates/arbitrary)
crate. Namely, it implements the `Arbitrary` trait from that crate for the
[`Ast`](crate::ast::Ast) type. This feature is disabled by default.
*/
#![no_std]
#![forbid(unsafe_code)]
#![deny(missing_docs, rustdoc::broken_intra_doc_links)]
#![warn(missing_debug_implementations)]
// MSRV(1.62): Allow unused warnings. Needed for the 'allow' below,
// since the warning is no longer triggered in newer Rust releases.
// Once the 'allow(mutable_borrow_reservation_conflict)' can be
// removed, we can remove the 'allow(renamed_and_removed_lints)' too.
#![allow(renamed_and_removed_lints)]
// MSRV(1.62): This gets triggered on Rust <1.62, and since our MSRV
// is Rust 1.60 at the time of writing, a warning is displayed. But
// the lang team decided the code pattern flagged by this warning is
// OK, so the warning is innocuous. We can remove this explicit allow
// once we get to a Rust release where the warning is no longer
// triggered. I believe that's Rust 1.62.
#![allow(mutable_borrow_reservation_conflict)]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#[cfg(any(test, feature = "std"))]
extern crate std;
extern crate alloc;
pub use crate::{
error::Error,
parser::{parse, Parser, ParserBuilder},
unicode::UnicodeWordError,
};
use alloc::string::String;
pub mod ast;
mod debug;
mod either;
mod error;
pub mod hir;
mod parser;
mod rank;
mod unicode;
mod unicode_tables;
pub mod utf8;
/// Escapes all regular expression meta characters in `text`.
///
/// The string returned may be safely used as a literal in a regular
/// expression.
pub fn escape(text: &str) -> String {
let mut quoted = String::new();
escape_into(text, &mut quoted);
quoted
}
/// Escapes all meta characters in `text` and writes the result into `buf`.
///
/// This will append escape characters into the given buffer. The characters
/// that are appended are safe to use as a literal in a regular expression.
pub fn escape_into(text: &str, buf: &mut String) {
buf.reserve(text.len());
for c in text.chars() {
if is_meta_character(c) {
buf.push('\\');
}
buf.push(c);
}
}
/// Returns true if the given character has significance in a regex.
///
/// Generally speaking, these are the only characters which _must_ be escaped
/// in order to match their literal meaning. For example, to match a literal
/// `|`, one could write `\|`. Sometimes escaping isn't always necessary. For
/// example, `-` is treated as a meta character because of its significance
/// for writing ranges inside of character classes, but the regex `-` will
/// match a literal `-` because `-` has no special meaning outside of character
/// classes.
///
/// In order to determine whether a character may be escaped at all, the
/// [`is_escapeable_character`] routine should be used. The difference between
/// `is_meta_character` and `is_escapeable_character` is that the latter will
/// return true for some characters that are _not_ meta characters. For
/// example, `%` and `\%` both match a literal `%` in all contexts. In other
/// words, `is_escapeable_character` includes "superfluous" escapes.
///
/// Note that the set of characters for which this function returns `true` or
/// `false` is fixed and won't change in a semver compatible release. (In this
/// case, "semver compatible release" actually refers to the `regex` crate
/// itself, since reducing or expanding the set of meta characters would be a
/// breaking change for not just `regex-syntax` but also `regex` itself.)
///
/// # Example
///
/// ```
/// use regex_syntax::is_meta_character;
///
/// assert!(is_meta_character('?'));
/// assert!(is_meta_character('-'));
/// assert!(is_meta_character('&'));
/// assert!(is_meta_character('#'));
///
/// assert!(!is_meta_character('%'));
/// assert!(!is_meta_character('/'));
/// assert!(!is_meta_character('!'));
/// assert!(!is_meta_character('"'));
/// assert!(!is_meta_character('e'));
/// ```
pub fn is_meta_character(c: char) -> bool {
match c {
'\\' | '.' | '+' | '*' | '?' | '(' | ')' | '|' | '[' | ']' | '{'
| '}' | '^' | '$' | '#' | '&' | '-' | '~' => true,
_ => false,
}
}
/// Returns true if the given character can be escaped in a regex.
///
/// This returns true in all cases that `is_meta_character` returns true, but
/// also returns true in some cases where `is_meta_character` returns false.
/// For example, `%` is not a meta character, but it is escapeable. That is,
/// `%` and `\%` both match a literal `%` in all contexts.
///
/// The purpose of this routine is to provide knowledge about what characters
/// may be escaped. Namely, most regex engines permit "superfluous" escapes
/// where characters without any special significance may be escaped even
/// though there is no actual _need_ to do so.
///
/// This will return false for some characters. For example, `e` is not
/// escapeable. Therefore, `\e` will either result in a parse error (which is
/// true today), or it could backwards compatibly evolve into a new construct
/// with its own meaning. Indeed, that is the purpose of banning _some_
/// superfluous escapes: it provides a way to evolve the syntax in a compatible
/// manner.
///
/// # Example
///
/// ```
/// use regex_syntax::is_escapeable_character;
///
/// assert!(is_escapeable_character('?'));
/// assert!(is_escapeable_character('-'));
/// assert!(is_escapeable_character('&'));
/// assert!(is_escapeable_character('#'));
/// assert!(is_escapeable_character('%'));
/// assert!(is_escapeable_character('/'));
/// assert!(is_escapeable_character('!'));
/// assert!(is_escapeable_character('"'));
///
/// assert!(!is_escapeable_character('e'));
/// ```
pub fn is_escapeable_character(c: char) -> bool {
// Certainly escapeable if it's a meta character.
if is_meta_character(c) {
return true;
}
// Any character that isn't ASCII is definitely not escapeable. There's
// no real need to allow things like \☃ right?
if !c.is_ascii() {
return false;
}
// Otherwise, we basically say that everything is escapeable unless it's a
// letter or digit. Things like \3 are either octal (when enabled) or an
// error, and we should keep it that way. Otherwise, letters are reserved
// for adding new syntax in a backwards compatible way.
match c {
'0'..='9' | 'A'..='Z' | 'a'..='z' => false,
// While not currently supported, we keep these as not escapeable to
// give us some flexibility with respect to supporting the \< and
// \> word boundary assertions in the future. By rejecting them as
// escapeable, \< and \> will result in a parse error. Thus, we can
// turn them into something else in the future without it being a
// backwards incompatible change.
'<' | '>' => false,
_ => true,
}
}
/// Returns true if and only if the given character is a Unicode word
/// character.
///
/// A Unicode word character is defined by
/// [UTS#18 Annex C](https://unicode.org/reports/tr18/#Compatibility_Properties).
/// In particular, a character
/// is considered a word character if it is in either of the `Alphabetic` or
/// `Join_Control` properties, or is in one of the `Decimal_Number`, `Mark`
/// or `Connector_Punctuation` general categories.
///
/// # Panics
///
/// If the `unicode-perl` feature is not enabled, then this function
/// panics. For this reason, it is recommended that callers use
/// [`try_is_word_character`] instead.
pub fn is_word_character(c: char) -> bool {
try_is_word_character(c).expect("unicode-perl feature must be enabled")
}
/// Returns true if and only if the given character is a Unicode word
/// character.
///
/// A Unicode word character is defined by
/// [UTS#18 Annex C](https://unicode.org/reports/tr18/#Compatibility_Properties).
/// In particular, a character
/// is considered a word character if it is in either of the `Alphabetic` or
/// `Join_Control` properties, or is in one of the `Decimal_Number`, `Mark`
/// or `Connector_Punctuation` general categories.
///
/// # Errors
///
/// If the `unicode-perl` feature is not enabled, then this function always
/// returns an error.
pub fn try_is_word_character(
c: char,
) -> core::result::Result<bool, UnicodeWordError> {
|
unicode::is_word_character(c)
}
|
identifier_body
|
|
lib.rs
|
to the size of the original pattern string (in bytes).
This includes the type's corresponding destructors. (One exception to this
is literal extraction, but this will eventually get fixed.)
# Error reporting
The `Display` implementations on all `Error` types exposed in this library
provide nice human readable errors that are suitable for showing to end users
in a monospace font.
# Literal extraction
This crate provides limited support for [literal extraction from `Hir`
values](hir::literal). Be warned that literal extraction uses recursion, and
therefore, stack size proportional to the size of the `Hir`.
The purpose of literal extraction is to speed up searches. That is, if you
know a regular expression must match a prefix or suffix literal, then it is
often quicker to search for instances of that literal, and then confirm or deny
the match using the full regular expression engine. These optimizations are
done automatically in the `regex` crate.
# Crate features
An important feature provided by this crate is its Unicode support. This
includes things like case folding, boolean properties, general categories,
scripts and Unicode-aware support for the Perl classes `\w`, `\s` and `\d`.
However, a downside of this support is that it requires bundling several
Unicode data tables that are substantial in size.
A fair number of use cases do not require full Unicode support. For this
reason, this crate exposes a number of features to control which Unicode
data is available.
If a regular expression attempts to use a Unicode feature that is not available
because the corresponding crate feature was disabled, then translating that
regular expression to an `Hir` will return an error. (It is still possible
to construct an `Ast` for such a regular expression, since Unicode data is not
used until translation to an `Hir`.) Stated differently, enabling or disabling
any of the features below can only add or subtract from the total set of valid
regular expressions. Enabling or disabling a feature will never modify the
match semantics of a regular expression.
The following features are available:
* **std** -
Enables support for the standard library. This feature is enabled by default.
When disabled, only `core` and `alloc` are used. Otherwise, enabling `std`
generally just enables `std::error::Error` trait impls for the various error
types.
* **unicode** -
Enables all Unicode features. This feature is enabled by default, and will
always cover all Unicode features, even if more are added in the future.
* **unicode-age** -
Provide the data for the
[Unicode `Age` property](https://www.unicode.org/reports/tr44/tr44-24.html#Character_Age).
This makes it possible to use classes like `\p{Age:6.0}` to refer to all
codepoints first introduced in Unicode 6.0.
* **unicode-bool** -
Provide the data for numerous Unicode boolean properties. The full list
is not included here, but contains properties like `Alphabetic`, `Emoji`,
`Lowercase`, `Math`, `Uppercase` and `White_Space`.
* **unicode-case** -
Provide the data for case insensitive matching using
[Unicode's "simple loose matches" specification](https://www.unicode.org/reports/tr18/#Simple_Loose_Matches).
* **unicode-gencat** -
Provide the data for
[Unicode general categories](https://www.unicode.org/reports/tr44/tr44-24.html#General_Category_Values).
This includes, but is not limited to, `Decimal_Number`, `Letter`,
`Math_Symbol`, `Number` and `Punctuation`.
* **unicode-perl** -
Provide the data for supporting the Unicode-aware Perl character classes,
corresponding to `\w`, `\s` and `\d`. This is also necessary for using
Unicode-aware word boundary assertions. Note that if this feature is
disabled, the `\s` and `\d` character classes are still available if the
`unicode-bool` and `unicode-gencat` features are enabled, respectively.
* **unicode-script** -
Provide the data for
[Unicode scripts and script extensions](https://www.unicode.org/reports/tr24/).
This includes, but is not limited to, `Arabic`, `Cyrillic`, `Hebrew`,
`Latin` and `Thai`.
* **unicode-segment** -
Provide the data necessary to provide the properties used to implement the
[Unicode text segmentation algorithms](https://www.unicode.org/reports/tr29/).
This enables using classes like `\p{gcb=Extend}`, `\p{wb=Katakana}` and
`\p{sb=ATerm}`.
* **arbitrary** -
Enabling this feature introduces a public dependency on the
[`arbitrary`](https://crates.io/crates/arbitrary)
crate. Namely, it implements the `Arbitrary` trait from that crate for the
[`Ast`](crate::ast::Ast) type. This feature is disabled by default.
*/
#![no_std]
#![forbid(unsafe_code)]
#![deny(missing_docs, rustdoc::broken_intra_doc_links)]
#![warn(missing_debug_implementations)]
// MSRV(1.62): Allow unused warnings. Needed for the 'allow' below,
// since the warning is no longer triggered in newer Rust releases.
// Once the 'allow(mutable_borrow_reservation_conflict)' can be
// removed, we can remove the 'allow(renamed_and_removed_lints)' too.
#![allow(renamed_and_removed_lints)]
// MSRV(1.62): This gets triggered on Rust <1.62, and since our MSRV
// is Rust 1.60 at the time of writing, a warning is displayed. But
// the lang team decided the code pattern flagged by this warning is
// OK, so the warning is innocuous. We can remove this explicit allow
// once we get to a Rust release where the warning is no longer
// triggered. I believe that's Rust 1.62.
#![allow(mutable_borrow_reservation_conflict)]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#[cfg(any(test, feature = "std"))]
extern crate std;
extern crate alloc;
pub use crate::{
error::Error,
parser::{parse, Parser, ParserBuilder},
unicode::UnicodeWordError,
};
use alloc::string::String;
pub mod ast;
mod debug;
mod either;
mod error;
pub mod hir;
mod parser;
mod rank;
mod unicode;
mod unicode_tables;
pub mod utf8;
/// Escapes all regular expression meta characters in `text`.
///
/// The string returned may be safely used as a literal in a regular
/// expression.
pub fn escape(text: &str) -> String {
let mut quoted = String::new();
escape_into(text, &mut quoted);
quoted
}
/// Escapes all meta characters in `text` and writes the result into `buf`.
///
/// This will append escape characters into the given buffer. The characters
/// that are appended are safe to use as a literal in a regular expression.
pub fn escape_into(text: &str, buf: &mut String) {
buf.reserve(text.len());
for c in text.chars() {
if is_meta_character(c) {
buf.push('\\');
}
buf.push(c);
}
}
/// Returns true if the given character has significance in a regex.
///
/// Generally speaking, these are the only characters which _must_ be escaped
/// in order to match their literal meaning. For example, to match a literal
/// `|`, one could write `\|`. Sometimes escaping isn't always necessary. For
/// example, `-` is treated as a meta character because of its significance
/// for writing ranges inside of character classes, but the regex `-` will
/// match a literal `-` because `-` has no special meaning outside of character
/// classes.
///
/// In order to determine whether a character may be escaped at all, the
/// [`is_escapeable_character`] routine should be used. The difference between
/// `is_meta_character` and `is_escapeable_character` is that the latter will
/// return true for some characters that are _not_ meta characters. For
/// example, `%` and `\%` both match a literal `%` in all contexts. In other
/// words, `is_escapeable_character` includes "superfluous" escapes.
///
/// Note that the set of characters for which this function returns `true` or
/// `false` is fixed and won't change in a semver compatible release. (In this
/// case, "semver compatible release" actually refers to the `regex` crate
/// itself, since reducing or expanding the set of meta characters would be a
/// breaking change for not just `regex-syntax` but also `regex` itself.)
///
/// # Example
///
/// ```
/// use regex_syntax::is_meta_character;
///
/// assert!(is_meta_character('?'));
/// assert!(is_meta_character('-'));
/// assert!(is_meta_character('&'));
/// assert!(is_meta_character('#'));
///
/// assert!(!is_meta_character('%'));
/// assert!(!is_meta_character('/'));
/// assert!(!is_meta_character('!'));
/// assert!(!is_meta_character('"'));
/// assert!(!is_meta_character('e'));
/// ```
pub fn
|
(c: char) -> bool {
match c {
'\\' | '.' | '+' | '*' | '?' | '(' | ')' | '|' | '[' | ']' |
|
is_meta_character
|
identifier_name
|
lib.rs
|
.
# Literal extraction
This crate provides limited support for [literal extraction from `Hir`
values](hir::literal). Be warned that literal extraction uses recursion, and
therefore, stack size proportional to the size of the `Hir`.
The purpose of literal extraction is to speed up searches. That is, if you
know a regular expression must match a prefix or suffix literal, then it is
often quicker to search for instances of that literal, and then confirm or deny
the match using the full regular expression engine. These optimizations are
done automatically in the `regex` crate.
# Crate features
An important feature provided by this crate is its Unicode support. This
includes things like case folding, boolean properties, general categories,
scripts and Unicode-aware support for the Perl classes `\w`, `\s` and `\d`.
However, a downside of this support is that it requires bundling several
Unicode data tables that are substantial in size.
A fair number of use cases do not require full Unicode support. For this
reason, this crate exposes a number of features to control which Unicode
data is available.
If a regular expression attempts to use a Unicode feature that is not available
because the corresponding crate feature was disabled, then translating that
regular expression to an `Hir` will return an error. (It is still possible
to construct an `Ast` for such a regular expression, since Unicode data is not
used until translation to an `Hir`.) Stated differently, enabling or disabling
any of the features below can only add or subtract from the total set of valid
regular expressions. Enabling or disabling a feature will never modify the
match semantics of a regular expression.
The following features are available:
* **std** -
Enables support for the standard library. This feature is enabled by default.
When disabled, only `core` and `alloc` are used. Otherwise, enabling `std`
generally just enables `std::error::Error` trait impls for the various error
types.
* **unicode** -
Enables all Unicode features. This feature is enabled by default, and will
always cover all Unicode features, even if more are added in the future.
* **unicode-age** -
Provide the data for the
[Unicode `Age` property](https://www.unicode.org/reports/tr44/tr44-24.html#Character_Age).
This makes it possible to use classes like `\p{Age:6.0}` to refer to all
codepoints first introduced in Unicode 6.0.
* **unicode-bool** -
Provide the data for numerous Unicode boolean properties. The full list
is not included here, but contains properties like `Alphabetic`, `Emoji`,
`Lowercase`, `Math`, `Uppercase` and `White_Space`.
* **unicode-case** -
Provide the data for case insensitive matching using
[Unicode's "simple loose matches" specification](https://www.unicode.org/reports/tr18/#Simple_Loose_Matches).
* **unicode-gencat** -
Provide the data for
[Unicode general categories](https://www.unicode.org/reports/tr44/tr44-24.html#General_Category_Values).
This includes, but is not limited to, `Decimal_Number`, `Letter`,
`Math_Symbol`, `Number` and `Punctuation`.
* **unicode-perl** -
Provide the data for supporting the Unicode-aware Perl character classes,
corresponding to `\w`, `\s` and `\d`. This is also necessary for using
Unicode-aware word boundary assertions. Note that if this feature is
disabled, the `\s` and `\d` character classes are still available if the
`unicode-bool` and `unicode-gencat` features are enabled, respectively.
* **unicode-script** -
Provide the data for
[Unicode scripts and script extensions](https://www.unicode.org/reports/tr24/).
This includes, but is not limited to, `Arabic`, `Cyrillic`, `Hebrew`,
`Latin` and `Thai`.
* **unicode-segment** -
Provide the data necessary to provide the properties used to implement the
[Unicode text segmentation algorithms](https://www.unicode.org/reports/tr29/).
This enables using classes like `\p{gcb=Extend}`, `\p{wb=Katakana}` and
`\p{sb=ATerm}`.
* **arbitrary** -
Enabling this feature introduces a public dependency on the
[`arbitrary`](https://crates.io/crates/arbitrary)
crate. Namely, it implements the `Arbitrary` trait from that crate for the
[`Ast`](crate::ast::Ast) type. This feature is disabled by default.
*/
#![no_std]
#![forbid(unsafe_code)]
#![deny(missing_docs, rustdoc::broken_intra_doc_links)]
#![warn(missing_debug_implementations)]
// MSRV(1.62): Allow unused warnings. Needed for the 'allow' below,
// since the warning is no longer triggered in newer Rust releases.
// Once the 'allow(mutable_borrow_reservation_conflict)' can be
// removed, we can remove the 'allow(renamed_and_removed_lints)' too.
#![allow(renamed_and_removed_lints)]
// MSRV(1.62): This gets triggered on Rust <1.62, and since our MSRV
// is Rust 1.60 at the time of writing, a warning is displayed. But
// the lang team decided the code pattern flagged by this warning is
// OK, so the warning is innocuous. We can remove this explicit allow
// once we get to a Rust release where the warning is no longer
// triggered. I believe that's Rust 1.62.
#![allow(mutable_borrow_reservation_conflict)]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#[cfg(any(test, feature = "std"))]
extern crate std;
extern crate alloc;
pub use crate::{
error::Error,
parser::{parse, Parser, ParserBuilder},
unicode::UnicodeWordError,
};
use alloc::string::String;
pub mod ast;
mod debug;
mod either;
mod error;
pub mod hir;
mod parser;
mod rank;
mod unicode;
mod unicode_tables;
pub mod utf8;
/// Escapes all regular expression meta characters in `text`.
///
/// The string returned may be safely used as a literal in a regular
/// expression.
pub fn escape(text: &str) -> String {
let mut quoted = String::new();
escape_into(text, &mut quoted);
quoted
}
/// Escapes all meta characters in `text` and writes the result into `buf`.
///
/// This will append escape characters into the given buffer. The characters
/// that are appended are safe to use as a literal in a regular expression.
pub fn escape_into(text: &str, buf: &mut String) {
buf.reserve(text.len());
for c in text.chars() {
if is_meta_character(c) {
buf.push('\\');
}
buf.push(c);
}
}
/// Returns true if the given character has significance in a regex.
///
/// Generally speaking, these are the only characters which _must_ be escaped
/// in order to match their literal meaning. For example, to match a literal
/// `|`, one could write `\|`. Sometimes escaping isn't always necessary. For
/// example, `-` is treated as a meta character because of its significance
/// for writing ranges inside of character classes, but the regex `-` will
/// match a literal `-` because `-` has no special meaning outside of character
/// classes.
///
/// In order to determine whether a character may be escaped at all, the
/// [`is_escapeable_character`] routine should be used. The difference between
/// `is_meta_character` and `is_escapeable_character` is that the latter will
/// return true for some characters that are _not_ meta characters. For
/// example, `%` and `\%` both match a literal `%` in all contexts. In other
/// words, `is_escapeable_character` includes "superfluous" escapes.
///
/// Note that the set of characters for which this function returns `true` or
/// `false` is fixed and won't change in a semver compatible release. (In this
/// case, "semver compatible release" actually refers to the `regex` crate
/// itself, since reducing or expanding the set of meta characters would be a
/// breaking change for not just `regex-syntax` but also `regex` itself.)
///
/// # Example
///
/// ```
/// use regex_syntax::is_meta_character;
///
/// assert!(is_meta_character('?'));
/// assert!(is_meta_character('-'));
/// assert!(is_meta_character('&'));
/// assert!(is_meta_character('#'));
///
/// assert!(!is_meta_character('%'));
/// assert!(!is_meta_character('/'));
/// assert!(!is_meta_character('!'));
/// assert!(!is_meta_character('"'));
/// assert!(!is_meta_character('e'));
/// ```
pub fn is_meta_character(c: char) -> bool {
match c {
'\\' | '.' | '+' | '*' | '?' | '(' | ')' | '|' | '[' | ']' | '{'
| '}' | '^' | '$' | '#' | '&' | '-' | '~' => true,
_ => false,
}
}
/// Returns true if the given character can be escaped in a regex.
|
///
/// This returns true in all cases that `is_meta_character` returns true, but
/// also returns true in some cases where `is_meta_character` returns false.
|
random_line_split
|
|
main.rs
|
())?;
if args.debug {
log::set_max_level(log::LevelFilter::Debug);
}
// Forcefully update the data and re-index if requested.
if args.update_data {
args.download_all_update()?;
args.create_index()?;
return Ok(());
}
// Ensure that the necessary data exists.
if args.download_all()? || args.update_index {
args.create_index()?;
if args.update_index {
return Ok(());
}
}
// Now ensure that the index exists.
if !args.index_dir.exists() {
args.create_index()?;
}
let mut searcher = args.searcher()?;
let results = match args.query {
None => None,
Some(ref query) => Some(searcher.search(&query.parse()?)?),
};
if args.files.is_empty() {
let results = match results {
None => failure::bail!("run with a file to rename or --query"),
Some(ref results) => results,
};
return write_tsv(io::stdout(), &mut searcher, results.as_slice());
}
let mut builder = RenamerBuilder::new();
builder
.min_votes(args.min_votes)
.good_threshold(0.25)
.regex_episode(&args.regex_episode)
.regex_season(&args.regex_season)
.regex_year(&args.regex_year);
if let Some(ref results) = results {
builder.force(choose(&mut searcher, results.as_slice(), 0.25)?);
}
let renamer = builder.build()?;
let proposals = renamer.propose(
&mut searcher,
&args.files,
args.dest_dir,
args.rename_action)?;
if proposals.is_empty() {
failure::bail!("no files to rename");
}
let mut stdout = TabWriter::new(io::stdout());
for p in &proposals {
writeln!(stdout, "{}\t->\t{}", p.src().display(), p.dst().display())?;
}
stdout.flush()?;
if read_yesno(&format!(
"Are you sure you want to {action} the above files? (y/n) ",
action = &args.rename_action
))? {
for p in &proposals {
if let Err(err) = p.rename() {
eprintln!("{}", err);
}
}
}
Ok(())
}
#[derive(Debug)]
struct Args {
data_dir: PathBuf,
dest_dir: Option<PathBuf>,
debug: bool,
files: Vec<PathBuf>,
index_dir: PathBuf,
ngram_size: usize,
ngram_type: NgramType,
query: Option<String>,
regex_episode: String,
regex_season: String,
regex_year: String,
update_data: bool,
update_index: bool,
min_votes: u32,
rename_action: RenameAction,
}
impl Args {
fn from_matches(matches: &clap::ArgMatches) -> Result<Args> {
let files = collect_paths(
matches
.values_of_os("file")
.map(|it| it.collect())
.unwrap_or(vec![]),
matches.is_present("follow"),
);
let query = matches
.value_of_lossy("query")
.map(|q| q.into_owned());
let data_dir = matches
.value_of_os("data-dir")
.map(PathBuf::from)
.unwrap();
let dest_dir = matches
.value_of_os("dest-dir")
.map(PathBuf::from);
let index_dir = matches
.value_of_os("index-dir")
.map(PathBuf::from)
.unwrap_or(data_dir.join("index"));
let regex_episode = matches
.value_of_lossy("re-episode")
.unwrap()
.into_owned();
let regex_season = matches
.value_of_lossy("re-season")
.unwrap()
.into_owned();
let regex_year = matches
.value_of_lossy("re-year")
.unwrap()
.into_owned();
let min_votes = matches
.value_of_lossy("votes")
.unwrap()
.parse()?;
let rename_action = {
if matches.is_present("symlink") {
if !cfg!(unix) {
failure::bail!(
"--symlink currently supported only on Unix \
platforms, try hardlink (-H) instead"
);
}
RenameAction::Symlink
} else if matches.is_present("hardlink") {
RenameAction::Hardlink
} else {
RenameAction::Rename
}
};
Ok(Args {
data_dir: data_dir,
dest_dir: dest_dir,
debug: matches.is_present("debug"),
files: files,
index_dir: index_dir,
ngram_size: matches.value_of_lossy("ngram-size").unwrap().parse()?,
ngram_type: matches.value_of_lossy("ngram-type").unwrap().parse()?,
query: query,
regex_episode: regex_episode,
regex_season: regex_season,
regex_year: regex_year,
update_data: matches.is_present("update-data"),
update_index: matches.is_present("update-index"),
min_votes: min_votes,
rename_action: rename_action,
})
}
fn create_index(&self) -> Result<Index> {
Ok(IndexBuilder::new()
.ngram_size(self.ngram_size)
.ngram_type(self.ngram_type)
.create(&self.data_dir, &self.index_dir)?)
}
fn open_index(&self) -> Result<Index> {
Ok(Index::open(&self.data_dir, &self.index_dir)?)
}
fn searcher(&self) -> Result<Searcher> {
Ok(Searcher::new(self.open_index()?))
}
fn download_all(&self) -> Result<bool>
|
fn download_all_update(&self) -> Result<()> {
download::update_all(&self.data_dir)
}
}
fn app() -> clap::App<'static, 'static> {
use clap::{App, AppSettings, Arg};
lazy_static! {
// clap wants all of its strings tied to a particular lifetime, but
// we'd really like to determine some default values dynamically. Using
// a lazy_static here is one way of safely giving a static lifetime to
// a value that is computed at runtime.
//
// An alternative approach would be to compute all of our default
// values in the caller, and pass them into this function. It's nicer
        // to define what we need here, though. Locality of reference and all
// that.
static ref DATA_DIR: PathBuf = env::temp_dir().join("imdb-rename");
}
App::new("imdb-rename")
.author(clap::crate_authors!())
.version(clap::crate_version!())
.max_term_width(100)
.setting(AppSettings::UnifiedHelpMessage)
.arg(Arg::with_name("file")
.multiple(true)
.help("One or more files to rename."))
.arg(Arg::with_name("data-dir")
.long("data-dir")
.env("IMDB_RENAME_DATA_DIR")
.takes_value(true)
.default_value_os(DATA_DIR.as_os_str())
.help("The location to store IMDb data files."))
.arg(Arg::with_name("dest-dir")
.long("dest-dir")
.short("d")
.env("IMDB_RENAME_DEST_DIR")
.takes_value(true)
.help("The output directory of renamed files \
(or symlinks/hardlinks with the -s/-H options). \
By default, files are renamed in place."))
.arg(Arg::with_name("debug")
.long("debug")
.help("Show debug messages. Use this when filing bugs."))
.arg(Arg::with_name("follow")
.long("follow")
.short("f")
.help("Follow directories and attempt to rename all child \
entries."))
.arg(Arg::with_name("index-dir")
.long("index-dir")
.env("IMDB_RENAME_INDEX_DIR")
.takes_value(true)
.help("The location to store IMDb index files. \
When absent, the default is {data-dir}/index."))
.arg(Arg::with_name("ngram-size")
.long("ngram-size")
.default_value("3")
.help("Choose the ngram size for indexing names. This is only \
used at index time and otherwise ignored."))
.arg(Arg::with_name("ngram-type")
.long("ngram-type")
.default_value("window")
.possible_values(NgramType::possible_names())
.help("Choose the type of ngram generation. This is only used \
used at index time and otherwise ignored."))
.arg(Arg::with_name("query")
.long("query")
.short("q")
.takes_value(true)
.help("Setting an override query is necessary if the file \
path lacks sufficient information to find a matching \
title. For example, if a year could not be found. It \
is also useful for specifying a TV show when renaming \
multiple episodes at once."))
.arg(Arg::with_name("re-episode")
.long("re-episode")
.takes_value(true)
.default
|
{
download::download_all(&self.data_dir)
}
|
identifier_body
|
main.rs
|
())?;
if args.debug {
log::set_max_level(log::LevelFilter::Debug);
}
// Forcefully update the data and re-index if requested.
if args.update_data {
args.download_all_update()?;
args.create_index()?;
return Ok(());
}
// Ensure that the necessary data exists.
if args.download_all()? || args.update_index {
args.create_index()?;
if args.update_index {
return Ok(());
}
}
// Now ensure that the index exists.
if !args.index_dir.exists()
|
let mut searcher = args.searcher()?;
let results = match args.query {
None => None,
Some(ref query) => Some(searcher.search(&query.parse()?)?),
};
if args.files.is_empty() {
let results = match results {
None => failure::bail!("run with a file to rename or --query"),
Some(ref results) => results,
};
return write_tsv(io::stdout(), &mut searcher, results.as_slice());
}
let mut builder = RenamerBuilder::new();
builder
.min_votes(args.min_votes)
.good_threshold(0.25)
.regex_episode(&args.regex_episode)
.regex_season(&args.regex_season)
.regex_year(&args.regex_year);
if let Some(ref results) = results {
builder.force(choose(&mut searcher, results.as_slice(), 0.25)?);
}
let renamer = builder.build()?;
let proposals = renamer.propose(
&mut searcher,
&args.files,
args.dest_dir,
args.rename_action)?;
if proposals.is_empty() {
failure::bail!("no files to rename");
}
let mut stdout = TabWriter::new(io::stdout());
for p in &proposals {
writeln!(stdout, "{}\t->\t{}", p.src().display(), p.dst().display())?;
}
stdout.flush()?;
if read_yesno(&format!(
"Are you sure you want to {action} the above files? (y/n) ",
action = &args.rename_action
))? {
for p in &proposals {
if let Err(err) = p.rename() {
eprintln!("{}", err);
}
}
}
Ok(())
}
#[derive(Debug)]
struct Args {
data_dir: PathBuf,
dest_dir: Option<PathBuf>,
debug: bool,
files: Vec<PathBuf>,
index_dir: PathBuf,
ngram_size: usize,
ngram_type: NgramType,
query: Option<String>,
regex_episode: String,
regex_season: String,
regex_year: String,
update_data: bool,
update_index: bool,
min_votes: u32,
rename_action: RenameAction,
}
impl Args {
fn from_matches(matches: &clap::ArgMatches) -> Result<Args> {
let files = collect_paths(
matches
.values_of_os("file")
.map(|it| it.collect())
.unwrap_or(vec![]),
matches.is_present("follow"),
);
let query = matches
.value_of_lossy("query")
.map(|q| q.into_owned());
let data_dir = matches
.value_of_os("data-dir")
.map(PathBuf::from)
.unwrap();
let dest_dir = matches
.value_of_os("dest-dir")
.map(PathBuf::from);
let index_dir = matches
.value_of_os("index-dir")
.map(PathBuf::from)
.unwrap_or(data_dir.join("index"));
let regex_episode = matches
.value_of_lossy("re-episode")
.unwrap()
.into_owned();
let regex_season = matches
.value_of_lossy("re-season")
.unwrap()
.into_owned();
let regex_year = matches
.value_of_lossy("re-year")
.unwrap()
.into_owned();
let min_votes = matches
.value_of_lossy("votes")
.unwrap()
.parse()?;
let rename_action = {
if matches.is_present("symlink") {
if !cfg!(unix) {
failure::bail!(
"--symlink currently supported only on Unix \
platforms, try hardlink (-H) instead"
);
}
RenameAction::Symlink
} else if matches.is_present("hardlink") {
RenameAction::Hardlink
} else {
RenameAction::Rename
}
};
Ok(Args {
data_dir: data_dir,
dest_dir: dest_dir,
debug: matches.is_present("debug"),
files: files,
index_dir: index_dir,
ngram_size: matches.value_of_lossy("ngram-size").unwrap().parse()?,
ngram_type: matches.value_of_lossy("ngram-type").unwrap().parse()?,
query: query,
regex_episode: regex_episode,
regex_season: regex_season,
regex_year: regex_year,
update_data: matches.is_present("update-data"),
update_index: matches.is_present("update-index"),
min_votes: min_votes,
rename_action: rename_action,
})
}
fn create_index(&self) -> Result<Index> {
Ok(IndexBuilder::new()
.ngram_size(self.ngram_size)
.ngram_type(self.ngram_type)
.create(&self.data_dir, &self.index_dir)?)
}
fn open_index(&self) -> Result<Index> {
Ok(Index::open(&self.data_dir, &self.index_dir)?)
}
fn searcher(&self) -> Result<Searcher> {
Ok(Searcher::new(self.open_index()?))
}
fn download_all(&self) -> Result<bool> {
download::download_all(&self.data_dir)
}
fn download_all_update(&self) -> Result<()> {
download::update_all(&self.data_dir)
}
}
fn app() -> clap::App<'static, 'static> {
use clap::{App, AppSettings, Arg};
lazy_static! {
// clap wants all of its strings tied to a particular lifetime, but
// we'd really like to determine some default values dynamically. Using
// a lazy_static here is one way of safely giving a static lifetime to
// a value that is computed at runtime.
//
// An alternative approach would be to compute all of our default
// values in the caller, and pass them into this function. It's nicer
        // to define what we need here, though. Locality of reference and all
// that.
static ref DATA_DIR: PathBuf = env::temp_dir().join("imdb-rename");
}
App::new("imdb-rename")
.author(clap::crate_authors!())
.version(clap::crate_version!())
.max_term_width(100)
.setting(AppSettings::UnifiedHelpMessage)
.arg(Arg::with_name("file")
.multiple(true)
.help("One or more files to rename."))
.arg(Arg::with_name("data-dir")
.long("data-dir")
.env("IMDB_RENAME_DATA_DIR")
.takes_value(true)
.default_value_os(DATA_DIR.as_os_str())
.help("The location to store IMDb data files."))
.arg(Arg::with_name("dest-dir")
.long("dest-dir")
.short("d")
.env("IMDB_RENAME_DEST_DIR")
.takes_value(true)
.help("The output directory of renamed files \
(or symlinks/hardlinks with the -s/-H options). \
By default, files are renamed in place."))
.arg(Arg::with_name("debug")
.long("debug")
.help("Show debug messages. Use this when filing bugs."))
.arg(Arg::with_name("follow")
.long("follow")
.short("f")
.help("Follow directories and attempt to rename all child \
entries."))
.arg(Arg::with_name("index-dir")
.long("index-dir")
.env("IMDB_RENAME_INDEX_DIR")
.takes_value(true)
.help("The location to store IMDb index files. \
When absent, the default is {data-dir}/index."))
.arg(Arg::with_name("ngram-size")
.long("ngram-size")
.default_value("3")
.help("Choose the ngram size for indexing names. This is only \
used at index time and otherwise ignored."))
.arg(Arg::with_name("ngram-type")
.long("ngram-type")
.default_value("window")
.possible_values(NgramType::possible_names())
.help("Choose the type of ngram generation. This is only used \
used at index time and otherwise ignored."))
.arg(Arg::with_name("query")
.long("query")
.short("q")
.takes_value(true)
.help("Setting an override query is necessary if the file \
path lacks sufficient information to find a matching \
title. For example, if a year could not be found. It \
is also useful for specifying a TV show when renaming \
multiple episodes at once."))
.arg(Arg::with_name("re-episode")
.long("re-episode")
.takes_value(true)
.default
|
{
args.create_index()?;
}
|
conditional_block
|
main.rs
|
_matches())?;
if args.debug {
log::set_max_level(log::LevelFilter::Debug);
}
// Forcefully update the data and re-index if requested.
if args.update_data {
args.download_all_update()?;
args.create_index()?;
return Ok(());
}
// Ensure that the necessary data exists.
if args.download_all()? || args.update_index {
args.create_index()?;
if args.update_index {
return Ok(());
}
}
// Now ensure that the index exists.
if !args.index_dir.exists() {
args.create_index()?;
}
let mut searcher = args.searcher()?;
let results = match args.query {
None => None,
Some(ref query) => Some(searcher.search(&query.parse()?)?),
};
if args.files.is_empty() {
let results = match results {
None => failure::bail!("run with a file to rename or --query"),
Some(ref results) => results,
};
return write_tsv(io::stdout(), &mut searcher, results.as_slice());
}
let mut builder = RenamerBuilder::new();
builder
.min_votes(args.min_votes)
.good_threshold(0.25)
.regex_episode(&args.regex_episode)
.regex_season(&args.regex_season)
.regex_year(&args.regex_year);
if let Some(ref results) = results {
builder.force(choose(&mut searcher, results.as_slice(), 0.25)?);
}
let renamer = builder.build()?;
let proposals = renamer.propose(
&mut searcher,
&args.files,
args.dest_dir,
args.rename_action)?;
if proposals.is_empty() {
failure::bail!("no files to rename");
}
let mut stdout = TabWriter::new(io::stdout());
for p in &proposals {
writeln!(stdout, "{}\t->\t{}", p.src().display(), p.dst().display())?;
}
stdout.flush()?;
if read_yesno(&format!(
"Are you sure you want to {action} the above files? (y/n) ",
action = &args.rename_action
))? {
for p in &proposals {
if let Err(err) = p.rename() {
eprintln!("{}", err);
}
}
}
Ok(())
}
#[derive(Debug)]
struct Args {
data_dir: PathBuf,
dest_dir: Option<PathBuf>,
debug: bool,
files: Vec<PathBuf>,
index_dir: PathBuf,
ngram_size: usize,
ngram_type: NgramType,
query: Option<String>,
regex_episode: String,
regex_season: String,
regex_year: String,
update_data: bool,
update_index: bool,
min_votes: u32,
rename_action: RenameAction,
}
impl Args {
fn from_matches(matches: &clap::ArgMatches) -> Result<Args> {
let files = collect_paths(
matches
.values_of_os("file")
.map(|it| it.collect())
.unwrap_or(vec![]),
matches.is_present("follow"),
);
let query = matches
.value_of_lossy("query")
.map(|q| q.into_owned());
let data_dir = matches
.value_of_os("data-dir")
.map(PathBuf::from)
.unwrap();
let dest_dir = matches
.value_of_os("dest-dir")
.map(PathBuf::from);
let index_dir = matches
.value_of_os("index-dir")
.map(PathBuf::from)
.unwrap_or(data_dir.join("index"));
let regex_episode = matches
.value_of_lossy("re-episode")
.unwrap()
.into_owned();
let regex_season = matches
.value_of_lossy("re-season")
.unwrap()
.into_owned();
let regex_year = matches
.value_of_lossy("re-year")
.unwrap()
.into_owned();
let min_votes = matches
.value_of_lossy("votes")
.unwrap()
.parse()?;
let rename_action = {
if matches.is_present("symlink") {
if !cfg!(unix) {
failure::bail!(
"--symlink currently supported only on Unix \
platforms, try hardlink (-H) instead"
);
}
RenameAction::Symlink
} else if matches.is_present("hardlink") {
RenameAction::Hardlink
} else {
RenameAction::Rename
}
};
Ok(Args {
data_dir: data_dir,
dest_dir: dest_dir,
debug: matches.is_present("debug"),
files: files,
index_dir: index_dir,
ngram_size: matches.value_of_lossy("ngram-size").unwrap().parse()?,
ngram_type: matches.value_of_lossy("ngram-type").unwrap().parse()?,
query: query,
regex_episode: regex_episode,
regex_season: regex_season,
regex_year: regex_year,
update_data: matches.is_present("update-data"),
update_index: matches.is_present("update-index"),
min_votes: min_votes,
rename_action: rename_action,
})
}
fn create_index(&self) -> Result<Index> {
Ok(IndexBuilder::new()
.ngram_size(self.ngram_size)
.ngram_type(self.ngram_type)
.create(&self.data_dir, &self.index_dir)?)
}
fn open_index(&self) -> Result<Index> {
Ok(Index::open(&self.data_dir, &self.index_dir)?)
}
fn searcher(&self) -> Result<Searcher> {
Ok(Searcher::new(self.open_index()?))
}
fn download_all(&self) -> Result<bool> {
download::download_all(&self.data_dir)
}
fn download_all_update(&self) -> Result<()> {
download::update_all(&self.data_dir)
}
}
fn app() -> clap::App<'static, 'static> {
use clap::{App, AppSettings, Arg};
lazy_static! {
// clap wants all of its strings tied to a particular lifetime, but
// we'd really like to determine some default values dynamically. Using
// a lazy_static here is one way of safely giving a static lifetime to
// a value that is computed at runtime.
//
// An alternative approach would be to compute all of our default
// values in the caller, and pass them into this function. It's nicer
        // to define what we need here, though. Locality of reference and all
// that.
static ref DATA_DIR: PathBuf = env::temp_dir().join("imdb-rename");
}
App::new("imdb-rename")
.author(clap::crate_authors!())
.version(clap::crate_version!())
.max_term_width(100)
.setting(AppSettings::UnifiedHelpMessage)
.arg(Arg::with_name("file")
.multiple(true)
.help("One or more files to rename."))
.arg(Arg::with_name("data-dir")
.long("data-dir")
.env("IMDB_RENAME_DATA_DIR")
.takes_value(true)
.default_value_os(DATA_DIR.as_os_str())
.help("The location to store IMDb data files."))
.arg(Arg::with_name("dest-dir")
.long("dest-dir")
.short("d")
.env("IMDB_RENAME_DEST_DIR")
.takes_value(true)
.help("The output directory of renamed files \
(or symlinks/hardlinks with the -s/-H options). \
By default, files are renamed in place."))
.arg(Arg::with_name("debug")
.long("debug")
.help("Show debug messages. Use this when filing bugs."))
.arg(Arg::with_name("follow")
.long("follow")
.short("f")
.help("Follow directories and attempt to rename all child \
entries."))
.arg(Arg::with_name("index-dir")
.long("index-dir")
.env("IMDB_RENAME_INDEX_DIR")
.takes_value(true)
.help("The location to store IMDb index files. \
When absent, the default is {data-dir}/index."))
.arg(Arg::with_name("ngram-size")
.long("ngram-size")
.default_value("3")
|
.help("Choose the ngram size for indexing names. This is only \
used at index time and otherwise ignored."))
.arg(Arg::with_name("ngram-type")
.long("ngram-type")
.default_value("window")
.possible_values(NgramType::possible_names())
.help("Choose the type of ngram generation. This is only used \
used at index time and otherwise ignored."))
.arg(Arg::with_name("query")
.long("query")
.short("q")
.takes_value(true)
.help("Setting an override query is necessary if the file \
path lacks sufficient information to find a matching \
title. For example, if a year could not be found. It \
is also useful for specifying a TV show when renaming \
multiple episodes at once."))
.arg(Arg::with_name("re-episode")
.long("re-episode")
.takes_value(true)
.default_value
|
random_line_split
|
|
main.rs
|
())?;
if args.debug {
log::set_max_level(log::LevelFilter::Debug);
}
// Forcefully update the data and re-index if requested.
if args.update_data {
args.download_all_update()?;
args.create_index()?;
return Ok(());
}
// Ensure that the necessary data exists.
if args.download_all()? || args.update_index {
args.create_index()?;
if args.update_index {
return Ok(());
}
}
// Now ensure that the index exists.
if !args.index_dir.exists() {
args.create_index()?;
}
let mut searcher = args.searcher()?;
let results = match args.query {
None => None,
Some(ref query) => Some(searcher.search(&query.parse()?)?),
};
if args.files.is_empty() {
let results = match results {
None => failure::bail!("run with a file to rename or --query"),
Some(ref results) => results,
};
return write_tsv(io::stdout(), &mut searcher, results.as_slice());
}
let mut builder = RenamerBuilder::new();
builder
.min_votes(args.min_votes)
.good_threshold(0.25)
.regex_episode(&args.regex_episode)
.regex_season(&args.regex_season)
.regex_year(&args.regex_year);
if let Some(ref results) = results {
builder.force(choose(&mut searcher, results.as_slice(), 0.25)?);
}
let renamer = builder.build()?;
let proposals = renamer.propose(
&mut searcher,
&args.files,
args.dest_dir,
args.rename_action)?;
if proposals.is_empty() {
failure::bail!("no files to rename");
}
let mut stdout = TabWriter::new(io::stdout());
for p in &proposals {
writeln!(stdout, "{}\t->\t{}", p.src().display(), p.dst().display())?;
}
stdout.flush()?;
if read_yesno(&format!(
"Are you sure you want to {action} the above files? (y/n) ",
action = &args.rename_action
))? {
for p in &proposals {
if let Err(err) = p.rename() {
eprintln!("{}", err);
}
}
}
Ok(())
}
#[derive(Debug)]
struct Args {
data_dir: PathBuf,
dest_dir: Option<PathBuf>,
debug: bool,
files: Vec<PathBuf>,
index_dir: PathBuf,
ngram_size: usize,
ngram_type: NgramType,
query: Option<String>,
regex_episode: String,
regex_season: String,
regex_year: String,
update_data: bool,
update_index: bool,
min_votes: u32,
rename_action: RenameAction,
}
impl Args {
fn
|
(matches: &clap::ArgMatches) -> Result<Args> {
let files = collect_paths(
matches
.values_of_os("file")
.map(|it| it.collect())
.unwrap_or(vec![]),
matches.is_present("follow"),
);
let query = matches
.value_of_lossy("query")
.map(|q| q.into_owned());
let data_dir = matches
.value_of_os("data-dir")
.map(PathBuf::from)
.unwrap();
let dest_dir = matches
.value_of_os("dest-dir")
.map(PathBuf::from);
let index_dir = matches
.value_of_os("index-dir")
.map(PathBuf::from)
.unwrap_or(data_dir.join("index"));
let regex_episode = matches
.value_of_lossy("re-episode")
.unwrap()
.into_owned();
let regex_season = matches
.value_of_lossy("re-season")
.unwrap()
.into_owned();
let regex_year = matches
.value_of_lossy("re-year")
.unwrap()
.into_owned();
let min_votes = matches
.value_of_lossy("votes")
.unwrap()
.parse()?;
let rename_action = {
if matches.is_present("symlink") {
if !cfg!(unix) {
failure::bail!(
"--symlink currently supported only on Unix \
platforms, try hardlink (-H) instead"
);
}
RenameAction::Symlink
} else if matches.is_present("hardlink") {
RenameAction::Hardlink
} else {
RenameAction::Rename
}
};
Ok(Args {
data_dir: data_dir,
dest_dir: dest_dir,
debug: matches.is_present("debug"),
files: files,
index_dir: index_dir,
ngram_size: matches.value_of_lossy("ngram-size").unwrap().parse()?,
ngram_type: matches.value_of_lossy("ngram-type").unwrap().parse()?,
query: query,
regex_episode: regex_episode,
regex_season: regex_season,
regex_year: regex_year,
update_data: matches.is_present("update-data"),
update_index: matches.is_present("update-index"),
min_votes: min_votes,
rename_action: rename_action,
})
}
fn create_index(&self) -> Result<Index> {
Ok(IndexBuilder::new()
.ngram_size(self.ngram_size)
.ngram_type(self.ngram_type)
.create(&self.data_dir, &self.index_dir)?)
}
fn open_index(&self) -> Result<Index> {
Ok(Index::open(&self.data_dir, &self.index_dir)?)
}
fn searcher(&self) -> Result<Searcher> {
Ok(Searcher::new(self.open_index()?))
}
fn download_all(&self) -> Result<bool> {
download::download_all(&self.data_dir)
}
fn download_all_update(&self) -> Result<()> {
download::update_all(&self.data_dir)
}
}
fn app() -> clap::App<'static, 'static> {
use clap::{App, AppSettings, Arg};
lazy_static! {
// clap wants all of its strings tied to a particular lifetime, but
// we'd really like to determine some default values dynamically. Using
// a lazy_static here is one way of safely giving a static lifetime to
// a value that is computed at runtime.
//
// An alternative approach would be to compute all of our default
// values in the caller, and pass them into this function. It's nicer
        // to define what we need here, though. Locality of reference and all
// that.
static ref DATA_DIR: PathBuf = env::temp_dir().join("imdb-rename");
}
App::new("imdb-rename")
.author(clap::crate_authors!())
.version(clap::crate_version!())
.max_term_width(100)
.setting(AppSettings::UnifiedHelpMessage)
.arg(Arg::with_name("file")
.multiple(true)
.help("One or more files to rename."))
.arg(Arg::with_name("data-dir")
.long("data-dir")
.env("IMDB_RENAME_DATA_DIR")
.takes_value(true)
.default_value_os(DATA_DIR.as_os_str())
.help("The location to store IMDb data files."))
.arg(Arg::with_name("dest-dir")
.long("dest-dir")
.short("d")
.env("IMDB_RENAME_DEST_DIR")
.takes_value(true)
.help("The output directory of renamed files \
(or symlinks/hardlinks with the -s/-H options). \
By default, files are renamed in place."))
.arg(Arg::with_name("debug")
.long("debug")
.help("Show debug messages. Use this when filing bugs."))
.arg(Arg::with_name("follow")
.long("follow")
.short("f")
.help("Follow directories and attempt to rename all child \
entries."))
.arg(Arg::with_name("index-dir")
.long("index-dir")
.env("IMDB_RENAME_INDEX_DIR")
.takes_value(true)
.help("The location to store IMDb index files. \
When absent, the default is {data-dir}/index."))
.arg(Arg::with_name("ngram-size")
.long("ngram-size")
.default_value("3")
.help("Choose the ngram size for indexing names. This is only \
used at index time and otherwise ignored."))
.arg(Arg::with_name("ngram-type")
.long("ngram-type")
.default_value("window")
.possible_values(NgramType::possible_names())
.help("Choose the type of ngram generation. This is only used \
used at index time and otherwise ignored."))
.arg(Arg::with_name("query")
.long("query")
.short("q")
.takes_value(true)
.help("Setting an override query is necessary if the file \
path lacks sufficient information to find a matching \
title. For example, if a year could not be found. It \
is also useful for specifying a TV show when renaming \
multiple episodes at once."))
.arg(Arg::with_name("re-episode")
.long("re-episode")
.takes_value(true)
.default_value
|
from_matches
|
identifier_name
|
models.py
|
_size = random.randrange(height // 25, height // 10)
font = ImageFont.truetype("app/static/arial.ttf", size=font_size)
txt = Image.new('RGB', (16 * font_size, int(1.1 * font_size)), color=(192, 192, 192))
d = ImageDraw.Draw(txt)
d.text((0, 0), "New image mock, generated by PIL", font=font, fill=0)
rotated = txt.rotate(90 * rotate_direction, expand=1)
img.paste(rotated, box=(random.randrange(width // 2),
random.randrange(height // 2)))
d = ImageDraw.Draw(img)
n_steps = random.randrange(10, 20)
prev_point = [random.randrange(width), random.randrange(height)]
prev_horizontal = True
for _ in range(n_steps):
next_dir = random.randint(0, 1)
next_point = [0, 0]
if prev_horizontal:
next_point[0] = prev_point[0]
if next_dir == 0:
next_point[1] = random.randrange(prev_point[1] + 1)
else:
next_point[1] = random.randrange(prev_point[1] - 1, height)
else:
next_point[1] = prev_point[1]
if next_dir == 0:
next_point[0] = random.randrange(prev_point[0] + 1)
else:
next_point[0] = random.randrange(prev_point[0] - 1, width)
prev_horizontal = not prev_horizontal
d.line(prev_point + next_point, fill=0, width=3)
prev_point = next_point
return img
def transform_image(image, angle=None, borders=None):
image = image.convert('L') # to grayscale
image, angle = rotate_image(image, angle=angle) # optimal rotating
image, borders = crop_image(image, borders=borders)
return image, angle, borders
def resize_image(image, max_dimension):
ratio = min(max_dimension / image.size[0], max_dimension / image.size[1])
return image.resize((int(image.size[0] * ratio),
int(image.size[1] * ratio)),
Image.ANTIALIAS)
def rotate_image(image, angle=None):
def rotating_criteria(image_to_rotate, angle):
tmp_image = image_to_rotate.rotate(angle, expand=1)
(width, height) = tmp_image.size
image_array = np.array(tmp_image.getdata()).astype('uint8').reshape((height, width))
# criteria = (np.max(np.sum(image_array, axis=0)) / height,
# np.max(np.sum(image_array, axis=1)) / width)
criteria = [None, None]
for axis in [0, 1]:
# sum_over_axis = image_array.sum(axis=axis)
# sum_over_axis = ((sum_over_axis[1:-1] > sum_over_axis[:-2]*10)
# | (sum_over_axis[1:-1] > sum_over_axis[2:]*10))
sum_over_axis = image_array.mean(axis=axis) > 5
sum_over_axis = np.nonzero(sum_over_axis)[0]
if len(sum_over_axis) > 0:
|
else:
criteria[axis] = 1000000
return min(criteria)
print('basic image: ', image.size)
if angle is None:
# turn auto-rotating off
# search optimal angle to rotate
# current_resize_level = 0
# angles = [-45.0]
# angles += [0] * (ROTATION_N_TO_SPLIT - 1)
# angles += [45.0]
# crit = [None] * (ROTATION_N_TO_SPLIT + 1)
# image_inverted = None
# while (angles[-1] - angles[0]) > 0.1:
        #             # resize the image if needed
# if current_resize_level != len(ROTATION_RESIZING_LEVELS):
# if (angles[-1] - angles[0]) < ROTATION_RESIZING_LEVELS[current_resize_level]['angle_diff']:
# image_inverted = resize_image(invert(image), ROTATION_RESIZING_LEVELS[current_resize_level]['size'])
# current_resize_level += 1
# print('image inverted: ', image_inverted.size)
# crit[0] = rotating_criteria(image_inverted, angles[0])
# crit[-1] = rotating_criteria(image_inverted, angles[-1])
#
# for ic in range(1, ROTATION_N_TO_SPLIT):
# angles[ic] = angles[0] + (angles[-1] - angles[0]) * ic / ROTATION_N_TO_SPLIT
# crit[ic] = rotating_criteria(image_inverted, angles[ic])
# max_point = (np.argmin(crit) + ROTATION_N_TO_SPLIT - np.argmin(crit[::-1])) // 2
# angles[0] = angles[max(max_point - 2, 0)]
# angles[-1] = angles[min(max_point + 2, ROTATION_N_TO_SPLIT)]
# crit[0] = crit[max(max_point - 2, 0)]
# crit[-1] = crit[min(max_point + 2, ROTATION_N_TO_SPLIT)]
# print('new borders: ', angles[0], angles[-1])
# max_point = (np.argmin(crit) + ROTATION_N_TO_SPLIT - np.argmin(crit[::-1])) // 2
# opt_angle = angles[max_point]
# opt_criteria = crit[max_point]
#
# print('opt_angle: ', opt_angle, ', criteria: ', opt_criteria)
opt_angle = 0
else:
# take existing angle
opt_angle = angle
# final rotation
if opt_angle != 0:
tmp_image = image.rotate(opt_angle, expand=1)
bg_mask = Image.new(mode='L', size=image.size, color=255).rotate(opt_angle, expand=1)
bg = Image.new(mode='L', size=tmp_image.size, color=255)
bg.paste(tmp_image, mask=bg_mask)
return bg, opt_angle
return image, 0
def crop_image(image, borders=None):
print(image.size)
width, height = image.size
image_array = (np.ones(shape=(height, width), dtype='uint8') * 255
- np.array(image.getdata()).astype('uint8').reshape((height, width)))
if borders is None:
# search optimal borders to crop
hist_u_to_b = (((image_array.max(axis=1) - image_array.min(axis=1)) > CROP_MIN_MAX_GAP)
| (image_array.mean(axis=1) > CROP_SIGNIFICANT_MEAN))
hist_l_to_r = (((np.max(image_array, axis=0) - np.min(image_array, axis=0)) > CROP_MIN_MAX_GAP)
| (np.mean(image_array, axis=0) > CROP_SIGNIFICANT_MEAN))
print(hist_l_to_r.shape, hist_u_to_b.shape)
left_border = int(max(np.nonzero(hist_l_to_r)[0][0] - 1, 0))
right_border = int(min(np.nonzero(hist_l_to_r)[0][-1] + 1, width))
up_border = int(max(np.nonzero(hist_u_to_b)[0][0] - 1, 0))
bottom_border = int(min(np.nonzero(hist_u_to_b)[0][-1] + 1, height))
borders = {'left_border': left_border,
'right_border': right_border,
'up_border': up_border,
'bottom_border': bottom_border}
else:
# take existing borders
left_border = borders['left_border']
right_border = borders['right_border']
up_border = borders['up_border']
bottom_border = borders['bottom_border']
image_array = 255 - image_array[up_border:bottom_border, left_border:right_border]
return Image.fromarray(image_array), borders
class ImageToMark:
def __init__(self, image_id):
self.image_id = image_id
self._image = None
@property
def markdown(self):
"""planning markdown is saved here (redirection to database)"""
return db[self.basic_image_id]
@markdown.setter
def markdown(self, value):
db[self.image_id] = value
        # update hash set: there is a marked image with that hash
image_hash = self.hash
marked_images = marked_hashes.get(image_hash, [])
if self.image_id not in marked_images:
marked_hashes[image_hash] = marked_images + [self.image_id]
@property
def image(self):
if self._image is None:
# self._image = random_image(self.image_id)
self._image = load_image_from_url(self.url)
# self._image = resize_image(self._image, 400)
basic_image_id = self.basic_image_id
|
criteria[axis] = sum_over_axis[-1] - sum_over_axis[0]
|
conditional_block
|
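The border search in crop_image above builds boolean row/column masks of "significant ink" (a large max-min gap or a high mean) and takes the first and last true indices, padded by one pixel. Below is a small self-contained sketch of that idea; the threshold names and values (MIN_MAX_GAP, SIGNIFICANT_MEAN) are illustrative stand-ins, not the module's CROP_* constants.

# Sketch of the crop-border search used above; thresholds are illustrative.
import numpy as np

MIN_MAX_GAP = 32        # stand-in for CROP_MIN_MAX_GAP
SIGNIFICANT_MEAN = 8    # stand-in for CROP_SIGNIFICANT_MEAN

def find_borders(ink):
    """ink: 2-D uint8 array, larger values mean darker pixels; assumes some ink exists."""
    rows = ((ink.max(axis=1) - ink.min(axis=1)) > MIN_MAX_GAP) | (ink.mean(axis=1) > SIGNIFICANT_MEAN)
    cols = ((ink.max(axis=0) - ink.min(axis=0)) > MIN_MAX_GAP) | (ink.mean(axis=0) > SIGNIFICANT_MEAN)
    row_idx = np.nonzero(rows)[0]
    col_idx = np.nonzero(cols)[0]
    height, width = ink.shape
    return {'left_border': int(max(col_idx[0] - 1, 0)),
            'right_border': int(min(col_idx[-1] + 1, width)),
            'up_border': int(max(row_idx[0] - 1, 0)),
            'bottom_border': int(min(row_idx[-1] + 1, height))}

if __name__ == '__main__':
    demo = np.zeros((100, 100), dtype='uint8')
    demo[20:40, 30:70] = 200  # a dark block surrounded by blank margins
    print(find_borders(demo))  # borders land just outside rows 20-39, cols 30-69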
models.py
|
_size = random.randrange(height // 25, height // 10)
font = ImageFont.truetype("app/static/arial.ttf", size=font_size)
txt = Image.new('RGB', (16 * font_size, int(1.1 * font_size)), color=(192, 192, 192))
d = ImageDraw.Draw(txt)
d.text((0, 0), "New image mock, generated by PIL", font=font, fill=0)
rotated = txt.rotate(90 * rotate_direction, expand=1)
img.paste(rotated, box=(random.randrange(width // 2),
random.randrange(height // 2)))
d = ImageDraw.Draw(img)
n_steps = random.randrange(10, 20)
prev_point = [random.randrange(width), random.randrange(height)]
prev_horizontal = True
for _ in range(n_steps):
next_dir = random.randint(0, 1)
next_point = [0, 0]
if prev_horizontal:
next_point[0] = prev_point[0]
if next_dir == 0:
next_point[1] = random.randrange(prev_point[1] + 1)
else:
next_point[1] = random.randrange(prev_point[1] - 1, height)
else:
next_point[1] = prev_point[1]
if next_dir == 0:
next_point[0] = random.randrange(prev_point[0] + 1)
else:
next_point[0] = random.randrange(prev_point[0] - 1, width)
prev_horizontal = not prev_horizontal
d.line(prev_point + next_point, fill=0, width=3)
prev_point = next_point
return img
def transform_image(image, angle=None, borders=None):
image = image.convert('L') # to grayscale
image, angle = rotate_image(image, angle=angle) # optimal rotating
image, borders = crop_image(image, borders=borders)
return image, angle, borders
def resize_image(image, max_dimension):
ratio = min(max_dimension / image.size[0], max_dimension / image.size[1])
return image.resize((int(image.size[0] * ratio),
int(image.size[1] * ratio)),
Image.ANTIALIAS)
def rotate_image(image, angle=None):
def rotating_criteria(image_to_rotate, angle):
tmp_image = image_to_rotate.rotate(angle, expand=1)
(width, height) = tmp_image.size
image_array = np.array(tmp_image.getdata()).astype('uint8').reshape((height, width))
# criteria = (np.max(np.sum(image_array, axis=0)) / height,
# np.max(np.sum(image_array, axis=1)) / width)
criteria = [None, None]
for axis in [0, 1]:
# sum_over_axis = image_array.sum(axis=axis)
# sum_over_axis = ((sum_over_axis[1:-1] > sum_over_axis[:-2]*10)
# | (sum_over_axis[1:-1] > sum_over_axis[2:]*10))
sum_over_axis = image_array.mean(axis=axis) > 5
sum_over_axis = np.nonzero(sum_over_axis)[0]
if len(sum_over_axis) > 0:
criteria[axis] = sum_over_axis[-1] - sum_over_axis[0]
else:
criteria[axis] = 1000000
return min(criteria)
print('basic image: ', image.size)
if angle is None:
# turn auto-rotating off
# search optimal angle to rotate
# current_resize_level = 0
# angles = [-45.0]
# angles += [0] * (ROTATION_N_TO_SPLIT - 1)
# angles += [45.0]
# crit = [None] * (ROTATION_N_TO_SPLIT + 1)
# image_inverted = None
# while (angles[-1] - angles[0]) > 0.1:
        #             # resize the image if needed
# if current_resize_level != len(ROTATION_RESIZING_LEVELS):
# if (angles[-1] - angles[0]) < ROTATION_RESIZING_LEVELS[current_resize_level]['angle_diff']:
# image_inverted = resize_image(invert(image), ROTATION_RESIZING_LEVELS[current_resize_level]['size'])
# current_resize_level += 1
# print('image inverted: ', image_inverted.size)
# crit[0] = rotating_criteria(image_inverted, angles[0])
# crit[-1] = rotating_criteria(image_inverted, angles[-1])
#
# for ic in range(1, ROTATION_N_TO_SPLIT):
# angles[ic] = angles[0] + (angles[-1] - angles[0]) * ic / ROTATION_N_TO_SPLIT
# crit[ic] = rotating_criteria(image_inverted, angles[ic])
# max_point = (np.argmin(crit) + ROTATION_N_TO_SPLIT - np.argmin(crit[::-1])) // 2
# angles[0] = angles[max(max_point - 2, 0)]
# angles[-1] = angles[min(max_point + 2, ROTATION_N_TO_SPLIT)]
# crit[0] = crit[max(max_point - 2, 0)]
# crit[-1] = crit[min(max_point + 2, ROTATION_N_TO_SPLIT)]
# print('new borders: ', angles[0], angles[-1])
# max_point = (np.argmin(crit) + ROTATION_N_TO_SPLIT - np.argmin(crit[::-1])) // 2
# opt_angle = angles[max_point]
# opt_criteria = crit[max_point]
#
# print('opt_angle: ', opt_angle, ', criteria: ', opt_criteria)
opt_angle = 0
else:
# take existing angle
opt_angle = angle
# final rotation
if opt_angle != 0:
tmp_image = image.rotate(opt_angle, expand=1)
bg_mask = Image.new(mode='L', size=image.size, color=255).rotate(opt_angle, expand=1)
bg = Image.new(mode='L', size=tmp_image.size, color=255)
bg.paste(tmp_image, mask=bg_mask)
return bg, opt_angle
return image, 0
def crop_image(image, borders=None):
print(image.size)
width, height = image.size
image_array = (np.ones(shape=(height, width), dtype='uint8') * 255
- np.array(image.getdata()).astype('uint8').reshape((height, width)))
if borders is None:
# search optimal borders to crop
hist_u_to_b = (((image_array.max(axis=1) - image_array.min(axis=1)) > CROP_MIN_MAX_GAP)
| (image_array.mean(axis=1) > CROP_SIGNIFICANT_MEAN))
hist_l_to_r = (((np.max(image_array, axis=0) - np.min(image_array, axis=0)) > CROP_MIN_MAX_GAP)
| (np.mean(image_array, axis=0) > CROP_SIGNIFICANT_MEAN))
print(hist_l_to_r.shape, hist_u_to_b.shape)
left_border = int(max(np.nonzero(hist_l_to_r)[0][0] - 1, 0))
right_border = int(min(np.nonzero(hist_l_to_r)[0][-1] + 1, width))
up_border = int(max(np.nonzero(hist_u_to_b)[0][0] - 1, 0))
bottom_border = int(min(np.nonzero(hist_u_to_b)[0][-1] + 1, height))
borders = {'left_border': left_border,
'right_border': right_border,
'up_border': up_border,
'bottom_border': bottom_border}
else:
# take existing borders
left_border = borders['left_border']
right_border = borders['right_border']
up_border = borders['up_border']
bottom_border = borders['bottom_border']
image_array = 255 - image_array[up_border:bottom_border, left_border:right_border]
return Image.fromarray(image_array), borders
class ImageToMark:
def __init__(self, image_id):
self.image_id = image_id
self._image = None
@property
def markdown(self):
"""pl
|
arkdown is saved here (redirection to database)"""
return db[self.basic_image_id]
@markdown.setter
def markdown(self, value):
db[self.image_id] = value
        # update hash set: there is a marked image with that hash
image_hash = self.hash
marked_images = marked_hashes.get(image_hash, [])
if self.image_id not in marked_images:
marked_hashes[image_hash] = marked_images + [self.image_id]
@property
def image(self):
if self._image is None:
# self._image = random_image(self.image_id)
self._image = load_image_from_url(self.url)
# self._image = resize_image(self._image, 400)
basic_image_id = self.basic_image
|
anning m
|
identifier_name
|
models.py
|
img = Image.new('RGB', size=(width, height), color='white')
rotate_direction = random.randint(0, 3)
if rotate_direction in (0, 2):
font_size = random.randrange(width // 25, width // 10)
else:
font_size = random.randrange(height // 25, height // 10)
font = ImageFont.truetype("app/static/arial.ttf", size=font_size)
txt = Image.new('RGB', (16 * font_size, int(1.1 * font_size)), color=(192, 192, 192))
d = ImageDraw.Draw(txt)
d.text((0, 0), "New image mock, generated by PIL", font=font, fill=0)
rotated = txt.rotate(90 * rotate_direction, expand=1)
img.paste(rotated, box=(random.randrange(width // 2),
random.randrange(height // 2)))
d = ImageDraw.Draw(img)
n_steps = random.randrange(10, 20)
prev_point = [random.randrange(width), random.randrange(height)]
prev_horizontal = True
for _ in range(n_steps):
next_dir = random.randint(0, 1)
next_point = [0, 0]
if prev_horizontal:
next_point[0] = prev_point[0]
if next_dir == 0:
next_point[1] = random.randrange(prev_point[1] + 1)
else:
next_point[1] = random.randrange(prev_point[1] - 1, height)
else:
next_point[1] = prev_point[1]
if next_dir == 0:
next_point[0] = random.randrange(prev_point[0] + 1)
else:
next_point[0] = random.randrange(prev_point[0] - 1, width)
prev_horizontal = not prev_horizontal
d.line(prev_point + next_point, fill=0, width=3)
prev_point = next_point
return img
def transform_image(image, angle=None, borders=None):
image = image.convert('L') # to grayscale
image, angle = rotate_image(image, angle=angle) # optimal rotating
image, borders = crop_image(image, borders=borders)
return image, angle, borders
def resize_image(image, max_dimension):
ratio = min(max_dimension / image.size[0], max_dimension / image.size[1])
return image.resize((int(image.size[0] * ratio),
int(image.size[1] * ratio)),
Image.ANTIALIAS)
def rotate_image(image, angle=None):
def rotating_criteria(image_to_rotate, angle):
tmp_image = image_to_rotate.rotate(angle, expand=1)
(width, height) = tmp_image.size
image_array = np.array(tmp_image.getdata()).astype('uint8').reshape((height, width))
# criteria = (np.max(np.sum(image_array, axis=0)) / height,
# np.max(np.sum(image_array, axis=1)) / width)
criteria = [None, None]
for axis in [0, 1]:
# sum_over_axis = image_array.sum(axis=axis)
# sum_over_axis = ((sum_over_axis[1:-1] > sum_over_axis[:-2]*10)
# | (sum_over_axis[1:-1] > sum_over_axis[2:]*10))
sum_over_axis = image_array.mean(axis=axis) > 5
sum_over_axis = np.nonzero(sum_over_axis)[0]
if len(sum_over_axis) > 0:
criteria[axis] = sum_over_axis[-1] - sum_over_axis[0]
else:
criteria[axis] = 1000000
return min(criteria)
print('basic image: ', image.size)
if angle is None:
# turn auto-rotating off
# search optimal angle to rotate
# current_resize_level = 0
# angles = [-45.0]
# angles += [0] * (ROTATION_N_TO_SPLIT - 1)
# angles += [45.0]
# crit = [None] * (ROTATION_N_TO_SPLIT + 1)
# image_inverted = None
# while (angles[-1] - angles[0]) > 0.1:
        #             # resize the image if needed
# if current_resize_level != len(ROTATION_RESIZING_LEVELS):
# if (angles[-1] - angles[0]) < ROTATION_RESIZING_LEVELS[current_resize_level]['angle_diff']:
# image_inverted = resize_image(invert(image), ROTATION_RESIZING_LEVELS[current_resize_level]['size'])
# current_resize_level += 1
# print('image inverted: ', image_inverted.size)
# crit[0] = rotating_criteria(image_inverted, angles[0])
# crit[-1] = rotating_criteria(image_inverted, angles[-1])
#
# for ic in range(1, ROTATION_N_TO_SPLIT):
# angles[ic] = angles[0] + (angles[-1] - angles[0]) * ic / ROTATION_N_TO_SPLIT
# crit[ic] = rotating_criteria(image_inverted, angles[ic])
# max_point = (np.argmin(crit) + ROTATION_N_TO_SPLIT - np.argmin(crit[::-1])) // 2
# angles[0] = angles[max(max_point - 2, 0)]
# angles[-1] = angles[min(max_point + 2, ROTATION_N_TO_SPLIT)]
# crit[0] = crit[max(max_point - 2, 0)]
# crit[-1] = crit[min(max_point + 2, ROTATION_N_TO_SPLIT)]
# print('new borders: ', angles[0], angles[-1])
# max_point = (np.argmin(crit) + ROTATION_N_TO_SPLIT - np.argmin(crit[::-1])) // 2
# opt_angle = angles[max_point]
# opt_criteria = crit[max_point]
#
# print('opt_angle: ', opt_angle, ', criteria: ', opt_criteria)
opt_angle = 0
else:
# take existing angle
opt_angle = angle
# final rotation
if opt_angle != 0:
tmp_image = image.rotate(opt_angle, expand=1)
bg_mask = Image.new(mode='L', size=image.size, color=255).rotate(opt_angle, expand=1)
bg = Image.new(mode='L', size=tmp_image.size, color=255)
bg.paste(tmp_image, mask=bg_mask)
return bg, opt_angle
return image, 0
def crop_image(image, borders=None):
print(image.size)
width, height = image.size
image_array = (np.ones(shape=(height, width), dtype='uint8') * 255
- np.array(image.getdata()).astype('uint8').reshape((height, width)))
if borders is None:
# search optimal borders to crop
hist_u_to_b = (((image_array.max(axis=1) - image_array.min(axis=1)) > CROP_MIN_MAX_GAP)
| (image_array.mean(axis=1) > CROP_SIGNIFICANT_MEAN))
hist_l_to_r = (((np.max(image_array, axis=0) - np.min(image_array, axis=0)) > CROP_MIN_MAX_GAP)
| (np.mean(image_array, axis=0) > CROP_SIGNIFICANT_MEAN))
print(hist_l_to_r.shape, hist_u_to_b.shape)
left_border = int(max(np.nonzero(hist_l_to_r)[0][0] - 1, 0))
right_border = int(min(np.nonzero(hist_l_to_r)[0][-1] + 1, width))
up_border = int(max(np.nonzero(hist_u_to_b)[0][0] - 1, 0))
bottom_border = int(min(np.nonzero(hist_u_to_b)[0][-1] + 1, height))
borders = {'left_border': left_border,
'right_border': right_border,
'up_border': up_border,
'bottom_border': bottom_border}
else:
# take existing borders
left_border = borders['left_border']
right_border = borders['right_border']
up_border = borders['up_border']
bottom_border = borders['bottom_border']
image_array = 255 - image_array[up_border:bottom_border, left_border:right_border]
return Image.fromarray(image_array), borders
class ImageToMark:
def __init__(self, image_id):
self.image_id = image_id
self._image = None
@property
def markdown(self):
"""planning markdown is saved here (redirection to database)"""
return db[self.basic_image_id]
@markdown.setter
def markdown(self, value):
db[self.image_id] = value
        # update hash set: there is a marked image with that hash
|
def random_image(seed):
random.seed(seed)
width = random.randint(128, 1024 + 1)
height = random.randint(128, 1024 + 1)
|
random_line_split
|
|
models.py
|
(rotated, box=(random.randrange(width // 2),
random.randrange(height // 2)))
d = ImageDraw.Draw(img)
n_steps = random.randrange(10, 20)
prev_point = [random.randrange(width), random.randrange(height)]
prev_horizontal = True
for _ in range(n_steps):
next_dir = random.randint(0, 1)
next_point = [0, 0]
if prev_horizontal:
next_point[0] = prev_point[0]
if next_dir == 0:
next_point[1] = random.randrange(prev_point[1] + 1)
else:
next_point[1] = random.randrange(prev_point[1] - 1, height)
else:
next_point[1] = prev_point[1]
if next_dir == 0:
next_point[0] = random.randrange(prev_point[0] + 1)
else:
next_point[0] = random.randrange(prev_point[0] - 1, width)
prev_horizontal = not prev_horizontal
d.line(prev_point + next_point, fill=0, width=3)
prev_point = next_point
return img
def transform_image(image, angle=None, borders=None):
image = image.convert('L') # to grayscale
image, angle = rotate_image(image, angle=angle) # optimal rotating
image, borders = crop_image(image, borders=borders)
return image, angle, borders
def resize_image(image, max_dimension):
ratio = min(max_dimension / image.size[0], max_dimension / image.size[1])
return image.resize((int(image.size[0] * ratio),
int(image.size[1] * ratio)),
Image.ANTIALIAS)
def rotate_image(image, angle=None):
def rotating_criteria(image_to_rotate, angle):
tmp_image = image_to_rotate.rotate(angle, expand=1)
(width, height) = tmp_image.size
image_array = np.array(tmp_image.getdata()).astype('uint8').reshape((height, width))
# criteria = (np.max(np.sum(image_array, axis=0)) / height,
# np.max(np.sum(image_array, axis=1)) / width)
criteria = [None, None]
for axis in [0, 1]:
# sum_over_axis = image_array.sum(axis=axis)
# sum_over_axis = ((sum_over_axis[1:-1] > sum_over_axis[:-2]*10)
# | (sum_over_axis[1:-1] > sum_over_axis[2:]*10))
sum_over_axis = image_array.mean(axis=axis) > 5
sum_over_axis = np.nonzero(sum_over_axis)[0]
if len(sum_over_axis) > 0:
criteria[axis] = sum_over_axis[-1] - sum_over_axis[0]
else:
criteria[axis] = 1000000
return min(criteria)
print('basic image: ', image.size)
if angle is None:
# turn auto-rotating off
# search optimal angle to rotate
# current_resize_level = 0
# angles = [-45.0]
# angles += [0] * (ROTATION_N_TO_SPLIT - 1)
# angles += [45.0]
# crit = [None] * (ROTATION_N_TO_SPLIT + 1)
# image_inverted = None
# while (angles[-1] - angles[0]) > 0.1:
        #             # resize the image if needed
# if current_resize_level != len(ROTATION_RESIZING_LEVELS):
# if (angles[-1] - angles[0]) < ROTATION_RESIZING_LEVELS[current_resize_level]['angle_diff']:
# image_inverted = resize_image(invert(image), ROTATION_RESIZING_LEVELS[current_resize_level]['size'])
# current_resize_level += 1
# print('image inverted: ', image_inverted.size)
# crit[0] = rotating_criteria(image_inverted, angles[0])
# crit[-1] = rotating_criteria(image_inverted, angles[-1])
#
# for ic in range(1, ROTATION_N_TO_SPLIT):
# angles[ic] = angles[0] + (angles[-1] - angles[0]) * ic / ROTATION_N_TO_SPLIT
# crit[ic] = rotating_criteria(image_inverted, angles[ic])
# max_point = (np.argmin(crit) + ROTATION_N_TO_SPLIT - np.argmin(crit[::-1])) // 2
# angles[0] = angles[max(max_point - 2, 0)]
# angles[-1] = angles[min(max_point + 2, ROTATION_N_TO_SPLIT)]
# crit[0] = crit[max(max_point - 2, 0)]
# crit[-1] = crit[min(max_point + 2, ROTATION_N_TO_SPLIT)]
# print('new borders: ', angles[0], angles[-1])
# max_point = (np.argmin(crit) + ROTATION_N_TO_SPLIT - np.argmin(crit[::-1])) // 2
# opt_angle = angles[max_point]
# opt_criteria = crit[max_point]
#
# print('opt_angle: ', opt_angle, ', criteria: ', opt_criteria)
opt_angle = 0
else:
# take existing angle
opt_angle = angle
# final rotation
if opt_angle != 0:
tmp_image = image.rotate(opt_angle, expand=1)
bg_mask = Image.new(mode='L', size=image.size, color=255).rotate(opt_angle, expand=1)
bg = Image.new(mode='L', size=tmp_image.size, color=255)
bg.paste(tmp_image, mask=bg_mask)
return bg, opt_angle
return image, 0
def crop_image(image, borders=None):
print(image.size)
width, height = image.size
image_array = (np.ones(shape=(height, width), dtype='uint8') * 255
- np.array(image.getdata()).astype('uint8').reshape((height, width)))
if borders is None:
# search optimal borders to crop
hist_u_to_b = (((image_array.max(axis=1) - image_array.min(axis=1)) > CROP_MIN_MAX_GAP)
| (image_array.mean(axis=1) > CROP_SIGNIFICANT_MEAN))
hist_l_to_r = (((np.max(image_array, axis=0) - np.min(image_array, axis=0)) > CROP_MIN_MAX_GAP)
| (np.mean(image_array, axis=0) > CROP_SIGNIFICANT_MEAN))
print(hist_l_to_r.shape, hist_u_to_b.shape)
left_border = int(max(np.nonzero(hist_l_to_r)[0][0] - 1, 0))
right_border = int(min(np.nonzero(hist_l_to_r)[0][-1] + 1, width))
up_border = int(max(np.nonzero(hist_u_to_b)[0][0] - 1, 0))
bottom_border = int(min(np.nonzero(hist_u_to_b)[0][-1] + 1, height))
borders = {'left_border': left_border,
'right_border': right_border,
'up_border': up_border,
'bottom_border': bottom_border}
else:
# take existing borders
left_border = borders['left_border']
right_border = borders['right_border']
up_border = borders['up_border']
bottom_border = borders['bottom_border']
image_array = 255 - image_array[up_border:bottom_border, left_border:right_border]
return Image.fromarray(image_array), borders
class ImageToMark:
def __init__(self, image_id):
self.image_id = image_id
self._image = None
@property
def markdown(self):
"""planning markdown is saved here (redirection to database)"""
return db[self.basic_image_id]
@markdown.setter
def markdown(self, value):
db[self.image_id] = value
        # update hash set: there is a marked image with that hash
image_hash = self.hash
marked_images = marked_hashes.get(image_hash, [])
if self.image_id not in marked_images:
marked_hashes[image_hash] = marked_images + [self.image_id]
@property
def image(self):
if self._image is None:
# self._image = random_image(self.image_id)
self._image = load_image_from_url(self.url)
# self._image = resize_image(self._image, 400)
basic_image_id = self.basic_image_id
angle = db.get_full_item(basic_image_id).get('angle', None)
borders = db.get_full_item(basic_image_id).get('borders', None)
(self._image,
angle,
borders) = transform_image(self._image, angle=angle, borders=borders)
db.get_full_item(self.image_id)['angle'] = angle
db.get_full_item(self.image_id)['borders'] = borders
return self._image
@property
def url(self):
return db.get_full_item(self.
|
image_id)['url']
@property
def dupli
|
identifier_body
|
|
rpc_test.go
|
)
defer cl.close()
var err error
snd := func(chunk string) {
if err == nil {
err = cl.send(chunk)
}
}
// Send the command "write teststream 10\r\nabcdefghij\r\n" in multiple chunks
// Nagle's algorithm is disabled on a write, so the server should get these in separate TCP packets.
snd("wr")
time.Sleep(10 * time.Millisecond)
snd("ite test")
time.Sleep(10 * time.Millisecond)
snd("stream 1")
time.Sleep(10 * time.Millisecond)
snd("0\r\nabcdefghij\r")
time.Sleep(10 * time.Millisecond)
snd("\n")
var m *Msg
m, err = cl.rcv()
expect(t, m, &Msg{Kind: 'O'}, "writing in chunks should work", err)
}
func TestRPC_Batch(t *testing.T) {
// Send multiple commands in one batch, expect multiple responses
cl := mkClientUrl(t, leaderUrl)
defer cl.close()
cmds := "write batch1 3\r\nabc\r\n" +
"write batch2 4\r\ndefg\r\n" +
"read batch1\r\n"
cl.send(cmds)
m, err := cl.rcv()
expect(t, m, &Msg{Kind: 'O'}, "write batch1 success", err)
m, err = cl.rcv()
expect(t, m, &Msg{Kind: 'O'}, "write batch2 success", err)
m, err = cl.rcv()
expect(t, m, &Msg{Kind: 'C', Contents: []byte("abc")}, "read batch1", err)
}
func PTestRPC_BasicTimer(t *testing.T)
|
m, err = cl.write("cs733", str, 2)
expect(t, m, &Msg{Kind: 'O'}, "file recreated", err)
	// Overwrite the file with an expiry time of 3 seconds. This becomes the new expiry time.
	m, err = cl.write("cs733", str, 3)
	expect(t, m, &Msg{Kind: 'O'}, "file overwritten with exptime=3", err)
	// The last expiry time was 3 seconds, so the file should still be around 1 second later.
	time.Sleep(1 * time.Second)
	// Expect the file to not have expired.
	m, err = cl.read("cs733")
	expect(t, m, &Msg{Kind: 'C', Contents: []byte(str)}, "file to not expire until 3 sec", err)
	time.Sleep(3 * time.Second)
	// 4 seconds since the last write. Expect the file to have expired
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'F'}, "file not found after 4 sec", err)
// Create the file with an expiry time of 1 sec. We're going to delete it
// then immediately create it. The new file better not get deleted.
m, err = cl.write("cs733", str, 1)
expect(t, m, &Msg{Kind: 'O'}, "file created for delete", err)
m, err = cl.delete("cs733")
expect(t, m, &Msg{Kind: 'O'}, "deleted ok", err)
m, err = cl.write("cs733", str, 0) // No expiry
expect(t, m, &Msg{Kind: 'O'}, "file recreated", err)
time.Sleep(1100 * time.Millisecond) // A little more than 1 sec
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'C'}, "file should not be deleted", err)
}
// nclients write to the same file. At the end the file should be
// any one clients' last write
func PTestRPC_ConcurrentWrites(t *testing.T) {
nclients := 3
niters := 3
clients := make([]*Client, nclients)
for i := 0; i < nclients; i++ {
cl := mkClientUrl(t, leaderUrl)
if cl == nil {
t.Fatalf("Unable to create client #%d", i)
}
defer cl.close()
clients[i] = cl
}
errCh := make(chan error, nclients)
var sem sync.WaitGroup // Used as a semaphore to coordinate goroutines to begin concurrently
sem.Add(1)
ch := make(chan *Msg, nclients*niters) // channel for all replies
for i := 0; i < nclients; i++ {
go func(i int, cl *Client) {
sem.Wait()
for j := 0; j < niters; j++ {
str := fmt.Sprintf("cl %d %d", i, j)
m, err := cl.write("concWrite", str, 0)
if err != nil {
errCh <- err
break
} else {
ch <- m
}
}
}(i, clients[i])
}
time.Sleep(3000 * time.Millisecond) // give goroutines a chance
sem.Done() // Go!
time.Sleep(10 * time.Second)
// There should be no errors
for i := 0; i < nclients*niters; i++ {
select {
case m := <-ch:
if m.Kind != 'O' {
t.Fatalf("Concurrent write failed with kind=%c", m.Kind)
}
case err := <-errCh:
t.Fatal(err)
}
}
m, _ := clients[0].read("concWrite")
// Ensure the contents are of the form "cl <i> 9"
// The last write of any client ends with " 9"
if !(m.Kind == 'C' && strings.HasSuffix(string(m.Contents), " 2")) {
t.Fatalf("Expected to be able to read after 1000 writes. Got msg = %v", m)
}
}
// nclients cas to the same file. At the end the file should be any one clients' last write.
// The only difference between this test and the ConcurrentWrite test above is that each
// client loops around until each CAS succeeds. The number of concurrent clients has been
// reduced to keep the testing time within limits.
func PTestRPC_ConcurrentCas(t *testing.T) {
nclients := 3
niters := 3
clients := make([]*Client, nclients)
for i := 0; i < nclients; i++ {
cl := mkClientUrl(t, leaderUrl)
if cl == nil {
t.Fatalf("Unable to create client #%d", i)
}
defer cl.close()
clients[i] = cl
}
var sem sync.WaitGroup // Used as a semaphore to coordinate goroutines to *begin* concurrently
sem.Add(1)
m, _ := clients[0].write("concCas", "first", 0)
ver := m.Version
if m.Kind != 'O' || ver == 0 {
t.Fatalf("Expected write to succeed and return version")
}
var wg sync.WaitGroup
wg.Add(nclients)
errorCh := make(chan error, nclients)
for i := 0; i < nclients; i++ {
go func(i int, ver int, cl *Client) {
sem.Wait()
defer wg.Done()
for j := 0; j < niters; j++ {
str := fmt.Sprintf("cl %d %d", i, j)
for {
m, err := cl.cas("concCas", ver, str, 0)
if err != nil {
errorCh <- err
return
} else if m.Kind == 'O' {
break
} else if m.Kind != 'V' {
errorCh <- errors.New(fmt.Sprintf("Expected 'V' msg, got %c", m.Kind))
return
}
ver = m.Version // retry with latest version
}
}
}(i, ver, clients[i])
}
sem.Done() // Start goroutines
time.Sleep(1000 * time.Millisecond) // give goroutines a chance
wg.Wait() // Wait for them to finish
time.Sleep(10 * time.Second)
select {
case e := <-errorCh:
t.Fatalf("Error received while doing cas: %v", e)
default: // no errors
}
m, _ =
|
{
cl := mkClientUrl(t, leaderUrl)
defer cl.close()
// Write file cs733, with expiry time of 2 seconds
str := "Cloud fun"
m, err := cl.write("cs733", str, 2)
expect(t, m, &Msg{Kind: 'O'}, "write success", err)
// Expect to read it back immediately.
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'C', Contents: []byte(str)}, "read my cas", err)
time.Sleep(3 * time.Second)
// Expect to not find the file after expiry
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'F'}, "file not found", err)
// Recreate the file with expiry time of 1 second
|
identifier_body
|
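The concurrency tests above (PTestRPC_ConcurrentWrites, PTestRPC_ConcurrentCas) use a sync.WaitGroup purely as a one-shot start gate: every worker blocks in Wait() until the main goroutine calls Done() once, so the writers begin at roughly the same time. A minimal self-contained sketch of just that pattern follows; the package name and worker body are illustrative, not part of rpc_test.go.

// Sketch of the WaitGroup-as-start-gate pattern used by the concurrency tests.
package main

import (
	"fmt"
	"sync"
)

func main() {
	var gate sync.WaitGroup // released once so all workers start together
	gate.Add(1)

	var done sync.WaitGroup // waits for the workers to finish
	for i := 0; i < 3; i++ {
		done.Add(1)
		go func(i int) {
			defer done.Done()
			gate.Wait() // block here until the gate opens
			fmt.Println("worker", i, "started")
		}(i)
	}

	gate.Done() // open the gate: every worker's Wait returns
	done.Wait()
}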
rpc_test.go
|
)
defer cl.close()
var err error
snd := func(chunk string) {
if err == nil {
err = cl.send(chunk)
}
}
// Send the command "write teststream 10\r\nabcdefghij\r\n" in multiple chunks
// Nagle's algorithm is disabled on a write, so the server should get these in separate TCP packets.
snd("wr")
time.Sleep(10 * time.Millisecond)
snd("ite test")
time.Sleep(10 * time.Millisecond)
snd("stream 1")
time.Sleep(10 * time.Millisecond)
snd("0\r\nabcdefghij\r")
time.Sleep(10 * time.Millisecond)
snd("\n")
var m *Msg
m, err = cl.rcv()
expect(t, m, &Msg{Kind: 'O'}, "writing in chunks should work", err)
}
func TestRPC_Batch(t *testing.T) {
// Send multiple commands in one batch, expect multiple responses
cl := mkClientUrl(t, leaderUrl)
defer cl.close()
cmds := "write batch1 3\r\nabc\r\n" +
"write batch2 4\r\ndefg\r\n" +
"read batch1\r\n"
cl.send(cmds)
m, err := cl.rcv()
expect(t, m, &Msg{Kind: 'O'}, "write batch1 success", err)
m, err = cl.rcv()
expect(t, m, &Msg{Kind: 'O'}, "write batch2 success", err)
m, err = cl.rcv()
expect(t, m, &Msg{Kind: 'C', Contents: []byte("abc")}, "read batch1", err)
}
func PTestRPC_BasicTimer(t *testing.T) {
cl := mkClientUrl(t, leaderUrl)
defer cl.close()
// Write file cs733, with expiry time of 2 seconds
str := "Cloud fun"
m, err := cl.write("cs733", str, 2)
expect(t, m, &Msg{Kind: 'O'}, "write success", err)
// Expect to read it back immediately.
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'C', Contents: []byte(str)}, "read my cas", err)
time.Sleep(3 * time.Second)
// Expect to not find the file after expiry
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'F'}, "file not found", err)
	// Recreate the file with an expiry time of 2 seconds
m, err = cl.write("cs733", str, 2)
expect(t, m, &Msg{Kind: 'O'}, "file recreated", err)
	// Overwrite the file with an expiry time of 3 seconds. This becomes the new expiry time.
	m, err = cl.write("cs733", str, 3)
	expect(t, m, &Msg{Kind: 'O'}, "file overwritten with exptime=3", err)
	// The last expiry time was 3 seconds, so the file should still be around 1 second later.
	time.Sleep(1 * time.Second)
	// Expect the file to not have expired.
	m, err = cl.read("cs733")
	expect(t, m, &Msg{Kind: 'C', Contents: []byte(str)}, "file to not expire until 3 sec", err)
	time.Sleep(3 * time.Second)
	// 4 seconds since the last write. Expect the file to have expired
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'F'}, "file not found after 4 sec", err)
// Create the file with an expiry time of 1 sec. We're going to delete it
// then immediately create it. The new file better not get deleted.
m, err = cl.write("cs733", str, 1)
expect(t, m, &Msg{Kind: 'O'}, "file created for delete", err)
m, err = cl.delete("cs733")
expect(t, m, &Msg{Kind: 'O'}, "deleted ok", err)
m, err = cl.write("cs733", str, 0) // No expiry
expect(t, m, &Msg{Kind: 'O'}, "file recreated", err)
time.Sleep(1100 * time.Millisecond) // A little more than 1 sec
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'C'}, "file should not be deleted", err)
}
// nclients write to the same file. At the end the file should be
// any one clients' last write
func PTestRPC_ConcurrentWrites(t *testing.T) {
nclients := 3
niters := 3
clients := make([]*Client, nclients)
for i := 0; i < nclients; i++ {
cl := mkClientUrl(t, leaderUrl)
if cl == nil {
t.Fatalf("Unable to create client #%d", i)
}
defer cl.close()
clients[i] = cl
}
errCh := make(chan error, nclients)
var sem sync.WaitGroup // Used as a semaphore to coordinate goroutines to begin concurrently
sem.Add(1)
ch := make(chan *Msg, nclients*niters) // channel for all replies
for i := 0; i < nclients; i++ {
go func(i int, cl *Client) {
sem.Wait()
for j := 0; j < niters; j++ {
str := fmt.Sprintf("cl %d %d", i, j)
m, err := cl.write("concWrite", str, 0)
if err != nil {
errCh <- err
break
} else {
ch <- m
}
}
}(i, clients[i])
}
time.Sleep(3000 * time.Millisecond) // give goroutines a chance
sem.Done() // Go!
time.Sleep(10 * time.Second)
// There should be no errors
for i := 0; i < nclients*niters; i++ {
select {
case m := <-ch:
if m.Kind != 'O' {
t.Fatalf("Concurrent write failed with kind=%c", m.Kind)
}
case err := <-errCh:
t.Fatal(err)
}
}
m, _ := clients[0].read("concWrite")
// Ensure the contents are of the form "cl <i> 9"
// The last write of any client ends with " 9"
if !(m.Kind == 'C' && strings.HasSuffix(string(m.Contents), " 2")) {
t.Fatalf("Expected to be able to read after 1000 writes. Got msg = %v", m)
}
}
// nclients cas to the same file. At the end the file should be any one clients' last write.
// The only difference between this test and the ConcurrentWrite test above is that each
// client loops around until each CAS succeeds. The number of concurrent clients has been
// reduced to keep the testing time within limits.
func PTestRPC_ConcurrentCas(t *testing.T) {
nclients := 3
niters := 3
clients := make([]*Client, nclients)
for i := 0; i < nclients; i++ {
cl := mkClientUrl(t, leaderUrl)
if cl == nil {
t.Fatalf("Unable to create client #%d", i)
}
defer cl.close()
clients[i] = cl
}
var sem sync.WaitGroup // Used as a semaphore to coordinate goroutines to *begin* concurrently
sem.Add(1)
m, _ := clients[0].write("concCas", "first", 0)
ver := m.Version
if m.Kind != 'O' || ver == 0 {
t.Fatalf("Expected write to succeed and return version")
}
var wg sync.WaitGroup
wg.Add(nclients)
errorCh := make(chan error, nclients)
for i := 0; i < nclients; i++ {
go func(i int, ver int, cl *Client) {
sem.Wait()
defer wg.Done()
for j := 0; j < niters; j++ {
|
m, err := cl.cas("concCas", ver, str, 0)
if err != nil {
errorCh <- err
return
} else if m.Kind == 'O' {
break
} else if m.Kind != 'V' {
errorCh <- errors.New(fmt.Sprintf("Expected 'V' msg, got %c", m.Kind))
return
}
ver = m.Version // retry with latest version
}
}
}(i, ver, clients[i])
}
sem.Done() // Start goroutines
time.Sleep(1000 * time.Millisecond) // give goroutines a chance
wg.Wait() // Wait for them to finish
time.Sleep(10 * time.Second)
select {
case e := <-errorCh:
t.Fatalf("Error received while doing cas: %v", e)
default: // no errors
}
m, _ =
|
str := fmt.Sprintf("cl %d %d", i, j)
for {
|
random_line_split
|
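The comment before PTestRPC_ConcurrentCas above spells out the retry discipline: when a cas fails with a version conflict, retry with the latest version until it succeeds. Below is a stripped-down, self-contained sketch of that loop against a hypothetical in-memory versioned store; Store and its Cas method are stand-ins for illustration, not the test's client or file-server API.

// Sketch only: an in-memory stand-in for a versioned store with compare-and-swap.
package main

import (
	"fmt"
	"sync"
)

type Store struct {
	mu      sync.Mutex
	value   string
	version int
}

// Cas succeeds only if the caller holds the current version.
func (s *Store) Cas(version int, value string) (latest int, ok bool) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if version != s.version {
		return s.version, false // conflict: report the current version
	}
	s.value = value
	s.version++
	return s.version, true
}

// casWithRetry mirrors the test's loop: retry with the latest version until success.
func casWithRetry(s *Store, version int, value string) int {
	for {
		latest, ok := s.Cas(version, value)
		if ok {
			return latest
		}
		version = latest // retry with latest version
	}
}

func main() {
	s := &Store{}
	s.Cas(0, "first")                         // version becomes 1
	fmt.Println(casWithRetry(s, 0, "second")) // stale version: one conflict, then success (prints 2)
}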
rpc_test.go
|
clients)
for i := 0; i < nclients; i++ {
cl := mkClientUrl(t, leaderUrl)
if cl == nil {
t.Fatalf("Unable to create client #%d", i)
}
defer cl.close()
clients[i] = cl
}
var sem sync.WaitGroup // Used as a semaphore to coordinate goroutines to *begin* concurrently
sem.Add(1)
m, _ := clients[0].write("concCas", "first", 0)
ver := m.Version
if m.Kind != 'O' || ver == 0 {
t.Fatalf("Expected write to succeed and return version")
}
var wg sync.WaitGroup
wg.Add(nclients)
errorCh := make(chan error, nclients)
for i := 0; i < nclients; i++ {
go func(i int, ver int, cl *Client) {
sem.Wait()
defer wg.Done()
for j := 0; j < niters; j++ {
str := fmt.Sprintf("cl %d %d", i, j)
for {
m, err := cl.cas("concCas", ver, str, 0)
if err != nil {
errorCh <- err
return
} else if m.Kind == 'O' {
break
} else if m.Kind != 'V' {
errorCh <- errors.New(fmt.Sprintf("Expected 'V' msg, got %c", m.Kind))
return
}
ver = m.Version // retry with latest version
}
}
}(i, ver, clients[i])
}
sem.Done() // Start goroutines
time.Sleep(1000 * time.Millisecond) // give goroutines a chance
wg.Wait() // Wait for them to finish
time.Sleep(10 * time.Second)
select {
case e := <-errorCh:
t.Fatalf("Error received while doing cas: %v", e)
default: // no errors
}
m, _ = clients[0].read("concCas")
if !(m.Kind == 'C' && strings.HasSuffix(string(m.Contents), " 2")) {
t.Fatalf("Expected to be able to read after 1000 writes. Got msg.Kind = %d, msg.Contents=%s", m.Kind, m.Contents)
}
}
func PTest_Kill_Leader_And_Revive(t *testing.T) {
leaderId := id_from_url[leaderUrl]
leaderCl := mkClientUrl(t, leaderUrl)
data := "Some data before kill"
m, err := leaderCl.write("killers.txt", data, 0)
expect(t, m, &Msg{Kind: 'O'}, "write success", err)
time.Sleep(2 * time.Second)
fsp[leaderId-1].Process.Kill()
//fmt.Println("Killed: ", err, leaderId)
time.Sleep(4 * time.Second) //for elections
for {
leaderUrl = "localhost:" + strconv.Itoa(8000+leaderId%num)
//fmt.Println(leaderUrl)
leaderCl := mkClientUrl(t, leaderUrl)
m, err := leaderCl.read("killers.txt")
//fmt.Println("message2: ", m)
content := string(m.Contents)
if m.Kind == 'R' {
if content != "-1" {
leaderUrl = content
//fmt.Println("pppp")
leaderCl := mkClientUrl(t, leaderUrl)
m, err = leaderCl.read("killers.txt")
expect(t, m, &Msg{Kind: 'C'}, data, err)
break
}
} else if m.Kind == 'C' {
expect(t, m, &Msg{Kind: 'C'}, data, err)
break
} else {
t.Error("Committed but not found on other nodes", m)
}
time.Sleep(100 * time.Millisecond)
}
//fmt.Println("ddddd")
new_leader_id := id_from_url[leaderUrl]
data2 := "new data for file"
leaderCl = mkClientUrl(t, leaderUrl)
leaderCl.write("killers.txt", data2, 0)
expect(t, m, &Msg{Kind: 'O'}, "write success", err)
fsp[new_leader_id-1].Process.Kill()
fsp[leaderId-1] = exec.Command("./assignment4", strconv.Itoa(leaderId))
fsp[leaderId-1].Stdout = os.Stdout
fsp[leaderId-1].Stderr = os.Stdout
fsp[leaderId-1].Stdin = os.Stdin
fsp[leaderId-1].Start()
time.Sleep(1 * time.Second)
for {
leaderUrl = "localhost:" + strconv.Itoa(8000+leaderId-1)
leaderCl := mkClientUrl(t, leaderUrl)
m, err := leaderCl.read("killers.txt")
//fmt.Println("message3: ", m)
content := string(m.Contents)
if m.Kind == 'R' {
if content != "-1" {
leaderUrl = content
leaderCl := mkClientUrl(t, leaderUrl)
m, err = leaderCl.read("killers.txt")
expect(t, m, &Msg{Kind: 'C'}, data2, err)
break
}
} else if m.Kind == 'C' {
t.Error("Leader elected although log might be incomplete", m)
break
} else {
t.Error("Committed but not found on other nodes", m)
}
time.Sleep(100 * time.Millisecond)
}
}
func Test_Kill_all(t *testing.T) {
for _, fs := range fsp {
fs.Process.Kill()
}
time.Sleep(1 * time.Second)
}
func Test_Clean(t *testing.T) {
for i := 1; i <= num; i++ {
str := strconv.Itoa(i)
os.RemoveAll("mylog" + str)
os.Remove("stateStoreFile" + str)
}
}
//----------------------------------------------------------------------
// Utility functions
func expect(t *testing.T, response *Msg, expected *Msg, errstr string, err error) {
if err != nil {
t.Fatal("Unexpected error: " + err.Error())
}
ok := true
if response.Kind != expected.Kind {
ok = false
errstr += fmt.Sprintf(" Got kind='%c', expected '%c'", response.Kind, expected.Kind)
}
if expected.Version > 0 && expected.Version != response.Version {
ok = false
errstr += " Version mismatch"
}
if response.Kind == 'C' {
if expected.Contents != nil &&
bytes.Compare(response.Contents, expected.Contents) != 0 {
ok = false
}
}
if !ok {
t.Fatal("Expected " + errstr)
}
}
type Msg struct {
// Kind = the first character of the command. For errors, it
// is the first letter after "ERR_", ('V' for ERR_VERSION, for
// example), except for "ERR_CMD_ERR", for which the kind is 'M'
Kind byte
Filename string
Contents []byte
Numbytes int
Exptime int // expiry time in seconds
Version int
}
func (cl *Client) read(filename string) (*Msg, error) {
cmd := "read " + filename + "\r\n"
return cl.sendRcv(cmd)
}
func (cl *Client) write(filename string, contents string, exptime int) (*Msg, error) {
var cmd string
if exptime == 0 {
cmd = fmt.Sprintf("write %s %d\r\n", filename, len(contents))
} else {
cmd = fmt.Sprintf("write %s %d %d\r\n", filename, len(contents), exptime)
}
cmd += contents + "\r\n"
return cl.sendRcv(cmd)
}
func (cl *Client) cas(filename string, version int, contents string, exptime int) (*Msg, error) {
var cmd string
if exptime == 0 {
cmd = fmt.Sprintf("cas %s %d %d\r\n", filename, version, len(contents))
} else {
cmd = fmt.Sprintf("cas %s %d %d %d\r\n", filename, version, len(contents), exptime)
}
cmd += contents + "\r\n"
return cl.sendRcv(cmd)
}
func (cl *Client) delete(filename string) (*Msg, error) {
cmd := "delete " + filename + "\r\n"
return cl.sendRcv(cmd)
}
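// For reference, the exact byte streams the helpers above produce (derived
// directly from the fmt.Sprintf calls; the filename, contents and numbers are
// examples only):
//
//	read foo.txt\r\n
//	write foo.txt 5\r\nhello\r\n
//	write foo.txt 5 30\r\nhello\r\n   (exptime = 30)
//	cas foo.txt 7 5\r\nhello\r\n      (version = 7)
//	delete foo.txt\r\n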
var errNoConn = errors.New("Connection is closed")
type Client struct {
conn *net.TCPConn
reader *bufio.Reader // a bufio Reader wrapper over conn
}
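// The tests also call cl.send, cl.rcv, cl.sendRcv and cl.close, which are not
// shown in this excerpt. A minimal sketch of what they might look like (an
// assumption, not the original code; parseReply is hypothetical):
//
//	func (cl *Client) send(s string) error {
//		if cl.conn == nil {
//			return errNoConn
//		}
//		_, err := cl.conn.Write([]byte(s))
//		return err
//	}
//
//	func (cl *Client) rcv() (*Msg, error) {
//		line, err := cl.reader.ReadString('\n') // one reply line, e.g. "OK 3\r\n"
//		if err != nil {
//			return nil, err
//		}
//		return parseReply(cl.reader, line) // reads trailing contents for 'C' replies
//	}
//
//	func (cl *Client) sendRcv(cmd string) (*Msg, error) {
//		if err := cl.send(cmd); err != nil {
//			return nil, err
//		}
//		return cl.rcv()
//	}
//
//	func (cl *Client) close() {
//		if cl.conn != nil {
//			cl.conn.Close()
//			cl.conn = nil
//		}
//	}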
func mkClient(t *testing.T, host string, port int) *Client {
return mkClientUrl(t, host+":"+strconv.Itoa(port))
}
func mkClientUrl(t *testing.T, url string) *Client {
var client *Client
raddr, err := net.ResolveTCPAddr("tcp", url)
if err == nil {
conn, err := net.DialTCP("tcp", nil, raddr)
if err == nil {
client = &Client{conn: conn, reader: bufio.NewReader(conn)}
}
}
if err != nil
|
{
t.Fatal(err)
}
|
conditional_block
|
|
rpc_test.go
|
(t *testing.T) {
// Should be able to accept a few bytes at a time
cl := mkClientUrl(t, leaderUrl)
defer cl.close()
var err error
snd := func(chunk string) {
if err == nil {
err = cl.send(chunk)
}
}
// Send the command "write teststream 10\r\nabcdefghij\r\n" in multiple chunks
// Nagle's algorithm is disabled on a write, so the server should get these in separate TCP packets.
snd("wr")
time.Sleep(10 * time.Millisecond)
snd("ite test")
time.Sleep(10 * time.Millisecond)
snd("stream 1")
time.Sleep(10 * time.Millisecond)
snd("0\r\nabcdefghij\r")
time.Sleep(10 * time.Millisecond)
snd("\n")
var m *Msg
m, err = cl.rcv()
expect(t, m, &Msg{Kind: 'O'}, "writing in chunks should work", err)
}
func TestRPC_Batch(t *testing.T) {
// Send multiple commands in one batch, expect multiple responses
cl := mkClientUrl(t, leaderUrl)
defer cl.close()
cmds := "write batch1 3\r\nabc\r\n" +
"write batch2 4\r\ndefg\r\n" +
"read batch1\r\n"
cl.send(cmds)
m, err := cl.rcv()
expect(t, m, &Msg{Kind: 'O'}, "write batch1 success", err)
m, err = cl.rcv()
expect(t, m, &Msg{Kind: 'O'}, "write batch2 success", err)
m, err = cl.rcv()
expect(t, m, &Msg{Kind: 'C', Contents: []byte("abc")}, "read batch1", err)
}
func PTestRPC_BasicTimer(t *testing.T) {
cl := mkClientUrl(t, leaderUrl)
defer cl.close()
// Write file cs733, with expiry time of 2 seconds
str := "Cloud fun"
m, err := cl.write("cs733", str, 2)
expect(t, m, &Msg{Kind: 'O'}, "write success", err)
// Expect to read it back immediately.
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'C', Contents: []byte(str)}, "read my cas", err)
time.Sleep(3 * time.Second)
// Expect to not find the file after expiry
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'F'}, "file not found", err)
// Recreate the file with expiry time of 2 seconds
m, err = cl.write("cs733", str, 2)
expect(t, m, &Msg{Kind: 'O'}, "file recreated", err)
// Overwrite the file with expiry time of 3 seconds. This should be the new expiry time.
m, err = cl.write("cs733", str, 3)
expect(t, m, &Msg{Kind: 'O'}, "file overwritten with exptime=3", err)
// The last expiry time was 3 seconds. The file should still be around 1 second later
time.Sleep(1 * time.Second)
// Expect the file to not have expired.
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'C', Contents: []byte(str)}, "file should not expire until 3 sec", err)
time.Sleep(3 * time.Second)
// 4 seconds since the last write, past the 3-second expiry. Expect the file to have expired
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'F'}, "file not found after 3 sec", err)
// Create the file with an expiry time of 1 sec. We're going to delete it
// then immediately create it. The new file better not get deleted.
m, err = cl.write("cs733", str, 1)
expect(t, m, &Msg{Kind: 'O'}, "file created for delete", err)
m, err = cl.delete("cs733")
expect(t, m, &Msg{Kind: 'O'}, "deleted ok", err)
m, err = cl.write("cs733", str, 0) // No expiry
expect(t, m, &Msg{Kind: 'O'}, "file recreated", err)
time.Sleep(1100 * time.Millisecond) // A little more than 1 sec
m, err = cl.read("cs733")
expect(t, m, &Msg{Kind: 'C'}, "file should not be deleted", err)
}
// nclients write to the same file. At the end the file should be
// any one client's last write
func PTestRPC_ConcurrentWrites(t *testing.T) {
nclients := 3
niters := 3
clients := make([]*Client, nclients)
for i := 0; i < nclients; i++ {
cl := mkClientUrl(t, leaderUrl)
if cl == nil {
t.Fatalf("Unable to create client #%d", i)
}
defer cl.close()
clients[i] = cl
}
errCh := make(chan error, nclients)
var sem sync.WaitGroup // Used as a semaphore to coordinate goroutines to begin concurrently
sem.Add(1)
ch := make(chan *Msg, nclients*niters) // channel for all replies
for i := 0; i < nclients; i++ {
go func(i int, cl *Client) {
sem.Wait()
for j := 0; j < niters; j++ {
str := fmt.Sprintf("cl %d %d", i, j)
m, err := cl.write("concWrite", str, 0)
if err != nil {
errCh <- err
break
} else {
ch <- m
}
}
}(i, clients[i])
}
time.Sleep(3000 * time.Millisecond) // give goroutines a chance
sem.Done() // Go!
time.Sleep(10 * time.Second)
// There should be no errors
for i := 0; i < nclients*niters; i++ {
select {
case m := <-ch:
if m.Kind != 'O' {
t.Fatalf("Concurrent write failed with kind=%c", m.Kind)
}
case err := <-errCh:
t.Fatal(err)
}
}
m, _ := clients[0].read("concWrite")
// Ensure the contents are of the form "cl <i> 2"
// The last write of any client ends with " 2" (niters-1)
if !(m.Kind == 'C' && strings.HasSuffix(string(m.Contents), " 2")) {
t.Fatalf("Expected to be able to read after the concurrent writes. Got msg = %v", m)
}
}
// nclients cas to the same file. At the end the file should be any one client's last write.
// The only difference between this test and the ConcurrentWrite test above is that each
// client loops around until each CAS succeeds. The number of concurrent clients has been
// reduced to keep the testing time within limits.
func PTestRPC_ConcurrentCas(t *testing.T) {
nclients := 3
niters := 3
clients := make([]*Client, nclients)
for i := 0; i < nclients; i++ {
cl := mkClientUrl(t, leaderUrl)
if cl == nil {
t.Fatalf("Unable to create client #%d", i)
}
defer cl.close()
clients[i] = cl
}
var sem sync.WaitGroup // Used as a semaphore to coordinate goroutines to *begin* concurrently
sem.Add(1)
m, _ := clients[0].write("concCas", "first", 0)
ver := m.Version
if m.Kind != 'O' || ver == 0 {
t.Fatalf("Expected write to succeed and return version")
}
var wg sync.WaitGroup
wg.Add(nclients)
errorCh := make(chan error, nclients)
for i := 0; i < nclients; i++ {
go func(i int, ver int, cl *Client) {
sem.Wait()
defer wg.Done()
for j := 0; j < niters; j++ {
str := fmt.Sprintf("cl %d %d", i, j)
for {
m, err := cl.cas("concCas", ver, str, 0)
if err != nil {
errorCh <- err
return
} else if m.Kind == 'O' {
break
} else if m.Kind != 'V' {
errorCh <- errors.New(fmt.Sprintf("Expected 'V' msg, got %c", m.Kind))
return
}
ver = m.Version // retry with latest version
}
}
}(i, ver, clients[i])
}
sem.Done() // Start goroutines
time.Sleep(1000 * time.Millisecond) // give goroutines a chance
wg.Wait() // Wait for them to finish
time.Sleep(10 * time.Second)
select {
case
|
TestRPC_Chunks
|
identifier_name
|
|
dcy.go
|
{"192.168.0.1", 12345},
{"10.0.13.0", 12347},
}
cache["syslog"] = []Address{
{"127.0.0.1", 9514},
}
cache["statsd"] = []Address{
{"127.0.0.1", 8125},
}
cache["mongo"] = []Address{
{"127.0.0.1", 27017},
{"192.168.10.123", 27017},
}
cache["nsqlookupd-http"] = []Address{
{"127.0.0.1", 4161},
}
// add federated service notation to cache for all existing services - {service-name}-{datacenter}
for k, v := range cache {
cache[fmt.Sprintf("%s-%s", k, dc)] = v
}
}
func mustConnect() {
if err := signal.WithExponentialBackoff(connect); err != nil {
log.Printf("Giving up connecting %s", consulAddr)
log.Fatal(err)
}
}
func connect() error {
config := api.DefaultConfig()
config.Address = consulAddr
c, err := api.NewClient(config)
if err != nil {
log.S("addr", consulAddr).Error(err)
return err
}
consul = c
if err := self(); err != nil {
log.S("addr", consulAddr).Error(err)
return err
}
// add local dc if it's not set
if !contains(federatedDcs, dc) {
federatedDcs = append(federatedDcs, dc)
}
// wait for dependencies to appear in Consul
if e, ok := os.LookupEnv(EnvWait); ok && e != "" {
services := strings.Split(e, ",")
for _, s := range services {
if _, err := Services(s); err != nil {
log.S("addr", consulAddr).S("service", s).Error(err)
return err
}
}
}
return nil
}
func ConnectTo(addr string) error {
if consul != nil {
return nil
}
consulAddr = addr
return signal.WithExponentialBackoff(connect)
}
func serviceName(fqdn, domain string) (string, string) {
rx := regexp.MustCompile(fmt.Sprintf(`^(\S*)\.service\.*(\S*)*\.%s$`, domain))
ms := rx.FindStringSubmatch(fqdn)
if len(ms) < 2 {
return fqdn, ""
}
if len(ms) > 2 {
return ms[1], ms[2]
}
return ms[1], ""
}
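// exampleServiceName is an illustrative sketch, not part of the original
// package; the FQDNs below are hypothetical and assume domain == "sd".
func exampleServiceName() {
	name, ldc := serviceName("mongo.service.dc2.sd", "sd")
	fmt.Printf("%s %q\n", name, ldc) // mongo "dc2"
	name, ldc = serviceName("mongo.service.sd", "sd")
	fmt.Printf("%s %q\n", name, ldc) // mongo "" (no datacenter in the name)
	name, ldc = serviceName("plain-host", "sd")
	fmt.Printf("%s %q\n", name, ldc) // plain-host "" (returned unchanged)
}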
func parseConsulServiceEntries(ses []*api.ServiceEntry) Addresses {
srvs := []Address{}
for _, se := range ses {
addr := se.Service.Address
if addr == "" {
addr = se.Node.Address
}
srvs = append(srvs, Address{
Address: addr,
Port: se.Service.Port,
})
}
return srvs
}
func updateCache(tag, name, ldc string, srvs Addresses) {
l.Lock()
defer l.Unlock()
key := cacheKey(tag, name, ldc)
if srvs2, ok := cache[key]; ok {
if srvs2.Equal(srvs) {
return
}
}
cache[key] = srvs
cdc := ldc
if cdc == "" { // if not set, local dc is default
cdc = dc
}
// cache is updated only with services from specific datacenter
// but when notifying subscribers services from all of the datacenters are used
allServices := make([]Address, len(srvs))
copy(allServices, srvs)
for _, fdc := range federatedDcs {
if fdc == cdc {
continue
}
services, _, err := service(name, tag, &api.QueryOptions{Datacenter: fdc})
if err != nil {
continue
}
allServices = append(allServices, parseConsulServiceEntries(services)...)
}
nn := cacheKey(tag, name, "")
notify(nn, allServices)
}
func initializeCacheKey(tag, name, dc string) {
l.Lock()
defer l.Unlock()
key := cacheKey(tag, name, dc)
cache[key] = Addresses{}
}
func invalidateCache(tag, name, dc string) {
l.Lock()
defer l.Unlock()
delete(cache, cacheKey(tag, name, dc))
}
func cacheKey(tag, name, dc string) string {
var key string
if tag != "" {
key = fmt.Sprintf("%s-", tag)
}
if dc == "" {
return fmt.Sprintf("%s%s", key, name)
}
return fmt.Sprintf("%s%s-%s", key, name, dc)
}
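// exampleCacheKey is an illustrative sketch, not part of the original package;
// it shows the key formats produced for a few hypothetical inputs.
func exampleCacheKey() {
	fmt.Println(cacheKey("", "mongo", ""))          // mongo
	fmt.Println(cacheKey("", "mongo", "dc2"))       // mongo-dc2
	fmt.Println(cacheKey("http", "nsqlookupd", "")) // http-nsqlookupd
}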
func existsInCache(tag, name, dc string) bool {
l.RLock()
defer l.RUnlock()
_, ok := cache[cacheKey(tag, name, dc)]
return ok
}
func monitor(tag, name, dc string, startIndex uint64, serviceExistedOnStart bool) {
wi := startIndex
tries := 0
for {
qo := &api.QueryOptions{
WaitIndex: wi,
WaitTime: time.Minute * waitTimeMinutes,
AllowStale: true,
RequireConsistent: false,
Datacenter: dc,
}
ses, qm, err := service(name, tag, qo)
if err != nil {
tries++
if tries == queryRetries {
invalidateCache(tag, name, dc)
return
}
time.Sleep(time.Second * queryTimeoutSeconds)
continue
}
tries = 0
wi = qm.LastIndex
// monitor routine might be started for service that still doesn't exist but is expected to show up
// in that case don't send updates for non existing service and instead wait for it to show up
if !serviceExistedOnStart && len(ses) == 0 {
continue
}
updateCache(tag, name, dc, parseConsulServiceEntries(ses))
}
}
func service(service, tag string, qo *api.QueryOptions) ([]*api.ServiceEntry, *api.QueryMeta, error) {
ses, qm, err := consul.Health().Service(service, tag, false, qo)
if err != nil {
return nil, nil, err
}
// drop services that have a check which is neither "passing" nor "warning"
var filteredSes []*api.ServiceEntry
loop:
for _, se := range ses {
for _, c := range se.Checks {
if c.Status != "passing" && c.Status != "warning" {
continue loop
}
}
filteredSes = append(filteredSes, se)
}
return filteredSes, qm, nil
}
func query(tag, name, dc string) (Addresses, error) {
qo := &api.QueryOptions{Datacenter: dc}
ses, qm, err := service(name, tag, qo)
if err != nil {
return nil, err
}
// if key exists in cache it means that monitor goroutine is already started
if !existsInCache(tag, name, dc) {
serviceExists := len(ses) != 0
// initialize cache key and start goroutine
initializeCacheKey(tag, name, dc)
go func() {
monitor(tag, name, dc, qm.LastIndex, serviceExists)
}()
}
srvs := parseConsulServiceEntries(ses)
if len(srvs) == 0 {
return nil, ErrNotFound
}
updateCache(tag, name, dc, srvs)
return srvs, nil
}
func srvQuery(tag, name string, dc string) (Addresses, error) {
l.RLock()
srvs, ok := cache[cacheKey(tag, name, dc)]
l.RUnlock()
if ok && len(srvs) > 0 {
return srvs, nil
}
srvs, err := query(tag, name, dc)
if err != nil {
return nil, err
}
return srvs, nil
}
func srv(tag, name string, dc string) (Addresses, error) {
srvs, err := srvQuery(tag, name, dc)
if err == nil {
return srvs, nil
}
nameNomad := strings.Replace(name, "_", "-", -1)
srvs, err = srvQuery(tag, nameNomad, dc)
if err != nil {
return nil, err
}
return srvs, nil
}
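// Illustrative note (an assumption about intent, not from the original file):
// a lookup that misses for "my_service" is retried as "my-service", so
// Nomad-style names with dashes resolve the same way:
//
//	addrs, err := srv("", "my_service", "") // falls back to srv("", "my-service", "")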
// LocalServices returns all services registered in Consul in the specified datacenter or, if not set, the local datacenter
func LocalServices(name string) (Addresses, error) {
sn, ldc := serviceName(name, domain)
srvs, err := srv("", sn, ldc)
return srvs, err
}
// Services returns all services registered in Consul from all of the datacenters
func Services(name string) (Addresses, error)
|
{
return ServicesByTag(name, "")
}
|
identifier_body
|
|
dcy.go
|
.SetNodeName(nodeName)
}
}
func noConsulTestMode() {
domain = "sd"
dc = "dev"
nodeName = "node01"
federatedDcs = []string{dc}
cache["test1"] = []Address{
{"127.0.0.1", 12345},
{"127.0.0.1", 12348},
}
cache["test2"] = []Address{
{"10.11.12.13", 1415},
}
cache["test3"] = []Address{
{"192.168.0.1", 12345},
{"10.0.13.0", 12347},
}
cache["syslog"] = []Address{
{"127.0.0.1", 9514},
}
cache["statsd"] = []Address{
{"127.0.0.1", 8125},
}
cache["mongo"] = []Address{
{"127.0.0.1", 27017},
{"192.168.10.123", 27017},
}
cache["nsqlookupd-http"] = []Address{
{"127.0.0.1", 4161},
}
// add federated service notation to cache for all existing services - {service-name}-{datacenter}
for k, v := range cache {
cache[fmt.Sprintf("%s-%s", k, dc)] = v
}
}
func mustConnect() {
if err := signal.WithExponentialBackoff(connect); err != nil {
log.Printf("Giving up connecting %s", consulAddr)
log.Fatal(err)
}
}
func connect() error {
config := api.DefaultConfig()
config.Address = consulAddr
c, err := api.NewClient(config)
if err != nil {
log.S("addr", consulAddr).Error(err)
return err
}
consul = c
if err := self(); err != nil {
log.S("addr", consulAddr).Error(err)
return err
}
// add local dc if it's not set
if !contains(federatedDcs, dc) {
federatedDcs = append(federatedDcs, dc)
}
// wait for dependencies to appear in Consul
if e, ok := os.LookupEnv(EnvWait); ok && e != "" {
services := strings.Split(e, ",")
for _, s := range services {
if _, err := Services(s); err != nil {
log.S("addr", consulAddr).S("service", s).Error(err)
return err
}
}
}
return nil
}
func ConnectTo(addr string) error {
if consul != nil {
return nil
}
consulAddr = addr
return signal.WithExponentialBackoff(connect)
}
func serviceName(fqdn, domain string) (string, string) {
rx := regexp.MustCompile(fmt.Sprintf(`^(\S*)\.service\.*(\S*)*\.%s$`, domain))
ms := rx.FindStringSubmatch(fqdn)
if len(ms) < 2 {
return fqdn, ""
}
if len(ms) > 2 {
return ms[1], ms[2]
}
return ms[1], ""
}
func parseConsulServiceEntries(ses []*api.ServiceEntry) Addresses {
srvs := []Address{}
for _, se := range ses {
addr := se.Service.Address
if addr == "" {
addr = se.Node.Address
}
srvs = append(srvs, Address{
Address: addr,
Port: se.Service.Port,
})
}
return srvs
}
func updateCache(tag, name, ldc string, srvs Addresses) {
l.Lock()
defer l.Unlock()
key := cacheKey(tag, name, ldc)
if srvs2, ok := cache[key]; ok {
if srvs2.Equal(srvs) {
return
}
}
cache[key] = srvs
cdc := ldc
if cdc == "" { // if not set, local dc is default
cdc = dc
}
// cache is updated only with services from specific datacenter
// but when notifying subscribers services from all of the datacenters are used
allServices := make([]Address, len(srvs))
copy(allServices, srvs)
for _, fdc := range federatedDcs {
if fdc == cdc {
continue
}
services, _, err := service(name, tag, &api.QueryOptions{Datacenter: fdc})
if err != nil {
continue
}
allServices = append(allServices, parseConsulServiceEntries(services)...)
}
nn := cacheKey(tag, name, "")
notify(nn, allServices)
}
func initializeCacheKey(tag, name, dc string) {
l.Lock()
defer l.Unlock()
key := cacheKey(tag, name, dc)
cache[key] = Addresses{}
}
func invalidateCache(tag, name, dc string) {
l.Lock()
defer l.Unlock()
delete(cache, cacheKey(tag, name, dc))
}
func cacheKey(tag, name, dc string) string {
var key string
if tag != "" {
key = fmt.Sprintf("%s-", tag)
}
if dc == "" {
return fmt.Sprintf("%s%s", key, name)
}
return fmt.Sprintf("%s%s-%s", key, name, dc)
}
func existsInCache(tag, name, dc string) bool {
l.RLock()
defer l.RUnlock()
_, ok := cache[cacheKey(tag, name, dc)]
return ok
}
func monitor(tag, name, dc string, startIndex uint64, serviceExistedOnStart bool) {
wi := startIndex
tries := 0
for {
qo := &api.QueryOptions{
WaitIndex: wi,
WaitTime: time.Minute * waitTimeMinutes,
AllowStale: true,
RequireConsistent: false,
Datacenter: dc,
}
ses, qm, err := service(name, tag, qo)
if err != nil {
tries++
if tries == queryRetries {
invalidateCache(tag, name, dc)
return
}
time.Sleep(time.Second * queryTimeoutSeconds)
continue
}
tries = 0
wi = qm.LastIndex
// monitor routine might be started for service that still doesn't exist but is expected to show up
// in that case don't send updates for non existing service and instead wait for it to show up
if !serviceExistedOnStart && len(ses) == 0 {
continue
}
updateCache(tag, name, dc, parseConsulServiceEntries(ses))
}
}
func service(service, tag string, qo *api.QueryOptions) ([]*api.ServiceEntry, *api.QueryMeta, error) {
ses, qm, err := consul.Health().Service(service, tag, false, qo)
if err != nil {
return nil, nil, err
}
// drop services that have a check which is neither "passing" nor "warning"
var filteredSes []*api.ServiceEntry
loop:
for _, se := range ses {
for _, c := range se.Checks {
if c.Status != "passing" && c.Status != "warning" {
continue loop
}
}
filteredSes = append(filteredSes, se)
}
return filteredSes, qm, nil
}
func query(tag, name, dc string) (Addresses, error) {
qo := &api.QueryOptions{Datacenter: dc}
ses, qm, err := service(name, tag, qo)
if err != nil {
return nil, err
}
// if key exists in cache it means that monitor goroutine is already started
if !existsInCache(tag, name, dc)
|
srvs := parseConsulServiceEntries(ses)
if len(srvs) == 0 {
return nil, ErrNotFound
}
updateCache(tag, name, dc, srvs)
return srvs, nil
}
func srvQuery(tag, name string, dc string) (Addresses, error) {
l.RLock()
srvs, ok := cache[cacheKey(tag, name, dc)]
l.RUnlock()
if ok && len(srvs) > 0 {
return srvs, nil
}
srvs, err := query(tag, name, dc)
if err != nil {
return nil, err
}
return srvs, nil
}
func srv(tag, name string, dc string) (Addresses, error) {
srvs, err := srvQuery(tag, name, dc)
if err == nil {
return srvs, nil
}
nameNomad := strings.Replace(name
|
{
serviceExists := len(ses) != 0
// initialize cache key and start goroutine
initializeCacheKey(tag, name, dc)
go func() {
monitor(tag, name, dc, qm.LastIndex, serviceExists)
}()
}
|
conditional_block
|
dcy.go
|
{
continue
}
updateCache(tag, name, dc, parseConsulServiceEntries(ses))
}
}
func service(service, tag string, qo *api.QueryOptions) ([]*api.ServiceEntry, *api.QueryMeta, error) {
ses, qm, err := consul.Health().Service(service, tag, false, qo)
if err != nil {
return nil, nil, err
}
// drop services that have a check which is neither "passing" nor "warning"
var filteredSes []*api.ServiceEntry
loop:
for _, se := range ses {
for _, c := range se.Checks {
if c.Status != "passing" && c.Status != "warning" {
continue loop
}
}
filteredSes = append(filteredSes, se)
}
return filteredSes, qm, nil
}
func query(tag, name, dc string) (Addresses, error) {
qo := &api.QueryOptions{Datacenter: dc}
ses, qm, err := service(name, tag, qo)
if err != nil {
return nil, err
}
// if key exists in cache it means that monitor goroutine is already started
if !existsInCache(tag, name, dc) {
serviceExists := len(ses) != 0
// initialize cache key and start goroutine
initializeCacheKey(tag, name, dc)
go func() {
monitor(tag, name, dc, qm.LastIndex, serviceExists)
}()
}
srvs := parseConsulServiceEntries(ses)
if len(srvs) == 0 {
return nil, ErrNotFound
}
updateCache(tag, name, dc, srvs)
return srvs, nil
}
func srvQuery(tag, name string, dc string) (Addresses, error) {
l.RLock()
srvs, ok := cache[cacheKey(tag, name, dc)]
l.RUnlock()
if ok && len(srvs) > 0 {
return srvs, nil
}
srvs, err := query(tag, name, dc)
if err != nil {
return nil, err
}
return srvs, nil
}
func srv(tag, name string, dc string) (Addresses, error) {
srvs, err := srvQuery(tag, name, dc)
if err == nil {
return srvs, nil
}
nameNomad := strings.Replace(name, "_", "-", -1)
srvs, err = srvQuery(tag, nameNomad, dc)
if err != nil {
return nil, err
}
return srvs, nil
}
// LocalServices returns all services registered in Consul in the specified datacenter or, if not set, the local datacenter
func LocalServices(name string) (Addresses, error) {
sn, ldc := serviceName(name, domain)
srvs, err := srv("", sn, ldc)
return srvs, err
}
// Services returns all services registered in Consul from all of the datacenters
func Services(name string) (Addresses, error) {
return ServicesByTag(name, "")
}
// ServicesByTag returns all services registered in Consul from all of the datacenters that carry the given tag
func ServicesByTag(name, tag string) (Addresses, error) {
sn, _ := serviceName(name, domain)
srvs := []Address{}
for _, fdc := range federatedDcs {
s, err := srv(tag, sn, fdc)
if err == nil {
srvs = append(srvs, s...)
}
}
if len(srvs) == 0 {
return srvs, ErrNotFound
}
return srvs, nil
}
// Service will find one service in the Consul cluster, giving priority to the local datacenter.
// It will randomly choose one if there are multiple registered in Consul.
func Service(name string) (Address, error) {
srvs, err := servicesWithLocalPriority(name, "")
if err != nil {
return Address{}, err
}
return oneOf(srvs), nil
}
// ServiceByTag will find one service in the Consul cluster, giving priority to the local datacenter.
// It will randomly choose one if there are multiple registered in Consul.
func ServiceByTag(name, tag string) (Address, error) {
srvs, err := servicesWithLocalPriority(name, tag)
if err != nil {
return Address{}, err
}
return oneOf(srvs), nil
}
func oneOf(srvs []Address) Address {
if len(srvs) == 1 {
return srvs[0]
}
return srvs[rand.Intn(len(srvs))]
}
// returns services from one of the datacenters giving priority to the local dc
func servicesWithLocalPriority(name, tag string) (Addresses, error) {
sn, ldc := serviceName(name, domain)
srvs, err := srv(tag, sn, ldc)
if err == nil && len(srvs) != 0 {
return srvs, err
}
// loop through all datacenters until desired service is found
for _, fdc := range federatedDcs {
// skip local dc since it was already checked
if fdc == dc {
continue
}
srvs, err = srv(tag, sn, fdc)
if err == nil && len(srvs) != 0 {
break
}
}
return srvs, err
}
// ServiceInDc will find one service in the Consul cluster for the specified datacenter
func ServiceInDc(name, dc string) (Address, error) {
return ServiceInDcByTag("", name, dc)
}
// ServiceInDcByTag will find one service in the Consul cluster with the given tag for the specified datacenter
func ServiceInDcByTag(tag, name, dc string) (Address, error) {
srvs, err := srv(tag, name, dc)
if err != nil {
return Address{}, err
}
srv := srvs[rand.Intn(len(srvs))]
return srv, nil
}
// AgentService finds service on this (local) agent.
func AgentService(name string) (Address, error) {
svcs, err := consul.Agent().Services()
if err != nil {
return Address{}, err
}
for _, svc := range svcs {
//fmt.Printf("\t %#v\n", svc)
if svc.Service == name {
addr := svc.Address
if addr == "" {
addr = consulAddr
}
return Address{Address: addr, Port: svc.Port}, nil
}
}
return Address{}, ErrNotFound
}
// Inspect Consul for configuration parameters.
func self() error {
s, err := consul.Agent().Self()
if err != nil {
return err
}
cfg := s["Config"]
version := cfg["Version"].(string)
dc = cfg["Datacenter"].(string)
nodeName = cfg["NodeName"].(string)
if strings.HasPrefix(version, "0.") {
domain = cfg["Domain"].(string)
} else {
if dcfg := s["DebugConfig"]; dcfg != nil {
domain = dcfg["DNSDomain"].(string)
}
}
return nil
}
// Call consul LockKey api function.
func LockKey(key string) (*api.Lock, error) {
opts := &api.LockOptions{
Key: key,
LockWaitTime: 5 * time.Second,
}
return consul.LockOpts(opts)
}
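// exampleLock is an illustrative sketch, not part of the original package;
// the key name is hypothetical and error handling is kept minimal.
func exampleLock() error {
	lock, err := LockKey("locks/demo")
	if err != nil {
		return err
	}
	stop := make(chan struct{})
	// Lock blocks until the lock is acquired or stop is closed.
	lostCh, err := lock.Lock(stop)
	if err != nil {
		return err
	}
	defer lock.Unlock()
	_ = lostCh // Consul closes this channel if the lock is lost
	return nil
}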
// NodeName returns Node name as defined in Consul.
func NodeName() string {
return nodeName
}
// Dc returns datacenter name.
func Dc() string {
return dc
}
// KV reads key from Consul key value storage.
func KV(key string) (string, error) {
kv := consul.KV()
pair, _, err := kv.Get(key, nil)
if err != nil {
return "", err
}
if pair == nil {
return "", ErrKeyNotFound
}
return string(pair.Value), nil
}
// KVs read keys from Consul key value storage.
func KVs(key string) (map[string]string, error) {
kv := consul.KV()
entries, _, err := kv.List(key, nil)
if err != nil {
return nil, err
}
if entries == nil {
return nil, ErrKeyNotFound
}
m := make(map[string]string)
for _, e := range entries {
k := strings.TrimPrefix(e.Key, key)
k = strings.TrimPrefix(k, "/")
m[k] = string(e.Value)
}
return m, nil
}
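// exampleKV is an illustrative sketch, not part of the original package;
// the keys are hypothetical.
func exampleKV() {
	if v, err := KV("config/app/port"); err == nil {
		fmt.Printf("port = %s\n", v)
	}
	if m, err := KVs("config/app"); err == nil {
		for k, v := range m {
			fmt.Printf("%s = %s\n", k, v) // keys are relative to the "config/app" prefix
		}
	}
}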
// URL discovers host from url.
// If there are multiple services will randomly choose one.
func URL(url string) string {
scheme, host, _, path, query := unpackURL(url)
// log.S("url", url).S("host", host).Debug(fmt.Sprintf("should discover: %v", shouldDiscoverHost(host)))
if !shouldDiscoverHost(host) {
return url
}
srvs, err := Services(host)
if err != nil {
log.Error(err)
return url
}
// log.I("len_srvs", len(srvs)).Debug("service entries")
if len(srvs) == 0 {
return url
}
srv := srvs[rand.Intn(len(srvs))]
return packURL(scheme, srv.String(), "", path, query)
}
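// exampleURL is an illustrative sketch, not part of the original package;
// the service name and resolved address are hypothetical.
func exampleURL() {
	// If "api" is registered in Consul at 10.1.2.3:8080 the host is replaced,
	// otherwise the URL comes back unchanged.
	fmt.Println(URL("http://api/v1/status")) // e.g. http://10.1.2.3:8080/v1/status
}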
// shouldDiscoverHost - is it worth asking Consul for service discovery for this host
func
|
shouldDiscoverHost
|
identifier_name
|
|
dcy.go
|
federatedDcs []string
)
// Address is service address returned from Consul.
type Address struct {
Address string
Port int
}
// String return address in host:port string.
func (a Address) String() string {
return fmt.Sprintf("%s:%d", a.Address, a.Port)
}
func (a Address) Equal(a2 Address) bool {
return a.Address == a2.Address && a.Port == a2.Port
}
// Addresses is array of service addresses.
type Addresses []Address
// String returns string array in host:port format.
func (a Addresses) String() []string {
addrs := []string{}
for _, addr := range a {
addrs = append(addrs, addr.String())
}
return addrs
}
func (a Addresses) Equal(a2 Addresses) bool {
if len(a) != len(a2) {
return false
}
for _, d := range a {
found := false
for _, d2 := range a2 {
if d.Equal(d2) {
found = true
break
}
}
if !found {
return false
}
}
return true
}
func (a Addresses) Contains(a2 Address) bool {
for _, a1 := range a {
if a1.Equal(a2) {
return true
}
}
return false
}
func (a *Addresses) Append(as Addresses) {
s := []Address(*a)
for _, a1 := range []Address(as) {
if a.Contains(a1) {
continue
}
s = append(s, a1)
}
*a = Addresses(s)
}
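// exampleAppend is an illustrative sketch, not part of the original package.
func exampleAppend() {
	a := Addresses{{Address: "10.0.0.1", Port: 80}}
	a.Append(Addresses{{Address: "10.0.0.1", Port: 80}, {Address: "10.0.0.2", Port: 80}})
	fmt.Println(a.String()) // [10.0.0.1:80 10.0.0.2:80]; duplicates are skipped
}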
// On package initialization it will try to find Consul.
// It will BLOCK until Consul is found.
// If Consul is not found it will raise a fatal error.
// To disable finding Consul and use the package in test mode, set EnvConsul to "-".
// If EnvWait is defined, dcy will not start until those services are found in Consul. This is useful in a development environment where we start Consul together with the other applications which use dcy.
func init() {
if e, ok := os.LookupEnv(EnvConsul); ok && e != "" {
consulAddr = e
}
if consulAddr == "--" {
return
}
if consulAddr == "-" || (env.InTest() && consulAddr == localConsulAdr) {
noConsulTestMode()
return
}
if _, _, err := net.SplitHostPort(consulAddr); err != nil {
consulAddr = consulAddr + ":8500"
}
if e, ok := os.LookupEnv(EnvFederatedDcs); ok {
federatedDcs = strings.Fields(e)
}
rand.Seed(time.Now().UTC().UnixNano())
mustConnect()
updateEnv()
}
func updateEnv() {
if dc != "" {
env.SetDc(dc)
}
if nodeName != "" {
env.SetNodeName(nodeName)
}
}
func noConsulTestMode() {
domain = "sd"
dc = "dev"
nodeName = "node01"
federatedDcs = []string{dc}
cache["test1"] = []Address{
{"127.0.0.1", 12345},
{"127.0.0.1", 12348},
}
cache["test2"] = []Address{
{"10.11.12.13", 1415},
}
cache["test3"] = []Address{
{"192.168.0.1", 12345},
{"10.0.13.0", 12347},
}
cache["syslog"] = []Address{
{"127.0.0.1", 9514},
}
cache["statsd"] = []Address{
{"127.0.0.1", 8125},
}
cache["mongo"] = []Address{
{"127.0.0.1", 27017},
{"192.168.10.123", 27017},
}
cache["nsqlookupd-http"] = []Address{
{"127.0.0.1", 4161},
}
// add federated service notation to cache for all existing services - {service-name}-{datacenter}
for k, v := range cache {
cache[fmt.Sprintf("%s-%s", k, dc)] = v
}
}
func mustConnect() {
if err := signal.WithExponentialBackoff(connect); err != nil {
log.Printf("Giving up connecting %s", consulAddr)
log.Fatal(err)
}
}
func connect() error {
config := api.DefaultConfig()
config.Address = consulAddr
c, err := api.NewClient(config)
if err != nil {
log.S("addr", consulAddr).Error(err)
return err
}
consul = c
if err := self(); err != nil {
log.S("addr", consulAddr).Error(err)
return err
}
// add local dc if it's not set
if !contains(federatedDcs, dc) {
federatedDcs = append(federatedDcs, dc)
}
// wait for dependencies to appear in Consul
if e, ok := os.LookupEnv(EnvWait); ok && e != "" {
services := strings.Split(e, ",")
for _, s := range services {
if _, err := Services(s); err != nil {
log.S("addr", consulAddr).S("service", s).Error(err)
return err
}
}
}
return nil
}
func ConnectTo(addr string) error {
if consul != nil {
return nil
}
consulAddr = addr
return signal.WithExponentialBackoff(connect)
}
func serviceName(fqdn, domain string) (string, string) {
rx := regexp.MustCompile(fmt.Sprintf(`^(\S*)\.service\.*(\S*)*\.%s$`, domain))
ms := rx.FindStringSubmatch(fqdn)
if len(ms) < 2 {
return fqdn, ""
}
if len(ms) > 2 {
return ms[1], ms[2]
}
return ms[1], ""
}
func parseConsulServiceEntries(ses []*api.ServiceEntry) Addresses {
srvs := []Address{}
for _, se := range ses {
addr := se.Service.Address
if addr == "" {
addr = se.Node.Address
}
srvs = append(srvs, Address{
Address: addr,
Port: se.Service.Port,
})
}
return srvs
}
func updateCache(tag, name, ldc string, srvs Addresses) {
l.Lock()
defer l.Unlock()
key := cacheKey(tag, name, ldc)
if srvs2, ok := cache[key]; ok {
if srvs2.Equal(srvs) {
return
}
}
cache[key] = srvs
cdc := ldc
if cdc == "" { // if not set, local dc is default
cdc = dc
}
// cache is updated only with services from specific datacenter
// but when notifying subscribers services from all of the datacenters are used
allServices := make([]Address, len(srvs))
copy(allServices, srvs)
for _, fdc := range federatedDcs {
if fdc == cdc {
continue
}
services, _, err := service(name, tag, &api.QueryOptions{Datacenter: fdc})
if err != nil {
continue
}
allServices = append(allServices, parseConsulServiceEntries(services)...)
}
nn := cacheKey(tag, name, "")
notify(nn, allServices)
}
func initializeCacheKey(tag, name, dc string) {
l.Lock()
defer l.Unlock()
key := cacheKey(tag, name, dc)
cache[key] = Addresses{}
}
func invalidateCache(tag, name, dc string) {
l.Lock()
defer l.Unlock()
delete(cache, cacheKey(tag, name, dc))
}
func cacheKey(tag, name, dc string) string {
var key string
if tag != "" {
key = fmt.Sprintf("%s-", tag)
}
if dc == "" {
return fmt.Sprintf("%s%s", key, name)
}
return fmt.Sprintf("%s%s-%s", key, name, dc)
}
func existsInCache(tag, name, dc string) bool {
l.RLock()
defer l.RUnlock()
_, ok := cache[cacheKey(tag, name, dc)]
return ok
}
func monitor(tag, name, dc string, startIndex uint64, serviceExistedOnStart bool) {
wi := startIndex
tries := 0
for {
qo := &api.QueryOptions{
WaitIndex: wi,
WaitTime: time.Minute * waitTimeMinutes,
AllowStale: true,
RequireConsistent: false,
Datacenter: dc,
}
ses, qm, err := service(name, tag, qo)
if err != nil {
tries++
if tries == queryRetries {
invalidateCache(tag, name
|
consulAddr = localConsulAdr
|
random_line_split
|
|
opendocument_html_xslt.py
|
from plone.transforms.interfaces import ITransform, IRankedTransform
from plone.transforms.message import PloneMessageFactory as _
from plone.transforms.transform import TransformResult
from plone.transforms.log import log
import plone.opendocument.utils as utils
HAS_LXML = True
try:
from lxml import etree
except ImportError:
HAS_LXML = False
class OpendocumentHtmlXsltTransform(object):
"""
XSL transform which transforms OpenDocument files into XHTML
"""
implements(ITransform, IRankedTransform)
inputs = ('application/vnd.oasis.opendocument.text',
'application/vnd.oasis.opendocument.text-template',
'application/vnd.oasis.opendocument.text-web',
'application/vnd.oasis.opendocument.spreadsheet',
'application/vnd.oasis.opendocument.spreadsheet-template',
'application/vnd.oasis.opendocument.presentation',
'application/vnd.oasis.opendocument.presentation-template',
)
output = 'text/html'
name = u'plone.opendocument.opendocument_html_xslt.OpendocumentHtmlXsltTransform'
title = _(u'title_opendocument_html_xslt',
default=u"OpenDocument to XHTML transform with XSL")
description = _(u'description_markdown_transform',
default=u"A transform which transforms OpenDocument files into XHTML \
with XSL")
available = False
rank = 1
xsl_stylesheet = os.path.join(os.getcwd(), os.path.dirname(__file__),\
'lib/odf2html/all-in-one.xsl')
xsl_stylesheet_param = {}
data = None
subobjects = {}
metadata = {}
errors = u''
_dataFiles = {}
_imageNames = {}
def __init__(self):
super(OpendocumentHtmlXsltTransform, self).__init__()
if HAS_LXML:
self.available = True
self.xsl_stylesheet_param = {
'param_track_changes':"0",#display version changes
'param_no_css':"0", #don't make css styles
'scale':"1", #scale font size, (non zero integer value)
}
self.data = tempfile.NamedTemporaryFile()
def transform(self, data, options=None):
'''
Transforms data (an OpenDocument file) to XHTML. It returns a
TransformResult object.
'''
if not self.available:
log(DEBUG, "The LXML library is required to use the %s transform "
% (self.name))
return None
self._prepareTrans(data)
if not self._dataFiles:
return None;
result = None
#XSL transformation
try:
try:
etree.clearErrorLog()
parser = etree.XMLParser(remove_comments=True,\
remove_blank_text=True)
#concatenate all xml files
contentXML = etree.parse(self._concatDataFiles(), parser)
contentXML.xinclude()
#adjust file paths
root = contentXML.getroot()
images = root.xpath("//draw:image", namespaces={'draw':\
'urn:oasis:names:tc:opendocument:xmlns:drawing:1.0'})
for i in images:
imageName = i.get("{http://www.w3.org/1999/xlink}href")
imageName = os.path.basename(imageName)
if not self._imageNames.has_key(imageName):
self.errors = self.errors + u'''
Image file or OLE Object '%s' does not\
exist. Maybe it is\
not embedded in OpenDocument file?
''' % (imageName)
i.set("{http://www.w3.org/1999/xlink}href", imageName)
continue
imageName = self._imageNames[imageName]
i.set("{http://www.w3.org/1999/xlink}href", imageName)
#extract meta data
self._getMetaData(contentXML)
#xslt transformation
stylesheetXML = etree.parse(self.xsl_stylesheet, parser)
xslt = etree.XSLT(stylesheetXML)
resultXML = xslt(contentXML, **self.xsl_stylesheet_param)
docinfo = u'<?xml version=\'1.0\' encoding=\'utf-8\'?>\
\n<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n'
self._addCssSelectorPrefix(resultXML,'#odf_document ')
self.data.write(docinfo.encode('utf-8'))
resultXML.write(self.data, pretty_print=True)
self.data.seek(0)
#log non fatal errors and warnings
if parser.error_log:
self.errors = self.errors + u'''
Parse errors which are not fatal:
%s
''' % (parser.error_log)
if xslt.error_log:
self.errors = self.errors + u'''
XSLT errors which are not fatal:
%s
''' % (xslt.error_log)
for f in self._dataFiles.values():
f.close()
result = TransformResult(self.data,
subobjects=self.subobjects or {},
metadata=self.metadata or {},
errors=self.errors or None
)
except etree.LxmlError, e:
log(DEBUG,\
str(e) + ('\nlibxml error_log:\n') + str(e.error_log))
return None
except Exception, e:
log(DEBUG, str(e))
return None
finally:
self.data = tempfile.NamedTemporaryFile()
self.subobjects = {}
self.metadata = {}
self.errors = u''
self._dataFiles = {}
self._imageNames = {}
return result
def _prepareTrans(self, data):
'''
Extracts required files from data (opendocument file). They are stored
in self.subobjects and self._dataFiles.
'''
try:
#transform data to zip file object
data_ = tempfile.NamedTemporaryFile()
for chunk in data:
data_.write(chunk)
data_.seek(0)
dataZip = zipfile.ZipFile(data_)
dataIterator = utils.zipIterator(dataZip)
#extract content
for fileName, fileContent in dataIterator:
#getting data files
if (fileName == 'content.xml'):
content = tempfile.NamedTemporaryFile()
shutil.copyfileobj(fileContent, content)
content.seek(0)
self._dataFiles['content'] = content
continue
if (fileName == 'styles.xml'):
styles = tempfile.NamedTemporaryFile()
shutil.copyfileobj(fileContent, styles)
styles.seek(0)
self._dataFiles['styles'] = styles
continue
if (fileName == 'meta.xml'):
meta = tempfile.NamedTemporaryFile()
shutil.copyfileobj(fileContent, meta)
meta.seek(0)
self._dataFiles['meta'] = meta
continue
#getting images
if ('Pictures/' in fileName):
imageName = os.path.basename(fileName)
imageContent = tempfile.NamedTemporaryFile()
shutil.copyfileobj(fileContent, imageContent)
imageContent.seek(0)
fileContent.close()
#assert that the image is viewable with web browsers
imageName_, imageContent_ = utils.makeViewable((imageName, imageContent))
if not imageName_:
self.errors = self.errors + u'''
Image file '%s' could not be make viewable \
with web browser.
''' % (imageName)
imageName_ = imageName
imageContent_ = imageContent
#store image
self._imageNames[imageName] = imageName_
self.subobjects[imageName_] = imageContent_
dataZip.close()
except Exception, e:
self._dataFiles = None
self.subobjects = None
log(DEBUG, str(e))
def _concatDataFiles(self):
'''
Returns XML file object that concatenates all files stored in self._dataFiles
with xi:include.
'''
includeXML = lambda x: (x in self._dataFiles) and \
'<xi:include href="%s" />' % (self._dataFiles[x].name)
concat = StringIO(
'''<?xml version='1.0' encoding='UTF-8'?>
<office:document xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:office="urn:oasis:names:tc:opendocument:xmlns:office:1.0">
%s %s %s
</office:document>
'''
% (
includeXML('meta') or ' ',
includeXML('styles') or ' ',
includeXML('content') or ' ',
)
)
return concat
def _getMetaData(self, contentXML):
'''
Extracts all OpenDocument meta data from contentXML (ElementTree
object) and stores it in self.metadata.
'''
root = contentXML.getroot()
Elements = root.xpath("//office:meta", namespaces={'office'\
:'urn:oasis:names:tc:opendocument:xmlns:office:1.0'})
if not Elements:
self.errors = self.errors + u'''
There is no <office:meta> element to extract \
meta data.
'''
for element in Elements:
meta = u'{urn:oasis:names:tc:opend
|
random_line_split
|
||
opendocument_html_xslt.py
|
HtmlXsltTransform(object):
"""
XSL transform which transforms OpenDocument files into XHTML
"""
implements(ITransform, IRankedTransform)
inputs = ('application/vnd.oasis.opendocument.text',
'application/vnd.oasis.opendocument.text-template',
'application/vnd.oasis.opendocument.text-web',
'application/vnd.oasis.opendocument.spreadsheet',
'application/vnd.oasis.opendocument.spreadsheet-template',
'application/vnd.oasis.opendocument.presentation',
'application/vnd.oasis.opendocument.presentation-template',
)
output = 'text/html'
name = u'plone.opendocument.opendocument_html_xslt.OpendocumentHtmlXsltTransform'
title = _(u'title_opendocument_html_xslt',
default=u"OpenDocument to XHTML transform with XSL")
description = _(u'description_markdown_transform',
default=u"A transform which transforms OpenDocument files into XHTML \
with XSL")
available = False
rank = 1
xsl_stylesheet = os.path.join(os.getcwd(), os.path.dirname(__file__),\
'lib/odf2html/all-in-one.xsl')
xsl_stylesheet_param = {}
data = None
subobjects = {}
metadata = {}
errors = u''
_dataFiles = {}
_imageNames = {}
def __init__(self):
super(OpendocumentHtmlXsltTransform, self).__init__()
if HAS_LXML:
self.available = True
self.xsl_stylesheet_param = {
'param_track_changes':"0",#display version changes
'param_no_css':"0", #don't make css styles
'scale':"1", #scale font size, (non zero integer value)
}
self.data = tempfile.NamedTemporaryFile()
def transform(self, data, options=None):
'''
Transforms data (an OpenDocument file) to XHTML. It returns a
TransformResult object.
'''
if not self.available:
log(DEBUG, "The LXML library is required to use the %s transform "
% (self.name))
return None
self._prepareTrans(data)
if not self._dataFiles:
return None;
result = None
#XSL transformation
try:
try:
etree.clearErrorLog()
parser = etree.XMLParser(remove_comments=True,\
remove_blank_text=True)
#concatenate all xml files
contentXML = etree.parse(self._concatDataFiles(), parser)
contentXML.xinclude()
#adjust file paths
root = contentXML.getroot()
images = root.xpath("//draw:image", namespaces={'draw':\
'urn:oasis:names:tc:opendocument:xmlns:drawing:1.0'})
for i in images:
imageName = i.get("{http://www.w3.org/1999/xlink}href")
imageName = os.path.basename(imageName)
if not self._imageNames.has_key(imageName):
self.errors = self.errors + u'''
Image file or OLE Object '%s' does not\
exist. Maybe it is\
not embedded in OpenDocument file?
''' % (imageName)
i.set("{http://www.w3.org/1999/xlink}href", imageName)
continue
imageName = self._imageNames[imageName]
i.set("{http://www.w3.org/1999/xlink}href", imageName)
#extract meta data
self._getMetaData(contentXML)
#xslt transformation
stylesheetXML = etree.parse(self.xsl_stylesheet, parser)
xslt = etree.XSLT(stylesheetXML)
resultXML = xslt(contentXML, **self.xsl_stylesheet_param)
docinfo = u'<?xml version=\'1.0\' encoding=\'utf-8\'?>\
\n<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n'
self._addCssSelectorPrefix(resultXML,'#odf_document ')
self.data.write(docinfo.encode('utf-8'))
resultXML.write(self.data, pretty_print=True)
self.data.seek(0)
#log non fatal errors and warnings
if parser.error_log:
self.errors = self.errors + u'''
Parse errors which are not fatal:
%s
''' % (parser.error_log)
if xslt.error_log:
self.errors = self.errors + u'''
XSLT errors which are not fatal:
%s
''' % (xslt.error_log)
for f in self._dataFiles.values():
f.close()
result = TransformResult(self.data,
subobjects=self.subobjects or {},
metadata=self.metadata or {},
errors=self.errors or None
)
except etree.LxmlError, e:
log(DEBUG,\
str(e) + ('\nlibxml error_log:\n') + str(e.error_log))
return None
except Exception, e:
log(DEBUG, str(e))
return None
finally:
self.data = tempfile.NamedTemporaryFile()
self.subobjects = {}
self.metadata = {}
self.errors = u''
self._dataFiles = {}
self._imageNames = {}
return result
def _prepareTrans(self, data):
|
self._dataFiles['content'] = content
continue
if (fileName == 'styles.xml'):
styles = tempfile.NamedTemporaryFile()
shutil.copyfileobj(fileContent, styles)
styles.seek(0)
self._dataFiles['styles'] = styles
continue
if (fileName == 'meta.xml'):
meta = tempfile.NamedTemporaryFile()
shutil.copyfileobj(fileContent, meta)
meta.seek(0)
self._dataFiles['meta'] = meta
continue
#getting images
if ('Pictures/' in fileName):
imageName = os.path.basename(fileName)
imageContent = tempfile.NamedTemporaryFile()
shutil.copyfileobj(fileContent, imageContent)
imageContent.seek(0)
fileContent.close()
#assert that the image is viewable with web browsers
imageName_, imageContent_ = utils.makeViewable((imageName, imageContent))
if not imageName_:
self.errors = self.errors + u'''
Image file '%s' could not be made viewable \
with a web browser.
''' % (imageName)
imageName_ = imageName
imageContent_ = imageContent
#store image
self._imageNames[imageName] = imageName_
self.subobjects[imageName_] = imageContent_
dataZip.close()
except Exception, e:
self._dataFiles = None
self.subobjects = None
log(DEBUG, str(e))
def _concatDataFiles(self):
'''
Returns XML file object that concatenates all files stored in self._dataFiles
with xi:include.
'''
includeXML = lambda x: (x in self._dataFiles) and \
'<xi:include href="%s" />' % (self._dataFiles[x].name)
concat = StringIO(
'''<?xml version='1.0' encoding='UTF-8'?>
<office:document xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:office="urn:oasis:names:tc:opendocument:xmlns:office:1.0">
%s %s %s
</office:document>
'''
% (
includeXML('meta') or ' ',
includeXML('styles') or ' ',
includeXML('content') or ' ',
)
)
return concat
def _getMetaData(self, contentXML):
'''
Extracts all OpenDocument meta data from contentXML (ElementTree
object) and stores it in self.metadata.
'''
root = contentXML.getroot()
Elements = root.xpath("//office:meta", namespaces={'office'\
:'urn:oasis:names:tc:opendocument:xmlns:office:1.0'})
if not Elements:
self.errors = self.errors + u'''
There is no <office:meta> element to extract \
meta data.
'''
for element in Elements:
meta = u'{urn:oasis:names:tc:opendocument:xmlns:meta:1.0}'
dc = u'{http://purl.org/dc/elements/1.1/}'
for m in element.iterchildren():
#regular elements
text = unicode(m.text).rstrip().lstrip()
prefix = unicode(m.prefix)
tag = unicode(m.tag)
tag = tag.replace(meta, u'')
tag =
|
'''
Extracts required files from data (opendocument file). They are stored
in self.subobjects and self._dataFiles.
'''
try:
#transform data to zip file object
data_ = tempfile.NamedTemporaryFile()
for chunk in data:
data_.write(chunk)
data_.seek(0)
dataZip = zipfile.ZipFile(data_)
dataIterator = utils.zipIterator(dataZip)
#extract content
for fileName, fileContent in dataIterator:
#getting data files
if (fileName == 'content.xml'):
content = tempfile.NamedTemporaryFile()
shutil.copyfileobj(fileContent, content)
content.seek(0)
|
identifier_body
|
opendocument_html_xslt.py
|
HtmlXsltTransform(object):
"""
XSL transform which transforms OpenDocument files into XHTML
"""
implements(ITransform, IRankedTransform)
inputs = ('application/vnd.oasis.opendocument.text',
'application/vnd.oasis.opendocument.text-template',
'application/vnd.oasis.opendocument.text-web',
'application/vnd.oasis.opendocument.spreadsheet',
'application/vnd.oasis.opendocument.spreadsheet-template',
'application/vnd.oasis.opendocument.presentation',
'application/vnd.oasis.opendocument.presentation-template',
)
output = 'text/html'
name = u'plone.opendocument.opendocument_html_xslt.OpendocumentHtmlXsltTransform'
title = _(u'title_opendocument_html_xslt',
default=u"OpenDocument to XHTML transform with XSL")
description = _(u'description_markdown_transform',
default=u"A transform which transforms OpenDocument files into XHTML \
with XSL")
available = False
rank = 1
xsl_stylesheet = os.path.join(os.getcwd(), os.path.dirname(__file__),\
'lib/odf2html/all-in-one.xsl')
xsl_stylesheet_param = {}
data = None
subobjects = {}
metadata = {}
errors = u''
_dataFiles = {}
_imageNames = {}
def __init__(self):
super(OpendocumentHtmlXsltTransform, self).__init__()
if HAS_LXML:
self.available = True
self.xsl_stylesheet_param = {
'param_track_changes':"0",#display version changes
'param_no_css':"0", #don't make css styles
'scale':"1", #scale font size, (non zero integer value)
}
self.data = tempfile.NamedTemporaryFile()
def transform(self, data, options=None):
'''
Transforms data (an OpenDocument file) to XHTML. It returns a
TransformResult object.
'''
if not self.available:
log(DEBUG, "The LXML library is required to use the %s transform "
% (self.name))
return None
self._prepareTrans(data)
if not self._dataFiles:
return None;
result = None
#XSL transformation
try:
try:
etree.clearErrorLog()
parser = etree.XMLParser(remove_comments=True,\
remove_blank_text=True)
#concatenate all xml files
contentXML = etree.parse(self._concatDataFiles(), parser)
contentXML.xinclude()
#adjust file paths
root = contentXML.getroot()
images = root.xpath("//draw:image", namespaces={'draw':\
'urn:oasis:names:tc:opendocument:xmlns:drawing:1.0'})
for i in images:
imageName = i.get("{http://www.w3.org/1999/xlink}href")
imageName = os.path.basename(imageName)
if not self._imageNames.has_key(imageName):
self.errors = self.errors + u'''
Image file or OLE Object '%s' does not\
exist. Maybe it is\
not embedded in OpenDocument file?
''' % (imageName)
i.set("{http://www.w3.org/1999/xlink}href", imageName)
continue
imageName = self._imageNames[imageName]
i.set("{http://www.w3.org/1999/xlink}href", imageName)
#extract meta data
self._getMetaData(contentXML)
#xslt transformation
stylesheetXML = etree.parse(self.xsl_stylesheet, parser)
xslt = etree.XSLT(stylesheetXML)
resultXML = xslt(contentXML, **self.xsl_stylesheet_param)
docinfo = u'<?xml version=\'1.0\' encoding=\'utf-8\'?>\
\n<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n'
self._addCssSelectorPrefix(resultXML,'#odf_document ')
self.data.write(docinfo.encode('utf-8'))
resultXML.write(self.data, pretty_print=True)
self.data.seek(0)
#log non fatal errors and warnings
if parser.error_log:
self.errors = self.errors + u'''
Parse errors which are not fatal:
%s
''' % (parser.error_log)
if xslt.error_log:
self.errors = self.errors + u'''
XSLT errors which are not fatal:
%s
''' % (xslt.error_log)
for f in self._dataFiles.values():
f.close()
result = TransformResult(self.data,
subobjects=self.subobjects or {},
metadata=self.metadata or {},
errors=self.errors or None
)
except etree.LxmlError, e:
log(DEBUG,\
str(e) + ('\nlibxml error_log:\n') + str(e.error_log))
return None
except Exception, e:
log(DEBUG, str(e))
return None
finally:
self.data = tempfile.NamedTemporaryFile()
self.subobjects = {}
self.metadata = {}
self.errors = u''
self._dataFiles = {}
self._imageNames = {}
return result
def _prepareTrans(self, data):
'''
Extracts required files from data (opendocument file). They are stored
in self.subobjects and self._dataFiles.
'''
try:
#transform data to zip file object
data_ = tempfile.NamedTemporaryFile()
for chunk in data:
|
data_.seek(0)
dataZip = zipfile.ZipFile(data_)
dataIterator = utils.zipIterator(dataZip)
#extract content
for fileName, fileContent in dataIterator:
#getting data files
if (fileName == 'content.xml'):
content = tempfile.NamedTemporaryFile()
shutil.copyfileobj(fileContent, content)
content.seek(0)
self._dataFiles['content'] = content
continue
if (fileName == 'styles.xml'):
styles = tempfile.NamedTemporaryFile()
shutil.copyfileobj(fileContent, styles)
styles.seek(0)
self._dataFiles['styles'] = styles
continue
if (fileName == 'meta.xml'):
meta = tempfile.NamedTemporaryFile()
shutil.copyfileobj(fileContent, meta)
meta.seek(0)
self._dataFiles['meta'] = meta
continue
#getting images
if ('Pictures/' in fileName):
imageName = os.path.basename(fileName)
imageContent = tempfile.NamedTemporaryFile()
shutil.copyfileobj(fileContent, imageContent)
imageContent.seek(0)
fileContent.close()
#assert that the image is viewable with web browsers
imageName_, imageContent_ = utils.makeViewable((imageName, imageContent))
if not imageName_:
self.errors = self.errors + u'''
Image file '%s' could not be made viewable \
with a web browser.
''' % (imageName)
imageName_ = imageName
imageContent_ = imageContent
#store image
self._imageNames[imageName] = imageName_
self.subobjects[imageName_] = imageContent_
dataZip.close()
except Exception, e:
self._dataFiles = None
self.subobjects = None
log(DEBUG, str(e))
def _concatDataFiles(self):
'''
Returns XML file object that concatenates all files stored in self._dataFiles
with xi:include.
'''
includeXML = lambda x: (x in self._dataFiles) and \
'<xi:include href="%s" />' % (self._dataFiles[x].name)
concat = StringIO(
'''<?xml version='1.0' encoding='UTF-8'?>
<office:document xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:office="urn:oasis:names:tc:opendocument:xmlns:office:1.0">
%s %s %s
</office:document>
'''
% (
includeXML('meta') or ' ',
includeXML('styles') or ' ',
includeXML('content') or ' ',
)
)
return concat
def _getMetaData(self, contentXML):
'''
Extracts all OpenDocument meta data from contentXML (ElementTree
object) and stores it in self.metadata.
'''
root = contentXML.getroot()
Elements = root.xpath("//office:meta", namespaces={'office'\
:'urn:oasis:names:tc:opendocument:xmlns:office:1.0'})
if not Elements:
self.errors = self.errors + u'''
There is no <office:meta> element to extract \
meta data.
'''
for element in Elements:
meta = u'{urn:oasis:names:tc:opendocument:xmlns:meta:1.0}'
dc = u'{http://purl.org/dc/elements/1.1/}'
for m in element.iterchildren():
#regular elements
text = unicode(m.text).rstrip().lstrip()
prefix = unicode(m.prefix)
tag = unicode(m.tag)
tag = tag.replace(meta, u'')
tag
|
data_.write(chunk)
|
conditional_block
|
opendocument_html_xslt.py
|
HtmlXsltTransform(object):
"""
XSL transform which transforms OpenDocument files into XHTML
"""
implements(ITransform, IRankedTransform)
inputs = ('application/vnd.oasis.opendocument.text',
'application/vnd.oasis.opendocument.text-template',
'application/vnd.oasis.opendocument.text-web',
'application/vnd.oasis.opendocument.spreadsheet',
'application/vnd.oasis.opendocument.spreadsheet-template',
'application/vnd.oasis.opendocument.presentation',
'application/vnd.oasis.opendocument.presentation-template',
)
output = 'text/html'
name = u'plone.opendocument.opendocument_html_xslt.OpendocumentHtmlXsltTransform'
title = _(u'title_opendocument_html_xslt',
default=u"OpenDocument to XHTML transform with XSL")
description = _(u'description_markdown_transform',
default=u"A transform which transforms OpenDocument files into XHTML \
with XSL")
available = False
rank = 1
xsl_stylesheet = os.path.join(os.getcwd(), os.path.dirname(__file__),\
'lib/odf2html/all-in-one.xsl')
xsl_stylesheet_param = {}
data = None
subobjects = {}
metadata = {}
errors = u''
_dataFiles = {}
_imageNames = {}
def __init__(self):
super(OpendocumentHtmlXsltTransform, self).__init__()
if HAS_LXML:
self.available = True
self.xsl_stylesheet_param = {
'param_track_changes':"0",#display version changes
'param_no_css':"0", #don't make css styles
'scale':"1", #scale font size, (non zero integer value)
}
self.data = tempfile.NamedTemporaryFile()
def
|
(self, data, options=None):
'''
Transforms data (an OpenDocument file) to XHTML. It returns a
TransformResult object.
'''
if not self.available:
log(DEBUG, "The LXML library is required to use the %s transform "
% (self.name))
return None
self._prepareTrans(data)
if not self._dataFiles:
return None;
result = None
#XSL transformation
try:
try:
etree.clearErrorLog()
parser = etree.XMLParser(remove_comments=True,\
remove_blank_text=True)
#concatenate all xml files
contentXML = etree.parse(self._concatDataFiles(), parser)
contentXML.xinclude()
#adjust file paths
root = contentXML.getroot()
images = root.xpath("//draw:image", namespaces={'draw':\
'urn:oasis:names:tc:opendocument:xmlns:drawing:1.0'})
for i in images:
imageName = i.get("{http://www.w3.org/1999/xlink}href")
imageName = os.path.basename(imageName)
if not self._imageNames.has_key(imageName):
self.errors = self.errors + u'''
Image file or OLE Object '%s' does not\
exist. Maybe it is\
not embedded in OpenDocument file?
''' % (imageName)
i.set("{http://www.w3.org/1999/xlink}href", imageName)
continue
imageName = self._imageNames[imageName]
i.set("{http://www.w3.org/1999/xlink}href", imageName)
#extract meta data
self._getMetaData(contentXML)
#xslt transformation
stylesheetXML = etree.parse(self.xsl_stylesheet, parser)
xslt = etree.XSLT(stylesheetXML)
resultXML = xslt(contentXML, **self.xsl_stylesheet_param)
docinfo = u'<?xml version=\'1.0\' encoding=\'utf-8\'?>\
\n<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n'
self._addCssSelectorPrefix(resultXML,'#odf_document ')
self.data.write(docinfo.encode('utf-8'))
resultXML.write(self.data, pretty_print=True)
self.data.seek(0)
#log non fatal errors and warnings
if parser.error_log:
self.errors = self.errors + u'''
Parse errors which are not fatal:
%s
''' % (parser.error_log)
if xslt.error_log:
self.errors = self.errors + u'''
XSLT errors which are not fatal:
%s
''' % (xslt.error_log)
for f in self._dataFiles.values():
f.close()
result = TransformResult(self.data,
subobjects=self.subobjects or {},
metadata=self.metadata or {},
errors=self.errors or None
)
except etree.LxmlError, e:
log(DEBUG,\
str(e) + ('\nlibxml error_log:\n') + str(e.error_log))
return None
except Exception, e:
log(DEBUG, str(e))
return None
finally:
self.data = tempfile.NamedTemporaryFile()
self.subobjects = {}
self.metadata = {}
self.errors = u''
self._dataFiles = {}
self._imageNames = {}
return result
def _prepareTrans(self, data):
'''
Extracts required files from data (opendocument file). They are stored
in self.subobjects and self._dataFiles.
'''
try:
#transform data to zip file object
data_ = tempfile.NamedTemporaryFile()
for chunk in data:
data_.write(chunk)
data_.seek(0)
dataZip = zipfile.ZipFile(data_)
dataIterator = utils.zipIterator(dataZip)
#extract content
for fileName, fileContent in dataIterator:
#getting data files
if (fileName == 'content.xml'):
content = tempfile.NamedTemporaryFile()
shutil.copyfileobj(fileContent, content)
content.seek(0)
self._dataFiles['content'] = content
continue
if (fileName == 'styles.xml'):
styles = tempfile.NamedTemporaryFile()
shutil.copyfileobj(fileContent, styles)
styles.seek(0)
self._dataFiles['styles'] = styles
continue
if (fileName == 'meta.xml'):
meta = tempfile.NamedTemporaryFile()
shutil.copyfileobj(fileContent, meta)
meta.seek(0)
self._dataFiles['meta'] = meta
continue
#getting images
if ('Pictures/' in fileName):
imageName = os.path.basename(fileName)
imageContent = tempfile.NamedTemporaryFile()
shutil.copyfileobj(fileContent, imageContent)
imageContent.seek(0)
fileContent.close()
#assert that the image is viewable with web browsers
imageName_, imageContent_ = utils.makeViewable((imageName, imageContent))
if not imageName_:
self.errors = self.errors + u'''
Image file '%s' could not be made viewable \
in a web browser.
''' % (imageName)
imageName_ = imageName
imageContent_ = imageContent
#store image
self._imageNames[imageName] = imageName_
self.subobjects[imageName_] = imageContent_
dataZip.close()
except Exception, e:
self._dataFiles = None
self.subobjects = None
log(DEBUG, str(e))
def _concatDataFiles(self):
'''
Returns an XML file object that concatenates all files stored in self._dataFiles
with xi:include.
'''
includeXML = lambda x: (x in self._dataFiles) and \
'<xi:include href="%s" />' % (self._dataFiles[x].name)
concat = StringIO(
'''<?xml version='1.0' encoding='UTF-8'?>
<office:document xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:office="urn:oasis:names:tc:opendocument:xmlns:office:1.0">
%s %s %s
</office:document>
'''
% (
includeXML('meta') or ' ',
includeXML('styles') or ' ',
includeXML('content') or ' ',
)
)
return concat
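# Sketch of how the wrapper above is consumed (it mirrors the transform method
# and is purely illustrative): parsing the generated document and calling
# xinclude() replaces each xi:include element with the parsed contents of the
# referenced temporary file, yielding one tree holding meta, styles and content.
#
#   tree = etree.parse(self._concatDataFiles(), etree.XMLParser())
#   tree.xinclude()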
def _getMetaData(self, contentXML):
'''
Extracts all OpenDocument meta data from contentXML (ElementTree
object) and stores it in self.metadata.
'''
root = contentXML.getroot()
Elements = root.xpath("//office:meta", namespaces={'office'\
: 'urn:oasis:names:tc:opendocument:xmlns:office:1.0'})
if not Elements:
self.errors = self.errors + u'''
There is no <office:meta> element to extract \
meta data.
'''
for element in Elements:
meta = u'{urn:oasis:names:tc:opendocument:xmlns:meta:1.0}'
dc = u'{http://purl.org/dc/elements/1.1/}'
for m in element.iterchildren():
#regular elements
text = unicode(m.text).strip()
prefix = unicode(m.prefix)
tag = unicode(m.tag)
tag = tag.replace(meta, u'')
tag
|
transform
|
identifier_name
|
event.go
|
("Etag"); etag == "" {
return nil, fmt.Errorf("Missing ETag header.")
} else if parts := strings.Split(etag, `"`); len(parts) != 3 {
return nil, fmt.Errorf("Malformed ETag header.")
} else {
event.Ref = parts[1]
}
// Success
return event, nil
}
//
// DeleteEvent
//
// Removes an event from the collection. This succeeds even if the event did
// not exist prior to this call. Note that all event deletes are final and
// cannot be undone.
func (c *Collection) DeleteEvent(
key, typ string, ts time.Time, ordinal int64,
) error {
path := fmt.Sprintf("%s/%s/events/%s/%d/%d?purge=true",
c.Name, key, typ, ts.UnixNano()/1000000, ordinal)
_, err := c.client.emptyReply("DELETE", path, nil, nil, 204)
return err
}
//
// GetEvent
//
// Returns an individual event with the given details.
func (c *Collection) GetEvent(
key, typ string, ts time.Time, ordinal int64, value interface{},
) (*Event, error) {
event := &Event{
Collection: c,
Key: key,
Ordinal: ordinal,
Timestamp: ts,
Type: typ,
}
// Perform the actual GET
path := fmt.Sprintf("%s/%s/events/%s/%d/%d", c.Name, key, typ,
ts.UnixNano()/1000000, ordinal)
var responseData jsonEvent
_, err := c.client.jsonReply("GET", path, nil, 200, &responseData)
if err != nil {
return nil, err
}
// Move the data from the returned values into the Event object.
event.Value = responseData.Value
event.Ref = responseData.Path.Ref
secs := responseData.Timestamp / 1000
nsecs := (responseData.Timestamp % 1000) * 1000000
event.Timestamp = time.Unix(secs, nsecs)
event.Ordinal = responseData.Ordinal
// If the user provided us a place to unmarshal the 'value' field into
// we do that here.
if value != nil {
return event, event.Unmarshal(value)
}
// Success
return event, nil
}
//
// UpdateEvent
//
// Updates an event at the given location. In order for this to work the Event
// must exist prior to this call.
func (c *Collection) UpdateEvent(
key, typ string, ts time.Time, ordinal int64, value interface{},
) (*Event, error) {
headers := map[string]string{"Content-Type": "application/json"}
return c.innerUpdateEvent(key, typ, ts, ordinal, value, headers)
}
// Inner implementation used in both UpdateEvent and Event.Update.
func (c *Collection) innerUpdateEvent(
key, typ string, ts time.Time, ordinal int64, value interface{},
headers map[string]string,
) (*Event, error) {
event := &Event{
Collection: c,
Key: key,
Ordinal: ordinal,
Timestamp: ts,
Type: typ,
}
// Encode the JSON message into a raw value that we can return to the
// client if necessary.
if rawMsg, err := json.Marshal(value); err != nil {
return nil, err
} else {
event.Value = json.RawMessage(rawMsg)
}
// Perform the actual PUT
path := fmt.Sprintf("%s/%s/events/%s/%d/%d", c.Name, key, typ,
ts.UnixNano()/1000000, ordinal)
resp, err := c.client.emptyReply("PUT", path, headers,
bytes.NewBuffer(event.Value), 204)
if err != nil {
return nil, err
}
// Get the Location header and parse it. The Header will give us the
// Ordinal.
location := resp.Header.Get("Location")
if location == "" {
return nil, fmt.Errorf("Missing Location header.")
} else if parts := strings.Split(location, "/"); len(parts) != 8 {
return nil, fmt.Errorf("Malformed Location header.")
} else if ts, err := strconv.ParseInt(parts[6], 10, 64); err != nil {
return nil, fmt.Errorf("Malformed Timestamp in the Location header.")
} else if ord, err := strconv.ParseInt(parts[7], 10, 64); err != nil {
return nil, fmt.Errorf("Malformed Ordinal in the Location header.")
} else {
secs := ts / 1000
nsecs := (ts % 1000) * 1000000
event.Timestamp = time.Unix(secs, nsecs)
event.Ordinal = ord
}
// Get the Ref via the Etag header.
if etag := resp.Header.Get("Etag"); etag == "" {
return nil, fmt.Errorf("Missing ETag header.")
} else if parts := strings.Split(etag, `"`); len(parts) != 3 {
return nil, fmt.Errorf("Malformed ETag header.")
} else {
event.Ref = parts[1]
}
// Success
return event, nil
}
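// Illustrative note on the Location parsing above (the path prefix shown here
// is an assumption; the code only requires 8 "/"-separated parts with the
// millisecond timestamp at index 6 and the ordinal at index 7). A header such
// as
//
//	/v0/{collection}/{key}/events/{type}/1400000000000/42
//
// splits (with a leading empty element) so that parts[6] == "1400000000000"
// and parts[7] == "42"; the timestamp is then converted to a time.Time using
// secs = ms/1000 and nsecs = (ms%1000)*1000000, exactly as in the code above.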
//
// ListEvents
//
//
// Search
//
// Provides optional searching parameters to a call to ListEvents()
type ListEventsQuery struct {
// The number of results to return per call to Orchestrate. The default
// if this is not set is to return 10 at a time, the maximum that can be
// returned is 100.
Limit int
// This is the timestamp and ordinal that should be the oldest item
// included in the Event listing. Since Events are listed newest to oldest
// this will be the last item returned (if it exists). The precision of
// the time value is milliseconds.
Start time.Time
StartOrdinal int64
// Events up to this timestamp will be included in the listing. Note that
// if EndOrdinal is not set then End behaves the same as Before. The time
// will be truncated to milliseconds.
End time.Time
EndOrdinal int64
// After the time/ordinal pairing which all events must be newer than in
// order to be included in the results. Leaving Ordinal at zero has the
// effect of including all events with the same timestamp (leaving After
// to work like Start). The time will be truncated to milliseconds for
// the search.
After time.Time
AfterOrdinal int64
// Only include listings before this timestamp. Optionally you can include
// an ordinal as well which will be used if an event exists at the exact
// same ms as Before. The precision of this time value is in milliseconds.
Before time.Time
BeforeOrdinal int64
}
// Sets up an Events listing. This does not actually perform the query, that is
// done on the first call to Next() in the iterator. If opts is nil then
// default listing parameters are used, which will return all events and
// limits the query to 10 items at a time.
func (c *Collection) ListEvents(
key, typ string, opts *ListEventsQuery,
) *Iterator {
var path string
// Build a query from the user provided values.
if opts != nil {
query := make(url.Values, 10)
if opts.Limit != 0 {
query.Add("limit", strconv.Itoa(opts.Limit))
}
var defaultTime time.Time
if opts.After != defaultTime {
if opts.AfterOrdinal != 0 {
query.Add("afterEvent", fmt.Sprintf("%d/%d",
opts.After.UnixNano()/1000000, opts.AfterOrdinal))
} else {
query.Add("afterEvent",
strconv.FormatInt(opts.After.UnixNano()/1000000, 10))
}
}
if opts.Before != defaultTime {
if opts.BeforeOrdinal != 0 {
query.Add("beforeEvent", fmt.Sprintf("%d/%d",
opts.Before.UnixNano()/1000000, opts.BeforeOrdinal))
} else {
query.Add("beforeEvent",
strconv.FormatInt(opts.Before.UnixNano()/1000000, 10))
}
}
if opts.End != defaultTime {
if opts.EndOrdinal != 0 {
query.Add("endEvent", fmt.Sprintf("%d/%d",
opts.End.UnixNano()/1000000, opts.EndOrdinal))
} else {
query.Add("endEvent",
strconv.FormatInt(opts.End.UnixNano()/1000000, 10))
}
}
if opts.Start != defaultTime
|
{
if opts.StartOrdinal != 0 {
query.Add("startEvent", fmt.Sprintf("%d/%d",
opts.Start.UnixNano()/1000000, opts.StartOrdinal))
} else {
query.Add("startEvent",
strconv.FormatInt(opts.Start.UnixNano()/1000000, 10))
}
}
|
conditional_block
|
|
event.go
|
a new Ordinal value. To update an existing
// Event use UpdateEvent() instead.
//
// Note that the key should exist, otherwise this call will have unpredictable
// results.
func (c *Collection) AddEvent(
key, typ string, value interface{},
) (*Event, error) {
return c.innerAddEvent(key, typ, nil, value)
}
// Like AddEvent() except this lets you specify the timestamp that will be
// attached to the event.
func (c *Collection) AddEventWithTimestamp(
key, typ string, ts time.Time, value interface{},
) (*Event, error) {
return c.innerAddEvent(key, typ, &ts, value)
}
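// Usage sketch (illustrative only; "collection" is a hypothetical *Collection):
// any JSON-marshalable value can be stored, and AddEventWithTimestamp differs
// from AddEvent only in pinning the event to a caller-supplied time, which the
// path encoding truncates to milliseconds.
//
//	type login struct {
//		IP string `json:"ip"`
//	}
//	ev, err := collection.AddEvent("user-1", "login", login{IP: "10.0.0.1"})
//	// ev.Timestamp and ev.Ordinal are filled in from the response headers.
//	ev2, err2 := collection.AddEventWithTimestamp("user-1", "login",
//		time.Now().Add(-time.Hour), login{IP: "10.0.0.2"})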
// Inner implementation of AddEvent*
func (c *Collection) innerAddEvent(
key, typ string, ts *time.Time, value interface{},
) (*Event, error) {
event := &Event{
Collection: c,
Key: key,
Type: typ,
}
// Encode the JSON message into a raw value that we can return to the
// client if necessary.
if rawMsg, err := json.Marshal(value); err != nil {
return nil, err
} else {
event.Value = json.RawMessage(rawMsg)
}
// Perform the actual POST
headers := map[string]string{"Content-Type": "application/json"}
var path string
if ts != nil {
path = fmt.Sprintf("%s/%s/events/%s/%d", c.Name, key, typ,
ts.UnixNano()/1000000)
} else {
path = fmt.Sprintf("%s/%s/events/%s", c.Name, key, typ)
}
resp, err := c.client.emptyReply("POST", path, headers,
bytes.NewBuffer(event.Value), 201)
if err != nil {
return nil, err
}
// Get the Location header and parse it. The Header will give us the
// Ordinal.
location := resp.Header.Get("Location")
if location == "" {
return nil, fmt.Errorf("Missing Location header.")
} else if parts := strings.Split(location, "/"); len(parts) != 8 {
return nil, fmt.Errorf("Malformed Location header.")
} else if ts, err := strconv.ParseInt(parts[6], 10, 64); err != nil {
return nil, fmt.Errorf("Malformed Timestamp in the Location header.")
} else if ord, err := strconv.ParseInt(parts[7], 10, 64); err != nil {
return nil, fmt.Errorf("Malformed Ordinal in the Location header.")
} else {
secs := ts / 1000
nsecs := (ts % 1000) * 1000000
event.Timestamp = time.Unix(secs, nsecs)
event.Ordinal = ord
}
// Get the Ref via the Etag header.
if etag := resp.Header.Get("Etag"); etag == "" {
return nil, fmt.Errorf("Missing ETag header.")
} else if parts := strings.Split(etag, `"`); len(parts) != 3 {
return nil, fmt.Errorf("Malformed ETag header.")
} else {
event.Ref = parts[1]
}
// Success
return event, nil
}
//
// DeleteEvent
//
// Removes an event from the collection. This succeeds even if the event did
// not exist prior to this call. Note that all event deletes are final and
// cannot be undone.
func (c *Collection) DeleteEvent(
key, typ string, ts time.Time, ordinal int64,
) error {
path := fmt.Sprintf("%s/%s/events/%s/%d/%d?purge=true",
c.Name, key, typ, ts.UnixNano()/1000000, ordinal)
_, err := c.client.emptyReply("DELETE", path, nil, nil, 204)
return err
}
//
// GetEvent
//
// Returns an individual event with the given details.
func (c *Collection)
|
(
key, typ string, ts time.Time, ordinal int64, value interface{},
) (*Event, error) {
event := &Event{
Collection: c,
Key: key,
Ordinal: ordinal,
Timestamp: ts,
Type: typ,
}
// Perform the actual GET
path := fmt.Sprintf("%s/%s/events/%s/%d/%d", c.Name, key, typ,
ts.UnixNano()/1000000, ordinal)
var responseData jsonEvent
_, err := c.client.jsonReply("GET", path, nil, 200, &responseData)
if err != nil {
return nil, err
}
// Move the data from the returned values into the Event object.
event.Value = responseData.Value
event.Ref = responseData.Path.Ref
secs := responseData.Timestamp / 1000
nsecs := (responseData.Timestamp % 1000) * 1000000
event.Timestamp = time.Unix(secs, nsecs)
event.Ordinal = responseData.Ordinal
// If the user provided us a place to unmarshal the 'value' field into
// we do that here.
if value != nil {
return event, event.Unmarshal(value)
}
// Success
return event, nil
}
//
// UpdateEvent
//
// Updates an event at the given location. In order for this to work the Event
// must exist prior to this call.
func (c *Collection) UpdateEvent(
key, typ string, ts time.Time, ordinal int64, value interface{},
) (*Event, error) {
headers := map[string]string{"Content-Type": "application/json"}
return c.innerUpdateEvent(key, typ, ts, ordinal, value, headers)
}
// Inner implementation used in both UpdateEvent and Event.Update.
func (c *Collection) innerUpdateEvent(
key, typ string, ts time.Time, ordinal int64, value interface{},
headers map[string]string,
) (*Event, error) {
event := &Event{
Collection: c,
Key: key,
Ordinal: ordinal,
Timestamp: ts,
Type: typ,
}
// Encode the JSON message into a raw value that we can return to the
// client if necessary.
if rawMsg, err := json.Marshal(value); err != nil {
return nil, err
} else {
event.Value = json.RawMessage(rawMsg)
}
// Perform the actual PUT
path := fmt.Sprintf("%s/%s/events/%s/%d/%d", c.Name, key, typ,
ts.UnixNano()/1000000, ordinal)
resp, err := c.client.emptyReply("PUT", path, headers,
bytes.NewBuffer(event.Value), 204)
if err != nil {
return nil, err
}
// Get the Location header and parse it. The Header will give us the
// Ordinal.
location := resp.Header.Get("Location")
if location == "" {
return nil, fmt.Errorf("Missing Location header.")
} else if parts := strings.Split(location, "/"); len(parts) != 8 {
return nil, fmt.Errorf("Malformed Location header.")
} else if ts, err := strconv.ParseInt(parts[6], 10, 64); err != nil {
return nil, fmt.Errorf("Malformed Timestamp in the Location header.")
} else if ord, err := strconv.ParseInt(parts[7], 10, 64); err != nil {
return nil, fmt.Errorf("Malformed Ordinal in the Location header.")
} else {
secs := ts / 1000
nsecs := (ts % 1000) * 1000000
event.Timestamp = time.Unix(secs, nsecs)
event.Ordinal = ord
}
// Get the Ref via the Etag header.
if etag := resp.Header.Get("Etag"); etag == "" {
return nil, fmt.Errorf("Missing ETag header.")
} else if parts := strings.Split(etag, `"`); len(parts) != 3 {
return nil, fmt.Errorf("Malformed ETag header.")
} else {
event.Ref = parts[1]
}
// Success
return event, nil
}
//
// ListEvents
//
//
// Search
//
// Provides optional searching parameters to a call to ListEvents()
type ListEventsQuery struct {
// The number of results to return per call to Orchestrate. The default
// if this is not set is to return 10 at a time, the maximum that can be
// returned is 100.
Limit int
// This is the timestamp and ordinal that should be the oldest item
// included in the Event listing. Since Events are listed newest to oldest
// this will be the last item returned (if it exists). The precision of
// the time value is milliseconds.
Start time.Time
StartOrdinal int64
// Events up to this timestamp will be included in the listing. Note that
// if EndOrdinal is not set then End behaves the same as Before. The time
// will be truncated to milliseconds.
End time.Time
EndOrdinal int64
// After the time/ordinal pairing which all events must be newer than in
// order to be included in the results. Leaving Ordinal at zero
|
GetEvent
|
identifier_name
|
event.go
|
given a new Ordinal value. To update an existing
// Event use UpdateEvent() instead.
//
// Note that the key should exist, otherwise this call will have unpredictable
// results.
func (c *Collection) AddEvent(
key, typ string, value interface{},
) (*Event, error) {
return c.innerAddEvent(key, typ, nil, value)
}
// Like AddEvent() except this lets you specify the timestamp that will be
// attached to the event.
func (c *Collection) AddEventWithTimestamp(
key, typ string, ts time.Time, value interface{},
) (*Event, error) {
return c.innerAddEvent(key, typ, &ts, value)
}
// Inner implementation of AddEvent*
func (c *Collection) innerAddEvent(
key, typ string, ts *time.Time, value interface{},
) (*Event, error) {
event := &Event{
Collection: c,
Key: key,
|
if rawMsg, err := json.Marshal(value); err != nil {
return nil, err
} else {
event.Value = json.RawMessage(rawMsg)
}
// Perform the actual POST
headers := map[string]string{"Content-Type": "application/json"}
var path string
if ts != nil {
path = fmt.Sprintf("%s/%s/events/%s/%d", c.Name, key, typ,
ts.UnixNano()/1000000)
} else {
path = fmt.Sprintf("%s/%s/events/%s", c.Name, key, typ)
}
resp, err := c.client.emptyReply("POST", path, headers,
bytes.NewBuffer(event.Value), 201)
if err != nil {
return nil, err
}
// Get the Location header and parse it. The Header will give us the
// Ordinal.
location := resp.Header.Get("Location")
if location == "" {
return nil, fmt.Errorf("Missing Location header.")
} else if parts := strings.Split(location, "/"); len(parts) != 8 {
return nil, fmt.Errorf("Malformed Location header.")
} else if ts, err := strconv.ParseInt(parts[6], 10, 64); err != nil {
return nil, fmt.Errorf("Malformed Timestamp in the Location header.")
} else if ord, err := strconv.ParseInt(parts[7], 10, 64); err != nil {
return nil, fmt.Errorf("Malformed Ordinal in the Location header.")
} else {
secs := ts / 1000
nsecs := (ts % 1000) * 1000000
event.Timestamp = time.Unix(secs, nsecs)
event.Ordinal = ord
}
// Get the Ref via the Etag header.
if etag := resp.Header.Get("Etag"); etag == "" {
return nil, fmt.Errorf("Missing ETag header.")
} else if parts := strings.Split(etag, `"`); len(parts) != 3 {
return nil, fmt.Errorf("Malformed ETag header.")
} else {
event.Ref = parts[1]
}
// Success
return event, nil
}
//
// DeleteEvent
//
// Removes an event from the collection. This succeeds even if the event did
// not exist prior to this call. Note that all event deletes are final and
// cannot be undone.
func (c *Collection) DeleteEvent(
key, typ string, ts time.Time, ordinal int64,
) error {
path := fmt.Sprintf("%s/%s/events/%s/%d/%d?purge=true",
c.Name, key, typ, ts.UnixNano()/1000000, ordinal)
_, err := c.client.emptyReply("DELETE", path, nil, nil, 204)
return err
}
//
// GetEvent
//
// Returns an individual event with the given details.
func (c *Collection) GetEvent(
key, typ string, ts time.Time, ordinal int64, value interface{},
) (*Event, error) {
event := &Event{
Collection: c,
Key: key,
Ordinal: ordinal,
Timestamp: ts,
Type: typ,
}
// Perform the actual GET
path := fmt.Sprintf("%s/%s/events/%s/%d/%d", c.Name, key, typ,
ts.UnixNano()/1000000, ordinal)
var responseData jsonEvent
_, err := c.client.jsonReply("GET", path, nil, 200, &responseData)
if err != nil {
return nil, err
}
// Move the data from the returned values into the Event object.
event.Value = responseData.Value
event.Ref = responseData.Path.Ref
secs := responseData.Timestamp / 1000
nsecs := (responseData.Timestamp % 1000) * 1000000
event.Timestamp = time.Unix(secs, nsecs)
event.Ordinal = responseData.Ordinal
// If the user provided us a place to unmarshal the 'value' field into
// we do that here.
if value != nil {
return event, event.Unmarshal(value)
}
// Success
return event, nil
}
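// Usage sketch (illustrative; variable names are assumptions): passing a
// non-nil pointer as the final argument asks GetEvent to unmarshal the stored
// JSON value into it, in addition to keeping the raw value on the Event.
//
//	var v struct {
//		IP string `json:"ip"`
//	}
//	ev, err := collection.GetEvent("user-1", "login", ts, ordinal, &v)
//	if err == nil {
//		// v.IP now holds the decoded field; ev.Value holds the raw JSON.
//	}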
//
// UpdateEvent
//
// Updates an event at the given location. In order for this to work the Event
// must exist prior to this call.
func (c *Collection) UpdateEvent(
key, typ string, ts time.Time, ordinal int64, value interface{},
) (*Event, error) {
headers := map[string]string{"Content-Type": "application/json"}
return c.innerUpdateEvent(key, typ, ts, ordinal, value, headers)
}
// Inner implementation used in both UpdateEvent and Event.Update.
func (c *Collection) innerUpdateEvent(
key, typ string, ts time.Time, ordinal int64, value interface{},
headers map[string]string,
) (*Event, error) {
event := &Event{
Collection: c,
Key: key,
Ordinal: ordinal,
Timestamp: ts,
Type: typ,
}
// Encode the JSON message into a raw value that we can return to the
// client if necessary.
if rawMsg, err := json.Marshal(value); err != nil {
return nil, err
} else {
event.Value = json.RawMessage(rawMsg)
}
// Perform the actual PUT
path := fmt.Sprintf("%s/%s/events/%s/%d/%d", c.Name, key, typ,
ts.UnixNano()/1000000, ordinal)
resp, err := c.client.emptyReply("PUT", path, headers,
bytes.NewBuffer(event.Value), 204)
if err != nil {
return nil, err
}
// Get the Location header and parse it. The Header will give us the
// Ordinal.
location := resp.Header.Get("Location")
if location == "" {
return nil, fmt.Errorf("Missing Location header.")
} else if parts := strings.Split(location, "/"); len(parts) != 8 {
return nil, fmt.Errorf("Malformed Location header.")
} else if ts, err := strconv.ParseInt(parts[6], 10, 64); err != nil {
return nil, fmt.Errorf("Malformed Timestamp in the Location header.")
} else if ord, err := strconv.ParseInt(parts[7], 10, 64); err != nil {
return nil, fmt.Errorf("Malformed Ordinal in the Location header.")
} else {
secs := ts / 1000
nsecs := (ts % 1000) * 1000000
event.Timestamp = time.Unix(secs, nsecs)
event.Ordinal = ord
}
// Get the Ref via the Etag header.
if etag := resp.Header.Get("Etag"); etag == "" {
return nil, fmt.Errorf("Missing ETag header.")
} else if parts := strings.Split(etag, `"`); len(parts) != 3 {
return nil, fmt.Errorf("Malformed ETag header.")
} else {
event.Ref = parts[1]
}
// Success
return event, nil
}
//
// ListEvents
//
//
// Search
//
// Provides optional searching parameters to a call to ListEvents()
type ListEventsQuery struct {
// The number of results to return per call to Orchestrate. The default
// if this is not set is to return 10 at a time, the maximum that can be
// returned is 100.
Limit int
// This is the timestamp and ordinal that should be the oldest item
// included in the Event listing. Since Events are listed newest to oldest
// this will be the last item returned (if it exists). The precision of
// the time value is milliseconds.
Start time.Time
StartOrdinal int64
// Events up to this timestamp will be included in the listing. Note that
// if EndOrdinal is not set then End behaves the same as Before. The time
// will be truncated to milliseconds.
End time.Time
EndOrdinal int64
// After the time/ordinal pairing which all events must be newer than in
// order to be included in the results. Leaving Ordinal at zero
|
Type: typ,
}
// Encode the JSON message into a raw value that we can return to the
// client if necessary.
|
random_line_split
|
event.go
|
a new Ordinal value. To update an existing
// Event use UpdateEvent() instead.
//
// Note that the key should exist, otherwise this call will have unpredictable
// results.
func (c *Collection) AddEvent(
key, typ string, value interface{},
) (*Event, error)
|
// Like AddEvent() except this lets you specify the timestamp that will be
// attached to the event.
func (c *Collection) AddEventWithTimestamp(
key, typ string, ts time.Time, value interface{},
) (*Event, error) {
return c.innerAddEvent(key, typ, &ts, value)
}
// Inner implementation of AddEvent*
func (c *Collection) innerAddEvent(
key, typ string, ts *time.Time, value interface{},
) (*Event, error) {
event := &Event{
Collection: c,
Key: key,
Type: typ,
}
// Encode the JSON message into a raw value that we can return to the
// client if necessary.
if rawMsg, err := json.Marshal(value); err != nil {
return nil, err
} else {
event.Value = json.RawMessage(rawMsg)
}
// Perform the actual POST
headers := map[string]string{"Content-Type": "application/json"}
var path string
if ts != nil {
path = fmt.Sprintf("%s/%s/events/%s/%d", c.Name, key, typ,
ts.UnixNano()/1000000)
} else {
path = fmt.Sprintf("%s/%s/events/%s", c.Name, key, typ)
}
resp, err := c.client.emptyReply("POST", path, headers,
bytes.NewBuffer(event.Value), 201)
if err != nil {
return nil, err
}
// Get the Location header and parse it. The Header will give us the
// Ordinal.
location := resp.Header.Get("Location")
if location == "" {
return nil, fmt.Errorf("Missing Location header.")
} else if parts := strings.Split(location, "/"); len(parts) != 8 {
return nil, fmt.Errorf("Malformed Location header.")
} else if ts, err := strconv.ParseInt(parts[6], 10, 64); err != nil {
return nil, fmt.Errorf("Malformed Timestamp in the Location header.")
} else if ord, err := strconv.ParseInt(parts[7], 10, 64); err != nil {
return nil, fmt.Errorf("Malformed Ordinal in the Location header.")
} else {
secs := ts / 1000
nsecs := (ts % 1000) * 1000000
event.Timestamp = time.Unix(secs, nsecs)
event.Ordinal = ord
}
// Get the Ref via the Etag header.
if etag := resp.Header.Get("Etag"); etag == "" {
return nil, fmt.Errorf("Missing ETag header.")
} else if parts := strings.Split(etag, `"`); len(parts) != 3 {
return nil, fmt.Errorf("Malformed ETag header.")
} else {
event.Ref = parts[1]
}
// Success
return event, nil
}
//
// DeleteEvent
//
// Removes an event from the collection. This succeeds even if the event did
// not exist prior to this call. Note that all event deletes are final and
// cannot be undone.
func (c *Collection) DeleteEvent(
key, typ string, ts time.Time, ordinal int64,
) error {
path := fmt.Sprintf("%s/%s/events/%s/%d/%d?purge=true",
c.Name, key, typ, ts.UnixNano()/1000000, ordinal)
_, err := c.client.emptyReply("DELETE", path, nil, nil, 204)
return err
}
//
// GetEvent
//
// Returns an individual event with the given details.
func (c *Collection) GetEvent(
key, typ string, ts time.Time, ordinal int64, value interface{},
) (*Event, error) {
event := &Event{
Collection: c,
Key: key,
Ordinal: ordinal,
Timestamp: ts,
Type: typ,
}
// Perform the actual GET
path := fmt.Sprintf("%s/%s/events/%s/%d/%d", c.Name, key, typ,
ts.UnixNano()/1000000, ordinal)
var responseData jsonEvent
_, err := c.client.jsonReply("GET", path, nil, 200, &responseData)
if err != nil {
return nil, err
}
// Move the data from the returned values into the Event object.
event.Value = responseData.Value
event.Ref = responseData.Path.Ref
secs := responseData.Timestamp / 1000
nsecs := (responseData.Timestamp % 1000) * 1000000
event.Timestamp = time.Unix(secs, nsecs)
event.Ordinal = responseData.Ordinal
// If the user provided us a place to unmarshal the 'value' field into
// we do that here.
if value != nil {
return event, event.Unmarshal(value)
}
// Success
return event, nil
}
//
// UpdateEvent
//
// Updates an event at the given location. In order for this to work the Event
// must exist prior to this call.
func (c *Collection) UpdateEvent(
key, typ string, ts time.Time, ordinal int64, value interface{},
) (*Event, error) {
headers := map[string]string{"Content-Type": "application/json"}
return c.innerUpdateEvent(key, typ, ts, ordinal, value, headers)
}
// Inner implementation used in both UpdateEvent and Event.Update.
func (c *Collection) innerUpdateEvent(
key, typ string, ts time.Time, ordinal int64, value interface{},
headers map[string]string,
) (*Event, error) {
event := &Event{
Collection: c,
Key: key,
Ordinal: ordinal,
Timestamp: ts,
Type: typ,
}
// Encode the JSON message into a raw value that we can return to the
// client if necessary.
if rawMsg, err := json.Marshal(value); err != nil {
return nil, err
} else {
event.Value = json.RawMessage(rawMsg)
}
// Perform the actual PUT
path := fmt.Sprintf("%s/%s/events/%s/%d/%d", c.Name, key, typ,
ts.UnixNano()/1000000, ordinal)
resp, err := c.client.emptyReply("PUT", path, headers,
bytes.NewBuffer(event.Value), 204)
if err != nil {
return nil, err
}
// Get the Location header and parse it. The Header will give us the
// Ordinal.
location := resp.Header.Get("Location")
if location == "" {
return nil, fmt.Errorf("Missing Location header.")
} else if parts := strings.Split(location, "/"); len(parts) != 8 {
return nil, fmt.Errorf("Malformed Location header.")
} else if ts, err := strconv.ParseInt(parts[6], 10, 64); err != nil {
return nil, fmt.Errorf("Malformed Timestamp in the Location header.")
} else if ord, err := strconv.ParseInt(parts[7], 10, 64); err != nil {
return nil, fmt.Errorf("Malformed Ordinal in the Location header.")
} else {
secs := ts / 1000
nsecs := (ts % 1000) * 1000000
event.Timestamp = time.Unix(secs, nsecs)
event.Ordinal = ord
}
// Get the Ref via the Etag header.
if etag := resp.Header.Get("Etag"); etag == "" {
return nil, fmt.Errorf("Missing ETag header.")
} else if parts := strings.Split(etag, `"`); len(parts) != 3 {
return nil, fmt.Errorf("Malformed ETag header.")
} else {
event.Ref = parts[1]
}
// Success
return event, nil
}
//
// ListEvents
//
//
// Search
//
// Provides optional searching parameters to a call to ListEvents()
type ListEventsQuery struct {
// The number of results to return per call to Orchestrate. The default
// if this is not set is to return 10 at a time, the maximum that can be
// returned is 100.
Limit int
// This is the timestamp and ordinal that should be the oldest item
// included in the Event listing. Since Events are listed newest to oldest
// this will be the last item returned (if it exists). The precision of
// the time value is milliseconds.
Start time.Time
StartOrdinal int64
// Events up to this timestamp will be included in the listing. Note that
// if EndOrdinal is not set then End behaves the same as Before. The time
// will be truncated to milliseconds.
End time.Time
EndOrdinal int64
// After the time/ordinal pairing which all events must be newer than in
// order to be included in the results. Leaving Ordinal at
|
{
return c.innerAddEvent(key, typ, nil, value)
}
|
identifier_body
|
day15.js
|
those, the Goblin first in reading order (the one to the right of the Elf) is selected. The selected Goblin's hit points (2) are reduced by the Elf's attack power (3), reducing its hit points to -1, killing it.
After attacking, the unit's turn ends. Regardless of how the unit's turn ends, the next unit in the round takes its turn. If all units have taken turns in this round, the round ends, and a new round begins.
The Elves look quite outnumbered. You need to determine the outcome of the battle: the number of full rounds that were completed (not counting the round in which combat ends) multiplied by the sum of the hit points of all remaining units at the moment combat ends. (Combat only ends when a unit finds no targets during its turn.)
Below is an entire sample combat. Next to each map, each row's units' hit points are listed from left to right.
Initially:
#######
#.G...# G(200)
#...EG# E(200), G(200)
#.#.#G# G(200)
#..G#E# G(200), E(200)
#.....#
#######
After 1 round:
#######
#..G..# G(200)
#...EG# E(197), G(197)
#.#G#G# G(200), G(197)
#...#E# E(197)
#.....#
#######
After 2 rounds:
#######
#...G.# G(200)
#..GEG# G(200), E(188), G(194)
#.#.#G# G(194)
#...#E# E(194)
#.....#
#######
Combat ensues; eventually, the top Elf dies:
After 23 rounds:
#######
#...G.# G(200)
#..G.G# G(200), G(131)
#.#.#G# G(131)
#...#E# E(131)
#.....#
#######
After 24 rounds:
#######
#..G..# G(200)
#...G.# G(131)
#.#G#G# G(200), G(128)
#...#E# E(128)
#.....#
#######
After 25 rounds:
#######
#.G...# G(200)
#..G..# G(131)
#.#.#G# G(125)
#..G#E# G(200), E(125)
#.....#
#######
After 26 rounds:
#######
#G....# G(200)
#.G...# G(131)
#.#.#G# G(122)
#...#E# E(122)
#..G..# G(200)
#######
After 27 rounds:
#######
#G....# G(200)
#.G...# G(131)
#.#.#G# G(119)
#...#E# E(119)
#...G.# G(200)
#######
After 28 rounds:
#######
#G....# G(200)
#.G...# G(131)
#.#.#G# G(116)
#...#E# E(113)
#....G# G(200)
#######
More combat ensues; eventually, the bottom Elf dies:
After 47 rounds:
#######
#G....# G(200)
#.G...# G(131)
#.#.#G# G(59)
#...#.#
#....G# G(200)
#######
Before the 48th round can finish, the top-left Goblin finds that there are no targets remaining, and so combat ends. So, the number of full rounds that were completed is 47, and the sum of the hit points of all remaining units is 200+131+59+200 = 590. From these, the outcome of the battle is 47 * 590 = 27730.
Here are a few example summarized combats:
####### #######
#G..#E# #...#E# E(200)
#E#E.E# #E#...# E(197)
#G.##.# --> #.E##.# E(185)
#...#E# #E..#E# E(200), E(200)
#...E.# #.....#
####### #######
Combat ends after 37 full rounds
Elves win with 982 total hit points left
Outcome: 37 * 982 = 36334
####### #######
#E..EG# #.E.E.# E(164), E(197)
#.#G.E# #.#E..# E(200)
#E.##E# --> #E.##.# E(98)
#G..#.# #.E.#.# E(200)
#..E#.# #...#.#
####### #######
Combat ends after 46 full rounds
Elves win with 859 total hit points left
Outcome: 46 * 859 = 39514
####### #######
#E.G#.# #G.G#.# G(200), G(98)
#.#G..# #.#G..# G(200)
#G.#.G# --> #..#..#
#G..#.# #...#G# G(95)
#...E.# #...G.# G(200)
####### #######
Combat ends after 35 full rounds
Goblins win with 793 total hit points left
Outcome: 35 * 793 = 27755
####### #######
#.E...# #.....#
#.#..G# #.#G..# G(200)
#.###.# --> #.###.#
#E#G#G# #.#.#.#
#...#G# #G.G#G# G(98), G(38), G(200)
####### #######
Combat ends after 54 full rounds
Goblins win with 536 total hit points left
Outcome: 54 * 536 = 28944
######### #########
#G......# #.G.....# G(137)
#.E.#...# #G.G#...# G(200), G(200)
#..##..G# #.G##...# G(200)
#...##..# --> #...##..#
#...#...# #.G.#...# G(200)
#.G...G.# #.......#
#.....G.# #.......#
######### #########
Combat ends after 20 full rounds
Goblins win with 937 total hit points left
Outcome: 20 * 937 = 18740
What is the outcome of the combat described in your puzzle input?
*/
/**
* Could keep map as lines of strings like in Day 13?
* Would require repeated loops for target acquisition/distance/reading order determination?
* Create nodes linking each instead
* Create fighter class
* Distinguish friend and foe - probably just extend fighter
* -or just compare chars? though modifying elves/goblins separately might be needed for part 2
* Separate fighter round into phases
* -acquire target
* -attack
* -move
* Find shortest path
* Choose by reading order upon multiple
* Break out of round when any single fighter finds no enemies left or deduct 1 from final round count
*/
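// Rough sketch of the "find shortest path" step from the plan above (not the
// final solution): a breadth-first search over the linked nodes that returns
// the step count to an open target square, expanding neighbours in reading
// order (up, left, right, down) as a starting point for the tie-breaking
// rules. The node shape (up/left/right/down, isTraversable, occupant) matches
// the Node class defined below.
function bfsDistance(start, goal) {
  const readingOrder = ['up', 'left', 'right', 'down'];
  const queue = [{ node: start, distance: 0 }];
  const visited = new Set([start]);
  while (queue.length) {
    const { node, distance } = queue.shift();
    if (node === goal) return distance;
    readingOrder.forEach((dir) => {
      const next = node[dir];
      if (next && next.isTraversable && !next.occupant && !visited.has(next)) {
        visited.add(next);
        queue.push({ node: next, distance: distance + 1 });
      }
    });
  }
  return Infinity; // goal is unreachable
}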
const mapStrings = input.day15.split('\n');
class Node {
constructor(x, y, char) {
// Might not end up needing all of these
this.x = x;
this.y = y;
this.char = char === '#' ? char : '.';
this.isTraversable = char !== '#';
this.occupant = null; // Need something like this to not traverse occupied nodes + acquire target
// Link these after map is generated?
this.left = null;
this.right = null;
this.up = null;
this.down = null;
this.directions = ['left', 'up', 'right', 'down'];
}
isAdjacent(target) {
// The commented-out check also returned true for diagonals; a Manhattan distance of 1 avoids that.
// return Math.abs(this.x - target.x) === 1 || Math.abs(this.y - target.y) === 1;
return this.getDistance(target) === 1;
}
getDistance({ x, y }) {
return Math.abs(this.x - x) + Math.abs(this.y - y);
}
|
getPath
|
identifier_name
|
|
day15.js
|
19)
#...G.# G(200)
#######
After 28 rounds:
#######
#G....# G(200)
#.G...# G(131)
#.#.#G# G(116)
#...#E# E(113)
#....G# G(200)
#######
More combat ensues; eventually, the bottom Elf dies:
After 47 rounds:
#######
#G....# G(200)
#.G...# G(131)
#.#.#G# G(59)
#...#.#
#....G# G(200)
#######
Before the 48th round can finish, the top-left Goblin finds that there are no targets remaining, and so combat ends. So, the number of full rounds that were completed is 47, and the sum of the hit points of all remaining units is 200+131+59+200 = 590. From these, the outcome of the battle is 47 * 590 = 27730.
Here are a few example summarized combats:
####### #######
#G..#E# #...#E# E(200)
#E#E.E# #E#...# E(197)
#G.##.# --> #.E##.# E(185)
#...#E# #E..#E# E(200), E(200)
#...E.# #.....#
####### #######
Combat ends after 37 full rounds
Elves win with 982 total hit points left
Outcome: 37 * 982 = 36334
####### #######
#E..EG# #.E.E.# E(164), E(197)
#.#G.E# #.#E..# E(200)
#E.##E# --> #E.##.# E(98)
#G..#.# #.E.#.# E(200)
#..E#.# #...#.#
####### #######
Combat ends after 46 full rounds
Elves win with 859 total hit points left
Outcome: 46 * 859 = 39514
####### #######
#E.G#.# #G.G#.# G(200), G(98)
#.#G..# #.#G..# G(200)
#G.#.G# --> #..#..#
#G..#.# #...#G# G(95)
#...E.# #...G.# G(200)
####### #######
Combat ends after 35 full rounds
Goblins win with 793 total hit points left
Outcome: 35 * 793 = 27755
####### #######
#.E...# #.....#
#.#..G# #.#G..# G(200)
#.###.# --> #.###.#
#E#G#G# #.#.#.#
#...#G# #G.G#G# G(98), G(38), G(200)
####### #######
Combat ends after 54 full rounds
Goblins win with 536 total hit points left
Outcome: 54 * 536 = 28944
######### #########
#G......# #.G.....# G(137)
#.E.#...# #G.G#...# G(200), G(200)
#..##..G# #.G##...# G(200)
#...##..# --> #...##..#
#...#...# #.G.#...# G(200)
#.G...G.# #.......#
#.....G.# #.......#
######### #########
Combat ends after 20 full rounds
Goblins win with 937 total hit points left
Outcome: 20 * 937 = 18740
What is the outcome of the combat described in your puzzle input?
*/
/**
* Could keep map as lines of strings like in Day 13?
* Would require repeated loops for target acquisition/distance/reading order determination?
* Create nodes linking each instead
* Create fighter class
* Distinguish friend and foe - probably just extend fighter
* -or just compare chars? though modifying elves/goblins separately might be needed for part 2
* Separate fighter round into phases
* -acquire target
* -attack
* -move
* Find shortest path
* Choose by reading order upon multiple
* Break out of round when any single fighter finds no enemies left or deduct 1 from final round count
*/
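// Sketch of the attack-phase target choice from the plan above (illustrative,
// not yet wired into the classes below): among already-adjacent enemies, pick
// the one with the fewest hit points, breaking ties by reading order (row
// first, then column).
function chooseAdjacentTarget(adjacentEnemies) {
  return adjacentEnemies
    .slice()
    .sort((a, b) => a.hp - b.hp
      || a.location.y - b.location.y
      || a.location.x - b.location.x)[0] || null;
}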
const mapStrings = input.day15.split('\n');
class Node {
constructor(x, y, char) {
// Might not end up needing all of these
this.x = x;
this.y = y;
this.char = char === '#' ? char : '.';
this.isTraversable = char !== '#';
this.occupant = null; // Need something like this to not traverse occupied nodes + acquire target
// Link these after map is generated?
this.left = null;
this.right = null;
this.up = null;
this.down = null;
this.directions = ['left', 'up', 'right', 'down'];
}
isAdjacent(target) {
// The commented-out check also returned true for diagonals; a Manhattan distance of 1 avoids that.
// return Math.abs(this.x - target.x) === 1 || Math.abs(this.y - target.y) === 1;
return this.getDistance(target) === 1;
}
getDistance({ x, y }) {
return Math.abs(this.x - x) + Math.abs(this.y - y);
}
getPath({ x, y }) {}
}
function generateMap(lines = mapStrings) {
const nodes = lines.map((line, y) => [...line].map((char, x) => new Node(x, y, char)));
nodes.forEach(line => line.forEach((node) => {
if (nodes[node.y][node.x - 1]) node.left = nodes[node.y][node.x - 1];
if (nodes[node.y - 1]) node.up = nodes[node.y - 1][node.x];
if (nodes[node.y][node.x + 1]) node.right = nodes[node.y][node.x + 1];
if (nodes[node.y + 1]) node.down = nodes[node.y + 1][node.x];
// node.surroundings = [node.left, node.right, node.up, node.down];
}));
return nodes;
}
class Fighter {
// Use nodes instead of x,y for location?
constructor(x, y, map) {
this.location = map[y][x]; // Grab node with given x,y from however we're storing them
// this.char = char; // To be used to determine allegiance? or just set side based on char without storing char
this.hp = 200;
this.ap = 3;
this.isAlive = true;
this.location.occupant = this;
}
takeDamage(n) {
this.hp -= n;
if (this.hp <= 0) this.isAlive = false;
}
attack(target) {
target.takeDamage(this.ap);
}
// move(...directions) { // single items
// take in array of directions
move(directions) {
directions.forEach((direction) => {
if (this.location[direction].occupant) throw new Error('Trying to move into occupied spot');
this.location.occupant = null;
this.location = this.location[direction];
this.location.occupant = this;
});
}
// use this in acquireTarget instead?
getSurroundingDistances(target) {
return this.location.directions.map((direction) => {
const square = target.location[direction];
return square && square.isTraversable && !square.occupant
? { direction, distance: this.location.getDistance(square) }
: null;
});
}
acquireTarget(enemies) {
// BFS to get closest target(s) ?
// todo get distance to nodes around the enemy, not the enemy location itself
const targetDistances = enemies
.map(enemy => ({ target: enemy, distance: this.location.getDistance(enemy.location) }))
.sort((x, y) => x.distance - y.distance);
if (targetDistances.length === 1 || targetDistances[0].distance < targetDistances[1].distance) return targetDistances[0].target;
const equidistantTargets = targetDistances.filter(
enemy => enemy.distance === targetDistances[0].distance,
);
// Determine reading order in case of multiple
}
attackPhase(target) {
// Target should already be acquired
// Subtract ap from targets hp
// todo revisit each phase's structure
}
}
class Goblin extends Fighter {
constructor(x, y, map) {
super(x, y, map);
this.enemy = Elf;
}
}
class Elf extends Fighter {
constructor(x, y, map)
|
{
super(x, y, map);
this.enemy = Goblin;
}
|
identifier_body
|
|
day15.js
|
# #!G.#G# #.G.#G#
####### ####### ####### ####### #######
In the above scenario, the Elf has three targets (the three Goblins):
Each of the Goblins has open, adjacent squares which are in range (marked with a ? on the map).
Of those squares, four are reachable (marked @); the other two (on the right) would require moving through a wall or unit to reach.
Three of these reachable squares are nearest, requiring the fewest steps (only 2) to reach (marked !).
Of those, the square which is first in reading order is chosen (+).
The unit then takes a single step toward the chosen square along the shortest path to that square. If multiple steps would put the unit equally closer to its destination, the unit chooses the step which is first in reading order. (This requires knowing when there is more than one shortest path so that you can consider the first step of each such path.) For example:
In range: Nearest: Chosen: Distance: Step:
####### ####### ####### ####### #######
#.E...# #.E...# #.E...# #4E212# #..E..#
#...?.# --> #...!.# --> #...+.# --> #32101# --> #.....#
#..?G?# #..!G.# #...G.# #432G2# #...G.#
####### ####### ####### ####### #######
The Elf sees three squares in range of a target (?), two of which are nearest (!), and so the first in reading order is chosen (+). Under "Distance", each open square is marked with its distance from the destination square; the two squares to which the Elf could move on this turn (down and to the right) are both equally good moves and would leave the Elf 2 steps from being in range of the Goblin. Because the step which is first in reading order is chosen, the Elf moves right one square.
Here's a larger example of movement:
Initially:
#########
#G..G..G#
#.......#
#.......#
#G..E..G#
#.......#
#.......#
#G..G..G#
#########
After 1 round:
#########
#.G...G.#
#...G...#
#...E..G#
#.G.....#
#.......#
#G..G..G#
#.......#
#########
After 2 rounds:
#########
#..G.G..#
#...G...#
#.G.E.G.#
#.......#
#G..G..G#
#.......#
#.......#
#########
After 3 rounds:
#########
#.......#
#..GGG..#
#..GEG..#
#G..G...#
#......G#
#.......#
#.......#
#########
Once the Goblins and Elf reach the positions above, they all are either in range of a target or cannot find any square in range of a target, and so none of the units can move until a unit dies.
After moving (or if the unit began its turn in range of a target), the unit attacks.
To attack, the unit first determines all of the targets that are in range of it by being immediately adjacent to it. If there are no such targets, the unit ends its turn. Otherwise, the adjacent target with the fewest hit points is selected; in a tie, the adjacent target with the fewest hit points which is first in reading order is selected.
The unit deals damage equal to its attack power to the selected target, reducing its hit points by that amount. If this reduces its hit points to 0 or fewer, the selected target dies: its square becomes . and it takes no further turns.
Each unit, either Goblin or Elf, has 3 attack power and starts with 200 hit points.
For example, suppose the only Elf is about to attack:
HP: HP:
G.... 9 G.... 9
..G.. 4 ..G.. 4
..EG. 2 --> ..E..
..G.. 2 ..G.. 2
...G. 1 ...G. 1
The "HP" column shows the hit points of the Goblin to the left in the corresponding row. The Elf is in range of three targets: the Goblin above it (with 4 hit points), the Goblin to its right (with 2 hit points), and the Goblin below it (also with 2 hit points). Because three targets are in range, the ones with the lowest hit points are selected: the two Goblins with 2 hit points each (one to the right of the Elf and one below the Elf). Of those, the Goblin first in reading order (the one to the right of the Elf) is selected. The selected Goblin's hit points (2) are reduced by the Elf's attack power (3), reducing its hit points to -1, killing it.
After attacking, the unit's turn ends. Regardless of how the unit's turn ends, the next unit in the round takes its turn. If all units have taken turns in this round, the round ends, and a new round begins.
The Elves look quite outnumbered. You need to determine the outcome of the battle: the number of full rounds that were completed (not counting the round in which combat ends) multiplied by the sum of the hit points of all remaining units at the moment combat ends. (Combat only ends when a unit finds no targets during its turn.)
Below is an entire sample combat. Next to each map, each row's units' hit points are listed from left to right.
Initially:
#######
#.G...# G(200)
#...EG# E(200), G(200)
#.#.#G# G(200)
#..G#E# G(200), E(200)
#.....#
#######
After 1 round:
#######
#..G..# G(200)
#...EG# E(197), G(197)
#.#G#G# G(200), G(197)
#...#E# E(197)
#.....#
#######
After 2 rounds:
#######
#...G.# G(200)
#..GEG# G(200), E(188), G(194)
#.#.#G# G(194)
#...#E# E(194)
#.....#
#######
Combat ensues; eventually, the top Elf dies:
After 23 rounds:
#######
#...G.# G(200)
#..G.G# G(200), G(131)
#.#.#G# G(131)
#...#E# E(131)
#.....#
#######
After 24 rounds:
#######
#..G..# G(200)
#...G.# G(131)
#.#G#G# G(200), G(128)
#...#E# E(128)
#.....#
#######
After 25 rounds:
#######
#.G...# G(200)
#..G..# G(131)
#.#.#G# G(125)
#..G#E# G(200), E(125)
#.....#
#######
After 26 rounds:
#######
#G....# G(200)
#.G...# G(131)
#.#.#G# G(122)
#...#E# E(122)
#..G..# G(200)
#######
After 27 rounds:
#######
#G....# G(200)
#.G...# G(131)
#.#.#G# G(119)
#...#E# E(119)
#...G.# G(200)
#######
After 28 rounds:
#######
#G....# G(200)
#.G...# G(131)
#.#.#G# G(116)
#...#E# E(113)
#....G# G(200)
#######
More combat ensues; eventually, the bottom Elf dies:
After 47 rounds:
#######
#G....# G(200)
#.G...# G(131)
#.#.#G# G(59)
#...#.#
#....G# G(200)
|
####### #######
#G..#E# #...#E# E(200)
#E#E.E# #E#...# E(
|
#######
Before the 48th round can finish, the top-left Goblin finds that there are no targets remaining, and so combat ends. So, the number of full rounds that were completed is 47, and the sum of the hit points of all remaining units is 200+131+59+200 = 590. From these, the outcome of the battle is 47 * 590 = 27730.
Here are a few example summarized combats:
|
random_line_split
|
producer.rs
|
connectors::tests::free_port::find_free_tcp_port;
use crate::{connectors::impls::kafka, errors::Result, Event};
use futures::StreamExt;
use rdkafka::{
admin::{AdminClient, AdminOptions, NewTopic, TopicReplication},
config::FromClientConfig,
consumer::{CommitMode, Consumer, StreamConsumer},
message::Headers,
ClientConfig, Message,
};
use serial_test::serial;
use std::time::Duration;
use testcontainers::clients::Cli as DockerCli;
use tokio::time::timeout;
use tremor_common::ports::IN;
use tremor_pipeline::EventId;
use tremor_value::literal;
#[tokio::test(flavor = "multi_thread")]
#[serial(kafka)]
async fn
|
() -> Result<()> {
let _: std::result::Result<_, _> = env_logger::try_init();
let docker = DockerCli::default();
let container = redpanda_container(&docker).await?;
let port = container.get_host_port_ipv4(9092);
let mut admin_config = ClientConfig::new();
let broker = format!("127.0.0.1:{port}");
let topic = "tremor_test";
let num_partitions = 3;
let num_replicas = 1;
admin_config
.set("client.id", "test-admin")
.set("bootstrap.servers", &broker);
let admin_client = AdminClient::from_config(&admin_config)?;
let options = AdminOptions::default();
let res = admin_client
.create_topics(
vec![&NewTopic::new(
topic,
num_partitions,
TopicReplication::Fixed(num_replicas),
)],
&options,
)
.await?;
for r in res {
match r {
Err((topic, err)) => {
error!("Error creating topic {}: {}", &topic, err);
}
Ok(topic) => {
info!("Created topic {}", topic);
}
}
}
let connector_config = literal!({
"reconnect": {
"retry": {
"interval_ms": 1000_u64,
"max_retries": 10_u64
}
},
"codec": {"name": "json", "config": {"mode": "sorted"}},
"config": {
"brokers": [
broker.clone()
],
"topic": topic,
"key": "snot",
"rdkafka_options": {
// "debug": "all"
}
}
});
let mut harness = ConnectorHarness::new(
function_name!(),
&kafka::producer::Builder::default(),
&connector_config,
)
.await?;
harness.start().await?;
harness.wait_for_connected().await?;
harness.consume_initial_sink_contraflow().await?;
let consumer = ClientConfig::new()
.set("bootstrap.servers", &broker)
.set("group.id", "connector_kafka_producer")
//.set("client.id", "my-client")
//.set("socket.timeout.ms", "2000")
.set("session.timeout.ms", "6000")
.set("auto.offset.reset", "earliest")
.set("enable.auto.commit", "false")
//.set("auto.commit.interval.ms", "100")
.set("enable.auto.offset.store", "false")
//.set("debug", "all")
.create::<StreamConsumer>()
.expect("Consumer creation error");
consumer
.subscribe(&[topic])
.expect("Can't subscribe to specified topic");
let mut message_stream = consumer.stream();
let data = literal!({
"snot": "badger"
});
let meta = literal!({});
let e1 = Event {
id: EventId::default(),
data: (data.clone(), meta).into(),
transactional: false,
..Event::default()
};
harness.send_to_sink(e1, IN).await?;
match timeout(Duration::from_secs(30), message_stream.next()) // first message, we might need to wait a little longer for the consumer to boot up and settle things with redpanda
.await?
{
Some(Ok(msg)) => {
assert_eq!(msg.key(), Some("snot".as_bytes()));
assert_eq!(msg.payload(), Some("{\"snot\":\"badger\"}".as_bytes()));
consumer
.commit_message(&msg, CommitMode::Sync)
.expect("Commit failed");
}
Some(Err(e)) => {
return Err(e.into());
}
None => {
return Err("Topic Stream unexpectedly finished.".into());
}
};
assert!(harness.get_pipe(IN)?.get_contraflow_events().is_empty());
let data2 = literal!([1, 2, 3]);
let meta2 = literal!({
"kafka_producer": {
"key": "badger",
"headers": {
"foo": "baz"
},
"timestamp": 123_000_000,
"partition": 0
}
});
let e2 = Event {
id: EventId::default(),
data: (data2, meta2).into(),
transactional: true,
..Event::default()
};
harness.send_to_sink(e2, IN).await?;
match timeout(Duration::from_secs(5), message_stream.next()).await? {
Some(Ok(msg)) => {
assert_eq!(Some("badger".as_bytes()), msg.key());
assert_eq!(Some("[1,2,3]".as_bytes()), msg.payload());
assert_eq!(0_i32, msg.partition());
assert_eq!(Some(123), msg.timestamp().to_millis());
let headers = msg.headers().expect("No headers found");
assert_eq!(1, headers.count());
let h = headers.get(0);
assert_eq!("foo", h.key);
assert_eq!("baz".as_bytes(), h.value.expect("no value"));
consumer
.commit_message(&msg, CommitMode::Sync)
.expect("Commit failed");
}
Some(Err(e)) => {
return Err(e.into());
}
None => {
return Err("EOF on kafka topic".into());
}
}
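// Note on the assertions above: the `kafka_producer.timestamp` metadata value
// appears to be interpreted in nanoseconds while the consumer reports
// milliseconds, so 123_000_000 ns / 1_000_000 = 123 ms, which matches
// `msg.timestamp().to_millis()`. Likewise `partition: 0` in the metadata is
// why `msg.partition()` is asserted to be 0.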
// batched event
let batched_data = literal!([{
"data": {
"value": {
"field1": 0.1,
"field3": []
},
"meta": {
"kafka_producer": {
"key": "nananananana: batchman!"
}
}
}
}, {
"data": {
"value": {
"field2": "just a string"
},
"meta": {}
}
}]);
let batched_meta = literal!({});
let batched_event = Event {
id: EventId::from_id(0, 0, 1),
data: (batched_data, batched_meta).into(),
transactional: true,
is_batch: true,
..Event::default()
};
harness.send_to_sink(batched_event, IN).await?;
let borrowed_batchman_msg = timeout(Duration::from_secs(2), message_stream.next())
.await?
.expect("timeout waiting for batchman message")
.expect("error waiting for batchman message");
consumer
.commit_message(&borrowed_batchman_msg, CommitMode::Sync)
.expect("commit failed");
let mut batchman_msg = borrowed_batchman_msg.detach();
drop(borrowed_batchman_msg);
let borrowed_snot_msg = timeout(Duration::from_secs(2), message_stream.next())
.await?
.expect("timeout waiting for batchman message")
.expect("error waiting for batchman message");
consumer
.commit_message(&borrowed_snot_msg, CommitMode::Sync)
.expect("commit failed");
let mut snot_msg = borrowed_snot_msg.detach();
drop(borrowed_snot_msg);
if batchman_msg.key().eq(&Some("snot".as_bytes())) {
core::mem::swap(&mut snot_msg, &mut batchman_msg);
}
assert_eq!(
Some("nananananana: batchman!".as_bytes()),
batchman_msg.key()
);
assert_eq!(
Some("{\"field1\":0.1,\"field3\":[]}".as_bytes()),
batchman_msg.payload()
);
assert!(batchman_msg.headers().is_none());
assert_eq!(Some("snot".as_bytes()), snot_msg.key());
assert_eq!(
Some("{\"field2\":\"just a string\"}".as_bytes()),
snot_msg.payload()
);
assert!(snot_msg.headers().is_none());
consumer.unsubscribe();
drop(message_stream);
drop(consumer);
// shutdown
let (out_events, err_events) = harness.stop().await?;
assert_eq!(out_events, vec![]);
assert_eq!(err_events, vec![]);
// cleanup
drop(container);
Ok(())
}
#[tokio::test(flavor = "multi_thread")]
#[serial(kafka)]
async fn producer_unreachable() -> Result<()> {
let _: std::result::Result<_, _> = env_logger::try_init();
let port = find_free_tcp_port().await?;
let broker = format!("127
|
connector_kafka_producer
|
identifier_name
|
producer.rs
|
connectors::tests::free_port::find_free_tcp_port;
use crate::{connectors::impls::kafka, errors::Result, Event};
use futures::StreamExt;
use rdkafka::{
admin::{AdminClient, AdminOptions, NewTopic, TopicReplication},
config::FromClientConfig,
consumer::{CommitMode, Consumer, StreamConsumer},
message::Headers,
ClientConfig, Message,
};
use serial_test::serial;
use std::time::Duration;
use testcontainers::clients::Cli as DockerCli;
use tokio::time::timeout;
use tremor_common::ports::IN;
use tremor_pipeline::EventId;
use tremor_value::literal;
#[tokio::test(flavor = "multi_thread")]
#[serial(kafka)]
async fn connector_kafka_producer() -> Result<()> {
let _: std::result::Result<_, _> = env_logger::try_init();
let docker = DockerCli::default();
let container = redpanda_container(&docker).await?;
let port = container.get_host_port_ipv4(9092);
let mut admin_config = ClientConfig::new();
let broker = format!("127.0.0.1:{port}");
let topic = "tremor_test";
let num_partitions = 3;
let num_replicas = 1;
admin_config
.set("client.id", "test-admin")
.set("bootstrap.servers", &broker);
let admin_client = AdminClient::from_config(&admin_config)?;
let options = AdminOptions::default();
let res = admin_client
.create_topics(
vec![&NewTopic::new(
topic,
num_partitions,
TopicReplication::Fixed(num_replicas),
)],
&options,
)
.await?;
for r in res {
match r {
Err((topic, err)) => {
error!("Error creating topic {}: {}", &topic, err);
}
Ok(topic) => {
info!("Created topic {}", topic);
}
}
}
let connector_config = literal!({
"reconnect": {
"retry": {
"interval_ms": 1000_u64,
"max_retries": 10_u64
}
},
"codec": {"name": "json", "config": {"mode": "sorted"}},
"config": {
"brokers": [
broker.clone()
],
"topic": topic,
"key": "snot",
"rdkafka_options": {
// "debug": "all"
}
}
});
let mut harness = ConnectorHarness::new(
function_name!(),
&kafka::producer::Builder::default(),
&connector_config,
)
.await?;
harness.start().await?;
harness.wait_for_connected().await?;
harness.consume_initial_sink_contraflow().await?;
let consumer = ClientConfig::new()
.set("bootstrap.servers", &broker)
.set("group.id", "connector_kafka_producer")
//.set("client.id", "my-client")
//.set("socket.timeout.ms", "2000")
.set("session.timeout.ms", "6000")
.set("auto.offset.reset", "earliest")
.set("enable.auto.commit", "false")
//.set("auto.commit.interval.ms", "100")
.set("enable.auto.offset.store", "false")
//.set("debug", "all")
.create::<StreamConsumer>()
.expect("Consumer creation error");
consumer
.subscribe(&[topic])
.expect("Can't subscribe to specified topic");
let mut message_stream = consumer.stream();
let data = literal!({
"snot": "badger"
});
let meta = literal!({});
let e1 = Event {
id: EventId::default(),
data: (data.clone(), meta).into(),
transactional: false,
..Event::default()
};
harness.send_to_sink(e1, IN).await?;
    // For the first message we might need to wait a little longer for the
    // consumer to boot up and settle things with redpanda.
    match timeout(Duration::from_secs(30), message_stream.next())
.await?
{
Some(Ok(msg)) => {
assert_eq!(msg.key(), Some("snot".as_bytes()));
assert_eq!(msg.payload(), Some("{\"snot\":\"badger\"}".as_bytes()));
consumer
.commit_message(&msg, CommitMode::Sync)
.expect("Commit failed");
}
Some(Err(e)) => {
return Err(e.into());
}
None => {
return Err("Topic Stream unexpectedly finished.".into());
}
};
assert!(harness.get_pipe(IN)?.get_contraflow_events().is_empty());
let data2 = literal!([1, 2, 3]);
let meta2 = literal!({
"kafka_producer": {
"key": "badger",
"headers": {
"foo": "baz"
},
"timestamp": 123_000_000,
"partition": 0
}
});
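    // The `kafka_producer` metadata above overrides the per-record settings:
    // record key, headers, target partition and timestamp (given in
    // nanoseconds here, judging by the 123 ms assertion below). The
    // assertions after the send verify each of these fields on the consumed
    // message.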
let e2 = Event {
id: EventId::default(),
data: (data2, meta2).into(),
transactional: true,
..Event::default()
};
harness.send_to_sink(e2, IN).await?;
match timeout(Duration::from_secs(5), message_stream.next()).await? {
Some(Ok(msg)) => {
assert_eq!(Some("badger".as_bytes()), msg.key());
assert_eq!(Some("[1,2,3]".as_bytes()), msg.payload());
assert_eq!(0_i32, msg.partition());
assert_eq!(Some(123), msg.timestamp().to_millis());
let headers = msg.headers().expect("No headers found");
assert_eq!(1, headers.count());
let h = headers.get(0);
assert_eq!("foo", h.key);
assert_eq!("baz".as_bytes(), h.value.expect("no value"));
consumer
.commit_message(&msg, CommitMode::Sync)
.expect("Commit failed");
}
Some(Err(e)) => {
return Err(e.into());
}
None => {
return Err("EOF on kafka topic".into());
}
}
// batched event
|
"field1": 0.1,
"field3": []
},
"meta": {
"kafka_producer": {
"key": "nananananana: batchman!"
}
}
}
}, {
"data": {
"value": {
"field2": "just a string"
},
"meta": {}
}
}]);
let batched_meta = literal!({});
let batched_event = Event {
id: EventId::from_id(0, 0, 1),
data: (batched_data, batched_meta).into(),
transactional: true,
is_batch: true,
..Event::default()
};
harness.send_to_sink(batched_event, IN).await?;
let borrowed_batchman_msg = timeout(Duration::from_secs(2), message_stream.next())
.await?
.expect("timeout waiting for batchman message")
.expect("error waiting for batchman message");
consumer
.commit_message(&borrowed_batchman_msg, CommitMode::Sync)
.expect("commit failed");
let mut batchman_msg = borrowed_batchman_msg.detach();
drop(borrowed_batchman_msg);
let borrowed_snot_msg = timeout(Duration::from_secs(2), message_stream.next())
.await?
.expect("timeout waiting for batchman message")
.expect("error waiting for batchman message");
consumer
.commit_message(&borrowed_snot_msg, CommitMode::Sync)
.expect("commit failed");
let mut snot_msg = borrowed_snot_msg.detach();
drop(borrowed_snot_msg);
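    // The two records of the batched event may be consumed in either order
    // (their keys differ, so they can land on different partitions), so
    // normalize by key before asserting: after the swap `batchman_msg` always
    // holds the record keyed "nananananana: batchman!" and `snot_msg` the one
    // keyed "snot".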
if batchman_msg.key().eq(&Some("snot".as_bytes())) {
core::mem::swap(&mut snot_msg, &mut batchman_msg);
}
assert_eq!(
Some("nananananana: batchman!".as_bytes()),
batchman_msg.key()
);
assert_eq!(
Some("{\"field1\":0.1,\"field3\":[]}".as_bytes()),
batchman_msg.payload()
);
assert!(batchman_msg.headers().is_none());
assert_eq!(Some("snot".as_bytes()), snot_msg.key());
assert_eq!(
Some("{\"field2\":\"just a string\"}".as_bytes()),
snot_msg.payload()
);
assert!(snot_msg.headers().is_none());
consumer.unsubscribe();
drop(message_stream);
drop(consumer);
// shutdown
let (out_events, err_events) = harness.stop().await?;
assert_eq!(out_events, vec![]);
assert_eq!(err_events, vec![]);
// cleanup
drop(container);
Ok(())
}
#[tokio::test(flavor = "multi_thread")]
#[serial(kafka)]
async fn producer_unreachable() -> Result<()> {
let _: std::result::Result<_, _> = env_logger::try_init();
let port = find_free_tcp_port().await?;
let broker = format!("127.0
|
let batched_data = literal!([{
"data": {
"value": {
|
random_line_split
|
producer.rs
|
connectors::tests::free_port::find_free_tcp_port;
use crate::{connectors::impls::kafka, errors::Result, Event};
use futures::StreamExt;
use rdkafka::{
admin::{AdminClient, AdminOptions, NewTopic, TopicReplication},
config::FromClientConfig,
consumer::{CommitMode, Consumer, StreamConsumer},
message::Headers,
ClientConfig, Message,
};
use serial_test::serial;
use std::time::Duration;
use testcontainers::clients::Cli as DockerCli;
use tokio::time::timeout;
use tremor_common::ports::IN;
use tremor_pipeline::EventId;
use tremor_value::literal;
#[tokio::test(flavor = "multi_thread")]
#[serial(kafka)]
async fn connector_kafka_producer() -> Result<()>
|
num_partitions,
TopicReplication::Fixed(num_replicas),
)],
&options,
)
.await?;
for r in res {
match r {
Err((topic, err)) => {
error!("Error creating topic {}: {}", &topic, err);
}
Ok(topic) => {
info!("Created topic {}", topic);
}
}
}
let connector_config = literal!({
"reconnect": {
"retry": {
"interval_ms": 1000_u64,
"max_retries": 10_u64
}
},
"codec": {"name": "json", "config": {"mode": "sorted"}},
"config": {
"brokers": [
broker.clone()
],
"topic": topic,
"key": "snot",
"rdkafka_options": {
// "debug": "all"
}
}
});
let mut harness = ConnectorHarness::new(
function_name!(),
&kafka::producer::Builder::default(),
&connector_config,
)
.await?;
harness.start().await?;
harness.wait_for_connected().await?;
harness.consume_initial_sink_contraflow().await?;
let consumer = ClientConfig::new()
.set("bootstrap.servers", &broker)
.set("group.id", "connector_kafka_producer")
//.set("client.id", "my-client")
//.set("socket.timeout.ms", "2000")
.set("session.timeout.ms", "6000")
.set("auto.offset.reset", "earliest")
.set("enable.auto.commit", "false")
//.set("auto.commit.interval.ms", "100")
.set("enable.auto.offset.store", "false")
//.set("debug", "all")
.create::<StreamConsumer>()
.expect("Consumer creation error");
consumer
.subscribe(&[topic])
.expect("Can't subscribe to specified topic");
let mut message_stream = consumer.stream();
let data = literal!({
"snot": "badger"
});
let meta = literal!({});
let e1 = Event {
id: EventId::default(),
data: (data.clone(), meta).into(),
transactional: false,
..Event::default()
};
harness.send_to_sink(e1, IN).await?;
    // For the first message we might need to wait a little longer for the
    // consumer to boot up and settle things with redpanda.
    match timeout(Duration::from_secs(30), message_stream.next())
.await?
{
Some(Ok(msg)) => {
assert_eq!(msg.key(), Some("snot".as_bytes()));
assert_eq!(msg.payload(), Some("{\"snot\":\"badger\"}".as_bytes()));
consumer
.commit_message(&msg, CommitMode::Sync)
.expect("Commit failed");
}
Some(Err(e)) => {
return Err(e.into());
}
None => {
return Err("Topic Stream unexpectedly finished.".into());
}
};
assert!(harness.get_pipe(IN)?.get_contraflow_events().is_empty());
let data2 = literal!([1, 2, 3]);
let meta2 = literal!({
"kafka_producer": {
"key": "badger",
"headers": {
"foo": "baz"
},
"timestamp": 123_000_000,
"partition": 0
}
});
let e2 = Event {
id: EventId::default(),
data: (data2, meta2).into(),
transactional: true,
..Event::default()
};
harness.send_to_sink(e2, IN).await?;
match timeout(Duration::from_secs(5), message_stream.next()).await? {
Some(Ok(msg)) => {
assert_eq!(Some("badger".as_bytes()), msg.key());
assert_eq!(Some("[1,2,3]".as_bytes()), msg.payload());
assert_eq!(0_i32, msg.partition());
assert_eq!(Some(123), msg.timestamp().to_millis());
let headers = msg.headers().expect("No headers found");
assert_eq!(1, headers.count());
let h = headers.get(0);
assert_eq!("foo", h.key);
assert_eq!("baz".as_bytes(), h.value.expect("no value"));
consumer
.commit_message(&msg, CommitMode::Sync)
.expect("Commit failed");
}
Some(Err(e)) => {
return Err(e.into());
}
None => {
return Err("EOF on kafka topic".into());
}
}
// batched event
let batched_data = literal!([{
"data": {
"value": {
"field1": 0.1,
"field3": []
},
"meta": {
"kafka_producer": {
"key": "nananananana: batchman!"
}
}
}
}, {
"data": {
"value": {
"field2": "just a string"
},
"meta": {}
}
}]);
let batched_meta = literal!({});
let batched_event = Event {
id: EventId::from_id(0, 0, 1),
data: (batched_data, batched_meta).into(),
transactional: true,
is_batch: true,
..Event::default()
};
harness.send_to_sink(batched_event, IN).await?;
let borrowed_batchman_msg = timeout(Duration::from_secs(2), message_stream.next())
.await?
.expect("timeout waiting for batchman message")
.expect("error waiting for batchman message");
consumer
.commit_message(&borrowed_batchman_msg, CommitMode::Sync)
.expect("commit failed");
let mut batchman_msg = borrowed_batchman_msg.detach();
drop(borrowed_batchman_msg);
let borrowed_snot_msg = timeout(Duration::from_secs(2), message_stream.next())
.await?
.expect("timeout waiting for batchman message")
.expect("error waiting for batchman message");
consumer
.commit_message(&borrowed_snot_msg, CommitMode::Sync)
.expect("commit failed");
let mut snot_msg = borrowed_snot_msg.detach();
drop(borrowed_snot_msg);
if batchman_msg.key().eq(&Some("snot".as_bytes())) {
core::mem::swap(&mut snot_msg, &mut batchman_msg);
}
assert_eq!(
Some("nananananana: batchman!".as_bytes()),
batchman_msg.key()
);
assert_eq!(
Some("{\"field1\":0.1,\"field3\":[]}".as_bytes()),
batchman_msg.payload()
);
assert!(batchman_msg.headers().is_none());
assert_eq!(Some("snot".as_bytes()), snot_msg.key());
assert_eq!(
Some("{\"field2\":\"just a string\"}".as_bytes()),
snot_msg.payload()
);
assert!(snot_msg.headers().is_none());
consumer.unsubscribe();
drop(message_stream);
drop(consumer);
// shutdown
let (out_events, err_events) = harness.stop().await?;
assert_eq!(out_events, vec![]);
assert_eq!(err_events, vec![]);
// cleanup
drop(container);
Ok(())
}
#[tokio::test(flavor = "multi_thread")]
#[serial(kafka)]
async fn producer_unreachable() -> Result<()> {
let _: std::result::Result<_, _> = env_logger::try_init();
let port = find_free_tcp_port().await?;
let broker = format!("127
|
{
let _: std::result::Result<_, _> = env_logger::try_init();
let docker = DockerCli::default();
let container = redpanda_container(&docker).await?;
let port = container.get_host_port_ipv4(9092);
let mut admin_config = ClientConfig::new();
let broker = format!("127.0.0.1:{port}");
let topic = "tremor_test";
let num_partitions = 3;
let num_replicas = 1;
admin_config
.set("client.id", "test-admin")
.set("bootstrap.servers", &broker);
let admin_client = AdminClient::from_config(&admin_config)?;
let options = AdminOptions::default();
let res = admin_client
.create_topics(
vec![&NewTopic::new(
topic,
|
identifier_body
|
producer.rs
|
ors::tests::free_port::find_free_tcp_port;
use crate::{connectors::impls::kafka, errors::Result, Event};
use futures::StreamExt;
use rdkafka::{
admin::{AdminClient, AdminOptions, NewTopic, TopicReplication},
config::FromClientConfig,
consumer::{CommitMode, Consumer, StreamConsumer},
message::Headers,
ClientConfig, Message,
};
use serial_test::serial;
use std::time::Duration;
use testcontainers::clients::Cli as DockerCli;
use tokio::time::timeout;
use tremor_common::ports::IN;
use tremor_pipeline::EventId;
use tremor_value::literal;
#[tokio::test(flavor = "multi_thread")]
#[serial(kafka)]
async fn connector_kafka_producer() -> Result<()> {
let _: std::result::Result<_, _> = env_logger::try_init();
let docker = DockerCli::default();
let container = redpanda_container(&docker).await?;
let port = container.get_host_port_ipv4(9092);
let mut admin_config = ClientConfig::new();
let broker = format!("127.0.0.1:{port}");
let topic = "tremor_test";
let num_partitions = 3;
let num_replicas = 1;
admin_config
.set("client.id", "test-admin")
.set("bootstrap.servers", &broker);
let admin_client = AdminClient::from_config(&admin_config)?;
let options = AdminOptions::default();
let res = admin_client
.create_topics(
vec![&NewTopic::new(
topic,
num_partitions,
TopicReplication::Fixed(num_replicas),
)],
&options,
)
.await?;
for r in res {
match r {
Err((topic, err)) =>
|
Ok(topic) => {
info!("Created topic {}", topic);
}
}
}
let connector_config = literal!({
"reconnect": {
"retry": {
"interval_ms": 1000_u64,
"max_retries": 10_u64
}
},
"codec": {"name": "json", "config": {"mode": "sorted"}},
"config": {
"brokers": [
broker.clone()
],
"topic": topic,
"key": "snot",
"rdkafka_options": {
// "debug": "all"
}
}
});
let mut harness = ConnectorHarness::new(
function_name!(),
&kafka::producer::Builder::default(),
&connector_config,
)
.await?;
harness.start().await?;
harness.wait_for_connected().await?;
harness.consume_initial_sink_contraflow().await?;
let consumer = ClientConfig::new()
.set("bootstrap.servers", &broker)
.set("group.id", "connector_kafka_producer")
//.set("client.id", "my-client")
//.set("socket.timeout.ms", "2000")
.set("session.timeout.ms", "6000")
.set("auto.offset.reset", "earliest")
.set("enable.auto.commit", "false")
//.set("auto.commit.interval.ms", "100")
.set("enable.auto.offset.store", "false")
//.set("debug", "all")
.create::<StreamConsumer>()
.expect("Consumer creation error");
consumer
.subscribe(&[topic])
.expect("Can't subscribe to specified topic");
let mut message_stream = consumer.stream();
let data = literal!({
"snot": "badger"
});
let meta = literal!({});
let e1 = Event {
id: EventId::default(),
data: (data.clone(), meta).into(),
transactional: false,
..Event::default()
};
harness.send_to_sink(e1, IN).await?;
    // For the first message we might need to wait a little longer for the
    // consumer to boot up and settle things with redpanda.
    match timeout(Duration::from_secs(30), message_stream.next())
.await?
{
Some(Ok(msg)) => {
assert_eq!(msg.key(), Some("snot".as_bytes()));
assert_eq!(msg.payload(), Some("{\"snot\":\"badger\"}".as_bytes()));
consumer
.commit_message(&msg, CommitMode::Sync)
.expect("Commit failed");
}
Some(Err(e)) => {
return Err(e.into());
}
None => {
return Err("Topic Stream unexpectedly finished.".into());
}
};
assert!(harness.get_pipe(IN)?.get_contraflow_events().is_empty());
let data2 = literal!([1, 2, 3]);
let meta2 = literal!({
"kafka_producer": {
"key": "badger",
"headers": {
"foo": "baz"
},
"timestamp": 123_000_000,
"partition": 0
}
});
let e2 = Event {
id: EventId::default(),
data: (data2, meta2).into(),
transactional: true,
..Event::default()
};
harness.send_to_sink(e2, IN).await?;
match timeout(Duration::from_secs(5), message_stream.next()).await? {
Some(Ok(msg)) => {
assert_eq!(Some("badger".as_bytes()), msg.key());
assert_eq!(Some("[1,2,3]".as_bytes()), msg.payload());
assert_eq!(0_i32, msg.partition());
assert_eq!(Some(123), msg.timestamp().to_millis());
let headers = msg.headers().expect("No headers found");
assert_eq!(1, headers.count());
let h = headers.get(0);
assert_eq!("foo", h.key);
assert_eq!("baz".as_bytes(), h.value.expect("no value"));
consumer
.commit_message(&msg, CommitMode::Sync)
.expect("Commit failed");
}
Some(Err(e)) => {
return Err(e.into());
}
None => {
return Err("EOF on kafka topic".into());
}
}
// batched event
let batched_data = literal!([{
"data": {
"value": {
"field1": 0.1,
"field3": []
},
"meta": {
"kafka_producer": {
"key": "nananananana: batchman!"
}
}
}
}, {
"data": {
"value": {
"field2": "just a string"
},
"meta": {}
}
}]);
let batched_meta = literal!({});
let batched_event = Event {
id: EventId::from_id(0, 0, 1),
data: (batched_data, batched_meta).into(),
transactional: true,
is_batch: true,
..Event::default()
};
harness.send_to_sink(batched_event, IN).await?;
let borrowed_batchman_msg = timeout(Duration::from_secs(2), message_stream.next())
.await?
.expect("timeout waiting for batchman message")
.expect("error waiting for batchman message");
consumer
.commit_message(&borrowed_batchman_msg, CommitMode::Sync)
.expect("commit failed");
let mut batchman_msg = borrowed_batchman_msg.detach();
drop(borrowed_batchman_msg);
let borrowed_snot_msg = timeout(Duration::from_secs(2), message_stream.next())
.await?
.expect("timeout waiting for batchman message")
.expect("error waiting for batchman message");
consumer
.commit_message(&borrowed_snot_msg, CommitMode::Sync)
.expect("commit failed");
let mut snot_msg = borrowed_snot_msg.detach();
drop(borrowed_snot_msg);
if batchman_msg.key().eq(&Some("snot".as_bytes())) {
core::mem::swap(&mut snot_msg, &mut batchman_msg);
}
assert_eq!(
Some("nananananana: batchman!".as_bytes()),
batchman_msg.key()
);
assert_eq!(
Some("{\"field1\":0.1,\"field3\":[]}".as_bytes()),
batchman_msg.payload()
);
assert!(batchman_msg.headers().is_none());
assert_eq!(Some("snot".as_bytes()), snot_msg.key());
assert_eq!(
Some("{\"field2\":\"just a string\"}".as_bytes()),
snot_msg.payload()
);
assert!(snot_msg.headers().is_none());
consumer.unsubscribe();
drop(message_stream);
drop(consumer);
// shutdown
let (out_events, err_events) = harness.stop().await?;
assert_eq!(out_events, vec![]);
assert_eq!(err_events, vec![]);
// cleanup
drop(container);
Ok(())
}
#[tokio::test(flavor = "multi_thread")]
#[serial(kafka)]
async fn producer_unreachable() -> Result<()> {
let _: std::result::Result<_, _> = env_logger::try_init();
let port = find_free_tcp_port().await?;
let broker = format!("127
|
{
error!("Error creating topic {}: {}", &topic, err);
}
|
conditional_block
|
enforsbot.py
|
and connected.",
"hello" : "Hello there!",
"hi" : "Hi there!",
"LocationUpdate .*" : self.handle_incoming_location_update,
"locate" : self.respond_location,
"syscond" : self.respond_syscond,
"status" : self.respond_status,
"lights out" : self.respond_lights_off,
"lights off" : self.respond_lights_off,
"lights on" : self.respond_lights_on,
}
# Incoming user messages can come from several different threads.
# When we get one, we keep track of which thread it's from, so
# we know which thread we should send the response to. For example,
# if we get a user message from TwitterStream, we should send the
# response to TwitterRest.
self.response_threads = {
#Incoming from Send response to
#=============== ================
"TwitterStreams" : "TwitterRest",
"Telegram" : "Telegram",
"IRC" : "IRC"
}
self.activity_cmds = {
"multi" : self.start_multi,
}
self.location = None
self.arrived = False
self.database = sqlite3.connect("enforsbot.db",
detect_types=sqlite3.PARSE_DECLTYPES)
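        # PARSE_DECLTYPES makes sqlite3 convert declared column types back
        # into Python objects, so (assuming the LOCATION_HISTORY time column
        # is declared as a timestamp) the values written with
        # datetime.datetime.now() come back as datetime objects when read.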
self.user_handler = eb_user.UserHandler(self.config, self.database)
def start(self):
"Start the bot."
self.start_all_threads()
self.main_loop()
def main_loop(self):
"The main loop of the bot."
try:
while True:
message = self.config.recv_message("Main")
if message.msg_type == \
eb_message.MSG_TYPE_THREAD_STARTED:
print("Thread started: %s" % message.sender)
self.config.set_thread_state(message.sender,
"running")
elif message.msg_type == eb_message.MSG_TYPE_THREAD_STOPPED:
print("Thread stopped: %s" % message.sender)
self.config.set_thread_state(message.sender,
"stopped")
elif message.msg_type == eb_message.MSG_TYPE_USER_MESSAGE:
self.handle_incoming_user_message(message,
self.response_threads[message.sender])
elif message.msg_type == eb_message.MSG_TYPE_LOCATION_UPDATE:
self.handle_incoming_location_update(message)
elif message.msg_type == eb_message.MSG_TYPE_NOTIFY_USER:
self.handle_incoming_notify_user(message)
else:
print("Unsupported incoming message type: %d" % \
message.msg_type)
except (KeyboardInterrupt, SystemExit):
self.stop_all_threads()
return
def start_all_threads(self):
"Start all necessary threads."
# pylint: disable=not-context-manager
with self.config.lock:
twitter_thread = eb_twitter.TwitterThread("Twitter",
self.config)
self.config.threads["Twitter"] = twitter_thread
telegram_thread = eb_telegram.TelegramThread("Telegram",
self.config)
self.config.threads["Telegram"] = telegram_thread
irc_thread = eb_irc.IRCThread("IRC", self.config)
self.config.threads["IRC"] = irc_thread
self.config.set_thread_state("Twitter", "starting")
twitter_thread.start()
self.config.set_thread_state("Telegram", "starting")
telegram_thread.start()
self.config.set_thread_state("IRC", "starting")
irc_thread.start()
def stop_all_threads(self):
"Stop all threads."
print("") # Add a newline to get away from "^C" on screen
# pylint: disable=not-context-manager
with self.config.lock:
threads_to_stop = [thread for thread in self.config.threads if
self.config.thread_states[thread] == "running"]
print("Stopping threads: %s" % threads_to_stop)
for thread in threads_to_stop:
if thread not in self.config.threads:
print("ERROR: %s not in self.config.threads!" % thread)
self.stop_thread(thread)
print("ALL THREADS STOPPED.")
def stop_thread(self, thread):
"Stop one specific thread."
message = eb_message.Message("Main",
eb_message.MSG_TYPE_STOP_THREAD, {})
self.config.send_message(thread, message)
self.config.threads[thread].join()
def handle_incoming_user_message(self, message, response_thread):
"Handle an incoming message of type USER."
user_name = message.data["user"]
text = message.data["text"]
print("Main: Message from %s: '%s'" % (user_name, text))
protocol = response_thread
if protocol.startswith("Twitter"):
protocol = "Twitter"
user = self.user_handler.find_user_by_identifier(protocol,
user_name)
response = ""
choices = []
# If this is an IRC message:
if response_thread == "IRC":
# msg_type = message.data["msg_type"]
channel = message.data["channel"]
# But don't respond unless it's a private message.
if channel.lower() != "enforsbot" and \
channel.lower() != "enforstestbot":
return None
text = text.lower()
# If this is a command to start an activity:
# commented out - should be replaced with proper commands
# if text in self.activity_cmds.keys() and not user.current_activity():
# self.start_activity(user, text)
# If we don't have a name for the user, then insert
# a question about the user's name.
# Check if new unknown user
# =========================
if user.name is None and not user.current_activity():
self.start_ask_user_name(user, text)
# If no ongoing activity
# ======================
if not user.current_activity():
# Check patterns
# ==============
for pattern, pattern_response in self.responses.items():
|
# If no pattern match found, check commands
# =========================================
if response == "":
response, choices = self.cmd_parser.parse(text, user)
# Handle any ongoing activities
# =============================
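        # An activity can finish and immediately hand its result on to the
        # next activity on the user's stack, so keep looping: feed the
        # previous result in as the next input until no activity remains or
        # the current one is still waiting for more user input.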
if user.current_activity():
repeat = True
while repeat:
status = self.handle_activity(user, text)
response += status.output + " "
choices = status.choices
repeat = status.done and user.current_activity()
if repeat:
text = status.result
# Admit defeat
# ============
if response == "":
response = "I have no clue what you're talking about."
# Send response
# =============
response = response.strip() + "\n"
print(" - Response: %s" % response.replace("\n", " "))
message = eb_message.Message("Main",
eb_message.MSG_TYPE_USER_MESSAGE,
{"user": user_name,
"text": response,
"choices": choices})
self.config.send_message(response_thread, message)
def start_activity(self, user, text):
"""Check if text is a command to start an activity, and if so,
start it. Return True if started, otherwise False."""
text = text.strip().lower()
if text in self.activity_cmds.keys():
self.activity_cmds[text](user, text)
return True
return False
@staticmethod
def handle_activity(user, text):
"""Send user input to ongoing activity."""
activity = user.current_activity()
if not activity:
return None
status = activity.handle_text(text)
if status.done:
user.remove_activity()
return status
@staticmethod
def start_ask_user_name(user, text):
"""Ask the user for their name."""
activity = eb_activity.AskUserNameActivity(user)
user.insert_activity(activity)
@staticmethod
def start_multi(user, text):
"""Start multiplication practice activity."""
activity = eb_math.MathDrill(user)
user.push_activity(activity)
return True
@staticmethod
def respond_ip(message):
"Return our local IP address."
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect(("gmail.com", 80)) # I'm abusing gmail.
response = "I'm currently running on IP address %s." % \
sock.getsockname()[0]
sock.close()
return response
def handle_incoming_location_update(self, message):
"Handle incoming request for our location."
user = "Enfors" # Hardcoded for now. Sue me.
location = message.data["location"]
arrived = message.data["arrived"]
print("Updating location: [%s:%s]" % (location, str(arrived)))
# pylint: disable=not-context-manager
with self.config.lock, self.database:
cur = self.database.cursor()
if arrived:
self.location = location
self.arrived = True
cur.execute("insert into LOCATION_HISTORY "
"(user, location, event, time) values "
"(?, ?, 'arrived', ?)",
(user, location, datetime.datetime.now()))
print("Main: Location updated: %s" % self.location)
else: # if leaving
                # Sometimes the "left source" message arrives AFTER the
                # "arrived at destination" message. Only record leaving when
                # we are still at that location (or have not arrived
                # anywhere); skip stale "left" messages for other locations.
if self.arrived is False or location == self.location:
cur.execute("insert into LOCATION_HISTORY "
"(user, location, event, time) values "
"(?, ?, 'left', ?)",
(user,
|
pat = re.compile(pattern)
if pat.match(text):
response = pattern_response
if callable(response):
response = response(text)
|
conditional_block
|
enforsbot.py
|
and connected.",
"hello" : "Hello there!",
"hi" : "Hi there!",
"LocationUpdate .*" : self.handle_incoming_location_update,
"locate" : self.respond_location,
"syscond" : self.respond_syscond,
"status" : self.respond_status,
"lights out" : self.respond_lights_off,
"lights off" : self.respond_lights_off,
"lights on" : self.respond_lights_on,
}
# Incoming user messages can come from several different threads.
# When we get one, we keep track of which thread it's from, so
# we know which thread we should send the response to. For example,
# if we get a user message from TwitterStream, we should send the
# response to TwitterRest.
self.response_threads = {
#Incoming from Send response to
#=============== ================
"TwitterStreams" : "TwitterRest",
"Telegram" : "Telegram",
"IRC" : "IRC"
}
self.activity_cmds = {
"multi" : self.start_multi,
}
self.location = None
self.arrived = False
self.database = sqlite3.connect("enforsbot.db",
detect_types=sqlite3.PARSE_DECLTYPES)
self.user_handler = eb_user.UserHandler(self.config, self.database)
def start(self):
"Start the bot."
self.start_all_threads()
self.main_loop()
def main_loop(self):
"The main loop of the bot."
try:
while True:
message = self.config.recv_message("Main")
if message.msg_type == \
eb_message.MSG_TYPE_THREAD_STARTED:
print("Thread started: %s" % message.sender)
self.config.set_thread_state(message.sender,
"running")
elif message.msg_type == eb_message.MSG_TYPE_THREAD_STOPPED:
print("Thread stopped: %s" % message.sender)
self.config.set_thread_state(message.sender,
"stopped")
elif message.msg_type == eb_message.MSG_TYPE_USER_MESSAGE:
self.handle_incoming_user_message(message,
self.response_threads[message.sender])
elif message.msg_type == eb_message.MSG_TYPE_LOCATION_UPDATE:
self.handle_incoming_location_update(message)
elif message.msg_type == eb_message.MSG_TYPE_NOTIFY_USER:
self.handle_incoming_notify_user(message)
else:
print("Unsupported incoming message type: %d" % \
message.msg_type)
except (KeyboardInterrupt, SystemExit):
self.stop_all_threads()
return
def start_all_threads(self):
"Start all necessary threads."
# pylint: disable=not-context-manager
with self.config.lock:
twitter_thread = eb_twitter.TwitterThread("Twitter",
self.config)
self.config.threads["Twitter"] = twitter_thread
telegram_thread = eb_telegram.TelegramThread("Telegram",
self.config)
self.config.threads["Telegram"] = telegram_thread
irc_thread = eb_irc.IRCThread("IRC", self.config)
self.config.threads["IRC"] = irc_thread
self.config.set_thread_state("Twitter", "starting")
twitter_thread.start()
self.config.set_thread_state("Telegram", "starting")
telegram_thread.start()
self.config.set_thread_state("IRC", "starting")
irc_thread.start()
def stop_all_threads(self):
"Stop all threads."
print("") # Add a newline to get away from "^C" on screen
# pylint: disable=not-context-manager
with self.config.lock:
threads_to_stop = [thread for thread in self.config.threads if
self.config.thread_states[thread] == "running"]
print("Stopping threads: %s" % threads_to_stop)
for thread in threads_to_stop:
if thread not in self.config.threads:
print("ERROR: %s not in self.config.threads!" % thread)
self.stop_thread(thread)
print("ALL THREADS STOPPED.")
def stop_thread(self, thread):
"Stop one specific thread."
message = eb_message.Message("Main",
eb_message.MSG_TYPE_STOP_THREAD, {})
self.config.send_message(thread, message)
self.config.threads[thread].join()
def handle_incoming_user_message(self, message, response_thread):
"Handle an incoming message of type USER."
user_name = message.data["user"]
text = message.data["text"]
print("Main: Message from %s: '%s'" % (user_name, text))
protocol = response_thread
if protocol.startswith("Twitter"):
protocol = "Twitter"
user = self.user_handler.find_user_by_identifier(protocol,
user_name)
response = ""
choices = []
# If this is an IRC message:
if response_thread == "IRC":
# msg_type = message.data["msg_type"]
channel = message.data["channel"]
# But don't respond unless it's a private message.
if channel.lower() != "enforsbot" and \
channel.lower() != "enforstestbot":
return None
text = text.lower()
# If this is a command to start an activity:
# commented out - should be replaced with proper commands
# if text in self.activity_cmds.keys() and not user.current_activity():
# self.start_activity(user, text)
# If we don't have a name for the user, then insert
# a question about the user's name.
# Check if new unknown user
# =========================
if user.name is None and not user.current_activity():
self.start_ask_user_name(user, text)
# If no ongoing activity
# ======================
if not user.current_activity():
# Check patterns
# ==============
for pattern, pattern_response in self.responses.items():
pat = re.compile(pattern)
if pat.match(text):
response = pattern_response
if callable(response):
response = response(text)
# If no pattern match found, check commands
# =========================================
if response == "":
response, choices = self.cmd_parser.parse(text, user)
# Handle any ongoing activities
# =============================
if user.current_activity():
repeat = True
while repeat:
status = self.handle_activity(user, text)
response += status.output + " "
choices = status.choices
repeat = status.done and user.current_activity()
if repeat:
text = status.result
# Admit defeat
# ============
if response == "":
response = "I have no clue what you're talking about."
|
message = eb_message.Message("Main",
eb_message.MSG_TYPE_USER_MESSAGE,
{"user": user_name,
"text": response,
"choices": choices})
self.config.send_message(response_thread, message)
def start_activity(self, user, text):
"""Check if text is a command to start an activity, and if so,
start it. Return True if started, otherwise False."""
text = text.strip().lower()
if text in self.activity_cmds.keys():
self.activity_cmds[text](user, text)
return True
return False
@staticmethod
def handle_activity(user, text):
"""Send user input to ongoing activity."""
activity = user.current_activity()
if not activity:
return None
status = activity.handle_text(text)
if status.done:
user.remove_activity()
return status
@staticmethod
def start_ask_user_name(user, text):
"""Ask the user for their name."""
activity = eb_activity.AskUserNameActivity(user)
user.insert_activity(activity)
@staticmethod
def start_multi(user, text):
"""Start multiplication practice activity."""
activity = eb_math.MathDrill(user)
user.push_activity(activity)
return True
@staticmethod
def respond_ip(message):
"Return our local IP address."
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect(("gmail.com", 80)) # I'm abusing gmail.
response = "I'm currently running on IP address %s." % \
sock.getsockname()[0]
sock.close()
return response
def handle_incoming_location_update(self, message):
"Handle incoming request for our location."
user = "Enfors" # Hardcoded for now. Sue me.
location = message.data["location"]
arrived = message.data["arrived"]
print("Updating location: [%s:%s]" % (location, str(arrived)))
# pylint: disable=not-context-manager
with self.config.lock, self.database:
cur = self.database.cursor()
if arrived:
self.location = location
self.arrived = True
cur.execute("insert into LOCATION_HISTORY "
"(user, location, event, time) values "
"(?, ?, 'arrived', ?)",
(user, location, datetime.datetime.now()))
print("Main: Location updated: %s" % self.location)
else: # if leaving
                # Sometimes the "left source" message arrives AFTER the
                # "arrived at destination" message. Only record leaving when
                # we are still at that location (or have not arrived
                # anywhere); skip stale "left" messages for other locations.
if self.arrived is False or location == self.location:
cur.execute("insert into LOCATION_HISTORY "
"(user, location, event, time) values "
"(?, ?, 'left', ?)",
(user, location
|
# Send response
# =============
response = response.strip() + "\n"
print(" - Response: %s" % response.replace("\n", " "))
|
random_line_split
|
enforsbot.py
|
and connected.",
"hello" : "Hello there!",
"hi" : "Hi there!",
"LocationUpdate .*" : self.handle_incoming_location_update,
"locate" : self.respond_location,
"syscond" : self.respond_syscond,
"status" : self.respond_status,
"lights out" : self.respond_lights_off,
"lights off" : self.respond_lights_off,
"lights on" : self.respond_lights_on,
}
# Incoming user messages can come from several different threads.
# When we get one, we keep track of which thread it's from, so
# we know which thread we should send the response to. For example,
# if we get a user message from TwitterStream, we should send the
# response to TwitterRest.
self.response_threads = {
#Incoming from Send response to
#=============== ================
"TwitterStreams" : "TwitterRest",
"Telegram" : "Telegram",
"IRC" : "IRC"
}
self.activity_cmds = {
"multi" : self.start_multi,
}
self.location = None
self.arrived = False
self.database = sqlite3.connect("enforsbot.db",
detect_types=sqlite3.PARSE_DECLTYPES)
self.user_handler = eb_user.UserHandler(self.config, self.database)
def start(self):
"Start the bot."
self.start_all_threads()
self.main_loop()
def main_loop(self):
"The main loop of the bot."
try:
while True:
message = self.config.recv_message("Main")
if message.msg_type == \
eb_message.MSG_TYPE_THREAD_STARTED:
print("Thread started: %s" % message.sender)
self.config.set_thread_state(message.sender,
"running")
elif message.msg_type == eb_message.MSG_TYPE_THREAD_STOPPED:
print("Thread stopped: %s" % message.sender)
self.config.set_thread_state(message.sender,
"stopped")
elif message.msg_type == eb_message.MSG_TYPE_USER_MESSAGE:
self.handle_incoming_user_message(message,
self.response_threads[message.sender])
elif message.msg_type == eb_message.MSG_TYPE_LOCATION_UPDATE:
self.handle_incoming_location_update(message)
elif message.msg_type == eb_message.MSG_TYPE_NOTIFY_USER:
self.handle_incoming_notify_user(message)
else:
print("Unsupported incoming message type: %d" % \
message.msg_type)
except (KeyboardInterrupt, SystemExit):
self.stop_all_threads()
return
def start_all_threads(self):
"Start all necessary threads."
# pylint: disable=not-context-manager
with self.config.lock:
twitter_thread = eb_twitter.TwitterThread("Twitter",
self.config)
self.config.threads["Twitter"] = twitter_thread
telegram_thread = eb_telegram.TelegramThread("Telegram",
self.config)
self.config.threads["Telegram"] = telegram_thread
irc_thread = eb_irc.IRCThread("IRC", self.config)
self.config.threads["IRC"] = irc_thread
self.config.set_thread_state("Twitter", "starting")
twitter_thread.start()
self.config.set_thread_state("Telegram", "starting")
telegram_thread.start()
self.config.set_thread_state("IRC", "starting")
irc_thread.start()
def stop_all_threads(self):
"Stop all threads."
print("") # Add a newline to get away from "^C" on screen
# pylint: disable=not-context-manager
with self.config.lock:
threads_to_stop = [thread for thread in self.config.threads if
self.config.thread_states[thread] == "running"]
print("Stopping threads: %s" % threads_to_stop)
for thread in threads_to_stop:
if thread not in self.config.threads:
print("ERROR: %s not in self.config.threads!" % thread)
self.stop_thread(thread)
print("ALL THREADS STOPPED.")
def stop_thread(self, thread):
"Stop one specific thread."
message = eb_message.Message("Main",
eb_message.MSG_TYPE_STOP_THREAD, {})
self.config.send_message(thread, message)
self.config.threads[thread].join()
def handle_incoming_user_message(self, message, response_thread):
"Handle an incoming message of type USER."
user_name = message.data["user"]
text = message.data["text"]
print("Main: Message from %s: '%s'" % (user_name, text))
protocol = response_thread
if protocol.startswith("Twitter"):
protocol = "Twitter"
user = self.user_handler.find_user_by_identifier(protocol,
user_name)
response = ""
choices = []
# If this is an IRC message:
if response_thread == "IRC":
# msg_type = message.data["msg_type"]
channel = message.data["channel"]
# But don't respond unless it's a private message.
if channel.lower() != "enforsbot" and \
channel.lower() != "enforstestbot":
return None
text = text.lower()
# If this is a command to start an activity:
# commented out - should be replaced with proper commands
# if text in self.activity_cmds.keys() and not user.current_activity():
# self.start_activity(user, text)
# If we don't have a name for the user, then insert
# a question about the user's name.
# Check if new unknown user
# =========================
if user.name is None and not user.current_activity():
self.start_ask_user_name(user, text)
# If no ongoing activity
# ======================
if not user.current_activity():
# Check patterns
# ==============
for pattern, pattern_response in self.responses.items():
pat = re.compile(pattern)
if pat.match(text):
response = pattern_response
if callable(response):
response = response(text)
# If no pattern match found, check commands
# =========================================
if response == "":
response, choices = self.cmd_parser.parse(text, user)
# Handle any ongoing activities
# =============================
if user.current_activity():
repeat = True
while repeat:
status = self.handle_activity(user, text)
response += status.output + " "
choices = status.choices
repeat = status.done and user.current_activity()
if repeat:
text = status.result
# Admit defeat
# ============
if response == "":
response = "I have no clue what you're talking about."
# Send response
# =============
response = response.strip() + "\n"
print(" - Response: %s" % response.replace("\n", " "))
message = eb_message.Message("Main",
eb_message.MSG_TYPE_USER_MESSAGE,
{"user": user_name,
"text": response,
"choices": choices})
self.config.send_message(response_thread, message)
def start_activity(self, user, text):
"""Check if text is a command to start an activity, and if so,
start it. Return True if started, otherwise False."""
text = text.strip().lower()
if text in self.activity_cmds.keys():
self.activity_cmds[text](user, text)
return True
return False
@staticmethod
def
|
(user, text):
"""Send user input to ongoing activity."""
activity = user.current_activity()
if not activity:
return None
status = activity.handle_text(text)
if status.done:
user.remove_activity()
return status
@staticmethod
def start_ask_user_name(user, text):
"""Ask the user for their name."""
activity = eb_activity.AskUserNameActivity(user)
user.insert_activity(activity)
@staticmethod
def start_multi(user, text):
"""Start multiplication practice activity."""
activity = eb_math.MathDrill(user)
user.push_activity(activity)
return True
@staticmethod
def respond_ip(message):
"Return our local IP address."
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect(("gmail.com", 80)) # I'm abusing gmail.
response = "I'm currently running on IP address %s." % \
sock.getsockname()[0]
sock.close()
return response
def handle_incoming_location_update(self, message):
"Handle incoming request for our location."
user = "Enfors" # Hardcoded for now. Sue me.
location = message.data["location"]
arrived = message.data["arrived"]
print("Updating location: [%s:%s]" % (location, str(arrived)))
# pylint: disable=not-context-manager
with self.config.lock, self.database:
cur = self.database.cursor()
if arrived:
self.location = location
self.arrived = True
cur.execute("insert into LOCATION_HISTORY "
"(user, location, event, time) values "
"(?, ?, 'arrived', ?)",
(user, location, datetime.datetime.now()))
print("Main: Location updated: %s" % self.location)
else: # if leaving
                # Sometimes the "left source" message arrives AFTER the
                # "arrived at destination" message. Only record leaving when
                # we are still at that location (or have not arrived
                # anywhere); skip stale "left" messages for other locations.
if self.arrived is False or location == self.location:
cur.execute("insert into LOCATION_HISTORY "
"(user, location, event, time) values "
"(?, ?, 'left', ?)",
(user,
|
handle_activity
|
identifier_name
|
enforsbot.py
|
"The main loop of the bot."
try:
while True:
message = self.config.recv_message("Main")
if message.msg_type == \
eb_message.MSG_TYPE_THREAD_STARTED:
print("Thread started: %s" % message.sender)
self.config.set_thread_state(message.sender,
"running")
elif message.msg_type == eb_message.MSG_TYPE_THREAD_STOPPED:
print("Thread stopped: %s" % message.sender)
self.config.set_thread_state(message.sender,
"stopped")
elif message.msg_type == eb_message.MSG_TYPE_USER_MESSAGE:
self.handle_incoming_user_message(message,
self.response_threads[message.sender])
elif message.msg_type == eb_message.MSG_TYPE_LOCATION_UPDATE:
self.handle_incoming_location_update(message)
elif message.msg_type == eb_message.MSG_TYPE_NOTIFY_USER:
self.handle_incoming_notify_user(message)
else:
print("Unsupported incoming message type: %d" % \
message.msg_type)
except (KeyboardInterrupt, SystemExit):
self.stop_all_threads()
return
def start_all_threads(self):
"Start all necessary threads."
# pylint: disable=not-context-manager
with self.config.lock:
twitter_thread = eb_twitter.TwitterThread("Twitter",
self.config)
self.config.threads["Twitter"] = twitter_thread
telegram_thread = eb_telegram.TelegramThread("Telegram",
self.config)
self.config.threads["Telegram"] = telegram_thread
irc_thread = eb_irc.IRCThread("IRC", self.config)
self.config.threads["IRC"] = irc_thread
self.config.set_thread_state("Twitter", "starting")
twitter_thread.start()
self.config.set_thread_state("Telegram", "starting")
telegram_thread.start()
self.config.set_thread_state("IRC", "starting")
irc_thread.start()
def stop_all_threads(self):
"Stop all threads."
print("") # Add a newline to get away from "^C" on screen
# pylint: disable=not-context-manager
with self.config.lock:
threads_to_stop = [thread for thread in self.config.threads if
self.config.thread_states[thread] == "running"]
print("Stopping threads: %s" % threads_to_stop)
for thread in threads_to_stop:
if thread not in self.config.threads:
print("ERROR: %s not in self.config.threads!" % thread)
self.stop_thread(thread)
print("ALL THREADS STOPPED.")
def stop_thread(self, thread):
"Stop one specific thread."
message = eb_message.Message("Main",
eb_message.MSG_TYPE_STOP_THREAD, {})
self.config.send_message(thread, message)
self.config.threads[thread].join()
def handle_incoming_user_message(self, message, response_thread):
"Handle an incoming message of type USER."
user_name = message.data["user"]
text = message.data["text"]
print("Main: Message from %s: '%s'" % (user_name, text))
protocol = response_thread
if protocol.startswith("Twitter"):
protocol = "Twitter"
user = self.user_handler.find_user_by_identifier(protocol,
user_name)
response = ""
choices = []
# If this is an IRC message:
if response_thread == "IRC":
# msg_type = message.data["msg_type"]
channel = message.data["channel"]
# But don't respond unless it's a private message.
if channel.lower() != "enforsbot" and \
channel.lower() != "enforstestbot":
return None
text = text.lower()
# If this is a command to start an activity:
# commented out - should be replaced with proper commands
# if text in self.activity_cmds.keys() and not user.current_activity():
# self.start_activity(user, text)
# If we don't have a name for the user, then insert
# a question about the user's name.
# Check if new unknown user
# =========================
if user.name is None and not user.current_activity():
self.start_ask_user_name(user, text)
# If no ongoing activity
# ======================
if not user.current_activity():
# Check patterns
# ==============
for pattern, pattern_response in self.responses.items():
pat = re.compile(pattern)
if pat.match(text):
response = pattern_response
if callable(response):
response = response(text)
# If no pattern match found, check commands
# =========================================
if response == "":
response, choices = self.cmd_parser.parse(text, user)
# Handle any ongoing activities
# =============================
if user.current_activity():
repeat = True
while repeat:
status = self.handle_activity(user, text)
response += status.output + " "
choices = status.choices
repeat = status.done and user.current_activity()
if repeat:
text = status.result
# Admit defeat
# ============
if response == "":
response = "I have no clue what you're talking about."
# Send response
# =============
response = response.strip() + "\n"
print(" - Response: %s" % response.replace("\n", " "))
message = eb_message.Message("Main",
eb_message.MSG_TYPE_USER_MESSAGE,
{"user": user_name,
"text": response,
"choices": choices})
self.config.send_message(response_thread, message)
def start_activity(self, user, text):
"""Check if text is a command to start an activity, and if so,
start it. Return True if started, otherwise False."""
text = text.strip().lower()
if text in self.activity_cmds.keys():
self.activity_cmds[text](user, text)
return True
return False
@staticmethod
def handle_activity(user, text):
"""Send user input to ongoing activity."""
activity = user.current_activity()
if not activity:
return None
status = activity.handle_text(text)
if status.done:
user.remove_activity()
return status
@staticmethod
def start_ask_user_name(user, text):
"""Ask the user for their name."""
activity = eb_activity.AskUserNameActivity(user)
user.insert_activity(activity)
@staticmethod
def start_multi(user, text):
"""Start multiplication practice activity."""
activity = eb_math.MathDrill(user)
user.push_activity(activity)
return True
@staticmethod
def respond_ip(message):
"Return our local IP address."
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect(("gmail.com", 80)) # I'm abusing gmail.
response = "I'm currently running on IP address %s." % \
sock.getsockname()[0]
sock.close()
return response
def handle_incoming_location_update(self, message):
"Handle incoming request for our location."
user = "Enfors" # Hardcoded for now. Sue me.
location = message.data["location"]
arrived = message.data["arrived"]
print("Updating location: [%s:%s]" % (location, str(arrived)))
# pylint: disable=not-context-manager
with self.config.lock, self.database:
cur = self.database.cursor()
if arrived:
self.location = location
self.arrived = True
cur.execute("insert into LOCATION_HISTORY "
"(user, location, event, time) values "
"(?, ?, 'arrived', ?)",
(user, location, datetime.datetime.now()))
print("Main: Location updated: %s" % self.location)
else: # if leaving
                # Sometimes the "left source" message arrives AFTER the
                # "arrived at destination" message. Only record leaving when
                # we are still at that location (or have not arrived
                # anywhere); skip stale "left" messages for other locations.
if self.arrived is False or location == self.location:
cur.execute("insert into LOCATION_HISTORY "
"(user, location, event, time) values "
"(?, ?, 'left', ?)",
(user, location, datetime.datetime.now()))
print("Main: Location left: %s" % location)
self.arrived = False
return None
def handle_incoming_notify_user(self, message):
"Send notification message through Twitter."
out_message = eb_message.Message("Main",
eb_message.MSG_TYPE_USER_MESSAGE,
{"user": message.data["user"],
"text": message.data["text"]})
self.config.send_message("TwitterRest", out_message)
def respond_location(self, message):
"Return our location."
with self.database:
cur = self.database.cursor()
cur.execute("select * from LOCATION_HISTORY "
"order by ROWID desc limit 1")
try:
(user, location, event, timestamp) = cur.fetchone()
except TypeError:
return "I have no information on that."
if event == "arrived":
return "%s %s at %s %s." % \
(user, event, location,
self.get_datetime_diff_string(timestamp,
datetime.datetime.now()))
return "%s %s %s %s." % \
(user, event, location,
self.get_datetime_diff_string(timestamp,
datetime.datetime.now()))
def respond_syscond(self, message):
"Return the SysCond status of the host."
return self.check_syscond()
def respond_status(self, message):
|
"Return threads status."
output = ""
for thread in self.config.threads:
output += "%s: %s\n" % (thread,
self.config.get_thread_state(thread))
return output
|
identifier_body
|
|
index.ts
|
string into name param
* @param api The api
* @param name The name will be replaced
*/
export function getProfileByNameUrl(api: API, name: string) {
return api.profileByName.replace("${name}", name);
}
/**
 * Replace the `${uuid}` and `${type}` placeholders in the texture URL with the given uuid and type strings
* @param api The api
* @param uuid The uuid string
* @param type The type string
*/
export function getTextureUrl(api: API, uuid: string, type: string) {
return api.texture.replace("${uuid}", uuid).replace("${type}", type);
}
}
/**
* The default Mojang API
*/
export const API_MOJANG: API = {
publicKey: `-----BEGIN PUBLIC KEY-----
MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAylB4B6m5lz7jwrcFz6Fd
/fnfUhcvlxsTSn5kIK/2aGG1C3kMy4VjhwlxF6BFUSnfxhNswPjh3ZitkBxEAFY2
5uzkJFRwHwVA9mdwjashXILtR6OqdLXXFVyUPIURLOSWqGNBtb08EN5fMnG8iFLg
EJIBMxs9BvF3s3/FhuHyPKiVTZmXY0WY4ZyYqvoKR+XjaTRPPvBsDa4WI2u1zxXM
eHlodT3lnCzVvyOYBLXL6CJgByuOxccJ8hnXfF9yY4F0aeL080Jz/3+EBNG8RO4B
yhtBf4Ny8NQ6stWsjfeUIvH7bU/4zCYcYOq4WrInXHqS8qruDmIl7P5XXGcabuzQ
stPf/h2CRAUpP/PlHXcMlvewjmGU6MfDK+lifScNYwjPxRo4nKTGFZf/0aqHCh/E
AsQyLKrOIYRE0lDG3bzBh8ogIMLAugsAfBb6M3mqCqKaTMAf/VAjh5FFJnjS+7bE
+bZEV0qwax1CEoPPJL1fIQjOS8zj086gjpGRCtSy9+bTPTfTR/SJ+VUB5G2IeCIt
kNHpJX2ygojFZ9n5Fnj7R9ZnOM+L8nyIjPu3aePvtcrXlyLhH/hvOfIOjPxOlqW+
O5QwSFP4OEcyLAUgDdUgyW36Z5mB285uKW/ighzZsOTevVUG2QwDItObIV6i8RCx
FbN2oDHyPaO5j1tTaBNyVt8CAwEAAQ==
-----END PUBLIC KEY-----`,
texture: "https://api.mojang.com/user/profile/${uuid}/${type}",
profile: "https://sessionserver.mojang.com/session/minecraft/profile/${uuid}",
profileByName: "https://api.mojang.com/users/profiles/minecraft/${name}",
};
function checkSign(value: string, signature: string, pemKey: string) {
return crypto.createVerify("SHA1").update(value, "utf8").verify(pemKey, signature, "base64");
}
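// checkSign verifies a base64-encoded signature against the given PEM public
// key using an RSA-SHA1 digest; fetchProfile uses it below to discard profile
// properties whose Mojang signature does not match the Yggdrasil public key
// configured above.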
async function fetchProfile(target: string, pemPubKey?: string, payload?: object) {
const { body: obj, statusCode, statusMessage } = await fetchJson(target, { body: payload });
if (statusCode !== 200) {
throw new Error(statusMessage);
}
function parseProfile(o: any) {
if (typeof o.id !== "string" || typeof o.name !== "string") {
throw new Error(`Corrupted profile response ${JSON.stringify(o)}`);
}
if (o.properties && o.properties instanceof Array) {
const properties = o.properties as Array<{ name: string; value: string; signature: string; }>;
const to: { [key: string]: string } = {};
for (const prop of properties) {
if (prop.signature && pemPubKey && !checkSign(prop.value, prop.signature, pemPubKey.toString())) {
console.warn(`Discard corrupted prop ${prop.name}: ${prop.value} as the signature mismatched!`);
} else {
to[prop.name] = prop.value;
}
}
o.properties = to;
}
return o as GameProfile;
}
if (obj instanceof Array) {
return obj.map(parseProfile);
} else {
return parseProfile(obj);
}
}
export function fetchTexture(texture: GameProfile.Texture, dest: string): Promise<void>;
export function fetchTexture(texture: GameProfile.Texture): Promise<Buffer>;
/**
* Fetch the texture into disk or memory
*/
export async function fetchTexture(texture: GameProfile.Texture, dest?: string): Promise<void | Buffer> {
if (dest) {
await vfs.waitStream(got.stream(texture.url)
.pipe(vfs.createWriteStream(dest)));
} else {
const { body } = await fetchBuffer(texture.url);
return body;
}
}
/**
* Cache the texture into the url as data-uri
* @param tex The texture info
*/
export async function cacheTexturesAsUri(tex: GameProfile.TexturesInfo) {
if (!tex) { return Promise.reject("No textures"); }
async function cache(texture: GameProfile.Texture): Promise<GameProfile.Texture> {
if (new URL(texture.url).protocol === "data;") { return texture; }
texture.url = await fetchBuffer(texture.url)
.then((resp) => resp.body)
.then((b) => b.toString("base64"))
.then((s) => `data:image/png;base64,${s}`);
return texture;
}
if (tex.textures.SKIN) {
tex.textures.SKIN = await cache(tex.textures.SKIN);
}
if (tex.textures.CAPE) {
tex.textures.CAPE = await cache(tex.textures.CAPE);
}
if (tex.textures.ELYTRA) {
tex.textures.ELYTRA = await cache(tex.textures.ELYTRA);
}
return tex;
}
/**
* Cache the texture into the url as data-uri
* @param tex The texture info
* @deprecated
*/
export async function cacheTextures(tex: GameProfile.TexturesInfo) {
return cacheTexturesAsUri(tex);
}
/**
* Get all the textures of this GameProfile and cache them.
*
* @param profile The game profile from the profile service
* @param cache Should we cache the texture into url? Default is `true`.
*/
export async function getTextures(profile: GameProfile, cache: boolean = true): Promise<GameProfile.TexturesInfo> {
const texture = parseTexturesInfo(profile);
if (texture) { return cache ? cacheTextures(texture) : texture; }
return Promise.reject(`No texture for user ${profile.id}.`);
}
/**
* Fetch the GameProfile by uuid.
*
* @param uuid The unique id of user/player
* @param option the options for this function
*/
export function
|
(uuid: string, option: { api?: API } = {}) {
const api = option.api || API_MOJANG;
return fetchProfile(API.getProfileUrl(api, uuid) + "?" + queryString.stringify({
unsigned: false,
}), api.publicKey).then((p) => p as GameProfile);
}
/**
* Look up the GameProfile by username in game.
* @param name The username in game.
* @param option the options of this function
*/
export function lookup(name: string, option: { api?: API, timestamp?: number } = {}) {
const api = option.api || API_MOJANG;
const time: number = option.timestamp || 0;
let target = API.getProfileByNameUrl(api, name);
if (time) {
target += "?" + queryString.stringify({
at: (time / 1000),
});
}
return fetchProfile(target, api.publicKey).then((p) => p as GameProfile);
}
/**
 * Look up the game profiles of multiple names via the api
 * @param names The names to look up
* @param option The option with api
*/
export function lookUpAll(names: string[], option: { api?: API } = {}) {
const api = option.api || API_MOJANG;
let target = API.getProfileByNameUrl(api, "");
target = target.substring(0, target.length - 1);
return fetchProfile(target, api.publicKey, names).then((r) => r as Array<GameProfile | undefined>);
}
/**
* Set texture by access token and uuid. If the texture is undefined, it will clear the texture to default steve.
*
* @param option
* @param
|
fetch
|
identifier_name
|
index.ts
|
string into name param
* @param api The api
* @param name The name will be replaced
*/
export function getProfileByNameUrl(api: API, name: string) {
return api.profileByName.replace("${name}", name);
}
/**
 * Replace the `${uuid}` and `${type}` placeholders in the texture URL with the given uuid and type strings
* @param api The api
* @param uuid The uuid string
* @param type The type string
*/
export function getTextureUrl(api: API, uuid: string, type: string) {
return api.texture.replace("${uuid}", uuid).replace("${type}", type);
}
}
/**
* The default Mojang API
*/
export const API_MOJANG: API = {
publicKey: `-----BEGIN PUBLIC KEY-----
MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAylB4B6m5lz7jwrcFz6Fd
/fnfUhcvlxsTSn5kIK/2aGG1C3kMy4VjhwlxF6BFUSnfxhNswPjh3ZitkBxEAFY2
5uzkJFRwHwVA9mdwjashXILtR6OqdLXXFVyUPIURLOSWqGNBtb08EN5fMnG8iFLg
EJIBMxs9BvF3s3/FhuHyPKiVTZmXY0WY4ZyYqvoKR+XjaTRPPvBsDa4WI2u1zxXM
eHlodT3lnCzVvyOYBLXL6CJgByuOxccJ8hnXfF9yY4F0aeL080Jz/3+EBNG8RO4B
yhtBf4Ny8NQ6stWsjfeUIvH7bU/4zCYcYOq4WrInXHqS8qruDmIl7P5XXGcabuzQ
stPf/h2CRAUpP/PlHXcMlvewjmGU6MfDK+lifScNYwjPxRo4nKTGFZf/0aqHCh/E
AsQyLKrOIYRE0lDG3bzBh8ogIMLAugsAfBb6M3mqCqKaTMAf/VAjh5FFJnjS+7bE
+bZEV0qwax1CEoPPJL1fIQjOS8zj086gjpGRCtSy9+bTPTfTR/SJ+VUB5G2IeCIt
kNHpJX2ygojFZ9n5Fnj7R9ZnOM+L8nyIjPu3aePvtcrXlyLhH/hvOfIOjPxOlqW+
O5QwSFP4OEcyLAUgDdUgyW36Z5mB285uKW/ighzZsOTevVUG2QwDItObIV6i8RCx
FbN2oDHyPaO5j1tTaBNyVt8CAwEAAQ==
-----END PUBLIC KEY-----`,
texture: "https://api.mojang.com/user/profile/${uuid}/${type}",
profile: "https://sessionserver.mojang.com/session/minecraft/profile/${uuid}",
profileByName: "https://api.mojang.com/users/profiles/minecraft/${name}",
};
function checkSign(value: string, signature: string, pemKey: string) {
return crypto.createVerify("SHA1").update(value, "utf8").verify(pemKey, signature, "base64");
}
async function fetchProfile(target: string, pemPubKey?: string, payload?: object) {
const { body: obj, statusCode, statusMessage } = await fetchJson(target, { body: payload });
if (statusCode !== 200) {
throw new Error(statusMessage);
}
function parseProfile(o: any) {
if (typeof o.id !== "string" || typeof o.name !== "string") {
throw new Error(`Corrupted profile response ${JSON.stringify(o)}`);
}
if (o.properties && o.properties instanceof Array) {
const properties = o.properties as Array<{ name: string; value: string; signature: string; }>;
const to: { [key: string]: string } = {};
for (const prop of properties) {
if (prop.signature && pemPubKey && !checkSign(prop.value, prop.signature, pemPubKey.toString())) {
console.warn(`Discard corrupted prop ${prop.name}: ${prop.value} as the signature mismatched!`);
} else {
to[prop.name] = prop.value;
}
}
o.properties = to;
}
return o as GameProfile;
}
if (obj instanceof Array) {
return obj.map(parseProfile);
} else {
return parseProfile(obj);
}
}
export function fetchTexture(texture: GameProfile.Texture, dest: string): Promise<void>;
export function fetchTexture(texture: GameProfile.Texture): Promise<Buffer>;
/**
* Fetch the texture into disk or memory
*/
export async function fetchTexture(texture: GameProfile.Texture, dest?: string): Promise<void | Buffer> {
if (dest) {
await vfs.waitStream(got.stream(texture.url)
.pipe(vfs.createWriteStream(dest)));
} else {
const { body } = await fetchBuffer(texture.url);
return body;
}
}
/**
* Cache the texture into the url as data-uri
* @param tex The texture info
*/
export async function cacheTexturesAsUri(tex: GameProfile.TexturesInfo) {
if (!tex) { return Promise.reject("No textures"); }
async function cache(texture: GameProfile.Texture): Promise<GameProfile.Texture> {
        if (new URL(texture.url).protocol === "data:") { return texture; } // already a data URI, nothing to fetch
texture.url = await fetchBuffer(texture.url)
.then((resp) => resp.body)
.then((b) => b.toString("base64"))
.then((s) => `data:image/png;base64,${s}`);
return texture;
}
if (tex.textures.SKIN) {
tex.textures.SKIN = await cache(tex.textures.SKIN);
}
if (tex.textures.CAPE) {
tex.textures.CAPE = await cache(tex.textures.CAPE);
}
if (tex.textures.ELYTRA) {
tex.textures.ELYTRA = await cache(tex.textures.ELYTRA);
}
return tex;
}
/**
* Cache the texture into the url as data-uri
* @param tex The texture info
* @deprecated
*/
export async function cacheTextures(tex: GameProfile.TexturesInfo) {
return cacheTexturesAsUri(tex);
}
/**
* Get all the textures of this GameProfile and cache them.
*
* @param profile The game profile from the profile service
* @param cache Should we cache the texture into url? Default is `true`.
*/
export async function getTextures(profile: GameProfile, cache: boolean = true): Promise<GameProfile.TexturesInfo>
|
/**
* Fetch the GameProfile by uuid.
*
* @param uuid The unique id of user/player
* @param option the options for this function
*/
export function fetch(uuid: string, option: { api?: API } = {}) {
const api = option.api || API_MOJANG;
return fetchProfile(API.getProfileUrl(api, uuid) + "?" + queryString.stringify({
unsigned: false,
}), api.publicKey).then((p) => p as GameProfile);
}
/**
* Look up the GameProfile by username in game.
* @param name The username in game.
* @param option the options of this function
*/
export function lookup(name: string, option: { api?: API, timestamp?: number } = {}) {
const api = option.api || API_MOJANG;
const time: number = option.timestamp || 0;
let target = API.getProfileByNameUrl(api, name);
if (time) {
target += "?" + queryString.stringify({
at: (time / 1000),
});
}
return fetchProfile(target, api.publicKey).then((p) => p as GameProfile);
}
/**
 * Look up the GameProfiles for a list of usernames in one request.
 * @param names The usernames to look up
 * @param option The options carrying the api to use
*/
export function lookUpAll(names: string[], option: { api?: API } = {}) {
const api = option.api || API_MOJANG;
let target = API.getProfileByNameUrl(api, "");
target = target.substring(0, target.length - 1);
return fetchProfile(target, api.publicKey, names).then((r) => r as Array<GameProfile | undefined>);
}
/**
 * Set the texture by access token and uuid. If the texture is undefined, the current texture is cleared and the skin falls back to the default Steve.
*
* @param option
* @
|
{
const texture = parseTexturesInfo(profile);
if (texture) { return cache ? cacheTextures(texture) : texture; }
return Promise.reject(`No texture for user ${profile.id}.`);
}
|
identifier_body
|
index.ts
|
f/VAjh5FFJnjS+7bE
+bZEV0qwax1CEoPPJL1fIQjOS8zj086gjpGRCtSy9+bTPTfTR/SJ+VUB5G2IeCIt
kNHpJX2ygojFZ9n5Fnj7R9ZnOM+L8nyIjPu3aePvtcrXlyLhH/hvOfIOjPxOlqW+
O5QwSFP4OEcyLAUgDdUgyW36Z5mB285uKW/ighzZsOTevVUG2QwDItObIV6i8RCx
FbN2oDHyPaO5j1tTaBNyVt8CAwEAAQ==
-----END PUBLIC KEY-----`,
texture: "https://api.mojang.com/user/profile/${uuid}/${type}",
profile: "https://sessionserver.mojang.com/session/minecraft/profile/${uuid}",
profileByName: "https://api.mojang.com/users/profiles/minecraft/${name}",
};
function checkSign(value: string, signature: string, pemKey: string) {
return crypto.createVerify("SHA1").update(value, "utf8").verify(pemKey, signature, "base64");
}
async function fetchProfile(target: string, pemPubKey?: string, payload?: object) {
const { body: obj, statusCode, statusMessage } = await fetchJson(target, { body: payload });
if (statusCode !== 200) {
throw new Error(statusMessage);
}
function parseProfile(o: any) {
if (typeof o.id !== "string" || typeof o.name !== "string") {
throw new Error(`Corrupted profile response ${JSON.stringify(o)}`);
}
if (o.properties && o.properties instanceof Array) {
const properties = o.properties as Array<{ name: string; value: string; signature: string; }>;
const to: { [key: string]: string } = {};
for (const prop of properties) {
if (prop.signature && pemPubKey && !checkSign(prop.value, prop.signature, pemPubKey.toString())) {
console.warn(`Discard corrupted prop ${prop.name}: ${prop.value} as the signature mismatched!`);
} else {
to[prop.name] = prop.value;
}
}
o.properties = to;
}
return o as GameProfile;
}
if (obj instanceof Array) {
return obj.map(parseProfile);
} else {
return parseProfile(obj);
}
}
export function fetchTexture(texture: GameProfile.Texture, dest: string): Promise<void>;
export function fetchTexture(texture: GameProfile.Texture): Promise<Buffer>;
/**
* Fetch the texture into disk or memory
*/
export async function fetchTexture(texture: GameProfile.Texture, dest?: string): Promise<void | Buffer> {
if (dest) {
await vfs.waitStream(got.stream(texture.url)
.pipe(vfs.createWriteStream(dest)));
} else {
const { body } = await fetchBuffer(texture.url);
return body;
}
}
/**
* Cache the texture into the url as data-uri
* @param tex The texture info
*/
export async function cacheTexturesAsUri(tex: GameProfile.TexturesInfo) {
if (!tex) { return Promise.reject("No textures"); }
async function cache(texture: GameProfile.Texture): Promise<GameProfile.Texture> {
        if (new URL(texture.url).protocol === "data:") { return texture; } // already a data URI, nothing to fetch
texture.url = await fetchBuffer(texture.url)
.then((resp) => resp.body)
.then((b) => b.toString("base64"))
.then((s) => `data:image/png;base64,${s}`);
return texture;
}
if (tex.textures.SKIN) {
tex.textures.SKIN = await cache(tex.textures.SKIN);
}
if (tex.textures.CAPE) {
tex.textures.CAPE = await cache(tex.textures.CAPE);
}
if (tex.textures.ELYTRA) {
tex.textures.ELYTRA = await cache(tex.textures.ELYTRA);
}
return tex;
}
/**
* Cache the texture into the url as data-uri
* @param tex The texture info
* @deprecated
*/
export async function cacheTextures(tex: GameProfile.TexturesInfo) {
return cacheTexturesAsUri(tex);
}
/**
* Get all the textures of this GameProfile and cache them.
*
* @param profile The game profile from the profile service
* @param cache Should we cache the texture into url? Default is `true`.
*/
export async function getTextures(profile: GameProfile, cache: boolean = true): Promise<GameProfile.TexturesInfo> {
const texture = parseTexturesInfo(profile);
if (texture) { return cache ? cacheTextures(texture) : texture; }
return Promise.reject(`No texture for user ${profile.id}.`);
}
/**
* Fetch the GameProfile by uuid.
*
* @param uuid The unique id of user/player
* @param option the options for this function
*/
export function fetch(uuid: string, option: { api?: API } = {}) {
const api = option.api || API_MOJANG;
return fetchProfile(API.getProfileUrl(api, uuid) + "?" + queryString.stringify({
unsigned: false,
}), api.publicKey).then((p) => p as GameProfile);
}
/**
* Look up the GameProfile by username in game.
* @param name The username in game.
* @param option the options of this function
*/
export function lookup(name: string, option: { api?: API, timestamp?: number } = {}) {
const api = option.api || API_MOJANG;
const time: number = option.timestamp || 0;
let target = API.getProfileByNameUrl(api, name);
if (time) {
target += "?" + queryString.stringify({
at: (time / 1000),
});
}
return fetchProfile(target, api.publicKey).then((p) => p as GameProfile);
}
/**
 * Look up the GameProfiles for a list of usernames in one request.
 * @param names The usernames to look up
 * @param option The options carrying the api to use
*/
export function lookUpAll(names: string[], option: { api?: API } = {}) {
const api = option.api || API_MOJANG;
let target = API.getProfileByNameUrl(api, "");
target = target.substring(0, target.length - 1);
return fetchProfile(target, api.publicKey, names).then((r) => r as Array<GameProfile | undefined>);
}
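// Usage sketch (illustrative, not part of the original module): how the profile
// helpers above are typically chained. The player name "SomePlayer" and the wrapper
// function name are assumptions made only for this example.
async function exampleFetchSkin() {
    const byName = await lookup("SomePlayer");        // username -> GameProfile
    const profile = await fetch(byName.id);           // uuid -> profile with signed properties
    const info = await getTextures(profile, false);   // decode the textures property, skip data-uri caching
    console.log(info.textures.SKIN ? info.textures.SKIN.url : "no skin set");
}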
/**
 * Set the texture by access token and uuid. If the texture is undefined, the current texture is cleared and the skin falls back to the default Steve.
*
* @param option
* @param api
*/
export async function setTexture(option: {
accessToken: string,
uuid: string,
type: "skin" | "cape" | "elytra",
texture?: GameProfile.Texture,
data?: Buffer,
}, api: API = API_MOJANG): Promise<void> {
const textUrl = url.parse(API.getTextureUrl(api, option.uuid, option.type));
    const headers: any = { Authorization: `Bearer ${option.accessToken}` };
const requireEmpty = (httpOption: https.RequestOptions, content?: string | Buffer) =>
new Promise<void>((resolve, reject) => {
const req = https.request(httpOption, (inc) => {
let d = "";
inc.on("error", (e) => { reject(e); });
inc.on("data", (b) => d += b.toString());
inc.on("end", () => {
if (d === "" && inc.statusCode === 204) { resolve(); } else { reject(JSON.parse(d)); }
});
});
req.on("error", (e) => reject(e));
if (content) { req.write(content); }
req.end();
});
if (!option.texture) {
return requireEmpty({
method: "DELETE",
path: textUrl.path,
host: textUrl.host,
headers,
});
} else if (option.data) {
let status = 0;
const boundary = `----------------------${crypto.randomBytes(8).toString("hex")}`;
let buff: ByteBuffer = new ByteBuffer();
const diposition = (key: string, value: string) => {
if (status === 0) {
buff.writeUTF8String(`--${boundary}\r\nContent-Disposition: form-data`);
status = 1;
}
buff.writeUTF8String(`; ${key}="${value}"`);
};
const header = (key: string, value: string) => {
if (status === 1) {
buff.writeUTF8String("\r\n");
status = 2;
}
buff.writeUTF8String(`${key}:${value}\r\n`);
};
const content = (payload: Buffer) => {
if (status === 1) {
buff.writeUTF8String("\r\n");
}
status = 0;
|
buff.writeUTF8String("\r\n");
buff = buff.append(payload);
buff.writeUTF8String("\r\n");
|
random_line_split
|
|
index.ts
|
string into name param
* @param api The api
* @param name The name will be replaced
*/
export function getProfileByNameUrl(api: API, name: string) {
return api.profileByName.replace("${name}", name);
}
/**
* Replace uuid string into `${uuid}`, and type string into `${type}`
* @param api The api
* @param uuid The uuid string
* @param type The type string
*/
export function getTextureUrl(api: API, uuid: string, type: string) {
return api.texture.replace("${uuid}", uuid).replace("${type}", type);
}
}
/**
* The default Mojang API
*/
export const API_MOJANG: API = {
publicKey: `-----BEGIN PUBLIC KEY-----
MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAylB4B6m5lz7jwrcFz6Fd
/fnfUhcvlxsTSn5kIK/2aGG1C3kMy4VjhwlxF6BFUSnfxhNswPjh3ZitkBxEAFY2
5uzkJFRwHwVA9mdwjashXILtR6OqdLXXFVyUPIURLOSWqGNBtb08EN5fMnG8iFLg
EJIBMxs9BvF3s3/FhuHyPKiVTZmXY0WY4ZyYqvoKR+XjaTRPPvBsDa4WI2u1zxXM
eHlodT3lnCzVvyOYBLXL6CJgByuOxccJ8hnXfF9yY4F0aeL080Jz/3+EBNG8RO4B
yhtBf4Ny8NQ6stWsjfeUIvH7bU/4zCYcYOq4WrInXHqS8qruDmIl7P5XXGcabuzQ
stPf/h2CRAUpP/PlHXcMlvewjmGU6MfDK+lifScNYwjPxRo4nKTGFZf/0aqHCh/E
AsQyLKrOIYRE0lDG3bzBh8ogIMLAugsAfBb6M3mqCqKaTMAf/VAjh5FFJnjS+7bE
+bZEV0qwax1CEoPPJL1fIQjOS8zj086gjpGRCtSy9+bTPTfTR/SJ+VUB5G2IeCIt
kNHpJX2ygojFZ9n5Fnj7R9ZnOM+L8nyIjPu3aePvtcrXlyLhH/hvOfIOjPxOlqW+
O5QwSFP4OEcyLAUgDdUgyW36Z5mB285uKW/ighzZsOTevVUG2QwDItObIV6i8RCx
FbN2oDHyPaO5j1tTaBNyVt8CAwEAAQ==
-----END PUBLIC KEY-----`,
texture: "https://api.mojang.com/user/profile/${uuid}/${type}",
profile: "https://sessionserver.mojang.com/session/minecraft/profile/${uuid}",
profileByName: "https://api.mojang.com/users/profiles/minecraft/${name}",
};
function checkSign(value: string, signature: string, pemKey: string) {
return crypto.createVerify("SHA1").update(value, "utf8").verify(pemKey, signature, "base64");
}
async function fetchProfile(target: string, pemPubKey?: string, payload?: object) {
const { body: obj, statusCode, statusMessage } = await fetchJson(target, { body: payload });
if (statusCode !== 200) {
throw new Error(statusMessage);
}
function parseProfile(o: any) {
if (typeof o.id !== "string" || typeof o.name !== "string") {
throw new Error(`Corrupted profile response ${JSON.stringify(o)}`);
}
if (o.properties && o.properties instanceof Array) {
const properties = o.properties as Array<{ name: string; value: string; signature: string; }>;
const to: { [key: string]: string } = {};
for (const prop of properties) {
if (prop.signature && pemPubKey && !checkSign(prop.value, prop.signature, pemPubKey.toString())) {
console.warn(`Discard corrupted prop ${prop.name}: ${prop.value} as the signature mismatched!`);
} else {
to[prop.name] = prop.value;
}
}
o.properties = to;
}
return o as GameProfile;
}
if (obj instanceof Array) {
return obj.map(parseProfile);
} else {
return parseProfile(obj);
}
}
export function fetchTexture(texture: GameProfile.Texture, dest: string): Promise<void>;
export function fetchTexture(texture: GameProfile.Texture): Promise<Buffer>;
/**
* Fetch the texture into disk or memory
*/
export async function fetchTexture(texture: GameProfile.Texture, dest?: string): Promise<void | Buffer> {
if (dest) {
await vfs.waitStream(got.stream(texture.url)
.pipe(vfs.createWriteStream(dest)));
} else {
const { body } = await fetchBuffer(texture.url);
return body;
}
}
/**
* Cache the texture into the url as data-uri
* @param tex The texture info
*/
export async function cacheTexturesAsUri(tex: GameProfile.TexturesInfo) {
if (!tex) { return Promise.reject("No textures"); }
async function cache(texture: GameProfile.Texture): Promise<GameProfile.Texture> {
        if (new URL(texture.url).protocol === "data:") { return texture; } // already a data URI, nothing to fetch
texture.url = await fetchBuffer(texture.url)
.then((resp) => resp.body)
.then((b) => b.toString("base64"))
.then((s) => `data:image/png;base64,${s}`);
return texture;
}
if (tex.textures.SKIN)
|
if (tex.textures.CAPE) {
tex.textures.CAPE = await cache(tex.textures.CAPE);
}
if (tex.textures.ELYTRA) {
tex.textures.ELYTRA = await cache(tex.textures.ELYTRA);
}
return tex;
}
/**
* Cache the texture into the url as data-uri
* @param tex The texture info
* @deprecated
*/
export async function cacheTextures(tex: GameProfile.TexturesInfo) {
return cacheTexturesAsUri(tex);
}
/**
* Get all the textures of this GameProfile and cache them.
*
* @param profile The game profile from the profile service
* @param cache Should we cache the texture into url? Default is `true`.
*/
export async function getTextures(profile: GameProfile, cache: boolean = true): Promise<GameProfile.TexturesInfo> {
const texture = parseTexturesInfo(profile);
if (texture) { return cache ? cacheTextures(texture) : texture; }
return Promise.reject(`No texture for user ${profile.id}.`);
}
/**
* Fetch the GameProfile by uuid.
*
* @param uuid The unique id of user/player
* @param option the options for this function
*/
export function fetch(uuid: string, option: { api?: API } = {}) {
const api = option.api || API_MOJANG;
return fetchProfile(API.getProfileUrl(api, uuid) + "?" + queryString.stringify({
unsigned: false,
}), api.publicKey).then((p) => p as GameProfile);
}
/**
* Look up the GameProfile by username in game.
* @param name The username in game.
* @param option the options of this function
*/
export function lookup(name: string, option: { api?: API, timestamp?: number } = {}) {
const api = option.api || API_MOJANG;
const time: number = option.timestamp || 0;
let target = API.getProfileByNameUrl(api, name);
if (time) {
target += "?" + queryString.stringify({
at: (time / 1000),
});
}
return fetchProfile(target, api.publicKey).then((p) => p as GameProfile);
}
/**
 * Look up the GameProfiles for a list of usernames in one request.
 * @param names The usernames to look up
 * @param option The options carrying the api to use
*/
export function lookUpAll(names: string[], option: { api?: API } = {}) {
const api = option.api || API_MOJANG;
let target = API.getProfileByNameUrl(api, "");
target = target.substring(0, target.length - 1);
return fetchProfile(target, api.publicKey, names).then((r) => r as Array<GameProfile | undefined>);
}
/**
 * Set the texture by access token and uuid. If the texture is undefined, the current texture is cleared and the skin falls back to the default Steve.
*
* @param option
* @
|
{
tex.textures.SKIN = await cache(tex.textures.SKIN);
}
|
conditional_block
|
weginfos.js
|
",
"Berg": "Seebensee",
"Beschreibung": "Um das Panorama eines bekannten Berges zu genießen, muss man sich bekanntermaßen in den umliegenden Bergen aufhalten. So auch bei dieser Wanderung mit Zugspitzpanorama. ",
"Tourname": "Seebensee",
"Schwierigkeit": "mittel/schwer",
"Dauer": "5",
"KM": "13",
"Aufstieg": 880,
"Abstieg": 880
},
{
"Nummer": "7",
"Land": "Österreich",
"Berg": "Olpererhütte",
"Beschreibung": "Der bekannteste Fotospot Tirols dürfte wohl eine kleine Hängebrücke im Zillertal sein. Unweit der Olperer Hütte zieht die kleine Brücke täglich hunderte Bergwanderer in seinen Bann.",
"Tourname": "Olpererhütte",
"Schwierigkeit": "mittel",
"Dauer": "5,1",
"KM": "14",
"Aufstieg": 875,
"Abstieg": 875
},
{
"Nummer": "8",
"Land": "Österreich",
"Berg": "Zell am See",
"Beschreibung": "Im Salzburger Land liegt eine der beliebtesten Sehenswürdigkeiten der Alpen: Zell am See mit dem dazugehörigen Zeller See. ",
"Tourname": "Zell am See",
"Schwierigkeit": "mittel/schwer",
"Dauer": "8",
"KM": "20",
"Aufstieg": 1470,
"Abstieg": 1470
},
{
"Nummer": "9",
"Land": "Österreich",
"Berg": "Dachstein",
"Beschreibung": "Mit 850 Höhenmetern zählt die Dachstein-Südwand zu den imposantesten Wänden der Ostalpen. Der „Dachstein-Professor“ Friedrich Simony ließ nach seiner Besteigung des Dachsteins von Süden durchzogen von „recht abscheulichem Klettern“ mit Hilfe einiger Gönner den \"Dachstein Randkluftsteig\" errichten, den ersten Klettersteig der Alpen.",
"Tourname": "Johann Klettersteig ",
"Schwierigkeit": "schwer",
"Dauer": "8",
"KM": "n.a.",
"Aufstieg": "1200",
"Abstieg": "200"
},
{
"Nummer": "10",
"Land": "Österreich",
"Berg": "Hallstatt",
"Beschreibung": "Kaum ein Ort in den Ostalpen steht so sehr für die Alpenidylle wie das kleine Dorf Hallstatt. Die Chinesen waren gleich so verzückt, dass sie die Gemeinde (samt See!) als Attraktion nachbauten.",
"Tourname": "Hallstatt",
"Schwierigkeit": "leicht/mittel",
"Dauer": "2",
"KM": "5,6",
"Aufstieg": 330,
"Abstieg": 330
},
{
"Nummer": "11",
"Land": "Österreich",
"Berg": "Achensee",
"Beschreibung": "Zwischen Rofan und Karwendel erstreckt sich malerisch der Achensee. Der länglich gezogene See erscheint eingezwängt in den steilen Berghängen wie ein klassischer norwegischer Fjord. ",
"Tourname": "Bärenkopf",
"Schwierigkeit": "mittel/schwer",
"Dauer": "6,2",
"KM": "16",
"Aufstieg": 1240,
"Abstieg": 1240
},
{
"Nummer": "12",
"Land": "Österreich",
"Berg": "Innsbruck Karwendel",
"Beschreibung": "Sie ist die österreichweit einzige Stadt inmitten einer alpinen Bergwelt: Innsbruck. Die umgebenden Berge sind allesamt zu bewandern, teils erleichtern Gondelbahnen den Auf- und Abstieg direkt aus dem Stadtzentrum.",
"Tourname": "Karwendelblick hoch über Innsbruck",
"Schwierigkeit": "mittel",
"Dauer": "4",
"KM": "10",
"Aufstieg": 740,
|
"Berg": "Innsbruck Citytour",
"Beschreibung": "Innsbruck ist keine überwältigend große Metropole. Ihre Einzigartigkeit besteht dafür in ihrer alpinen Lage. Blicke aus der Innenstadt gen Himmel bleiben an den prominenten, die Stadt umrahmenden Bergketten hängen.",
"Tourname": "Die Hauptstadt der Alpen urban entdecken",
"Schwierigkeit": "leicht",
"Dauer": "2",
"KM": "6",
"Aufstieg": 0,
"Abstieg": 0
}
// {
// "Nummer": "14",
// "Land": "Schweiz",
// "Berg": "Zermatt – Matterhorn",
// "Beschreibung": "Eine der großartigsten Kulissen der Alpen und das beste Spotlight auf das Wahrzeichen der Schweiz werden bei dieser Tour mit relativ wenig Anstrengung verbunden. Der unverkennbare Blick auf den Toblerone-Gipfel kombiniert mit seiner Spiegelung in einem der Seen ist Zweifelsohne das wohl bekannteste Panorama der Schweiz. ",
// "Tourname": "",
// "Schwierigkeit": "mittel",
// "Dauer": "4",
// "KM": "9,1",
// "Aufstieg": 680,
// "Abstieg": 680
// },
// {
// "Nummer": "15",
// "Land": "Schweiz",
// "Berg": "Saxer Lücke - Fählensee",
// "Beschreibung": "Mit einer nahezu surrealen Zahl an imposanten Aussichten punktet diese Tour im Alpstein-Massiv. Die Wanderung entlang des Stauberenfirst führt den Fotowanderer zu einem der bekanntesten Fotospots der Schweiz: Die Saxer Lücke. Nahezu senkrecht erheben sich hier die Felsplatten aus dem Rheintal fast 2000 Höhenmeter und gipfeln in markant geschwungenen Felsplatten.",
// "Tourname": "",
// "Schwierigkeit": "schwer",
// "Dauer": "6,5",
// "KM": "17,5",
// "Aufstieg": 660,
// "Abstieg": 1530
// },
// {
// "Nummer": "16",
// "Land": "Schweiz",
// "Berg": "Bachalpsee",
// "Beschreibung": "Das Dreigestirn Eiger-Mönch-Jungfrau ist weit über die Landesgrenzen hinaus bekannt und zieht viele Besucher in diese Region. Die massiven Felsformationen stehen im malerischen Kontrast mit ewigem Eis, hohen Felsklippen, wasserreichen Bergbächen und einem saftig-grünen Tal.",
// "Tourname": "",
// "Schwierigkeit": "mittel/schwer",
// "Dauer": "6",
// "KM": "16",
// "Aufstieg": 780,
// "Abstieg": 1400
// },
// {
// "Nummer":"17",
// "Land": "Schweiz",
// "Berg": "Alpstein Seealpsee + Äscher ",
// "Beschreibung": "Das Gasthaus Äscher in der Nordostschweiz ist das prominenteste Gasthaus der Alpen. Binnen weniger Jahre hat es sich – aufgrund seiner atemberaubenden, einzigartigen Lage, eingerahmt von einem massiven Felsvorsprung – von einem unscheinbaren Geheimtipp zu weltwe
|
"Abstieg": 740
},
{
"Nummer": "13",
"Land": "Österreich",
|
random_line_split
|
process.go
|
if i == 0 {
// there were no items in log, happens when last processed commit was in a branch that is no longer recent and is skipped in incremental
// no need to write checkpoints
<-done
return nil
}
writer := repo.NewCheckpointWriter(s.opts.Logger)
err = writer.Write(s.repo, s.checkpointsDir, s.lastProcessedCommitHash)
if err != nil {
<-done
return err
}
//fmt.Println("max len of stored tree", s.maxLenOfStoredTree)
//fmt.Println("repo len", len(s.repo))
<-done
return nil
}
func (s *Process) trimGraphAfterCommitProcessed(commit string) {
parents := s.graph.Parents[commit]
for _, p := range parents {
s.childrenProcessed[p]++ // mark commit as processed
siblings := s.graph.Children[p]
if s.childrenProcessed[p] == len(siblings) {
// done with parent, can delete it
s.unloader.Unload(p)
}
}
//commitsInMemory := s.repo.CommitsInMemory()
commitsInMemory := len(s.repo)
if commitsInMemory > s.maxLenOfStoredTree {
s.maxLenOfStoredTree = commitsInMemory
}
}
func (s *Process) processCommit(resChan chan Result, commit parser.Commit) error {
if len(s.mergeParts) > 0 {
// continuing with merge
if s.mergePartsCommit == commit.Hash {
s.mergeParts[commit.MergeDiffFrom] = commit
// still same
return nil
} else {
// finished
s.processGotMergeParts(resChan)
// new commit
// continue below
}
}
if len(commit.Parents) > 1 { // this is a merge
s.mergePartsCommit = commit.Hash
s.mergeParts = map[string]parser.Commit{}
s.mergeParts[commit.MergeDiffFrom] = commit
return nil
}
res, err := s.processRegularCommit(commit)
if err != nil {
return err
}
s.trimGraphAfterCommitProcessed(commit.Hash)
resChan <- res
return nil
}
func (s *Process) processGotMergeParts(resChan chan Result)
|
type Timing struct {
RegularCommitsCount int
RegularCommitsTime time.Duration
MergesCount int
MergesTime time.Duration
SlowestCommits []CommitWithDuration
}
type CommitWithDuration struct {
Commit string
Duration time.Duration
}
const maxSlowestCommits = 10
func (s *Timing) UpdateSlowestCommitsWith(commit string, d time.Duration) {
s.SlowestCommits = append(s.SlowestCommits, CommitWithDuration{Commit: commit, Duration: d})
sort.Slice(s.SlowestCommits, func(i, j int) bool {
a := s.SlowestCommits[i]
b := s.SlowestCommits[j]
return a.Duration > b.Duration
})
if len(s.SlowestCommits) > maxSlowestCommits {
s.SlowestCommits = s.SlowestCommits[0:maxSlowestCommits]
}
}
func (s *Timing) SlowestCommitsDur() (res time.Duration) {
for _, c := range s.SlowestCommits {
res += c.Duration
}
return
}
/*
func (s *Timing) Stats() map[string]interface{} {
return map[string]interface{}{
"TotalRegularCommit": s.TotalRegularCommit,
"TotalMerges": s.TotalMerges,
"SlowestCommits": s.SlowestCommits,
"SlowestCommitsDur": s.SlowestCommitsDur(),
}
}*/
func (s *Timing) OutputStats(wr io.Writer) {
fmt.Fprintln(wr, "git processor timing")
fmt.Fprintln(wr, "regular commits", s.RegularCommitsCount)
fmt.Fprintln(wr, "time in regular commits", s.RegularCommitsTime)
fmt.Fprintln(wr, "merges", s.MergesCount)
fmt.Fprintln(wr, "time in merges commits", s.MergesTime)
fmt.Fprintf(wr, "time in %v slowest commits %v\n", len(s.SlowestCommits), s.SlowestCommitsDur())
fmt.Fprintln(wr, "slowest commits")
for _, c := range s.SlowestCommits {
fmt.Fprintf(wr, "%v %v\n", c.Commit, c.Duration)
}
}
func (s *Process) processRegularCommit(commit parser.Commit) (res Result, rerr error) {
s.lastProcessedCommitHash = commit.Hash
start := time.Now()
defer func() {
dur := time.Since(start)
s.timing.UpdateSlowestCommitsWith(commit.Hash, dur)
s.timing.RegularCommitsTime += dur
s.timing.RegularCommitsCount++
}()
if len(commit.Parents) > 1 {
panic("not a regular commit")
}
// note that commit exists (important for empty commits)
s.repo.AddCommit(commit.Hash)
//fmt.Println("processing regular commit", commit.Hash)
res.Commit = commit.Hash
res.Files = map[string]*incblame.Blame{}
for _, ch := range commit.Changes {
//fmt.Printf("%+v\n", string(ch.Diff))
diff := incblame.Parse(ch.Diff)
if diff.IsBinary {
// do not keep actual lines, but show in result
bl := incblame.BlameBinaryFile(commit.Hash)
if diff.Path == "" {
p := diff.PathPrev
res.Files[p] = bl
// removal
} else {
p := diff.Path
res.Files[p] = bl
s.repo[commit.Hash][p] = bl
}
continue
}
//fmt.Printf("diff %+v\n", diff)
if diff.Path == "" {
// file removed, no longer need to keep blame reference, but showcase the file in res.Files using PathPrev
res.Files[diff.PathPrev] = &incblame.Blame{Commit: commit.Hash}
continue
}
// TODO: test renames here as well
if diff.Path == "" {
panic(fmt.Errorf("commit diff does not specify Path: %v diff: %v", commit.Hash, string(ch.Diff)))
}
// this is a rename
if diff.PathPrev != "" && diff.PathPrev != diff.Path {
if len(commit.Parents) != 1 {
panic(fmt.Errorf("rename with more than 1 parent (merge) not supported: %v diff: %v", commit.Hash, string(ch.Diff)))
}
// rename with no patch
if len(diff.Hunks) == 0 {
parent := commit.Parents[0]
pb, err := s.repo.GetFileMust(parent, diff.PathPrev)
if err != nil {
rerr = fmt.Errorf("could not get parent file for rename: %v err: %v", commit.Hash, err)
return
}
if pb.IsBinary {
s.repo[commit.Hash][diff.Path] = pb
res.Files[diff.Path] = pb
continue
}
}
} else {
// this is an empty file creation
//if len(diff.Hunks) == 0 {
// panic(fmt.Errorf("no changes in commit: %v diff: %v", commit.Hash, string(ch.Diff)))
//}
}
var parentBlame *incblame.Blame
if diff.PathPrev == "" {
// file added in this commit, no parent blame for this file
} else {
switch len(commit.Parents) {
case 0: // initial commit, no parent
case 1: // regular commit
parentHash := commit.Parents[0]
pb := s.repo.GetFileOptional(parentHash, diff.PathPrev)
// file may not be in parent if this is create
if pb != nil {
parentBlame = pb
}
case 2: // merge
panic("merge passed to regular commit processing")
}
}
var blame incblame.Blame
if parentBlame == nil {
blame = incblame.Apply(incblame.Blame{}, diff, commit.Hash, diff.PathOrPrev())
} else {
if parentBlame.IsBinary {
bl, err := s.slowGitBlame(commit.Hash, diff.Path)
if err != nil {
return res, err
}
blame = bl
} else {
blame = incblame.Apply(*parentBlame, diff, commit.Hash, diff.PathOrPrev())
}
}
s.repo[commit.Hash][diff.Path] = &blame
res.Files[diff.Path] = &blame
}
if len(commit.Parents) == 0 {
// no need to copy files from prev
return
}
// copy unchanged from prev
p := commit.Parents[0]
files := s.repo.GetCommitMust(p)
|
{
res, err := s.processMergeCommit(s.mergePartsCommit, s.mergeParts)
if err != nil {
panic(err)
}
s.trimGraphAfterCommitProcessed(s.mergePartsCommit)
s.mergeParts = nil
resChan <- res
}
|
identifier_body
|
process.go
|
}
if len(s.mergeParts) > 0 {
s.processGotMergeParts(resChan)
}
if i == 0 {
// there were no items in log, happens when last processed commit was in a branch that is no longer recent and is skipped in incremental
// no need to write checkpoints
<-done
return nil
}
writer := repo.NewCheckpointWriter(s.opts.Logger)
err = writer.Write(s.repo, s.checkpointsDir, s.lastProcessedCommitHash)
if err != nil {
<-done
return err
}
//fmt.Println("max len of stored tree", s.maxLenOfStoredTree)
//fmt.Println("repo len", len(s.repo))
<-done
return nil
}
func (s *Process) trimGraphAfterCommitProcessed(commit string) {
parents := s.graph.Parents[commit]
for _, p := range parents {
s.childrenProcessed[p]++ // mark commit as processed
siblings := s.graph.Children[p]
if s.childrenProcessed[p] == len(siblings) {
// done with parent, can delete it
s.unloader.Unload(p)
}
}
//commitsInMemory := s.repo.CommitsInMemory()
commitsInMemory := len(s.repo)
if commitsInMemory > s.maxLenOfStoredTree {
s.maxLenOfStoredTree = commitsInMemory
}
}
func (s *Process) processCommit(resChan chan Result, commit parser.Commit) error {
if len(s.mergeParts) > 0 {
// continuing with merge
if s.mergePartsCommit == commit.Hash {
s.mergeParts[commit.MergeDiffFrom] = commit
// still same
return nil
} else {
// finished
s.processGotMergeParts(resChan)
// new commit
// continue below
}
}
if len(commit.Parents) > 1 { // this is a merge
s.mergePartsCommit = commit.Hash
s.mergeParts = map[string]parser.Commit{}
s.mergeParts[commit.MergeDiffFrom] = commit
return nil
}
res, err := s.processRegularCommit(commit)
if err != nil {
return err
}
s.trimGraphAfterCommitProcessed(commit.Hash)
resChan <- res
return nil
}
func (s *Process) processGotMergeParts(resChan chan Result) {
res, err := s.processMergeCommit(s.mergePartsCommit, s.mergeParts)
if err != nil {
panic(err)
}
s.trimGraphAfterCommitProcessed(s.mergePartsCommit)
s.mergeParts = nil
resChan <- res
}
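// Illustrative sketch (not part of the original file): as processCommit above shows,
// the parser emits a merge commit as several consecutive entries that share a Hash
// but carry a different MergeDiffFrom (one diff per parent), and they are buffered
// until an entry with a new hash arrives. The helper below shows that grouping in
// isolation; the mergeLogEntry type is an assumption made only for this example.
type mergeLogEntry struct{ Hash, MergeDiffFrom string }

func groupMergeParts(entries []mergeLogEntry) map[string]map[string]mergeLogEntry {
	grouped := map[string]map[string]mergeLogEntry{}
	for _, e := range entries {
		if grouped[e.Hash] == nil {
			grouped[e.Hash] = map[string]mergeLogEntry{}
		}
		grouped[e.Hash][e.MergeDiffFrom] = e
	}
	return grouped
}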
type Timing struct {
RegularCommitsCount int
RegularCommitsTime time.Duration
MergesCount int
MergesTime time.Duration
SlowestCommits []CommitWithDuration
}
type CommitWithDuration struct {
Commit string
Duration time.Duration
}
const maxSlowestCommits = 10
func (s *Timing) UpdateSlowestCommitsWith(commit string, d time.Duration) {
s.SlowestCommits = append(s.SlowestCommits, CommitWithDuration{Commit: commit, Duration: d})
sort.Slice(s.SlowestCommits, func(i, j int) bool {
a := s.SlowestCommits[i]
b := s.SlowestCommits[j]
return a.Duration > b.Duration
})
if len(s.SlowestCommits) > maxSlowestCommits {
s.SlowestCommits = s.SlowestCommits[0:maxSlowestCommits]
}
}
func (s *Timing) SlowestCommitsDur() (res time.Duration) {
for _, c := range s.SlowestCommits {
res += c.Duration
}
return
}
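// Usage sketch (illustrative only, not part of the original file): feeding a few
// made-up commit durations into Timing and reading the aggregate back. It relies on
// the fmt and time packages this file already uses.
func exampleTimingUsage() {
	t := &Timing{}
	for i, hash := range []string{"aaa111", "bbb222", "ccc333"} {
		d := time.Duration(i+1) * time.Millisecond
		t.UpdateSlowestCommitsWith(hash, d)
		t.RegularCommitsCount++
		t.RegularCommitsTime += d
	}
	fmt.Println("slowest commits total:", t.SlowestCommitsDur())
}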
/*
func (s *Timing) Stats() map[string]interface{} {
return map[string]interface{}{
"TotalRegularCommit": s.TotalRegularCommit,
"TotalMerges": s.TotalMerges,
"SlowestCommits": s.SlowestCommits,
"SlowestCommitsDur": s.SlowestCommitsDur(),
}
}*/
func (s *Timing) OutputStats(wr io.Writer) {
fmt.Fprintln(wr, "git processor timing")
fmt.Fprintln(wr, "regular commits", s.RegularCommitsCount)
fmt.Fprintln(wr, "time in regular commits", s.RegularCommitsTime)
fmt.Fprintln(wr, "merges", s.MergesCount)
fmt.Fprintln(wr, "time in merges commits", s.MergesTime)
fmt.Fprintf(wr, "time in %v slowest commits %v\n", len(s.SlowestCommits), s.SlowestCommitsDur())
fmt.Fprintln(wr, "slowest commits")
for _, c := range s.SlowestCommits {
fmt.Fprintf(wr, "%v %v\n", c.Commit, c.Duration)
}
}
func (s *Process) processRegularCommit(commit parser.Commit) (res Result, rerr error) {
s.lastProcessedCommitHash = commit.Hash
start := time.Now()
defer func() {
dur := time.Since(start)
s.timing.UpdateSlowestCommitsWith(commit.Hash, dur)
s.timing.RegularCommitsTime += dur
s.timing.RegularCommitsCount++
}()
if len(commit.Parents) > 1 {
panic("not a regular commit")
}
// note that commit exists (important for empty commits)
s.repo.AddCommit(commit.Hash)
//fmt.Println("processing regular commit", commit.Hash)
res.Commit = commit.Hash
res.Files = map[string]*incblame.Blame{}
for _, ch := range commit.Changes {
//fmt.Printf("%+v\n", string(ch.Diff))
diff := incblame.Parse(ch.Diff)
if diff.IsBinary {
// do not keep actual lines, but show in result
bl := incblame.BlameBinaryFile(commit.Hash)
if diff.Path == "" {
p := diff.PathPrev
res.Files[p] = bl
// removal
} else {
p := diff.Path
res.Files[p] = bl
s.repo[commit.Hash][p] = bl
}
continue
}
//fmt.Printf("diff %+v\n", diff)
if diff.Path == "" {
// file removed, no longer need to keep blame reference, but showcase the file in res.Files using PathPrev
res.Files[diff.PathPrev] = &incblame.Blame{Commit: commit.Hash}
continue
}
// TODO: test renames here as well
if diff.Path == "" {
panic(fmt.Errorf("commit diff does not specify Path: %v diff: %v", commit.Hash, string(ch.Diff)))
}
// this is a rename
if diff.PathPrev != "" && diff.PathPrev != diff.Path {
if len(commit.Parents) != 1 {
panic(fmt.Errorf("rename with more than 1 parent (merge) not supported: %v diff: %v", commit.Hash, string(ch.Diff)))
}
// rename with no patch
if len(diff.Hunks) == 0 {
parent := commit.Parents[0]
pb, err := s.repo.GetFileMust(parent, diff.PathPrev)
if err != nil {
rerr = fmt.Errorf("could not get parent file for rename: %v err: %v", commit.Hash, err)
return
}
if pb.IsBinary {
s.repo[commit.Hash][diff.Path] = pb
res.Files[diff.Path] = pb
continue
}
}
} else {
// this is an empty file creation
//if len(diff.Hunks) == 0 {
// panic(fmt.Errorf("no changes in commit: %v diff: %v", commit.Hash, string(ch.Diff)))
//}
}
var parentBlame *incblame.Blame
if diff.PathPrev == "" {
// file added in this commit, no parent blame for this file
} else {
switch len(commit.Parents) {
case 0: // initial commit, no parent
case 1: // regular commit
parentHash := commit.Parents[0]
pb := s.repo.GetFileOptional(parentHash, diff.PathPrev)
// file may not be in parent if this is create
if pb != nil {
parentBlame = pb
}
case 2: // merge
panic("merge passed to regular commit processing")
}
}
var blame incblame.Blame
if parentBlame == nil {
blame = incblame.Apply(incblame.Blame{}, diff, commit.Hash, diff.PathOrPrev())
} else {
if parentBlame.IsBinary {
bl, err := s.slowGitBlame(commit.Hash, diff.Path)
if err != nil {
return res, err
}
blame = bl
} else {
blame = incblame.Apply(*parentBlame, diff, commit.Hash, diff.PathOrPrev())
}
}
s.repo[commit.Hash][diff.Path] = &blame
res.Files[diff.Path] = &blame
}
if len(commit.Parents) == 0 {
// no
|
{
drainAndExit()
return err
}
|
conditional_block
|
|
process.go
|
, h := range parentHashes {
hashToParOrd[h] = i
}
for parHash, part := range parts {
for _, ch := range part.Changes {
diff := incblame.Parse(ch.Diff)
key := ""
if diff.Path != "" {
key = diff.Path
} else {
key = deletedPrefix + diff.PathPrev
}
par, ok := diffs[key]
if !ok {
par = make([]*incblame.Diff, parentCount, parentCount)
diffs[key] = par
}
parInd := hashToParOrd[parHash]
par[parInd] = &diff
}
}
// get a list of all files
files := map[string]bool{}
for k := range diffs {
files[k] = true
}
// process all files
EACHFILE:
for k := range files {
diffs := diffs[k]
isDelete := true
for _, diff := range diffs {
if diff != nil && diff.Path != "" {
isDelete = false
}
}
//fmt.Println("diffs")
//for i, d := range diffs {
// fmt.Println(i, d)
//}
if isDelete {
			// only showing deletes and files changed in the merge compared to at least one parent
pathPrev := k[len(deletedPrefix):]
res.Files[pathPrev] = &incblame.Blame{Commit: commitHash}
continue
}
// below k == new file path
binaryDiffs := 0
for _, diff := range diffs {
if diff == nil {
continue
}
if diff.IsBinary {
binaryDiffs++
}
}
binParentsWithDiffs := 0
for i, diff := range diffs {
if diff == nil {
continue
}
if diff.PathPrev == "" {
// create
continue
}
parent := parentHashes[i]
pb, err := s.repo.GetFileMust(parent, diff.PathPrev)
if err != nil {
rerr = fmt.Errorf("could not get file for merge bin parent. merge: %v %v", commitHash, err)
return
}
if pb.IsBinary {
binParentsWithDiffs++
}
}
// do not try to resolve the diffs for binary files in merge commits
if binaryDiffs != 0 || binParentsWithDiffs != 0 {
bl := incblame.BlameBinaryFile(commitHash)
s.repo[commitHash][k] = bl
res.Files[k] = bl
continue
}
/*
// file is a binary
if binaryDiffs == validDiffs {
bl := incblame.BlameBinaryFile(commitHash)
s.repoSave(commitHash, k, bl)
res.Files[k] = bl
continue
}
// file is not a binary but one of the parents was a binary, need to use a regular git blame
if binaryParents != 0 {
bl, err := s.slowGitBlame(commitHash, k)
if err != nil {
return res, err
}
s.repoSave(commitHash, k, &bl)
res.Files[k] = &bl
continue
}*/
for i, diff := range diffs {
if diff == nil {
// same as parent
parent := parentHashes[i]
pb := s.repo.GetFileOptional(parent, k)
if pb != nil {
				// exactly the same as the parent, no changes
s.repo[commitHash][k] = pb
continue EACHFILE
}
}
}
parents := []incblame.Blame{}
for i, diff := range diffs {
if diff == nil {
// no change use prev
parentHash := parentHashes[i]
parentBlame := s.repo.GetFileOptional(parentHash, k)
if parentBlame == nil {
panic(fmt.Errorf("merge: no change for file recorded, but parent does not contain file:%v merge commit:%v parent:%v", k, commitHash, parentHash))
}
parents = append(parents, *parentBlame)
continue
}
pathPrev := diff.PathPrev
if pathPrev == "" {
// this is create, no parent blame
parents = append(parents, incblame.Blame{})
continue
}
parentHash := parentHashes[i]
parentBlame, err := s.repo.GetFileMust(parentHash, pathPrev)
if err != nil {
rerr = fmt.Errorf("could not get file for unchanged case1 merge file. merge: %v %v", commitHash, err)
return
}
parents = append(parents, *parentBlame)
}
//fmt.Println("path", k)
diffs2 := []incblame.Diff{}
for _, ob := range diffs {
if ob == nil {
ob = &incblame.Diff{}
}
diffs2 = append(diffs2, *ob)
}
blame := incblame.ApplyMerge(parents, diffs2, commitHash, k)
s.repo[commitHash][k] = &blame
		// only showing deletes and files changed in the merge compared to at least one parent
res.Files[k] = &blame
}
// for merge commits we need to use the most updated copy
// get a list of all files in all parents
files = map[string]bool{}
for _, p := range parentHashes {
filesInCommit := s.repo.GetCommitMust(p)
for f := range filesInCommit {
files[f] = true
}
}
root := ""
for f := range files {
alreadyAddedAbove := false
{
bl := s.repo.GetFileOptional(commitHash, f)
if bl != nil {
alreadyAddedAbove = true
}
}
if alreadyAddedAbove {
continue
}
var candidates []*incblame.Blame
for _, p := range parentHashes {
bl := s.repo.GetFileOptional(p, f)
if bl != nil {
candidates = append(candidates, bl)
}
}
// only one branch has the file
if len(candidates) == 1 {
// copy reference
s.repo[commitHash][f] = candidates[0]
continue
}
if len(candidates) == 0 {
panic("no file candidates")
}
// TODO: if more than one candidate we pick at random right now
// Need to check if this is correct? If no change at merge to any that means they are all the same?
// Or we need to check the last common parent and see? This was added in the previous design so possible is not needed anymore.
/*
if root == "" {
// TODO: this is not covered by unit tests
ts := time.Now()
// find common parent commit for all
root = s.graph.Parents.LastCommonParent(parentHashes)
dur := time.Since(ts)
if dur > time.Second {
fmt.Printf("took %v to find last common parent for %v res: %v", dur, parentHashes, root)
}
}*/
var res2 *incblame.Blame
for _, c := range candidates {
// unchanged
//if c.Commit == root {
// continue
//}
res2 = c
}
if res2 == nil {
var err error
// all are unchanged
res2, err = s.repo.GetFileMust(root, f)
if err != nil {
rerr = fmt.Errorf("could not get file for unchanged case2 merge file. merge: %v %v", commitHash, err)
return
}
}
s.repo[commitHash][f] = res2
}
return
}
func (s *Process) slowGitBlame(commitHash string, filePath string) (res incblame.Blame, _ error) {
bl, err := gitblame2.Run(s.opts.RepoDir, commitHash, filePath)
//fmt.Println("running regular blame for file switching from bin mode to regular")
if err != nil {
return res, err
}
res.Commit = commitHash
for _, l := range bl.Lines {
l2 := &incblame.Line{}
l2.Commit = l.CommitHash
l2.Line = []byte(l.Content)
res.Lines = append(res.Lines, l2)
}
return
}
func (s *Process) RunGetAll() (_ []Result, err error) {
res := make(chan Result)
done := make(chan bool)
go func() {
err = s.Run(res)
done <- true
}()
var res2 []Result
for r := range res {
res2 = append(res2, r)
}
<-done
return res2, err
}
|
func (s *Process) gitLogPatches() (io.ReadCloser, error) {
|
random_line_split
|
|
process.go
|
() Timing {
return *s.timing
}
func (s *Process) initCheckpoints() error {
if s.opts.CommitFromIncl == "" {
s.repo = repo.New()
} else {
expectedCommit := ""
if s.opts.NoStrictResume {
// validation disabled
} else {
expectedCommit = s.opts.CommitFromIncl
}
reader := repo.NewCheckpointReader(s.opts.Logger)
r, err := reader.Read(s.checkpointsDir, expectedCommit)
if err != nil {
return fmt.Errorf("Could not read checkpoint: %v", err)
}
s.repo = r
}
s.unloader = repo.NewUnloader(s.repo)
return nil
}
func (s *Process) Run(resChan chan Result) error {
defer func() {
close(resChan)
}()
if s.opts.ParentsGraph != nil {
s.graph = s.opts.ParentsGraph
} else {
s.graph = parentsgraph.New(parentsgraph.Opts{
RepoDir: s.opts.RepoDir,
AllBranches: s.opts.AllBranches,
Logger: s.opts.Logger,
})
err := s.graph.Read()
if err != nil {
return err
}
}
s.childrenProcessed = map[string]int{}
r, err := s.gitLogPatches()
if err != nil {
return err
}
defer r.Close()
commits := make(chan parser.Commit)
p := parser.New(r)
done := make(chan bool)
go func() {
defer func() {
done <- true
}()
err := p.Run(commits)
if err != nil {
panic(err)
}
}()
drainAndExit := func() {
for range commits {
}
<-done
}
i := 0
for commit := range commits {
if i == 0 {
err := s.initCheckpoints()
if err != nil {
drainAndExit()
return err
}
}
i++
commit.Parents = s.graph.Parents[commit.Hash]
err := s.processCommit(resChan, commit)
if err != nil {
drainAndExit()
return err
}
}
if len(s.mergeParts) > 0 {
s.processGotMergeParts(resChan)
}
if i == 0 {
// there were no items in log, happens when last processed commit was in a branch that is no longer recent and is skipped in incremental
// no need to write checkpoints
<-done
return nil
}
writer := repo.NewCheckpointWriter(s.opts.Logger)
err = writer.Write(s.repo, s.checkpointsDir, s.lastProcessedCommitHash)
if err != nil {
<-done
return err
}
//fmt.Println("max len of stored tree", s.maxLenOfStoredTree)
//fmt.Println("repo len", len(s.repo))
<-done
return nil
}
func (s *Process) trimGraphAfterCommitProcessed(commit string) {
parents := s.graph.Parents[commit]
for _, p := range parents {
s.childrenProcessed[p]++ // mark commit as processed
siblings := s.graph.Children[p]
if s.childrenProcessed[p] == len(siblings) {
// done with parent, can delete it
s.unloader.Unload(p)
}
}
//commitsInMemory := s.repo.CommitsInMemory()
commitsInMemory := len(s.repo)
if commitsInMemory > s.maxLenOfStoredTree {
s.maxLenOfStoredTree = commitsInMemory
}
}
func (s *Process) processCommit(resChan chan Result, commit parser.Commit) error {
if len(s.mergeParts) > 0 {
// continuing with merge
if s.mergePartsCommit == commit.Hash {
s.mergeParts[commit.MergeDiffFrom] = commit
// still same
return nil
} else {
// finished
s.processGotMergeParts(resChan)
// new commit
// continue below
}
}
if len(commit.Parents) > 1 { // this is a merge
s.mergePartsCommit = commit.Hash
s.mergeParts = map[string]parser.Commit{}
s.mergeParts[commit.MergeDiffFrom] = commit
return nil
}
res, err := s.processRegularCommit(commit)
if err != nil {
return err
}
s.trimGraphAfterCommitProcessed(commit.Hash)
resChan <- res
return nil
}
func (s *Process) processGotMergeParts(resChan chan Result) {
res, err := s.processMergeCommit(s.mergePartsCommit, s.mergeParts)
if err != nil {
panic(err)
}
s.trimGraphAfterCommitProcessed(s.mergePartsCommit)
s.mergeParts = nil
resChan <- res
}
type Timing struct {
RegularCommitsCount int
RegularCommitsTime time.Duration
MergesCount int
MergesTime time.Duration
SlowestCommits []CommitWithDuration
}
type CommitWithDuration struct {
Commit string
Duration time.Duration
}
const maxSlowestCommits = 10
func (s *Timing) UpdateSlowestCommitsWith(commit string, d time.Duration) {
s.SlowestCommits = append(s.SlowestCommits, CommitWithDuration{Commit: commit, Duration: d})
sort.Slice(s.SlowestCommits, func(i, j int) bool {
a := s.SlowestCommits[i]
b := s.SlowestCommits[j]
return a.Duration > b.Duration
})
if len(s.SlowestCommits) > maxSlowestCommits {
s.SlowestCommits = s.SlowestCommits[0:maxSlowestCommits]
}
}
func (s *Timing) SlowestCommitsDur() (res time.Duration) {
for _, c := range s.SlowestCommits {
res += c.Duration
}
return
}
/*
func (s *Timing) Stats() map[string]interface{} {
return map[string]interface{}{
"TotalRegularCommit": s.TotalRegularCommit,
"TotalMerges": s.TotalMerges,
"SlowestCommits": s.SlowestCommits,
"SlowestCommitsDur": s.SlowestCommitsDur(),
}
}*/
func (s *Timing) OutputStats(wr io.Writer) {
fmt.Fprintln(wr, "git processor timing")
fmt.Fprintln(wr, "regular commits", s.RegularCommitsCount)
fmt.Fprintln(wr, "time in regular commits", s.RegularCommitsTime)
fmt.Fprintln(wr, "merges", s.MergesCount)
fmt.Fprintln(wr, "time in merges commits", s.MergesTime)
fmt.Fprintf(wr, "time in %v slowest commits %v\n", len(s.SlowestCommits), s.SlowestCommitsDur())
fmt.Fprintln(wr, "slowest commits")
for _, c := range s.SlowestCommits {
fmt.Fprintf(wr, "%v %v\n", c.Commit, c.Duration)
}
}
func (s *Process) processRegularCommit(commit parser.Commit) (res Result, rerr error) {
s.lastProcessedCommitHash = commit.Hash
start := time.Now()
defer func() {
dur := time.Since(start)
s.timing.UpdateSlowestCommitsWith(commit.Hash, dur)
s.timing.RegularCommitsTime += dur
s.timing.RegularCommitsCount++
}()
if len(commit.Parents) > 1 {
panic("not a regular commit")
}
// note that commit exists (important for empty commits)
s.repo.AddCommit(commit.Hash)
//fmt.Println("processing regular commit", commit.Hash)
res.Commit = commit.Hash
res.Files = map[string]*incblame.Blame{}
for _, ch := range commit.Changes {
//fmt.Printf("%+v\n", string(ch.Diff))
diff := incblame.Parse(ch.Diff)
if diff.IsBinary {
// do not keep actual lines, but show in result
bl := incblame.BlameBinaryFile(commit.Hash)
if diff.Path == "" {
p := diff.PathPrev
res.Files[p] = bl
// removal
} else {
p := diff.Path
res.Files[p] = bl
s.repo[commit.Hash][p] = bl
}
continue
}
//fmt.Printf("diff %+v\n", diff)
if diff.Path == "" {
// file removed, no longer need to keep blame reference, but showcase the file in res.Files using PathPrev
res.Files[diff.PathPrev] = &incblame.Blame{Commit: commit.Hash}
continue
}
// TODO: test renames here as well
if diff.Path == "" {
panic(fmt.Errorf("commit diff does not specify Path: %v diff: %v", commit.Hash, string(ch.Diff)))
}
// this is a rename
if diff.PathPrev != "" && diff.PathPrev != diff.Path {
if len(commit.Parents) != 1 {
panic(fmt.Errorf("rename with more than 1 parent (merge) not supported: %v diff: %v", commit.Hash, string(ch.Diff)))
}
// rename with no patch
if len(diff.Hunks) == 0 {
parent := commit.Parents[0]
pb, err
|
Timing
|
identifier_name
|
|
lstm.py
|
valid_size:]
train_size = len(train_text)
print(train_size, train_text[:64])
print(valid_size, valid_text[:64])
# Utility functions to map characters to vocabulary IDs and back.
vocabulary_size = len(string.ascii_lowercase) + 1 # [a-z] + ' '
# ascii code for character
first_letter = ord(string.ascii_lowercase[0])
def char2id(char):
if char in string.ascii_lowercase:
return ord(char) - first_letter + 1
elif char == ' ':
return 0
else:
print('Unexpected character: %s' % char)
return 0
def id2char(dictid):
if dictid > 0:
return chr(dictid + first_letter - 1)
else:
return ' '
print(char2id('a'), char2id('z'), char2id(' '), char2id('ï'))
print(id2char(1), id2char(26), id2char(0))
# Function to generate a training batch for the LSTM model.
batch_size = 64
num_unrollings = 10
class BatchGenerator(object):
def __init__(self, text, batch_size, num_unrollings):
self._text = text
self._text_size = len(text)
self._batch_size = batch_size
self._num_unrollings = num_unrollings
segment = self._text_size // batch_size
self._cursor = [offset * segment for offset in range(batch_size)]
self._last_batch = self._next_batch()
def _next_batch(self):
"""Generate a single batch from the current cursor position in the data."""
batch = np.zeros(shape=(self._batch_size, vocabulary_size), dtype=np.float)
for b in range(self._batch_size):
# same id, same index of second dimension
batch[b, char2id(self._text[self._cursor[b]])] = 1.0
self._cursor[b] = (self._cursor[b] + 1) % self._text_size
return batch
def next(self):
"""Generate the next array of batches from the data. The array consists of
the last batch of the previous array, followed by num_unrollings new ones.
"""
batches = [self._last_batch]
for step in range(self._num_unrollings):
batches.append(self._next_batch())
self._last_batch = batches[-1]
return batches
def characters(probabilities):
"""Turn a 1-hot encoding or a probability distribution over the possible
characters back into its (most likely) character representation."""
# argmax for the most likely character
return [id2char(c) for c in np.argmax(probabilities, 1)]
def batches2string(batches):
"""Convert a sequence of batches back into their (most likely) string
representation."""
s = [''] * batches[0].shape[0]
for b in batches:
s = [''.join(x) for x in zip(s, characters(b))]
return s
train_batches = BatchGenerator(train_text, batch_size, num_unrollings)
valid_batches = BatchGenerator(valid_text, 1, 1)
print(batches2string(train_batches.next()))
print(batches2string(train_batches.next()))
print(batches2string(valid_batches.next()))
print(batches2string(valid_batches.next()))
def logprob(predictions, labels):
  # clamp zero/negative probabilities so np.log below stays finite
"""Log-probability of the true labels in a predicted batch."""
predictions[predictions < 1e-10] = 1e-10
return np.sum(np.multiply(labels, -np.log(predictions))) / labels.shape[0]
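# Illustrative check (not part of the original script): log-probability of a single
# prediction against a one-hot label; the numbers below are made up for this example.
example_pred = np.array([[0.7, 0.2, 0.1]])
example_label = np.array([[1.0, 0.0, 0.0]])
print(logprob(example_pred, example_label))  # -log(0.7), roughly 0.357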
def sample_distribution(distribution):
"""Sample one element from a distribution assumed to be an array of normalized
probabilities.
"""
  # draw a uniform random number and return the first index whose cumulative probability reaches it
r = random.uniform(0, 1)
s = 0
for i in range(len(distribution)):
s += distribution[i]
if s >= r:
return i
return len(distribution) - 1
def sample(prediction):
"""Turn a (column) prediction into 1-hot encoded samples."""
p = np.zeros(shape=[1, vocabulary_size], dtype=np.float)
p[0, sample_distribution(prediction[0])] = 1.0
return p
def random_distribution():
"""Generate a random column of probabilities."""
b = np.random.uniform(0.0, 1.0, size=[1, vocabulary_size])
return b / np.sum(b, 1)[:, None]
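# Illustrative sketch (not part of the original script): sample_distribution walks the
# cumulative sum of the probabilities until it passes a uniform random draw, so over
# many samples the counts are roughly proportional to the distribution. The toy
# distribution below is made up for this example.
toy_distribution = [0.1, 0.2, 0.7]
toy_counts = [0, 0, 0]
for _ in range(10000):
    toy_counts[sample_distribution(toy_distribution)] += 1
print(toy_counts)  # roughly proportional to [1000, 2000, 7000]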
# Simple LSTM Model.
num_nodes = 64
graph = tf.Graph()
with graph.as_default():
# Parameters:
# Input gate: input, previous output, and bias.
ix = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))
im = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))
ib = tf.Variable(tf.zeros([1, num_nodes]))
# Forget gate: input, previous output, and bias.
fx = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))
fm = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))
fb = tf.Variable(tf.zeros([1, num_nodes]))
# Memory cell: input, state and bias.
cx = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))
cm = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))
cb = tf.Variable(tf.zeros([1, num_nodes]))
# Output gate: input, previous output, and bias.
ox = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))
om = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))
ob = tf.Variable(tf.zeros([1, num_nodes]))
# Variables saving state across unrollings.
saved_output = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)
saved_state = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)
# Classifier weights and biases.
w = tf.Variable(tf.truncated_normal([num_nodes, vocabulary_size], -0.1, 0.1))
b = tf.Variable(tf.zeros([vocabulary_size]))
# Definition of the cell computation.
def lstm_cell(i, o, state):
"""Create a LSTM cell. See e.g.: http://arxiv.org/pdf/1402.1128v1.pdf
Note that in this formulation, we omit the various connections between the
previous state and the gates."""
input_gate = tf.sigmoid(tf.matmul(i, ix) + tf.matmul(o, im) + ib)
forget_gate = tf.sigmoid(tf.matmul(i, fx) + tf.matmul(o, fm) + fb)
update = tf.matmul(i, cx) + tf.matmul(o, cm) + cb
state = forget_gate * state + input_gate * tf.tanh(update)
output_gate = tf.sigmoid(tf.matmul(i, ox) + tf.matmul(o, om) + ob)
return output_gate * tf.tanh(state), state
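  # Reference (illustrative note, not part of the original script): the cell above
  # implements the standard LSTM update, applied row-wise to the batch, with peephole
  # connections omitted:
  #   input_gate  i_t = sigmoid(x_t @ ix + o_{t-1} @ im + ib)
  #   forget_gate f_t = sigmoid(x_t @ fx + o_{t-1} @ fm + fb)
  #   candidate   c_t = x_t @ cx + o_{t-1} @ cm + cb
  #   state_t  = f_t * state_{t-1} + i_t * tanh(c_t)
  #   output_gate g_t = sigmoid(x_t @ ox + o_{t-1} @ om + ob)
  #   output_t = g_t * tanh(state_t)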
# Input data.
train_data = list()
for _ in range(num_unrollings + 1):
train_data.append(
tf.place
|
ollings]
train_labels = train_data[1:] # labels are inputs shifted by one time step.
# Unrolled LSTM loop.
outputs = list()
output = saved_output
state = saved_state
for i in train_inputs:
output, state = lstm_cell(i, output, state)
outputs.append(output)
# State saving across unrollings.
with tf.control_dependencies([saved_output.assign(output),
saved_state.assign(state)]):
# Classifier.
logits = tf.nn.xw_plus_b(tf.concat(0, outputs), w, b)
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(
logits, tf.concat(0, train_labels)))
# Optimizer.
global_step = tf.Variable(0)
learning_rate = tf.train.exponential_decay(
10.0, global_step, 5000, 0.1, staircase=True)
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
gradients, v = zip(*optimizer.compute_gradients(loss))
gradients, _ = tf.clip_by_global_norm(gradients, 1.25)
optimizer = optimizer.apply_gradients(
zip(gradients, v), global_step=global_step)
# Predictions.
train_prediction = tf.nn.softmax(logits)
# Sampling and validation eval: batch 1, no unrolling.
sample_input = tf.placeholder(tf.float32, shape=[1, vocabulary_size])
saved_sample_output = tf.Variable(tf.zeros([1, num_nodes]))
saved_sample_state = tf.Variable(tf.zeros([1, num_nodes]))
reset_sample_state = tf.group(
saved_sample_output.assign(tf.zeros([1, num_nodes])),
saved_sample_state.assign(tf.zeros([1, num_nodes])))
|
holder(tf.float32, shape=[batch_size, vocabulary_size]))
train_inputs = train_data[:num_unr
|
conditional_block
|
lstm.py
|
valid_size:]
train_size = len(train_text)
print(train_size, train_text[:64])
print(valid_size, valid_text[:64])
# Utility functions to map characters to vocabulary IDs and back.
vocabulary_size = len(string.ascii_lowercase) + 1 # [a-z] + ' '
# ascii code for character
first_letter = ord(string.ascii_lowercase[0])
def char2id(char):
if char in string.ascii_lowercase:
return ord(char) - first_letter + 1
elif char == ' ':
return 0
else:
print('Unexpected character: %s' % char)
return 0
def id2char(dictid):
if dictid > 0:
return chr(dictid + first_letter - 1)
else:
return ' '
print(char2id('a'), char2id('z'), char2id(' '), char2id('ï'))
print(id2char(1), id2char(26), id2char(0))
# Function to generate a training batch for the LSTM model.
batch_size = 64
num_unrollings = 10
class BatchGenerator(object):
def __init__(self, text, batch_size, num_unrollings):
self._text = text
self._text_size = len(text)
self._batch_size = batch_size
self._num_unrollings = num_unrollings
segment = self._text_size // batch_size
self._cursor = [offset * segment for offset in range(batch_size)]
self._last_batch = self._next_batch()
def _next_batch(self):
"
|
def next(self):
"""Generate the next array of batches from the data. The array consists of
the last batch of the previous array, followed by num_unrollings new ones.
"""
batches = [self._last_batch]
for step in range(self._num_unrollings):
batches.append(self._next_batch())
self._last_batch = batches[-1]
return batches
def characters(probabilities):
"""Turn a 1-hot encoding or a probability distribution over the possible
characters back into its (most likely) character representation."""
# argmax for the most likely character
return [id2char(c) for c in np.argmax(probabilities, 1)]
def batches2string(batches):
"""Convert a sequence of batches back into their (most likely) string
representation."""
s = [''] * batches[0].shape[0]
for b in batches:
s = [''.join(x) for x in zip(s, characters(b))]
return s
train_batches = BatchGenerator(train_text, batch_size, num_unrollings)
valid_batches = BatchGenerator(valid_text, 1, 1)
print(batches2string(train_batches.next()))
print(batches2string(train_batches.next()))
print(batches2string(valid_batches.next()))
print(batches2string(valid_batches.next()))
def logprob(predictions, labels):
# clip tiny or negative probabilities below so log() stays finite
"""Log-probability of the true labels in a predicted batch."""
predictions[predictions < 1e-10] = 1e-10
return np.sum(np.multiply(labels, -np.log(predictions))) / labels.shape[0]
def sample_distribution(distribution):
"""Sample one element from a distribution assumed to be an array of normalized
probabilities.
"""
# draw a uniform random threshold; return the first index where the cumulative probability reaches it
r = random.uniform(0, 1)
s = 0
for i in range(len(distribution)):
s += distribution[i]
if s >= r:
return i
return len(distribution) - 1
def sample(prediction):
"""Turn a (column) prediction into 1-hot encoded samples."""
p = np.zeros(shape=[1, vocabulary_size], dtype=np.float)
p[0, sample_distribution(prediction[0])] = 1.0
return p
def random_distribution():
"""Generate a random column of probabilities."""
b = np.random.uniform(0.0, 1.0, size=[1, vocabulary_size])
return b / np.sum(b, 1)[:, None]
# Simple LSTM Model.
num_nodes = 64
graph = tf.Graph()
with graph.as_default():
# Parameters:
# Input gate: input, previous output, and bias.
ix = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))
im = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))
ib = tf.Variable(tf.zeros([1, num_nodes]))
# Forget gate: input, previous output, and bias.
fx = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))
fm = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))
fb = tf.Variable(tf.zeros([1, num_nodes]))
# Memory cell: input, state and bias.
cx = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))
cm = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))
cb = tf.Variable(tf.zeros([1, num_nodes]))
# Output gate: input, previous output, and bias.
ox = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))
om = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))
ob = tf.Variable(tf.zeros([1, num_nodes]))
# Variables saving state across unrollings.
saved_output = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)
saved_state = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)
# Classifier weights and biases.
w = tf.Variable(tf.truncated_normal([num_nodes, vocabulary_size], -0.1, 0.1))
b = tf.Variable(tf.zeros([vocabulary_size]))
# Definition of the cell computation.
def lstm_cell(i, o, state):
"""Create a LSTM cell. See e.g.: http://arxiv.org/pdf/1402.1128v1.pdf
Note that in this formulation, we omit the various connections between the
previous state and the gates."""
input_gate = tf.sigmoid(tf.matmul(i, ix) + tf.matmul(o, im) + ib)
forget_gate = tf.sigmoid(tf.matmul(i, fx) + tf.matmul(o, fm) + fb)
update = tf.matmul(i, cx) + tf.matmul(o, cm) + cb
state = forget_gate * state + input_gate * tf.tanh(update)
output_gate = tf.sigmoid(tf.matmul(i, ox) + tf.matmul(o, om) + ob)
return output_gate * tf.tanh(state), state
# Input data.
train_data = list()
for _ in range(num_unrollings + 1):
train_data.append(
tf.placeholder(tf.float32, shape=[batch_size, vocabulary_size]))
train_inputs = train_data[:num_unrollings]
train_labels = train_data[1:] # labels are inputs shifted by one time step.
# Unrolled LSTM loop.
outputs = list()
output = saved_output
state = saved_state
for i in train_inputs:
output, state = lstm_cell(i, output, state)
outputs.append(output)
# State saving across unrollings.
with tf.control_dependencies([saved_output.assign(output),
saved_state.assign(state)]):
# Classifier.
logits = tf.nn.xw_plus_b(tf.concat(0, outputs), w, b)
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(
logits, tf.concat(0, train_labels)))
# Optimizer.
global_step = tf.Variable(0)
learning_rate = tf.train.exponential_decay(
10.0, global_step, 5000, 0.1, staircase=True)
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
gradients, v = zip(*optimizer.compute_gradients(loss))
gradients, _ = tf.clip_by_global_norm(gradients, 1.25)
optimizer = optimizer.apply_gradients(
zip(gradients, v), global_step=global_step)
# Predictions.
train_prediction = tf.nn.softmax(logits)
# Sampling and validation eval: batch 1, no unrolling.
sample_input = tf.placeholder(tf.float32, shape=[1, vocabulary_size])
saved_sample_output = tf.Variable(tf.zeros([1, num_nodes]))
saved_sample_state = tf.Variable(tf.zeros([1, num_nodes]))
reset_sample_state = tf.group(
saved_sample_output.assign(tf.zeros([1, num_nodes])),
saved_sample_state.assign(tf.zeros([1, num_nodes])))
|
""Generate a single batch from the current cursor position in the data."""
batch = np.zeros(shape=(self._batch_size, vocabulary_size), dtype=np.float)
for b in range(self._batch_size):
# same id, same index of second dimension
batch[b, char2id(self._text[self._cursor[b]])] = 1.0
self._cursor[b] = (self._cursor[b] + 1) % self._text_size
return batch
|
identifier_body
|
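BatchGenerator above keeps one cursor per batch row, each starting at a different offset (offset * segment), so the rows sweep disjoint stretches of the text in parallel. A small sketch of that cursor layout, using plain Python strings instead of one-hot arrays (names and the sample text are illustrative):

text = 'the quick brown fox jumps over the lazy dog '
batch_size, num_unrollings = 4, 3
segment = len(text) // batch_size
cursors = [offset * segment for offset in range(batch_size)]

batches = []
for _ in range(num_unrollings + 1):
    # one character per batch row, then advance every cursor (wrapping like _next_batch)
    batches.append([text[c] for c in cursors])
    cursors = [(c + 1) % len(text) for c in cursors]

# Reading each row across batches yields batch_size parallel substrings of the text:
print([''.join(row) for row in zip(*batches)])   # -> ['the ', 'rown', 'mps ', 'e la']
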
lstm.py
|
valid_size:]
train_size = len(train_text)
print(train_size, train_text[:64])
print(valid_size, valid_text[:64])
# Utility functions to map characters to vocabulary IDs and back.
vocabulary_size = len(string.ascii_lowercase) + 1 # [a-z] + ' '
# ASCII code of the first letter ('a')
first_letter = ord(string.ascii_lowercase[0])
def char2id(char):
if char in string.ascii_lowercase:
return ord(char) - first_letter + 1
elif char == ' ':
return 0
else:
print('Unexpected character: %s' % char)
return 0
def id2char(dictid):
if dictid > 0:
return chr(dictid + first_letter - 1)
else:
return ' '
print(char2id('a'), char2id('z'), char2id(' '), char2id('ï'))
print(id2char(1), id2char(26), id2char(0))
# Function to generate a training batch for the LSTM model.
batch_size = 64
num_unrollings = 10
class BatchGenerator(object):
def __init__(self, text, batch_size, num_unrollings):
self._text = text
self._text_size = len(text)
self._batch_size = batch_size
self._num_unrollings = num_unrollings
segment = self._text_size // batch_size
self._cursor = [offset * segment for offset in range(batch_size)]
self._last_batch = self._next_batch()
def _next_batch(self):
"""Generate a single batch from the current cursor position in the data."""
batch = np.zeros(shape=(self._batch_size, vocabulary_size), dtype=np.float)
for b in range(self._batch_size):
# same id, same index of second dimension
batch[b, char2id(self._text[self._cursor[b]])] = 1.0
self._cursor[b] = (self._cursor[b] + 1) % self._text_size
return batch
def next(self):
"""Generate the next array of batches from the data. The array consists of
the last batch of the previous array, followed by num_unrollings new ones.
"""
batches = [self._last_batch]
for step in range(self._num_unrollings):
batches.append(self._next_batch())
self._last_batch = batches[-1]
return batches
def characters(probabilities):
"""Turn a 1-hot encoding or a probability distribution over the possible
characters back into its (most likely) character representation."""
# argmax for the most likely character
return [id2char(c) for c in np.argmax(probabilities, 1)]
def b
|
batches):
"""Convert a sequence of batches back into their (most likely) string
representation."""
s = [''] * batches[0].shape[0]
for b in batches:
s = [''.join(x) for x in zip(s, characters(b))]
return s
train_batches = BatchGenerator(train_text, batch_size, num_unrollings)
valid_batches = BatchGenerator(valid_text, 1, 1)
print(batches2string(train_batches.next()))
print(batches2string(train_batches.next()))
print(batches2string(valid_batches.next()))
print(batches2string(valid_batches.next()))
def logprob(predictions, labels):
# clip tiny or negative probabilities below so log() stays finite
"""Log-probability of the true labels in a predicted batch."""
predictions[predictions < 1e-10] = 1e-10
return np.sum(np.multiply(labels, -np.log(predictions))) / labels.shape[0]
def sample_distribution(distribution):
"""Sample one element from a distribution assumed to be an array of normalized
probabilities.
"""
# draw a uniform random threshold; return the first index where the cumulative probability reaches it
r = random.uniform(0, 1)
s = 0
for i in range(len(distribution)):
s += distribution[i]
if s >= r:
return i
return len(distribution) - 1
def sample(prediction):
"""Turn a (column) prediction into 1-hot encoded samples."""
p = np.zeros(shape=[1, vocabulary_size], dtype=np.float)
p[0, sample_distribution(prediction[0])] = 1.0
return p
def random_distribution():
"""Generate a random column of probabilities."""
b = np.random.uniform(0.0, 1.0, size=[1, vocabulary_size])
return b / np.sum(b, 1)[:, None]
# Simple LSTM Model.
num_nodes = 64
graph = tf.Graph()
with graph.as_default():
# Parameters:
# Input gate: input, previous output, and bias.
ix = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))
im = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))
ib = tf.Variable(tf.zeros([1, num_nodes]))
# Forget gate: input, previous output, and bias.
fx = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))
fm = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))
fb = tf.Variable(tf.zeros([1, num_nodes]))
# Memory cell: input, state and bias.
cx = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))
cm = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))
cb = tf.Variable(tf.zeros([1, num_nodes]))
# Output gate: input, previous output, and bias.
ox = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))
om = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))
ob = tf.Variable(tf.zeros([1, num_nodes]))
# Variables saving state across unrollings.
saved_output = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)
saved_state = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)
# Classifier weights and biases.
w = tf.Variable(tf.truncated_normal([num_nodes, vocabulary_size], -0.1, 0.1))
b = tf.Variable(tf.zeros([vocabulary_size]))
# Definition of the cell computation.
def lstm_cell(i, o, state):
"""Create a LSTM cell. See e.g.: http://arxiv.org/pdf/1402.1128v1.pdf
Note that in this formulation, we omit the various connections between the
previous state and the gates."""
input_gate = tf.sigmoid(tf.matmul(i, ix) + tf.matmul(o, im) + ib)
forget_gate = tf.sigmoid(tf.matmul(i, fx) + tf.matmul(o, fm) + fb)
update = tf.matmul(i, cx) + tf.matmul(o, cm) + cb
state = forget_gate * state + input_gate * tf.tanh(update)
output_gate = tf.sigmoid(tf.matmul(i, ox) + tf.matmul(o, om) + ob)
return output_gate * tf.tanh(state), state
# Input data.
train_data = list()
for _ in range(num_unrollings + 1):
train_data.append(
tf.placeholder(tf.float32, shape=[batch_size, vocabulary_size]))
train_inputs = train_data[:num_unrollings]
train_labels = train_data[1:] # labels are inputs shifted by one time step.
# Unrolled LSTM loop.
outputs = list()
output = saved_output
state = saved_state
for i in train_inputs:
output, state = lstm_cell(i, output, state)
outputs.append(output)
# State saving across unrollings.
with tf.control_dependencies([saved_output.assign(output),
saved_state.assign(state)]):
# Classifier.
logits = tf.nn.xw_plus_b(tf.concat(0, outputs), w, b)
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(
logits, tf.concat(0, train_labels)))
# Optimizer.
global_step = tf.Variable(0)
learning_rate = tf.train.exponential_decay(
10.0, global_step, 5000, 0.1, staircase=True)
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
gradients, v = zip(*optimizer.compute_gradients(loss))
gradients, _ = tf.clip_by_global_norm(gradients, 1.25)
optimizer = optimizer.apply_gradients(
zip(gradients, v), global_step=global_step)
# Predictions.
train_prediction = tf.nn.softmax(logits)
# Sampling and validation eval: batch 1, no unrolling.
sample_input = tf.placeholder(tf.float32, shape=[1, vocabulary_size])
saved_sample_output = tf.Variable(tf.zeros([1, num_nodes]))
saved_sample_state = tf.Variable(tf.zeros([1, num_nodes]))
reset_sample_state = tf.group(
saved_sample_output.assign(tf.zeros([1, num_nodes])),
saved_sample_state.assign(tf.zeros([1, num_nodes])))
|
atches2string(
|
identifier_name
|
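characters() above recovers the most likely character per row via argmax, and batches2string() stitches those rows together across time steps. A self-contained sketch of that decoding step (it re-declares tiny helpers instead of importing the file; ids and batches are illustrative):

import numpy as np

vocab = 27  # ' ' plus a-z
def id2char(i):
    return chr(i + ord('a') - 1) if i > 0 else ' '

def characters(probabilities):
    return [id2char(c) for c in np.argmax(probabilities, 1)]

def one_hot(ids):
    m = np.zeros((len(ids), vocab))
    m[np.arange(len(ids)), ids] = 1.0
    return m

b0 = one_hot([8, 9])    # 'h', 'i'   (batch at time t)
b1 = one_hot([1, 20])   # 'a', 't'   (batch at time t+1)
s = [''] * b0.shape[0]
for b in (b0, b1):
    s = [''.join(x) for x in zip(s, characters(b))]
print(s)   # -> ['ha', 'it']: each batch row is read across time steps
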
lstm.py
|
[valid_size:]
train_size = len(train_text)
print(train_size, train_text[:64])
print(valid_size, valid_text[:64])
# Utility functions to map characters to vocabulary IDs and back.
vocabulary_size = len(string.ascii_lowercase) + 1 # [a-z] + ' '
# ASCII code of the first letter ('a')
first_letter = ord(string.ascii_lowercase[0])
def char2id(char):
if char in string.ascii_lowercase:
return ord(char) - first_letter + 1
elif char == ' ':
return 0
else:
print('Unexpected character: %s' % char)
return 0
def id2char(dictid):
if dictid > 0:
return chr(dictid + first_letter - 1)
else:
return ' '
print(char2id('a'), char2id('z'), char2id(' '), char2id('ï'))
print(id2char(1), id2char(26), id2char(0))
# Function to generate a training batch for the LSTM model.
batch_size = 64
num_unrollings = 10
class BatchGenerator(object):
def __init__(self, text, batch_size, num_unrollings):
self._text = text
self._text_size = len(text)
self._batch_size = batch_size
self._num_unrollings = num_unrollings
segment = self._text_size // batch_size
self._cursor = [offset * segment for offset in range(batch_size)]
self._last_batch = self._next_batch()
def _next_batch(self):
"""Generate a single batch from the current cursor position in the data."""
batch = np.zeros(shape=(self._batch_size, vocabulary_size), dtype=np.float)
for b in range(self._batch_size):
# same id, same index of second dimension
batch[b, char2id(self._text[self._cursor[b]])] = 1.0
self._cursor[b] = (self._cursor[b] + 1) % self._text_size
return batch
def next(self):
"""Generate the next array of batches from the data. The array consists of
the last batch of the previous array, followed by num_unrollings new ones.
"""
batches = [self._last_batch]
for step in range(self._num_unrollings):
batches.append(self._next_batch())
self._last_batch = batches[-1]
return batches
def characters(probabilities):
"""Turn a 1-hot encoding or a probability distribution over the possible
characters back into its (most likely) character representation."""
# argmax for the most likely character
return [id2char(c) for c in np.argmax(probabilities, 1)]
def batches2string(batches):
"""Convert a sequence of batches back into their (most likely) string
representation."""
s = [''] * batches[0].shape[0]
for b in batches:
s = [''.join(x) for x in zip(s, characters(b))]
return s
train_batches = BatchGenerator(train_text, batch_size, num_unrollings)
valid_batches = BatchGenerator(valid_text, 1, 1)
print(batches2string(train_batches.next()))
print(batches2string(train_batches.next()))
print(batches2string(valid_batches.next()))
print(batches2string(valid_batches.next()))
|
"""Log-probability of the true labels in a predicted batch."""
predictions[predictions < 1e-10] = 1e-10
return np.sum(np.multiply(labels, -np.log(predictions))) / labels.shape[0]
def sample_distribution(distribution):
"""Sample one element from a distribution assumed to be an array of normalized
probabilities.
"""
# draw a uniform random threshold; return the first index where the cumulative probability reaches it
r = random.uniform(0, 1)
s = 0
for i in range(len(distribution)):
s += distribution[i]
if s >= r:
return i
return len(distribution) - 1
def sample(prediction):
"""Turn a (column) prediction into 1-hot encoded samples."""
p = np.zeros(shape=[1, vocabulary_size], dtype=np.float)
p[0, sample_distribution(prediction[0])] = 1.0
return p
def random_distribution():
"""Generate a random column of probabilities."""
b = np.random.uniform(0.0, 1.0, size=[1, vocabulary_size])
return b / np.sum(b, 1)[:, None]
# Simple LSTM Model.
num_nodes = 64
graph = tf.Graph()
with graph.as_default():
# Parameters:
# Input gate: input, previous output, and bias.
ix = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))
im = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))
ib = tf.Variable(tf.zeros([1, num_nodes]))
# Forget gate: input, previous output, and bias.
fx = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))
fm = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))
fb = tf.Variable(tf.zeros([1, num_nodes]))
# Memory cell: input, state and bias.
cx = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))
cm = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))
cb = tf.Variable(tf.zeros([1, num_nodes]))
# Output gate: input, previous output, and bias.
ox = tf.Variable(tf.truncated_normal([vocabulary_size, num_nodes], -0.1, 0.1))
om = tf.Variable(tf.truncated_normal([num_nodes, num_nodes], -0.1, 0.1))
ob = tf.Variable(tf.zeros([1, num_nodes]))
# Variables saving state across unrollings.
saved_output = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)
saved_state = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)
# Classifier weights and biases.
w = tf.Variable(tf.truncated_normal([num_nodes, vocabulary_size], -0.1, 0.1))
b = tf.Variable(tf.zeros([vocabulary_size]))
# Definition of the cell computation.
def lstm_cell(i, o, state):
"""Create a LSTM cell. See e.g.: http://arxiv.org/pdf/1402.1128v1.pdf
Note that in this formulation, we omit the various connections between the
previous state and the gates."""
input_gate = tf.sigmoid(tf.matmul(i, ix) + tf.matmul(o, im) + ib)
forget_gate = tf.sigmoid(tf.matmul(i, fx) + tf.matmul(o, fm) + fb)
update = tf.matmul(i, cx) + tf.matmul(o, cm) + cb
state = forget_gate * state + input_gate * tf.tanh(update)
output_gate = tf.sigmoid(tf.matmul(i, ox) + tf.matmul(o, om) + ob)
return output_gate * tf.tanh(state), state
# Input data.
train_data = list()
for _ in range(num_unrollings + 1):
train_data.append(
tf.placeholder(tf.float32, shape=[batch_size, vocabulary_size]))
train_inputs = train_data[:num_unrollings]
train_labels = train_data[1:] # labels are inputs shifted by one time step.
# Unrolled LSTM loop.
outputs = list()
output = saved_output
state = saved_state
for i in train_inputs:
output, state = lstm_cell(i, output, state)
outputs.append(output)
# State saving across unrollings.
with tf.control_dependencies([saved_output.assign(output),
saved_state.assign(state)]):
# Classifier.
logits = tf.nn.xw_plus_b(tf.concat(0, outputs), w, b)
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(
logits, tf.concat(0, train_labels)))
# Optimizer.
global_step = tf.Variable(0)
learning_rate = tf.train.exponential_decay(
10.0, global_step, 5000, 0.1, staircase=True)
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
gradients, v = zip(*optimizer.compute_gradients(loss))
gradients, _ = tf.clip_by_global_norm(gradients, 1.25)
optimizer = optimizer.apply_gradients(
zip(gradients, v), global_step=global_step)
# Predictions.
train_prediction = tf.nn.softmax(logits)
# Sampling and validation eval: batch 1, no unrolling.
sample_input = tf.placeholder(tf.float32, shape=[1, vocabulary_size])
saved_sample_output = tf.Variable(tf.zeros([1, num_nodes]))
saved_sample_state = tf.Variable(tf.zeros([1, num_nodes]))
reset_sample_state = tf.group(
saved_sample_output.assign(tf.zeros([1, num_nodes])),
saved_sample_state.assign(tf.zeros([1, num_nodes])))
sample_output
|
def logprob(predictions, labels):
# clip tiny or negative probabilities below so log() stays finite
|
random_line_split
|
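logprob() above is the mean negative log-likelihood of the true characters, and exponentiating it gives the perplexity tracked during training. A small worked example with values chosen purely for illustration:

import numpy as np

def logprob(predictions, labels):
    """Mean negative log-likelihood of the true labels (same formula as above)."""
    predictions = np.maximum(predictions, 1e-10)   # clip tiny values so log() stays finite
    return np.sum(labels * -np.log(predictions)) / labels.shape[0]

preds = np.array([[0.7, 0.2, 0.1],
                  [0.1, 0.8, 0.1]])
labels = np.array([[1., 0., 0.],
                   [0., 1., 0.]])
nll = logprob(preds, labels)          # (-ln 0.7 - ln 0.8) / 2 ≈ 0.290
print(round(float(np.exp(nll)), 3))   # perplexity ≈ 1.336
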
dataset.py
|
train_or_test, dataset_path):
dataset_file_name = dataset_name[6:] + '_%s_*.tfrecord'
if dataset_name == 'pascalvoc_2007':
train_test_sizes = {
'train': FLAGS.pascalvoc_2007_train_size,
'test': FLAGS.pascalvoc_2007_test_size,
}
elif dataset_name == 'pascalvoc_2012':
train_test_sizes = {
'train': FLAGS.pascalvoc_2012_train_size,
}
dataset_file_name = os.path.join(dataset_path, dataset_file_name % train_or_test)
reader = tf.TFRecordReader
decoder = slim.tfexample_decoder.TFExampleDecoder(self.features, self.items)
return slim.dataset.Dataset(
data_sources=dataset_file_name,
reader=reader,
decoder=decoder,
num_samples=train_test_sizes[train_or_test],
items_to_descriptions=self.items_descriptions,
num_classes=FLAGS.num_classes-1,
labels_to_names=None)
# This function gets groundtruth bboxes & labels from dataset
# Inputs:
# dataset
# train_or_test: train/test
# Output:
# image, ground-truth bboxes, ground-truth labels, ground-truth difficult objects
def get_groundtruth_from_dataset(self, dataset, train_or_test):
# Dataset provider
with tf.name_scope(None, "get_groundtruth_from_dataset") as scope:
if train_or_test == 'test':
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
num_readers=FLAGS.test_num_readers,
common_queue_capacity=FLAGS.test_common_queue_capacity,
common_queue_min=FLAGS.test_batch_size,
shuffle=FLAGS.test_shuffle)
elif train_or_test == 'train':
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
num_readers= FLAGS.train_num_readers,
common_queue_capacity= FLAGS.train_common_queue_capacity,
common_queue_min= 10 * FLAGS.train_batch_size,
shuffle=FLAGS.train_shuffle)
# Get images, groundtruth bboxes & groundtruth labels from database
[image, gt_bboxes, gt_labels] = provider.get(['image','gt_bboxes','gt_labels'])
# Discard difficult objects
gt_difficult_objects = tf.zeros(tf.shape(gt_labels), dtype=tf.int64)
if FLAGS.test_discard_difficult_objects:
[gt_difficult_objects] = provider.get(['difficult_objects'])
return [image, gt_bboxes, gt_labels, gt_difficult_objects]
##########################################
# Convert PascalVOC to TFRecords
# Process an image and annotation file.
# Inputs:
# filename: string, path to an image file e.g., '/path/to/example.JPG'.
# coder: instance of ImageCoder to provide TensorFlow image coding utils.
# Outputs:
# image_buffer: string, JPEG encoding of RGB image.
# height: integer, image height in pixels.
# width: integer, image width in pixels.
def _process_image_PascalVOC(self, directory, name):
# Read the image file.
filename = directory + DIRECTORY_IMAGES + name + '.jpg'
image_data = tf.gfile.FastGFile(filename, 'r').read()
# Read the XML annotation file.
filename = os.path.join(directory, DIRECTORY_ANNOTATIONS, name + '.xml')
tree = ET.parse(filename)
root = tree.getroot()
# Image shape.
size = root.find('size')
shape = [int(size.find('height').text), int(size.find('width').text), int(size.find('depth').text)]
# Find annotations.
bboxes = []
labels = []
labels_text = []
difficult = []
truncated = []
for obj in root.findall('object'):
label = obj.find('name').text
labels.append(int(VOC_LABELS[label][0]))
labels_text.append(label.encode('ascii'))
if obj.find('difficult'):
difficult.append(int(obj.find('difficult').text))
else:
difficult.append(0)
if obj.find('truncated'):
truncated.append(int(obj.find('truncated').text))
else:
truncated.append(0)
bbox = obj.find('bndbox')
bboxes.append((float(bbox.find('ymin').text) / shape[0],
float(bbox.find('xmin').text) / shape[1],
float(bbox.find('ymax').text) / shape[0],
float(bbox.find('xmax').text) / shape[1]
))
return image_data, shape, bboxes, labels, labels_text, difficult, truncated
# Build an Example proto for an image example.
# Args:
# image_data: string, JPEG encoding of RGB image;
# labels: list of integers, identifier for the ground truth;
# labels_text: list of strings, human-readable labels;
# bboxes: list of bounding boxes; each box is a list of integers;
# shape: 3 integers, image shapes in pixels.
# Returns:
# Example proto
def _convert_to_example_PascalVOC(self, image_data, labels, labels_text, bboxes, shape, difficult, truncated):
xmin = []
ymin = []
xmax = []
ymax = []
for b in bboxes:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([ymin, xmin, ymax, xmax], b)]
# pylint: enable=expression-not-assigned
image_format = b'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': self.int64_feature(shape[0]),
'image/width': self.int64_feature(shape[1]),
'image/channels': self.int64_feature(shape[2]),
'image/shape': self.int64_feature(shape),
'image/object/bbox/xmin': self.float_feature(xmin),
'image/object/bbox/xmax': self.float_feature(xmax),
'image/object/bbox/ymin': self.float_feature(ymin),
'image/object/bbox/ymax': self.float_feature(ymax),
'image/object/bbox/label': self.int64_feature(labels),
'image/object/bbox/label_text': self.bytes_feature(labels_text),
'image/object/bbox/difficult': self.int64_feature(difficult),
'image/object/bbox/truncated': self.int64_feature(truncated),
'image/format': self.bytes_feature(image_format),
'image/encoded': self.bytes_feature(image_data)}))
return example
# Loads data from image and annotations files and add them to a TFRecord.
# Inputs:
# dataset_dir: Dataset directory;
# name: Image name to add to the TFRecord;
# tfrecord_writer: The TFRecord writer to use for writing.
def _add_to_tfrecord_PascalVOC(self, dataset_dir, name, tfrecord_writer):
image_data, shape, bboxes, labels, labels_text, difficult, truncated = self._process_image_PascalVOC(dataset_dir, name)
example = self._convert_to_example_PascalVOC(image_data, labels, labels_text, bboxes, shape, difficult, truncated)
tfrecord_writer.write(example.SerializeToString())
def _get_output_filename_PascalVOC(output_dir, name, idx):
return '%s/%s_%03d.tfrecord' % (output_dir, name, idx)
# Convert images to tfrecords
# Args:
# dataset_dir: The dataset directory where the dataset is stored.
# output_dir: Output directory.
def run_PascalVOC(self, dataset_dir, output_dir, name='voc_train', shuffling=False):
if not tf.gfile.Exists(dataset_dir):
tf.gfile.MakeDirs(dataset_dir)
# Dataset filenames, and shuffling.
path = os.path.join(dataset_dir, DIRECTORY_ANNOTATIONS)
filenames = sorted(os.listdir(path))
if shuffling:
random.seed(RANDOM_SEED)
random.shuffle(filenames)
# Process dataset files.
i = 0
fidx = 0
while i < len(filenames):
# Open new TFRecord file.
tf_filename = self._get_output_filename(output_dir, name, fidx)
with tf.python_io.TFRecordWriter(tf_filename) as tfrecord_writer:
j = 0
while i < len(filenames) and j < SAMPLES_PER_FILES:
sys.stdout.write('\r>> Converting image %d/%d' % (i+1, len(filenames)))
sys.stdout.flush()
filename = filenames[i]
img_name = filename[:-4]
self._add_to_tfrecord_PascalVOC(dataset_dir, img_name, tfrecord_writer)
i += 1
j += 1
fidx += 1
print('\n ImageDB to TF conversion finished. ')
# Wrapper for inserting int64 features into Example proto.
def int64_feature(self, value):
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
# Wrapper for inserting float features into Example proto.
def
|
float_feature
|
identifier_name
|
|
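_process_image_PascalVOC above reads one JPEG plus its VOC XML annotation and normalizes every bounding box by the image height and width. A standalone ElementTree sketch of just the annotation side (the file path is hypothetical):

import xml.etree.ElementTree as ET

def parse_voc_annotation(xml_path):
    root = ET.parse(xml_path).getroot()
    size = root.find('size')
    h, w = int(size.find('height').text), int(size.find('width').text)
    boxes = []
    for obj in root.findall('object'):
        bb = obj.find('bndbox')
        # normalized (ymin, xmin, ymax, xmax), matching the tuple order used above
        boxes.append((float(bb.find('ymin').text) / h,
                      float(bb.find('xmin').text) / w,
                      float(bb.find('ymax').text) / h,
                      float(bb.find('xmax').text) / w))
    return boxes

# boxes = parse_voc_annotation('Annotations/000001.xml')   # hypothetical file
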
dataset.py
|
box/ymax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/label': tf.VarLenFeature(dtype=tf.int64),
'image/object/bbox/difficult': tf.VarLenFeature(dtype=tf.int64),
}
# Items in Pascal VOC TFRecords.
self.items = {
'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'),
'gt_bboxes': slim.tfexample_decoder.BoundingBox(['ymin','xmin','ymax','xmax'], 'image/object/bbox/'),
'gt_labels': slim.tfexample_decoder.Tensor('image/object/bbox/label'),
'difficult_objects': slim.tfexample_decoder.Tensor('image/object/bbox/difficult'),
}
# This function reads dataset from tfrecords
# Inputs:
# dataset_name: pascalvoc_2007
# train_or_test: test
# dataset_path: './tfrecords_test/'
# Outputs:
# loaded dataset
def read_dataset_from_tfrecords(self, dataset_name, train_or_test, dataset_path):
with tf.name_scope(None, "read_dataset_from_tfrecords") as scope:
if dataset_name == 'pascalvoc_2007' or dataset_name == 'pascalvoc_2012':
dataset = self.load_dataset(dataset_name, train_or_test, dataset_path)
return dataset
# This function is used to load pascalvoc2007 or pascalvoc2012 datasets
# Inputs:
# dataset_name: pascalvoc_2007
# train_or_test: test
# dataset_path: './tfrecords_test/'
# Output:
# loaded dataset
def load_dataset(self, dataset_name, train_or_test, dataset_path):
dataset_file_name = dataset_name[6:] + '_%s_*.tfrecord'
if dataset_name == 'pascalvoc_2007':
train_test_sizes = {
'train': FLAGS.pascalvoc_2007_train_size,
'test': FLAGS.pascalvoc_2007_test_size,
}
elif dataset_name == 'pascalvoc_2012':
train_test_sizes = {
'train': FLAGS.pascalvoc_2012_train_size,
}
dataset_file_name = os.path.join(dataset_path, dataset_file_name % train_or_test)
reader = tf.TFRecordReader
decoder = slim.tfexample_decoder.TFExampleDecoder(self.features, self.items)
return slim.dataset.Dataset(
data_sources=dataset_file_name,
reader=reader,
decoder=decoder,
num_samples=train_test_sizes[train_or_test],
items_to_descriptions=self.items_descriptions,
num_classes=FLAGS.num_classes-1,
labels_to_names=None)
# This function gets groundtruth bboxes & labels from dataset
# Inputs:
# dataset
# train_or_test: train/test
# Output:
# image, ground-truth bboxes, ground-truth labels, ground-truth difficult objects
def get_groundtruth_from_dataset(self, dataset, train_or_test):
# Dataset provider
with tf.name_scope(None, "get_groundtruth_from_dataset") as scope:
if train_or_test == 'test':
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
num_readers=FLAGS.test_num_readers,
common_queue_capacity=FLAGS.test_common_queue_capacity,
common_queue_min=FLAGS.test_batch_size,
shuffle=FLAGS.test_shuffle)
elif train_or_test == 'train':
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
num_readers= FLAGS.train_num_readers,
common_queue_capacity= FLAGS.train_common_queue_capacity,
common_queue_min= 10 * FLAGS.train_batch_size,
shuffle=FLAGS.train_shuffle)
# Get images, groundtruth bboxes & groundtruth labels from database
[image, gt_bboxes, gt_labels] = provider.get(['image','gt_bboxes','gt_labels'])
# Discard difficult objects
gt_difficult_objects = tf.zeros(tf.shape(gt_labels), dtype=tf.int64)
if FLAGS.test_discard_difficult_objects:
[gt_difficult_objects] = provider.get(['difficult_objects'])
return [image, gt_bboxes, gt_labels, gt_difficult_objects]
##########################################
# Convert PascalVOC to TFRecords
# Process an image and annotation file.
# Inputs:
# filename: string, path to an image file e.g., '/path/to/example.JPG'.
# coder: instance of ImageCoder to provide TensorFlow image coding utils.
# Outputs:
# image_buffer: string, JPEG encoding of RGB image.
# height: integer, image height in pixels.
# width: integer, image width in pixels.
def _process_image_PascalVOC(self, directory, name):
# Read the image file.
filename = directory + DIRECTORY_IMAGES + name + '.jpg'
image_data = tf.gfile.FastGFile(filename, 'r').read()
# Read the XML annotation file.
filename = os.path.join(directory, DIRECTORY_ANNOTATIONS, name + '.xml')
tree = ET.parse(filename)
root = tree.getroot()
# Image shape.
size = root.find('size')
shape = [int(size.find('height').text), int(size.find('width').text), int(size.find('depth').text)]
# Find annotations.
bboxes = []
labels = []
labels_text = []
difficult = []
truncated = []
for obj in root.findall('object'):
label = obj.find('name').text
labels.append(int(VOC_LABELS[label][0]))
labels_text.append(label.encode('ascii'))
if obj.find('difficult'):
difficult.append(int(obj.find('difficult').text))
else:
difficult.append(0)
if obj.find('truncated'):
truncated.append(int(obj.find('truncated').text))
else:
truncated.append(0)
bbox = obj.find('bndbox')
bboxes.append((float(bbox.find('ymin').text) / shape[0],
float(bbox.find('xmin').text) / shape[1],
float(bbox.find('ymax').text) / shape[0],
float(bbox.find('xmax').text) / shape[1]
))
return image_data, shape, bboxes, labels, labels_text, difficult, truncated
# Build an Example proto for an image example.
# Args:
# image_data: string, JPEG encoding of RGB image;
# labels: list of integers, identifier for the ground truth;
# labels_text: list of strings, human-readable labels;
# bboxes: list of bounding boxes; each box is a list of integers;
# shape: 3 integers, image shapes in pixels.
# Returns:
# Example proto
def _convert_to_example_PascalVOC(self, image_data, labels, labels_text, bboxes, shape, difficult, truncated):
xmin = []
ymin = []
xmax = []
ymax = []
for b in bboxes:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([ymin, xmin, ymax, xmax], b)]
# pylint: enable=expression-not-assigned
image_format = b'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': self.int64_feature(shape[0]),
'image/width': self.int64_feature(shape[1]),
'image/channels': self.int64_feature(shape[2]),
'image/shape': self.int64_feature(shape),
'image/object/bbox/xmin': self.float_feature(xmin),
'image/object/bbox/xmax': self.float_feature(xmax),
'image/object/bbox/ymin': self.float_feature(ymin),
'image/object/bbox/ymax': self.float_feature(ymax),
'image/object/bbox/label': self.int64_feature(labels),
'image/object/bbox/label_text': self.bytes_feature(labels_text),
'image/object/bbox/difficult': self.int64_feature(difficult),
'image/object/bbox/truncated': self.int64_feature(truncated),
'image/format': self.bytes_feature(image_format),
'image/encoded': self.bytes_feature(image_data)}))
return example
# Loads data from image and annotations files and add them to a TFRecord.
# Inputs:
# dataset_dir: Dataset directory;
# name: Image name to add to the TFRecord;
# tfrecord_writer: The TFRecord writer to use for writing.
def _add_to_tfrecord_PascalVOC(self, dataset_dir, name, tfrecord_writer):
image_data, shape, bboxes, labels, labels_text, difficult, truncated = self._process_image_PascalVOC(dataset_dir, name)
example = self._convert_to_example_PascalVOC(image_data, labels, labels_text, bboxes, shape, difficult, truncated)
tfrecord_writer.write(example.SerializeToString())
def _get_output_filename_PascalVOC(output_dir, name, idx):
|
return '%s/%s_%03d.tfrecord' % (output_dir, name, idx)
|
identifier_body
|
|
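_convert_to_example_PascalVOC above relies on int64_feature/float_feature/bytes_feature wrappers; only int64_feature is shown in full, and the other two presumably follow the same pattern with FloatList and BytesList. A sketch of all three under that assumption, with a tiny usage example:

import tensorflow as tf

def int64_feature(value):
    if not isinstance(value, list):
        value = [value]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=value))

def float_feature(value):   # assumed symmetric to int64_feature
    if not isinstance(value, list):
        value = [value]
    return tf.train.Feature(float_list=tf.train.FloatList(value=value))

def bytes_feature(value):   # assumed symmetric to int64_feature
    if not isinstance(value, list):
        value = [value]
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))

example = tf.train.Example(features=tf.train.Features(feature={
    'image/height': int64_feature(375),
    'image/object/bbox/xmin': float_feature([0.1, 0.4]),
    'image/format': bytes_feature(b'JPEG'),
}))
print(len(example.SerializeToString()) > 0)   # True
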
dataset.py
|
tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'),
'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/label': tf.VarLenFeature(dtype=tf.int64),
'image/object/bbox/difficult': tf.VarLenFeature(dtype=tf.int64),
}
# Items in Pascal VOC TFRecords.
self.items = {
'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'),
'gt_bboxes': slim.tfexample_decoder.BoundingBox(['ymin','xmin','ymax','xmax'], 'image/object/bbox/'),
'gt_labels': slim.tfexample_decoder.Tensor('image/object/bbox/label'),
'difficult_objects': slim.tfexample_decoder.Tensor('image/object/bbox/difficult'),
}
# This function reads dataset from tfrecords
# Inputs:
# dataset_name: pascalvoc_2007
# train_or_test: test
# dataset_path: './tfrecords_test/'
# Outputs:
# loaded dataset
def read_dataset_from_tfrecords(self, dataset_name, train_or_test, dataset_path):
|
if dataset_name == 'pascalvoc_2007' or dataset_name == 'pascalvoc_2012':
dataset = self.load_dataset(dataset_name, train_or_test, dataset_path)
return dataset
# This function is used to load pascalvoc2007 or pascalvoc2012 datasets
# Inputs:
# dataset_name: pascalvoc_2007
# train_or_test: test
# dataset_path: './tfrecords_test/'
# Output:
# loaded dataset
def load_dataset(self, dataset_name, train_or_test, dataset_path):
dataset_file_name = dataset_name[6:] + '_%s_*.tfrecord'
if dataset_name == 'pascalvoc_2007':
train_test_sizes = {
'train': FLAGS.pascalvoc_2007_train_size,
'test': FLAGS.pascalvoc_2007_test_size,
}
elif dataset_name == 'pascalvoc_2012':
train_test_sizes = {
'train': FLAGS.pascalvoc_2012_train_size,
}
dataset_file_name = os.path.join(dataset_path, dataset_file_name % train_or_test)
reader = tf.TFRecordReader
decoder = slim.tfexample_decoder.TFExampleDecoder(self.features, self.items)
return slim.dataset.Dataset(
data_sources=dataset_file_name,
reader=reader,
decoder=decoder,
num_samples=train_test_sizes[train_or_test],
items_to_descriptions=self.items_descriptions,
num_classes=FLAGS.num_classes-1,
labels_to_names=None)
# This function gets groundtruth bboxes & labels from dataset
# Inputs:
# dataset
# train_or_test: train/test
# Output:
# image, ground-truth bboxes, ground-truth labels, ground-truth difficult objects
def get_groundtruth_from_dataset(self, dataset, train_or_test):
# Dataset provider
with tf.name_scope(None, "get_groundtruth_from_dataset") as scope:
if train_or_test == 'test':
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
num_readers=FLAGS.test_num_readers,
common_queue_capacity=FLAGS.test_common_queue_capacity,
common_queue_min=FLAGS.test_batch_size,
shuffle=FLAGS.test_shuffle)
elif train_or_test == 'train':
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
num_readers= FLAGS.train_num_readers,
common_queue_capacity= FLAGS.train_common_queue_capacity,
common_queue_min= 10 * FLAGS.train_batch_size,
shuffle=FLAGS.train_shuffle)
# Get images, groundtruth bboxes & groundtruth labels from database
[image, gt_bboxes, gt_labels] = provider.get(['image','gt_bboxes','gt_labels'])
# Discard difficult objects
gt_difficult_objects = tf.zeros(tf.shape(gt_labels), dtype=tf.int64)
if FLAGS.test_discard_difficult_objects:
[gt_difficult_objects] = provider.get(['difficult_objects'])
return [image, gt_bboxes, gt_labels, gt_difficult_objects]
##########################################
# Convert PascalVOC to TFRecords
# Process an image and annotation file.
# Inputs:
# filename: string, path to an image file e.g., '/path/to/example.JPG'.
# coder: instance of ImageCoder to provide TensorFlow image coding utils.
# Outputs:
# image_buffer: string, JPEG encoding of RGB image.
# height: integer, image height in pixels.
# width: integer, image width in pixels.
def _process_image_PascalVOC(self, directory, name):
# Read the image file.
filename = directory + DIRECTORY_IMAGES + name + '.jpg'
image_data = tf.gfile.FastGFile(filename, 'r').read()
# Read the XML annotation file.
filename = os.path.join(directory, DIRECTORY_ANNOTATIONS, name + '.xml')
tree = ET.parse(filename)
root = tree.getroot()
# Image shape.
size = root.find('size')
shape = [int(size.find('height').text), int(size.find('width').text), int(size.find('depth').text)]
# Find annotations.
bboxes = []
labels = []
labels_text = []
difficult = []
truncated = []
for obj in root.findall('object'):
label = obj.find('name').text
labels.append(int(VOC_LABELS[label][0]))
labels_text.append(label.encode('ascii'))
if obj.find('difficult'):
difficult.append(int(obj.find('difficult').text))
else:
difficult.append(0)
if obj.find('truncated'):
truncated.append(int(obj.find('truncated').text))
else:
truncated.append(0)
bbox = obj.find('bndbox')
bboxes.append((float(bbox.find('ymin').text) / shape[0],
float(bbox.find('xmin').text) / shape[1],
float(bbox.find('ymax').text) / shape[0],
float(bbox.find('xmax').text) / shape[1]
))
return image_data, shape, bboxes, labels, labels_text, difficult, truncated
# Build an Example proto for an image example.
# Args:
# image_data: string, JPEG encoding of RGB image;
# labels: list of integers, identifier for the ground truth;
# labels_text: list of strings, human-readable labels;
# bboxes: list of bounding boxes; each box is a list of integers;
# shape: 3 integers, image shapes in pixels.
# Returns:
# Example proto
def _convert_to_example_PascalVOC(self, image_data, labels, labels_text, bboxes, shape, difficult, truncated):
xmin = []
ymin = []
xmax = []
ymax = []
for b in bboxes:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([ymin, xmin, ymax, xmax], b)]
# pylint: enable=expression-not-assigned
image_format = b'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': self.int64_feature(shape[0]),
'image/width': self.int64_feature(shape[1]),
'image/channels': self.int64_feature(shape[2]),
'image/shape': self.int64_feature(shape),
'image/object/bbox/xmin': self.float_feature(xmin),
'image/object/bbox/xmax': self.float_feature(xmax),
'image/object/bbox/ymin': self.float_feature(ymin),
'image/object/bbox/ymax': self.float_feature(ymax),
'image/object/bbox/label': self.int64_feature(labels),
'image/object/bbox/label_text': self.bytes_feature(labels_text),
'image/object/bbox/difficult': self.int64_feature(difficult),
'image/object/bbox/truncated': self.int64_feature(truncated),
'image/format': self.bytes_feature(image_format),
'image/encoded': self.bytes_feature(image_data)}))
return example
# Loads data from image and annotations files and add them to a TFRecord.
# Inputs:
# dataset_dir: Dataset directory;
# name: Image name to add to the TFRecord;
# tfrecord_writer: The TFRecord writer to use for writing.
def _add_to_tfrecord_PascalVOC(self, dataset_dir, name, tfrecord_writer):
image_data, shape, bboxes, labels, labels_text, difficult, truncated = self._process
|
with tf.name_scope(None, "read_dataset_from_tfrecords") as scope:
|
random_line_split
|
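run_PascalVOC above writes the converted examples into numbered .tfrecord shards, starting a new file every SAMPLES_PER_FILES images. The sharding loop, isolated into a small sketch (tf.python_io is the TF 1.x module the file already uses; the shard size and record list here are illustrative, the project's real constant is defined elsewhere):

import tensorflow as tf

SAMPLES_PER_FILES = 200   # illustrative shard size

def write_sharded(serialized_examples, output_dir, name='voc_train'):
    i, fidx = 0, 0
    while i < len(serialized_examples):
        tf_filename = '%s/%s_%03d.tfrecord' % (output_dir, name, fidx)
        with tf.python_io.TFRecordWriter(tf_filename) as writer:
            j = 0
            while i < len(serialized_examples) and j < SAMPLES_PER_FILES:
                writer.write(serialized_examples[i])   # each entry is a serialized tf.train.Example
                i += 1
                j += 1
        fidx += 1
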
dataset.py
|
tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'),
'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/label': tf.VarLenFeature(dtype=tf.int64),
'image/object/bbox/difficult': tf.VarLenFeature(dtype=tf.int64),
}
# Items in Pascal VOC TFRecords.
self.items = {
'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'),
'gt_bboxes': slim.tfexample_decoder.BoundingBox(['ymin','xmin','ymax','xmax'], 'image/object/bbox/'),
'gt_labels': slim.tfexample_decoder.Tensor('image/object/bbox/label'),
'difficult_objects': slim.tfexample_decoder.Tensor('image/object/bbox/difficult'),
}
# This function reads dataset from tfrecords
# Inputs:
# dataset_name: pascalvoc_2007
# train_or_test: test
# dataset_path: './tfrecords_test/'
# Outputs:
# loaded dataset
def read_dataset_from_tfrecords(self, dataset_name, train_or_test, dataset_path):
with tf.name_scope(None, "read_dataset_from_tfrecords") as scope:
if dataset_name == 'pascalvoc_2007' or dataset_name == 'pascalvoc_2012':
dataset = self.load_dataset(dataset_name, train_or_test, dataset_path)
return dataset
# This function is used to load pascalvoc2007 or pascalvoc2012 datasets
# Inputs:
# dataset_name: pascalvoc_2007
# train_or_test: test
# dataset_path: './tfrecords_test/'
# Output:
# loaded dataset
def load_dataset(self, dataset_name, train_or_test, dataset_path):
dataset_file_name = dataset_name[6:] + '_%s_*.tfrecord'
if dataset_name == 'pascalvoc_2007':
train_test_sizes = {
'train': FLAGS.pascalvoc_2007_train_size,
'test': FLAGS.pascalvoc_2007_test_size,
}
elif dataset_name == 'pascalvoc_2012':
|
dataset_file_name = os.path.join(dataset_path, dataset_file_name % train_or_test)
reader = tf.TFRecordReader
decoder = slim.tfexample_decoder.TFExampleDecoder(self.features, self.items)
return slim.dataset.Dataset(
data_sources=dataset_file_name,
reader=reader,
decoder=decoder,
num_samples=train_test_sizes[train_or_test],
items_to_descriptions=self.items_descriptions,
num_classes=FLAGS.num_classes-1,
labels_to_names=None)
# This function gets groundtruth bboxes & labels from dataset
# Inputs:
# dataset
# train_or_test: train/test
# Output:
# image, ground-truth bboxes, ground-truth labels, ground-truth difficult objects
def get_groundtruth_from_dataset(self, dataset, train_or_test):
# Dataset provider
with tf.name_scope(None, "get_groundtruth_from_dataset") as scope:
if train_or_test == 'test':
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
num_readers=FLAGS.test_num_readers,
common_queue_capacity=FLAGS.test_common_queue_capacity,
common_queue_min=FLAGS.test_batch_size,
shuffle=FLAGS.test_shuffle)
elif train_or_test == 'train':
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
num_readers= FLAGS.train_num_readers,
common_queue_capacity= FLAGS.train_common_queue_capacity,
common_queue_min= 10 * FLAGS.train_batch_size,
shuffle=FLAGS.train_shuffle)
# Get images, groundtruth bboxes & groundtruth labels from database
[image, gt_bboxes, gt_labels] = provider.get(['image','gt_bboxes','gt_labels'])
# Discard difficult objects
gt_difficult_objects = tf.zeros(tf.shape(gt_labels), dtype=tf.int64)
if FLAGS.test_discard_difficult_objects:
[gt_difficult_objects] = provider.get(['difficult_objects'])
return [image, gt_bboxes, gt_labels, gt_difficult_objects]
##########################################
# Convert PascalVOC to TFRecords
# Process an image and annotation file.
# Inputs:
# filename: string, path to an image file e.g., '/path/to/example.JPG'.
# coder: instance of ImageCoder to provide TensorFlow image coding utils.
# Outputs:
# image_buffer: string, JPEG encoding of RGB image.
# height: integer, image height in pixels.
# width: integer, image width in pixels.
def _process_image_PascalVOC(self, directory, name):
# Read the image file.
filename = directory + DIRECTORY_IMAGES + name + '.jpg'
image_data = tf.gfile.FastGFile(filename, 'r').read()
# Read the XML annotation file.
filename = os.path.join(directory, DIRECTORY_ANNOTATIONS, name + '.xml')
tree = ET.parse(filename)
root = tree.getroot()
# Image shape.
size = root.find('size')
shape = [int(size.find('height').text), int(size.find('width').text), int(size.find('depth').text)]
# Find annotations.
bboxes = []
labels = []
labels_text = []
difficult = []
truncated = []
for obj in root.findall('object'):
label = obj.find('name').text
labels.append(int(VOC_LABELS[label][0]))
labels_text.append(label.encode('ascii'))
if obj.find('difficult'):
difficult.append(int(obj.find('difficult').text))
else:
difficult.append(0)
if obj.find('truncated'):
truncated.append(int(obj.find('truncated').text))
else:
truncated.append(0)
bbox = obj.find('bndbox')
bboxes.append((float(bbox.find('ymin').text) / shape[0],
float(bbox.find('xmin').text) / shape[1],
float(bbox.find('ymax').text) / shape[0],
float(bbox.find('xmax').text) / shape[1]
))
return image_data, shape, bboxes, labels, labels_text, difficult, truncated
# Build an Example proto for an image example.
# Args:
# image_data: string, JPEG encoding of RGB image;
# labels: list of integers, identifier for the ground truth;
# labels_text: list of strings, human-readable labels;
# bboxes: list of bounding boxes; each box is a list of integers;
# shape: 3 integers, image shapes in pixels.
# Returns:
# Example proto
def _convert_to_example_PascalVOC(self, image_data, labels, labels_text, bboxes, shape, difficult, truncated):
xmin = []
ymin = []
xmax = []
ymax = []
for b in bboxes:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([ymin, xmin, ymax, xmax], b)]
# pylint: enable=expression-not-assigned
image_format = b'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': self.int64_feature(shape[0]),
'image/width': self.int64_feature(shape[1]),
'image/channels': self.int64_feature(shape[2]),
'image/shape': self.int64_feature(shape),
'image/object/bbox/xmin': self.float_feature(xmin),
'image/object/bbox/xmax': self.float_feature(xmax),
'image/object/bbox/ymin': self.float_feature(ymin),
'image/object/bbox/ymax': self.float_feature(ymax),
'image/object/bbox/label': self.int64_feature(labels),
'image/object/bbox/label_text': self.bytes_feature(labels_text),
'image/object/bbox/difficult': self.int64_feature(difficult),
'image/object/bbox/truncated': self.int64_feature(truncated),
'image/format': self.bytes_feature(image_format),
'image/encoded': self.bytes_feature(image_data)}))
return example
# Loads data from image and annotations files and add them to a TFRecord.
# Inputs:
# dataset_dir: Dataset directory;
# name: Image name to add to the TFRecord;
# tfrecord_writer: The TFRecord writer to use for writing.
def _add_to_tfrecord_PascalVOC(self, dataset_dir, name, tfrecord_writer):
image_data, shape, bboxes, labels, labels_text, difficult, truncated = self._process
|
train_test_sizes = {
'train': FLAGS.pascalvoc_2012_train_size,
}
|
conditional_block
|
exec.rs
|
binary.hash().as_str(), compiler);
#[cfg(not(feature = "sys"))]
let module = compiled_modules.get_compiled_module(binary.hash().as_str(), compiler);
let module = match (module, binary.entry.as_ref()) {
(Some(a), _) => a,
(None, Some(entry)) => {
let module = Module::new(&store, &entry[..]).map_err(|err| {
error!(
"failed to compile module [{}, len={}] - {}",
name,
entry.len(),
err
);
VirtualBusError::CompileError
});
if module.is_err() {
env.cleanup(Some(Errno::Noexec as ExitCode));
}
let module = module?;
compiled_modules.set_compiled_module(binary.hash().as_str(), compiler, &module);
module
}
(None, None) => {
error!("package has no entry [{}]", name,);
env.cleanup(Some(Errno::Noexec as ExitCode));
return Err(VirtualBusError::CompileError);
}
};
// If the file system has not already been union'ed then do so
env.state.fs.conditional_union(&binary);
// Now run the module
let mut ret = spawn_exec_module(module, store, env, runtime);
if let Ok(ret) = ret.as_mut() {
ret.module_memory_footprint = binary.module_memory_footprint;
ret.file_system_memory_footprint = binary.file_system_memory_footprint;
}
ret
}
pub fn spawn_exec_module(
module: Module,
store: Store,
env: WasiEnv,
runtime: &Arc<dyn WasiRuntime + Send + Sync + 'static>,
) -> Result<BusSpawnedProcess, VirtualBusError> {
// Create a new task manager
let tasks = runtime.task_manager();
// Create the signaler
let pid = env.pid();
let signaler = Box::new(env.process.clone());
// Now run the binary
let (exit_code_tx, exit_code_rx) = mpsc::unbounded_channel();
{
// Determine if shared memory needs to be created and imported
let shared_memory = module.imports().memories().next().map(|a| *a.ty());
// Decide whether to create the memory here and import it, or let the instance create its own
let memory_spawn = match shared_memory {
Some(ty) => {
#[cfg(feature = "sys")]
let style = store.tunables().memory_style(&ty);
SpawnType::CreateWithType(SpawnedMemory {
ty,
#[cfg(feature = "sys")]
style,
})
}
None => SpawnType::Create,
};
// Create a thread that will run this process
let runtime = runtime.clone();
let tasks_outer = tasks.clone();
let task = {
let spawn_type = memory_spawn;
let mut store = store;
move || {
// Create the WasiFunctionEnv
let mut wasi_env = env;
wasi_env.runtime = runtime;
let memory = match wasi_env.tasks().build_memory(spawn_type) {
Ok(m) => m,
Err(err) => {
error!("wasi[{}]::wasm could not build memory error ({})", pid, err);
wasi_env.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
};
let mut wasi_env = WasiFunctionEnv::new(&mut store, wasi_env);
// Let's instantiate the module with the imports.
let (mut import_object, init) =
import_object_for_all_wasi_versions(&module, &mut store, &wasi_env.env);
if let Some(memory) = memory {
import_object.define(
"env",
"memory",
Memory::new_from_existing(&mut store, memory),
);
}
let instance = match Instance::new(&mut store, &module, &import_object) {
Ok(a) => a,
Err(err) => {
error!("wasi[{}]::wasm instantiate error ({})", pid, err);
wasi_env
.data(&store)
.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
};
init(&instance, &store).unwrap();
// Initialize the WASI environment
if let Err(err) = wasi_env.initialize(&mut store, instance.clone()) {
error!("wasi[{}]::wasi initialize error ({})", pid, err);
wasi_env
.data(&store)
.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
// If this module exports an _initialize function, run that first.
if let Ok(initialize) = instance.exports.get_function("_initialize") {
if let Err(e) = initialize.call(&mut store, &[]) {
let code = match e.downcast::<WasiError>() {
Ok(WasiError::Exit(code)) => code as ExitCode,
Ok(WasiError::UnknownWasiVersion) => {
debug!("wasi[{}]::exec-failed: unknown wasi version", pid);
Errno::Noexec as ExitCode
}
Err(err) => {
debug!("wasi[{}]::exec-failed: runtime error - {}", pid, err);
Errno::Noexec as ExitCode
}
};
let _ = exit_code_tx.send(code);
wasi_env
.data(&store)
.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
}
// Let's call the `_start` function, which is our `main` function in Rust.
let start = instance.exports.get_function("_start").ok();
// If there is a start function
debug!("wasi[{}]::called main()", pid);
// TODO: rewrite to use crate::run_wasi_func
let ret = if let Some(start) = start {
match start.call(&mut store, &[]) {
Ok(_) => 0,
Err(e) => match e.downcast::<WasiError>() {
Ok(WasiError::Exit(code)) => code,
Ok(WasiError::UnknownWasiVersion) => {
debug!("wasi[{}]::exec-failed: unknown wasi version", pid);
Errno::Noexec as u32
}
Err(err) => {
debug!("wasi[{}]::exec-failed: runtime error - {}", pid, err);
9999u32
}
},
}
} else {
debug!("wasi[{}]::exec-failed: missing _start function", pid);
Errno::Noexec as u32
};
debug!("wasi[{}]::main() has exited with {}", pid, ret);
// Cleanup the environment
wasi_env.data(&store).cleanup(Some(ret));
// Send the result
let _ = exit_code_tx.send(ret);
drop(exit_code_tx);
}
};
// TODO: handle this better - required because of Module not being Send.
#[cfg(feature = "js")]
let task = {
struct UnsafeWrapper {
inner: Box<dyn FnOnce() + 'static>,
}
unsafe impl Send for UnsafeWrapper {}
let inner = UnsafeWrapper {
inner: Box::new(task),
};
move || {
(inner.inner)();
}
};
tasks_outer.task_wasm(Box::new(task)).map_err(|err| {
error!("wasi[{}]::failed to launch module - {}", pid, err);
VirtualBusError::UnknownError
})?
};
let inst = Box::new(SpawnedProcess {
exit_code: Mutex::new(None),
exit_code_rx: Mutex::new(exit_code_rx),
});
Ok(BusSpawnedProcess {
inst,
stdin: None,
stdout: None,
stderr: None,
signaler: Some(signaler),
module_memory_footprint: 0,
file_system_memory_footprint: 0,
})
}
impl BinFactory {
pub fn spawn<'a>(
&'a self,
name: String,
store: Store,
env: WasiEnv,
) -> Pin<Box<dyn Future<Output = Result<BusSpawnedProcess, VirtualBusError>> + 'a>> {
Box::pin(async move {
// Find the binary (or die trying) and make the spawn type
let binary = self
.get_binary(name.as_str(), Some(env.fs_root()))
.await
.ok_or(VirtualBusError::NotFound);
if binary.is_err() {
env.cleanup(Some(Errno::Noent as ExitCode));
}
let binary = binary?;
// Execute
spawn_exec(
binary,
name.as_str(),
store,
env,
&self.runtime,
&self.cache,
)
})
}
pub fn try_built_in(
&self,
name: String,
parent_ctx: Option<&FunctionEnvMut<'_, WasiEnv>>,
store: &mut Option<Store>,
builder: &mut Option<WasiEnv>,
) -> Result<BusSpawnedProcess, VirtualBusError> {
// We check for built in commands
if let Some(parent_ctx) = parent_ctx
|
{
if self.commands.exists(name.as_str()) {
return self
.commands
.exec(parent_ctx, name.as_str(), store, builder);
}
}
|
conditional_block
|
|
exec.rs
|
BusSpawnedProcess, VirtualBusError> {
// Load the module
#[cfg(feature = "sys")]
let compiler = store.engine().name();
#[cfg(not(feature = "sys"))]
let compiler = "generic";
#[cfg(feature = "sys")]
let module = compiled_modules.get_compiled_module(&store, binary.hash().as_str(), compiler);
#[cfg(not(feature = "sys"))]
let module = compiled_modules.get_compiled_module(binary.hash().as_str(), compiler);
let module = match (module, binary.entry.as_ref()) {
(Some(a), _) => a,
(None, Some(entry)) => {
let module = Module::new(&store, &entry[..]).map_err(|err| {
error!(
"failed to compile module [{}, len={}] - {}",
name,
entry.len(),
err
);
VirtualBusError::CompileError
});
if module.is_err() {
env.cleanup(Some(Errno::Noexec as ExitCode));
}
let module = module?;
compiled_modules.set_compiled_module(binary.hash().as_str(), compiler, &module);
module
}
(None, None) => {
error!("package has no entry [{}]", name,);
env.cleanup(Some(Errno::Noexec as ExitCode));
return Err(VirtualBusError::CompileError);
}
};
// If the file system has not already been union'ed then do so
env.state.fs.conditional_union(&binary);
// Now run the module
let mut ret = spawn_exec_module(module, store, env, runtime);
if let Ok(ret) = ret.as_mut() {
ret.module_memory_footprint = binary.module_memory_footprint;
ret.file_system_memory_footprint = binary.file_system_memory_footprint;
}
ret
}
pub fn
|
(
module: Module,
store: Store,
env: WasiEnv,
runtime: &Arc<dyn WasiRuntime + Send + Sync + 'static>,
) -> Result<BusSpawnedProcess, VirtualBusError> {
// Create a new task manager
let tasks = runtime.task_manager();
// Create the signaler
let pid = env.pid();
let signaler = Box::new(env.process.clone());
// Now run the binary
let (exit_code_tx, exit_code_rx) = mpsc::unbounded_channel();
{
// Determine if shared memory needs to be created and imported
let shared_memory = module.imports().memories().next().map(|a| *a.ty());
// Determine if we are going to create memory and import it, or just rely on the module creating its own memory
let memory_spawn = match shared_memory {
Some(ty) => {
#[cfg(feature = "sys")]
let style = store.tunables().memory_style(&ty);
SpawnType::CreateWithType(SpawnedMemory {
ty,
#[cfg(feature = "sys")]
style,
})
}
None => SpawnType::Create,
};
// Create a thread that will run this process
let runtime = runtime.clone();
let tasks_outer = tasks.clone();
let task = {
let spawn_type = memory_spawn;
let mut store = store;
move || {
// Create the WasiFunctionEnv
let mut wasi_env = env;
wasi_env.runtime = runtime;
let memory = match wasi_env.tasks().build_memory(spawn_type) {
Ok(m) => m,
Err(err) => {
error!("wasi[{}]::wasm could not build memory error ({})", pid, err);
wasi_env.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
};
let mut wasi_env = WasiFunctionEnv::new(&mut store, wasi_env);
// Let's instantiate the module with the imports.
let (mut import_object, init) =
import_object_for_all_wasi_versions(&module, &mut store, &wasi_env.env);
if let Some(memory) = memory {
import_object.define(
"env",
"memory",
Memory::new_from_existing(&mut store, memory),
);
}
let instance = match Instance::new(&mut store, &module, &import_object) {
Ok(a) => a,
Err(err) => {
error!("wasi[{}]::wasm instantiate error ({})", pid, err);
wasi_env
.data(&store)
.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
};
init(&instance, &store).unwrap();
// Initialize the WASI environment
if let Err(err) = wasi_env.initialize(&mut store, instance.clone()) {
error!("wasi[{}]::wasi initialize error ({})", pid, err);
wasi_env
.data(&store)
.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
// If this module exports an _initialize function, run that first.
if let Ok(initialize) = instance.exports.get_function("_initialize") {
if let Err(e) = initialize.call(&mut store, &[]) {
let code = match e.downcast::<WasiError>() {
Ok(WasiError::Exit(code)) => code as ExitCode,
Ok(WasiError::UnknownWasiVersion) => {
debug!("wasi[{}]::exec-failed: unknown wasi version", pid);
Errno::Noexec as ExitCode
}
Err(err) => {
debug!("wasi[{}]::exec-failed: runtime error - {}", pid, err);
Errno::Noexec as ExitCode
}
};
let _ = exit_code_tx.send(code);
wasi_env
.data(&store)
.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
}
// Let's call the `_start` function, which is our `main` function in Rust.
let start = instance.exports.get_function("_start").ok();
// If there is a start function
debug!("wasi[{}]::called main()", pid);
// TODO: rewrite to use crate::run_wasi_func
let ret = if let Some(start) = start {
match start.call(&mut store, &[]) {
Ok(_) => 0,
Err(e) => match e.downcast::<WasiError>() {
Ok(WasiError::Exit(code)) => code,
Ok(WasiError::UnknownWasiVersion) => {
debug!("wasi[{}]::exec-failed: unknown wasi version", pid);
Errno::Noexec as u32
}
Err(err) => {
debug!("wasi[{}]::exec-failed: runtime error - {}", pid, err);
9999u32
}
},
}
} else {
debug!("wasi[{}]::exec-failed: missing _start function", pid);
Errno::Noexec as u32
};
debug!("wasi[{}]::main() has exited with {}", pid, ret);
// Cleanup the environment
wasi_env.data(&store).cleanup(Some(ret));
// Send the result
let _ = exit_code_tx.send(ret);
drop(exit_code_tx);
}
};
// TODO: handle this better - required because of Module not being Send.
#[cfg(feature = "js")]
let task = {
struct UnsafeWrapper {
inner: Box<dyn FnOnce() + 'static>,
}
unsafe impl Send for UnsafeWrapper {}
let inner = UnsafeWrapper {
inner: Box::new(task),
};
move || {
(inner.inner)();
}
};
tasks_outer.task_wasm(Box::new(task)).map_err(|err| {
error!("wasi[{}]::failed to launch module - {}", pid, err);
VirtualBusError::UnknownError
})?
};
let inst = Box::new(SpawnedProcess {
exit_code: Mutex::new(None),
exit_code_rx: Mutex::new(exit_code_rx),
});
Ok(BusSpawnedProcess {
inst,
stdin: None,
stdout: None,
stderr: None,
signaler: Some(signaler),
module_memory_footprint: 0,
file_system_memory_footprint: 0,
})
}
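// Note on the exit-code plumbing in spawn_exec_module (descriptive only): the
// spawned task owns `exit_code_tx` and sends a code on the `_initialize`-failure
// and normal-exit paths; on earlier failures (memory build, instantiation or
// WASI initialization errors) it simply returns, dropping the sender so the
// receiver observes the channel closing instead of a value. A caller holding
// the returned BusSpawnedProcess would typically wait on something like
//
//     let code = inst.exit_code_rx.lock().unwrap().recv().await;
//
// (the exact locking/await shape depends on the surrounding runtime and is an
// assumption here, not code from this crate).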
impl BinFactory {
pub fn spawn<'a>(
&'a self,
name: String,
store: Store,
env: WasiEnv,
) -> Pin<Box<dyn Future<Output = Result<BusSpawnedProcess, VirtualBusError>> + 'a>> {
Box::pin(async move {
// Find the binary (or die trying) and make the spawn type
let binary = self
.get_binary(name.as_str(), Some(env.fs_root()))
.await
.ok_or(VirtualBusError::NotFound);
if binary.is_err() {
env.cleanup(Some(Errno::Noent as ExitCode));
}
let binary = binary?;
// Execute
spawn_exec(
binary,
name.as_str(),
store,
env,
&self.runtime,
&self.cache,
)
})
}
pub fn try_built_in(
&self,
name: String,
parent_ctx: Option<&FunctionEnvMut<'_, WasiEnv>>,
store: &mut Option<Store>,
builder: &mut Option<WasiEnv>,
|
spawn_exec_module
|
identifier_name
|
exec.rs
|
BusSpawnedProcess, VirtualBusError> {
// Load the module
#[cfg(feature = "sys")]
let compiler = store.engine().name();
#[cfg(not(feature = "sys"))]
let compiler = "generic";
#[cfg(feature = "sys")]
let module = compiled_modules.get_compiled_module(&store, binary.hash().as_str(), compiler);
#[cfg(not(feature = "sys"))]
let module = compiled_modules.get_compiled_module(binary.hash().as_str(), compiler);
let module = match (module, binary.entry.as_ref()) {
(Some(a), _) => a,
(None, Some(entry)) => {
let module = Module::new(&store, &entry[..]).map_err(|err| {
error!(
"failed to compile module [{}, len={}] - {}",
name,
entry.len(),
err
);
VirtualBusError::CompileError
});
if module.is_err() {
env.cleanup(Some(Errno::Noexec as ExitCode));
}
let module = module?;
compiled_modules.set_compiled_module(binary.hash().as_str(), compiler, &module);
module
}
(None, None) => {
error!("package has no entry [{}]", name,);
env.cleanup(Some(Errno::Noexec as ExitCode));
return Err(VirtualBusError::CompileError);
}
};
// If the file system has not already been unioned then do so
env.state.fs.conditional_union(&binary);
// Now run the module
let mut ret = spawn_exec_module(module, store, env, runtime);
if let Ok(ret) = ret.as_mut() {
ret.module_memory_footprint = binary.module_memory_footprint;
ret.file_system_memory_footprint = binary.file_system_memory_footprint;
}
ret
}
pub fn spawn_exec_module(
module: Module,
store: Store,
env: WasiEnv,
runtime: &Arc<dyn WasiRuntime + Send + Sync + 'static>,
) -> Result<BusSpawnedProcess, VirtualBusError> {
// Create a new task manager
let tasks = runtime.task_manager();
// Create the signaler
let pid = env.pid();
let signaler = Box::new(env.process.clone());
// Now run the binary
let (exit_code_tx, exit_code_rx) = mpsc::unbounded_channel();
{
// Determine if shared memory needs to be created and imported
let shared_memory = module.imports().memories().next().map(|a| *a.ty());
// Determine if we are going to create memory and import it, or just rely on the module creating its own memory
let memory_spawn = match shared_memory {
Some(ty) => {
#[cfg(feature = "sys")]
let style = store.tunables().memory_style(&ty);
SpawnType::CreateWithType(SpawnedMemory {
ty,
#[cfg(feature = "sys")]
style,
})
}
None => SpawnType::Create,
};
// Create a thread that will run this process
let runtime = runtime.clone();
let tasks_outer = tasks.clone();
let task = {
let spawn_type = memory_spawn;
let mut store = store;
move || {
// Create the WasiFunctionEnv
let mut wasi_env = env;
wasi_env.runtime = runtime;
let memory = match wasi_env.tasks().build_memory(spawn_type) {
Ok(m) => m,
Err(err) => {
error!("wasi[{}]::wasm could not build memory error ({})", pid, err);
wasi_env.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
};
let mut wasi_env = WasiFunctionEnv::new(&mut store, wasi_env);
// Let's instantiate the module with the imports.
let (mut import_object, init) =
import_object_for_all_wasi_versions(&module, &mut store, &wasi_env.env);
if let Some(memory) = memory {
import_object.define(
"env",
"memory",
Memory::new_from_existing(&mut store, memory),
);
}
let instance = match Instance::new(&mut store, &module, &import_object) {
Ok(a) => a,
Err(err) => {
error!("wasi[{}]::wasm instantiate error ({})", pid, err);
wasi_env
.data(&store)
.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
};
init(&instance, &store).unwrap();
// Initialize the WASI environment
if let Err(err) = wasi_env.initialize(&mut store, instance.clone()) {
error!("wasi[{}]::wasi initialize error ({})", pid, err);
wasi_env
.data(&store)
.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
// If this module exports an _initialize function, run that first.
if let Ok(initialize) = instance.exports.get_function("_initialize") {
if let Err(e) = initialize.call(&mut store, &[]) {
let code = match e.downcast::<WasiError>() {
Ok(WasiError::Exit(code)) => code as ExitCode,
Ok(WasiError::UnknownWasiVersion) => {
debug!("wasi[{}]::exec-failed: unknown wasi version", pid);
Errno::Noexec as ExitCode
}
Err(err) => {
debug!("wasi[{}]::exec-failed: runtime error - {}", pid, err);
Errno::Noexec as ExitCode
}
};
let _ = exit_code_tx.send(code);
wasi_env
.data(&store)
.cleanup(Some(Errno::Noexec as ExitCode));
return;
}
}
// Let's call the `_start` function, which is our `main` function in Rust.
let start = instance.exports.get_function("_start").ok();
// If there is a start function
debug!("wasi[{}]::called main()", pid);
// TODO: rewrite to use crate::run_wasi_func
let ret = if let Some(start) = start {
match start.call(&mut store, &[]) {
Ok(_) => 0,
Err(e) => match e.downcast::<WasiError>() {
Ok(WasiError::Exit(code)) => code,
Ok(WasiError::UnknownWasiVersion) => {
debug!("wasi[{}]::exec-failed: unknown wasi version", pid);
Errno::Noexec as u32
}
Err(err) => {
debug!("wasi[{}]::exec-failed: runtime error - {}", pid, err);
9999u32
}
},
}
} else {
debug!("wasi[{}]::exec-failed: missing _start function", pid);
Errno::Noexec as u32
};
debug!("wasi[{}]::main() has exited with {}", pid, ret);
// Cleanup the environment
wasi_env.data(&store).cleanup(Some(ret));
// Send the result
let _ = exit_code_tx.send(ret);
drop(exit_code_tx);
}
};
// TODO: handle this better - required because of Module not being Send.
#[cfg(feature = "js")]
let task = {
|
unsafe impl Send for UnsafeWrapper {}
let inner = UnsafeWrapper {
inner: Box::new(task),
};
move || {
(inner.inner)();
}
};
tasks_outer.task_wasm(Box::new(task)).map_err(|err| {
error!("wasi[{}]::failed to launch module - {}", pid, err);
VirtualBusError::UnknownError
})?
};
let inst = Box::new(SpawnedProcess {
exit_code: Mutex::new(None),
exit_code_rx: Mutex::new(exit_code_rx),
});
Ok(BusSpawnedProcess {
inst,
stdin: None,
stdout: None,
stderr: None,
signaler: Some(signaler),
module_memory_footprint: 0,
file_system_memory_footprint: 0,
})
}
impl BinFactory {
pub fn spawn<'a>(
&'a self,
name: String,
store: Store,
env: WasiEnv,
) -> Pin<Box<dyn Future<Output = Result<BusSpawnedProcess, VirtualBusError>> + 'a>> {
Box::pin(async move {
// Find the binary (or die trying) and make the spawn type
let binary = self
.get_binary(name.as_str(), Some(env.fs_root()))
.await
.ok_or(VirtualBusError::NotFound);
if binary.is_err() {
env.cleanup(Some(Errno::Noent as ExitCode));
}
let binary = binary?;
// Execute
spawn_exec(
binary,
name.as_str(),
store,
env,
&self.runtime,
&self.cache,
)
})
}
pub fn try_built_in(
&self,
name: String,
parent_ctx: Option<&FunctionEnvMut<'_, WasiEnv>>,
store: &mut Option<Store>,
builder: &mut Option<WasiEnv>,
|
struct UnsafeWrapper {
inner: Box<dyn FnOnce() + 'static>,
}
|
random_line_split
|
lib.rs
|
#[cfg(feature = "std")]
use std::fmt::Debug;
use sp_std::prelude::*;
pub mod abi;
pub mod contract_metadata;
pub mod gateway_inbound_protocol;
pub mod transfers;
pub use gateway_inbound_protocol::GatewayInboundProtocol;
pub type ChainId = [u8; 4];
#[derive(Clone, Eq, PartialEq, PartialOrd, Ord, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum GatewayType {
ProgrammableInternal(u32),
ProgrammableExternal(u32),
TxOnly(u32),
}
impl GatewayType {
pub fn fetch_nonce(self) -> u32 {
match self {
Self::ProgrammableInternal(nonce) => nonce,
Self::ProgrammableExternal(nonce) => nonce,
Self::TxOnly(nonce) => nonce,
}
}
}
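// Illustrative only (not part of the original source): `fetch_nonce` simply
// unwraps the nonce carried by whichever variant is at hand, e.g.
//
//     assert_eq!(GatewayType::TxOnly(7).fetch_nonce(), 7);
//     assert_eq!(GatewayType::ProgrammableInternal(1).fetch_nonce(), 1);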
#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum GatewayVendor {
Substrate,
}
#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
/// Structure used at gateway registration as a starting point for multi-finality-verifier
pub struct GenericPrimitivesHeader {
pub parent_hash: Option<sp_core::hash::H256>,
pub number: u64,
pub state_root: Option<sp_core::hash::H256>,
pub extrinsics_root: Option<sp_core::hash::H256>,
pub digest: Option<sp_runtime::generic::Digest<sp_core::hash::H256>>,
}
#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct GatewayPointer {
pub id: ChainId,
pub vendor: GatewayVendor,
pub gateway_type: GatewayType,
}
#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct GatewayGenesisConfig {
/// SCALE-encoded modules following the format of selected frame_metadata::RuntimeMetadataVXX
pub modules_encoded: Option<Vec<u8>>,
/// SCALE-encoded signed extension - see more at frame_metadata::ExtrinsicMetadata
pub signed_extension: Option<Vec<u8>>,
/// Runtime version
pub runtime_version: sp_version::RuntimeVersion,
/// Extrinsics version
pub extrinsics_version: u8,
/// Genesis hash - block id of the genesis block, used to distinguish the network and sign messages.
/// Length depends on the parameter passed in abi::GatewayABIConfig
pub genesis_hash: Vec<u8>,
}
impl Default for GatewayGenesisConfig {
fn default() -> Self {
Self {
extrinsics_version: 0,
runtime_version: Default::default(),
genesis_hash: vec![],
modules_encoded: None,
signed_extension: None,
}
}
}
/// A struct that encodes RPC parameters required for a call to a smart-contract.
#[derive(Eq, PartialEq, Encode, Decode, Debug, Clone, Default)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct Compose<Account, Balance> {
pub name: Vec<u8>,
pub code_txt: Vec<u8>,
pub exec_type: Vec<u8>,
pub dest: Account,
pub value: Balance,
pub bytes: Vec<u8>,
pub input_data: Vec<u8>,
}
/// A result type of a get storage call.
pub type FetchContractsResult = Result<Vec<u8>, ContractAccessError>;
pub type RegistryContractId<T> = <T as frame_system::Config>::Hash;
/// A result of execution of a contract.
#[derive(Eq, PartialEq, Encode, Decode, Debug, Clone)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum ComposableExecResult {
/// The contract returned successfully.
///
/// There is a status code and, optionally, some data returned by the contract.
Success {
/// Flags that the contract passed along on returning to alter its exit behaviour.
/// Described in `pallet_contracts::exec::ReturnFlags`.
flags: u32,
/// Output data returned by the contract.
///
/// Can be empty.
data: Vec<u8>,
/// How much gas was consumed by the call.
gas_consumed: u64,
},
/// The contract execution either trapped or returned an error.
Error,
}
/// The possible errors that can happen querying the storage of a contract.
#[derive(Eq, PartialEq, Encode, Decode, Debug, Clone)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum ContractAccessError {
/// The given address doesn't point to a contract.
DoesntExist,
/// The specified contract is a tombstone and thus cannot have any storage.
IsTombstone,
}
#[derive(Eq, PartialEq, Encode, Decode, Debug, Clone, Default)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct ExecPhase<Account, Balance> {
pub steps: Vec<ExecStep<Account, Balance>>,
}
#[derive(Eq, PartialEq, Encode, Decode, Debug, Clone, Default)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct ExecStep<Account, Balance> {
pub compose: Compose<Account, Balance>,
}
pub type GenericAddress = sp_runtime::MultiAddress<sp_runtime::AccountId32, ()>;
#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct InterExecSchedule<Account, Balance> {
pub phases: Vec<ExecPhase<Account, Balance>>,
}
pub trait EscrowTrait: frame_system::Config + pallet_sudo::Config {
type Currency: Currency<Self::AccountId>;
type Time: Time;
}
type Bytes = Vec<u8>;
/// Outbound Step that specifies the expected transmission medium for relayers connecting with that gateway.
/// Request message format whose derivatives can be made compatible with a JSON-RPC API,
/// with either a signed or unsigned payload, or a custom transmission medium like the XCMP protocol
#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct CircuitOutboundMessage {
/// Message name/identifier
pub name: Bytes,
/// Module/pallet name
pub module_name: Bytes,
/// Method name
pub method_name: Bytes,
/// Encoded sender's public key
pub sender: Option<Bytes>,
/// Encoded target's public key
pub target: Option<Bytes>,
/// Array of next arguments: encoded bytes of the arguments that the JSON-RPC API expects
pub arguments: Vec<Bytes>,
/// Expected results
pub expected_output: Vec<GatewayExpectedOutput>,
/// Extra payload in case the message is signed or uses custom delivery protocols like XCMP
pub extra_payload: Option<ExtraMessagePayload>,
}
#[derive(Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct RpcPayloadUnsigned<'a> {
pub method_name: &'a str,
pub params: Vec<Bytes>,
}
#[derive(Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct RpcPayloadSigned<'a> {
pub method_name: &'a str,
pub signed_extrinsic: Bytes,
}
impl CircuitOutboundMessage {
pub fn to_jsonrpc_unsigned(&self) -> Result<RpcPayloadUnsigned, &'static str> {
let method_name: &str = sp_std::str::from_utf8(&self.name[..])
.map_err(|_| "`Can't decode method name to &str")?;
Ok(RpcPayloadUnsigned {
method_name,
params: self.arguments.clone(),
})
}
pub fn to_jsonrpc_signed(&self) -> Result<RpcPayloadSigned, &'static str> {
let method_name: &str = sp_std::str::from_utf8(&self.name[..])
.map_err(|_| "`Can't decode method name to &str")?;
let signed_ext = self
.extra_payload
.as_ref()
.map(|payload| payload.tx_signed.clone())
.ok_or("no signed extrinsic provided")?;
Ok(RpcPayloadSigned {
method_name,
signed_extrinsic: signed_ext,
})
}
}
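// Usage sketch (illustrative only; `msg` stands for a CircuitOutboundMessage
// built elsewhere and `submit` is a hypothetical transport function):
//
//     let rpc = msg.to_jsonrpc_unsigned()?;
//     submit(rpc.method_name, rpc.params);
//
// `to_jsonrpc_signed` additionally requires `extra_payload` to be populated;
// otherwise it returns Err("no signed extrinsic provided").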
/// Inclusion proofs of different tries
#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum ProofTriePointer {
/// Proof is a merkle path in the state trie
State,
/// Proof is a merkle path in the transaction trie (extrinsics in Substrate)
Transaction,
/// Proof is a merkle path in the receipts trie (in Substrate logs are entries in state trie, this doesn't apply)
Receipts,
}
/// Inbound Steps that specify expected data deposited by relayers back to the Circuit after each step
#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct CircuitInboundResult {
pub result_format: Bytes,
pub proof_type: ProofTriePointer,
}
/// Inbound Steps that specify expected data
|
use serde::{Deserialize, Serialize};
#[cfg(feature = "no_std")]
use sp_runtime::RuntimeDebug as Debug;
|
random_line_split
|
|
lib.rs
|
_runtime::RuntimeDebug as Debug;
#[cfg(feature = "std")]
use std::fmt::Debug;
use sp_std::prelude::*;
pub mod abi;
pub mod contract_metadata;
pub mod gateway_inbound_protocol;
pub mod transfers;
pub use gateway_inbound_protocol::GatewayInboundProtocol;
pub type ChainId = [u8; 4];
#[derive(Clone, Eq, PartialEq, PartialOrd, Ord, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum GatewayType {
ProgrammableInternal(u32),
ProgrammableExternal(u32),
TxOnly(u32),
}
impl GatewayType {
pub fn fetch_nonce(self) -> u32 {
match self {
Self::ProgrammableInternal(nonce) => nonce,
Self::ProgrammableExternal(nonce) => nonce,
Self::TxOnly(nonce) => nonce,
}
}
}
#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum GatewayVendor {
Substrate,
}
#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
/// Structure used at gateway registration as a starting point for multi-finality-verifier
pub struct GenericPrimitivesHeader {
pub parent_hash: Option<sp_core::hash::H256>,
pub number: u64,
pub state_root: Option<sp_core::hash::H256>,
pub extrinsics_root: Option<sp_core::hash::H256>,
pub digest: Option<sp_runtime::generic::Digest<sp_core::hash::H256>>,
}
#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct GatewayPointer {
pub id: ChainId,
pub vendor: GatewayVendor,
pub gateway_type: GatewayType,
}
#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct GatewayGenesisConfig {
/// SCALE-encoded modules following the format of selected frame_metadata::RuntimeMetadataVXX
pub modules_encoded: Option<Vec<u8>>,
/// SCALE-encoded signed extension - see more at frame_metadata::ExtrinsicMetadata
pub signed_extension: Option<Vec<u8>>,
/// Runtime version
pub runtime_version: sp_version::RuntimeVersion,
/// Extrinsics version
pub extrinsics_version: u8,
/// Genesis hash - block id of the genesis block, used to distinguish the network and sign messages.
/// Length depends on the parameter passed in abi::GatewayABIConfig
pub genesis_hash: Vec<u8>,
}
impl Default for GatewayGenesisConfig {
fn default() -> Self {
Self {
extrinsics_version: 0,
runtime_version: Default::default(),
genesis_hash: vec![],
modules_encoded: None,
signed_extension: None,
}
}
}
/// A struct that encodes RPC parameters required for a call to a smart-contract.
#[derive(Eq, PartialEq, Encode, Decode, Debug, Clone, Default)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct Compose<Account, Balance> {
pub name: Vec<u8>,
pub code_txt: Vec<u8>,
pub exec_type: Vec<u8>,
pub dest: Account,
pub value: Balance,
pub bytes: Vec<u8>,
pub input_data: Vec<u8>,
}
/// A result type of a get storage call.
pub type FetchContractsResult = Result<Vec<u8>, ContractAccessError>;
pub type RegistryContractId<T> = <T as frame_system::Config>::Hash;
/// A result of execution of a contract.
#[derive(Eq, PartialEq, Encode, Decode, Debug, Clone)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum
|
{
/// The contract returned successfully.
///
/// There is a status code and, optionally, some data returned by the contract.
Success {
/// Flags that the contract passed along on returning to alter its exit behaviour.
/// Described in `pallet_contracts::exec::ReturnFlags`.
flags: u32,
/// Output data returned by the contract.
///
/// Can be empty.
data: Vec<u8>,
/// How much gas was consumed by the call.
gas_consumed: u64,
},
/// The contract execution either trapped or returned an error.
Error,
}
/// The possible errors that can happen querying the storage of a contract.
#[derive(Eq, PartialEq, Encode, Decode, Debug, Clone)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum ContractAccessError {
/// The given address doesn't point to a contract.
DoesntExist,
/// The specified contract is a tombstone and thus cannot have any storage.
IsTombstone,
}
#[derive(Eq, PartialEq, Encode, Decode, Debug, Clone, Default)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct ExecPhase<Account, Balance> {
pub steps: Vec<ExecStep<Account, Balance>>,
}
#[derive(Eq, PartialEq, Encode, Decode, Debug, Clone, Default)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct ExecStep<Account, Balance> {
pub compose: Compose<Account, Balance>,
}
pub type GenericAddress = sp_runtime::MultiAddress<sp_runtime::AccountId32, ()>;
#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct InterExecSchedule<Account, Balance> {
pub phases: Vec<ExecPhase<Account, Balance>>,
}
pub trait EscrowTrait: frame_system::Config + pallet_sudo::Config {
type Currency: Currency<Self::AccountId>;
type Time: Time;
}
type Bytes = Vec<u8>;
/// Outbound Step that specifies the expected transmission medium for relayers connecting with that gateway.
/// Request message format whose derivatives can be made compatible with a JSON-RPC API,
/// with either a signed or unsigned payload, or a custom transmission medium like the XCMP protocol
#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct CircuitOutboundMessage {
/// Message name/identifier
pub name: Bytes,
/// Module/pallet name
pub module_name: Bytes,
/// Method name
pub method_name: Bytes,
/// Encoded sender's public key
pub sender: Option<Bytes>,
/// Encoded target's public key
pub target: Option<Bytes>,
/// Array of next arguments: encoded bytes of the arguments that the JSON-RPC API expects
pub arguments: Vec<Bytes>,
/// Expected results
pub expected_output: Vec<GatewayExpectedOutput>,
/// Extra payload in case the message is signed or uses custom delivery protocols like XCMP
pub extra_payload: Option<ExtraMessagePayload>,
}
#[derive(Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct RpcPayloadUnsigned<'a> {
pub method_name: &'a str,
pub params: Vec<Bytes>,
}
#[derive(Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct RpcPayloadSigned<'a> {
pub method_name: &'a str,
pub signed_extrinsic: Bytes,
}
impl CircuitOutboundMessage {
pub fn to_jsonrpc_unsigned(&self) -> Result<RpcPayloadUnsigned, &'static str> {
let method_name: &str = sp_std::str::from_utf8(&self.name[..])
.map_err(|_| "`Can't decode method name to &str")?;
Ok(RpcPayloadUnsigned {
method_name,
params: self.arguments.clone(),
})
}
pub fn to_jsonrpc_signed(&self) -> Result<RpcPayloadSigned, &'static str> {
let method_name: &str = sp_std::str::from_utf8(&self.name[..])
.map_err(|_| "`Can't decode method name to &str")?;
let signed_ext = self
.extra_payload
.as_ref()
.map(|payload| payload.tx_signed.clone())
.ok_or("no signed extrinsic provided")?;
Ok(RpcPayloadSigned {
method_name,
signed_extrinsic: signed_ext,
})
}
}
/// Inclusion proofs of different tries
#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum ProofTriePointer {
/// Proof is a merkle path in the state trie
State,
/// Proof is a merkle path in the transaction trie (extrinsics in Substrate)
Transaction,
/// Proof is a merkle path in the receipts trie (in Substrate logs are entries in state trie, this doesn't apply)
Receipts,
}
/// Inbound Steps that specify expected data deposited by relayers back to the Circuit after each step
#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct CircuitInboundResult {
pub result_format: Bytes,
pub proof_type: ProofTriePointer,
}
/// Inbound Steps that specify expected data deposited by relayers back to the Circuit after each step
#[derive(Encode,
|
ComposableExecResult
|
identifier_name
|
monitors_test.go
|
", err)
}
{
m, ok := monitors[0].(*MonitorConnectivity)
if !ok || m.Type != "connectivity" {
t.Error("request sends json including type but: ", m)
}
if m.Memo != "connectivity monitor" {
t.Error("request sends json including memo but: ", m)
}
}
{
m, ok := monitors[1].(*MonitorExternalHTTP)
if !ok || m.Type != "external" {
t.Error("request sends json including type but: ", m)
}
if m.Memo != "this monitor checks example.com." {
t.Error("request sends json including memo but: ", m)
}
if m.Service != "someService" {
t.Error("request sends json including service but: ", m)
}
if m.NotificationInterval != 60 {
t.Error("request sends json including notificationInterval but: ", m)
}
if m.URL != "https://www.example.com/" {
t.Error("request sends json including url but: ", m)
}
if m.ResponseTimeCritical != 5000 {
t.Error("request sends json including responseTimeCritical but: ", m)
}
if m.ResponseTimeWarning != 10000 {
t.Error("request sends json including responseTimeWarning but: ", m)
}
if m.ResponseTimeDuration != 5 {
t.Error("request sends json including responseTimeDuration but: ", m)
}
if m.CertificationExpirationCritical != 15 {
t.Error("request sends json including certificationExpirationCritical but: ", m)
}
if m.CertificationExpirationWarning != 30 {
t.Error("request sends json including certificationExpirationWarning but: ", m)
}
if m.ContainsString != "Foo Bar Baz" {
t.Error("request sends json including containsString but: ", m)
}
if m.SkipCertificateVerification != true {
t.Error("request sends json including skipCertificateVerification but: ", m)
}
if !reflect.DeepEqual(m.Headers, []HeaderField{{Name: "Cache-Control", Value: "no-cache"}}) {
t.Error("request sends json including headers but: ", m)
}
}
{
m, ok := monitors[2].(*MonitorExpression)
if !ok || m.Type != "expression" {
t.Error("request sends json including expression but: ", monitors[2])
}
if m.Memo != "a monitor for expression" {
t.Error("request sends json including memo but: ", m)
}
}
}
// ensure that it supports `"headers":[]` and that headers default to nil.
func TestMonitorExternalHTTP_headers(t *testing.T) {
tests := []struct {
name string
in *MonitorExternalHTTP
want string
}{
{
name: "default",
in: &MonitorExternalHTTP{},
want: `{"headers":null}`,
},
{
name: "empty list",
in: &MonitorExternalHTTP{Headers: []HeaderField{}},
want: `{"headers":[]}`,
},
}
for _, tt := range tests {
b, err := json.Marshal(tt.in)
if err != nil {
t.Error(err)
continue
}
if got := string(b); got != tt.want {
t.Errorf("%s: got %v, want %v", tt.name, got, tt.want)
}
}
}
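// The table above pins down an encoding/json behaviour the client relies on:
// a nil []HeaderField marshals to "null", while an explicitly empty slice
// marshals to "[]". Illustrative sketch (not part of the test itself):
//
//	b1, _ := json.Marshal(&MonitorExternalHTTP{})                         // {"headers":null}
//	b2, _ := json.Marshal(&MonitorExternalHTTP{Headers: []HeaderField{}}) // {"headers":[]}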
const monitorsjson = `
{
"monitors": [
{
"id": "2cSZzK3XfmA",
"type": "connectivity",
"scopes": [],
"excludeScopes": []
},
{
"id" : "2cSZzK3XfmB",
"type": "host",
"name": "disk.aa-00.writes.delta",
"duration": 3,
"metric": "disk.aa-00.writes.delta",
"operator": ">",
"warning": 20000.0,
"critical": 400000.0,
"scopes": [
"Hatena-Blog"
],
"excludeScopes": [
"Hatena-Bookmark: db-master"
]
},
{
"id" : "2cSZzK3XfmC",
"type": "service",
"name": "Hatena-Blog - access_num.4xx_count",
"service": "Hatena-Blog",
"duration": 1,
"metric": "access_num.4xx_count",
"operator": ">",
"warning": 50.0,
"critical": 100.0,
"notificationInterval": 60
},
{
"id" : "2cSZzK3XfmD",
"type": "external",
"name": "example.com",
"method": "POST",
"url": "https://example.com",
"service": "Hatena-Blog",
"headers": [{"name":"Cache-Control", "value":"no-cache"}],
"requestBody": "Request Body"
},
{
"id" : "2cSZzK3XfmE",
"type": "expression",
"name": "role average",
"expression": "avg(roleSlots(\"server:role\",\"loadavg5\"))",
"operator": ">",
"warning": 5.0,
"critical": 10.0,
"notificationInterval": 60
}
]
}
`
var wantMonitors = []Monitor{
&MonitorConnectivity{
ID: "2cSZzK3XfmA",
Name: "",
Type: "connectivity",
IsMute: false,
NotificationInterval: 0,
Scopes: []string{},
ExcludeScopes: []string{},
},
&MonitorHostMetric{
ID: "2cSZzK3XfmB",
Name: "disk.aa-00.writes.delta",
Type: "host",
IsMute: false,
NotificationInterval: 0,
Metric: "disk.aa-00.writes.delta",
Operator: ">",
Warning: 20000.000000,
Critical: 400000.000000,
Duration: 3,
Scopes: []string{
"Hatena-Blog",
},
ExcludeScopes: []string{
"Hatena-Bookmark: db-master",
},
},
&MonitorServiceMetric{
ID: "2cSZzK3XfmC",
Name: "Hatena-Blog - access_num.4xx_count",
Type: "service",
IsMute: false,
NotificationInterval: 60,
Service: "Hatena-Blog",
Metric: "access_num.4xx_count",
Operator: ">",
Warning: 50.000000,
Critical: 100.000000,
Duration: 1,
},
&MonitorExternalHTTP{
ID: "2cSZzK3XfmD",
Name: "example.com",
Type: "external",
IsMute: false,
NotificationInterval: 0,
Method: "POST",
URL: "https://example.com",
MaxCheckAttempts: 0.000000,
Service: "Hatena-Blog",
ResponseTimeCritical: 0.000000,
ResponseTimeWarning: 0.000000,
ResponseTimeDuration: 0.000000,
RequestBody: "Request Body",
ContainsString: "",
CertificationExpirationCritical: 0,
CertificationExpirationWarning: 0,
SkipCertificateVerification: false,
Headers: []HeaderField{
{
Name: "Cache-Control",
Value: "no-cache",
},
},
},
&MonitorExpression{
ID: "2cSZzK3XfmE",
Name: "role average",
Type: "expression",
IsMute: false,
NotificationInterval: 60,
Expression: "avg(roleSlots(\"server:role\",\"loadavg5\"))",
Operator: ">",
Warning: 5.000000,
Critical: 10.000000,
},
}
func TestDecodeMonitor(t *testing.T)
|
{
if got := decodeMonitorsJSON(t); !reflect.DeepEqual(got, wantMonitors) {
t.Errorf("fail to get correct data: diff: (-got +want)\n%v", pretty.Compare(got, wantMonitors))
}
}
|
identifier_body
|
|
monitors_test.go
|
m)
}
if m.CertificationExpirationCritical != 15 {
t.Error("request sends json including certificationExpirationCritical but: ", m)
}
if m.CertificationExpirationWarning != 30 {
t.Error("request sends json including certificationExpirationWarning but: ", m)
}
if m.ContainsString != "Foo Bar Baz" {
t.Error("request sends json including containsString but: ", m)
}
if m.SkipCertificateVerification != true {
t.Error("request sends json including skipCertificateVerification but: ", m)
}
if !reflect.DeepEqual(m.Headers, []HeaderField{{Name: "Cache-Control", Value: "no-cache"}}) {
t.Error("request sends json including headers but: ", m)
}
}
{
m, ok := monitors[2].(*MonitorExpression)
if !ok || m.Type != "expression" {
t.Error("request sends json including expression but: ", monitors[2])
}
if m.Memo != "a monitor for expression" {
t.Error("request sends json including memo but: ", m)
}
}
}
// ensure that it supports `"headers":[]` and that headers default to nil.
func TestMonitorExternalHTTP_headers(t *testing.T) {
tests := []struct {
name string
in *MonitorExternalHTTP
want string
}{
{
name: "default",
in: &MonitorExternalHTTP{},
want: `{"headers":null}`,
},
{
name: "empty list",
in: &MonitorExternalHTTP{Headers: []HeaderField{}},
want: `{"headers":[]}`,
},
}
for _, tt := range tests {
b, err := json.Marshal(tt.in)
if err != nil {
t.Error(err)
continue
}
if got := string(b); got != tt.want {
t.Errorf("%s: got %v, want %v", tt.name, got, tt.want)
}
}
}
const monitorsjson = `
{
"monitors": [
{
"id": "2cSZzK3XfmA",
"type": "connectivity",
"scopes": [],
"excludeScopes": []
},
{
"id" : "2cSZzK3XfmB",
"type": "host",
"name": "disk.aa-00.writes.delta",
"duration": 3,
"metric": "disk.aa-00.writes.delta",
"operator": ">",
"warning": 20000.0,
"critical": 400000.0,
"scopes": [
"Hatena-Blog"
],
"excludeScopes": [
"Hatena-Bookmark: db-master"
]
},
{
"id" : "2cSZzK3XfmC",
"type": "service",
"name": "Hatena-Blog - access_num.4xx_count",
"service": "Hatena-Blog",
"duration": 1,
"metric": "access_num.4xx_count",
"operator": ">",
"warning": 50.0,
"critical": 100.0,
"notificationInterval": 60
},
{
"id" : "2cSZzK3XfmD",
"type": "external",
"name": "example.com",
"method": "POST",
"url": "https://example.com",
"service": "Hatena-Blog",
"headers": [{"name":"Cache-Control", "value":"no-cache"}],
"requestBody": "Request Body"
},
{
"id" : "2cSZzK3XfmE",
"type": "expression",
"name": "role average",
"expression": "avg(roleSlots(\"server:role\",\"loadavg5\"))",
"operator": ">",
"warning": 5.0,
"critical": 10.0,
"notificationInterval": 60
}
]
}
`
var wantMonitors = []Monitor{
&MonitorConnectivity{
ID: "2cSZzK3XfmA",
Name: "",
Type: "connectivity",
IsMute: false,
NotificationInterval: 0,
Scopes: []string{},
ExcludeScopes: []string{},
},
&MonitorHostMetric{
ID: "2cSZzK3XfmB",
Name: "disk.aa-00.writes.delta",
Type: "host",
IsMute: false,
NotificationInterval: 0,
Metric: "disk.aa-00.writes.delta",
Operator: ">",
Warning: 20000.000000,
Critical: 400000.000000,
Duration: 3,
Scopes: []string{
"Hatena-Blog",
},
ExcludeScopes: []string{
"Hatena-Bookmark: db-master",
},
},
&MonitorServiceMetric{
ID: "2cSZzK3XfmC",
Name: "Hatena-Blog - access_num.4xx_count",
Type: "service",
IsMute: false,
NotificationInterval: 60,
Service: "Hatena-Blog",
Metric: "access_num.4xx_count",
Operator: ">",
Warning: 50.000000,
Critical: 100.000000,
Duration: 1,
},
&MonitorExternalHTTP{
ID: "2cSZzK3XfmD",
Name: "example.com",
Type: "external",
IsMute: false,
NotificationInterval: 0,
Method: "POST",
URL: "https://example.com",
MaxCheckAttempts: 0.000000,
Service: "Hatena-Blog",
ResponseTimeCritical: 0.000000,
ResponseTimeWarning: 0.000000,
ResponseTimeDuration: 0.000000,
RequestBody: "Request Body",
ContainsString: "",
CertificationExpirationCritical: 0,
CertificationExpirationWarning: 0,
SkipCertificateVerification: false,
Headers: []HeaderField{
{
Name: "Cache-Control",
Value: "no-cache",
},
},
},
&MonitorExpression{
ID: "2cSZzK3XfmE",
Name: "role average",
Type: "expression",
IsMute: false,
NotificationInterval: 60,
Expression: "avg(roleSlots(\"server:role\",\"loadavg5\"))",
Operator: ">",
Warning: 5.000000,
Critical: 10.000000,
},
}
func TestDecodeMonitor(t *testing.T) {
if got := decodeMonitorsJSON(t); !reflect.DeepEqual(got, wantMonitors) {
t.Errorf("fail to get correct data: diff: (-got +want)\n%v", pretty.Compare(got, wantMonitors))
}
}
func BenchmarkDecodeMonitor(b *testing.B) {
for i := 0; i < b.N; i++ {
decodeMonitorsJSON(b)
}
}
func decodeMonitorsJSON(t testing.TB) []Monitor {
var data struct {
Monitors []json.RawMessage `json:"monitors"`
}
if err := json.NewDecoder(strings.NewReader(monitorsjson)).Decode(&data); err != nil {
t.Error(err)
}
ms := make([]Monitor, 0, len(data.Monitors))
for _, rawmes := range data.Monitors {
m, err := decodeMonitor(rawmes)
if err != nil {
t.Error(err)
}
ms = append(ms, m)
}
return ms
}
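// decodeMonitorsJSON above is shared by TestDecodeMonitor and the benchmark:
// it first splits the payload into raw per-monitor JSON blobs, then lets
// decodeMonitor dispatch on each blob's "type" field. A minimal caller sketch
// (illustrative only, mirroring the function body):
//
//	var data struct {
//		Monitors []json.RawMessage `json:"monitors"`
//	}
//	_ = json.Unmarshal([]byte(monitorsjson), &data)
//	m, _ := decodeMonitor(data.Monitors[0]) // -> *MonitorConnectivity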
var monitorsToBeEncoded = []Monitor{
&MonitorHostMetric{
ID: "2cSZzK3XfmB",
Warning: 0.000000,
Critical: 400000.000000,
},
&MonitorServiceMetric{
ID: "2cSZzK3XfmC",
Warning: 50.000000,
Critical: 0.000000,
},
&MonitorExpression{
ID: "2cSZzK3XfmE",
Warning: 0.000000,
Critical: 0.000000,
},
}
func
|
TestEncodeMonitor
|
identifier_name
|
|
monitors_test.go
|
"responseTimeDuration": 5,
"certificationExpirationCritical": 15,
"certificationExpirationWarning": 30,
"containsString": "Foo Bar Baz",
"skipCertificateVerification": true,
"headers": []map[string]interface{}{
{"name": "Cache-Control", "value": "no-cache"},
},
},
{
"id": "2DujfcR2kA9",
"name": "expression test",
"memo": "a monitor for expression",
"type": "expression",
"expression": "avg(roleSlots('service:role','loadavg5'))",
"operator": ">",
"warning": 20,
"critical": 30,
},
},
})
res.Header()["Content-Type"] = []string{"application/json"}
fmt.Fprint(res, string(respJSON))
}))
defer ts.Close()
client, _ := NewClientWithOptions("dummy-key", ts.URL, false)
monitors, err := client.FindMonitors()
if err != nil {
t.Error("err shoud be nil but: ", err)
}
{
m, ok := monitors[0].(*MonitorConnectivity)
if !ok || m.Type != "connectivity" {
t.Error("request sends json including type but: ", m)
}
if m.Memo != "connectivity monitor" {
t.Error("request sends json including memo but: ", m)
}
}
{
m, ok := monitors[1].(*MonitorExternalHTTP)
if !ok || m.Type != "external" {
t.Error("request sends json including type but: ", m)
}
if m.Memo != "this monitor checks example.com." {
t.Error("request sends json including memo but: ", m)
}
if m.Service != "someService" {
t.Error("request sends json including service but: ", m)
}
if m.NotificationInterval != 60 {
t.Error("request sends json including notificationInterval but: ", m)
}
if m.URL != "https://www.example.com/" {
t.Error("request sends json including url but: ", m)
}
if m.ResponseTimeCritical != 5000 {
t.Error("request sends json including responseTimeCritical but: ", m)
}
if m.ResponseTimeWarning != 10000 {
t.Error("request sends json including responseTimeWarning but: ", m)
}
if m.ResponseTimeDuration != 5 {
t.Error("request sends json including responseTimeDuration but: ", m)
}
if m.CertificationExpirationCritical != 15 {
t.Error("request sends json including certificationExpirationCritical but: ", m)
}
if m.CertificationExpirationWarning != 30 {
t.Error("request sends json including certificationExpirationWarning but: ", m)
}
if m.ContainsString != "Foo Bar Baz" {
t.Error("request sends json including containsString but: ", m)
}
if m.SkipCertificateVerification != true {
t.Error("request sends json including skipCertificateVerification but: ", m)
}
if !reflect.DeepEqual(m.Headers, []HeaderField{{Name: "Cache-Control", Value: "no-cache"}}) {
t.Error("request sends json including headers but: ", m)
}
}
{
m, ok := monitors[2].(*MonitorExpression)
if !ok || m.Type != "expression" {
t.Error("request sends json including expression but: ", monitors[2])
}
if m.Memo != "a monitor for expression" {
t.Error("request sends json including memo but: ", m)
}
}
}
// ensure that it supports `"headers":[]` and that headers default to nil.
func TestMonitorExternalHTTP_headers(t *testing.T) {
tests := []struct {
name string
in *MonitorExternalHTTP
want string
}{
{
name: "default",
in: &MonitorExternalHTTP{},
want: `{"headers":null}`,
},
{
name: "empty list",
in: &MonitorExternalHTTP{Headers: []HeaderField{}},
want: `{"headers":[]}`,
},
}
for _, tt := range tests {
b, err := json.Marshal(tt.in)
if err != nil {
t.Error(err)
continue
}
if got := string(b); got != tt.want {
t.Errorf("%s: got %v, want %v", tt.name, got, tt.want)
}
}
}
const monitorsjson = `
{
"monitors": [
{
"id": "2cSZzK3XfmA",
"type": "connectivity",
"scopes": [],
"excludeScopes": []
},
{
"id" : "2cSZzK3XfmB",
"type": "host",
"name": "disk.aa-00.writes.delta",
"duration": 3,
"metric": "disk.aa-00.writes.delta",
"operator": ">",
"warning": 20000.0,
"critical": 400000.0,
"scopes": [
"Hatena-Blog"
],
"excludeScopes": [
"Hatena-Bookmark: db-master"
]
},
{
"id" : "2cSZzK3XfmC",
"type": "service",
"name": "Hatena-Blog - access_num.4xx_count",
"service": "Hatena-Blog",
"duration": 1,
"metric": "access_num.4xx_count",
"operator": ">",
"warning": 50.0,
"critical": 100.0,
"notificationInterval": 60
},
{
"id" : "2cSZzK3XfmD",
"type": "external",
"name": "example.com",
"method": "POST",
"url": "https://example.com",
"service": "Hatena-Blog",
"headers": [{"name":"Cache-Control", "value":"no-cache"}],
"requestBody": "Request Body"
},
{
"id" : "2cSZzK3XfmE",
"type": "expression",
"name": "role average",
"expression": "avg(roleSlots(\"server:role\",\"loadavg5\"))",
"operator": ">",
"warning": 5.0,
"critical": 10.0,
|
var wantMonitors = []Monitor{
&MonitorConnectivity{
ID: "2cSZzK3XfmA",
Name: "",
Type: "connectivity",
IsMute: false,
NotificationInterval: 0,
Scopes: []string{},
ExcludeScopes: []string{},
},
&MonitorHostMetric{
ID: "2cSZzK3XfmB",
Name: "disk.aa-00.writes.delta",
Type: "host",
IsMute: false,
NotificationInterval: 0,
Metric: "disk.aa-00.writes.delta",
Operator: ">",
Warning: 20000.000000,
Critical: 400000.000000,
Duration: 3,
Scopes: []string{
"Hatena-Blog",
},
ExcludeScopes: []string{
"Hatena-Bookmark: db-master",
},
},
&MonitorServiceMetric{
ID: "2cSZzK3XfmC",
Name: "Hatena-Blog - access_num.4xx_count",
Type: "service",
IsMute: false,
NotificationInterval: 60,
Service: "Hatena-Blog",
Metric: "access_num.4xx_count",
Operator: ">",
Warning: 50.000000,
Critical: 100.000000,
Duration: 1,
},
&MonitorExternalHTTP{
ID: "2cSZzK3XfmD",
Name: "example.com",
Type: "external",
IsMute: false,
NotificationInterval: 0,
Method: "POST",
URL: "https://example.com",
MaxCheckAttempts: 0.000000,
Service: "Hatena-Blog",
ResponseTimeCritical: 0.000000,
ResponseTimeWarning: 0.000000,
ResponseTimeDuration: 0
|
"notificationInterval": 60
}
]
}
`
|
random_line_split
|
monitors_test.go
|
"responseTimeDuration": 5,
"certificationExpirationCritical": 15,
"certificationExpirationWarning": 30,
"containsString": "Foo Bar Baz",
"skipCertificateVerification": true,
"headers": []map[string]interface{}{
{"name": "Cache-Control", "value": "no-cache"},
},
},
{
"id": "2DujfcR2kA9",
"name": "expression test",
"memo": "a monitor for expression",
"type": "expression",
"expression": "avg(roleSlots('service:role','loadavg5'))",
"operator": ">",
"warning": 20,
"critical": 30,
},
},
})
res.Header()["Content-Type"] = []string{"application/json"}
fmt.Fprint(res, string(respJSON))
}))
defer ts.Close()
client, _ := NewClientWithOptions("dummy-key", ts.URL, false)
monitors, err := client.FindMonitors()
if err != nil {
t.Error("err shoud be nil but: ", err)
}
{
m, ok := monitors[0].(*MonitorConnectivity)
if !ok || m.Type != "connectivity" {
t.Error("request sends json including type but: ", m)
}
if m.Memo != "connectivity monitor" {
t.Error("request sends json including memo but: ", m)
}
}
{
m, ok := monitors[1].(*MonitorExternalHTTP)
if !ok || m.Type != "external" {
t.Error("request sends json including type but: ", m)
}
if m.Memo != "this monitor checks example.com." {
t.Error("request sends json including memo but: ", m)
}
if m.Service != "someService" {
t.Error("request sends json including service but: ", m)
}
if m.NotificationInterval != 60 {
t.Error("request sends json including notificationInterval but: ", m)
}
if m.URL != "https://www.example.com/" {
t.Error("request sends json including url but: ", m)
}
if m.ResponseTimeCritical != 5000 {
t.Error("request sends json including responseTimeCritical but: ", m)
}
if m.ResponseTimeWarning != 10000
|
if m.ResponseTimeDuration != 5 {
t.Error("request sends json including responseTimeDuration but: ", m)
}
if m.CertificationExpirationCritical != 15 {
t.Error("request sends json including certificationExpirationCritical but: ", m)
}
if m.CertificationExpirationWarning != 30 {
t.Error("request sends json including certificationExpirationWarning but: ", m)
}
if m.ContainsString != "Foo Bar Baz" {
t.Error("request sends json including containsString but: ", m)
}
if m.SkipCertificateVerification != true {
t.Error("request sends json including skipCertificateVerification but: ", m)
}
if !reflect.DeepEqual(m.Headers, []HeaderField{{Name: "Cache-Control", Value: "no-cache"}}) {
t.Error("request sends json including headers but: ", m)
}
}
{
m, ok := monitors[2].(*MonitorExpression)
if !ok || m.Type != "expression" {
t.Error("request sends json including expression but: ", monitors[2])
}
if m.Memo != "a monitor for expression" {
t.Error("request sends json including memo but: ", m)
}
}
}
// ensure that it supports `"headers":[]` and that headers default to nil.
func TestMonitorExternalHTTP_headers(t *testing.T) {
tests := []struct {
name string
in *MonitorExternalHTTP
want string
}{
{
name: "default",
in: &MonitorExternalHTTP{},
want: `{"headers":null}`,
},
{
name: "empty list",
in: &MonitorExternalHTTP{Headers: []HeaderField{}},
want: `{"headers":[]}`,
},
}
for _, tt := range tests {
b, err := json.Marshal(tt.in)
if err != nil {
t.Error(err)
continue
}
if got := string(b); got != tt.want {
t.Errorf("%s: got %v, want %v", tt.name, got, tt.want)
}
}
}
const monitorsjson = `
{
"monitors": [
{
"id": "2cSZzK3XfmA",
"type": "connectivity",
"scopes": [],
"excludeScopes": []
},
{
"id" : "2cSZzK3XfmB",
"type": "host",
"name": "disk.aa-00.writes.delta",
"duration": 3,
"metric": "disk.aa-00.writes.delta",
"operator": ">",
"warning": 20000.0,
"critical": 400000.0,
"scopes": [
"Hatena-Blog"
],
"excludeScopes": [
"Hatena-Bookmark: db-master"
]
},
{
"id" : "2cSZzK3XfmC",
"type": "service",
"name": "Hatena-Blog - access_num.4xx_count",
"service": "Hatena-Blog",
"duration": 1,
"metric": "access_num.4xx_count",
"operator": ">",
"warning": 50.0,
"critical": 100.0,
"notificationInterval": 60
},
{
"id" : "2cSZzK3XfmD",
"type": "external",
"name": "example.com",
"method": "POST",
"url": "https://example.com",
"service": "Hatena-Blog",
"headers": [{"name":"Cache-Control", "value":"no-cache"}],
"requestBody": "Request Body"
},
{
"id" : "2cSZzK3XfmE",
"type": "expression",
"name": "role average",
"expression": "avg(roleSlots(\"server:role\",\"loadavg5\"))",
"operator": ">",
"warning": 5.0,
"critical": 10.0,
"notificationInterval": 60
}
]
}
`
var wantMonitors = []Monitor{
&MonitorConnectivity{
ID: "2cSZzK3XfmA",
Name: "",
Type: "connectivity",
IsMute: false,
NotificationInterval: 0,
Scopes: []string{},
ExcludeScopes: []string{},
},
&MonitorHostMetric{
ID: "2cSZzK3XfmB",
Name: "disk.aa-00.writes.delta",
Type: "host",
IsMute: false,
NotificationInterval: 0,
Metric: "disk.aa-00.writes.delta",
Operator: ">",
Warning: 20000.000000,
Critical: 400000.000000,
Duration: 3,
Scopes: []string{
"Hatena-Blog",
},
ExcludeScopes: []string{
"Hatena-Bookmark: db-master",
},
},
&MonitorServiceMetric{
ID: "2cSZzK3XfmC",
Name: "Hatena-Blog - access_num.4xx_count",
Type: "service",
IsMute: false,
NotificationInterval: 60,
Service: "Hatena-Blog",
Metric: "access_num.4xx_count",
Operator: ">",
Warning: 50.000000,
Critical: 100.000000,
Duration: 1,
},
&MonitorExternalHTTP{
ID: "2cSZzK3XfmD",
Name: "example.com",
Type: "external",
IsMute: false,
NotificationInterval: 0,
Method: "POST",
URL: "https://example.com",
MaxCheckAttempts: 0.000000,
Service: "Hatena-Blog",
ResponseTimeCritical: 0.000000,
ResponseTimeWarning: 0.000000,
ResponseTimeDuration:
|
{
t.Error("request sends json including responseTimeWarning but: ", m)
}
|
conditional_block
|
impl.go
|
.`,
Value: fmt.Sprintf("0.0.0.0:%d", params.INITIAL_PORT),
},
cli.StringFlag{
Name: "rpccorsdomain",
Usage: `Comma separated list of domains to accept cross origin requests.
(localhost enabled by default)`,
Value: "http://localhost:* /*",
},
cli.IntFlag{Name: "max-unresponsive-time",
Usage: `Max time in seconds for which an address can send no packets and
still be considered healthy.`,
Value: 120,
},
cli.IntFlag{Name: "send-ping-time",
Usage: `Time in seconds after which if we have received no message from a
node we have a connection with, we are going to send a PING message`,
Value: 60,
},
cli.BoolTFlag{Name: "rpc",
Usage: `Start with or without the RPC server. Default is to start
the RPC server`,
},
cli.StringFlag{
Name: "api-address",
Usage: `host:port" for the RPC server to listen on.`,
Value: "127.0.0.1:5001",
},
ethutils.DirectoryFlag{
Name: "datadir",
Usage: "Directory for storing raiden data.",
Value: ethutils.DirectoryString{params.DefaultDataDir()},
},
cli.StringFlag{
Name: "password-file",
Usage: "Text file containing password for provided account",
},
cli.StringFlag{
Name: "nat",
Usage: `
[auto|upnp|stun|none]
Manually specify method to use for
determining public IP / NAT traversal.
Available methods:
"auto" - Try UPnP, then
STUN, fallback to none
"upnp" - Try UPnP,
fallback to none
"stun" - Try STUN, fallback
to none
"none" - Use the local interface,only for test
address (this will likely cause connectivity
issues)
"ice"- Use ice framework for nat punching
[default: ice]`,
Value: "ice",
},
cli.BoolFlag{
Name: "debugcrash",
Usage: "enable debug crash feature",
},
cli.StringFlag{
Name: "conditionquit",
Usage: "quit at specified point for test",
Value: "",
},
cli.StringFlag{
Name: "turn-server",
Usage: "tur server for ice",
Value: params.DefaultTurnServer,
},
cli.StringFlag{
Name: "turn-user",
Usage: "turn username for turn server",
Value: "bai",
},
cli.StringFlag{
Name: "turn-pass",
Usage: "turn password for turn server",
Value: "bai",
},
cli.BoolFlag{
Name: "nonetwork",
Usage: "disable network, for example ,when we want to settle all channels",
},
cli.BoolFlag{
Name: "fee",
Usage: "enable mediation fee",
},
cli.StringFlag{
Name: "signal-server",
Usage: "use another signal server ",
Value: params.DefaultSignalServer,
},
cli.BoolFlag{
Name: "ignore-mediatednode-request",
Usage: "this node doesn't work as a mediated node, only work as sender or receiver",
},
}
app.Flags = append(app.Flags, debug.Flags...)
app.Action = MainCtx
app.Name = "smartraiden"
app.Version = "0.2"
app.Before = func(ctx *cli.Context) error {
if err := debug.Setup(ctx); err != nil
|
return nil
}
app.After = func(ctx *cli.Context) error {
debug.Exit()
return nil
}
app.Run(os.Args)
}
func MainCtx(ctx *cli.Context) error {
var pms *network.PortMappedSocket
var err error
fmt.Printf("Welcom to smartraiden,version %s\n", ctx.App.Version)
if ctx.String("nat") != "ice" {
host, port := network.SplitHostPort(ctx.String("listen-address"))
pms, err = network.SocketFactory(host, port, ctx.String("nat"))
if err != nil {
log.Crit(fmt.Sprintf("SocketFactory err=%s", err))
return err
}
log.Trace(fmt.Sprintf("pms=%s", utils.StringInterface1(pms)))
} else {
host, port := network.SplitHostPort(ctx.String("listen-address"))
pms = &network.PortMappedSocket{
Ip: host,
Port: port,
}
}
if err != nil {
log.Error(fmt.Sprintf("start server on %s error:%s", ctx.String("listen-address"), err))
utils.SystemExit(1)
}
cfg := config(ctx, pms)
log.Debug(fmt.Sprintf("Config:%s", utils.StringInterface(cfg, 2)))
ethEndpoint := ctx.String("eth-rpc-endpoint")
client, err := helper.NewSafeClient(ethEndpoint)
if err != nil {
log.Error(fmt.Sprintf("cannot connect to geth :%s err=%s", ethEndpoint, err))
utils.SystemExit(1)
}
bcs := rpc.NewBlockChainService(cfg.PrivateKey, cfg.RegistryAddress, client)
log.Trace(fmt.Sprintf("bcs=%#v", bcs))
transport, discovery := buildTransportAndDiscovery(cfg, pms, bcs)
raidenService := smartraiden.NewRaidenService(bcs, cfg.PrivateKey, transport, discovery, cfg)
if cfg.EnableMediationFee {
//do nothing.
} else {
raidenService.SetFeePolicy(&smartraiden.NoFeePolicy{})
}
go func() {
raidenService.Start()
}()
api := smartraiden.NewRaidenApi(raidenService)
regQuitHandler(api)
restful.Start(api, cfg)
return nil
}
func buildTransportAndDiscovery(cfg *params.Config, pms *network.PortMappedSocket, bcs *rpc.BlockChainService) (transport network.Transporter, discovery network.DiscoveryInterface) {
var err error
/*
uses ICE and doesn't work as a route node, which means this node runs on a mobile phone.
*/
if cfg.NetworkMode == params.ICEOnly && cfg.IgnoreMediatedNodeRequest {
cfg.NetworkMode = params.MixUDPICE
}
switch cfg.NetworkMode {
case params.NoNetwork:
discovery = network.NewDiscovery()
policy := network.NewTokenBucket(10, 1, time.Now)
transport = network.NewDummyTransport(pms.Ip, pms.Port, nil, policy)
return
case params.UDPOnly:
discovery = network.NewContractDiscovery(bcs.NodeAddress, cfg.DiscoveryAddress, bcs.Client, bcs.Auth)
policy := network.NewTokenBucket(10, 1, time.Now)
transport = network.NewUDPTransport(pms.Ip, pms.Port, pms.Conn, nil, policy)
case params.ICEOnly:
network.InitIceTransporter(cfg.Ice.TurnServer, cfg.Ice.TurnUser, cfg.Ice.TurnPassword, cfg.Ice.SignalServer)
transport, err = network.NewIceTransporter(bcs.PrivKey, utils.APex2(bcs.NodeAddress))
if err != nil {
panic(err)
}
discovery = network.NewIceHelperDiscovery()
case params.MixUDPICE:
network.InitIceTransporter(cfg.Ice.TurnServer, cfg.Ice.TurnUser, cfg.Ice.TurnPassword, cfg.Ice.SignalServer)
policy := network.NewTokenBucket(10, 1, time.Now)
transport, discovery = network.NewMixTranspoter(bcs.PrivKey, utils.APex2(bcs.NodeAddress), pms.Ip, pms.Port, pms.Conn, nil, policy)
}
return
}
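// Summary of the mode dispatch in buildTransportAndDiscovery (descriptive only):
//
//	NoNetwork  -> in-memory discovery + dummy transport (e.g. settling channels offline)
//	UDPOnly    -> contract-based discovery + plain UDP transport
//	ICEOnly    -> ICE transporter using the configured TURN/signal servers
//	MixUDPICE  -> combined UDP/ICE transporter and discovery
//
// Note that ICEOnly is upgraded to MixUDPICE earlier in the function when the
// node is configured to ignore mediated-node requests.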
func regQuitHandler(api *smartraiden.RaidenApi) {
go func() {
quitSignal := make(chan os.Signal, 1)
signal.Notify(quitSignal, os.Interrupt, os.Kill)
<-quitSignal
signal.Stop(quitSignal)
api.Stop()
utils.SystemExit(0)
}()
}
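// regQuitHandler installs a one-shot shutdown path: on an interrupt signal it
// stops the API and exits the process. (As a caveat, os.Kill generally cannot
// be trapped on Unix-like systems, so in practice only the interrupt branch
// fires; this note is descriptive and does not change behaviour.)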
func promptAccount(adviceAddress common.Address, keystorePath, passwordfile string) (addr common.Address, keybin []byte) {
am := smartraiden.NewAccountManager(keystorePath)
if len(am.Accounts) == 0 {
log.Error(fmt.Sprintf("No Ethereum accounts found in the directory %s", keystorePath))
utils.SystemExit(1)
}
if !am.AddressInKeyStore(adviceAddress) {
if adviceAddress != utils.EmptyAddress {
log.Error(fmt.Sprintf("account %s could not be found on the sytstem. aborting...", adviceAddress))
utils.SystemExit(1)
}
shouldPromt := true
fmt.Println("The following accounts were found in your machine:")
for i := 0; i < len(am.Accounts); i++ {
fmt.Printf("%
|
{
return err
}
|
conditional_block
|
impl.go
|
.`,
Value: fmt.Sprintf("0.0.0.0:%d", params.INITIAL_PORT),
},
cli.StringFlag{
Name: "rpccorsdomain",
Usage: `Comma separated list of domains to accept cross origin requests.
(localhost enabled by default)`,
Value: "http://localhost:* /*",
},
cli.IntFlag{Name: "max-unresponsive-time",
Usage: `Max time in seconds for which an address can send no packets and
still be considered healthy.`,
Value: 120,
},
cli.IntFlag{Name: "send-ping-time",
Usage: `Time in seconds after which if we have received no message from a
node we have a connection with, we are going to send a PING message`,
Value: 60,
},
cli.BoolTFlag{Name: "rpc",
Usage: `Start with or without the RPC server. Default is to start
the RPC server`,
},
cli.StringFlag{
Name: "api-address",
Usage: `host:port" for the RPC server to listen on.`,
Value: "127.0.0.1:5001",
},
ethutils.DirectoryFlag{
Name: "datadir",
Usage: "Directory for storing raiden data.",
Value: ethutils.DirectoryString{params.DefaultDataDir()},
},
cli.StringFlag{
Name: "password-file",
Usage: "Text file containing password for provided account",
},
cli.StringFlag{
Name: "nat",
Usage: `
[auto|upnp|stun|none]
Manually specify method to use for
determining public IP / NAT traversal.
Available methods:
"auto" - Try UPnP, then
STUN, fallback to none
"upnp" - Try UPnP,
fallback to none
"stun" - Try STUN, fallback
to none
"none" - Use the local interface,only for test
address (this will likely cause connectivity
issues)
"ice"- Use ice framework for nat punching
[default: ice]`,
Value: "ice",
},
cli.BoolFlag{
Name: "debugcrash",
Usage: "enable debug crash feature",
},
cli.StringFlag{
Name: "conditionquit",
Usage: "quit at specified point for test",
Value: "",
},
cli.StringFlag{
Name: "turn-server",
Usage: "tur server for ice",
Value: params.DefaultTurnServer,
},
cli.StringFlag{
Name: "turn-user",
Usage: "turn username for turn server",
Value: "bai",
},
cli.StringFlag{
Name: "turn-pass",
Usage: "turn password for turn server",
Value: "bai",
},
cli.BoolFlag{
Name: "nonetwork",
Usage: "disable network, for example ,when we want to settle all channels",
},
cli.BoolFlag{
Name: "fee",
Usage: "enable mediation fee",
},
cli.StringFlag{
Name: "signal-server",
Usage: "use another signal server ",
Value: params.DefaultSignalServer,
},
cli.BoolFlag{
Name: "ignore-mediatednode-request",
Usage: "this node doesn't work as a mediated node, only work as sender or receiver",
},
}
app.Flags = append(app.Flags, debug.Flags...)
app.Action = MainCtx
app.Name = "smartraiden"
app.Version = "0.2"
app.Before = func(ctx *cli.Context) error {
if err := debug.Setup(ctx); err != nil {
return err
}
return nil
}
app.After = func(ctx *cli.Context) error {
debug.Exit()
return nil
}
app.Run(os.Args)
}
func MainCtx(ctx *cli.Context) error
|
log.Error(fmt.Sprintf("start server on %s error:%s", ctx.String("listen-address"), err))
utils.SystemExit(1)
}
cfg := config(ctx, pms)
log.Debug(fmt.Sprintf("Config:%s", utils.StringInterface(cfg, 2)))
ethEndpoint := ctx.String("eth-rpc-endpoint")
client, err := helper.NewSafeClient(ethEndpoint)
if err != nil {
log.Error(fmt.Sprintf("cannot connect to geth :%s err=%s", ethEndpoint, err))
utils.SystemExit(1)
}
bcs := rpc.NewBlockChainService(cfg.PrivateKey, cfg.RegistryAddress, client)
log.Trace(fmt.Sprintf("bcs=%#v", bcs))
transport, discovery := buildTransportAndDiscovery(cfg, pms, bcs)
raidenService := smartraiden.NewRaidenService(bcs, cfg.PrivateKey, transport, discovery, cfg)
if cfg.EnableMediationFee {
//do nothing.
} else {
raidenService.SetFeePolicy(&smartraiden.NoFeePolicy{})
}
go func() {
raidenService.Start()
}()
api := smartraiden.NewRaidenApi(raidenService)
regQuitHandler(api)
restful.Start(api, cfg)
return nil
}
func buildTransportAndDiscovery(cfg *params.Config, pms *network.PortMappedSocket, bcs *rpc.BlockChainService) (transport network.Transporter, discovery network.DiscoveryInterface) {
var err error
/*
uses ICE and does not work as a route node, which means this node runs on a mobile phone.
*/
if cfg.NetworkMode == params.ICEOnly && cfg.IgnoreMediatedNodeRequest {
cfg.NetworkMode = params.MixUDPICE
}
switch cfg.NetworkMode {
case params.NoNetwork:
discovery = network.NewDiscovery()
policy := network.NewTokenBucket(10, 1, time.Now)
transport = network.NewDummyTransport(pms.Ip, pms.Port, nil, policy)
return
case params.UDPOnly:
discovery = network.NewContractDiscovery(bcs.NodeAddress, cfg.DiscoveryAddress, bcs.Client, bcs.Auth)
policy := network.NewTokenBucket(10, 1, time.Now)
transport = network.NewUDPTransport(pms.Ip, pms.Port, pms.Conn, nil, policy)
case params.ICEOnly:
network.InitIceTransporter(cfg.Ice.TurnServer, cfg.Ice.TurnUser, cfg.Ice.TurnPassword, cfg.Ice.SignalServer)
transport, err = network.NewIceTransporter(bcs.PrivKey, utils.APex2(bcs.NodeAddress))
if err != nil {
panic(err)
}
discovery = network.NewIceHelperDiscovery()
case params.MixUDPICE:
network.InitIceTransporter(cfg.Ice.TurnServer, cfg.Ice.TurnUser, cfg.Ice.TurnPassword, cfg.Ice.SignalServer)
policy := network.NewTokenBucket(10, 1, time.Now)
transport, discovery = network.NewMixTranspoter(bcs.PrivKey, utils.APex2(bcs.NodeAddress), pms.Ip, pms.Port, pms.Conn, nil, policy)
}
return
}
func regQuitHandler(api *smartraiden.RaidenApi) {
go func() {
quitSignal := make(chan os.Signal, 1)
signal.Notify(quitSignal, os.Interrupt, os.Kill)
<-quitSignal
signal.Stop(quitSignal)
api.Stop()
utils.SystemExit(0)
}()
}
func promptAccount(adviceAddress common.Address, keystorePath, passwordfile string) (addr common.Address, keybin []byte) {
am := smartraiden.NewAccountManager(keystorePath)
if len(am.Accounts) == 0 {
log.Error(fmt.Sprintf("No Ethereum accounts found in the directory %s", keystorePath))
utils.SystemExit(1)
}
if !am.AddressInKeyStore(adviceAddress) {
if adviceAddress != utils.EmptyAddress {
log.Error(fmt.Sprintf("account %s could not be found on the sytstem. aborting...", adviceAddress))
utils.SystemExit(1)
}
shouldPromt := true
fmt.Println("The following accounts were found in your machine:")
for i := 0; i < len(am.Accounts); i++ {
fmt.Printf("%3
|
{
var pms *network.PortMappedSocket
var err error
fmt.Printf("Welcom to smartraiden,version %s\n", ctx.App.Version)
if ctx.String("nat") != "ice" {
host, port := network.SplitHostPort(ctx.String("listen-address"))
pms, err = network.SocketFactory(host, port, ctx.String("nat"))
if err != nil {
log.Crit(fmt.Sprintf("SocketFactory err=%s", err))
return err
}
log.Trace(fmt.Sprintf("pms=%s", utils.StringInterface1(pms)))
} else {
host, port := network.SplitHostPort(ctx.String("listen-address"))
pms = &network.PortMappedSocket{
Ip: host,
Port: port,
}
}
if err != nil {
|
identifier_body
|
impl.go
|
.`,
Value: fmt.Sprintf("0.0.0.0:%d", params.INITIAL_PORT),
},
cli.StringFlag{
Name: "rpccorsdomain",
Usage: `Comma separated list of domains to accept cross origin requests.
(localhost enabled by default)`,
Value: "http://localhost:* /*",
},
cli.IntFlag{Name: "max-unresponsive-time",
Usage: `Max time in seconds for which an address can send no packets and
still be considered healthy.`,
Value: 120,
},
cli.IntFlag{Name: "send-ping-time",
Usage: `Time in seconds after which if we have received no message from a
node we have a connection with, we are going to send a PING message`,
Value: 60,
},
cli.BoolTFlag{Name: "rpc",
Usage: `Start with or without the RPC server. Default is to start
the RPC server`,
},
cli.StringFlag{
Name: "api-address",
Usage: `"host:port" for the RPC server to listen on.`,
Value: "127.0.0.1:5001",
},
ethutils.DirectoryFlag{
Name: "datadir",
Usage: "Directory for storing raiden data.",
Value: ethutils.DirectoryString{params.DefaultDataDir()},
},
cli.StringFlag{
Name: "password-file",
Usage: "Text file containing password for provided account",
},
cli.StringFlag{
Name: "nat",
Usage: `
[auto|upnp|stun|none]
Manually specify method to use for
determining public IP / NAT traversal.
Available methods:
"auto" - Try UPnP, then
STUN, fallback to none
"upnp" - Try UPnP,
fallback to none
"stun" - Try STUN, fallback
to none
"none" - Use the local interface,only for test
address (this will likely cause connectivity
issues)
"ice"- Use ice framework for nat punching
[default: ice]`,
Value: "ice",
},
cli.BoolFlag{
Name: "debugcrash",
Usage: "enable debug crash feature",
},
cli.StringFlag{
Name: "conditionquit",
Usage: "quit at specified point for test",
Value: "",
},
cli.StringFlag{
Name: "turn-server",
Usage: "tur server for ice",
Value: params.DefaultTurnServer,
},
cli.StringFlag{
Name: "turn-user",
Usage: "turn username for turn server",
Value: "bai",
},
cli.StringFlag{
Name: "turn-pass",
Usage: "turn password for turn server",
Value: "bai",
},
cli.BoolFlag{
Name: "nonetwork",
Usage: "disable network, for example ,when we want to settle all channels",
},
cli.BoolFlag{
Name: "fee",
Usage: "enable mediation fee",
},
cli.StringFlag{
Name: "signal-server",
Usage: "use another signal server ",
Value: params.DefaultSignalServer,
},
cli.BoolFlag{
Name: "ignore-mediatednode-request",
Usage: "this node doesn't work as a mediated node, only work as sender or receiver",
},
}
app.Flags = append(app.Flags, debug.Flags...)
app.Action = MainCtx
app.Name = "smartraiden"
app.Version = "0.2"
app.Before = func(ctx *cli.Context) error {
if err := debug.Setup(ctx); err != nil {
return err
}
return nil
}
app.After = func(ctx *cli.Context) error {
debug.Exit()
return nil
}
app.Run(os.Args)
}
func MainCtx(ctx *cli.Context) error {
var pms *network.PortMappedSocket
var err error
fmt.Printf("Welcom to smartraiden,version %s\n", ctx.App.Version)
if ctx.String("nat") != "ice" {
host, port := network.SplitHostPort(ctx.String("listen-address"))
pms, err = network.SocketFactory(host, port, ctx.String("nat"))
if err != nil {
log.Crit(fmt.Sprintf("SocketFactory err=%s", err))
return err
}
log.Trace(fmt.Sprintf("pms=%s", utils.StringInterface1(pms)))
} else {
host, port := network.SplitHostPort(ctx.String("listen-address"))
pms = &network.PortMappedSocket{
Ip: host,
Port: port,
}
}
if err != nil {
log.Error(fmt.Sprintf("start server on %s error:%s", ctx.String("listen-address"), err))
utils.SystemExit(1)
}
cfg := config(ctx, pms)
log.Debug(fmt.Sprintf("Config:%s", utils.StringInterface(cfg, 2)))
ethEndpoint := ctx.String("eth-rpc-endpoint")
client, err := helper.NewSafeClient(ethEndpoint)
if err != nil {
log.Error(fmt.Sprintf("cannot connect to geth :%s err=%s", ethEndpoint, err))
utils.SystemExit(1)
}
bcs := rpc.NewBlockChainService(cfg.PrivateKey, cfg.RegistryAddress, client)
log.Trace(fmt.Sprintf("bcs=%#v", bcs))
transport, discovery := buildTransportAndDiscovery(cfg, pms, bcs)
raidenService := smartraiden.NewRaidenService(bcs, cfg.PrivateKey, transport, discovery, cfg)
if cfg.EnableMediationFee {
//do nothing.
} else {
raidenService.SetFeePolicy(&smartraiden.NoFeePolicy{})
}
go func() {
raidenService.Start()
}()
api := smartraiden.NewRaidenApi(raidenService)
regQuitHandler(api)
restful.Start(api, cfg)
return nil
}
func
|
(cfg *params.Config, pms *network.PortMappedSocket, bcs *rpc.BlockChainService) (transport network.Transporter, discovery network.DiscoveryInterface) {
var err error
/*
uses ICE and does not work as a route node, which means this node runs on a mobile phone.
*/
if cfg.NetworkMode == params.ICEOnly && cfg.IgnoreMediatedNodeRequest {
cfg.NetworkMode = params.MixUDPICE
}
switch cfg.NetworkMode {
case params.NoNetwork:
discovery = network.NewDiscovery()
policy := network.NewTokenBucket(10, 1, time.Now)
transport = network.NewDummyTransport(pms.Ip, pms.Port, nil, policy)
return
case params.UDPOnly:
discovery = network.NewContractDiscovery(bcs.NodeAddress, cfg.DiscoveryAddress, bcs.Client, bcs.Auth)
policy := network.NewTokenBucket(10, 1, time.Now)
transport = network.NewUDPTransport(pms.Ip, pms.Port, pms.Conn, nil, policy)
case params.ICEOnly:
network.InitIceTransporter(cfg.Ice.TurnServer, cfg.Ice.TurnUser, cfg.Ice.TurnPassword, cfg.Ice.SignalServer)
transport, err = network.NewIceTransporter(bcs.PrivKey, utils.APex2(bcs.NodeAddress))
if err != nil {
panic(err)
}
discovery = network.NewIceHelperDiscovery()
case params.MixUDPICE:
network.InitIceTransporter(cfg.Ice.TurnServer, cfg.Ice.TurnUser, cfg.Ice.TurnPassword, cfg.Ice.SignalServer)
policy := network.NewTokenBucket(10, 1, time.Now)
transport, discovery = network.NewMixTranspoter(bcs.PrivKey, utils.APex2(bcs.NodeAddress), pms.Ip, pms.Port, pms.Conn, nil, policy)
}
return
}
func regQuitHandler(api *smartraiden.RaidenApi) {
go func() {
quitSignal := make(chan os.Signal, 1)
signal.Notify(quitSignal, os.Interrupt, os.Kill)
<-quitSignal
signal.Stop(quitSignal)
api.Stop()
utils.SystemExit(0)
}()
}
func promptAccount(adviceAddress common.Address, keystorePath, passwordfile string) (addr common.Address, keybin []byte) {
am := smartraiden.NewAccountManager(keystorePath)
if len(am.Accounts) == 0 {
log.Error(fmt.Sprintf("No Ethereum accounts found in the directory %s", keystorePath))
utils.SystemExit(1)
}
if !am.AddressInKeyStore(adviceAddress) {
if adviceAddress != utils.EmptyAddress {
log.Error(fmt.Sprintf("account %s could not be found on the sytstem. aborting...", adviceAddress))
utils.SystemExit(1)
}
shouldPromt := true
fmt.Println("The following accounts were found in your machine:")
for i := 0; i < len(am.Accounts); i++ {
fmt.Printf("%3
|
buildTransportAndDiscovery
|
identifier_name
|
impl.go
|
.`,
Value: fmt.Sprintf("0.0.0.0:%d", params.INITIAL_PORT),
},
cli.StringFlag{
Name: "rpccorsdomain",
Usage: `Comma separated list of domains to accept cross origin requests.
(localhost enabled by default)`,
Value: "http://localhost:* /*",
},
cli.IntFlag{Name: "max-unresponsive-time",
Usage: `Max time in seconds for which an address can send no packets and
still be considered healthy.`,
Value: 120,
},
cli.IntFlag{Name: "send-ping-time",
Usage: `Time in seconds after which if we have received no message from a
node we have a connection with, we are going to send a PING message`,
Value: 60,
},
cli.BoolTFlag{Name: "rpc",
Usage: `Start with or without the RPC server. Default is to start
the RPC server`,
},
cli.StringFlag{
Name: "api-address",
Usage: `"host:port" for the RPC server to listen on.`,
Value: "127.0.0.1:5001",
},
ethutils.DirectoryFlag{
Name: "datadir",
Usage: "Directory for storing raiden data.",
Value: ethutils.DirectoryString{params.DefaultDataDir()},
},
cli.StringFlag{
Name: "password-file",
Usage: "Text file containing password for provided account",
},
cli.StringFlag{
Name: "nat",
Usage: `
[auto|upnp|stun|none]
Manually specify method to use for
determining public IP / NAT traversal.
Available methods:
"auto" - Try UPnP, then
STUN, fallback to none
"upnp" - Try UPnP,
fallback to none
"stun" - Try STUN, fallback
to none
"none" - Use the local interface,only for test
address (this will likely cause connectivity
issues)
|
cli.BoolFlag{
Name: "debugcrash",
Usage: "enable debug crash feature",
},
cli.StringFlag{
Name: "conditionquit",
Usage: "quit at specified point for test",
Value: "",
},
cli.StringFlag{
Name: "turn-server",
Usage: "tur server for ice",
Value: params.DefaultTurnServer,
},
cli.StringFlag{
Name: "turn-user",
Usage: "turn username for turn server",
Value: "bai",
},
cli.StringFlag{
Name: "turn-pass",
Usage: "turn password for turn server",
Value: "bai",
},
cli.BoolFlag{
Name: "nonetwork",
Usage: "disable network, for example ,when we want to settle all channels",
},
cli.BoolFlag{
Name: "fee",
Usage: "enable mediation fee",
},
cli.StringFlag{
Name: "signal-server",
Usage: "use another signal server ",
Value: params.DefaultSignalServer,
},
cli.BoolFlag{
Name: "ignore-mediatednode-request",
Usage: "this node doesn't work as a mediated node, only work as sender or receiver",
},
}
app.Flags = append(app.Flags, debug.Flags...)
app.Action = MainCtx
app.Name = "smartraiden"
app.Version = "0.2"
app.Before = func(ctx *cli.Context) error {
if err := debug.Setup(ctx); err != nil {
return err
}
return nil
}
app.After = func(ctx *cli.Context) error {
debug.Exit()
return nil
}
app.Run(os.Args)
}
func MainCtx(ctx *cli.Context) error {
var pms *network.PortMappedSocket
var err error
fmt.Printf("Welcom to smartraiden,version %s\n", ctx.App.Version)
if ctx.String("nat") != "ice" {
host, port := network.SplitHostPort(ctx.String("listen-address"))
pms, err = network.SocketFactory(host, port, ctx.String("nat"))
if err != nil {
log.Crit(fmt.Sprintf("SocketFactory err=%s", err))
return err
}
log.Trace(fmt.Sprintf("pms=%s", utils.StringInterface1(pms)))
} else {
host, port := network.SplitHostPort(ctx.String("listen-address"))
pms = &network.PortMappedSocket{
Ip: host,
Port: port,
}
}
if err != nil {
log.Error(fmt.Sprintf("start server on %s error:%s", ctx.String("listen-address"), err))
utils.SystemExit(1)
}
cfg := config(ctx, pms)
log.Debug(fmt.Sprintf("Config:%s", utils.StringInterface(cfg, 2)))
ethEndpoint := ctx.String("eth-rpc-endpoint")
client, err := helper.NewSafeClient(ethEndpoint)
if err != nil {
log.Error(fmt.Sprintf("cannot connect to geth :%s err=%s", ethEndpoint, err))
utils.SystemExit(1)
}
bcs := rpc.NewBlockChainService(cfg.PrivateKey, cfg.RegistryAddress, client)
log.Trace(fmt.Sprintf("bcs=%#v", bcs))
transport, discovery := buildTransportAndDiscovery(cfg, pms, bcs)
raidenService := smartraiden.NewRaidenService(bcs, cfg.PrivateKey, transport, discovery, cfg)
if cfg.EnableMediationFee {
//do nothing.
} else {
raidenService.SetFeePolicy(&smartraiden.NoFeePolicy{})
}
go func() {
raidenService.Start()
}()
api := smartraiden.NewRaidenApi(raidenService)
regQuitHandler(api)
restful.Start(api, cfg)
return nil
}
func buildTransportAndDiscovery(cfg *params.Config, pms *network.PortMappedSocket, bcs *rpc.BlockChainService) (transport network.Transporter, discovery network.DiscoveryInterface) {
var err error
/*
uses ICE and does not work as a route node, which means this node runs on a mobile phone.
*/
if cfg.NetworkMode == params.ICEOnly && cfg.IgnoreMediatedNodeRequest {
cfg.NetworkMode = params.MixUDPICE
}
switch cfg.NetworkMode {
case params.NoNetwork:
discovery = network.NewDiscovery()
policy := network.NewTokenBucket(10, 1, time.Now)
transport = network.NewDummyTransport(pms.Ip, pms.Port, nil, policy)
return
case params.UDPOnly:
discovery = network.NewContractDiscovery(bcs.NodeAddress, cfg.DiscoveryAddress, bcs.Client, bcs.Auth)
policy := network.NewTokenBucket(10, 1, time.Now)
transport = network.NewUDPTransport(pms.Ip, pms.Port, pms.Conn, nil, policy)
case params.ICEOnly:
network.InitIceTransporter(cfg.Ice.TurnServer, cfg.Ice.TurnUser, cfg.Ice.TurnPassword, cfg.Ice.SignalServer)
transport, err = network.NewIceTransporter(bcs.PrivKey, utils.APex2(bcs.NodeAddress))
if err != nil {
panic(err)
}
discovery = network.NewIceHelperDiscovery()
case params.MixUDPICE:
network.InitIceTransporter(cfg.Ice.TurnServer, cfg.Ice.TurnUser, cfg.Ice.TurnPassword, cfg.Ice.SignalServer)
policy := network.NewTokenBucket(10, 1, time.Now)
transport, discovery = network.NewMixTranspoter(bcs.PrivKey, utils.APex2(bcs.NodeAddress), pms.Ip, pms.Port, pms.Conn, nil, policy)
}
return
}
func regQuitHandler(api *smartraiden.RaidenApi) {
go func() {
quitSignal := make(chan os.Signal, 1)
signal.Notify(quitSignal, os.Interrupt, os.Kill)
<-quitSignal
signal.Stop(quitSignal)
api.Stop()
utils.SystemExit(0)
}()
}
func promptAccount(adviceAddress common.Address, keystorePath, passwordfile string) (addr common.Address, keybin []byte) {
am := smartraiden.NewAccountManager(keystorePath)
if len(am.Accounts) == 0 {
log.Error(fmt.Sprintf("No Ethereum accounts found in the directory %s", keystorePath))
utils.SystemExit(1)
}
if !am.AddressInKeyStore(adviceAddress) {
if adviceAddress != utils.EmptyAddress {
log.Error(fmt.Sprintf("account %s could not be found on the sytstem. aborting...", adviceAddress))
utils.SystemExit(1)
}
shouldPromt := true
fmt.Println("The following accounts were found in your machine:")
for i := 0; i < len(am.Accounts); i++ {
fmt.Printf("%3d
|
"ice"- Use ice framework for nat punching
[default: ice]`,
Value: "ice",
},
|
random_line_split
|
evaluation_confidence_mask_sinmul.py
|
/aist/pspicker/training_plan"
EVAL_DIR="/home/aab10867zc/work/aist/pspicker/evaluation/confidence_mask_sinmul_easy"
#weighted by station
class MultiInferenceConfig(config.Config):
#multi std 0110
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
NAME="pspicker"
GPU_COUNT = 1
WINDOWS_PER_GPU = 1
DETECTION_MIN_CONFIDENCE=0.5
DETECTION_NMS_THRESHOLD=0.3
RPN_ANCHOR_SCALES=[1524, 2436,3648,4860,6072]
RPN_ANCHOR_RATIOS=[0.5,1,1.5,2]
DIVISION_SIZE=1028
WINDOW_STATION_DIM = 10
RPN_NMS_THRESHOLD = 0.7
FPN_CLASSIF_FC_LAYERS_SIZE = 1024
POOL_SIZE = [WINDOW_STATION_DIM,14]
MASK_POOL_SIZE = [WINDOW_STATION_DIM,28]
MASK_SHAPE = [WINDOW_STATION_DIM,56]
BACKBONE_CONV=False
RPN_CONV=False
MRCNN_CONV=False
class SingleInferenceConfig(config.Config):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
NAME="pspicker"
GPU_COUNT = 1
WINDOWS_PER_GPU = 10
DETECTION_MIN_CONFIDENCE=0
RPN_ANCHOR_SCALES=[64, 128, 256, 512, 1024]
RPN_ANCHOR_RATIOS=[0.5,1,2]
DIVISION_SIZE=1024
DETECTION_NMS_THRESHOLD=0.01
DETECTION_MIN_CONFIDENCE=0.7
CONV_STATION=False
#neighbour stations
#no substations
#easy mode (sorted by nearest station order)
class PSpickerDataset(MultiModel.Dataset):
"""Generates the pspicker synthetic dataset. The dataset consists of
seismic waveform windows of shape (stations,time_width,channels).
"""
def load_sac(self, sac_info,shape=[10,12000,3],add_sub=True):
"""Load a subset of the pspicker dataset.
dataset_dir: The root directory of the pspicker dataset.
subset: What to load (train, val, test)
return_coco: If True, returns the COCO object.
"""
# Add classes
self.add_class("pspicker", 1, "ps")
for window_id,main_event in enumerate(sac_info["windows"]):
path = [main_event["traces"][station] for station in main_event["stations"]]
if len(path)<shape[0]:
continue
self.add_window("pspicker",window_id=window_id,main_stations=main_event["stations"],
main_name=main_event["name"],shape=shape,path=path)
def load_streams(self,window_id):
info = self.window_info[window_id]
shape=info["shape"]
streams=[]
dist = []
for event in info["path"]:
paths=list(event.values())
traces=[]
for path in paths:
trace=read(path)[0]
traces.append(trace)
stream=Stream(traces=traces)
stream.detrend("constant")
stream.filter("highpass", freq=2.0)
dist.append(stream[0].stats.sac["dist"])
for i in range(len(stream)):
stream[i].data-=np.mean(stream[i].data)
stream[i].data/=np.std(stream[i].data)
streams.append(stream)
index=np.argsort(dist)[:10]
streams = [streams[i] for i in index]
return streams
def load_window(self, window_id):
"""Generate an image from the specs of the given image ID.
Typically this function loads the image from a file, but
in this case it generates the image on the fly from the
specs in image_info.
"""
streams = self.load_streams(window_id)
info=self.window_info[window_id]
shape=info["shape"]
np.random.seed(window_id)
window=np.random.normal(0.0,0.1,shape)
for station,stream in enumerate(streams):
channel_dict={"U":0,"N":1,"E":2}
for trace in stream:
channel=channel_dict[trace.stats.channel]
npts=min(trace.stats.npts,shape[1])
window[station,:npts,channel]=trace.data
if self.shuffle:
random.seed(window_id)
random_index=random.sample(range(shape[0]),shape[0])
window=window[random_index]
return window
def window_reference(self, window_id):
"""Return the shapes data of the image."""
info = self.window_info[window_id]
if info["source"] == "pspikcer":
|
else:
super(self.__class__).window_reference(self, window_id)
def load_mask(self, window_id):
"""Generate instance masks for shapes of the given image ID.
"""
streams = self.load_streams(window_id)
info=self.window_info[window_id]
shape=info["shape"]
mask = np.zeros([shape[0], shape[1], 1], dtype=np.uint8)
for stream_id,stream in enumerate(streams):
for trace in stream:
if trace.stats.channel=="U":
start=int(round(trace.stats.sac["a"]*100))
end=int(round(trace.stats.sac["t0"]*100))
else:
continue
mask[stream_id,start:end+1,0]= 1
class_ids = np.ones([1])
if self.shuffle:
random.seed(window_id)
random_index=random.sample(range(shape[0]),shape[0])
mask[:,:,0]=mask[:,:,0][random_index]
streams=[streams[i] for i in random_index]
station=np.zeros([shape[0],shape[0],2])
for i,j in itertools.product(range(shape[0]),range(shape[0])):
station[i,j]=[streams[j][0].stats.sac["stla"]/streams[i][0].stats.sac["stla"],streams[j][0].stats.sac["stlo"]/streams[i][0].stats.sac["stlo"]]
return mask.astype(np.bool), class_ids.astype(np.int32),station.astype(np.float32)
def extract_bboxes(mask):
"""Compute bounding boxes from masks.
mask: [stations, time_width, num_instances]. Mask pixels are either 1 or 0.
Returns: bbox array [num_instances, stations, (x1, x2)].
"""
boxes = np.zeros([mask.shape[-1],mask.shape[0], 2], dtype=np.int32)
for i in range(mask.shape[-1]):
# Bounding box.
for j in range(mask.shape[0]):
m = mask[j, :, i]
horizontal_indicies = np.where(m)[0]
if horizontal_indicies.shape[0]:
x1, x2 = horizontal_indicies[[0, -1]]
# x2 should not be part of the box. Increment by 1.
x2 += 1
else:
# No mask for this instance. Might happen due to
# resizing or cropping. Set bbox to zeros
x1, x2 = 0, 0
boxes[i,j] = np.array([x1, x2])
return boxes.astype(np.int32)
def compute_overlap_rate(box, boxes):
"""Calculates overlap rate of the given box with the array of the given boxes.
box: 1D vector [x1, x2]
boxes: [boxes_count, (x1, x2)]
"""
# Calculate intersection areas
x1 = np.maximum(box[0], boxes[:, 0])
x2 = np.minimum(box[1], boxes[:, 1])
intersection = np.maximum(x2 - x1, 0)
boxes_area = boxes[:, 1] - boxes[:, 0]
overlap = intersection/boxes_area
return overlap
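# Illustrative sketch only (not part of the original evaluation script): a tiny,
# self-contained check of how extract_bboxes and compute_overlap_rate above fit
# together. The shapes used (2 stations, 10 samples, 1 instance) are made up here.
def _overlap_rate_example():
    toy_mask = np.zeros([2, 10, 1], dtype=np.uint8)
    toy_mask[0, 2:5, 0] = 1   # pick on station 0 spans samples 2..4
    toy_mask[1, 4:8, 0] = 1   # pick on station 1 spans samples 4..7
    toy_boxes = extract_bboxes(toy_mask)   # shape (1, 2, 2) -> [[[2, 5], [4, 8]]]
    # Overlap of a detection [3, 6] with each ground-truth box:
    # 2 shared samples over widths 3 and 4 -> [0.667, 0.5]
    return compute_overlap_rate(np.array([3, 6]), toy_boxes[0])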
def default(obj):
if type(obj).__module__ == np.__name__:
if isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj.item()
raise TypeError('Unknown type:', type(obj))
def myconverter(o):
if isinstance(o, datetime.datetime):
return o.__str__()
elif type(o).__module__ == np.__name__:
return o.__str__()
else:
return o
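# Illustrative sketch only: how the two converters above are typically passed to
# json.dumps when writing evaluation results that mix numpy values and datetimes.
# The local `import json` and the sample dict are assumptions made for this example.
def _dump_results_example():
    import json
    import datetime as _dt
    sample = {
        "picked_at": _dt.datetime(2020, 1, 1, 12, 0, 0),
        "overlap": np.float32(0.75),
        "boxes": np.array([[2, 5], [4, 8]]),
    }
    # `default` turns numpy arrays/scalars into plain Python types (it rejects datetimes) ...
    as_json = json.dumps({k: v for k, v in sample.items() if k != "picked_at"}, default=default)
    # ... while `myconverter` also stringifies datetimes (and numpy values) wholesale.
    as_json_with_time = json.dumps(sample, default=myconverter)
    return as_json, as_json_with_time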
class Evaluation_confidence_mask():
def __init__(self,single_model,multi_model,dataset,overlap_threshold=0.3):
self.single_model=single_model
self.multi_model=multi_model
self.dataset=dataset
self.overlap_threshold=overlap_threshold
def evaluate(self,window_id=None):
test_results
|
return info["station"]
|
conditional_block
|
evaluation_confidence_mask_sinmul.py
|
/work/aist/pspicker/training_plan"
EVAL_DIR="/home/aab10867zc/work/aist/pspicker/evaluation/confidence_mask_sinmul_easy"
#weighted by station
class MultiInferenceConfig(config.Config):
#multi std 0110
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
NAME="pspicker"
GPU_COUNT = 1
WINDOWS_PER_GPU = 1
DETECTION_MIN_CONFIDENCE=0.5
DETECTION_NMS_THRESHOLD=0.3
RPN_ANCHOR_SCALES=[1524, 2436,3648,4860,6072]
RPN_ANCHOR_RATIOS=[0.5,1,1.5,2]
DIVISION_SIZE=1028
WINDOW_STATION_DIM = 10
RPN_NMS_THRESHOLD = 0.7
FPN_CLASSIF_FC_LAYERS_SIZE = 1024
POOL_SIZE = [WINDOW_STATION_DIM,14]
MASK_POOL_SIZE = [WINDOW_STATION_DIM,28]
MASK_SHAPE = [WINDOW_STATION_DIM,56]
BACKBONE_CONV=False
RPN_CONV=False
MRCNN_CONV=False
class SingleInferenceConfig(config.Config):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
NAME="pspicker"
GPU_COUNT = 1
WINDOWS_PER_GPU = 10
DETECTION_MIN_CONFIDENCE=0
RPN_ANCHOR_SCALES=[64, 128, 256, 512, 1024]
RPN_ANCHOR_RATIOS=[0.5,1,2]
DIVISION_SIZE=1024
DETECTION_NMS_THRESHOLD=0.01
DETECTION_MIN_CONFIDENCE=0.7
CONV_STATION=False
#neighbour stations
#no substations
#easy mode (sorted by nearest station order)
class PSpickerDataset(MultiModel.Dataset):
"""Generates the pspicker synthetic dataset. The dataset consists of
seismic waveform windows of shape (stations,time_width,channels).
"""
def load_sac(self, sac_info,shape=[10,12000,3],add_sub=True):
"""Load a subset of the pspicker dataset.
dataset_dir: The root directory of the pspicker dataset.
subset: What to load (train, val, test)
return_coco: If True, returns the COCO object.
"""
# Add classes
self.add_class("pspicker", 1, "ps")
for window_id,main_event in enumerate(sac_info["windows"]):
path = [main_event["traces"][station] for station in main_event["stations"]]
if len(path)<shape[0]:
continue
self.add_window("pspicker",window_id=window_id,main_stations=main_event["stations"],
main_name=main_event["name"],shape=shape,path=path)
def load_streams(self,window_id):
info = self.window_info[window_id]
shape=info["shape"]
streams=[]
dist = []
for event in info["path"]:
paths=list(event.values())
traces=[]
for path in paths:
trace=read(path)[0]
traces.append(trace)
stream=Stream(traces=traces)
stream.detrend("constant")
stream.filter("highpass", freq=2.0)
dist.append(stream[0].stats.sac["dist"])
for i in range(len(stream)):
stream[i].data-=np.mean(stream[i].data)
stream[i].data/=np.std(stream[i].data)
streams.append(stream)
index=np.argsort(dist)[:10]
streams = [streams[i] for i in index]
return streams
def load_window(self, window_id):
"""Generate an image from the specs of the given image ID.
Typically this function loads the image from a file, but
in this case it generates the image on the fly from the
specs in image_info.
"""
streams = self.load_streams(window_id)
info=self.window_info[window_id]
shape=info["shape"]
np.random.seed(window_id)
window=np.random.normal(0.0,0.1,shape)
for station,stream in enumerate(streams):
channel_dict={"U":0,"N":1,"E":2}
for trace in stream:
channel=channel_dict[trace.stats.channel]
npts=min(trace.stats.npts,shape[1])
window[station,:npts,channel]=trace.data
if self.shuffle:
random.seed(window_id)
random_index=random.sample(range(shape[0]),shape[0])
window=window[random_index]
return window
def window_reference(self, window_id):
"""Return the shapes data of the image."""
info = self.window_info[window_id]
if info["source"] == "pspikcer":
return info["station"]
else:
super(self.__class__).window_reference(self, window_id)
def load_mask(self, window_id):
"""Generate instance masks for shapes of the given image ID.
"""
streams = self.load_streams(window_id)
info=self.window_info[window_id]
shape=info["shape"]
mask = np.zeros([shape[0], shape[1], 1], dtype=np.uint8)
for stream_id,stream in enumerate(streams):
for trace in stream:
if trace.stats.channel=="U":
start=int(round(trace.stats.sac["a"]*100))
end=int(round(trace.stats.sac["t0"]*100))
else:
continue
mask[stream_id,start:end+1,0]= 1
class_ids = np.ones([1])
if self.shuffle:
random.seed(window_id)
random_index=random.sample(range(shape[0]),shape[0])
mask[:,:,0]=mask[:,:,0][random_index]
streams=[streams[i] for i in random_index]
station=np.zeros([shape[0],shape[0],2])
for i,j in itertools.product(range(shape[0]),range(shape[0])):
station[i,j]=[streams[j][0].stats.sac["stla"]/streams[i][0].stats.sac["stla"],streams[j][0].stats.sac["stlo"]/streams[i][0].stats.sac["stlo"]]
return mask.astype(np.bool), class_ids.astype(np.int32),station.astype(np.float32)
def extract_bboxes(mask):
"""Compute bounding boxes from masks.
mask: [stations, time_width, num_instances]. Mask pixels are either 1 or 0.
Returns: bbox array [num_instances, stations, (x1, x2)].
"""
boxes = np.zeros([mask.shape[-1],mask.shape[0], 2], dtype=np.int32)
for i in range(mask.shape[-1]):
# Bounding box.
for j in range(mask.shape[0]):
m = mask[j, :, i]
horizontal_indicies = np.where(m)[0]
if horizontal_indicies.shape[0]:
x1, x2 = horizontal_indicies[[0, -1]]
# x2 should not be part of the box. Increment by 1.
x2 += 1
else:
# No mask for this instance. Might happen due to
# resizing or cropping. Set bbox to zeros
x1, x2 = 0, 0
boxes[i,j] = np.array([x1, x2])
return boxes.astype(np.int32)
def compute_overlap_rate(box, boxes):
"""Calculates overlap rate of the given box with the array of the given boxes.
box: 1D vector [x1, x2]
boxes: [boxes_count, (x1, x2)]
"""
# Calculate intersection areas
x1 = np.maximum(box[0], boxes[:, 0])
x2 = np.minimum(box[1], boxes[:, 1])
intersection = np.maximum(x2 - x1, 0)
boxes_area = boxes[:, 1] - boxes[:, 0]
overlap = intersection/boxes_area
return overlap
def default(obj):
if type(obj).__module__ == np.__name__:
if isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj.item()
raise TypeError('Unknown type:', type(obj))
def myconverter(o):
if isinstance(o, datetime.datetime):
|
else:
return o
class Evaluation_confidence_mask():
def __init__(self,single_model,multi_model,dataset,overlap_threshold=0.3):
self.single_model=single_model
self.multi_model=multi_model
self.dataset=dataset
self.overlap_threshold=overlap_threshold
def evaluate(self,window_id=None):
test_results
|
return o.__str__()
elif type(o).__module__ == np.__name__:
return o.__str__()
|
random_line_split
|
evaluation_confidence_mask_sinmul.py
|
/aist/pspicker/training_plan"
EVAL_DIR="/home/aab10867zc/work/aist/pspicker/evaluation/confidence_mask_sinmul_easy"
#weighted by station
class MultiInferenceConfig(config.Config):
#multi std 0110
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
NAME="pspicker"
GPU_COUNT = 1
WINDOWS_PER_GPU = 1
DETECTION_MIN_CONFIDENCE=0.5
DETECTION_NMS_THRESHOLD=0.3
RPN_ANCHOR_SCALES=[1524, 2436,3648,4860,6072]
RPN_ANCHOR_RATIOS=[0.5,1,1.5,2]
DIVISION_SIZE=1028
WINDOW_STATION_DIM = 10
RPN_NMS_THRESHOLD = 0.7
FPN_CLASSIF_FC_LAYERS_SIZE = 1024
POOL_SIZE = [WINDOW_STATION_DIM,14]
MASK_POOL_SIZE = [WINDOW_STATION_DIM,28]
MASK_SHAPE = [WINDOW_STATION_DIM,56]
BACKBONE_CONV=False
RPN_CONV=False
MRCNN_CONV=False
class SingleInferenceConfig(config.Config):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
NAME="pspicker"
GPU_COUNT = 1
WINDOWS_PER_GPU = 10
DETECTION_MIN_CONFIDENCE=0
RPN_ANCHOR_SCALES=[64, 128, 256, 512, 1024]
RPN_ANCHOR_RATIOS=[0.5,1,2]
DIVISION_SIZE=1024
DETECTION_NMS_THRESHOLD=0.01
DETECTION_MIN_CONFIDENCE=0.7
CONV_STATION=False
#neighbour stations
#no substations
#easy mode (sorted by nearest station order)
class PSpickerDataset(MultiModel.Dataset):
"""Generates the pspicker synthetic dataset. The dataset consists of
seismic waveform windows of shape (stations,time_width,channels).
"""
def load_sac(self, sac_info,shape=[10,12000,3],add_sub=True):
"""Load a subset of the pspicker dataset.
dataset_dir: The root directory of the pspicker dataset.
subset: What to load (train, val, test)
return_coco: If True, returns the COCO object.
"""
# Add classes
self.add_class("pspicker", 1, "ps")
for window_id,main_event in enumerate(sac_info["windows"]):
path = [main_event["traces"][station] for station in main_event["stations"]]
if len(path)<shape[0]:
continue
self.add_window("pspicker",window_id=window_id,main_stations=main_event["stations"],
main_name=main_event["name"],shape=shape,path=path)
def load_streams(self,window_id):
|
streams.append(stream)
index=np.argsort(dist)[:10]
streams = [streams[i] for i in index]
return streams
def load_window(self, window_id):
"""Generate an image from the specs of the given image ID.
Typically this function loads the image from a file, but
in this case it generates the image on the fly from the
specs in image_info.
"""
streams = self.load_streams(window_id)
info=self.window_info[window_id]
shape=info["shape"]
np.random.seed(window_id)
window=np.random.normal(0.0,0.1,shape)
for station,stream in enumerate(streams):
channel_dict={"U":0,"N":1,"E":2}
for trace in stream:
channel=channel_dict[trace.stats.channel]
npts=min(trace.stats.npts,shape[1])
window[station,:npts,channel]=trace.data
if self.shuffle:
random.seed(window_id)
random_index=random.sample(range(shape[0]),shape[0])
window=window[random_index]
return window
def window_reference(self, window_id):
"""Return the shapes data of the image."""
info = self.window_info[window_id]
if info["source"] == "pspikcer":
return info["station"]
else:
super(self.__class__).window_reference(self, window_id)
def load_mask(self, window_id):
"""Generate instance masks for shapes of the given image ID.
"""
streams = self.load_streams(window_id)
info=self.window_info[window_id]
shape=info["shape"]
mask = np.zeros([shape[0], shape[1], 1], dtype=np.uint8)
for stream_id,stream in enumerate(streams):
for trace in stream:
if trace.stats.channel=="U":
start=int(round(trace.stats.sac["a"]*100))
end=int(round(trace.stats.sac["t0"]*100))
else:
continue
mask[stream_id,start:end+1,0]= 1
class_ids = np.ones([1])
if self.shuffle:
random.seed(window_id)
random_index=random.sample(range(shape[0]),shape[0])
mask[:,:,0]=mask[:,:,0][random_index]
streams=[streams[i] for i in random_index]
station=np.zeros([shape[0],shape[0],2])
for i,j in itertools.product(range(shape[0]),range(shape[0])):
station[i,j]=[streams[j][0].stats.sac["stla"]/streams[i][0].stats.sac["stla"],streams[j][0].stats.sac["stlo"]/streams[i][0].stats.sac["stlo"]]
return mask.astype(np.bool), class_ids.astype(np.int32),station.astype(np.float32)
def extract_bboxes(mask):
"""Compute bounding boxes from masks.
mask: [stations, time_width, num_instances]. Mask pixels are either 1 or 0.
Returns: bbox array [num_instances, stations, (x1, x2)].
"""
boxes = np.zeros([mask.shape[-1],mask.shape[0], 2], dtype=np.int32)
for i in range(mask.shape[-1]):
# Bounding box.
for j in range(mask.shape[0]):
m = mask[j, :, i]
horizontal_indicies = np.where(m)[0]
if horizontal_indicies.shape[0]:
x1, x2 = horizontal_indicies[[0, -1]]
# x2 should not be part of the box. Increment by 1.
x2 += 1
else:
# No mask for this instance. Might happen due to
# resizing or cropping. Set bbox to zeros
x1, x2 = 0, 0
boxes[i,j] = np.array([x1, x2])
return boxes.astype(np.int32)
def compute_overlap_rate(box, boxes):
"""Calculates overlap rate of the given box with the array of the given boxes.
box: 1D vector [x1, x2]
boxes: [boxes_count, (x1, x2)]
"""
# Calculate intersection areas
x1 = np.maximum(box[0], boxes[:, 0])
x2 = np.minimum(box[1], boxes[:, 1])
intersection = np.maximum(x2 - x1, 0)
boxes_area = boxes[:, 1] - boxes[:, 0]
overlap = intersection/boxes_area
return overlap
def default(obj):
if type(obj).__module__ == np.__name__:
if isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj.item()
raise TypeError('Unknown type:', type(obj))
def myconverter(o):
if isinstance(o, datetime.datetime):
return o.__str__()
elif type(o).__module__ == np.__name__:
return o.__str__()
else:
return o
class Evaluation_confidence_mask():
def __init__(self,single_model,multi_model,dataset,overlap_threshold=0.3):
self.single_model=single_model
self.multi_model=multi_model
self.dataset=dataset
self.overlap_threshold=overlap_threshold
def evaluate(self,window_id=None):
test_results
|
info = self.window_info[window_id]
shape=info["shape"]
streams=[]
dist = []
for event in info["path"]:
paths=list(event.values())
traces=[]
for path in paths:
trace=read(path)[0]
traces.append(trace)
stream=Stream(traces=traces)
stream.detrend("constant")
stream.filter("highpass", freq=2.0)
dist.append(stream[0].stats.sac["dist"])
for i in range(len(stream)):
stream[i].data-=np.mean(stream[i].data)
stream[i].data/=np.std(stream[i].data)
|
identifier_body
|
evaluation_confidence_mask_sinmul.py
|
/aist/pspicker/training_plan"
EVAL_DIR="/home/aab10867zc/work/aist/pspicker/evaluation/confidence_mask_sinmul_easy"
#weighted by station
class MultiInferenceConfig(config.Config):
#multi std 0110
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
NAME="pspicker"
GPU_COUNT = 1
WINDOWS_PER_GPU = 1
DETECTION_MIN_CONFIDENCE=0.5
DETECTION_NMS_THRESHOLD=0.3
RPN_ANCHOR_SCALES=[1524, 2436,3648,4860,6072]
RPN_ANCHOR_RATIOS=[0.5,1,1.5,2]
DIVISION_SIZE=1028
WINDOW_STATION_DIM = 10
RPN_NMS_THRESHOLD = 0.7
FPN_CLASSIF_FC_LAYERS_SIZE = 1024
POOL_SIZE = [WINDOW_STATION_DIM,14]
MASK_POOL_SIZE = [WINDOW_STATION_DIM,28]
MASK_SHAPE = [WINDOW_STATION_DIM,56]
BACKBONE_CONV=False
RPN_CONV=False
MRCNN_CONV=False
class SingleInferenceConfig(config.Config):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
NAME="pspicker"
GPU_COUNT = 1
WINDOWS_PER_GPU = 10
DETECTION_MIN_CONFIDENCE=0
RPN_ANCHOR_SCALES=[64, 128, 256, 512, 1024]
RPN_ANCHOR_RATIOS=[0.5,1,2]
DIVISION_SIZE=1024
DETECTION_NMS_THRESHOLD=0.01
DETECTION_MIN_CONFIDENCE=0.7
CONV_STATION=False
#neighbour stations
#no substations
#easy mode (sorted by nearest station order)
class PSpickerDataset(MultiModel.Dataset):
"""Generates the pspicker synthetic dataset. The dataset consists of
seismic waveform windows of shape (stations,time_width,channels).
"""
def load_sac(self, sac_info,shape=[10,12000,3],add_sub=True):
"""Load a subset of the pspicker dataset.
dataset_dir: The root directory of the pspicker dataset.
subset: What to load (train, val, test)
return_coco: If True, returns the COCO object.
"""
# Add classes
self.add_class("pspicker", 1, "ps")
for window_id,main_event in enumerate(sac_info["windows"]):
path = [main_event["traces"][station] for station in main_event["stations"]]
if len(path)<shape[0]:
continue
self.add_window("pspicker",window_id=window_id,main_stations=main_event["stations"],
main_name=main_event["name"],shape=shape,path=path)
def load_streams(self,window_id):
info = self.window_info[window_id]
shape=info["shape"]
streams=[]
dist = []
for event in info["path"]:
paths=list(event.values())
traces=[]
for path in paths:
trace=read(path)[0]
traces.append(trace)
stream=Stream(traces=traces)
stream.detrend("constant")
stream.filter("highpass", freq=2.0)
dist.append(stream[0].stats.sac["dist"])
for i in range(len(stream)):
stream[i].data-=np.mean(stream[i].data)
stream[i].data/=np.std(stream[i].data)
streams.append(stream)
index=np.argsort(dist)[:10]
streams = [streams[i] for i in index]
return streams
def load_window(self, window_id):
"""Generate an image from the specs of the given image ID.
Typically this function loads the image from a file, but
in this case it generates the image on the fly from the
specs in image_info.
"""
streams = self.load_streams(window_id)
info=self.window_info[window_id]
shape=info["shape"]
np.random.seed(window_id)
window=np.random.normal(0.0,0.1,shape)
for station,stream in enumerate(streams):
channel_dict={"U":0,"N":1,"E":2}
for trace in stream:
channel=channel_dict[trace.stats.channel]
npts=min(trace.stats.npts,shape[1])
window[station,:npts,channel]=trace.data
if self.shuffle:
random.seed(window_id)
random_index=random.sample(range(shape[0]),shape[0])
window=window[random_index]
return window
def window_reference(self, window_id):
"""Return the shapes data of the image."""
info = self.window_info[window_id]
if info["source"] == "pspikcer":
return info["station"]
else:
super(self.__class__).window_reference(self, window_id)
def load_mask(self, window_id):
"""Generate instance masks for shapes of the given image ID.
"""
streams = self.load_streams(window_id)
info=self.window_info[window_id]
shape=info["shape"]
mask = np.zeros([shape[0], shape[1], 1], dtype=np.uint8)
for stream_id,stream in enumerate(streams):
for trace in stream:
if trace.stats.channel=="U":
start=int(round(trace.stats.sac["a"]*100))
end=int(round(trace.stats.sac["t0"]*100))
else:
continue
mask[stream_id,start:end+1,0]= 1
class_ids = np.ones([1])
if self.shuffle:
random.seed(window_id)
random_index=random.sample(range(shape[0]),shape[0])
mask[:,:,0]=mask[:,:,0][random_index]
streams=[streams[i] for i in random_index]
station=np.zeros([shape[0],shape[0],2])
for i,j in itertools.product(range(shape[0]),range(shape[0])):
station[i,j]=[streams[j][0].stats.sac["stla"]/streams[i][0].stats.sac["stla"],streams[j][0].stats.sac["stlo"]/streams[i][0].stats.sac["stlo"]]
return mask.astype(np.bool), class_ids.astype(np.int32),station.astype(np.float32)
def extract_bboxes(mask):
"""Compute bounding boxes from masks.
mask: [stations, time_width, num_instances]. Mask pixels are either 1 or 0.
Returns: bbox array [num_instances, stations, (x1, x2)].
"""
boxes = np.zeros([mask.shape[-1],mask.shape[0], 2], dtype=np.int32)
for i in range(mask.shape[-1]):
# Bounding box.
for j in range(mask.shape[0]):
m = mask[j, :, i]
horizontal_indicies = np.where(m)[0]
if horizontal_indicies.shape[0]:
x1, x2 = horizontal_indicies[[0, -1]]
# x2 should not be part of the box. Increment by 1.
x2 += 1
else:
# No mask for this instance. Might happen due to
# resizing or cropping. Set bbox to zeros
x1, x2 = 0, 0
boxes[i,j] = np.array([x1, x2])
return boxes.astype(np.int32)
def compute_overlap_rate(box, boxes):
"""Calculates overlap rate of the given box with the array of the given boxes.
box: 1D vector [x1, x2]
boxes: [boxes_count, (x1, x2)]
"""
# Calculate intersection areas
x1 = np.maximum(box[0], boxes[:, 0])
x2 = np.minimum(box[1], boxes[:, 1])
intersection = np.maximum(x2 - x1, 0)
boxes_area = boxes[:, 1] - boxes[:, 0]
overlap = intersection/boxes_area
return overlap
def default(obj):
if type(obj).__module__ == np.__name__:
if isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj.item()
raise TypeError('Unknown type:', type(obj))
def myconverter(o):
if isinstance(o, datetime.datetime):
return o.__str__()
elif type(o).__module__ == np.__name__:
return o.__str__()
else:
return o
class Evaluation_confidence_mask():
def
|
(self,single_model,multi_model,dataset,overlap_threshold=0.3):
self.single_model=single_model
self.multi_model=multi_model
self.dataset=dataset
self.overlap_threshold=overlap_threshold
def evaluate(self,window_id=None):
test_results
|
__init__
|
identifier_name
|
lib.rs
|
6-F1idHcFN3Mc6-qXDHj-IeV67w1ngQrk8M12v1UgS2sQnqaTxdFpoYKOoGH-JgwxojgF7g5dvIxamd6fWC2sSWuumpAcr9TZKwES5r5Fcq2U",
"https://www.reddit.com/r/DnD/comments/bzi1oq/art_two_dragons_and_adopted_kobold_son/?")
),
},
CleanInformation {
domain: "www.google.com",
path: "/url",
querykey: "url",
example: Some(
Example::new(
"https://www.google.com/url?q=https://meet.lync.com/skydrive3m-mmm/random/random&sa=D&ust=1560944361951000&usg=AOvVaw2hCRSIX_WKpRFxeczL2S0g",
"https://meet.lync.com/skydrive3m-mmm/random/random?")
),
},
CleanInformation {
domain: "www.google.com",
path: "/url",
querykey: "q",
example: None
},
CleanInformation {
domain: "external.fbma2-1.fna.fbcdn.net",
path: "/safe_image.php",
querykey: "url",
example: Some(
Example::new(
"https://external.fbma2-1.fna.fbcdn.net/safe_image.php?d=AQBOrzUTFofcxXN7&w=960&h=960&url=https%3A%2F%2Fi.redd.it%2F4wao306sl9931.jpg&_nc_hash=AQDTUf7UFz8PtUsf",
"https://i.redd.it/4wao306sl9931.jpg?"
)
),
},
CleanInformation {
domain: "www.youtube.com",
path: "/redirect",
querykey: "q",
example: Some(
Example::new(
"https://www.youtube.com/redirect?event=live_chat&redir_token=QUFFLUhqblp5SDEzMjVCbERUaVFEVkhXdjNuTjdiekZkUXxBQ3Jtc0tuMWtxcjlrbGhyZWljMzl4dkdNNjkyNUt2NE1sOUV4cjBRcm5aeEF3RUZjcDF6dkJ1RHQ2LVVIeERnQzJLbVZZT0RxTFhYeWRsODRwbnZ2dWI1Um50WU1rcTgzR2lMVzhiamdQOFdpNWZFVUJXaXhGdw&q=https%3A%2F%2Fforms.gle%2FQDyXJVu6x24UYErEA",
"https://forms.gle/QDyXJVu6x24UYErEA?"
)
),
},
CleanInformation {
domain: "eur02.safelinks.protection.outlook.com",
path: "/",
querykey: "url",
example: Some(
Example::new(
"https://eur02.safelinks.protection.outlook.com/?url=http%3A%2F%2Fwww.regitart.se%2FDefault.aspx&data=04%7C01%7C%7C7a84ea493a30461aacd508d8d7df66dc%7C5453408ba6cd4c1e8b1018b500fb544e%7C1%7C0%7C637496701799123652%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&sdata=8nhnhqaKZveiKxfB72T%2B%2BDHr8ZJvedKJ5oHUAhwP8DY%3D&reserved=0",
"http://www.regitart.se/Default.aspx?")
),
},
]
};
pub struct UrlCleaner<'a> {
/// Information on how to obtain the link from a tracking link
cleaning_info: Vec<CleanInformation<'a>>,
/// list of known tracking query keys
tracker_query_keys: Vec<String>,
}
impl<'a> Default for UrlCleaner<'a> {
fn default() -> Self {
let cleaning_info = DOMAINS_TO_CLEAN.into();
let tracker_query_keys = KEYS_TO_CLEAN.iter().map(|s| s.to_string()).collect();
Self {
cleaning_info,
tracker_query_keys,
}
}
}
impl<'a> UrlCleaner<'a> {
// Remove the click-id and similar query parameters that can sometimes come hidden inside links
fn clean_query(&self, url: &url::Url) -> (url::Url, bool) {
let pairs = url.query_pairs();
let mut newurl = url.clone();
newurl.query_pairs_mut().clear();
let mut modified = false;
for (key, value) in pairs {
if self.tracker_query_keys.contains(&key.as_ref().to_string()) {
println!("key found: {:?}", key);
modified = true;
} else {
newurl.query_pairs_mut().append_pair(&key, &value);
}
}
(newurl, modified)
}
/// Try to extract the destination url from the link if possible, and also try to remove any
/// click-id query parameters that are present. If the content has been modified, return Some;
/// if the content is untouched, return None.
pub fn clean_url(&self, url: &url::Url) -> Option<String> {
if let Some(domain) = url.domain() {
// Check all rules that match this domain, but return on the first clean
for domaininfo in self.cleaning_info.iter().filter(|&x| x.domain == domain) {
if domaininfo.path == url.path() {
println!("{}", url);
println!("Disgusting url, cleaning");
let pairs = url.query_pairs();
// First search all the queries for the link querykey
for (key, value) in pairs {
if key.as_ref() == domaininfo.querykey {
if let Ok(url) = Url::parse(&value) {
// Before returning, remove any click identifier as well
return Some(self.clean_query(&url).0.to_string());
}
}
}
}
}
//println!("Url is clean");
// Check if there is a click identifier, and return if there is one
let (url, modified) = self.clean_query(&url);
if modified {
return Some(url.to_string());
}
}
None
}
pub fn try_clean_string(&self, url_string: String) -> String {
if let Ok(parsed) = Url::parse(&url_string) {
if let Some(clean) = self.clean_url(&parsed) {
return clean;
}
}
url_string
}
}
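// Illustrative usage sketch (not part of the original crate). It assumes that a
// click-id key such as "fbclid" is listed in KEYS_TO_CLEAN, which is defined
// earlier in this file; the example URL itself is made up.
#[allow(dead_code)]
fn usage_sketch() {
let cleaner = UrlCleaner::default();
// try_clean_string falls back to the input when parsing fails or nothing changes.
let cleaned = cleaner.try_clean_string(
"https://example.com/page?fbclid=abc123&id=42".to_string(),
);
// Expected: the fbclid pair is dropped by clean_query while id=42 is kept.
println!("cleaned: {}", cleaned);
}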
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn clean_facebook() {
let url_dirty ="https://l.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DuBKajwUM5v4%26fbclid%3DIwAR0fqKqv6CeHBG0xbnI7KyYNSkFpGpVpfSynXjFXBPFQcErCqLRLgVbfYYw&h=AT01YUWDOjvNW9S09aDSRAZQZk6L55-JZGswiFa1SY6c8_mGQC0VMlNf4HXZhjdJH4PuqdNHctfOmMqISuBRBD10xZ_gIKCnwBGkAV3mrNdTtb7t6QMgyD0GzH3PSCPHmmZGyMBHCRjZ";
let url_clean = "https://www.youtube.com/watch?v=uBKajwUM5v4";
|
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_facebook2() {
let url_dirty ="https://l.facebook.com/l.php?u=https%3A%2F%2Fwww.banggood.com%2FXT30-V3-ParaBoard-Parallel-Charging-Board-Banana-Plug-For-iMax-B6-Charger-p-1235388.html%3Fp%3DJQ191716342021201711%
|
let parsed = Url::parse(&url_dirty).unwrap();
|
random_line_split
|
lib.rs
|
-F1idHcFN3Mc6-qXDHj-IeV67w1ngQrk8M12v1UgS2sQnqaTxdFpoYKOoGH-JgwxojgF7g5dvIxamd6fWC2sSWuumpAcr9TZKwES5r5Fcq2U",
"https://www.reddit.com/r/DnD/comments/bzi1oq/art_two_dragons_and_adopted_kobold_son/?")
),
},
CleanInformation {
domain: "www.google.com",
path: "/url",
querykey: "url",
example: Some(
Example::new(
"https://www.google.com/url?q=https://meet.lync.com/skydrive3m-mmm/random/random&sa=D&ust=1560944361951000&usg=AOvVaw2hCRSIX_WKpRFxeczL2S0g",
"https://meet.lync.com/skydrive3m-mmm/random/random?")
),
},
CleanInformation {
domain: "www.google.com",
path: "/url",
querykey: "q",
example: None
},
CleanInformation {
domain: "external.fbma2-1.fna.fbcdn.net",
path: "/safe_image.php",
querykey: "url",
example: Some(
Example::new(
"https://external.fbma2-1.fna.fbcdn.net/safe_image.php?d=AQBOrzUTFofcxXN7&w=960&h=960&url=https%3A%2F%2Fi.redd.it%2F4wao306sl9931.jpg&_nc_hash=AQDTUf7UFz8PtUsf",
"https://i.redd.it/4wao306sl9931.jpg?"
)
),
},
CleanInformation {
domain: "www.youtube.com",
path: "/redirect",
querykey: "q",
example: Some(
Example::new(
"https://www.youtube.com/redirect?event=live_chat&redir_token=QUFFLUhqblp5SDEzMjVCbERUaVFEVkhXdjNuTjdiekZkUXxBQ3Jtc0tuMWtxcjlrbGhyZWljMzl4dkdNNjkyNUt2NE1sOUV4cjBRcm5aeEF3RUZjcDF6dkJ1RHQ2LVVIeERnQzJLbVZZT0RxTFhYeWRsODRwbnZ2dWI1Um50WU1rcTgzR2lMVzhiamdQOFdpNWZFVUJXaXhGdw&q=https%3A%2F%2Fforms.gle%2FQDyXJVu6x24UYErEA",
"https://forms.gle/QDyXJVu6x24UYErEA?"
)
),
},
CleanInformation {
domain: "eur02.safelinks.protection.outlook.com",
path: "/",
querykey: "url",
example: Some(
Example::new(
"https://eur02.safelinks.protection.outlook.com/?url=http%3A%2F%2Fwww.regitart.se%2FDefault.aspx&data=04%7C01%7C%7C7a84ea493a30461aacd508d8d7df66dc%7C5453408ba6cd4c1e8b1018b500fb544e%7C1%7C0%7C637496701799123652%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&sdata=8nhnhqaKZveiKxfB72T%2B%2BDHr8ZJvedKJ5oHUAhwP8DY%3D&reserved=0",
"http://www.regitart.se/Default.aspx?")
),
},
]
};
pub struct UrlCleaner<'a> {
/// Information on how to obtain the link from a tracking link
cleaning_info: Vec<CleanInformation<'a>>,
/// list of known tracking query keys
tracker_query_keys: Vec<String>,
}
impl<'a> Default for UrlCleaner<'a> {
fn default() -> Self {
let cleaning_info = DOMAINS_TO_CLEAN.into();
let tracker_query_keys = KEYS_TO_CLEAN.iter().map(|s| s.to_string()).collect();
Self {
cleaning_info,
tracker_query_keys,
}
}
}
impl<'a> UrlCleaner<'a> {
// Remove the click-id and similar query parameters that can sometimes come hidden inside links
fn clean_query(&self, url: &url::Url) -> (url::Url, bool) {
let pairs = url.query_pairs();
let mut newurl = url.clone();
newurl.query_pairs_mut().clear();
let mut modified = false;
for (key, value) in pairs {
if self.tracker_query_keys.contains(&key.as_ref().to_string()) {
println!("key found: {:?}", key);
modified = true;
} else {
newurl.query_pairs_mut().append_pair(&key, &value);
}
}
(newurl, modified)
}
/// Try to extract the destination url from the link if possible, and also try to remove any
/// click-id query parameters that are present. If the content has been modified, return Some;
/// if the content is untouched, return None.
pub fn
|
(&self, url: &url::Url) -> Option<String> {
if let Some(domain) = url.domain() {
// Check all rules that match this domain, but return on the first clean
for domaininfo in self.cleaning_info.iter().filter(|&x| x.domain == domain) {
if domaininfo.path == url.path() {
println!("{}", url);
println!("Disgusting url, cleaning");
let pairs = url.query_pairs();
// First search all the queries for the link querykey
for (key, value) in pairs {
if key.as_ref() == domaininfo.querykey {
if let Ok(url) = Url::parse(&value) {
// Before returning, remove any click identifier as well
return Some(self.clean_query(&url).0.to_string());
}
}
}
}
}
//println!("Url is clean");
// Check if there is a click identifier, and return if there is one
let (url, modified) = self.clean_query(&url);
if modified {
return Some(url.to_string());
}
}
None
}
pub fn try_clean_string(&self, url_string: String) -> String {
if let Ok(parsed) = Url::parse(&url_string) {
if let Some(clean) = self.clean_url(&parsed) {
return clean;
}
}
url_string
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn clean_facebook() {
let url_dirty ="https://l.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DuBKajwUM5v4%26fbclid%3DIwAR0fqKqv6CeHBG0xbnI7KyYNSkFpGpVpfSynXjFXBPFQcErCqLRLgVbfYYw&h=AT01YUWDOjvNW9S09aDSRAZQZk6L55-JZGswiFa1SY6c8_mGQC0VMlNf4HXZhjdJH4PuqdNHctfOmMqISuBRBD10xZ_gIKCnwBGkAV3mrNdTtb7t6QMgyD0GzH3PSCPHmmZGyMBHCRjZ";
let url_clean = "https://www.youtube.com/watch?v=uBKajwUM5v4";
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_facebook2() {
let url_dirty ="https://l.facebook.com/l.php?u=https%3A%2F%2Fwww.banggood.com%2FXT30-V3-ParaBoard-Parallel-Charging-Board-Banana-Plug-For-iMax-B6-Charger-p-1235388.html%3Fp%3DJQ191716342021201711%
|
clean_url
|
identifier_name
|
lib.rs
|
-F1idHcFN3Mc6-qXDHj-IeV67w1ngQrk8M12v1UgS2sQnqaTxdFpoYKOoGH-JgwxojgF7g5dvIxamd6fWC2sSWuumpAcr9TZKwES5r5Fcq2U",
"https://www.reddit.com/r/DnD/comments/bzi1oq/art_two_dragons_and_adopted_kobold_son/?")
),
},
CleanInformation {
domain: "www.google.com",
path: "/url",
querykey: "url",
example: Some(
Example::new(
"https://www.google.com/url?q=https://meet.lync.com/skydrive3m-mmm/random/random&sa=D&ust=1560944361951000&usg=AOvVaw2hCRSIX_WKpRFxeczL2S0g",
"https://meet.lync.com/skydrive3m-mmm/random/random?")
),
},
CleanInformation {
domain: "www.google.com",
path: "/url",
querykey: "q",
example: None
},
CleanInformation {
domain: "external.fbma2-1.fna.fbcdn.net",
path: "/safe_image.php",
querykey: "url",
example: Some(
Example::new(
"https://external.fbma2-1.fna.fbcdn.net/safe_image.php?d=AQBOrzUTFofcxXN7&w=960&h=960&url=https%3A%2F%2Fi.redd.it%2F4wao306sl9931.jpg&_nc_hash=AQDTUf7UFz8PtUsf",
"https://i.redd.it/4wao306sl9931.jpg?"
)
),
},
CleanInformation {
domain: "www.youtube.com",
path: "/redirect",
querykey: "q",
example: Some(
Example::new(
"https://www.youtube.com/redirect?event=live_chat&redir_token=QUFFLUhqblp5SDEzMjVCbERUaVFEVkhXdjNuTjdiekZkUXxBQ3Jtc0tuMWtxcjlrbGhyZWljMzl4dkdNNjkyNUt2NE1sOUV4cjBRcm5aeEF3RUZjcDF6dkJ1RHQ2LVVIeERnQzJLbVZZT0RxTFhYeWRsODRwbnZ2dWI1Um50WU1rcTgzR2lMVzhiamdQOFdpNWZFVUJXaXhGdw&q=https%3A%2F%2Fforms.gle%2FQDyXJVu6x24UYErEA",
"https://forms.gle/QDyXJVu6x24UYErEA?"
)
),
},
CleanInformation {
domain: "eur02.safelinks.protection.outlook.com",
path: "/",
querykey: "url",
example: Some(
Example::new(
"https://eur02.safelinks.protection.outlook.com/?url=http%3A%2F%2Fwww.regitart.se%2FDefault.aspx&data=04%7C01%7C%7C7a84ea493a30461aacd508d8d7df66dc%7C5453408ba6cd4c1e8b1018b500fb544e%7C1%7C0%7C637496701799123652%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&sdata=8nhnhqaKZveiKxfB72T%2B%2BDHr8ZJvedKJ5oHUAhwP8DY%3D&reserved=0",
"http://www.regitart.se/Default.aspx?")
),
},
]
};
pub struct UrlCleaner<'a> {
/// Information on how to obtain the link from a tracking link
cleaning_info: Vec<CleanInformation<'a>>,
/// list of known tracking query keys
tracker_query_keys: Vec<String>,
}
impl<'a> Default for UrlCleaner<'a> {
fn default() -> Self {
let cleaning_info = DOMAINS_TO_CLEAN.into();
let tracker_query_keys = KEYS_TO_CLEAN.iter().map(|s| s.to_string()).collect();
Self {
cleaning_info,
tracker_query_keys,
}
}
}
impl<'a> UrlCleaner<'a> {
// Remove the click-id and similar tracking query parameters that can sometimes be hidden inside links
fn clean_query(&self, url: &url::Url) -> (url::Url, bool) {
let pairs = url.query_pairs();
let mut newurl = url.clone();
newurl.query_pairs_mut().clear();
let mut modified = false;
for (key, value) in pairs {
if self.tracker_query_keys.contains(&key.as_ref().to_string())
|
else {
newurl.query_pairs_mut().append_pair(&key, &value);
}
}
(newurl, modified)
}
/// Try to extract the destination URL from the link, if possible, and also remove any click-id
/// query parameters that are present. Returns `Some` if the content has been modified, or `None`
/// if the content is untouched.
pub fn clean_url(&self, url: &url::Url) -> Option<String> {
if let Some(domain) = url.domain() {
// Check all rules that match this domain, but return on the first clean
for domaininfo in self.cleaning_info.iter().filter(|&x| x.domain == domain) {
if domaininfo.path == url.path() {
println!("{}", url);
println!("Discusting url, cleaning");
let pairs = url.query_pairs();
// First search all the queries for the link querykey
for (key, value) in pairs {
if key.as_ref() == domaininfo.querykey {
if let Ok(url) = Url::parse(&value) {
// Before returning, remove any click identifier as well
return Some(self.clean_query(&url).0.to_string());
}
}
}
}
}
//println!("Url is clean");
// Check if there is a click identifier, and return if there is one
let (url, modified) = self.clean_query(&url);
if modified {
return Some(url.to_string());
}
}
None
}
pub fn try_clean_string(&self, url_string: String) -> String {
if let Ok(parsed) = Url::parse(&url_string) {
if let Some(clean) = self.clean_url(&parsed) {
return clean;
}
}
url_string
}
}
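// Illustrative usage sketch (added commentary, not part of the original crate):
// `try_clean_string` is the forgiving entry point; it returns the input unchanged when the
// string is not a parseable URL or needs no cleaning. The URL below is a made-up example;
// `fbclid` is one of the tracking keys exercised by the tests further down.
//
//     let cleaner = UrlCleaner::default();
//     let cleaned = cleaner.try_clean_string(
//         "https://example.com/article?id=42&fbclid=IwAR0abc".to_string(),
//     );
//     // The `fbclid` pair is dropped while `id=42` is kept,
//     // e.g. "https://example.com/article?id=42"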
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn clean_facebook() {
let url_dirty ="https://l.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DuBKajwUM5v4%26fbclid%3DIwAR0fqKqv6CeHBG0xbnI7KyYNSkFpGpVpfSynXjFXBPFQcErCqLRLgVbfYYw&h=AT01YUWDOjvNW9S09aDSRAZQZk6L55-JZGswiFa1SY6c8_mGQC0VMlNf4HXZhjdJH4PuqdNHctfOmMqISuBRBD10xZ_gIKCnwBGkAV3mrNdTtb7t6QMgyD0GzH3PSCPHmmZGyMBHCRjZ";
let url_clean = "https://www.youtube.com/watch?v=uBKajwUM5v4";
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_facebook2() {
let url_dirty ="https://l.facebook.com/l.php?u=https%3A%2F%2Fwww.banggood.com%2FXT30-V3-ParaBoard-Parallel-Charging-Board-Banana-Plug-For-iMax-B6-Charger-p-1235388.html%3Fp%3DJQ191716342021201711%
|
{
println!("key found: {:?}", key);
modified = true;
}
|
conditional_block
|
lib.rs
|
-F1idHcFN3Mc6-qXDHj-IeV67w1ngQrk8M12v1UgS2sQnqaTxdFpoYKOoGH-JgwxojgF7g5dvIxamd6fWC2sSWuumpAcr9TZKwES5r5Fcq2U",
"https://www.reddit.com/r/DnD/comments/bzi1oq/art_two_dragons_and_adopted_kobold_son/?")
),
},
CleanInformation {
domain: "www.google.com",
path: "/url",
querykey: "url",
example: Some(
Example::new(
"https://www.google.com/url?q=https://meet.lync.com/skydrive3m-mmm/random/random&sa=D&ust=1560944361951000&usg=AOvVaw2hCRSIX_WKpRFxeczL2S0g",
"https://meet.lync.com/skydrive3m-mmm/random/random?")
),
},
CleanInformation {
domain: "www.google.com",
path: "/url",
querykey: "q",
example: None
},
CleanInformation {
domain: "external.fbma2-1.fna.fbcdn.net",
path: "/safe_image.php",
querykey: "url",
example: Some(
Example::new(
"https://external.fbma2-1.fna.fbcdn.net/safe_image.php?d=AQBOrzUTFofcxXN7&w=960&h=960&url=https%3A%2F%2Fi.redd.it%2F4wao306sl9931.jpg&_nc_hash=AQDTUf7UFz8PtUsf",
"https://i.redd.it/4wao306sl9931.jpg?"
)
),
},
CleanInformation {
domain: "www.youtube.com",
path: "/redirect",
querykey: "q",
example: Some(
Example::new(
"https://www.youtube.com/redirect?event=live_chat&redir_token=QUFFLUhqblp5SDEzMjVCbERUaVFEVkhXdjNuTjdiekZkUXxBQ3Jtc0tuMWtxcjlrbGhyZWljMzl4dkdNNjkyNUt2NE1sOUV4cjBRcm5aeEF3RUZjcDF6dkJ1RHQ2LVVIeERnQzJLbVZZT0RxTFhYeWRsODRwbnZ2dWI1Um50WU1rcTgzR2lMVzhiamdQOFdpNWZFVUJXaXhGdw&q=https%3A%2F%2Fforms.gle%2FQDyXJVu6x24UYErEA",
"https://forms.gle/QDyXJVu6x24UYErEA?"
)
),
},
CleanInformation {
domain: "eur02.safelinks.protection.outlook.com",
path: "/",
querykey: "url",
example: Some(
Example::new(
"https://eur02.safelinks.protection.outlook.com/?url=http%3A%2F%2Fwww.regitart.se%2FDefault.aspx&data=04%7C01%7C%7C7a84ea493a30461aacd508d8d7df66dc%7C5453408ba6cd4c1e8b1018b500fb544e%7C1%7C0%7C637496701799123652%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000&sdata=8nhnhqaKZveiKxfB72T%2B%2BDHr8ZJvedKJ5oHUAhwP8DY%3D&reserved=0",
"http://www.regitart.se/Default.aspx?")
),
},
]
};
pub struct UrlCleaner<'a> {
/// Information on how to obtain the link from a tracking link
cleaning_info: Vec<CleanInformation<'a>>,
/// list of known tracking query keys
tracker_query_keys: Vec<String>,
}
impl<'a> Default for UrlCleaner<'a> {
fn default() -> Self {
let cleaning_info = DOMAINS_TO_CLEAN.into();
let tracker_query_keys = KEYS_TO_CLEAN.iter().map(|s| s.to_string()).collect();
Self {
cleaning_info,
tracker_query_keys,
}
}
}
impl<'a> UrlCleaner<'a> {
// Remove the click-id and similar tracking query parameters that can sometimes be hidden inside links
fn clean_query(&self, url: &url::Url) -> (url::Url, bool) {
let pairs = url.query_pairs();
let mut newurl = url.clone();
newurl.query_pairs_mut().clear();
let mut modified = false;
for (key, value) in pairs {
if self.tracker_query_keys.contains(&key.as_ref().to_string()) {
println!("key found: {:?}", key);
modified = true;
} else {
newurl.query_pairs_mut().append_pair(&key, &value);
}
}
(newurl, modified)
}
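// Sketch of clean_query's behaviour (added commentary, not original code): the query string is
// rebuilt pair by pair, so only the offending keys are dropped and the order of the remaining
// pairs is preserved. For a hypothetical input such as
//
//     https://example.com/watch?v=abc&utm_source=mail
//
// the returned URL keeps `v=abc`, drops the tracker key, and reports `modified = true`
// (assuming `utm_source` is present in KEYS_TO_CLEAN).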
/// Try to extract the destination URL from the link, if possible, and also remove any click-id
/// query parameters that are present. Returns `Some` if the content has been modified, or `None`
/// if the content is untouched.
pub fn clean_url(&self, url: &url::Url) -> Option<String>
|
// Check if there is a click identifier, and return if there is one
let (url, modified) = self.clean_query(&url);
if modified {
return Some(url.to_string());
}
}
None
}
pub fn try_clean_string(&self, url_string: String) -> String {
if let Ok(parsed) = Url::parse(&url_string) {
if let Some(clean) = self.clean_url(&parsed) {
return clean;
}
}
url_string
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn clean_facebook() {
let url_dirty ="https://l.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DuBKajwUM5v4%26fbclid%3DIwAR0fqKqv6CeHBG0xbnI7KyYNSkFpGpVpfSynXjFXBPFQcErCqLRLgVbfYYw&h=AT01YUWDOjvNW9S09aDSRAZQZk6L55-JZGswiFa1SY6c8_mGQC0VMlNf4HXZhjdJH4PuqdNHctfOmMqISuBRBD10xZ_gIKCnwBGkAV3mrNdTtb7t6QMgyD0GzH3PSCPHmmZGyMBHCRjZ";
let url_clean = "https://www.youtube.com/watch?v=uBKajwUM5v4";
let parsed = Url::parse(&url_dirty).unwrap();
let cleaner = UrlCleaner::default();
let clean = cleaner.clean_url(&parsed).unwrap();
assert_eq!(clean, url_clean);
}
#[test]
fn clean_facebook2() {
let url_dirty ="https://l.facebook.com/l.php?u=https%3A%2F%2Fwww.banggood.com%2FXT30-V3-ParaBoard-Parallel-Charging-Board-Banana-Plug-For-iMax-B6-Charger-p-1235388.html%3Fp%3DJQ191716342021201711%
|
{
if let Some(domain) = url.domain() {
// Check all rules that match this domain, but return on the first clean
for domaininfo in self.cleaning_info.iter().filter(|&x| x.domain == domain) {
if domaininfo.path == url.path() {
println!("{}", url);
println!("Discusting url, cleaning");
let pairs = url.query_pairs();
// First search all the queries for the link querykey
for (key, value) in pairs {
if key.as_ref() == domaininfo.querykey {
if let Ok(url) = Url::parse(&value) {
// Before returning, remove any click identifier as well
return Some(self.clean_query(&url).0.to_string());
}
}
}
}
}
//println!("Url is clean");
|
identifier_body
|
generate-map.ts
|
] = d;
prev[to] = from;
}
}
}
const path: number[] = [];
let current = end;
if (prev[current] !== null || current === start) {
while (current !== null) {
path.push(current);
current = prev[current];
}
}
path.reverse();
return path;
}
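// Added note (not in the original source): the loop above walks the `prev` predecessor array
// backwards from `end` to `start` and then reverses, which is the standard way to recover a
// path from a Dijkstra/BFS-style search. A tiny hypothetical example:
//
//   // prev = [null, 0, 1, 2], start = 0, end = 3
//   // walking back: 3 -> 2 -> 1 -> 0, reversed gives the path [0, 1, 2, 3]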
class MapBuilder {
map: Map;
constructor(w: number, h: number) {
this.map = new Map(w, h);
}
addWall(x0: number, y0: number, x1: number, y1: number, gapChance?: number) {
[x0, y0, x1, y1] = [x0, y0, x1, y1].map(Math.round);
for (const [x, y] of line([x0, y0], [x1, y1])) {
if (gapChance && Math.random() < gapChance) continue;
this.map.set(x, y, Tile.Wall);
}
}
addStream(x0: number, y0: number, x1: number, y1: number, brushSize = 2) {
[x0, y0, x1, y1] = [x0, y0, x1, y1].map(Math.round);
const bs2 = ~~(brushSize / 2);
for (const [x, y] of line([x0, y0], [x1, y1])) {
for (let bx = 0; bx < brushSize; bx++) {
for (let by = 0; by < brushSize; by++) {
this.map.set(x + bx - bs2, y + by - bs2, Tile.Water);
}
}
}
}
addCrossing(x: number, y: number, brushSize = 2) {
[x, y] = [x, y].map(Math.round);
const bs2 = ~~(brushSize / 2);
for (let bx = -1; bx < brushSize + 1; bx++) {
for (let by = -1; by < brushSize + 1; by++) {
if (this.map.get(x + bx - bs2, y + by - bs2) === Tile.Water) {
this.map.set(x + bx - bs2, y + by - bs2, Tile.Crossing);
}
}
}
}
addPaddock(x: number, y: number) {
[x, y] = [x, y].map(Math.round);
const hw = ~~(Math.random() * 4) + 8; // half-width
const hh = ~~(Math.random() * 3) + 5; // half-height
const w = hw * 2; // width
const h = hh * 2; // height
const p = 2 * (w + h); // perimeter
this.map.paddock = [x - hw, y - hh, w, h];
// clear rect
for (let xOffset = -hw; xOffset <= hw; xOffset++) {
for (let yOffset = -hh; yOffset <= hh; yOffset++) {
const t = this.map.get(x + xOffset, y + yOffset);
if (t !== Tile.Water && t !== Tile.Crossing) {
this.map.set(x + xOffset, y + yOffset, Tile.Ground);
}
}
}
// random point along perimeter
const opening = ~~(Math.random() * p);
// wall in perimeter
for (let i = 0; i < 2 * (w + h); i++) {
const distanceToOpening = p / 2 - Math.abs(Math.abs(i - opening) - p / 2);
if (distanceToOpening < 5) continue;
const px = x - hw + ~~Math.max(0, Math.min(i, w, w * 2 + h - i));
const py = y - hh + ~~Math.max(0, Math.min(i - w, h, 2 * (w + h) - i));
const t = this.map.get(px, py);
if (t !== Tile.Water && t !== Tile.Crossing) {
this.map.set(px, py, Tile.Wall);
}
// plant grass
this.plantGrass(x, y);
}
}
plantGrass(x: number, y: number) {
[x, y] = [x, y].map(Math.round);
if (this.map.get(x, y) === Tile.Ground) {
this.map.set(x, y, Tile.Grass);
}
}
|
() {
const addedGrasses = [];
for (let x = 0; x < width; x++) {
for (let y = 0; y < height; y++) {
const t = this.map.get(x, y);
if (t === Tile.Ground) {
const pastureNeighbors =
(x > 0 && this.map.get(x - 1, y) === Tile.Grass ? 1 : 0) +
(x < width - 1 && this.map.get(x + 1, y) === Tile.Grass ? 1 : 0) +
(y > 0 && this.map.get(x, y - 1) === Tile.Grass ? 1 : 0) +
(y < height - 1 && this.map.get(x, y + 1) === Tile.Grass ? 1 : 0) +
(x > 0 && y > 0 && this.map.get(x - 1, y - 1) === Tile.Grass ? 1 : 0) +
(x < width - 1 && y > 0 && this.map.get(x + 1, y - 1) === Tile.Grass
? 1
: 0) +
(x < width - 1 && y < height - 1 && this.map.get(x + 1, y + 1) === Tile.Grass
? 1
: 0) +
(x > 0 && y < height - 1 && this.map.get(x - 1, y + 1) === Tile.Grass
? 1
: 0);
if (Math.random() < pastureNeighbors * 0.5) {
addedGrasses.push([x, y]);
}
}
}
}
for (const [x, y] of addedGrasses) {
this.map.set(x, y, Tile.Grass);
}
}
}
export default function generateMap(w: number, h: number) {
const mapBuilder = new MapBuilder(w, h);
const cellSize = 4;
pixels = 256;
resize();
renderer.clearColor = palette.timberwolf;
// generate a poisson disc distribution
const points: [number, number][] = [];
for (let i = 0; i < 1000; i++) {
const x = Math.random() * w;
const y = Math.random() * h;
if (!points.some(p => distSq(p, [x, y]) < cellSize * cellSize)) {
points.push([x, y]);
}
}
// calculate a triangulation of the points
const triangulation = Delaunay.from(points);
// pick a subset of points forming a circle in the center to be our playable area
const interior: number[] = [];
const sorted = points
.slice()
.sort((p0, p1) => distSq(p0, [w / 2, h / 2]) - distSq(p1, [w / 2, h / 2]));
for (let i = 0; i < points.length / 2; i++) {
interior.push(points.indexOf(sorted[i]));
}
// add walls around the hull of our interior subset
const interiorTriangulation = Delaunay.from(interior.map(i => points[i]));
const border = interiorTriangulation.hullPolygon();
for (let i = 0; i < border.length; i++) {
const [x0, y0] = border[i];
const [x1, y1] = border[(i + 1) % border.length];
mapBuilder.addWall(x0, y0, x1, y1);
}
// track which interior points are still open
const usedPoints: number[] = Array.from(
interiorTriangulation.hull.map(i => interior[i])
);
// generate a stream through the center of the map
let waterPath = [];
if (Math.random() > 0.25) {
const l = triangulation.hull.length;
const hullIndex0 = ~~(Math.random() * l);
const hullIndex1 = (hullIndex0 + ~~(l / 2 + (Math.random() - 0.5) * 0.25 * l)) % l;
const waterStart = triangulation.hull[hullIndex0];
const waterEnd = triangulation.hull[hullIndex1];
waterPath = routeTo(triangulation, waterStart, waterEnd);
}
// randomly add walls along the triangulation of interior points
const { halfedges, triangles } = interiorTriang
|
iterateGrass
|
identifier_name
|
generate-map.ts
|
] = d;
prev[to] = from;
}
}
}
const path: number[] = [];
let current = end;
if (prev[current] !== null || current === start) {
while (current !== null) {
path.push(current);
current = prev[current];
}
}
path.reverse();
return path;
}
class MapBuilder {
map: Map;
constructor(w: number, h: number) {
this.map = new Map(w, h);
}
addWall(x0: number, y0: number, x1: number, y1: number, gapChance?: number) {
[x0, y0, x1, y1] = [x0, y0, x1, y1].map(Math.round);
for (const [x, y] of line([x0, y0], [x1, y1])) {
if (gapChance && Math.random() < gapChance) continue;
this.map.set(x, y, Tile.Wall);
}
}
addStream(x0: number, y0: number, x1: number, y1: number, brushSize = 2) {
[x0, y0, x1, y1] = [x0, y0, x1, y1].map(Math.round);
const bs2 = ~~(brushSize / 2);
for (const [x, y] of line([x0, y0], [x1, y1])) {
for (let bx = 0; bx < brushSize; bx++) {
for (let by = 0; by < brushSize; by++) {
this.map.set(x + bx - bs2, y + by - bs2, Tile.Water);
}
}
}
}
addCrossing(x: number, y: number, brushSize = 2) {
[x, y] = [x, y].map(Math.round);
const bs2 = ~~(brushSize / 2);
for (let bx = -1; bx < brushSize + 1; bx++) {
for (let by = -1; by < brushSize + 1; by++) {
if (this.map.get(x + bx - bs2, y + by - bs2) === Tile.Water) {
this.map.set(x + bx - bs2, y + by - bs2, Tile.Crossing);
}
}
}
}
addPaddock(x: number, y: number) {
[x, y] = [x, y].map(Math.round);
const hw = ~~(Math.random() * 4) + 8; // half-width
const hh = ~~(Math.random() * 3) + 5; // half-height
const w = hw * 2; // width
const h = hh * 2; // height
const p = 2 * (w + h); // perimeter
this.map.paddock = [x - hw, y - hh, w, h];
// clear rect
for (let xOffset = -hw; xOffset <= hw; xOffset++) {
for (let yOffset = -hh; yOffset <= hh; yOffset++) {
const t = this.map.get(x + xOffset, y + yOffset);
if (t !== Tile.Water && t !== Tile.Crossing) {
this.map.set(x + xOffset, y + yOffset, Tile.Ground);
}
}
}
// random point along perimeter
const opening = ~~(Math.random() * p);
// wall in perimeter
for (let i = 0; i < 2 * (w + h); i++) {
const distanceToOpening = p / 2 - Math.abs(Math.abs(i - opening) - p / 2);
if (distanceToOpening < 5) continue;
const px = x - hw + ~~Math.max(0, Math.min(i, w, w * 2 + h - i));
const py = y - hh + ~~Math.max(0, Math.min(i - w, h, 2 * (w + h) - i));
const t = this.map.get(px, py);
if (t !== Tile.Water && t !== Tile.Crossing) {
this.map.set(px, py, Tile.Wall);
}
// plant grass
this.plantGrass(x, y);
}
}
plantGrass(x: number, y: number)
|
iterateGrass() {
const addedGrasses = [];
for (let x = 0; x < width; x++) {
for (let y = 0; y < height; y++) {
const t = this.map.get(x, y);
if (t === Tile.Ground) {
const pastureNeighbors =
(x > 0 && this.map.get(x - 1, y) === Tile.Grass ? 1 : 0) +
(x < width - 1 && this.map.get(x + 1, y) === Tile.Grass ? 1 : 0) +
(y > 0 && this.map.get(x, y - 1) === Tile.Grass ? 1 : 0) +
(y < height - 1 && this.map.get(x, y + 1) === Tile.Grass ? 1 : 0) +
(x > 0 && y > 0 && this.map.get(x - 1, y - 1) === Tile.Grass ? 1 : 0) +
(x < width - 1 && y > 0 && this.map.get(x + 1, y - 1) === Tile.Grass
? 1
: 0) +
(x < width - 1 && y < height - 1 && this.map.get(x + 1, y + 1) === Tile.Grass
? 1
: 0) +
(x > 0 && y < height - 1 && this.map.get(x - 1, y + 1) === Tile.Grass
? 1
: 0);
if (Math.random() < pastureNeighbors * 0.5) {
addedGrasses.push([x, y]);
}
}
}
}
for (const [x, y] of addedGrasses) {
this.map.set(x, y, Tile.Grass);
}
}
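// Added worked example (not original): with the 0.5 weighting above, a Ground tile with one
// Grass neighbour spreads with probability 0.5, and any tile with two or more Grass neighbours
// (2 * 0.5 >= 1) is converted on every iteration, so grass fills enclosed areas quickly.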
}
export default function generateMap(w: number, h: number) {
const mapBuilder = new MapBuilder(w, h);
const cellSize = 4;
pixels = 256;
resize();
renderer.clearColor = palette.timberwolf;
// generate a poisson disc distribution
const points: [number, number][] = [];
for (let i = 0; i < 1000; i++) {
const x = Math.random() * w;
const y = Math.random() * h;
if (!points.some(p => distSq(p, [x, y]) < cellSize * cellSize)) {
points.push([x, y]);
}
}
// calculate a triangulation of the points
const triangulation = Delaunay.from(points);
// pick a subset of points forming a circle in the center to be our playable area
const interior: number[] = [];
const sorted = points
.slice()
.sort((p0, p1) => distSq(p0, [w / 2, h / 2]) - distSq(p1, [w / 2, h / 2]));
for (let i = 0; i < points.length / 2; i++) {
interior.push(points.indexOf(sorted[i]));
}
// add walls around the hull of our interior subset
const interiorTriangulation = Delaunay.from(interior.map(i => points[i]));
const border = interiorTriangulation.hullPolygon();
for (let i = 0; i < border.length; i++) {
const [x0, y0] = border[i];
const [x1, y1] = border[(i + 1) % border.length];
mapBuilder.addWall(x0, y0, x1, y1);
}
// track which interior points are still open
const usedPoints: number[] = Array.from(
interiorTriangulation.hull.map(i => interior[i])
);
// generate a stream through the center of the map
let waterPath = [];
if (Math.random() > 0.25) {
const l = triangulation.hull.length;
const hullIndex0 = ~~(Math.random() * l);
const hullIndex1 = (hullIndex0 + ~~(l / 2 + (Math.random() - 0.5) * 0.25 * l)) % l;
const waterStart = triangulation.hull[hullIndex0];
const waterEnd = triangulation.hull[hullIndex1];
waterPath = routeTo(triangulation, waterStart, waterEnd);
}
// randomly add walls along the triangulation of interior points
const { halfedges, triangles } = interior
|
{
[x, y] = [x, y].map(Math.round);
if (this.map.get(x, y) === Tile.Ground) {
this.map.set(x, y, Tile.Grass);
}
}
|
identifier_body
|
generate-map.ts
|
y, Tile.Wall);
}
}
addStream(x0: number, y0: number, x1: number, y1: number, brushSize = 2) {
[x0, y0, x1, y1] = [x0, y0, x1, y1].map(Math.round);
const bs2 = ~~(brushSize / 2);
for (const [x, y] of line([x0, y0], [x1, y1])) {
for (let bx = 0; bx < brushSize; bx++) {
for (let by = 0; by < brushSize; by++) {
this.map.set(x + bx - bs2, y + by - bs2, Tile.Water);
}
}
}
}
addCrossing(x: number, y: number, brushSize = 2) {
[x, y] = [x, y].map(Math.round);
const bs2 = ~~(brushSize / 2);
for (let bx = -1; bx < brushSize + 1; bx++) {
for (let by = -1; by < brushSize + 1; by++) {
if (this.map.get(x + bx - bs2, y + by - bs2) === Tile.Water) {
this.map.set(x + bx - bs2, y + by - bs2, Tile.Crossing);
}
}
}
}
addPaddock(x: number, y: number) {
[x, y] = [x, y].map(Math.round);
const hw = ~~(Math.random() * 4) + 8; // half-width
const hh = ~~(Math.random() * 3) + 5; // half-height
const w = hw * 2; // width
const h = hh * 2; // height
const p = 2 * (w + h); // perimeter
this.map.paddock = [x - hw, y - hh, w, h];
// clear rect
for (let xOffset = -hw; xOffset <= hw; xOffset++) {
for (let yOffset = -hh; yOffset <= hh; yOffset++) {
const t = this.map.get(x + xOffset, y + yOffset);
if (t !== Tile.Water && t !== Tile.Crossing) {
this.map.set(x + xOffset, y + yOffset, Tile.Ground);
}
}
}
// random point along perimeter
const opening = ~~(Math.random() * p);
// wall in perimeter
for (let i = 0; i < 2 * (w + h); i++) {
const distanceToOpening = p / 2 - Math.abs(Math.abs(i - opening) - p / 2);
if (distanceToOpening < 5) continue;
const px = x - hw + ~~Math.max(0, Math.min(i, w, w * 2 + h - i));
const py = y - hh + ~~Math.max(0, Math.min(i - w, h, 2 * (w + h) - i));
const t = this.map.get(px, py);
if (t !== Tile.Water && t !== Tile.Crossing) {
this.map.set(px, py, Tile.Wall);
}
// plant grass
this.plantGrass(x, y);
}
}
plantGrass(x: number, y: number) {
[x, y] = [x, y].map(Math.round);
if (this.map.get(x, y) === Tile.Ground) {
this.map.set(x, y, Tile.Grass);
}
}
iterateGrass() {
const addedGrasses = [];
for (let x = 0; x < width; x++) {
for (let y = 0; y < height; y++) {
const t = this.map.get(x, y);
if (t === Tile.Ground) {
const pastureNeighbors =
(x > 0 && this.map.get(x - 1, y) === Tile.Grass ? 1 : 0) +
(x < width - 1 && this.map.get(x + 1, y) === Tile.Grass ? 1 : 0) +
(y > 0 && this.map.get(x, y - 1) === Tile.Grass ? 1 : 0) +
(y < height - 1 && this.map.get(x, y + 1) === Tile.Grass ? 1 : 0) +
(x > 0 && y > 0 && this.map.get(x - 1, y - 1) === Tile.Grass ? 1 : 0) +
(x < width - 1 && y > 0 && this.map.get(x + 1, y - 1) === Tile.Grass
? 1
: 0) +
(x < width - 1 && y < height - 1 && this.map.get(x + 1, y + 1) === Tile.Grass
? 1
: 0) +
(x > 0 && y < height - 1 && this.map.get(x - 1, y + 1) === Tile.Grass
? 1
: 0);
if (Math.random() < pastureNeighbors * 0.5) {
addedGrasses.push([x, y]);
}
}
}
}
for (const [x, y] of addedGrasses) {
this.map.set(x, y, Tile.Grass);
}
}
}
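// Illustrative usage sketch (added, not part of the original file): MapBuilder is driven
// entirely through its add* helpers, as generateMap does below. Coordinates may be fractional;
// each method rounds them before writing tiles.
//
//   const builder = new MapBuilder(64, 48);
//   builder.addWall(0, 0, 63, 0);            // solid top border
//   builder.addStream(0, 24, 63, 24, 3);     // horizontal stream, 3 tiles wide
//   builder.addCrossing(32, 24);             // make part of the stream fordable
//   builder.plantGrass(10.4, 10.6);          // rounded to (10, 11) before planting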
export default function generateMap(w: number, h: number) {
const mapBuilder = new MapBuilder(w, h);
const cellSize = 4;
pixels = 256;
resize();
renderer.clearColor = palette.timberwolf;
// generate a poisson disc distribution
const points: [number, number][] = [];
for (let i = 0; i < 1000; i++) {
const x = Math.random() * w;
const y = Math.random() * h;
if (!points.some(p => distSq(p, [x, y]) < cellSize * cellSize)) {
points.push([x, y]);
}
}
// calculate a triangulation of the points
const triangulation = Delaunay.from(points);
// pick a subset of points forming a circle in the center to be our playable area
const interior: number[] = [];
const sorted = points
.slice()
.sort((p0, p1) => distSq(p0, [w / 2, h / 2]) - distSq(p1, [w / 2, h / 2]));
for (let i = 0; i < points.length / 2; i++) {
interior.push(points.indexOf(sorted[i]));
}
// add walls around the hull of our interior subset
const interiorTriangulation = Delaunay.from(interior.map(i => points[i]));
const border = interiorTriangulation.hullPolygon();
for (let i = 0; i < border.length; i++) {
const [x0, y0] = border[i];
const [x1, y1] = border[(i + 1) % border.length];
mapBuilder.addWall(x0, y0, x1, y1);
}
// track which interior points are still open
const usedPoints: number[] = Array.from(
interiorTriangulation.hull.map(i => interior[i])
);
// generate a stream through the center of the map
let waterPath = [];
if (Math.random() > 0.25) {
const l = triangulation.hull.length;
const hullIndex0 = ~~(Math.random() * l);
const hullIndex1 = (hullIndex0 + ~~(l / 2 + (Math.random() - 0.5) * 0.25 * l)) % l;
const waterStart = triangulation.hull[hullIndex0];
const waterEnd = triangulation.hull[hullIndex1];
waterPath = routeTo(triangulation, waterStart, waterEnd);
}
// randomly add walls along the triangulation of interior points
const { halfedges, triangles } = interiorTriangulation;
const wallDensity = Math.random() * 0.1 + 0.1;
for (let i = 0; i < halfedges.length; i++) {
if (Math.random() > wallDensity) continue;
const j = halfedges[i];
if (j < i) continue;
const p0 = interior[triangles[i]];
const p1 = interior[triangles[j]];
if (waterPath.includes(p0) && waterPath.includes(p1)) continue;
usedPoints.push(p0);
usedPoints.push(p1);
const [x0, y0] = points[p0];
const [x1, y1] = points[p1];
mapBuilder.addWall(x0, y0, x1, y1, Math.random());
}
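// Added note (not original): `halfedges[i]` is the opposite half-edge of edge i in the Delaunay
// triangulation, so the `j < i` check above visits each shared edge exactly once (and skips hull
// edges, where halfedges[i] is -1, since -1 < i always holds).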
// add stream along water path
for (let i = 0; i < waterPath.length - 1; i++) {
const [x0, y0] = points[waterPath[i]];
|
random_line_split
|
||
generate-map.ts
|
] = d;
prev[to] = from;
}
}
}
const path: number[] = [];
let current = end;
if (prev[current] !== null || current === start) {
while (current !== null) {
path.push(current);
current = prev[current];
}
}
path.reverse();
return path;
}
class MapBuilder {
map: Map;
constructor(w: number, h: number) {
this.map = new Map(w, h);
}
addWall(x0: number, y0: number, x1: number, y1: number, gapChance?: number) {
[x0, y0, x1, y1] = [x0, y0, x1, y1].map(Math.round);
for (const [x, y] of line([x0, y0], [x1, y1])) {
if (gapChance && Math.random() < gapChance) continue;
this.map.set(x, y, Tile.Wall);
}
}
addStream(x0: number, y0: number, x1: number, y1: number, brushSize = 2) {
[x0, y0, x1, y1] = [x0, y0, x1, y1].map(Math.round);
const bs2 = ~~(brushSize / 2);
for (const [x, y] of line([x0, y0], [x1, y1])) {
for (let bx = 0; bx < brushSize; bx++) {
for (let by = 0; by < brushSize; by++) {
this.map.set(x + bx - bs2, y + by - bs2, Tile.Water);
}
}
}
}
addCrossing(x: number, y: number, brushSize = 2) {
[x, y] = [x, y].map(Math.round);
const bs2 = ~~(brushSize / 2);
for (let bx = -1; bx < brushSize + 1; bx++) {
for (let by = -1; by < brushSize + 1; by++) {
if (this.map.get(x + bx - bs2, y + by - bs2) === Tile.Water) {
this.map.set(x + bx - bs2, y + by - bs2, Tile.Crossing);
}
}
}
}
addPaddock(x: number, y: number) {
[x, y] = [x, y].map(Math.round);
const hw = ~~(Math.random() * 4) + 8; // half-width
const hh = ~~(Math.random() * 3) + 5; // half-height
const w = hw * 2; // width
const h = hh * 2; // height
const p = 2 * (w + h); // perimeter
this.map.paddock = [x - hw, y - hh, w, h];
// clear rect
for (let xOffset = -hw; xOffset <= hw; xOffset++) {
for (let yOffset = -hh; yOffset <= hh; yOffset++) {
const t = this.map.get(x + xOffset, y + yOffset);
if (t !== Tile.Water && t !== Tile.Crossing) {
this.map.set(x + xOffset, y + yOffset, Tile.Ground);
}
}
}
// random point along perimeter
const opening = ~~(Math.random() * p);
// wall in perimeter
for (let i = 0; i < 2 * (w + h); i++) {
const distanceToOpening = p / 2 - Math.abs(Math.abs(i - opening) - p / 2);
if (distanceToOpening < 5) continue;
const px = x - hw + ~~Math.max(0, Math.min(i, w, w * 2 + h - i));
const py = y - hh + ~~Math.max(0, Math.min(i - w, h, 2 * (w + h) - i));
const t = this.map.get(px, py);
if (t !== Tile.Water && t !== Tile.Crossing) {
this.map.set(px, py, Tile.Wall);
}
// plant grass
this.plantGrass(x, y);
}
}
plantGrass(x: number, y: number) {
[x, y] = [x, y].map(Math.round);
if (this.map.get(x, y) === Tile.Ground) {
this.map.set(x, y, Tile.Grass);
}
}
iterateGrass() {
const addedGrasses = [];
for (let x = 0; x < width; x++) {
for (let y = 0; y < height; y++) {
const t = this.map.get(x, y);
if (t === Tile.Ground) {
const pastureNeighbors =
(x > 0 && this.map.get(x - 1, y) === Tile.Grass ? 1 : 0) +
(x < width - 1 && this.map.get(x + 1, y) === Tile.Grass ? 1 : 0) +
(y > 0 && this.map.get(x, y - 1) === Tile.Grass ? 1 : 0) +
(y < height - 1 && this.map.get(x, y + 1) === Tile.Grass ? 1 : 0) +
(x > 0 && y > 0 && this.map.get(x - 1, y - 1) === Tile.Grass ? 1 : 0) +
(x < width - 1 && y > 0 && this.map.get(x + 1, y - 1) === Tile.Grass
? 1
: 0) +
(x < width - 1 && y < height - 1 && this.map.get(x + 1, y + 1) === Tile.Grass
? 1
: 0) +
(x > 0 && y < height - 1 && this.map.get(x - 1, y + 1) === Tile.Grass
? 1
: 0);
if (Math.random() < pastureNeighbors * 0.5) {
addedGrasses.push([x, y]);
}
}
}
}
for (const [x, y] of addedGrasses) {
this.map.set(x, y, Tile.Grass);
}
}
}
export default function generateMap(w: number, h: number) {
const mapBuilder = new MapBuilder(w, h);
const cellSize = 4;
pixels = 256;
resize();
renderer.clearColor = palette.timberwolf;
// generate a poisson disc distribution
const points: [number, number][] = [];
for (let i = 0; i < 1000; i++) {
const x = Math.random() * w;
const y = Math.random() * h;
if (!points.some(p => distSq(p, [x, y]) < cellSize * cellSize))
|
}
// calculate a triangulation of the points
const triangulation = Delaunay.from(points);
// pick a subset of points forming a circle in the center to be our playable area
const interior: number[] = [];
const sorted = points
.slice()
.sort((p0, p1) => distSq(p0, [w / 2, h / 2]) - distSq(p1, [w / 2, h / 2]));
for (let i = 0; i < points.length / 2; i++) {
interior.push(points.indexOf(sorted[i]));
}
// add walls around the hull of our interior subset
const interiorTriangulation = Delaunay.from(interior.map(i => points[i]));
const border = interiorTriangulation.hullPolygon();
for (let i = 0; i < border.length; i++) {
const [x0, y0] = border[i];
const [x1, y1] = border[(i + 1) % border.length];
mapBuilder.addWall(x0, y0, x1, y1);
}
// track which interior points are still open
const usedPoints: number[] = Array.from(
interiorTriangulation.hull.map(i => interior[i])
);
// generate a stream through the center of the map
let waterPath = [];
if (Math.random() > 0.25) {
const l = triangulation.hull.length;
const hullIndex0 = ~~(Math.random() * l);
const hullIndex1 = (hullIndex0 + ~~(l / 2 + (Math.random() - 0.5) * 0.25 * l)) % l;
const waterStart = triangulation.hull[hullIndex0];
const waterEnd = triangulation.hull[hullIndex1];
waterPath = routeTo(triangulation, waterStart, waterEnd);
}
// randomly add walls along the triangulation of interior points
const { halfedges, triangles } = interior
|
{
points.push([x, y]);
}
|
conditional_block
|
callback.go
|
allows hand-crafted code to add middleware to the router
AddMiddleware(ctx context.Context, r chi.Router)
// BasePath allows hand-crafted code to set the base path for the Router
BasePath() string
// Config returns a structure representing the server config
// This is returned from the status endpoint
Config() interface{}
// MapError maps an error to an HTTPError in instances where custom error mapping is required. Return nil to perform default error mapping; defined as:
// 1. CustomError.HTTPError if the original error is a CustomError, otherwise
// 2. common.MapError
MapError(ctx context.Context, err error) *common.HTTPError
// DownstreamTimeoutContext adds the desired timeout duration to the context for downstreams
// A separate service timeout (usually greater than the downstream) should also be in
// place to automatically respond to callers
DownstreamTimeoutContext(ctx context.Context) (context.Context, context.CancelFunc)
}
// GrpcGenCallback is currently a subset of RestGenCallback so is defined separately for convenience.
type GrpcGenCallback interface {
DownstreamTimeoutContext(ctx context.Context) (context.Context, context.CancelFunc)
}
// Hooks can be used to customise the behaviour of an autogenerated sysl-go service.
|
type Hooks struct {
// Logger returns the common.Logger instance to use within Sysl-go.
// By default, if this Logger hook is not set then an instance of the pkg logger is used.
// This hook can also be used to define a custom logger.
// For more information about logging see log/README.md within this project.
// Note: The returned logger is guaranteed to have the log level from the external configuration
// file (library: log: level) set against it.
Logger func() log.Logger
// MapError maps an error to an HTTPError in instances where custom error mapping is required.
// Return nil to perform default error mapping; defined as:
// 1. CustomError.HTTPError if the original error is a CustomError, otherwise
// 2. common.MapError
// By default, if this MapError hook is not customised, the default error mapping will be used.
MapError func(ctx context.Context, err error) *common.HTTPError
// AdditionalGrpcDialOptions can be used to append to the default grpc.DialOption configuration used by
// an autogenerated service when it calls grpc.Dial when using a grpc.Client to connect to a gRPC server.
// If given, AdditionalGrpcDialOptions will be appended to the list of default options created by
// DefaultGrpcDialOptions(CommonGRPCDownstreamData).
//
// Use AdditionalGrpcDialOptions if you need both default and custom options. Be careful that you do
// not specify any options that clash with the default options.
//
// If you need to completely override the default options, use OverrideGrpcDialOptions.
// It is an error to set both AdditionalGrpcDialOptions and OverrideGrpcDialOptions.
AdditionalGrpcDialOptions []grpc.DialOption
// OverrideGrpcDialOptions can be used to override the default grpc.DialOption configuration used by
// an autogenerated service when it calls grpc.Dial when using a grpc.Client to connect to a gRPC server.
//
// The serviceName parameter will be filled with the name of the target service that we
// are about to call grpc.Dial to connect to -- a function implementing this hook can use the
// serviceName to customise different dial options for different targets.
//
// Prefer to use AdditionalGrpcDialOptions instead of OverrideGrpcDialOptions if you only need
// to append to the default grpc.DialOption configuration instead of overriding it completely.
//
// It is an error to set both AdditionalGrpcDialOptions and OverrideGrpcDialOptions.
OverrideGrpcDialOptions func(serviceName string, cfg *config.CommonGRPCDownstreamData) ([]grpc.DialOption, error)
// AdditionalGrpcServerOptions can be used to append to the default grpc.ServerOption configuration used by
// an autogenerated service when it creates a gRPC server. If given, AdditionalGrpcServerOptions will be
// appended to the list of default options created by DefaultGrpcServerOptions(context.Context, CommonServerConfig).
//
// Use AdditionalGrpcServerOptions if you need both default and custom options. Be careful that you do
// not specify any options that clash with the default options.
//
// If you need to completely override the default options, use OverrideGrpcServerOptions.
// It is an error to set both AdditionalGrpcServerOptions and OverrideGrpcServerOptions.
AdditionalGrpcServerOptions []grpc.ServerOption
// OverrideGrpcServerOptions can be used to override the default grpc.ServerOption configuration used by an
// autogenerated service when it creates a gRPC server.
//
// Prefer to use AdditionalGrpcServerOptions instead of OverrideGrpcServerOptions if you only need
// to append to the default grpc.ServerOption configuration instead of overriding it completely.
//
// It is an error to set both AdditionalGrpcServerOptions and OverrideGrpcServerOptions.
OverrideGrpcServerOptions func(ctx context.Context, grpcPublicServerConfig *config.CommonServerConfig) ([]grpc.ServerOption, error)
// OverrideMakeJWTClaimsBasedAuthorizationRule can be used to customise how authorization rule
// expressions are evaluated and used to decide if JWT claims are authorised. By default, if this
// hook is nil, then authrules.MakeDefaultJWTClaimsBasedAuthorizationRule is used.
OverrideMakeJWTClaimsBasedAuthorizationRule func(authorizationRuleExpression string) (authrules.JWTClaimsBasedAuthorizationRule, error)
// AddHTTPMiddleware can be used to install additional HTTP middleware into the chi.Router
// used to serve all (non-admin) HTTP endpoints. By default, sysl-go installs a number of
// HTTP middleware -- refer to prepareMiddleware inside sysl-go/core. This hook can only
// be used to add middleware, not override any of the default middleware.
AddHTTPMiddleware func(ctx context.Context, r chi.Router)
// AddAdminHTTPMiddleware can be used to install additional HTTP middleware into the chi.Router
// used to serve the admin HTTP endpoints. See AddHTTPMiddleware for further details.
AddAdminHTTPMiddleware func(ctx context.Context, r chi.Router)
// DownstreamRoundTripper can be used to install additional HTTP RoundTrippers to the downstream clients
DownstreamRoundTripper func(serviceName string, serviceURL string, original http.RoundTripper) http.RoundTripper
// ValidateConfig can be used to validate (or override) values in the config.
ValidateConfig func(ctx context.Context, cfg *config.DefaultConfig) error
}
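// Illustrative sketch (added, not part of the original file): one way an application might
// populate Hooks. The imported package paths are assumptions; Hooks itself is defined in this
// package. Only the Additional* fields are set here, because pairing an Additional* field with
// its Override* counterpart is rejected by the Resolve* helpers below.
//
//	hooks := Hooks{
//		AddHTTPMiddleware: func(ctx context.Context, r chi.Router) {
//			r.Use(middleware.RequestID) // middleware from github.com/go-chi/chi/middleware
//		},
//		AdditionalGrpcServerOptions: []grpc.ServerOption{
//			grpc.MaxRecvMsgSize(8 << 20), // raise the inbound message limit to 8 MiB
//		},
//	}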
func ResolveGrpcDialOptions(ctx context.Context, serviceName string, h *Hooks, grpcDownstreamConfig *config.CommonGRPCDownstreamData) ([]grpc.DialOption, error) {
switch {
case len(h.AdditionalGrpcDialOptions) > 0 && h.OverrideGrpcDialOptions != nil:
return nil, fmt.Errorf("Hooks.AdditionalGrpcDialOptions and Hooks.OverrideGrpcDialOptions cannot both be set")
case h.OverrideGrpcDialOptions != nil:
return h.OverrideGrpcDialOptions(serviceName, grpcDownstreamConfig)
default:
opts, err := config.DefaultGrpcDialOptions(ctx, grpcDownstreamConfig)
if err != nil {
return nil, err
}
opts = append(opts, h.AdditionalGrpcDialOptions...)
return opts, nil
}
}
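// Added note (not original): resolution order illustrated. Given, say,
//
//	h := &Hooks{AdditionalGrpcDialOptions: []grpc.DialOption{grpc.WithUserAgent("my-service")}}
//	opts, err := ResolveGrpcDialOptions(ctx, "downstream", h, cfg)
//
// opts is the default option set from config.DefaultGrpcDialOptions with WithUserAgent appended;
// populating OverrideGrpcDialOptions as well would instead return the "cannot both be set" error above.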
func ResolveGrpcServerOptions(ctx context.Context, h *Hooks, grpcPublicServerConfig *config.CommonServerConfig) ([]grpc.ServerOption, error) {
switch {
case len(h.AdditionalGrpcServerOptions) > 0 && h.OverrideGrpcServerOptions != nil:
return nil, fmt.Errorf("Hooks.AdditionalGrpcServerOptions and Hooks.OverrideGrpcServerOptions cannot both be set")
case h.OverrideGrpcServerOptions != nil:
return h.OverrideGrpcServerOptions(ctx, grpcPublicServerConfig)
default:
opts, err := DefaultGrpcServerOptions(ctx, grpcPublicServerConfig)
if err != nil {
return nil, err
}
opts = append(opts, h.AdditionalGrpcServerOptions...)
return opts, nil
}
}
func ResolveGRPCAuthorizationRule(ctx context.Context, h *Hooks, endpointName string, authRuleExpression string) (authrules.Rule, error) {
return resolveAuthorizationRule(ctx, h, endpointName, authRuleExpression, authrules.MakeGRPCJWTAuthorizationRule)
}
func ResolveRESTAuthorizationRule(ctx context.Context, h *Hooks, endpointName string, authRuleExpression string) (authrules.Rule, error) {
return resolveAuthorizationRule(ctx, h, endpointName, authRuleExpression, authrules.MakeRESTJWTAuthorizationRule)
}
func resolveAuthorizationRule(ctx context.Context, h *Hooks, endpointName string, authRuleExpression string, ruleFactory func(authRule authrules.JWTClaimsBasedAuthorizationRule, authenticator jwtauth.Authenticator) (authrules.Rule, error)) (authrules.Rule, error) {
cfg := config.GetDefaultConfig(ctx)
if cfg.Development != nil && cfg.Development.DisableAllAuthorizationRules {
log.Info(ctx, "warning: development.disableAllAuthorizationRules is set, all authorization rules are disabled, this is insecure and should not be used in production.")
return authrules.InsecureAlwaysGrantAccess, nil
}
var claimsBasedAuthRuleFactory func(authorizationRule
|
random_line_split
|
|
callback.go
|
allows hand-crafted code to add middleware to the router
AddMiddleware(ctx context.Context, r chi.Router)
// BasePath allows hand-crafted code to set the base path for the Router
BasePath() string
// Config returns a structure representing the server config
// This is returned from the status endpoint
Config() interface{}
// MapError maps an error to an HTTPError in instances where custom error mapping is required. Return nil to perform default error mapping; defined as:
// 1. CustomError.HTTPError if the original error is a CustomError, otherwise
// 2. common.MapError
MapError(ctx context.Context, err error) *common.HTTPError
// DownstreamTimeoutContext adds the desired timeout duration to the context for downstreams
// A separate service timeout (usually greater than the downstream) should also be in
// place to automatically respond to callers
DownstreamTimeoutContext(ctx context.Context) (context.Context, context.CancelFunc)
}
// GrpcGenCallback is currently a subset of RestGenCallback so is defined separately for convenience.
type GrpcGenCallback interface {
DownstreamTimeoutContext(ctx context.Context) (context.Context, context.CancelFunc)
}
// Hooks can be used to customise the behaviour of an autogenerated sysl-go service.
type Hooks struct {
// Logger returns the common.Logger instance to use within Sysl-go.
// By default, if this Logger hook is not set then an instance of the pkg logger is used.
// This hook can also be used to define a custom logger.
// For more information about logging see log/README.md within this project.
// Note: The returned logger is guaranteed to have the log level from the external configuration
// file (library: log: level) set against it.
Logger func() log.Logger
// MapError maps an error to an HTTPError in instances where custom error mapping is required.
// Return nil to perform default error mapping; defined as:
// 1. CustomError.HTTPError if the original error is a CustomError, otherwise
// 2. common.MapError
// By default, if this MapError hook is not customised, the default error mapping will be used.
MapError func(ctx context.Context, err error) *common.HTTPError
// AdditionalGrpcDialOptions can be used to append to the default grpc.DialOption configuration used by
// an autogenerated service when it calls grpc.Dial when using a grpc.Client to connect to a gRPC server.
// If given, AdditionalGrpcDialOptions will be appended to the list of default options created by
// DefaultGrpcDialOptions(CommonGRPCDownstreamData).
//
// Use AdditionalGrpcDialOptions if you need both default and custom options. Be careful that you do
// not specify any options that clash with the default options.
//
// If you need to completely override the default options, use OverrideGrpcDialOptions.
// It is an error to set both AdditionalGrpcDialOptions and OverrideGrpcDialOptions.
AdditionalGrpcDialOptions []grpc.DialOption
// OverrideGrpcDialOptions can be used to override the default grpc.DialOption configuration used by
// an autogenerated service when it calls grpc.Dial when using a grpc.Client to connect to a gRPC server.
//
// The serviceName parameter will be filled with the name of the target service that we
// are about to call grpc.Dial to connect to -- a function implementing this hook can use the
// serviceName to customise different dial options for different targets.
//
// Prefer to use AdditionalGrpcDialOptions instead of OverrideGrpcDialOptions if you only need
// to append to the default grpc.DialOption configuration instead of overriding it completely.
//
// It is an error to set both AdditionalGrpcDialOptions and OverrideGrpcDialOptions.
OverrideGrpcDialOptions func(serviceName string, cfg *config.CommonGRPCDownstreamData) ([]grpc.DialOption, error)
// AdditionalGrpcServerOptions can be used to append to the default grpc.ServerOption configuration used by
// an autogenerated service when it creates a gRPC server. If given, AdditionalGrpcServerOptions will be
// appended to the list of default options created by DefaultGrpcServerOptions(context.Context, CommonServerConfig).
//
// Use AdditionalGrpcServerOptions if you need both default and custom options. Be careful that you do
// not specify any options that clash with the default options.
//
// If you need to completely override the default options, use OverrideGrpcServerOptions.
// It is an error to set both AdditionalGrpcServerOptions and OverrideGrpcServerOptions.
AdditionalGrpcServerOptions []grpc.ServerOption
// OverrideGrpcServerOptions can be used to override the default grpc.ServerOption configuration used by an
// autogenerated service when it creates a gRPC server.
//
// Prefer to use AdditionalGrpcServerOptions instead of OverrideGrpcServerOptions if you only need
// to append to the default grpc.ServerOption configuration instead of overriding it completely.
//
// It is an error to set both AdditionalGrpcServerOptions and OverrideGrpcServerOptions.
OverrideGrpcServerOptions func(ctx context.Context, grpcPublicServerConfig *config.CommonServerConfig) ([]grpc.ServerOption, error)
// OverrideMakeJWTClaimsBasedAuthorizationRule can be used to customise how authorization rule
// expressions are evaluated and used to decide if JWT claims are authorised. By default, if this
// hook is nil, then authrules.MakeDefaultJWTClaimsBasedAuthorizationRule is used.
OverrideMakeJWTClaimsBasedAuthorizationRule func(authorizationRuleExpression string) (authrules.JWTClaimsBasedAuthorizationRule, error)
// AddHTTPMiddleware can be used to install additional HTTP middleware into the chi.Router
// used to serve all (non-admin) HTTP endpoints. By default, sysl-go installs a number of
// HTTP middleware -- refer to prepareMiddleware inside sysl-go/core. This hook can only
// be used to add middleware, not override any of the default middleware.
AddHTTPMiddleware func(ctx context.Context, r chi.Router)
// AddAdminHTTPMiddleware can be used to install additional HTTP middleware into the chi.Router
// used to serve the admin HTTP endpoints. See AddHTTPMiddleware for further details.
AddAdminHTTPMiddleware func(ctx context.Context, r chi.Router)
// DownstreamRoundTripper can be used to install additional HTTP RoundTrippers to the downstream clients
DownstreamRoundTripper func(serviceName string, serviceURL string, original http.RoundTripper) http.RoundTripper
// ValidateConfig can be used to validate (or override) values in the config.
ValidateConfig func(ctx context.Context, cfg *config.DefaultConfig) error
}
func ResolveGrpcDialOptions(ctx context.Context, serviceName string, h *Hooks, grpcDownstreamConfig *config.CommonGRPCDownstreamData) ([]grpc.DialOption, error) {
switch {
case len(h.AdditionalGrpcDialOptions) > 0 && h.OverrideGrpcDialOptions != nil:
return nil, fmt.Errorf("Hooks.AdditionalGrpcDialOptions and Hooks.OverrideGrpcDialOptions cannot both be set")
case h.OverrideGrpcDialOptions != nil:
return h.OverrideGrpcDialOptions(serviceName, grpcDownstreamConfig)
default:
opts, err := config.DefaultGrpcDialOptions(ctx, grpcDownstreamConfig)
if err != nil {
return nil, err
}
opts = append(opts, h.AdditionalGrpcDialOptions...)
return opts, nil
}
}
func ResolveGrpcServerOptions(ctx context.Context, h *Hooks, grpcPublicServerConfig *config.CommonServerConfig) ([]grpc.ServerOption, error) {
switch {
case len(h.AdditionalGrpcServerOptions) > 0 && h.OverrideGrpcServerOptions != nil:
return nil, fmt.Errorf("Hooks.AdditionalGrpcServerOptions and Hooks.OverrideGrpcServerOptions cannot both be set")
case h.OverrideGrpcServerOptions != nil:
return h.OverrideGrpcServerOptions(ctx, grpcPublicServerConfig)
default:
opts, err := DefaultGrpcServerOptions(ctx, grpcPublicServerConfig)
if err != nil {
return nil, err
}
opts = append(opts, h.AdditionalGrpcServerOptions...)
return opts, nil
}
}
func
|
(ctx context.Context, h *Hooks, endpointName string, authRuleExpression string) (authrules.Rule, error) {
return resolveAuthorizationRule(ctx, h, endpointName, authRuleExpression, authrules.MakeGRPCJWTAuthorizationRule)
}
func ResolveRESTAuthorizationRule(ctx context.Context, h *Hooks, endpointName string, authRuleExpression string) (authrules.Rule, error) {
return resolveAuthorizationRule(ctx, h, endpointName, authRuleExpression, authrules.MakeRESTJWTAuthorizationRule)
}
func resolveAuthorizationRule(ctx context.Context, h *Hooks, endpointName string, authRuleExpression string, ruleFactory func(authRule authrules.JWTClaimsBasedAuthorizationRule, authenticator jwtauth.Authenticator) (authrules.Rule, error)) (authrules.Rule, error) {
cfg := config.GetDefaultConfig(ctx)
if cfg.Development != nil && cfg.Development.DisableAllAuthorizationRules {
log.Info(ctx, "warning: development.disableAllAuthorizationRules is set, all authorization rules are disabled, this is insecure and should not be used in production.")
return authrules.InsecureAlwaysGrantAccess, nil
}
var claimsBasedAuthRuleFactory func(authorization
|
ResolveGRPCAuthorizationRule
|
identifier_name
|
callback.go
|
)
}
// Hooks can be used to customise the behaviour of an autogenerated sysl-go service.
type Hooks struct {
// Logger returns the common.Logger instance to use within Sysl-go.
// By default, if this Logger hook is not set then an instance of the pkg logger is used.
// This hook can also be used to define a custom logger.
// For more information about logging see log/README.md within this project.
// Note: The returned logger is guaranteed to have the log level from the external configuration
// file (library: log: level) set against it.
Logger func() log.Logger
// MapError maps an error to an HTTPError in instances where custom error mapping is required.
// Return nil to perform default error mapping; defined as:
// 1. CustomError.HTTPError if the original error is a CustomError, otherwise
// 2. common.MapError
// By default, if this MapError hook is not customised, the default error mapping will be used.
MapError func(ctx context.Context, err error) *common.HTTPError
// AdditionalGrpcDialOptions can be used to append to the default grpc.DialOption configuration used by
// an autogenerated service when it calls grpc.Dial when using a grpc.Client to connect to a gRPC server.
// If given, AdditionalGrpcDialOptions will be appended to the list of default options created by
// DefaultGrpcDialOptions(CommonGRPCDownstreamData).
//
// Use AdditionalGrpcDialOptions if you need both default and custom options. Be careful that you do
// not specify any options that clash with the default options.
//
// If you need to completely override the default options, use OverrideGrpcDialOptions.
// It is an error to set both AdditionalGrpcDialOptions and OverrideGrpcDialOptions.
AdditionalGrpcDialOptions []grpc.DialOption
// OverrideGrpcDialOptions can be used to override the default grpc.DialOption configuration used by
// an autogenerated service when it calls grpc.Dial when using a grpc.Client to connect to a gRPC server.
//
// The serviceName parameter will be filled with the name of the target service that we
// are about to call grpc.Dial to connect to -- a function implementing this hook can use the
// serviceName to customise different dial options for different targets.
//
// Prefer to use AdditionalGrpcDialOptions instead of OverrideGrpcDialOptions if you only need
// to append to the default grpc.DialOption configuration instead of overriding it completely.
//
// It is an error to set both AdditionalGrpcDialOptions and OverrideGrpcDialOptions.
OverrideGrpcDialOptions func(serviceName string, cfg *config.CommonGRPCDownstreamData) ([]grpc.DialOption, error)
// AdditionalGrpcServerOptions can be used to append to the default grpc.ServerOption configuration used by
// an autogenerated service when it creates a gRPC server. If given, AdditionalGrpcServerOptions will be
// appended to the list of default options created by DefaultGrpcServerOptions(context.Context, CommonServerConfig).
//
// Use AdditionalGrpcServerOptions if you need both default and custom options. Be careful that you do
// not specify any options that clash with the default options.
//
// If you need to completely override the default options, use OverrideGrpcServerOptions.
// It is an error to set both AdditionalGrpcServerOptions and OverrideGrpcServerOptions.
AdditionalGrpcServerOptions []grpc.ServerOption
// OverrideGrpcServerOptions can be used to override the default grpc.ServerOption configuration used by an
// autogenerated service when it creates a gRPC server.
//
// Prefer to use AdditionalGrpcServerOptions instead of OverrideGrpcServerOptions if you only need
// to append to the default grpc.ServerOption configuration instead of overriding it completely.
//
// It is an error to set both AdditionalGrpcServerOptions and OverrideGrpcServerOptions.
OverrideGrpcServerOptions func(ctx context.Context, grpcPublicServerConfig *config.CommonServerConfig) ([]grpc.ServerOption, error)
// OverrideMakeJWTClaimsBasedAuthorizationRule can be used to customise how authorization rule
// expressions are evaluated and used to decide if JWT claims are authorised. By default, if this
// hook is nil, then authrules.MakeDefaultJWTClaimsBasedAuthorizationRule is used.
OverrideMakeJWTClaimsBasedAuthorizationRule func(authorizationRuleExpression string) (authrules.JWTClaimsBasedAuthorizationRule, error)
// AddHTTPMiddleware can be used to install additional HTTP middleware into the chi.Router
// used to serve all (non-admin) HTTP endpoints. By default, sysl-go installs a number of
// HTTP middleware -- refer to prepareMiddleware inside sysl-go/core. This hook can only
// be used to add middleware, not override any of the default middleware.
AddHTTPMiddleware func(ctx context.Context, r chi.Router)
// AddAdminHTTPMiddleware can be used to install additional HTTP middleware into the chi.Router
// used to serve the admin HTTP endpoints. See AddHTTPMiddleware for further details.
AddAdminHTTPMiddleware func(ctx context.Context, r chi.Router)
// DownstreamRoundTripper can be used to install additional HTTP RoundTrippers to the downstream clients
DownstreamRoundTripper func(serviceName string, serviceURL string, original http.RoundTripper) http.RoundTripper
// ValidateConfig can be used to validate (or override) values in the config.
ValidateConfig func(ctx context.Context, cfg *config.DefaultConfig) error
}
func ResolveGrpcDialOptions(ctx context.Context, serviceName string, h *Hooks, grpcDownstreamConfig *config.CommonGRPCDownstreamData) ([]grpc.DialOption, error) {
switch {
case len(h.AdditionalGrpcDialOptions) > 0 && h.OverrideGrpcDialOptions != nil:
return nil, fmt.Errorf("Hooks.AdditionalGrpcDialOptions and Hooks.OverrideGrpcDialOptions cannot both be set")
case h.OverrideGrpcDialOptions != nil:
return h.OverrideGrpcDialOptions(serviceName, grpcDownstreamConfig)
default:
opts, err := config.DefaultGrpcDialOptions(ctx, grpcDownstreamConfig)
if err != nil {
return nil, err
}
opts = append(opts, h.AdditionalGrpcDialOptions...)
return opts, nil
}
}
func ResolveGrpcServerOptions(ctx context.Context, h *Hooks, grpcPublicServerConfig *config.CommonServerConfig) ([]grpc.ServerOption, error) {
switch {
case len(h.AdditionalGrpcServerOptions) > 0 && h.OverrideGrpcServerOptions != nil:
return nil, fmt.Errorf("Hooks.AdditionalGrpcServerOptions and Hooks.OverrideGrpcServerOptions cannot both be set")
case h.OverrideGrpcServerOptions != nil:
return h.OverrideGrpcServerOptions(ctx, grpcPublicServerConfig)
default:
opts, err := DefaultGrpcServerOptions(ctx, grpcPublicServerConfig)
if err != nil {
return nil, err
}
opts = append(opts, h.AdditionalGrpcServerOptions...)
return opts, nil
}
}
func ResolveGRPCAuthorizationRule(ctx context.Context, h *Hooks, endpointName string, authRuleExpression string) (authrules.Rule, error) {
return resolveAuthorizationRule(ctx, h, endpointName, authRuleExpression, authrules.MakeGRPCJWTAuthorizationRule)
}
func ResolveRESTAuthorizationRule(ctx context.Context, h *Hooks, endpointName string, authRuleExpression string) (authrules.Rule, error) {
return resolveAuthorizationRule(ctx, h, endpointName, authRuleExpression, authrules.MakeRESTJWTAuthorizationRule)
}
func resolveAuthorizationRule(ctx context.Context, h *Hooks, endpointName string, authRuleExpression string, ruleFactory func(authRule authrules.JWTClaimsBasedAuthorizationRule, authenticator jwtauth.Authenticator) (authrules.Rule, error)) (authrules.Rule, error) {
cfg := config.GetDefaultConfig(ctx)
if cfg.Development != nil && cfg.Development.DisableAllAuthorizationRules {
log.Info(ctx, "warning: development.disableAllAuthorizationRules is set, all authorization rules are disabled, this is insecure and should not be used in production.")
return authrules.InsecureAlwaysGrantAccess, nil
}
var claimsBasedAuthRuleFactory func(authorizationRuleExpression string) (authrules.JWTClaimsBasedAuthorizationRule, error)
switch {
case h.OverrideMakeJWTClaimsBasedAuthorizationRule != nil:
claimsBasedAuthRuleFactory = h.OverrideMakeJWTClaimsBasedAuthorizationRule
default:
claimsBasedAuthRuleFactory = authrules.MakeDefaultJWTClaimsBasedAuthorizationRule
}
claimsBasedAuthRule, err := claimsBasedAuthRuleFactory(authRuleExpression)
if err != nil {
return nil, err
}
// TODO(fletcher) inject custom http client instrumented with monitoring
httpClient, err := config.DefaultHTTPClient(ctx, nil)
if err != nil {
return nil, err
}
httpClientFactory := func(_ string) *http.Client {
return httpClient
}
// Note: this will start a new jwtauth.Authenticator, with its own cache and threads, for each of our service's endpoints; we usually want a shared one.
if cfg == nil || cfg.Library.Authentication == nil || cfg.Library.Authentication.JWTAuth == nil
|
{
return nil, fmt.Errorf("method/endpoint %s requires a JWT-based authorization rule, but there is no config for library.authentication.jwtauth", endpointName)
}
|
conditional_block
|
|
callback.go
|
hand-crafted code to add middleware to the router
AddMiddleware(ctx context.Context, r chi.Router)
// BasePath allows hand-crafted code to set the base path for the Router
BasePath() string
// Config returns a structure representing the server config
// This is returned from the status endpoint
Config() interface{}
// MapError maps an error to an HTTPError in instances where custom error mapping is required. Return nil to perform default error mapping; defined as:
// 1. CustomError.HTTPError if the original error is a CustomError, otherwise
// 2. common.MapError
MapError(ctx context.Context, err error) *common.HTTPError
// DownstreamTimeoutContext add the desired timeout duration to the context for downstreams
// A separate service timeout (usually greater than the downstream) should also be in
// place to automatically respond to callers
DownstreamTimeoutContext(ctx context.Context) (context.Context, context.CancelFunc)
}
// GrpcGenCallback is currently a subset of RestGenCallback so is defined separately for convenience.
type GrpcGenCallback interface {
DownstreamTimeoutContext(ctx context.Context) (context.Context, context.CancelFunc)
}
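// Example (illustrative sketch, not part of the generated code): a minimal
// GrpcGenCallback implementation. The exampleGrpcCallback name and the
// 5*time.Second budget are assumptions for illustration; "time" must be imported.
type exampleGrpcCallback struct{}

func (exampleGrpcCallback) DownstreamTimeoutContext(ctx context.Context) (context.Context, context.CancelFunc) {
	// Bound every downstream call; callers should defer the returned cancel func.
	return context.WithTimeout(ctx, 5*time.Second)
}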
// Hooks can be used to customise the behaviour of an autogenerated sysl-go service.
type Hooks struct {
// Logger returns the common.Logger instance to use within Sysl-go.
// By default, if this Logger hook is not set then an instance of the pkg logger is used.
// This hook can also be used to define a custom logger.
// For more information about logging see log/README.md within this project.
// Note: The returned logger is guaranteed to have the log level from the external configuration
// file (library: log: level) set against it.
Logger func() log.Logger
// MapError maps an error to an HTTPError in instances where custom error mapping is required.
// Return nil to perform default error mapping; defined as:
// 1. CustomError.HTTPError if the original error is a CustomError, otherwise
// 2. common.MapError
// By default, if this MapError hook is not customised, the default error mapping will be used.
MapError func(ctx context.Context, err error) *common.HTTPError
// AdditionalGrpcDialOptions can be used to append to the default grpc.DialOption configuration used by
// an autogenerated service when it calls grpc.Dial when using a grpc.Client to connect to a gRPC server.
// If given, AdditionalGrpcDialOptions will be appended to the list of default options created by
// DefaultGrpcDialOptions(CommonGRPCDownstreamData).
//
// Use AdditionalGrpcDialOptions if you need both default and custom options. Be careful that you do
// not specify any options that clash with the default options.
//
// If you need to completely override the default options, use OverrideGrpcDialOptions.
// It is an error to set both AdditionalGrpcDialOptions and OverrideGrpcDialOptions.
AdditionalGrpcDialOptions []grpc.DialOption
// OverrideGrpcDialOptions can be used to override the default grpc.DialOption configuration used by an
// autogenerated service when it calls grpc.Dial when using a grpc.Client to connect to a gRPC server.
//
// The serviceName parameter will be filled with the name of the target service that we
// are about to call grpc.Dial to connect to -- a function implementing this hook can use the
// serviceName to customise different dial options for different targets.
//
// Prefer to use AdditionalGrpcDialOptions instead of OverrideGrpcDialOptions if you only need
// to append to the default grpc.DialOption configuration instead of overriding it completely.
//
// It is an error to set both AdditionalGrpcDialOptions and OverrideGrpcDialOptions.
OverrideGrpcDialOptions func(serviceName string, cfg *config.CommonGRPCDownstreamData) ([]grpc.DialOption, error)
// AdditionalGrpcServerOptions can be used to append to the default grpc.ServerOption configuration used by
// an autogenerated service when it creates a gRPC server. If given, AdditionalGrpcServerOptions will be
// appended to the list of default options created by DefaultGrpcServerOptions(context.Context, CommonServerConfig).
//
// Use AdditionalGrpcServerOptions if you need both default and custom options. Be careful that you do
// not specify any options that clash with the default options.
//
// If you need to completely override the default options, use OverrideGrpcServerOptions.
// It is an error to set both AdditionalGrpcServerOptions and OverrideGrpcServerOptions.
AdditionalGrpcServerOptions []grpc.ServerOption
// OverrideGrpcServerOptions can be used to override the default grpc.ServerOption configuration used by an
// autogenerated service when it creates a gRPC server.
//
// Prefer to use AdditionalGrpcServerOptions instead of OverrideGrpcServerOptions if you only need
// to append to the default grpc.ServerOption configuration instead of overriding it completely.
//
// It is an error to set both AdditionalGrpcServerOptions and OverrideGrpcServerOptions.
OverrideGrpcServerOptions func(ctx context.Context, grpcPublicServerConfig *config.CommonServerConfig) ([]grpc.ServerOption, error)
// OverrideMakeJWTClaimsBasedAuthorizationRule can be used to customise how authorization rule
// expressions are evaluated and used to decide if JWT claims are authorised. By default, if this
// hook is nil, then authrules.MakeDefaultJWTClaimsBasedAuthorizationRule is used.
OverrideMakeJWTClaimsBasedAuthorizationRule func(authorizationRuleExpression string) (authrules.JWTClaimsBasedAuthorizationRule, error)
// AddHTTPMiddleware can be used to install additional HTTP middleware into the chi.Router
// used to serve all (non-admin) HTTP endpoints. By default, sysl-go installs a number of
// HTTP middleware -- refer to prepareMiddleware inside sysl-go/core. This hook can only
// be used to add middleware, not override any of the default middleware.
AddHTTPMiddleware func(ctx context.Context, r chi.Router)
// AddAdminHTTPMiddleware can be used to install additional HTTP middleware into the chi.Router
// used to serve the admin HTTP endpoints. See AddHTTPMiddleware for further details.
AddAdminHTTPMiddleware func(ctx context.Context, r chi.Router)
// DownstreamRoundTripper can be used to install additional HTTP RoundTrippers to the downstream clients
DownstreamRoundTripper func(serviceName string, serviceURL string, original http.RoundTripper) http.RoundTripper
// ValidateConfig can be used to validate (or override) values in the config.
ValidateConfig func(ctx context.Context, cfg *config.DefaultConfig) error
}
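// Example (illustrative sketch, not part of the generated code): a Hooks value that
// only appends to the defaults. Setting AdditionalGrpcServerOptions together with
// OverrideGrpcServerOptions would make ResolveGrpcServerOptions return an error.
// The 8 MiB limit and the chi middleware are assumed values; this presumes the usual
// google.golang.org/grpc and go-chi middleware imports.
func exampleHooks() *Hooks {
	return &Hooks{
		AdditionalGrpcServerOptions: []grpc.ServerOption{
			grpc.MaxRecvMsgSize(8 << 20), // raise the inbound gRPC message limit
		},
		AddHTTPMiddleware: func(ctx context.Context, r chi.Router) {
			r.Use(middleware.RequestID) // tag every request with an ID
		},
	}
}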
func ResolveGrpcDialOptions(ctx context.Context, serviceName string, h *Hooks, grpcDownstreamConfig *config.CommonGRPCDownstreamData) ([]grpc.DialOption, error)
|
func ResolveGrpcServerOptions(ctx context.Context, h *Hooks, grpcPublicServerConfig *config.CommonServerConfig) ([]grpc.ServerOption, error) {
switch {
case len(h.AdditionalGrpcServerOptions) > 0 && h.OverrideGrpcServerOptions != nil:
return nil, fmt.Errorf("Hooks.AdditionalGrpcServerOptions and Hooks.OverrideGrpcServerOptions cannot both be set")
case h.OverrideGrpcServerOptions != nil:
return h.OverrideGrpcServerOptions(ctx, grpcPublicServerConfig)
default:
opts, err := DefaultGrpcServerOptions(ctx, grpcPublicServerConfig)
if err != nil {
return nil, err
}
opts = append(opts, h.AdditionalGrpcServerOptions...)
return opts, nil
}
}
func ResolveGRPCAuthorizationRule(ctx context.Context, h *Hooks, endpointName string, authRuleExpression string) (authrules.Rule, error) {
return resolveAuthorizationRule(ctx, h, endpointName, authRuleExpression, authrules.MakeGRPCJWTAuthorizationRule)
}
func ResolveRESTAuthorizationRule(ctx context.Context, h *Hooks, endpointName string, authRuleExpression string) (authrules.Rule, error) {
return resolveAuthorizationRule(ctx, h, endpointName, authRuleExpression, authrules.MakeRESTJWTAuthorizationRule)
}
func resolveAuthorizationRule(ctx context.Context, h *Hooks, endpointName string, authRuleExpression string, ruleFactory func(authRule authrules.JWTClaimsBasedAuthorizationRule, authenticator jwtauth.Authenticator) (authrules.Rule, error)) (authrules.Rule, error) {
cfg := config.GetDefaultConfig(ctx)
if cfg.Development != nil && cfg.Development.DisableAllAuthorizationRules {
log.Info(ctx, "warning: development.disableAllAuthorizationRules is set, all authorization rules are disabled, this is insecure and should not be used in production.")
return authrules.InsecureAlwaysGrantAccess, nil
}
var claimsBasedAuthRuleFactory func(authorization
|
{
switch {
case len(h.AdditionalGrpcDialOptions) > 0 && h.OverrideGrpcDialOptions != nil:
return nil, fmt.Errorf("Hooks.AdditionalGrpcDialOptions and Hooks.OverrideGrpcDialOptions cannot both be set")
case h.OverrideGrpcDialOptions != nil:
return h.OverrideGrpcDialOptions(serviceName, grpcDownstreamConfig)
default:
opts, err := config.DefaultGrpcDialOptions(ctx, grpcDownstreamConfig)
if err != nil {
return nil, err
}
opts = append(opts, h.AdditionalGrpcDialOptions...)
return opts, nil
}
}
|
identifier_body
|
gdb.rs
|
(&self) -> usize {
// self.buffer.len()
// }
// pub fn len(&self) -> usize {
// self.head.wrapping_sub(self.tail) % N
// }
pub fn is_full(&self) -> bool {
(self.tail.wrapping_sub(1) % N) == self.head
}
pub fn try_push(&mut self, val: u8) -> Result<(), ()> {
if self.is_full() {
return Err(());
}
self.buffer[self.head] = val;
self.head = (self.head + 1) % N;
Ok(())
}
pub fn try_pop(&mut self) -> Option<u8> {
if self.tail == self.head {
return None;
}
let val = self.buffer[self.tail];
self.tail = (self.tail + 1) % N;
Some(val)
}
}
fn receive_irq(uart: &mut GdbUart) {
let mut buffer = MicroRingBuf::<32>::default();
loop {
// Try to fill up the ring buffer with as many characters
// as can fit. This is to compensate for the fact that we do
// all of this processing in an interrupt context, and the
// hardware UART buffer is only a few characters deep.
while !buffer.is_full() {
if let Some(c) = uart.getc() {
buffer.try_push(c).ok();
} else {
break;
}
}
// If there is a character in the buffer, process it. Otherwise,
// we're done.
let Some(c) = buffer.try_pop() else { break };
process_character(c);
// If the GDB server goes away for some reason, reconstitute it
unsafe {
if GDB_STATE.is_none() {
init();
}
}
}
}
impl XousTarget {
pub fn new() -> XousTarget {
XousTarget {
pid: None,
inner: cpu::XousTargetInner::default(),
}
}
}
fn state_can_accept_characters<'a, T: Target + ProcessPid, C: Connection>(
machine: &GdbStubStateMachine<'a, T, C>,
) -> bool {
match machine {
GdbStubStateMachine::Idle(_) | GdbStubStateMachine::Running(_) => true,
GdbStubStateMachine::CtrlCInterrupt(_) | GdbStubStateMachine::Disconnected(_) => false,
}
}
fn ensure_can_accept_characters_inner<'a, T: Target + ProcessPid, C: Connection>(
machine: GdbStubStateMachine<'a, T, C>,
target: &mut T,
recurse_count: usize,
) -> Option<GdbStubStateMachine<'a, T, C>> {
if recurse_count == 0 {
return None;
}
match machine {
GdbStubStateMachine::Idle(_) | GdbStubStateMachine::Running(_) => Some(machine),
GdbStubStateMachine::CtrlCInterrupt(gdb_stm_inner) => {
if let Some(pid) = target.pid() {
crate::services::SystemServices::with_mut(|system_services| {
if let Err(e) = system_services.pause_process_for_debug(pid) {
println!("Unable to pause process {:?} for debug: {:?}", pid, e);
}
});
}
let Ok(new_server) = gdb_stm_inner.interrupt_handled(target, Some(MultiThreadStopReason::Signal(Signal::SIGINT))) else {
return None
};
ensure_can_accept_characters_inner(new_server, target, recurse_count - 1)
}
GdbStubStateMachine::Disconnected(gdb_stm_inner) => {
if let Some(pid) = target.take_pid() {
crate::services::SystemServices::with_mut(|system_services| {
system_services.resume_process_from_debug(pid).unwrap()
});
}
ensure_can_accept_characters_inner(
gdb_stm_inner.return_to_idle(),
target,
recurse_count - 1,
)
}
}
}
fn ensure_can_accept_characters<'a, T: Target + ProcessPid, C: Connection>(
machine: GdbStubStateMachine<'a, T, C>,
target: &mut T,
) -> Option<GdbStubStateMachine<'a, T, C>> {
ensure_can_accept_characters_inner(machine, target, 4)
}
/// Advance the GDB state.
///
/// Two states accept characters:
///
/// GdbStubStateMachine::Idle
/// GdbStubStateMachine::Running
///
/// Two states exist merely to transition to other states:
///
/// GdbStubStateMachine::CtrlCInterrupt
/// GdbStubStateMachine::Disconnected
fn process_character(byte: u8) {
let XousDebugState { mut target, server } = unsafe {
GDB_STATE.take().unwrap_or_else(|| {
init();
GDB_STATE.take().unwrap()
})
};
if !state_can_accept_characters(&server) {
println!("GDB server was not in a state to accept characters");
return;
}
let new_server = match server {
GdbStubStateMachine::Idle(gdb_stm_inner) => {
let Ok(gdb) = gdb_stm_inner.incoming_data(&mut target, byte).map_err(|e| println!("gdbstub error during idle operation: {:?}", e)) else {
return;
};
gdb
}
GdbStubStateMachine::Running(gdb_stm_inner) => {
// If we're here we were running but have stopped now (either
// because we hit Ctrl+c in gdb and hence got a serial interrupt
// or we hit a breakpoint).
match gdb_stm_inner.incoming_data(&mut target, byte) {
Ok(pumped_stm) => pumped_stm,
Err(GdbStubError::TargetError(e)) => {
println!("Target raised a fatal error: {:?}", e);
return;
}
Err(e) => {
println!("gdbstub error in DeferredStopReason.pump: {:?}", e);
return;
}
}
}
_ => {
println!("GDB is in an unexpected state!");
return;
}
};
// If the user just hit Ctrl-C, then remove the pending interrupt that may or may not exist.
if let GdbStubStateMachine::CtrlCInterrupt(_) = &new_server {
target.unpatch_stepi(Tid::new(1).unwrap()).ok();
}
let Some(server) = ensure_can_accept_characters(new_server, &mut target) else {
println!("Couldn't convert GDB into a state that accepts characters");
return;
};
unsafe { GDB_STATE = Some(XousDebugState { target, server }) };
}
pub fn report_stop(_pid: xous_kernel::PID, tid: xous_kernel::TID, _pc: usize) {
let Some(XousDebugState {
mut target,
server: gdb,
}) = (unsafe { GDB_STATE.take() }) else {
println!("No GDB!");
return;
};
target.unpatch_stepi(Tid::new(tid).unwrap()).ok();
let GdbStubStateMachine::Running(inner) = gdb else {
println!("GDB state machine was in an invalid state");
return;
};
let Ok(new_gdb) = inner.report_stop(
&mut target,
MultiThreadStopReason::SignalWithThread {
signal: Signal::EXC_BREAKPOINT,
tid: Tid::new(tid).unwrap(),
}
) else {
println!("Unable to report stop");
return;
};
unsafe {
GDB_STATE = Some(XousDebugState {
target,
server: new_gdb,
})
};
}
pub fn report_terminated(pid: xous_kernel::PID) {
let Some(XousDebugState {
mut target,
server: gdb,
}) = (unsafe { GDB_STATE.take() }) else {
println!("No GDB!");
return;
};
let new_gdb = match gdb {
GdbStubStateMachine::Running(inner) => {
match inner.report_stop(
&mut target,
MultiThreadStopReason::Signal(Signal::EXC_BAD_ACCESS),
) {
Ok(new_gdb) => new_gdb,
Err(e) => {
println!("Unable to report stop: {:?}", e);
return;
}
}
}
GdbStubStateMachine::CtrlCInterrupt(_inner) => {
println!("GDB state was in CtrlCInterrupt, which shouldn't be possible!");
return;
}
GdbStubStateMachine::Disconnected(_inner) => {
println!("GDB state was in Disconnect, which shouldn't be possible!");
return;
}
GdbStubStateMachine::Idle(inner) => {
println!("Please connect a debugger to debug process {}", pid);
GdbStubStateMachine::Idle(inner)
}
};
unsafe {
GDB_STATE = Some(XousDebugState {
target,
server: new_gdb,
})
};
}
pub fn init()
|
{
let mut uart = GdbUart::new(receive_irq).unwrap();
uart.enable();
let mut target = XousTarget::new();
let server = GdbStubBuilder::new(uart)
.with_packet_buffer(unsafe { &mut GDB_BUFFER })
.build()
.expect("unable to build gdb server")
.run_state_machine(&mut target)
.expect("unable to start gdb state machine");
unsafe {
GDB_STATE = Some(XousDebugState { target, server });
}
}
|
identifier_body
|
|
gdb.rs
|
#[path = "gdb/riscv.rs"]
mod cpu;
pub struct XousTarget {
pid: Option<xous_kernel::PID>,
inner: cpu::XousTargetInner,
}
pub struct XousDebugState<'a> {
pub target: XousTarget,
pub server: GdbStubStateMachine<'a, XousTarget, crate::platform::precursor::gdbuart::GdbUart>,
}
static mut GDB_STATE: Option<XousDebugState> = None;
static mut GDB_BUFFER: [u8; 4096] = [0u8; 4096];
trait ProcessPid {
fn pid(&self) -> Option<xous_kernel::PID>;
fn take_pid(&mut self) -> Option<xous_kernel::PID>;
}
impl ProcessPid for XousTarget {
fn pid(&self) -> Option<xous_kernel::PID> {
self.pid
}
fn take_pid(&mut self) -> Option<xous_kernel::PID> {
self.pid.take()
}
}
struct MicroRingBuf<const N: usize> {
buffer: [u8; N],
head: usize,
tail: usize,
}
impl<const N: usize> Default for MicroRingBuf<N> {
fn default() -> Self {
MicroRingBuf {
buffer: [0u8; N],
head: 0,
tail: 0,
}
}
}
impl<const N: usize> MicroRingBuf<N> {
// pub fn capacity(&self) -> usize {
// self.buffer.len()
// }
// pub fn len(&self) -> usize {
// self.head.wrapping_sub(self.tail) % N
// }
pub fn is_full(&self) -> bool {
(self.tail.wrapping_sub(1) % N) == self.head
}
pub fn try_push(&mut self, val: u8) -> Result<(), ()> {
if self.is_full() {
return Err(());
}
self.buffer[self.head] = val;
self.head = (self.head + 1) % N;
Ok(())
}
pub fn try_pop(&mut self) -> Option<u8> {
if self.tail == self.head {
return None;
}
let val = self.buffer[self.tail];
self.tail = (self.tail + 1) % N;
Some(val)
}
}
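// Example (illustrative sketch, not part of the kernel): exercising MicroRingBuf.
// With this head/tail scheme one slot is always a sentinel, so a MicroRingBuf::<4>
// holds at most 3 bytes before is_full() reports true.
#[allow(dead_code)]
fn micro_ring_buf_example() {
    let mut buf = MicroRingBuf::<4>::default();
    assert!(buf.try_push(b'a').is_ok());
    assert!(buf.try_push(b'b').is_ok());
    assert!(buf.try_push(b'c').is_ok());
    assert!(buf.is_full()); // the fourth slot is the sentinel
    assert!(buf.try_push(b'd').is_err());
    assert_eq!(buf.try_pop(), Some(b'a')); // bytes come back out in FIFO order
    assert_eq!(buf.try_pop(), Some(b'b'));
}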
fn receive_irq(uart: &mut GdbUart) {
let mut buffer = MicroRingBuf::<32>::default();
loop {
// Try to fill up the ring buffer with as many characters
// as can fit. This is to compensate for the fact that we do
// all of this processing in an interrupt context, and the
// hardware UART buffer is only a few characters deep.
while !buffer.is_full() {
if let Some(c) = uart.getc() {
buffer.try_push(c).ok();
} else {
break;
}
}
// If there is a character in the buffer, process it. Otherwise,
// we're done.
let Some(c) = buffer.try_pop() else { break };
process_character(c);
// If the GDB server goes away for some reason, reconstitute it
unsafe {
if GDB_STATE.is_none() {
init();
}
}
}
}
impl XousTarget {
pub fn new() -> XousTarget {
XousTarget {
pid: None,
inner: cpu::XousTargetInner::default(),
}
}
}
fn state_can_accept_characters<'a, T: Target + ProcessPid, C: Connection>(
machine: &GdbStubStateMachine<'a, T, C>,
) -> bool {
match machine {
GdbStubStateMachine::Idle(_) | GdbStubStateMachine::Running(_) => true,
GdbStubStateMachine::CtrlCInterrupt(_) | GdbStubStateMachine::Disconnected(_) => false,
}
}
fn ensure_can_accept_characters_inner<'a, T: Target + ProcessPid, C: Connection>(
machine: GdbStubStateMachine<'a, T, C>,
target: &mut T,
recurse_count: usize,
) -> Option<GdbStubStateMachine<'a, T, C>> {
if recurse_count == 0 {
return None;
}
match machine {
GdbStubStateMachine::Idle(_) | GdbStubStateMachine::Running(_) => Some(machine),
GdbStubStateMachine::CtrlCInterrupt(gdb_stm_inner) => {
if let Some(pid) = target.pid() {
crate::services::SystemServices::with_mut(|system_services| {
if let Err(e) = system_services.pause_process_for_debug(pid) {
println!("Unable to pause process {:?} for debug: {:?}", pid, e);
}
});
}
let Ok(new_server) = gdb_stm_inner.interrupt_handled(target, Some(MultiThreadStopReason::Signal(Signal::SIGINT))) else {
return None
};
ensure_can_accept_characters_inner(new_server, target, recurse_count - 1)
}
GdbStubStateMachine::Disconnected(gdb_stm_inner) => {
if let Some(pid) = target.take_pid() {
crate::services::SystemServices::with_mut(|system_services| {
system_services.resume_process_from_debug(pid).unwrap()
});
}
ensure_can_accept_characters_inner(
gdb_stm_inner.return_to_idle(),
target,
recurse_count - 1,
)
}
}
}
fn ensure_can_accept_characters<'a, T: Target + ProcessPid, C: Connection>(
machine: GdbStubStateMachine<'a, T, C>,
target: &mut T,
) -> Option<GdbStubStateMachine<'a, T, C>> {
ensure_can_accept_characters_inner(machine, target, 4)
}
/// Advance the GDB state.
///
/// Two states accept characters:
///
/// GdbStubStateMachine::Idle
/// GdbStubStateMachine::Running
///
/// Two states exist merely to transition to other states:
///
/// GdbStubStateMachine::CtrlCInterrupt
/// GdbStubStateMachine::Disconnected
fn process_character(byte: u8) {
let XousDebugState { mut target, server } = unsafe {
GDB_STATE.take().unwrap_or_else(|| {
init();
GDB_STATE.take().unwrap()
})
};
if !state_can_accept_characters(&server) {
println!("GDB server was not in a state to accept characters");
return;
}
let new_server = match server {
GdbStubStateMachine::Idle(gdb_stm_inner) => {
let Ok(gdb) = gdb_stm_inner.incoming_data(&mut target, byte).map_err(|e| println!("gdbstub error during idle operation: {:?}", e)) else {
return;
};
gdb
}
GdbStubStateMachine::Running(gdb_stm_inner) => {
// If we're here we were running but have stopped now (either
// because we hit Ctrl+c in gdb and hence got a serial interrupt
// or we hit a breakpoint).
match gdb_stm_inner.incoming_data(&mut target, byte) {
Ok(pumped_stm) => pumped_stm,
Err(GdbStubError::TargetError(e)) => {
println!("Target raised a fatal error: {:?}", e);
return;
}
Err(e) =>
|
}
}
_ => {
println!("GDB is in an unexpected state!");
return;
}
};
// If the user just hit Ctrl-C, then remove the pending interrupt that may or may not exist.
if let GdbStubStateMachine::CtrlCInterrupt(_) = &new_server {
target.unpatch_stepi(Tid::new(1).unwrap()).ok();
}
let Some(server) = ensure_can_accept_characters(new_server, &mut target) else {
println!("Couldn't convert GDB into a state that accepts characters");
return;
};
unsafe { GDB_STATE = Some(XousDebugState { target, server }) };
}
pub fn report_stop(_pid: xous_kernel::PID, tid: xous_kernel::TID, _pc: usize) {
let Some(XousDebugState {
mut target,
server: gdb,
}) = (unsafe { GDB_STATE.take() }) else {
println!("No GDB!");
return;
};
target.unpatch_stepi(Tid::new(tid).unwrap()).ok();
let GdbStubStateMachine::Running(inner) = gdb else {
println!("GDB state machine was in an invalid state");
return;
};
let Ok(new_gdb) = inner.report_stop(
&mut target,
MultiThreadStopReason::SignalWithThread {
signal: Signal::EXC_BREAKPOINT,
tid: Tid::new(tid).unwrap(),
}
) else {
println!("Unable to report stop");
return;
};
unsafe {
GDB_STATE = Some(XousDebugState {
target,
server: new_gdb,
})
};
}
pub fn report_terminated(pid: xous_kernel::PID) {
let Some(XousDebugState {
mut target,
server: gdb,
}) = (unsafe { GDB_STATE.take() }) else {
println!("No GDB!");
return;
};
let new_gdb = match gdb {
GdbStubStateMachine::Running
|
{
println!("gdbstub error in DeferredStopReason.pump: {:?}", e);
return;
}
|
conditional_block
|
gdb.rs
|
#[path = "gdb/riscv.rs"]
mod cpu;
pub struct XousTarget {
pid: Option<xous_kernel::PID>,
inner: cpu::XousTargetInner,
}
pub struct XousDebugState<'a> {
pub target: XousTarget,
|
pub server: GdbStubStateMachine<'a, XousTarget, crate::platform::precursor::gdbuart::GdbUart>,
}
static mut GDB_STATE: Option<XousDebugState> = None;
static mut GDB_BUFFER: [u8; 4096] = [0u8; 4096];
trait ProcessPid {
fn pid(&self) -> Option<xous_kernel::PID>;
fn take_pid(&mut self) -> Option<xous_kernel::PID>;
}
impl ProcessPid for XousTarget {
fn pid(&self) -> Option<xous_kernel::PID> {
self.pid
}
fn take_pid(&mut self) -> Option<xous_kernel::PID> {
self.pid.take()
}
}
struct MicroRingBuf<const N: usize> {
buffer: [u8; N],
head: usize,
tail: usize,
}
impl<const N: usize> Default for MicroRingBuf<N> {
fn default() -> Self {
MicroRingBuf {
buffer: [0u8; N],
head: 0,
tail: 0,
}
}
}
impl<const N: usize> MicroRingBuf<N> {
// pub fn capacity(&self) -> usize {
// self.buffer.len()
// }
// pub fn len(&self) -> usize {
// self.head.wrapping_sub(self.tail) % N
// }
pub fn is_full(&self) -> bool {
(self.tail.wrapping_sub(1) % N) == self.head
}
pub fn try_push(&mut self, val: u8) -> Result<(), ()> {
if self.is_full() {
return Err(());
}
self.buffer[self.head] = val;
self.head = (self.head + 1) % N;
Ok(())
}
pub fn try_pop(&mut self) -> Option<u8> {
if self.tail == self.head {
return None;
}
let val = self.buffer[self.tail];
self.tail = (self.tail + 1) % N;
Some(val)
}
}
fn receive_irq(uart: &mut GdbUart) {
let mut buffer = MicroRingBuf::<32>::default();
loop {
// Try to fill up the ring buffer with as many characters
// as can fit. This is to compensate for the fact that we do
// all of this processing in an interrupt context, and the
// hardware UART buffer is only a few characters deep.
while !buffer.is_full() {
if let Some(c) = uart.getc() {
buffer.try_push(c).ok();
} else {
break;
}
}
// If there is a character in the buffer, process it. Otherwise,
// we're done.
let Some(c) = buffer.try_pop() else { break };
process_character(c);
// If the GDB server goes away for some reason, reconstitute it
unsafe {
if GDB_STATE.is_none() {
init();
}
}
}
}
impl XousTarget {
pub fn new() -> XousTarget {
XousTarget {
pid: None,
inner: cpu::XousTargetInner::default(),
}
}
}
fn state_can_accept_characters<'a, T: Target + ProcessPid, C: Connection>(
machine: &GdbStubStateMachine<'a, T, C>,
) -> bool {
match machine {
GdbStubStateMachine::Idle(_) | GdbStubStateMachine::Running(_) => true,
GdbStubStateMachine::CtrlCInterrupt(_) | GdbStubStateMachine::Disconnected(_) => false,
}
}
fn ensure_can_accept_characters_inner<'a, T: Target + ProcessPid, C: Connection>(
machine: GdbStubStateMachine<'a, T, C>,
target: &mut T,
recurse_count: usize,
) -> Option<GdbStubStateMachine<'a, T, C>> {
if recurse_count == 0 {
return None;
}
match machine {
GdbStubStateMachine::Idle(_) | GdbStubStateMachine::Running(_) => Some(machine),
GdbStubStateMachine::CtrlCInterrupt(gdb_stm_inner) => {
if let Some(pid) = target.pid() {
crate::services::SystemServices::with_mut(|system_services| {
if let Err(e) = system_services.pause_process_for_debug(pid) {
println!("Unable to pause process {:?} for debug: {:?}", pid, e);
}
});
}
let Ok(new_server) = gdb_stm_inner.interrupt_handled(target, Some(MultiThreadStopReason::Signal(Signal::SIGINT))) else {
return None
};
ensure_can_accept_characters_inner(new_server, target, recurse_count - 1)
}
GdbStubStateMachine::Disconnected(gdb_stm_inner) => {
if let Some(pid) = target.take_pid() {
crate::services::SystemServices::with_mut(|system_services| {
system_services.resume_process_from_debug(pid).unwrap()
});
}
ensure_can_accept_characters_inner(
gdb_stm_inner.return_to_idle(),
target,
recurse_count - 1,
)
}
}
}
fn ensure_can_accept_characters<'a, T: Target + ProcessPid, C: Connection>(
machine: GdbStubStateMachine<'a, T, C>,
target: &mut T,
) -> Option<GdbStubStateMachine<'a, T, C>> {
ensure_can_accept_characters_inner(machine, target, 4)
}
/// Advance the GDB state.
///
/// Two states accept characters:
///
/// GdbStubStateMachine::Idle
/// GdbStubStateMachine::Running
///
/// Two states exist merely to transition to other states:
///
/// GdbStubStateMachine::CtrlCInterrupt
/// GdbStubStateMachine::Disconnected
fn process_character(byte: u8) {
let XousDebugState { mut target, server } = unsafe {
GDB_STATE.take().unwrap_or_else(|| {
init();
GDB_STATE.take().unwrap()
})
};
if !state_can_accept_characters(&server) {
println!("GDB server was not in a state to accept characters");
return;
}
let new_server = match server {
GdbStubStateMachine::Idle(gdb_stm_inner) => {
let Ok(gdb) = gdb_stm_inner.incoming_data(&mut target, byte).map_err(|e| println!("gdbstub error during idle operation: {:?}", e)) else {
return;
};
gdb
}
GdbStubStateMachine::Running(gdb_stm_inner) => {
// If we're here we were running but have stopped now (either
// because we hit Ctrl+c in gdb and hence got a serial interrupt
// or we hit a breakpoint).
match gdb_stm_inner.incoming_data(&mut target, byte) {
Ok(pumped_stm) => pumped_stm,
Err(GdbStubError::TargetError(e)) => {
println!("Target raised a fatal error: {:?}", e);
return;
}
Err(e) => {
println!("gdbstub error in DeferredStopReason.pump: {:?}", e);
return;
}
}
}
_ => {
println!("GDB is in an unexpected state!");
return;
}
};
// If the user just hit Ctrl-C, then remove the pending interrupt that may or may not exist.
if let GdbStubStateMachine::CtrlCInterrupt(_) = &new_server {
target.unpatch_stepi(Tid::new(1).unwrap()).ok();
}
let Some(server) = ensure_can_accept_characters(new_server, &mut target) else {
println!("Couldn't convert GDB into a state that accepts characters");
return;
};
unsafe { GDB_STATE = Some(XousDebugState { target, server }) };
}
pub fn report_stop(_pid: xous_kernel::PID, tid: xous_kernel::TID, _pc: usize) {
let Some(XousDebugState {
mut target,
server: gdb,
}) = (unsafe { GDB_STATE.take() }) else {
println!("No GDB!");
return;
};
target.unpatch_stepi(Tid::new(tid).unwrap()).ok();
let GdbStubStateMachine::Running(inner) = gdb else {
println!("GDB state machine was in an invalid state");
return;
};
let Ok(new_gdb) = inner.report_stop(
&mut target,
MultiThreadStopReason::SignalWithThread {
signal: Signal::EXC_BREAKPOINT,
tid: Tid::new(tid).unwrap(),
}
) else {
println!("Unable to report stop");
return;
};
unsafe {
GDB_STATE = Some(XousDebugState {
target,
server: new_gdb,
})
};
}
pub fn report_terminated(pid: xous_kernel::PID) {
let Some(XousDebugState {
mut target,
server: gdb,
}) = (unsafe { GDB_STATE.take() }) else {
println!("No GDB!");
return;
};
let new_gdb = match gdb {
GdbStubStateMachine::Running(inner)
|
random_line_split
|
|
gdb.rs
|
#[path = "gdb/riscv.rs"]
mod cpu;
pub struct XousTarget {
pid: Option<xous_kernel::PID>,
inner: cpu::XousTargetInner,
}
pub struct XousDebugState<'a> {
pub target: XousTarget,
pub server: GdbStubStateMachine<'a, XousTarget, crate::platform::precursor::gdbuart::GdbUart>,
}
static mut GDB_STATE: Option<XousDebugState> = None;
static mut GDB_BUFFER: [u8; 4096] = [0u8; 4096];
trait ProcessPid {
fn pid(&self) -> Option<xous_kernel::PID>;
fn take_pid(&mut self) -> Option<xous_kernel::PID>;
}
impl ProcessPid for XousTarget {
fn pid(&self) -> Option<xous_kernel::PID> {
self.pid
}
fn take_pid(&mut self) -> Option<xous_kernel::PID> {
self.pid.take()
}
}
struct MicroRingBuf<const N: usize> {
buffer: [u8; N],
head: usize,
tail: usize,
}
impl<const N: usize> Default for MicroRingBuf<N> {
fn
|
() -> Self {
MicroRingBuf {
buffer: [0u8; N],
head: 0,
tail: 0,
}
}
}
impl<const N: usize> MicroRingBuf<N> {
// pub fn capacity(&self) -> usize {
// self.buffer.len()
// }
// pub fn len(&self) -> usize {
// self.head.wrapping_sub(self.tail) % N
// }
pub fn is_full(&self) -> bool {
(self.tail.wrapping_sub(1) % N) == self.head
}
pub fn try_push(&mut self, val: u8) -> Result<(), ()> {
if self.is_full() {
return Err(());
}
self.buffer[self.head] = val;
self.head = (self.head + 1) % N;
Ok(())
}
pub fn try_pop(&mut self) -> Option<u8> {
if self.tail == self.head {
return None;
}
let val = self.buffer[self.tail];
self.tail = (self.tail + 1) % N;
Some(val)
}
}
fn receive_irq(uart: &mut GdbUart) {
let mut buffer = MicroRingBuf::<32>::default();
loop {
// Try to fill up the ring buffer with as many characters
// as can fit. This is to compensate for the fact that we do
// all of this processing in an interrupt context, and the
// hardware UART buffer is only a few characters deep.
while !buffer.is_full() {
if let Some(c) = uart.getc() {
buffer.try_push(c).ok();
} else {
break;
}
}
// If there is a character in the buffer, process it. Otherwise,
// we're done.
let Some(c) = buffer.try_pop() else { break };
process_character(c);
// If the GDB server goes away for some reason, reconstitute it
unsafe {
if GDB_STATE.is_none() {
init();
}
}
}
}
impl XousTarget {
pub fn new() -> XousTarget {
XousTarget {
pid: None,
inner: cpu::XousTargetInner::default(),
}
}
}
fn state_can_accept_characters<'a, T: Target + ProcessPid, C: Connection>(
machine: &GdbStubStateMachine<'a, T, C>,
) -> bool {
match machine {
GdbStubStateMachine::Idle(_) | GdbStubStateMachine::Running(_) => true,
GdbStubStateMachine::CtrlCInterrupt(_) | GdbStubStateMachine::Disconnected(_) => false,
}
}
fn ensure_can_accept_characters_inner<'a, T: Target + ProcessPid, C: Connection>(
machine: GdbStubStateMachine<'a, T, C>,
target: &mut T,
recurse_count: usize,
) -> Option<GdbStubStateMachine<'a, T, C>> {
if recurse_count == 0 {
return None;
}
match machine {
GdbStubStateMachine::Idle(_) | GdbStubStateMachine::Running(_) => Some(machine),
GdbStubStateMachine::CtrlCInterrupt(gdb_stm_inner) => {
if let Some(pid) = target.pid() {
crate::services::SystemServices::with_mut(|system_services| {
if let Err(e) = system_services.pause_process_for_debug(pid) {
println!("Unable to pause process {:?} for debug: {:?}", pid, e);
}
});
}
let Ok(new_server) = gdb_stm_inner.interrupt_handled(target, Some(MultiThreadStopReason::Signal(Signal::SIGINT))) else {
return None
};
ensure_can_accept_characters_inner(new_server, target, recurse_count - 1)
}
GdbStubStateMachine::Disconnected(gdb_stm_inner) => {
if let Some(pid) = target.take_pid() {
crate::services::SystemServices::with_mut(|system_services| {
system_services.resume_process_from_debug(pid).unwrap()
});
}
ensure_can_accept_characters_inner(
gdb_stm_inner.return_to_idle(),
target,
recurse_count - 1,
)
}
}
}
fn ensure_can_accept_characters<'a, T: Target + ProcessPid, C: Connection>(
machine: GdbStubStateMachine<'a, T, C>,
target: &mut T,
) -> Option<GdbStubStateMachine<'a, T, C>> {
ensure_can_accept_characters_inner(machine, target, 4)
}
/// Advance the GDB state.
///
/// Two states accept characters:
///
/// GdbStubStateMachine::Idle
/// GdbStubStateMachine::Running
///
/// Two states exist merely to transition to other states:
///
/// GdbStubStateMachine::CtrlCInterrupt
/// GdbStubStateMachine::Disconnected
fn process_character(byte: u8) {
let XousDebugState { mut target, server } = unsafe {
GDB_STATE.take().unwrap_or_else(|| {
init();
GDB_STATE.take().unwrap()
})
};
if !state_can_accept_characters(&server) {
println!("GDB server was not in a state to accept characters");
return;
}
let new_server = match server {
GdbStubStateMachine::Idle(gdb_stm_inner) => {
let Ok(gdb) = gdb_stm_inner.incoming_data(&mut target, byte).map_err(|e| println!("gdbstub error during idle operation: {:?}", e)) else {
return;
};
gdb
}
GdbStubStateMachine::Running(gdb_stm_inner) => {
// If we're here we were running but have stopped now (either
// because we hit Ctrl+c in gdb and hence got a serial interrupt
// or we hit a breakpoint).
match gdb_stm_inner.incoming_data(&mut target, byte) {
Ok(pumped_stm) => pumped_stm,
Err(GdbStubError::TargetError(e)) => {
println!("Target raised a fatal error: {:?}", e);
return;
}
Err(e) => {
println!("gdbstub error in DeferredStopReason.pump: {:?}", e);
return;
}
}
}
_ => {
println!("GDB is in an unexpected state!");
return;
}
};
// If the user just hit Ctrl-C, then remove the pending interrupt that may or may not exist.
if let GdbStubStateMachine::CtrlCInterrupt(_) = &new_server {
target.unpatch_stepi(Tid::new(1).unwrap()).ok();
}
let Some(server) = ensure_can_accept_characters(new_server, &mut target) else {
println!("Couldn't convert GDB into a state that accepts characters");
return;
};
unsafe { GDB_STATE = Some(XousDebugState { target, server }) };
}
pub fn report_stop(_pid: xous_kernel::PID, tid: xous_kernel::TID, _pc: usize) {
let Some(XousDebugState {
mut target,
server: gdb,
}) = (unsafe { GDB_STATE.take() }) else {
println!("No GDB!");
return;
};
target.unpatch_stepi(Tid::new(tid).unwrap()).ok();
let GdbStubStateMachine::Running(inner) = gdb else {
println!("GDB state machine was in an invalid state");
return;
};
let Ok(new_gdb) = inner.report_stop(
&mut target,
MultiThreadStopReason::SignalWithThread {
signal: Signal::EXC_BREAKPOINT,
tid: Tid::new(tid).unwrap(),
}
) else {
println!("Unable to report stop");
return;
};
unsafe {
GDB_STATE = Some(XousDebugState {
target,
server: new_gdb,
})
};
}
pub fn report_terminated(pid: xous_kernel::PID) {
let Some(XousDebugState {
mut target,
server: gdb,
}) = (unsafe { GDB_STATE.take() }) else {
println!("No GDB!");
return;
};
let new_gdb = match gdb {
GdbStubStateMachine::Running(inner
|
default
|
identifier_name
|
tls.go
|
traffic comes in, the gateway on which the rule is being
// bound, etc. All these can be checked statically, since we are generating the configuration for a proxy
// with predefined labels, on a specific port.
func matchTCP(match *v1alpha3.L4MatchAttributes, proxyLabels labels.Collection, gateways map[string]bool, port int, proxyNamespace string) bool {
if match == nil {
return true
}
gatewayMatch := len(match.Gateways) == 0
for _, gateway := range match.Gateways {
gatewayMatch = gatewayMatch || gateways[gateway]
}
labelMatch := proxyLabels.IsSupersetOf(match.SourceLabels)
portMatch := match.Port == 0 || match.Port == uint32(port)
nsMatch := match.SourceNamespace == "" || match.SourceNamespace == proxyNamespace
return gatewayMatch && labelMatch && portMatch && nsMatch
}
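// Example (illustrative sketch, not part of Pilot): matchTCP evaluated for a proxy
// with labels {app: ratings, version: v1} listening on port 9080. The concrete
// labels, port and namespace below are assumptions for illustration.
func exampleMatchTCP() bool {
	match := &v1alpha3.L4MatchAttributes{
		Port:         9080,
		SourceLabels: map[string]string{"app": "ratings"},
	}
	proxyLabels := labels.Collection{{"app": "ratings", "version": "v1"}}
	gateways := map[string]bool{"mesh": true}
	// All predicates here are static, so this evaluates to true at config-generation time.
	return matchTCP(match, proxyLabels, gateways, 9080, "default")
}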
// Select the config pertaining to the service being processed.
func getConfigsForHost(hostname host.Name, configs []model.Config) []model.Config {
svcConfigs := make([]model.Config, 0)
for index := range configs {
virtualService := configs[index].Spec.(*v1alpha3.VirtualService)
for _, vsHost := range virtualService.Hosts {
if host.Name(vsHost).Matches(hostname) {
svcConfigs = append(svcConfigs, configs[index])
break
}
}
}
return svcConfigs
}
// hashRuntimeTLSMatchPredicates hashes runtime predicates of a TLS match
func hashRuntimeTLSMatchPredicates(match *v1alpha3.TLSMatchAttributes) string {
return strings.Join(match.SniHosts, ",") + "|" + strings.Join(match.DestinationSubnets, ",")
}
func buildSidecarOutboundTLSFilterChainOpts(node *model.Proxy, push *model.PushContext, destinationCIDR string,
service *model.Service, listenPort *model.Port,
gateways map[string]bool, configs []model.Config) []*filterChainOpts {
if !listenPort.Protocol.IsTLS() {
return nil
}
actualWildcard, _ := getActualWildcardAndLocalHost(node)
// TLS matches are composed of runtime and static predicates.
// Static predicates can be evaluated during the generation of the config. Examples: gateway, source labels, etc.
// Runtime predicates cannot be evaluated during config generation. Instead the proxy must be configured to
// evaluate them. Examples: SNI hosts, source/destination subnets, etc.
//
// A list of matches may contain duplicate runtime matches, but different static matches. For example:
//
// {sni_hosts: A, sourceLabels: X} => destination M
// {sni_hosts: A, sourceLabels: *} => destination N
//
// For a proxy with labels X, we can evaluate the static predicates to get:
// {sni_hosts: A} => destination M
// {sni_hosts: A} => destination N
//
// The matches have the same runtime predicates. Since the second match can never be reached, we only
// want to generate config for the first match.
//
// To achieve this in this function we keep track of which runtime matches we have already generated config for
// and only add config if we have not already generated config for that set of runtime predicates.
matchHasBeenHandled := make(map[string]bool) // Runtime predicate set -> have we generated config for this set?
// Is there a virtual service with a TLS block that matches us?
hasTLSMatch := false
out := make([]*filterChainOpts, 0)
for _, cfg := range configs {
virtualService := cfg.Spec.(*v1alpha3.VirtualService)
for _, tls := range virtualService.Tls {
for _, match := range tls.Match {
if matchTLS(match, labels.Collection{node.Metadata.Labels}, gateways, listenPort.Port, node.Metadata.Namespace) {
// Use the service's CIDRs.
// But if a virtual service overrides it with its own destination subnet match
// give preference to the user provided one
// destinationCIDR will be empty for services with VIPs
destinationCIDRs := []string{destinationCIDR}
// Only set CIDR match if the listener is bound to an IP.
// If it's bound to a unix domain socket, then ignore the CIDR matches
// Unix domain socket bound ports have Port value set to 0
if len(match.DestinationSubnets) > 0 && listenPort.Port > 0 {
destinationCIDRs = match.DestinationSubnets
}
matchHash := hashRuntimeTLSMatchPredicates(match)
if !matchHasBeenHandled[matchHash] {
out = append(out, &filterChainOpts{
metadata: util.BuildConfigInfoMetadataV2(cfg.ConfigMeta),
sniHosts: match.SniHosts,
destinationCIDRs: destinationCIDRs,
networkFilters: buildOutboundNetworkFilters(node, tls.Route, push, listenPort, cfg.ConfigMeta),
})
hasTLSMatch = true
}
matchHasBeenHandled[matchHash] = true
}
}
}
}
// HTTPS or TLS ports without associated virtual service
if !hasTLSMatch {
var sniHosts []string
// In case of a sidecar config with user defined port, if the user specified port is not the same as the
// service's port, then pick the service port if and only if the service has only one port. If service
// has multiple ports, then route to a cluster with the listener port (i.e. sidecar defined port) - the
// traffic will most likely blackhole.
port := listenPort.Port
if len(service.Ports) == 1 {
port = service.Ports[0].Port
}
clusterName := model.BuildSubsetKey(model.TrafficDirectionOutbound, "", service.Hostname, port)
statPrefix := clusterName
// If stat name is configured, use it to build the stat prefix.
if len(push.Mesh.OutboundClusterStatName) != 0 {
statPrefix = util.BuildStatPrefix(push.Mesh.OutboundClusterStatName, string(service.Hostname), "", &model.Port{Port: port}, service.Attributes)
}
// Use the hostname as the SNI value if and only if we do not have a destination VIP or if the destination is a CIDR.
// In both cases, the listener will be bound to 0.0.0.0. So SNI match is the only way to distinguish different
// target services. If we have a VIP, then we know the destination. There is no need to do a SNI match. It saves us from
// having to generate expensive permutations of the host name just like RDS does.
// NOTE that we cannot have two services with the same VIP as our listener build logic will treat it as a collision and
// ignore one of the services.
svcListenAddress := service.GetServiceAddressForProxy(node)
if strings.Contains(svcListenAddress, "/") {
// Address is a CIDR, already captured by destinationCIDR parameter.
svcListenAddress = ""
}
if len(destinationCIDR) > 0 || len(svcListenAddress) == 0 || svcListenAddress == actualWildcard {
sniHosts = []string{string(service.Hostname)}
}
out = append(out, &filterChainOpts{
sniHosts: sniHosts,
destinationCIDRs: []string{destinationCIDR},
networkFilters: buildOutboundNetworkFiltersWithSingleDestination(push, node, statPrefix, clusterName, listenPort),
})
}
return out
}
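// Example (illustrative sketch, not part of Pilot): the runtime-predicate dedup used
// above. Matches that differ only in static predicates hash to the same key, so only
// the first of them contributes a filter chain.
func exampleRuntimePredicateDedup(matches []*v1alpha3.TLSMatchAttributes) []*v1alpha3.TLSMatchAttributes {
	handled := make(map[string]bool)
	kept := make([]*v1alpha3.TLSMatchAttributes, 0, len(matches))
	for _, m := range matches {
		key := hashRuntimeTLSMatchPredicates(m)
		if handled[key] {
			continue // same SNI hosts and destination subnets: unreachable duplicate
		}
		handled[key] = true
		kept = append(kept, m)
	}
	return kept
}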
func buildSidecarOutboundTCPFilterChainOpts(node *model.Proxy, push *model.PushContext, destinationCIDR string,
service *model.Service, listenPort *model.Port,
gateways map[string]bool, configs []model.Config) []*filterChainOpts
|
metadata: util.BuildConfigInfoMetadataV2(cfg.ConfigMeta),
destinationCIDRs: destinationCIDRs,
networkFilters: buildOutboundNetworkFilters(node, tcp.Route, push, listenPort, cfg.ConfigMeta),
})
defaultRouteAdded = true
break TcpLoop
}
// Use the service's virtual address first.
// But if a virtual service overrides it with its own destination subnet match
// give preference to the user provided one
virtualServiceDestinationSubnets := make([]string, 0)
for _, match := range tcp.Match {
if matchTCP(match, labels.Collection{node.Metadata.Labels}, gateways, listenPort.Port, node.Metadata.Namespace) {
// Scan all the match blocks
|
{
if listenPort.Protocol.IsTLS() {
return nil
}
out := make([]*filterChainOpts, 0)
// very basic TCP
// break as soon as we add one network filter with no destination addresses to match
// This is the terminating condition in the filter chain match list
defaultRouteAdded := false
TcpLoop:
for _, cfg := range configs {
virtualService := cfg.Spec.(*v1alpha3.VirtualService)
for _, tcp := range virtualService.Tcp {
destinationCIDRs := []string{destinationCIDR}
if len(tcp.Match) == 0 {
// implicit match
out = append(out, &filterChainOpts{
|
identifier_body
|
tls.go
|
traffic comes in, the gateway on which the rule is being
// bound, etc. All these can be checked statically, since we are generating the configuration for a proxy
// with predefined labels, on a specific port.
func matchTCP(match *v1alpha3.L4MatchAttributes, proxyLabels labels.Collection, gateways map[string]bool, port int, proxyNamespace string) bool {
if match == nil {
return true
}
gatewayMatch := len(match.Gateways) == 0
for _, gateway := range match.Gateways {
gatewayMatch = gatewayMatch || gateways[gateway]
}
labelMatch := proxyLabels.IsSupersetOf(match.SourceLabels)
portMatch := match.Port == 0 || match.Port == uint32(port)
nsMatch := match.SourceNamespace == "" || match.SourceNamespace == proxyNamespace
return gatewayMatch && labelMatch && portMatch && nsMatch
}
// Select the config pertaining to the service being processed.
func getConfigsForHost(hostname host.Name, configs []model.Config) []model.Config {
svcConfigs := make([]model.Config, 0)
for index := range configs {
virtualService := configs[index].Spec.(*v1alpha3.VirtualService)
for _, vsHost := range virtualService.Hosts {
if host.Name(vsHost).Matches(hostname) {
svcConfigs = append(svcConfigs, configs[index])
break
}
}
}
return svcConfigs
}
// hashRuntimeTLSMatchPredicates hashes runtime predicates of a TLS match
func hashRuntimeTLSMatchPredicates(match *v1alpha3.TLSMatchAttributes) string {
return strings.Join(match.SniHosts, ",") + "|" + strings.Join(match.DestinationSubnets, ",")
}
func buildSidecarOutboundTLSFilterChainOpts(node *model.Proxy, push *model.PushContext, destinationCIDR string,
service *model.Service, listenPort *model.Port,
gateways map[string]bool, configs []model.Config) []*filterChainOpts {
if !listenPort.Protocol.IsTLS() {
return nil
}
actualWildcard, _ := getActualWildcardAndLocalHost(node)
// TLS matches are composed of runtime and static predicates.
// Static predicates can be evaluated during the generation of the config. Examples: gateway, source labels, etc.
// Runtime predicates cannot be evaluated during config generation. Instead the proxy must be configured to
// evaluate them. Examples: SNI hosts, source/destination subnets, etc.
//
// A list of matches may contain duplicate runtime matches, but different static matches. For example:
//
// {sni_hosts: A, sourceLabels: X} => destination M
// {sni_hosts: A, sourceLabels: *} => destination N
//
// For a proxy with labels X, we can evaluate the static predicates to get:
// {sni_hosts: A} => destination M
// {sni_hosts: A} => destination N
//
// The matches have the same runtime predicates. Since the second match can never be reached, we only
// want to generate config for the first match.
//
// To achieve this in this function we keep track of which runtime matches we have already generated config for
// and only add config if we have not already generated config for that set of runtime predicates.
matchHasBeenHandled := make(map[string]bool) // Runtime predicate set -> have we generated config for this set?
// Is there a virtual service with a TLS block that matches us?
hasTLSMatch := false
out := make([]*filterChainOpts, 0)
for _, cfg := range configs {
virtualService := cfg.Spec.(*v1alpha3.VirtualService)
for _, tls := range virtualService.Tls {
for _, match := range tls.Match {
if matchTLS(match, labels.Collection{node.Metadata.Labels}, gateways, listenPort.Port, node.Metadata.Namespace) {
// Use the service's CIDRs.
// But if a virtual service overrides it with its own destination subnet match
// give preference to the user provided one
// destinationCIDR will be empty for services with VIPs
destinationCIDRs := []string{destinationCIDR}
// Only set CIDR match if the listener is bound to an IP.
// If it's bound to a unix domain socket, then ignore the CIDR matches
// Unix domain socket bound ports have Port value set to 0
if len(match.DestinationSubnets) > 0 && listenPort.Port > 0 {
destinationCIDRs = match.DestinationSubnets
}
matchHash := hashRuntimeTLSMatchPredicates(match)
if !matchHasBeenHandled[matchHash] {
out = append(out, &filterChainOpts{
metadata: util.BuildConfigInfoMetadataV2(cfg.ConfigMeta),
sniHosts: match.SniHosts,
destinationCIDRs: destinationCIDRs,
networkFilters: buildOutboundNetworkFilters(node, tls.Route, push, listenPort, cfg.ConfigMeta),
})
hasTLSMatch = true
}
matchHasBeenHandled[matchHash] = true
}
}
}
}
|
// HTTPS or TLS ports without associated virtual service
if !hasTLSMatch {
var sniHosts []string
// In case of a sidecar config with user defined port, if the user specified port is not the same as the
// service's port, then pick the service port if and only if the service has only one port. If service
// has multiple ports, then route to a cluster with the listener port (i.e. sidecar defined port) - the
// traffic will most likely blackhole.
port := listenPort.Port
if len(service.Ports) == 1 {
port = service.Ports[0].Port
}
clusterName := model.BuildSubsetKey(model.TrafficDirectionOutbound, "", service.Hostname, port)
statPrefix := clusterName
// If stat name is configured, use it to build the stat prefix.
if len(push.Mesh.OutboundClusterStatName) != 0 {
statPrefix = util.BuildStatPrefix(push.Mesh.OutboundClusterStatName, string(service.Hostname), "", &model.Port{Port: port}, service.Attributes)
}
// Use the hostname as the SNI value if and only if we do not have a destination VIP or if the destination is a CIDR.
// In both cases, the listener will be bound to 0.0.0.0. So SNI match is the only way to distinguish different
// target services. If we have a VIP, then we know the destination. There is no need to do a SNI match. It saves us from
// having to generate expensive permutations of the host name just like RDS does.
// NOTE that we cannot have two services with the same VIP as our listener build logic will treat it as a collision and
// ignore one of the services.
svcListenAddress := service.GetServiceAddressForProxy(node)
if strings.Contains(svcListenAddress, "/") {
// Address is a CIDR, already captured by destinationCIDR parameter.
svcListenAddress = ""
}
if len(destinationCIDR) > 0 || len(svcListenAddress) == 0 || svcListenAddress == actualWildcard {
sniHosts = []string{string(service.Hostname)}
}
out = append(out, &filterChainOpts{
sniHosts: sniHosts,
destinationCIDRs: []string{destinationCIDR},
networkFilters: buildOutboundNetworkFiltersWithSingleDestination(push, node, statPrefix, clusterName, listenPort),
})
}
return out
}
func buildSidecarOutboundTCPFilterChainOpts(node *model.Proxy, push *model.PushContext, destinationCIDR string,
service *model.Service, listenPort *model.Port,
gateways map[string]bool, configs []model.Config) []*filterChainOpts {
if listenPort.Protocol.IsTLS() {
return nil
}
out := make([]*filterChainOpts, 0)
// very basic TCP
// break as soon as we add one network filter with no destination addresses to match
// This is the terminating condition in the filter chain match list
defaultRouteAdded := false
TcpLoop:
for _, cfg := range configs {
virtualService := cfg.Spec.(*v1alpha3.VirtualService)
for _, tcp := range virtualService.Tcp {
destinationCIDRs := []string{destinationCIDR}
if len(tcp.Match) == 0 {
// implicit match
out = append(out, &filterChainOpts{
metadata: util.BuildConfigInfoMetadataV2(cfg.ConfigMeta),
destinationCIDRs: destinationCIDRs,
networkFilters: buildOutboundNetworkFilters(node, tcp.Route, push, listenPort, cfg.ConfigMeta),
})
defaultRouteAdded = true
break TcpLoop
}
// Use the service's virtual address first.
// But if a virtual service overrides it with its own destination subnet match
// give preference to the user provided one
virtualServiceDestinationSubnets := make([]string, 0)
for _, match := range tcp.Match {
if matchTCP(match, labels.Collection{node.Metadata.Labels}, gateways, listenPort.Port, node.Metadata.Namespace) {
// Scan all the match blocks
//
|
random_line_split
|