https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/mssql_hook.py#L38-L49
def get_conn(self):
    """ """
    conn = self.get_connection(self.mssql_conn_id)
    conn = pymssql.connect(
        server=conn.host,
        user=conn.login,
        password=conn.password,
        database=self.schema or conn.schema,
        port=conn.port)
    return conn
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L1929-L1977
def _update_counters(self, ti_status): """ """ for key, ti in list(ti_status.running.items()): ti.refresh_from_db() if ti.state == State.SUCCESS: ti_status.succeeded.add(key) self.log.debug("Task instance %s succeeded. Don't rerun.", ti) ti_status.running.pop(key) continue elif ti.state == State.SKIPPED: ti_status.skipped.add(key) self.log.debug("Task instance %s skipped. Don't rerun.", ti) ti_status.running.pop(key) continue elif ti.state == State.FAILED: self.log.error("Task instance %s failed", ti) ti_status.failed.add(key) ti_status.running.pop(key) continue # special case: if the task needs to run again put it back elif ti.state == State.UP_FOR_RETRY: self.log.warning("Task instance %s is up for retry", ti) ti_status.running.pop(key) ti_status.to_run[key] = ti # special case: if the task needs to be rescheduled put it back elif ti.state == State.UP_FOR_RESCHEDULE: self.log.warning("Task instance %s is up for reschedule", ti) ti_status.running.pop(key) ti_status.to_run[key] = ti # special case: The state of the task can be set to NONE by the task itself # when it reaches concurrency limits. It could also happen when the state # is changed externally, e.g. by clearing tasks from the ui. We need to cover # for that as otherwise those tasks would fall outside of the scope of # the backfill suddenly. elif ti.state == State.NONE: self.log.warning( "FIXME: task instance %s state was set to none externally or " "reaching concurrency limits. Re-adding task to queue.", ti ) ti.set_state(State.SCHEDULED) ti_status.running.pop(key) ti_status.to_run[key] = ti
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/azure_fileshare_hook.py#L136-L153
def get_file_to_stream(self, stream, share_name, directory_name, file_name, **kwargs):
    """ """
    self.connection.get_file_to_stream(share_name, directory_name,
                                       file_name, stream, **kwargs)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_vision_hook.py#L128-L161
def create_product_set( self, location, product_set, project_id=None, product_set_id=None, retry=None, timeout=None, metadata=None, ): """ """ client = self.get_conn() parent = ProductSearchClient.location_path(project_id, location) self.log.info('Creating a new ProductSet under the parent: %s', parent) response = client.create_product_set( parent=parent, product_set=product_set, product_set_id=product_set_id, retry=retry, timeout=timeout, metadata=metadata, ) self.log.info('ProductSet created: %s', response.name if response else '') self.log.debug('ProductSet created:\n%s', response) if not product_set_id: # Product set id was generated by the API product_set_id = self._get_autogenerated_id(response) self.log.info('Extracted autogenerated ProductSet ID from the response: %s', product_set_id) return product_set_id
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/mongo_hook.py#L69-L85
def get_conn(self):
    """ """
    if self.client is not None:
        return self.client

    # Mongo Connection Options dict that is unpacked when passed to MongoClient
    options = self.extras

    # If we are using SSL disable requiring certs from specific hostname
    if options.get('ssl', False):
        options.update({'ssl_cert_reqs': CERT_NONE})

    self.client = MongoClient(self.uri, **options)

    return self.client
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/jenkins_job_trigger_operator.py#L34-L79
def jenkins_request_with_headers(jenkins_server, req): """ """ try: response = jenkins_server.jenkins_request(req) response_body = response.content response_headers = response.headers if response_body is None: raise jenkins.EmptyResponseException( "Error communicating with server[%s]: " "empty response" % jenkins_server.server) return {'body': response_body.decode('utf-8'), 'headers': response_headers} except HTTPError as e: # Jenkins's funky authentication means its nigh impossible to # distinguish errors. if e.code in [401, 403, 500]: # six.moves.urllib.error.HTTPError provides a 'reason' # attribute for all python version except for ver 2.6 # Falling back to HTTPError.msg since it contains the # same info as reason raise JenkinsException( 'Error in request. ' + 'Possibly authentication failed [%s]: %s' % ( e.code, e.msg) ) elif e.code == 404: raise jenkins.NotFoundException('Requested item could not be found') else: raise except socket.timeout as e: raise jenkins.TimeoutException('Error in request: %s' % e) except URLError as e: # python 2.6 compatibility to ensure same exception raised # since URLError wraps a socket timeout on python 2.6. if str(e.reason) == "timed out": raise jenkins.TimeoutException('Error in request: %s' % e.reason) raise JenkinsException('Error in request: %s' % e.reason)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/xcom.py#L188-L218
def get_many(cls, execution_date, key=None, task_ids=None, dag_ids=None, include_prior_dates=False, limit=100, session=None): """ """ filters = [] if key: filters.append(cls.key == key) if task_ids: filters.append(cls.task_id.in_(as_tuple(task_ids))) if dag_ids: filters.append(cls.dag_id.in_(as_tuple(dag_ids))) if include_prior_dates: filters.append(cls.execution_date <= execution_date) else: filters.append(cls.execution_date == execution_date) query = ( session.query(cls).filter(and_(*filters)) .order_by(cls.execution_date.desc(), cls.timestamp.desc()) .limit(limit)) results = query.all() return results
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/__init__.py#L27-L32
def _integrate_plugins():
    """"""
    from airflow.plugins_manager import hooks_modules
    for hooks_module in hooks_modules:
        sys.modules[hooks_module.__name__] = hooks_module
        globals()[hooks_module._name] = hooks_module
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/references/classification/utils.py#L172-L184
def setup_for_distributed(is_master):
    """ """
    import builtins as __builtin__
    builtin_print = __builtin__.print

    def print(*args, **kwargs):
        force = kwargs.pop('force', False)
        if is_master or force:
            builtin_print(*args, **kwargs)

    __builtin__.print = print
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/mongo_hook.py#L135-L142
def insert_many(self, mongo_collection, docs, mongo_db=None, **kwargs):
    """ """
    collection = self.get_collection(mongo_collection, mongo_db=mongo_db)

    return collection.insert_many(docs, **kwargs)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sftp_hook.py#L155-L163
def create_directory(self, path, mode=777):
    """ """
    conn = self.get_conn()
    conn.mkdir(path, mode)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/taskinstance.py#L295-L361
def generate_command(dag_id, task_id, execution_date, mark_success=False, ignore_all_deps=False, ignore_depends_on_past=False, ignore_task_deps=False, ignore_ti_state=False, local=False, pickle_id=None, file_path=None, raw=False, job_id=None, pool=None, cfg_path=None ): """ """ iso = execution_date.isoformat() cmd = ["airflow", "run", str(dag_id), str(task_id), str(iso)] cmd.extend(["--mark_success"]) if mark_success else None cmd.extend(["--pickle", str(pickle_id)]) if pickle_id else None cmd.extend(["--job_id", str(job_id)]) if job_id else None cmd.extend(["-A"]) if ignore_all_deps else None cmd.extend(["-i"]) if ignore_task_deps else None cmd.extend(["-I"]) if ignore_depends_on_past else None cmd.extend(["--force"]) if ignore_ti_state else None cmd.extend(["--local"]) if local else None cmd.extend(["--pool", pool]) if pool else None cmd.extend(["--raw"]) if raw else None cmd.extend(["-sd", file_path]) if file_path else None cmd.extend(["--cfg_path", cfg_path]) if cfg_path else None return cmd
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/spark_submit_hook.py#L513-L537
def _build_spark_driver_kill_command(self): """ """ # If the spark_home is passed then build the spark-submit executable path using # the spark_home; otherwise assume that spark-submit is present in the path to # the executing user if self._connection['spark_home']: connection_cmd = [os.path.join(self._connection['spark_home'], 'bin', self._connection['spark_binary'])] else: connection_cmd = [self._connection['spark_binary']] # The url ot the spark master connection_cmd += ["--master", self._connection['master']] # The actual kill command connection_cmd += ["--kill", self._driver_id] self.log.debug("Spark-Kill cmd: %s", connection_cmd) return connection_cmd
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/aws_glue_catalog_hook.py#L95-L118
def check_for_partition(self, database_name, table_name, expression):
    """ """
    partitions = self.get_partitions(database_name, table_name,
                                     expression, max_items=1)
    if partitions:
        return True
    else:
        return False
https://github.com/asciimoo/searx/blob/a84caa22cf947e973c10aa968d35fb2bdda6d048/searx/utils.py#L190-L195
def get_themes(templates_path):
    """"""
    themes = os.listdir(templates_path)
    if '__common__' in themes:
        themes.remove('__common__')
    return themes
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_transfer_hook.py#L235-L243
def cancel_transfer_operation(self, operation_name):
    """ """
    self.get_conn().transferOperations().cancel(
        name=operation_name).execute(num_retries=self.num_retries)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/taskinstance.py#L644-L650
def ready_for_retry(self):
    """ """
    return (self.state == State.UP_FOR_RETRY and
            self.next_retry_datetime() < timezone.utcnow())
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L855-L921
def run_extract( # noqa self, source_project_dataset_table, destination_cloud_storage_uris, compression='NONE', export_format='CSV', field_delimiter=',', print_header=True, labels=None): """ """ source_project, source_dataset, source_table = \ _split_tablename(table_input=source_project_dataset_table, default_project_id=self.project_id, var_name='source_project_dataset_table') configuration = { 'extract': { 'sourceTable': { 'projectId': source_project, 'datasetId': source_dataset, 'tableId': source_table, }, 'compression': compression, 'destinationUris': destination_cloud_storage_uris, 'destinationFormat': export_format, } } if labels: configuration['labels'] = labels if export_format == 'CSV': # Only set fieldDelimiter and printHeader fields if using CSV. # Google does not like it if you set these fields for other export # formats. configuration['extract']['fieldDelimiter'] = field_delimiter configuration['extract']['printHeader'] = print_header return self.run_with_configuration(configuration)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_mlengine_hook.py#L257-L273
def get_model(self, project_id, model_name):
    """ """
    if not model_name:
        raise ValueError("Model name must be provided and "
                         "it could not be an empty string")
    full_model_name = 'projects/{}/models/{}'.format(project_id, model_name)
    request = self._mlengine.projects().models().get(name=full_model_name)
    try:
        return request.execute()
    except HttpError as e:
        if e.resp.status == 404:
            self.log.error('Model was not found: %s', e)
            return None
        raise
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/databricks_hook.py#L99-L154
def _do_api_call(self, endpoint_info, json): """ """ method, endpoint = endpoint_info url = 'https://{host}/{endpoint}'.format( host=self._parse_host(self.databricks_conn.host), endpoint=endpoint) if 'token' in self.databricks_conn.extra_dejson: self.log.info('Using token auth.') auth = _TokenAuth(self.databricks_conn.extra_dejson['token']) else: self.log.info('Using basic auth.') auth = (self.databricks_conn.login, self.databricks_conn.password) if method == 'GET': request_func = requests.get elif method == 'POST': request_func = requests.post else: raise AirflowException('Unexpected HTTP Method: ' + method) attempt_num = 1 while True: try: response = request_func( url, json=json, auth=auth, headers=USER_AGENT_HEADER, timeout=self.timeout_seconds) response.raise_for_status() return response.json() except requests_exceptions.RequestException as e: if not _retryable_error(e): # In this case, the user probably made a mistake. # Don't retry. raise AirflowException('Response: {0}, Status Code: {1}'.format( e.response.content, e.response.status_code)) self._log_request_error(attempt_num, e) if attempt_num == self.retry_limit: raise AirflowException(('API requests to Databricks failed {} times. ' + 'Giving up.').format(self.retry_limit)) attempt_num += 1 sleep(self.retry_delay)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/presto_hook.py#L67-L78
def _get_pretty_exception_message(e):
    """ """
    if (hasattr(e, 'message') and
            'errorName' in e.message and
            'message' in e.message):
        return ('{name}: {message}'.format(
            name=e.message['errorName'],
            message=e.message['message']))
    else:
        return str(e)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_vision_hook.py#L106-L115
def get_conn(self):
    """ """
    if not self._client:
        self._client = ProductSearchClient(credentials=self._get_credentials())
    return self._client
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/veoh.py#L19-L33
def veoh_download_by_id(item_id, output_dir = '.', merge = False, info_only = False, **kwargs): """""" webpage_url = 'http://www.veoh.com/m/watch.php?v={item_id}&quality=1'.format(item_id = item_id) #grab download URL a = get_content(webpage_url, decoded=True) url = match1(a, r'<source src="(.*?)\"\W') #grab title title = match1(a, r'<meta property="og:title" content="([^"]*)"') type_, ext, size = url_info(url) print_info(site_info, title, type_, size) if not info_only: download_urls([url], title, ext, total_size=None, output_dir=output_dir, merge=merge)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L1282-L1322
def _enqueue_task_instances_with_queued_state(self, simple_dag_bag, simple_task_instances): """ """ TI = models.TaskInstance # actually enqueue them for simple_task_instance in simple_task_instances: simple_dag = simple_dag_bag.get_dag(simple_task_instance.dag_id) command = TI.generate_command( simple_task_instance.dag_id, simple_task_instance.task_id, simple_task_instance.execution_date, local=True, mark_success=False, ignore_all_deps=False, ignore_depends_on_past=False, ignore_task_deps=False, ignore_ti_state=False, pool=simple_task_instance.pool, file_path=simple_dag.full_filepath, pickle_id=simple_dag.pickle_id) priority = simple_task_instance.priority_weight queue = simple_task_instance.queue self.log.info( "Sending %s to executor with priority %s and queue %s", simple_task_instance.key, priority, queue ) self.executor.queue_command( simple_task_instance, command, priority=priority, queue=queue)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/api/experimental/endpoints.py#L175-L191
def dag_paused(dag_id, paused):
    """"""
    DagModel = models.DagModel
    with create_session() as session:
        orm_dag = (
            session.query(DagModel)
                   .filter(DagModel.dag_id == dag_id).first()
        )
        if paused == 'true':
            orm_dag.is_paused = True
        else:
            orm_dag.is_paused = False
        session.merge(orm_dag)
        session.commit()

    return jsonify({'response': 'ok'})
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/pinot_hook.py#L66-L76
def get_records(self, sql):
    """ """
    with self.get_conn() as cur:
        cur.execute(sql)
        return cur.fetchall()
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/sensors/bash_sensor.py#L60-L91
def poke(self, context): """ """ bash_command = self.bash_command self.log.info("Tmp dir root location: \n %s", gettempdir()) with TemporaryDirectory(prefix='airflowtmp') as tmp_dir: with NamedTemporaryFile(dir=tmp_dir, prefix=self.task_id) as f: f.write(bytes(bash_command, 'utf_8')) f.flush() fname = f.name script_location = tmp_dir + "/" + fname self.log.info("Temporary script location: %s", script_location) self.log.info("Running command: %s", bash_command) sp = Popen( ['bash', fname], stdout=PIPE, stderr=STDOUT, close_fds=True, cwd=tmp_dir, env=self.env, preexec_fn=os.setsid) self.sp = sp self.log.info("Output:") line = '' for line in iter(sp.stdout.readline, b''): line = line.decode(self.output_encoding).strip() self.log.info(line) sp.wait() self.log.info("Command exited with return code %s", sp.returncode) return not sp.returncode
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/security.py#L268-L286
def has_access(self, permission, view_name, user=None):
    """ """
    if not user:
        user = g.user

    if user.is_anonymous:
        return self.is_item_public(permission, view_name)
    return self._has_view_access(user, permission, view_name)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/timezone.py#L82-L95
def convert_to_utc(value):
    """ """
    if not value:
        return value

    if not is_localized(value):
        value = pendulum.instance(value, TIMEZONE)

    return value.astimezone(utc)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/oracle_hook.py#L117-L182
def insert_rows(self, table, rows, target_fields=None, commit_every=1000): """ """ if target_fields: target_fields = ', '.join(target_fields) target_fields = '({})'.format(target_fields) else: target_fields = '' conn = self.get_conn() cur = conn.cursor() if self.supports_autocommit: cur.execute('SET autocommit = 0') conn.commit() i = 0 for row in rows: i += 1 lst = [] for cell in row: if isinstance(cell, basestring): lst.append("'" + str(cell).replace("'", "''") + "'") elif cell is None: lst.append('NULL') elif type(cell) == float and \ numpy.isnan(cell): # coerce numpy NaN to NULL lst.append('NULL') elif isinstance(cell, numpy.datetime64): lst.append("'" + str(cell) + "'") elif isinstance(cell, datetime): lst.append("to_date('" + cell.strftime('%Y-%m-%d %H:%M:%S') + "','YYYY-MM-DD HH24:MI:SS')") else: lst.append(str(cell)) values = tuple(lst) sql = 'INSERT /*+ APPEND */ ' \ 'INTO {0} {1} VALUES ({2})'.format(table, target_fields, ','.join(values)) cur.execute(sql) if i % commit_every == 0: conn.commit() self.log.info('Loaded %s into %s rows so far', i, table) conn.commit() cur.close() conn.close() self.log.info('Done loading. Loaded a total of %s rows', i)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/datastore_hook.py#L123-L152
def lookup(self, keys, read_consistency=None, transaction=None):
    """ """
    conn = self.get_conn()

    body = {'keys': keys}
    if read_consistency:
        body['readConsistency'] = read_consistency
    if transaction:
        body['transaction'] = transaction
    resp = (conn
            .projects()
            .lookup(projectId=self.project_id, body=body)
            .execute(num_retries=self.num_retries))

    return resp
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/executors/base_executor.py#L160-L179
def get_event_buffer(self, dag_ids=None):
    """ """
    cleared_events = dict()
    if dag_ids is None:
        cleared_events = self.event_buffer
        self.event_buffer = dict()
    else:
        for key in list(self.event_buffer.keys()):
            dag_id, _, _, _ = key
            if dag_id in dag_ids:
                cleared_events[key] = self.event_buffer.pop(key)

    return cleared_events
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L834-L855
def new(params, event_shape=(), validate_args=False, name=None): """""" with tf.compat.v1.name_scope(name, 'IndependentLogistic', [params, event_shape]): params = tf.convert_to_tensor(value=params, name='params') event_shape = dist_util.expand_to_vector( tf.convert_to_tensor( value=event_shape, name='event_shape', dtype_hint=tf.int32), tensor_name='event_shape') output_shape = tf.concat([ tf.shape(input=params)[:-1], event_shape, ], axis=0) loc_params, scale_params = tf.split(params, 2, axis=-1) return tfd.Independent( tfd.Logistic( loc=tf.reshape(loc_params, output_shape), scale=tf.math.softplus(tf.reshape(scale_params, output_shape)), validate_args=validate_args), reinterpreted_batch_ndims=tf.size(input=event_shape), validate_args=validate_args)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_spanner_hook.py#L136-L160
def update_instance(self, instance_id, configuration_name, node_count,
                    display_name, project_id=None):
    """ """
    return self._apply_to_instance(project_id, instance_id, configuration_name,
                                   node_count, display_name,
                                   lambda x: x.update())
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/email.py#L36-L50
def send_email(to, subject, html_content,
               files=None, dryrun=False, cc=None, bcc=None,
               mime_subtype='mixed', mime_charset='utf-8', **kwargs):
    """ """
    path, attr = configuration.conf.get('email', 'EMAIL_BACKEND').rsplit('.', 1)
    module = importlib.import_module(path)
    backend = getattr(module, attr)
    to = get_email_address_list(to)
    to = ", ".join(to)

    return backend(to, subject, html_content, files=files,
                   dryrun=dryrun, cc=cc, bcc=bcc,
                   mime_subtype=mime_subtype, mime_charset=mime_charset, **kwargs)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/http_hook.py#L133-L147
def check_response(self, response):
    """ """
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError:
        self.log.error("HTTP error: %s", response.reason)
        if self.method not in ['GET', 'HEAD']:
            self.log.error(response.text)
        raise AirflowException(str(response.status_code) + ":" + response.reason)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dag_processing.py#L779-L787
def _exit_gracefully(self, signum, frame):
    """ """
    self.log.info("Exiting gracefully upon receiving signal %s", signum)
    self.terminate()
    self.end()
    self.log.debug("Finished terminating DAG processors.")
    sys.exit(os.EX_OK)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/api/experimental/endpoints.py#L98-L109
def delete_dag(dag_id):
    """ """
    try:
        count = delete.delete_dag(dag_id)
    except AirflowException as err:
        _log.error(err)
        response = jsonify(error="{}".format(err))
        response.status_code = err.status_code
        return response
    return jsonify(message="Removed {} record(s)".format(count), count=count)
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/models/squeezenet.py#L117-L129
def squeezenet1_1(pretrained=False, **kwargs):
    """ """
    model = SqueezeNet(version=1.1, **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['squeezenet1_1']))
    return model
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/api/common/experimental/pool.py#L26-L35
def get_pool(name, session=None):
    """"""
    if not (name and name.strip()):
        raise AirflowBadRequest("Pool name shouldn't be empty")

    pool = session.query(Pool).filter_by(pool=name).first()
    if pool is None:
        raise PoolNotFound("Pool '%s' doesn't exist" % name)

    return pool
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/custom_gradient.py#L39-L133
def custom_gradient(fx, gx, x, fx_gx_manually_stopped=False, name=None): """ """ def maybe_stop(x): if fx_gx_manually_stopped: return x return tf.stop_gradient(x) with tf.compat.v1.name_scope(name, 'custom_gradient', [fx, gx, x]): fx = tf.convert_to_tensor(value=fx, name='fx') # We don't want to bother eagerly computing `gx` since we may not even need # it. with tf.control_dependencies([fx]): if is_list_like(x): x = [identity(x_, name='x') for x_ in x] else: x = [identity(x, name='x')] if is_list_like(gx): gx = [identity(gx_, dtype=fx.dtype, name='gx') for gx_ in gx] else: gx = [identity(gx, dtype=fx.dtype, name='gx')] override_grad = [] for x_, gx_ in zip(x, gx): # Observe: tf.gradients(f(x), x)[i].shape == x[i].shape # thus we check that the user is supplying correct shapes. equal_shape = tf.compat.v1.assert_equal( tf.shape(input=x_), tf.shape(input=gx_), message='Each `x` must have the same shape as each `gx`.') with tf.control_dependencies([equal_shape]): # IEEE754 ensures `(x-x)==0.` and that `0.*x==0.` so we make sure to # write the code this way, rather than, e.g., # `sum_x * stop(gx) + stop(fx - sum_x * gx)`. # For more discussion regarding the relevant portions of the IEEE754 # standard, see the StackOverflow question, # "Is there a floating point value of x, for which x-x == 0 is false?" # http://stackoverflow.com/q/2686644 zeros_like_x_ = x_ - tf.stop_gradient(x_) override_grad.append( tf.reduce_sum(input_tensor=maybe_stop(gx_) * zeros_like_x_)) override_grad = sum(override_grad) override_grad /= tf.cast(tf.size(input=fx), dtype=fx.dtype.base_dtype) # Proof of correctness: # # f(x) = x * stop[gx] + stop[fx - x * gx] # = stop[fx] # # g(x) = grad[fx] # = stop[gx] + grad[stop[fx - x * gx]] # = stop[gx] + 0 # # Notice that when x is zero it still works: # grad[x * stop(gx) + stop(fx - x * gx)] = 1 * stop[gx] + 0 = stop[gx] # # The proof is similar for the tensor-domain case, except that we # `reduce_sum` the `stop[gx] * (x - stop[x])` then rescale by # `tf.size(fx)` since this reduced version is broadcast to `fx`. return maybe_stop(fx) + override_grad
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/mongo_hook.py#L263-L279
def delete_one(self, mongo_collection, filter_doc, mongo_db=None, **kwargs):
    """ """
    collection = self.get_collection(mongo_collection, mongo_db=mongo_db)

    return collection.delete_one(filter_doc, **kwargs)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/mongo_hook.py#L93-L102
def get_collection(self, mongo_collection, mongo_db=None):
    """ """
    mongo_db = mongo_db if mongo_db is not None else self.connection.schema
    mongo_conn = self.get_conn()

    return mongo_conn.get_database(mongo_db).get_collection(mongo_collection)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/pinot_hook.py#L78-L88
def get_first(self, sql):
    """ """
    with self.get_conn() as cur:
        cur.execute(sql)
        return cur.fetchone()
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/fun_mcmc/fun_mcmc_lib.py#L299-L345
def metropolis_hastings_step(current_state: State, proposed_state: State, energy_change: FloatTensor, seed=None) -> Tuple[State, tf.Tensor, tf.Tensor]: """ """ flat_current = tf.nest.flatten(current_state) flat_proposed = nest.flatten_up_to(current_state, proposed_state) # Impute the None's in the current state. flat_current = [ p if c is None else c for p, c in zip(flat_proposed, flat_current) ] current_state = tf.nest.pack_sequence_as(current_state, flat_current) current_state = tf.nest.map_structure(tf.convert_to_tensor, current_state) proposed_state = tf.nest.map_structure(tf.convert_to_tensor, proposed_state) energy_change = tf.convert_to_tensor(value=energy_change) log_accept_ratio = -energy_change log_uniform = tf.math.log( tf.random.uniform( shape=tf.shape(input=log_accept_ratio), dtype=log_accept_ratio.dtype.base_dtype, seed=seed)) is_accepted = log_uniform < log_accept_ratio next_state = mcmc_util.choose( is_accepted, proposed_state, current_state, name='choose_next_state') return next_state, is_accepted, log_uniform
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/kubernetes/pod_generator.py#L67-L73
def add_volume(self, volume):
    """ """
    self._add_volume(name=volume.name, configs=volume.configs)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/datastore_hook.py#L154-L168
def rollback(self, transaction):
    """ """
    conn = self.get_conn()

    conn.projects().rollback(
        projectId=self.project_id, body={'transaction': transaction}
    ).execute(num_retries=self.num_retries)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/aws_sqs_hook.py#L50-L71
def send_message(self, queue_url, message_body, delay_seconds=0, message_attributes=None):
    """ """
    return self.get_conn().send_message(QueueUrl=queue_url,
                                        MessageBody=message_body,
                                        DelaySeconds=delay_seconds,
                                        MessageAttributes=message_attributes or {})
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/postgres_hook.py#L63-L83
def copy_expert(self, sql, filename, open=open):
    """ """
    if not os.path.isfile(filename):
        with open(filename, 'w'):
            pass

    with open(filename, 'r+') as f:
        with closing(self.get_conn()) as conn:
            with closing(conn.cursor()) as cur:
                cur.copy_expert(sql, f)
                f.truncate(f.tell())
                conn.commit()
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L1698-L1703
def params_size(num_components, event_shape=(), name=None):
    """"""
    return MixtureSameFamily.params_size(
        num_components,
        IndependentLogistic.params_size(event_shape, name=name),
        name=name)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/S3_hook.py#L316-L350
def load_file(self, filename, key, bucket_name=None, replace=False, encrypt=False):
    """ """
    if not bucket_name:
        (bucket_name, key) = self.parse_s3_url(key)

    if not replace and self.check_for_key(key, bucket_name):
        raise ValueError("The key {key} already exists.".format(key=key))

    extra_args = {}
    if encrypt:
        extra_args['ServerSideEncryption'] = "AES256"

    client = self.get_conn()
    client.upload_file(filename, bucket_name, key, ExtraArgs=extra_args)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/docs/exts/docroles.py#L58-L88
def template_field_role(app, typ, rawtext, text, lineno, inliner, options={}, content=[]): """ """ text = utils.unescape(text) try: template_fields = get_template_field(app.env, text) except RoleException as e: msg = inliner.reporter.error("invalid class name %s \n%s" % (text, e, ), line=lineno) prb = inliner.problematic(rawtext, rawtext, msg) return [prb], [msg] node = nodes.inline(rawtext=rawtext) for i, field in enumerate(template_fields): if i != 0: node += nodes.Text(", ") node += nodes.literal(field, "", nodes.Text(field)) return [node], []
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/redis_hook.py#L45-L66
def get_conn(self):
    """ """
    conn = self.get_connection(self.redis_conn_id)
    self.host = conn.host
    self.port = conn.port
    self.password = None if str(conn.password).lower() in ['none', 'false', ''] else conn.password
    self.db = conn.extra_dejson.get('db', None)

    if not self.redis:
        self.log.debug(
            'Initializing redis object for conn_id "%s" on %s:%s:%s',
            self.redis_conn_id, self.host, self.port, self.db
        )
        self.redis = Redis(
            host=self.host,
            port=self.port,
            password=self.password,
            db=self.db)

    return self.redis
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/categorical_to_discrete.py#L127-L154
def _maybe_check_valid_map_values(map_values, validate_args): """""" assertions = [] message = 'Rank of map_values must be 1.' if tensorshape_util.rank(map_values.shape) is not None: if tensorshape_util.rank(map_values.shape) != 1: raise ValueError(message) elif validate_args: assertions.append(assert_util.assert_rank(map_values, 1, message=message)) message = 'Size of map_values must be greater than 0.' if tensorshape_util.num_elements(map_values.shape) is not None: if tensorshape_util.num_elements(map_values.shape) == 0: raise ValueError(message) elif validate_args: assertions.append( assert_util.assert_greater( tf.size(input=map_values), 0, message=message)) if validate_args: assertions.append( assert_util.assert_equal( tf.math.is_strictly_increasing(map_values), True, message='map_values is not strictly increasing.')) return assertions
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/xcom.py#L88-L136
def set( cls, key, value, execution_date, task_id, dag_id, session=None): """ """ session.expunge_all() enable_pickling = configuration.getboolean('core', 'enable_xcom_pickling') if enable_pickling: value = pickle.dumps(value) else: try: value = json.dumps(value).encode('UTF-8') except ValueError: log = LoggingMixin().log log.error("Could not serialize the XCOM value into JSON. " "If you are using pickles instead of JSON " "for XCOM, then you need to enable pickle " "support for XCOM in your airflow config.") raise # remove any duplicate XComs session.query(cls).filter( cls.key == key, cls.execution_date == execution_date, cls.task_id == task_id, cls.dag_id == dag_id).delete() session.commit() # insert new XCom session.add(XCom( key=key, value=value, execution_date=execution_date, task_id=task_id, dag_id=dag_id)) session.commit()
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/settings.py#L220-L235
def prepare_classpath():
    """ """
    if DAGS_FOLDER not in sys.path:
        sys.path.append(DAGS_FOLDER)

    # Add ./config/ for loading custom log parsers etc, or
    # airflow_local_settings etc.
    config_path = os.path.join(AIRFLOW_HOME, 'config')
    if config_path not in sys.path:
        sys.path.append(config_path)

    if PLUGINS_FOLDER not in sys.path:
        sys.path.append(PLUGINS_FOLDER)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/spark_submit_hook.py#L322-L376
def submit(self, application="", **kwargs): """ """ spark_submit_cmd = self._build_spark_submit_command(application) if hasattr(self, '_env'): env = os.environ.copy() env.update(self._env) kwargs["env"] = env self._submit_sp = subprocess.Popen(spark_submit_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=-1, universal_newlines=True, **kwargs) self._process_spark_submit_log(iter(self._submit_sp.stdout.readline, '')) returncode = self._submit_sp.wait() # Check spark-submit return code. In Kubernetes mode, also check the value # of exit code in the log, as it may differ. if returncode or (self._is_kubernetes and self._spark_exit_code != 0): raise AirflowException( "Cannot execute: {}. Error code is: {}.".format( spark_submit_cmd, returncode ) ) self.log.debug("Should track driver: {}".format(self._should_track_driver_status)) # We want the Airflow job to wait until the Spark driver is finished if self._should_track_driver_status: if self._driver_id is None: raise AirflowException( "No driver id is known: something went wrong when executing " + "the spark submit command" ) # We start with the SUBMITTED status as initial status self._driver_status = "SUBMITTED" # Start tracking the driver status (blocking function) self._start_driver_status_tracking() if self._driver_status != "FINISHED": raise AirflowException( "ERROR : Driver {} badly exited with status {}" .format(self._driver_id, self._driver_status) )
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/mixture.py#L497-L502
def _cat_probs(self, log_probs):
    """"""
    which_softmax = tf.nn.log_softmax if log_probs else tf.nn.softmax
    cat_probs = which_softmax(self.cat.logits)
    cat_probs = tf.unstack(cat_probs, num=self.num_components, axis=-1)
    return cat_probs
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/api/common/experimental/mark_tasks.py#L188-L207
def _set_dag_run_state(dag_id, execution_date, state, session=None):
    """ """
    DR = DagRun
    dr = session.query(DR).filter(
        DR.dag_id == dag_id,
        DR.execution_date == execution_date
    ).one()
    dr.state = state
    if state == State.RUNNING:
        dr.start_date = timezone.utcnow()
        dr.end_date = None
    else:
        dr.end_date = timezone.utcnow()
    session.merge(dr)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/kubernetes/pod_launcher.py#L73-L92
def run_pod(self, pod, startup_timeout=120, get_logs=True): # type: (Pod, int, bool) -> Tuple[State, Optional[str]] """ """ resp = self.run_pod_async(pod) curr_time = dt.now() if resp.status.start_time is None: while self.pod_not_started(pod): delta = dt.now() - curr_time if delta.seconds >= startup_timeout: raise AirflowException("Pod took too long to start") time.sleep(1) self.log.debug('Pod not yet started') return self._monitor_pod(pod, get_logs)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/salesforce_hook.py#L95-L108
def describe_object(self, obj):
    """ """
    conn = self.get_conn()

    return conn.__getattr__(obj).describe()
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/spark_submit_hook.py#L431-L446
def _process_spark_status_log(self, itr): """ """ # Consume the iterator for line in itr: line = line.strip() # Check if the log line is about the driver status and extract the status. if "driverState" in line: self._driver_status = line.split(' : ')[1] \ .replace(',', '').replace('\"', '').strip() self.log.debug("spark driver status log: {}".format(line))
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dag_processing.py#L1230-L1262
def _find_zombies(self, session): """ """ now = timezone.utcnow() zombies = [] if (now - self._last_zombie_query_time).total_seconds() \ > self._zombie_query_interval: # to avoid circular imports from airflow.jobs import LocalTaskJob as LJ self.log.info("Finding 'running' jobs without a recent heartbeat") TI = airflow.models.TaskInstance limit_dttm = timezone.utcnow() - timedelta( seconds=self._zombie_threshold_secs) self.log.info("Failing jobs without heartbeat after %s", limit_dttm) tis = ( session.query(TI) .join(LJ, TI.job_id == LJ.id) .filter(TI.state == State.RUNNING) .filter( or_( LJ.state != State.RUNNING, LJ.latest_heartbeat < limit_dttm, ) ).all() ) self._last_zombie_query_time = timezone.utcnow() for ti in tis: zombies.append(SimpleTaskInstance(ti)) return zombies
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/oracle_hook.py#L184-L231
def bulk_insert_rows(self, table, rows, target_fields=None, commit_every=5000): """ """ if not rows: raise ValueError("parameter rows could not be None or empty iterable") conn = self.get_conn() cursor = conn.cursor() values_base = target_fields if target_fields else rows[0] prepared_stm = 'insert into {tablename} {columns} values ({values})'.format( tablename=table, columns='({})'.format(', '.join(target_fields)) if target_fields else '', values=', '.join(':%s' % i for i in range(1, len(values_base) + 1)), ) row_count = 0 # Chunk the rows row_chunk = [] for row in rows: row_chunk.append(row) row_count += 1 if row_count % commit_every == 0: cursor.prepare(prepared_stm) cursor.executemany(None, row_chunk) conn.commit() self.log.info('[%s] inserted %s rows', table, row_count) # Empty chunk row_chunk = [] # Commit the leftover chunk cursor.prepare(prepared_stm) cursor.executemany(None, row_chunk) conn.commit() self.log.info('[%s] inserted %s rows', table, row_count) cursor.close() conn.close()
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/utils.py#L6-L87
def make_grid(tensor, nrow=8, padding=2, normalize=False, range=None, scale_each=False, pad_value=0): """ """ if not (torch.is_tensor(tensor) or (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))): raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor))) # if list of tensors, convert to a 4D mini-batch Tensor if isinstance(tensor, list): tensor = torch.stack(tensor, dim=0) if tensor.dim() == 2: # single image H x W tensor = tensor.unsqueeze(0) if tensor.dim() == 3: # single image if tensor.size(0) == 1: # if single-channel, convert to 3-channel tensor = torch.cat((tensor, tensor, tensor), 0) tensor = tensor.unsqueeze(0) if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images tensor = torch.cat((tensor, tensor, tensor), 1) if normalize is True: tensor = tensor.clone() # avoid modifying tensor in-place if range is not None: assert isinstance(range, tuple), \ "range has to be a tuple (min, max) if specified. min and max are numbers" def norm_ip(img, min, max): img.clamp_(min=min, max=max) img.add_(-min).div_(max - min + 1e-5) def norm_range(t, range): if range is not None: norm_ip(t, range[0], range[1]) else: norm_ip(t, float(t.min()), float(t.max())) if scale_each is True: for t in tensor: # loop over mini-batch dimension norm_range(t, range) else: norm_range(tensor, range) if tensor.size(0) == 1: return tensor.squeeze() # make the mini-batch of images into a grid nmaps = tensor.size(0) xmaps = min(nrow, nmaps) ymaps = int(math.ceil(float(nmaps) / xmaps)) height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding) grid = tensor.new_full((3, height * ymaps + padding, width * xmaps + padding), pad_value) k = 0 for y in irange(ymaps): for x in irange(xmaps): if k >= nmaps: break grid.narrow(1, y * height + padding, height - padding)\ .narrow(2, x * width + padding, width - padding)\ .copy_(tensor[k]) k = k + 1 return grid
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/cli_action_loggers.py#L72-L86
def on_post_execution(**kwargs):
    """ """
    logging.debug("Calling callbacks: %s", __post_exec_callbacks)
    for cb in __post_exec_callbacks:
        try:
            cb(**kwargs)
        except Exception:
            logging.exception('Failed on post-execution callback using %s', cb)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/imap_hook.py#L49-L66
def has_mail_attachment(self, name, mail_folder='INBOX', check_regex=False):
    """ """
    mail_attachments = self._retrieve_mails_attachments_by_name(name,
                                                                mail_folder,
                                                                check_regex,
                                                                latest_only=True)
    return len(mail_attachments) > 0
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/helpers.py#L121-L128
def chunks(items, chunk_size):
    """ """
    if chunk_size <= 0:
        raise ValueError('Chunk size must be a positive integer')
    for i in range(0, len(items), chunk_size):
        yield items[i:i + chunk_size]
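A minimal usage sketch for the chunks() generator above (assuming it is imported from airflow.utils.helpers, the module shown in the link; the input list is purely illustrative):

from airflow.utils.helpers import chunks

# Split a list into pieces of at most two elements.
print(list(chunks([1, 2, 3, 4, 5], 2)))  # -> [[1, 2], [3, 4], [5]]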
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/ftp_hook.py#L81-L91
def get_conn(self):
    """ """
    if self.conn is None:
        params = self.get_connection(self.ftp_conn_id)
        pasv = params.extra_dejson.get("passive", True)
        self.conn = ftplib.FTP(params.host, params.login, params.password)
        self.conn.set_pasv(pasv)

    return self.conn
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_api_base_hook.py#L82-L129
def _get_credentials(self): """ """ key_path = self._get_field('key_path', False) keyfile_dict = self._get_field('keyfile_dict', False) scope = self._get_field('scope', None) if scope: scopes = [s.strip() for s in scope.split(',')] else: scopes = _DEFAULT_SCOPES if not key_path and not keyfile_dict: self.log.info('Getting connection using `google.auth.default()` ' 'since no key file is defined for hook.') credentials, _ = google.auth.default(scopes=scopes) elif key_path: # Get credentials from a JSON file. if key_path.endswith('.json'): self.log.debug('Getting connection using JSON key file %s' % key_path) credentials = ( google.oauth2.service_account.Credentials.from_service_account_file( key_path, scopes=scopes) ) elif key_path.endswith('.p12'): raise AirflowException('Legacy P12 key file are not supported, ' 'use a JSON key file.') else: raise AirflowException('Unrecognised extension for key file.') else: # Get credentials from JSON data provided in the UI. try: keyfile_dict = json.loads(keyfile_dict) # Depending on how the JSON was formatted, it may contain # escaped newlines. Convert those to actual newlines. keyfile_dict['private_key'] = keyfile_dict['private_key'].replace( '\\n', '\n') credentials = ( google.oauth2.service_account.Credentials.from_service_account_info( keyfile_dict, scopes=scopes) ) except json.decoder.JSONDecodeError: raise AirflowException('Invalid key JSON.') return credentials.with_subject(self.delegate_to) \ if self.delegate_to else credentials
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L184-L209
def normalize(tensor, mean, std, inplace=False):
    """ """
    if not _is_tensor_image(tensor):
        raise TypeError('tensor is not a torch image.')

    if not inplace:
        tensor = tensor.clone()

    mean = torch.as_tensor(mean, dtype=torch.float32, device=tensor.device)
    std = torch.as_tensor(std, dtype=torch.float32, device=tensor.device)
    tensor.sub_(mean[:, None, None]).div_(std[:, None, None])
    return tensor
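An illustrative call to normalize() above (assuming torchvision is installed; the mean/std values are the commonly quoted ImageNet statistics, used here only as an example):

import torch
from torchvision.transforms.functional import normalize

img = torch.rand(3, 224, 224)  # random CHW image tensor standing in for real data
out = normalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])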
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/dag.py#L64-L75
def get_last_dagrun(dag_id, session, include_externally_triggered=False):
    """ """
    DR = DagRun
    query = session.query(DR).filter(DR.dag_id == dag_id)
    if not include_externally_triggered:
        query = query.filter(DR.external_trigger == False)  # noqa
    query = query.order_by(DR.execution_date.desc())
    return query.first()
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/sample_stats.py#L212-L281
def cholesky_covariance(x, sample_axis=0, keepdims=False, name=None):
    """ """
    with tf.compat.v1.name_scope(
            name, 'cholesky_covariance', values=[x, sample_axis]):
        sample_axis = tf.convert_to_tensor(value=sample_axis, dtype=tf.int32)
        cov = covariance(
            x, sample_axis=sample_axis, event_axis=-1, keepdims=keepdims)
        return tf.linalg.cholesky(cov)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/discord_webhook_hook.py#L126-L140
def execute(self):
    """ """
    proxies = {}
    if self.proxy:
        # we only need https proxy for Discord
        proxies = {'https': self.proxy}

    discord_payload = self._build_discord_payload()

    self.run(endpoint=self.webhook_endpoint,
             data=discord_payload,
             headers={'Content-type': 'application/json'},
             extra_options={'proxies': proxies})
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L124-L147
def table_exists(self, project_id, dataset_id, table_id):
    """ """
    service = self.get_service()
    try:
        service.tables().get(
            projectId=project_id, datasetId=dataset_id,
            tableId=table_id).execute(num_retries=self.num_retries)
        return True
    except HttpError as e:
        if e.resp['status'] == '404':
            return False
        raise
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/linesearch/hager_zhang.py#L657-L662
def _to_str(x):
    """"""
    x = tf.convert_to_tensor(value=x)
    if x.dtype == tf.bool:
        return tf.where(x, tf.fill(x.shape, 'True'), tf.fill(x.shape, 'False'))
    return x
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/helpers.py#L243-L289
def reap_process_group(pid, log, sig=signal.SIGTERM, timeout=DEFAULT_TIME_TO_WAIT_AFTER_SIGTERM): """ """ def on_terminate(p): log.info("Process %s (%s) terminated with exit code %s", p, p.pid, p.returncode) if pid == os.getpid(): raise RuntimeError("I refuse to kill myself") parent = psutil.Process(pid) children = parent.children(recursive=True) children.append(parent) try: pg = os.getpgid(pid) except OSError as err: # Skip if not such process - we experience a race and it just terminated if err.errno == errno.ESRCH: return raise log.info("Sending %s to GPID %s", sig, pg) os.killpg(os.getpgid(pid), sig) gone, alive = psutil.wait_procs(children, timeout=timeout, callback=on_terminate) if alive: for p in alive: log.warn("process %s (%s) did not respond to SIGTERM. Trying SIGKILL", p, pid) os.killpg(os.getpgid(pid), signal.SIGKILL) gone, alive = psutil.wait_procs(alive, timeout=timeout, callback=on_terminate) if alive: for p in alive: log.error("Process %s (%s) could not be killed. Giving up.", p, p.pid)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/slack_webhook_hook.py#L80-L97
def _get_token(self, token, http_conn_id):
    """ """
    if token:
        return token
    elif http_conn_id:
        conn = self.get_connection(http_conn_id)
        extra = conn.extra_dejson
        return extra.get('webhook_token', '')
    else:
        raise AirflowException('Cannot get token: No valid Slack '
                               'webhook token nor conn_id supplied')
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L1298-L1339
def cancel_query(self): """ """ jobs = self.service.jobs() if (self.running_job_id and not self.poll_job_complete(self.running_job_id)): self.log.info('Attempting to cancel job : %s, %s', self.project_id, self.running_job_id) if self.location: jobs.cancel( projectId=self.project_id, jobId=self.running_job_id, location=self.location).execute(num_retries=self.num_retries) else: jobs.cancel( projectId=self.project_id, jobId=self.running_job_id).execute(num_retries=self.num_retries) else: self.log.info('No running BigQuery jobs to cancel.') return # Wait for all the calls to cancel to finish max_polling_attempts = 12 polling_attempts = 0 job_complete = False while polling_attempts < max_polling_attempts and not job_complete: polling_attempts = polling_attempts + 1 job_complete = self.poll_job_complete(self.running_job_id) if job_complete: self.log.info('Job successfully canceled: %s, %s', self.project_id, self.running_job_id) elif polling_attempts == max_polling_attempts: self.log.info( "Stopping polling due to timeout. Job with id %s " "has not completed cancel and may or may not finish.", self.running_job_id) else: self.log.info('Waiting for canceled job with id %s to finish.', self.running_job_id) time.sleep(5)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/vector_diffeomixture.py#L776-L811
def maybe_check_quadrature_param(param, name, validate_args): """""" with tf.name_scope("check_" + name): assertions = [] if tensorshape_util.rank(param.shape) is not None: if tensorshape_util.rank(param.shape) == 0: raise ValueError("Mixing params must be a (batch of) vector; " "{}.rank={} is not at least one.".format( name, tensorshape_util.rank(param.shape))) elif validate_args: assertions.append( assert_util.assert_rank_at_least( param, 1, message=("Mixing params must be a (batch of) vector; " "{}.rank is not at least one.".format(name)))) # TODO(jvdillon): Remove once we support k-mixtures. if tensorshape_util.with_rank_at_least(param.shape, 1)[-1] is not None: if tf.compat.dimension_value(param.shape[-1]) != 1: raise NotImplementedError("Currently only bimixtures are supported; " "{}.shape[-1]={} is not 1.".format( name, tf.compat.dimension_value( param.shape[-1]))) elif validate_args: assertions.append( assert_util.assert_equal( tf.shape(input=param)[-1], 1, message=("Currently only bimixtures are supported; " "{}.shape[-1] is not 1.".format(name)))) if assertions: return distribution_util.with_dependencies(assertions, param) return param
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/cassandra_hook.py#L179-L200
def record_exists(self, table, keys):
    """ """
    keyspace = self.keyspace
    if '.' in table:
        keyspace, table = table.split('.', 1)
    ks = " AND ".join("{}=%({})s".format(key, key) for key in keys.keys())
    cql = "SELECT * FROM {keyspace}.{table} WHERE {keys}".format(
        keyspace=keyspace, table=table, keys=ks)

    try:
        rs = self.get_conn().execute(cql, keys)
        return rs.one() is not None
    except Exception:
        return False
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/sample_stats.py#L465-L549
def correlation(x, y=None, sample_axis=0, event_axis=-1, keepdims=False, name=None): """ """ with tf.compat.v1.name_scope( name, 'correlation', values=[x, y, event_axis, sample_axis]): # Corr[X, Y] = Cov[X, Y] / (Stddev[X] * Stddev[Y]) # = Cov[X / Stddev[X], Y / Stddev[Y]] # So we could compute covariance first then divide by stddev, or # divide by stddev and compute covariance. # Dividing by stddev then computing covariance is potentially more stable. # But... computing covariance first then dividing involves 2 fewer large # broadcasts. We choose to divide first, largely because it avoids # difficulties with the various options for sample/event axis kwargs. x /= stddev(x, sample_axis=sample_axis, keepdims=True) if y is not None: y /= stddev(y, sample_axis=sample_axis, keepdims=True) return covariance( x=x, y=y, event_axis=event_axis, sample_axis=sample_axis, keepdims=keepdims)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/log/wasb_task_handler.py#L135-L152
def wasb_read(self, remote_log_location, return_error=False):
    """ """
    try:
        return self.hook.read_file(self.wasb_container, remote_log_location)
    except AzureHttpError:
        msg = 'Could not read logs from {}'.format(remote_log_location)
        self.log.exception(msg)
        # return error if needed
        if return_error:
            return msg
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L577-L594
def adjust_saturation(img, saturation_factor):
    """ """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    enhancer = ImageEnhance.Color(img)
    img = enhancer.enhance(saturation_factor)
    return img
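A short usage sketch for adjust_saturation() above (assuming Pillow and torchvision are installed; the synthetic image and the factor are illustrative, not from the source):

from PIL import Image
from torchvision.transforms.functional import adjust_saturation

img = Image.new('RGB', (64, 64), color=(120, 60, 30))  # synthetic RGB image
desaturated = adjust_saturation(img, 0.5)  # factor < 1 reduces saturation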
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dag_processing.py#L637-L643
def terminate(self):
    """ """
    self.log.info("Sending termination message to manager.")
    self._child_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER)
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/models/googlenet.py#L18-L47
def googlenet(pretrained=False, **kwargs):
    """ """
    if pretrained:
        if 'transform_input' not in kwargs:
            kwargs['transform_input'] = True
        if 'aux_logits' not in kwargs:
            kwargs['aux_logits'] = False
        if kwargs['aux_logits']:
            warnings.warn('auxiliary heads in the pretrained googlenet model are NOT pretrained, '
                          'so make sure to train them')
        original_aux_logits = kwargs['aux_logits']
        kwargs['aux_logits'] = True
        kwargs['init_weights'] = False
        model = GoogLeNet(**kwargs)
        model.load_state_dict(model_zoo.load_url(model_urls['googlenet']))
        if not original_aux_logits:
            model.aux_logits = False
            del model.aux1, model.aux2
        return model

    return GoogLeNet(**kwargs)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/postgres_to_gcs_operator.py#L207-L224
def convert_types(cls, value):
    """ """
    if type(value) in (datetime.datetime, datetime.date):
        return time.mktime(value.timetuple())
    elif type(value) == datetime.time:
        formated_time = time.strptime(str(value), "%H:%M:%S")
        return datetime.timedelta(
            hours=formated_time.tm_hour,
            minutes=formated_time.tm_min,
            seconds=formated_time.tm_sec).seconds
    elif isinstance(value, Decimal):
        return float(value)
    else:
        return value
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/sample_halton_sequence.py#L269-L301
def _get_permutations(num_results, dims, seed=None):
    """ """
    sample_range = tf.range(num_results)
    stream = distributions.SeedStream(seed, salt='MCMCSampleHaltonSequence3')

    def generate_one(d):
        seed = stream()
        fn = lambda _: tf.random.shuffle(tf.range(d), seed=seed)
        return tf.map_fn(
            fn,
            sample_range,
            parallel_iterations=1 if seed is not None else 10)

    return tf.concat([generate_one(d) for d in tf.unstack(dims)],
                     axis=-1)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/no_u_turn_sampler/logistic_regression.py#L67-L85
def covertype():
  """"""
  import sklearn.datasets  # pylint: disable=g-import-not-at-top
  data = sklearn.datasets.covtype.fetch_covtype()
  features = data.data
  labels = data.target

  # Normalize features and append a column of ones for the intercept.
  features -= features.mean(0)
  features /= features.std(0)
  features = np.hstack([features, np.ones([features.shape[0], 1])])
  features = tf.cast(features, dtype=tf.float32)

  # Binarize outcomes on whether it is a specific category.
  _, counts = np.unique(labels, return_counts=True)
  specific_category = np.argmax(counts)
  labels = (labels == specific_category)
  labels = tf.cast(labels, dtype=tf.int32)
  return features, labels
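A one-line usage sketch for covertype, assuming the function above is importable from TFP's no_u_turn_sampler logistic_regression example and scikit-learn is installed; the Forest Covertype data is downloaded on first call.

features, labels = covertype()
# features: float32 tensor of shape [num_examples, 55]
#           (54 normalized covariates plus an appended intercept column)
# labels:   int32 tensor of 0/1, marking the most frequent cover type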
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcs_hook.py#L208-L221
def exists(self, bucket_name, object_name):
        """
        """
        client = self.get_conn()
        bucket = client.get_bucket(bucket_name=bucket_name)
        blob = bucket.blob(blob_name=object_name)
        return blob.exists()
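A brief usage sketch for the exists method, assuming an Airflow Google Cloud connection is configured; the bucket and object names are placeholders.

from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook

hook = GoogleCloudStorageHook()  # falls back to the google_cloud_default connection
if hook.exists(bucket_name='my-bucket', object_name='data/part-0001.csv'):
    print('object is present')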
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/log/file_processor_handler.py#L129-L149
def _init_file(self, filename):
        """
        """
        relative_path = self._render_filename(filename)
        full_path = os.path.join(self._get_log_directory(), relative_path)
        directory = os.path.dirname(full_path)

        if not os.path.exists(directory):
            try:
                os.makedirs(directory)
            except OSError:
                if not os.path.isdir(directory):
                    raise

        if not os.path.exists(full_path):
            open(full_path, "a").close()

        return full_path
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/sensors/hdfs_sensor.py#L30-L46
def poke(self, context):
        """
        """
        sb = self.hook(self.hdfs_conn_id).get_conn()
        self.log.info(
            'Poking for %s to be a directory with files matching %s',
            self.filepath, self.regex.pattern
        )
        result = [f for f in sb.ls([self.filepath], include_toplevel=False)
                  if f['file_type'] == 'f' and
                  self.regex.match(f['path'].replace('%s/' % self.filepath, ''))]
        result = self.filter_for_ignored_ext(result, self.ignored_ext,
                                             self.ignore_copying)
        result = self.filter_for_filesize(result, self.file_size)
        return bool(result)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sagemaker_hook.py#L368-L396
def create_tuning_job(self, config, wait_for_completion=True,
                          check_interval=30, max_ingestion_time=None):
        """
        """
        self.check_tuning_config(config)

        response = self.get_conn().create_hyper_parameter_tuning_job(**config)
        if wait_for_completion:
            self.check_status(config['HyperParameterTuningJobName'],
                              'HyperParameterTuningJobStatus',
                              self.describe_tuning_job,
                              check_interval, max_ingestion_time)
        return response
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/__init__.py#L32-L65
def _ensure_tf_install():  # pylint: disable=g-statement-before-imports
  """
  """
  try:
    import tensorflow as tf
  except ImportError:
    # Print more informative error message, then reraise.
    print("\n\nFailed to import TensorFlow. Please note that TensorFlow is not "
          "installed by default when you install TensorFlow Probability. This "
          "is so that users can decide whether to install the GPU-enabled "
          "TensorFlow package. To use TensorFlow Probability, please install "
          "the most recent version of TensorFlow, by following instructions at "
          "https://tensorflow.org/install.\n\n")
    raise

  import distutils.version

  #
  # Update this whenever we need to depend on a newer TensorFlow release.
  #
  required_tensorflow_version = "1.13"

  if (distutils.version.LooseVersion(tf.__version__) <
      distutils.version.LooseVersion(required_tensorflow_version)):
    raise ImportError(
        "This version of TensorFlow Probability requires TensorFlow "
        "version >= {required}; Detected an installation of version {present}. "
        "Please upgrade TensorFlow to proceed.".format(
            required=required_tensorflow_version,
            present=tf.__version__))
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/dagbag.py#L305-L339
def bag_dag(self, dag, parent_dag, root_dag):
        """
        """

        dag.test_cycle()  # throws if a task cycle is found
        dag.resolve_template_files()
        dag.last_loaded = timezone.utcnow()

        for task in dag.tasks:
            settings.policy(task)

        subdags = dag.subdags

        try:
            for subdag in subdags:
                subdag.full_filepath = dag.full_filepath
                subdag.parent_dag = dag
                subdag.is_subdag = True
                self.bag_dag(subdag, parent_dag=dag, root_dag=root_dag)

            self.dags[dag.dag_id] = dag
            self.log.debug('Loaded DAG %s', dag)
        except AirflowDagCycleException as cycle_exception:
            # There was an error in bagging the dag. Remove it from the list of dags
            self.log.exception('Exception bagging dag: %s', dag.dag_id)
            # Only necessary at the root level since DAG.subdags automatically
            # performs DFS to search through all subdags
            if dag == root_dag:
                for subdag in subdags:
                    if subdag.dag_id in self.dags:
                        del self.dags[subdag.dag_id]
            raise cycle_exception
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_compute_hook.py#L223-L245
def get_instance_group_manager(self, zone, resource_id, project_id=None):
        """
        """
        response = self.get_conn().instanceGroupManagers().get(
            project=project_id,
            zone=zone,
            instanceGroupManager=resource_id
        ).execute(num_retries=self.num_retries)
        return response
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dag_processing.py#L262-L272
def get_dag(self, dag_id):
        """
        """
        if dag_id not in self.dag_id_to_simple_dag:
            raise AirflowException("Unknown DAG ID {}".format(dag_id))
        return self.dag_id_to_simple_dag[dag_id]
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L597-L641
def adjust_hue(img, hue_factor):
    """
    """
    if not (-0.5 <= hue_factor <= 0.5):
        raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))

    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    input_mode = img.mode
    if input_mode in {'L', '1', 'I', 'F'}:
        return img

    h, s, v = img.convert('HSV').split()

    np_h = np.array(h, dtype=np.uint8)
    # uint8 addition takes care of rotation across boundaries
    with np.errstate(over='ignore'):
        np_h += np.uint8(hue_factor * 255)
    h = Image.fromarray(np_h, 'L')

    img = Image.merge('HSV', (h, s, v)).convert(input_mode)
    return img
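A short usage sketch for adjust_hue, mirroring the adjust_saturation example earlier; 'photo.jpg' is again a placeholder path.

from PIL import Image
from torchvision.transforms import functional as F

img = Image.open('photo.jpg').convert('RGB')  # placeholder input image
shifted = F.adjust_hue(img, 0.25)             # rotate the hue channel a quarter turn
same = F.adjust_hue(img, 0.0)                 # factor 0 leaves the image unchanged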
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/vector_diffeomixture.py#L882-L896
def interpolate_scale(grid, scale):
  """"""
  if len(scale) != 2:
    raise NotImplementedError("Currently only bimixtures are supported; "
                              "len(scale)={} is not 2.".format(len(scale)))
  deg = tf.compat.dimension_value(
      tensorshape_util.with_rank_at_least(grid.shape, 1)[-1])
  if deg is None:
    raise ValueError("Num quadrature grid points must be known prior "
                     "to graph execution.")
  with tf.name_scope("interpolate_scale"):
    return [linop_add_lib.add_operators([
        linop_scale(grid[..., k, q], s)
        for k, s in enumerate(scale)
    ])[0] for q in range(deg)]
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dag_processing.py#L900-L918
def _refresh_dag_dir(self):
        """
        """
        elapsed_time_since_refresh = (timezone.utcnow() -
                                      self.last_dag_dir_refresh_time).total_seconds()
        if elapsed_time_since_refresh > self.dag_dir_list_interval:
            # Build up a list of Python files that could contain DAGs
            self.log.info("Searching for files in %s", self._dag_directory)
            self._file_paths = list_py_file_paths(self._dag_directory)
            self.last_dag_dir_refresh_time = timezone.utcnow()
            self.log.info("There are %s files in %s",
                          len(self._file_paths), self._dag_directory)
            self.set_file_paths(self._file_paths)

            try:
                self.log.debug("Removing old import errors")
                self.clear_nonexistent_import_errors()
            except Exception:
                self.log.exception("Error removing old import errors")