python_code (string, 0-4.04M) | repo_name (string, 7-58) | file_path (string, 5-147)
---|---|---|
# -*- coding: utf-8 -*-
# file: d3mds.py
# lab: MIT Lincoln Lab
# author(s): sw26425
# description: a rudimentary API for interacting with D3MDataSupply, which mainly consists of a Dataset and a Problem
import os, json
import pandas as pd
import numpy as np
import warnings
DATASET_SCHEMA_VERSION = '3.0'
PROBLEM_SCHEMA_VERSION = '3.0'
class D3MDataset:
dsHome = None
dsDoc = None
learningDataFile = None
def __init__(self, datasetPath):
self.dsHome = datasetPath
# read the schema in dsHome
_dsDoc = os.path.join(self.dsHome, 'datasetDoc.json')
assert os.path.exists(_dsDoc)
with open(_dsDoc, 'r') as f:
self.dsDoc = json.load(f)
# make sure the versions line up
if self.get_datasetSchemaVersion() != DATASET_SCHEMA_VERSION:
warnings.warn("the datasetSchemaVersions in the API and datasetDoc do not match !!!!!!!")
# locate the special learningData file
self.learningDataFile = self._get_learning_data_path()
def get_datasetID(self):
"""
Returns the datasetID from datasetDoc
"""
return self.dsDoc['about']['datasetID']
def get_datasetSchemaVersion(self):
"""
Returns the dataset schema version that was used to create this dataset
"""
return self.dsDoc['about']['datasetSchemaVersion']
def get_learning_data(self, view=None, problem=None):
"""
Returns the contents of learningData.csv as a DataFrame.
If view is 'TRAIN' or 'TEST', then the full learningData is filtered to return learningData only for that view.
For view-based filtering, the problem object has to be passed because this method uses the splitsData from the problem.
"""
df = pd.read_csv(self.learningDataFile, index_col='d3mIndex')
if view is None:
return df
if view.upper() == 'TRAIN' or view.upper() == 'TEST':
if problem is None:
raise RuntimeError('asking for learningData for a split, but the problem is not given')
splitsdf = problem.get_datasplits(view)
df = df.iloc[splitsdf.index]
return df
def get_learning_data_columns(self):
res = self._get_learning_data_resource()
return res['columns']
def set_learning_data(self, df):
"""
Sets the contents of the learningData file to df
"""
df.to_csv(self.learningDataFile)
def delete_column_entries(self, target):
"""
Deletes all the entries of a particular column of a particular tabular data resource.
The deleted entries are set to numpy.NaN
"""
resID = target['resID']
colIndex = target['colIndex']
colName = target['colName']
for res in self.dsDoc['dataResources']:
_resID = res['resID']
if _resID != resID:
continue
_resPath = res['resPath']
_resPath = os.path.join(self.dsHome, _resPath)
_resType = res['resType']
assert _resType == 'table'
for col in res['columns']:
_colIndex = col['colIndex']
if _colIndex != colIndex:
continue
_colName = col['colName']
assert _colName == colName
df = pd.read_csv(_resPath)
df[_colName] = [np.NaN]*len(df[_colName])
df.to_csv(_resPath, index=None)
return True
raise RuntimeError('could not find the column')
raise RuntimeError('could not find the resource')
def delete_identifying_fields(self, view):
"""
Deletes some fields that might contain identifying information.
These fields should not be in the train or test view during the blinds evaluation.
"""
assert view.upper()=='TRAIN' or view.upper()=='TEST' # ensures we perform this only if view is train or test
self.dsDoc['about']['datasetName']='redacted'
self.dsDoc['about']['redacted'] = True
try:
del self.dsDoc['about']['description']
except KeyError:
pass
try:
del self.dsDoc['about']['citation']
except KeyError:
pass
try:
del self.dsDoc['about']['source']
except KeyError:
pass
try:
del self.dsDoc['about']['sourceURI']
except KeyError:
pass
# save datasetDoc.json file
with open(os.path.join(self.dsHome, 'datasetDoc.json'), 'w') as fp:
json.dump(self.dsDoc, fp, indent=2, sort_keys=False)
############# private methods
def _get_learning_data_path(self):
"""
Returns the path of learningData.csv in a dataset
"""
for res in self.dsDoc['dataResources']:
resID = res['resID']
resPath = res['resPath']
resType = res['resType']
resFormat = res['resFormat']
if resType =='table':
if 'learningData.csv' in res['resPath'] :
return os.path.join(self.dsHome, resPath)
else:
raise RuntimeError('non-CSV learningData (not implemented yet ...)')
# if the loop completes without finding learningData, raise
raise RuntimeError('could not find learningData file in the dataset')
def _get_learning_data_resource(self):
"""
Returns the dataResources entry (the resource dict) for learningData.csv in a dataset
"""
for res in self.dsDoc['dataResources']:
resID = res['resID']
resPath = res['resPath']
resType = res['resType']
resFormat = res['resFormat']
if resType =='table':
if 'learningData.csv' in res['resPath'] :
return res
else:
raise RuntimeError('could not find learningData.csv')
# if the loop completes without finding learningData, raise
raise RuntimeError('could not find learningData resource')
class D3MProblem:
prHome = None
prDoc = None
splitsFile = None
def __init__(self, problemPath):
self.prHome = problemPath
# read the schema in prHome
_prDoc = os.path.join(self.prHome, 'problemDoc.json')
assert os.path.exists(_prDoc)
with open(_prDoc, 'r') as f:
self.prDoc = json.load(f)
# make sure the versions line up
if self.get_problemSchemaVersion() != PROBLEM_SCHEMA_VERSION:
warnings.warn("the problemSchemaVersions in the API and datasetDoc do not match !!!!!!!")
# locate the splitsFile
self.splitsFile = self._get_datasplits_file()
def get_problemID(self):
"""
Returns the problemID from problemDoc
"""
return self.prDoc['about']['problemID']
def get_problemSchemaVersion(self):
"""
Returns the problem schema version that was used to create this problem
"""
return self.prDoc['about']['problemSchemaVersion']
def get_datasetID(self):
"""
Returns the ID of the dataset referenced in the problem
"""
return self.prDoc['inputs']['data'][0]['datasetID']
def get_targets(self):
"""
Looks at the problemDoc and returns the colIndex and colName of the target variable
"""
return self.prDoc['inputs']['data'][0]['targets']
def get_datasplits(self, view=None):
"""
Returns the data splits in a DataFrame
"""
df = pd.read_csv(self.splitsFile, index_col='d3mIndex')
if view is None:
return df
elif view.upper() == 'TRAIN':
df = df[df['type']=='TRAIN']
return df
elif view.upper() == 'TEST':
df = df[df['type']=='TEST']
return df
def set_datasplits(self, df):
"""
Sets the contents of the dataSplits file to df
"""
df.to_csv(self.splitsFile)
def delete_identifying_fields(self, view):
"""
Deletes some fields that might contain identifying information.
These fields should not be in the train or test view during the blinds evaluation.
"""
assert view.upper()=='TRAIN' or view.upper()=='TEST' # ensures we perform this only if view is train or test
self.prDoc['about']['problemName']='redacted'
try:
del self.prDoc['about']['problemDescription']
except KeyError:
pass
# save problemDoc.json file
with open(os.path.join(self.prHome, 'problemDoc.json'), 'w') as fp:
json.dump(self.prDoc, fp, indent=2, sort_keys=False)
def get_performance_metrics(self):
return self.prDoc['inputs']['performanceMetrics']
############# private methods
def _get_datasplits_file(self):
splitsFile = self.prDoc['inputs']['dataSplits']['splitsFile']
splitsFile = os.path.join(self.prHome, splitsFile)
assert os.path.exists(splitsFile)
return splitsFile
class D3MDS:
dataset = None
problem = None
def __init__(self, datasetPath, problemPath):
self.dataset = D3MDataset(datasetPath)
self.problem = D3MProblem(problemPath)
# sanity check
assert self.dataset.get_datasetID() == self.problem.get_datasetID()
def _get_target_columns(self, df):
target_cols = []
targets = self.problem.get_targets()
for target in targets:
colIndex = target['colIndex']-1 # 0th column is d3mIndex
colName = df.columns[colIndex]
assert colName == target['colName']
target_cols.append(colIndex)
return target_cols
def get_data_all(self):
df = self.dataset.get_learning_data(view=None, problem=None)
return df
def get_train_data(self):
df = self.dataset.get_learning_data(view='train', problem=self.problem)
target_cols = self._get_target_columns(df)
df.drop(df.columns[target_cols],axis=1,inplace=True)
return df
def get_train_targets(self):
df = self.dataset.get_learning_data(view='train', problem=self.problem)
target_cols = self._get_target_columns(df)
return np.ravel(df[df.columns[target_cols]])
def get_test_data(self):
df = self.dataset.get_learning_data(view='test', problem=self.problem)
target_cols = self._get_target_columns(df)
df.drop(df.columns[target_cols],axis=1,inplace=True)
return df
def get_test_targets(self):
df = self.dataset.get_learning_data(view='test', problem=self.problem)
target_cols = self._get_target_columns(df)
return np.ravel(df[df.columns[target_cols]])
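# Illustrative usage sketch (added for clarity; not part of the original
# module). Drives the D3MDS API above end-to-end; the dataset/problem paths
# are hypothetical placeholders for a real D3M dataset/problem pair on disk.
if __name__ == '__main__':
    _ds_path = '/path/to/185_baseball_dataset'  # hypothetical path
    _pr_path = '/path/to/185_baseball_problem'  # hypothetical path
    d3mds = D3MDS(_ds_path, _pr_path)  # also asserts dataset and problem IDs match
    X_train = d3mds.get_train_data()  # train-view features (target columns dropped)
    y_train = d3mds.get_train_targets()  # flattened train-view target values
    X_test = d3mds.get_test_data()
    print(X_train.shape, y_train.shape, X_test.shape)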
|
d3m-model-search-master
|
test_data/test_cases_only/534_cps_85_wages/534_cps_85_wages_solution/modules/d3mds.py
|
import os, sys, json
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import f1_score, mean_squared_error
here = os.path.dirname(os.path.abspath(__file__))
from d3mds import D3MDataset, D3MProblem, D3MDS
from feature_extraction import *
from feature_selection import *
from estimation import *
if __name__ == '__main__':
# get the paths of the dataset and problem
try:
dspath = (sys.argv[1])
except:
dspath = input('Enter the path to the dataset: ')
# dspath = os.path.join(here, '..', '..', 'data', '185_baseball_dataset')
assert os.path.exists(dspath)
try:
prpath = (sys.argv[2])
except:
prpath = input('Enter the path to the problem: ')
# prpath = os.path.join(here, '..', '..', 'data', '185_baseball_problem')
assert os.path.exists(prpath)
# check the pipeline JSON file
pipe_json = os.path.join(here, 'pipeline.json')
assert os.path.exists(pipe_json)
# read the JSON file
with open(pipe_json) as data_file:
ps = json.load(data_file)
## TBD: we need to check that the JSON aligns with the dataset and problem
# initialize the API class
d3mds = D3MDS(dspath, prpath) # this checks that the problem and dataset correspond
# get the train and test data
X_train = d3mds.get_train_data()
y_train = d3mds.get_train_targets()
X_test = d3mds.get_test_data()
y_test = d3mds.get_test_targets()
# get columns information
cols_info = d3mds.dataset.get_learning_data_columns()
## instantiate feature extractor
key, fe = ps['feature_extractors'].popitem()
fe_class = fe['feature_extractor']
fe_params = fe['params']
FE = eval(fe_class)(**fe_params)
if isinstance(FE, AnnotatedTabularExtractor):
FE.set_cols_info(cols_info)
## instantiate feature selector
fs = ps['feature_selector']
fs_class = fs['feature_selector']
fs_params = fs['params']
FS = eval(fs_class)(**fs_params)
## instantiate estimator
est = ps['estimator']
est_class = est['estimator']
est_params = est['params']
EST = eval(est_class)(**est_params)
## make a pipeline from the above three components
pipeline = Pipeline([
('vect', FE),
('sel', FS),
('clf', EST),
])
## train the pipeline on train data
pipeline.fit(X_train, y_train)
## predict on test data
y_pred = pipeline.predict(X_test)
targetCols = [col['colName'] for col in d3mds.problem.get_targets()]
y_pred_df = pd.DataFrame(index=X_test.index, data=y_pred, columns=targetCols)
y_pred_df.to_csv(os.path.join('.','predictions.csv'))
## compute the score on test data
metrics = d3mds.problem.get_performance_metrics()
scoresdf = pd.DataFrame(columns=['metric','value'])
for item in metrics:
metric = item['metric']
if metric == 'f1Macro':
score = f1_score(y_test, y_pred, average='macro')
print('f1Macro', score)
scoresdf.loc[len(scoresdf)]=['f1Macro', score]
elif metric == 'meanSquaredError':
score = mean_squared_error(y_test, y_pred)
print('meanSquaredError', score)
scoresdf.loc[len(scoresdf)]=['meanSquaredError', score]
scoresdf.to_csv(os.path.join('.','scores.csv'))
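# Illustrative sketch of the pipeline.json layout this script expects,
# reconstructed from the keys read above ('feature_extractors',
# 'feature_selector', 'estimator'). AnnotatedTabularExtractor appears in this
# script; the selector/estimator class names and the empty params are
# hypothetical placeholders, not values recovered from the repository.
#
# {
#   "feature_extractors": {
#     "fe0": {"feature_extractor": "AnnotatedTabularExtractor", "params": {}}
#   },
#   "feature_selector": {"feature_selector": "ExampleSelector", "params": {}},
#   "estimator": {"estimator": "ExampleEstimator", "params": {}}
# }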
|
d3m-model-search-master
|
test_data/test_cases_only/534_cps_85_wages/534_cps_85_wages_solution/modules/pipeline.py
|
from abc import ABC, abstractmethod
from collections import OrderedDict
import numpy as np
from numpy import ndarray
from scipy.sparse import csr_matrix
from pandas import DataFrame
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_selection.base import SelectorMixin
# https://stackoverflow.com/a/3862957
def get_all_subclasses(cls):
return cls.__subclasses__() + [g for s in cls.__subclasses__() for g in get_all_subclasses(s)]
def sample_param_distributions(param_distributions):
try:
return sample_param_distributions_dict(param_distributions)
except AttributeError:
i = np.random.randint(len(param_distributions))
return sample_param_distributions_dict(param_distributions[i])
def sample_param_distributions_dict(param_distributions_dict):
params = {}
for k, v in param_distributions_dict.items():
i = np.random.randint(len(v))
params[k] = v[i]
return params
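# Illustrative sketch: in a param_distributions dict, each key maps to a list
# of candidate values and one entry is drawn uniformly at random per key. The
# values below are hypothetical.
#
#   sample_param_distributions_dict({'alpha': [1e-4, 1e-3], 'loss': ['hinge', 'log']})
#   # -> e.g. {'alpha': 0.001, 'loss': 'hinge'}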
class AbstractParameterized(ABC):
param_distributions = {}
@classmethod
def get_random_parameters(cls):
return sample_param_distributions(cls.param_distributions)
class AbstractFeatureExtractor(AbstractParameterized, BaseEstimator):
def fit(self, df, variables):
self.fit_transform(df, variables)
return self
@abstractmethod
def fit_transform(self, df, variables):
""" Fits the feature extractor
:param df:
:type df: DataFrame
:param variables:
:type variables: list[D3MVariable]
:return:
:rtype: csr_matrix
"""
pass
@abstractmethod
def transform(self, df):
""" Transforms the data
:param df:
:type df: DataFrame
:return:
:rtype: csr_matrix
"""
pass
class AbstractFeatureSelector(AbstractParameterized, BaseEstimator, SelectorMixin):
pass
class AbstractEstimator(AbstractParameterized, BaseEstimator):
@abstractmethod
def fit(self, X, y):
"""
:param X:
:type X: csr_matrix
:param y:
:type y: ndarray
:return:
:rtype: AbstractEstimator
"""
return self
@abstractmethod
def predict(self, X):
"""
:param X:
:type X: csr_matrix
:return:
:rtype: ndarray
"""
pass
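# Illustrative sketch (not from the original repository): a minimal concrete
# AbstractEstimator showing how the interface above is meant to be filled in.
# Wrapping sklearn's SGDClassifier here is a hypothetical choice.
from sklearn.linear_model import SGDClassifier

class ExampleSGDEstimator(AbstractEstimator):
    # candidate values sampled uniformly at random by get_random_parameters()
    param_distributions = {'alpha': [1e-4, 1e-3, 1e-2]}

    def __init__(self, alpha=1e-4):
        self.alpha = alpha
        self._clf = SGDClassifier(alpha=alpha)

    def fit(self, X, y):
        self._clf.fit(X, y)
        return self

    def predict(self, X):
        return self._clf.predict(X)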
|
d3m-model-search-master
|
test_data/test_cases_only/534_cps_85_wages/534_cps_85_wages_solution/modules/base.py
|
d3m-model-search-master
|
Stanford-D3M-Full/__init__.py
|
|
import sys
import logging
import time
import random
import os
from concurrent import futures
import multiprocessing
import os
import json
import copy
import random
import grpc
import time
import json
from multiprocessing import pool, context
from google.protobuf import json_format
import ta3ta2.api.core_pb2 as core_pb2
import ta3ta2.api.value_pb2 as value_pb2
import ta3ta2.api.core_pb2_grpc as core_pb2_grpc
import ta3ta2.api.primitive_pb2 as primitive_pb2
import ta3ta2.api.utils as ta3ta2utils
import d3m
from d3m import index
from hashlib import sha256
import threading
import datetime
import json
import os
import binascii
from multiprocessing import Process, Queue, JoinableQueue
from d3m.metadata.pipeline import Pipeline, PrimitiveStep
import executors.ExtendedSklearnExecutor
import executors.SimpleRandomSklearnExecutor
import executors.ScoreFitProduceExecutor
import executors.HyperbandExecutor
import executors.NistSaverExecutor
import executors.Executor
import utils.utils
import utils.train_utils
class CoreServicerImplementation(core_pb2_grpc.CoreServicer):
##############################################
# Helpers for managing the ta3ta2 process
# Flow diagram: https://gitlab.com/datadrivendiscovery/ta3ta2-api/blob/devel/flow.png
def random_id(self):
self.rid_lock.acquire()
k = None
while k is None or k in self.used_ids:
k = binascii.hexlify(os.urandom(16)).decode("utf-8")
self.used_ids.add(k)
self.rid_lock.release()
return k
def add_to_stage_outputs(self, stage, obj):
self.lock.acquire()
rid = self.random_id()
if stage not in self.stage_outputs:
self.stage_outputs[stage] = {}
self.stage_outputs[stage][rid] = copy.deepcopy(obj)
self.lock.release()
return rid
def get_from_stage_outputs(self, stage, rid):
self.lock.acquire()
assert stage in self.stage_outputs
sys.stdout.flush()
assert rid in self.stage_outputs[stage]
self.lock.release()
return copy.deepcopy(self.stage_outputs[stage][rid])
def check_if_search_cancelled(self, search_id):
sys.stdout.flush()
self.cancel_search_lock.acquire()
cancelled = search_id in self.cancel_search_ids
self.cancel_search_lock.release()
return cancelled
def cancel_search(self, search_id):
sys.stdout.flush()
self.cancel_search_lock.acquire()
self.cancel_search_ids.add(search_id)
self.cancel_search_lock.release()
##############################################
def __init__(self, searchers, searchers_inputs_queue, searchers_results_queue):
self.searchers = searchers
self.searchers_inputs_queue = searchers_inputs_queue
self.searchers_results_queue = searchers_results_queue
self.lock = threading.Lock()
self.rid_lock = threading.Lock()
self.stage_outputs = {}
self.used_ids = set()
self.cancel_search_lock = threading.Lock()
self.cancel_search_ids = set()
def SearchSolutions(self, request, context):
problem, dataset_uri, tlimit = request.problem, request.inputs[0].dataset_uri, request.time_bound
# Extract problem doc and dataset for executors
problem_doc = utils.utils.convert_problem_doc_proto_to_metadata(problem)
dataset = utils.utils.convert_dataset_uri_to_dataset(problem_doc, dataset_uri)
# Other useful data
allowed_value_types = request.allowed_value_types
search_id = self.add_to_stage_outputs("SearchSolutions",
{"problem_doc" : problem_doc,
"dataset" : dataset,
"tlimit" : tlimit,
"allowed_value_types" : allowed_value_types})
return core_pb2.SearchSolutionsResponse(search_id=search_id)
def GetSearchSolutionsResults(self, request, context):
search_id = request.search_id
info_dict = self.get_from_stage_outputs("SearchSolutions", search_id)
problem_doc, dataset = info_dict["problem_doc"], info_dict["dataset"]
allowed_value_types = info_dict["allowed_value_types"]
requested_tlimit = info_dict["tlimit"]
# Configuration parameters
#tlimit = max(60*10, requested_tlimit*60)
tlimit = requested_tlimit
if tlimit == 0:
tlimit = 60*10
tstart = time.time()
target_queue_length = 1
# Scorer
scorer = executors.ScoreFitProduceExecutor.ScoreFitProduceExecutor(None, None)
# Search
# todo(maxlam): currently, only a single GetSearchSolutionsResults can be run -- if
# multiple are run in parallel, things will mess up. Can fix this by putting a lock on resources (searchers) or
# distributing resources.
while time.time()-tstart <= tlimit:
# Check if search has been cancelled
if self.check_if_search_cancelled(search_id):
print("Search was cancelled: %s" % search_id)
while not self.searchers_inputs_queue.empty():
self.searchers_inputs_queue.get()
break
if self.searchers_inputs_queue.empty():
for i in range(target_queue_length):
self.searchers_inputs_queue.put((problem_doc, dataset))
while not self.searchers_results_queue.empty():
result = self.searchers_results_queue.get(True, executors.Executor.QUEUE_TIMEOUT)
# Check if search has been cancelled
if self.check_if_search_cancelled(search_id):
print("Search was cancelled: %s" % search_id)
while not self.searchers_inputs_queue.empty():
self.searchers_inputs_queue.get()
break
if None not in result:
print("Got result!")
# Add the result to dict and yield it
score, pipeline_json = result
solution_id = self.add_to_stage_outputs("GetSearchSolutionsResults",
{"score" : score,
"pipeline_json" : pipeline_json,
"problem_doc" : problem_doc,
"dataset" : dataset,
"allowed_value_types" : allowed_value_types})
# Todo(maxlam): more descriptive done_ticks all_ticks
yield core_pb2.GetSearchSolutionsResultsResponse(
progress=core_pb2.Progress(state=core_pb2.RUNNING),
done_ticks=float(time.time()-tstart),
all_ticks=tlimit,
solution_id=solution_id,
internal_score=score)
time.sleep(executors.Executor.QUEUE_TIMEOUT)
"""solution_id = self.add_to_stage_outputs("GetSearchSolutionsResults",
{"score" : 0,
"pipeline_json" : open("/testout/pipelines/999999999999.3291.json").read(),
"problem_doc" : problem_doc,
"dataset" : dataset,
"allowed_value_types" : allowed_value_types})
# Todo(maxlam): more descriptive done_ticks all_ticks
yield core_pb2.GetSearchSolutionsResultsResponse(
progress=core_pb2.Progress(state=core_pb2.RUNNING),
done_ticks=float(time.time()-tstart)/tlimit,
all_ticks=float(time.time()-tstart)/tlimit,
solution_id=solution_id,
internal_score=0)"""
# Commented out for nyu
"""yield core_pb2.GetSearchSolutionsResultsResponse(
progress=core_pb2.Progress(state=core_pb2.COMPLETED),
done_ticks=1,
all_ticks=1,
solution_id="",
internal_score=0)"""
def EndSearchSolutions(self, request, context):
search_id = request.search_id
self.cancel_search(search_id)
return core_pb2.EndSearchSolutionsResponse()
def StopSearchSolutions(self, request, context):
search_id = request.search_id
self.cancel_search(search_id)
return core_pb2.StopSearchSolutionsResponse()
def DescribeSolution(self, request, context):
solution_id = request.solution_id
info_dict = self.get_from_stage_outputs("GetSearchSolutionsResults", solution_id)
# Serialize the pipeline
pipeline_json = info_dict["pipeline_json"]
allowed_value_types = info_dict["allowed_value_types"]
pipeline = Pipeline.from_json(pipeline_json)
pipeline_description = ta3ta2utils.encode_pipeline_description(pipeline, allowed_value_types, "/tmp")
return core_pb2.DescribeSolutionResponse(pipeline=pipeline_description)
def ScoreSolution(self, request, context):
solution_id = request.solution_id
uris = [x.dataset_uri for x in request.inputs]
metrics = [x for x in request.performance_metrics]
info_dict = self.get_from_stage_outputs("GetSearchSolutionsResults", solution_id)
info_dict["eval_uris"] = uris
info_dict["metrics"] = metrics
request_id = self.add_to_stage_outputs("ScoreSolution",
info_dict)
return core_pb2.ScoreSolutionResponse(request_id=request_id)
def GetScoreSolutionResults(self, request, context):
request_id = request.request_id
info_dict = self.get_from_stage_outputs("ScoreSolution", request_id)
score = info_dict["score"]
metrics = info_dict["metrics"]
problem_doc = info_dict["problem_doc"]
pipeline = info_dict["pipeline_json"]
dataset_train = info_dict["dataset"]
eval_datasets = [utils.utils.convert_dataset_uri_to_dataset(problem_doc, uri, mode="score") for uri in info_dict["eval_uris"]]
# Instantiate scorer evaluator
scorer = executors.ScoreFitProduceExecutor.ScoreFitProduceExecutor(None, None)
# todo(maxlam): actually use metrics
for dataset_eval in eval_datasets:
eval_scores = []
for metric in metrics:
eval_score = scorer.process_item(("score", problem_doc,
ta3ta2utils.decode_performance_metric(metric)["metric"], pipeline,
dataset_train, dataset_eval))
eval_scores.append(eval_score)
eval_score_proto = core_pb2.GetScoreSolutionResultsResponse(scores=
[core_pb2.Score(value=value_pb2.Value(raw=value_pb2.ValueRaw(double=escore)),
metric=metric) for metric, escore in zip(metrics, eval_scores)],
#progress=core_pb2.Progress(state=core_pb2.RUNNING))
progress=core_pb2.Progress(state=core_pb2.COMPLETED)) # For NYU integration
yield eval_score_proto
# Commented out for NYU yield core_pb2.GetScoreSolutionResultsResponse(progress=core_pb2.Progress(state=core_pb2.COMPLETED))
def FitSolution(self, request, context):
solution_id = request.solution_id
# Pass the data through
info_dict = self.get_from_stage_outputs("GetSearchSolutionsResults", solution_id)
problem_doc = info_dict["problem_doc"]
info_dict["train_datasets"] = [utils.utils.convert_dataset_uri_to_dataset(problem_doc, x.dataset_uri, mode="train") for x in request.inputs]
request_id = self.add_to_stage_outputs("FitSolution", info_dict)
return core_pb2.FitSolutionResponse(request_id=request_id)
def GetFitSolutionResults(self, request, context):
request_id = request.request_id
# Pass data through
info_dict = self.get_from_stage_outputs("FitSolution", request_id)
request_id = self.add_to_stage_outputs("GetFitSolutionResults", info_dict)
yield core_pb2.GetFitSolutionResultsResponse(fitted_solution_id=request_id,
progress=core_pb2.Progress(state=core_pb2.COMPLETED))
def ProduceSolution(self, request, context):
solution_id = request.fitted_solution_id
# Pass data through
info_dict = self.get_from_stage_outputs("GetFitSolutionResults", solution_id)
problem_doc = info_dict["problem_doc"]
info_dict["eval_datasets"] = [utils.utils.convert_dataset_uri_to_dataset(problem_doc, x.dataset_uri, mode="test") for x in request.inputs]
request_id = self.add_to_stage_outputs("ProduceSolution", info_dict)
return core_pb2.ProduceSolutionResponse(request_id=request_id)
def GetProduceSolutionResults(self, request, context):
request_id = request.request_id
info_dict = self.get_from_stage_outputs("ProduceSolution", request_id)
problem_doc, pipeline_json, train_datasets, eval_datasets = (
info_dict["problem_doc"],
info_dict["pipeline_json"],
info_dict["train_datasets"],
info_dict["eval_datasets"],
)
producer = executors.ScoreFitProduceExecutor.ScoreFitProduceExecutor(None, None)
progress = 0
for dataset_train in train_datasets:
for dataset_test in eval_datasets:
csv_uri = producer.process_item(("fitproduce", problem_doc, pipeline_json, dataset_train, dataset_test))
outputs = {
"outputs.0" : value_pb2.Value(csv_uri=csv_uri)
}
yield core_pb2.GetProduceSolutionResultsResponse(progress=core_pb2.Progress(state=core_pb2.COMPLETED), # For NYU integration
exposed_outputs=outputs)
progress += 1
# Commented out for NYU yield core_pb2.GetProduceSolutionResultsResponse(progress=core_pb2.Progress(state=core_pb2.COMPLETED), exposed_outputs=None)
def SolutionExport(self, request, context):
solution_id = request.fitted_solution_id
info_dict = self.get_from_stage_outputs("GetFitSolutionResults", solution_id)
problem_doc, pipeline, score = info_dict["problem_doc"], info_dict["pipeline_json"], info_dict["score"]
exporter = executors.ScoreFitProduceExecutor.ScoreFitProduceExecutor(None, None)
exporter.process_item(("export", problem_doc, pipeline, score))
return core_pb2.SolutionExportResponse()
def UpdateProblem(self, request, context):
search_id = request.search_id
return core_pb2.UpdateProblemResponse()
def ListPrimitives(self, request, context):
'''
List all primitives known to TA2, their IDs, versions, names, and digests. Using this
information a TA3 should know which primitives may be put into a pipeline template.
To narrow down potential primitives to use a TA3 can also ask a TA2 to do a solution
search and then observe which primitives the TA2 is using. If more metadata about primitives
is needed, then a TA3 can use the results of this call to map primitives to metadata
(from Python code or primitive annotations) on its own.
'''
list_primitives = []
source_primitives = []
primitives = index.search()
for prim in primitives:
try:
p = index.get_primitive(prim)
source_primitives.append(p)
except:
pass  # skip primitives that fail to load
for p in source_primitives:
meta = p.metadata.to_json_structure()
list_primitives.append(
primitive_pb2.Primitive(
id=meta['id'],
version=meta['version'],
python_path=meta['python_path'],
name=meta['name'],
digest=meta['digest']
)
)
return core_pb2.ListPrimitivesResponse(primitives=list_primitives)
def Hello(self, request, context):
'''
Identify a TA2 and get supported features.
This call is also suitable for a ping/pong call to check that the gRPC connection to the
TA2 is ready.
'''
return core_pb2.HelloResponse(
user_agent='stanford_TA2_1.0', # Set to some string identifying the name and version of the TA2 system.
version=core_pb2.DESCRIPTOR.GetOptions().Extensions[core_pb2.protocol_version],
allowed_value_types=[
1,2,3
], # TODO: what value types are supported?
supported_extensions=[] # TODO: what API extensions are supported?
)
def serve():
# Create searchers
# todo(maxlam): modularize
nparallel = 4
inputs_queue = JoinableQueue()
results_queue = JoinableQueue()
executor = executors.ExtendedSklearnExecutor.ExtendedSklearnExecutor
#executor = executors.SimpleRandomSklearnExecutor.SimpleRandomSklearnExecutor
kwargs = {}
searchers = [
executor(
inputs_queue, results_queue, **kwargs
) for i in range(nparallel)]
# Start searchers
for executor in searchers:
executor.start()
# Create server
server = grpc.server(futures.ThreadPoolExecutor(max_workers=100))
core_pb2_grpc.add_CoreServicer_to_server(CoreServicerImplementation(searchers, inputs_queue, results_queue), server)
server.add_insecure_port('[::]:45042')
server.start()
try:
while True:
time.sleep(1000000)
except KeyboardInterrupt:
server.stop(0)
if __name__=="__main__":
serve()
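# Illustrative connectivity check (not part of the original file): a minimal
# gRPC client for the server above, using the same stubs this repo already
# imports. Assumes serve() is running locally on its insecure port 45042.
#
#   import grpc
#   import ta3ta2.api.core_pb2 as core_pb2
#   import ta3ta2.api.core_pb2_grpc as core_pb2_grpc
#
#   channel = grpc.insecure_channel('localhost:45042')
#   stub = core_pb2_grpc.CoreStub(channel)
#   print(stub.Hello(core_pb2.HelloRequest()).user_agent)  # 'stanford_TA2_1.0'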
|
d3m-model-search-master
|
Stanford-D3M-Full/ta3ta2/server.py
|
d3m-model-search-master
|
Stanford-D3M-Full/ta3ta2/__init__.py
|
|
import time
import sys
import json
import os
from multiprocessing import pool, context
from google.protobuf import json_format
import grpc
import ta3ta2.api.core_pb2_grpc as core_pb2_grpc
import ta3ta2.api.core_pb2 as core_pb2
import ta3ta2.test_server_messages as msg
import ta3ta2.api.utils as ta3ta2utils
from d3m.metadata import base as metadata_base, hyperparams as hyperparams_module, pipeline as pipeline_module, problem
here = os.path.realpath(__file__)
baseball_path = "file:///Stanford-D3M-Full/test_data/185_baseball/185_baseball_dataset/datasetDoc.json"
baseball_score_path = "file:///Stanford-D3M-Full/test_data/185_baseball/SCORE/dataset_TEST/datasetDoc.json"
problem_description = problem.parse_problem_description('/Stanford-D3M-Full/test_data/185_baseball/185_baseball_problem/problemDoc.json')
parsed_problem_doc = ta3ta2utils.encode_problem_description(problem_description)
"""baseball_path = "file:///datasets/seed_datasets_transfer/LL0_acled/LL0_acled_dataset/datasetDoc.json"
baseball_score_path = "file:////datasets/seed_datasets_transfer/LL0_acled/SCORE/dataset_TEST/datasetDoc.json"
problem_description = problem.parse_problem_description('/datasets/seed_datasets_transfer/LL0_acled/LL0_acled_problem/problemDoc.json')
parsed_problem_doc = ta3ta2utils.encode_problem_description(problem_description)"""
"""baseball_path = "file:///datasets/seed_datasets_transfer/uu4_SPECT/uu4_SPECT_dataset/datasetDoc.json"
baseball_score_path = "file:////datasets/seed_datasets_transfer/uu4_SPECT/SCORE/dataset_TEST/datasetDoc.json"
problem_description = problem.parse_problem_description('/datasets/seed_datasets_transfer/uu4_SPECT/uu4_SPECT_problem/problemDoc.json')
parsed_problem_doc = ta3ta2utils.encode_problem_description(problem_description)"""
print(parsed_problem_doc)
class TA2Client():
def __init__(self):
self.ta2_address = "localhost:45042"
# Set up gRPC connection
self.channel = grpc.insecure_channel(self.ta2_address)
self.stub = core_pb2_grpc.CoreStub(self.channel)
# temp messages for timeout-able message thread
self.messages = []
self.async_message_thread = pool.ThreadPool(processes=1,)
# Int of seconds 'GetXResults' calls should last
self.timeout = 30
def connect(self):
# Try to say howdy to TA2
retries = 12
while retries:
try:
self.Hello()
print("Connected!")
return self
except Exception as e:
print(e)
print("No TA2 available at %s - retrying..." %
self.ta2_address,)
retries -= 1
time.sleep(5)
print("No TA2 available at %s - timed out.",
self.ta2_address)
return False
def Hello(self):
req = msg.GetHello()
resp = self.stub.Hello(req)
return resp
def SearchSolutions(self, dataset_train_uri, problem_json):
# Convert problem json to protobuf
problem_pb = problem_json
req = msg.GetSearchSolutionsRequest(dataset_train_uri, problem_pb)
resp = self.stub.SearchSolutions(req)
print("Finished: %s" % resp.search_id)
return resp.search_id
def GetSearchSolutionsResults(self, search_id):
req = msg.GetSearchSolutionsResultsRequest(search_id)
messages = self._blockout(req, 'GetSearchSolutionsResults')
print("GetSearchSolutionResults finished")
return messages
def StopSearchSolutions(self, search_id):
req = msg.GetStopSearchSolutionsRequest(search_id)
resp = self.stub.StopSearchSolutions(req)
print("StopSearchSolutions finished")
assert resp == msg.GetStopSearchSolutionsResponse()
def EndSearchSolutions(self, search_id):
req = msg.GetEndSearchSolutionsRequest(search_id)
resp = self.stub.EndSearchSolutions(req)
print("EndSearchSolutions finished")
assert resp == msg.GetEndSearchSolutionsResponse()
def ScoreSolution(self, solution_id, eval_dataset_uris, metrics):
req = msg.GetScoreSolutionRequest(solution_id, eval_dataset_uris, metrics)
resp = self.stub.ScoreSolution(req)
print("ScoreSolution finished")
return resp.request_id
def GetScoreSolutionResults(self, request_id):
req = msg.GetScoreSolutionResultsRequest(request_id)
messages = self._blockout(req, "GetScoreSolutionResults")
return messages
def DescribeSolution(self, solution_id):
req = msg.GetDescribeSolutionRequest(solution_id)
resp = self.stub.DescribeSolution(req)
print("DescribeSolution finished")
return resp
def FitSolution(self, solution_id, dataset_train_uri):
req = msg.GetFitSolutionRequest(solution_id,
dataset_train_uri)
resp = self.stub.FitSolution(req)
print("FitSolution finished")
return resp.request_id
def GetFitSolutionResults(self, fit_request_id):
req = msg.GetFitSolutionResultsRequest(fit_request_id)
messages = self._blockout(req, 'GetFitSolutionResults')
print("GetFitSolutionResults finished")
return messages
def ProduceSolution(self, fitted_solution_id, dataset_test_uri):
req = msg.GetProduceSolutionRequest(fitted_solution_id,
dataset_test_uri)
resp = self.stub.ProduceSolution(req)
print("ProduceSolution finished")
return resp.request_id
def GetProduceSolutionResults(self, fit_request_id):
req = msg.GetFitSolutionResultsRequest(fit_request_id)
messages = self._blockout(req, 'GetProduceSolutionResults')
print("GetProduceSolutionResults finished")
return messages
def SolutionExport(self, fit_request_id, rank):
req = msg.SolutionExportRequest(fit_request_id, rank)
resp = self.stub.SolutionExport(req)
#messages = self._blockout(req, 'SolutionExport')
print("SolutionExportRequest finished")
def _blockout(self, req, func_name):
func = getattr(self.stub, func_name)
res = []
for resp in func(req):
print("BLOCKOUT", func_name)
res.append(resp)
break
return res
# This method is super buggy as it interleaves the results of 2 streaming calls.
def _timeout(self, req, func_name):
"""
Wrapper for timing out streaming calls
"""
def inner(_self, req, func_name):
func = getattr(_self.stub, func_name)
for resp in func(req):
_self.messages.append(resp)
proc = self.async_message_thread.apply_async(inner,
(self, req, func_name))
try:
proc.get(timeout=self.timeout)
except context.TimeoutError:
pass
except Exception as err:
print(err)
messages = self.messages
self.messages = []
return messages
def test_connect():
print("Testing connect...")
client = TA2Client()
client.connect()
print("passed!")
def test_search_solutions():
print("Testing search solutions")
# Connect
client = TA2Client()
client.connect()
search_id = client.SearchSolutions(baseball_path, parsed_problem_doc)
# Time limit for test
tlimit = 60*4
tcur = time.time()
# Search and aggregate results
n_solutions = 0
for solution in client.GetSearchSolutionsResults(search_id):
if solution.progress.state == core_pb2.COMPLETED:
break
n_solutions += 1
# Shutdown
client.StopSearchSolutions(search_id)
assert(n_solutions >= 1)
print("passed!")
def test_fit_solutions():
print("Testing get score solutions...")
# Connect
client = TA2Client()
client.connect()
search_id = client.SearchSolutions(baseball_path, parsed_problem_doc)
# Search and aggregate results
all_solutions = client.GetSearchSolutionsResults(search_id)
print("Client received results", all_solutions)
while len(all_solutions) <= 1:
all_solutions += client.GetSearchSolutionsResults(search_id)
# Shutdown (but don't kill resources)
client.StopSearchSolutions(search_id)
assert(len(all_solutions) >= 1)
# Fit solution and get fit solution results
print("Getting results...")
selected_solution = all_solutions[-1]
print("Selected solution: ", selected_solution)
solution_id = selected_solution.solution_id
# Fit solution
fit_solution_response = client.FitSolution(solution_id,
baseball_path)
print("Fit solution response")
print(fit_solution_response)
# Get fit solution results
get_fit_solution_response = client.GetFitSolutionResults(fit_solution_response)
while len(get_fit_solution_response) <= 0:
get_fit_solution_response += client.GetFitSolutionResults(fit_solution_response)
assert(len(get_fit_solution_response) > 0)
fit_solution_id = get_fit_solution_response[-1].fitted_solution_id
print("Getting fit solution id: ", fit_solution_id)
# Produce the solution
produce_solution_response = client.ProduceSolution(fit_solution_id,
baseball_path)
print("Produce solution id: ", produce_solution_response)
tcur = time.time()
all_produced_solutions =[]
tlimit = 60
while time.time()-tcur < tlimit:
produce_solution_results = client.GetProduceSolutionResults(produce_solution_response)
all_produced_solutions += produce_solution_results
time.sleep(10)
print(all_produced_solutions)
assert(len(all_produced_solutions) > 0)
# Export solutions to output
client.SolutionExport(fit_solution_id, 42.0)
# Shutdown and free resources
client.EndSearchSolutions(search_id)
print("passed!")
def test_get_score_solutions():
print("Testing get score solutions...")
# Connect
client = TA2Client()
client.connect()
search_id = client.SearchSolutions(baseball_path, parsed_problem_doc)
# Search and aggregate results
all_solutions = client.GetSearchSolutionsResults(search_id)
print("Client received results", all_solutions)
# Shutdown (but don't kill resources)
client.StopSearchSolutions(search_id)
assert(len(all_solutions) >= 1)
# Fit solution and get fit solution results
print("Getting results...")
selected_solution = all_solutions[-1]
print("Selected solution: ", selected_solution)
solution_id = selected_solution.solution_id
# Score solution
request_id = client.ScoreSolution(solution_id, [baseball_score_path], [6])
scores = client.GetScoreSolutionResults(request_id)
while len(scores) < 1:
scores = client.GetScoreSolutionResults(request_id)
print("Received scores:", scores)
assert(len(scores) >= 1)
print("passed! (test_get_score_solutions)")
print([type(x) for x in scores])
def test_describe_solution():
print("Testing get score solutions...")
# Connect
client = TA2Client()
client.connect()
search_id = client.SearchSolutions(baseball_path, parsed_problem_doc)
# Search and aggregate results
all_solutions = client.GetSearchSolutionsResults(search_id)
print("Client received results", all_solutions)
# Shutdown (but don't kill resources)
client.StopSearchSolutions(search_id)
assert(len(all_solutions) >= 1)
# Fit solution and get fit solution results
print("Getting results...")
selected_solution = all_solutions[-1]
print("Selected solution: ", selected_solution)
solution_id = selected_solution.solution_id
# Describe solution
descr = client.DescribeSolution(solution_id)
print("Received descr:", descr)
print("passed!")
if __name__=="__main__":
print("Testing TA2 server api...")
#test_connect()
#test_search_solutions()
#test_get_score_solutions()
test_fit_solutions()
#test_describe_solution()
print("Done!")
|
d3m-model-search-master
|
Stanford-D3M-Full/ta3ta2/test_server.py
|
"""
Assembles all the protobuf messages needed to
fulfill API contracts.
"""
import ta3ta2.api.core_pb2 as core_pb2
import ta3ta2.api.value_pb2 as value_pb2
import ta3ta2.api.problem_pb2 as problem_pb2
def GetHello():
msg = core_pb2.HelloRequest()
return msg
def GetProblemDescription():
msg = problem_pb2.ProblemDescription()
return msg
def GetSearchSolutionsRequest(dataset_uri, problem):
value = value_pb2.Value(
dataset_uri=dataset_uri)
msg = core_pb2.SearchSolutionsRequest(
user_agent='FakeTA3TestBot',
problem=problem,
inputs=[value, value])
return msg
def GetSearchSolutionsResultsRequest(search_id):
msg = core_pb2.GetSearchSolutionsResultsRequest(
search_id=search_id)
return msg
def GetStopSearchSolutionsRequest(search_id):
msg = core_pb2.StopSearchSolutionsRequest(
search_id=search_id)
return msg
def GetStopSearchSolutionsResponse():
msg = core_pb2.StopSearchSolutionsResponse()
return msg
def GetEndSearchSolutionsRequest(search_id):
msg = core_pb2.EndSearchSolutionsRequest(
search_id=search_id)
return msg
def GetEndSearchSolutionsResponse():
msg = core_pb2.EndSearchSolutionsResponse()
return msg
def GetScoreSolutionRequest(solution_id, eval_datasets, metrics):
msg = core_pb2.ScoreSolutionRequest(
solution_id=solution_id,
inputs=[value_pb2.Value(dataset_uri=x) for x in eval_datasets],
performance_metrics=[problem_pb2.ProblemPerformanceMetric(k=0, metric=m) for m in metrics])
return msg
def GetScoreSolutionResultsRequest(request_id):
msg = core_pb2.GetScoreSolutionResultsRequest(request_id=request_id)
return msg
def GetDescribeSolutionRequest(solution_id):
msg = core_pb2.DescribeSolutionRequest(
solution_id=solution_id)
return msg
def GetFitSolutionRequest(solution_id, dataset_uri):
value = value_pb2.Value(
dataset_uri=dataset_uri)
msg = core_pb2.FitSolutionRequest(
solution_id=solution_id,
inputs=[value])
return msg
def GetFitSolutionResultsRequest(request_id):
msg = core_pb2.GetFitSolutionResultsRequest(
request_id=request_id)
return msg
def GetSolutionExportRequest(fitted_solution_id):
msg = core_pb2.SolutionExportRequest(
fitted_solution_id=fitted_solution_id)
return msg
def GetSolutionExportResponse():
msg = core_pb2.SolutionExportResponse()
return msg
def GetProduceSolutionRequest(fitted_solution_id, dataset_uri):
value = value_pb2.Value(
dataset_uri=dataset_uri)
msg = core_pb2.ProduceSolutionRequest(
fitted_solution_id=fitted_solution_id,
inputs=[value])
return msg
def GetCompletedProgress():
msg = core_pb2.Progress(
state=core_pb2.COMPLETED)
return msg
def SolutionExportRequest(fitted_solution_id, rank):
msg = core_pb2.SolutionExportRequest(
fitted_solution_id=fitted_solution_id,
rank=rank
)
return msg
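# Illustrative usage sketch (not part of the original file): composing a
# ScoreSolutionRequest with the helpers above. The solution id, dataset URI,
# and metric enum value are hypothetical placeholders (test_server.py passes
# metric value 6 in the same way).
#
#   req = GetScoreSolutionRequest(
#       'some-solution-id',
#       ['file:///path/to/SCORE/dataset_TEST/datasetDoc.json'],
#       [6])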
|
d3m-model-search-master
|
Stanford-D3M-Full/ta3ta2/test_server_messages.py
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
|
d3m-model-search-master
|
Stanford-D3M-Full/ta3ta2/api/pipeline_pb2_grpc.py
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
|
d3m-model-search-master
|
Stanford-D3M-Full/ta3ta2/api/problem_pb2_grpc.py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pipeline.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
import ta3ta2.api.primitive_pb2 as primitive__pb2
import ta3ta2.api.value_pb2 as value__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pipeline.proto',
package='',
syntax='proto3',
serialized_pb=_b('\n\x0epipeline.proto\x1a google/protobuf/descriptor.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x0fprimitive.proto\x1a\x0bvalue.proto\"!\n\x11\x43ontainerArgument\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\t\"\x1c\n\x0c\x44\x61taArgument\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\t\"\x1d\n\rDataArguments\x12\x0c\n\x04\x64\x61ta\x18\x01 \x03(\t\"!\n\x11PrimitiveArgument\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x05\"\"\n\x12PrimitiveArguments\x12\x0c\n\x04\x64\x61ta\x18\x01 \x03(\x05\"%\n\rValueArgument\x12\x14\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x06.Value\"k\n\x15PrimitiveStepArgument\x12\'\n\tcontainer\x18\x01 \x01(\x0b\x32\x12.ContainerArgumentH\x00\x12\x1d\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\r.DataArgumentH\x00\x42\n\n\x08\x61rgument\"\x8e\x02\n\x1bPrimitiveStepHyperparameter\x12\'\n\tcontainer\x18\x01 \x01(\x0b\x32\x12.ContainerArgumentH\x00\x12\x1d\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\r.DataArgumentH\x00\x12\'\n\tprimitive\x18\x03 \x01(\x0b\x32\x12.PrimitiveArgumentH\x00\x12\x1f\n\x05value\x18\x04 \x01(\x0b\x32\x0e.ValueArgumentH\x00\x12\"\n\x08\x64\x61ta_set\x18\x05 \x01(\x0b\x32\x0e.DataArgumentsH\x00\x12-\n\x0eprimitives_set\x18\x06 \x01(\x0b\x32\x13.PrimitiveArgumentsH\x00\x42\n\n\x08\x61rgument\"\x19\n\tStepInput\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\t\"\x18\n\nStepOutput\x12\n\n\x02id\x18\x01 \x01(\t\"B\n\x0ePipelineSource\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07\x63ontact\x18\x02 \x01(\t\x12\x11\n\tpipelines\x18\x03 \x03(\t\"H\n\x17PipelineDescriptionUser\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0e\n\x06reason\x18\x02 \x01(\t\x12\x11\n\trationale\x18\x03 \x01(\t\"(\n\x18PipelineDescriptionInput\x12\x0c\n\x04name\x18\x01 \x01(\t\"7\n\x19PipelineDescriptionOutput\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\t\"\xb2\x03\n PrimitivePipelineDescriptionStep\x12\x1d\n\tprimitive\x18\x01 \x01(\x0b\x32\n.Primitive\x12\x43\n\targuments\x18\x02 \x03(\x0b\x32\x30.PrimitivePipelineDescriptionStep.ArgumentsEntry\x12\x1c\n\x07outputs\x18\x03 \x03(\x0b\x32\x0b.StepOutput\x12G\n\x0bhyperparams\x18\x04 \x03(\x0b\x32\x32.PrimitivePipelineDescriptionStep.HyperparamsEntry\x12\'\n\x05users\x18\x05 \x03(\x0b\x32\x18.PipelineDescriptionUser\x1aH\n\x0e\x41rgumentsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.PrimitiveStepArgument:\x02\x38\x01\x1aP\n\x10HyperparamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12+\n\x05value\x18\x02 \x01(\x0b\x32\x1c.PrimitiveStepHyperparameter:\x02\x38\x01\"\x86\x01\n\"SubpipelinePipelineDescriptionStep\x12&\n\x08pipeline\x18\x01 \x01(\x0b\x32\x14.PipelineDescription\x12\x1a\n\x06inputs\x18\x02 \x03(\x0b\x32\n.StepInput\x12\x1c\n\x07outputs\x18\x03 \x03(\x0b\x32\x0b.StepOutput\"^\n\"PlaceholderPipelineDescriptionStep\x12\x1a\n\x06inputs\x18\x01 \x03(\x0b\x32\n.StepInput\x12\x1c\n\x07outputs\x18\x02 \x03(\x0b\x32\x0b.StepOutput\"\xce\x01\n\x17PipelineDescriptionStep\x12\x36\n\tprimitive\x18\x01 \x01(\x0b\x32!.PrimitivePipelineDescriptionStepH\x00\x12\x37\n\x08pipeline\x18\x02 \x01(\x0b\x32#.SubpipelinePipelineDescriptionStepH\x00\x12:\n\x0bplaceholder\x18\x03 \x01(\x0b\x32#.PlaceholderPipelineDescriptionStepH\x00\x42\x06\n\x04step\"\xdf\x02\n\x13PipelineDescription\x12\n\n\x02id\x18\x01 \x01(\t\x12\x1f\n\x06source\x18\x02 \x01(\x0b\x32\x0f.PipelineSource\x12+\n\x07\x63reated\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12!\n\x07\x63ontext\x18\x04 \x01(\x0e\x32\x10.PipelineContext\x12\x0c\n\x04name\x18\x05 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x06 \x01(\t\x12\'\n\x05users\x18\x07 
\x03(\x0b\x32\x18.PipelineDescriptionUser\x12)\n\x06inputs\x18\x08 \x03(\x0b\x32\x19.PipelineDescriptionInput\x12+\n\x07outputs\x18\t \x03(\x0b\x32\x1a.PipelineDescriptionOutput\x12\'\n\x05steps\x18\n \x03(\x0b\x32\x18.PipelineDescriptionStep*m\n\x0fPipelineContext\x12\x1c\n\x18PIPELINE_CONTEXT_UNKNOWN\x10\x00\x12\x0f\n\x0bPRETRAINING\x10\x01\x12\x0b\n\x07TESTING\x10\x02\x12\x0e\n\nEVALUATION\x10\x03\x12\x0e\n\nPRODUCTION\x10\x04\x42\nZ\x08pipelineb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,primitive__pb2.DESCRIPTOR,value__pb2.DESCRIPTOR,])
_PIPELINECONTEXT = _descriptor.EnumDescriptor(
name='PipelineContext',
full_name='PipelineContext',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='PIPELINE_CONTEXT_UNKNOWN', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PRETRAINING', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TESTING', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EVALUATION', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PRODUCTION', index=4, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2230,
serialized_end=2339,
)
_sym_db.RegisterEnumDescriptor(_PIPELINECONTEXT)
PipelineContext = enum_type_wrapper.EnumTypeWrapper(_PIPELINECONTEXT)
PIPELINE_CONTEXT_UNKNOWN = 0
PRETRAINING = 1
TESTING = 2
EVALUATION = 3
PRODUCTION = 4
_CONTAINERARGUMENT = _descriptor.Descriptor(
name='ContainerArgument',
full_name='ContainerArgument',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='data', full_name='ContainerArgument.data', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=115,
serialized_end=148,
)
_DATAARGUMENT = _descriptor.Descriptor(
name='DataArgument',
full_name='DataArgument',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='data', full_name='DataArgument.data', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=150,
serialized_end=178,
)
_DATAARGUMENTS = _descriptor.Descriptor(
name='DataArguments',
full_name='DataArguments',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='data', full_name='DataArguments.data', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=180,
serialized_end=209,
)
_PRIMITIVEARGUMENT = _descriptor.Descriptor(
name='PrimitiveArgument',
full_name='PrimitiveArgument',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='data', full_name='PrimitiveArgument.data', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=211,
serialized_end=244,
)
_PRIMITIVEARGUMENTS = _descriptor.Descriptor(
name='PrimitiveArguments',
full_name='PrimitiveArguments',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='data', full_name='PrimitiveArguments.data', index=0,
number=1, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=246,
serialized_end=280,
)
_VALUEARGUMENT = _descriptor.Descriptor(
name='ValueArgument',
full_name='ValueArgument',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='data', full_name='ValueArgument.data', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=282,
serialized_end=319,
)
_PRIMITIVESTEPARGUMENT = _descriptor.Descriptor(
name='PrimitiveStepArgument',
full_name='PrimitiveStepArgument',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='container', full_name='PrimitiveStepArgument.container', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='PrimitiveStepArgument.data', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='argument', full_name='PrimitiveStepArgument.argument',
index=0, containing_type=None, fields=[]),
],
serialized_start=321,
serialized_end=428,
)
_PRIMITIVESTEPHYPERPARAMETER = _descriptor.Descriptor(
name='PrimitiveStepHyperparameter',
full_name='PrimitiveStepHyperparameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='container', full_name='PrimitiveStepHyperparameter.container', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='PrimitiveStepHyperparameter.data', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='primitive', full_name='PrimitiveStepHyperparameter.primitive', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='PrimitiveStepHyperparameter.value', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data_set', full_name='PrimitiveStepHyperparameter.data_set', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='primitives_set', full_name='PrimitiveStepHyperparameter.primitives_set', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='argument', full_name='PrimitiveStepHyperparameter.argument',
index=0, containing_type=None, fields=[]),
],
serialized_start=431,
serialized_end=701,
)
_STEPINPUT = _descriptor.Descriptor(
name='StepInput',
full_name='StepInput',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='data', full_name='StepInput.data', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=703,
serialized_end=728,
)
_STEPOUTPUT = _descriptor.Descriptor(
name='StepOutput',
full_name='StepOutput',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='StepOutput.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=730,
serialized_end=754,
)
_PIPELINESOURCE = _descriptor.Descriptor(
name='PipelineSource',
full_name='PipelineSource',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='PipelineSource.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='contact', full_name='PipelineSource.contact', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pipelines', full_name='PipelineSource.pipelines', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=756,
serialized_end=822,
)
_PIPELINEDESCRIPTIONUSER = _descriptor.Descriptor(
name='PipelineDescriptionUser',
full_name='PipelineDescriptionUser',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='PipelineDescriptionUser.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='PipelineDescriptionUser.reason', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rationale', full_name='PipelineDescriptionUser.rationale', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=824,
serialized_end=896,
)
_PIPELINEDESCRIPTIONINPUT = _descriptor.Descriptor(
name='PipelineDescriptionInput',
full_name='PipelineDescriptionInput',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='PipelineDescriptionInput.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=898,
serialized_end=938,
)
_PIPELINEDESCRIPTIONOUTPUT = _descriptor.Descriptor(
name='PipelineDescriptionOutput',
full_name='PipelineDescriptionOutput',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='PipelineDescriptionOutput.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='PipelineDescriptionOutput.data', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=940,
serialized_end=995,
)
_PRIMITIVEPIPELINEDESCRIPTIONSTEP_ARGUMENTSENTRY = _descriptor.Descriptor(
name='ArgumentsEntry',
full_name='PrimitivePipelineDescriptionStep.ArgumentsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='PrimitivePipelineDescriptionStep.ArgumentsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='PrimitivePipelineDescriptionStep.ArgumentsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1278,
serialized_end=1350,
)
_PRIMITIVEPIPELINEDESCRIPTIONSTEP_HYPERPARAMSENTRY = _descriptor.Descriptor(
name='HyperparamsEntry',
full_name='PrimitivePipelineDescriptionStep.HyperparamsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='PrimitivePipelineDescriptionStep.HyperparamsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='PrimitivePipelineDescriptionStep.HyperparamsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1352,
serialized_end=1432,
)
_PRIMITIVEPIPELINEDESCRIPTIONSTEP = _descriptor.Descriptor(
name='PrimitivePipelineDescriptionStep',
full_name='PrimitivePipelineDescriptionStep',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='primitive', full_name='PrimitivePipelineDescriptionStep.primitive', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='arguments', full_name='PrimitivePipelineDescriptionStep.arguments', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='outputs', full_name='PrimitivePipelineDescriptionStep.outputs', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hyperparams', full_name='PrimitivePipelineDescriptionStep.hyperparams', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='users', full_name='PrimitivePipelineDescriptionStep.users', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_PRIMITIVEPIPELINEDESCRIPTIONSTEP_ARGUMENTSENTRY, _PRIMITIVEPIPELINEDESCRIPTIONSTEP_HYPERPARAMSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=998,
serialized_end=1432,
)
_SUBPIPELINEPIPELINEDESCRIPTIONSTEP = _descriptor.Descriptor(
name='SubpipelinePipelineDescriptionStep',
full_name='SubpipelinePipelineDescriptionStep',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pipeline', full_name='SubpipelinePipelineDescriptionStep.pipeline', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='inputs', full_name='SubpipelinePipelineDescriptionStep.inputs', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='outputs', full_name='SubpipelinePipelineDescriptionStep.outputs', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1435,
serialized_end=1569,
)
_PLACEHOLDERPIPELINEDESCRIPTIONSTEP = _descriptor.Descriptor(
name='PlaceholderPipelineDescriptionStep',
full_name='PlaceholderPipelineDescriptionStep',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='inputs', full_name='PlaceholderPipelineDescriptionStep.inputs', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='outputs', full_name='PlaceholderPipelineDescriptionStep.outputs', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1571,
serialized_end=1665,
)
_PIPELINEDESCRIPTIONSTEP = _descriptor.Descriptor(
name='PipelineDescriptionStep',
full_name='PipelineDescriptionStep',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='primitive', full_name='PipelineDescriptionStep.primitive', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pipeline', full_name='PipelineDescriptionStep.pipeline', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='placeholder', full_name='PipelineDescriptionStep.placeholder', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='step', full_name='PipelineDescriptionStep.step',
index=0, containing_type=None, fields=[]),
],
serialized_start=1668,
serialized_end=1874,
)
_PIPELINEDESCRIPTION = _descriptor.Descriptor(
name='PipelineDescription',
full_name='PipelineDescription',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='PipelineDescription.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='source', full_name='PipelineDescription.source', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='created', full_name='PipelineDescription.created', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='context', full_name='PipelineDescription.context', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='PipelineDescription.name', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='description', full_name='PipelineDescription.description', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='users', full_name='PipelineDescription.users', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='inputs', full_name='PipelineDescription.inputs', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='outputs', full_name='PipelineDescription.outputs', index=8,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='steps', full_name='PipelineDescription.steps', index=9,
number=10, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1877,
serialized_end=2228,
)
_VALUEARGUMENT.fields_by_name['data'].message_type = value__pb2._VALUE
_PRIMITIVESTEPARGUMENT.fields_by_name['container'].message_type = _CONTAINERARGUMENT
_PRIMITIVESTEPARGUMENT.fields_by_name['data'].message_type = _DATAARGUMENT
_PRIMITIVESTEPARGUMENT.oneofs_by_name['argument'].fields.append(
_PRIMITIVESTEPARGUMENT.fields_by_name['container'])
_PRIMITIVESTEPARGUMENT.fields_by_name['container'].containing_oneof = _PRIMITIVESTEPARGUMENT.oneofs_by_name['argument']
_PRIMITIVESTEPARGUMENT.oneofs_by_name['argument'].fields.append(
_PRIMITIVESTEPARGUMENT.fields_by_name['data'])
_PRIMITIVESTEPARGUMENT.fields_by_name['data'].containing_oneof = _PRIMITIVESTEPARGUMENT.oneofs_by_name['argument']
_PRIMITIVESTEPHYPERPARAMETER.fields_by_name['container'].message_type = _CONTAINERARGUMENT
_PRIMITIVESTEPHYPERPARAMETER.fields_by_name['data'].message_type = _DATAARGUMENT
_PRIMITIVESTEPHYPERPARAMETER.fields_by_name['primitive'].message_type = _PRIMITIVEARGUMENT
_PRIMITIVESTEPHYPERPARAMETER.fields_by_name['value'].message_type = _VALUEARGUMENT
_PRIMITIVESTEPHYPERPARAMETER.fields_by_name['data_set'].message_type = _DATAARGUMENTS
_PRIMITIVESTEPHYPERPARAMETER.fields_by_name['primitives_set'].message_type = _PRIMITIVEARGUMENTS
_PRIMITIVESTEPHYPERPARAMETER.oneofs_by_name['argument'].fields.append(
_PRIMITIVESTEPHYPERPARAMETER.fields_by_name['container'])
_PRIMITIVESTEPHYPERPARAMETER.fields_by_name['container'].containing_oneof = _PRIMITIVESTEPHYPERPARAMETER.oneofs_by_name['argument']
_PRIMITIVESTEPHYPERPARAMETER.oneofs_by_name['argument'].fields.append(
_PRIMITIVESTEPHYPERPARAMETER.fields_by_name['data'])
_PRIMITIVESTEPHYPERPARAMETER.fields_by_name['data'].containing_oneof = _PRIMITIVESTEPHYPERPARAMETER.oneofs_by_name['argument']
_PRIMITIVESTEPHYPERPARAMETER.oneofs_by_name['argument'].fields.append(
_PRIMITIVESTEPHYPERPARAMETER.fields_by_name['primitive'])
_PRIMITIVESTEPHYPERPARAMETER.fields_by_name['primitive'].containing_oneof = _PRIMITIVESTEPHYPERPARAMETER.oneofs_by_name['argument']
_PRIMITIVESTEPHYPERPARAMETER.oneofs_by_name['argument'].fields.append(
_PRIMITIVESTEPHYPERPARAMETER.fields_by_name['value'])
_PRIMITIVESTEPHYPERPARAMETER.fields_by_name['value'].containing_oneof = _PRIMITIVESTEPHYPERPARAMETER.oneofs_by_name['argument']
_PRIMITIVESTEPHYPERPARAMETER.oneofs_by_name['argument'].fields.append(
_PRIMITIVESTEPHYPERPARAMETER.fields_by_name['data_set'])
_PRIMITIVESTEPHYPERPARAMETER.fields_by_name['data_set'].containing_oneof = _PRIMITIVESTEPHYPERPARAMETER.oneofs_by_name['argument']
_PRIMITIVESTEPHYPERPARAMETER.oneofs_by_name['argument'].fields.append(
_PRIMITIVESTEPHYPERPARAMETER.fields_by_name['primitives_set'])
_PRIMITIVESTEPHYPERPARAMETER.fields_by_name['primitives_set'].containing_oneof = _PRIMITIVESTEPHYPERPARAMETER.oneofs_by_name['argument']
_PRIMITIVEPIPELINEDESCRIPTIONSTEP_ARGUMENTSENTRY.fields_by_name['value'].message_type = _PRIMITIVESTEPARGUMENT
_PRIMITIVEPIPELINEDESCRIPTIONSTEP_ARGUMENTSENTRY.containing_type = _PRIMITIVEPIPELINEDESCRIPTIONSTEP
_PRIMITIVEPIPELINEDESCRIPTIONSTEP_HYPERPARAMSENTRY.fields_by_name['value'].message_type = _PRIMITIVESTEPHYPERPARAMETER
_PRIMITIVEPIPELINEDESCRIPTIONSTEP_HYPERPARAMSENTRY.containing_type = _PRIMITIVEPIPELINEDESCRIPTIONSTEP
_PRIMITIVEPIPELINEDESCRIPTIONSTEP.fields_by_name['primitive'].message_type = primitive__pb2._PRIMITIVE
_PRIMITIVEPIPELINEDESCRIPTIONSTEP.fields_by_name['arguments'].message_type = _PRIMITIVEPIPELINEDESCRIPTIONSTEP_ARGUMENTSENTRY
_PRIMITIVEPIPELINEDESCRIPTIONSTEP.fields_by_name['outputs'].message_type = _STEPOUTPUT
_PRIMITIVEPIPELINEDESCRIPTIONSTEP.fields_by_name['hyperparams'].message_type = _PRIMITIVEPIPELINEDESCRIPTIONSTEP_HYPERPARAMSENTRY
_PRIMITIVEPIPELINEDESCRIPTIONSTEP.fields_by_name['users'].message_type = _PIPELINEDESCRIPTIONUSER
_SUBPIPELINEPIPELINEDESCRIPTIONSTEP.fields_by_name['pipeline'].message_type = _PIPELINEDESCRIPTION
_SUBPIPELINEPIPELINEDESCRIPTIONSTEP.fields_by_name['inputs'].message_type = _STEPINPUT
_SUBPIPELINEPIPELINEDESCRIPTIONSTEP.fields_by_name['outputs'].message_type = _STEPOUTPUT
_PLACEHOLDERPIPELINEDESCRIPTIONSTEP.fields_by_name['inputs'].message_type = _STEPINPUT
_PLACEHOLDERPIPELINEDESCRIPTIONSTEP.fields_by_name['outputs'].message_type = _STEPOUTPUT
_PIPELINEDESCRIPTIONSTEP.fields_by_name['primitive'].message_type = _PRIMITIVEPIPELINEDESCRIPTIONSTEP
_PIPELINEDESCRIPTIONSTEP.fields_by_name['pipeline'].message_type = _SUBPIPELINEPIPELINEDESCRIPTIONSTEP
_PIPELINEDESCRIPTIONSTEP.fields_by_name['placeholder'].message_type = _PLACEHOLDERPIPELINEDESCRIPTIONSTEP
_PIPELINEDESCRIPTIONSTEP.oneofs_by_name['step'].fields.append(
_PIPELINEDESCRIPTIONSTEP.fields_by_name['primitive'])
_PIPELINEDESCRIPTIONSTEP.fields_by_name['primitive'].containing_oneof = _PIPELINEDESCRIPTIONSTEP.oneofs_by_name['step']
_PIPELINEDESCRIPTIONSTEP.oneofs_by_name['step'].fields.append(
_PIPELINEDESCRIPTIONSTEP.fields_by_name['pipeline'])
_PIPELINEDESCRIPTIONSTEP.fields_by_name['pipeline'].containing_oneof = _PIPELINEDESCRIPTIONSTEP.oneofs_by_name['step']
_PIPELINEDESCRIPTIONSTEP.oneofs_by_name['step'].fields.append(
_PIPELINEDESCRIPTIONSTEP.fields_by_name['placeholder'])
_PIPELINEDESCRIPTIONSTEP.fields_by_name['placeholder'].containing_oneof = _PIPELINEDESCRIPTIONSTEP.oneofs_by_name['step']
_PIPELINEDESCRIPTION.fields_by_name['source'].message_type = _PIPELINESOURCE
_PIPELINEDESCRIPTION.fields_by_name['created'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_PIPELINEDESCRIPTION.fields_by_name['context'].enum_type = _PIPELINECONTEXT
_PIPELINEDESCRIPTION.fields_by_name['users'].message_type = _PIPELINEDESCRIPTIONUSER
_PIPELINEDESCRIPTION.fields_by_name['inputs'].message_type = _PIPELINEDESCRIPTIONINPUT
_PIPELINEDESCRIPTION.fields_by_name['outputs'].message_type = _PIPELINEDESCRIPTIONOUTPUT
_PIPELINEDESCRIPTION.fields_by_name['steps'].message_type = _PIPELINEDESCRIPTIONSTEP
DESCRIPTOR.message_types_by_name['ContainerArgument'] = _CONTAINERARGUMENT
DESCRIPTOR.message_types_by_name['DataArgument'] = _DATAARGUMENT
DESCRIPTOR.message_types_by_name['DataArguments'] = _DATAARGUMENTS
DESCRIPTOR.message_types_by_name['PrimitiveArgument'] = _PRIMITIVEARGUMENT
DESCRIPTOR.message_types_by_name['PrimitiveArguments'] = _PRIMITIVEARGUMENTS
DESCRIPTOR.message_types_by_name['ValueArgument'] = _VALUEARGUMENT
DESCRIPTOR.message_types_by_name['PrimitiveStepArgument'] = _PRIMITIVESTEPARGUMENT
DESCRIPTOR.message_types_by_name['PrimitiveStepHyperparameter'] = _PRIMITIVESTEPHYPERPARAMETER
DESCRIPTOR.message_types_by_name['StepInput'] = _STEPINPUT
DESCRIPTOR.message_types_by_name['StepOutput'] = _STEPOUTPUT
DESCRIPTOR.message_types_by_name['PipelineSource'] = _PIPELINESOURCE
DESCRIPTOR.message_types_by_name['PipelineDescriptionUser'] = _PIPELINEDESCRIPTIONUSER
DESCRIPTOR.message_types_by_name['PipelineDescriptionInput'] = _PIPELINEDESCRIPTIONINPUT
DESCRIPTOR.message_types_by_name['PipelineDescriptionOutput'] = _PIPELINEDESCRIPTIONOUTPUT
DESCRIPTOR.message_types_by_name['PrimitivePipelineDescriptionStep'] = _PRIMITIVEPIPELINEDESCRIPTIONSTEP
DESCRIPTOR.message_types_by_name['SubpipelinePipelineDescriptionStep'] = _SUBPIPELINEPIPELINEDESCRIPTIONSTEP
DESCRIPTOR.message_types_by_name['PlaceholderPipelineDescriptionStep'] = _PLACEHOLDERPIPELINEDESCRIPTIONSTEP
DESCRIPTOR.message_types_by_name['PipelineDescriptionStep'] = _PIPELINEDESCRIPTIONSTEP
DESCRIPTOR.message_types_by_name['PipelineDescription'] = _PIPELINEDESCRIPTION
DESCRIPTOR.enum_types_by_name['PipelineContext'] = _PIPELINECONTEXT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ContainerArgument = _reflection.GeneratedProtocolMessageType('ContainerArgument', (_message.Message,), dict(
DESCRIPTOR = _CONTAINERARGUMENT,
__module__ = 'pipeline_pb2'
# @@protoc_insertion_point(class_scope:ContainerArgument)
))
_sym_db.RegisterMessage(ContainerArgument)
DataArgument = _reflection.GeneratedProtocolMessageType('DataArgument', (_message.Message,), dict(
DESCRIPTOR = _DATAARGUMENT,
__module__ = 'pipeline_pb2'
# @@protoc_insertion_point(class_scope:DataArgument)
))
_sym_db.RegisterMessage(DataArgument)
DataArguments = _reflection.GeneratedProtocolMessageType('DataArguments', (_message.Message,), dict(
DESCRIPTOR = _DATAARGUMENTS,
__module__ = 'pipeline_pb2'
# @@protoc_insertion_point(class_scope:DataArguments)
))
_sym_db.RegisterMessage(DataArguments)
PrimitiveArgument = _reflection.GeneratedProtocolMessageType('PrimitiveArgument', (_message.Message,), dict(
DESCRIPTOR = _PRIMITIVEARGUMENT,
__module__ = 'pipeline_pb2'
# @@protoc_insertion_point(class_scope:PrimitiveArgument)
))
_sym_db.RegisterMessage(PrimitiveArgument)
PrimitiveArguments = _reflection.GeneratedProtocolMessageType('PrimitiveArguments', (_message.Message,), dict(
DESCRIPTOR = _PRIMITIVEARGUMENTS,
__module__ = 'pipeline_pb2'
# @@protoc_insertion_point(class_scope:PrimitiveArguments)
))
_sym_db.RegisterMessage(PrimitiveArguments)
ValueArgument = _reflection.GeneratedProtocolMessageType('ValueArgument', (_message.Message,), dict(
DESCRIPTOR = _VALUEARGUMENT,
__module__ = 'pipeline_pb2'
# @@protoc_insertion_point(class_scope:ValueArgument)
))
_sym_db.RegisterMessage(ValueArgument)
PrimitiveStepArgument = _reflection.GeneratedProtocolMessageType('PrimitiveStepArgument', (_message.Message,), dict(
DESCRIPTOR = _PRIMITIVESTEPARGUMENT,
__module__ = 'pipeline_pb2'
# @@protoc_insertion_point(class_scope:PrimitiveStepArgument)
))
_sym_db.RegisterMessage(PrimitiveStepArgument)
PrimitiveStepHyperparameter = _reflection.GeneratedProtocolMessageType('PrimitiveStepHyperparameter', (_message.Message,), dict(
DESCRIPTOR = _PRIMITIVESTEPHYPERPARAMETER,
__module__ = 'pipeline_pb2'
# @@protoc_insertion_point(class_scope:PrimitiveStepHyperparameter)
))
_sym_db.RegisterMessage(PrimitiveStepHyperparameter)
StepInput = _reflection.GeneratedProtocolMessageType('StepInput', (_message.Message,), dict(
DESCRIPTOR = _STEPINPUT,
__module__ = 'pipeline_pb2'
# @@protoc_insertion_point(class_scope:StepInput)
))
_sym_db.RegisterMessage(StepInput)
StepOutput = _reflection.GeneratedProtocolMessageType('StepOutput', (_message.Message,), dict(
DESCRIPTOR = _STEPOUTPUT,
__module__ = 'pipeline_pb2'
# @@protoc_insertion_point(class_scope:StepOutput)
))
_sym_db.RegisterMessage(StepOutput)
PipelineSource = _reflection.GeneratedProtocolMessageType('PipelineSource', (_message.Message,), dict(
DESCRIPTOR = _PIPELINESOURCE,
__module__ = 'pipeline_pb2'
# @@protoc_insertion_point(class_scope:PipelineSource)
))
_sym_db.RegisterMessage(PipelineSource)
PipelineDescriptionUser = _reflection.GeneratedProtocolMessageType('PipelineDescriptionUser', (_message.Message,), dict(
DESCRIPTOR = _PIPELINEDESCRIPTIONUSER,
__module__ = 'pipeline_pb2'
# @@protoc_insertion_point(class_scope:PipelineDescriptionUser)
))
_sym_db.RegisterMessage(PipelineDescriptionUser)
PipelineDescriptionInput = _reflection.GeneratedProtocolMessageType('PipelineDescriptionInput', (_message.Message,), dict(
DESCRIPTOR = _PIPELINEDESCRIPTIONINPUT,
__module__ = 'pipeline_pb2'
# @@protoc_insertion_point(class_scope:PipelineDescriptionInput)
))
_sym_db.RegisterMessage(PipelineDescriptionInput)
PipelineDescriptionOutput = _reflection.GeneratedProtocolMessageType('PipelineDescriptionOutput', (_message.Message,), dict(
DESCRIPTOR = _PIPELINEDESCRIPTIONOUTPUT,
__module__ = 'pipeline_pb2'
# @@protoc_insertion_point(class_scope:PipelineDescriptionOutput)
))
_sym_db.RegisterMessage(PipelineDescriptionOutput)
PrimitivePipelineDescriptionStep = _reflection.GeneratedProtocolMessageType('PrimitivePipelineDescriptionStep', (_message.Message,), dict(
ArgumentsEntry = _reflection.GeneratedProtocolMessageType('ArgumentsEntry', (_message.Message,), dict(
DESCRIPTOR = _PRIMITIVEPIPELINEDESCRIPTIONSTEP_ARGUMENTSENTRY,
__module__ = 'pipeline_pb2'
# @@protoc_insertion_point(class_scope:PrimitivePipelineDescriptionStep.ArgumentsEntry)
))
,
HyperparamsEntry = _reflection.GeneratedProtocolMessageType('HyperparamsEntry', (_message.Message,), dict(
DESCRIPTOR = _PRIMITIVEPIPELINEDESCRIPTIONSTEP_HYPERPARAMSENTRY,
__module__ = 'pipeline_pb2'
# @@protoc_insertion_point(class_scope:PrimitivePipelineDescriptionStep.HyperparamsEntry)
))
,
DESCRIPTOR = _PRIMITIVEPIPELINEDESCRIPTIONSTEP,
__module__ = 'pipeline_pb2'
# @@protoc_insertion_point(class_scope:PrimitivePipelineDescriptionStep)
))
_sym_db.RegisterMessage(PrimitivePipelineDescriptionStep)
_sym_db.RegisterMessage(PrimitivePipelineDescriptionStep.ArgumentsEntry)
_sym_db.RegisterMessage(PrimitivePipelineDescriptionStep.HyperparamsEntry)
SubpipelinePipelineDescriptionStep = _reflection.GeneratedProtocolMessageType('SubpipelinePipelineDescriptionStep', (_message.Message,), dict(
DESCRIPTOR = _SUBPIPELINEPIPELINEDESCRIPTIONSTEP,
__module__ = 'pipeline_pb2'
# @@protoc_insertion_point(class_scope:SubpipelinePipelineDescriptionStep)
))
_sym_db.RegisterMessage(SubpipelinePipelineDescriptionStep)
PlaceholderPipelineDescriptionStep = _reflection.GeneratedProtocolMessageType('PlaceholderPipelineDescriptionStep', (_message.Message,), dict(
DESCRIPTOR = _PLACEHOLDERPIPELINEDESCRIPTIONSTEP,
__module__ = 'pipeline_pb2'
# @@protoc_insertion_point(class_scope:PlaceholderPipelineDescriptionStep)
))
_sym_db.RegisterMessage(PlaceholderPipelineDescriptionStep)
PipelineDescriptionStep = _reflection.GeneratedProtocolMessageType('PipelineDescriptionStep', (_message.Message,), dict(
DESCRIPTOR = _PIPELINEDESCRIPTIONSTEP,
__module__ = 'pipeline_pb2'
# @@protoc_insertion_point(class_scope:PipelineDescriptionStep)
))
_sym_db.RegisterMessage(PipelineDescriptionStep)
PipelineDescription = _reflection.GeneratedProtocolMessageType('PipelineDescription', (_message.Message,), dict(
DESCRIPTOR = _PIPELINEDESCRIPTION,
__module__ = 'pipeline_pb2'
# @@protoc_insertion_point(class_scope:PipelineDescription)
))
_sym_db.RegisterMessage(PipelineDescription)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('Z\010pipeline'))
_PRIMITIVEPIPELINEDESCRIPTIONSTEP_ARGUMENTSENTRY.has_options = True
_PRIMITIVEPIPELINEDESCRIPTIONSTEP_ARGUMENTSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_PRIMITIVEPIPELINEDESCRIPTIONSTEP_HYPERPARAMSENTRY.has_options = True
_PRIMITIVEPIPELINEDESCRIPTIONSTEP_HYPERPARAMSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
# @@protoc_insertion_point(module_scope)
|
d3m-model-search-master
|
Stanford-D3M-Full/ta3ta2/api/pipeline_pb2.py
|
d3m-model-search-master
|
Stanford-D3M-Full/ta3ta2/api/__init__.py
|
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import ta3ta2.api.core_pb2 as core__pb2
class CoreStub(object):
"""See each message's comments for information about each particular call.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.SearchSolutions = channel.unary_unary(
'/Core/SearchSolutions',
request_serializer=core__pb2.SearchSolutionsRequest.SerializeToString,
response_deserializer=core__pb2.SearchSolutionsResponse.FromString,
)
self.GetSearchSolutionsResults = channel.unary_stream(
'/Core/GetSearchSolutionsResults',
request_serializer=core__pb2.GetSearchSolutionsResultsRequest.SerializeToString,
response_deserializer=core__pb2.GetSearchSolutionsResultsResponse.FromString,
)
self.EndSearchSolutions = channel.unary_unary(
'/Core/EndSearchSolutions',
request_serializer=core__pb2.EndSearchSolutionsRequest.SerializeToString,
response_deserializer=core__pb2.EndSearchSolutionsResponse.FromString,
)
self.StopSearchSolutions = channel.unary_unary(
'/Core/StopSearchSolutions',
request_serializer=core__pb2.StopSearchSolutionsRequest.SerializeToString,
response_deserializer=core__pb2.StopSearchSolutionsResponse.FromString,
)
self.DescribeSolution = channel.unary_unary(
'/Core/DescribeSolution',
request_serializer=core__pb2.DescribeSolutionRequest.SerializeToString,
response_deserializer=core__pb2.DescribeSolutionResponse.FromString,
)
self.ScoreSolution = channel.unary_unary(
'/Core/ScoreSolution',
request_serializer=core__pb2.ScoreSolutionRequest.SerializeToString,
response_deserializer=core__pb2.ScoreSolutionResponse.FromString,
)
self.GetScoreSolutionResults = channel.unary_stream(
'/Core/GetScoreSolutionResults',
request_serializer=core__pb2.GetScoreSolutionResultsRequest.SerializeToString,
response_deserializer=core__pb2.GetScoreSolutionResultsResponse.FromString,
)
self.FitSolution = channel.unary_unary(
'/Core/FitSolution',
request_serializer=core__pb2.FitSolutionRequest.SerializeToString,
response_deserializer=core__pb2.FitSolutionResponse.FromString,
)
self.GetFitSolutionResults = channel.unary_stream(
'/Core/GetFitSolutionResults',
request_serializer=core__pb2.GetFitSolutionResultsRequest.SerializeToString,
response_deserializer=core__pb2.GetFitSolutionResultsResponse.FromString,
)
self.ProduceSolution = channel.unary_unary(
'/Core/ProduceSolution',
request_serializer=core__pb2.ProduceSolutionRequest.SerializeToString,
response_deserializer=core__pb2.ProduceSolutionResponse.FromString,
)
self.GetProduceSolutionResults = channel.unary_stream(
'/Core/GetProduceSolutionResults',
request_serializer=core__pb2.GetProduceSolutionResultsRequest.SerializeToString,
response_deserializer=core__pb2.GetProduceSolutionResultsResponse.FromString,
)
self.SolutionExport = channel.unary_unary(
'/Core/SolutionExport',
request_serializer=core__pb2.SolutionExportRequest.SerializeToString,
response_deserializer=core__pb2.SolutionExportResponse.FromString,
)
self.UpdateProblem = channel.unary_unary(
'/Core/UpdateProblem',
request_serializer=core__pb2.UpdateProblemRequest.SerializeToString,
response_deserializer=core__pb2.UpdateProblemResponse.FromString,
)
self.ListPrimitives = channel.unary_unary(
'/Core/ListPrimitives',
request_serializer=core__pb2.ListPrimitivesRequest.SerializeToString,
response_deserializer=core__pb2.ListPrimitivesResponse.FromString,
)
self.Hello = channel.unary_unary(
'/Core/Hello',
request_serializer=core__pb2.HelloRequest.SerializeToString,
response_deserializer=core__pb2.HelloResponse.FromString,
)
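# A minimal client-side sketch (not part of the generated file; the address
# below is illustrative, as TA2 servers pick their own endpoint):
#     channel = grpc.insecure_channel('localhost:45042')
#     stub = CoreStub(channel)
#     hello_response = stub.Hello(core__pb2.HelloRequest())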
class CoreServicer(object):
"""See each message's comments for information about each particular call.
"""
def SearchSolutions(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetSearchSolutionsResults(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def EndSearchSolutions(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def StopSearchSolutions(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DescribeSolution(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ScoreSolution(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetScoreSolutionResults(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def FitSolution(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetFitSolutionResults(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ProduceSolution(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetProduceSolutionResults(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SolutionExport(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateProblem(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListPrimitives(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Hello(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_CoreServicer_to_server(servicer, server):
rpc_method_handlers = {
'SearchSolutions': grpc.unary_unary_rpc_method_handler(
servicer.SearchSolutions,
request_deserializer=core__pb2.SearchSolutionsRequest.FromString,
response_serializer=core__pb2.SearchSolutionsResponse.SerializeToString,
),
'GetSearchSolutionsResults': grpc.unary_stream_rpc_method_handler(
servicer.GetSearchSolutionsResults,
request_deserializer=core__pb2.GetSearchSolutionsResultsRequest.FromString,
response_serializer=core__pb2.GetSearchSolutionsResultsResponse.SerializeToString,
),
'EndSearchSolutions': grpc.unary_unary_rpc_method_handler(
servicer.EndSearchSolutions,
request_deserializer=core__pb2.EndSearchSolutionsRequest.FromString,
response_serializer=core__pb2.EndSearchSolutionsResponse.SerializeToString,
),
'StopSearchSolutions': grpc.unary_unary_rpc_method_handler(
servicer.StopSearchSolutions,
request_deserializer=core__pb2.StopSearchSolutionsRequest.FromString,
response_serializer=core__pb2.StopSearchSolutionsResponse.SerializeToString,
),
'DescribeSolution': grpc.unary_unary_rpc_method_handler(
servicer.DescribeSolution,
request_deserializer=core__pb2.DescribeSolutionRequest.FromString,
response_serializer=core__pb2.DescribeSolutionResponse.SerializeToString,
),
'ScoreSolution': grpc.unary_unary_rpc_method_handler(
servicer.ScoreSolution,
request_deserializer=core__pb2.ScoreSolutionRequest.FromString,
response_serializer=core__pb2.ScoreSolutionResponse.SerializeToString,
),
'GetScoreSolutionResults': grpc.unary_stream_rpc_method_handler(
servicer.GetScoreSolutionResults,
request_deserializer=core__pb2.GetScoreSolutionResultsRequest.FromString,
response_serializer=core__pb2.GetScoreSolutionResultsResponse.SerializeToString,
),
'FitSolution': grpc.unary_unary_rpc_method_handler(
servicer.FitSolution,
request_deserializer=core__pb2.FitSolutionRequest.FromString,
response_serializer=core__pb2.FitSolutionResponse.SerializeToString,
),
'GetFitSolutionResults': grpc.unary_stream_rpc_method_handler(
servicer.GetFitSolutionResults,
request_deserializer=core__pb2.GetFitSolutionResultsRequest.FromString,
response_serializer=core__pb2.GetFitSolutionResultsResponse.SerializeToString,
),
'ProduceSolution': grpc.unary_unary_rpc_method_handler(
servicer.ProduceSolution,
request_deserializer=core__pb2.ProduceSolutionRequest.FromString,
response_serializer=core__pb2.ProduceSolutionResponse.SerializeToString,
),
'GetProduceSolutionResults': grpc.unary_stream_rpc_method_handler(
servicer.GetProduceSolutionResults,
request_deserializer=core__pb2.GetProduceSolutionResultsRequest.FromString,
response_serializer=core__pb2.GetProduceSolutionResultsResponse.SerializeToString,
),
'SolutionExport': grpc.unary_unary_rpc_method_handler(
servicer.SolutionExport,
request_deserializer=core__pb2.SolutionExportRequest.FromString,
response_serializer=core__pb2.SolutionExportResponse.SerializeToString,
),
'UpdateProblem': grpc.unary_unary_rpc_method_handler(
servicer.UpdateProblem,
request_deserializer=core__pb2.UpdateProblemRequest.FromString,
response_serializer=core__pb2.UpdateProblemResponse.SerializeToString,
),
'ListPrimitives': grpc.unary_unary_rpc_method_handler(
servicer.ListPrimitives,
request_deserializer=core__pb2.ListPrimitivesRequest.FromString,
response_serializer=core__pb2.ListPrimitivesResponse.SerializeToString,
),
'Hello': grpc.unary_unary_rpc_method_handler(
servicer.Hello,
request_deserializer=core__pb2.HelloRequest.FromString,
response_serializer=core__pb2.HelloResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'Core', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
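# A server-side sketch, assuming a CoreServicer subclass (MyCoreServicer is a
# hypothetical name) that overrides the methods above; the worker count and
# port are illustrative only:
#     from concurrent import futures
#     server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
#     add_CoreServicer_to_server(MyCoreServicer(), server)
#     server.add_insecure_port('[::]:45042')
#     server.start()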
|
d3m-model-search-master
|
Stanford-D3M-Full/ta3ta2/api/core_pb2_grpc.py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: primitive.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='primitive.proto',
package='',
syntax='proto3',
serialized_pb=_b('\n\x0fprimitive.proto\x1a google/protobuf/descriptor.proto\"[\n\tPrimitive\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\t\x12\x13\n\x0bpython_path\x18\x03 \x01(\t\x12\x0c\n\x04name\x18\x04 \x01(\t\x12\x0e\n\x06\x64igest\x18\x05 \x01(\tB\nZ\x08pipelineb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,])
_PRIMITIVE = _descriptor.Descriptor(
name='Primitive',
full_name='Primitive',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='Primitive.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version', full_name='Primitive.version', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='python_path', full_name='Primitive.python_path', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='Primitive.name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='digest', full_name='Primitive.digest', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=53,
serialized_end=144,
)
DESCRIPTOR.message_types_by_name['Primitive'] = _PRIMITIVE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Primitive = _reflection.GeneratedProtocolMessageType('Primitive', (_message.Message,), dict(
DESCRIPTOR = _PRIMITIVE,
__module__ = 'primitive_pb2'
# @@protoc_insertion_point(class_scope:Primitive)
))
_sym_db.RegisterMessage(Primitive)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('Z\010pipeline'))
# @@protoc_insertion_point(module_scope)
|
d3m-model-search-master
|
Stanford-D3M-Full/ta3ta2/api/primitive_pb2.py
|
"""
We represent values in this module at three levels:
* A GRPC ``Value`` message.
* An *intermediate level* with a dict with ``type`` and ``value`` fields
where ``type`` can be one of ``object``, ``dataset_uri``, ``csv_uri``,
``pickle_uri``, ``plasma_id``, ``error``. All values except for ``object``
type are strings. Values for ``object`` type are raw Python values.
* A raw Python value.
One can use `decode_value` to convert from a GRPC ``Value`` message to
the value at the *intermediate level*. And `encode_value` to convert
from a value at the *intermediate level* to the GRPC ``Value`` message.
One can use `load_value` to convert from a value at the *intermediate level*
to the raw Python value. And `save_value` to convert from a raw Python value
to the value at the *intermediate level*.
The reason for three levels is that sometimes you want to pass a value
around Python codebase without loading the whole value into the memory.
So conversion from and to the GRPC API can be done at a different place
than loading and saving raw Python values.
"""
import binascii
import datetime
import os.path
import pickle
import shutil
import sys
import tempfile
import typing
import uuid
from urllib import parse as url_parse
import frozendict
import pandas
from google.protobuf import timestamp_pb2
from d3m import container, exceptions, runtime as runtime_module, utils as d3m_utils
from d3m.metadata import base as metadata_base, pipeline as pipeline_module, problem as problem_module
from d3m.metadata.base import ArgumentType, Context
from . import core_pb2
from . import pipeline_pb2
from . import primitive_pb2
from . import problem_pb2
from . import value_pb2
MAX_WIRE_OBJECT_SIZE = 64 * 1024 # bytes
def _hex_to_binary(hex_identifier):
return binascii.unhexlify(hex_identifier)
def _binary_to_hex(identifier):
hex_identifier = binascii.hexlify(identifier)
return hex_identifier.decode()
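# For example, _binary_to_hex(b'\x0b\xee') == '0bee' and
# _hex_to_binary('0bee') == b'\x0b\xee'; pairs like this let Plasma object IDs
# (raw bytes) travel through the string-valued ``plasma_id`` fields described
# in the module docstring.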
class ValueType(d3m_utils.Enum):
"""
Enumeration of possible value types.
Values are kept in sync with TA2-TA3 API's ``ValueType`` enumeration.
"""
RAW = 1
DATASET_URI = 2
CSV_URI = 3
PICKLE_URI = 4
PICKLE_BLOB = 5
PLASMA_ID = 6
def _can_encode_raw(value):
"""
    Can the value be encoded as a raw GRPC value for the TA2-TA3 API?
    Parameters
    ----------
    value : Any
        Value to try to encode as a raw GRPC value.
    Returns
    -------
    bool
        ``True`` if the value can be encoded as a raw GRPC value.
"""
try:
encode_raw_value(value)
return True
except Exception:
return False
def _can_pickle(value):
"""
Can the value be pickled?
Parameters
----------
value : Any
Value to try to pickle.
Returns
-------
bool
``True`` if the value can be pickled.
"""
try:
pickle.dumps(value)
return True
except Exception:
return False
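# Both probes above use the same try/except pattern: attempt the conversion
# and report success, rather than predicting encodability from the type. For
# instance, _can_pickle({'a': 1}) is True while _can_pickle(lambda x: x) is
# False, since lambdas cannot be pickled.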
def _fix_uri(uri):
"""
Make a real URI from an absolute path. Some clients do not use proper URIs.
Parameters
----------
uri : str
An input URI.
Returns
-------
str
A fixed URI.
"""
if not uri.startswith('file://') and uri.startswith('/'):
uri = 'file://{uri}'.format(uri=uri)
return uri
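# e.g. _fix_uri('/data/datasetDoc.json') returns
# 'file:///data/datasetDoc.json', while inputs that already carry a scheme
# (or are relative paths) pass through unchanged.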
def _open_file_uri(uri, mode='r', encoding=None, validate_uri=None):
"""
Opens a file URI and returns a file object.
Parameters
----------
uri : str
A file URI to the file.
mode : str
Mode to open a file with.
    encoding : str
        Encoding to use.
    validate_uri : Callable
        An optional function which validates that ``uri`` is a valid and
        supported file URI; it should raise an exception if the URI is invalid.
Returns
-------
File
A file object.
"""
if validate_uri is not None:
validate_uri(uri)
parsed_uri = url_parse.urlparse(uri)
return open(parsed_uri.path, mode=mode, encoding=encoding)
def _fix_file_uri_host(uri):
"""
If ``uri`` is a file URI, make sure that it has a host set to ``localhost``.
Parameters
----------
uri : str
URI to fix.
Returns
-------
str
Fixed URI.
"""
parsed_uri = url_parse.urlparse(uri)
if parsed_uri.scheme == 'file' and parsed_uri.netloc == '':
parsed_uri = parsed_uri._replace(netloc='localhost')
uri = url_parse.urlunparse(parsed_uri)
return uri
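# e.g. _fix_file_uri_host('file:///tmp/out.csv') returns
# 'file://localhost/tmp/out.csv'; URIs with other schemes are returned
# unchanged.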
def encode_primitive(primitive):
"""
Encodes a primitive into a GRPC message.
Parameters
----------
primitive : Type[PrimitiveBase]
A primitive class.
Returns
-------
Primitive
A GRPC message.
"""
metadata = primitive.metadata.query()
return primitive_pb2.Primitive(
id=metadata['id'],
version=metadata['version'],
python_path=metadata['python_path'],
name=metadata['name'],
digest=metadata.get('digest', None),
)
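# A hedged usage sketch, assuming some resolved d3m primitive class
# SomePrimitive (a hypothetical name) whose metadata carries the queried keys:
#     message = encode_primitive(SomePrimitive)
#     message.python_path  # the primitive's registered Python path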
def encode_primitive_description(primitive_description):
"""
Encodes a primitive description into a GRPC message.
Parameters
----------
primitive_description : Dict
A primitive description.
Returns
-------
Primitive
A GRPC message.
"""
return primitive_pb2.Primitive(
id=primitive_description['id'],
version=primitive_description['version'],
python_path=primitive_description['python_path'],
name=primitive_description['name'],
digest=primitive_description.get('digest', None),
)
def decode_primitive(primitive):
"""
Decodes a GRPC message into a primitive description.
Parameters
----------
primitive : Primitive
A GRPC message.
Returns
-------
Dict
A primitive description.
"""
primitive_dict = {
'id': primitive.id,
'version': primitive.version,
'python_path': primitive.python_path,
'name': primitive.name,
}
if primitive.digest:
primitive_dict['digest'] = primitive.digest
return primitive_dict
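# decode_primitive mirrors encode_primitive_description: a message built from
# a primitive description decodes back to the same dict, except that an unset
# ``digest`` (an empty string on the wire) is simply omitted.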
def encode_problem_description(problem_description):
"""
Encodes a problem description into a GRPC message.
Parameters
----------
problem_description : Dict
A problem description.
Returns
-------
ProblemDescription
A GRPC message.
"""
performance_metrics = []
for performance_metric in problem_description['problem'].get('performance_metrics', []):
performance_metrics.append(encode_performance_metric(performance_metric))
problem = problem_pb2.Problem(
id=problem_description['problem'].get('id', None),
version=problem_description['problem'].get('version', None),
name=problem_description['problem'].get('name', None),
description=problem_description['problem'].get('description', None),
task_type=problem_description['problem']['task_type'].value,
task_subtype=problem_description['problem']['task_subtype'].value,
performance_metrics=performance_metrics,
)
inputs = []
for problem_input in problem_description.get('inputs', []):
targets = []
for target in problem_input.get('targets', []):
targets.append(
problem_pb2.ProblemTarget(
target_index=target['target_index'],
resource_id=target['resource_id'],
column_index=target['column_index'],
column_name=target['column_name'],
clusters_number=target.get('clusters_number', None),
),
)
inputs.append(
problem_pb2.ProblemInput(dataset_id=problem_input['dataset_id'], targets=targets),
)
return problem_pb2.ProblemDescription(
problem=problem,
inputs=inputs,
)
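# A minimal input sketch (identifiers are hypothetical; TaskType and
# TaskSubtype members come from d3m.metadata.problem, imported above as
# problem_module):
#     encode_problem_description({
#         'problem': {
#             'id': 'example_problem_id',
#             'task_type': problem_module.TaskType.CLASSIFICATION,
#             'task_subtype': problem_module.TaskSubtype.MULTICLASS,
#         },
#         'inputs': [{'dataset_id': 'example_dataset_id'}],
#     })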
def decode_problem_description(problem_description):
"""
Decodes a GRPC message into a problem description.
Parameters
----------
problem_description : ProblemDescription
A GRPC message.
Returns
-------
Union[Dict, None]
        A problem description, or ``None`` if the problem is not defined.
"""
if problem_description.problem.task_type == problem_pb2.TaskType.Value('TASK_TYPE_UNDEFINED') and problem_description.problem.task_subtype == problem_pb2.TaskSubtype.Value('TASK_SUBTYPE_UNDEFINED'):
return None
description = {
'schema': problem_module.PROBLEM_SCHEMA_VERSION,
'problem': {
'id': problem_description.problem.id,
'version': problem_description.problem.version,
'name': problem_description.problem.name,
'task_type': problem_module.TaskType(problem_description.problem.task_type),
'task_subtype': problem_module.TaskSubtype(problem_description.problem.task_subtype),
},
}
if problem_description.problem.description:
description['problem']['description'] = problem_description.problem.description
performance_metrics = []
for performance_metric in problem_description.problem.performance_metrics:
performance_metrics.append(decode_performance_metric(performance_metric))
if performance_metrics:
description['problem']['performance_metrics'] = performance_metrics
inputs = []
for problem_input in problem_description.inputs:
targets = []
for target in problem_input.targets:
targets.append(
{
'target_index': target.target_index,
'resource_id': target.resource_id,
'column_index': target.column_index,
'column_name': target.column_name,
},
)
if target.clusters_number:
targets[-1]['clusters_number'] = target.clusters_number
problem_input = {
'dataset_id': problem_input.dataset_id,
}
if targets:
problem_input['targets'] = targets
inputs.append(problem_input)
if inputs:
description['inputs'] = inputs
problem_module.PROBLEM_SCHEMA_VALIDATOR.validate(description)
return description
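# Note: the decoder validates its result against the d3m problem schema just
# before returning, so a message produced by encode_problem_description above
# should decode back into an equivalent description.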
def encode_pipeline_description(pipeline, allowed_value_types, scratch_dir, *, plasma_put=None, validate_uri=None):
"""
Encodes a pipeline into a GRPC message.
Parameters
----------
pipeline : Pipeline
A pipeline instance. Primitives do not have to be resolved. Sub-pipelines should be nested.
allowed_value_types : Sequence[ValueType]
        A list of allowed value types to encode values (such as hyper-parameter values) as. This
list is tried in order until encoding succeeds.
scratch_dir : str
Path to a directory to store any temporary files needed during execution.
plasma_put : Callable
A function to store a value into a Plasma store.
        The function takes a value to store into the Plasma store and should
        return the stored object's ID as bytes.
validate_uri : Callable
A function which can validate that URI is a valid and supported file URI.
        The function takes a URI as a string and should throw an exception if the URI is invalid.
Returns
-------
PipelineDescription
A GRPC message.
"""
if pipeline.source is not None:
source = pipeline_pb2.PipelineSource(
name=pipeline.source.get('name', None),
contact=pipeline.source.get('contact', None),
pipelines=[p['id'] for p in pipeline.source.get('from', {}).get('pipelines', []) if pipeline.source.get('from', {}).get('type', None) == 'PIPELINE'],
)
else:
source = None
steps = []
for step in pipeline.steps:
if isinstance(step, pipeline_module.PrimitiveStep):
arguments = {}
for name, argument in step.arguments.items():
if argument['type'] == ArgumentType.CONTAINER:
arguments[name] = pipeline_pb2.PrimitiveStepArgument(
container=pipeline_pb2.ContainerArgument(
data=argument['data'],
),
)
elif argument['type'] == ArgumentType.DATA:
assert not isinstance(argument['data'], typing.Sequence), type(argument['data'])
arguments[name] = pipeline_pb2.PrimitiveStepArgument(
data=pipeline_pb2.DataArgument(
data=argument['data'],
),
)
else:
raise exceptions.UnexpectedValueError("Unknown argument type: {argument_type}".format(argument_type=argument['type']))
hyperparams = {}
for name, hyperparameter in step.hyperparams.items():
if hyperparameter['type'] == ArgumentType.CONTAINER:
hyperparams[name] = pipeline_pb2.PrimitiveStepHyperparameter(
container=pipeline_pb2.ContainerArgument(
data=hyperparameter['data'],
),
)
elif hyperparameter['type'] == ArgumentType.DATA:
if isinstance(hyperparameter['data'], typing.Sequence):
hyperparams[name] = pipeline_pb2.PrimitiveStepHyperparameter(
data_set=pipeline_pb2.DataArguments(
data=hyperparameter['data'],
),
)
else:
hyperparams[name] = pipeline_pb2.PrimitiveStepHyperparameter(
data=pipeline_pb2.DataArgument(
data=hyperparameter['data'],
),
)
elif hyperparameter['type'] == ArgumentType.PRIMITIVE:
if isinstance(hyperparameter['data'], typing.Sequence):
hyperparams[name] = pipeline_pb2.PrimitiveStepHyperparameter(
primitives_set=pipeline_pb2.PrimitiveArguments(
data=hyperparameter['data'],
),
)
else:
hyperparams[name] = pipeline_pb2.PrimitiveStepHyperparameter(
primitive=pipeline_pb2.PrimitiveArgument(
data=hyperparameter['data'],
),
)
elif hyperparameter['type'] == ArgumentType.VALUE:
hyperparams[name] = pipeline_pb2.PrimitiveStepHyperparameter(
value=pipeline_pb2.ValueArgument(
data=encode_value({'type': 'object', 'value': hyperparameter['data']}, allowed_value_types, scratch_dir, plasma_put=plasma_put, validate_uri=validate_uri),
),
)
else:
raise exceptions.UnexpectedValueError("Unknown hyperparameter type: {hyperparameter_type}".format(hyperparameter_type=hyperparameter['type']))
# If the primitive is not resolved.
if step.primitive is None:
primitive = encode_primitive_description(step.primitive_description)
else:
primitive = encode_primitive(step.primitive)
steps.append(
pipeline_pb2.PipelineDescriptionStep(
primitive=pipeline_pb2.PrimitivePipelineDescriptionStep(
primitive=primitive,
arguments=arguments,
outputs=[pipeline_pb2.StepOutput(id=output_id) for output_id in step.outputs],
hyperparams=hyperparams,
users=[pipeline_pb2.PipelineDescriptionUser(id=user['id'], reason=user.get('reason', None), rationale=user.get('rationale', None)) for user in step.users],
),
),
)
elif isinstance(step, pipeline_module.SubpipelineStep):
steps.append(
pipeline_pb2.PipelineDescriptionStep(
pipeline=pipeline_pb2.SubpipelinePipelineDescriptionStep(
pipeline=encode_pipeline_description(step.pipeline, allowed_value_types, scratch_dir, plasma_put=plasma_put, validate_uri=validate_uri),
inputs=[pipeline_pb2.StepInput(data=input_data) for input_data in step.inputs],
outputs=[pipeline_pb2.StepOutput(id=output_id) for output_id in step.outputs],
),
),
)
elif isinstance(step, pipeline_module.PlaceholderStep):
steps.append(
pipeline_pb2.PipelineDescriptionStep(
placeholder=pipeline_pb2.PlaceholderPipelineDescriptionStep(
inputs=[pipeline_pb2.StepInput(data=input_data) for input_data in step.inputs],
outputs=[pipeline_pb2.StepOutput(id=output_id) for output_id in step.outputs],
),
),
)
else:
raise exceptions.UnexpectedValueError("Unknown step type: {step_type}".format(step_type=type(step)))
return pipeline_pb2.PipelineDescription(
id=pipeline.id,
source=source,
created=encode_timestamp(pipeline.created),
context=pipeline.context.value,
name=pipeline.name,
description=pipeline.description,
users=[pipeline_pb2.PipelineDescriptionUser(id=user['id'], reason=user.get('reason', None), rationale=user.get('rationale', None)) for user in pipeline.users],
inputs=[pipeline_pb2.PipelineDescriptionInput(name=input.get('name', None)) for input in pipeline.inputs],
outputs=[pipeline_pb2.PipelineDescriptionOutput(name=output.get('name', None), data=output['data']) for output in pipeline.outputs],
steps=steps,
)
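# Illustrative usage sketch (not part of the original API): encoding a pipeline
# into a GRPC message. This is a hedged example; it assumes the `Pipeline`
# constructor keyword arguments and `add_input` method used by
# `decode_pipeline_description` below, and that `PipelineContext.TESTING` is an
# available enumeration member.
def _example_encode_pipeline_description(scratch_dir):
    pipeline = pipeline_module.Pipeline(context=pipeline_module.PipelineContext.TESTING)
    pipeline.add_input(name='inputs')
    # A real pipeline would also add steps and outputs here.
    return encode_pipeline_description(pipeline, [ValueType.RAW], scratch_dir)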
def _decode_user_description(user_description):
user_description_dict = {'id': user_description.id}
if user_description.reason:
user_description_dict['reason'] = user_description.reason
if user_description.rationale:
user_description_dict['rationale'] = user_description.rationale
return user_description_dict
def decode_pipeline_description(pipeline_description, resolver, *, pipeline_class=None, plasma_get=None, validate_uri=None):
"""
Decodes a GRPC message into a pipeline.
Parameters
----------
pipeline_description : PipelineDescription
A GRPC message.
resolver : Resolver
An instance of primitive and pipeline resolver to use.
pipeline_class : Type[Pipeline]
A pipeline class to use for instances of a pipeline.
By default `d3m.metadata.pipeline.Pipeline`.
plasma_get : Callable
A function to load a value from a Plasma store.
The function takes an object's ID as bytes and should return the
stored value from the Plasma store.
validate_uri : Callable
A function which can validate that a URI is a valid and supported file URI.
The function takes a URI as a string and should throw an exception if the URI is invalid.
Returns
-------
Union[Pipeline, None]
A pipeline instance, or ``None`` if pipeline is not defined.
"""
if pipeline_description.context == pipeline_pb2.PipelineContext.Value('PIPELINE_CONTEXT_UNKNOWN') and not pipeline_description.steps:
return None
if pipeline_class is None:
pipeline_class = pipeline_module.Pipeline
source = {}
if pipeline_description.source.name:
source['name'] = pipeline_description.source.name
if pipeline_description.source.contact:
source['contact'] = pipeline_description.source.contact
if pipeline_description.source.pipelines:
source['from'] = {
'type': 'PIPELINE',
'pipelines': [{'id': pipeline_id} for pipeline_id in pipeline_description.source.pipelines],
}
if not source:
source = None
pipeline = pipeline_class(
pipeline_id=pipeline_description.id, context=pipeline_module.PipelineContext(pipeline_description.context),
created=decode_timestamp(pipeline_description.created), source=source,
name=(pipeline_description.name or None), description=(pipeline_description.description or None),
)
for input_description in pipeline_description.inputs:
pipeline.add_input(input_description.name or None)
for step_description in pipeline_description.steps:
step_type = step_description.WhichOneof('step')
if step_type == 'primitive':
step = pipeline._get_step_class(pipeline_module.PipelineStep.PRIMITIVE)(primitive_description=decode_primitive(step_description.primitive.primitive), resolver=resolver)
for argument_name, argument_description in step_description.primitive.arguments.items():
argument_type = argument_description.WhichOneof('argument')
if argument_type == 'container':
step.add_argument(argument_name, ArgumentType.CONTAINER, argument_description.container.data)
elif argument_type == 'data':
step.add_argument(argument_name, ArgumentType.DATA, argument_description.data.data)
else:
raise exceptions.UnexpectedValueError("Unknown argument type: {argument_type}".format(argument_type=argument_type))
for output_description in step_description.primitive.outputs:
step.add_output(output_description.id)
for hyperparameter_name, hyperparameter_description in step_description.primitive.hyperparams.items():
argument_type = hyperparameter_description.WhichOneof('argument')
if argument_type == 'container':
step.add_hyperparameter(hyperparameter_name, ArgumentType.CONTAINER, hyperparameter_description.container.data)
elif argument_type == 'data':
step.add_hyperparameter(hyperparameter_name, ArgumentType.DATA, hyperparameter_description.data.data)
elif argument_type == 'primitive':
step.add_hyperparameter(hyperparameter_name, ArgumentType.PRIMITIVE, hyperparameter_description.primitive.data)
elif argument_type == 'value':
value = decode_value(hyperparameter_description.value.data, validate_uri=validate_uri)
step.add_hyperparameter(hyperparameter_name, ArgumentType.VALUE, load_value(value, plasma_get=plasma_get, validate_uri=validate_uri))
elif argument_type == 'data_set':
step.add_hyperparameter(hyperparameter_name, ArgumentType.DATA, hyperparameter_description.data_set.data)
elif argument_type == 'primitives_set':
step.add_hyperparameter(hyperparameter_name, ArgumentType.PRIMITIVE, hyperparameter_description.primitives_set.data)
else:
raise exceptions.UnexpectedValueError("Unknown argument type: {argument_type}".format(argument_type=argument_type))
for user_description in step_description.primitive.users:
step.add_user(_decode_user_description(user_description))
elif step_type == 'pipeline':
subpipeline = decode_pipeline_description(step_description.pipeline.pipeline, resolver, pipeline_class=pipeline_class, plasma_get=plasma_get, validate_uri=validate_uri)
step = pipeline._get_step_class(pipeline_module.PipelineStep.SUBPIPELINE)(pipeline_id=step_description.pipeline.pipeline.id, pipeline=subpipeline, resolver=resolver)
for pipeline_input in step_description.pipeline.inputs:
step.add_input(pipeline_input.data)
for pipeline_output in step_description.pipeline.outputs:
step.add_output(pipeline_output.id or None)
elif step_type == 'placeholder':
step = pipeline._get_step_class(pipeline_module.PipelineStep.PLACEHOLDER)(resolver=resolver)
for placeholder_input in step_description.placeholder.inputs:
step.add_input(placeholder_input.data)
for placeholder_output in step_description.placeholder.outputs:
step.add_output(placeholder_output.id)
else:
raise exceptions.InvalidArgumentValueError("Invalid step type '{step_type}'.".format(step_type=step_type))
pipeline.add_step(step)
for output_description in pipeline_description.outputs:
pipeline.add_output(output_description.data, output_description.name or None)
for user_description in pipeline_description.users:
pipeline.add_user(_decode_user_description(user_description))
# Generating JSON also checks it against the pipeline schema.
# This requires all sub-pipelines to be resolved, but this is true in our case.
pipeline.to_json_structure(nest_subpipelines=True)
return pipeline
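# Illustrative round-trip sketch (not part of the original API): decoding a
# message produced by `encode_pipeline_description`. It assumes the standard
# `Resolver` from `d3m.metadata.pipeline`. Note that decoding ends with a
# schema check (`to_json_structure`), so the encoded pipeline has to be
# complete (steps and outputs) for decoding to succeed.
def _example_decode_pipeline_description(message):
    resolver = pipeline_module.Resolver()
    return decode_pipeline_description(message, resolver)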
def encode_performance_metric(metric):
"""
Encodes a dict describing a performance metric into a GRPC message.
Parameters
----------
metric : Dict
A dict with fields ``metric`` and ``params``, where ``metric``
is a ``PerformanceMetric`` enumeration value.
Returns
-------
ProblemPerformanceMetric
A GRPC message.
"""
return problem_pb2.ProblemPerformanceMetric(
metric=metric['metric'].value,
k=metric.get('params', {}).get('k', None),
pos_label=metric.get('params', {}).get('pos_label', None),
)
def decode_performance_metric(metric):
"""
Decodes a GRPC message into a dict describing a performance metric.
Parameters
----------
metric : ProblemPerformanceMetric
A GRPC message.
Returns
-------
Dict
A dict with fields ``metric`` and ``params``, where ``metric``
is a ``PerformanceMetric`` enumeration value.
"""
params = {}
if metric.k:
params['k'] = metric.k
if metric.pos_label:
params['pos_label'] = metric.pos_label
return {
# TODO: Support additional metrics like "LOSS".
'metric': problem_module.PerformanceMetric(metric.metric),
'params': params,
}
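# Illustrative round-trip sketch (not part of the original API) for
# performance metrics. `PerformanceMetric.F1` and the params are hypothetical
# example values; any enumeration member works the same way. Unset params
# (here ``k``) are dropped again on decoding, so the round trip is exact.
def _example_performance_metric_round_trip():
    metric = {
        'metric': problem_module.PerformanceMetric.F1,
        'params': {'pos_label': '1'},
    }
    decoded = decode_performance_metric(encode_performance_metric(metric))
    assert decoded == metric
    return decoded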
def encode_score(score, allowed_value_types, scratch_dir, *, plasma_put=None, validate_uri=None):
"""
Encodes a score description into a GRPC message.
Parameters
----------
score : Dict
A score description is a dict with fields ``metric``,
``fold``, ``targets``, and ``value``.
allowed_value_types : Sequence[ValueType]
A list of allowed value types to encode this value as. This
list is tried in order until encoding succeeds.
scratch_dir : str
Path to a directory to store any temporary files needed during execution.
plasma_put : Callable
A function to store a value into a Plasma store.
The function takes the value to store into the Plasma store and should
return the stored object's ID as bytes.
validate_uri : Callable
A function which can validate that a URI is a valid and supported file URI.
The function takes a URI as a string and should throw an exception if the URI is invalid.
Returns
-------
Score
A GRPC message.
"""
return core_pb2.Score(
metric=encode_performance_metric(score['metric']),
fold=score['fold'],
targets=[problem_pb2.ProblemTarget(target_index=target['target_index'], resource_id=target['resource_id'], column_index=target['column_index'], column_name=target['column_name']) for target in score['targets']],
value=encode_value({'type': 'object', 'value': score['value']}, allowed_value_types, scratch_dir, plasma_put=plasma_put, validate_uri=validate_uri),
)
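# Illustrative sketch (not part of the original API) of encoding a score.
# The ``targets`` entries mirror the fields used by `encode_score` above;
# the metric member, fold, and values are hypothetical example data.
def _example_encode_score(scratch_dir):
    score = {
        'metric': {'metric': problem_module.PerformanceMetric.F1, 'params': {}},
        'fold': 0,
        'targets': [{'target_index': 0, 'resource_id': '0', 'column_index': 1, 'column_name': 'class'}],
        'value': 0.75,
    }
    return encode_score(score, [ValueType.RAW], scratch_dir)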
def encode_raw_value(value):
"""
Encodes a simple Python value into a GRPC message.
Parameters
----------
value : Any
A simple Python value.
Returns
-------
ValueRaw
A GRPC message.
"""
if value is None:
return value_pb2.ValueRaw(null=value_pb2.NullValue.Value('NULL_VALUE'))
elif isinstance(value, bool):
return value_pb2.ValueRaw(bool=value)
elif d3m_utils.is_float(type(value)):
return value_pb2.ValueRaw(double=float(value))
elif d3m_utils.is_int(type(value)):
return value_pb2.ValueRaw(int64=int(value))
elif isinstance(value, str):
return value_pb2.ValueRaw(string=value)
elif isinstance(value, bytes):
return value_pb2.ValueRaw(bytes=value)
elif isinstance(value, (dict, frozendict.frozendict)):
return value_pb2.ValueRaw(dict=value_pb2.ValueDict(items={key: encode_raw_value(val) for key, val in value.items()}))
# We do not want to encode container type "List" as a raw value, so that we do not lose its metadata.
elif isinstance(value, (list, tuple)) and not isinstance(value, container.List):
return value_pb2.ValueRaw(list=value_pb2.ValueList(items=[encode_raw_value(item) for item in value]))
else:
raise exceptions.InvalidArgumentTypeError("Unsupported type '{value_type}' for raw value.".format(value_type=type(value)))
def decode_raw_value(value):
"""
Decodes a GRPC message into a simple Python value.
Parameters
----------
value : ValueRaw
A GRPC message.
Returns
-------
Any
A simple Python value.
"""
value_type = value.WhichOneof('raw')
if value_type == 'null':
return None
elif value_type in ['double', 'int64', 'bool', 'string', 'bytes']:
return getattr(value, value_type)
elif value_type == 'list':
return [decode_raw_value(item) for item in value.list.items]
elif value_type == 'dict':
return {key: decode_raw_value(val) for key, val in value.dict.items.items()}
else:
raise exceptions.InvalidArgumentTypeError("Unsupported raw value type '{value_type}'.".format(value_type=value_type))
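# Illustrative round-trip sketch (not part of the original API) for raw
# values. As the implementations above imply, tuples decode back as lists
# and `frozendict` values decode back as plain dicts, so this example
# sticks to types which round-trip exactly.
def _example_raw_value_round_trip():
    original = {'name': 'example', 'values': [1, 2.5, True, None], 'payload': b'bytes'}
    decoded = decode_raw_value(encode_raw_value(original))
    assert decoded == original
    return decoded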
def encode_timestamp(value):
"""
Encodes a Python ``datetime`` into a GRPC message.
Parameters
----------
value : datetime
A ``datetime`` instance.
Returns
-------
Timestamp
A GRPC message.
"""
if value is None:
return None
if value.tzinfo is None or value.tzinfo.utcoffset(value) is None:
raise exceptions.InvalidArgumentValueError("Value is missing timezone information.")
else:
# Convert to UTC timezone and set "tzinfo" to "datetime.timezone.utc".
# Then we remove timezone information before converting it to GRPC because
# GRPC does not support conversion from timezone-aware datetime objects.
# See: https://github.com/google/protobuf/issues/5003
value = value.astimezone(datetime.timezone.utc).replace(tzinfo=None)
timestamp = timestamp_pb2.Timestamp()
timestamp.FromDatetime(value)
return timestamp
def decode_timestamp(value):
"""
Decodes a GRPC message into a Python ``datetime``.
Parameters
----------
value : Timestamp
A GRPC message.
Returns
-------
datetime
A ``datetime`` instance.
"""
if value is None:
return None
# Default value.
if value == timestamp_pb2.Timestamp():
return None
# Timestamp is in UTC.
value = value.ToDatetime().replace(tzinfo=datetime.timezone.utc)
return value
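# Illustrative round-trip sketch (not part of the original API) for
# timestamps. `encode_timestamp` rejects naive datetimes, so the example
# uses an explicitly UTC-aware value; microsecond precision is preserved.
def _example_timestamp_round_trip():
    now = datetime.datetime.now(datetime.timezone.utc)
    decoded = decode_timestamp(encode_timestamp(now))
    assert decoded == now
    return decoded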
def save_value(value, allowed_value_types, scratch_dir, *, plasma_put=None, raise_error=False):
"""
Saves a raw Python value and returns a dict representing the
value at the *intermediate level*.
It tries to save it based on the allowed value types, potentially saving it to disk
and providing a URI to the location. It uses Python's `tempfile` module to generate
the location.
Parameters
----------
value : Any
A value to save.
allowed_value_types : Sequence[ValueType]
A list of allowed value types to save this value as. This
list is tried in order until encoding succeeds.
scratch_dir : str
Path to a directory to store any temporary files needed during run.
plasma_put : Callable
A function to store a value into a Plasma store.
The function takes the value to store into the Plasma store and should
return the stored object's ID as bytes.
raise_error : bool
If value cannot be encoded, should an exception be raised or
should it be returned as value type ``error``?
Returns
-------
Dict
A dict with ``type`` and ``value`` fields. ``type`` can be one of
``object``, ``dataset_uri``, ``csv_uri``, ``pickle_uri``, ``plasma_id``, ``error``.
``value`` is then a corresponding value for a given type.
``error`` value type is possible only if ``raise_error`` is ``False``.
"""
last_error = None
for allowed_value_type in allowed_value_types:
try:
if allowed_value_type == ValueType.RAW:
if sys.getsizeof(value) <= MAX_WIRE_OBJECT_SIZE and _can_encode_raw(value):
return {
'type': 'object',
'value': value,
}
elif allowed_value_type == ValueType.DATASET_URI:
if isinstance(value, container.Dataset):
dataset_id = str(uuid.uuid4())
# We change the dataset ID to a new value to ensure it is unique.
value = value.copy()
value.metadata = value.metadata.update((), {'id': dataset_id})
dataset_dir = tempfile.mkdtemp(prefix=dataset_id, dir=scratch_dir)
try:
os.chmod(dataset_dir, 0o755)
uri = _fix_uri(os.path.abspath(os.path.join(dataset_dir, 'datasetDoc.json')))
value.save(uri)
return {
'type': 'dataset_uri',
'value': uri,
}
except Exception as error:
# Clean-up the directory, it will not be used.
try:
shutil.rmtree(dataset_dir, ignore_errors=True)
except Exception:
pass
raise error
elif allowed_value_type == ValueType.CSV_URI:
dataframe_value = None
if isinstance(value, container.List):
dataframe_value = container.DataFrame(value)
elif isinstance(value, container.ndarray):
metadata = value.metadata.query((metadata_base.ALL_ELEMENTS,))
if 'dimension' in metadata:
# Extract the column names so we can add them to the created dataframe, falling back to the column index as a string.
num_cols = value.metadata.query((metadata_base.ALL_ELEMENTS,))['dimension']['length']
col_names = [value.metadata.query((metadata_base.ALL_ELEMENTS, i)).get('name', str(i)) for i in range(num_cols)]
else:
col_names = None
dataframe_value = container.DataFrame(value, columns=col_names)
elif isinstance(value, container.DataFrame):
dataframe_value = value
if dataframe_value is not None:
csv_file_descriptor, csv_path = tempfile.mkstemp(suffix='.csv', dir=scratch_dir)
try:
os.chmod(csv_path, 0o644)
with open(csv_file_descriptor, 'w') as csv_file:
runtime_module.export_dataframe(dataframe_value, csv_file)
uri = _fix_uri(os.path.abspath(csv_path))
return {
'type': 'csv_uri',
'value': uri,
}
except Exception as error:
# Clean-up the file, it will not be used.
try:
os.close(csv_file_descriptor)
except Exception:
pass
try:
os.remove(csv_path)
except Exception:
pass
raise error
elif allowed_value_type == ValueType.PICKLE_URI:
value_file_descriptor, value_path = tempfile.mkstemp(suffix='.pickle', dir=scratch_dir)
try:
os.chmod(value_path, 0o644)
with open(value_file_descriptor, 'wb') as value_file:
pickle.dump(value, value_file)
uri = _fix_uri(os.path.abspath(value_path))
return {
'type': 'pickle_uri',
'value': uri,
}
except Exception as error:
# Clean-up the file, it will not be used.
try:
os.close(value_file_descriptor)
except Exception:
pass
try:
os.remove(value_path)
except Exception:
pass
raise error
elif allowed_value_type == ValueType.PICKLE_BLOB:
if sys.getsizeof(value) <= MAX_WIRE_OBJECT_SIZE and _can_pickle(value):
return {
'type': 'object',
'value': value,
}
elif plasma_put is not None and allowed_value_type == ValueType.PLASMA_ID:
object_id = plasma_put(value)
return {
'type': 'plasma_id',
'value': _binary_to_hex(object_id),
}
else:
raise exceptions.UnexpectedValueError("Unknown allowed value type: {allowed_value_type}".format(allowed_value_type=allowed_value_type))
except Exception as error:
last_error = error
# TODO: Add a second pass to try the conversion between "DATASET_URI" and "CSV_URI".
if last_error is not None:
if raise_error:
raise last_error
else:
return {
'type': 'error',
'value': str(last_error),
}
error_message = "None of the allowed value types could encode the value of type '{value_type}'.".format(value_type=type(value))
if raise_error:
raise ValueError(error_message)
else:
return {
'type': 'error',
'value': error_message,
}
def load_value(value, *, plasma_get=None, validate_uri=None):
"""
Loads and returns a raw Python value from a dict representing a value
at the *intermediate level*.
Parameters
----------
value : Dict
A dict with ``type`` and ``value`` fields. ``type`` can be one of
``object``, ``dataset_uri``, ``csv_uri``, ``pickle_uri``, ``plasma_id``, ``error``.
``value`` is then a corresponding value for a given type.
plasma_get : Callable
A function to load a value from a Plasma store.
The function takes an object's ID as bytes and should return the
stored value from the Plasma store.
validate_uri : Callable
A function which can validate that a URI is a valid and supported file URI.
The function takes a URI as a string and should throw an exception if the URI is invalid.
Returns
-------
Any
Loaded raw Python value.
"""
if value['type'] == 'object':
return value['value']
elif value['type'] == 'dataset_uri':
uri = _fix_uri(value['value'])
return container.Dataset.load(uri)
elif value['type'] == 'csv_uri':
uri = _fix_uri(value['value'])
# Pandas requires a host for "file" URIs.
uri = _fix_file_uri_host(uri)
data = pandas.read_csv(
uri,
# We do not want to do any conversion of values at this point.
# This should be done by primitives later on.
dtype=str,
# We always expect one row header.
header=0,
# We want empty strings and not NaNs.
na_filter=False,
encoding='utf8',
low_memory=False,
)
return container.DataFrame(data)
elif value['type'] == 'pickle_uri':
uri = _fix_uri(value['value'])
with _open_file_uri(uri, 'rb', validate_uri=validate_uri) as file:
# TODO: Limit the types of values being able to load to prevent arbitrary code execution by a malicious pickle.
return pickle.load(file)
elif plasma_get is not None and value['type'] == 'plasma_id':
return plasma_get(_hex_to_binary(value['value']))
elif value['type'] == 'error':
raise ValueError("Error in value: {message}".format(message=value['value']))
else:
raise exceptions.UnexpectedValueError("Unknown value type: {value_type}".format(value_type=value['type']))
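# Illustrative round-trip sketch (not part of the original API): `save_value`
# followed by `load_value`. With `ValueType.RAW` allowed and a small value,
# the value stays inline as type ``object`` and no file is written.
def _example_save_and_load_value():
    with tempfile.TemporaryDirectory() as scratch_dir:
        saved = save_value({'a': 1}, [ValueType.RAW], scratch_dir)
        assert saved['type'] == 'object'
        return load_value(saved)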
def encode_value(value, allowed_value_types, scratch_dir, *, plasma_put=None, validate_uri=None):
"""
Encodes a value into a GRPC message.
The input is a dict representation of the value at the *intermediate level*.
Parameters
----------
value : Dict
A dict with ``type`` and ``value`` fields. ``type`` can be one of
``object``, ``dataset_uri``, ``csv_uri``, ``pickle_uri``, ``plasma_id``, ``error``.
``value`` is then a corresponding value for a given type.
allowed_value_types : Sequence[ValueType]
A list of allowed value types to encode this value as. This
list is tried in order until encoding succeeds.
scratch_dir : str
Path to a directory to store any temporary files needed during execution.
plasma_put : Callable
A function to store a value into a Plasma store.
The function takes the value to store into the Plasma store and should
return the stored object's ID as bytes.
validate_uri : Callable
A function which can validate that a URI is a valid and supported file URI.
The function takes a URI as a string and should throw an exception if the URI is invalid.
Returns
-------
Value
A GRPC message.
"""
if validate_uri is None:
def validate_uri(uri):
return uri
assert value['type'] in ['object', 'dataset_uri', 'csv_uri', 'pickle_uri', 'plasma_id', 'error']
if value['type'] == 'error':
return value_pb2.Value(
error=value_pb2.ValueError(message=value['value']),
)
last_error = None
# The first pass is without any conversion and tries to match existing value type to allowed value type.
for allowed_value_type in allowed_value_types:
try:
if allowed_value_type == ValueType.RAW:
if value['type'] == 'object' and sys.getsizeof(value['value']) <= MAX_WIRE_OBJECT_SIZE:
return value_pb2.Value(
raw=encode_raw_value(value['value']),
)
elif allowed_value_type == ValueType.DATASET_URI:
if value['type'] == 'dataset_uri':
uri = _fix_uri(value['value'])
validate_uri(uri)
return value_pb2.Value(
dataset_uri=uri,
)
elif allowed_value_type == ValueType.CSV_URI:
if value['type'] == 'csv_uri':
uri = _fix_uri(value['value'])
validate_uri(uri)
return value_pb2.Value(
csv_uri=uri,
)
elif allowed_value_type == ValueType.PICKLE_URI:
if value['type'] == 'pickle_uri':
uri = _fix_uri(value['value'])
validate_uri(uri)
return value_pb2.Value(
pickle_uri=uri,
)
elif allowed_value_type == ValueType.PICKLE_BLOB:
if value['type'] == 'object' and sys.getsizeof(value['value']) <= MAX_WIRE_OBJECT_SIZE:
return value_pb2.Value(
pickle_blob=pickle.dumps(value['value']),
)
elif allowed_value_type == ValueType.PLASMA_ID:
if value['type'] == 'plasma_id':
return value_pb2.Value(
plasma_id=_hex_to_binary(value['value']),
)
else:
raise exceptions.UnexpectedValueError("Unknown allowed value type: {allowed_value_type}".format(allowed_value_type=allowed_value_type))
except Exception as error:
last_error = error
# The second pass tries to convert between value types to match an allowed value type.
# TODO: Support also conversion between "DATASET_URI" and "CSV_URI".
for allowed_value_type in allowed_value_types:
try:
if allowed_value_type == ValueType.PICKLE_URI and value['type'] == 'object':
value_file_descriptor, value_path = tempfile.mkstemp(suffix='.pickle', dir=scratch_dir)
try:
os.chmod(value_path, 0o644)
with open(value_file_descriptor, 'wb') as value_file:
pickle.dump(value['value'], value_file)
uri = _fix_uri(os.path.abspath(value_path))
validate_uri(uri)
return value_pb2.Value(
pickle_uri=uri,
)
except Exception as error:
# Clean-up the file, it will not be used.
try:
os.close(value_file_descriptor)
except Exception:
pass
try:
os.remove(value_path)
except Exception:
pass
raise error
elif plasma_put is not None and allowed_value_type == ValueType.PLASMA_ID and value['type'] == 'object':
object_id = plasma_put(value['value'])
return value_pb2.Value(
plasma_id=object_id,
)
except Exception as error:
last_error = error
if last_error is not None:
return value_pb2.Value(
error=value_pb2.ValueError(message=str(last_error)),
)
return value_pb2.Value(
error=value_pb2.ValueError(message="None of the allowed value types could encode the value of type '{value_type}'.".format(value_type=type(value))),
)
def decode_value(value, *, validate_uri=None, raise_error=True):
"""
Decodes a GRPC message.
The output is a dict representation of the value at the *intermediate level*.
Parameters
----------
value : Value
A GRPC message to decode.
validate_uri : Callable
A function which can validate that a URI is a valid and supported file URI.
The function takes a URI as a string and should throw an exception if the URI is invalid.
raise_error : bool
If the value represents an error, should an exception be raised or
should it be returned as value type ``error``?
Returns
-------
Dict
A dict with ``type`` and ``value`` fields. ``type`` can be one of
``object``, ``dataset_uri``, ``csv_uri``, ``pickle_uri``, ``plasma_id``, ``error``.
``value`` is then a corresponding value for a given type.
``error`` value type is possible only if ``raise_error`` is ``False``.
"""
if validate_uri is None:
def validate_uri(uri):
return uri
value_type = value.WhichOneof('value')
if value_type == 'error':
if raise_error:
raise ValueError("Error in value: {message}".format(message=value.error.message))
else:
return {
'type': 'error',
'value': value.error.message,
}
elif value_type == 'raw':
value = decode_raw_value(value.raw)
return {
'type': 'object',
'value': value,
}
elif value_type in ['dataset_uri', 'csv_uri', 'pickle_uri']:
uri = getattr(value, value_type)
uri = _fix_uri(uri)
validate_uri(uri)
return {
'type': value_type,
'value': uri,
}
elif value_type == 'pickle_blob':
# TODO: Limit the types of values being able to load to prevent arbitrary code execution by a malicious pickle.
value = pickle.loads(value.pickle_blob)
return {
'type': 'object',
'value': value,
}
elif value_type == 'plasma_id':
return {
'type': 'plasma_id',
'value': _binary_to_hex(value.plasma_id),
}
else:
raise exceptions.InvalidArgumentValueError("Unsupported value type '{value_type}'.".format(value_type=value_type))
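# Illustrative round-trip sketch (not part of the original API): `encode_value`
# followed by `decode_value` on the intermediate-level dict representation,
# using `ValueType.RAW` so the value travels inline in the message.
def _example_value_round_trip(scratch_dir):
    value = {'type': 'object', 'value': [1, 2, 3]}
    decoded = decode_value(encode_value(value, [ValueType.RAW], scratch_dir))
    assert decoded == value
    return decoded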
|
d3m-model-search-master
|
Stanford-D3M-Full/ta3ta2/api/utils.py
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
|
d3m-model-search-master
|
Stanford-D3M-Full/ta3ta2/api/primitive_pb2_grpc.py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: core.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
import ta3ta2.api.pipeline_pb2 as pipeline__pb2
import ta3ta2.api.primitive_pb2 as primitive__pb2
import ta3ta2.api.problem_pb2 as problem__pb2
import ta3ta2.api.value_pb2 as value__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='core.proto',
package='',
syntax='proto3',
serialized_pb=_b('\n\ncore.proto\x1a google/protobuf/descriptor.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x0epipeline.proto\x1a\x0fprimitive.proto\x1a\rproblem.proto\x1a\x0bvalue.proto\"\x9c\x01\n\x14ScoringConfiguration\x12!\n\x06method\x18\x01 \x01(\x0e\x32\x11.EvaluationMethod\x12\r\n\x05\x66olds\x18\x02 \x01(\x05\x12\x18\n\x10train_test_ratio\x18\x03 \x01(\x01\x12\x0f\n\x07shuffle\x18\x04 \x01(\x08\x12\x13\n\x0brandom_seed\x18\x05 \x01(\x05\x12\x12\n\nstratified\x18\x06 \x01(\x08\"x\n\x05Score\x12)\n\x06metric\x18\x01 \x01(\x0b\x32\x19.ProblemPerformanceMetric\x12\x0c\n\x04\x66old\x18\x02 \x01(\x05\x12\x1f\n\x07targets\x18\x03 \x03(\x0b\x32\x0e.ProblemTarget\x12\x15\n\x05value\x18\x04 \x01(\x0b\x32\x06.Value\"\x8d\x01\n\x08Progress\x12\x1d\n\x05state\x18\x01 \x01(\x0e\x32\x0e.ProgressState\x12\x0e\n\x06status\x18\x02 \x01(\t\x12)\n\x05start\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\'\n\x03\x65nd\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"O\n\x14UpdateProblemRequest\x12\x11\n\tsearch_id\x18\x01 \x01(\t\x12$\n\x07problem\x18\x02 \x01(\x0b\x32\x13.ProblemDescription\"\x17\n\x15UpdateProblemResponse\"\xf2\x01\n\x16SearchSolutionsRequest\x12\x12\n\nuser_agent\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\t\x12\x12\n\ntime_bound\x18\x03 \x01(\x01\x12\x10\n\x08priority\x18\x04 \x01(\x01\x12\'\n\x13\x61llowed_value_types\x18\x05 \x03(\x0e\x32\n.ValueType\x12$\n\x07problem\x18\x06 \x01(\x0b\x32\x13.ProblemDescription\x12&\n\x08template\x18\x07 \x01(\x0b\x32\x14.PipelineDescription\x12\x16\n\x06inputs\x18\x08 \x03(\x0b\x32\x06.Value\",\n\x17SearchSolutionsResponse\x12\x11\n\tsearch_id\x18\x01 \x01(\t\".\n\x19\x45ndSearchSolutionsRequest\x12\x11\n\tsearch_id\x18\x01 \x01(\t\"\x1c\n\x1a\x45ndSearchSolutionsResponse\"/\n\x1aStopSearchSolutionsRequest\x12\x11\n\tsearch_id\x18\x01 \x01(\t\"\x1d\n\x1bStopSearchSolutionsResponse\"c\n\x13SolutionSearchScore\x12\x34\n\x15scoring_configuration\x18\x01 \x01(\x0b\x32\x15.ScoringConfiguration\x12\x16\n\x06scores\x18\x02 \x03(\x0b\x32\x06.Score\"5\n GetSearchSolutionsResultsRequest\x12\x11\n\tsearch_id\x18\x01 \x01(\t\"\xba\x01\n!GetSearchSolutionsResultsResponse\x12\x1b\n\x08progress\x18\x01 \x01(\x0b\x32\t.Progress\x12\x12\n\ndone_ticks\x18\x02 \x01(\x01\x12\x11\n\tall_ticks\x18\x03 \x01(\x01\x12\x13\n\x0bsolution_id\x18\x04 \x01(\t\x12\x16\n\x0einternal_score\x18\x05 \x01(\x01\x12$\n\x06scores\x18\x06 \x03(\x0b\x32\x14.SolutionSearchScore\".\n\x17\x44\x65scribeSolutionRequest\x12\x13\n\x0bsolution_id\x18\x01 \x01(\t\"\x97\x01\n\x18PrimitiveStepDescription\x12?\n\x0bhyperparams\x18\x01 \x03(\x0b\x32*.PrimitiveStepDescription.HyperparamsEntry\x1a:\n\x10HyperparamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x15\n\x05value\x18\x02 \x01(\x0b\x32\x06.Value:\x02\x38\x01\"=\n\x1aSubpipelineStepDescription\x12\x1f\n\x05steps\x18\x01 \x03(\x0b\x32\x10.StepDescription\"z\n\x0fStepDescription\x12.\n\tprimitive\x18\x01 \x01(\x0b\x32\x19.PrimitiveStepDescriptionH\x00\x12/\n\x08pipeline\x18\x02 \x01(\x0b\x32\x1b.SubpipelineStepDescriptionH\x00\x42\x06\n\x04step\"c\n\x18\x44\x65scribeSolutionResponse\x12&\n\x08pipeline\x18\x01 \x01(\x0b\x32\x14.PipelineDescription\x12\x1f\n\x05steps\x18\x02 \x03(\x0b\x32\x10.StepDescription\"I\n\x0cStepProgress\x12\x1b\n\x08progress\x18\x01 \x01(\x0b\x32\t.Progress\x12\x1c\n\x05steps\x18\x02 \x03(\x0b\x32\r.StepProgress\">\n\x0fSolutionRunUser\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07\x63hoosen\x18\x02 \x01(\x08\x12\x0e\n\x06reason\x18\x03 \x01(\t\"\xca\x01\n\x14ScoreSolutionRequest\x12\x13\n\x0bsolution_id\x18\x01 \x01(\t\x12\x16\n\x06inputs\x18\x02 \x03(\x0b\x32\x06.Value\x12\x36\n\x13performance_metrics\x18\x03 \x03(\x0b\x32\x19.ProblemPerformanceMetric\x12\x1f\n\x05users\x18\x04 \x03(\x0b\x32\x10.SolutionRunUser\x12,\n\rconfiguration\x18\x05 \x01(\x0b\x32\x15.ScoringConfiguration\"+\n\x15ScoreSolutionResponse\x12\x12\n\nrequest_id\x18\x01 \x01(\t\"4\n\x1eGetScoreSolutionResultsRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\t\"V\n\x1fGetScoreSolutionResultsResponse\x12\x1b\n\x08progress\x18\x01 \x01(\x0b\x32\t.Progress\x12\x16\n\x06scores\x18\x02 \x03(\x0b\x32\x06.Score\"\xa2\x01\n\x12\x46itSolutionRequest\x12\x13\n\x0bsolution_id\x18\x01 \x01(\t\x12\x16\n\x06inputs\x18\x02 \x03(\x0b\x32\x06.Value\x12\x16\n\x0e\x65xpose_outputs\x18\x03 \x03(\t\x12&\n\x12\x65xpose_value_types\x18\x04 \x03(\x0e\x32\n.ValueType\x12\x1f\n\x05users\x18\x05 \x03(\x0b\x32\x10.SolutionRunUser\")\n\x13\x46itSolutionResponse\x12\x12\n\nrequest_id\x18\x01 \x01(\t\"2\n\x1cGetFitSolutionResultsRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\t\"\x82\x02\n\x1dGetFitSolutionResultsResponse\x12\x1b\n\x08progress\x18\x01 \x01(\x0b\x32\t.Progress\x12\x1c\n\x05steps\x18\x02 \x03(\x0b\x32\r.StepProgress\x12K\n\x0f\x65xposed_outputs\x18\x03 \x03(\x0b\x32\x32.GetFitSolutionResultsResponse.ExposedOutputsEntry\x12\x1a\n\x12\x66itted_solution_id\x18\x04 \x01(\t\x1a=\n\x13\x45xposedOutputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x15\n\x05value\x18\x02 \x01(\x0b\x32\x06.Value:\x02\x38\x01\"\xad\x01\n\x16ProduceSolutionRequest\x12\x1a\n\x12\x66itted_solution_id\x18\x01 \x01(\t\x12\x16\n\x06inputs\x18\x02 \x03(\x0b\x32\x06.Value\x12\x16\n\x0e\x65xpose_outputs\x18\x03 \x03(\t\x12&\n\x12\x65xpose_value_types\x18\x04 \x03(\x0e\x32\n.ValueType\x12\x1f\n\x05users\x18\x05 \x03(\x0b\x32\x10.SolutionRunUser\"-\n\x17ProduceSolutionResponse\x12\x12\n\nrequest_id\x18\x01 \x01(\t\"6\n GetProduceSolutionResultsRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\t\"\xee\x01\n!GetProduceSolutionResultsResponse\x12\x1b\n\x08progress\x18\x01 \x01(\x0b\x32\t.Progress\x12\x1c\n\x05steps\x18\x02 \x03(\x0b\x32\r.StepProgress\x12O\n\x0f\x65xposed_outputs\x18\x03 \x03(\x0b\x32\x36.GetProduceSolutionResultsResponse.ExposedOutputsEntry\x1a=\n\x13\x45xposedOutputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x15\n\x05value\x18\x02 \x01(\x0b\x32\x06.Value:\x02\x38\x01\"A\n\x15SolutionExportRequest\x12\x1a\n\x12\x66itted_solution_id\x18\x01 \x01(\t\x12\x0c\n\x04rank\x18\x02 \x01(\x01\"\x18\n\x16SolutionExportResponse\"\x17\n\x15ListPrimitivesRequest\"8\n\x16ListPrimitivesResponse\x12\x1e\n\nprimitives\x18\x01 \x03(\x0b\x32\n.Primitive\"\x0e\n\x0cHelloRequest\"{\n\rHelloResponse\x12\x12\n\nuser_agent\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\t\x12\'\n\x13\x61llowed_value_types\x18\x03 \x03(\x0e\x32\n.ValueType\x12\x1c\n\x14supported_extensions\x18\x04 \x03(\t*\x82\x01\n\x10\x45valuationMethod\x12\x1f\n\x1b\x45VALUATION_METHOD_UNDEFINED\x10\x00\x12\x0b\n\x07HOLDOUT\x10\x01\x12\n\n\x06K_FOLD\x10\x02\x12\x11\n\rLEAVE_ONE_OUT\x10\x64\x12\x0e\n\nPREDICTION\x10\x65\x12\x11\n\rTRAINING_DATA\x10\x66*[\n\rProgressState\x12\x14\n\x10PROGRESS_UNKNOWN\x10\x00\x12\x0b\n\x07PENDING\x10\x01\x12\x0b\n\x07RUNNING\x10\x02\x12\r\n\tCOMPLETED\x10\x03\x12\x0b\n\x07\x45RRORED\x10\x04\x32\x88\t\n\x04\x43ore\x12\x46\n\x0fSearchSolutions\x12\x17.SearchSolutionsRequest\x1a\x18.SearchSolutionsResponse\"\x00\x12\x66\n\x19GetSearchSolutionsResults\x12!.GetSearchSolutionsResultsRequest\x1a\".GetSearchSolutionsResultsResponse\"\x00\x30\x01\x12O\n\x12\x45ndSearchSolutions\x12\x1a.EndSearchSolutionsRequest\x1a\x1b.EndSearchSolutionsResponse\"\x00\x12R\n\x13StopSearchSolutions\x12\x1b.StopSearchSolutionsRequest\x1a\x1c.StopSearchSolutionsResponse\"\x00\x12I\n\x10\x44\x65scribeSolution\x12\x18.DescribeSolutionRequest\x1a\x19.DescribeSolutionResponse\"\x00\x12@\n\rScoreSolution\x12\x15.ScoreSolutionRequest\x1a\x16.ScoreSolutionResponse\"\x00\x12`\n\x17GetScoreSolutionResults\x12\x1f.GetScoreSolutionResultsRequest\x1a .GetScoreSolutionResultsResponse\"\x00\x30\x01\x12:\n\x0b\x46itSolution\x12\x13.FitSolutionRequest\x1a\x14.FitSolutionResponse\"\x00\x12Z\n\x15GetFitSolutionResults\x12\x1d.GetFitSolutionResultsRequest\x1a\x1e.GetFitSolutionResultsResponse\"\x00\x30\x01\x12\x46\n\x0fProduceSolution\x12\x17.ProduceSolutionRequest\x1a\x18.ProduceSolutionResponse\"\x00\x12\x66\n\x19GetProduceSolutionResults\x12!.GetProduceSolutionResultsRequest\x1a\".GetProduceSolutionResultsResponse\"\x00\x30\x01\x12\x43\n\x0eSolutionExport\x12\x16.SolutionExportRequest\x1a\x17.SolutionExportResponse\"\x00\x12@\n\rUpdateProblem\x12\x15.UpdateProblemRequest\x1a\x16.UpdateProblemResponse\"\x00\x12\x43\n\x0eListPrimitives\x12\x16.ListPrimitivesRequest\x1a\x17.ListPrimitivesResponse\"\x00\x12(\n\x05Hello\x12\r.HelloRequest\x1a\x0e.HelloResponse\"\x00:8\n\x10protocol_version\x12\x1c.google.protobuf.FileOptions\x18\xd4\xa6\x03 \x01(\tB\x16Z\x08pipeline\xa2\xb5\x1a\x08\x32\x30\x31\x38.7.7b\x06proto3')
,
dependencies=[google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,pipeline__pb2.DESCRIPTOR,primitive__pb2.DESCRIPTOR,problem__pb2.DESCRIPTOR,value__pb2.DESCRIPTOR,])
_EVALUATIONMETHOD = _descriptor.EnumDescriptor(
name='EvaluationMethod',
full_name='EvaluationMethod',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='EVALUATION_METHOD_UNDEFINED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='HOLDOUT', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='K_FOLD', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEAVE_ONE_OUT', index=3, number=100,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PREDICTION', index=4, number=101,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TRAINING_DATA', index=5, number=102,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=3847,
serialized_end=3977,
)
_sym_db.RegisterEnumDescriptor(_EVALUATIONMETHOD)
EvaluationMethod = enum_type_wrapper.EnumTypeWrapper(_EVALUATIONMETHOD)
_PROGRESSSTATE = _descriptor.EnumDescriptor(
name='ProgressState',
full_name='ProgressState',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='PROGRESS_UNKNOWN', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PENDING', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RUNNING', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='COMPLETED', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERRORED', index=4, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=3979,
serialized_end=4070,
)
_sym_db.RegisterEnumDescriptor(_PROGRESSSTATE)
ProgressState = enum_type_wrapper.EnumTypeWrapper(_PROGRESSSTATE)
EVALUATION_METHOD_UNDEFINED = 0
HOLDOUT = 1
K_FOLD = 2
LEAVE_ONE_OUT = 100
PREDICTION = 101
TRAINING_DATA = 102
PROGRESS_UNKNOWN = 0
PENDING = 1
RUNNING = 2
COMPLETED = 3
ERRORED = 4
PROTOCOL_VERSION_FIELD_NUMBER = 54100
protocol_version = _descriptor.FieldDescriptor(
name='protocol_version', full_name='protocol_version', index=0,
number=54100, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None, file=DESCRIPTOR)
_SCORINGCONFIGURATION = _descriptor.Descriptor(
name='ScoringConfiguration',
full_name='ScoringConfiguration',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='method', full_name='ScoringConfiguration.method', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='folds', full_name='ScoringConfiguration.folds', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='train_test_ratio', full_name='ScoringConfiguration.train_test_ratio', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='shuffle', full_name='ScoringConfiguration.shuffle', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='random_seed', full_name='ScoringConfiguration.random_seed', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stratified', full_name='ScoringConfiguration.stratified', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=143,
serialized_end=299,
)
_SCORE = _descriptor.Descriptor(
name='Score',
full_name='Score',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='metric', full_name='Score.metric', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fold', full_name='Score.fold', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='targets', full_name='Score.targets', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='Score.value', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=301,
serialized_end=421,
)
_PROGRESS = _descriptor.Descriptor(
name='Progress',
full_name='Progress',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='state', full_name='Progress.state', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='Progress.status', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='start', full_name='Progress.start', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='end', full_name='Progress.end', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=424,
serialized_end=565,
)
_UPDATEPROBLEMREQUEST = _descriptor.Descriptor(
name='UpdateProblemRequest',
full_name='UpdateProblemRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='search_id', full_name='UpdateProblemRequest.search_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='problem', full_name='UpdateProblemRequest.problem', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=567,
serialized_end=646,
)
_UPDATEPROBLEMRESPONSE = _descriptor.Descriptor(
name='UpdateProblemResponse',
full_name='UpdateProblemResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=648,
serialized_end=671,
)
_SEARCHSOLUTIONSREQUEST = _descriptor.Descriptor(
name='SearchSolutionsRequest',
full_name='SearchSolutionsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='user_agent', full_name='SearchSolutionsRequest.user_agent', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version', full_name='SearchSolutionsRequest.version', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='time_bound', full_name='SearchSolutionsRequest.time_bound', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='priority', full_name='SearchSolutionsRequest.priority', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='allowed_value_types', full_name='SearchSolutionsRequest.allowed_value_types', index=4,
number=5, type=14, cpp_type=8, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='problem', full_name='SearchSolutionsRequest.problem', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='template', full_name='SearchSolutionsRequest.template', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='inputs', full_name='SearchSolutionsRequest.inputs', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=674,
serialized_end=916,
)
_SEARCHSOLUTIONSRESPONSE = _descriptor.Descriptor(
name='SearchSolutionsResponse',
full_name='SearchSolutionsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='search_id', full_name='SearchSolutionsResponse.search_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=918,
serialized_end=962,
)
_ENDSEARCHSOLUTIONSREQUEST = _descriptor.Descriptor(
name='EndSearchSolutionsRequest',
full_name='EndSearchSolutionsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='search_id', full_name='EndSearchSolutionsRequest.search_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=964,
serialized_end=1010,
)
_ENDSEARCHSOLUTIONSRESPONSE = _descriptor.Descriptor(
name='EndSearchSolutionsResponse',
full_name='EndSearchSolutionsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1012,
serialized_end=1040,
)
_STOPSEARCHSOLUTIONSREQUEST = _descriptor.Descriptor(
name='StopSearchSolutionsRequest',
full_name='StopSearchSolutionsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='search_id', full_name='StopSearchSolutionsRequest.search_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1042,
serialized_end=1089,
)
_STOPSEARCHSOLUTIONSRESPONSE = _descriptor.Descriptor(
name='StopSearchSolutionsResponse',
full_name='StopSearchSolutionsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1091,
serialized_end=1120,
)
_SOLUTIONSEARCHSCORE = _descriptor.Descriptor(
name='SolutionSearchScore',
full_name='SolutionSearchScore',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='scoring_configuration', full_name='SolutionSearchScore.scoring_configuration', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scores', full_name='SolutionSearchScore.scores', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1122,
serialized_end=1221,
)
_GETSEARCHSOLUTIONSRESULTSREQUEST = _descriptor.Descriptor(
name='GetSearchSolutionsResultsRequest',
full_name='GetSearchSolutionsResultsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='search_id', full_name='GetSearchSolutionsResultsRequest.search_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1223,
serialized_end=1276,
)
_GETSEARCHSOLUTIONSRESULTSRESPONSE = _descriptor.Descriptor(
name='GetSearchSolutionsResultsResponse',
full_name='GetSearchSolutionsResultsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='progress', full_name='GetSearchSolutionsResultsResponse.progress', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='done_ticks', full_name='GetSearchSolutionsResultsResponse.done_ticks', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='all_ticks', full_name='GetSearchSolutionsResultsResponse.all_ticks', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='solution_id', full_name='GetSearchSolutionsResultsResponse.solution_id', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='internal_score', full_name='GetSearchSolutionsResultsResponse.internal_score', index=4,
number=5, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scores', full_name='GetSearchSolutionsResultsResponse.scores', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1279,
serialized_end=1465,
)
_DESCRIBESOLUTIONREQUEST = _descriptor.Descriptor(
name='DescribeSolutionRequest',
full_name='DescribeSolutionRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='solution_id', full_name='DescribeSolutionRequest.solution_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1467,
serialized_end=1513,
)
_PRIMITIVESTEPDESCRIPTION_HYPERPARAMSENTRY = _descriptor.Descriptor(
name='HyperparamsEntry',
full_name='PrimitiveStepDescription.HyperparamsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='PrimitiveStepDescription.HyperparamsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='PrimitiveStepDescription.HyperparamsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1609,
serialized_end=1667,
)
_PRIMITIVESTEPDESCRIPTION = _descriptor.Descriptor(
name='PrimitiveStepDescription',
full_name='PrimitiveStepDescription',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='hyperparams', full_name='PrimitiveStepDescription.hyperparams', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_PRIMITIVESTEPDESCRIPTION_HYPERPARAMSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1516,
serialized_end=1667,
)
_SUBPIPELINESTEPDESCRIPTION = _descriptor.Descriptor(
name='SubpipelineStepDescription',
full_name='SubpipelineStepDescription',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='steps', full_name='SubpipelineStepDescription.steps', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1669,
serialized_end=1730,
)
_STEPDESCRIPTION = _descriptor.Descriptor(
name='StepDescription',
full_name='StepDescription',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='primitive', full_name='StepDescription.primitive', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pipeline', full_name='StepDescription.pipeline', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='step', full_name='StepDescription.step',
index=0, containing_type=None, fields=[]),
],
serialized_start=1732,
serialized_end=1854,
)
_DESCRIBESOLUTIONRESPONSE = _descriptor.Descriptor(
name='DescribeSolutionResponse',
full_name='DescribeSolutionResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pipeline', full_name='DescribeSolutionResponse.pipeline', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='steps', full_name='DescribeSolutionResponse.steps', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1856,
serialized_end=1955,
)
_STEPPROGRESS = _descriptor.Descriptor(
name='StepProgress',
full_name='StepProgress',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='progress', full_name='StepProgress.progress', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='steps', full_name='StepProgress.steps', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1957,
serialized_end=2030,
)
_SOLUTIONRUNUSER = _descriptor.Descriptor(
name='SolutionRunUser',
full_name='SolutionRunUser',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='SolutionRunUser.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
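# Note: 'choosen' (sic) below is the field's actual name in core.proto;
# renaming it here would desynchronize this descriptor from serialized_pb.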
_descriptor.FieldDescriptor(
name='choosen', full_name='SolutionRunUser.choosen', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='SolutionRunUser.reason', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2032,
serialized_end=2094,
)
_SCORESOLUTIONREQUEST = _descriptor.Descriptor(
name='ScoreSolutionRequest',
full_name='ScoreSolutionRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='solution_id', full_name='ScoreSolutionRequest.solution_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='inputs', full_name='ScoreSolutionRequest.inputs', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='performance_metrics', full_name='ScoreSolutionRequest.performance_metrics', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='users', full_name='ScoreSolutionRequest.users', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='configuration', full_name='ScoreSolutionRequest.configuration', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2097,
serialized_end=2299,
)
_SCORESOLUTIONRESPONSE = _descriptor.Descriptor(
name='ScoreSolutionResponse',
full_name='ScoreSolutionResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request_id', full_name='ScoreSolutionResponse.request_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2301,
serialized_end=2344,
)
_GETSCORESOLUTIONRESULTSREQUEST = _descriptor.Descriptor(
name='GetScoreSolutionResultsRequest',
full_name='GetScoreSolutionResultsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request_id', full_name='GetScoreSolutionResultsRequest.request_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2346,
serialized_end=2398,
)
_GETSCORESOLUTIONRESULTSRESPONSE = _descriptor.Descriptor(
name='GetScoreSolutionResultsResponse',
full_name='GetScoreSolutionResultsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='progress', full_name='GetScoreSolutionResultsResponse.progress', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scores', full_name='GetScoreSolutionResultsResponse.scores', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2400,
serialized_end=2486,
)
_FITSOLUTIONREQUEST = _descriptor.Descriptor(
name='FitSolutionRequest',
full_name='FitSolutionRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='solution_id', full_name='FitSolutionRequest.solution_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='inputs', full_name='FitSolutionRequest.inputs', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='expose_outputs', full_name='FitSolutionRequest.expose_outputs', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='expose_value_types', full_name='FitSolutionRequest.expose_value_types', index=3,
number=4, type=14, cpp_type=8, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='users', full_name='FitSolutionRequest.users', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2489,
serialized_end=2651,
)
_FITSOLUTIONRESPONSE = _descriptor.Descriptor(
name='FitSolutionResponse',
full_name='FitSolutionResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request_id', full_name='FitSolutionResponse.request_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2653,
serialized_end=2694,
)
_GETFITSOLUTIONRESULTSREQUEST = _descriptor.Descriptor(
name='GetFitSolutionResultsRequest',
full_name='GetFitSolutionResultsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request_id', full_name='GetFitSolutionResultsRequest.request_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2696,
serialized_end=2746,
)
_GETFITSOLUTIONRESULTSRESPONSE_EXPOSEDOUTPUTSENTRY = _descriptor.Descriptor(
name='ExposedOutputsEntry',
full_name='GetFitSolutionResultsResponse.ExposedOutputsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='GetFitSolutionResultsResponse.ExposedOutputsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='GetFitSolutionResultsResponse.ExposedOutputsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2946,
serialized_end=3007,
)
_GETFITSOLUTIONRESULTSRESPONSE = _descriptor.Descriptor(
name='GetFitSolutionResultsResponse',
full_name='GetFitSolutionResultsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='progress', full_name='GetFitSolutionResultsResponse.progress', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='steps', full_name='GetFitSolutionResultsResponse.steps', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='exposed_outputs', full_name='GetFitSolutionResultsResponse.exposed_outputs', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fitted_solution_id', full_name='GetFitSolutionResultsResponse.fitted_solution_id', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_GETFITSOLUTIONRESULTSRESPONSE_EXPOSEDOUTPUTSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2749,
serialized_end=3007,
)
_PRODUCESOLUTIONREQUEST = _descriptor.Descriptor(
name='ProduceSolutionRequest',
full_name='ProduceSolutionRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='fitted_solution_id', full_name='ProduceSolutionRequest.fitted_solution_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='inputs', full_name='ProduceSolutionRequest.inputs', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='expose_outputs', full_name='ProduceSolutionRequest.expose_outputs', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='expose_value_types', full_name='ProduceSolutionRequest.expose_value_types', index=3,
number=4, type=14, cpp_type=8, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='users', full_name='ProduceSolutionRequest.users', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3010,
serialized_end=3183,
)
_PRODUCESOLUTIONRESPONSE = _descriptor.Descriptor(
name='ProduceSolutionResponse',
full_name='ProduceSolutionResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request_id', full_name='ProduceSolutionResponse.request_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3185,
serialized_end=3230,
)
_GETPRODUCESOLUTIONRESULTSREQUEST = _descriptor.Descriptor(
name='GetProduceSolutionResultsRequest',
full_name='GetProduceSolutionResultsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request_id', full_name='GetProduceSolutionResultsRequest.request_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3232,
serialized_end=3286,
)
_GETPRODUCESOLUTIONRESULTSRESPONSE_EXPOSEDOUTPUTSENTRY = _descriptor.Descriptor(
name='ExposedOutputsEntry',
full_name='GetProduceSolutionResultsResponse.ExposedOutputsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='GetProduceSolutionResultsResponse.ExposedOutputsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='GetProduceSolutionResultsResponse.ExposedOutputsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2946,
serialized_end=3007,
)
_GETPRODUCESOLUTIONRESULTSRESPONSE = _descriptor.Descriptor(
name='GetProduceSolutionResultsResponse',
full_name='GetProduceSolutionResultsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='progress', full_name='GetProduceSolutionResultsResponse.progress', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='steps', full_name='GetProduceSolutionResultsResponse.steps', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='exposed_outputs', full_name='GetProduceSolutionResultsResponse.exposed_outputs', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_GETPRODUCESOLUTIONRESULTSRESPONSE_EXPOSEDOUTPUTSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3289,
serialized_end=3527,
)
_SOLUTIONEXPORTREQUEST = _descriptor.Descriptor(
name='SolutionExportRequest',
full_name='SolutionExportRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='fitted_solution_id', full_name='SolutionExportRequest.fitted_solution_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rank', full_name='SolutionExportRequest.rank', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3529,
serialized_end=3594,
)
_SOLUTIONEXPORTRESPONSE = _descriptor.Descriptor(
name='SolutionExportResponse',
full_name='SolutionExportResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3596,
serialized_end=3620,
)
_LISTPRIMITIVESREQUEST = _descriptor.Descriptor(
name='ListPrimitivesRequest',
full_name='ListPrimitivesRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3622,
serialized_end=3645,
)
_LISTPRIMITIVESRESPONSE = _descriptor.Descriptor(
name='ListPrimitivesResponse',
full_name='ListPrimitivesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='primitives', full_name='ListPrimitivesResponse.primitives', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3647,
serialized_end=3703,
)
_HELLOREQUEST = _descriptor.Descriptor(
name='HelloRequest',
full_name='HelloRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3705,
serialized_end=3719,
)
_HELLORESPONSE = _descriptor.Descriptor(
name='HelloResponse',
full_name='HelloResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='user_agent', full_name='HelloResponse.user_agent', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version', full_name='HelloResponse.version', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='allowed_value_types', full_name='HelloResponse.allowed_value_types', index=2,
number=3, type=14, cpp_type=8, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='supported_extensions', full_name='HelloResponse.supported_extensions', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3721,
serialized_end=3844,
)
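# Resolve cross-references: now that every descriptor exists, point each
# message- and enum-typed field (and each oneof member) at its target descriptor.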
_SCORINGCONFIGURATION.fields_by_name['method'].enum_type = _EVALUATIONMETHOD
_SCORE.fields_by_name['metric'].message_type = problem__pb2._PROBLEMPERFORMANCEMETRIC
_SCORE.fields_by_name['targets'].message_type = problem__pb2._PROBLEMTARGET
_SCORE.fields_by_name['value'].message_type = value__pb2._VALUE
_PROGRESS.fields_by_name['state'].enum_type = _PROGRESSSTATE
_PROGRESS.fields_by_name['start'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_PROGRESS.fields_by_name['end'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_UPDATEPROBLEMREQUEST.fields_by_name['problem'].message_type = problem__pb2._PROBLEMDESCRIPTION
_SEARCHSOLUTIONSREQUEST.fields_by_name['allowed_value_types'].enum_type = value__pb2._VALUETYPE
_SEARCHSOLUTIONSREQUEST.fields_by_name['problem'].message_type = problem__pb2._PROBLEMDESCRIPTION
_SEARCHSOLUTIONSREQUEST.fields_by_name['template'].message_type = pipeline__pb2._PIPELINEDESCRIPTION
_SEARCHSOLUTIONSREQUEST.fields_by_name['inputs'].message_type = value__pb2._VALUE
_SOLUTIONSEARCHSCORE.fields_by_name['scoring_configuration'].message_type = _SCORINGCONFIGURATION
_SOLUTIONSEARCHSCORE.fields_by_name['scores'].message_type = _SCORE
_GETSEARCHSOLUTIONSRESULTSRESPONSE.fields_by_name['progress'].message_type = _PROGRESS
_GETSEARCHSOLUTIONSRESULTSRESPONSE.fields_by_name['scores'].message_type = _SOLUTIONSEARCHSCORE
_PRIMITIVESTEPDESCRIPTION_HYPERPARAMSENTRY.fields_by_name['value'].message_type = value__pb2._VALUE
_PRIMITIVESTEPDESCRIPTION_HYPERPARAMSENTRY.containing_type = _PRIMITIVESTEPDESCRIPTION
_PRIMITIVESTEPDESCRIPTION.fields_by_name['hyperparams'].message_type = _PRIMITIVESTEPDESCRIPTION_HYPERPARAMSENTRY
_SUBPIPELINESTEPDESCRIPTION.fields_by_name['steps'].message_type = _STEPDESCRIPTION
_STEPDESCRIPTION.fields_by_name['primitive'].message_type = _PRIMITIVESTEPDESCRIPTION
_STEPDESCRIPTION.fields_by_name['pipeline'].message_type = _SUBPIPELINESTEPDESCRIPTION
_STEPDESCRIPTION.oneofs_by_name['step'].fields.append(
_STEPDESCRIPTION.fields_by_name['primitive'])
_STEPDESCRIPTION.fields_by_name['primitive'].containing_oneof = _STEPDESCRIPTION.oneofs_by_name['step']
_STEPDESCRIPTION.oneofs_by_name['step'].fields.append(
_STEPDESCRIPTION.fields_by_name['pipeline'])
_STEPDESCRIPTION.fields_by_name['pipeline'].containing_oneof = _STEPDESCRIPTION.oneofs_by_name['step']
_DESCRIBESOLUTIONRESPONSE.fields_by_name['pipeline'].message_type = pipeline__pb2._PIPELINEDESCRIPTION
_DESCRIBESOLUTIONRESPONSE.fields_by_name['steps'].message_type = _STEPDESCRIPTION
_STEPPROGRESS.fields_by_name['progress'].message_type = _PROGRESS
_STEPPROGRESS.fields_by_name['steps'].message_type = _STEPPROGRESS
_SCORESOLUTIONREQUEST.fields_by_name['inputs'].message_type = value__pb2._VALUE
_SCORESOLUTIONREQUEST.fields_by_name['performance_metrics'].message_type = problem__pb2._PROBLEMPERFORMANCEMETRIC
_SCORESOLUTIONREQUEST.fields_by_name['users'].message_type = _SOLUTIONRUNUSER
_SCORESOLUTIONREQUEST.fields_by_name['configuration'].message_type = _SCORINGCONFIGURATION
_GETSCORESOLUTIONRESULTSRESPONSE.fields_by_name['progress'].message_type = _PROGRESS
_GETSCORESOLUTIONRESULTSRESPONSE.fields_by_name['scores'].message_type = _SCORE
_FITSOLUTIONREQUEST.fields_by_name['inputs'].message_type = value__pb2._VALUE
_FITSOLUTIONREQUEST.fields_by_name['expose_value_types'].enum_type = value__pb2._VALUETYPE
_FITSOLUTIONREQUEST.fields_by_name['users'].message_type = _SOLUTIONRUNUSER
_GETFITSOLUTIONRESULTSRESPONSE_EXPOSEDOUTPUTSENTRY.fields_by_name['value'].message_type = value__pb2._VALUE
_GETFITSOLUTIONRESULTSRESPONSE_EXPOSEDOUTPUTSENTRY.containing_type = _GETFITSOLUTIONRESULTSRESPONSE
_GETFITSOLUTIONRESULTSRESPONSE.fields_by_name['progress'].message_type = _PROGRESS
_GETFITSOLUTIONRESULTSRESPONSE.fields_by_name['steps'].message_type = _STEPPROGRESS
_GETFITSOLUTIONRESULTSRESPONSE.fields_by_name['exposed_outputs'].message_type = _GETFITSOLUTIONRESULTSRESPONSE_EXPOSEDOUTPUTSENTRY
_PRODUCESOLUTIONREQUEST.fields_by_name['inputs'].message_type = value__pb2._VALUE
_PRODUCESOLUTIONREQUEST.fields_by_name['expose_value_types'].enum_type = value__pb2._VALUETYPE
_PRODUCESOLUTIONREQUEST.fields_by_name['users'].message_type = _SOLUTIONRUNUSER
_GETPRODUCESOLUTIONRESULTSRESPONSE_EXPOSEDOUTPUTSENTRY.fields_by_name['value'].message_type = value__pb2._VALUE
_GETPRODUCESOLUTIONRESULTSRESPONSE_EXPOSEDOUTPUTSENTRY.containing_type = _GETPRODUCESOLUTIONRESULTSRESPONSE
_GETPRODUCESOLUTIONRESULTSRESPONSE.fields_by_name['progress'].message_type = _PROGRESS
_GETPRODUCESOLUTIONRESULTSRESPONSE.fields_by_name['steps'].message_type = _STEPPROGRESS
_GETPRODUCESOLUTIONRESULTSRESPONSE.fields_by_name['exposed_outputs'].message_type = _GETPRODUCESOLUTIONRESULTSRESPONSE_EXPOSEDOUTPUTSENTRY
_LISTPRIMITIVESRESPONSE.fields_by_name['primitives'].message_type = primitive__pb2._PRIMITIVE
_HELLORESPONSE.fields_by_name['allowed_value_types'].enum_type = value__pb2._VALUETYPE
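# Register every top-level message type, enum type, and extension with the
# file descriptor so each can be looked up by name.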
DESCRIPTOR.message_types_by_name['ScoringConfiguration'] = _SCORINGCONFIGURATION
DESCRIPTOR.message_types_by_name['Score'] = _SCORE
DESCRIPTOR.message_types_by_name['Progress'] = _PROGRESS
DESCRIPTOR.message_types_by_name['UpdateProblemRequest'] = _UPDATEPROBLEMREQUEST
DESCRIPTOR.message_types_by_name['UpdateProblemResponse'] = _UPDATEPROBLEMRESPONSE
DESCRIPTOR.message_types_by_name['SearchSolutionsRequest'] = _SEARCHSOLUTIONSREQUEST
DESCRIPTOR.message_types_by_name['SearchSolutionsResponse'] = _SEARCHSOLUTIONSRESPONSE
DESCRIPTOR.message_types_by_name['EndSearchSolutionsRequest'] = _ENDSEARCHSOLUTIONSREQUEST
DESCRIPTOR.message_types_by_name['EndSearchSolutionsResponse'] = _ENDSEARCHSOLUTIONSRESPONSE
DESCRIPTOR.message_types_by_name['StopSearchSolutionsRequest'] = _STOPSEARCHSOLUTIONSREQUEST
DESCRIPTOR.message_types_by_name['StopSearchSolutionsResponse'] = _STOPSEARCHSOLUTIONSRESPONSE
DESCRIPTOR.message_types_by_name['SolutionSearchScore'] = _SOLUTIONSEARCHSCORE
DESCRIPTOR.message_types_by_name['GetSearchSolutionsResultsRequest'] = _GETSEARCHSOLUTIONSRESULTSREQUEST
DESCRIPTOR.message_types_by_name['GetSearchSolutionsResultsResponse'] = _GETSEARCHSOLUTIONSRESULTSRESPONSE
DESCRIPTOR.message_types_by_name['DescribeSolutionRequest'] = _DESCRIBESOLUTIONREQUEST
DESCRIPTOR.message_types_by_name['PrimitiveStepDescription'] = _PRIMITIVESTEPDESCRIPTION
DESCRIPTOR.message_types_by_name['SubpipelineStepDescription'] = _SUBPIPELINESTEPDESCRIPTION
DESCRIPTOR.message_types_by_name['StepDescription'] = _STEPDESCRIPTION
DESCRIPTOR.message_types_by_name['DescribeSolutionResponse'] = _DESCRIBESOLUTIONRESPONSE
DESCRIPTOR.message_types_by_name['StepProgress'] = _STEPPROGRESS
DESCRIPTOR.message_types_by_name['SolutionRunUser'] = _SOLUTIONRUNUSER
DESCRIPTOR.message_types_by_name['ScoreSolutionRequest'] = _SCORESOLUTIONREQUEST
DESCRIPTOR.message_types_by_name['ScoreSolutionResponse'] = _SCORESOLUTIONRESPONSE
DESCRIPTOR.message_types_by_name['GetScoreSolutionResultsRequest'] = _GETSCORESOLUTIONRESULTSREQUEST
DESCRIPTOR.message_types_by_name['GetScoreSolutionResultsResponse'] = _GETSCORESOLUTIONRESULTSRESPONSE
DESCRIPTOR.message_types_by_name['FitSolutionRequest'] = _FITSOLUTIONREQUEST
DESCRIPTOR.message_types_by_name['FitSolutionResponse'] = _FITSOLUTIONRESPONSE
DESCRIPTOR.message_types_by_name['GetFitSolutionResultsRequest'] = _GETFITSOLUTIONRESULTSREQUEST
DESCRIPTOR.message_types_by_name['GetFitSolutionResultsResponse'] = _GETFITSOLUTIONRESULTSRESPONSE
DESCRIPTOR.message_types_by_name['ProduceSolutionRequest'] = _PRODUCESOLUTIONREQUEST
DESCRIPTOR.message_types_by_name['ProduceSolutionResponse'] = _PRODUCESOLUTIONRESPONSE
DESCRIPTOR.message_types_by_name['GetProduceSolutionResultsRequest'] = _GETPRODUCESOLUTIONRESULTSREQUEST
DESCRIPTOR.message_types_by_name['GetProduceSolutionResultsResponse'] = _GETPRODUCESOLUTIONRESULTSRESPONSE
DESCRIPTOR.message_types_by_name['SolutionExportRequest'] = _SOLUTIONEXPORTREQUEST
DESCRIPTOR.message_types_by_name['SolutionExportResponse'] = _SOLUTIONEXPORTRESPONSE
DESCRIPTOR.message_types_by_name['ListPrimitivesRequest'] = _LISTPRIMITIVESREQUEST
DESCRIPTOR.message_types_by_name['ListPrimitivesResponse'] = _LISTPRIMITIVESRESPONSE
DESCRIPTOR.message_types_by_name['HelloRequest'] = _HELLOREQUEST
DESCRIPTOR.message_types_by_name['HelloResponse'] = _HELLORESPONSE
DESCRIPTOR.enum_types_by_name['EvaluationMethod'] = _EVALUATIONMETHOD
DESCRIPTOR.enum_types_by_name['ProgressState'] = _PROGRESSSTATE
DESCRIPTOR.extensions_by_name['protocol_version'] = protocol_version
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
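# Build the concrete Python message classes from their descriptors via the
# reflection API; RegisterMessage makes each class retrievable from the
# symbol database by its full name.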
ScoringConfiguration = _reflection.GeneratedProtocolMessageType('ScoringConfiguration', (_message.Message,), dict(
DESCRIPTOR = _SCORINGCONFIGURATION,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:ScoringConfiguration)
))
_sym_db.RegisterMessage(ScoringConfiguration)
Score = _reflection.GeneratedProtocolMessageType('Score', (_message.Message,), dict(
DESCRIPTOR = _SCORE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:Score)
))
_sym_db.RegisterMessage(Score)
Progress = _reflection.GeneratedProtocolMessageType('Progress', (_message.Message,), dict(
DESCRIPTOR = _PROGRESS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:Progress)
))
_sym_db.RegisterMessage(Progress)
UpdateProblemRequest = _reflection.GeneratedProtocolMessageType('UpdateProblemRequest', (_message.Message,), dict(
DESCRIPTOR = _UPDATEPROBLEMREQUEST,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:UpdateProblemRequest)
))
_sym_db.RegisterMessage(UpdateProblemRequest)
UpdateProblemResponse = _reflection.GeneratedProtocolMessageType('UpdateProblemResponse', (_message.Message,), dict(
DESCRIPTOR = _UPDATEPROBLEMRESPONSE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:UpdateProblemResponse)
))
_sym_db.RegisterMessage(UpdateProblemResponse)
SearchSolutionsRequest = _reflection.GeneratedProtocolMessageType('SearchSolutionsRequest', (_message.Message,), dict(
DESCRIPTOR = _SEARCHSOLUTIONSREQUEST,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:SearchSolutionsRequest)
))
_sym_db.RegisterMessage(SearchSolutionsRequest)
SearchSolutionsResponse = _reflection.GeneratedProtocolMessageType('SearchSolutionsResponse', (_message.Message,), dict(
DESCRIPTOR = _SEARCHSOLUTIONSRESPONSE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:SearchSolutionsResponse)
))
_sym_db.RegisterMessage(SearchSolutionsResponse)
EndSearchSolutionsRequest = _reflection.GeneratedProtocolMessageType('EndSearchSolutionsRequest', (_message.Message,), dict(
DESCRIPTOR = _ENDSEARCHSOLUTIONSREQUEST,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:EndSearchSolutionsRequest)
))
_sym_db.RegisterMessage(EndSearchSolutionsRequest)
EndSearchSolutionsResponse = _reflection.GeneratedProtocolMessageType('EndSearchSolutionsResponse', (_message.Message,), dict(
DESCRIPTOR = _ENDSEARCHSOLUTIONSRESPONSE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:EndSearchSolutionsResponse)
))
_sym_db.RegisterMessage(EndSearchSolutionsResponse)
StopSearchSolutionsRequest = _reflection.GeneratedProtocolMessageType('StopSearchSolutionsRequest', (_message.Message,), dict(
DESCRIPTOR = _STOPSEARCHSOLUTIONSREQUEST,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:StopSearchSolutionsRequest)
))
_sym_db.RegisterMessage(StopSearchSolutionsRequest)
StopSearchSolutionsResponse = _reflection.GeneratedProtocolMessageType('StopSearchSolutionsResponse', (_message.Message,), dict(
DESCRIPTOR = _STOPSEARCHSOLUTIONSRESPONSE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:StopSearchSolutionsResponse)
))
_sym_db.RegisterMessage(StopSearchSolutionsResponse)
SolutionSearchScore = _reflection.GeneratedProtocolMessageType('SolutionSearchScore', (_message.Message,), dict(
DESCRIPTOR = _SOLUTIONSEARCHSCORE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:SolutionSearchScore)
))
_sym_db.RegisterMessage(SolutionSearchScore)
GetSearchSolutionsResultsRequest = _reflection.GeneratedProtocolMessageType('GetSearchSolutionsResultsRequest', (_message.Message,), dict(
DESCRIPTOR = _GETSEARCHSOLUTIONSRESULTSREQUEST,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:GetSearchSolutionsResultsRequest)
))
_sym_db.RegisterMessage(GetSearchSolutionsResultsRequest)
GetSearchSolutionsResultsResponse = _reflection.GeneratedProtocolMessageType('GetSearchSolutionsResultsResponse', (_message.Message,), dict(
DESCRIPTOR = _GETSEARCHSOLUTIONSRESULTSRESPONSE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:GetSearchSolutionsResultsResponse)
))
_sym_db.RegisterMessage(GetSearchSolutionsResultsResponse)
DescribeSolutionRequest = _reflection.GeneratedProtocolMessageType('DescribeSolutionRequest', (_message.Message,), dict(
DESCRIPTOR = _DESCRIBESOLUTIONREQUEST,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:DescribeSolutionRequest)
))
_sym_db.RegisterMessage(DescribeSolutionRequest)
PrimitiveStepDescription = _reflection.GeneratedProtocolMessageType('PrimitiveStepDescription', (_message.Message,), dict(
HyperparamsEntry = _reflection.GeneratedProtocolMessageType('HyperparamsEntry', (_message.Message,), dict(
DESCRIPTOR = _PRIMITIVESTEPDESCRIPTION_HYPERPARAMSENTRY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:PrimitiveStepDescription.HyperparamsEntry)
))
,
DESCRIPTOR = _PRIMITIVESTEPDESCRIPTION,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:PrimitiveStepDescription)
))
_sym_db.RegisterMessage(PrimitiveStepDescription)
_sym_db.RegisterMessage(PrimitiveStepDescription.HyperparamsEntry)
SubpipelineStepDescription = _reflection.GeneratedProtocolMessageType('SubpipelineStepDescription', (_message.Message,), dict(
DESCRIPTOR = _SUBPIPELINESTEPDESCRIPTION,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:SubpipelineStepDescription)
))
_sym_db.RegisterMessage(SubpipelineStepDescription)
StepDescription = _reflection.GeneratedProtocolMessageType('StepDescription', (_message.Message,), dict(
DESCRIPTOR = _STEPDESCRIPTION,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:StepDescription)
))
_sym_db.RegisterMessage(StepDescription)
DescribeSolutionResponse = _reflection.GeneratedProtocolMessageType('DescribeSolutionResponse', (_message.Message,), dict(
DESCRIPTOR = _DESCRIBESOLUTIONRESPONSE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:DescribeSolutionResponse)
))
_sym_db.RegisterMessage(DescribeSolutionResponse)
StepProgress = _reflection.GeneratedProtocolMessageType('StepProgress', (_message.Message,), dict(
DESCRIPTOR = _STEPPROGRESS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:StepProgress)
))
_sym_db.RegisterMessage(StepProgress)
SolutionRunUser = _reflection.GeneratedProtocolMessageType('SolutionRunUser', (_message.Message,), dict(
DESCRIPTOR = _SOLUTIONRUNUSER,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:SolutionRunUser)
))
_sym_db.RegisterMessage(SolutionRunUser)
ScoreSolutionRequest = _reflection.GeneratedProtocolMessageType('ScoreSolutionRequest', (_message.Message,), dict(
DESCRIPTOR = _SCORESOLUTIONREQUEST,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:ScoreSolutionRequest)
))
_sym_db.RegisterMessage(ScoreSolutionRequest)
ScoreSolutionResponse = _reflection.GeneratedProtocolMessageType('ScoreSolutionResponse', (_message.Message,), dict(
DESCRIPTOR = _SCORESOLUTIONRESPONSE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:ScoreSolutionResponse)
))
_sym_db.RegisterMessage(ScoreSolutionResponse)
GetScoreSolutionResultsRequest = _reflection.GeneratedProtocolMessageType('GetScoreSolutionResultsRequest', (_message.Message,), dict(
DESCRIPTOR = _GETSCORESOLUTIONRESULTSREQUEST,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:GetScoreSolutionResultsRequest)
))
_sym_db.RegisterMessage(GetScoreSolutionResultsRequest)
GetScoreSolutionResultsResponse = _reflection.GeneratedProtocolMessageType('GetScoreSolutionResultsResponse', (_message.Message,), dict(
DESCRIPTOR = _GETSCORESOLUTIONRESULTSRESPONSE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:GetScoreSolutionResultsResponse)
))
_sym_db.RegisterMessage(GetScoreSolutionResultsResponse)
FitSolutionRequest = _reflection.GeneratedProtocolMessageType('FitSolutionRequest', (_message.Message,), dict(
DESCRIPTOR = _FITSOLUTIONREQUEST,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:FitSolutionRequest)
))
_sym_db.RegisterMessage(FitSolutionRequest)
FitSolutionResponse = _reflection.GeneratedProtocolMessageType('FitSolutionResponse', (_message.Message,), dict(
DESCRIPTOR = _FITSOLUTIONRESPONSE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:FitSolutionResponse)
))
_sym_db.RegisterMessage(FitSolutionResponse)
GetFitSolutionResultsRequest = _reflection.GeneratedProtocolMessageType('GetFitSolutionResultsRequest', (_message.Message,), dict(
DESCRIPTOR = _GETFITSOLUTIONRESULTSREQUEST,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:GetFitSolutionResultsRequest)
))
_sym_db.RegisterMessage(GetFitSolutionResultsRequest)
GetFitSolutionResultsResponse = _reflection.GeneratedProtocolMessageType('GetFitSolutionResultsResponse', (_message.Message,), dict(
ExposedOutputsEntry = _reflection.GeneratedProtocolMessageType('ExposedOutputsEntry', (_message.Message,), dict(
DESCRIPTOR = _GETFITSOLUTIONRESULTSRESPONSE_EXPOSEDOUTPUTSENTRY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:GetFitSolutionResultsResponse.ExposedOutputsEntry)
))
,
DESCRIPTOR = _GETFITSOLUTIONRESULTSRESPONSE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:GetFitSolutionResultsResponse)
))
_sym_db.RegisterMessage(GetFitSolutionResultsResponse)
_sym_db.RegisterMessage(GetFitSolutionResultsResponse.ExposedOutputsEntry)
ProduceSolutionRequest = _reflection.GeneratedProtocolMessageType('ProduceSolutionRequest', (_message.Message,), dict(
DESCRIPTOR = _PRODUCESOLUTIONREQUEST,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:ProduceSolutionRequest)
))
_sym_db.RegisterMessage(ProduceSolutionRequest)
ProduceSolutionResponse = _reflection.GeneratedProtocolMessageType('ProduceSolutionResponse', (_message.Message,), dict(
DESCRIPTOR = _PRODUCESOLUTIONRESPONSE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:ProduceSolutionResponse)
))
_sym_db.RegisterMessage(ProduceSolutionResponse)
GetProduceSolutionResultsRequest = _reflection.GeneratedProtocolMessageType('GetProduceSolutionResultsRequest', (_message.Message,), dict(
DESCRIPTOR = _GETPRODUCESOLUTIONRESULTSREQUEST,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:GetProduceSolutionResultsRequest)
))
_sym_db.RegisterMessage(GetProduceSolutionResultsRequest)
GetProduceSolutionResultsResponse = _reflection.GeneratedProtocolMessageType('GetProduceSolutionResultsResponse', (_message.Message,), dict(
ExposedOutputsEntry = _reflection.GeneratedProtocolMessageType('ExposedOutputsEntry', (_message.Message,), dict(
DESCRIPTOR = _GETPRODUCESOLUTIONRESULTSRESPONSE_EXPOSEDOUTPUTSENTRY,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:GetProduceSolutionResultsResponse.ExposedOutputsEntry)
))
,
DESCRIPTOR = _GETPRODUCESOLUTIONRESULTSRESPONSE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:GetProduceSolutionResultsResponse)
))
_sym_db.RegisterMessage(GetProduceSolutionResultsResponse)
_sym_db.RegisterMessage(GetProduceSolutionResultsResponse.ExposedOutputsEntry)
SolutionExportRequest = _reflection.GeneratedProtocolMessageType('SolutionExportRequest', (_message.Message,), dict(
DESCRIPTOR = _SOLUTIONEXPORTREQUEST,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:SolutionExportRequest)
))
_sym_db.RegisterMessage(SolutionExportRequest)
SolutionExportResponse = _reflection.GeneratedProtocolMessageType('SolutionExportResponse', (_message.Message,), dict(
DESCRIPTOR = _SOLUTIONEXPORTRESPONSE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:SolutionExportResponse)
))
_sym_db.RegisterMessage(SolutionExportResponse)
ListPrimitivesRequest = _reflection.GeneratedProtocolMessageType('ListPrimitivesRequest', (_message.Message,), dict(
DESCRIPTOR = _LISTPRIMITIVESREQUEST,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:ListPrimitivesRequest)
))
_sym_db.RegisterMessage(ListPrimitivesRequest)
ListPrimitivesResponse = _reflection.GeneratedProtocolMessageType('ListPrimitivesResponse', (_message.Message,), dict(
DESCRIPTOR = _LISTPRIMITIVESRESPONSE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:ListPrimitivesResponse)
))
_sym_db.RegisterMessage(ListPrimitivesResponse)
HelloRequest = _reflection.GeneratedProtocolMessageType('HelloRequest', (_message.Message,), dict(
DESCRIPTOR = _HELLOREQUEST,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:HelloRequest)
))
_sym_db.RegisterMessage(HelloRequest)
HelloResponse = _reflection.GeneratedProtocolMessageType('HelloResponse', (_message.Message,), dict(
DESCRIPTOR = _HELLORESPONSE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:HelloResponse)
))
_sym_db.RegisterMessage(HelloResponse)
google_dot_protobuf_dot_descriptor__pb2.FileOptions.RegisterExtension(protocol_version)
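# File-level options: go_package 'pipeline' plus the custom protocol_version
# extension pinning this API to 2018.7.7; the '8\001' blobs re-mark the
# map-entry messages with map_entry=true.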
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('Z\010pipeline\242\265\032\0102018.7.7'))
_PRIMITIVESTEPDESCRIPTION_HYPERPARAMSENTRY.has_options = True
_PRIMITIVESTEPDESCRIPTION_HYPERPARAMSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_GETFITSOLUTIONRESULTSRESPONSE_EXPOSEDOUTPUTSENTRY.has_options = True
_GETFITSOLUTIONRESULTSRESPONSE_EXPOSEDOUTPUTSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_GETPRODUCESOLUTIONRESULTSRESPONSE_EXPOSEDOUTPUTSENTRY.has_options = True
_GETPRODUCESOLUTIONRESULTSRESPONSE_EXPOSEDOUTPUTSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
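# Service descriptor for Core: fifteen RPCs covering solution search,
# description, scoring, fitting, producing, export, problem updates,
# primitive listing, and the Hello handshake.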
_CORE = _descriptor.ServiceDescriptor(
name='Core',
full_name='Core',
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=4073,
serialized_end=5233,
methods=[
_descriptor.MethodDescriptor(
name='SearchSolutions',
full_name='Core.SearchSolutions',
index=0,
containing_service=None,
input_type=_SEARCHSOLUTIONSREQUEST,
output_type=_SEARCHSOLUTIONSRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='GetSearchSolutionsResults',
full_name='Core.GetSearchSolutionsResults',
index=1,
containing_service=None,
input_type=_GETSEARCHSOLUTIONSRESULTSREQUEST,
output_type=_GETSEARCHSOLUTIONSRESULTSRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='EndSearchSolutions',
full_name='Core.EndSearchSolutions',
index=2,
containing_service=None,
input_type=_ENDSEARCHSOLUTIONSREQUEST,
output_type=_ENDSEARCHSOLUTIONSRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='StopSearchSolutions',
full_name='Core.StopSearchSolutions',
index=3,
containing_service=None,
input_type=_STOPSEARCHSOLUTIONSREQUEST,
output_type=_STOPSEARCHSOLUTIONSRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='DescribeSolution',
full_name='Core.DescribeSolution',
index=4,
containing_service=None,
input_type=_DESCRIBESOLUTIONREQUEST,
output_type=_DESCRIBESOLUTIONRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='ScoreSolution',
full_name='Core.ScoreSolution',
index=5,
containing_service=None,
input_type=_SCORESOLUTIONREQUEST,
output_type=_SCORESOLUTIONRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='GetScoreSolutionResults',
full_name='Core.GetScoreSolutionResults',
index=6,
containing_service=None,
input_type=_GETSCORESOLUTIONRESULTSREQUEST,
output_type=_GETSCORESOLUTIONRESULTSRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='FitSolution',
full_name='Core.FitSolution',
index=7,
containing_service=None,
input_type=_FITSOLUTIONREQUEST,
output_type=_FITSOLUTIONRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='GetFitSolutionResults',
full_name='Core.GetFitSolutionResults',
index=8,
containing_service=None,
input_type=_GETFITSOLUTIONRESULTSREQUEST,
output_type=_GETFITSOLUTIONRESULTSRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='ProduceSolution',
full_name='Core.ProduceSolution',
index=9,
containing_service=None,
input_type=_PRODUCESOLUTIONREQUEST,
output_type=_PRODUCESOLUTIONRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='GetProduceSolutionResults',
full_name='Core.GetProduceSolutionResults',
index=10,
containing_service=None,
input_type=_GETPRODUCESOLUTIONRESULTSREQUEST,
output_type=_GETPRODUCESOLUTIONRESULTSRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='SolutionExport',
full_name='Core.SolutionExport',
index=11,
containing_service=None,
input_type=_SOLUTIONEXPORTREQUEST,
output_type=_SOLUTIONEXPORTRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='UpdateProblem',
full_name='Core.UpdateProblem',
index=12,
containing_service=None,
input_type=_UPDATEPROBLEMREQUEST,
output_type=_UPDATEPROBLEMRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='ListPrimitives',
full_name='Core.ListPrimitives',
index=13,
containing_service=None,
input_type=_LISTPRIMITIVESREQUEST,
output_type=_LISTPRIMITIVESRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='Hello',
full_name='Core.Hello',
index=14,
containing_service=None,
input_type=_HELLOREQUEST,
output_type=_HELLORESPONSE,
options=None,
),
])
_sym_db.RegisterServiceDescriptor(_CORE)
DESCRIPTOR.services_by_name['Core'] = _CORE
# @@protoc_insertion_point(module_scope)
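# --- Illustrative usage sketch (not produced by protoc) ---------------------
# A minimal example of driving the Core service with the messages defined
# above. It assumes a companion stub module, core_pb2_grpc, generated by
# grpcio-tools from the same core.proto, and a TA2 server on localhost:45042;
# the SearchSolutionsRequest.user_agent and search_id fields follow the
# 2018.7.7 TA3TA2 API. All of these are assumptions, not guarantees made by
# this file.
if __name__ == '__main__':
    import grpc  # requires the grpcio package
    import core_pb2_grpc  # assumed companion stub module
    channel = grpc.insecure_channel('localhost:45042')
    stub = core_pb2_grpc.CoreStub(channel)
    # Handshake: the TA2 server reports its user agent, API version, and the
    # value types it accepts (see HelloResponse above).
    print(stub.Hello(HelloRequest()))
    # Start a solution search, then stream back intermediate results;
    # GetSearchSolutionsResults is a server-streaming RPC in this API.
    search = stub.SearchSolutions(SearchSolutionsRequest(user_agent='example-ta3'))
    for result in stub.GetSearchSolutionsResults(
            GetSearchSolutionsResultsRequest(search_id=search.search_id)):
        print(result.solution_id, result.internal_score)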
| d3m-model-search-master | Stanford-D3M-Full/ta3ta2/api/core_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: problem.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='problem.proto',
package='',
syntax='proto3',
serialized_pb=_b('\n\rproblem.proto\x1a google/protobuf/descriptor.proto\"\\\n\x18ProblemPerformanceMetric\x12\"\n\x06metric\x18\x01 \x01(\x0e\x32\x12.PerformanceMetric\x12\t\n\x01k\x18\x02 \x01(\x05\x12\x11\n\tpos_label\x18\x03 \x01(\t\"\xc3\x01\n\x07Problem\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x04 \x01(\t\x12\x1c\n\ttask_type\x18\x05 \x01(\x0e\x32\t.TaskType\x12\"\n\x0ctask_subtype\x18\x06 \x01(\x0e\x32\x0c.TaskSubtype\x12\x36\n\x13performance_metrics\x18\x07 \x03(\x0b\x32\x19.ProblemPerformanceMetric\"~\n\rProblemTarget\x12\x14\n\x0ctarget_index\x18\x01 \x01(\x05\x12\x13\n\x0bresource_id\x18\x02 \x01(\t\x12\x14\n\x0c\x63olumn_index\x18\x03 \x01(\x05\x12\x13\n\x0b\x63olumn_name\x18\x04 \x01(\t\x12\x17\n\x0f\x63lusters_number\x18\x05 \x01(\x05\"C\n\x0cProblemInput\x12\x12\n\ndataset_id\x18\x01 \x01(\t\x12\x1f\n\x07targets\x18\x02 \x03(\x0b\x32\x0e.ProblemTarget\"N\n\x12ProblemDescription\x12\x19\n\x07problem\x18\x01 \x01(\x0b\x32\x08.Problem\x12\x1d\n\x06inputs\x18\x02 \x03(\x0b\x32\r.ProblemInput*\x96\x02\n\x08TaskType\x12\x17\n\x13TASK_TYPE_UNDEFINED\x10\x00\x12\x12\n\x0e\x43LASSIFICATION\x10\x01\x12\x0e\n\nREGRESSION\x10\x02\x12\x0e\n\nCLUSTERING\x10\x03\x12\x13\n\x0fLINK_PREDICTION\x10\x04\x12\x15\n\x11VERTEX_NOMINATION\x10\x05\x12\x17\n\x13\x43OMMUNITY_DETECTION\x10\x06\x12\x14\n\x10GRAPH_CLUSTERING\x10\x07\x12\x12\n\x0eGRAPH_MATCHING\x10\x08\x12\x1b\n\x17TIME_SERIES_FORECASTING\x10\t\x12\x1b\n\x17\x43OLLABORATIVE_FILTERING\x10\n\x12\x14\n\x10OBJECT_DETECTION\x10\x0b*\xa6\x01\n\x0bTaskSubtype\x12\x1a\n\x16TASK_SUBTYPE_UNDEFINED\x10\x00\x12\x08\n\x04NONE\x10\x01\x12\n\n\x06\x42INARY\x10\x02\x12\x0e\n\nMULTICLASS\x10\x03\x12\x0e\n\nMULTILABEL\x10\x04\x12\x0e\n\nUNIVARIATE\x10\x05\x12\x10\n\x0cMULTIVARIATE\x10\x06\x12\x0f\n\x0bOVERLAPPING\x10\x07\x12\x12\n\x0eNONOVERLAPPING\x10\x08*\xb2\x03\n\x11PerformanceMetric\x12\x14\n\x10METRIC_UNDEFINED\x10\x00\x12\x0c\n\x08\x41\x43\x43URACY\x10\x01\x12\r\n\tPRECISION\x10\x02\x12\n\n\x06RECALL\x10\x03\x12\x06\n\x02\x46\x31\x10\x04\x12\x0c\n\x08\x46\x31_MICRO\x10\x05\x12\x0c\n\x08\x46\x31_MACRO\x10\x06\x12\x0b\n\x07ROC_AUC\x10\x07\x12\x11\n\rROC_AUC_MICRO\x10\x08\x12\x11\n\rROC_AUC_MACRO\x10\t\x12\x16\n\x12MEAN_SQUARED_ERROR\x10\n\x12\x1b\n\x17ROOT_MEAN_SQUARED_ERROR\x10\x0b\x12\x1f\n\x1bROOT_MEAN_SQUARED_ERROR_AVG\x10\x0c\x12\x17\n\x13MEAN_ABSOLUTE_ERROR\x10\r\x12\r\n\tR_SQUARED\x10\x0e\x12!\n\x1dNORMALIZED_MUTUAL_INFORMATION\x10\x0f\x12\x1c\n\x18JACCARD_SIMILARITY_SCORE\x10\x10\x12\x16\n\x12PRECISION_AT_TOP_K\x10\x11\x12&\n\"OBJECT_DETECTION_AVERAGE_PRECISION\x10\x12\x12\x08\n\x04LOSS\x10\x64\x42\nZ\x08pipelineb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,])
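# Enum descriptors for the problem ontology follow: TaskType, TaskSubtype,
# and PerformanceMetric, each wrapped for name/number lookup after registration.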
_TASKTYPE = _descriptor.EnumDescriptor(
name='TaskType',
full_name='TaskType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='TASK_TYPE_UNDEFINED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CLASSIFICATION', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='REGRESSION', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CLUSTERING', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LINK_PREDICTION', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='VERTEX_NOMINATION', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='COMMUNITY_DETECTION', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GRAPH_CLUSTERING', index=7, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GRAPH_MATCHING', index=8, number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TIME_SERIES_FORECASTING', index=9, number=9,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='COLLABORATIVE_FILTERING', index=10, number=10,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OBJECT_DETECTION', index=11, number=11,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=621,
serialized_end=899,
)
_sym_db.RegisterEnumDescriptor(_TASKTYPE)
TaskType = enum_type_wrapper.EnumTypeWrapper(_TASKTYPE)
_TASKSUBTYPE = _descriptor.EnumDescriptor(
name='TaskSubtype',
full_name='TaskSubtype',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='TASK_SUBTYPE_UNDEFINED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NONE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BINARY', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MULTICLASS', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MULTILABEL', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNIVARIATE', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MULTIVARIATE', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OVERLAPPING', index=7, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NONOVERLAPPING', index=8, number=8,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=902,
serialized_end=1068,
)
_sym_db.RegisterEnumDescriptor(_TASKSUBTYPE)
TaskSubtype = enum_type_wrapper.EnumTypeWrapper(_TASKSUBTYPE)
_PERFORMANCEMETRIC = _descriptor.EnumDescriptor(
name='PerformanceMetric',
full_name='PerformanceMetric',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='METRIC_UNDEFINED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACCURACY', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PRECISION', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RECALL', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='F1', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='F1_MICRO', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='F1_MACRO', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ROC_AUC', index=7, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ROC_AUC_MICRO', index=8, number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ROC_AUC_MACRO', index=9, number=9,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MEAN_SQUARED_ERROR', index=10, number=10,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ROOT_MEAN_SQUARED_ERROR', index=11, number=11,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ROOT_MEAN_SQUARED_ERROR_AVG', index=12, number=12,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MEAN_ABSOLUTE_ERROR', index=13, number=13,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='R_SQUARED', index=14, number=14,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NORMALIZED_MUTUAL_INFORMATION', index=15, number=15,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='JACCARD_SIMILARITY_SCORE', index=16, number=16,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PRECISION_AT_TOP_K', index=17, number=17,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OBJECT_DETECTION_AVERAGE_PRECISION', index=18, number=18,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LOSS', index=19, number=100,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1071,
serialized_end=1505,
)
_sym_db.RegisterEnumDescriptor(_PERFORMANCEMETRIC)
PerformanceMetric = enum_type_wrapper.EnumTypeWrapper(_PERFORMANCEMETRIC)
TASK_TYPE_UNDEFINED = 0
CLASSIFICATION = 1
REGRESSION = 2
CLUSTERING = 3
LINK_PREDICTION = 4
VERTEX_NOMINATION = 5
COMMUNITY_DETECTION = 6
GRAPH_CLUSTERING = 7
GRAPH_MATCHING = 8
TIME_SERIES_FORECASTING = 9
COLLABORATIVE_FILTERING = 10
OBJECT_DETECTION = 11
TASK_SUBTYPE_UNDEFINED = 0
NONE = 1
BINARY = 2
MULTICLASS = 3
MULTILABEL = 4
UNIVARIATE = 5
MULTIVARIATE = 6
OVERLAPPING = 7
NONOVERLAPPING = 8
METRIC_UNDEFINED = 0
ACCURACY = 1
PRECISION = 2
RECALL = 3
F1 = 4
F1_MICRO = 5
F1_MACRO = 6
ROC_AUC = 7
ROC_AUC_MICRO = 8
ROC_AUC_MACRO = 9
MEAN_SQUARED_ERROR = 10
ROOT_MEAN_SQUARED_ERROR = 11
ROOT_MEAN_SQUARED_ERROR_AVG = 12
MEAN_ABSOLUTE_ERROR = 13
R_SQUARED = 14
NORMALIZED_MUTUAL_INFORMATION = 15
JACCARD_SIMILARITY_SCORE = 16
PRECISION_AT_TOP_K = 17
OBJECT_DETECTION_AVERAGE_PRECISION = 18
LOSS = 100
_PROBLEMPERFORMANCEMETRIC = _descriptor.Descriptor(
name='ProblemPerformanceMetric',
full_name='ProblemPerformanceMetric',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='metric', full_name='ProblemPerformanceMetric.metric', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='k', full_name='ProblemPerformanceMetric.k', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pos_label', full_name='ProblemPerformanceMetric.pos_label', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=51,
serialized_end=143,
)
_PROBLEM = _descriptor.Descriptor(
name='Problem',
full_name='Problem',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='Problem.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version', full_name='Problem.version', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='Problem.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='description', full_name='Problem.description', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='task_type', full_name='Problem.task_type', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='task_subtype', full_name='Problem.task_subtype', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='performance_metrics', full_name='Problem.performance_metrics', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=146,
serialized_end=341,
)
_PROBLEMTARGET = _descriptor.Descriptor(
name='ProblemTarget',
full_name='ProblemTarget',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='target_index', full_name='ProblemTarget.target_index', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resource_id', full_name='ProblemTarget.resource_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='column_index', full_name='ProblemTarget.column_index', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='column_name', full_name='ProblemTarget.column_name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='clusters_number', full_name='ProblemTarget.clusters_number', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=343,
serialized_end=469,
)
_PROBLEMINPUT = _descriptor.Descriptor(
name='ProblemInput',
full_name='ProblemInput',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dataset_id', full_name='ProblemInput.dataset_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='targets', full_name='ProblemInput.targets', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=471,
serialized_end=538,
)
_PROBLEMDESCRIPTION = _descriptor.Descriptor(
name='ProblemDescription',
full_name='ProblemDescription',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='problem', full_name='ProblemDescription.problem', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='inputs', full_name='ProblemDescription.inputs', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=540,
serialized_end=618,
)
_PROBLEMPERFORMANCEMETRIC.fields_by_name['metric'].enum_type = _PERFORMANCEMETRIC
_PROBLEM.fields_by_name['task_type'].enum_type = _TASKTYPE
_PROBLEM.fields_by_name['task_subtype'].enum_type = _TASKSUBTYPE
_PROBLEM.fields_by_name['performance_metrics'].message_type = _PROBLEMPERFORMANCEMETRIC
_PROBLEMINPUT.fields_by_name['targets'].message_type = _PROBLEMTARGET
_PROBLEMDESCRIPTION.fields_by_name['problem'].message_type = _PROBLEM
_PROBLEMDESCRIPTION.fields_by_name['inputs'].message_type = _PROBLEMINPUT
DESCRIPTOR.message_types_by_name['ProblemPerformanceMetric'] = _PROBLEMPERFORMANCEMETRIC
DESCRIPTOR.message_types_by_name['Problem'] = _PROBLEM
DESCRIPTOR.message_types_by_name['ProblemTarget'] = _PROBLEMTARGET
DESCRIPTOR.message_types_by_name['ProblemInput'] = _PROBLEMINPUT
DESCRIPTOR.message_types_by_name['ProblemDescription'] = _PROBLEMDESCRIPTION
DESCRIPTOR.enum_types_by_name['TaskType'] = _TASKTYPE
DESCRIPTOR.enum_types_by_name['TaskSubtype'] = _TASKSUBTYPE
DESCRIPTOR.enum_types_by_name['PerformanceMetric'] = _PERFORMANCEMETRIC
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ProblemPerformanceMetric = _reflection.GeneratedProtocolMessageType('ProblemPerformanceMetric', (_message.Message,), dict(
DESCRIPTOR = _PROBLEMPERFORMANCEMETRIC,
__module__ = 'problem_pb2'
# @@protoc_insertion_point(class_scope:ProblemPerformanceMetric)
))
_sym_db.RegisterMessage(ProblemPerformanceMetric)
Problem = _reflection.GeneratedProtocolMessageType('Problem', (_message.Message,), dict(
DESCRIPTOR = _PROBLEM,
__module__ = 'problem_pb2'
# @@protoc_insertion_point(class_scope:Problem)
))
_sym_db.RegisterMessage(Problem)
ProblemTarget = _reflection.GeneratedProtocolMessageType('ProblemTarget', (_message.Message,), dict(
DESCRIPTOR = _PROBLEMTARGET,
__module__ = 'problem_pb2'
# @@protoc_insertion_point(class_scope:ProblemTarget)
))
_sym_db.RegisterMessage(ProblemTarget)
ProblemInput = _reflection.GeneratedProtocolMessageType('ProblemInput', (_message.Message,), dict(
DESCRIPTOR = _PROBLEMINPUT,
__module__ = 'problem_pb2'
# @@protoc_insertion_point(class_scope:ProblemInput)
))
_sym_db.RegisterMessage(ProblemInput)
ProblemDescription = _reflection.GeneratedProtocolMessageType('ProblemDescription', (_message.Message,), dict(
DESCRIPTOR = _PROBLEMDESCRIPTION,
__module__ = 'problem_pb2'
# @@protoc_insertion_point(class_scope:ProblemDescription)
))
_sym_db.RegisterMessage(ProblemDescription)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('Z\010pipeline'))
# @@protoc_insertion_point(module_scope)
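# Example usage of the generated messages (a sketch; assumes this module is imported as problem_pb2):
#   p = Problem(id="p1", task_type=CLASSIFICATION, task_subtype=MULTICLASS)
#   p.performance_metrics.add(metric=F1_MACRO)
#   desc = ProblemDescription(problem=p, inputs=[ProblemInput(dataset_id="d1")])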
|
d3m-model-search-master
|
Stanford-D3M-Full/ta3ta2/api/problem_pb2.py
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
|
d3m-model-search-master
|
Stanford-D3M-Full/ta3ta2/api/value_pb2_grpc.py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: value.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='value.proto',
package='',
syntax='proto3',
serialized_pb=_b('\n\x0bvalue.proto\x1a google/protobuf/descriptor.proto\"\x1d\n\nValueError\x12\x0f\n\x07message\x18\x01 \x01(\t\"%\n\tValueList\x12\x18\n\x05items\x18\x01 \x03(\x0b\x32\t.ValueRaw\"j\n\tValueDict\x12$\n\x05items\x18\x01 \x03(\x0b\x32\x15.ValueDict.ItemsEntry\x1a\x37\n\nItemsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x18\n\x05value\x18\x02 \x01(\x0b\x32\t.ValueRaw:\x02\x38\x01\"\xbb\x01\n\x08ValueRaw\x12\x1a\n\x04null\x18\x01 \x01(\x0e\x32\n.NullValueH\x00\x12\x10\n\x06\x64ouble\x18\x02 \x01(\x01H\x00\x12\x0f\n\x05int64\x18\x03 \x01(\x03H\x00\x12\x0e\n\x04\x62ool\x18\x04 \x01(\x08H\x00\x12\x10\n\x06string\x18\x05 \x01(\tH\x00\x12\x0f\n\x05\x62ytes\x18\x06 \x01(\x0cH\x00\x12\x1a\n\x04list\x18\x07 \x01(\x0b\x32\n.ValueListH\x00\x12\x1a\n\x04\x64ict\x18\x08 \x01(\x0b\x32\n.ValueDictH\x00\x42\x05\n\x03raw\"\xb4\x01\n\x05Value\x12\x1c\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x0b.ValueErrorH\x00\x12\x18\n\x03raw\x18\x02 \x01(\x0b\x32\t.ValueRawH\x00\x12\x15\n\x0b\x64\x61taset_uri\x18\x03 \x01(\tH\x00\x12\x11\n\x07\x63sv_uri\x18\x04 \x01(\tH\x00\x12\x14\n\npickle_uri\x18\x05 \x01(\tH\x00\x12\x15\n\x0bpickle_blob\x18\x06 \x01(\x0cH\x00\x12\x13\n\tplasma_id\x18\x07 \x01(\x0cH\x00\x42\x07\n\x05value*|\n\tValueType\x12\x18\n\x14VALUE_TYPE_UNDEFINED\x10\x00\x12\x07\n\x03RAW\x10\x01\x12\x0f\n\x0b\x44\x41TASET_URI\x10\x02\x12\x0b\n\x07\x43SV_URI\x10\x03\x12\x0e\n\nPICKLE_URI\x10\x04\x12\x0f\n\x0bPICKLE_BLOB\x10\x05\x12\r\n\tPLASMA_ID\x10\x06*\x1b\n\tNullValue\x12\x0e\n\nNULL_VALUE\x10\x00\x42\nZ\x08pipelineb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,])
_VALUETYPE = _descriptor.EnumDescriptor(
name='ValueType',
full_name='ValueType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='VALUE_TYPE_UNDEFINED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RAW', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATASET_URI', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CSV_URI', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PICKLE_URI', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PICKLE_BLOB', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PLASMA_ID', index=6, number=6,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=600,
serialized_end=724,
)
_sym_db.RegisterEnumDescriptor(_VALUETYPE)
ValueType = enum_type_wrapper.EnumTypeWrapper(_VALUETYPE)
_NULLVALUE = _descriptor.EnumDescriptor(
name='NullValue',
full_name='NullValue',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='NULL_VALUE', index=0, number=0,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=726,
serialized_end=753,
)
_sym_db.RegisterEnumDescriptor(_NULLVALUE)
NullValue = enum_type_wrapper.EnumTypeWrapper(_NULLVALUE)
VALUE_TYPE_UNDEFINED = 0
RAW = 1
DATASET_URI = 2
CSV_URI = 3
PICKLE_URI = 4
PICKLE_BLOB = 5
PLASMA_ID = 6
NULL_VALUE = 0
_VALUEERROR = _descriptor.Descriptor(
name='ValueError',
full_name='ValueError',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='message', full_name='ValueError.message', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=49,
serialized_end=78,
)
_VALUELIST = _descriptor.Descriptor(
name='ValueList',
full_name='ValueList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='items', full_name='ValueList.items', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=80,
serialized_end=117,
)
_VALUEDICT_ITEMSENTRY = _descriptor.Descriptor(
name='ItemsEntry',
full_name='ValueDict.ItemsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='ValueDict.ItemsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='ValueDict.ItemsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=170,
serialized_end=225,
)
_VALUEDICT = _descriptor.Descriptor(
name='ValueDict',
full_name='ValueDict',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='items', full_name='ValueDict.items', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_VALUEDICT_ITEMSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=119,
serialized_end=225,
)
_VALUERAW = _descriptor.Descriptor(
name='ValueRaw',
full_name='ValueRaw',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='null', full_name='ValueRaw.null', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='double', full_name='ValueRaw.double', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='int64', full_name='ValueRaw.int64', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bool', full_name='ValueRaw.bool', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='string', full_name='ValueRaw.string', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bytes', full_name='ValueRaw.bytes', index=5,
number=6, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='list', full_name='ValueRaw.list', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dict', full_name='ValueRaw.dict', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='raw', full_name='ValueRaw.raw',
index=0, containing_type=None, fields=[]),
],
serialized_start=228,
serialized_end=415,
)
_VALUE = _descriptor.Descriptor(
name='Value',
full_name='Value',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='error', full_name='Value.error', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='raw', full_name='Value.raw', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dataset_uri', full_name='Value.dataset_uri', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='csv_uri', full_name='Value.csv_uri', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pickle_uri', full_name='Value.pickle_uri', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pickle_blob', full_name='Value.pickle_blob', index=5,
number=6, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='plasma_id', full_name='Value.plasma_id', index=6,
number=7, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='value', full_name='Value.value',
index=0, containing_type=None, fields=[]),
],
serialized_start=418,
serialized_end=598,
)
_VALUELIST.fields_by_name['items'].message_type = _VALUERAW
_VALUEDICT_ITEMSENTRY.fields_by_name['value'].message_type = _VALUERAW
_VALUEDICT_ITEMSENTRY.containing_type = _VALUEDICT
_VALUEDICT.fields_by_name['items'].message_type = _VALUEDICT_ITEMSENTRY
_VALUERAW.fields_by_name['null'].enum_type = _NULLVALUE
_VALUERAW.fields_by_name['list'].message_type = _VALUELIST
_VALUERAW.fields_by_name['dict'].message_type = _VALUEDICT
_VALUERAW.oneofs_by_name['raw'].fields.append(
_VALUERAW.fields_by_name['null'])
_VALUERAW.fields_by_name['null'].containing_oneof = _VALUERAW.oneofs_by_name['raw']
_VALUERAW.oneofs_by_name['raw'].fields.append(
_VALUERAW.fields_by_name['double'])
_VALUERAW.fields_by_name['double'].containing_oneof = _VALUERAW.oneofs_by_name['raw']
_VALUERAW.oneofs_by_name['raw'].fields.append(
_VALUERAW.fields_by_name['int64'])
_VALUERAW.fields_by_name['int64'].containing_oneof = _VALUERAW.oneofs_by_name['raw']
_VALUERAW.oneofs_by_name['raw'].fields.append(
_VALUERAW.fields_by_name['bool'])
_VALUERAW.fields_by_name['bool'].containing_oneof = _VALUERAW.oneofs_by_name['raw']
_VALUERAW.oneofs_by_name['raw'].fields.append(
_VALUERAW.fields_by_name['string'])
_VALUERAW.fields_by_name['string'].containing_oneof = _VALUERAW.oneofs_by_name['raw']
_VALUERAW.oneofs_by_name['raw'].fields.append(
_VALUERAW.fields_by_name['bytes'])
_VALUERAW.fields_by_name['bytes'].containing_oneof = _VALUERAW.oneofs_by_name['raw']
_VALUERAW.oneofs_by_name['raw'].fields.append(
_VALUERAW.fields_by_name['list'])
_VALUERAW.fields_by_name['list'].containing_oneof = _VALUERAW.oneofs_by_name['raw']
_VALUERAW.oneofs_by_name['raw'].fields.append(
_VALUERAW.fields_by_name['dict'])
_VALUERAW.fields_by_name['dict'].containing_oneof = _VALUERAW.oneofs_by_name['raw']
_VALUE.fields_by_name['error'].message_type = _VALUEERROR
_VALUE.fields_by_name['raw'].message_type = _VALUERAW
_VALUE.oneofs_by_name['value'].fields.append(
_VALUE.fields_by_name['error'])
_VALUE.fields_by_name['error'].containing_oneof = _VALUE.oneofs_by_name['value']
_VALUE.oneofs_by_name['value'].fields.append(
_VALUE.fields_by_name['raw'])
_VALUE.fields_by_name['raw'].containing_oneof = _VALUE.oneofs_by_name['value']
_VALUE.oneofs_by_name['value'].fields.append(
_VALUE.fields_by_name['dataset_uri'])
_VALUE.fields_by_name['dataset_uri'].containing_oneof = _VALUE.oneofs_by_name['value']
_VALUE.oneofs_by_name['value'].fields.append(
_VALUE.fields_by_name['csv_uri'])
_VALUE.fields_by_name['csv_uri'].containing_oneof = _VALUE.oneofs_by_name['value']
_VALUE.oneofs_by_name['value'].fields.append(
_VALUE.fields_by_name['pickle_uri'])
_VALUE.fields_by_name['pickle_uri'].containing_oneof = _VALUE.oneofs_by_name['value']
_VALUE.oneofs_by_name['value'].fields.append(
_VALUE.fields_by_name['pickle_blob'])
_VALUE.fields_by_name['pickle_blob'].containing_oneof = _VALUE.oneofs_by_name['value']
_VALUE.oneofs_by_name['value'].fields.append(
_VALUE.fields_by_name['plasma_id'])
_VALUE.fields_by_name['plasma_id'].containing_oneof = _VALUE.oneofs_by_name['value']
DESCRIPTOR.message_types_by_name['ValueError'] = _VALUEERROR
DESCRIPTOR.message_types_by_name['ValueList'] = _VALUELIST
DESCRIPTOR.message_types_by_name['ValueDict'] = _VALUEDICT
DESCRIPTOR.message_types_by_name['ValueRaw'] = _VALUERAW
DESCRIPTOR.message_types_by_name['Value'] = _VALUE
DESCRIPTOR.enum_types_by_name['ValueType'] = _VALUETYPE
DESCRIPTOR.enum_types_by_name['NullValue'] = _NULLVALUE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ValueError = _reflection.GeneratedProtocolMessageType('ValueError', (_message.Message,), dict(
DESCRIPTOR = _VALUEERROR,
__module__ = 'value_pb2'
# @@protoc_insertion_point(class_scope:ValueError)
))
_sym_db.RegisterMessage(ValueError)
ValueList = _reflection.GeneratedProtocolMessageType('ValueList', (_message.Message,), dict(
DESCRIPTOR = _VALUELIST,
__module__ = 'value_pb2'
# @@protoc_insertion_point(class_scope:ValueList)
))
_sym_db.RegisterMessage(ValueList)
ValueDict = _reflection.GeneratedProtocolMessageType('ValueDict', (_message.Message,), dict(
ItemsEntry = _reflection.GeneratedProtocolMessageType('ItemsEntry', (_message.Message,), dict(
DESCRIPTOR = _VALUEDICT_ITEMSENTRY,
__module__ = 'value_pb2'
# @@protoc_insertion_point(class_scope:ValueDict.ItemsEntry)
))
,
DESCRIPTOR = _VALUEDICT,
__module__ = 'value_pb2'
# @@protoc_insertion_point(class_scope:ValueDict)
))
_sym_db.RegisterMessage(ValueDict)
_sym_db.RegisterMessage(ValueDict.ItemsEntry)
ValueRaw = _reflection.GeneratedProtocolMessageType('ValueRaw', (_message.Message,), dict(
DESCRIPTOR = _VALUERAW,
__module__ = 'value_pb2'
# @@protoc_insertion_point(class_scope:ValueRaw)
))
_sym_db.RegisterMessage(ValueRaw)
Value = _reflection.GeneratedProtocolMessageType('Value', (_message.Message,), dict(
DESCRIPTOR = _VALUE,
__module__ = 'value_pb2'
# @@protoc_insertion_point(class_scope:Value)
))
_sym_db.RegisterMessage(Value)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('Z\010pipeline'))
_VALUEDICT_ITEMSENTRY.has_options = True
_VALUEDICT_ITEMSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
# @@protoc_insertion_point(module_scope)
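# Example usage of the generated messages (a sketch; the URI is a placeholder):
#   v = Value(dataset_uri="file:///path/to/datasetDoc.json")
#   wrapped = Value(raw=ValueRaw(string="hello"))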
|
d3m-model-search-master
|
Stanford-D3M-Full/ta3ta2/api/value_pb2.py
|
# Description
# -----------------
# This file trains/tests an LSTM that predicts the performance of a pipeline.
#
# Training:
# One can train a new LSTM by supplying a dataset of pipelines and their respective performance scores.
# The data can be collected by running apps/basic_run.py with the 'save_all_scored_pipelines_path' option turned on.
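# A hypothetical invocation (the exact CLI of apps/basic_run.py is an assumption):
#   python apps/basic_run.py --save_all_scored_pipelines_path data.json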
#
# Testing:
# One can use a trained LSTM to predict the performance of a given pipeline.
# The current testing code is used for validation purposes; to see where the model is actually used in the whole solution,
# refer to LSTMPredictWorker.py in the same directory.
# To test the LSTM model, one should provide (1) a list of pipelines to test and (2) the path to the LSTM model.
#
# Example usage:
# -------------------
# Train: python LSTMMetaLearnerApp.py --mode train --lstm_model_save_path saved_models/test/ --path_to_pipelines data.json
# Test: python LSTMMetaLearnerApp.py --mode test --lstm_model_save_path saved_models/test/ --path_to_pipelines data.json
import sys
import os
import argparse
import json
import numpy as np
from itertools import product
from LSTMMetaLearner import LSTMMetaLearner
from d3m.index import search
# Python args
parser = argparse.ArgumentParser()
parser.add_argument("--n_seeds", help="Number of seeds per hyperparameter search", type=int, default=1)
parser.add_argument("--n_epochs", help="Number of training epochs to perform in train mode", type=int, default=100)
parser.add_argument("--mode", help="Either train or test (train a lstm on given pipeline jsons, score on given pipeline jsons)",
default="train", choices=["train", "test"])
parser.add_argument("--path_to_pipelines", help="Path to pipeline jsons", default=None, type=str)
parser.add_argument("--lstm_model_save_path", help="Path to lstm model (either to save to or to load from)", default=None, type=str)
args = parser.parse_args()
NUM_SEEDS = args.n_seeds
NUM_EPOCHS = args.n_epochs
class LSTMMetaLearnerFramework:
def __init__(self, primitives, data):
self.primitive_dict = {word.lower():ind for (ind, word) in enumerate(primitives)}
self.data = data
def hyperParameterSearch(self, hyperParameters):
'''
Params:
- hyperParameters: dictionary which includes the following values:
- lr (learning rate)
- dropout
- l2_reg
        Tests all combinations of the hyperparameters to identify the best-performing one
'''
print("Starting hyperparameter search")
all_options = [[(key, v) for v in vs] for key, vs in hyperParameters.items()]
param_options = [dict(items) for items in product(*all_options)]
# test each parameter option
perfs = []
best_model = None
best_val = float('inf')
for param_option in param_options:
(lr, dropout, l2_reg) = map(lambda key: param_option[key], ["lr", "dropout", "l2_reg"])
exp_name = "lr_" + str(lr) + "_dropout_" + str(dropout)+ "_l2_reg_" + str(l2_reg)
print("Starting {}".format(exp_name))
val_rmse_list = []
test_rmse_list = []
for i in range(NUM_SEEDS):
print("Starting seed {}".format(i))
np.random.seed(i)
data = np.copy(self.data)
np.random.shuffle(data)
lstm = LSTMMetaLearner(self.primitive_dict)
lstm.constructModel(lr, dropout, l2_reg)
(best_val_rmse, test_rmse_at_best_val_rmse, _, _) = lstm.train(data, NUM_EPOCHS)
val_rmse_list.append(best_val_rmse)
test_rmse_list.append(test_rmse_at_best_val_rmse)
perfs.append((exp_name, val_rmse_list, test_rmse_list))
if np.mean(val_rmse_list) < best_val:
best_model = lstm
best_val = np.mean(val_rmse_list)
print("{}| val : {:.4f}, test : {:.4f}".format(exp_name, np.mean(val_rmse_list), np.mean(test_rmse_list)))
        perfs = sorted(perfs, key=lambda x: np.mean(x[1]))  # ascending: lower validation RMSE is better
        print("all results (sorted by validation RMSE): ", perfs)
        print("best model: {} | val RMSE: {:.4f}, test RMSE std: {:.4f}".format(perfs[0][0], np.mean(perfs[0][1]), np.std(perfs[0][2])))
return best_model
def train(path_to_pipelines, lstm_save_path):
print("Training LSTM...")
with open(path_to_pipelines, "r") as f:
data = json.loads(f.read())
primitive_names = search()
print("In app, length of primitive names: {}".format(len(primitive_names)))
framework = LSTMMetaLearnerFramework(primitive_names, data)
"""hyperParameters = {
"lr": [0.005, 0.001, 0.01, 0.05],
"dropout": [0.2, 0.5],
"l2_reg": [3e-3, 1e-3, 1e-2, 3e-2]
}"""
hyperParameters = {
"lr": [0.01],
"dropout": [0.2],
"l2_reg": [1e-3]
}
lstm = framework.hyperParameterSearch(hyperParameters)
lstm.save(lstm_save_path)
print("Training completed.")
def test(path_to_pipelines, lstm_save_path):
print("Testing LSTM...")
with open(path_to_pipelines, "r") as f:
data = json.loads(f.read())
    testPipelines = data[:5]
lstm = LSTMMetaLearner()
lstm.load(lstm_save_path)
scores = lstm.test(testPipelines)
print(scores)
print("Testing completed")
if __name__ == '__main__':
path_to_pipelines, lstm_save_path = args.path_to_pipelines, args.lstm_model_save_path
assert(path_to_pipelines is not None)
assert(lstm_save_path is not None)
    # Create the save dir if it does not exist
    base_save_dir = os.path.dirname(lstm_save_path)
    if base_save_dir and not os.path.exists(base_save_dir):
        os.makedirs(base_save_dir)
if args.mode == "train":
train(path_to_pipelines, lstm_save_path)
if args.mode == "test":
test(path_to_pipelines, lstm_save_path)
|
d3m-model-search-master
|
Stanford-D3M-Full/experimental/lstm_predictor/LSTMMetaLearnerApp.py
|
from keras.models import Sequential, load_model
from keras.layers.embeddings import Embedding
from keras.layers import Dense, LSTM, BatchNormalization, Activation
from keras import backend
from keras import regularizers
from keras import optimizers
from keras.preprocessing import sequence
import numpy as np
import time
import pickle
import json
from LSTMConstants import MAX_NUM_CONV_LAYERS, EMBEDDING_SIZE, LSTM_DIM, DENSE_LAYER_DIMS, BN_AXIS, BATCH_SIZE
from LSTMUtils import get_data_from_json, split_data, get_model_path
def rmse(y_true, y_pred):
    # root-mean-square error: sqrt(mean((y_pred - y_true)^2))
    return backend.sqrt(backend.mean(backend.square(y_pred - y_true), axis=-1))
class LSTMMetaLearner:
def __init__(self, primitive_dict = None):
self.primitive_dict = primitive_dict
self.model = None
def constructModel(self, lr, dropout, l2_reg):
        '''
        Construct an LSTM model using the given hyperparameters
        '''
        self.model = Sequential()
        vocab_size = len(self.primitive_dict)
        # embed each primitive ID, then run two stacked LSTM layers over the pipeline sequence
        self.model.add(Embedding(vocab_size, EMBEDDING_SIZE, input_length=MAX_NUM_CONV_LAYERS))
        self.model.add(LSTM(LSTM_DIM, return_sequences=True, dropout=dropout))
        self.model.add(LSTM(LSTM_DIM, dropout=dropout))
        # L2-regularized dense head; the final layer must have a single output unit
        for dense_dim in DENSE_LAYER_DIMS:
            self.model.add(Dense(dense_dim, kernel_regularizer=regularizers.l2(l2_reg), bias_regularizer=regularizers.l2(l2_reg)))
            #model.add(BatchNormalization(axis=BN_AXIS))
            self.model.add(Activation("relu"))
        assert DENSE_LAYER_DIMS[-1] == 1
        opt = optimizers.Adam(lr=lr)
        self.model.compile(loss='mean_squared_error', optimizer=opt, metrics=[rmse])
        self.model.summary()
def train(self, pipeline_jsons, num_epochs):
'''
Params:
- pipeline_jsons: list of jsons
- num_epochs: number of training rounds
Returns:
            - (best validation rmse, test rmse at best validation rmse, epoch index of the best validation rmse, full training history)
        Trains the model for 'num_epochs' epochs
'''
(pipeline_data, pipeline_rank) = get_data_from_json(pipeline_jsons)
X = [[self.primitive_dict[step.lower()] for step in pipeline] for pipeline in pipeline_data]
# subtracting the minimum value (which is non-negative) to get data closer to 0
min_rank = min(pipeline_rank)
y = [y - min_rank for y in pipeline_rank]
X_train, X_val, X_test, y_train, y_val, y_test = split_data(X, y)
# Pad X and convert y into numpy arrays
[X_train, X_val, X_test] = map(lambda x: sequence.pad_sequences(x, maxlen=MAX_NUM_CONV_LAYERS), [X_train, X_val, X_test] )
[y_train, y_val, y_test] = map(lambda y: np.array(y), [y_train, y_val, y_test] )
best_val_rmse = float('inf')
test_rmse_at_best_val_rmse = float('inf')
best_val_ind = -1
full_history = []
# train the model, check the validation set RMSE and test set RMSE
for i in range(num_epochs):
history = self.model.fit(X_train, y_train, validation_data = (X_val, y_val),
epochs = 1, batch_size = BATCH_SIZE, verbose = 0)
scores = self.model.evaluate(X_test, y_test, verbose = 0)
full_history.append(history.history)
if history.history["val_rmse"][0] < best_val_rmse:
best_val_rmse = history.history["val_rmse"][0]
test_rmse_at_best_val_rmse = scores[1]
best_val_ind = i
return (best_val_rmse, test_rmse_at_best_val_rmse, best_val_ind, full_history)
def test(self, pipelines, is_json=True):
'''
Params:
- pipelines: list of pipelines to test
            - is_json: indicates whether 'pipelines' contains pipeline JSON strings or lists of primitive names
        Returns:
            scores: the scores predicted by the model
'''
if is_json:
(pipeline_data, _) = get_data_from_json(pipelines)
else:
pipeline_data = pipelines
x = [[self.primitive_dict[step.lower()] for step in pipeline] for pipeline in pipeline_data]
x = sequence.pad_sequences(x, maxlen=MAX_NUM_CONV_LAYERS)
scores = self.model.predict(x, verbose = 0)
return [float(x) for x in scores]
def filterPipelines(self, pipelines, num_output, is_json=True):
'''
Params:
- pipelines: list of pipelines to test
- num_output: number of filtered pipelines to output
            - is_json: indicates whether 'pipelines' contains pipeline JSON strings or lists of primitive names
        Returns:
            - filtered_pipelines: list of the best-performing pipelines, as predicted by the lstm
'''
results = self.test(pipelines, is_json)
num_output = min(num_output, len(results))
        # get the indices of the num_output best results (the lstm predicts a rank, so lower is better)
        inds = sorted(range(len(results)), key=lambda x: results[x])[:num_output]
        best_pipelines = [pipelines[ind] for ind in inds]
return best_pipelines
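    # Usage sketch (hypothetical; assumes a trained/loaded model as in LSTMMetaLearnerApp.py):
    #   top3 = learner.filterPipelines(pipeline_jsons, num_output=3, is_json=True)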
def save(self, path):
(lstm_model_path, json_path) = get_model_path(path)
self.model.save(lstm_model_path) # save network
with open(json_path, "wb") as f:
pickle.dump(self.primitive_dict, f, pickle.HIGHEST_PROTOCOL)
print("LSTM model saved")
def load(self, path):
(lstm_model_path, json_path) = get_model_path(path)
self.model = load_model(lstm_model_path, custom_objects={'rmse': rmse})
with open(json_path, "rb") as f:
self.primitive_dict = pickle.load(f)
print("LSTM model loaded")
|
d3m-model-search-master
|
Stanford-D3M-Full/experimental/lstm_predictor/LSTMMetaLearner.py
|
import numpy as np
import pickle
import json
from experimental.lstm_predictor.LSTMConstants import MAX_NUM_CONV_LAYERS
from experimental.lstm_predictor.LSTMUtils import get_data_from_json, get_model_path
'''
This file loads an LSTM model and filters a given list of data-loading pipelines
to predict which pipelines will perform best.
Used by SklearnStackedLSTMExecutor.py
Note that this is not the most efficient way to do this: we reload the LSTM
every time we have to filter, rather than retaining it in memory. The reason for this
is that we have a lot of known issues with parallelism in Keras.
'''
def LSTM_filter(path, pipelines, num_output, is_json=True):
from keras.models import load_model
from keras import backend as K
from keras.preprocessing import sequence
    def rmse(y_true, y_pred):
        # root-mean-square error: sqrt(mean((y_pred - y_true)^2))
        return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
(lstm_model_path, json_path) = get_model_path(path)
model = load_model(lstm_model_path, custom_objects={'rmse': rmse})
with open(json_path, "rb") as f:
primitive_dict = pickle.load(f)
print("LSTM model loaded")
if is_json:
(pipeline_data, _) = get_data_from_json(pipelines)
else:
pipeline_data = pipelines
x = [[primitive_dict[step.lower()] for step in pipeline] for pipeline in pipeline_data]
x = sequence.pad_sequences(x, maxlen=MAX_NUM_CONV_LAYERS)
scores = model.predict(x, verbose = 0)
results = [float(x) for x in scores]
num_output = min(num_output, len(results))
# get the indices of top num_output results (note lower score predicted by lstm is better)
inds = sorted(range(len(results)), key=lambda x: results[x])[0:num_output]
best_pipelines = [pipelines[ind] for ind in inds]
return best_pipelines
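# Minimal usage sketch (hypothetical path and inputs; SklearnStackedLSTMExecutor.py is the real caller):
#   best = LSTM_filter("saved_models/test/", pipeline_jsons, num_output=5)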
'''
Keeping this code for now: some attempts to get a multi-process worker to work
class LSTMPredictWorker:
def __init__(self, path):
from keras.models import load_model
from keras import backend as K
from keras.preprocessing import sequence
        def rmse(y_true, y_pred):
            return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
lstm_model_path = path
self.model = load_model(lstm_model_path, custom_objects={'rmse': rmse})
_ = self.model.predict(sequence.pad_sequences(np.array([[0,0]]), maxlen=MAX_NUM_CONV_LAYERS)) #warmup
self.session = K.get_session()
self.graph = self.session.graph
self.graph.finalize()
json_path = path + "_primitives_list.json"
with open(json_path, "rb") as f:
self.primitive_dict = pickle.load(f)
def filter_pipelines(self, pipelines, num_output, is_json=True):
from keras.preprocessing import sequence
if is_json:
(pipeline_data, _) = get_data_from_json(pipelines)
else:
pipeline_data = pipelines
x = [[self.primitive_dict[step.lower()] for step in pipeline] for pipeline in pipeline_data]
x = sequence.pad_sequences(x, maxlen=MAX_NUM_CONV_LAYERS)
with self.session.as_default():
with self.graph.as_default():
print("Starting prediction")
scores = self.model.predict(x, verbose = 0)
print("Finished prediction")
results = [float(x) for x in scores]
num_output = min(num_output, len(results))
        # get the indices of the num_output best results (lower predicted score is better)
        inds = sorted(range(len(results)), key=lambda x: results[x])[:num_output]
        best_pipelines = [pipelines[ind] for ind in inds]
return best_pipelines
'''
|
d3m-model-search-master
|
Stanford-D3M-Full/experimental/lstm_predictor/LSTMPredictWorker.py
|
import json
from sklearn.model_selection import train_test_split
def get_data_from_json(pipeline_jsons):
pipeline_data = []
pipeline_rank = []
for pipeline_json in pipeline_jsons:
pipeline = json.loads(pipeline_json)
stepIds = [ step["primitive"]["python_path"] for step in pipeline["steps"]]
pipeline_data.append(stepIds)
if "pipeline_rank" in pipeline:
pipeline_rank.append(pipeline["pipeline_rank"])
return (pipeline_data, pipeline_rank)
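# Example (a hypothetical minimal pipeline JSON with only the fields read above):
#   pj = '{"pipeline_rank": 1, "steps": [{"primitive": {"python_path": "d3m.primitives.data_cleaning.imputer.SKlearn"}}]}'
#   get_data_from_json([pj]) == ([["d3m.primitives.data_cleaning.imputer.SKlearn"]], [1])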
# ratio should be train:validation:test
def split_data(X_data, Y_data, ratio=(0.6, 0.2, 0.2)):
denom = float(sum(ratio))
test_ratio = ratio[2] / denom
val_ratio = ratio[1] / (ratio[0] + ratio[1])
X_train, X_test, y_train, y_test = train_test_split(X_data, Y_data, test_size=test_ratio, random_state=1)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=val_ratio, random_state=1)
return (X_train, X_val, X_test, y_train, y_val, y_test)
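# Worked example: with len(X_data) == 10 and the default ratio, test_ratio = 0.2 (2 samples)
# and val_ratio = 0.25 of the remaining 8 (2 samples), leaving 6 samples for training.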
def get_model_path(path):
if path[-1] != "/" or path[-1] != "\\":
path += "/"
lstm_path = path + "lstm"
json_path = path + "primitives_list.json"
return (lstm_path, json_path)
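# e.g. get_model_path("saved_models/test") returns
#   ("saved_models/test/lstm", "saved_models/test/primitives_list.json")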
|
d3m-model-search-master
|
Stanford-D3M-Full/experimental/lstm_predictor/LSTMUtils.py
|
MAX_NUM_CONV_LAYERS = 15         # maximum pipeline length; sequences are padded/truncated to this
EMBEDDING_SIZE = 40              # dimensionality of the primitive embedding
LSTM_DIM = 100                   # hidden units in each LSTM layer
DENSE_LAYER_DIMS = [100, 10, 1]  # dense head dimensions; the final layer must output a single score
BN_AXIS = 1                      # batch-normalization axis (BN is currently commented out in the model)
BATCH_SIZE = 32                  # training batch size
|
d3m-model-search-master
|
Stanford-D3M-Full/experimental/lstm_predictor/LSTMConstants.py
|
"""Run all tests with filenames beginning with "test*" inside module"""
import sys
import unittest
if __name__ == '__main__':
print("run_tests.py running all tests...")
test_suite = unittest.defaultTestLoader.discover('.', 'test*py')
test_runner = unittest.TextTestRunner(resultclass=unittest.TextTestResult)
result = test_runner.run(test_suite)
sys.exit(not result.wasSuccessful())
|
d3m-model-search-master
|
Stanford-D3M-Full/tests/run_tests.py
|
d3m-model-search-master
|
Stanford-D3M-Full/tests/__init__.py
|
|
d3m-model-search-master
|
Stanford-D3M-Full/tests/unit_tests/__init__.py
|
|
d3m-model-search-master
|
Stanford-D3M-Full/tests/unit_tests/ta3ta2/__init__.py
|
|
import unittest
import os
import utils.utils
import utils.primitive_pipeline_utils
import utils.train_utils
import shutil
import pdb
import numpy as np
class UtilsTest(unittest.TestCase):
def test_get_global_score(self):
# Load baseball
data_uri = utils.utils.get_git_root(os.path.dirname(os.path.abspath(__file__))) + "/test_data/185_baseball"
assert(os.path.exists(data_uri))
problem_doc, dataset = utils.utils.load_data_from_dir(data_uri, mode="train")
# Test global score
f1_scores = [68.5, 30, 10]
global_scores = []
for f1_score in f1_scores:
global_score = utils.utils.get_global_score(f1_score, problem_doc)
global_scores.append(global_score)
# Make sure that scores are sorted (68.5 f1 is better than 30 and 10)
assert(sorted(global_scores) == global_scores)
def test_save_scored_pipeline(self):
# Load baseball
data_uri = utils.utils.get_git_root(os.path.dirname(os.path.abspath(__file__))) + "/test_data/185_baseball"
assert(os.path.exists(data_uri))
problem_doc, dataset = utils.utils.load_data_from_dir(data_uri, mode="train")
# Load a random saved pipeline
pipeline_json_path = (
utils.utils.get_git_root(os.path.dirname(os.path.abspath(__file__))) + "/test_data/185_baseball_pipeline/test_pipeline.json")
with open(pipeline_json_path) as f:
pipeline = utils.primitive_pipeline_utils.load_pipeline(f.read())
        # Create a test output directory
test_outdir = "/tmp/test_outdir"
if os.path.exists(test_outdir):
shutil.rmtree(test_outdir)
local_score = 69
utils.utils.save_scored_pipeline(test_outdir, local_score, pipeline, problem_doc)
        # Make sure the ranked pipeline file was written; the filename encodes the
        # score (here 999999999931.0 == 1e12 - 69, so better pipelines get smaller filenames)
        expected_pipelines_filepath = "/tmp/test_outdir/pipelines_ranked/999999999931.0.json"
assert(os.path.exists(expected_pipelines_filepath))
# The new D3M evaluation doesn't require executables anymore.
#expected_supporting_filepath = "/tmp/test_outdir/supporting_files/999999999931.0/Stanford-D3M-Full/"
#expected_execuables_filepath = "/tmp/test_outdir/executables/999999999931.0.sh"
#assert(os.path.exists(expected_supporting_filepath))
#assert(os.path.exists(expected_execuables_filepath))
def test_save_scored_pipeline_keep_top_5(self):
# Load baseball
data_uri = utils.utils.get_git_root(os.path.dirname(os.path.abspath(__file__))) + "/test_data/185_baseball"
assert(os.path.exists(data_uri))
problem_doc, dataset = utils.utils.load_data_from_dir(data_uri, mode="train")
# Load a random saved pipeline
pipeline_json_path = (
utils.utils.get_git_root(os.path.dirname(os.path.abspath(__file__))) + "/test_data/185_baseball_pipeline/test_pipeline.json")
with open(pipeline_json_path) as f:
pipeline = utils.primitive_pipeline_utils.load_pipeline(f.read())
        # Create a test output directory
test_outdir = "/tmp/test_outdir"
if os.path.exists(test_outdir):
shutil.rmtree(test_outdir)
scores = [100, 90, 50, 30, 20, 10, 5, 1]
for score in scores:
utils.utils.save_scored_pipeline(test_outdir, score, pipeline, problem_doc, keep_top=5)
# Make sure only the top 5 are saved
top_pipelines = os.listdir("/tmp/test_outdir/pipelines_ranked")
assert(len(top_pipelines) <= 5)
assert(set(top_pipelines) == set(["999999999900.0.json", "999999999910.0.json",
"999999999950.0.json", "999999999970.0.json",
"999999999980.0.json"]))
def test_write_predictions_to_file(self):
np.random.seed(0)
# Create a pipeline that just extracts attributes and target
p = utils.primitive_pipeline_utils.PipelineWrapper(
[
{
"stage_name" : "denormalize",
"primitive" : "d3m.primitives.data_transformation.denormalize.Common",
"input" : utils.primitive_pipeline_utils.PipelineWrapper.PIPELINE_INPUT,
},
{
"stage_name" : "dataset_to_dataframe",
"primitive" : "d3m.primitives.data_transformation.dataset_to_dataframe.Common",
"input" : "denormalize"
},
{
"stage_name" : "column_parser",
"primitive" : "d3m.primitives.data_transformation.column_parser.DataFrameCommon",
"input" : "dataset_to_dataframe",
},
{
"stage_name" : "extract_attrs",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Attribute']
}
},
{
"stage_name" : "extract_targets",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Target']
}
}
])
# Load sample data
data_uri = utils.utils.get_git_root(os.path.dirname(os.path.abspath(__file__))) + "/test_data/185_baseball"
assert(os.path.exists(data_uri))
problem_doc, dataset = utils.utils.load_data_from_dir(data_uri, mode="train")
dataset_train, dataset_validate = utils.train_utils.split_dataset(dataset, problem_doc)
# Run
predictions = p.run(dataset_train, eval_datasets=[dataset_validate])[0].values['outputs.0']
test_out_predictions_path = "/tmp/test_predictions"
if os.path.exists(test_out_predictions_path):
os.remove(test_out_predictions_path)
assert(not os.path.exists(test_out_predictions_path))
utils.utils.write_predictions_to_file(predictions,
test_out_predictions_path,
problem_doc,
dataset_validate)
assert(os.path.exists(test_out_predictions_path))
if __name__ == '__main__':
unittest.main()
|
d3m-model-search-master
|
Stanford-D3M-Full/tests/unit_tests/utils/test_utils.py
|
import unittest
import utils.train_utils
import utils.utils
import utils.primitive_pipeline_utils
import random
import numpy as np
import os
import pdb
class TrainUtilsTest(unittest.TestCase):
def test_extract_labels(self):
random.seed(0)
np.random.seed(0)
# Load sample data
data_uri = utils.utils.get_git_root(os.path.dirname(os.path.abspath(__file__))) + "/test_data/185_baseball"
assert(os.path.exists(data_uri))
problem_doc, dataset = utils.utils.load_data_from_dir(data_uri, mode="train")
dataset_train, dataset_validate = utils.train_utils.split_dataset(dataset, problem_doc)
preds = utils.train_utils.extract_labels(dataset_validate).values.tolist()
assert(preds == [['0'], ['0'], ['0'], ['2'], ['0'], ['0'], ['0'], ['0'], ['0'], ['1'], ['0'],
['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'],
['2'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'],
['0'], ['2'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'],
['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'],
['0'], ['0'], ['0'], ['0'], ['0'], ['1'], ['0'], ['0'], ['0'], ['0'], ['0'],
['0'], ['0'], ['0'], ['2'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'],
['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'],
['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'],
['0'], ['2'], ['1'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'],
['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'],
['0'], ['2'], ['0'], ['0'], ['1'], ['0'], ['0'], ['0'], ['0'], ['1'], ['0'],
['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'],
['0'], ['0'], ['0'], ['0'], ['0'], ['1'], ['0'], ['0'], ['0'], ['0'], ['0'],
['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['2'], ['0'], ['0'], ['0'], ['0'],
['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['2'], ['0'], ['0'], ['0'], ['0'],
['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['1'], ['0'], ['0'], ['0'], ['0'],
['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['0'],
['0'], ['0'], ['0'], ['0'], ['0'], ['0'], ['2'], ['0'], ['0'], ['0'], ['0'],
['0'], ['0'], ['0'], ['0'], ['0'], ['2'], ['0']])
def create_pipeliner(self):
kwargs = {"data_loading_pipeline_candidates": [
[
{
"stage_name" : "denormalize",
"primitive" : "d3m.primitives.data_transformation.denormalize.Common",
"input" : utils.primitive_pipeline_utils.PipelineWrapper.PIPELINE_INPUT,
},
{
"stage_name" : "dataset_to_dataframe",
"primitive" : "d3m.primitives.data_transformation.dataset_to_dataframe.Common",
"input" : "denormalize"
},
{
"stage_name" : "column_parser",
"primitive" : "d3m.primitives.data_transformation.column_parser.DataFrameCommon",
"input" : "dataset_to_dataframe",
},
{
"stage_name" : "extract_attributes",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Attribute']
}
},
{
"stage_name" : "targets",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Target']
}
},
{
"stage_name": "cast_to_type",
"primitive": "d3m.primitives.data_transformation.cast_to_type.Common",
"input": "extract_attributes",
"hyperparameters" : {
"type_to_cast" : "float"
}
},
{
"stage_name": "attributes",
"primitive": "d3m.primitives.data_cleaning.imputer.SKlearn",
"input": "cast_to_type"
},
]
]}
pipeliner = utils.primitive_pipeline_utils.SKLearnPipeliner(**kwargs)
# Override with just the sklearn decision tree primitive
pipeliner.sklearn_primitives = ['d3m.primitives.classification.decision_tree.SKlearn']
return pipeliner
def test_score(self):
random.seed(0)
np.random.seed(0)
# Load sample data
data_uri = utils.utils.get_git_root(os.path.dirname(os.path.abspath(__file__))) + "/test_data/185_baseball"
assert(os.path.exists(data_uri))
problem_doc, dataset = utils.utils.load_data_from_dir(data_uri, mode="train")
dataset_train, dataset_validate = utils.train_utils.split_dataset(dataset, problem_doc)
# Make prediction
p = self.create_pipeliner()
predictions = p.run(dataset_train, eval_datasets=[dataset_validate])[0].values['outputs.0']
# Test the f1 score
f1_score = utils.train_utils.score(predictions, dataset_validate, problem_doc)
assert(np.abs(f1_score - 0.47746807194308194) < .001)
if __name__ == '__main__':
unittest.main()
|
d3m-model-search-master
|
Stanford-D3M-Full/tests/unit_tests/utils/test_train_utils.py
|
d3m-model-search-master
|
Stanford-D3M-Full/tests/unit_tests/utils/__init__.py
|
|
import unittest
import os
import random
import utils.primitive_pipeline_utils
import utils.utils
import utils.train_utils
import numpy as np
from d3m.metadata import problem
class PrimitivePipelineUtilsTest(unittest.TestCase):
def test_simple_sklearn_pipeliner(self):
np.random.seed(0)
random.seed(0)
kwargs = { "data_loading_pipeline_candidates" :[
[
{
"stage_name" : "dataset_to_dataframe",
"primitive" : "d3m.primitives.data_transformation.dataset_to_dataframe.Common",
"input" : utils.primitive_pipeline_utils.PipelineWrapper.PIPELINE_INPUT,
},
{
"stage_name" : "column_parser",
"primitive" : "d3m.primitives.data_transformation.column_parser.DataFrameCommon",
"input" : "dataset_to_dataframe",
},
{
"stage_name" : "extract_attributes",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Attribute']
}
},
{
"stage_name" : "targets",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Target']
}
},
{
"stage_name": "cast_to_type",
"primitive": "d3m.primitives.data_transformation.cast_to_type.Common",
"input": "extract_attributes",
"hyperparameters" : {
"type_to_cast" : "float"
}
},
{
"stage_name": "attributes",
"primitive": "d3m.primitives.data_cleaning.imputer.SKlearn",
"input": "cast_to_type"
}
]
]}
pipeliner = utils.primitive_pipeline_utils.SKLearnPipeliner(**kwargs)
# Override with just the sklearn decision tree primitive
pipeliner.sklearn_primitives = ['d3m.primitives.classification.decision_tree.SKlearn']
# Load sample data
data_uri = utils.utils.get_git_root(os.path.dirname(os.path.abspath(__file__))) + "/test_data/185_baseball"
assert(os.path.exists(data_uri))
problem_doc, dataset = utils.utils.load_data_from_dir(data_uri, mode="train")
dataset_train, dataset_validate = utils.train_utils.split_dataset(dataset, problem_doc)
# Run
predictions = pipeliner.run(dataset_train, eval_datasets=[dataset_validate])
print(predictions[0].error)
assert(len(list(predictions[0].values.keys())) > 0)
def test_pipeline_wrapper_add_stage(self):
np.random.seed(0)
# Create a pipeline that just extracts attributes and target
p = utils.primitive_pipeline_utils.PipelineWrapper(
[
{
"stage_name" : "denormalize",
"primitive" : "d3m.primitives.data_transformation.denormalize.Common",
"input" : utils.primitive_pipeline_utils.PipelineWrapper.PIPELINE_INPUT,
},
{
"stage_name" : "dataset_to_dataframe",
"primitive" : "d3m.primitives.data_transformation.dataset_to_dataframe.Common",
"input" : "denormalize"
},
{
"stage_name" : "column_parser",
"primitive" : "d3m.primitives.data_transformation.column_parser.DataFrameCommon",
"input" : "dataset_to_dataframe",
},
{
"stage_name" : "extract_attrs",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Attribute']
}
},
{
"stage_name" : "extract_targets",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Target']
}
}
])
p.add_stage({
"input" : "extract_attrs",
"primitive" : "d3m.primitives.data_cleaning.imputer.SKlearn"
})
assert(set(p.get_branches()) == set(['d3m.primitives.data_cleaning.imputer.SKlearn', 'extract_targets']))
def test_pipeline_wrapper_branches(self):
np.random.seed(0)
# Create a pipeline that just extracts attributes and target
p = utils.primitive_pipeline_utils.PipelineWrapper(
[
{
"stage_name" : "denormalize",
"primitive" : "d3m.primitives.data_transformation.denormalize.Common",
"input" : utils.primitive_pipeline_utils.PipelineWrapper.PIPELINE_INPUT,
},
{
"stage_name" : "dataset_to_dataframe",
"primitive" : "d3m.primitives.data_transformation.dataset_to_dataframe.Common",
"input" : "denormalize"
},
{
"stage_name" : "column_parser",
"primitive" : "d3m.primitives.data_transformation.column_parser.DataFrameCommon",
"input" : "dataset_to_dataframe",
},
{
"stage_name" : "extract_attrs",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Attribute']
}
},
{
"stage_name" : "extract_targets",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Target']
}
}
])
assert(set(p.get_branches()) == set(['extract_attrs', 'extract_targets']))
def test_pipeline_wrapper(self):
np.random.seed(0)
# Create a pipeline that just extracts attributes and target
p = utils.primitive_pipeline_utils.PipelineWrapper(
[
{
"stage_name" : "denormalize",
"primitive" : "d3m.primitives.data_transformation.denormalize.Common",
"input" : utils.primitive_pipeline_utils.PipelineWrapper.PIPELINE_INPUT,
},
{
"stage_name" : "dataset_to_dataframe",
"primitive" : "d3m.primitives.data_transformation.dataset_to_dataframe.Common",
"input" : "denormalize"
},
{
"stage_name" : "column_parser",
"primitive" : "d3m.primitives.data_transformation.column_parser.DataFrameCommon",
"input" : "dataset_to_dataframe",
},
{
"stage_name" : "extract_attrs",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Attribute']
}
},
{
"stage_name" : "extract_targets",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Target']
}
}
])
# Load sample data
data_uri = utils.utils.get_git_root(os.path.dirname(os.path.abspath(__file__))) + "/test_data/185_baseball"
assert(os.path.exists(data_uri))
problem_doc, dataset = utils.utils.load_data_from_dir(data_uri, mode="train")
dataset_train, dataset_validate = utils.train_utils.split_dataset(dataset, problem_doc)
# Run
predictions = p.run(dataset_train, eval_datasets=[dataset_validate])
# With the fixed seed, the validation targets are 216 rows by 1 column
assert(predictions[0].values['outputs.0'].shape == (216,1))
def test_pipeline_wrapper_return_pipeline(self):
np.random.seed(0)
# Create a pipeline that just extracts attributes and target
p = utils.primitive_pipeline_utils.PipelineWrapper(
[
{
"stage_name" : "denormalize",
"primitive" : "d3m.primitives.data_transformation.denormalize.Common",
"input" : utils.primitive_pipeline_utils.PipelineWrapper.PIPELINE_INPUT,
},
{
"stage_name" : "dataset_to_dataframe",
"primitive" : "d3m.primitives.data_transformation.dataset_to_dataframe.Common",
"input" : "denormalize"
},
{
"stage_name" : "column_parser",
"primitive" : "d3m.primitives.data_transformation.column_parser.DataFrameCommon",
"input" : "dataset_to_dataframe",
},
{
"stage_name" : "extract_attrs",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Attribute']
}
},
{
"stage_name" : "extract_targets",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Target']
}
}
])
# Load sample data
data_uri = utils.utils.get_git_root(os.path.dirname(os.path.abspath(__file__))) + "/test_data/185_baseball"
assert(os.path.exists(data_uri))
problem_doc, dataset = utils.utils.load_data_from_dir(data_uri, mode="train")
dataset_train, dataset_validate = utils.train_utils.split_dataset(dataset, problem_doc)
# Run
predictions, pipeline = p.run(dataset_train, eval_datasets=[dataset_validate], return_pipeline=True)
print(type(pipeline))
if __name__ == '__main__':
unittest.main()
|
d3m-model-search-master
|
Stanford-D3M-Full/tests/unit_tests/utils/test_primitive_pipeline_utils.py
|
import os
import unittest
import utils.utils
import utils.train_utils
import utils.primitive_pipeline_utils
import executors.HyperbandExecutor
import executors.Executor
from multiprocessing import Process, Queue, JoinableQueue
from queue import Empty
def get_data_loading_pipelines_override():
return [
[
{
"stage_name" : "denormalize",
"primitive" : "d3m.primitives.data_transformation.denormalize.Common",
"input" : utils.primitive_pipeline_utils.PipelineWrapper.PIPELINE_INPUT,
},
{
"stage_name" : "dataset_to_dataframe",
"primitive" : "d3m.primitives.data_transformation.dataset_to_dataframe.Common",
"input" : "denormalize"
},
{
"stage_name" : "column_parser",
"primitive" : "d3m.primitives.data_transformation.column_parser.DataFrameCommon",
"input" : "dataset_to_dataframe",
},
{
"stage_name" : "extract_attributes",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Attribute']
}
},
{
"stage_name" : "targets",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Target']
}
},
{
"stage_name": "cast_to_type",
"primitive": "d3m.primitives.data_transformation.cast_to_type.Common",
"input": "extract_attributes",
"hyperparameters" : {
"type_to_cast" : "float"
}
},
{
"stage_name": "attributes",
"input": "cast_to_type",
"primitive": "d3m.primitives.data_cleaning.imputer.SKlearn",
},
]
]
class HyperbandExecutorTest(unittest.TestCase):
def test_hyperband_executor_basic(self):
# Create executor
inputs_queue = JoinableQueue()
results_queue = JoinableQueue()
executor = executors.HyperbandExecutor.HyperbandExecutor(
inputs_queue, results_queue, hyperband_epochs_budget=100
)
executor.get_data_loading_pipelines = get_data_loading_pipelines_override
# Load sample data
data_uri = utils.utils.get_git_root(os.path.dirname(
os.path.abspath(__file__))) + "/test_data/185_baseball"
assert(os.path.exists(data_uri))
problem_doc, dataset = utils.utils.load_data_from_dir(data_uri, mode="train")
# Process item
inputs_queue.put((problem_doc, dataset))
executor.start()
inputs_queue.join()
# Gather results
results = []
while not results_queue.empty():
print("Gathering...")
results.append(results_queue.get(True, executors.Executor.QUEUE_TIMEOUT))
executor.terminate()
if __name__ == '__main__':
unittest.main()
|
d3m-model-search-master
|
Stanford-D3M-Full/tests/unit_tests/executor/test_HyperbandExecutor.py
|
import os
import unittest
import utils.utils
import utils.train_utils
import utils.primitive_pipeline_utils
import executors.SklearnStackedLSTMExecutor
import executors.Executor
from multiprocessing import Process, Queue, JoinableQueue
from queue import Empty
def get_data_loading_pipelines_override():
return [
[
{
"stage_name" : "denormalize",
"primitive" : "d3m.primitives.data_transformation.denormalize.Common",
"input" : utils.primitive_pipeline_utils.PipelineWrapper.PIPELINE_INPUT,
},
{
"stage_name" : "dataset_to_dataframe",
"primitive" : "d3m.primitives.data_transformation.dataset_to_dataframe.Common",
"input" : "denormalize"
},
{
"stage_name" : "column_parser",
"primitive" : "d3m.primitives.data_transformation.column_parser.DataFrameCommon",
"input" : "dataset_to_dataframe",
},
{
"stage_name" : "extract_attributes",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Attribute']
}
},
{
"stage_name" : "targets",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Target']
}
},
{
"stage_name": "cast_to_type",
"primitive": "d3m.primitives.data_transformation.cast_to_type.Common",
"input": "extract_attributes",
"hyperparameters" : {
"type_to_cast" : "float"
}
},
{
"stage_name": "attributes",
"input": "cast_to_type",
"primitive": "d3m.primitives.data_cleaning.imputer.SKlearn",
},
]
]
class SklearnStackedLSTMExecutorTest(unittest.TestCase):
def test_sklearn_stacked_lstm_executor(self):
git_root_path = utils.utils.get_git_root(os.path.dirname(os.path.abspath(__file__)))
# Create executor
inputs_queue = JoinableQueue()
results_queue = JoinableQueue()
lstm_path = git_root_path + "/Stanford-D3M-Full/experimental/lstm_predictor/saved_models/20190130/"
assert(os.path.exists(lstm_path))
executor = executors.SklearnStackedLSTMExecutor.SklearnStackedLSTMExecutor(
inputs_queue,
results_queue,
override_sklearn_primitives_set=["d3m.primitives.regression.linear_svr.SKlearn", "d3m.primitives.regression.gaussian_process.SKlearn"],
lstm_path= lstm_path
)
executor.get_data_loading_pipelines = get_data_loading_pipelines_override
# Load sample data
data_uri = git_root_path + "/test_data/185_baseball"
assert(os.path.exists(data_uri))
problem_doc, dataset = utils.utils.load_data_from_dir(data_uri, mode="train")
# Process item
inputs_queue.put((problem_doc, dataset))
inputs_queue.put((problem_doc, dataset))
inputs_queue.put((problem_doc, dataset))
executor.start()
inputs_queue.join()
# Gather results
results = []
while not results_queue.empty():
print("Gathering...")
results.append(results_queue.get(True, executors.Executor.QUEUE_TIMEOUT))
executor.terminate()
for result in results:
assert None not in result
if __name__ == '__main__':
unittest.main()
|
d3m-model-search-master
|
Stanford-D3M-Full/tests/unit_tests/executor/test_SklearnStackedLSTMExecutor.py
|
d3m-model-search-master
|
Stanford-D3M-Full/tests/unit_tests/executor/__init__.py
|
|
import os
import unittest
import utils.utils
import utils.train_utils
import utils.primitive_pipeline_utils
import executors.SimpleRandomSklearnExecutor
import executors.Executor
from multiprocessing import Process, Queue, JoinableQueue
from queue import Empty
def get_data_loading_pipelines_override():
return [
[
{
"stage_name" : "denormalize",
"primitive" : "d3m.primitives.data_transformation.denormalize.Common",
"input" : utils.primitive_pipeline_utils.PipelineWrapper.PIPELINE_INPUT,
},
{
"stage_name" : "dataset_to_dataframe",
"primitive" : "d3m.primitives.data_transformation.dataset_to_dataframe.Common",
"input" : "denormalize"
},
{
"stage_name" : "column_parser",
"primitive" : "d3m.primitives.data_transformation.column_parser.DataFrameCommon",
"input" : "dataset_to_dataframe",
},
{
"stage_name" : "extract_attributes",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Attribute']
}
},
{
"stage_name" : "targets",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Target']
}
},
{
"stage_name": "cast_to_type",
"primitive": "d3m.primitives.data_transformation.cast_to_type.Common",
"input": "extract_attributes",
"hyperparameters" : {
"type_to_cast" : "float"
}
},
{
"stage_name": "attributes",
"input": "cast_to_type",
"primitive": "d3m.primitives.data_cleaning.imputer.SKlearn",
},
]
]
class SimpleRandomSklearnExecutorTest(unittest.TestCase):
def test_simple_random_sklearn_executor(self):
# Create executor
inputs_queue = JoinableQueue()
results_queue = JoinableQueue()
executor = executors.SimpleRandomSklearnExecutor.SimpleRandomSklearnExecutor(
inputs_queue, results_queue, override_sklearn_primitives_set=["d3m.primitives.regression.linear_svr.SKlearn"]
)
executor.get_data_loading_pipelines = get_data_loading_pipelines_override
# Load sample data
data_uri = utils.utils.get_git_root(os.path.dirname(
os.path.abspath(__file__))) + "/test_data/185_baseball"
assert(os.path.exists(data_uri))
problem_doc, dataset = utils.utils.load_data_from_dir(data_uri, mode="train")
# Process item
inputs_queue.put((problem_doc, dataset))
inputs_queue.put((problem_doc, dataset))
inputs_queue.put((problem_doc, dataset))
executor.start()
inputs_queue.join()
# Gather results
results = []
while not results_queue.empty():
print("Gathering...")
results.append(results_queue.get(True, executors.Executor.QUEUE_TIMEOUT))
executor.terminate()
assert(len(results) == 3)
for result in results:
assert None not in result
if __name__ == '__main__':
unittest.main()
|
d3m-model-search-master
|
Stanford-D3M-Full/tests/unit_tests/executor/test_SimpleRandomSklearnExecutor.py
|
import os
import unittest
import shutil
import utils.utils
import utils.train_utils
import executors.NistSaverExecutor
import executors.Executor
from multiprocessing import Process, Queue, JoinableQueue
from queue import Empty
class NistSaverExecutorTest(unittest.TestCase):
def test_saver_executor(self):
# Create executor
inputs_queue = JoinableQueue()
results_queue = JoinableQueue()
executor = executors.NistSaverExecutor.NistSaverExecutor(
inputs_queue, results_queue
)
# Load sample data
data_uri = utils.utils.get_git_root(os.path.dirname(
os.path.abspath(__file__))) + "/test_data/185_baseball"
assert(os.path.exists(data_uri))
problem_doc, dataset = utils.utils.load_data_from_dir(data_uri, mode="train")
# Load a random saved pipeline
pipeline_json_path = (
utils.utils.get_git_root(os.path.dirname(os.path.abspath(__file__))) + "/test_data/185_baseball_pipeline/test_pipeline.json")
with open(pipeline_json_path) as f:
pipeline_json = f.read()
# Process item
base_outputdir, pipeline_json, problem_doc, score = (
"/tmp/test_outdir", pipeline_json, problem_doc, 69
)
if os.path.exists(base_outputdir):
shutil.rmtree(base_outputdir)
item = (base_outputdir, pipeline_json, problem_doc, score)
inputs_queue.put(item)
executor.start()
inputs_queue.join()
executor.terminate()
# Make sure there are files in executables, supporting files and pipelines
#expected_supporting_filepath = "/tmp/test_outdir/supporting_files/999999999931.0/Stanford-D3M-Full/"
expected_pipelines_filepath = "/tmp/test_outdir/pipelines_ranked/999999999931.0.json"
#expected_execuables_filepath = "/tmp/test_outdir/executables/999999999931.0.sh"
#assert(os.path.exists(expected_supporting_filepath))
assert(os.path.exists(expected_pipelines_filepath))
#assert(os.path.exists(expected_execuables_filepath))
if __name__ == '__main__':
unittest.main()
|
d3m-model-search-master
|
Stanford-D3M-Full/tests/unit_tests/executor/test_NistSaverExecutor.py
|
d3m-model-search-master
|
Stanford-D3M-Full/tests/unit_tests/apps/__init__.py
|
|
d3m-model-search-master
|
Stanford-D3M-Full/tests/integration_tests/__init__.py
|
|
d3m-model-search-master
|
Stanford-D3M-Full/tests/integration_tests/ta3ta2/__init__.py
|
|
d3m-model-search-master
|
Stanford-D3M-Full/tests/integration_tests/utils/__init__.py
|
|
d3m-model-search-master
|
Stanford-D3M-Full/tests/integration_tests/executor/__init__.py
|
|
d3m-model-search-master
|
Stanford-D3M-Full/tests/integration_tests/apps/__init__.py
|
|
"""
train_utils.py
------------------------------------
Contains utilities for:
- K-fold cross validation
- Scoring
- Extracting labels
"""
import sys
import glob
import pdb
import shutil
import json
import pandas
import numpy as np
import os
import copy
import traceback
import time
import uuid
from d3m import metadata
from d3m.metadata import problem
from d3m.metadata.problem import *
from d3m.metadata.pipeline import Pipeline, PrimitiveStep
from d3m.metadata.base import Metadata
from d3m.container.dataset import D3MDatasetLoader, Dataset
from d3m import primitives
from d3m_outputs import Predictions
import d3m.metrics
import d3m_outputs
from utils.utils import *
from utils.primitive_pipeline_utils import *
def get_entrypoint_resource_id(dataset):
for resource_id in dataset.keys():
if ('https://metadata.datadrivendiscovery.org/types/DatasetEntryPoint' in
dataset.metadata.query(selector=(resource_id,))["semantic_types"]):
return resource_id
# No resource is marked as the entry point; fall back to the last resource id.
return resource_id
def split_dataset(dataset, problem_doc):
"""
Split dataset into 80% train and 20% validate.
"""
resource_id = get_entrypoint_resource_id(dataset)
full_dataframe = dataset[resource_id]
# Randomly assign ~80% of rows to train and the remaining ~20% to validate
msk = np.random.rand(len(full_dataframe)) < .8
train = full_dataframe[msk]
test = full_dataframe[~msk]
train.reset_index(inplace=True, drop=True)
test.reset_index(inplace=True, drop=True)
# Copy over to new train and validation datasets
dataset_train = copy.copy(dataset)
dataset_validate = copy.copy(dataset)
dataset_train[resource_id] = train
dataset_validate[resource_id] = test
# Update length
train_length = len(train)
validate_length = len(test)
dataset_train.metadata = dataset_train.metadata.update(selector=[resource_id], metadata={"dimension": {"length" : train_length}},
source=dataset_train.metadata)
dataset_validate.metadata = dataset_validate.metadata.update(selector=[resource_id], metadata={"dimension": {"length" : validate_length}},
source=dataset_validate.metadata)
add_target_columns_metadata(dataset_train, problem_doc)
add_target_columns_metadata(dataset_validate, problem_doc)
return dataset_train, dataset_validate
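# A minimal usage sketch, assuming a NIST-layout dataset directory such as the
# 185_baseball fixture used by the unit tests (the path below is hypothetical):
#
#   problem_doc, dataset = load_data_from_dir("/data/185_baseball", mode="train")
#   dataset_train, dataset_validate = split_dataset(dataset, problem_doc)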
def split_dataset_kfold(dataset, problem_doc, k=3):
dataset_trains, dataset_validates = [], []
# Get full dataframe
resource_id = get_entrypoint_resource_id(dataset)
full_dataframe = dataset[resource_id]
# Shuffle dataframe
full_dataframe = full_dataframe.sample(frac=1).reset_index(drop=True)
# split into k folds and generate train and test
for fold in range(k):
length_of_validation_set = int(1 / float(k) * len(full_dataframe))
start_of_validation = fold * length_of_validation_set
end_of_validation = len(full_dataframe) if fold == k-1 else (fold+1)*length_of_validation_set
msk = np.zeros(len(full_dataframe))
msk[start_of_validation : end_of_validation] = 1
msk = msk.astype(np.bool)
# Partition
train = full_dataframe[~msk]
test = full_dataframe[msk]
train.reset_index(inplace=True, drop=True)
test.reset_index(inplace=True, drop=True)
# Copy over to new train and validation datasets
dataset_train = copy.copy(dataset)
dataset_validate = copy.copy(dataset)
dataset_train[resource_id] = train
dataset_validate[resource_id] = test
# Update length
train_length = len(train)
validate_length = len(test)
dataset_train.metadata = dataset_train.metadata.update(selector=[resource_id], metadata={"dimension": {"length" : train_length}}, source=dataset_train.metadata)
dataset_validate.metadata = dataset_validate.metadata.update(selector=[resource_id], metadata={"dimension": {"length" : validate_length}}, source=dataset_validate.metadata)
add_target_columns_metadata(dataset_train, problem_doc)
add_target_columns_metadata(dataset_validate, problem_doc)
dataset_trains.append(dataset_train)
dataset_validates.append(dataset_validate)
return dataset_trains, dataset_validates
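# A hypothetical k-fold sketch: run one pipeliner (assumed to be an
# SKLearnPipeliner) on every fold and average the scores.
#
#   trains, validates = split_dataset_kfold(dataset, problem_doc, k=3)
#   fold_scores = []
#   for tr, va in zip(trains, validates):
#       preds = pipeliner.run(tr, eval_datasets=[va])[0].values['outputs.0']
#       fold_scores.append(score(preds, va, problem_doc))
#   mean_score = np.mean(fold_scores)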
def create_labels_extraction_pipeline():
"""
Create a pipeline to extract labels from a dataset.
"""
pipeliner = PipelineWrapper(
[
{
"stage_name" : "dataset_to_dataframe",
"primitive" : "d3m.primitives.data_transformation.dataset_to_dataframe.Common",
"input" : PipelineWrapper.PIPELINE_INPUT
},
{
"stage_name" : "parser",
"primitive" : "d3m.primitives.data_transformation.column_parser.DataFrameCommon",
"input" : "dataset_to_dataframe",
},
{
"stage_name" : "extract_targets",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Target']
}
}
])
return pipeliner
def extract_labels(dataset):
"""
Extract labels from dataset. Returns a pandas dataframe.
"""
labels_extraction_pipeliner = create_labels_extraction_pipeline()
return labels_extraction_pipeliner.run(dataset, eval_datasets=[dataset])[0].values['outputs.0']
def metric_keys_from_doc(doc):
return [PerformanceMetric.get_map()[x.get("metric")] for x in
doc.query(()).get("inputs").get("performanceMetrics")]
def score(predictions, dataset_test, problem_doc, override_metric_key=None):
"""
Score predictions against test labels
"""
target_key = predictions.keys()[-1]
predictions = predictions.loc[:,target_key]
test_labels = extract_labels(dataset_test)
# Extract the score function
metric = problem_doc.query(()).get("inputs").get("performanceMetrics")[0].get("metric")
metric_key = PerformanceMetric.get_map()[metric]
if override_metric_key is not None:
print("train_utils.py:score - Overriding metric from %s -> %s" % (str(metric_key), str(override_metric_key)))
sys.stdout.flush()
metric_key = override_metric_key
score_function = d3m.metrics.functions_map[metric_key]
# Expect predictions and test_labels to both be pandas dataframe
groundtruth_vector, preds_vector = test_labels, predictions
try:
groundtruth_vector = groundtruth_vector.values
preds_vector = preds_vector.values
except:
pass
try:
groundtruth_vector = groundtruth_vector.astype(np.float)
preds_vector = preds_vector.astype(np.float)
except:
pass
try:
groundtruth_vector = groundtruth_vector.flatten()
preds_vector = preds_vector.flatten()
except:
pass
return score_function(groundtruth_vector, preds_vector)
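# Usage sketch, mirroring tests/unit_tests/utils/test_utils.py: `predictions`
# is the 'outputs.0' dataframe produced by a pipeline run, and the metric is
# taken from the problem doc unless override_metric_key is given.
#
#   f1 = score(predictions, dataset_validate, problem_doc)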
def get_score_on_score_set(inputdir, problem_doc, predictions):
"""
NIST seed datasets have a score directory with groundtruth files.
This method scores a given set of predictions against that ground truth.
"""
path_to_score_root = "%s/SCORE/" % inputdir
groundtruth_path = "%s/targets.csv" % path_to_score_root
result_file_path = "/tmp/%s" % str(uuid.uuid4())
while os.path.exists(result_file_path):
result_file_path = "/tmp/%s" % str(uuid.uuid4())
write_predictions_to_file(predictions,
result_file_path,
problem_doc,
dataset=None)
p = Predictions(result_file_path, path_to_score_root)
#assert p.is_valid()
scores = p.score(groundtruth_path)
return list(scores)[0].scorevalue
|
d3m-model-search-master
|
Stanford-D3M-Full/utils/train_utils.py
|
d3m-model-search-master
|
Stanford-D3M-Full/utils/__init__.py
|
|
"""
primitive_pipeline_utils.py
------------------------------------
Contains utilities for:
- Saving and loading pipelines
- Constructing pipelines
- Running pipelines
- Loading primitives
"""
import sys
import glob
import pdb
import shutil
import uuid
import json
import random
import pandas
import numpy as np
import os
import traceback
import time
import copy
from d3m.metadata import base as metadata_base, hyperparams as hyperparams_module, pipeline as pipeline_module, problem
import d3m
import d3m.index
import d3m.primitive_interfaces.base
from d3m import metadata
from d3m.metadata import problem
from d3m.metadata.problem import *
from d3m.metadata.pipeline import Pipeline, PrimitiveStep
from d3m.metadata.base import ArgumentType, Context
from d3m.metadata.base import Metadata
from d3m.container.dataset import D3MDatasetLoader, Dataset
from d3m import primitives
import d3m_outputs
from d3m_outputs import Predictions
from d3m.runtime import Runtime
def load_pipeline(pipeline_json):
return Pipeline.from_json(pipeline_json)
def get_primitive_names():
"""Return list of names of primitives."""
return d3m.index.search()
def get_primitive_with_name(name):
"""Loads and returns a primitive with given name."""
return d3m.index.get_primitive(name)
def get_data_primitives_names():
"""Helper to return names of all primitives that deal with data."""
return [x for x in get_primitive_names() if ".data." in x or ".datasets." in x]
def get_sklearn_primitives_names():
"""Helper to return names of all sklearn primitives."""
return [x for x in get_primitive_names() if ".SKlearn" in x]
def load_all_primitives():
"""Loads all primitives."""
d3m.index.load_all()
def get_loaded_primitives():
"""Returns loaded primitives."""
return d3m.index.get_loaded_primitives()
def get_primitive_attribute(primitive_or_primitive_name, attribute_selector):
"""Given a primitive or primitive name, gets the value in the metadata of primitive associated with attribute_selector.
Example:
=> get_primitive_attribute("d3m.primitives.bbn.sklearn_wrap.BBNMLPClassifier", ("primitive_code","class_type_arguments","Inputs"))
=> <class 'd3m.container.pandas.DataFrame'>
"""
primitive = primitive_or_primitive_name
if type(primitive_or_primitive_name) == str:
primitive = get_primitive_with_name(primitive_or_primitive_name)
metadata_selector = primitive.metadata.query()
for attribute in attribute_selector:
metadata_selector = metadata_selector.get(attribute)
return metadata_selector
def get_primitive_input_type(primitive_or_primitive_name):
"""Returns the input type of given primitive or primitive name."""
return get_primitive_attribute(primitive_or_primitive_name, ("primitive_code", "class_type_arguments", "Inputs"))
def get_primitive_output_type(primitive_or_primitive_name):
"""Returns the output type of given primitive or primitive name."""
return get_primitive_attribute(primitive_or_primitive_name, ("primitive_code", "class_type_arguments", "Outputs"))
def get_primitive_family_type(primitive_or_primitive_name):
"""Returns the primitive type of given primitive or primitive name."""
return get_primitive_attribute(primitive_or_primitive_name, ("primitive_family",))
class PipelineWrapper(object):
"""
Wrapper around the d3m pipeline object which allows fast and easy construction
of pipelines.
"""
PIPELINE_INPUT = "PIPELINE_INPUT"
def __init__(self, pipeline_architecture):
"""
Create a new PipelineWrapper object. pipeline_architecture should be a
list of dictionaries specifying the stages of the pipeline.
Each stage dictionary in the pipeline_architecture list should specify:
"primitive" - primitive name or class to instantiate (required)
"stage_name" - name of the stage (required)
"input" - name of the stage whose output should be passed
as input to the current stage (required). Use
PipelineWrapper.PIPELINE_INPUT for referencing the
input of the pipeline.
"hyperparameters" - dict mapping names to data. (optional)
Example:
[
{
"stage_name" : "denormalize",
"primitive" : "d3m.primitives.dsbox.Denormalize",
"input" : PipelineWrapper.PIPELINE_INPUT,
},
{
"stage_name" : "dataset_to_dataframe",
"primitive" : "d3m.primitives.data_transformation.dataset_to_dataframe.Common",
"input" : "denormalize",
},
{
"stage_name" : "column_parser",
"primitive" : "d3m.primitives.data_transformation.column_parser.DataFrameCommon",
"input" : "dataset_to_dataframe",
},
{
"stage_name" : "extract_attrs",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Attribute']
}
},
{
"stage_name" : "extract_targets",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Target']
}
}
]
"""
self.pipeline_architecture_dict = copy.deepcopy(pipeline_architecture)
self.verify_pipeline_architecture_dict(self.pipeline_architecture_dict)
def get_stage_names(self):
return set([x["stage_name"] for x in self.pipeline_architecture_dict])
def run(self, input_dataset, eval_datasets=[], return_pipeline=False):
"""
Converts internal pipeline architecture dict into pipeline and runs it.
Args:
- input_dataset: Input dataset to train
- eval_datasets: Datasets to evaluate on
- return_pipeline: Whether to also return the fitted pipeline that produced the predictions
Returns:
- If return_pipeline is False, returns just the predictions, otherwise returns a tuple
(preds, pipeline)
"""
pipeline = self.load_pipeline_architecture(self.pipeline_architecture_dict)
pipeline.check()
runtime = Runtime(pipeline, context=Context.TESTING)
runtime.fit(inputs=[input_dataset], return_values=['outputs.0'])
all_preds = []
for dataset in eval_datasets:
all_preds.append(runtime.produce(inputs=[dataset], return_values=['outputs.0']))
results = all_preds
if return_pipeline:
results = (all_preds, pipeline)
return results
def load_pipeline_architecture(self, pipeline_architecture_dict):
"""
Loads a pipeline architecture dictionary and returns a d3m Pipeline object.
"""
pipeline_description = Pipeline(context=Context.TESTING)
pipeline_description.add_input(name='inputs')
# For each corresponding stage in the dictionary create a step
steps = []
stage_name_to_reference_name = {}
for stage_dict in pipeline_architecture_dict:
# Extract stage attributes
primitive = stage_dict["primitive"]
if type(primitive) == str:
primitive = get_primitive_with_name(primitive)
cur_stage_name = stage_dict["stage_name"]
input_stage = stage_dict["input"]
# Create primitive step
step = PrimitiveStep(primitive_description=primitive.metadata.query())
data_reference = "inputs.0" if input_stage == PipelineWrapper.PIPELINE_INPUT else stage_name_to_reference_name[input_stage]
step.add_argument(name="inputs", argument_type=ArgumentType.CONTAINER, data_reference=data_reference)
if "hyperparameters" in stage_dict:
for k,v in stage_dict["hyperparameters"].items():
step.add_hyperparameter(name=k, argument_type=ArgumentType.VALUE, data=v)
if "arguments" in stage_dict:
for k,v in stage_dict["arguments"].items():
step.add_argument(name=k, argument_type=ArgumentType.CONTAINER, data_reference=stage_name_to_reference_name[v])
step.add_output("produce")
pipeline_description.add_step(step)
reference_name = next(iter(step.get_output_data_references()))
# Update accounting
stage_name_to_reference_name[cur_stage_name] = reference_name
steps.append(step)
# Output is output of the last step
last_output_reference = next(iter(steps[-1].get_output_data_references()))
pipeline_description.add_output(name="output", data_reference=last_output_reference)
return pipeline_description
def get_branches(self):
"""
Returns stage names that are not used as inputs to
any other stages.
"""
stage_names = [x["stage_name"] for x in self.pipeline_architecture_dict]
for stage_dict in self.pipeline_architecture_dict:
input_name = stage_dict["input"]
if input_name in stage_names:
stage_names.remove(input_name)
return stage_names
def add_stage(self, new_stage):
"""
Adds a stage to the pipeline architecture dict; then asserts validity
of stages.
"""
# If new_stage does not have a stage_name, give it a unique name
if "stage_name" not in new_stage:
primitive_name = str(new_stage["primitive"])
stage_names = [x["stage_name"] for x in self.pipeline_architecture_dict]
candidate_stage_name = primitive_name
while candidate_stage_name in stage_names:
candidate_stage_name = primitive_name + "_" + str(uuid.uuid4())
new_stage["stage_name"] = candidate_stage_name
self.pipeline_architecture_dict.append(new_stage)
self.verify_pipeline_architecture_dict(self.pipeline_architecture_dict)
def verify_pipeline_architecture_dict(self, pipeline_json):
"""
Verifies correctness of pipeline architecture dict.
- Make sure that stage dictionaries have "stage_name", "primitive", "input"
- Make sure that stage dictionaries have valid "primitive"
- Make sure that stage_names are unique.
- Make sure the inputs reference an existing stage name.
"""
# Verify that dictionaries have "stage_name" "primitive" "input" keys
for stage_dict in pipeline_json:
keys_to_check = ["stage_name", "primitive", "input"]
for k in keys_to_check:
if k not in stage_dict.keys():
raise Exception("Key '%s' not in pipeline dictionary" % k)
# Verify that "primitives" are valid
valid_primitives_names = get_primitive_names()
for stage_dict in pipeline_json:
primitive_or_primitive_name = stage_dict["primitive"]
if type(primitive_or_primitive_name) == str:
if primitive_or_primitive_name not in valid_primitives_names:
raise Exception("Primitive name '%s' not in list of valid primitives." % primitive_or_primitive_name)
elif type(primitive_or_primitive_name) != d3m.primitive_interfaces.base.PrimitiveBaseMeta:
raise Exception("Primitive '%s' not the right type (got object of type %s instead of %s" %
(str(primitive_or_primitive_name),
str(type(primitive_or_primitive_name)),
str(type(d3m.primitive_interfaces.base.PrimitiveBaseMeta))))
# Verify that stage names are unique
stage_names = set([x["stage_name"] for x in pipeline_json])
n_unique_stage_names = len(stage_names)
if n_unique_stage_names != len(pipeline_json):
raise Exception("Stage names in pipeline dictionary are not unique.")
# Make sure inputs reference previous stage names
stage_names_so_far = set()
for stage_dict in pipeline_json:
input_stage = stage_dict["input"]
if input_stage != PipelineWrapper.PIPELINE_INPUT and input_stage not in stage_names_so_far:
raise Exception("Stage not found: %s" % input_stage)
stage_names_so_far.add(stage_dict["stage_name"])
class SKLearnPipeliner(object):
"""
Represents an object which can construct random simple sklearn classification
pipelines and run them on inputs. One can either
(1) provide a specific pipeline with 'data_loading_pipeline', or
(2) provide a list of candidate pipelines via 'data_loading_pipeline_candidates'
"""
def __init__(self, data_loading_pipeline=None, data_loading_pipeline_candidates=None, sklearn_predictor=None):
"""
Create an SKLearnPipeliner.
Note: a "pipeline" here means a data loading pipeline architecture (see PipelineWrapper).
Expected stage output branches are ["attributes", "targets"].
- data_loading_pipeline - a pipeline architecture
- data_loading_pipeline_candidates - a list of pipeline architecture dicts
- sklearn_predictor - the sklearn predictor to use as the last step of the pipeline
"""
self.data_loading_pipeline = data_loading_pipeline
self.data_loading_pipeline_candidates = data_loading_pipeline_candidates
self.sklearn_predictor = sklearn_predictor
# check that we are given either a specific pipeline or candidates
assert bool(data_loading_pipeline) ^ bool(data_loading_pipeline_candidates)
if sklearn_predictor is None:
self.sklearn_primitives = get_sklearn_primitives_names()
def run(self, dataset_train, eval_datasets=[], return_pipeline=False):
"""
If we are given a specific pipeline configuration, create a pipeline and
run it on the given input datasets.
Otherwise, sample a random data loading pipeline and sklearn primitive and
run the resulting pipeline on the given input datasets.
"""
# Load data loading pipeline
pipeline = random.choice(self.data_loading_pipeline_candidates) if (self.data_loading_pipeline is None) \
else self.data_loading_pipeline
pipeline_wrapper = PipelineWrapper(pipeline)
expected_stage_outputs = set(["attributes", "targets"])
stage_outputs = set(pipeline_wrapper.get_branches())
if expected_stage_outputs != stage_outputs:
raise Exception("Expected stage outputs (%s) does not match actual (%s)." % (str(expected_stage_outputs),
str(stage_outputs)))
# Make sure we have `column_parser`, since the construct_predictions stage below references it
assert("column_parser" in pipeline_wrapper.get_stage_names())
# Sample a random sklearn primitive
sklearn_primitive = random.choice(self.sklearn_primitives) if (self.sklearn_predictor is None) \
else self.sklearn_predictor
pipeline_wrapper.add_stage({
"stage_name": "predict",
"input" : "attributes",
"primitive": sklearn_primitive,
"arguments": {
"outputs": "targets"
},
"hyperparameters": {
# Todo(maxlam): Better way to handle GeneralRelationalDataset....
"use_semantic_types": True if random.random() <= .5 else False,
}
})
# Make sure to write predictions in the correct format
pipeline_wrapper.add_stage({
"input": "predict",
"primitive": "d3m.primitives.data_transformation.construct_predictions.DataFrameCommon",
"arguments": {
"reference": "column_parser"
}
})
# Run the pipeline
return pipeline_wrapper.run(dataset_train, eval_datasets=eval_datasets,
return_pipeline=return_pipeline)
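# A hedged end-to-end sketch: build a pipeliner from a single data-loading
# pipeline whose branches are exactly ["attributes", "targets"], pin the
# predictor, and run it. `my_loading_pipeline`, `dataset_train`, and
# `dataset_validate` are hypothetical names for the caller's objects.
#
#   pipeliner = SKLearnPipeliner(
#       data_loading_pipeline=my_loading_pipeline,
#       sklearn_predictor="d3m.primitives.classification.decision_tree.SKlearn")
#   preds = pipeliner.run(dataset_train, eval_datasets=[dataset_validate])[0].values['outputs.0']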
|
d3m-model-search-master
|
Stanford-D3M-Full/utils/primitive_pipeline_utils.py
|
"""
utils.py
------------------------------------------------
Contains general utilities for:
- Loading d3m dataset, writing d3m predictions
- Dealing with problem docs, protos
- Running functions with a time limit
"""
import sys
import glob
import pdb
import shutil
import json
import pandas
import numpy as np
import os
import traceback
import time
import git
import shutil
import pdb
from multiprocessing import Process
from d3m import metadata
from d3m.metadata import problem
from d3m.metadata.problem import *
from d3m.metadata.pipeline import Pipeline, PrimitiveStep
from d3m.metadata.base import Metadata
from d3m.container.dataset import D3MDatasetLoader, Dataset
from d3m import primitives
import d3m_outputs
from d3m.metadata import base as metadata_base
from d3m_outputs import Predictions
def get_git_root(path):
git_repo = git.Repo(path, search_parent_directories=True)
git_root = git_repo.git.rev_parse("--show-toplevel")
return git_root
def load_problem_doc(problem_doc_uri: str):
"""
Load problem_doc from problem_doc_uri
Parameters
---------
problem_doc_uri
Uri where the problemDoc.json is located
"""
with open(problem_doc_uri) as file:
problem_doc = json.load(file)
problem_doc_metadata = Metadata(problem_doc)
return problem_doc_metadata
def add_target_columns_metadata(dataset: 'Dataset', problem_doc_metadata: 'Metadata'):
"""
Add metadata to the dataset from problem_doc_metadata
Parameters
---------
dataset
Dataset
problem_doc_metadata:
Metadata about the problemDoc
"""
for data in problem_doc_metadata.query(())['inputs']['data']:
targets = data['targets']
for target in targets:
semantic_types = list(dataset.metadata.query(
(target['resID'], metadata_base.ALL_ELEMENTS, target['colIndex'])).get('semantic_types', []))
if 'https://metadata.datadrivendiscovery.org/types/Target' not in semantic_types:
semantic_types.append('https://metadata.datadrivendiscovery.org/types/Target')
dataset.metadata = dataset.metadata.update(
(target['resID'], metadata_base.ALL_ELEMENTS, target['colIndex']), {'semantic_types': semantic_types})
if 'https://metadata.datadrivendiscovery.org/types/TrueTarget' not in semantic_types:
semantic_types.append('https://metadata.datadrivendiscovery.org/types/TrueTarget')
dataset.metadata = dataset.metadata.update(
(target['resID'], metadata_base.ALL_ELEMENTS, target['colIndex']), {'semantic_types': semantic_types})
return dataset
def load_data_from_dir(inputdir, mode="train"):
"""
Returns problem_doc and dataset given input directory.
"""
assert mode in ["train", "test", "score"]
if mode == "train":
problemdir = "%s/TRAIN/problem_TRAIN" % inputdir
problem_doc_uri = "%s/problemDoc.json" % problemdir
datasetdir = "%s/TRAIN/dataset_TRAIN" % inputdir
dataset_uri = "%s/datasetDoc.json" % datasetdir
if mode == "test":
problemdir = "%s/TEST/problem_TEST" % inputdir
problem_doc_uri = "%s/problemDoc.json" % problemdir
datasetdir = "%s/TEST/dataset_TEST" % inputdir
dataset_uri = "%s/datasetDoc.json" % datasetdir
if mode == "score":
problemdir = "%s/SCORE/problem_TEST" % inputdir
problem_doc_uri = "%s/problemDoc.json" % problemdir
datasetdir = "%s/SCORE/dataset_TEST" % inputdir
dataset_uri = "%s/datasetDoc.json" % datasetdir
# Problem doc and dataset loading
problem_doc = load_problem_doc(problem_doc_uri)
# Dataset
if 'file:' not in dataset_uri:
dataset_uri = 'file://{dataset_uri}'.format(dataset_uri=os.path.abspath(dataset_uri))
dataset = D3MDatasetLoader()
dataset = dataset.load(dataset_uri=dataset_uri)
dataset = add_target_columns_metadata(dataset, problem_doc)
return problem_doc, dataset
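# Expected on-disk layout for mode="train" (NIST seed-dataset convention; the
# "test" and "score" modes follow the analogous TEST/SCORE structure above):
#
#   <inputdir>/TRAIN/problem_TRAIN/problemDoc.json
#   <inputdir>/TRAIN/dataset_TRAIN/datasetDoc.json
#
# Usage sketch (hypothetical path):
#   problem_doc, dataset = load_data_from_dir("/data/185_baseball", mode="train")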
def is_higher_score_better(problem_doc):
"""
Returns whether higher score is better for the given problem.
Currently this is a hack:
If metric string has "err" in it, return False, otherwise True
"""
metric_string = problem_doc.query(selector=()).get("inputs").get("performanceMetrics")[0].get("metric")
return not "err" in metric_string.lower()
def get_global_score(local_score, problem_doc):
"""
Convert local_score to global_score. A global_score has the property
that a lower value always means a better solution.
The algorithm is as follows:
If problem_doc indicates higher scores are better:
return LARGE_EPS - local_score
else
return LARGE_EPS + local_score
LARGE_EPS should be chosen to be bigger than any possible |local_score|.
However, if this is not the case and |local_score| > LARGE_EPS, we return
a global score of LARGE_EPS (equivalent to a local score of 0) to indicate an error.
"""
LARGE_EPS = 1e12
if np.abs(local_score) > LARGE_EPS:
return LARGE_EPS
higher_is_better = is_higher_score_better(problem_doc)
sign = -1 if higher_is_better else 1
return LARGE_EPS + sign*local_score
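# Worked examples with LARGE_EPS = 1e12:
#   f1 = 0.85 (higher is better)  -> 1e12 - 0.85, so a better f1 gives a lower global score
#   rmse = 3.2 (lower is better)  -> 1e12 + 3.2,  so a lower rmse gives a lower global score
#   |local_score| = 5e12 > 1e12   -> returns 1e12 to flag the error case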
def save_pipeline(pipeline_file_path, pipeline, pipeline_rank=None):
"""
Save pipeline to pipeline_file_path. pipeline is a d3m object.
"""
to_json_structure = pipeline.to_json_structure()
if pipeline_rank is not None:
to_json_structure["pipeline_rank"] = pipeline_rank
with open(pipeline_file_path, "w") as f:
f.write(json.dumps(to_json_structure,
sort_keys=True, indent=4, separators=(',', ': ')))
def save_supporting_files(supporting_files_path):
"""
Save all supporting files to the given directory path.
Simply copy over the whole repository to this path.
"""
git_root_path = get_git_root(os.path.dirname(os.path.abspath(__file__)))
def ign_func(d, files):
# shutil.copytree passes (directory, contents) and skips whatever names
# this function returns: ignore non-.py files and dotfiles.
non_py_files = [f for f in files if os.path.isfile(os.path.join(d, f)) and not f.endswith(".py")]
leading_dot_files = [f for f in files if os.path.isfile(os.path.join(d, f)) and f[0] == "."]
return non_py_files + leading_dot_files
shutil.copytree(git_root_path, supporting_files_path, ignore=ign_func)
def save_executable(exe_path, pipeline_id):
"""
Save a bash script with a command that will run the pipeline
against a target directory and generate predictions.
"""
# TODO(maxlam): FINISH
command = "testing"
with open(exe_path, "w") as f:
f.write(command)
def clear_all_files_but_top(output_dir, n_to_keep=5):
"""
Clears all files but the top n_to_keep.
Assumes that lower scores are better.
"""
pipelines_files_path = "%s/pipelines_ranked" % output_dir
files = os.listdir(pipelines_files_path)
files_and_scores = [(x, float(x.replace(".json",""))) for x in files]
files_and_scores.sort(key=lambda x:x[1])
files_and_scores_to_delete = files_and_scores[n_to_keep:]
for f, score in files_and_scores_to_delete:
pipeline_id = str(score)
pipelines_files_path_to_delete = "%s/pipelines_ranked/%s.json" % (output_dir, pipeline_id)
for to_delete in [pipelines_files_path_to_delete]:
if os.path.isdir(to_delete):
shutil.rmtree(to_delete)
else:
if os.path.exists(to_delete):
os.remove(to_delete)
def save_scored_pipeline(outputdir, local_score, pipeline, problem_doc, keep_top=5, clear_directories=False):
"""
Save scored pipeline, executables and supporting files to the given output directory as specified by NIST.
Keep only the top keep_top configurations.
Creates the following directory structure if it doesn't exist:
outputdir/
pipelines_ranked
pipelines_scored
pipelines_searched
subpipelines
pipeline_runs
additional_inputs
The process is as follows:
1. Given local_score (e.g: 68.5), converts this to a global score (e.g: 1000-68.5 = 931.5)
in which a lower value implies a better prediction. This global score is the id of the pipeline.
Set the pipeline's id as such.
2. Given the id of the pipeline, create `outputdir`/pipelines_ranked/`pipeline_id`.json containing
the pipeline json.
3. Collect all saved pipelines; sort them; delete all but the top keep_top pipelines in pipelines_ranked
(pipelines_scored keeps every pipeline ever scored).
########################################################
# Note this function is not necessarily thread safe!!! #
########################################################
Furthermore: It is possible that this function may be in the middle of writing to output
when the program is terminated, leading to nasty bugs. Please give enough time for this
function before the deadline of the evaluation.
Arguments
- outputdir - Full path to output to save pipelines, supporting_files, executables
- local_score - The local score of the pipeline
- pipeline - The pipeline object (not json) representing pipeline to save
- problem_doc - Problem doc of the problem of the pipeline being saved
- keep_top - Number of total pipelines / supporting files to keep
- clear_directories - Should clear the directories or not
"""
# Create the directory structure
pipelines_ranked = "%s/pipelines_ranked" % outputdir
pipelines_scored = "%s/pipelines_scored" % outputdir
pipelines_searched = "%s/pipelines_searched" % outputdir
subpipelines = "%s/subpipelines" % outputdir
pipeline_runs = "%s/pipeline_runs" % outputdir
additional_inputs = "%s/additional_inputs" % outputdir
all_paths = [pipelines_ranked, pipelines_scored,
pipelines_searched, subpipelines,
pipeline_runs, additional_inputs]
# Clear the path outputs if specified
if clear_directories:
for path_to_clear in all_paths:
if os.path.exists(path_to_clear):
shutil.rmtree(path_to_clear)
# Create the dirs
for necessary_path in all_paths:
if not os.path.exists(necessary_path):
os.makedirs(necessary_path)
# Get pipeline id
pipeline_id = str(get_global_score(local_score, problem_doc))
pipeline.id = pipeline_id
target_pipeline_file_path = "%s/%s.json" % (pipelines_ranked, pipeline_id)
# If pipeline json with pipeline id already exists, don't do anything
if os.path.exists(target_pipeline_file_path):
return
paths_to_save = ["%s/%s.json" % (d, pipeline_id) for d in [pipelines_ranked, pipelines_searched, pipelines_scored]]
# Save the pipeline
for p in paths_to_save:
save_pipeline(p, pipeline, pipeline_rank=pipeline_id)
# The new D3M evaluation doesn't require saving supporting files or executables!
#save_supporting_files(target_supporting_files_path)
#save_executable(target_executable_file_path, pipeline_id)
# Clear all but the top
clear_all_files_but_top(outputdir, n_to_keep=keep_top)
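# Usage sketch (hypothetical values): persist a pipeline that scored 0.87
# locally, keeping at most the 5 best-ranked pipelines on disk.
#
#   save_scored_pipeline("/output", 0.87, pipeline, problem_doc, keep_top=5)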
def get_entrypoint_resource_id(dataset):
"""
Gets the entrypoint resource id of a dataset.
TODO(maxlam): Maybe put this in train utils?
"""
for resource_id in dataset.keys():
if ('https://metadata.datadrivendiscovery.org/types/DatasetEntryPoint' in
dataset.metadata.query(selector=(resource_id,)).get("semantic_types")):
return resource_id
# No resource is marked as the entry point; fall back to the last resource id.
return resource_id
def write_predictions_to_file(preds, fpath, problemdoc, dataset=None):
"""
Writes predictions to file path fpath.
Args:
- preds - Dataframe of predictions
TODO(maxlam): CLEAN THIS UP!
"""
preds.to_csv(fpath, index=False, header=True)
def run_with_limited_time(func, args, kwargs, time):
"""Runs a function with time limit
:param func: The function to run
:param args: The functions args, given as tuple
:param kwargs: The functions keywords, given as dict
:param time: The time limit in seconds
:return: True if the function ended successfully. False if it was terminated.
"""
p = Process(target=func, args=args, kwargs=kwargs)
p.start()
p.join(time)
if p.is_alive():
p.terminate()
return False
return True
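# Usage sketch (hypothetical worker function): give `fit_and_score` 60 seconds
# before its child process is terminated.
#
#   finished = run_with_limited_time(fit_and_score, (problem_doc, dataset), {}, 60)
#   if not finished:
#       print("fit_and_score timed out")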
def convert_problem_doc_proto_to_metadata(problem_doc):
# Convert problem_doc proto to json
json_problem_doc = {}
# General info
json_problem_doc["about"] = {
"problemID": problem_doc.problem.id,
"problemVersion": problem_doc.problem.version,
"problemName": problem_doc.problem.name,
"taskType": problem_doc.problem.task_type,
"taskSubType": problem_doc.problem.task_subtype,
"problemSchemaVersion": "3.1.1",
"problemDescription": problem_doc.problem.description
}
# Set inputs
inputs_data = []
for input_proto in problem_doc.inputs:
targets_data = []
for target_data in input_proto.targets:
targets_data.append({
"targetIndex": target_data.target_index,
"resID": target_data.resource_id,
"colIndex": target_data.column_index,
"colName": target_data.column_name,
})
inputs_data.append({
"datasetID": input_proto.dataset_id,
"targets": targets_data
})
# Set metrics
perf_metrics = []
# Map each proto metric enum back to its problem-doc string name
reverse_map = {v: k for k, v in PerformanceMetric.get_map().items()}
for performance_metric in problem_doc.problem.performance_metrics:
metric_string = reverse_map[PerformanceMetric(performance_metric.metric)]
perf_metrics.append({
"metric": metric_string
})
json_problem_doc["inputs"] = {
"data": inputs_data,
"performanceMetrics": perf_metrics
}
# From json use standard methods to parse to metadata
return Metadata(json_problem_doc)
def convert_dataset_uri_to_dataset(problem_doc, dataset_uri, mode="train"):
if "file://" in dataset_uri:
dataset_uri = dataset_uri[len("file://"):]
# Dataset
dataset_uri = 'file://{dataset_uri}'.format(dataset_uri=os.path.abspath(dataset_uri))
dataset = D3MDatasetLoader()
dataset = dataset.load(dataset_uri=dataset_uri)
dataset = add_target_columns_metadata(dataset, problem_doc)
return dataset
|
d3m-model-search-master
|
Stanford-D3M-Full/utils/utils.py
|
import utils.utils
import utils.primitive_pipeline_utils
import utils.train_utils
import executors.Executor
import executors.SimpleRandomSklearnExecutor
class ExtendedSklearnExecutor(executors.SimpleRandomSklearnExecutor.SimpleRandomSklearnExecutor):
"""
Executor that processes tuples of (problem_doc, dataset) and writes
(pipelines, scores) to the results. Has more data loading pipelines than
SimpleRandomSklearnExecutor.
"""
def get_data_loading_pipelines(self):
return super().get_data_loading_pipelines() + [
# General Relational Dataset
[
{
"stage_name" : "denormalize",
"primitive" : "d3m.primitives.data_transformation.denormalize.Common",
"input" : utils.primitive_pipeline_utils.PipelineWrapper.PIPELINE_INPUT,
},
{
"stage_name" : "dataset_to_dataframe",
"primitive" : "d3m.primitives.data_transformation.dataset_to_dataframe.Common",
"input" : "denormalize"
},
{
"stage_name" : "column_parser",
"primitive" : "d3m.primitives.data_transformation.column_parser.DataFrameCommon",
"input" : "dataset_to_dataframe",
},
{
"stage_name" : "targets",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Target']
}
},
{
"stage_name": "general",
"primitive": "d3m.primitives.sri.psl.GeneralRelationalDataset",
"input": utils.primitive_pipeline_utils.PipelineWrapper.PIPELINE_INPUT
},
{
"stage_name": "attributes",
"input": "general",
"primitive": "d3m.primitives.data_transformation.cast_to_type.Common",
"hyperparameters" : {
"type_to_cast" : "float"
}
},
],
]
|
d3m-model-search-master
|
Stanford-D3M-Full/executors/ExtendedSklearnExecutor.py
|
import utils.utils
import utils.primitive_pipeline_utils
import utils.train_utils
import executors.Executor
import executors.NistSaverExecutor
from d3m.metadata.pipeline import Pipeline, PrimitiveStep
from d3m.metadata.base import ArgumentType, Context
from d3m.runtime import Runtime
import uuid
import os
import sys
class ScoreFitProduceExecutor(executors.Executor.Executor):
"""
Executor that scores, fits-and-produces, or exports results.
Input to process_item is (mode, ...); the remaining arguments depend on the mode.
There are 3 modes:
"score" - input looks like (mode, problem_doc, metric, pipeline_json, dataset_train, dataset_test)
"fitproduce" - input looks like (mode, problem_doc, pipeline_json, dataset_train, dataset_test)
"export" - input looks like (mode, problem_doc, pipeline_json, score)
"""
def __init__(self, inputs_queue, results_queue):
super().__init__(inputs_queue, results_queue)
def process_item(self, input_item):
mode = input_item[0]
if mode == "score":
return self.score(input_item)
elif mode == "fitproduce":
return self.fitproduce(input_item)
elif mode == "export":
return self.export(input_item)
def get_predictions_save_path(self):
savepath = None
while savepath is None or os.path.exists(savepath):
savedir = os.environ["D3MOUTPUTDIR"]
savepath = "%s/predictions-%s.csv" % (savedir, uuid.uuid4())
return savepath
def score(self, input_item):
problem_doc, metric, pipeline_json, dataset_train, dataset_test = input_item[1:]
# Run pipeline
pipeline = Pipeline.from_json(pipeline_json)
pipeline_runtime = Runtime(pipeline, context=Context.TESTING)
pipeline_runtime.fit(inputs=[dataset_train], return_values=['outputs.0'])
score_predictions = pipeline_runtime.produce(inputs=[dataset_test], return_values=['outputs.0'])
score_predictions = score_predictions.values['outputs.0']
# Evaluate scores on score dir
achieved_score = utils.train_utils.score(score_predictions, dataset_test, problem_doc, override_metric_key=metric)
return achieved_score
def fitproduce(self, input_item):
problem_doc, pipeline_json, dataset_train, dataset_test = input_item[1:]
# Run pipeline
pipeline = Pipeline.from_json(pipeline_json)
pipeline_runtime = Runtime(pipeline, context=Context.TESTING)
pipeline_runtime.fit(inputs=[dataset_train], return_values=['outputs.0'])
score_predictions = pipeline_runtime.produce(inputs=[dataset_test], return_values=['outputs.0'])
score_predictions = score_predictions.values['outputs.0']
# Write predictions to output path
path = self.get_predictions_save_path()
utils.utils.write_predictions_to_file(score_predictions, path, problem_doc)
path_uri = "file://%s" % path
return path_uri
def export(self, input_item):
problem_doc, pipeline_json, score = input_item[1:]
# Call Nist Saver
saver = executors.NistSaverExecutor.NistSaverExecutor(None, None)
saver.process_item((os.environ["D3MOUTPUTDIR"], pipeline_json, problem_doc, score))
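# Illustrative sketch of the three input-tuple shapes this executor consumes
# (the variable names are placeholders, not values defined in this file):
#
#   inputs_queue.put(("score", problem_doc, metric, pipeline_json, dataset_train, dataset_test))
#   inputs_queue.put(("fitproduce", problem_doc, pipeline_json, dataset_train, dataset_test))
#   inputs_queue.put(("export", problem_doc, pipeline_json, score))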
|
d3m-model-search-master
|
Stanford-D3M-Full/executors/ScoreFitProduceExecutor.py
|
d3m-model-search-master
|
Stanford-D3M-Full/executors/__init__.py
|
|
import utils.utils
import utils.primitive_pipeline_utils
import utils.train_utils
import executors.Executor
class NistSaverExecutor(executors.Executor.Executor):
"""
Executor which saves (base_outputdir, pipeline_json, problem_doc, score) items to disk.
"""
def process_item(self, input_item):
base_outputdir, pipeline_json, problem_doc, score = input_item
# Convert pipeline_json to pipeline
pipeline = utils.primitive_pipeline_utils.load_pipeline(pipeline_json)
utils.utils.save_scored_pipeline(base_outputdir, score,
pipeline, problem_doc)
return None
|
d3m-model-search-master
|
Stanford-D3M-Full/executors/NistSaverExecutor.py
|
import utils.utils
import utils.primitive_pipeline_utils
import utils.train_utils
import executors.Executor
import executors.SimpleRandomSklearnExecutor
import numpy as np
import sys
import math
import random
from d3m.metadata.pipeline import Pipeline, PrimitiveStep
class HyperbandExecutor(executors.Executor.Executor):
"""
Executor that processes tuples of (problem_doc, dataset) and writes
(pipelines, scores) to the results. Uses Hyperband.
"""
def __init__(self, inputs_queue, results_queue,
hyperband_epochs_budget=60*20,
hyperband_proportion_discard=2):
"""
Initializes a hyperband executor. Upon initialization, will create a hyperband
schedule with the given parameters; this schedule will guide the hyperband search of
each task that is popped from the queue.
Specifically, for each (problem, dataset) that is popped from the queue, this executor
will perform the full hyperband search on the given task.
Note that hyperband_epochs_budget is actually the time budget in number of seconds.
todo(maxlam): Change hyperband_epochs_budget => hyperband_seconds_budget
"""
super().__init__(inputs_queue, results_queue)
print("Initializing Hyperband executor")
# Hyperband parameters
self.hyperband_epochs_budget = hyperband_epochs_budget
self.hyperband_proportion_discard = hyperband_proportion_discard
# Given the budget, generate the largest hyperband schedule
# within budget
self.hyperband_schedule = self.get_largest_schedule_within_budget(
self.hyperband_epochs_budget, self.hyperband_proportion_discard
)
# Print the search schedule
self.pretty_print_schedule(self.hyperband_schedule)
def create_hyperband_infinite_schedule(self):
"""
An infinite generator that continuously yields the next (resources, number of threads)
pair for each successive band. This is not currently used anywhere but could be useful.
"""
k = 1
while True:
for s in range(1, k):
if k-s >= np.log2(s):
bk_s = 2**k
nk_s = 2**s
yield (bk_s, nk_s)
k += 1
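# For reference, the first few pairs this generator yields are:
#   k=2: (4, 2)
#   k=3: (8, 2), (8, 4)
#   k=4: (16, 2), (16, 4)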
def pretty_print_schedule(
self, hyperband_schedule, describe_hyperband=True
):
"""
Prints scheduler for user to read.
"""
print("=========================================")
print("| Hyperband Schedule |")
print("=========================================")
if describe_hyperband:
# Print a message indicating what the below schedule means
print(
"Table consists of tuples of "
"(num configs, num_resources_per_config)"
"which specify how many configs to run and"
"for how many epochs. "
)
print(
"Each bracket starts with a list of random "
"configurations which is successively halved "
"according the schedule."
)
print(
"See the Hyperband paper "
"(https://arxiv.org/pdf/1603.06560.pdf) for more details."
)
print("-----------------------------------------")
for bracket_index, bracket in enumerate(hyperband_schedule):
bracket_string = "Bracket %d:" % bracket_index
for n_i, r_i in bracket:
bracket_string += " (%d, %d)" % (n_i, r_i)
print(bracket_string)
print("-----------------------------------------")
def get_largest_schedule_within_budget(self, budget, proportion_discard):
"""
Gets the largest hyperband schedule within target_budget.
This is required since the original hyperband algorithm uses R,
the maximum number of resources per configuration.
TODO(maxlam): Possibly binary search it if this becomes a bottleneck.
Args:
budget: total budget of the schedule.
proportion_discard: hyperband parameter that specifies
the proportion of configurations to discard per iteration.
"""
# Exhaustively generate schedules and check if
# they're within budget, adding to a list.
valid_schedules_and_costs = []
for R in range(1, budget):
schedule = self.generate_hyperband_schedule(R, proportion_discard)
cost = self.compute_schedule_cost(schedule)
if cost <= budget:
valid_schedules_and_costs.append((schedule, cost))
# Choose a valid schedule that maximizes usage of the budget.
valid_schedules_and_costs.sort(key=lambda x: x[1], reverse=True)
return valid_schedules_and_costs[0][0]
def compute_schedule_cost(self, schedule):
# Sum up all n_i * r_i for each band.
flattened = [item for sublist in schedule for item in sublist]
return sum([x[0] * x[1] for x in flattened])
def generate_hyperband_schedule(self, R, eta):
"""
Generate hyperband schedule according to the paper.
Args:
R: maximum resources per config.
eta: proportion of configurations to discard per
iteration of successive halving.
Returns: hyperband schedule, which is represented
as a list of brackets, where each bracket
contains a list of (num configurations,
num resources to use per configuration).
See the paper for more details.
"""
schedule = []
s_max = int(math.floor(math.log(R, eta)))
# B = (s_max + 1) * R
for s in range(0, s_max + 1):
n = math.ceil(int((s_max + 1) / (s + 1)) * eta ** s)
r = R * eta ** (-s)
bracket = []
for i in range(0, s + 1):
n_i = int(math.floor(n * eta ** (-i)))
r_i = int(r * eta ** i)
bracket.append((n_i, r_i))
schedule = [bracket] + schedule
return schedule
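# Worked example (values traced by hand through the code above): with R=9
# and eta=3, s_max = floor(log_3(9)) = 2 and
#
#   generate_hyperband_schedule(9, 3)
#   == [
#       [(9, 1), (3, 3), (1, 9)],   # bracket s=2: start wide and shallow
#       [(3, 3), (1, 9)],           # bracket s=1
#       [(3, 9)],                   # bracket s=0: few configs, full resources
#   ]
#
# compute_schedule_cost sums n_i * r_i over all bands: 27 + 18 + 27 = 72.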
def search(self, problem_doc, dataset):
"""
Perform hyperband search with problem_doc and dataset.
Since hyperband usually targets hyperparameter tuning (and we are doing architecture
search), the first band of each bracket samples random architectures with default
hyperparameters to execute. Subsequent bands of the bracket reuse the surviving
architectures with randomly sampled hyperparameters.
"""
best_score, best_pipeline = float("inf"), None
# Split into train and validation
dataset_train, dataset_validate = (
utils.train_utils.split_dataset(dataset, problem_doc))
# Loop over each bracket
n_models_scored = 0
for bracket_index, bracket in enumerate(self.hyperband_schedule):
# Sample random configurations to seed SuccessiveHalving
n_starting_configurations, _ = bracket[0]
configurations = [HyperbandPipeliner() for i in
range(n_starting_configurations)]
# Successive Halving
for band_index, (n_i, r_i) in enumerate(bracket):
assert len(configurations) <= n_i
# Evaluate each configuration for r_i epochs
scored_configurations = []
for i, configuration in enumerate(configurations):
cur_model_index = n_models_scored
# Train model and get the score. Sample hyperparameters if not
# on the first band.
try:
# hack: Give at least 30 seconds to each job
# todo(maxlam): figure better solution
results = configuration.run(dataset_train,
eval_datasets=[dataset_validate],
return_pipeline=True,
sample_random_hyperparameters=band_index != 0,
tlimit_secs=r_i+30)
predictions, pipeline = results[0][0].values['outputs.0'], results[1]
score = utils.train_utils.score(predictions, dataset_validate, problem_doc)
# Normalize the score (lower is better for normalized scores)
normalized_score = utils.utils.get_global_score(score, problem_doc)
except Exception as e:
pipeline = None
score = float("inf")
normalized_score = float("inf")
print(e)
print("Hyperband executor achieved score: %s" % (str(score)))
if normalized_score < best_score:
best_score = normalized_score
best_pipeline = pipeline
# Add current best score to queue
self.results_queue.put(self.to_return_result(score, best_pipeline), True, executors.Executor.QUEUE_TIMEOUT)
# Add score and model to list
scored_configurations.append(
(normalized_score, cur_model_index, configuration)
)
n_models_scored += 1
# Sort scored configurations by score
scored_configurations.sort(key=lambda x: x[0])
# Successively halve the configurations
if band_index + 1 < len(bracket):
n_to_keep, _ = bracket[band_index + 1]
configurations = [x[2] for x in scored_configurations][:n_to_keep]
# Return the best pipeline
return best_score, best_pipeline
def to_return_result(self, score, pipeline):
return_result = (None, None)
if score is not None and pipeline is not None:
return_result = (score, pipeline.to_json(indent=4, sort_keys=True, ensure_ascii=False))
return return_result
def process_item(self, input_item):
problem_doc, dataset = input_item
# Perform search
score, pipeline = self.search(problem_doc, dataset)
return_result = self.to_return_result(score, pipeline)
return return_result
class HyperbandPipeliner(object):
"""
Represents an object which can construct random pipelines and run them on inputs.
This pipeliner has the following features:
- Support creating random architecture pipelines
- Support running pipelines with random hyperparameters or default hyperparameters
- Support running pipelines with time limit
For now, this pipeliner only supports sklearn primitives.
todo(maxlam): support other primitives
"""
def __init__(self):
"""
Create HyperbandPipeliner
"""
self.data_loading_pipelines = self.get_data_loading_pipelines()
self.sklearn_primitives = utils.primitive_pipeline_utils.get_sklearn_primitives_names()
def run(self, dataset_train, eval_datasets=[], return_pipeline=False,
sample_random_hyperparameters=False, tlimit_secs=120):
"""
Samples a simple sklearn based classification pipeline and runs it
on the given input datasets.
"""
# Load data loading pipeline
data_pipeline_to_use = random.choice(self.data_loading_pipelines)
pipeline_wrapper = utils.primitive_pipeline_utils.PipelineWrapper(data_pipeline_to_use)
expected_stage_outputs = set(["attributes", "targets"])
stage_outputs = set(pipeline_wrapper.get_branches())
if expected_stage_outputs != stage_outputs:
raise Exception("Expected stage outputs (%s) does not match actual (%s)." % (str(expected_stage_outputs),
str(stage_outputs)))
# Sample a random sklearn primitive
random_sklearn_primitive = random.choice(self.sklearn_primitives)
sklearn_stage_dict = {
"input" : "attributes",
"primitive": random_sklearn_primitive,
"arguments": {
"outputs": "targets"
}
}
pipeline_wrapper.add_stage(sklearn_stage_dict)
# Extract the full pipeline from the wrapper; set hyperparameters for the sklearn primitive.
pipeline = pipeline_wrapper.load_pipeline_architecture(pipeline_wrapper.pipeline_architecture_dict)
if sample_random_hyperparameters:
# Sample hyperparameters for the last sklearn primitive
sklearn_hyperparam_dict = self.sample_hyperparameters_for_step_as_dict(pipeline, len(pipeline.steps)-1)
# Recreate pipeline wrapper
pipeline_wrapper = utils.primitive_pipeline_utils.PipelineWrapper(data_pipeline_to_use)
sklearn_with_hyperparams_dict_stage = dict(sklearn_stage_dict)
sklearn_with_hyperparams_dict_stage["hyperparameters"] = sklearn_hyperparam_dict
pipeline_wrapper.add_stage(sklearn_with_hyperparams_dict_stage)
# Run the pipeline with a time limit
# It is difficult to pass objects between processes. So we use a hack:
# - Run the training process with half the time. Check if finished.
# - If finished, train and evaluate in the current process, otherwise return None
print("HyperbandExecutor: Running pipeline with time limit: %s" % str(tlimit_secs))
finished = utils.utils.run_with_limited_time(pipeline_wrapper.run, (dataset_train,), {"eval_datasets":eval_datasets,
"return_pipeline":return_pipeline}, tlimit_secs/2)
if finished:
results = pipeline_wrapper.run(dataset_train, eval_datasets=eval_datasets,
return_pipeline=return_pipeline)
return results
return None, None
def sample_hyperparameters_for_step_as_dict(self, pipeline, step_index):
"""
Samples hyperparameters for a given step and adds them to a dict.
We have to do this because d3m's `add_hyperparameter` method screws up the entire
pipeline. So an easy way to extract, sample and add hyperparameters is to
recreate the whole pipeline.
"""
step = pipeline.steps[step_index]
hyperparams = step.get_primitive_hyperparams().configuration
hyperparam_dict = {}
for hyperparam_name, hyperparam in hyperparams.items():
# If the hyperparameter already exists, delete it
if hyperparam_name in step.hyperparams:
del step.hyperparams[hyperparam_name]
# If hyperparameter name in blacklist, skip
if hyperparam_name in ["return_result", "use_semantic_types", "add_index_columns", "use_columns", "exclude_columns"]:
continue
# Sample and use it
sampled_value = hyperparam.sample()
if sampled_value is not None:
if "numpy" in str(type(sampled_value)):
sampled_value = np.asscalar(sampled_value)
print("Sample for primitive %s: %s => %s" % (step.primitive_description["name"], hyperparam_name, str(sampled_value)))
hyperparam_dict[hyperparam_name] = sampled_value
return hyperparam_dict
def get_data_loading_pipelines(self):
return [
# Simple sklearn data loading
[
{
"stage_name" : "denormalize",
"primitive" : "d3m.primitives.data_transformation.denormalize.Common",
"input" : utils.primitive_pipeline_utils.PipelineWrapper.PIPELINE_INPUT,
},
{
"stage_name" : "dataset_to_dataframe",
"primitive" : "d3m.primitives.data_transformation.dataset_to_dataframe.Common",
"input" : "denormalize"
},
{
"stage_name" : "column_parser",
"primitive" : "d3m.primitives.data_transformation.column_parser.DataFrameCommon",
"input" : "dataset_to_dataframe",
},
{
"stage_name" : "extract_attributes",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Attribute']
}
},
{
"stage_name" : "targets",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Target']
}
},
{
"stage_name": "cast_to_type",
"primitive": "d3m.primitives.data_transformation.cast_to_type.Common",
"input": "extract_attributes",
"hyperparameters" : {
"type_to_cast" : "float"
}
},
{
"stage_name": "attributes",
"primitive": "d3m.primitives.byudml.imputer.RandomSamplingImputer",
"input": "cast_to_type"
},
],
# General Relational Dataset
[
{
"stage_name" : "denormalize",
"primitive" : "d3m.primitives.data_transformation.denormalize.Common",
"input" : utils.primitive_pipeline_utils.PipelineWrapper.PIPELINE_INPUT,
},
{
"stage_name" : "dataset_to_dataframe",
"primitive" : "d3m.primitives.data_transformation.dataset_to_dataframe.Common",
"input" : "denormalize"
},
{
"stage_name" : "column_parser",
"primitive" : "d3m.primitives.data_transformation.column_parser.DataFrameCommon",
"input" : "dataset_to_dataframe",
},
{
"stage_name" : "targets",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Target']
}
},
{
"stage_name": "attributes",
"primitive": "d3m.primitives.sri.psl.GeneralRelationalDataset",
"input": utils.primitive_pipeline_utils.PipelineWrapper.PIPELINE_INPUT
},
],
[
{
"stage_name" : "denormalize",
"primitive" : "d3m.primitives.data_transformation.denormalize.Common",
"input" : utils.primitive_pipeline_utils.PipelineWrapper.PIPELINE_INPUT,
},
{
"stage_name" : "dataset_to_dataframe",
"primitive" : "d3m.primitives.data_transformation.dataset_to_dataframe.Common",
"input" : "denormalize"
},
{
"stage_name" : "column_parser",
"primitive" : "d3m.primitives.data_transformation.column_parser.DataFrameCommon",
"input" : "dataset_to_dataframe",
},
{
"stage_name" : "extract_attributes",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Attribute']
}
},
{
"stage_name" : "targets",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Target']
}
},
{
"stage_name": "cast_to_type",
"primitive": "d3m.primitives.data_transformation.cast_to_type.Common",
"input": "extract_attributes",
"hyperparameters" : {
"type_to_cast" : "float"
}
},
{
"stage_name": "attributes",
"input": "cast_to_type",
"primitive": "d3m.primitives.data_cleaning.imputer.SKlearn",
},
]
]
|
d3m-model-search-master
|
Stanford-D3M-Full/executors/HyperbandExecutor.py
|
import utils.utils
import utils.primitive_pipeline_utils
import utils.train_utils
import executors.Executor
from experimental.lstm_predictor.LSTMPredictWorker import LSTM_filter
import random
import json
MAX_TRIAL = 10
class SklearnStackedLSTMExecutor(executors.Executor.Executor):
"""
Executor that processes tuples of (problem_doc, dataset) and writes
(pipelines, scores) to the results.
"""
def __init__(self, inputs_queue, results_queue, lstm_path, override_sklearn_primitives_set=None, num_candidates=10, tlimit=None):
super().__init__(inputs_queue, results_queue)
# Option to override set of sklearn primitives to sample from
self.override_sklearn_primitives_set = override_sklearn_primitives_set
self.tlimit = tlimit
self.num_candidates = num_candidates
self.lstm_path = lstm_path
# DanKang - Create a local Cache
# Note that this is local, and is not shared across processes.
# However, I am keeping it local for two reasons:
# 1. If we use multiprocessing.Manager().dict() to keep a global cache, the
# lookup time will be too slow and will cause a bottleneck
# 2. As we get more primitives, the probability of generating the same
# pipeline decreases, so there is not much need for a global check
self.cache = set()
def get_data_loading_pipelines(self):
return [
[
{
"stage_name" : "denormalize",
"primitive" : "d3m.primitives.data_transformation.denormalize.Common",
"input" : utils.primitive_pipeline_utils.PipelineWrapper.PIPELINE_INPUT,
},
{
"stage_name" : "dataset_to_dataframe",
"primitive" : "d3m.primitives.data_transformation.dataset_to_dataframe.Common",
"input" : "denormalize"
},
{
"stage_name" : "column_parser",
"primitive" : "d3m.primitives.data_transformation.column_parser.DataFrameCommon",
"input" : "dataset_to_dataframe",
},
{
"stage_name" : "extract_attributes",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Attribute']
}
},
{
"stage_name" : "targets",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Target']
}
},
{
"stage_name": "cast_to_type",
"primitive": "d3m.primitives.data_transformation.cast_to_type.Common",
"input": "extract_attributes",
"hyperparameters" : {
"type_to_cast" : "float"
}
},
{
"stage_name": "attributes",
"input": "cast_to_type",
"primitive": "d3m.primitives.data_cleaning.imputer.SKlearn",
},
]
]
def create_pipeline(self, sklearn_primitive):
# Randomly choose a data loading pipeline, and append given sklearn_primitive
data_load_pipeline = random.choice(self.get_data_loading_pipelines())
pipeline_names = [step["primitive"] for step in data_load_pipeline]
pipeline_names.append(sklearn_primitive)
return pipeline_names
def get_key(self, pipeline):
# Reconstruct the data loading pipeline from names
return "_".join([step["primitive"] for step in pipeline])
def get_key_from_str(self, str_pipeline):
return "_".join(str_pipeline)
def process_item(self, input_item):
problem_doc, dataset = input_item
# Split into train and validation
dataset_train, dataset_validate = (
utils.train_utils.split_dataset(dataset, problem_doc))
# Create a list of pipelines
print("LSTM>Creating candidate pipelines")
if self.override_sklearn_primitives_set is not None:
sklearn_primitives = self.override_sklearn_primitives_set
else:
sklearn_primitives = utils.primitive_pipeline_utils.get_sklearn_primitives_names()
num_pipeline, num_trial = 0, 0
candidate_pipelines = []
while num_pipeline < self.num_candidates:
new_pipeline = self.create_pipeline(random.choice(sklearn_primitives))
new_pipeline_key = self.get_key_from_str(new_pipeline)
if new_pipeline_key not in self.cache or num_trial > MAX_TRIAL:
num_pipeline += 1
candidate_pipelines.append(new_pipeline)
num_trial = 0
num_trial += 1
# select the pipeline that lstm predicts to be the best
print("LSTM>Filtering best pipeline from candidates")
best_pipeline = LSTM_filter(self.lstm_path, candidate_pipelines, 1, is_json=False)[0]
best_sklearn_predictor = best_pipeline[-1]
print("LSTM>Best candidate found")
print(best_pipeline)
loading_pipelines_dict = {self.get_key(pipeline):pipeline for pipeline in self.get_data_loading_pipelines()}
best_pipeline_key = self.get_key_from_str(best_pipeline[0:-1])
kwargs = {
"data_loading_pipeline": loading_pipelines_dict[best_pipeline_key],
"sklearn_predictor": best_sklearn_predictor
}
pipeliner = utils.primitive_pipeline_utils.SKLearnPipeliner(**kwargs)
# mark the pipeline as visited
self.cache.add(self.get_key_from_str(best_pipeline))
# Run
# TODO(maxlam): Put a timeout?
try:
# Enforce time limit. Hack: Run with half the time limit to see if it finishes.
if self.tlimit is not None:
finished = utils.utils.run_with_limited_time(
    pipeliner.run, (dataset_train,),
    {"eval_datasets": [dataset_validate], "return_pipeline": True},
    self.tlimit / 2)
if not finished:
raise Exception("TLE")
results = pipeliner.run(dataset_train, eval_datasets=[dataset_validate], return_pipeline=True)
predictions, pipeline = results[0][0].values['outputs.0'], results[1]
score = utils.train_utils.score(predictions, dataset_validate, problem_doc)
except Exception as e:
predictions = None
pipeline = None
score = None
print(e)
# Score the predictions
return_result = (None, None)
if score is not None:
return_result = (score, pipeline.to_json(indent=4, sort_keys=True, ensure_ascii=False))
return return_result
|
d3m-model-search-master
|
Stanford-D3M-Full/executors/SklearnStackedLSTMExecutor.py
|
import sys
import os
from multiprocessing import Process, Queue, JoinableQueue
from queue import Empty
import logging
import uuid
import d3m.utils
from d3m.utils import redirect_to_logging
QUEUE_TIMEOUT = 3
"""
The Executor class: a thin framework around a Process that makes it easier to
consume input items from a queue and push results to an output queue.
"""
class Executor(Process):
def __init__(self, inputs_queue, results_queue):
Process.__init__(self)
self.inputs_queue = inputs_queue
self.results_queue = results_queue
def process_item(self, input_item):
"""
Processes an item from the inputs_queue, returns an object to be added to the results_queue.
"""
raise NotImplementedError()
def run(self):
while True:
try:
input_item = self.inputs_queue.get(True, QUEUE_TIMEOUT)
result = self.process_item(input_item)
self.results_queue.put(result, True, QUEUE_TIMEOUT)
self.inputs_queue.task_done()
except Empty:
continue
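# Minimal usage sketch (illustrative only; DoublingExecutor is hypothetical
# and exists purely to demonstrate the process_item contract). Assumes a
# fork-based multiprocessing start method.
if __name__ == "__main__":
    class DoublingExecutor(Executor):
        def process_item(self, input_item):
            return input_item * 2

    inputs, results = JoinableQueue(), JoinableQueue()
    worker = DoublingExecutor(inputs, results)
    worker.start()
    for i in range(3):
        inputs.put(i)
    inputs.join()  # blocks until every queued item has been processed
    while not results.empty():
        print(results.get(True, QUEUE_TIMEOUT))
    worker.terminate()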
|
d3m-model-search-master
|
Stanford-D3M-Full/executors/Executor.py
|
import utils.utils
import utils.primitive_pipeline_utils
import utils.train_utils
import executors.Executor
class SimpleRandomSklearnExecutor(executors.Executor.Executor):
"""
Executor that processes tuples of (problem_doc, dataset) and writes
(pipelines, scores) to the results.
"""
def __init__(self, inputs_queue, results_queue, override_sklearn_primitives_set=None, tlimit=None):
super().__init__(inputs_queue, results_queue)
# Option to override set of sklearn primitives to sample from
self.override_sklearn_primitives_set = override_sklearn_primitives_set
self.tlimit = tlimit
'''
Commenting out part of the pipeline for now, as we do not have RandomSamplingImputer defined in primitives yet
[
{
"stage_name" : "denormalize",
"primitive" : "d3m.primitives.data_transformation.denormalize.Common",
"input" : utils.primitive_pipeline_utils.PipelineWrapper.PIPELINE_INPUT,
},
{
"stage_name" : "dataset_to_dataframe",
"primitive" : "d3m.primitives.data_transformation.dataset_to_dataframe.Common",
"input" : "denormalize"
},
{
"stage_name" : "column_parser",
"primitive" : "d3m.primitives.data_transformation.column_parser.DataFrameCommon",
"input" : "dataset_to_dataframe",
},
{
"stage_name" : "extract_attributes",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Attribute']
}
},
{
"stage_name" : "targets",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Target']
}
},
{
"stage_name": "attributes",
"primitive": "d3m.primitives.data_transformation.cast_to_type.Common",
"input": "extract_attributes",
"hyperparameters" : {
"type_to_cast" : "float"
}
},
{
"stage_name": "attributes",
"primitive": "d3m.primitives.byudml.imputer.RandomSamplingImputer",
"input": "cast_to_type"
},
],
'''
def get_data_loading_pipelines(self):
return [
[
{
"stage_name" : "denormalize",
"primitive" : "d3m.primitives.data_transformation.denormalize.Common",
"input" : utils.primitive_pipeline_utils.PipelineWrapper.PIPELINE_INPUT,
},
{
"stage_name" : "dataset_to_dataframe",
"primitive" : "d3m.primitives.data_transformation.dataset_to_dataframe.Common",
"input" : "denormalize"
},
{
"stage_name" : "column_parser",
"primitive" : "d3m.primitives.data_transformation.column_parser.DataFrameCommon",
"input" : "dataset_to_dataframe",
},
{
"stage_name" : "extract_attributes",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Attribute']
}
},
{
"stage_name" : "targets",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Target']
}
},
{
"stage_name": "cast_to_type",
"primitive": "d3m.primitives.data_transformation.cast_to_type.Common",
"input": "extract_attributes",
"hyperparameters" : {
"type_to_cast" : "float"
}
},
{
"stage_name": "attributes",
"input": "cast_to_type",
"primitive": "d3m.primitives.data_cleaning.imputer.SKlearn",
},
]
]
def process_item(self, input_item):
problem_doc, dataset = input_item
# Split into train and validation
dataset_train, dataset_validate = (
utils.train_utils.split_dataset(dataset, problem_doc))
# Run
kwargs = { "data_loading_pipeline_candidates": self.get_data_loading_pipelines() }
pipeliner = utils.primitive_pipeline_utils.SKLearnPipeliner(**kwargs)
if self.override_sklearn_primitives_set is not None:
pipeliner.sklearn_primitives = self.override_sklearn_primitives_set
# TODO(maxlam): Put a timeout?
try:
# Enforce time limit. Hack: Run with half the time limit to see if it finishes.
if self.tlimit is not None:
finished = utils.utils.run_with_limited_time(
    pipeliner.run, (dataset_train,),
    {"eval_datasets": [dataset_validate], "return_pipeline": True},
    self.tlimit / 2)
if not finished:
raise Exception("TLE")
results = pipeliner.run(dataset_train, eval_datasets=[dataset_validate], return_pipeline=True)
predictions, pipeline = results[0][0].values['outputs.0'], results[1]
score = utils.train_utils.score(predictions, dataset_validate, problem_doc)
except Exception as e:
predictions = None
pipeline = None
score = None
print(e)
# Score the predictions
return_result = (None, None)
if score is not None:
return_result = (score, pipeline.to_json(indent=4, sort_keys=True, ensure_ascii=False))
return return_result
|
d3m-model-search-master
|
Stanford-D3M-Full/executors/SimpleRandomSklearnExecutor.py
|
import matplotlib.pyplot as plt
import numpy as np
import argparse
import json
'''
Given multiple benchmark result files, compare them and plot the cumulative best score over time
'''
if __name__ == "__main__":
benchmark_results = ["190225_lstm_sklearn_n_threads_1_tlimit_100.json", "190225_simple_sklearn_n_threads_1_tlimit_100.json"]
results = []
for result in benchmark_results:
with open("benchmarks/" + result, "r") as f:
data = json.load(f)
timesteps = (data["data"]["timestamps"])
scores = (data["data"]["scores"])
results.append((timesteps, scores))
fig, ax = plt.subplots(nrows=1, ncols=1)
for result in results:
(timesteps, scores) = result
cumulative_max_scores = list(np.maximum.accumulate(scores))
ax.plot(timesteps, cumulative_max_scores)
print([str(timestep) + ": " + str(score) for timestep, score in zip(timesteps, cumulative_max_scores)])
ax.legend(benchmark_results, loc='lower right')
fig.savefig('benchmarks/compare.png')
print("Finished comparing models")
|
d3m-model-search-master
|
Stanford-D3M-Full/apps/compare_benchmarks.py
|
import time
import json
import sys
import os
import argparse
from multiprocessing import Process, Queue, JoinableQueue
from d3m.metadata.pipeline import Pipeline, PrimitiveStep
from d3m.metadata.base import ArgumentType, Context
from d3m.runtime import Runtime
import executors.ExtendedSklearnExecutor
import executors.SimpleRandomSklearnExecutor
import executors.NistSaverExecutor
import executors.Executor
import utils.utils
import utils.train_utils
if __name__ == "__main__":
# Get args
try:
path_to_pipeline_json = sys.argv[1]
inputdir = sys.argv[2]
# Load datasets
problem_doc, dataset = utils.utils.load_data_from_dir(inputdir)
# Create pipeline
with open(path_to_pipeline_json, "r") as f:
pipeline = Pipeline.from_json(f.read())
pipeline_runtime = Runtime(pipeline, context=Context.TESTING)
pipeline_runtime.fit(inputs=[dataset], return_values=['outputs.0'])
problem_doc_score, dataset_score = utils.utils.load_data_from_dir(inputdir, mode="score")
score_predictions = pipeline_runtime.produce(inputs=[dataset_score], return_values=['outputs.0'])
score_predictions = score_predictions.values['outputs.0']
# Evaluate scores on score dir
achieved_score = utils.train_utils.score(score_predictions, dataset_score, problem_doc_score)
print(achieved_score)
except Exception:
print("N/A")
|
d3m-model-search-master
|
Stanford-D3M-Full/apps/compute_score.py
|
import time
import json
import sys
import os
import argparse
import copy
from multiprocessing import Process, Queue, JoinableQueue
import executors.ExtendedSklearnExecutor
import executors.SimpleRandomSklearnExecutor
import executors.HyperbandExecutor
import executors.NistSaverExecutor
import executors.SklearnStackedLSTMExecutor
import executors.Executor
import utils.utils
import datetime
import json
SIMPLE_SKLEARN = 'simple_sklearn'
EXTENDED_SKLEARN = 'extended_sklearn'
HYPERBAND = 'hyperband'
LSTM_SKLEARN = 'lstm_sklearn'
parser = argparse.ArgumentParser()
parser.add_argument("--n_threads", help="Number of threads for parallel search", type=int, default=8)
parser.add_argument("--tlimit", help="Time limit in seconds", type=int, default=60*5)
parser.add_argument("--executor", help="Which executor to run", type=str, default="simple_sklearn", choices=[SIMPLE_SKLEARN, EXTENDED_SKLEARN, HYPERBAND, LSTM_SKLEARN])
parser.add_argument("--save_all_scored_pipelines_path", help="Path to json file to save a list of scored pipelines. If None then scored pipelines are not saved.", type=str, default=None)
parser.add_argument("--tlimit_per_pipeline", help="Time limit for executing a single pipeline. Only works for simple_sklearn and extended_sklearn executors.", type=int, default=None)
parser.add_argument("--lstm_path", help="Path (up to file name) for the lstm model to use", type=str, default=None)
parser.add_argument("--save_benchmark",help="Whether to save the benchmarkoutput or not", action="store_true")
def select_executor(executor_string):
if executor_string == SIMPLE_SKLEARN:
return executors.SimpleRandomSklearnExecutor.SimpleRandomSklearnExecutor
if executor_string == EXTENDED_SKLEARN:
return executors.ExtendedSklearnExecutor.ExtendedSklearnExecutor
if executor_string == HYPERBAND:
return executors.HyperbandExecutor.HyperbandExecutor
if executor_string == LSTM_SKLEARN:
return executors.SklearnStackedLSTMExecutor.SklearnStackedLSTMExecutor
def main(raw_args=None):
args = parser.parse_args(raw_args)
print("Starting search with n_threads=%d, tlimit=%d" % (args.n_threads, args.tlimit))
# Input directory
inputdir = os.environ["D3MINPUTDIR"]
outputdir = os.environ["D3MOUTPUTDIR"]
# Get problem doc and dataset_uri
problem_doc, dataset = utils.utils.load_data_from_dir(inputdir)
# Create executor
inputs_queue = JoinableQueue()
results_queue = JoinableQueue()
executor = select_executor(args.executor)
kwargs = {}
if args.tlimit_per_pipeline is not None:
assert args.executor in [SIMPLE_SKLEARN, EXTENDED_SKLEARN, LSTM_SKLEARN]
kwargs["tlimit"] = args.tlimit_per_pipeline
if args.lstm_path is not None:
assert args.executor == LSTM_SKLEARN
kwargs["lstm_path"] = args.lstm_path
sklearn_executors = [
executor(
inputs_queue, results_queue, **kwargs
) for i in range(args.n_threads)]
# Executor for saving configurations
save_pipelines_queue = JoinableQueue()
unused_queue = JoinableQueue()
nist_saver = executors.NistSaverExecutor.NistSaverExecutor(
save_pipelines_queue, unused_queue)
# Start executors
for executor in sklearn_executors:
executor.start()
nist_saver.start()
# Number of requests in the queue at a time
target_queue_length = 100
start_time = time.time()
# Processed results
processed_results = []
# save progress
timestamps = []
scores = []
while time.time() - start_time <= args.tlimit:
# Refill the inputs queue
if inputs_queue.empty():
print("Refilling queue...")
for i in range(target_queue_length):
inputs_queue.put((problem_doc, dataset))
while not results_queue.empty():
result = results_queue.get(True, executors.Executor.QUEUE_TIMEOUT)
if None not in result:
processed_results.append(result)
print("Got result! Have %d results." % len(processed_results))
# Save it
score, pipeline_json = result
item = (
outputdir,
pipeline_json,
problem_doc,
score
)
save_pipelines_queue.put(item)
timestamps.append(time.time() - start_time)
scores.append(score)
time.sleep(executors.Executor.QUEUE_TIMEOUT)
# Terminate executors
for executor in sklearn_executors:
executor.terminate()
print("#"*100)
print("Done")
print("#"*100)
print(processed_results)
print("#"*100)
print("Collected %d total results" % len(processed_results))
if args.save_all_scored_pipelines_path is not None:
# Gather all pipeline jsons
all_pipeline_dicts = []
for result in processed_results:
score, pipeline_json = result
# Override pipeline_rank with normalized score
normalized_score = utils.utils.get_global_score(score, problem_doc)
pipeline_dict = json.loads(pipeline_json)
pipeline_dict["pipeline_rank"] = normalized_score
pipeline_json_modded = json.dumps(pipeline_dict)
all_pipeline_dicts.append(pipeline_json_modded)
with open(args.save_all_scored_pipelines_path, "w") as save_pipelines_file:
json.dump(all_pipeline_dicts, save_pipelines_file)
if args.save_benchmark:
filename = datetime.datetime.today().strftime('%y%m%d') + "_" + args.executor
for key,val in vars(args).items():
if key in ["n_threads", "tlimit", "tlimit_per_pipeline"] and val is not None:
filename += "_{}_{}".format(key, val)
content = {
"arguments": vars(args),
"data": {
"timestamps": timestamps,
"scores": scores
}
}
with open("./apps/benchmarks/"+filename+".json", 'w') as f:
json.dump(content, f)
if __name__ == "__main__":
main()
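# Example invocation (flags defined above; D3MINPUTDIR and D3MOUTPUTDIR must
# point at the problem/dataset directory and a writable output directory):
#
#   D3MINPUTDIR=/inputs D3MOUTPUTDIR=/outputs \
#       python -m apps.basic_run --executor simple_sklearn --n_threads 4 --tlimit 300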
|
d3m-model-search-master
|
Stanford-D3M-Full/apps/basic_run.py
|
import sys
import json
import pandas
import numpy as np
import os
import traceback
from d3m.runtime import Runtime
import utils.utils
import utils.train_utils
import utils.primitive_pipeline_utils
if __name__ == "__main__":
# Get args
path_to_pipeline_json = sys.argv[1]
inputdir = sys.argv[2]
outputdir = sys.argv[3]
################
# Load dataset #
################
problem_doc_train, dataset_train = (
utils.utils.load_data_from_dir(inputdir, mode="train"))
###################
# Create pipeline #
###################
with open(path_to_pipeline_json, "r") as f:
pipeline = utils.primitive_pipeline_utils.load_pipeline(f.read())
pipeline_runtime = Runtime(pipeline)
pipeline_runtime.fit(inputs=[dataset_train])
############################
# Try scoring on SCORE set #
############################
try:
problem_doc_score, dataset_score = utils.utils.load_data_from_dir(inputdir, mode="score")
score_predictions = pipeline_runtime.produce(inputs=[dataset_score]).values['outputs.0']
validation_score = utils.train_utils.get_score_on_score_set(inputdir, problem_doc_score, score_predictions)
print("PipelineId: %s, Score: %s" % (pipeline.id, validation_score))
except Exception:
print("-------------------------------")
print("Failed to evaluate on SCORE set")
print("-------------------------------")
traceback.print_exc()
#######################
# Predict on TEST set #
#######################
try:
problem_doc_test, dataset_test = utils.utils.load_data_from_dir(inputdir, mode="test")
test_predictions = pipeline_runtime.produce(inputs=[dataset_test]).values['outputs.0']
savedir = "%s/predictions/%s" % (outputdir, pipeline.id)
if not os.path.exists(savedir):
os.makedirs(savedir)
savepath = "%s/predictions.csv" % (savedir)
utils.utils.write_predictions_to_file(test_predictions, savepath, problem_doc_test, dataset=dataset_test)
except Exception:
print("-------------------------------")
print("Failed to predict on TEST set")
print("-------------------------------")
traceback.print_exc()
|
d3m-model-search-master
|
Stanford-D3M-Full/apps/predict.py
|
d3m-model-search-master
|
Stanford-D3M-Full/apps/__init__.py
|
|
import time
import json
import sys
import os
import argparse
from multiprocessing import Process, Queue, JoinableQueue
from d3m.metadata.pipeline import Pipeline, PrimitiveStep
from d3m.runtime import Runtime
import executors.ExtendedSklearnExecutor
import executors.SimpleRandomSklearnExecutor
import executors.NistSaverExecutor
import executors.Executor
import utils.utils
import utils.train_utils
if __name__ == "__main__":
# Get args
try:
achieved_score = float(sys.argv[1])
baseline_score = float(sys.argv[2])
inputdir = sys.argv[3]
# Load datasets
problem_doc, dataset = utils.utils.load_data_from_dir(inputdir)
# Normalize score
achieved_score = utils.utils.get_global_score(achieved_score, problem_doc)
baseline_score = utils.utils.get_global_score(baseline_score, problem_doc)
win = int(achieved_score < baseline_score)
print(win)
except Exception as e:
print("0")
|
d3m-model-search-master
|
Stanford-D3M-Full/apps/compare_score.py
|
import time
import json
import sys
import os
import argparse
from multiprocessing import Process, Queue, JoinableQueue
from d3m.runtime import Runtime
import executors.ExtendedSklearnExecutor
import executors.SimpleRandomSklearnExecutor
import executors.NistSaverExecutor
import executors.Executor
import utils.utils
import utils.train_utils
import utils.primitive_pipeline_utils
if __name__ == "__main__":
print("Usage: python -m apps.debug_pipeline abs_path_to_problem_and_dataset")
path_to_problem_dataset = sys.argv[1]
# Create a pipeline that just extracts attributes and target
p = utils.primitive_pipeline_utils.PipelineWrapper(
[
{
"stage_name" : "denormalize",
"primitive" : "d3m.primitives.data_transformation.denormalize.Common",
"input" : utils.primitive_pipeline_utils.PipelineWrapper.PIPELINE_INPUT,
},
{
"stage_name" : "dataset_to_dataframe",
"primitive" : "d3m.primitives.data_transformation.dataset_to_dataframe.Common",
"input" : "denormalize"
},
{
"stage_name" : "column_parser",
"primitive" : "d3m.primitives.data_transformation.column_parser.DataFrameCommon",
"input" : "dataset_to_dataframe",
},
{
"stage_name" : "targets",
"primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
"input" : "column_parser",
"hyperparameters" : {
"semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Target']
}
},
{
"stage_name": "attributes",
"primitive": "d3m.primitives.sri.psl.GeneralRelationalDataset",
#"primitive": "d3m.primitives.sri.autoflow.DatasetTextReader",
"input": utils.primitive_pipeline_utils.PipelineWrapper.PIPELINE_INPUT,
#"hyperparameters": {
# "jvm_memory" : .1,
# "psl_temp_dir": "/tmp/testinggyoo"
#}
},
{
"stage_name": "cast_to_type",
"input": "attributes",
"primitive": "d3m.primitives.data_transformation.cast_to_type.Common",
"hyperparameters" : {
"type_to_cast" : "float"
}
},
#{
# "stage_name": "attributes",
# "primitive": "d3m.primitives.data_cleaning.imputer.SKlearn",
# "input": "cast_to_type"
#},
{
"stage_name" : "output",
"input" : "cast_to_type",
"primitive": "d3m.primitives.classification.extra_trees.SKlearn",
# "primitive": "d3m.primitives.classification.linear_svc.SKlearn",
# #"primitive":"d3m.primitives.regression.random_forest.SKlearn",
"arguments": {
"outputs": "targets"
},
#"hyperparameters": {
# "use_semantic_types": True
# }
}
])
# Load sample data
data_uri = path_to_problem_dataset
assert(os.path.exists(data_uri))
problem_doc, dataset_train = utils.utils.load_data_from_dir(data_uri, mode="train")
problem_doc, dataset_score = utils.utils.load_data_from_dir(data_uri, mode="score")
# Run
predictions = p.run(dataset_train, eval_datasets=[dataset_score], return_pipeline=False)
assert(len(predictions[0].values.keys()) > 0)
predictions = predictions[0].values["outputs.0"]
print("YOOO", predictions)
# Score
achieved_score = utils.train_utils.score(predictions, dataset_score, problem_doc)
print("Achieved: %s" % str(achieved_score))
|
d3m-model-search-master
|
Stanford-D3M-Full/apps/debug_pipeline.py
|
# coding=utf-8
# Copyright 2020- The Google AI Language Team Authors and The HuggingFace Inc. team and Facebook Inc.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Named entity recognition fine-tuning: utilities to work with CoNLL-2003 task. """
import logging
import os
logger = logging.getLogger(__name__)
class InputExample(object):
"""A single training/test example for token classification."""
def __init__(self, guid, words, labels):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
words: list. The words of the sequence.
labels: (Optional) list. The labels for each word of the sequence. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.words = words
self.labels = labels
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_ids):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_ids = label_ids
def read_examples_from_file(data_dir, mode):
file_path = os.path.join(data_dir, "{}.txt".format(mode))
guid_index = 1
examples = []
with open(file_path, encoding="utf-8") as f:
words = []
labels = []
for line in f:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
if words:
examples.append(InputExample(guid="{}-{}".format(mode, guid_index), words=words, labels=labels))
guid_index += 1
words = []
labels = []
else:
splits = line.split(" ")
words.append(splits[0])
if len(splits) > 1:
labels.append(splits[-1].replace("\n", ""))
else:
# Examples could have no label for mode = "test"
labels.append("O")
if words:
examples.append(InputExample(guid="{}-{}".format(mode, guid_index), words=words, labels=labels))
return examples
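# Expected input format for "{mode}.txt" (CoNLL-2003 style, shown here as an
# illustrative sketch): one token per line with its label in the last
# whitespace-separated column, blank lines between sentences, and optional
# -DOCSTART- markers, e.g.
#
#   -DOCSTART- -X- -X- O
#
#   EU NNP B-NP B-ORG
#   rejects VBZ B-VP O
#   German JJ B-NP B-MISC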
def convert_examples_to_features(
examples,
label_list,
max_seq_length,
tokenizer,
cls_token_at_end=False,
cls_token="[CLS]",
cls_token_segment_id=1,
sep_token="[SEP]",
sep_token_extra=False,
pad_on_left=False,
pad_token=0,
pad_token_segment_id=0,
pad_token_label_id=-100,
sequence_a_segment_id=0,
mask_padding_with_zero=True,
):
""" Loads a data file into a list of `InputBatch`s
`cls_token_at_end` defines the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
`cls_token_segment_id` defines the segment id associated with the CLS token (0 for BERT, 2 for XLNet)
"""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d", ex_index, len(examples))
tokens = []
label_ids = []
for word, label in zip(example.words, example.labels):
word_tokens = tokenizer.tokenize(word)
# bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
if len(word_tokens) > 0:
tokens.extend(word_tokens)
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
# Account for the special tokens the tokenizer adds ("- 2" for [CLS] and [SEP]; "- 3" for RoBERTa, which uses an extra separator).
special_tokens_count = tokenizer.num_added_tokens()
if len(tokens) > max_seq_length - special_tokens_count:
tokens = tokens[: (max_seq_length - special_tokens_count)]
label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
segment_ids = [sequence_a_segment_id] * len(tokens)
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
label_ids = [pad_token_label_id] + label_ids
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
label_ids = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s", example.guid)
logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
features.append(
InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_ids=label_ids)
)
return features
def get_labels(path):
if path:
with open(path, "r") as f:
labels = f.read().splitlines()
if "O" not in labels:
labels = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
|
bio-lm-main
|
biolm/utils_sequence_labelling.py
|
# Copyright (c) 2020-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
|
bio-lm-main
|
biolm/__init__.py
|
# coding=utf-8
# Copyright 2020- The Google AI Language Team Authors and The HuggingFace Inc. team and Facebook Inc.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""processors and helpers for classification"""
import logging
import os
from transformers.file_utils import is_tf_available
from transformers.data.processors.utils import DataProcessor, InputExample, InputFeatures
from transformers.data.metrics import pearson_and_spearman, simple_accuracy
from sklearn.metrics import f1_score, precision_score, recall_score, precision_recall_fscore_support
import numpy as np
import json
if is_tf_available():
import tensorflow as tf
logger = logging.getLogger(__name__)
def convert_examples_to_features(
examples,
tokenizer,
max_length=512,
task=None,
label_list=None,
output_mode=None,
pad_on_left=False,
pad_token=0,
pad_token_segment_id=0,
mask_padding_with_zero=True,
):
"""
Loads a data file into a list of ``InputFeatures``
Args:
examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples.
tokenizer: Instance of a tokenizer that will tokenize the examples
max_length: Maximum example length
task: GLUE task
label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method
output_mode: String indicating the output mode. Either ``regression`` or ``classification``
pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)
pad_token: Padding token
pad_token_segment_id: The segment ID for the padding token (It is usually 0, but can vary such as for XLNet where it is 4)
mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values
and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for
actual values)
Returns:
If the ``examples`` input is a ``tf.data.Dataset``, will return a ``tf.data.Dataset``
containing the task-specific features. If the input is a list of ``InputExamples``, will return
a list of task-specific ``InputFeatures`` which can be fed to the model.
"""
is_tf_dataset = False
if is_tf_available() and isinstance(examples, tf.data.Dataset):
is_tf_dataset = True
if task is not None:
processor = processors[task]()
if label_list is None:
label_list = processor.get_labels()
logger.info("Using label list %s for task %s" % (label_list, task))
if output_mode is None:
output_mode = output_modes[task]
logger.info("Using output mode %s for task %s" % (output_mode, task))
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
len_examples = 0
if is_tf_dataset:
example = processor.get_example_from_tensor_dict(example)
example = processor.tfds_map(example)
len_examples = tf.data.experimental.cardinality(examples)
else:
len_examples = len(examples)
if ex_index % 10000 == 0:
logger.info("Writing example %d/%d" % (ex_index, len_examples))
inputs = tokenizer.encode_plus(
example.text_a, example.text_b, add_special_tokens=True, max_length=max_length, return_token_type_ids=True,
)
input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"]
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_length, "Error with input length {} vs {}".format(len(input_ids), max_length)
assert len(attention_mask) == max_length, "Error with input length {} vs {}".format(
len(attention_mask), max_length
)
assert len(token_type_ids) == max_length, "Error with input length {} vs {}".format(
len(token_type_ids), max_length
)
if output_mode == "classification":
label = label_map[example.label]
elif output_mode == "regression":
label = float(example.label)
elif output_mode == 'multilabel_classification':
label = [label_map[l] for l in example.label]
else:
raise KeyError(output_mode)
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
logger.info("token_type_ids: %s" % " ".join([str(x) for x in token_type_ids]))
if output_mode == 'multilabel_classification':
logger.info("label: %s (id = %s)" % (example.label, str(label)))
else:
logger.info("label: %s (id = %d)" % (example.label, label))
features.append(
InputFeatures(
input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, label=label
)
)
if is_tf_available() and is_tf_dataset:
def gen():
for ex in features:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
return tf.data.Dataset.from_generator(
gen,
({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
(
{
"input_ids": tf.TensorShape([None]),
"attention_mask": tf.TensorShape([None]),
"token_type_ids": tf.TensorShape([None]),
},
tf.TensorShape([]),
),
)
return features
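# A minimal usage sketch for convert_examples_to_features, assuming a HuggingFace
# tokenizer checkpoint is available; the checkpoint name and the toy sentence below
# are illustrative assumptions, not part of the original pipeline.
def _sketch_convert_features():
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained("roberta-base")  # assumed checkpoint
    examples = [InputExample(guid="train-d1_s0", text_a="TP53 mutations are frequent in tumours.",
                             text_b=None, label=["genomic instability and mutation"])]
    features = convert_examples_to_features(
        examples,
        tokenizer,
        max_length=32,
        label_list=HOCProcessor().get_labels(),
        output_mode="multilabel_classification",
    )
    # each feature carries padded input_ids / attention_mask / token_type_ids plus label ids
    print(features[0].input_ids, features[0].label)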
class HOCProcessor(DataProcessor):
"""Processor for the HOC data set"""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["sentence"].numpy().decode("utf-8"),
None,
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return [
'activating invasion and metastasis',
'avoiding immune destruction',
'cellular energetics',
'enabling replicative immortality',
'evading growth suppressors',
'genomic instability and mutation',
'inducing angiogenesis',
'resisting cell death',
'sustaining proliferative signaling',
'tumor promoting inflammation'
]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i != 0:
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
label = line[2].split(',') if line[2] != '' else []
examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class MedNLIProcessor(DataProcessor):
"""Processor for the HOC data set"""
def get_example_from_tensor_dict(self, tensor_dict):
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["sentence1"].numpy().decode("utf-8"),
tensor_dict["sentence2"].numpy().decode("utf-8"),
str(tensor_dict["label"].numpy()),
)
def _read_jsonl(self, fi):
dps = []
for line in open(fi):
dps.append(json.loads(line))
return dps
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_jsonl(os.path.join(data_dir, "mli_train_v1.jsonl")), 'train')
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_jsonl(os.path.join(data_dir, "mli_dev_v1.jsonl")), 'dev')
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_jsonl(os.path.join(data_dir, "mli_test_v1.jsonl")), 'test')
def get_labels(self):
"""See base class."""
return ["entailment", 'neutral', "contradiction"]
def _create_examples(self, items, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for item in items:
guid = set_type + '-' + item['pairID']
text_a = item['sentence1']
            text_b = item['sentence2']
label = item['gold_label']
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
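# Sketch of the MedNLI record layout consumed by the processor above; the field
# values are invented, but the keys (pairID, sentence1, sentence2, gold_label)
# are the ones _create_examples actually reads.
def _sketch_mednli_record():
    import json
    line = ('{"pairID": "p0001", "sentence1": "The patient is febrile.", '
            '"sentence2": "The patient has a fever.", "gold_label": "entailment"}')
    item = json.loads(line)
    example = InputExample(guid="dev-" + item["pairID"], text_a=item["sentence1"],
                           text_b=item["sentence2"], label=item["gold_label"])
    print(example.guid, example.label)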
class ChemProtProcessor(DataProcessor):
"""Processor for the HOC data set"""
chem_pattern = '@CHEMICAL$'
gene_pattern = '@GENE$'
chem_gene_pattern = "@CHEMICAL-GENE$"
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["sentence"].numpy().decode("utf-8"),
None,
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ['CPR:3', 'CPR:4', 'CPR:5', 'CPR:6', 'CPR:9', 'false']
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i != 0:
guid = "%s-%s-%s" % (str(i), set_type, line[0])
                # (an earlier, disabled variant filtered out 'false' relations here)
                text_a = line[1]
                text_a = text_a.replace('@CHEMICAL$', self.chem_pattern).replace(
                    '@GENE$', self.gene_pattern).replace(
                    '@CHEM-GENE$', self.chem_gene_pattern)
                label = line[2]
                assert label in self.get_labels()
                examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class GADProcessor(DataProcessor):
"""Processor for the HOC data set"""
disease_pattern = '@DISEASE$'
gene_pattern = '@GENE$'
fold = 1
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["sentence"].numpy().decode("utf-8"),
None,
str(tensor_dict["label"].numpy()),
)
    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, str(self.fold), "train.tsv")), "train")
    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, str(self.fold), "test.tsv")), "dev")
    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, str(self.fold), "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ['0', '1']
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if len(line) == 2:
line = [i] + line
if line[0] == 'index':
continue
guid = "%s-%s-%s" % (str(i), set_type, line[0])
text_a = line[1]
text_a = text_a.replace('@DISEASE$', self.disease_pattern).replace(
'@GENE$', self.gene_pattern)
label = line[2]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class EUADRProcessor(DataProcessor):
"""Processor for the HOC data set"""
disease_pattern = '@DISEASE$'
gene_pattern = '@GENE$'
fold = 1
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["sentence"].numpy().decode("utf-8"),
None,
str(tensor_dict["label"].numpy()),
)
    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, str(self.fold), "train.tsv")), "train")
    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, str(self.fold), "test.tsv")), "dev")
    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, str(self.fold), "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ['0', '1']
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if len(line) == 2:
line = [i] + line
if line[0] == 'index':
continue
guid = "%s-%s-%s" % (str(i), set_type, line[0])
text_a = line[1]
text_a = text_a.replace('@DISEASE$', self.disease_pattern).replace(
'@GENE$', self.gene_pattern)
label = line[2]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
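# GAD and EU-ADR ship as 10 cross-validation folds; a fold is selected by mutating
# the processor's fold attribute before loading, as load_and_cache_examples in the
# classification script does for task names like "gad3". A minimal sketch (the data
# directory is an assumed placeholder):
def _sketch_fold_selection():
    processor = GADProcessor()
    processor.fold = 3  # reads <data_dir>/3/train.tsv and <data_dir>/3/test.tsv
    examples = processor.get_train_examples("/path/to/GAD")  # assumed location
    print(len(examples))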
class DDIProcessor(DataProcessor):
"""Processor for the HOC data set"""
drug_pattern = '@DRUG$'
drug_drug_pattern = '@DRUG-DRUG$'
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["sentence"].numpy().decode("utf-8"),
None,
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ['DDI-advise', 'DDI-effect', 'DDI-int', 'DDI-mechanism', 'DDI-false']
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i != 0:
guid = "%s-%s-%s" % (str(i), set_type, line[0])
                # (a disabled variant skipped 'DDI-false' relations here)
text_a = line[1]
text_a = text_a.replace('@DRUG$', self.drug_pattern).replace(
'@DRUG-DRUG$', self.drug_drug_pattern)
label = line[2]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class I2B22010Processor(DataProcessor):
"""Processor for the HOC data set"""
problem_pattern = '@PROBLEM$'
treatment_pattern = '@TREATMENT$'
test_pattern = '@TEST$'
problem_problem_pattern = "@PROBLEM-PROBLEM$"
test_problem_pattern = "@TEST-PROBLEM$"
test_test_pattern = "@TEST-TEST$"
treatment_test_pattern = '@TREATMENT-TEST$'
treatment_treatment_pattern = '@TREATMENT-TREATMENT$'
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["sentence"].numpy().decode("utf-8"),
None,
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train_new.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev_new.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
        return ['PIP', 'TeCP', 'TeRP', 'TrAP', 'TrCP', 'TrIP', 'TrNAP', 'TrWP', 'false']
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i != 0:
guid = "%s-%s-%s" % (str(i), set_type, line[0])
text_a = line[1]
text_a = text_a.replace(
'@problem$', self.problem_pattern).replace(
'@treatment$', self.treatment_pattern).replace(
'@test$', self.test_pattern).replace(
"@problem-problem$", self.problem_problem_pattern).replace(
"@test-problem$", self.test_problem_pattern).replace(
"@test-test$", self.test_test_pattern).replace(
'@treatment-test$', self.treatment_test_pattern).replace(
'@treatment-treatment$', self.treatment_treatment_pattern
)
label = line[2]
assert label in self.get_labels()
examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
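# The i2b2 2010 TSVs carry lowercase entity placeholders, which _create_examples
# above rewrites to the uppercase patterns. A toy sentence (invented) illustrating
# the rewrite:
def _sketch_i2b2_entity_masks():
    text = "@problem$ resolved after @treatment$ was started ."
    masked = text.replace('@problem$', '@PROBLEM$').replace('@treatment$', '@TREATMENT$')
    print(masked)  # "@PROBLEM$ resolved after @TREATMENT$ was started ."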
tasks_num_labels = {
"hoc": 10,
"mednli": 3,
"chemprot": 6,
"gad1": 2,
"gad2": 2,
"gad3": 2,
"gad4": 2,
"gad5": 2,
"gad6": 2,
"gad7": 2,
"gad8": 2,
"gad9": 2,
"gad10": 2,
"euadr1": 2,
"euadr2": 2,
"euadr3": 2,
"euadr4": 2,
"euadr5": 2,
"euadr6": 2,
"euadr7": 2,
"euadr8": 2,
"euadr9": 2,
"euadr10": 2,
'ddi': 5,
"i2b22010re": 9,
}
processors = {
"hoc": HOCProcessor,
"mednli": MedNLIProcessor,
"chemprot": ChemProtProcessor,
"gad1": GADProcessor,
"gad2": GADProcessor,
"gad3": GADProcessor,
"gad4": GADProcessor,
"gad5": GADProcessor,
"gad6": GADProcessor,
"gad7": GADProcessor,
"gad8": GADProcessor,
"gad9": GADProcessor,
"gad10": GADProcessor,
"euadr1": EUADRProcessor,
"euadr2": EUADRProcessor,
"euadr3": EUADRProcessor,
"euadr4": EUADRProcessor,
"euadr5": EUADRProcessor,
"euadr6": EUADRProcessor,
"euadr7": EUADRProcessor,
"euadr8": EUADRProcessor,
"euadr9": EUADRProcessor,
"euadr10": EUADRProcessor,
"ddi": DDIProcessor,
"i2b22010re": I2B22010Processor,
}
output_modes = {
"hoc": "multilabel_classification",
"mednli": "classification",
"chemprot": "classification",
"gad1": "classification",
"gad2": "classification",
"gad3": "classification",
"gad4": "classification",
"gad5": "classification",
"gad6": "classification",
"gad7": "classification",
"gad8": "classification",
"gad9": "classification",
"gad10": "classification",
"euadr1": "classification",
"euadr2": "classification",
"euadr3": "classification",
"euadr4": "classification",
"euadr5": "classification",
"euadr6": "classification",
"euadr7": "classification",
"euadr8": "classification",
"euadr9": "classification",
"euadr10": "classification",
"ddi": "classification",
"i2b22010re": "classification",
}
stopping_metrics = {
"hoc": "f",
"mednli": "acc",
"chemprot": "micro_f1",
"gad1": "f1",
"gad2": "f1",
"gad3": "f1",
"gad4": "f1",
"gad5": "f1",
"gad6": "f1",
"gad7": "f1",
"gad8": "f1",
"gad9": "f1",
"gad10": "f1",
"euadr1": "f1",
"euadr2": "f1",
"euadr3": "f1",
"euadr4": "f1",
"euadr5": "f1",
"euadr6": "f1",
"euadr7": "f1",
"euadr8": "f1",
"euadr9": "f1",
"euadr10": "f1",
"ddi": "micro_f1",
"i2b22010re": "micro_f1",
}
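# The four registries above must stay keyed identically, since the fine-tuning
# script looks each task up in all of them. A quick consistency-check sketch:
def _sketch_check_registries():
    assert set(tasks_num_labels) == set(processors) == set(output_modes) == set(stopping_metrics)
    assert all(tasks_num_labels[t] == len(processors[t]().get_labels()) for t in processors)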
def multiclass_acc_and_f1(preds, labels):
acc = simple_accuracy(preds, labels)
macro_f1 = f1_score(y_true=labels, y_pred=preds, average='macro')
macro_weighted_f1 = f1_score(y_true=labels, y_pred=preds, average='weighted')
macro_precision = precision_score(y_true=labels, y_pred=preds, average='macro')
macro_weighted_precision = precision_score(y_true=labels, y_pred=preds, average='weighted')
macro_recall = recall_score(y_true=labels, y_pred=preds, average='macro')
macro_weighted_recall = recall_score(y_true=labels, y_pred=preds, average='weighted')
micro_f1 = f1_score(y_true=labels, y_pred=preds, average='micro')
return {
"acc": acc,
'micro_f1': micro_f1,
"macro_f1": macro_f1,
"macro_weighted_f1": macro_weighted_f1,
"macro_precision": macro_precision,
"macro_weighted_precision": macro_weighted_precision,
"macro_recall": macro_recall,
"macro_weighted_recall": macro_weighted_recall,
}
def acc_and_micro_f1(preds, labels):
acc = simple_accuracy(preds, labels)
micro_f1 = f1_score(y_true=labels, y_pred=preds, average='micro')
return {
"acc": acc,
"micro_f1": micro_f1,
}
def acc_p_r_and_f1(preds, labels):
acc = simple_accuracy(preds, labels)
f1 = f1_score(y_true=labels, y_pred=preds, )
recall = recall_score(y_true=labels, y_pred=preds, )
precision = precision_score(y_true=labels, y_pred=preds, )
return {
"acc": acc,
"f1": f1,
'precision': precision,
'recall': recall
}
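# A toy check of the binary-task metrics above, with invented predictions:
def _sketch_binary_metrics():
    import numpy as np
    preds = np.array([1, 0, 1, 1])
    labels = np.array([1, 0, 0, 1])
    # acc 0.75, precision 2/3, recall 1.0, f1 0.8
    print(acc_p_r_and_f1(preds, labels))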
def hoc_get_p_r_f_arrary(preds, labels, examples):
"""adapted from BLUE benchmark: https://github.com/ncbi-nlp/BLUE_Benchmark/blob/b6216f2cb9bba209ee7028fc874123d8fd5a810c/blue/eval_hoc.py """
threshold = 0.5
cat = 10
test_predict_label = {}
test_true_label = {}
for pred, label, example in zip(preds, labels, examples):
doc_id = example.guid.split('-')[1].split('_')[0]
snum = int(example.guid.split('-')[1].split('_s')[1])
        ttl = test_true_label.get(doc_id, [0] * cat)
        tpl = test_predict_label.get(doc_id, [0] * cat)
        for ind in range(cat):
if pred[ind] > threshold:
tpl[ind] = 1
if label[ind] == 1:
ttl[ind] = 1
test_true_label[doc_id] = ttl
test_predict_label[doc_id] = tpl
doc_ids = list(test_true_label.keys())
acc_list = []
prc_list = []
rec_list = []
f_score_list = []
for doc_id in doc_ids:
label_pred_set = set()
label_gold_set = set()
for j in range(cat):
if test_predict_label[doc_id][j] == 1:
label_pred_set.add(j)
if test_true_label[doc_id][j] == 1:
label_gold_set.add(j)
uni_set = label_gold_set.union(label_pred_set)
intersec_set = label_gold_set.intersection(label_pred_set)
tt = len(intersec_set)
if len(label_pred_set) == 0:
prc = 0
else:
prc = tt / len(label_pred_set)
acc = tt / len(uni_set)
rec = tt / len(label_gold_set)
if prc == 0 and rec == 0:
f_score = 0
else:
f_score = 2 * prc * rec / (prc + rec)
acc_list.append(acc)
prc_list.append(prc)
rec_list.append(rec)
f_score_list.append(f_score)
mean_prc = np.mean(prc_list)
mean_rec = np.mean(rec_list)
def divide(x, y):
        return np.true_divide(x, y, out=np.zeros_like(x, dtype=float), where=y != 0)
f_score = divide(2 * mean_prc * mean_rec, (mean_prc + mean_rec))
return {'p': mean_prc, 'r': mean_rec, 'f': f_score, 'acc': np.mean(acc_list)}
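# HOC is scored at the document level: sentence guids look like
# "<split>-<docid>_s<n>", and per-sentence predictions are OR-ed into one label
# vector per document before precision/recall are averaged. A tiny sketch with
# two sentences from one invented document:
def _sketch_hoc_aggregation():
    import numpy as np
    class _Ex:  # stand-in for InputExample; only the guid is used here
        def __init__(self, guid):
            self.guid = guid
    examples = [_Ex("test-doc1_s0"), _Ex("test-doc1_s1")]
    preds = np.array([[0.9] + [0.0] * 9, [0.2] + [0.0] * 9])  # only sentence 0 fires label 0
    labels = np.array([[1] + [0] * 9, [1] + [0] * 9])
    print(hoc_get_p_r_f_arrary(preds, labels, examples))  # perfect doc-level p/r/f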
def chemprot_eval(preds, labels):
p,r,f,s = precision_recall_fscore_support(y_pred=preds, y_true=labels, labels=[0, 1, 2, 3, 4], average="micro")
return {
"micro_p": p,
'micro_f1': f,
"micro_r": r,
}
def ddi_eval(preds, labels):
p,r,f,s = precision_recall_fscore_support(y_pred=preds, y_true=labels, labels=[0, 1, 2, 3], average="micro")
return {
"micro_p": p,
'micro_f1': f,
"micro_r": r,
}
def i2b22010re_eval(preds, labels):
p,r,f,s = precision_recall_fscore_support(y_pred=preds, y_true=labels, labels=[0, 1, 2, 3, 4, 5, 6, 7], average="micro")
return {
"micro_p": p,
'micro_f1': f,
"micro_r": r,
}
def compute_metrics(task_name, preds, labels, examples):
assert len(preds) == len(labels) == len(examples)
if task_name == "medsts":
return pearson_and_spearman(preds, labels)
elif task_name == "biosses":
return pearson_and_spearman(preds, labels)
elif task_name == "hoc":
return hoc_get_p_r_f_arrary(preds, labels, examples)
elif task_name == "mednli":
return multiclass_acc_and_f1(preds, labels)
    elif task_name == 'chemprot':
        return chemprot_eval(preds, labels)
    elif task_name.startswith('gad') or task_name.startswith('euadr'):
        return acc_p_r_and_f1(preds, labels)
elif task_name == 'ddi':
return ddi_eval(preds, labels)
elif task_name == "i2b22010":
return i2b22010re_eval(preds, labels)
else:
raise KeyError(task_name)
|
bio-lm-main
|
biolm/utils_classification.py
|
# coding=utf-8
# Copyright 2020- The Google AI Language Team Authors and The HuggingFace Inc. team and Facebook Inc.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fine-tuning the library models for named entity recognition and sequence labelling"""
import argparse
import glob
import logging
import os
import random
import numpy as np
import torch
from seqeval.metrics import f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
WEIGHTS_NAME,
AdamW,
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
get_linear_schedule_with_warmup,
)
from biolm.utils_sequence_labelling import convert_examples_to_features, get_labels, read_examples_from_file
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in MODEL_CONFIG_CLASSES), ())
TOKENIZER_ARGS = ["do_lower_case", "strip_accents", "keep_accents", "use_fast"]
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
os.path.join(args.model_name_or_path, "scheduler.pt")
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if os.path.exists(args.model_name_or_path):
        # set global_step to global_step of last saved checkpoint from model path
try:
global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
except ValueError:
global_step = 0
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(
epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]
)
    set_seed(args)  # Added here for reproducibility
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet"] else None
                )  # XLM and RoBERTa don't use segment_ids
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in pytorch-transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Log metrics
if (
args.local_rank == -1 and args.evaluate_during_training
): # Only evaluate when single GPU otherwise metrics may not average well
results, _, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev")
for key, value in results.items():
tb_writer.add_scalar("eval_{}".format(key), value, global_step)
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
# Save model checkpoint
output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
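# A small sketch of the step arithmetic used in train() above; all numbers are
# invented for illustration:
def _sketch_schedule_arithmetic():
    batches_per_epoch, accum_steps, epochs = 1000, 4, 3
    per_gpu_batch, n_gpu, world_size = 8, 2, 1
    t_total = batches_per_epoch // accum_steps * epochs  # 750 optimizer steps
    effective_batch = per_gpu_batch * n_gpu * accum_steps * world_size  # 64 examples per update
    print(t_total, effective_batch)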
def evaluate(args, model, tokenizer, labels, pad_token_label_id, mode, prefix=""):
eval_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode=mode)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu evaluate
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation %s *****", prefix)
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
model.eval()
for batch in tqdm(eval_dataloader, desc="Evaluating"):
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet"] else None
                )  # XLM and RoBERTa don't use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
if args.n_gpu > 1:
tmp_eval_loss = tmp_eval_loss.mean() # mean() to average on multi-gpu parallel evaluating
eval_loss += tmp_eval_loss.item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
preds = np.argmax(preds, axis=2)
label_map = {i: label for i, label in enumerate(labels)}
out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
results = {
"loss": eval_loss,
"precision": precision_score(out_label_list, preds_list),
"recall": recall_score(out_label_list, preds_list),
"f1": f1_score(out_label_list, preds_list),
}
logger.info("***** Eval results %s *****", prefix)
for key in sorted(results.keys()):
logger.info(" %s = %s", key, str(results[key]))
return results, preds_list, out_label_list
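# Sketch of the padded-label unpacking performed in evaluate() above: positions
# carrying the pad label id are dropped before seqeval scoring. The tag set and
# ids below are invented:
def _sketch_label_alignment():
    import numpy as np
    labels = ["O", "B-DISEASE", "I-DISEASE"]
    label_map = {i: label for i, label in enumerate(labels)}
    pad_id = -100  # CrossEntropyLoss().ignore_index, as set in main()
    out_label_ids = np.array([[1, 2, pad_id]])
    gold = [[label_map[j] for j in row if j != pad_id] for row in out_label_ids]
    print(gold)  # [['B-DISEASE', 'I-DISEASE']] -- the padded position is skipped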
def load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode):
    if args.local_rank not in [-1, 0] and mode == "train":
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache
# Load data features from cache or dataset file
cached_features_file = os.path.join(
args.data_dir,
"cached_{}_{}_{}".format(
mode, list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length)
),
)
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
examples = read_examples_from_file(args.data_dir, mode)
features = convert_examples_to_features(
examples,
labels,
args.max_seq_length,
tokenizer,
cls_token_at_end=bool(args.model_type in ["xlnet"]),
# xlnet has a cls token at the end
cls_token=tokenizer.cls_token,
cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0,
sep_token=tokenizer.sep_token,
sep_token_extra=bool(args.model_type in ["roberta"]),
# roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
pad_on_left=bool(args.model_type in ["xlnet"]),
# pad on the left for xlnet
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
pad_token_label_id=pad_token_label_id,
)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
    if args.local_rank == 0 and mode == "train":
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
return dataset
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
)
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " + ", ".join(MODEL_TYPES),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
# Other parameters
parser.add_argument(
"--labels",
default="",
type=str,
help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
)
parser.add_argument(
"--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3",
)
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
parser.add_argument(
"--evaluate_during_training",
action="store_true",
help="Whether to run evaluation during training at each logging step.",
)
parser.add_argument(
"--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model."
)
parser.add_argument(
"--keep_accents", action="store_const", const=True, help="Set this flag if model is trained with accents."
)
parser.add_argument(
"--strip_accents", action="store_const", const=True, help="Set this flag if model is trained without accents."
)
parser.add_argument("--use_fast", action="store_const", const=True, help="Set this flag to use fast tokenization.")
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.")
parser.add_argument(
"--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation."
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument(
"--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform."
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.")
parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
)
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
parser.add_argument(
"--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory"
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
args = parser.parse_args()
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
set_seed(args)
# Prepare CONLL-2003 task
labels = get_labels(args.labels)
num_labels = len(labels)
# Use cross entropy ignore index as padding label id so that only real label ids contribute to the loss later
pad_token_label_id = CrossEntropyLoss().ignore_index
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config = AutoConfig.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
id2label={str(i): label for i, label in enumerate(labels)},
label2id={label: i for i, label in enumerate(labels)},
cache_dir=args.cache_dir if args.cache_dir else None,
)
tokenizer_args = {k: v for k, v in vars(args).items() if v is not None and k in TOKENIZER_ARGS}
logger.info("Tokenizer arguments: %s", tokenizer_args)
tokenizer = AutoTokenizer.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
cache_dir=args.cache_dir if args.cache_dir else None,
**tokenizer_args,
)
model = AutoModelForTokenClassification.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None,
)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode="train")
global_step, tr_loss = train(args, train_dataset, model, tokenizer, labels, pad_token_label_id)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
tokenizer = AutoTokenizer.from_pretrained(args.output_dir, **tokenizer_args)
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
)
logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
model = AutoModelForTokenClassification.from_pretrained(checkpoint)
model.to(args.device)
result, _, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", prefix=global_step)
if global_step:
result = {"{}_{}".format(global_step, k): v for k, v in result.items()}
results.update(result)
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(results.keys()):
writer.write("{} = {}\n".format(key, str(results[key])))
if args.do_predict and args.local_rank in [-1, 0]:
tokenizer = AutoTokenizer.from_pretrained(args.output_dir, **tokenizer_args)
model = AutoModelForTokenClassification.from_pretrained(args.output_dir)
model.to(args.device)
result, predictions, out_label_list = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="test")
# Save results
output_test_results_file = os.path.join(args.output_dir, "test_results.txt")
with open(output_test_results_file, "w") as writer:
for key in sorted(result.keys()):
writer.write("{} = {}\n".format(key, str(result[key])))
# Save predictions
output_test_predictions_file = os.path.join(args.output_dir, "test_predictions.txt")
with open(output_test_predictions_file, "w") as writer:
with open(os.path.join(args.data_dir, "test.txt"), "r") as f:
example_id = 0
for line in f:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
writer.write(line)
if not predictions[example_id]:
example_id += 1
elif predictions[example_id]:
output_line = line.split()[0] + " " + out_label_list[example_id].pop(0) + " " + predictions[example_id].pop(0) + "\n"
writer.write(output_line)
else:
logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])
return results
if __name__ == "__main__":
main()
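# An invocation sketch for this script; the data directory, checkpoint name and
# output path below are placeholder assumptions, not paths from the original project:
#   python -m biolm.run_sequence_labelling \
#       --data_dir /path/to/ner_data \
#       --model_type roberta \
#       --model_name_or_path roberta-base \
#       --output_dir /tmp/ner_out \
#       --do_train --do_eval \
#       --max_seq_length 128 --per_gpu_train_batch_size 8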
|
bio-lm-main
|
biolm/run_sequence_labelling.py
|
# coding=utf-8
# Copyright 2020- The Google AI Language Team Authors and The HuggingFace Inc. team and Facebook Inc.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification"""
import argparse
import glob
import json
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from torch import nn
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
WEIGHTS_NAME,
AdamW,
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
get_linear_schedule_with_warmup,
)
from biolm.utils_classification import compute_metrics
from biolm.utils_classification import convert_examples_to_features
from biolm.utils_classification import output_modes
from biolm.utils_classification import processors, stopping_metrics
from transformers.data.processors.utils import InputExample
import dataclasses
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in MODEL_CONFIG_CLASSES), (),)
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def train(args, train_dataset, model, tokenizer):
""" Train the model """
# if args.local_rank in [-1, 0]:
# tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
os.path.join(args.model_name_or_path, "scheduler.pt")
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True,
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if os.path.exists(args.model_name_or_path):
# set global_step to global_step of last saved checkpoint from model path
try:
global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
except ValueError:
global_step = 0
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
tr_loss, logging_loss = 0.0, 0.0
best_stopping_score = -1.
stopping_metric = stopping_metrics[args.task_name]
model.zero_grad()
train_iterator = trange(
epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0],
)
    set_seed(args)  # Added here for reproducibility
if args.output_mode == 'multilabel_classification':
loss_fn = nn.BCEWithLogitsLoss()
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
model.train()
batch = tuple(t.to(args.device) for t in batch)
if args.output_mode == 'multilabel_classification':
inputs = {"input_ids": batch[0], "attention_mask": batch[1]}
labels = batch[3]
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
outputs = model(**inputs)
loss = loss_fn(outputs[0], labels.to(outputs[0]))
else:
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0 or (
# last step in epoch but step is always smaller than gradient_accumulation_steps
len(epoch_iterator) <= args.gradient_accumulation_steps
and (step + 1) == len(epoch_iterator)
):
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
logs = {}
if (
args.local_rank == -1 and args.evaluate_during_training
): # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, tokenizer)
for key, value in results.items():
eval_key = "eval_{}".format(key)
logs[eval_key] = value
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs["learning_rate"] = learning_rate_scalar
logs["loss"] = loss_scalar
logging_loss = tr_loss
# for key, value in logs.items():
# tb_writer.add_scalar(key, value, global_step)
print(json.dumps({**logs, **{"step": global_step}}))
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
# Save model checkpoint
output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
results = evaluate(args, model, tokenizer, prefix="checkpoint-{}".format(global_step))
stopping_score = results[stopping_metric]
logger.info(f'Step: {global_step} | {stopping_metric}: {stopping_score}')
if stopping_score > best_stopping_score:
logger.info(f'New best {stopping_metric}: {stopping_score}')
best_stopping_score = stopping_score
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
torch.save(optimizer.state_dict(), os.path.join(args.output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(args.output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", args.output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, prefix=""):
eval_task_names = (args.task_name,)
eval_outputs_dirs = (args.output_dir,)
results = {}
if args.output_mode == 'multilabel_classification':
loss_fn = nn.BCEWithLogitsLoss()
else:
loss_fn = None
for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
eval_dataset, examples = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True, return_examples=True)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu eval
if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
if args.output_mode == 'multilabel_classification':
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
                    modified_inputs = {"input_ids": batch[0], "attention_mask": batch[1]}
if args.model_type != "distilbert":
modified_inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
outputs = model(**modified_inputs)
logits = outputs[0]
tmp_eval_loss = loss_fn(logits, inputs['labels'].to(outputs[0]))
else:
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
if args.output_mode == "classification":
preds = np.argmax(preds, axis=1)
elif args.output_mode == "regression":
preds = np.squeeze(preds)
elif args.output_mode == 'multilabel_classification':
preds = torch.sigmoid(torch.tensor(preds)).numpy()
result = compute_metrics(eval_task, preds, out_label_ids, examples)
results.update(result)
results['eval_loss'] = eval_loss
output_eval_file = os.path.join(eval_output_dir, prefix, "test_results.txt" if args.do_test else "eval_results.txt")
with open(output_eval_file, "w") as writer:
# with open('bioroberta_chemprot.txt', 'w') as writer:
logger.info("***** Eval results {} *****".format(prefix))
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
with open(output_eval_file.replace('_results.txt', '_predictions.tsv'), "w") as writer:
for example, pred in zip(examples, preds):
if args.output_mode == "classification":
pred_label = args.label_list[pred]
elif args.output_mode == "regression":
pred_label = str(pred)
elif args.output_mode == 'multilabel_classification':
pred_label = [args.label_list[ind] for ind, p in enumerate(pred) if p > 0.5]
writer.write(f'{example.guid}\t{pred_label}\n')
return results
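# Illustration (hypothetical scores): in the multilabel branch above, sigmoid scores are
# thresholded at 0.5 when writing predictions, so scores [0.9, 0.2, 0.6] over
# label_list [A, B, C] are written out as the label list [A, C] for that example.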
def load_and_cache_examples(args, task, tokenizer, evaluate=False, return_examples=False):
if args.local_rank not in [-1, 0] and not evaluate:
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache
processor = processors[task]()
if args.task_name.startswith('gad') or args.task_name.startswith('euadr'):
fold = int(''.join([t for t in args.task_name if t.isdigit()]))
processor.fold = fold
output_mode = output_modes[task]
# Load data features from cache or dataset file
cached_features_file = os.path.join(
args.data_dir,
"cached_{}_{}_{}_{}".format(
("test" if args.do_test else "dev") if evaluate else "train",
list(filter(None, args.model_name_or_path.split("/"))).pop(),
str(args.max_seq_length),
str(task),
),
)
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
examples = []
for line in open(cached_features_file + '.examples.jsonl'):
examples.append(InputExample(**json.loads(line)))
assert len(examples) == len(features)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
label_list = processor.get_labels()
if task in ["mnli", "mnli-mm"] and args.model_type in ["roberta", "xlmroberta"]:
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
examples = (
(processor.get_test_examples(args.data_dir) if args.do_test else processor.get_dev_examples(args.data_dir))
if evaluate else processor.get_train_examples(args.data_dir)
)
features = convert_examples_to_features(
examples,
tokenizer,
label_list=label_list,
max_length=args.max_seq_length,
output_mode=output_mode,
pad_on_left=bool(args.model_type in ["xlnet"]), # pad on the left for xlnet
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
with open(cached_features_file + '.examples.jsonl', 'w') as f:
for example in examples:
dmp = json.dumps(dataclasses.asdict(example), sort_keys=True) + "\n"
f.write(dmp)
if args.local_rank == 0 and not evaluate:
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
if output_mode == "classification":
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
elif output_mode == "regression":
all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
elif output_mode == 'multilabel_classification':
all_labels = torch.zeros((len(features), len(processor.get_labels())), dtype=torch.long)
for feat_no, feat in enumerate(features):
for l in feat.label:
all_labels[feat_no, l] = 1
else:
        raise ValueError("unsupported output_mode: %s" % output_mode)
dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
if return_examples:
return dataset, examples
return dataset
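# Illustration (hypothetical labels): for a 4-label multilabel task, a feature whose
# label indices are [0, 2] is encoded above as the multi-hot row tensor([1, 0, 1, 0]).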
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
)
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " + ", ".join(MODEL_TYPES),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
)
parser.add_argument(
"--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train selected in the list: " + ", ".join(processors.keys()),
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
# Other parameters
parser.add_argument(
"--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name",
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3",
)
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
parser.add_argument("--do_test", action="store_true", help="Whether to run eval on the Test set.")
parser.add_argument(
"--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step.",
)
parser.add_argument(
"--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model.",
)
parser.add_argument(
"--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.",
)
parser.add_argument(
"--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument(
"--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.",
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.")
parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
)
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
parser.add_argument(
"--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory",
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets",
)
parser.add_argument(
"--overwrite_predictions", action="store_true", help="Overwrite predictions files",
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
args = parser.parse_args()
if args.do_test:
assert not args.do_eval
assert not args.do_train
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
set_seed(args)
# Prepare GLUE task
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = processors[args.task_name]()
if args.task_name.startswith('gad') or args.task_name.startswith('euadr'):
fold = int(''.join([t for t in args.task_name if t.isdigit()]))
processor.fold = fold
args.output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
args.label_list = label_list
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config = AutoConfig.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None,
)
if 'roberta' in args.model_name_or_path:
from transformers import RobertaTokenizerFast
tokenizer = RobertaTokenizerFast.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
add_prefix_space=False,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
else:
tokenizer = AutoTokenizer.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
model = AutoModelForSequenceClassification.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None,
)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
assert not args.eval_all_checkpoints
# Training
if args.do_train:
assert not args.do_test
train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
global_step, tr_loss = train(args, train_dataset, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Load a trained model and vocabulary that you have fine-tuned
model = AutoModelForSequenceClassification.from_pretrained(args.output_dir)
if 'roberta' in args.model_name_or_path:
from transformers import RobertaTokenizerFast
tokenizer = RobertaTokenizerFast.from_pretrained(
args.output_dir,
add_prefix_space=False,
)
else:
tokenizer = AutoTokenizer.from_pretrained(args.output_dir)
model.to(args.device)
# Evaluation
results = {}
if (args.do_eval or args.do_test) and args.local_rank in [-1, 0]:
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
assert not args.do_test
checkpoints = list(
os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
)
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
if checkpoint == args.output_dir:
prefix = ''
else:
prefix = checkpoint.split("/")[-1]
if args.do_test:
output_eval_file = os.path.join(args.output_dir, prefix, "test_results.txt")
else:
output_eval_file = os.path.join(args.output_dir, prefix, "eval_results.txt")
if os.path.exists(output_eval_file) and not args.overwrite_predictions:
print(f'skipping {checkpoint}, eval already exists')
continue
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)
model.to(args.device)
result = evaluate(args, model, tokenizer, prefix=prefix)
result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
results.update(result)
            del model
return results
if __name__ == "__main__":
main()
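# Example fine-tuning command (sketch; paths and the task name are hypothetical values):
#   python run_classification.py --data_dir data/chemprot --model_type roberta \
#     --model_name_or_path roberta-large --task_name chemprot --output_dir out/chemprot \
#     --do_train --evaluate_during_training --per_gpu_train_batch_size 8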
|
bio-lm-main
|
biolm/run_classification.py
|
# Copyright (c) 2020-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from transformers import AutoTokenizer
import argparse
from tqdm import tqdm
def main(args):
"""Inputs and cleans and ensures no inputs are too long
Adapted from https://raw.githubusercontent.com/stefan-it/fine-tuned-berts-seq/master/scripts/preprocess.py
"""
subword_len_counter = 0
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
for line in tqdm(open(args.filename), desc=f'processing {args.filename}'):
line = line.rstrip()
if not line:
print(line)
subword_len_counter = 0
continue
token = line.split()[0]
current_subwords_len = len(tokenizer.tokenize(token))
# Token contains strange control characters like \x96 or \x95
# Just filter out the complete line
if current_subwords_len == 0:
continue
if (subword_len_counter + current_subwords_len) > args.max_len:
print("")
print(line)
subword_len_counter = 0
continue
subword_len_counter += current_subwords_len
print(line)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"--filename",
type=str,
required=True,
)
parser.add_argument(
"--model_name_or_path",
type=str,
default='roberta-large',
)
parser.add_argument(
"--max_len",
default=512,
type=int,
)
args = parser.parse_args()
main(args)
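# Usage sketch (hypothetical file names; the cleaned output goes to stdout):
#   python clean_conll_file.py --filename train.txt.conll --model_name_or_path roberta-large > train.clean.conll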
|
bio-lm-main
|
preprocessing/clean_conll_file.py
|
# Copyright (c) 2020-present Emily Alsentzer and Facebook Inc.
# Copyright (c) 2019 Emily Alsentzer
# All rights reserved.
#
# This source code is licensed under the MIT license, which can be found here https://github.com/EmilyAlsentzer/clinicalBERT/blob/master/LICENSE
#
"""Adapted from clinicalBERT preprocessing notebooks: https://github.com/EmilyAlsentzer/clinicalBERT"""
import os, xml, xml.etree.ElementTree as ET, numpy as np, argparse
START_CDATA = "<TEXT><![CDATA["
END_CDATA = "]]></TEXT>"
TAGS = ['MEDICATION', 'OBESE', 'SMOKER', 'HYPERTENSION', 'event', 'FAMILY_HIST']
def read_xml_file(xml_path, event_tag_type='ALL_CHILDREN', match_text=True):
with open(xml_path, mode='r') as f:
lines = f.readlines()
text, in_text = [], False
for i, l in enumerate(lines):
if START_CDATA in l:
text.append(list(l[l.find(START_CDATA) + len(START_CDATA):]))
in_text = True
elif END_CDATA in l:
text.append(list(l[:l.find(END_CDATA)]))
break
elif in_text:
text.append(list(l))
pos_transformer = {}
linear_pos = 1
for line, sentence in enumerate(text):
for char_pos, char in enumerate(sentence):
pos_transformer[linear_pos] = (line, char_pos)
linear_pos += 1
    try:
        xml_parsed = ET.parse(xml_path)
    except ET.ParseError:
        # Some files contain bare '&' characters that break XML parsing:
        # drop the <TEXT> body, escape the ampersands, and re-parse the tags.
        with open(xml_path) as f:
            txt = f.read()
        before = txt.split('<TEXT>')[0]
        after = txt.split('</TEXT>')[1]
        with open('temporary.xml', 'w') as f:
            f.write((before + after).replace('&', 'AMPERSAND'))
        xml_parsed = ET.parse('temporary.xml')
tag_containers = xml_parsed.findall('TAGS')
assert len(tag_containers) == 1, "Found multiple tag sets!"
tag_container = tag_containers[0]
event_tags = tag_container.findall('EVENT')
event_labels = [['O'] * len(sentence) for sentence in text]
for event_tag in event_tags:
base_label = event_tag.attrib['type']
start_pos, end_pos, event_text = event_tag.attrib['start'], event_tag.attrib['end'], event_tag.attrib['text']
start_pos, end_pos = int(start_pos) + 1, int(end_pos)
event_text = ' '.join(event_text.split())
event_text = event_text.replace('AMPERSAND', '&')
(start_line, start_char), (end_line, end_char) = pos_transformer[start_pos], pos_transformer[end_pos]
obs_text = []
for line in range(start_line, end_line + 1):
t = text[line]
s = start_char if line == start_line else 0
e = end_char if line == end_line else len(t)
obs_text.append(''.join(t[s:e + 1]).strip())
obs_text = ' '.join(obs_text)
obs_text = ' '.join(obs_text.split())
        # Clinical notes sometimes use curly quotes where the tag attribute has straight ones.
        if '’' in obs_text and '’' not in event_text: event_text = event_text.replace("'", '’')
        if '”' in obs_text and '”' not in event_text: event_text = event_text.replace('"', '”')
if match_text: assert obs_text == event_text, (
("Texts don't match! %s v %s" % (event_text, obs_text)) + '\n' + str((
start_pos, end_pos, line, s, e, t, xml_path
))
)
if base_label.strip() == '': continue
event_labels[end_line][end_char] = 'I-%s' % base_label
event_labels[start_line][start_char] = 'B-%s' % base_label
for line in range(start_line, end_line + 1):
t = text[line]
s = start_char + 1 if line == start_line else 0
e = end_char - 1 if line == end_line else len(t) - 1
for i in range(s, e + 1): event_labels[line][i] = 'I-%s' % base_label
return text, event_labels
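# Illustration (hypothetical tag): for an EVENT tag of type PROBLEM spanning the
# characters of "chest pain", the first character of the span is labeled 'B-PROBLEM',
# the remaining characters 'I-PROBLEM', and every other character stays 'O'.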
def merge_into_words(text_by_char, all_labels_by_char):
assert len(text_by_char) == len(all_labels_by_char), "Incorrect # of sentences!"
N = len(text_by_char)
text_by_word, all_labels_by_word = [], []
for sentence_num in range(N):
sentence_by_char = text_by_char[sentence_num]
labels_by_char = all_labels_by_char[sentence_num]
assert len(sentence_by_char) == len(labels_by_char), "Incorrect # of chars in sentence!"
S = len(sentence_by_char)
if labels_by_char == (['O'] * len(sentence_by_char)):
sentence_by_word = ''.join(sentence_by_char).split()
labels_by_word = ['O'] * len(sentence_by_word)
else:
sentence_by_word, labels_by_word = [], []
text_chunks, labels_chunks = [], []
s = 0
for i in range(S):
if i == S - 1:
text_chunks.append(sentence_by_char[s:])
labels_chunks.append(labels_by_char[s:])
elif labels_by_char[i] == 'O':
continue
else:
if i > 0 and labels_by_char[i - 1] == 'O':
text_chunks.append(sentence_by_char[s:i])
labels_chunks.append(labels_by_char[s:i])
s = i
if labels_by_char[i + 1] == 'O' or labels_by_char[i + 1][2:] != labels_by_char[i][2:]:
text_chunks.append(sentence_by_char[s:i + 1])
labels_chunks.append(labels_by_char[s:i + 1])
s = i + 1
for text_chunk, labels_chunk in zip(text_chunks, labels_chunks):
assert len(text_chunk) == len(labels_chunk), "Bad Chunking (len)"
assert len(text_chunk) > 0, "Bad chunking (len 0)" + str(text_chunks) + str(labels_chunks)
labels_set = set(labels_chunk)
assert labels_set == set(['O']) or (len(labels_set) <= 3 and 'O' not in labels_set), (
("Bad chunking (contents) %s" % ', '.join(labels_set)) + str(text_chunks) + str(labels_chunks)
)
text_chunk_by_word = ''.join(text_chunk).split()
W = len(text_chunk_by_word)
if W == 0:
continue
if labels_chunk[0] == 'O':
labels_chunk_by_word = ['O'] * W
elif W == 1:
labels_chunk_by_word = [labels_chunk[0]]
elif W == 2:
labels_chunk_by_word = [labels_chunk[0], labels_chunk[-1]]
else:
labels_chunk_by_word = [
labels_chunk[0]
] + [labels_chunk[1]] * (W - 2) + [
labels_chunk[-1]
]
sentence_by_word.extend(text_chunk_by_word)
labels_by_word.extend(labels_chunk_by_word)
assert len(sentence_by_word) == len(labels_by_word), "Incorrect # of words in sentence!"
if len(sentence_by_word) == 0: continue
text_by_word.append(sentence_by_word)
all_labels_by_word.append(labels_by_word)
return text_by_word, all_labels_by_word
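# Illustration: merging the char-level labels for a PROBLEM span over "chest pain"
# (as produced by read_xml_file above) yields word-level labels ['B-PROBLEM', 'I-PROBLEM'];
# chunks are cut wherever the char-level label switches between 'O' and a tagged span.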
def reprocess_event_labels(folders, base_path='.', event_tag_type='event', match_text=True, dev_set_size=None):
all_texts_by_patient, all_labels_by_patient = {}, {}
for folder in folders:
folder_dir = os.path.join(base_path, folder)
xml_filenames = [x for x in os.listdir(folder_dir) if x.endswith('xml')]
for xml_filename in xml_filenames:
patient_num = int(xml_filename[:-4])
xml_filepath = os.path.join(folder_dir, xml_filename)
results = read_xml_file(
xml_filepath,
event_tag_type=event_tag_type,
match_text=match_text
)
if results is None:
continue
text_by_char, labels_by_char = results
text_by_word, labels_by_word = merge_into_words(text_by_char, labels_by_char)
if patient_num not in all_texts_by_patient:
all_texts_by_patient[patient_num] = []
all_labels_by_patient[patient_num] = []
all_texts_by_patient[patient_num].extend(text_by_word)
all_labels_by_patient[patient_num].extend(labels_by_word)
patients = set(all_texts_by_patient.keys())
if dev_set_size is None:
train_patients, dev_patients = list(patients), []
else:
N_train = int(len(patients) * (1 - dev_set_size))
patients_random = np.random.permutation(list(patients))
train_patients = list(patients_random[:N_train])
dev_patients = list(patients_random[N_train:])
train_texts, train_labels = [], []
dev_texts, dev_labels = [], []
for patient_num in train_patients:
train_texts.extend(all_texts_by_patient[patient_num])
train_labels.extend(all_labels_by_patient[patient_num])
for patient_num in dev_patients:
dev_texts.extend(all_texts_by_patient[patient_num])
dev_labels.extend(all_labels_by_patient[patient_num])
train_out_text_by_sentence = []
for text, labels in zip(train_texts, train_labels):
train_out_text_by_sentence.append('\n'.join('%s %s' % x for x in zip(text, labels)))
dev_out_text_by_sentence = []
for text, labels in zip(dev_texts, dev_labels):
dev_out_text_by_sentence.append('\n'.join('%s %s' % x for x in zip(text, labels)))
return '\n\n'.join(train_out_text_by_sentence), '\n\n'.join(dev_out_text_by_sentence)
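# Output format sketch (hypothetical tokens): each sentence is emitted as "token label"
# lines separated by blank lines, e.g. "denies O" / "chest B-PROBLEM" / "pain I-PROBLEM",
# the CoNLL layout that the token-classification training scripts consume.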
def main(raw_data_dir, task_dir):
final_train_text, final_dev_text = reprocess_event_labels(
[os.path.join(raw_data_dir, '2012-07-15.original-annotation.release')], dev_set_size=0.1, match_text=True
)
test_text, _ = reprocess_event_labels(
[os.path.join(raw_data_dir, '2012-08-08.test-data.event-timex-groundtruth/xml')], match_text=False, dev_set_size=None
)
labels = {}
for s in final_train_text, final_dev_text, test_text:
for line in s.split('\n'):
if line == '': continue
label = line.split()[-1]
assert label == 'O' or label.startswith('B-') or label.startswith('I-'), "label wrong! %s" % label
if label not in labels: labels[label] = 1
else: labels[label] += 1
with open(os.path.join(task_dir, 'train.txt.conll'), mode='w') as f:
f.write(final_train_text)
with open(os.path.join(task_dir, 'dev.txt.conll'), mode='w') as f:
f.write(final_dev_text)
with open(os.path.join(task_dir, 'test.txt.conll'), mode='w') as f:
f.write(test_text)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"--raw_data_dir",
type=str,
required=True,
)
parser.add_argument(
"--task_dir",
type=str,
required=True,
)
args = parser.parse_args()
main(args.raw_data_dir, args.task_dir)
|
bio-lm-main
|
preprocessing/preprocess_i2b2_2012_ner.py
|
# Copyright (c) 2020-present Emily Alsentzer and Facebook Inc.
# Copyright (c) 2019 Emily Alsentzer
# All rights reserved.
#
# This source code is licensed under the MIT license, which can be found here https://github.com/EmilyAlsentzer/clinicalBERT/blob/master/LICENSE
#
"""Adapted from clinicalBERT preprocessing notebooks: https://github.com/EmilyAlsentzer/clinicalBERT"""
import os, re, pickle, numpy as np
import argparse
def process_concept(concept_str):
"""
takes string like
'c="asymptomatic" 16:2 16:2||t="problem"'
and returns dictionary like
{'t': 'problem', 'start_line': 16, 'start_pos': 2, 'end_line': 16, 'end_pos': 2}
"""
try:
position_bit, problem_bit = concept_str.split('||')
t = problem_bit[3:-1]
        start_and_end_span = next(re.finditer(r'\s\d+:\d+\s\d+:\d+', concept_str)).span()
c = concept_str[3:start_and_end_span[0]-1]
c = [y for y in c.split(' ') if y.strip() != '']
c = ' '.join(c)
start_and_end = concept_str[start_and_end_span[0]+1 : start_and_end_span[1]]
start, end = start_and_end.split(' ')
start_line, start_pos = [int(x) for x in start.split(':')]
end_line, end_pos = [int(x) for x in end.split(':')]
except:
raise
return {
't': t, 'start_line': start_line, 'start_pos': start_pos, 'end_line': end_line, 'end_pos': end_pos,
'c': c,
}
def build_label_vocab(base_dirs):
seen, label_vocab, label_vocab_size = set(['O']), {'O': 'O'}, 0
for base_dir in base_dirs:
concept_dir = os.path.join(base_dir, 'concept')
assert os.path.isdir(concept_dir), "Directory structure doesn't match!"
ids = set([x[:-4] for x in os.listdir(concept_dir) if x.endswith('.con')])
for i in ids:
with open(os.path.join(concept_dir, '%s.con' % i)) as f:
concepts = [process_concept(x.strip()) for x in f.readlines()]
for c in concepts:
if c['t'] not in seen:
label_vocab_size += 1
label_vocab['B-%s' % c['t']] = 'B-%s' % c['t'] # label_vocab_size
label_vocab_size += 1
label_vocab['I-%s' % c['t']] = 'I-%s' % c['t'] # label_vocab_size
seen.update([c['t']])
return label_vocab, label_vocab_size
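# Illustration (hypothetical concept types): if the concept files contain only the types
# 'problem' and 'test', build_label_vocab returns a vocab with the keys
# 'O', 'B-problem', 'I-problem', 'B-test', 'I-test' and label_vocab_size == 4
# (the 'O' entry is not counted).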
def reformatter(base, label_vocab, txt_dir = None, concept_dir = None):
if txt_dir is None: txt_dir = os.path.join(base, 'txt')
if concept_dir is None: concept_dir = os.path.join(base, 'concept')
assert os.path.isdir(txt_dir) and os.path.isdir(concept_dir), "Directory structure doesn't match!"
txt_ids = set([x[:-4] for x in os.listdir(txt_dir) if x.endswith('.txt')])
concept_ids = set([x[:-4] for x in os.listdir(concept_dir) if x.endswith('.con')])
assert txt_ids == concept_ids, (
"id set doesn't match: txt - concept = %s, concept - txt = %s"
"" % (str(txt_ids - concept_ids), str(concept_ids - txt_ids))
)
ids = txt_ids
reprocessed_texts = {}
for i in ids:
with open(os.path.join(txt_dir, '%s.txt' % i), mode='r') as f:
lines = f.readlines()
txt = [[y for y in x.strip().split(' ') if y.strip() != ''] for x in lines]
line_starts_with_space = [x.startswith(' ') for x in lines]
with open(os.path.join(concept_dir, '%s.con' % i), mode='r') as f:
concepts = [process_concept(x.strip()) for x in f.readlines()]
labels = [['O' for _ in line] for line in txt]
for c in concepts:
if c['start_line'] == c['end_line']:
line = c['start_line']-1
p_modifier = -1 if line_starts_with_space[line] else 0
text = (' '.join(txt[line][c['start_pos']+p_modifier:c['end_pos']+1+p_modifier])).lower()
assert text == c['c'], (
"Text mismatch! %s vs. %s (id: %s, line: %d)\nFull line: %s"
"" % (c['c'], text, i, line, txt[line])
)
for line in range(c['start_line']-1, c['end_line']):
p_modifier = -1 if line_starts_with_space[line] else 0
start_pos = c['start_pos']+p_modifier if line == c['start_line']-1 else 0
end_pos = c['end_pos']+1+p_modifier if line == c['end_line']-1 else len(txt[line])
if line == c['end_line'] - 1: labels[line][end_pos-1] = label_vocab['I-%s' % c['t']]
if line == c['start_line'] - 1: labels[line][start_pos] = label_vocab['B-%s' % c['t']]
for j in range(start_pos + 1, end_pos-1): labels[line][j] = label_vocab['I-%s' % c['t']]
joined_words_and_labels = [zip(txt_line, label_line) for txt_line, label_line in zip(txt, labels)]
out_str = '\n\n'.join(
['\n'.join(['%s %s' % p for p in joined_line]) for joined_line in joined_words_and_labels]
)
reprocessed_texts[i] = out_str
return reprocessed_texts
def main(beth_dir, partners_dir, test_dir, test_txt_dir, task_dir):
label_vocab, label_vocab_size = build_label_vocab([beth_dir, partners_dir])
reprocessed_texts = {
'beth': reformatter(beth_dir, label_vocab),
'partners': reformatter(partners_dir, label_vocab),
'test': reformatter(
test_dir, label_vocab,
txt_dir=test_txt_dir,
concept_dir=os.path.join(test_dir, 'concepts')
),
}
np.random.seed(1)
all_partners_train_ids = np.random.permutation(list(reprocessed_texts['partners'].keys()))
N = len(all_partners_train_ids)
N_train = int(0.9 * N)
partners_train_ids = all_partners_train_ids[:N_train]
partners_dev_ids = all_partners_train_ids[N_train:]
print("Partners # Patients: Train: %d, Dev: %d" %(len(partners_train_ids), len(partners_dev_ids)))
all_beth_train_ids = np.random.permutation(list(reprocessed_texts['beth'].keys()))
N = len(all_beth_train_ids)
N_train = int(0.9 * N)
beth_train_ids = all_beth_train_ids[:N_train]
beth_dev_ids = all_beth_train_ids[N_train:]
print("Beth # Patients: Train: %d, Dev: %d" % (len(beth_train_ids), len(beth_dev_ids)))
print("Merged # Patients: Train: %d, Dev: %d" % (
len(partners_train_ids) + len(beth_train_ids), len(beth_dev_ids) + len(partners_dev_ids)
))
merged_train_txt = '\n\n'.join(np.random.permutation(
[reprocessed_texts['partners'][i] for i in partners_train_ids] +
[reprocessed_texts['beth'][i] for i in beth_train_ids]
))
merged_dev_txt = '\n\n'.join(np.random.permutation(
[reprocessed_texts['partners'][i] for i in partners_dev_ids] +
[reprocessed_texts['beth'][i] for i in beth_dev_ids]
))
merged_test_txt = '\n\n'.join(np.random.permutation(list(reprocessed_texts['test'].values())))
print("Merged # Samples: Train: %d, Dev: %d, Test: %d" % (
len(merged_train_txt.split('\n\n')),
len(merged_dev_txt.split('\n\n')),
len(merged_test_txt.split('\n\n'))
))
partners_train_txt = '\n\n'.join(np.random.permutation(
[reprocessed_texts['partners'][i] for i in partners_train_ids]
))
partners_dev_txt = '\n\n'.join(np.random.permutation(
[reprocessed_texts['partners'][i] for i in partners_dev_ids]
))
partners_test_txt = '\n\n'.join(np.random.permutation(list(reprocessed_texts['test'].values())))
    OUT_FILES = {
        'merged_train': os.path.join(task_dir, 'merged', 'train.tsv'),
        'merged_dev': os.path.join(task_dir, 'merged', 'dev.tsv'),
        'merged_test': os.path.join(task_dir, 'merged', 'test.tsv'),
        'partners_train': os.path.join(task_dir, 'partners', 'train.tsv'),
        'partners_dev': os.path.join(task_dir, 'partners', 'dev.tsv'),
        'partners_test': os.path.join(task_dir, 'partners', 'test.tsv'),
        'vocab': os.path.join(task_dir, 'merged', 'labels.txt')
    }
os.makedirs(os.path.join(task_dir, 'merged'), exist_ok=True)
os.makedirs(os.path.join(task_dir, 'partners'), exist_ok=True)
with open(OUT_FILES['merged_train'], mode='w') as f: f.write(merged_train_txt)
with open(OUT_FILES['merged_dev'], mode='w') as f: f.write(merged_dev_txt)
with open(OUT_FILES['merged_test'], mode='w') as f: f.write(merged_test_txt)
with open(OUT_FILES['partners_train'], mode='w') as f: f.write(partners_train_txt)
with open(OUT_FILES['partners_dev'], mode='w') as f: f.write(partners_dev_txt)
with open(OUT_FILES['partners_test'], mode='w') as f: f.write(partners_test_txt)
with open(OUT_FILES['vocab'], mode='w') as f: f.write('\n'.join(label_vocab.keys()))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--beth_dir",
type=str,
required=True,
)
parser.add_argument(
"--partners_dir",
type=str,
required=True,
)
parser.add_argument(
"--test_dir",
type=str,
required=True,
)
parser.add_argument(
"--test_txt_dir",
type=str,
required=True,
)
parser.add_argument(
"--task_dir",
type=str,
required=True,
)
args = parser.parse_args()
# beth_dir = './data/blue_raw_data/data/i2b2-2010/original/concept_assertion_relation_training_data/beth/'
# partners_dir = './data/blue_raw_data/data/i2b2-2010/original/concept_assertion_relation_training_data/partners/'
# test_dir = './data/blue_raw_data/data/i2b2-2010/original/reference_standard_for_test_data/'
# test_txt_dir = './data/blue_raw_data/data/i2b2-2010/original/test_data/'
# task_dir = 'data/I2B22010NER'
main(args.beth_dir, args.partners_dir, args.test_dir, args.test_txt_dir, args.task_dir)
|
bio-lm-main
|
preprocessing/preprocess_i2b2_2010_ner.py
|
# Copyright (c) 2020-present Emily Alsentzer and Facebook Inc.
# Copyright (c) 2019 Emily Alsentzer
# All rights reserved.
#
# This source code is licensed under the MIT license, which can be found here https://github.com/EmilyAlsentzer/clinicalBERT/blob/master/LICENSE
#
"""Adapted from clinicalBERT preprocessing notebooks: https://github.com/EmilyAlsentzer/clinicalBERT"""
import os, xml.etree.ElementTree as ET, numpy as np, argparse
START_CDATA = "<TEXT><![CDATA["
END_CDATA = "]]></TEXT>"
TAGS = ['MEDICATION', 'OBESE', 'SMOKER', 'HYPERTENSION', 'PHI', 'FAMILY_HIST']
def read_xml_file(xml_path, PHI_tag_type='ALL_CHILDREN', match_text=True):
with open(xml_path, mode='r') as f:
lines = f.readlines()
text, in_text = [], False
for i, l in enumerate(lines):
if START_CDATA in l:
text.append(list(l[l.find(START_CDATA) + len(START_CDATA):]))
in_text = True
elif END_CDATA in l:
text.append(list(l[:l.find(END_CDATA)]))
break
elif in_text:
if xml_path.endswith('180-03.xml') and '0808' in l and 'Effingham' in l:
print("Adjusting known error")
l = l[:9] + ' ' * 4 + l[9:]
text.append(list(l))
pos_transformer = {}
linear_pos = 1
for line, sentence in enumerate(text):
for char_pos, char in enumerate(sentence):
pos_transformer[linear_pos] = (line, char_pos)
linear_pos += 1
xml_parsed = ET.parse(xml_path)
tag_containers = xml_parsed.findall('TAGS')
assert len(tag_containers) == 1, "Found multiple tag sets!"
tag_container = tag_containers[0]
    PHI_tags = list(tag_container) if PHI_tag_type == 'ALL_CHILDREN' else tag_container.findall('PHI')
PHI_labels = [['O'] * len(sentence) for sentence in text]
for PHI_tag in PHI_tags:
base_label = PHI_tag.attrib['TYPE']
start_pos, end_pos, PHI_text = PHI_tag.attrib['start'], PHI_tag.attrib['end'], PHI_tag.attrib['text']
start_pos, end_pos = int(start_pos) + 1, int(end_pos)
PHI_text = ' '.join(PHI_text.split())
if PHI_text == 'Johnson and Johnson' and xml_path.endswith('188-05.xml'):
print("Adjusting known error")
PHI_text = 'Johnson & Johnson'
(start_line, start_char), (end_line, end_char) = pos_transformer[start_pos], pos_transformer[end_pos]
obs_text = []
for line in range(start_line, end_line + 1):
t = text[line]
s = start_char if line == start_line else 0
e = end_char if line == end_line else len(t)
obs_text.append(''.join(t[s:e + 1]).strip())
obs_text = ' '.join(obs_text)
obs_text = ' '.join(obs_text.split())
if match_text: assert obs_text == PHI_text, (
("Texts don't match! %s v %s" % (PHI_text, obs_text)) + '\n' + str((
start_pos, end_pos, line, s, e, t, xml_path
))
)
PHI_labels[end_line][end_char] = 'I-%s' % base_label
PHI_labels[start_line][start_char] = 'B-%s' % base_label
for line in range(start_line, end_line + 1):
t = text[line]
s = start_char + 1 if line == start_line else 0
e = end_char - 1 if line == end_line else len(t) - 1
for i in range(s, e + 1): PHI_labels[line][i] = 'I-%s' % base_label
return text, PHI_labels
def merge_into_words(text_by_char, all_labels_by_char):
assert len(text_by_char) == len(all_labels_by_char), "Incorrect # of sentences!"
N = len(text_by_char)
text_by_word, all_labels_by_word = [], []
for sentence_num in range(N):
sentence_by_char = text_by_char[sentence_num]
labels_by_char = all_labels_by_char[sentence_num]
assert len(sentence_by_char) == len(labels_by_char), "Incorrect # of chars in sentence!"
S = len(sentence_by_char)
if labels_by_char == (['O'] * len(sentence_by_char)):
sentence_by_word = ''.join(sentence_by_char).split()
labels_by_word = ['O'] * len(sentence_by_word)
else:
sentence_by_word, labels_by_word = [], []
text_chunks, labels_chunks = [], []
s = 0
for i in range(S):
if i == S - 1:
text_chunks.append(sentence_by_char[s:])
labels_chunks.append(labels_by_char[s:])
elif labels_by_char[i] == 'O':
continue
else:
if i > 0 and labels_by_char[i - 1] == 'O':
text_chunks.append(sentence_by_char[s:i])
labels_chunks.append(labels_by_char[s:i])
s = i
if labels_by_char[i + 1] == 'O' or labels_by_char[i + 1][2:] != labels_by_char[i][2:]:
text_chunks.append(sentence_by_char[s:i + 1])
labels_chunks.append(labels_by_char[s:i + 1])
s = i + 1
for text_chunk, labels_chunk in zip(text_chunks, labels_chunks):
assert len(text_chunk) == len(labels_chunk), "Bad Chunking (len)"
assert len(text_chunk) > 0, "Bad chunking (len 0)" + str(text_chunks) + str(labels_chunks)
labels_set = set(labels_chunk)
assert labels_set == set(['O']) or (len(labels_set) <= 3 and 'O' not in labels_set), (
("Bad chunking (contents) %s" % ', '.join(labels_set)) + str(text_chunks) + str(labels_chunks)
)
text_chunk_by_word = ''.join(text_chunk).split()
W = len(text_chunk_by_word)
if W == 0:
continue
if labels_chunk[0] == 'O':
labels_chunk_by_word = ['O'] * W
elif W == 1:
labels_chunk_by_word = [labels_chunk[0]]
elif W == 2:
labels_chunk_by_word = [labels_chunk[0], labels_chunk[-1]]
else:
labels_chunk_by_word = [
labels_chunk[0]
] + [labels_chunk[1]] * (W - 2) + [
labels_chunk[-1]
]
sentence_by_word.extend(text_chunk_by_word)
labels_by_word.extend(labels_chunk_by_word)
assert len(sentence_by_word) == len(labels_by_word), "Incorrect # of words in sentence!"
if len(sentence_by_word) == 0: continue
text_by_word.append(sentence_by_word)
all_labels_by_word.append(labels_by_word)
return text_by_word, all_labels_by_word
def reprocess_PHI_labels(folders, base_path='.', PHI_tag_type='PHI', match_text=True, dev_set_size=None):
all_texts_by_patient, all_labels_by_patient = {}, {}
for folder in folders:
folder_dir = os.path.join(base_path, folder)
xml_filenames = [x for x in os.listdir(folder_dir) if x.endswith('xml')]
for xml_filename in xml_filenames:
patient_num = int(xml_filename[:3])
xml_filepath = os.path.join(folder_dir, xml_filename)
text_by_char, labels_by_char = read_xml_file(
xml_filepath,
PHI_tag_type=PHI_tag_type,
match_text=match_text
)
text_by_word, labels_by_word = merge_into_words(text_by_char, labels_by_char)
if patient_num not in all_texts_by_patient:
all_texts_by_patient[patient_num] = []
all_labels_by_patient[patient_num] = []
all_texts_by_patient[patient_num].extend(text_by_word)
all_labels_by_patient[patient_num].extend(labels_by_word)
patients = set(all_texts_by_patient.keys())
if dev_set_size is None:
train_patients, dev_patients = list(patients), []
else:
N_train = int(len(patients) * (1 - dev_set_size))
patients_random = np.random.permutation(list(patients))
train_patients = list(patients_random[:N_train])
dev_patients = list(patients_random[N_train:])
train_texts, train_labels = [], []
dev_texts, dev_labels = [], []
for patient_num in train_patients:
train_texts.extend(all_texts_by_patient[patient_num])
train_labels.extend(all_labels_by_patient[patient_num])
for patient_num in dev_patients:
dev_texts.extend(all_texts_by_patient[patient_num])
dev_labels.extend(all_labels_by_patient[patient_num])
train_out_text_by_sentence = []
for text, labels in zip(train_texts, train_labels):
train_out_text_by_sentence.append('\n'.join('%s %s' % x for x in zip(text, labels)))
dev_out_text_by_sentence = []
for text, labels in zip(dev_texts, dev_labels):
dev_out_text_by_sentence.append('\n'.join('%s %s' % x for x in zip(text, labels)))
return '\n\n'.join(train_out_text_by_sentence), '\n\n'.join(dev_out_text_by_sentence)
def main(gold_set_1_dir, gold_set_2_dir, test_gold_set_dir, task_dir):
final_train_text, final_dev_text = reprocess_PHI_labels(
[gold_set_1_dir, gold_set_2_dir], PHI_tag_type='ALL_CHILDREN',
dev_set_size=0.1, match_text=True
)
test_text, _ = reprocess_PHI_labels(
[test_gold_set_dir], PHI_tag_type='ALL_CHILDREN', match_text=False, dev_set_size=None
)
labels = {}
for s in final_train_text, final_dev_text, test_text:
for line in s.split('\n'):
if line == '': continue
label = line.split()[-1]
assert label == 'O' or label.startswith('B-') or label.startswith('I-'), "label wrong! %s" % label
if label not in labels: labels[label] = 1
else: labels[label] += 1
with open(os.path.join(task_dir, 'train.txt.conll'), mode='w') as f:
f.write(final_train_text)
with open(os.path.join(task_dir,'dev.txt.conll'), mode='w') as f:
f.write(final_dev_text)
with open(os.path.join(task_dir,'test.txt.conll'), mode='w') as f:
f.write(test_text)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"--gold_set_1_dir",
type=str,
required=True,
)
parser.add_argument(
"--gold_set_2_dir",
type=str,
required=True,
)
parser.add_argument(
"--test_gold_set_dir",
type=str,
required=True,
)
parser.add_argument(
"--task_dir",
type=str,
required=True,
)
args = parser.parse_args()
main(args.gold_set_1_dir, args.gold_set_2_dir, args.test_gold_set_dir, args.task_dir)
|
bio-lm-main
|
preprocessing/preprocess_i2b2_2014_ner.py
|
"""Run dimensionality reduction experiment."""
import argparse
import logging
import networkx as nx
import numpy as np
import torch
import geom.hyperboloid as hyperboloid
import geom.poincare as poincare
from learning.frechet import Frechet
from learning.pca import TangentPCA, EucPCA, PGA, HoroPCA, BSA
from utils.data import load_graph, load_embeddings
from utils.metrics import avg_distortion_measures, compute_metrics, format_metrics, aggregate_metrics
from utils.sarkar import sarkar, pick_root
parser = argparse.ArgumentParser(
description="Hyperbolic dimensionality reduction"
)
parser.add_argument('--dataset', type=str, help='which datasets to use', default="smalltree",
choices=["smalltree", "phylo-tree", "bio-diseasome", "ca-CSphd"])
parser.add_argument('--model', type=str, help='which dimensionality reduction method to use', default="horopca",
choices=["pca", "tpca", "pga", "bsa", "hmds", "horopca"])
parser.add_argument('--metrics', nargs='+', help='which metrics to use', default=["distortion", "frechet_var"])
parser.add_argument(
"--dim", default=10, type=int, help="input embedding dimension to use"
)
parser.add_argument(
"--n-components", default=2, type=int, help="number of principal components"
)
parser.add_argument(
"--lr", default=5e-2, type=float, help="learning rate to use for optimization-based methods"
)
parser.add_argument(
"--n-runs", default=5, type=int, help="number of runs for optimization-based methods"
)
parser.add_argument('--use-sarkar', default=False, action='store_true', help="use sarkar to embed the graphs")
parser.add_argument(
"--sarkar-scale", default=3.5, type=float, help="scale to use for embeddings computed with Sarkar's construction"
)
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s %(levelname)-8s %(message)s",
level=logging.INFO,
datefmt="%Y-%m-%d %H:%M:%S"
)
args = parser.parse_args()
torch.set_default_dtype(torch.float64)
pca_models = {
'pca': {'class': EucPCA, 'optim': False, 'iterative': False, "n_runs": 1},
'tpca': {'class': TangentPCA, 'optim': False, 'iterative': False, "n_runs": 1},
'pga': {'class': PGA, 'optim': True, 'iterative': True, "n_runs": args.n_runs},
'bsa': {'class': BSA, 'optim': True, 'iterative': False, "n_runs": args.n_runs},
'horopca': {'class': HoroPCA, 'optim': True, 'iterative': False, "n_runs": args.n_runs},
}
metrics = {}
embeddings = {}
logging.info(f"Running experiments for {args.dataset} dataset.")
    # load the graph for args.dataset
graph = load_graph(args.dataset)
n_nodes = graph.number_of_nodes()
nodelist = np.arange(n_nodes)
graph_dist = torch.from_numpy(nx.floyd_warshall_numpy(graph, nodelist=nodelist))
logging.info(f"Loaded {args.dataset} dataset with {n_nodes} nodes")
# get hyperbolic embeddings
if args.use_sarkar:
# embed with Sarkar
logging.info("Using sarkar embeddings")
root = pick_root(graph)
z = sarkar(graph, tau=args.sarkar_scale, root=root, dim=args.dim)
z = torch.from_numpy(z)
z_dist = poincare.pairwise_distance(z) / args.sarkar_scale
else:
# load pre-trained embeddings
logging.info("Using optimization-based embeddings")
assert args.dim in [2, 10, 50], "pretrained embeddings are only for 2, 10 and 50 dimensions"
z = load_embeddings(args.dataset, dim=args.dim)
z = torch.from_numpy(z)
z_dist = poincare.pairwise_distance(z)
if torch.cuda.is_available():
z = z.cuda()
z_dist = z_dist.cuda()
graph_dist = graph_dist.cuda()
# compute embeddings' distortion
distortion = avg_distortion_measures(graph_dist, z_dist)[0]
logging.info("Embedding distortion in {} dimensions: {:.4f}".format(args.dim, distortion))
# Compute the mean and center the data
logging.info("Computing the Frechet mean to center the embeddings")
frechet = Frechet(lr=1e-2, eps=1e-5, max_steps=5000)
mu_ref, has_converged = frechet.mean(z, return_converged=True)
logging.info(f"Mean computation has converged: {has_converged}")
x = poincare.reflect_at_zero(z, mu_ref)
# Run dimensionality reduction methods
logging.info(f"Running {args.model} for dimensionality reduction")
metrics = []
dist_orig = poincare.pairwise_distance(x)
if args.model in pca_models.keys():
model_params = pca_models[args.model]
for _ in range(model_params["n_runs"]):
model = model_params['class'](dim=args.dim, n_components=args.n_components, lr=args.lr, max_steps=500)
if torch.cuda.is_available():
model.cuda()
model.fit(x, iterative=model_params['iterative'], optim=model_params['optim'])
metrics.append(model.compute_metrics(x))
embeddings = model.map_to_ball(x).detach().cpu().numpy()
metrics = aggregate_metrics(metrics)
else:
# run hMDS baseline
logging.info(f"Running hMDS")
x_hyperboloid = hyperboloid.from_poincare(x)
distances = hyperboloid.distance(x.unsqueeze(-2), x.unsqueeze(-3))
D_p = poincare.pairwise_distance(x)
x_h = hyperboloid.mds(D_p, d=args.n_components)
x_proj = hyperboloid.to_poincare(x_h)
embeddings["hMDS"] = x_proj.numpy()
metrics = compute_metrics(x, x_proj)
logging.info(f"Experiments for {args.dataset} dataset completed.")
logging.info("Computing evaluation metrics")
results = format_metrics(metrics, args.metrics)
for line in results:
logging.info(line)
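# Example invocation (sketch, using the flags defined above):
#   python main.py --dataset smalltree --model horopca --dim 10 --n-components 2
# With --use-sarkar, the embeddings come from Sarkar's combinatorial construction
# instead of the pre-trained 2/10/50-dimensional embeddings loaded above.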
|
HoroPCA-main
|
main.py
|
HoroPCA-main
|
learning/__init__.py
|
|
"""Frechet data statistics."""
import torch
import geom.poincare as poincare
class Frechet:
"""Class to compute Frechet statiscs (mean and variance)."""
def __init__(self, lr=1e-1, eps=1e-5, max_steps=5000, max_lr_try=3):
self.lr = lr
self.eps = eps
self.max_steps = max_steps
self.max_lr_try = max_lr_try
        # Candidate learning rates: the base lr first, then doublings and halvings
        # to fall back on if gradient descent fails to converge.
        self.lr_values = [self.lr]
        self.lr_values += [self.lr * (2 ** (i + 1)) for i in range(self.max_lr_try)]
        self.lr_values += [self.lr * (0.5 ** (i + 1)) for i in range(self.max_lr_try)]
def mean(self, x, return_converged=False):
"""Compute the Frechet mean with gradient descent steps."""
n = x.shape[0]
mu_init = torch.mean(x, dim=0, keepdim=True)
has_converged = False
        for lr in self.lr_values:
mu = mu_init
for i in range(self.max_steps):
log_x = torch.sum(poincare.logmap(mu, x), dim=0, keepdim=True)
delta_mu = lr / n * log_x
mu = poincare.expmap(mu, delta_mu)
if delta_mu.norm(dim=-1, p=2, keepdim=False) < self.eps:
has_converged = True
break
if has_converged:
break
if not has_converged:
mu = mu_init
if return_converged:
return mu, has_converged
else:
return mu
def variance(self, x, return_converged=False):
"""Compute the Frechet variance."""
mu, has_converged = self.mean(x, return_converged=True)
distances = poincare.distance(x, mu.unsqueeze(0)) ** 2
var = torch.mean(distances)
if return_converged:
return var, has_converged
else:
return var
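# Minimal usage sketch (assumes x is a torch tensor of points inside the Poincare ball):
#   frechet = Frechet(lr=1e-1)
#   mu = frechet.mean(x)          # (1, dim) Frechet mean via Riemannian gradient descent
#   var = frechet.variance(x)     # scalar: mean squared distance from the points to mu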
|
HoroPCA-main
|
learning/frechet.py
|
"""Hyperbolic dimensionality reduction models."""
from abc import ABC, abstractmethod
import torch
import torch.nn as nn
import geom.euclidean as euclidean
import geom.hyperboloid as hyperboloid
import geom.minkowski as minkowski
import geom.poincare as poincare
from geom.horo import busemann, project_kd
from utils.metrics import compute_metrics
class PCA(ABC, nn.Module):
"""Dimensionality reduction model class."""
def __init__(self, dim, n_components, lr=1e-3, max_steps=100, keep_orthogonal=False):
super(PCA, self).__init__()
self.dim = dim
self.n_components = n_components
self.components = nn.ParameterList(nn.Parameter(torch.randn(1, dim)) for _ in range(self.n_components))
self.max_steps = max_steps
self.lr = lr
self.keep_orthogonal = keep_orthogonal
def project(self, x):
"""Projects points onto the principal components."""
Q = self.get_components()
return self._project(x, Q)
@abstractmethod
def _project(self, x, Q):
"""Projects points onto the submanifold that goes through the origin and is spanned by different components.
Args:
x: torch.tensor of shape (batch_size, dim)
Q: torch.tensor of shape (n_components, dim)
Returns:
x_p: torch.tensor of shape (batch_size, dim)
"""
raise NotImplementedError
@abstractmethod
def compute_loss(self, x, Q):
"""Computes objective to minimize.
Args:
x: torch.tensor of shape (batch_size, dim), data before _projection
Q: torch.tensor of shape (n_components, dim)
Args:
loss: torch.tensor of shape (1,)
"""
raise NotImplementedError
    def gram_schmidt(self):
"""Applies Gram-Schmidt to the component vectors."""
def inner(u, v):
return torch.sum(u * v)
Q = []
for k in range(self.n_components):
v_k = self.components[k][0]
proj = 0.0
for v_j in Q:
v_j = v_j[0]
coeff = inner(v_j, v_k) / inner(v_j, v_j).clamp_min(1e-15)
proj += coeff * v_j
v_k = v_k - proj
v_k = v_k / torch.norm(v_k).clamp_min(1e-15)
Q.append(torch.unsqueeze(v_k, 0))
return torch.cat(Q, dim=0)
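    # Illustration: after gram_schmidt(), the rows of the returned Q are orthonormal
    # (assuming the raw components are linearly independent), so Q @ Q.T is numerically
    # the identity matrix of size n_components.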
def orthogonalize(self):
Q = torch.cat([self.components[i] for i in range(self.n_components)]) # (k, d)
# _, _, v = torch.svd(Q, some=False) # Q = USV^T
# Q_ = v[:, :self.n_components]
# return Q_.transpose(-1, -2)# (k, d) rows are orthonormal basis for rows of Q
return euclidean.orthonormal(Q)
    def normalize(self):
"""Makes the component vectors unit-norm (not orthogonal)."""
Q = torch.cat([self.components[i] for i in range(self.n_components)])
return Q / torch.norm(Q, dim=1, keepdim=True).clamp_min(1e-15)
    def get_components(self):
if self.keep_orthogonal:
Q = self.gram_schmidt()
# Q = self.orthogonalize()
else:
Q = self.normalize()
return Q # shape (n_components, dim)
def map_to_ball(self, x):
"""Returns coordinates of _projected points in a lower-dimensional Poincare ball model.
Args:
x: torch.tensor of shape (batch_size, dim)
Returns:
torch.tensor of shape (batch_size, n_components)
"""
Q = self.get_components()
x_p = self._project(x, Q)
# Q_orthogonal = self.gram_schmidt()
Q_orthogonal = self.orthogonalize()
return x_p @ Q_orthogonal.transpose(0, 1)
def fit_optim(self, x, iterative=False):
"""Finds component using gradient-descent-based optimization.
Args:
x: torch.tensor of size (batch_size x dim)
iterative: boolean
Note:
            If iterative=True, the components are optimized one at a time (nested subspace assumption).
"""
loss_vals = []
optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
if not iterative:
for i in range(self.max_steps):
                # Forward pass: compute projected variance
Q = self.get_components()
loss = self.compute_loss(x, Q)
loss_vals.append(loss.item())
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(self.parameters(), 1e5)
optimizer.step()
else:
for k in range(self.n_components):
for i in range(self.max_steps):
                    # Forward pass: compute projected variance
Q = self.get_components()
# Project on first k components
loss = self.compute_loss(x, Q[:k + 1, :])
loss_vals.append(loss.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
self.components[k].data = self.get_components()[k].unsqueeze(0)
self.components[k].requires_grad = False
return loss_vals
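    # Note on the iterative branch above: component k is reset to its orthogonalized
    # value and frozen (requires_grad = False) before component k+1 is optimized,
    # which enforces the nested-subspace (flag) structure across components.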
def fit_spectral(self, x):
"""Finds component using spectral decomposition (closed-form solution).
Args:
x: torch.tensor of size (batch_size x dim)
"""
raise NotImplementedError
def fit(self, x, iterative=False, optim=True):
"""Finds principal components using optimization or spectral decomposition approaches.
Args:
x: torch.tensor of size (batch_size x dim)
iterative: boolean (true to do iterative optimization of nested subspaces)
optim: boolean (true to find components via optimization, defaults to SVD otherwise)
"""
if optim:
self.fit_optim(x, iterative)
else:
self.fit_spectral(x)
def compute_metrics(self, x):
"""Compute dimensionality reduction evaluation metrics."""
Q = self.get_components()
x_proj = self._project(x, Q).detach()
return compute_metrics(x, x_proj)
class EucPCA(PCA):
"""Euclidean PCA (assumes data has Euclidean mean zero)."""
def __init__(self, dim, n_components, lr=1e-3, max_steps=100):
super(EucPCA, self).__init__(dim, n_components, lr, max_steps, keep_orthogonal=True)
def compute_loss(self, x, Q):
vals = x @ Q.transpose(0, 1) # shape (batch_size, n_components)
return - torch.sum(vals ** 2)
def _project(self, x, Q):
return (x @ Q.transpose(0, 1)) @ Q # shape (batch_size, dim)
def fit_spectral(self, x):
"""Euclidean PCA closed-form with SVD."""
S = (x.T @ x)
U, S, V = torch.svd(S)
for k in range(self.n_components):
self.components[k].data = U[k:k + 1]
class TangentPCA(PCA):
"""Euclidean PCA in the tangent space of the mean (assumes data has Frechet mean zero)."""
def __init__(self, dim, n_components, lr=1e-3, max_steps=100):
super(TangentPCA, self).__init__(dim, n_components, lr, max_steps, keep_orthogonal=True)
def _project(self, x, Q):
x_t = poincare.logmap0(x) # shape (batch_size, dim)
x_pt = (x_t @ Q.transpose(0, 1)) @ Q # shape (batch_size, dim)
x_p = poincare.expmap0(x_pt) # shape (batch_size, dim)
return x_p
def compute_loss(self, x, Q):
x_t = poincare.logmap0(x) # shape (batch_size, dim)
vals = x_t @ Q.transpose(0, 1) # shape (batch_size, n_components)
return - torch.sum(vals ** 2)
def fit_spectral(self, x):
"""Geodesic PCA closed-form with SVD."""
u = poincare.logmap0(x)
cov = (u.T @ u)
U, S, V = torch.svd(cov)
for k in range(self.n_components):
self.components[k].data = U[k:k + 1]
class PGA(PCA):
"""Exact Hyperbolic PGA using geodesic _projection (assuming data has Frechet mean zero).
This assumption is necessary because otherwise its unclear how to geodesically _project on the submanifold spanned
by tangent vectors. For general Frechet mean, the PGA paper approximates this using Tangent PCA.
"""
def __init__(self, dim, n_components, lr=1e-3, max_steps=100):
super(PGA, self).__init__(dim, n_components, lr, max_steps, keep_orthogonal=True)
def _project(self, x, Q):
"""Geodesic projection."""
proj = poincare.orthogonal_projection(x, Q, normalized=self.keep_orthogonal)
return proj
def compute_loss(self, x, Q):
proj = self._project(x, Q)
sq_distances = poincare.distance0(proj) ** 2
var = torch.mean(sq_distances)
return -var
class HoroPCA(PCA):
"""Hyperbolic PCA using horocycle _projections (assumes data has Frechet mean zero)."""
def __init__(self, dim, n_components, lr=1e-3, max_steps=100, frechet_variance=False, auc=False, hyperboloid=True):
"""
Currently auc=True and frechet_variance=True are not simultaneously supported (need to track mean parameter for each component).
"""
super(HoroPCA, self).__init__(dim, n_components, lr, max_steps, keep_orthogonal=True)
self.hyperboloid = hyperboloid
self.frechet_variance = frechet_variance
self.auc = auc
if self.frechet_variance:
self.mean_weights = nn.Parameter(torch.zeros(n_components))
def _project(self, x, Q):
if self.n_components == 1:
proj = project_kd(Q, x)[0]
else:
if self.hyperboloid:
hyperboloid_ideals = hyperboloid.from_poincare(Q, ideal=True)
hyperboloid_x = hyperboloid.from_poincare(x)
hyperboloid_proj = hyperboloid.horo_projection(hyperboloid_ideals, hyperboloid_x)[0]
proj = hyperboloid.to_poincare(hyperboloid_proj)
else:
proj = project_kd(Q, x)[0]
return proj
def compute_variance(self, x):
""" x are projected points. """
if self.frechet_variance:
# mean = self.mean_weights.unsqueeze(-1) * torch.stack(self.components, dim=0) # (k, d)
Q = [self.mean_weights[i] * self.components[i] for i in range(self.n_components)] # (k, d)
mean = sum(Q).squeeze(0)
distances = poincare.distance(mean, x)
var = torch.mean(distances ** 2)
else:
distances = poincare.pairwise_distance(x)
var = torch.mean(distances ** 2)
return var
def compute_loss(self, x, Q):
if self.n_components == 1:
# option 1
bus = busemann(x, Q[0]) # shape (batch_size, n_components)
return -torch.var(bus)
else:
auc = []
if self.auc:
for i in range(1, self.n_components):
Q_ = Q[:i, :]
proj = self._project(x, Q_)
var = self.compute_variance(proj)
auc.append(var)
return -sum(auc)
else:
proj = self._project(x, Q)
var = self.compute_variance(proj)
return -var
class BSA(PCA):
""" Stores k+1 reference points to define geodesic projections.
If hyperboloid option is false, only stores k reference points and assumes the first reference point (mean) is the origin.
"""
def __init__(self, dim, n_components, lr=1e-3, max_steps=100, hyperboloid=True, auc=True):
"""
hyperboloid: Do computations in the hyperboloid model, allowing for subspaces that do not pass through the origin (stores k+1 reference points instead of k)
auc: Use AUC objective to optimize the entire flag
Note that if auc=False and iterative=True, this is equivalent to forward BSA. However, hyperboloid=True is not currently supported in this case.
"""
self.hyperboloid = hyperboloid
self.auc = auc
if self.hyperboloid:
super(BSA, self).__init__(dim, n_components + 1, lr, max_steps, keep_orthogonal=True)
else:
super(BSA, self).__init__(dim, n_components, lr, max_steps, keep_orthogonal=True)
def _project(self, x, Q):
"""Geodesic projection."""
# return poincare.orthogonal_projection(x, Q, normalized=self.keep_orthogonal)
proj = poincare.orthogonal_projection(x, Q, normalized=self.keep_orthogonal)
return proj
def compute_loss(self, x, Q):
if self.auc:
auc = []
if self.hyperboloid:
Q = hyperboloid.from_poincare(Q, ideal=True)
x = hyperboloid.from_poincare(x)
for i in range(1, self.n_components):
Q_ = Q[:i + 1, :]
proj = minkowski.orthogonal_projection(Q_, x)
residual_variance = torch.sum(hyperboloid.distance(x, proj) ** 2)
auc.append(residual_variance)
else:
for i in range(1, self.n_components):
Q_ = Q[:i, :]
proj = self._project(x, Q_)
residual_variance = torch.sum(poincare.distance(x, proj) ** 2)
auc.append(residual_variance)
return sum(auc)
else:
if self.hyperboloid:
Q = hyperboloid.from_poincare(Q, ideal=True)
x = hyperboloid.from_poincare(x)
proj = minkowski.orthogonal_projection(Q, x)
residual_variance = torch.sum(hyperboloid.distance(x, proj) ** 2)
else:
proj = self._project(x, Q)
residual_variance = torch.sum(poincare.distance(x, proj) ** 2)
return residual_variance
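# Minimal usage sketch (illustrative only; the data, dimensions and hyperparameters
# below are assumptions, not part of the training pipeline):
def _demo_horopca():
# Fit HoroPCA on random points near the origin of the Poincare ball and print metrics.
x = poincare.random_points((128, 10), std=0.3)
model = HoroPCA(dim=10, n_components=2, lr=5e-2, max_steps=100)
model.fit(x, iterative=False, optim=True)
print(model.compute_metrics(x))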
|
HoroPCA-main
|
learning/pca.py
|
"""Evaluation metrics."""
import numpy as np
import torch
import geom.poincare as poincare
from learning.frechet import Frechet
def avg_distortion_measures(distances1, distances2, tau=1.0):
"""Computes different measures of average distortion between two distance matrices.
:param distances1: N x N torch tensor with pairwise distances (ground truth).
:param distances2: N x N torch tensor with pairwise distances (scaled embeddings).
:param tau: scaling factor applied to distances2.
:return: average distortion, average squared distortion, and mean squared difference (three scalars).
"""
n_nodes = distances1.shape[0]
ids = torch.triu_indices(row=n_nodes, col=n_nodes, offset=1)
distances1 = distances1[ids[0], ids[1]]
distances2 = distances2[ids[0], ids[1]] / tau
diff = torch.abs(distances2 - distances1)
ratio = diff / distances1
avg_distortion = torch.mean(ratio).item()
avg_distortion_sq = torch.mean(ratio ** 2).item()
avg_distortion_abs = torch.mean(diff ** 2).item()  # note: despite the name, this is the mean *squared* difference
return avg_distortion, avg_distortion_sq, avg_distortion_abs
def worst_case_distortion(distances1, distances2):
"""Worst case distortion metric."""
n_nodes = distances1.shape[0]
ids = torch.triu_indices(row=n_nodes, col=n_nodes, offset=1)
ratio = (distances2 / distances1)[ids[0], ids[1]]
return (torch.max(ratio) / torch.min(ratio)).item()
def l2_error(dist_orig, dist_proj):
"""l2 error of distances."""
return torch.mean((dist_orig - dist_proj) ** 2).item()
def unexplained_variance(x, x_proj):
"""Unexplained variance (see Pennec (2018))."""
res = poincare.distance(x, x_proj) ** 2
return torch.mean(res).item()
def frechet_var_approx(dist_proj):
"""Approximation of the Frechet variance with pairwise squared distances."""
return torch.mean(dist_proj ** 2).item()
def compute_metrics(x, x_proj, frechet_lr=0.1):
"""Computes various evaluation metrics projections."""
try:
uv = unexplained_variance(x, x_proj)
except RuntimeError:
# exception for hMDS where unexplained variance cannot be computed
uv = -1
dist_orig = poincare.pairwise_distance(x)
dist_proj = poincare.pairwise_distance(x_proj)
avg_distortion, avg_distortion_sq, avg_distortion_abs = avg_distortion_measures(dist_orig, dist_proj)
wc_distortion = worst_case_distortion(dist_orig, dist_proj)
frechet_var_apx = frechet_var_approx(dist_proj)
frechet_var_apx_orig = frechet_var_approx(dist_orig)
l2 = l2_error(dist_orig, dist_proj)
frechet = Frechet(lr=frechet_lr)
frechet_var, has_converged = frechet.variance(x_proj, return_converged=True)
return {
'distortion': avg_distortion,
'distortion_sq': avg_distortion_sq,
'distortion_abs': avg_distortion_abs,
'distortion_wc': wc_distortion,
'unexplained_var': uv,
'l2_error': l2,
'frechet_var_apx': frechet_var_apx,
'frechet_var': frechet_var.item(),
'frechet_mean_has_converged': has_converged,
"frechet_var_apx_orig": frechet_var_apx_orig,
}
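# Sanity-check sketch: projecting points onto themselves should give (near-)zero
# unexplained variance and distortion. Sizes below are arbitrary.
def _check_identity_projection():
x = poincare.random_points((32, 4), std=0.3)
m = compute_metrics(x, x.clone())
assert m['unexplained_var'] < 1e-6 and m['distortion'] < 1e-6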
def format_metrics(metrics, metric_names):
"""Print metrics."""
formatted_results = []
for metric in metric_names:
x = metrics[metric]
if isinstance(x, list):
mean, std = x
else:
mean, std = x, 0.0
formatted_results.append("{}: \t{:.2f} +- {:.2f}".format(metric, mean, std))
return formatted_results
def aggregate_metrics(metrics):
"""Compute average and standard deviation for metrics."""
if len(metrics) == 1:
return metrics[0]
else:
agg_metrics = metrics[0]
for metric in agg_metrics.keys():
vals = [x[metric] for x in metrics]
agg_metrics[metric] = [np.mean(vals), np.std(vals)]
return agg_metrics
|
HoroPCA-main
|
utils/metrics.py
|
HoroPCA-main
|
utils/__init__.py
|
|
"""Sarkar's combinatorial construction."""
import networkx as nx
import numpy as np
import scipy
MIN_NORM = 1e-15
# ################# CIRCLE INVERSIONS ########################
def reflect_at_zero(x, mu): # Note: this differs from geom.poincare.reflect_at_zero because it's numpy instead of torch
"""
Image of x by circle inversion that takes mu to the origin
"""
mu_sqnorm = np.sum(mu ** 2)
a = mu / mu_sqnorm.clip(min=1e-15)
a_sqnorm = np.sum(a ** 2)
r2 = a_sqnorm - np.longdouble([1.])
xa_sqnorm = np.maximum(np.sum((x - a) ** 2, axis=-1, keepdims=True), MIN_NORM)
return (r2 / xa_sqnorm) * (x - a) + a
def reflect_through_zero(p, q, x):
""" Image of x under reflection that takes p (normalized) to q (normalized) and 0 to 0. """
p_ = p / np.linalg.norm(p, axis=-1, keepdims=True).clip(min=1e-15)
q_ = q / np.linalg.norm(q, axis=-1, keepdims=True).clip(min=1e-15)
# print("norm p, q", np.linalg.norm(p_), np.linalg.norm(q_))
r = q_ - p_
# Magnitude of x in direction of r
m = np.sum(r * x, axis=-1, keepdims=True) / np.sum(r * r, axis=-1, keepdims=True)
return x - 2 * r * m
def test_reflect():
pass
# ################# SARKAR CONSTRUCTION ########################
def pick_root(tree):
graph_distances = np.array(nx.floyd_warshall_numpy(tree).astype(np.float32))
j_ids = np.argmax(graph_distances, axis=1)
i = np.argmax(graph_distances[np.arange(tree.number_of_nodes()), j_ids])
j = j_ids[i]
path = nx.shortest_path(tree, i, j)
length = len(path)
root = path[length // 2]
return root
def place_children(z_parent, n_children, scaling, dim=2, coding=True):
"""Embeds children of node embedded at the origin.
Assumes z is embedding of parent of node at the origin.
children are at disrance scale/2 from their parent in hyperbolic metric.
"""
if dim == 2:
if z_parent is None:
theta_parent = 0
n_neighbors = n_children
else:
theta_parent = np.angle(z_parent[0] + z_parent[1] * 1j)
n_neighbors = n_children + 1
theta_children = [theta_parent + 2 * np.longdouble(np.pi) * (i + 1) / np.longdouble(n_neighbors) for i in
range(n_children)]
z_children = []
for theta_child in theta_children:
z_children.append(scaling * np.array([np.cos(theta_child), np.sin(theta_child)]))
return z_children
else:
normalize = lambda x: x / np.linalg.norm(x, keepdims=True)
if coding:
N = 2 ** int(np.ceil(np.log(dim) / np.log(2)))
H = scipy.linalg.hadamard(N)
if z_parent is not None:
par_ = np.concatenate((z_parent, np.zeros(N - dim)))
H = reflect_through_zero(H[0, :], par_, H)
# print("reflecting H0 onto parent", scaling * normalize(H[0,:]) - par_)
z_children = [H[i, :dim] for i in range(1, min(n_children + 1, N))]
if n_children > N - 1:
z_children += [np.random.randn(dim) for _ in range(n_children - N + 1)]
z_children = [scaling * normalize(c) for c in z_children]
else:
z_children = [scaling * normalize(np.random.randn(dim)) for _ in range(n_children)]
return z_children
def sarkar(tree, tau=1.0, root=None, dim=2, coding=False, seed=1234):
"""Embeds a tree in H_d using Sarkar's construction.
Args:
tree: nx.Graph object representing the tree structure to embed.
tau: scale of hyperbolic embeddings; parent and child will be placed at
hyperbolic distance tau from each other.
root: index of the root node in the tree object (if None, picked on the longest path).
dim: dimension of the embedding space.
coding: if True, place children in dimension > 2 using Hadamard codes.
seed: random seed used for child placement.
"""
np.random.seed(seed)
if root is None:
# pick root in Sarkar as node on longest path
root = pick_root(tree)
# Initialize embeddings array
z = np.zeros((tree.number_of_nodes(), dim), dtype=np.float64)
scaling = np.tanh(tau / 2) # Euclidean distance corresponding to hyperbolic distance of tau
# bfs traversal
bfs_tree_rev = nx.reverse_view(nx.bfs_tree(tree, root))
for current, children in nx.bfs_successors(tree, root):
if current == root:
z[root] = np.zeros(dim)
z_children = place_children(None, len(children), scaling, dim=dim, coding=coding)
for i, child_idx in enumerate(children):
z[child_idx] = z_children[i]
else:
z_current = z[current]
z_parent = z[list(bfs_tree_rev.neighbors(current))[0]]
# inversion that maps current to the origin
z_parent = reflect_at_zero(z_parent, z_current)
z_children = place_children(z_parent, len(children), scaling, dim=dim, coding=coding)
for i, child_idx in enumerate(children):
z[child_idx] = reflect_at_zero(z_children[i], z_current)
return z
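# Usage sketch: parent-child pairs produced by sarkar() should sit at hyperbolic
# distance ~tau. The tree below is an arbitrary toy example.
def _demo_sarkar():
tree = nx.balanced_tree(2, 3)
z = sarkar(tree, tau=1.0, dim=2)
u, v = next(iter(tree.edges()))
# Poincare distance: arccosh(1 + 2|u-v|^2 / ((1-|u|^2)(1-|v|^2)))
num = 2 * np.sum((z[u] - z[v]) ** 2)
den = (1 - np.sum(z[u] ** 2)) * (1 - np.sum(z[v] ** 2))
print(np.arccosh(1 + num / den))  # expected: close to tau = 1.0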
|
HoroPCA-main
|
utils/sarkar.py
|
"""Data utils."""
import networkx as nx
import numpy as np
import geom.poincare as poincare
from learning.frechet import Frechet
def load_graph(dataset):
"""Loads a graph dataset.
Return: networkx graph object
"""
G = nx.Graph()
with open(f"data/edges/{dataset}.edges", "r") as f:
for line in f:
tokens = line.split()
u = int(tokens[0])
v = int(tokens[1])
G.add_edge(u, v)
return G
def load_embeddings(dataset, dim):
embeddings_path = f"data/embeddings/{dataset}_{dim}_poincare.npy"
return np.load(embeddings_path)
def center(x, lr):
"""Centers data so it has zero Frechet mean."""
frechet = Frechet(lr=lr)
mu = frechet.mean(x)
return poincare.reflect_at_zero(x, mu)
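# Usage sketch (the dataset name below is hypothetical; real files live under
# data/edges and data/embeddings per the loaders above):
def _demo_load_and_center():
import torch
x = torch.from_numpy(load_embeddings("smalltree", 10))  # hypothetical dataset
return center(x, lr=0.1)  # centered embeddings with ~zero Frechet mean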
|
HoroPCA-main
|
utils/data.py
|
"""Util functions for hyperboloid models
Convention: The ambient Minkowski space has signature -1, 1, 1, ...
i.e. the squared norm of (t,x,y,z) is -t^2 + x^2 + y^2 + z^2,
And we are using the positive sheet, i.e. every point on the hyperboloid
has positive first coordinate.
"""
import torch
import geom.minkowski as minkowski
import geom.poincare as poincare
MIN_NORM = 1e-15
def distance(x, y):
"""
Args:
x, y: torch.tensor of the same shape (..., Minkowski_dim)
Returns:
torch.tensor of shape (..., )
"""
# return torch.acosh(- minkowski.bilinear_pairing(x, y))
return torch.acosh(torch.clamp(- minkowski.bilinear_pairing(x, y), min=1.0))
def exp_unit_tangents(base_points, unit_tangents, distances):
"""Batched exponential map using the given base points, unit tangent directions, and distances
Args:
base_points, unit_tangents: torch.tensor of shape (..., Minkowski_dim)
Each unit_tangents[j..., :] must have (Minkowski) squared norm 1 and be orthogonal to base_points[j..., :]
distances: torch.tensor of shape (...)
Returns:
torch.tensor of shape (..., Minkowski_dim)
"""
distances = distances.unsqueeze(-1)
return base_points * torch.cosh(distances) + unit_tangents * torch.sinh(distances)
# def exp(base_points, tangents):
# """Batched exponential map using the given base points and tangent vectors
#
# Args:
# base_point, tangents: torch.tensor of shape (..., Minkowski_dim)
# Each tangents[j..., :] must have squared norm > 0 and is orthogonal to base_points[j..., :]
#
# Returns:
# torch.tensor of shape (..., Minkowski_dim)
# """
# distances = torch.sqrt(minkowski.squared_norm(tangents)) # shape (...)
# unit_tangets = tangents / distances.view(-1, 1) # shape (..., Minkowski_dim)
# return exp_unit_tangents(base_point, unit_tangents, distances)
def from_poincare(x, ideal=False):
"""Convert from Poincare ball model to hyperboloid model
Args:
x: torch.tensor of shape (..., dim)
ideal: boolean. Should be True if the input vectors are ideal points, False otherwise
Returns:
torch.tensor of shape (..., dim+1)
To do:
Add some capping to make things numerically stable. This is only needed in the case ideal == False
"""
if ideal:
t = torch.ones(x.shape[:-1], device=x.device).unsqueeze(-1)
return torch.cat((t, x), dim=-1)
else:
eucl_squared_norm = (x * x).sum(dim=-1, keepdim=True)
return torch.cat((1 + eucl_squared_norm, 2 * x), dim=-1) / (1 - eucl_squared_norm).clamp_min(MIN_NORM)
def to_poincare(x, ideal=False):
"""Convert from hyperboloid model to Poincare ball model
Args:
x: torch.tensor of shape (..., Minkowski_dim), where Minkowski_dim >= 3
ideal: boolean. Should be True if the input vectors are ideal points, False otherwise
Returns:
torch.tensor of shape (..., Minkowski_dim - 1)
"""
if ideal:
return x[..., 1:] / (x[..., 0].unsqueeze(-1)).clamp_min(MIN_NORM)
else:
return x[..., 1:] / (1 + x[..., 0].unsqueeze(-1)).clamp_min(MIN_NORM)
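# Round-trip sketch: for points away from the boundary, to_poincare(from_poincare(x))
# recovers x up to numerical precision.
def _check_round_trip():
x = torch.rand(4, 3) * 0.5
assert torch.allclose(to_poincare(from_poincare(x)), x, atol=1e-5)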
def decision_boundary_to_poincare(minkowski_normal_vec):
"""Convert the totally geodesic submanifold defined by the Minkowski normal vector to Poincare ball model
(Here the Minkowski normal vector defines a linear subspace, which intersects the hyperboloid at our submanifold)
Args:
minkowski_normal_vec: torch.tensor of shape (Minkowski_dim, )
Returns:
center: torch.tensor of shape (Minkowski_dim -1, )
radius: float
Warning:
minkowski_normal_vec must have positive squared norm
minkowski_normal_vec[0] must be nonzero (otherwise the submanifold is a flat plane through the origin)
"""
x = minkowski_normal_vec
# poincare_origin = [1,0,0,0,...], # shape (Minkowski_dim, )
poincare_origin = torch.zeros(minkowski_normal_vec.shape[0], device=minkowski_normal_vec.device)
poincare_origin[0] = 1
# shape (1, Minkowski_dim)
poincare_origin_reflected = minkowski.reflection(minkowski_normal_vec, poincare_origin.unsqueeze(0))
# shape (Minkowski_dim-1, )
origin_reflected = to_poincare(poincare_origin_reflected).squeeze(0)
center = poincare.reflection_center(origin_reflected)
radius = torch.sqrt(torch.sum(center ** 2) - 1)
return center, radius
def orthogonal_projection(basis, x):
"""Compute the orthogonal projection of x onto the geodesic submanifold
spanned by the given basis vectors (i.e. the intersection of the hyperboloid with
the Euclidean linear subspace spanned by the basis vectors).
Args:
basis: torch.tensor of shape(num_basis, Minkowski_dim)
x: torch.tensor of shape(batch_size, Minkowski_dim)
Returns:
torch.tensor of shape(batch_size, Minkowski_dim)
Conditions:
Each basis vector must have non-positive Minkowski squared norms.
There must be at least 2 basis vectors.
The basis vectors must be linearly independent.
"""
minkowski_proj = minkowski.orthogonal_projection(basis, x) # shape (batch_size, Minkowski_dim)
squared_norms = minkowski.squared_norm(minkowski_proj) # shape (batch_size, )
return minkowski_proj / torch.sqrt(- squared_norms.unsqueeze(1))
def horo_projection(ideals, x):
"""Compute the projection based on horosphere intersections.
The target submanifold has dimension num_ideals and is a geodesic submanifold passing through
the ideal points and (1,0,0,0,...), i.e. the point corresponds to the origin in Poincare model.
Args:
ideals: torch.tensor of shape (num_ideals, Minkowski_dim)
num_ideals must be STRICTLY between 1 and Minkowski_dim
ideal vectors must be independent
the geodesic submanifold spanned by ideals must not contain (1,0,0,...)
x: torch.tensor of shape (batch_size, Minkowski_dim)
Returns:
torch.tensor of shape (batch_size, Minkowski_dim)
"""
# Compute orthogonal (geodesic) projection from x to the geodesic submanifold spanned by ideals
# We call this submanifold the "spine" because of the "open book" intuition
spine_ortho_proj = orthogonal_projection(ideals, x) # shape (batch_size, Minkowski_dim)
spine_dist = distance(spine_ortho_proj, x) # shape (batch_size, )
# poincare_origin = [1,0,0,0,...], # shape (Minkowski_dim, )
poincare_origin = torch.zeros(x.shape[1], device=x.device)
poincare_origin[0] = 1
# Find a tangent vector of the hyperboloid at spine_ortho_proj that is tangent to the target submanifold
# and orthogonal to the spine.
# This is done in a Gram-Schmidt way: Take the Euclidean vector pointing from spine_ortho_proj to poincare_origin,
# then subtract a projection part so that it is orthogonal to the spine and tangent to the hyperboloid
# Everything below has shape (batch_size, Minkowski_dim)
chords = poincare_origin - spine_ortho_proj
tangents = chords - minkowski.orthogonal_projection(ideals, chords)
unit_tangents = tangents / torch.sqrt(minkowski.squared_norm(tangents)).view(-1, 1)
proj_1 = exp_unit_tangents(spine_ortho_proj, unit_tangents, spine_dist)
proj_2 = exp_unit_tangents(spine_ortho_proj, unit_tangents, -spine_dist)
return proj_1, proj_2
def mds(D, d):
"""
Args:
D - (..., n, n) distance matrix
Returns:
X - (..., n, d) hyperbolic embeddings
"""
Y = -torch.cosh(D)
# print("Y:", Y)
eigenvals, eigenvecs = torch.symeig(Y, eigenvectors=True)
# print(Y.shape, eigenvals.shape, eigenvecs.shape)
# print(eigenvals, eigenvecs)
X = torch.sqrt(torch.clamp(eigenvals[-d:], min=0.)) * eigenvecs[..., -d:]
# print("testing")
# print(X)
# print(Y @ X)
u = torch.sqrt(1 + torch.sum(X * X, dim=-1, keepdim=True))
M = torch.cat((u, X), dim=-1)
# print(minkowski.pairwise_bilinear_pairing(M, M))
return torch.cat((u, X), dim=-1)
def test():
ideal = torch.tensor([[1.0, 0, 0, 0], [0.0, 1, 0, 0]])
x = torch.tensor([[0.2, 0.3, 0.4, 0.5], [0.0, 0, 0, 0], [0.0, 0, 0, 0.7]])
loid_ideal, loid_x = from_poincare(ideal, True), from_poincare(x)
loid_p1, loid_p2 = horo_projection(loid_ideal, loid_x)
pr1, pr2 = to_poincare(loid_p1), to_poincare(loid_p2)
print(pr1)
print(pr2)
# ideals = torch.tensor([[3.0,3.0,0.0], [5.0,-5.0,0.0]])
# x = torch.tensor([[5.0,0.0,math.sqrt(24)],[2.0,-math.sqrt(3), 0]])
# print(orthogonal_projection(ideals, x))
ideals = torch.tensor([[1.0, 1.0, 0.0], [5.0, 3, 4]])
x = torch.tensor([[5.0, 0, 24 ** 0.5], [2.0, - 3 ** 0.5, 0]])
print(horo_projection(ideals, x))
def test_mds(n=100, d=10):
X = torch.randn(n, d)
X = X / torch.norm(X, dim=-1, keepdim=True) * 0.9
X = from_poincare(X)
# print(X.shape)
D = distance(X.unsqueeze(-2), X.unsqueeze(-3))
# print(D.shape)
# print(D-D.transpose(0,1))
X_ = mds(D, d)
# print(X_.shape)
D_ = distance(X_.unsqueeze(-2), X_.unsqueeze(-3))
print(D - D_)
def test_projection():
""" Test that orthogonal projection agrees with the Poincare disk version. """
d = 5
# x = torch.randn(1, d) * 0.01
x = poincare.random_points((1, d))
# Q = torch.randn(2, d)
# Q = Q / torch.norm(Q, dim=-1, keepdim=True)
Q = poincare.random_ideals((2, d))
# poincare projection
import geom.poincare as P
from geom.euclidean import orthonormal
Q = orthonormal(Q)
x_r = P.reflect(x, Q)
p = P.midpoint(x, x_r)
print(p)
# hyperboloid projection
Q = torch.cat([Q, torch.zeros(1, d)], dim=0)
p_ = orthogonal_projection(from_poincare(Q, ideal=True), from_poincare(x))
print(to_poincare(p_))
# Sanity checks
if __name__ == "__main__":
# test()
# test_mds(n=100, d=10)
poincare_origin = torch.zeros(3)
poincare_origin[0] = 1
print(from_poincare(poincare_origin, ideal=True))
print(to_poincare(from_poincare(poincare_origin, ideal=True), ideal=True))
test_projection()
|
HoroPCA-main
|
geom/hyperboloid.py
|
"""Poincare utils functions."""
import torch
import geom.euclidean as euclidean
MIN_NORM = 1e-15
BALL_EPS = {torch.float32: 4e-3, torch.float64: 1e-5}
def expmap0(u):
"""Exponential map taken at the origin of the Poincare ball with curvature c.
Args:
u: torch.Tensor of size B x d with hyperbolic points
c: torch.Tensor of size 1 or B x 1 with absolute hyperbolic curvatures
Returns:
torch.Tensor with tangent points shape (B, d)
"""
u_norm = u.norm(dim=-1, p=2, keepdim=True).clamp_min(MIN_NORM)
gamma_1 = torch.tanh(u_norm) * u / u_norm
return project(gamma_1)
def logmap0(y):
"""Logarithmic map taken at the origin of the Poincare ball with curvature c.
Args:
y: torch.Tensor of size B x d with tangent points
c: torch.Tensor of size 1 or B x 1 with absolute hyperbolic curvatures
Returns:
torch.Tensor with hyperbolic points.
"""
y_norm = y.norm(dim=-1, p=2, keepdim=True).clamp_min(MIN_NORM)
return y / y_norm * torch.atanh(y_norm.clamp(-1 + 1e-15, 1 - 1e-15))
def expmap(x, u):
u_norm = u.norm(dim=-1, p=2, keepdim=True).clamp_min(MIN_NORM)
second_term = torch.tanh(lambda_(x) * u_norm / 2) * u / u_norm
gamma_1 = mobius_add(x, second_term)
return gamma_1
def logmap(x, y):
sub = mobius_add(-x, y)
sub_norm = sub.norm(dim=-1, p=2, keepdim=True).clamp_min(MIN_NORM).clamp_max(1 - 1e-15)
return 2 / lambda_(x) * torch.atanh(sub_norm) * sub / sub_norm
def lambda_(x):
"""Computes the conformal factor."""
x_sqnorm = torch.sum(x.data.pow(2), dim=-1, keepdim=True)
return 2 / (1. - x_sqnorm).clamp_min(MIN_NORM)
def project(x):
"""Project points to Poincare ball with curvature c.
Args:
x: torch.Tensor of size B x d with hyperbolic points
Returns:
torch.Tensor with projected hyperbolic points.
"""
norm = x.norm(dim=-1, p=2, keepdim=True).clamp_min(MIN_NORM)
eps = BALL_EPS[x.dtype]
maxnorm = (1 - eps)
cond = norm > maxnorm
projected = x / norm * maxnorm
return torch.where(cond, projected, x)
def distance(x, y, keepdim=True):
"""Hyperbolic distance on the Poincare ball with curvature c.
Args:
x: torch.Tensor of size B x d with hyperbolic points
y: torch.Tensor of size B x d with hyperbolic points
Returns: torch.Tensor with hyperbolic distances, size B x 1
"""
pairwise_norm = mobius_add(-x, y).norm(dim=-1, p=2, keepdim=True)
dist = 2.0 * torch.atanh(pairwise_norm.clamp(-1 + MIN_NORM, 1 - MIN_NORM))
if not keepdim:
dist = dist.squeeze(-1)
return dist
def pairwise_distance(x, keepdim=False):
"""All pairs of hyperbolic distances (NxN matrix)."""
return distance(x.unsqueeze(-2), x.unsqueeze(-3), keepdim=keepdim)
def distance0(x, keepdim=True):
"""Computes hyperbolic distance between x and the origin."""
x_norm = x.norm(dim=-1, p=2, keepdim=True)
d = 2 * torch.atanh(x_norm.clamp(-1 + 1e-15, 1 - 1e-15))
if not keepdim:
d = d.squeeze(-1)
return d
def mobius_add(x, y):
"""Mobius addition."""
x2 = torch.sum(x * x, dim=-1, keepdim=True)
y2 = torch.sum(y * y, dim=-1, keepdim=True)
xy = torch.sum(x * y, dim=-1, keepdim=True)
num = (1 + 2 * xy + y2) * x + (1 - x2) * y
denom = 1 + 2 * xy + x2 * y2
return num / denom.clamp_min(MIN_NORM)
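# Sanity sketch: -x is a left inverse for Mobius addition, i.e. mobius_add(-x, x) = 0.
def _check_mobius_left_inverse():
x = random_points((8, 3), std=0.5)
assert torch.allclose(mobius_add(-x, x), torch.zeros_like(x), atol=1e-5)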
def mobius_mul(x, t):
"""Mobius multiplication."""
normx = x.norm(dim=-1, p=2, keepdim=True).clamp(min=MIN_NORM, max=1. - 1e-5)
return torch.tanh(t * torch.atanh(normx)) * x / normx
def midpoint(x, y):
"""Computes hyperbolic midpoint beween x and y."""
t1 = mobius_add(-x, y)
t2 = mobius_mul(t1, 0.5)
return mobius_add(x, t2)
# Reflection (circle inversion of x through orthogonal circle centered at a)
def isometric_transform(x, a):
r2 = torch.sum(a ** 2, dim=-1, keepdim=True) - 1.
u = x - a
return r2 / torch.sum(u ** 2, dim=-1, keepdim=True) * u + a
# center of inversion circle
def reflection_center(mu):
return mu / torch.sum(mu ** 2, dim=-1, keepdim=True)
# Map x under the isometry (inversion) taking mu to origin
def reflect_at_zero(x, mu):
a = reflection_center(mu)
return isometric_transform(x, a)
def orthogonal_projection(x, Q, normalized=False):
""" Orthogonally project x onto linear subspace (through the origin) spanned by rows of Q. """
if not normalized:
Q = euclidean.orthonormal(Q)
x_ = euclidean.reflect(x, Q)
return midpoint(x, x_)
def geodesic_between_ideals(ideals):
"""Return the center and radius of the Euclidean circle representing
the geodesic joining two ideal points p = ideals[0] and q = ideals[1]
Args:
ideals: torch.tensor of shape (...,2,dim)
Return:
center: torch.tensor of shape (..., dim)
radius: torch.tensor of shape (...)
Note: raises an error if p = -q, i.e. if the geodesic between them is a Euclidean line
"""
p = ideals[..., 0, :]
q = ideals[..., 1, :]
norm_sum = (p + q).norm(dim=-1, p=2) # shape (...)
assert torch.all(norm_sum != 0)
center = (p + q) / (1 + (p * q).sum(dim=-1, keepdim=True))
radius = (p - q).norm(dim=-1, p=2) / norm_sum
return center, radius
def random_points(size, std=1.0):
tangents = torch.randn(*size) * std
x = expmap0(tangents)
return x
def random_ideals(size):
Q = torch.randn(*size)
Q = Q / torch.norm(Q, dim=-1, keepdim=True)
return Q
|
HoroPCA-main
|
geom/poincare.py
|
HoroPCA-main
|
geom/__init__.py
|
|
"""Horocycle projection utils (Poincare model)."""
import torch
MIN_NORM = 1e-15
def busemann(x, p, keepdim=True):
"""
x: (..., d)
p: (..., d)
Returns: (..., 1) if keepdim==True else (...)
"""
xnorm = x.norm(dim=-1, p=2, keepdim=True)
pnorm = p.norm(dim=-1, p=2, keepdim=True)
p = p / pnorm.clamp_min(MIN_NORM)
num = torch.norm(p - x, dim=-1, keepdim=True) ** 2
den = (1 - xnorm ** 2).clamp_min(MIN_NORM)
ans = torch.log((num / den).clamp_min(MIN_NORM))
if not keepdim:
ans = ans.squeeze(-1)
return ans
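# Sanity sketch: the Busemann function to any ideal point vanishes at the origin
# and is negative for points moved toward that ideal point.
def _check_busemann():
p = torch.tensor([[1.0, 0.0]])
assert torch.allclose(busemann(torch.zeros(1, 2), p), torch.zeros(1, 1))
assert busemann(torch.tensor([[0.5, 0.0]]), p).item() < 0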
def circle_intersection_(r, R):
""" Computes the intersection of a circle of radius r and R with distance 1 between their centers.
Returns:
x - distance from center of first circle
h - height off the line connecting the two centers of the two intersection pointers
"""
x = (1.0 - R ** 2 + r ** 2) / 2.0
s = (r + R + 1) / 2.0
sq_h = (s * (s - r) * (s - R) * (s - 1)).clamp_min(MIN_NORM)
h = torch.sqrt(sq_h) * 2.0
return x, h
def circle_intersection(c1, c2, r1, r2):
""" Computes the intersections of a circle centered at ci of radius ri.
c1, c2: (..., d)
r1, r2: (...)
"""
d = torch.norm(c1 - c2, dim=-1)  # (...); dim=-1 matches sphere_intersection below
x, h = circle_intersection_(r1 / d.clamp_min(MIN_NORM), r2 / d.clamp_min(MIN_NORM)) # (...)
x = x.unsqueeze(-1)
h = h.unsqueeze(-1)
center = x * c2 + (1 - x) * c1 # (..., d)
radius = h * d # (...)
# The intersection is a hypersphere of one lower dimension, intersected with the plane
# orthogonal to the direction c1->c2
# In general, you can compute this with a sort of higher dimensional cross product?
# For now, only 2 dimensions
ortho = c2 - c1 # (..., d)
assert ortho.size(-1) == 2
direction = torch.stack((-ortho[..., 1], ortho[..., 0]), dim=-1)
direction = direction / torch.norm(direction, keepdim=True).clamp_min(MIN_NORM)
return center + radius.unsqueeze(-1) * direction # , center - radius*direction
def busemann_to_horocycle(p, t):
""" Find the horocycle corresponding to the level set of the Busemann function to ideal point p with value t.
p: (..., d)
t: (...)
Returns:
c: (..., d)
r: (...)
"""
# Busemann_p(x) = d means dist(0, x) = -d
q = -torch.tanh(t / 2).unsqueeze(-1) * p
c = (p + q) / 2.0
r = torch.norm(p - q, dim=-1) / 2.0
return c, r
def sphere_intersection(c1, r1, c2, r2):
""" Computes the intersections of a circle centered at ci of radius ri.
c1, c2: (..., d)
r1, r2: (...)
Returns:
center, radius such that the intersection of the two spheres is given by
the intersection of the sphere (c, r) with the hyperplane orthogonal to the direction c1->c2
"""
d = torch.norm(c1 - c2, dim=-1) # (...)
x, h = circle_intersection_(r1 / d.clamp_min(MIN_NORM), r2 / d.clamp_min(MIN_NORM)) # (...)
x = x.unsqueeze(-1)
center = x * c2 + (1 - x) * c1 # (..., d)
radius = h * d # (...)
return center, radius
def sphere_intersections(c, r):
""" Computes the intersection of k spheres in dimension d.
c: list of centers (..., k, d)
r: list of radii (..., k)
Returns:
center: (..., d)
radius: (...)
ortho_directions: (..., d, k-1)
"""
k = c.size(-2)
assert k == r.size(-1)
ortho_directions = []
center = c[..., 0, :] # (..., d)
radius = r[..., 0] # (...)
for i in range(1, k):
center, radius = sphere_intersection(center, radius, c[..., i, :], r[..., i])
ortho_directions.append(c[..., i, :] - center)
ortho_directions.append(torch.zeros_like(center)) # trick to handle the case k=1
ortho_directions = torch.stack(ortho_directions, dim=-1) # (..., d, k-1) [last element is 0]
return center, radius, ortho_directions
# 2D projections
def project_kd(p, x, keep_ambient=True):
""" Project n points in dimension d onto 'direction' spanned by k ideal points
p: (..., k, d) ideal points
x: (..., n, d) points to project
Returns:
projection_1: (..., n, s) where s = d if keep_ambient==True otherwise s = k
projection_2: same as projection_1. This is guaranteed to be the ideal point in the case k = 1
p: the ideal points
"""
if len(p.shape) < 2:
p = p.unsqueeze(0)
if len(x.shape) < 2:
x = x.unsqueeze(0)
k = p.size(-2)
d = x.size(-1)
assert d == p.size(-1)
busemann_distances = busemann(x.unsqueeze(-2), p.unsqueeze(-3), keepdim=False) # (..., n, k)
c, r = busemann_to_horocycle(p.unsqueeze(-3), busemann_distances) # (..., n, k, d) (..., n, k)
c, r, ortho = sphere_intersections(c, r) # (..., n, d) (..., n) (..., n, d, k-1)
# we are looking for a vector spanned by the k ideal points, orthogonal to k-1 given vectors
# i.e. x @ p @ ortho = 0
if ortho is None:
direction = torch.ones_like(busemann_distances) # (..., n, k)
else:
a = torch.matmul(p.unsqueeze(-3), ortho) # (..., n, k, k-1) = (..., n, k, d) @ (..., n, d, k-1)
u, s, v = torch.svd(a, some=False) # a = u s v^T
direction = u[..., -1] # (..., n, k)
direction = direction @ p # (..., n, d)
direction = direction / torch.norm(direction, dim=-1, keepdim=True).clamp_min(MIN_NORM)
projection_1 = c - r.unsqueeze(-1) * direction
projection_2 = c + r.unsqueeze(-1) * direction
if not keep_ambient:
_, _, v = torch.svd(p, some=False) # P = USV^T => PV = US so last d-k columns of PV are 0
projection_1 = (projection_1 @ v)[..., :k]
projection_2 = (projection_2 @ v)[..., :k]
p = (p @ v)[..., :k]
return projection_1, projection_2, p
def project2d(p, q, x):
# reconstruct p and q in 2D
p_ = torch.stack([p.new_ones(p.shape[:-1]), p.new_zeros(p.shape[:-1])], dim=-1)
cos = torch.sum(p * q, dim=-1)
sin = torch.sqrt(1 - cos ** 2)
q_ = torch.stack([cos, sin], dim=-1)
bp = busemann(x, p).squeeze(-1)
bq = busemann(x, q).squeeze(-1)
c0, r0 = busemann_to_horocycle(p_, bp)
c1, r1 = busemann_to_horocycle(q_, bq)
reconstruction = circle_intersection(c0, c1, r0, r1)
return reconstruction
def horo_project_using_one_ideal(submanifold_ideals, x, custom_ideal_direction=None, keep_ambient=True):
"""The first horospherical projection we discussed, currently section 5.3 of the overleaf doc
Args:
submanifold_ideals: torch.tensor of shape (sub_dim, dim)
x: torch.tensor of shape (batch_size, dim)
custom_ideal_direction (optional): torch.tensor of shape (dim, )
keep_ambient: boolean
Returns:
if keep_ambient == True:
torch.tensor of shape (batch_size, dim)
else:
torch.tensor of shape (batch_size, sub_dim): the rotated projections
Note:
custom_ideal_direction, if provided, must be a unit vector in the row span of submanifold_ideals
submanifold_ideals must have independent rows
The submanifold we are projecting onto always passes through the origin.
"""
if custom_ideal_direction is None:
p = submanifold_ideals[0, :]
p = p / torch.sqrt(p.dot(p))
else:
p = custom_ideal_direction
eucl_proj_coefs, _ = torch.solve(submanifold_ideals @ x.transpose(0, 1),
submanifold_ideals @ submanifold_ideals.transpose(0, 1)) # (sub_dim, batch_size)
eucl_projs = eucl_proj_coefs.transpose(0, 1) @ submanifold_ideals # (batch_size, dim)
t = torch.sum((p - x) * (p - x), dim=-1) / (
2 * torch.sum((p - eucl_projs) * (p - x), dim=-1)) # shape (batch_size, )
t = t.unsqueeze(-1)
output = 2 * t * eucl_projs + (1 - 2 * t) * p.unsqueeze(0) # shape (batch_size, dim)
if keep_ambient:
return output
else:
q, r = torch.qr(submanifold_ideals.transpose(0, 1)) # q.shape (dim, sub_dim)
return output @ q
def test_horo_project_one_ideal():
"""
Sanity checks for horo_project_using_one_ideal:
In this test, the first two input points are already in the submanifold,
so they should project to themselves. The third input should not.
"""
print("Test Horo Projection with One Ideal:")
print("------------------------------------")
submanifold_ideals = torch.tensor([[1.0, 0, 0], [0.0, 1, 0]])
x = torch.tensor([[0.0, 0, 0], [0.7, 0.6, 0], [0.3, 0.4, 0.5]])
proj = horo_project_using_one_ideal(submanifold_ideals, x)
print(x, proj)
"""
In the case sub_dim == 1, applying horo_project_using_one_ideal and then
computing hyperbolic distance to the origin should give another implementation
of the Busemann function, at least up to a sign convention
"""
import geom.poincare as poincare
submanifold_ideals = torch.tensor([[1.0, 0, 0]])
x = torch.tensor([[0.0, 0, 0], [0.7, 0.6, 0], [0.3, 0.4, 0.5]])
proj = horo_project_using_one_ideal(submanifold_ideals, x, keep_ambient=False)
print(poincare.distance0(proj))
print(busemann(x, submanifold_ideals))
if __name__ == "__main__":
test_horo_project_one_ideal()
|
HoroPCA-main
|
geom/horo.py
|
""" Util functions for the Minkowski metric.
Note that functions for the hyperboloid model itself are in geom.hyperboloid
Most functions in this file have a bilinear_form argument that can generally be ignored.
That argument is there just in case we need to use a non-standard norm/signature.
"""
import torch
def product(x, y):
"""Minkowski pairing with signature (+, -, -, ...): x0*y0 - x1*y1 - ... (the negative of bilinear_pairing)."""
eucl_pairing = torch.sum(x * y, dim=-1, keepdim=False)
return 2 * x[..., 0] * y[..., 0] - eucl_pairing
def bilinear_pairing(x, y, bilinear_form=None):
"""Compute the bilinear pairing (i.e. "dot product") of x and y using the given bilinear form.
If bilinear_form is not provided, use the default Minkowski form,
i.e. (x0, x1, x2) dot (y0, y1, y2) = -x0*y0 + x1*y1 + x2*y2
Args:
x, y: torch.tensor of the same shape (..., dim), where dim >= 2
bilinear_form (optional): torch.tensor of shape (dim, dim)
Returns:
torch.tensor of shape (...)
"""
if bilinear_form is None:
eucl_pairing = torch.sum(x * y, dim=-1, keepdim=False)
return eucl_pairing - 2 * x[..., 0] * y[..., 0]
else:
pairing = torch.matmul(x.unsqueeze(-2), (y @ bilinear_form).unsqueeze(-1)) # shape (..., 1, 1)
return pairing.reshape(x.shape[:-1])
def squared_norm(x, bilinear_form=None):
return bilinear_pairing(x, x, bilinear_form)
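# Sanity sketch for the default signature (-1, 1, 1, ...):
# (1,0,0) is timelike with squared norm -1, (0,1,0) is spacelike with squared norm +1.
def _check_signature():
t = torch.tensor([1.0, 0.0, 0.0])
s = torch.tensor([0.0, 1.0, 0.0])
assert squared_norm(t).item() == -1.0
assert squared_norm(s).item() == 1.0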
def pairwise_bilinear_pairing(x, y, bilinear_form=None):
"""Compute the pairwise bilinear pairings (i.e. "dot product") of two list of vectors
with respect to the given bilinear form.
If bilinear_form is not provided, use the default Minkowski form,
i.e. (x0, x1, x2) dot (y0, y1, y2) = -x0*y0 + x1*y1 + x2*y2
Args:
x: torch.tensor of shape (..., M, dim), where dim >= 2
y: torch.tensor of shape (..., N, dim), where dim >= 2
bilinear_form (optional): torch.tensor of shape (dim, dim).
Returns:
torch.tensor of shape (..., M, N)
"""
if bilinear_form is None:
return x @ y.transpose(-1, -2) - 2 * x[..., 0].unsqueeze(-1) * y[..., 0].unsqueeze(-2)  # batched outer product (torch.ger only handles 2D)
else:
return x @ bilinear_form @ y.transpose(-1, -2)
def orthogonal_projection(basis, x, bilinear_form=None):
"""Compute the orthogonal projection of x onto the vector subspace spanned by basis.
Here orthogonality is defined using the given bilinear_form
If bilinear_form is not provided, use the default Minkowski form,
i.e. (x0, x1, x2) dot (y0, y1, y2) = -x0*y0 + x1*y1 + x2*y2
Args:
basis: torch.tensor of shape (subspace_dim, dim), where dim >= 2
x: torch.tensor of shape (batch_size, dim), where dim >= 2
bilinear_form (optional): torch.tensor of shape (dim, dim).
Returns:
torch.tensor of shape (batch_size, dim)
Warning: Will not work if the linear subspace spanned by basis is tangent to the light cone.
(In that case, the orthogonal projection is not unique)
"""
coefs, _ = torch.solve(pairwise_bilinear_pairing(basis, x, bilinear_form),
pairwise_bilinear_pairing(basis, basis, bilinear_form))
return coefs.transpose(-1, -2) @ basis
def reflection(subspace, x, subspace_given_by_normal=True, bilinear_form=None):
"""Compute the reflection of x through a linear subspace (of dimension 1 less than the ambient space)
Here reflection is defined using the notion of orthogonality coming from the given bilinear_form
If bilinear_form is not provided, use the default Minkowski form,
i.e. (x0, x1, x2) dot (y0, y1, y2) = -x0*y0 + x1*y1 + x2*y2
Args:
subspace: If subspace_given_by_normal:
torch.tensor of shape (dim, ), representing a normal vector to the subspace
Else:
torch.tensor of shape (dim-1, dim), representing a basis of the subspace
x: torch.tensor of shape (batch_size, dim)
bilinear_form (optional): torch.tensor of shape (dim, dim).
Returns:
torch.tensor of shape (batch_size, dim)
Warning: Will not work if the linear subspace is tangent to the light cone.
(In that case, the reflection is not unique)
"""
if subspace_given_by_normal:
return x - 2 * orthogonal_projection(subspace.unsqueeze(0), x, bilinear_form)
else:
return 2 * orthogonal_projection(subspace, x, bilinear_form) - x
|
HoroPCA-main
|
geom/minkowski.py
|
""" Geometric utility functions, mostly for standard Euclidean operations."""
import torch
MIN_NORM = 1e-15
def orthonormal(Q):
"""Return orthonormal basis spanned by the vectors in Q.
Q: (..., k, d) k vectors of dimension d to orthonormalize
"""
k = Q.size(-2)
_, _, v = torch.svd(Q, some=False) # Q = USV^T
Q_ = v[..., :k]  # first k right-singular vectors (columns of V) span the rows of Q
return Q_.transpose(-1, -2) # (k, d) rows are orthonormal basis for rows of Q
def euc_reflection(x, a):
"""
Euclidean reflection (also hyperbolic) of x
Along the geodesic that goes through a and the origin
(straight line)
NOTE: this should be generalized by reflect()
"""
xTa = torch.sum(x * a, dim=-1, keepdim=True)
norm_a_sq = torch.sum(a ** 2, dim=-1, keepdim=True)
proj = xTa * a / norm_a_sq.clamp_min(MIN_NORM)
return 2 * proj - x
def reflect(x, Q):
"""Reflect points (euclidean) with respect to the space spanned by the rows of Q.
Q: (k, d) set of k d-dimensional vectors (must be orthogonal)
"""
ref = 2 * Q.transpose(0, 1) @ Q - torch.eye(x.shape[-1], device=x.device)
return x @ ref
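# Sanity sketch: the reflection matrix 2*Q^T*Q - I is an involution when Q has
# orthonormal rows, so reflecting twice returns the original points.
def _check_reflect_involution():
Q = orthonormal(torch.randn(2, 5))
x = torch.randn(3, 5)
assert torch.allclose(reflect(reflect(x, Q), Q), x, atol=1e-5)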
|
HoroPCA-main
|
geom/euclidean.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
from crlapi.benchmark import StreamTrainer
import hydra
from omegaconf import DictConfig, OmegaConf
def to_dict(cfg):
r = {}
for k, v in cfg.items():
if isinstance(v, DictConfig):
td = to_dict(v)
for kk in td:
r[k + "/" + kk] = td[kk]
else:
r[k] = v
return r
@hydra.main(config_path=".", config_name="test_finetune_mlp.yaml")
def main(cfg):
import torch.multiprocessing as mp
mp.set_start_method("spawn")
import time
stream_trainer = StreamTrainer()
stream_trainer.run(cfg)
if __name__ == "__main__":
main()
|
alma-main
|
configs/mnist/run.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
from crlapi.benchmark import StreamTrainer
import hydra
from omegaconf import DictConfig, OmegaConf
def to_dict(cfg):
r = {}
for k, v in cfg.items():
if isinstance(v, DictConfig):
td = to_dict(v)
for kk in td:
r[k + "/" + kk] = td[kk]
else:
r[k] = v
return r
@hydra.main(config_path=".", config_name="test_finetune_mlp.yaml")
def main(cfg):
import torch.multiprocessing as mp
mp.set_start_method("spawn")
import time
stream_trainer = StreamTrainer()
stream_trainer.run(cfg)
if __name__ == "__main__":
main()
|
alma-main
|
configs/cifar10/run.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from crlapi import instantiate_class,get_class,get_arguments
class StreamTrainer:
def create_logger(self, logger_args,all_args):
self.logger=instantiate_class(logger_args)
self.logger.save_hps(all_args)
def create_stream(self, stream_args):
return instantiate_class(stream_args)
def create_clmodel(self, cl_model_args):
from importlib import import_module
d = dict(cl_model_args)
if "classname" in d:
classname = d["classname"]
else:
classname = d["class_name"]
module_path, class_name = classname.rsplit(".", 1)
module = import_module(module_path)
c = getattr(module, class_name)
self.clmodel=c(self.train_stream,cl_model_args)
def run(self, args):
self.create_logger(args.logger,args)
stream_args = args.stream.train
self.train_stream=self.create_stream(stream_args)
stream_args = args.stream.evaluation
self.evaluation_stream=self.create_stream(stream_args)
clmodel_args = args.clmodel
self.create_clmodel(clmodel_args)
evaluation_args = args.evaluation
#args=_prefix(args,"benchmark/")
evaluation_mode=evaluation_args.mode
assert evaluation_mode=="all_tasks" or evaluation_mode=="previous_tasks"
for n_stage, task in enumerate(self.train_stream):
self.logger.message("Training at stage "+str(n_stage))
training_logger = self.logger.get_logger(f"train_stage_{n_stage}/")
self.clmodel = self.clmodel.update(task, training_logger)
evaluation_logger = self.logger.get_logger(f"evaluation_stage_{n_stage}/")
self.logger.message("Evaluation at stage "+str(n_stage))
for k,evaluation_task in enumerate(self.evaluation_stream):
if evaluation_mode=="previous_tasks" and k>n_stage:
pass
else:
self.logger.message("\tEvaluation on task "+str(k))
evaluation=self.clmodel.evaluate(evaluation_task,evaluation_logger,evaluation_args)
self.logger.message("\t == "+str(evaluation))
for kk,vv in evaluation.items():
evaluation_logger.add_scalar(kk,vv,k)
self.logger.close()
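# Illustrative shape of the hydra config consumed by run() (the keys mirror the
# attribute accesses above; concrete class names and values are assumptions):
#   logger:     {classname: crlapi.logger.TFLogger, log_dir: ...}
#   stream:     {train: {classname: ...}, evaluation: {classname: ...}}
#   clmodel:    {classname: ...}
#   evaluation: {mode: all_tasks}   # or previous_tasks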
|
alma-main
|
crlapi/benchmark.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def instantiate_class(arguments):
from importlib import import_module
d = dict(arguments)
if "classname" in d:
classname = d["classname"]
del d["classname"]
else:
classname = d["class_name"]
del d["class_name"]
module_path, class_name = classname.rsplit(".", 1)
module = import_module(module_path)
c = getattr(module, class_name)
return c(**d)
def get_class(arguments):
from importlib import import_module
d = dict(arguments)
if "classname" in d:
classname = d["classname"]
del d["classname"]
else:
classname = d["class_name"]
del d["class_name"]
module_path, class_name = classname.rsplit(".", 1)
module = import_module(module_path)
c = getattr(module, class_name)
return c
def get_arguments(arguments):
from importlib import import_module
d = dict(arguments)
if "classname" in d:
classname = d["classname"]
del d["classname"]
else:
classname = d["class_name"]
del d["class_name"]
return d
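# Usage sketch (the target class here is a stdlib class, chosen only for illustration):
# instantiate_class({"classname": "collections.OrderedDict"}) imports the module,
# pops the "classname" key, and returns OrderedDict(); remaining keys are passed
# to the constructor as keyword arguments.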
|
alma-main
|
crlapi/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import copy
class TaskResources:
""" Describe resources for a task (e.g a dataset, and environments, etc...)
"""
def make(self):
raise NotImplementedError
class Task:
""" Describe a task to solve with a task descriptor, and associated ressources
"""
def task_descriptor(self):
raise NotImplementedError
def task_resources(self):
raise NotImplementedError
class CLModel:
""" A continual learning model that is updated on different tasks. Such a model can evaluate itself on a particular task
"""
def __init__(self, config):
self.config = config
def update(self, task, logger):
# return a clmodel
raise NotImplementedError
def evaluate(self, task,logger,**evaluation_args):
raise NotImplementedError
class Stream:
""" A stream of tasks
"""
def __len__(self):
raise NotImplementedError
def __iter__(self):
raise NotImplementedError
def __getitem__(self,k):
raise NotImplementedError
|
alma-main
|
crlapi/core.py
|
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from torch.utils.tensorboard import SummaryWriter
import sqlite3
import os
import os.path
import csv
import copy
from datetime import datetime
import torch
import numpy as np
import time
import pickle
import bz2
import sys
import pandas as pd
from omegaconf import DictConfig, OmegaConf
class TFPrefixLogger:
def __init__(self,prefix,logger):
self.logger=logger
self.prefix=prefix
def add_images(self, name, value, iteration):
self.logger.add_images(self.prefix+name,value,iteration)
def add_scalar(self, name, value, iteration):
self.logger.add_scalar(self.prefix+name,value,iteration)
def add_video(self, name, value, iteration, fps=10):
self.logger.add_video(self.prefix+name,value,iteration,fps)
def message(self,msg,from_name=""):
self.logger.message(msg,from_name=self.prefix+from_name)
def debug(self,msg,from_name=""):
self.logger.debug(msg,from_name=self.prefix+from_name)
class TFLogger(SummaryWriter):
"""A logger that stores informations both in tensorboard and CSV formats"""
def __init__(
self, log_dir=None, cache_size=10000, modulo=1,verbose=False, use_zip=True
):
SummaryWriter.__init__(self, log_dir=log_dir)
self.use_zip = use_zip
self.save_every = cache_size
self.modulo=modulo
self.written_values={}
self.log_dir = log_dir
self.verbose = verbose
self.picklename = log_dir + "/db.pickle.bzip2"
if not self.use_zip:
self.picklename = log_dir + "/db.pickle"
self.to_pickle = []
def _omegaconf_to_dict(self,hps):
d={}
for k,v in hps.items():
if isinstance(v,DictConfig):
d[k]=self._omegaconf_to_dict(v)
else:
d[k]=v
return d
def save_hps(self, hps):
hps=self._omegaconf_to_dict(hps)
print(hps)
f = open(self.log_dir + "/params.json", "wt")
f.write(str(hps) + "\n")
f.close()
outfile = open(self.log_dir + "/params.pickle", "wb")
pickle.dump(hps, outfile)
outfile.close()
self.add_text("Hyperparameters", str(hps))
def get_logger(self,prefix):
return TFPrefixLogger(prefix,self)
def message(self,msg,from_name=""):
print("[",from_name,"]: ",msg)
def debug(self,msg,from_name=""):
print("[DEBUG] [",from_name,"]: ",msg)
def _to_pickle(self, name, value, iteration):
self.to_pickle.append((name, iteration, value))
if len(self.to_pickle) > self.save_every:
if self.use_zip:
f = bz2.BZ2File(self.picklename, "ab")
pickle.dump(self.to_pickle, f)
f.close()
else:
f = open(self.picklename, "ab")
pickle.dump(self.to_pickle, f)
f.close()
self.to_pickle = []
def add_images(self, name, value, iteration):
iteration=int(iteration/self.modulo)*self.modulo
if (name,iteration) in self.written_values:
return
else:
self.written_values[(name,iteration)]=True
self._to_pickle(name, value, iteration)
SummaryWriter.add_images(self, name, value, iteration)
def add_scalar(self, name, value, iteration):
iteration=int(iteration/self.modulo)*self.modulo
if (name,iteration) in self.written_values:
return
else:
self.written_values[(name,iteration)]=True
self._to_pickle(name, value, iteration)
if self.verbose:
print("['" + name + "' at " + str(iteration) + "] = " + str(value))
if isinstance(value, int) or isinstance(value, float):
SummaryWriter.add_scalar(self, name, value, iteration)
def add_video(self, name, value, iteration, fps=10):
iteration=int(iteration/self.modulo)*self.modulo
if (name,iteration) in self.written_values:
return
else:
self.written_values[(name,iteration)]=True
self._to_pickle(name, value.numpy(), iteration)
SummaryWriter.add_video(self, name, value, iteration, fps=fps)
def close(self):
if len(self.to_pickle) > 0:
if self.use_zip:
f = bz2.BZ2File(self.picklename, "ab")
pickle.dump(self.to_pickle, f)
f.close()
else:
f = open(self.picklename, "ab")
pickle.dump(self.to_pickle, f)
f.close()
self.to_pickle = []
SummaryWriter.close(self)
f = open(self.log_dir + "/done", "wt")
f.write("Done\n")
f.close()
class Log:
def __init__(self, hps, values):
self.hps = hps
self.values = values
max_length = max([len(v) for v in self.values.values()])  # pad to the longest value series
for k in values:
while len(values[k]) < max_length:
values[k].append(None)
self.length = max_length
def to_xy(self, name):
assert name in self.values
x, y = [], []
for k, v in enumerate(self.values[name]):
if not v is None:
x.append(k)
y.append(v)
return x, y
def to_dataframe(self, with_hps=False):
max_len = np.max([len(v) for v in self.values.values()])
nv = {}
for k, v in self.values.items():
while len(v) < max_len:
v.append(None)
nv[k] = v
self.values = nv
it = [k for k in range(max_len)]
d = {**self.values, **{"iteration": it}}
if with_hps:
for k in self.hps:
dd = [self.hps[k] for i in range(max_len)]
d = {**d, **{"_hp/" + k: dd}}
return pd.DataFrame(d)
def get_at(self, name, iteration):
return self.values[name][iteration]
def get(self, name, keep_none=False):
v = self.values[name]
if not keep_none:
return [k for k in v if not k is None]
else:
return v
def replace_None_(self, name):
v = self.values[name]
last_v = None
first_v = None
r = []
for k in range(len(v)):
if v[k] is None:
r.append(last_v)
else:
r.append(v[k])
if last_v is None:
first_v = v[k]
last_v = v[k]
p = 0
while r[p] is None:
r[p] = first_v
p += 1
self.values[name] = r
def max(self, name):
v = self.values[name]
vv = [k for k in v if not k is None]
return np.max(vv)
def min(self, name):
v = self.values[name]
vv = [k for k in v if not k is None]
return np.min(vv)
def argmin(self, name):
v = self.values[name]
_max = np.max([k for k in v if not k is None])
vv = []  # rebuild the full-length list, masking None with a value above the max (mirrors argmax)
for k in range(len(v)):
if v[k] is None:
vv.append(_max + 1.0)
else:
vv.append(v[k])
return np.argmin(vv)
def argmax(self, name):
v = self.values[name]
vv = [k for k in v if not k is None]
_min = np.min(vv)
vv = []
for k in range(len(v)):
if v[k] is None:
vv.append(_min - 1.0)
else:
vv.append(v[k])
return np.argmax(vv)
class Logs:
def __init__(self):
self.logs = []
self.hp_names = None
self.filenames = []
def _add(self, log):
self.hp_names = {k: True for k in log.hps}
for l in self.logs:
for k in log.hps:
if not k in l.hps:
l.hps[k] = "none"
self.logs.append(log)
def add(self, logs):
if isinstance(logs, Log):
self._add(logs)
else:
for l in logs:
self._add(l)
def max(self, function):
alls = [function(l) for l in self.logs]
idx = np.argmax(alls)
return self.logs[idx]
def hps(self):
return list(self.hp_names)
def size(self):
return len(self.logs)
def filter(self, hp_name, test_fn):
logs = Logs()
if not callable(test_fn):
for l in self.logs:
h = l.hps[hp_name]
if h == test_fn:
logs.add(l)
else:
for l in self.logs:
if test_fn(l.hps[hp_name]):
logs.add(l)
return logs
def unique_hps(self, name):
r = {}
for l in self.logs:
v = l.hps[name]
r[v] = 1
return list(r.keys())
def __len__(self):
return len(self.logs)
def to_dataframe(self):
rdf = None
for log in self.logs:
df = log.to_dataframe(with_hps=True)
if rdf is None:
rdf = df
else:
rdf = pd.concat([rdf, df])
return rdf
# def plot(self, y, x, hue=None, style=None, row=None, col=None, kind="line"):
def flattify(d):
d=dict(d)
r = {}
for k, v in d.items():
if isinstance(v, dict):
rr = flattify(v)
rrr = {k + "/" + kk: rr[kk] for kk in rr}
r = {**r, **rrr}
elif isinstance(v, list):
r[k] = str(v)
else:
r[k] = v
return r
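# Example: flattify({"a": {"b": 1}, "c": [2, 3]}) == {"a/b": 1, "c": "[2, 3]"}
# (nested keys are joined with "/" and lists are stringified).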
def read_log(directory, use_bz2=True, debug=False):
print("== Read ", directory)
# if os.path.exists(directory+"/fast.pickle"):
# f=open(directory+"/fast.pickle","rb")
# log=pickle.load(f)
# f.close()
# return log
f = None
if use_bz2:
picklename = directory + "/db.pickle.bzip2"
f = bz2.BZ2File(picklename, "rb")
else:
picklename = directory + "/db.pickle"
f = open(picklename, "rb")
values = {}
try:
while True:
a = pickle.load(f)
if not a is None:
for name, iteration, value in a:
# print(name,iteration,value)
if debug:
print(name, value, type(value))
if isinstance(value, np.int64):
value = int(value)
if (
isinstance(value, int)
or isinstance(value, float)
or isinstance(value, str)
):
if not name in values:
values[name] = []
while len(values[name]) < iteration + 1:
values[name].append(None)
values[name][iteration] = value
except:
f.close()
f = open(directory + "/params.pickle", "rb")
params = pickle.load(f)
params = flattify(params)
f.close()
log = Log(params, values)
log.from_directory = directory
# f=open(directory+"/fast.pickle","wb")
# pickle.dump(log,f)
# f.close()
return log
def read_directory(directory, use_bz2=True):
import os
import os.path
l = Logs()
name = "db.pickle"
if use_bz2:
name = "db.pickle.bzip2"
for dirpath, dirnames, filenames in os.walk(directory):
if name in filenames:
log = read_log(dirpath, use_bz2)
l.add(log)
print("Found %d logs" % l.size())
return l
def _create_col(df, hps, _name):
vs = []
for k, v in df.groupby(hps):
n = {hps[i]: k[i] for i in range(len(hps))}
v = v.copy()
name = ",".join([str(k) + "=" + str(n[k]) for k in n])
print(name)
print(_name)
v[_name] = name
vs.append(v)
return pd.concat(vs)
def plot_dataframe(
df, y, x="iteration", hue=None, style=None, row=None, col=None, kind="line"
):
import seaborn as sns
cols = [y, x]
if isinstance(row, list):
cols += row
else:
cols += [row]
if isinstance(col, list):
cols += col
else:
cols += [col]
if isinstance(style, list):
cols += style
else:
cols += [style]
if isinstance(hue, list):
cols += hue
else:
cols += [hue]
cols = [c for c in cols if not c is None]
df = df[cols].dropna()
if isinstance(row, list):
df = _create_col(df, row, "__row")
row = "__row"
if isinstance(col, list):
df = _create_col(df, col, "__col")
col = "__col"
if isinstance(style, list):
df = _create_col(df, style, "__style")
style = "__style"
if isinstance(hue, list):
df = _create_col(df, hue, "__hue")
hue = "__hue"
# df = convert_iteration_to_steps(df)
sns.relplot(x=x, y=y, hue=hue, style=style, row=row, col=col, data=df, kind=kind)
|
alma-main
|
crlapi/logger.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from crlapi.core import TaskResources, Stream, Task
import torchvision.datasets
import torchvision.transforms
import numpy.random
import numpy
import torch.utils.data
import torch
class CachedCIFAR10(torchvision.datasets.CIFAR10):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.data = torch.from_numpy(self.data).float().permute(0, 3, 1, 2)
self.targets = numpy.array(self.targets)
self.data = self.data / 255.
        # normalize with the CIFAR-10 per-channel mean and std
        mu = torch.Tensor([0.4914, 0.4822, 0.4465]).view(1, 3, 1, 1)
        std = torch.Tensor([0.2470, 0.2435, 0.2616]).view(1, 3, 1, 1)
        self.data = (self.data - mu) / std
def __getitem__(self, index):
x, y = self.data[index], self.targets[index]
if self.transform is not None:
x = self.transform(x)
return x, y
class ClassificationResources(TaskResources):
def __init__(self):
pass
class CIFAR10Resources(ClassificationResources):
def __init__(self, idx_batch, n_total_batches, seed,train,directory):
self.idx_batch=idx_batch
self.n_total_batches=n_total_batches
self.train=train
self.seed=seed
self.directory=directory
def make(self):
dataset=CachedCIFAR10(self.directory, train=self.train, download=True)
if self.n_total_batches==1:
return dataset
numpy.random.seed(self.seed)
indices=numpy.arange(len(dataset))
indices=numpy.random.permutation(indices)
_indices=numpy.array_split(indices,self.n_total_batches)
indices=list(_indices[self.idx_batch])
_set=torch.utils.data.Subset(dataset,indices)
return _set
class CIFAR10Task(Task):
def __init__(self,task_descriptor,resources):
self._task_descriptor=task_descriptor
self._resources=resources
self.input_shape=(3, 32, 32)
self.n_classes=10
def task_descriptor(self):
return self._task_descriptor
def task_resources(self):
return self._resources
class CIFAR10EvaluationAnytimeStream(Stream):
def __init__(self,n_megabatches,seed,directory):
self.tasks=[]
evaluation_resources=CIFAR10Resources(0,1,seed,False,directory)
self.tasks.append(CIFAR10Task(None,evaluation_resources))
def __len__(self):
return len(self.tasks)
def __iter__(self):
return self.tasks.__iter__()
def __getitem__(self,k):
return self.tasks[k]
class CIFAR10TrainAnytimeStream(Stream):
def __init__(self,n_megabatches,seed,directory):
self.tasks=[]
for k in range(n_megabatches):
resources=CIFAR10Resources(k,n_megabatches,seed,True,directory)
self.tasks.append(CIFAR10Task(k,resources))
def __len__(self):
return len(self.tasks)
def __iter__(self):
return self.tasks.__iter__()
def __getitem__(self,k):
return self.tasks[k]
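# Usage sketch (illustrative; not part of the original file). The train stream
# splits CIFAR-10 into n_megabatches disjoint subsets under a fixed permutation
# seed; the evaluation stream exposes the full test set as a single task:
#   train_stream = CIFAR10TrainAnytimeStream(n_megabatches=4, seed=0, directory="data")
#   eval_stream = CIFAR10EvaluationAnytimeStream(n_megabatches=4, seed=0, directory="data")
#   for task in train_stream:
#       subset = task.task_resources().make()  # torch.utils.data.Subset, downloads on first use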
|
alma-main
|
crlapi/sl/streams/cifar10.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from crlapi.core import TaskResources, Stream, Task
import torchvision.datasets
import torchvision.transforms
import numpy.random
import numpy
import torch.utils.data
import torch
# TODO: did not verify this dataset
class CachedEMNIST(torchvision.datasets.EMNIST):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.targets = self.targets.numpy()
self.data = (self.data / 255. - .1307) / .3081
def __getitem__(self, index):
x, y = self.data[index], self.targets[index]
if self.transform is not None:
x = self.transform(x)
return x, y
class ClassificationResources(TaskResources):
def __init__(self):
pass
class EMNISTResources(ClassificationResources):
def __init__(self, idx_batch, n_total_batches, seed,train,split,directory):
self.idx_batch=idx_batch
self.split=split
self.n_total_batches=n_total_batches
self.train=train
self.seed=seed
self.directory=directory
def n_classes(self):
print("Compputing n classes...")
dataset=CachedEMNIST(self.directory, split=self.split,train=self.train, download=True)
n=len(dataset.classes_split_dict[self.split])
return n
def make(self):
dataset=torchvision.datasets.EMNIST(self.directory, split=self.split,train=self.train, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
]))
if self.n_total_batches==1:
return dataset
numpy.random.seed(self.seed)
indices=numpy.arange(len(dataset))
indices=numpy.random.permutation(indices)
_indices=numpy.array_split(indices,self.n_total_batches)
indices=list(_indices[self.idx_batch])
_set=torch.utils.data.Subset(dataset,indices)
return _set
class EMNISTTask(Task):
def __init__(self,task_descriptor,resources):
self._task_descriptor=task_descriptor
self._resources=resources
self.input_shape=(1,28,28)
self.n_classes=self._resources.n_classes()
def task_descriptor(self):
return self._task_descriptor
def task_resources(self):
return self._resources
class EMNISTEvaluationAnytimeStream(Stream):
def __init__(self,n_megabatches,seed,split,directory):
self.tasks=[]
evaluation_resources=EMNISTResources(0,1,seed,False,split,directory)
self.tasks.append(EMNISTTask(None,evaluation_resources))
def __len__(self):
return len(self.tasks)
def __iter__(self):
return self.tasks.__iter__()
def __getitem__(self,k):
return self.tasks[k]
class EMNISTTrainAnytimeStream(Stream):
def __init__(self,n_megabatches,seed,split,directory):
self.tasks=[]
for k in range(n_megabatches):
resources=EMNISTResources(k,n_megabatches,seed,True,split,directory)
self.tasks.append(EMNISTTask(k,resources))
def __len__(self):
return len(self.tasks)
def __iter__(self):
return self.tasks.__iter__()
def __getitem__(self,k):
return self.tasks[k]
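# Usage sketch (illustrative; not part of the original file). EMNIST streams
# additionally take a torchvision EMNIST split name, and n_classes is derived
# from that split rather than hardcoded:
#   stream = EMNISTTrainAnytimeStream(n_megabatches=4, seed=0, split="balanced", directory="data")
#   print(stream[0].n_classes)  # 47 for the "balanced" split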
|
alma-main
|
crlapi/sl/streams/emnist.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from crlapi.core import TaskResources, Stream, Task
import torchvision.datasets
import torchvision.transforms
import numpy.random
import numpy
import torch.utils.data
import torch
class CachedMNIST(torchvision.datasets.MNIST):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.targets = self.targets.numpy()
self.data = (self.data / 255. - .1307) / .3081
def __getitem__(self, index):
x, y = self.data[index], self.targets[index]
if self.transform is not None:
x = self.transform(x)
return x, y
class ClassificationResources(TaskResources):
def __init__(self):
pass
class MNISTResources(ClassificationResources):
def __init__(self, idx_batch, n_total_batches, seed,train,directory):
self.idx_batch=idx_batch
self.n_total_batches=n_total_batches
self.train=train
self.seed=seed
self.directory=directory
def make(self):
dataset=CachedMNIST(self.directory, train=self.train, download=True)
if self.n_total_batches==1:
return dataset
numpy.random.seed(self.seed)
indices=numpy.arange(len(dataset))
indices=numpy.random.permutation(indices)
_indices=numpy.array_split(indices,self.n_total_batches)
indices=list(_indices[self.idx_batch])
_set=torch.utils.data.Subset(dataset,indices)
return _set
class MNISTTask(Task):
def __init__(self,task_descriptor,resources):
self._task_descriptor=task_descriptor
self._resources=resources
self.input_shape=(1,28,28)
self.n_classes=10
def task_descriptor(self):
return self._task_descriptor
def task_resources(self):
return self._resources
class MNISTEvaluationAnytimeStream(Stream):
def __init__(self,n_megabatches,seed,directory):
self.tasks=[]
evaluation_resources=MNISTResources(0,1,seed,False,directory)
self.tasks.append(MNISTTask(None,evaluation_resources))
def __len__(self):
return len(self.tasks)
def __iter__(self):
return self.tasks.__iter__()
def __getitem__(self,k):
return self.tasks[k]
class MNISTTrainAnytimeStream(Stream):
def __init__(self,n_megabatches,seed,directory):
self.tasks=[]
for k in range(n_megabatches):
resources=MNISTResources(k,n_megabatches,seed,True,directory)
self.tasks.append(MNISTTask(k,resources))
def __len__(self):
return len(self.tasks)
def __iter__(self):
return self.tasks.__iter__()
def __getitem__(self,k):
return self.tasks[k]
|
alma-main
|
crlapi/sl/streams/mnist.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import numpy as np
from copy import deepcopy
from crlapi.sl.architectures.mixture_model import (
HardSoftMaxGateModule,
SoftMaxGateModule,
MixtureLayer,
MoE_RandomGrow,
MoE_UsageGrow,
Gate,
MoE,
)
# -- Gates
class SoftGate(Gate):
def __init__(self, input_shape, n_experts, prepro_fn=None):
super().__init__(input_shape, n_experts)
gate_fn = nn.Linear(input_shape, n_experts)
if prepro_fn is not None:
self.prepro_fn = prepro_fn
gate_fn = nn.Sequential(prepro_fn, gate_fn)
self.module = SoftMaxGateModule(gate_fn)
def forward(self,x):
return self.module(x)
class HardGate(Gate):
    def __init__(self, input_shape, n_experts, prepro_fn=None):
        super().__init__(input_shape, n_experts)
        gate_fn = nn.Linear(input_shape, n_experts)
        if prepro_fn is not None:
            self.prepro_fn = prepro_fn  # kept so grow() can rebuild an equivalent gate
            gate_fn = nn.Sequential(prepro_fn, gate_fn)
        self.module = HardSoftMaxGateModule(gate_fn)
def forward(self,x):
return self.module(x)
# -- Layers
def _make_layers(array, in_channels):
layers = []
for x in array:
if x == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
nn.BatchNorm2d(x),
nn.ReLU(inplace=True)]
in_channels = x
return in_channels, nn.Sequential(*layers)
def VGG(task, n_channels):
vgg_parts = [ [64, 64, 'M'], [128, 128, 'M'], [256, 256, 256, 256, 'M'], [512, 512, 512, 512, 'M'], [512, 512, 512, 512, 'M'] ]
if n_channels > 0:
vgg_parts = [[n_channels if type(x) == int else x for x in block] for block in vgg_parts]
in_channels, block0 = _make_layers(vgg_parts[0], 3)
in_channels, block1 = _make_layers(vgg_parts[1], in_channels)
in_channels, block2 = _make_layers(vgg_parts[2], in_channels)
in_channels, block3 = _make_layers(vgg_parts[3], in_channels)
in_channels, block4 = _make_layers(vgg_parts[4], in_channels)
return nn.Sequential(
block0,
block1,
block2,
block3,
block4,
nn.Flatten(),
nn.Linear(in_channels, task.n_classes)
)
def MoE_VGG(task, n_channels, n_adaptivepooling, n_experts, is_hard):
vgg_parts = [ [64, 64, 'M'], [128, 128, 'M'], [256, 256, 256, 256, 'M'], [512, 512, 512, 512, 'M'], [512, 512, 512, 512, 'M'] ]
input_shape = task.input_shape
gate = HardGate if is_hard else SoftGate
if n_channels > 0:
vgg_parts = [[n_channels if type(x) == int else x for x in block] for block in vgg_parts]
in_channels, head = _make_layers(vgg_parts[0], 3)
in_channels, block1 = _make_layers(vgg_parts[1], in_channels)
in_channels, block2 = _make_layers(vgg_parts[2], in_channels)
in_channels, block3 = _make_layers(vgg_parts[3], in_channels)
in_channels, block4 = _make_layers(vgg_parts[4], in_channels)
blocks = [block1, block2, block3, block4]
dim_gates = []
x = torch.randn(1,3,32,32)
for layer in [head] + blocks:
x = layer(x)
dim_gates += [x.shape[1:]]
# Build Layers
layers = [head]
for i, (block, dim_gate) in enumerate(zip(blocks, dim_gates[:-1])):
# build adaptive pooling gate
input_size = dim_gate[0] * n_adaptivepooling ** 2
gate_fn = nn.Sequential(
nn.AdaptiveAvgPool2d(n_adaptivepooling),
nn.Flatten(),
)
experts = [deepcopy(block) for _ in range(n_experts)]
layers += [MixtureLayer(gate(input_size, n_experts, gate_fn), experts)]
linear = nn.Linear(np.prod(dim_gates[-1]), task.n_classes)
    layers += [nn.Flatten(), linear]
model = MoE(layers)
return model
def MoE_VGG_RandomGrow(task, n_channels, n_adaptivepooling, n_experts, is_hard, n_experts_to_split):
moe = MoE_VGG(task, n_channels, n_adaptivepooling, n_experts, is_hard)
return MoE_RandomGrow(moe.layers,n_experts_to_split)
def MoE_VGG_UsageGrow(task, n_channels, n_adaptivepooling, n_experts, is_hard, n_experts_to_split):
moe = MoE_VGG(task, n_channels, n_adaptivepooling, n_experts, is_hard)
return MoE_UsageGrow(moe.layers,n_experts_to_split)
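# Minimal smoke test (illustrative sketch, not part of the original file;
# _Task stands in for the CIFAR10Task objects the real pipeline passes in):
if __name__ == "__main__":
    class _Task:
        input_shape = (3, 32, 32)
        n_classes = 10

    dense = VGG(_Task(), n_channels=0)  # n_channels <= 0 keeps the default widths
    moe = MoE_VGG(_Task(), n_channels=8, n_adaptivepooling=2, n_experts=2, is_hard=False)
    x = torch.randn(2, 3, 32, 32)
    print(dense(x).shape, moe(x).shape)  # both torch.Size([2, 10])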
|
alma-main
|
crlapi/sl/architectures/vgg.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
import random
import numpy as np
class SoftMaxGateModule(nn.Module):
def __init__(self,module):
super().__init__()
self.module=module
def forward(self,x):
y=self.module(x)
return torch.softmax(y,dim=1)
class HardSoftMaxGateModule(nn.Module):
def __init__(self,module):
super().__init__()
self.module=module
    def forward(self, x):
        y = self.module(x)
        if self.training:
            # sample an expert, then apply the straight-through estimator:
            # the forward pass sees the one-hot sample, gradients flow through y
            dist = torch.distributions.Categorical(y)
            sampled_y = dist.sample()
            oh = F.one_hot(sampled_y, num_classes=y.size()[1])
            return oh + (y - y.detach())
        else:
            _max = y.max(1)[1]
            oh = F.one_hot(_max, num_classes=y.size()[1])
            return oh
class Gate(nn.Module):
def __init__(self,input_shape,n_experts, prepro_fn=None):
self.input_shape=input_shape
super().__init__()
def _weight(output, score):
    # broadcast the per-sample gate score over all non-batch dimensions
    while len(score.size()) < len(output.size()):
        score = score.unsqueeze(-1)
    return output * score
class MixtureLayer(nn.Module):
def __init__(self,gate_module,experts):
super().__init__()
assert isinstance(gate_module,Gate)
self.gate=gate_module
self.experts=nn.ModuleList(experts)
def forward(self,x):
out=0.0
scores=self.gate(x)
gate_scores=[]
for k,e in enumerate(self.experts):
score=scores[:,k]
if isinstance(e,MixtureLayer):
y,g=e(x)
for kk,vv in enumerate(g):
gate_scores.append(([k]+vv[0],vv[1]*score))
else:
y=e(x)
gate_scores.append(([k],score))
y=_weight(y,score)
out=out+y
return out,gate_scores
class MoE(nn.Module):
def __init__(self,layers):
super().__init__()
self.layers=nn.ModuleList(layers)
@property
def device(self):
return next(self.parameters()).device
def forward(self,x,with_gate_scores=False):
gate_scores=[]
for l in self.layers:
if isinstance(l,MixtureLayer):
x,g=l(x)
gate_scores.append(g)
else:
x=l(x)
if with_gate_scores:
return x,gate_scores
else:
return x
class MoE_RandomGrow(MoE):
def __init__(self,layers,n_experts_split):
super().__init__(layers)
self.n_experts_split=n_experts_split
def _list_experts(self,layer):
assert isinstance(layer,MixtureLayer)
experts_url=[]
for k,e in enumerate(layer.experts):
if not isinstance(e,MixtureLayer):
experts_url.append([k])
else:
le=self._list_experts(e)
for v in le:
experts_url.append([k]+v)
return experts_url
def _generate_splitting(self,layer,url_to_split):
idx_split=url_to_split[0]
gate=copy.deepcopy(layer.gate)
experts=[]
for k,e in enumerate(layer.experts):
if k!=idx_split:
experts.append(copy.deepcopy(e))
elif len(url_to_split)>1:
experts.append(self._generate_splitting(e,url_to_split[1:]))
else:
n_experts=[copy.deepcopy(e) for _ in range(self.n_experts_split)]
n_gate=layer.gate.__class__(layer.gate.input_shape, self.n_experts_split, getattr(layer.gate, 'prepro_fn', None))
experts.append(MixtureLayer(n_gate,n_experts))
return MixtureLayer(gate,experts)
def _grow_layer(self,layer):
assert isinstance(layer,MixtureLayer)
#First, we list all the experts
experts_urls=self._list_experts(layer)
print("\tList of experts: ",experts_urls)
#Choose one expert at random
expert_to_split=random.choice(experts_urls)
print("\t\tSplitting expert: "+str(expert_to_split))
new_module=self._generate_splitting(layer,expert_to_split)
experts_urls=self._list_experts(new_module)
print("\t\tNew list of experts = ",experts_urls)
return new_module
def grow(self,dataset_loader,**args):
if self.n_experts_split==0:
return self
self.zero_grad()
new_layers=[]
for l in self.layers:
if isinstance(l,MixtureLayer):
new_layers.append(self._grow_layer(l))
else:
new_layers.append(copy.deepcopy(l))
return MoE_RandomGrow(new_layers,self.n_experts_split)
class MoE_UsageGrow(MoE):
def __init__(self,layers,n_experts_split):
super().__init__(layers)
self.n_experts_split=n_experts_split
def _list_experts(self,layer):
assert isinstance(layer,MixtureLayer)
experts_url=[]
for k,e in enumerate(layer.experts):
if not isinstance(e,MixtureLayer):
experts_url.append([k])
else:
le=self._list_experts(e)
for v in le:
experts_url.append([k]+v)
return experts_url
def _generate_splitting(self,layer,url_to_split):
idx_split=url_to_split[0]
gate=copy.deepcopy(layer.gate)
experts=[]
for k,e in enumerate(layer.experts):
if k!=idx_split:
experts.append(copy.deepcopy(e))
elif len(url_to_split)>1:
experts.append(self._generate_splitting(e,url_to_split[1:]))
else:
n_experts=[copy.deepcopy(e) for _ in range(self.n_experts_split)]
n_gate=layer.gate.__class__(layer.gate.input_shape, self.n_experts_split, getattr(layer.gate, 'prepro_fn', None))
experts.append(MixtureLayer(n_gate,n_experts))
return MixtureLayer(gate,experts)
def _grow_layer(self,layer,to_split_expert):
assert isinstance(layer,MixtureLayer)
#First, we list all the experts
experts_urls=self._list_experts(layer)
print("\tList of experts: ",experts_urls)
print("\t To split: ",to_split_expert)
assert to_split_expert in experts_urls
new_module=self._generate_splitting(layer,to_split_expert)
experts_urls=self._list_experts(new_module)
print("\t\tNew list of experts = ",experts_urls)
return new_module
def grow(self,dataset_loader,**args):
if self.n_experts_split==0:
return self
with torch.no_grad():
usage=None
n=0
for x,y in dataset_loader:
x, y = x.to(self.device), y.to(self.device)
out,gate_scores=self(x,with_gate_scores=True)
loss=F.cross_entropy(out,y,reduction='none')
gate_scores=[[(gg[0],gg[1].sum(0)) for gg in g] for g in gate_scores]
n+=x.size()[0]
if usage is None:
usage=gate_scores
else:
for k,g in enumerate(gate_scores):
for kk,gg in enumerate(g):
assert gg[0]==usage[k][kk][0]
usage[k][kk]=(gg[0],gg[1]+usage[k][kk][1])
self.zero_grad()
new_layers=[]
p=0
for k,l in enumerate(self.layers):
if isinstance(l,MixtureLayer):
u=usage[p]
us=[uu[1].item() for uu in u]
idx=np.argmax(us)
print("Expert usage at layer ",k," is ",{str(uu[0]):uu[1].item() for uu in u})
max_expert=u[idx][0]
print("\tSplitting expert ",max_expert)
new_layers.append(self._grow_layer(l,max_expert))
p+=1
else:
new_layers.append(copy.deepcopy(l))
return MoE_UsageGrow(new_layers,self.n_experts_split)
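# Minimal smoke test (illustrative sketch, not part of the original file):
# a single soft mixture layer over two linear experts, wrapped in a MoE.
# _ToyGate fills in the gating module that Gate subclasses must provide.
if __name__ == "__main__":
    class _ToyGate(Gate):
        def __init__(self, input_shape, n_experts, prepro_fn=None):
            super().__init__(input_shape, n_experts)
            self.module = SoftMaxGateModule(nn.Linear(input_shape[0], n_experts))

        def forward(self, x):
            return self.module(x)

    layer = MixtureLayer(_ToyGate([8], 2), [nn.Linear(8, 4) for _ in range(2)])
    model = MoE([layer, nn.Linear(4, 3)])
    out, gate_scores = model(torch.randn(5, 8), with_gate_scores=True)
    print(out.shape)  # torch.Size([5, 3])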
|
alma-main
|
crlapi/sl/architectures/mixture_model.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
from crlapi.sl.architectures.mixture_model import MixtureLayer,SoftMaxGateModule,HardSoftMaxGateModule,Gate,MoE,MoE_RandomGrow,MoE_UsageGrow
class MLP(nn.Module):
def __init__(self,task,**args):
super().__init__()
input_shape=task.input_shape
d=1
for k in input_shape:
d*=k
input_dim=d
output_dim=task.n_classes
sizes=[input_dim]+[args["size_layers"] for k in range(args["n_layers"])]+[output_dim]
print(sizes)
layers=[]
for k in range(len(sizes)-1):
layers.append(nn.Linear(sizes[k],sizes[k+1]))
if not k==len(sizes)-2:
layers.append(nn.ReLU())
self.model=nn.Sequential(*layers)
def forward(self,x):
x=torch.flatten(x,start_dim=1)
return self.model(x)
class LinearSoftGate(Gate):
def __init__(self,input_shape, n_experts, prepro_fn=None):
super().__init__(input_shape,n_experts)
assert len(input_shape)==1
self.module=SoftMaxGateModule(nn.Linear(input_shape[0],n_experts))
def forward(self,x):
return self.module(x)
class LinearHardGate(Gate):
def __init__(self,input_shape, n_experts, prepro_fn=None):
super().__init__(input_shape,n_experts)
assert len(input_shape)==1
self.module=HardSoftMaxGateModule(nn.Linear(input_shape[0],n_experts))
def forward(self,x):
return self.module(x)
def mlp_layers(task,size_layers,n_layers,n_experts,is_hard):
input_shape=task.input_shape
d=1
for k in input_shape:
d*=k
input_dim=d
output_dim=task.n_classes
sizes=[input_dim]+[size_layers for k in range(n_layers)]+[output_dim]
layers=[nn.Flatten(start_dim=1)]
for k in range(len(sizes)-2):
if is_hard:
gate=LinearHardGate([sizes[k]],n_experts)
else:
gate=LinearSoftGate([sizes[k]],n_experts)
experts=[nn.Sequential(nn.Linear(sizes[k],sizes[k+1]),nn.ReLU()) for _ in range(n_experts)]
layer=MixtureLayer(gate,experts)
layers.append(layer)
layers.append(nn.Linear(sizes[-2],sizes[-1]))
return layers
def MoE_MLP(task,size_layers,n_layers,n_experts,is_hard):
return MoE(mlp_layers(task,size_layers,n_layers,n_experts,is_hard))
def MoE_MLP_RandomGrow(task,size_layers,n_layers,n_experts,is_hard,n_experts_to_split):
return MoE_RandomGrow(mlp_layers(task,size_layers,n_layers,n_experts,is_hard),n_experts_to_split)
def MoE_MLP_UsageGrow(task,size_layers,n_layers,n_experts,is_hard,n_experts_to_split):
return MoE_UsageGrow(mlp_layers(task,size_layers,n_layers,n_experts,is_hard),n_experts_to_split)
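# Minimal smoke test (illustrative sketch, not part of the original file;
# _Task stands in for the real Task objects):
if __name__ == "__main__":
    class _Task:
        input_shape = (1, 28, 28)
        n_classes = 10

    model = MoE_MLP(_Task(), size_layers=64, n_layers=2, n_experts=3, is_hard=False)
    print(model(torch.randn(4, 1, 28, 28)).shape)  # torch.Size([4, 10])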
|
alma-main
|
crlapi/sl/architectures/mlp.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
from torch import nn
from torch.nn import functional as F
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(in_planes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1,
stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
self.activation = nn.ReLU()
def forward(self, x):
out = self.activation(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out = out + self.shortcut(x)
out = self.activation(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes, nf, input_size):
super(ResNet, self).__init__()
self.in_planes = nf
self.input_size = input_size
self.conv1 = conv3x3(input_size[0], nf * 1)
self.bn1 = nn.BatchNorm2d(nf * 1)
self.layer1 = self._make_layer(block, nf * 1, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, nf * 2, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, nf * 4, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, nf * 8, num_blocks[3], stride=2)
# hardcoded for now
last_hid = nf * 8 * block.expansion
last_hid = last_hid * (self.input_size[-1] // 2 // 2 // 2 // 4) ** 2
self.linear = nn.Linear(last_hid, num_classes)
self.activation = nn.ReLU()
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def return_hidden(self, x):
bsz = x.size(0)
assert x.ndim == 4
out = self.activation(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
# out = F.adaptive_avg_pool2d(out, 1)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
return out
def forward(self, x):
out = self.return_hidden(x)
out = self.linear(out)
return out
def ResNet18(nclasses, nf=20, input_size=(3, 32, 32), *args, **kwargs):
return ResNet(BasicBlock, [2, 2, 2, 2], nclasses, nf, input_size, *args, **kwargs)
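# Quick shape check (illustrative sketch, not part of the original file):
if __name__ == "__main__":
    net = ResNet18(nclasses=10)  # default nf=20, CIFAR-sized input
    print(net(torch.randn(2, 3, 32, 32)).shape)  # torch.Size([2, 10])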
|
alma-main
|
crlapi/sl/architectures/resnet.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import copy
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
def percentile(t, qq):
    # value at the qq-quantile of the flattened tensor (kthvalue is 1-indexed,
    # so clamp k to at least 1 for very small tensors)
    k = max(1, int(qq * t.numel()))
    return t.view(-1).kthvalue(k).values.item()
class GetSubnet(torch.autograd.Function):
@staticmethod
def forward(ctx, scores, zeros, ones, sparsity):
k_val = percentile(scores, sparsity)
return torch.where(scores < k_val, zeros.to(scores.device), ones.to(scores.device))
@staticmethod
def backward(ctx, g):
return g, None, None, None
# Not learning weights, finding subnet
class SubnetConv(nn.Conv2d):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.scores = nn.Parameter(torch.Tensor(self.weight.size()))
nn.init.kaiming_uniform_(self.scores, a=math.sqrt(5))
self.register_buffer('ones', torch.ones_like(self.scores.data))
self.register_buffer('zeros', torch.zeros_like(self.scores.data))
# hardcoded for now
self.prune_rate = 0.5
def set_prune_rate(self, prune_rate):
self.prune_rate = prune_rate
@property
def clamped_scores(self):
return self.scores.abs()
def forward(self, x):
subnet = GetSubnet.apply(self.clamped_scores, self.zeros, self.ones, self.prune_rate)
w = self.weight * subnet
x = F.conv2d(
x, w, self.bias, self.stride, self.padding, self.dilation, self.groups
)
return x
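# Illustrative example (not part of the original file): GetSubnet zeroes the
# scores strictly below the sparsity-quantile threshold and lets gradients pass
# straight through to the scores. With k = int(0.5 * 4) = 2, the threshold is
# the 2nd-smallest score and only entries below it are masked:
#   s = torch.tensor([1., 4., 2., 3.])
#   GetSubnet.apply(s, torch.zeros(4), torch.ones(4), 0.5)
#   -> tensor([0., 1., 1., 1.])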
# -- Layers
def _make_layers(array, in_channels):
layers = []
for x in array:
if x == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
layers += [SubnetConv(in_channels, x, kernel_size=3, padding=1),
nn.BatchNorm2d(x, affine=False),
nn.ReLU(inplace=True)]
in_channels = x
return in_channels, layers
class SubnetVGG(nn.Module):
def __init__(self, task, n_channels, grow_n_units):
super().__init__()
self.grow_n_units = grow_n_units
vgg_parts = [ 64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M' ]
if n_channels > 0:
vgg_parts = [n_channels if type(x) == int else x for x in vgg_parts]
out_channels, base = _make_layers(vgg_parts, 3)
self.net = nn.Sequential(
*base,
SubnetConv(out_channels, task.n_classes, kernel_size=1, padding=0),
nn.Flatten(),
)
def forward(self, x):
return self.net(x)
def grow(self, valid_loader, **args):
x = torch.FloatTensor(64, 3, 32, 32).normal_()
new_layers = []
for i, layer in enumerate(self.net):
if isinstance(layer, SubnetConv):
# input size
in_c = 3 if i == 0 else last_output_channels
# output size
out_c = layer.out_channels + (self.grow_n_units if i < len(self.net) - 2 else 0)
# what is the minimal score to be selected ?
max_val = percentile(layer.scores.abs(), layer.prune_rate)
min_val = layer.scores.abs().min().item()
# init new layer
new_layer = SubnetConv(in_c, out_c, kernel_size=layer.kernel_size, padding=layer.padding)
new_layer.scores.data.uniform_(min_val, max_val)
# adjust the prune rate so that the same amount of points get selected
new_layer.prune_rate = 1 - (1 - layer.prune_rate) * layer.weight.numel() / new_layer.weight.numel()
# copy the old params
a, b, c, d = layer.scores.size()
new_layer.weight[:a, :b, :c, :d].data.copy_(layer.weight.data)
new_layer.scores[:a, :b, :c, :d].data.copy_(layer.scores.data)
new_layer.bias.data.fill_(0)
new_layer.bias[:a].data.copy_(layer.bias)
last_output_channels = out_c
new_layers += [new_layer]
                # (disabled sanity check: the grown layer should behave like the
                # original one on the copied slice of channels)
                # assert torch.allclose(layer(x[:, :b]), new_layer(x)[:, :a])
elif isinstance(layer, nn.BatchNorm2d):
new_bn = nn.BatchNorm2d(last_output_channels, affine=False)
c = layer.running_mean.size(0)
new_bn.running_mean[:c].data.copy_(layer.running_mean.data)
new_bn.running_var[:c].data.copy_(layer.running_var.data)
new_layers += [new_bn]
new_bn.training = layer.training
# assert torch.allclose(layer(x[:, :c]), new_bn(x)[:, :c], atol=1e-7)
else:
new_layers += [copy.deepcopy(layer)]
x = new_layers[-1](x)
net = nn.Sequential(*new_layers)
copy_self = copy.deepcopy(self)
copy_self.net = net
print(net)
return copy_self
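# Usage sketch (illustrative, not part of the original file; _Task stands in
# for the real Task objects, and grow() ignores the loader argument in this
# implementation):
if __name__ == "__main__":
    class _Task:
        n_classes = 10

    net = SubnetVGG(_Task(), n_channels=16, grow_n_units=8)
    bigger = net.grow(valid_loader=None)  # widens every hidden SubnetConv by 8 channels
    print(bigger(torch.randn(2, 3, 32, 32)).shape)  # torch.Size([2, 10])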
|
alma-main
|
crlapi/sl/architectures/sp_vgg.py
|