Code | Summary
---|---
Please provide a description of the function:def set_target_variable (self, targets, variable, value, append=0):
    if isinstance (targets, str):
        targets = [targets]
    if isinstance(value, str):
        value = [value]
    assert is_iterable(targets)
    assert isinstance(variable, basestring)
    assert is_iterable(value)
    if targets:
        if append:
            bjam_interface.call("set-target-variable", targets, variable, value, "true")
        else:
            bjam_interface.call("set-target-variable", targets, variable, value)
|
[
" Sets a target variable.\n\n The 'variable' will be available to bjam when it decides\n where to generate targets, and will also be available to\n updating rule for that 'taret'.\n "
] |
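A minimal usage sketch for set_target_variable; the engine instance, target path, and variable name below are hypothetical and assume the bjam interface shown above.

# Hypothetical example; 'engine' is assumed to be an instance of the class above.
engine.set_target_variable("bin/hello.o", "INCLUDES", ["/usr/local/include"])
# Pass append=1 to extend an existing value instead of overwriting it.
engine.set_target_variable("bin/hello.o", "INCLUDES", "/opt/include", append=1)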
Please provide a description of the function:def set_update_action (self, action_name, targets, sources, properties=None):
    if isinstance(targets, str):
        targets = [targets]
    if isinstance(sources, str):
        sources = [sources]
    if properties is None:
        properties = property_set.empty()
    assert isinstance(action_name, basestring)
    assert is_iterable(targets)
    assert is_iterable(sources)
    assert(isinstance(properties, property_set.PropertySet))
    self.do_set_update_action (action_name, targets, sources, properties)
|
[
" Binds a target to the corresponding update action.\n If target needs to be updated, the action registered\n with action_name will be used.\n The 'action_name' must be previously registered by\n either 'register_action' or 'register_bjam_action'\n method.\n "
] |
Please provide a description of the function:def register_action (self, action_name, command='', bound_list = [], flags = [],
                     function = None):
    assert isinstance(action_name, basestring)
    assert isinstance(command, basestring)
    assert is_iterable(bound_list)
    assert is_iterable(flags)
    assert function is None or callable(function)
    bjam_flags = reduce(operator.or_,
                        (action_modifiers[flag] for flag in flags), 0)
    # We allow the command to be empty so that we can define an 'action' as a
    # pure Python function that does some conditional logic and then relays
    # to other actions.
    assert command or function
    if command:
        bjam_interface.define_action(action_name, command, bound_list, bjam_flags)
    self.actions[action_name] = BjamAction(
        action_name, function, has_command=bool(command))
|
[
"Creates a new build engine action.\n\n Creates on bjam side an action named 'action_name', with\n 'command' as the command to be executed, 'bound_variables'\n naming the list of variables bound when the command is executed\n and specified flag.\n If 'function' is not None, it should be a callable taking three\n parameters:\n - targets\n - sources\n - instance of the property_set class\n This function will be called by set_update_action, and can\n set additional target variables.\n "
] |
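A hedged sketch of how register_action and set_update_action might be combined; the action name, command, and target/source paths are illustrative only, and 'engine' is again an assumed instance of the engine class.

# Register a bjam action whose command compiles a single C file; $(<) and $(>)
# are bjam's placeholders for the bound targets and sources.
engine.register_action("compile.c", command='gcc -c -o "$(<)" "$(>)"')
# Bind a target to that action; properties defaults to property_set.empty().
engine.set_update_action("compile.c", targets="bin/hello.o", sources="src/hello.c")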
Please provide a description of the function:def register_bjam_action (self, action_name, function=None):
    # We allow duplicate calls to this rule for the same
    # action name. This way, jamfile rules that take action names
    # can just register them without specially checking if
    # action is already registered.
    assert isinstance(action_name, basestring)
    assert function is None or callable(function)
    if action_name not in self.actions:
        self.actions[action_name] = BjamNativeAction(action_name, function)
|
[
"Informs self that 'action_name' is declared in bjam.\n\n From this point, 'action_name' is a valid argument to the\n set_update_action method. The action_name should be callable\n in the global module of bjam.\n "
] |
Please provide a description of the function:def pixel_data(self):
    from .. import extensions as _extensions
    data = _np.zeros((self.height, self.width, self.channels), dtype=_np.uint8)
    _extensions.image_load_to_numpy(self, data.ctypes.data, data.strides)
    if self.channels == 1:
        data = data.squeeze(2)
    return data
|
[
"\n Returns the pixel data stored in the Image object.\n\n Returns\n -------\n out : numpy.array\n The pixel data of the Image object. It returns a multi-dimensional\n numpy array, where the shape of the array represents the shape of\n the image (height, weight, channels).\n\n See Also\n --------\n width, channels, height\n\n Examples\n --------\n >>> img = turicreate.Image('https://static.turi.com/datasets/images/sample.jpg')\n >>> image_array = img.pixel_data\n "
] |
Please provide a description of the function:def show(self):
    from ..visualization._plot import _target
    try:
        img = self._to_pil_image()
        try:
            # output into jupyter notebook if possible
            if _target == 'auto' and \
                    get_ipython().__class__.__name__ == "ZMQInteractiveShell":
                from io import BytesIO
                from IPython import display
                b = BytesIO()
                img.save(b, format='png')
                data = b.getvalue()
                ip_img = display.Image(data=data, format='png', embed=True)
                display.display(ip_img)
            else:
                # fall back to pillow .show (jupyter notebook integration disabled or not in jupyter notebook)
                img.show()
        except NameError:
            # fall back to pillow .show (no get_ipython() available)
            img.show()
    except ImportError:
        print("Install pillow to use the .show() method.")
|
[
"\n Displays the image. Requires PIL/Pillow.\n\n Alternatively, you can create an :class:`turicreate.SArray` of this image\n and use py:func:`turicreate.SArray.show()`\n\n See Also\n --------\n turicreate.image_analysis.resize\n\n Examples\n --------\n >>> img = turicreate.Image('https://static.turi.com/datasets/images/sample.jpg')\n >>> img.show()\n\n "
] |
Please provide a description of the function:def predict(self, data, useCPUOnly=False, **kwargs):
if self.__proxy__:
return self.__proxy__.predict(data,useCPUOnly)
else:
if _macos_version() < (10, 13):
raise Exception('Model prediction is only supported on macOS version 10.13 or later.')
try:
from ..libcoremlpython import _MLModelProxy
except:
_MLModelProxy = None
if not _MLModelProxy:
raise Exception('Unable to load CoreML.framework. Cannot make predictions.')
elif _MLModelProxy.maximum_supported_specification_version() < self._spec.specificationVersion:
engineVersion = _MLModelProxy.maximum_supported_specification_version()
raise Exception('The specification has version ' + str(self._spec.specificationVersion)
+ ' but the Core ML framework version installed only supports Core ML model specification version '
+ str(engineVersion) + ' or older.')
elif _has_custom_layer(self._spec):
raise Exception('This model contains a custom neural network layer, so predict is not supported.')
else:
raise Exception('Unable to load CoreML.framework. Cannot make predictions.')
|
[
"\n Return predictions for the model. The kwargs gets passed into the\n model as a dictionary.\n\n Parameters\n ----------\n data : dict[str, value]\n Dictionary of data to make predictions from where the keys are\n the names of the input features.\n\n useCPUOnly : bool\n Set to true to restrict computation to use only the CPU. Defaults to False.\n\n Returns\n -------\n out : dict[str, value]\n Predictions as a dictionary where each key is the output feature\n name.\n\n Examples\n --------\n >>> data = {'bedroom': 1.0, 'bath': 1.0, 'size': 1240}\n >>> predictions = model.predict(data)\n "
] |
Please provide a description of the function:def visualize_spec(self, port=None, input_shape_dict=None):
spec = self._spec
model_type = spec.WhichOneof('Type')
model_description = spec.description
input_spec = model_description.input
output_spec = model_description.output
spec_inputs = []
for model_input in input_spec:
spec_inputs.append((model_input.name, str(model_input.type)))
spec_outputs = []
for model_output in output_spec:
spec_outputs.append((model_output.name, str(model_output.type)))
cy_nodes = []
cy_edges = []
cy_nodes.append({
'data': {
'id': 'input_node',
'name': '',
'info': {
'type': 'input node'
},
'classes': 'input',
}
})
for model_input, input_type in spec_inputs:
cy_nodes.append({
'data': {
'id': str(model_input),
'name': str(model_input),
'info': {
'type': "\n".join(str(input_type).split("\n")),
'inputs': str([]),
'outputs': str([model_input])
},
'parent': 'input_node'
},
'classes': 'input'
})
if model_type == 'pipeline':
pipeline_spec = spec.pipeline
cy_data = _pipeline_nodes_and_edges(cy_nodes,
cy_edges,
pipeline_spec,
spec_outputs
)
elif model_type == 'pipelineRegressor':
pipeline_spec = spec.pipelineRegressor.pipeline
cy_data = _pipeline_nodes_and_edges(cy_nodes,
cy_edges,
pipeline_spec,
spec_outputs
)
elif model_type == 'pipelineClassifier':
pipeline_spec = spec.pipelineClassifier.pipeline
cy_data = _pipeline_nodes_and_edges(cy_nodes,
cy_edges,
pipeline_spec,
spec_outputs
)
elif model_type == 'neuralNetwork':
nn_spec = spec.neuralNetwork
cy_data = _neural_network_nodes_and_edges(nn_spec,
cy_nodes,
cy_edges,
spec_outputs,
input_spec,
input_shape_dict=input_shape_dict
)
elif model_type == 'neuralNetworkClassifier':
nn_spec = spec.neuralNetworkClassifier
cy_data = _neural_network_nodes_and_edges(nn_spec,
cy_nodes,
cy_edges,
spec_outputs,
input_spec,
input_shape_dict=input_shape_dict
)
elif model_type == 'neuralNetworkRegressor':
nn_spec = spec.neuralNetworkRegressor
cy_data = _neural_network_nodes_and_edges(nn_spec,
cy_nodes,
cy_edges,
spec_outputs,
input_spec,
input_shape_dict=input_shape_dict
)
else:
print("Model is not of type Pipeline or Neural Network "
"and cannot be visualized")
return
import coremltools
web_dir = _os.path.join(_os.path.dirname(coremltools.__file__),
'graph_visualization')
with open('{}/model.json'.format(web_dir), 'w') as file:
_json.dump(cy_data, file)
_start_server(port, web_dir)
|
[
"\n Visualize the model.\n\n Parameters\n ----------\n port : int\n if server is to be hosted on specific localhost port\n\n input_shape_dict : dict\n The shapes are calculated assuming the batch and sequence\n are 1 i.e. (1, 1, C, H, W). If either is not 1, then provide\n full input shape\n\n Returns\n -------\n\n None\n\n Examples\n --------\n >>> model = coreml.models.MLModel('HousePricer.mlmodel')\n >>> model.visualize_spec()\n "
] |
Please provide a description of the function:def _construct_auto_distance(feature_names, column_names, column_types, sample):
## Make a dictionary from the column_names and column_types
col_type_dict = {k: v for k, v in zip(column_names, column_types)}
## Loop through feature names, appending a distance component if the
# feature's type is *not* numeric. If the type *is* numeric, append it to
# the numeric_cols list, then at the end make a numeric columns distance
# component.
composite_distance_params = []
numeric_cols = []
for c in feature_names:
if col_type_dict[c] == str:
composite_distance_params.append([[c], _turicreate.distances.levenshtein, 1])
elif col_type_dict[c] == dict:
composite_distance_params.append([[c], _turicreate.distances.jaccard, 1])
elif col_type_dict[c] == array.array:
composite_distance_params.append([[c], _turicreate.distances.euclidean, 1])
elif col_type_dict[c] == list:
only_str_lists = _validate_lists(sample[c], allowed_types=[str])
if not only_str_lists:
raise TypeError("Only lists of all str objects are currently supported")
composite_distance_params.append([[c], _turicreate.distances.jaccard, 1])
elif col_type_dict[c] in [int, float, array.array, list]:
numeric_cols.append(c)
else:
raise TypeError("Unable to automatically determine a distance "+\
"for column {}".format(c))
# Make the standalone numeric column distance component
if len(numeric_cols) > 0:
composite_distance_params.append([numeric_cols, _turicreate.distances.euclidean, 1])
return composite_distance_params
|
[
"\n Construct composite distance parameters based on selected features and their\n types.\n "
] |
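For illustration, a sketch of the composite distance this helper would produce for a mixed-type schema; the column names are hypothetical, and every weight is always 1 as in the code above.

import array
import turicreate as _turicreate
# Hypothetical columns: 'name' (str), 'tags' (dict), 'embedding' (array.array),
# 'age' (int), 'height' (float). _construct_auto_distance would return:
composite_distance = [
    [['name'], _turicreate.distances.levenshtein, 1],
    [['tags'], _turicreate.distances.jaccard, 1],
    [['embedding'], _turicreate.distances.euclidean, 1],
    [['age', 'height'], _turicreate.distances.euclidean, 1],  # numeric columns grouped at the end
]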
Please provide a description of the function:def create(dataset, label=None, features=None, distance=None, method='auto',
verbose=True, **kwargs):
## Validate the 'dataset' input
_tkutl._raise_error_if_not_sframe(dataset, "dataset")
_tkutl._raise_error_if_sframe_empty(dataset, "dataset")
## Basic validation of the features input
if features is not None and not isinstance(features, list):
raise TypeError("If specified, input 'features' must be a list of " +
"strings.")
## Clean the method options and create the options dictionary
allowed_kwargs = ['leaf_size', 'num_tables', 'num_projections_per_table']
_method_options = {}
for k, v in kwargs.items():
if k in allowed_kwargs:
_method_options[k] = v
else:
raise _ToolkitError("'{}' is not a valid keyword argument".format(k) +
" for the nearest neighbors model. Please " +
"check for capitalization and other typos.")
## Exclude inappropriate combinations of method and distance
if method == 'ball_tree' and (distance == 'cosine'
or distance == _turicreate.distances.cosine
or distance == 'dot_product'
or distance == _turicreate.distances.dot_product
or distance == 'transformed_dot_product'
or distance == _turicreate.distances.transformed_dot_product):
raise TypeError("The ball tree method does not work with 'cosine' " +
"'dot_product', or 'transformed_dot_product' distance." +
"Please use the 'brute_force' method for these distances.")
if method == 'lsh' and ('num_projections_per_table' not in _method_options):
if distance == 'jaccard' or distance == _turicreate.distances.jaccard:
_method_options['num_projections_per_table'] = 4
elif distance == 'cosine' or distance == _turicreate.distances.cosine:
_method_options['num_projections_per_table'] = 16
else:
_method_options['num_projections_per_table'] = 8
## Initial validation and processing of the label
if label is None:
_label = _robust_column_name('__id', dataset.column_names())
_dataset = dataset.add_row_number(_label)
else:
_label = label
_dataset = _copy.copy(dataset)
col_type_map = {c:_dataset[c].dtype for c in _dataset.column_names()}
_validate_row_label(_label, col_type_map)
ref_labels = _dataset[_label]
## Determine the internal list of available feature names (may still include
# the row label name).
if features is None:
_features = _dataset.column_names()
else:
_features = _copy.deepcopy(features)
## Check if there's only one feature and it's the same as the row label.
# This would also be trapped by the composite distance validation, but the
# error message is not very informative for the user.
free_features = set(_features).difference([_label])
if len(free_features) < 1:
raise _ToolkitError("The only available feature is the same as the " +
"row label column. Please specify features " +
"that are not also row labels.")
### Validate and preprocess the distance function
### ---------------------------------------------
# - The form of the 'distance' controls how we interact with the 'features'
# parameter as well.
# - At this point, the row label 'label' may still be in the list(s) of
# features.
## Convert any distance function input into a single composite distance.
# distance is already a composite distance
if isinstance(distance, list):
distance = _copy.deepcopy(distance)
# distance is a single name (except 'auto') or function handle.
elif (hasattr(distance, '__call__') or
(isinstance(distance, str) and not distance == 'auto')):
distance = [[_features, distance, 1]]
# distance is unspecified and needs to be constructed.
elif distance is None or distance == 'auto':
sample = _dataset.head()
distance = _construct_auto_distance(_features,
_dataset.column_names(),
_dataset.column_types(),
sample)
else:
raise TypeError("Input 'distance' not understood. The 'distance' "
" argument must be a string, function handle, or " +
"composite distance.")
## Basic composite distance validation, remove the row label from all
# feature lists, and convert string distance names into distance functions.
distance = _scrub_composite_distance_features(distance, [_label])
distance = _convert_distance_names_to_functions(distance)
_validate_composite_distance(distance)
## Raise an error if any distances are used with non-lists
list_features_to_check = []
sparse_distances = ['jaccard', 'weighted_jaccard', 'cosine', 'dot_product', 'transformed_dot_product']
sparse_distances = [_turicreate.distances.__dict__[k] for k in sparse_distances]
for d in distance:
feature_names, dist, _ = d
list_features = [f for f in feature_names if _dataset[f].dtype == list]
for f in list_features:
if dist in sparse_distances:
list_features_to_check.append(f)
else:
raise TypeError("The chosen distance cannot currently be used " +
"on list-typed columns.")
for f in list_features_to_check:
only_str_lists = _validate_lists(_dataset[f], [str])
if not only_str_lists:
raise TypeError("Distances for sparse data, such as jaccard " +
"and weighted_jaccard, can only be used on " +
"lists containing only strings. Please modify " +
"any list features accordingly before creating " +
"the nearest neighbors model.")
## Raise an error if any component uses string features that are not in a single column
for d in distance:
feature_names, dist, _ = d
if (len(feature_names) > 1) and (dist == _turicreate.distances.levenshtein):
raise ValueError("Levenshtein distance cannot be used with multiple " +
"columns. Please concatenate strings into a single " +
"column before creating the nearest neighbors model.")
## Get the union of feature names and make a clean dataset.
clean_features = _get_composite_distance_features(distance)
sf_clean = _tkutl._toolkits_select_columns(_dataset, clean_features)
## Decide which method to use
## - If more than one distance component (specified either directly or
# generated automatically because distance set to 'auto'), then do brute
# force.
if len(distance) > 1:
_method = 'brute_force'
if method != 'brute_force' and verbose is True:
print("Defaulting to brute force instead of ball tree because " +\
"there are multiple distance components.")
else:
if method == 'auto':
# get the total number of variables. Assume the number of elements in
# array type columns does not change
num_variables = sum([len(x) if hasattr(x, '__iter__') else 1
for x in _six.itervalues(sf_clean[0])])
# flag if all the features in the single composite are of numeric
# type.
numeric_type_flag = all([x in [int, float, list, array.array]
for x in sf_clean.column_types()])
## Conditions necessary for ball tree to work and be worth it
if ((distance[0][1] in ['euclidean',
'manhattan',
_turicreate.distances.euclidean,
_turicreate.distances.manhattan])
and numeric_type_flag is True
and num_variables <= 200):
_method = 'ball_tree'
else:
_method = 'brute_force'
else:
_method = method
## Pick the right model name for the method
if _method == 'ball_tree':
model_name = 'nearest_neighbors_ball_tree'
elif _method == 'brute_force':
model_name = 'nearest_neighbors_brute_force'
elif _method == 'lsh':
model_name = 'nearest_neighbors_lsh'
else:
raise ValueError("Method must be 'auto', 'ball_tree', 'brute_force', " +
"or 'lsh'.")
## Package the model options
opts = {}
opts.update(_method_options)
opts.update(
{'model_name': model_name,
'ref_labels': ref_labels,
'label': label,
'sf_features': sf_clean,
'composite_params': distance})
## Construct the nearest neighbors model
with QuietProgress(verbose):
result = _turicreate.extensions._nearest_neighbors.train(opts)
model_proxy = result['model']
model = NearestNeighborsModel(model_proxy)
return model
|
[
"\n Create a nearest neighbor model, which can be searched efficiently and\n quickly for the nearest neighbors of a query observation. If the `method`\n argument is specified as `auto`, the type of model is chosen automatically\n based on the type of data in `dataset`.\n\n .. warning::\n\n The 'dot_product' distance is deprecated and will be removed in future\n versions of Turi Create. Please use 'transformed_dot_product'\n distance instead, although note that this is more than a name change;\n it is a *different* transformation of the dot product of two vectors.\n Please see the distances module documentation for more details.\n\n Parameters\n ----------\n dataset : SFrame\n Reference data. If the features for each observation are numeric, they\n may be in separate columns of 'dataset' or a single column with lists\n of values. The features may also be in the form of a column of sparse\n vectors (i.e. dictionaries), with string keys and numeric values.\n\n label : string, optional\n Name of the SFrame column with row labels. If 'label' is not specified,\n row numbers are used to identify reference dataset rows when the model\n is queried.\n\n features : list[string], optional\n Name of the columns with features to use in computing distances between\n observations and the query points. 'None' (the default) indicates that\n all columns except the label should be used as features. Each column\n can be one of the following types:\n\n - *Numeric*: values of numeric type integer or float.\n\n - *Array*: list of numeric (integer or float) values. Each list element\n is treated as a separate variable in the model.\n\n - *Dictionary*: key-value pairs with numeric (integer or float) values.\n Each key indicates a separate variable in the model.\n\n - *List*: list of integer or string values. Each element is treated as\n a separate variable in the model.\n\n - *String*: string values.\n\n Please note: if a composite distance is also specified, this parameter\n is ignored.\n\n distance : string, function, or list[list], optional\n Function to measure the distance between any two input data rows. This\n may be one of three types:\n\n - *String*: the name of a standard distance function. One of\n 'euclidean', 'squared_euclidean', 'manhattan', 'levenshtein',\n 'jaccard', 'weighted_jaccard', 'cosine', 'dot_product' (deprecated),\n or 'transformed_dot_product'.\n\n - *Function*: a function handle from the\n :mod:`~turicreate.toolkits.distances` module.\n\n - *Composite distance*: the weighted sum of several standard distance\n functions applied to various features. This is specified as a list of\n distance components, each of which is itself a list containing three\n items:\n\n 1. list or tuple of feature names (strings)\n\n 2. standard distance name (string)\n\n 3. scaling factor (int or float)\n\n For more information about Turi Create distance functions, please\n see the :py:mod:`~turicreate.toolkits.distances` module.\n\n If 'distance' is left unspecified or set to 'auto', a composite\n distance is constructed automatically based on feature types.\n\n method : {'auto', 'ball_tree', 'brute_force', 'lsh'}, optional\n Method for computing nearest neighbors. The options are:\n\n - *auto* (default): the method is chosen automatically, based on the\n type of data and the distance. If the distance is 'manhattan' or\n 'euclidean' and the features are numeric or vectors of numeric\n values, then the 'ball_tree' method is used. 
Otherwise, the\n 'brute_force' method is used.\n\n - *ball_tree*: use a tree structure to find the k-closest neighbors to\n each query point. The ball tree model is slower to construct than the\n brute force model, but queries are faster than linear time. This\n method is not applicable for the cosine and dot product distances.\n See `Liu, et al (2004)\n <http://papers.nips.cc/paper/2666-an-investigation-of-p\n ractical-approximat e-nearest-neighbor-algorithms>`_ for\n implementation details.\n\n - *brute_force*: compute the distance from a query point to all\n reference observations. There is no computation time for model\n creation with the brute force method (although the reference data is\n held in the model, but each query takes linear time.\n\n - *lsh*: use Locality Sensitive Hashing (LSH) to find approximate\n nearest neighbors efficiently. The LSH model supports 'euclidean',\n 'squared_euclidean', 'manhattan', 'cosine', 'jaccard', 'dot_product'\n (deprecated), and 'transformed_dot_product' distances. Two options\n are provided for LSH -- ``num_tables`` and\n ``num_projections_per_table``. See the notes below for details.\n\n verbose: bool, optional\n If True, print progress updates and model details.\n\n **kwargs : optional\n Options for the distance function and query method.\n\n - *leaf_size*: for the ball tree method, the number of points in each\n leaf of the tree. The default is to use the max of 1,000 and\n n/(2^11), which ensures a maximum tree depth of 12.\n\n - *num_tables*: For the LSH method, the number of hash tables\n constructed. The default value is 20. We recommend choosing values\n from 10 to 30.\n\n - *num_projections_per_table*: For the LSH method, the number of\n projections/hash functions for each hash table. The default value is\n 4 for 'jaccard' distance, 16 for 'cosine' distance and 8 for other\n distances. We recommend using number 2 ~ 6 for 'jaccard' distance, 8\n ~ 20 for 'cosine' distance and 4 ~ 12 for other distances.\n\n Returns\n -------\n out : NearestNeighborsModel\n A structure for efficiently computing the nearest neighbors in 'dataset'\n of new query points.\n\n See Also\n --------\n NearestNeighborsModel.query, turicreate.toolkits.distances\n\n Notes\n -----\n - Missing data is not allowed in the 'dataset' provided to this function.\n Please use the :func:`turicreate.SFrame.fillna` and\n :func:`turicreate.SFrame.dropna` utilities to handle missing data before\n creating a nearest neighbors model.\n\n - Missing keys in sparse vectors are assumed to have value 0.\n\n - The `composite_params` parameter was removed as of Turi Create\n version 1.5. The `distance` parameter now accepts either standard or\n composite distances. Please see the :mod:`~turicreate.toolkits.distances`\n module documentation for more information on composite distances.\n\n - If the features should be weighted equally in the distance calculations\n but are measured on different scales, it is important to standardize the\n features. One way to do this is to subtract the mean of each column and\n divide by the standard deviation.\n\n **Locality Sensitive Hashing (LSH)**\n\n There are several efficient nearest neighbors search algorithms that work\n well for data with low dimensions :math:`d` (approximately 50). However,\n most of the solutions suffer from either space or query time that is\n exponential in :math:`d`. For large :math:`d`, they often provide little,\n if any, improvement over the 'brute_force' method. 
This is a well-known\n consequence of the phenomenon called `The Curse of Dimensionality`.\n\n `Locality Sensitive Hashing (LSH)\n <https://en.wikipedia.org/wiki/Locality-sensitive_hashing>`_ is an approach\n that is designed to efficiently solve the *approximate* nearest neighbor\n search problem for high dimensional data. The key idea of LSH is to hash\n the data points using several hash functions, so that the probability of\n collision is much higher for data points which are close to each other than\n those which are far apart.\n\n An LSH family is a family of functions :math:`h` which map points from the\n metric space to a bucket, so that\n\n - if :math:`d(p, q) \\\\leq R`, then :math:`h(p) = h(q)` with at least probability :math:`p_1`.\n - if :math:`d(p, q) \\\\geq cR`, then :math:`h(p) = h(q)` with probability at most :math:`p_2`.\n\n LSH for efficient approximate nearest neighbor search:\n\n - We define a new family of hash functions :math:`g`, where each\n function :math:`g` is obtained by concatenating :math:`k` functions\n :math:`h_1, ..., h_k`, i.e., :math:`g(p)=[h_1(p),...,h_k(p)]`.\n The algorithm constructs :math:`L` hash tables, each of which\n corresponds to a different randomly chosen hash function :math:`g`.\n There are :math:`k \\\\cdot L` hash functions used in total.\n\n - In the preprocessing step, we hash all :math:`n` reference points\n into each of the :math:`L` hash tables.\n\n - Given a query point :math:`q`, the algorithm iterates over the\n :math:`L` hash functions :math:`g`. For each :math:`g` considered, it\n retrieves the data points that are hashed into the same bucket as q.\n These data points from all the :math:`L` hash tables are considered as\n candidates that are then re-ranked by their real distances with the query\n data.\n\n **Note** that the number of tables :math:`L` and the number of hash\n functions per table :math:`k` are two main parameters. They can be set\n using the options ``num_tables`` and ``num_projections_per_table``\n respectively.\n\n Hash functions for different distances:\n\n - `euclidean` and `squared_euclidean`:\n :math:`h(q) = \\\\lfloor \\\\frac{a \\\\cdot q + b}{w} \\\\rfloor` where\n :math:`a` is a vector, of which the elements are independently\n sampled from normal distribution, and :math:`b` is a number\n uniformly sampled from :math:`[0, r]`. :math:`r` is a parameter for the\n bucket width. We set :math:`r` using the average all-pair `euclidean`\n distances from a small randomly sampled subset of the reference data.\n\n - `manhattan`: The hash function of `manhattan` is similar with that of\n `euclidean`. The only difference is that the elements of `a` are sampled\n from Cauchy distribution, instead of normal distribution.\n\n - `cosine`: Random Projection is designed to approximate the cosine\n distance between vectors. The hash function is :math:`h(q) = sgn(a \\\\cdot\n q)`, where :math:`a` is randomly sampled normal unit vector.\n\n - `jaccard`: We use a recently proposed method one permutation hashing by\n Shrivastava and Li. See the paper `[Shrivastava and Li, UAI 2014]\n <http://www.auai.org/uai2014/proceedings/individuals/225.pdf>`_ for\n details.\n\n - `dot_product`: The reference data points are first transformed to\n fixed-norm vectors, and then the minimum `dot_product` distance search\n problem can be solved via finding the reference data with smallest\n `cosine` distances. 
See the paper `[Neyshabur and Srebro, ICML 2015]\n <http://proceedings.mlr.press/v37/neyshabur15.html>`_ for details.\n\n References\n ----------\n - `Wikipedia - nearest neighbor\n search <http://en.wikipedia.org/wiki/Nearest_neighbor_search>`_\n\n - `Wikipedia - ball tree <http://en.wikipedia.org/wiki/Ball_tree>`_\n\n - Ball tree implementation: Liu, T., et al. (2004) `An Investigation of\n Practical Approximate Nearest Neighbor Algorithms\n <http://papers.nips.cc/paper/2666-an-investigation-of-p\n ractical-approximat e-nearest-neighbor-algorithms>`_. Advances in Neural\n Information Processing Systems pp. 825-832.\n\n - `Wikipedia - Jaccard distance\n <http://en.wikipedia.org/wiki/Jaccard_index>`_\n\n - Weighted Jaccard distance: Chierichetti, F., et al. (2010) `Finding the\n Jaccard Median\n <http://theory.stanford.edu/~sergei/papers/soda10-jaccard.pdf>`_.\n Proceedings of the Twenty-First Annual ACM-SIAM Symposium on Discrete\n Algorithms. Society for Industrial and Applied Mathematics.\n\n - `Wikipedia - Cosine distance\n <http://en.wikipedia.org/wiki/Cosine_similarity>`_\n\n - `Wikipedia - Levenshtein distance\n <http://en.wikipedia.org/wiki/Levenshtein_distance>`_\n\n - Locality Sensitive Hashing : Chapter 3 of the book `Mining Massive\n Datasets <http://infolab.stanford.edu/~ullman/mmds/ch3.pdf>`_.\n\n Examples\n --------\n Construct a nearest neighbors model with automatically determined method\n and distance:\n\n >>> sf = turicreate.SFrame({'X1': [0.98, 0.62, 0.11],\n ... 'X2': [0.69, 0.58, 0.36],\n ... 'str_feature': ['cat', 'dog', 'fossa']})\n >>> model = turicreate.nearest_neighbors.create(sf, features=['X1', 'X2'])\n\n For datasets with a large number of rows and up to about 100 variables, the\n ball tree method often leads to much faster queries.\n\n >>> model = turicreate.nearest_neighbors.create(sf, features=['X1', 'X2'],\n ... method='ball_tree')\n\n Often the final determination of a neighbor is based on several distance\n computations over different sets of features. Each part of this composite\n distance may have a different relative weight.\n\n >>> my_dist = [[['X1', 'X2'], 'euclidean', 2.],\n ... [['str_feature'], 'levenshtein', 3.]]\n ...\n >>> model = turicreate.nearest_neighbors.create(sf, distance=my_dist)\n "
] |
Please provide a description of the function:def _get_summary_struct(self):
model_fields = [
("Method", 'method'),
("Number of distance components", 'num_distance_components'),
("Number of examples", 'num_examples'),
("Number of feature columns", 'num_features'),
("Number of unpacked features", 'num_unpacked_features'),
("Total training time (seconds)", 'training_time')]
ball_tree_fields = [
("Tree depth", 'tree_depth'),
("Leaf size", 'leaf_size')]
lsh_fields = [
("Number of hash tables", 'num_tables'),
("Number of projections per table", 'num_projections_per_table')]
sections = [model_fields]
section_titles = ['Attributes']
if (self.method == 'ball_tree'):
sections.append(ball_tree_fields)
section_titles.append('Ball Tree Attributes')
if (self.method == 'lsh'):
sections.append(lsh_fields)
section_titles.append('LSH Attributes')
return (sections, section_titles)
|
[
"\n Returns a structured description of the model, including (where\n relevant) the schema of the training data, description of the training\n data, training statistics, and model hyperparameters.\n\n Returns\n -------\n sections : list (of list of tuples)\n A list of summary sections.\n Each section is a list.\n Each item in a section list is a tuple of the form:\n ('<label>','<field>')\n section_titles: list\n A list of section titles.\n The order matches that of the 'sections' object.\n "
] |
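A sketch of what the returned structure might look like for a ball tree model; 'model' is assumed to be a trained NearestNeighborsModel, and the field values are illustrative.

sections, section_titles = model._get_summary_struct()
# section_titles -> ['Attributes', 'Ball Tree Attributes']
# sections[0]    -> [('Method', 'method'), ('Number of examples', 'num_examples'), ...]
# sections[1]    -> [('Tree depth', 'tree_depth'), ('Leaf size', 'leaf_size')]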
Please provide a description of the function:def _list_fields(self):
    opts = {'model': self.__proxy__, 'model_name': self.__name__}
    response = _turicreate.extensions._nearest_neighbors.list_fields(opts)
    return sorted(response.keys())
|
[
"\n List the fields stored in the model, including data, model, and\n training options. Each field can be queried with the ``get`` method.\n\n Returns\n -------\n out : list\n List of fields queryable with the ``get`` method.\n "
] |
Please provide a description of the function:def _get(self, field):
    opts = {'model': self.__proxy__,
            'model_name': self.__name__,
            'field': field}
    response = _turicreate.extensions._nearest_neighbors.get_value(opts)
    return response['value']
|
[
"\n Return the value of a given field. The list of all queryable fields is\n detailed below, and can be obtained with the\n :func:`~turicreate.nearest_neighbors.NearestNeighborsModel._list_fields`\n method.\n\n +-----------------------+----------------------------------------------+\n | Field | Description |\n +=======================+==============================================+\n | distance | Measure of dissimilarity between two points |\n +-----------------------+----------------------------------------------+\n | features | Feature column names |\n +-----------------------+----------------------------------------------+\n | unpacked_features | Names of the individual features used |\n +-----------------------+----------------------------------------------+\n | label | Label column names |\n +-----------------------+----------------------------------------------+\n | leaf_size | Max size of leaf nodes (ball tree only) |\n +-----------------------+----------------------------------------------+\n | method | Method of organizing reference data |\n +-----------------------+----------------------------------------------+\n | num_examples | Number of reference data observations |\n +-----------------------+----------------------------------------------+\n | num_features | Number of features for distance computation |\n +-----------------------+----------------------------------------------+\n | num_unpacked_features | Number of unpacked features |\n +-----------------------+----------------------------------------------+\n | num_variables | Number of variables for distance computation |\n +-----------------------+----------------------------------------------+\n | training_time | Time to create the reference structure |\n +-----------------------+----------------------------------------------+\n | tree_depth | Number of levels in the tree (ball tree only)|\n +-----------------------+----------------------------------------------+\n\n Parameters\n ----------\n field : string\n Name of the field to be retrieved.\n\n Returns\n -------\n out\n Value of the requested field.\n "
] |
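A short, hypothetical usage sketch combining _list_fields and _get; 'model' is assumed to be a trained NearestNeighborsModel (see the create() entry above).

for field in model._list_fields():        # e.g. ['distance', 'features', 'label', 'leaf_size', ...]
    print(field, model._get(field))       # look up each queryable field by name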
Please provide a description of the function:def _training_stats(self):
    opts = {'model': self.__proxy__, 'model_name': self.__name__}
    return _turicreate.extensions._nearest_neighbors.training_stats(opts)
|
[
"\n Return a dictionary of statistics collected during creation of the\n model. These statistics are also available with the ``get`` method and\n are described in more detail in that method's documentation.\n\n Returns\n -------\n out : dict\n Dictionary of statistics compiled during creation of the\n NearestNeighborsModel.\n\n See Also\n --------\n summary\n\n Examples\n --------\n >>> sf = turicreate.SFrame({'label': range(3),\n ... 'feature1': [0.98, 0.62, 0.11],\n ... 'feature2': [0.69, 0.58, 0.36]})\n >>> model = turicreate.nearest_neighbors.create(sf, 'label')\n >>> model.training_stats()\n {'features': 'feature1, feature2',\n 'label': 'label',\n 'leaf_size': 1000,\n 'num_examples': 3,\n 'num_features': 2,\n 'num_variables': 2,\n 'training_time': 0.023223,\n 'tree_depth': 1}\n "
] |
Please provide a description of the function:def query(self, dataset, label=None, k=5, radius=None, verbose=True):
## Validate the 'dataset' input
_tkutl._raise_error_if_not_sframe(dataset, "dataset")
_tkutl._raise_error_if_sframe_empty(dataset, "dataset")
## Get model features
ref_features = self.features
sf_features = _tkutl._toolkits_select_columns(dataset, ref_features)
## Validate and preprocess the 'label' input
if label is None:
query_labels = _turicreate.SArray.from_sequence(len(dataset))
else:
if not label in dataset.column_names():
raise ValueError(
"Input 'label' must be a string matching the name of a " +\
"column in the reference SFrame 'dataset'.")
if not dataset[label].dtype == str and not dataset[label].dtype == int:
raise TypeError("The label column must contain integers or strings.")
if label in ref_features:
raise ValueError("The label column cannot be one of the features.")
query_labels = dataset[label]
## Validate neighborhood parameters 'k' and 'radius'
if k is not None:
if not isinstance(k, int):
raise ValueError("Input 'k' must be an integer.")
if k <= 0:
raise ValueError("Input 'k' must be larger than 0.")
if radius is not None:
if not isinstance(radius, (int, float)):
raise ValueError("Input 'radius' must be an integer or float.")
if radius < 0:
raise ValueError("Input 'radius' must be non-negative.")
## Set k and radius to special values to indicate 'None'
if k is None:
k = -1
if radius is None:
radius = -1.0
opts = {'model': self.__proxy__,
'model_name': self.__name__,
'features': sf_features,
'query_labels': query_labels,
'k': k,
'radius': radius}
with QuietProgress(verbose):
result = _turicreate.extensions._nearest_neighbors.query(opts)
return result['neighbors']
|
[
"\n For each row of the input 'dataset', retrieve the nearest neighbors\n from the model's stored data. In general, the query dataset does not\n need to be the same as the reference data stored in the model, but if\n it is, the 'include_self_edges' parameter can be set to False to\n exclude results that match query points to themselves.\n\n Parameters\n ----------\n dataset : SFrame\n Query data. Must contain columns with the same names and types as\n the features used to train the model. Additional columns are\n allowed, but ignored. Please see the nearest neighbors\n :func:`~turicreate.nearest_neighbors.create` documentation for more\n detail on allowable data types.\n\n label : str, optional\n Name of the query SFrame column with row labels. If 'label' is not\n specified, row numbers are used to identify query dataset rows in\n the output SFrame.\n\n k : int, optional\n Number of nearest neighbors to return from the reference set for\n each query observation. The default is 5 neighbors, but setting it\n to ``None`` will return all neighbors within ``radius`` of the\n query point.\n\n radius : float, optional\n Only neighbors whose distance to a query point is smaller than this\n value are returned. The default is ``None``, in which case the\n ``k`` nearest neighbors are returned for each query point,\n regardless of distance.\n\n verbose: bool, optional\n If True, print progress updates and model details.\n\n Returns\n -------\n out : SFrame\n An SFrame with the k-nearest neighbors of each query observation.\n The result contains four columns: the first is the label of the\n query observation, the second is the label of the nearby reference\n observation, the third is the distance between the query and\n reference observations, and the fourth is the rank of the reference\n observation among the query's k-nearest neighbors.\n\n See Also\n --------\n similarity_graph\n\n Notes\n -----\n - The `dataset` input to this method *can* have missing values (in\n contrast to the reference dataset used to create the nearest\n neighbors model). Missing numeric values are imputed to be the mean\n of the corresponding feature in the reference dataset, and missing\n strings are imputed to be empty strings.\n\n - If both ``k`` and ``radius`` are set to ``None``, each query point\n returns all of the reference set. If the reference dataset has\n :math:`n` rows and the query dataset has :math:`m` rows, the output\n is an SFrame with :math:`nm` rows.\n\n - For models created with the 'lsh' method, the query results may have\n fewer query labels than input query points. Because LSH is an\n approximate method, a query point may have fewer than 'k' neighbors.\n If LSH returns no neighbors at all for a query, the query point is\n omitted from the results.\n\n Examples\n --------\n First construct a toy SFrame and create a nearest neighbors model:\n\n >>> sf = turicreate.SFrame({'label': range(3),\n ... 'feature1': [0.98, 0.62, 0.11],\n ... 'feature2': [0.69, 0.58, 0.36]})\n >>> model = turicreate.nearest_neighbors.create(sf, 'label')\n\n A new SFrame contains query observations with same schema as the\n reference SFrame. This SFrame is passed to the ``query`` method.\n\n >>> queries = turicreate.SFrame({'label': range(3),\n ... 'feature1': [0.05, 0.61, 0.99],\n ... 
'feature2': [0.06, 0.97, 0.86]})\n >>> model.query(queries, 'label', k=2)\n +-------------+-----------------+----------------+------+\n | query_label | reference_label | distance | rank |\n +-------------+-----------------+----------------+------+\n | 0 | 2 | 0.305941170816 | 1 |\n | 0 | 1 | 0.771556867638 | 2 |\n | 1 | 1 | 0.390128184063 | 1 |\n | 1 | 0 | 0.464004310325 | 2 |\n | 2 | 0 | 0.170293863659 | 1 |\n | 2 | 1 | 0.464004310325 | 2 |\n +-------------+-----------------+----------------+------+\n "
] |
Please provide a description of the function:def similarity_graph(self, k=5, radius=None, include_self_edges=False,
output_type='SGraph', verbose=True):
## Validate inputs.
if k is not None:
if not isinstance(k, int):
raise ValueError("Input 'k' must be an integer.")
if k <= 0:
raise ValueError("Input 'k' must be larger than 0.")
if radius is not None:
if not isinstance(radius, (int, float)):
raise ValueError("Input 'radius' must be an integer or float.")
if radius < 0:
raise ValueError("Input 'radius' must be non-negative.")
## Set k and radius to special values to indicate 'None'
if k is None:
k = -1
if radius is None:
radius = -1.0
opts = {'model': self.__proxy__,
'model_name': self.__name__,
'k': k,
'radius': radius,
'include_self_edges': include_self_edges}
with QuietProgress(verbose):
result = _turicreate.extensions._nearest_neighbors.similarity_graph(opts)
knn = result['neighbors']
if output_type == "SFrame":
return knn
else:
sg = _SGraph(edges=knn, src_field='query_label',
dst_field='reference_label')
return sg
|
[
"\n Construct the similarity graph on the reference dataset, which is\n already stored in the model. This is conceptually very similar to\n running `query` with the reference set, but this method is optimized\n for the purpose, syntactically simpler, and automatically removes\n self-edges.\n\n Parameters\n ----------\n k : int, optional\n Maximum number of neighbors to return for each point in the\n dataset. Setting this to ``None`` deactivates the constraint, so\n that all neighbors are returned within ``radius`` of a given point.\n\n radius : float, optional\n For a given point, only neighbors within this distance are\n returned. The default is ``None``, in which case the ``k`` nearest\n neighbors are returned for each query point, regardless of\n distance.\n\n include_self_edges : bool, optional\n For most distance functions, each point in the model's reference\n dataset is its own nearest neighbor. If this parameter is set to\n False, this result is ignored, and the nearest neighbors are\n returned *excluding* the point itself.\n\n output_type : {'SGraph', 'SFrame'}, optional\n By default, the results are returned in the form of an SGraph,\n where each point in the reference dataset is a vertex and an edge A\n -> B indicates that vertex B is a nearest neighbor of vertex A. If\n 'output_type' is set to 'SFrame', the output is in the same form as\n the results of the 'query' method: an SFrame with columns\n indicating the query label (in this case the query data is the same\n as the reference data), reference label, distance between the two\n points, and the rank of the neighbor.\n\n verbose : bool, optional\n If True, print progress updates and model details.\n\n Returns\n -------\n out : SFrame or SGraph\n The type of the output object depends on the 'output_type'\n parameter. See the parameter description for more detail.\n\n Notes\n -----\n - If both ``k`` and ``radius`` are set to ``None``, each data point is\n matched to the entire dataset. If the reference dataset has\n :math:`n` rows, the output is an SFrame with :math:`n^2` rows (or an\n SGraph with :math:`n^2` edges).\n\n - For models created with the 'lsh' method, the output similarity graph\n may have fewer vertices than there are data points in the original\n reference set. Because LSH is an approximate method, a query point\n may have fewer than 'k' neighbors. If LSH returns no neighbors at all\n for a query and self-edges are excluded, the query point is omitted\n from the results.\n\n Examples\n --------\n First construct an SFrame and create a nearest neighbors model:\n\n >>> sf = turicreate.SFrame({'x1': [0.98, 0.62, 0.11],\n ... 'x2': [0.69, 0.58, 0.36]})\n ...\n >>> model = turicreate.nearest_neighbors.create(sf, distance='euclidean')\n\n Unlike the ``query`` method, there is no need for a second dataset with\n ``similarity_graph``.\n\n >>> g = model.similarity_graph(k=1) # an SGraph\n >>> g.edges\n +----------+----------+----------------+------+\n | __src_id | __dst_id | distance | rank |\n +----------+----------+----------------+------+\n | 0 | 1 | 0.376430604494 | 1 |\n | 2 | 1 | 0.55542776308 | 1 |\n | 1 | 0 | 0.376430604494 | 1 |\n +----------+----------+----------------+------+\n "
] |
Please provide a description of the function:def random_split_by_session(dataset, session_id, fraction=0.9, seed=None):
from random import Random
_raise_error_if_not_of_type(dataset, _SFrame, 'dataset')
_raise_error_if_not_of_type(session_id, str, 'session_id')
_raise_error_if_not_of_type(fraction, float, 'fraction')
_raise_error_if_not_of_type(seed, [int, type(None)], 'seed')
_numeric_param_check_range('fraction', fraction, 0, 1)
if session_id not in dataset.column_names():
raise _ToolkitError(
'Input "dataset" must contain a column called %s.' % session_id)
if seed is None:
# Include the nanosecond component as well.
import time
seed = abs(hash("%0.20f" % time.time())) % (2 ** 31)
# The cython bindings require this to be an int, so cast if we can.
try:
seed = int(seed)
except ValueError:
raise ValueError('The \'seed\' parameter must be of type int.')
random = Random()
# Create a random binary filter (boolean SArray), using the same probability across all lines
# that belong to the same session. In expectation, the desired fraction of the sessions will
# go to the training set.
# Since boolean filters preserve order - there is no need to re-sort the lines within each session.
# The boolean filter is a pseudorandom function of the session_id and the
# global seed above, allowing the train-test split to vary across runs using
# the same dataset.
def random_session_pick(session_id_hash):
random.seed(session_id_hash)
return random.uniform(0, 1) < fraction
chosen_filter = dataset[session_id].hash(seed).apply(random_session_pick)
train = dataset[chosen_filter]
valid = dataset[1 - chosen_filter]
return train, valid
|
[
"\n Randomly split an SFrame into two SFrames based on the `session_id` such\n that one split contains data for a `fraction` of the sessions while the\n second split contains all data for the rest of the sessions.\n\n Parameters\n ----------\n dataset : SFrame\n Dataset to split. It must contain a column of session ids.\n\n session_id : string, optional\n The name of the column in `dataset` that corresponds to the\n a unique identifier for each session.\n\n fraction : float, optional\n Fraction of the sessions to fetch for the first returned SFrame. Must\n be between 0 and 1. Once the sessions are split, all data from a single\n session is in the same SFrame.\n\n seed : int, optional\n Seed for the random number generator used to split.\n\n Examples\n --------\n\n .. sourcecode:: python\n\n # Split the data so that train has 90% of the users.\n >>> train, valid = tc.activity_classifier.util.random_split_by_session(\n ... dataset, session_id='session_id', fraction=0.9)\n\n # For example: If dataset has 2055 sessions\n >>> len(dataset['session_id'].unique())\n 2055\n\n # The training set now has 90% of the sessions\n >>> len(train['session_id'].unique())\n 1850\n\n # The validation set has the remaining 10% of the sessions\n >>> len(valid['session_id'].unique())\n 205\n "
] |
Please provide a description of the function:def read_msbuild_xml(path, values={}):
# Attempt to read the file contents
try:
document = parse(path)
except Exception as e:
logging.exception('Could not read MS Build XML file at %s', path)
return values
# Convert the XML to JSON format
logging.info('Processing MS Build XML file at %s', path)
# Get the rule node
rule = document.getElementsByTagName('Rule')[0]
rule_name = rule.attributes['Name'].value
logging.info('Found rules for %s', rule_name)
# Preprocess Argument values
__preprocess_arguments(rule)
# Get all the values
converted_values = []
__convert(rule, 'EnumProperty', converted_values, __convert_enum)
__convert(rule, 'BoolProperty', converted_values, __convert_bool)
__convert(rule, 'StringListProperty', converted_values,
__convert_string_list)
__convert(rule, 'StringProperty', converted_values, __convert_string)
__convert(rule, 'IntProperty', converted_values, __convert_string)
values[rule_name] = converted_values
return values
|
[
"Reads the MS Build XML file at the path and returns its contents.\n\n Keyword arguments:\n values -- The map to append the contents to (default {})\n "
] |
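A minimal sketch of calling read_msbuild_xml, assuming a hypothetical cl.xml rule file; results accumulate in the values dict keyed by the rule's Name attribute.

values = {}
read_msbuild_xml('/path/to/cl.xml', values)   # hypothetical path to an MSBuild rule file
# values might now look like:
# {'CL': [{'name': 'WarningLevel', 'switch': 'W3', 'comment': 'Warning Level',
#          'value': 'Level3', 'flags': [...]}, ...]}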
Please provide a description of the function:def read_msbuild_json(path, values=[]):
if not os.path.exists(path):
logging.info('Could not find MS Build JSON file at %s', path)
return values
try:
values.extend(__read_json_file(path))
except Exception as e:
logging.exception('Could not read MS Build JSON file at %s', path)
return values
logging.info('Processing MS Build JSON file at %s', path)
return values
|
[
"Reads the MS Build JSON file at the path and returns its contents.\n\n Keyword arguments:\n values -- The list to append the contents to (default [])\n "
] |
Please provide a description of the function:def main():
# Parse the arguments
parser = argparse.ArgumentParser(
description='Convert MSBuild XML to JSON format')
parser.add_argument(
'-t', '--toolchain', help='The name of the toolchain', required=True)
parser.add_argument(
'-o', '--output', help='The output directory', default='')
parser.add_argument(
'-r',
'--overwrite',
help='Whether previously output should be overwritten',
dest='overwrite',
action='store_true')
parser.set_defaults(overwrite=False)
parser.add_argument(
'-d',
'--debug',
help="Debug tool output",
action="store_const",
dest="loglevel",
const=logging.DEBUG,
default=logging.WARNING)
parser.add_argument(
'-v',
'--verbose',
help="Verbose output",
action="store_const",
dest="loglevel",
const=logging.INFO)
parser.add_argument('input', help='The input files', nargs='+')
args = parser.parse_args()
toolchain = args.toolchain
logging.basicConfig(level=args.loglevel)
logging.info('Creating %s toolchain files', toolchain)
values = {}
# Iterate through the inputs
for input in args.input:
input = __get_path(input)
read_msbuild_xml(input, values)
# Determine if the output directory needs to be created
output_dir = __get_path(args.output)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
logging.info('Created output directory %s', output_dir)
for key, value in values.items():
output_path = __output_path(toolchain, key, output_dir)
if os.path.exists(output_path) and not args.overwrite:
logging.info('Comparing previous output to current')
__merge_json_values(value, read_msbuild_json(output_path))
else:
logging.info('Original output will be overwritten')
logging.info('Writing MS Build JSON file at %s', output_path)
__write_json_file(output_path, value)
|
[
"Script entrypoint."
] |
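An illustrative invocation of the script's command line; the script filename and XML input paths are placeholders, not part of the original source.

import subprocess
# Hypothetical invocation of the converter script described above.
subprocess.run(['python', 'msbuild_to_json.py',
                '-t', 'v141',            # toolchain name (required)
                '-o', 'json',            # output directory, created if missing
                '--verbose',             # INFO-level logging
                'cl.xml', 'link.xml'])   # input MSBuild XML rule files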
Please provide a description of the function:def __merge_json_values(current, previous):
for value in current:
name = value['name']
# Find the previous value
previous_value = __find_and_remove_value(previous, value)
if previous_value is not None:
flags = value['flags']
previous_flags = previous_value['flags']
if flags != previous_flags:
logging.warning(
'Flags for %s are different. Using previous value.', name)
value['flags'] = previous_flags
else:
logging.warning('Value %s is a new value', name)
for value in previous:
name = value['name']
logging.warning(
'Value %s not present in current run. Appending value.', name)
current.append(value)
|
[
"Merges the values between the current and previous run of the script."
] |
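A small worked example of the merge semantics with made-up values: flags from the previous run win on conflict, and values missing from the current run are appended.

current = [{'name': 'WarningLevel', 'switch': 'W3', 'flags': ['UserValue']}]
previous = [{'name': 'WarningLevel', 'switch': 'W3', 'flags': ['UserValue', 'Continue']},
            {'name': 'Optimization', 'switch': 'Od', 'flags': []}]
__merge_json_values(current, previous)
# current is now:
# [{'name': 'WarningLevel', 'switch': 'W3', 'flags': ['UserValue', 'Continue']},  # previous flags win
#  {'name': 'Optimization', 'switch': 'Od', 'flags': []}]                         # appended from previous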
Please provide a description of the function:def __find_and_remove_value(list, compare):
    # next throws if there are no matches
    try:
        found = next(value for value in list
                     if value['name'] == compare['name'] and value['switch'] ==
                     compare['switch'])
    except:
        return None
    list.remove(found)
    return found
|
[
"Finds the value in the list that corresponds with the value of compare."
] |
Please provide a description of the function:def __convert(root, tag, values, func):
elements = root.getElementsByTagName(tag)
for element in elements:
converted = func(element)
# Append to the list
__append_list(values, converted)
|
[
"Converts the tag type found in the root and converts them using the func\n and appends them to the values.\n "
] |
Please provide a description of the function:def __convert_enum(node):
name = __get_attribute(node, 'Name')
logging.debug('Found EnumProperty named %s', name)
converted_values = []
for value in node.getElementsByTagName('EnumValue'):
converted = __convert_node(value)
converted['value'] = converted['name']
converted['name'] = name
# Modify flags when there is an argument child
__with_argument(value, converted)
converted_values.append(converted)
return converted_values
|
[
"Converts an EnumProperty node to JSON format."
] |
Please provide a description of the function:def __convert_bool(node):
converted = __convert_node(node, default_value='true')
# Check for a switch for reversing the value
reverse_switch = __get_attribute(node, 'ReverseSwitch')
if reverse_switch:
converted_reverse = copy.deepcopy(converted)
converted_reverse['switch'] = reverse_switch
converted_reverse['value'] = 'false'
return [converted_reverse, converted]
# Modify flags when there is an argument child
__with_argument(node, converted)
return __check_for_flag(converted)
|
[
"Converts an BoolProperty node to JSON format."
] |
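An illustrative BoolProperty and the two JSON entries it would become when a ReverseSwitch is present; the XML attribute values below are made up.

# <BoolProperty Name="SuppressStartupBanner" Switch="nologo" ReverseSwitch="verbose"
#               DisplayName="Suppress Startup Banner"/>
# __convert_bool would return roughly:
# [{'name': 'SuppressStartupBanner', 'switch': 'verbose', 'comment': 'Suppress Startup Banner',
#   'value': 'false', 'flags': [...]},
#  {'name': 'SuppressStartupBanner', 'switch': 'nologo', 'comment': 'Suppress Startup Banner',
#   'value': 'true', 'flags': [...]}]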
Please provide a description of the function:def __convert_string_list(node):
converted = __convert_node(node)
# Determine flags for the string list
flags = vsflags(VSFlags.UserValue)
# Check for a separator to determine if it is semicolon appendable
# If not present assume the value should be ;
separator = __get_attribute(node, 'Separator', default_value=';')
if separator == ';':
flags = vsflags(flags, VSFlags.SemicolonAppendable)
converted['flags'] = flags
return __check_for_flag(converted)
|
[
"Converts a StringListProperty node to JSON format."
] |
Please provide a description of the function:def __convert_string(node):
converted = __convert_node(node, default_flags=vsflags(VSFlags.UserValue))
return __check_for_flag(converted)
|
[
"Converts a StringProperty node to JSON format."
] |
Please provide a description of the function:def __convert_node(node, default_value='', default_flags=vsflags()):
name = __get_attribute(node, 'Name')
logging.debug('Found %s named %s', node.tagName, name)
converted = {}
converted['name'] = name
converted['switch'] = __get_attribute(node, 'Switch')
converted['comment'] = __get_attribute(node, 'DisplayName')
converted['value'] = default_value
# Check for the Flags attribute in case it was created during preprocessing
flags = __get_attribute(node, 'Flags')
if flags:
flags = flags.split(',')
else:
flags = default_flags
converted['flags'] = flags
return converted
|
[
"Converts a XML node to a JSON equivalent."
] |
Please provide a description of the function:def __with_argument(node, value):
arguments = node.getElementsByTagName('Argument')
if arguments:
logging.debug('Found argument within %s', value['name'])
value['flags'] = vsflags(VSFlags.UserValueIgnored, VSFlags.Continue)
|
[
"Modifies the flags in value if the node contains an Argument."
] |
Please provide a description of the function:def __preprocess_arguments(root):
# Set the flags to require a value
flags = ','.join(vsflags(VSFlags.UserValueRequired))
# Search through the arguments
arguments = root.getElementsByTagName('Argument')
for argument in arguments:
reference = __get_attribute(argument, 'Property')
found = None
# Look for the argument within the root's children
for child in root.childNodes:
# Ignore Text nodes
if isinstance(child, Element):
name = __get_attribute(child, 'Name')
if name == reference:
found = child
break
if found is not None:
logging.info('Found property named %s', reference)
# Get the associated switch
switch = __get_attribute(argument.parentNode, 'Switch')
# See if there is already a switch associated with the element.
if __get_attribute(found, 'Switch'):
logging.debug('Copying node %s', reference)
clone = found.cloneNode(True)
root.insertBefore(clone, found)
found = clone
found.setAttribute('Switch', switch)
found.setAttribute('Flags', flags)
else:
logging.warning('Could not find property named %s', reference)
|
[
"Preprocesses occurrences of Argument within the root.\n\n Argument XML values reference other values within the document by name. The\n referenced value does not contain a switch. This function will add the\n switch associated with the argument.\n "
] |
Please provide a description of the function:def __get_attribute(node, name, default_value=''):
if node.hasAttribute(name):
return node.attributes[name].value.strip()
else:
return default_value
|
[
"Retrieves the attribute of the given name from the node.\n\n If not present then the default_value is used.\n "
] |
Please provide a description of the function:def __get_path(path):
if not os.path.isabs(path):
path = os.path.join(os.getcwd(), path)
return os.path.normpath(path)
|
[
"Gets the path to the file."
] |
Please provide a description of the function:def __output_path(toolchain, rule, output_dir):
filename = '%s_%s.json' % (toolchain, rule)
return os.path.join(output_dir, filename)
|
[
"Gets the output path for a file given the toolchain, rule and output_dir"
] |
Please provide a description of the function:def __write_json_file(path, values):
# Sort the keys to ensure ordering
sort_order = ['name', 'switch', 'comment', 'value', 'flags']
sorted_values = [
OrderedDict(
sorted(
value.items(), key=lambda value: sort_order.index(value[0])))
for value in values
]
with open(path, 'w') as f:
json.dump(sorted_values, f, indent=2, separators=(',', ': '))
|
[
"Writes a JSON file at the path with the values provided."
] |
Please provide a description of the function:def __append_list(append_to, value):
if value is not None:
if isinstance(value, list):
append_to.extend(value)
else:
append_to.append(value)
|
[
"Appends the value to the list."
] |
Please provide a description of the function:def decompile_func(func):
'''
Decompile a function into ast.FunctionDef node.
:param func: python function (can not be a built-in)
:return: ast.FunctionDef instance.
'''
code = func.__code__
# For python 3
# defaults = func.func_defaults if sys.version_info.major < 3 else func.__defaults__
# if defaults:
# default_names = code.co_varnames[:code.co_argcount][-len(defaults):]
# else:
# default_names = []
# defaults = [_ast.Name(id='%s_default' % name, ctx=_ast.Load() , lineno=0, col_offset=0) for name in default_names]
ast_node = make_function(code, defaults=[], lineno=code.co_firstlineno)
return ast_node
|
[] |
Please provide a description of the function:def compile_func(ast_node, filename, globals, **defaults):
'''
Compile a function from an ast.FunctionDef instance.
:param ast_node: ast.FunctionDef instance
:param filename: path where function source can be found.
:param globals: will be used as func_globals
:return: A python function object
'''
function_name = ast_node.name
module = _ast.Module(body=[ast_node])
ctx = {'%s_default' % key : arg for key, arg in defaults.items()}
code = compile(module, filename, 'exec')
eval(code, globals, ctx)
function = ctx[function_name]
return function
|
[] |
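As a hedged illustration of how the two helpers above pair up, the following sketch decompiles a plain function into an ast.FunctionDef and compiles it back. The import path turicreate.meta.decompiler is an assumption, and the target function deliberately has no default arguments because decompile_func passes defaults=[].

# Sketch only: the module path below is an assumption.
from turicreate.meta.decompiler import decompile_func, compile_func

def add(a, b):
    return a + b

node = decompile_func(add)                          # ast.FunctionDef for 'add'
rebuilt = compile_func(node, '<decompiled>', globals())
print(rebuilt(2, 3))                                # prints 5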
Please provide a description of the function:def decompile_pyc(bin_pyc, output=sys.stdout):
'''
decompile apython pyc or pyo binary file.
:param bin_pyc: input file objects
:param output: output file objects
'''
from turicreate.meta.asttools import python_source
bin = bin_pyc.read()
code = marshal.loads(bin[8:])
mod_ast = make_module(code)
python_source(mod_ast, file=output)
|
[] |
Please provide a description of the function:def ParseInput(self, a_file):
input_lines = a_file.read().splitlines()
self.ParseLines(input_lines)
|
[
"Consumes input extracting definitions.\n\n Args:\n a_file: The file like stream to parse.\n\n Raises:\n PDDMError if there are any issues.\n "
] |
Please provide a description of the function:def ParseLines(self, input_lines):
current_macro = None
for line in input_lines:
if line.startswith('PDDM-'):
directive = line.split(' ', 1)[0]
if directive == 'PDDM-DEFINE':
name, args = self._ParseDefineLine(line)
if self._macros.get(name):
raise PDDMError('Attempt to redefine macro: "%s"' % line)
current_macro = self.MacroDefinition(name, args)
self._macros[name] = current_macro
continue
if directive == 'PDDM-DEFINE-END':
if not current_macro:
raise PDDMError('Got DEFINE-END directive without an active macro:'
' "%s"' % line)
current_macro = None
continue
raise PDDMError('Hit a line with an unknown directive: "%s"' % line)
if current_macro:
current_macro.AppendLine(line)
continue
# Allow blank lines between macro definitions.
if line.strip() == '':
continue
raise PDDMError('Hit a line that wasn\'t a directive and no open macro'
' definition: "%s"' % line)
|
[
"Parses list of lines.\n\n Args:\n input_lines: A list of strings of input to parse (no newlines on the\n strings).\n\n Raises:\n PDDMError if there are any issues.\n "
] |
Please provide a description of the function:def Expand(self, macro_ref_str):
match = _MACRO_RE.match(macro_ref_str)
if match is None or match.group(0) != macro_ref_str:
raise PDDMError('Failed to parse macro reference: "%s"' % macro_ref_str)
if match.group('name') not in self._macros:
raise PDDMError('No macro named "%s".' % match.group('name'))
return self._Expand(match, [], macro_ref_str)
|
[
"Expands the macro reference.\n\n Args:\n macro_ref_str: String of a macro reference (i.e. foo(a, b)).\n\n Returns:\n The text from the expansion.\n\n Raises:\n PDDMError if there are any issues.\n "
] |
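To make the directive flow above concrete, here is a hedged sketch that feeds macro definitions to the parser and expands a reference. It assumes the surrounding class is the MacroCollection used in ProcessContent below and that argument names in the body are substituted on expansion.

lines = [
    'PDDM-DEFINE GREET(NAME)',
    'Hello NAME!',
    'PDDM-DEFINE-END',
]
macros = MacroCollection()             # class name taken from ProcessContent below
macros.ParseLines(lines)
print(macros.Expand('GREET(World)'))   # expected to produce something like: Hello World!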
Please provide a description of the function:def ProcessContent(self, strip_expansion=False):
self._ParseFile()
if strip_expansion:
# Without a collection the expansions become blank, removing them.
collection = None
else:
collection = MacroCollection()
for section in self._sections:
section.BindMacroCollection(collection)
result = ''
for section in self._sections:
result += section.text
self._processed_content = result
|
[
"Processes the file contents."
] |
Please provide a description of the function:def convert(model, input_features, output_features):
if not(_HAS_SKLEARN):
raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
# Test the scikit-learn model
_sklearn_util.check_expected_type(model, StandardScaler)
_sklearn_util.check_fitted(model, lambda m: hasattr(m, 'mean_'))
_sklearn_util.check_fitted(model, lambda m: hasattr(m, 'scale_'))
# Set the interface params.
spec = _Model_pb2.Model()
spec.specificationVersion = SPECIFICATION_VERSION
spec = _set_transform_interface_params(spec, input_features, output_features)
# Set the parameters
tr_spec = spec.scaler
for x in model.mean_:
tr_spec.shiftValue.append(-x)
for x in model.scale_:
tr_spec.scaleValue.append(1.0 / x)
return _MLModel(spec)
|
[
"Convert a _imputer model to the protobuf spec.\n\n Parameters\n ----------\n model: Imputer\n A trained Imputer model.\n\n input_features: str\n Name of the input column.\n\n output_features: str\n Name of the output column.\n\n Returns\n -------\n model_spec: An object of type Model_pb.\n Protobuf representation of the model\n "
] |
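A hedged end-to-end sketch for this converter: fit a scikit-learn StandardScaler on a single column and convert it. The string form of input_features/output_features follows the docstring above; the column names are illustrative.

import numpy as np
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler().fit(np.array([[1.0], [2.0], [3.0]]))
# Assumes this convert() is importable; 'x' and 'x_scaled' are made-up names.
mlmodel = convert(scaler, input_features='x', output_features='x_scaled')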
Please provide a description of the function:def reset ():
global __all_attributes, __all_features, __implicit_features, __composite_properties
global __subfeature_from_value, __all_top_features, __free_features
global __all_subfeatures
# sets the default value of False for each valid attribute
for attr in VALID_ATTRIBUTES:
setattr(Feature, attr.replace("-", "_"), False)
# A map containing all features. The key is the feature name.
# The value is an instance of Feature class.
__all_features = {}
# All non-subfeatures.
__all_top_features = []
# Maps values to the corresponding implicit feature
__implicit_features = {}
# A map containing all composite properties. The key is a Property instance,
# and the value is a list of Property instances
__composite_properties = {}
# Maps a value to the corresponding subfeature name.
__subfeature_from_value = {}
# All free features
__free_features = []
__all_subfeatures = []
|
[
" Clear the module state. This is mainly for testing purposes.\n "
] |
Please provide a description of the function:def feature (name, values, attributes = []):
__validate_feature_attributes (name, attributes)
feature = Feature(name, [], attributes)
__all_features[name] = feature
# Temporary measure while we have not fully moved from 'gristed strings'
__all_features["<" + name + ">"] = feature
name = add_grist(name)
if 'subfeature' in attributes:
__all_subfeatures.append(name)
else:
__all_top_features.append(feature)
extend (name, values)
# FIXME: why this is needed.
if 'free' in attributes:
__free_features.append (name)
return feature
|
[
" Declares a new feature with the given name, values, and attributes.\n name: the feature name\n values: a sequence of the allowable values - may be extended later with feature.extend\n attributes: a sequence of the feature's attributes (e.g. implicit, free, propagated, ...)\n "
] |
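A hedged usage sketch for the declaration API above. The module alias b2.build.feature is an assumption, as is 'propagated' being a member of VALID_ATTRIBUTES.

from b2.build import feature   # module path is an assumption

feature.reset()
opt = feature.feature('optimization', ['off', 'speed', 'space'], ['propagated'])
print(feature.values('optimization'))   # the declared values: ['off', 'speed', 'space']
# The first listed value becomes the default (see extend() further down).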
Please provide a description of the function:def set_default (feature, value):
f = __all_features[feature]
bad_attribute = None
if f.free:
bad_attribute = "free"
elif f.optional:
bad_attribute = "optional"
if bad_attribute:
raise InvalidValue ("%s property %s cannot have a default" % (bad_attribute, f.name))
if value not in f.values:
raise InvalidValue ("The specified default value, '%s' is invalid.\n" % value + "allowed values are: %s" % f.values)
f.set_default(value)
|
[
" Sets the default value of the given feature, overriding any previous default.\n feature: the name of the feature\n value: the default value to assign\n "
] |
Please provide a description of the function:def defaults(features):
assert is_iterable_typed(features, Feature)
# FIXME: should merge feature and property modules.
from . import property
result = []
for f in features:
if not f.free and not f.optional and f.default:
result.append(property.Property(f, f.default))
return result
|
[
" Returns the default property values for the given features.\n "
] |
Please provide a description of the function:def valid (names):
if isinstance(names, str):
names = [names]
assert is_iterable_typed(names, basestring)
return all(name in __all_features for name in names)
|
[
" Returns true iff all elements of names are valid features.\n "
] |
Please provide a description of the function:def values (feature):
assert isinstance(feature, basestring)
validate_feature (feature)
return __all_features[feature].values
|
[
" Return the values of the given feature.\n "
] |
Please provide a description of the function:def is_implicit_value (value_string):
assert isinstance(value_string, basestring)
if value_string in __implicit_features:
return __implicit_features[value_string]
v = value_string.split('-')
if v[0] not in __implicit_features:
return False
feature = __implicit_features[v[0]]
for subvalue in (v[1:]):
if not __find_implied_subfeature(feature, subvalue, v[0]):
return False
return True
|
[
" Returns true iff 'value_string' is a value_string\n of an implicit feature.\n "
] |
Please provide a description of the function:def implied_feature (implicit_value):
assert isinstance(implicit_value, basestring)
components = implicit_value.split('-')
if components[0] not in __implicit_features:
raise InvalidValue ("'%s' is not a value of an implicit feature" % implicit_value)
return __implicit_features[components[0]]
|
[
" Returns the implicit feature associated with the given implicit value.\n "
] |
Please provide a description of the function:def validate_feature (name):
assert isinstance(name, basestring)
if name not in __all_features:
raise InvalidFeature ("'%s' is not a valid feature name" % name)
else:
return __all_features[name]
|
[
" Checks if all name is a valid feature. Otherwise, raises an exception.\n "
] |
Please provide a description of the function:def __expand_subfeatures_aux (property_, dont_validate = False):
from . import property # no __debug__ since Property is used elsewhere
assert isinstance(property_, property.Property)
assert isinstance(dont_validate, int) # matches bools
f = property_.feature
v = property_.value
if not dont_validate:
validate_value_string(f, v)
components = v.split ("-")
v = components[0]
result = [property.Property(f, components[0])]
subvalues = components[1:]
while len(subvalues) > 0:
subvalue = subvalues [0] # pop the head off of subvalues
subvalues = subvalues [1:]
subfeature = __find_implied_subfeature (f, subvalue, v)
# If no subfeature was found, reconstitute the value string and use that
if not subfeature:
return [property.Property(f, '-'.join(components))]
result.append(property.Property(subfeature, subvalue))
return result
|
[
" Helper for expand_subfeatures.\n Given a feature and value, or just a value corresponding to an\n implicit feature, returns a property set consisting of all component\n subfeatures and their values. For example:\n\n expand_subfeatures <toolset>gcc-2.95.2-linux-x86\n -> <toolset>gcc <toolset-version>2.95.2 <toolset-os>linux <toolset-cpu>x86\n equivalent to:\n expand_subfeatures gcc-2.95.2-linux-x86\n\n feature: The name of the feature, or empty if value corresponds to an implicit property\n value: The value of the feature.\n dont_validate: If True, no validation of value string will be done.\n "
] |
Please provide a description of the function:def expand_subfeatures(properties, dont_validate = False):
if __debug__:
from .property import Property
assert is_iterable_typed(properties, Property)
assert isinstance(dont_validate, int) # matches bools
result = []
for p in properties:
# Don't expand subfeatures in subfeatures
if p.feature.subfeature:
result.append (p)
else:
result.extend(__expand_subfeatures_aux (p, dont_validate))
return result
|
[
"\n Make all elements of properties corresponding to implicit features\n explicit, and express all subfeature values as separate properties\n in their own right. For example, the property\n\n gcc-2.95.2-linux-x86\n\n might expand to\n\n <toolset>gcc <toolset-version>2.95.2 <toolset-os>linux <toolset-cpu>x86\n\n properties: A sequence with elements of the form\n <feature>value-string or just value-string in the\n case of implicit features.\n : dont_validate: If True, no validation of value string will be done.\n "
] |
Please provide a description of the function:def extend (name, values):
assert isinstance(name, basestring)
assert is_iterable_typed(values, basestring)
name = add_grist (name)
__validate_feature (name)
feature = __all_features [name]
if feature.implicit:
for v in values:
if v in __implicit_features:
raise BaseException ("'%s' is already associated with the feature '%s'" % (v, __implicit_features [v]))
__implicit_features[v] = feature
if values and not feature.values and not(feature.free or feature.optional):
# This is the first value specified for this feature,
# take it as default value
feature.set_default(values[0])
feature.add_values(values)
|
[
" Adds the given values to the given feature.\n "
] |
Please provide a description of the function:def validate_value_string (f, value_string):
assert isinstance(f, Feature)
assert isinstance(value_string, basestring)
if f.free or value_string in f.values:
return
values = [value_string]
if f.subfeatures:
if not value_string in f.values and \
not value_string in f.subfeatures:
values = value_string.split('-')
# An empty value is allowed for optional features
if not values[0] in f.values and \
(values[0] or not f.optional):
raise InvalidValue ("'%s' is not a known value of feature '%s'\nlegal values: '%s'" % (values [0], f.name, f.values))
for v in values [1:]:
# this will validate any subfeature values in value-string
implied_subfeature(f, v, values[0])
|
[
" Checks that value-string is a valid value-string for the given feature.\n "
] |
Please provide a description of the function:def subfeature (feature_name, value_string, subfeature, subvalues, attributes = []):
parent_feature = validate_feature (feature_name)
# Add grist to the subfeature name if a value-string was supplied
subfeature_name = __get_subfeature_name (subfeature, value_string)
if subfeature_name in __all_features[feature_name].subfeatures:
message = "'%s' already declared as a subfeature of '%s'" % (subfeature, feature_name)
message += " specific to '%s'" % value_string
raise BaseException (message)
# First declare the subfeature as a feature in its own right
f = feature (feature_name + '-' + subfeature_name, subvalues, attributes + ['subfeature'])
f.set_parent(parent_feature, value_string)
parent_feature.add_subfeature(f)
# Now make sure the subfeature values are known.
extend_subfeature (feature_name, value_string, subfeature, subvalues)
|
[
" Declares a subfeature.\n feature_name: Root feature that is not a subfeature.\n value_string: An optional value-string specifying which feature or\n subfeature values this subfeature is specific to,\n if any.\n subfeature: The name of the subfeature being declared.\n subvalues: The allowed values of this subfeature.\n attributes: The attributes of the subfeature.\n "
] |
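Continuing in the same hedged vein, this sketch declares an implicit 'toolset' feature, adds a version subfeature specific to gcc, and checks that the combined value string is recognised. The module alias is again an assumption.

from b2.build import feature   # module path is an assumption

feature.reset()
feature.feature('toolset', ['gcc', 'msvc'], ['implicit', 'propagated'])
feature.subfeature('toolset', 'gcc', 'version', ['4.9', '5.1'])
print(feature.is_implicit_value('gcc-4.9'))   # True: 'gcc' plus its version subvalue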
Please provide a description of the function:def compose (composite_property_s, component_properties_s):
from . import property
component_properties_s = to_seq (component_properties_s)
composite_property = property.create_from_string(composite_property_s)
f = composite_property.feature
if len(component_properties_s) > 0 and isinstance(component_properties_s[0], property.Property):
component_properties = component_properties_s
else:
component_properties = [property.create_from_string(p) for p in component_properties_s]
if not f.composite:
raise BaseException ("'%s' is not a composite feature" % f)
if property in __composite_properties:
raise BaseException ('components of "%s" already set: %s' % (composite_property, str (__composite_properties[composite_property])))
if composite_property in component_properties:
raise BaseException ('composite property "%s" cannot have itself as a component' % composite_property)
__composite_properties[composite_property] = component_properties
|
[
" Sets the components of the given composite property.\n\n All parameters are <feature>value strings\n "
] |
Please provide a description of the function:def get_values (feature, properties):
if feature[0] != '<':
feature = '<' + feature + '>'
result = []
for p in properties:
if get_grist (p) == feature:
result.append (replace_grist (p, ''))
return result
|
[
" Returns all values of the given feature specified by the given property set.\n "
] |
Please provide a description of the function:def expand_composites (properties):
if __debug__:
from .property import Property
assert is_iterable_typed(properties, Property)
explicit_features = set(p.feature for p in properties)
result = []
# now expand composite features
for p in properties:
expanded = expand_composite(p)
for x in expanded:
if not x in result:
f = x.feature
if f.free:
result.append (x)
elif not x in properties: # x is the result of expansion
if not f in explicit_features: # not explicitly-specified
if any(r.feature == f for r in result):
raise FeatureConflict(
"expansions of composite features result in "
"conflicting values for '%s'\nvalues: '%s'\none contributing composite property was '%s'" %
(f.name, [r.value for r in result if r.feature == f] + [x.value], p))
else:
result.append (x)
elif any(r.feature == f for r in result):
raise FeatureConflict ("explicitly-specified values of non-free feature '%s' conflict\n"
"existing values: '%s'\nvalue from expanding '%s': '%s'" % (f,
[r.value for r in result if r.feature == f], p, x.value))
else:
result.append (x)
return result
|
[
" Expand all composite properties in the set so that all components\n are explicitly expressed.\n "
] |
Please provide a description of the function:def is_subfeature_of (parent_property, f):
if __debug__:
from .property import Property
assert isinstance(parent_property, Property)
assert isinstance(f, Feature)
if not f.subfeature:
return False
p = f.parent
if not p:
return False
parent_feature = p[0]
parent_value = p[1]
if parent_feature != parent_property.feature:
return False
if parent_value and parent_value != parent_property.value:
return False
return True
|
[
" Return true iff f is an ordinary subfeature of the parent_property's\n feature, or if f is a subfeature of the parent_property's feature\n specific to the parent_property's value.\n "
] |
Please provide a description of the function:def __is_subproperty_of (parent_property, p):
if __debug__:
from .property import Property
assert isinstance(parent_property, Property)
assert isinstance(p, Property)
return is_subfeature_of (parent_property, p.feature)
|
[
" As is_subfeature_of, for subproperties.\n "
] |
Please provide a description of the function:def expand (properties):
if __debug__:
from .property import Property
assert is_iterable_typed(properties, Property)
expanded = expand_subfeatures(properties)
return expand_composites (expanded)
|
[
" Given a property set which may consist of composite and implicit\n properties and combined subfeature values, returns an expanded,\n normalized property set with all implicit features expressed\n explicitly, all subfeature values individually expressed, and all\n components of composite properties expanded. Non-free features\n directly expressed in the input properties cause any values of\n those features due to composite feature expansion to be dropped. If\n two values of a given non-free feature are directly expressed in the\n input, an error is issued.\n "
] |
Please provide a description of the function:def add_defaults (properties):
if __debug__:
from .property import Property
assert is_iterable_typed(properties, Property)
# create a copy since properties will be modified
result = list(properties)
# We don't add default for conditional properties. We don't want
# <variant>debug:<define>DEBUG to be taken as a specified value for <variant>
handled_features = set(p.feature for p in properties if not p.condition)
missing_top = [f for f in __all_top_features if not f in handled_features]
more = defaults(missing_top)
result.extend(more)
handled_features.update(p.feature for p in more)
# Add defaults for subfeatures of features which are present
for p in result[:]:
subfeatures = [s for s in p.feature.subfeatures if not s in handled_features]
more = defaults(__select_subfeatures(p, subfeatures))
handled_features.update(h.feature for h in more)
result.extend(more)
return result
|
[
" Given a set of properties, add default values for features not\n represented in the set.\n Note: if there's there's ordinary feature F1 and composite feature\n F2, which includes some value for F1, and both feature have default values,\n then the default value of F1 will be added, not the value in F2. This might\n not be right idea: consider\n\n feature variant : debug ... ;\n <variant>debug : .... <runtime-debugging>on\n feature <runtime-debugging> : off on ;\n\n Here, when adding default for an empty property set, we'll get\n\n <variant>debug <runtime_debugging>off\n\n and that's kind of strange.\n "
] |
Please provide a description of the function:def minimize (properties):
if __debug__:
from .property import Property
assert is_iterable_typed(properties, Property)
# remove properties implied by composite features
components = []
component_features = set()
for property in properties:
if property in __composite_properties:
cs = __composite_properties[property]
components.extend(cs)
component_features.update(c.feature for c in cs)
properties = b2.util.set.difference (properties, components)
# handle subfeatures and implicit features
# move subfeatures to the end of the list
properties = [p for p in properties if not p.feature.subfeature] +\
[p for p in properties if p.feature.subfeature]
result = []
while properties:
p = properties[0]
f = p.feature
# locate all subproperties of $(x[1]) in the property set
subproperties = [x for x in properties if is_subfeature_of(p, x.feature)]
if subproperties:
# reconstitute the joined property name
subproperties.sort ()
joined = b2.build.property.Property(p.feature, p.value + '-' + '-'.join ([sp.value for sp in subproperties]))
result.append(joined)
properties = b2.util.set.difference(properties[1:], subproperties)
else:
# eliminate properties whose value is equal to feature's
# default and which are not symmetric and which do not
# contradict values implied by composite properties.
# since all component properties of composites in the set
# have been eliminated, any remaining property whose
# feature is the same as a component of a composite in the
# set must have a non-redundant value.
if p.value != f.default or f.symmetric or f in component_features:
result.append (p)
properties = properties[1:]
return result
|
[
" Given an expanded property set, eliminate all redundancy: properties\n which are elements of other (composite) properties in the set will\n be eliminated. Non-symmetric properties equal to default values will be\n eliminated, unless the override a value from some composite property.\n Implicit properties will be expressed without feature\n grist, and sub-property values will be expressed as elements joined\n to the corresponding main property.\n "
] |
Please provide a description of the function:def split (properties):
assert isinstance(properties, basestring)
def split_one (properties):
pieces = re.split (__re_slash_or_backslash, properties)
result = []
for x in pieces:
if not get_grist (x) and len (result) > 0 and get_grist (result [-1]):
result = result [0:-1] + [ result [-1] + '/' + x ]
else:
result.append (x)
return result
if isinstance (properties, str):
return split_one (properties)
result = []
for p in properties:
result += split_one (p)
return result
|
[
" Given a property-set of the form\n v1/v2/...vN-1/<fN>vN/<fN+1>vN+1/...<fM>vM\n\n Returns\n v1 v2 ... vN-1 <fN>vN <fN+1>vN+1 ... <fM>vM\n\n Note that vN...vM may contain slashes. This is resilient to the\n substitution of backslashes for slashes, since Jam, unbidden,\n sometimes swaps slash direction on NT.\n "
] |
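Two hedged examples of the string form this helper accepts, following the docstring above:

print(split('gcc-4.9/<define>DEBUG/<optimization>speed'))
# -> ['gcc-4.9', '<define>DEBUG', '<optimization>speed']
print(split('<include>/usr/local/include'))
# -> ['<include>/usr/local/include']   (slashes after a gristed feature are kept)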
Please provide a description of the function:def compress_subproperties (properties):
from .property import Property
assert is_iterable_typed(properties, Property)
result = []
matched_subs = set()
all_subs = set()
for p in properties:
f = p.feature
if not f.subfeature:
subs = [x for x in properties if is_subfeature_of(p, x.feature)]
if subs:
matched_subs.update(subs)
subvalues = '-'.join (sub.value for sub in subs)
result.append(Property(
p.feature, p.value + '-' + subvalues,
p.condition))
else:
result.append(p)
else:
all_subs.add(p)
# TODO: these variables are used just for debugging. What's the overhead?
assert all_subs == matched_subs
return result
|
[
" Combine all subproperties into their parent properties\n\n Requires: for every subproperty, there is a parent property. All\n features are explicitly expressed.\n\n This rule probably shouldn't be needed, but\n build-request.expand-no-defaults is being abused for unintended\n purposes and it needs help\n "
] |
Please provide a description of the function:def __select_subfeatures (parent_property, features):
if __debug__:
from .property import Property
assert isinstance(parent_property, Property)
assert is_iterable_typed(features, Feature)
return [f for f in features if is_subfeature_of (parent_property, f)]
|
[
" Given a property, return the subset of features consisting of all\n ordinary subfeatures of the property's feature, and all specific\n subfeatures of the property's feature which are conditional on the\n property's value.\n "
] |
Please provide a description of the function:def _get_interpretation_function(interpretation, dtype):
type_string = dtype.__name__
name = "%s__%s" % (interpretation, type_string)
global _interpretations
if not hasattr(_interpretations, name):
raise ValueError("No transform available for type '%s' with interpretation '%s'."
% (type_string, interpretation))
return getattr(_interpretations, name)
|
[
"\n Retrieves the interpretation function used.\n "
] |
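The lookup convention is simply '<interpretation>__<dtype name>', so a hedged sketch of resolving and applying one of the interpretation methods shown further down looks like this (the column name is illustrative):

# 'categorical' with an int column resolves to _interpretations.categorical__int.
func = _get_interpretation_function('categorical', int)
transformers = func('user_id', output_column_prefix=None)   # a list of transformer objects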
Please provide a description of the function:def _get_interpretation_description_and_output_type(interpretation, dtype):
type_string = dtype.__name__
name = "%s__%s" % (interpretation, type_string)
if not hasattr(_interpretations_class, name):
raise ValueError("No transform available for type '%s' with interpretation '%s'."
% (type_string, interpretation))
# Need unbound method to get the attributes
func = getattr(_interpretations_class, name)
return func.description, func.output_type
|
[
"\n Returns the description and output type for a given interpretation.\n "
] |
Please provide a description of the function:def _get_embeddable_interpretation_doc(indent = 0):
output_rows = []
# Pull out the doc string and put it in a table.
for name in sorted(dir(_interpretations)):
if name.startswith("_") or "__" not in name:
continue
interpretation, type_str = name.split("__")
func = getattr(_interpretations, name)
output_rows.append("%s (%s type):" % (interpretation, type_str))
output_rows += [(" " + line) for line in _textwrap.dedent(func.__doc__).strip().split("\n")]
output_rows.append("")
return "\n".join(" "*indent + line for line in output_rows)
|
[
"\n Returns a list of the available interpretations and what they do.\n\n If indent is specified, then the entire doc string is indented by that amount.\n "
] |
Please provide a description of the function:def _load_version(cls, unpickler, version):
state, _exclude, _features = unpickler.load()
features = state['features']
excluded_features = state['excluded_features']
model = cls.__new__(cls)
model._setup()
model.__proxy__.update(state)
model._exclude = _exclude
model._features = _features
return model
|
[
"\n A function to load a previously saved SentenceSplitter instance.\n\n Parameters\n ----------\n unpickler : GLUnpickler\n A GLUnpickler file handler.\n\n version : int\n Version number maintained by the class writer.\n "
] |
Please provide a description of the function:def fit(self, data):
_raise_error_if_not_sframe(data, "data")
fitted_state = {}
feature_columns = _internal_utils.get_column_names(data, self._exclude, self._features)
if not feature_columns:
raise RuntimeError("No valid feature columns specified in transformation.")
fitted_state['features'] = feature_columns
fitted_state['fitted'] = True
self.__proxy__.update(fitted_state)
return self
|
[
"\n Fits the transformer using the given data.\n "
] |
Please provide a description of the function:def transform(self, data):
if not self._get("fitted"):
raise RuntimeError("`transform` called before `fit` or `fit_transform`.")
data = data.copy()
output_column_prefix = self._get("output_column_prefix")
if output_column_prefix is None:
prefix = ""
else:
prefix = output_column_prefix + '.'
transform_function = self._get("transform_function")
feature_columns = self._get("features")
feature_columns = _internal_utils.select_feature_subset(data, feature_columns)
for f in feature_columns:
data[prefix + f] = transform_function(data[f])
return data
|
[
"\n Transforms the data.\n "
] |
Please provide a description of the function:def short_text__str(self, column_name, output_column_prefix):
from ._ngram_counter import NGramCounter
from ._tfidf import TFIDF
return [NGramCounter(features=[column_name],
n = 3,
method = "character",
output_column_prefix = output_column_prefix),
TFIDF(features=[column_name],
min_document_frequency=0.01,
max_document_frequency=0.5,
output_column_prefix = output_column_prefix)]
|
[
"\n Transforms short text into a dictionary of TFIDF-weighted 3-gram\n character counts.\n "
] |
Please provide a description of the function:def categorical__int(self, column_name, output_column_prefix):
return [_ColumnFunctionTransformation(
features = [column_name],
output_column_prefix = output_column_prefix,
transform_function = lambda col: col.astype(str),
transform_function_name = "astype(str)")]
|
[
"\n Interprets an integer column as a categorical variable.\n "
] |
Please provide a description of the function:def _setup_from_data(self, data):
fitted_state = {}
_raise_error_if_not_of_type(data, [_SFrame])
feature_columns = _internal_utils.get_column_names(data, self._exclude, self._features)
if not feature_columns:
raise RuntimeError("No valid feature columns specified in transformation.")
fitted_state["features"] = feature_columns
################################################################################
# Helper functions
def get_valid_interpretations():
return list(n.split("__")[0] for n in dir(_interpretations) if not n.startswith("_"))
################################################################################
# Check input data.
if not isinstance(data, _SFrame):
raise TypeError("`data` parameter must be an SFrame.")
all_col_names = set(feature_columns)
column_interpretations = self._get("column_interpretations").copy()
# Make sure all the interpretations are valid.
for k, v in column_interpretations.items():
if k not in all_col_names:
raise ValueError("Column '%s' in column_interpretations, but not found in `data`." % k)
# Get the automatic column interpretations.
for col_name in feature_columns:
if col_name not in column_interpretations:
n = column_interpretations[col_name] = infer_column_interpretation(data[col_name])
if n.startswith("unknown"):
raise ValueError("Interpretation inference failed on column '%s'; %s"
% (col_name, n[len("unknown"):].strip()))
# Now, build up the feature transforms.
transforms = {}
input_types = {}
output_column_prefix = self._get("output_column_prefix")
assert output_column_prefix is None or type(output_column_prefix) is str
tr_chain = []
for col_name in feature_columns:
in_type = input_types[col_name] = data[col_name].dtype
intr_func = _get_interpretation_function(column_interpretations[col_name], in_type)
tr_list = intr_func(col_name, output_column_prefix)
transforms[col_name] = tr_list
tr_chain += tr_list
fitted_state["transform_chain"] = _TransformerChain(tr_chain)
fitted_state["transforms"] = transforms
fitted_state["input_types"] = input_types
fitted_state["column_interpretations"] = column_interpretations
self.__proxy__.update(fitted_state)
|
[
"\n Sets up the content transforms.\n "
] |
Please provide a description of the function:def fit(self, data):
self._setup_from_data(data)
self.transform_chain.fit(data)
self.__proxy__.update({"fitted" : True})
return self
|
[
"\n Fits a transformer using the SFrame `data`.\n\n Parameters\n ----------\n data : SFrame\n The data used to fit the transformer.\n\n Returns\n -------\n self (A fitted object)\n\n See Also\n --------\n transform, fit_transform\n "
] |
Please provide a description of the function:def fit_transform(self, data):
self._setup_from_data(data)
ret = self.transform_chain.fit_transform(data)
self.__proxy__.update({"fitted" : True})
return ret
|
[
"\n Fits and transforms the SFrame `data` using a fitted model.\n\n Parameters\n ----------\n data : SFrame\n The data to be transformed.\n\n Returns\n -------\n A transformed SFrame.\n\n Returns\n -------\n out: SFrame\n A transformed SFrame.\n\n See Also\n --------\n fit, transform\n "
] |
Please provide a description of the function:def transform(self, data):
if self.transform_chain is None:
raise RuntimeError("`transform()` method called before `fit` or `fit_transform`.")
return self.transform_chain.transform(data)
|
[
"\n Transform the SFrame `data` using a fitted model.\n\n Parameters\n ----------\n data : SFrame\n The data to be transformed.\n\n Returns\n -------\n A transformed SFrame.\n\n Returns\n -------\n out: SFrame\n A transformed SFrame.\n\n See Also\n --------\n fit, fit_transform\n "
] |
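A hedged usage sketch for the fit/transform pair above. The public class name (AutoVectorizer under turicreate.toolkits.feature_engineering) is an assumption, and the SFrame contents are illustrative.

import turicreate as tc
from turicreate.toolkits.feature_engineering import AutoVectorizer   # class name is an assumption

sf = tc.SFrame({'text': ['good movie', 'bad movie'], 'rating': [5, 1]})
vec = AutoVectorizer(features=['text', 'rating'])
vec.fit(sf)                # infers column interpretations and builds the transform chain
out = vec.transform(sf)    # returns a transformed SFrame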
Please provide a description of the function:def _get_summary_struct(self):
sections = []
fields = []
_features = _precomputed_field(_internal_utils.pretty_print_list(self.features))
_exclude = _precomputed_field(_internal_utils.pretty_print_list(self.excluded_features))
header_fields = [("Features", "features"),
("Excluded Features", "excluded_features")]
sections.append("Model Fields")
fields.append(header_fields)
if self.user_column_interpretations:
sections.append("User Specified Interpretations")
fields.append(list(sorted(self._get("user_column_interpretations").items())))
column_interpretations = self._get("column_interpretations")
features = self._get("features")
if self._get("fitted") and features is not None:
n_rows = len(features)
transform_info = [None]*n_rows
for i, f in enumerate(features):
interpretation = column_interpretations[f]
input_type = self.input_types[f]
description, output_type = _get_interpretation_description_and_output_type(
interpretation, input_type)
transform_info[i] = (f, input_type.__name__, interpretation, description, output_type.__name__)
transform_table = _SFrame()
transform_table["Column"] = [t[0] for t in transform_info]
transform_table["Type"] = [t[1] for t in transform_info]
transform_table["Interpretation"] = [t[2] for t in transform_info]
transform_table["Transforms"] = [t[3] for t in transform_info]
transform_table["Output Type"] = [t[4] for t in transform_info]
fields[-1].append(transform_table)
return fields, sections
|
[
"\n Returns a structured description of the model, including (where relevant)\n the schema of the training data, description of the training data,\n training statistics, and model hyperparameters.\n\n Returns\n -------\n sections : list (of list of tuples)\n A list of summary sections.\n Each section is a list.\n Each item in a section list is a tuple of the form:\n ('<feature>','<field>')\n section_titles: list\n A list of section titles.\n The order matches that of the 'sections' object.\n "
] |
Please provide a description of the function:def _save_impl(self, pickler):
pickler.dump( (self.__proxy__.state, self._exclude, self._features) )
|
[
"\n Save the model as a directory, which can be loaded with the\n :py:func:`~turicreate.load_model` method.\n\n Parameters\n ----------\n pickler : GLPickler\n An opened GLPickle archive (Do not close the archive).\n\n See Also\n --------\n turicreate.load_model\n\n Examples\n --------\n >>> model.save('my_model_file')\n >>> loaded_model = turicreate.load_model('my_model_file')\n "
] |
Please provide a description of the function:def CreateMock(self, class_to_mock):
new_mock = MockObject(class_to_mock)
self._mock_objects.append(new_mock)
return new_mock
|
[
"Create a new mock object.\n\n Args:\n # class_to_mock: the class to be mocked\n class_to_mock: class\n\n Returns:\n MockObject that can be used as the class_to_mock would be.\n "
] |
Please provide a description of the function:def StubOutWithMock(self, obj, attr_name, use_mock_anything=False):
attr_to_replace = getattr(obj, attr_name)
if type(attr_to_replace) in self._USE_MOCK_OBJECT and not use_mock_anything:
stub = self.CreateMock(attr_to_replace)
else:
stub = self.CreateMockAnything()
self.stubs.Set(obj, attr_name, stub)
|
[
"Replace a method, attribute, etc. with a Mock.\n\n This will replace a class or module with a MockObject, and everything else\n (method, function, etc) with a MockAnything. This can be overridden to\n always use a MockAnything by setting use_mock_anything to True.\n\n Args:\n obj: A Python object (class, module, instance, callable).\n attr_name: str. The name of the attribute to replace with a mock.\n use_mock_anything: bool. True if a MockAnything should be used regardless\n of the type of attribute.\n "
] |
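A hedged sketch of the record/replay cycle these helpers support. The entry points not shown in this excerpt (Mox, ReplayAll, VerifyAll, UnsetStubs, AndReturn) are assumed to follow the usual Mox API.

import os
import mox   # assumes the surrounding module is importable as 'mox'

m = mox.Mox()
m.StubOutWithMock(os.path, 'exists')              # os.path.exists is now a mock
os.path.exists('/tmp/somefile').AndReturn(True)   # record the expected call
m.ReplayAll()
assert os.path.exists('/tmp/somefile')            # replay: the stub returns True
m.VerifyAll()
m.UnsetStubs()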
Please provide a description of the function:def _Verify(self):
# If the list of expected calls is not empty, raise an exception
if self._expected_calls_queue:
# The last MultipleTimesGroup is not popped from the queue.
if (len(self._expected_calls_queue) == 1 and
isinstance(self._expected_calls_queue[0], MultipleTimesGroup) and
self._expected_calls_queue[0].IsSatisfied()):
pass
else:
raise ExpectedMethodCallsError(self._expected_calls_queue)
|
[
"Verify that all of the expected calls have been made.\n\n Raises:\n ExpectedMethodCallsError: if there are still more method calls in the\n expected queue.\n "
] |
Please provide a description of the function:def _VerifyMethodCall(self):
expected = self._PopNextMethod()
# Loop here, because we might have a MethodGroup followed by another
# group.
while isinstance(expected, MethodGroup):
expected, method = expected.MethodCalled(self)
if method is not None:
return method
# This is a mock method, so just check equality.
if expected != self:
raise UnexpectedMethodCallError(self, expected)
return expected
|
[
"Verify the called method is expected.\n\n This can be an ordered method, or part of an unordered set.\n\n Returns:\n The expected mock method.\n\n Raises:\n UnexpectedMethodCall if the method called was not expected.\n "
] |
Please provide a description of the function:def GetPossibleGroup(self):
# Remove this method from the tail of the queue so we can add it to a group.
this_method = self._call_queue.pop()
assert this_method == self
# Determine if the tail of the queue is a group, or just a regular ordered
# mock method.
group = None
try:
group = self._call_queue[-1]
except IndexError:
pass
return group
|
[
"Returns a possible group from the end of the call queue or None if no\n other methods are on the stack.\n "
] |
Please provide a description of the function:def _CheckAndCreateNewGroup(self, group_name, group_class):
group = self.GetPossibleGroup()
# If this is a group, and it is the correct group, add the method.
if isinstance(group, group_class) and group.group_name() == group_name:
group.AddMethod(self)
return self
# Create a new group and add the method.
new_group = group_class(group_name)
new_group.AddMethod(self)
self._call_queue.append(new_group)
return self
|
[
"Checks if the last method (a possible group) is an instance of our\n group_class. Adds the current method to this group or creates a new one.\n\n Args:\n\n group_name: the name of the group.\n group_class: the class used to create instance of this new group\n "
] |
Please provide a description of the function:def equals(self, rhs):
try:
return isinstance(rhs, self._class_name)
except TypeError:
# Check raw types if there was a type error. This is helpful for
# things like cStringIO.StringIO.
return type(rhs) == type(self._class_name)
|
[
"Check to see if the RHS is an instance of class_name.\n\n Args:\n # rhs: the right hand side of the test\n rhs: object\n\n Returns:\n bool\n "
] |
Please provide a description of the function:def equals(self, rhs):
try:
return round(rhs-self._float_value, self._places) == 0
except TypeError:
# This is probably because either float_value or rhs is not a number.
return False
|
[
"Check to see if RHS is almost equal to float_value\n\n Args:\n rhs: the value to compare to float_value\n\n Returns:\n bool\n "
] |
Please provide a description of the function:def equals(self, rhs):
try:
return rhs[self._key] == self._value
except Exception:
return False
|
[
"Check whether the given key/value pair is in the rhs dict.\n\n Returns:\n bool\n "
] |
Please provide a description of the function:def equals(self, actual_seq):
try:
expected = dict([(element, None) for element in self._expected_seq])
actual = dict([(element, None) for element in actual_seq])
except TypeError:
# Fall back to slower list-compare if any of the objects are unhashable.
expected = list(self._expected_seq)
actual = list(actual_seq)
expected.sort()
actual.sort()
return expected == actual
|
[
"Check to see whether actual_seq has same elements as expected_seq.\n\n Args:\n actual_seq: sequence\n\n Returns:\n bool\n "
] |
Please provide a description of the function:def equals(self, rhs):
for comparator in self._comparators:
if comparator.equals(rhs):
return True
return False
|
[
"Checks whether any Comparator is equal to rhs.\n\n Args:\n # rhs: can be anything\n\n Returns:\n bool\n "
] |
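A hedged sketch of the comparators whose equals() methods appear above; their constructors are not shown in this excerpt, so the signatures used here are assumptions.

checker = Or(IsA(int), IsA(float))
print(checker.equals(3))       # True  (matches IsA(int))
print(checker.equals(2.5))     # True  (matches IsA(float))
print(checker.equals('3'))     # False

print(SameElementsAs([1, 2, 3]).equals([3, 2, 1]))                   # True
print(ContainsKeyValue('user', 'admin').equals({'user': 'admin'}))   # True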
Please provide a description of the function:def MethodCalled(self, mock_method):
# Check to see if this method exists, and if so, remove it from the set
# and return it.
for method in self._methods:
if method == mock_method:
# Remove the called mock_method instead of the method in the group.
# The called method will match any comparators when equality is checked
# during removal. The method in the group could pass a comparator to
# another comparator during the equality check.
self._methods.remove(mock_method)
# If this group is not empty, put it back at the head of the queue.
if not self.IsSatisfied():
mock_method._call_queue.appendleft(self)
return self, method
raise UnexpectedMethodCallError(mock_method, self)
|
[
"Remove a method call from the group.\n\n If the method is not in the set, an UnexpectedMethodCallError will be\n raised.\n\n Args:\n mock_method: a mock method that should be equal to a method in the group.\n\n Returns:\n The mock method from the group\n\n Raises:\n UnexpectedMethodCallError if the mock_method was not in the group.\n "
] |
Please provide a description of the function:def MethodCalled(self, mock_method):
# Check to see if this method exists, and if so add it to the set of
# called methods.
for method in self._methods:
if method == mock_method:
self._methods_called.add(mock_method)
# Always put this group back on top of the queue, because we don't know
# when we are done.
mock_method._call_queue.appendleft(self)
return self, method
if self.IsSatisfied():
next_method = mock_method._PopNextMethod()
return next_method, None
else:
raise UnexpectedMethodCallError(mock_method, self)
|
[
"Remove a method call from the group.\n\n If the method is not in the set, an UnexpectedMethodCallError will be\n raised.\n\n Args:\n mock_method: a mock method that should be equal to a method in the group.\n\n Returns:\n The mock method from the group\n\n Raises:\n UnexpectedMethodCallError if the mock_method was not in the group.\n "
] |
Please provide a description of the function:def IsSatisfied(self):
# NOTE(psycho): We can't use the simple set difference here because we want
# to match different parameters which are considered the same e.g. IsA(str)
# and some string. This solution is O(n^2) but n should be small.
tmp = self._methods.copy()
for called in self._methods_called:
for expected in tmp:
if called == expected:
tmp.remove(expected)
if not tmp:
return True
break
return False
|
[
"Return True if all methods in this group are called at least once."
] |
Please provide a description of the function:def convert(model, input_features, output_features):
_INTERMEDIATE_FEATURE_NAME = "__sparse_vector_features__"
n_dimensions = len(model.feature_names_)
input_features = process_or_validate_features(input_features)
# Ensure that the output_features are also solid.
output_features = process_or_validate_features(output_features, n_dimensions)
# The DictVectorizer in the framework outputs a sparse dictionary
# of index to value due to other considerations, but we are expecting
# the output of this to be a dense feature vector. To make that happen,
# put a feature_vectorizer immediately after the dict vectorizer.
pline = Pipeline(input_features, output_features)
# Set the basic model parameters of the dict vectorizer component.
dv_spec = _Model_pb2.Model()
dv_spec.specificationVersion = SPECIFICATION_VERSION
# Set up the dict vectorizer parameters
tr_spec = dv_spec.dictVectorizer
is_str = None
for feature_name in model.feature_names_:
if isinstance(feature_name, _six.string_types):
if is_str == False:
raise ValueError("Mapping of DictVectorizer mixes int and str types.")
tr_spec.stringToIndex.vector.append(feature_name)
is_str = True
if isinstance(feature_name, _six.integer_types):
if is_str == True:
raise ValueError("Mapping of DictVectorizer mixes int and str types.")
tr_spec.int64ToIndex.vector.append(feature_name)
is_str = False
intermediate_features = [(_INTERMEDIATE_FEATURE_NAME,
datatypes.Dictionary(key_type = int))]
# Set the interface for the dict vectorizer with the input and the
# intermediate output
set_transform_interface_params(
dv_spec, input_features, intermediate_features)
pline.add_model(dv_spec)
# Follow the dict vectorizer by a feature_vectorizer to change the sparse
# output layer into a dense vector as expected.
fvec, _num_out_dim = create_feature_vectorizer(intermediate_features,
output_features[0][0], {"__sparse_vector_features__" : n_dimensions})
pline.add_model(fvec)
return _MLModel(pline.spec)
|
[
"Convert a _imputer model to the protobuf spec.\n\n Parameters\n ----------\n model: Imputer\n A trained Imputer model.\n\n input_features: str\n Name of the input column.\n\n output_features: str\n Name of the output column.\n\n Returns\n -------\n model_spec: An object of type Model_pb.\n Protobuf representation of the model\n "
] |
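A hedged sketch for this converter: fit a scikit-learn DictVectorizer and convert it. The feature names and the exact feature-type spellings accepted by process_or_validate_features are assumptions here.

from sklearn.feature_extraction import DictVectorizer

dv = DictVectorizer(sparse=False)
dv.fit([{'a': 1.0, 'b': 2.0}, {'a': 3.0}])
mlmodel = convert(dv,
                  input_features=[('features', datatypes.Dictionary(key_type=str))],
                  output_features=[('vector', datatypes.Array(2))])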
Please provide a description of the function:def set_classifier_interface_params(spec, features, class_labels,
model_accessor_for_class_labels, output_features = None):
# Normalize the features list.
features = _fm.process_or_validate_features(features)
if class_labels is None:
raise ValueError("List of class labels must be provided.")
n_classes = len(class_labels)
output_features = _fm.process_or_validate_classifier_output_features(output_features, class_labels)
if len(output_features) == 1:
predicted_class_output, pred_cl_type = output_features[0]
score_output = None
elif len(output_features) == 2:
predicted_class_output, pred_cl_type = output_features[0]
score_output, score_output_type = output_features[1]
else:
raise ValueError("Provided output classes for a classifier must be "
"a list of features, predicted class and (optionally) class_score.")
spec.description.predictedFeatureName = predicted_class_output
# Validate the type of the predicted class output.
if not (pred_cl_type == datatypes.Int64() or pred_cl_type == datatypes.String()):
raise ValueError("Provided predicted class output type not Int64 or String (%s)."
% repr(pred_cl_type))
if score_output is not None:
if not isinstance(score_output_type, datatypes.Dictionary):
raise ValueError("Provided class score output type not a Dictionary (%s)."
% repr(score_output_type))
if score_output_type.key_type != pred_cl_type:
raise ValueError(("Provided class score output (%s) key_type (%s) does not "
"match type of class prediction (%s).")
% (score_output, repr(score_output_type.key_type), repr(pred_cl_type)))
spec.description.predictedProbabilitiesName = score_output
# add input
for index, (cur_input_name, input_type) in enumerate(features):
input_ = spec.description.input.add()
input_.name = cur_input_name
datatypes._set_datatype(input_.type, input_type)
# add output
for index, (cur_output_name, output_type) in enumerate(output_features):
output_ = spec.description.output.add()
output_.name = cur_output_name
datatypes._set_datatype(output_.type, output_type)
# Worry about the class labels
if pred_cl_type == datatypes.String():
try:
for c in class_labels:
getattr(spec, model_accessor_for_class_labels).stringClassLabels.vector.append(str(c))
# Not all the classifiers have class labels; in particular the pipeline
# classifier. Thus it's not an error if we can't actually set them.
except AttributeError:
pass
else:
for c in class_labels:
conv_error = False
try:
if not (int(c) == c):
conv_error = True
except:
conv_error = True
if conv_error:
raise TypeError(("Cannot cast '%s' class to an int type " % str(c))
+ "(class type determined by type of first class).")
try:
getattr(spec, model_accessor_for_class_labels).int64ClassLabels.vector.append(int(c))
# Not all the classifiers have class labels; in particular the pipeline
# classifier. Thus it's not an error if we can't actually set them.
except AttributeError:
break
# And we are done!
return spec
|
[
"\n Common utilities to set the regression interface params.\n "
] |
Please provide a description of the function:def set_regressor_interface_params(spec, features, output_features):
if output_features is None:
output_features = [("predicted_class", datatypes.Double())]
else:
output_features = _fm.process_or_validate_features(output_features, 1)
if len(output_features) != 1:
raise ValueError("Provided output features for a regressor must be "
"one Double feature.")
if output_features[0][1] != datatypes.Double():
raise ValueError("Output type of a regressor must be a Double.")
prediction_name = output_features[0][0]
spec.description.predictedFeatureName = prediction_name
# Normalize the features list.
features = _fm.process_or_validate_features(features)
# add input and output features
for cur_input_name, feature_type in features:
input_ = spec.description.input.add()
input_.name = cur_input_name
datatypes._set_datatype(input_.type, feature_type)
output_ = spec.description.output.add()
output_.name = prediction_name
datatypes._set_datatype(output_.type, 'Double')
return spec
|
[
" Common utilities to set the regressor interface params.\n "
] |
Please provide a description of the function:def set_transform_interface_params(spec, input_features, output_features, are_optional = False):
input_features = _fm.process_or_validate_features(input_features)
output_features = _fm.process_or_validate_features(output_features)
# Add input and output features
for (fname, ftype) in input_features:
input_ = spec.description.input.add()
input_.name = fname
datatypes._set_datatype(input_.type, ftype)
if are_optional:
input_.type.isOptional = are_optional
for (fname, ftype) in output_features:
output_ = spec.description.output.add()
output_.name = fname
datatypes._set_datatype(output_.type, ftype)
return spec
|
[
" Common utilities to set transform interface params.\n "
] |
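A hedged sketch of wiring a transform spec's inputs and outputs with this helper, mirroring how the converters above use it:

spec = _Model_pb2.Model()
spec.specificationVersion = SPECIFICATION_VERSION
set_transform_interface_params(
    spec,
    input_features=[('x', datatypes.Double())],
    output_features=[('x_scaled', datatypes.Double())])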