Please provide a description of the function:
def parse_rev_args(receive_msg):
    global trainloader
    global testloader
    global net

    # Loading Data
    logger.debug("Preparing data..")
    (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
    y_train = to_categorical(y_train, 10)
    y_test = to_categorical(y_test, 10)
    x_train = x_train.reshape(x_train.shape + (1,)).astype("float32")
    x_test = x_test.reshape(x_test.shape + (1,)).astype("float32")
    x_train /= 255.0
    x_test /= 255.0
    trainloader = (x_train, y_train)
    testloader = (x_test, y_test)

    # Model
    logger.debug("Building model..")
    net = build_graph_from_json(receive_msg)

    # parallel model
    try:
        available_devices = os.environ["CUDA_VISIBLE_DEVICES"]
        gpus = len(available_devices.split(","))
        if gpus > 1:
            net = multi_gpu_model(net, gpus)
    except KeyError:
        logger.debug("parallel model is not supported in this config setting")

    if args.optimizer == "SGD":
        optimizer = SGD(lr=args.learning_rate, momentum=0.9, decay=args.weight_decay)
    if args.optimizer == "Adadelta":
        optimizer = Adadelta(lr=args.learning_rate, decay=args.weight_decay)
    if args.optimizer == "Adagrad":
        optimizer = Adagrad(lr=args.learning_rate, decay=args.weight_decay)
    if args.optimizer == "Adam":
        optimizer = Adam(lr=args.learning_rate, decay=args.weight_decay)
    if args.optimizer == "Adamax":
        optimizer = Adamax(lr=args.learning_rate, decay=args.weight_decay)
    if args.optimizer == "RMSprop":
        optimizer = RMSprop(lr=args.learning_rate, decay=args.weight_decay)

    # Compile the model
    net.compile(
        loss="categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"]
    )
    return 0
[ " parse reveive msgs to global variable\n " ]
Please provide a description of the function:
def train_eval():
    global trainloader
    global testloader
    global net

    (x_train, y_train) = trainloader
    (x_test, y_test) = testloader

    # train procedure
    net.fit(
        x=x_train,
        y=y_train,
        batch_size=args.batch_size,
        validation_data=(x_test, y_test),
        epochs=args.epochs,
        shuffle=True,
        callbacks=[
            SendMetrics(),
            EarlyStopping(min_delta=0.001, patience=10),
            TensorBoard(log_dir=TENSORBOARD_DIR),
        ],
    )

    # trial report final acc to tuner
    _, acc = net.evaluate(x_test, y_test)
    logger.debug("Final result is: %.3f", acc)
    nni.report_final_result(acc)
[ " train and eval the model\n " ]
Please provide a description of the function:
def on_epoch_end(self, epoch, logs=None):
    if logs is None:
        logs = dict()
    logger.debug(logs)
    nni.report_intermediate_result(logs["val_acc"])
[ "\n Run on end of each epoch\n " ]
Please provide a description of the function:
def create_bracket_parameter_id(brackets_id, brackets_curr_decay, increased_id=-1):
    if increased_id == -1:
        increased_id = str(create_parameter_id())
    params_id = '_'.join([str(brackets_id),
                          str(brackets_curr_decay),
                          increased_id])
    return params_id
[ "Create a full id for a specific bracket's hyperparameter configuration\n \n Parameters\n ----------\n brackets_id: int\n brackets id\n brackets_curr_decay:\n brackets curr decay\n increased_id: int\n increased id\n\n Returns\n -------\n int\n params id\n " ]
Please provide a description of the function:
def json2paramater(ss_spec, random_state):
    if isinstance(ss_spec, dict):
        if '_type' in ss_spec.keys():
            _type = ss_spec['_type']
            _value = ss_spec['_value']
            if _type == 'choice':
                _index = random_state.randint(len(_value))
                chosen_params = json2paramater(ss_spec['_value'][_index], random_state)
            else:
                chosen_params = eval('parameter_expressions.' +  # pylint: disable=eval-used
                                     _type)(*(_value + [random_state]))
        else:
            chosen_params = dict()
            for key in ss_spec.keys():
                chosen_params[key] = json2paramater(ss_spec[key], random_state)
    elif isinstance(ss_spec, list):
        chosen_params = list()
        for _, subspec in enumerate(ss_spec):
            chosen_params.append(json2paramater(subspec, random_state))
    else:
        chosen_params = copy.deepcopy(ss_spec)
    return chosen_params
[ "Randomly generate values for hyperparameters from hyperparameter space i.e., x.\n \n Parameters\n ----------\n ss_spec:\n hyperparameter space\n random_state:\n random operator to generate random values\n\n Returns\n -------\n Parameter:\n Parameters in this experiment\n " ]
Please provide a description of the function:
def get_n_r(self):
    return math.floor(self.n / self.eta**self.i + _epsilon), \
           math.floor(self.r * self.eta**self.i + _epsilon)
[ "return the values of n and r for the next round" ]
Please provide a description of the function:
def increase_i(self):
    self.i += 1
    if self.i > self.bracket_id:
        self.no_more_trial = True
[ "i means the ith round. Increase i by 1" ]
Please provide a description of the function:
def set_config_perf(self, i, parameter_id, seq, value):
    if parameter_id in self.configs_perf[i]:
        if self.configs_perf[i][parameter_id][0] < seq:
            self.configs_perf[i][parameter_id] = [seq, value]
    else:
        self.configs_perf[i][parameter_id] = [seq, value]
[ "update trial's latest result with its sequence number, e.g., epoch number or batch number\n \n Parameters\n ----------\n i: int\n the ith round\n parameter_id: int\n the id of the trial/parameter\n seq: int\n sequence number, e.g., epoch number or batch number\n value: int\n latest result with sequence number seq\n\n Returns\n -------\n None\n " ]
Please provide a description of the function:
def inform_trial_end(self, i):
    global _KEY  # pylint: disable=global-statement
    self.num_finished_configs[i] += 1
    _logger.debug('bracket id: %d, round: %d %d, finished: %d, all: %d',
                  self.bracket_id, self.i, i,
                  self.num_finished_configs[i], self.num_configs_to_run[i])
    if self.num_finished_configs[i] >= self.num_configs_to_run[i] \
            and self.no_more_trial is False:
        # choose candidate configs from finished configs to run in the next round
        assert self.i == i + 1
        this_round_perf = self.configs_perf[i]
        if self.optimize_mode is OptimizeMode.Maximize:
            sorted_perf = sorted(this_round_perf.items(),
                                 key=lambda kv: kv[1][1], reverse=True)  # reverse
        else:
            sorted_perf = sorted(this_round_perf.items(), key=lambda kv: kv[1][1])
        _logger.debug('bracket %s next round %s, sorted hyper configs: %s',
                      self.bracket_id, self.i, sorted_perf)
        next_n, next_r = self.get_n_r()
        _logger.debug('bracket %s next round %s, next_n=%d, next_r=%d',
                      self.bracket_id, self.i, next_n, next_r)
        hyper_configs = dict()
        for k in range(next_n):
            params_id = sorted_perf[k][0]
            params = self.hyper_configs[i][params_id]
            params[_KEY] = next_r  # modify r
            # generate new id
            increased_id = params_id.split('_')[-1]
            new_id = create_bracket_parameter_id(self.bracket_id, self.i, increased_id)
            hyper_configs[new_id] = params
        self._record_hyper_configs(hyper_configs)
        return [[key, value] for key, value in hyper_configs.items()]
    return None
[ "If the trial is finished and the corresponding round (i.e., i) has all its trials finished,\n it will choose the top k trials for the next round (i.e., i+1)\n\n Parameters\n ----------\n i: int\n the ith round\n " ]
Please provide a description of the function:
def get_hyperparameter_configurations(self, num, r, searchspace_json, random_state):  # pylint: disable=invalid-name
    global _KEY  # pylint: disable=global-statement
    assert self.i == 0
    hyperparameter_configs = dict()
    for _ in range(num):
        params_id = create_bracket_parameter_id(self.bracket_id, self.i)
        params = json2paramater(searchspace_json, random_state)
        params[_KEY] = r
        hyperparameter_configs[params_id] = params
    self._record_hyper_configs(hyperparameter_configs)
    return [[key, value] for key, value in hyperparameter_configs.items()]
[ "Randomly generate num hyperparameter configurations from search space\n\n Parameters\n ----------\n num: int\n the number of hyperparameter configurations\n \n Returns\n -------\n list\n a list of hyperparameter configurations. Format: [[key1, value1], [key2, value2], ...]\n " ]
Please provide a description of the function:
def _record_hyper_configs(self, hyper_configs):
    self.hyper_configs.append(hyper_configs)
    self.configs_perf.append(dict())
    self.num_finished_configs.append(0)
    self.num_configs_to_run.append(len(hyper_configs))
    self.increase_i()
[ "after generating one round of hyperconfigs, this function records the generated hyperconfigs,\n creates a dict to record the performance when those hyperconifgs are running, set the number of finished configs\n in this round to be 0, and increase the round number.\n\n Parameters\n ----------\n hyper_configs: list\n the generated hyperconfigs\n " ]
Please provide a description of the function:
def _request_one_trial_job(self):
    if not self.generated_hyper_configs:
        if self.curr_s < 0:
            self.curr_s = self.s_max
        _logger.debug('create a new bracket, self.curr_s=%d', self.curr_s)
        self.brackets[self.curr_s] = Bracket(self.curr_s, self.s_max, self.eta,
                                             self.R, self.optimize_mode)
        next_n, next_r = self.brackets[self.curr_s].get_n_r()
        _logger.debug('new bracket, next_n=%d, next_r=%d', next_n, next_r)
        assert self.searchspace_json is not None and self.random_state is not None
        generated_hyper_configs = self.brackets[self.curr_s].get_hyperparameter_configurations(
            next_n, next_r, self.searchspace_json, self.random_state)
        self.generated_hyper_configs = generated_hyper_configs.copy()
        self.curr_s -= 1

    assert self.generated_hyper_configs
    params = self.generated_hyper_configs.pop()
    ret = {
        'parameter_id': params[0],
        'parameter_source': 'algorithm',
        'parameters': params[1]
    }
    send(CommandType.NewTrialJob, json_tricks.dumps(ret))
[ "get one trial job, i.e., one hyperparameter configuration." ]
Please provide a description of the function:
def handle_update_search_space(self, data):
    self.searchspace_json = data
    self.random_state = np.random.RandomState()
[ "data: JSON object, which is search space\n \n Parameters\n ----------\n data: int\n number of trial jobs\n " ]
Please provide a description of the function:
def handle_trial_end(self, data):
    hyper_params = json_tricks.loads(data['hyper_params'])
    bracket_id, i, _ = hyper_params['parameter_id'].split('_')
    hyper_configs = self.brackets[int(bracket_id)].inform_trial_end(int(i))
    if hyper_configs is not None:
        _logger.debug('bracket %s next round %s, hyper_configs: %s',
                      bracket_id, i, hyper_configs)
        self.generated_hyper_configs = self.generated_hyper_configs + hyper_configs
        for _ in range(self.credit):
            if not self.generated_hyper_configs:
                break
            params = self.generated_hyper_configs.pop()
            ret = {
                'parameter_id': params[0],
                'parameter_source': 'algorithm',
                'parameters': params[1]
            }
            send(CommandType.NewTrialJob, json_tricks.dumps(ret))
            self.credit -= 1
[ "\n Parameters\n ----------\n data: dict()\n it has three keys: trial_job_id, event, hyper_params\n trial_job_id: the id generated by training service\n event: the job's state\n hyper_params: the hyperparameters (a string) generated and returned by tuner\n " ]
Please provide a description of the function:
def handle_report_metric_data(self, data):
    value = extract_scalar_reward(data['value'])
    bracket_id, i, _ = data['parameter_id'].split('_')
    bracket_id = int(bracket_id)
    if data['type'] == 'FINAL':
        # sys.maxsize indicates this value is from FINAL metric data, because
        # data['sequence'] from FINAL metric and PERIODICAL metric are
        # independent, thus, not comparable.
        self.brackets[bracket_id].set_config_perf(
            int(i), data['parameter_id'], sys.maxsize, value)
        self.completed_hyper_configs.append(data)
    elif data['type'] == 'PERIODICAL':
        self.brackets[bracket_id].set_config_perf(
            int(i), data['parameter_id'], data['sequence'], value)
    else:
        raise ValueError('Data type not supported: {}'.format(data['type']))
[ "\n Parameters\n ----------\n data: \n it is an object which has keys 'parameter_id', 'value', 'trial_job_id', 'type', 'sequence'.\n \n Raises\n ------\n ValueError\n Data type not supported\n " ]
Please provide a description of the function:
def generate_parameters(self, parameter_id):
    if len(self.population) <= 0:
        logger.debug("the length of population is lower than or equal to zero.")
        raise Exception('The population is empty')
    pos = -1
    for i in range(len(self.population)):
        if self.population[i].result is None:
            pos = i
            break
    if pos != -1:
        indiv = copy.deepcopy(self.population[pos])
        self.population.pop(pos)
        temp = json.loads(graph_dumps(indiv.config))
    else:
        random.shuffle(self.population)
        if self.population[0].result < self.population[1].result:
            self.population[0] = self.population[1]
        indiv = copy.deepcopy(self.population[0])
        self.population.pop(1)
        indiv.mutation()
        graph = indiv.config
        temp = json.loads(graph_dumps(graph))
    logger.debug('generate_parameter return value is:')
    logger.debug(temp)
    return temp
[ "Returns a set of trial graph config, as a serializable object.\n parameter_id : int\n " ]
Please provide a description of the function:def receive_trial_result(self, parameter_id, parameters, value): ''' Record an observation of the objective function parameter_id : int parameters : dict of parameters value: final metrics of the trial, including reward ''' reward = extract_scalar_reward(value) if self.optimize_mode is OptimizeMode.Minimize: reward = -reward logger.debug('receive trial result is:\n') logger.debug(str(parameters)) logger.debug(str(reward)) indiv = Individual(graph_loads(parameters), result=reward) self.population.append(indiv) return
[]
Please provide a description of the function:def generate(self, model_len=None, model_width=None): if model_len is None: model_len = Constant.MODEL_LEN if model_width is None: model_width = Constant.MODEL_WIDTH pooling_len = int(model_len / 4) graph = Graph(self.input_shape, False) temp_input_channel = self.input_shape[-1] output_node_id = 0 stride = 1 for i in range(model_len): output_node_id = graph.add_layer(StubReLU(), output_node_id) output_node_id = graph.add_layer( self.batch_norm(graph.node_list[output_node_id].shape[-1]), output_node_id ) output_node_id = graph.add_layer( self.conv(temp_input_channel, model_width, kernel_size=3, stride=stride), output_node_id, ) temp_input_channel = model_width if pooling_len == 0 or ((i + 1) % pooling_len == 0 and i != model_len - 1): output_node_id = graph.add_layer(self.pooling(), output_node_id) output_node_id = graph.add_layer(self.global_avg_pooling(), output_node_id) output_node_id = graph.add_layer( self.dropout(Constant.CONV_DROPOUT_RATE), output_node_id ) output_node_id = graph.add_layer( StubDense(graph.node_list[output_node_id].shape[0], model_width), output_node_id, ) output_node_id = graph.add_layer(StubReLU(), output_node_id) graph.add_layer(StubDense(model_width, self.n_output_node), output_node_id) return graph
[ "Generates a CNN.\n Args:\n model_len: An integer. Number of convolutional layers.\n model_width: An integer. Number of filters for the convolutional layers.\n Returns:\n An instance of the class Graph. Represents the neural architecture graph of the generated model.\n " ]
Please provide a description of the function:
def generate(self, model_len=None, model_width=None):
    if model_len is None:
        model_len = Constant.MODEL_LEN
    if model_width is None:
        model_width = Constant.MODEL_WIDTH
    if isinstance(model_width, list) and not len(model_width) == model_len:
        raise ValueError("The length of 'model_width' does not match 'model_len'")
    elif isinstance(model_width, int):
        model_width = [model_width] * model_len

    graph = Graph(self.input_shape, False)
    output_node_id = 0
    n_nodes_prev_layer = self.input_shape[0]
    for width in model_width:
        output_node_id = graph.add_layer(
            StubDense(n_nodes_prev_layer, width), output_node_id
        )
        output_node_id = graph.add_layer(
            StubDropout1d(Constant.MLP_DROPOUT_RATE), output_node_id
        )
        output_node_id = graph.add_layer(StubReLU(), output_node_id)
        n_nodes_prev_layer = width

    graph.add_layer(StubDense(n_nodes_prev_layer, self.n_output_node), output_node_id)
    return graph
[ "Generates a Multi-Layer Perceptron.\n Args:\n model_len: An integer. Number of hidden layers.\n model_width: An integer or a list of integers of length `model_len`. If it is a list, it represents the\n number of nodes in each hidden layer. If it is an integer, all hidden layers have nodes equal to this\n value.\n Returns:\n An instance of the class Graph. Represents the neural architecture graph of the generated model.\n " ]
Please provide a description of the function:def generate_search_space(code_dir): search_space = {} if code_dir.endswith(slash): code_dir = code_dir[:-1] for subdir, _, files in os.walk(code_dir): # generate module name from path if subdir == code_dir: package = '' else: assert subdir.startswith(code_dir + slash), subdir prefix_len = len(code_dir) + 1 package = subdir[prefix_len:].replace(slash, '.') + '.' for file_name in files: if file_name.endswith('.py'): path = os.path.join(subdir, file_name) module = package + file_name[:-3] search_space.update(_generate_file_search_space(path, module)) return search_space
[ "Generate search space from Python source code.\n Return a serializable search space object.\n code_dir: directory path of source files (str)\n " ]
Please provide a description of the function:def expand_annotations(src_dir, dst_dir): if src_dir[-1] == slash: src_dir = src_dir[:-1] if dst_dir[-1] == slash: dst_dir = dst_dir[:-1] annotated = False for src_subdir, dirs, files in os.walk(src_dir): assert src_subdir.startswith(src_dir) dst_subdir = src_subdir.replace(src_dir, dst_dir, 1) os.makedirs(dst_subdir, exist_ok=True) for file_name in files: src_path = os.path.join(src_subdir, file_name) dst_path = os.path.join(dst_subdir, file_name) if file_name.endswith('.py'): annotated |= _expand_file_annotations(src_path, dst_path) else: shutil.copyfile(src_path, dst_path) for dir_name in dirs: os.makedirs(os.path.join(dst_subdir, dir_name), exist_ok=True) return dst_dir if annotated else src_dir
[ "Expand annotations in user code.\n Return dst_dir if annotation detected; return src_dir if not.\n src_dir: directory path of user code (str)\n dst_dir: directory to place generated files (str)\n " ]
Please provide a description of the function:
def gen_send_stdout_url(ip, port):
    '''Generate send stdout url'''
    return '{0}:{1}{2}{3}/{4}/{5}'.format(BASE_URL.format(ip), port, API_ROOT_URL,
                                          STDOUT_API, NNI_EXP_ID, NNI_TRIAL_JOB_ID)
[]
Please provide a description of the function:
def gen_send_version_url(ip, port):
    '''Generate send version url'''
    return '{0}:{1}{2}{3}/{4}/{5}'.format(BASE_URL.format(ip), port, API_ROOT_URL,
                                          VERSION_API, NNI_EXP_ID, NNI_TRIAL_JOB_ID)
[]
Please provide a description of the function:
def validate_digit(value, start, end):
    '''validate if a digit is valid'''
    if not str(value).isdigit() or int(value) < start or int(value) > end:
        raise ValueError('%s must be a digit from %s to %s' % (value, start, end))
[]
Please provide a description of the function:
def validate_dispatcher(args):
    '''validate if the dispatcher of the experiment supports importing data'''
    nni_config = Config(get_config_filename(args)).get_config('experimentConfig')
    if nni_config.get('tuner') and nni_config['tuner'].get('builtinTunerName'):
        dispatcher_name = nni_config['tuner']['builtinTunerName']
    elif nni_config.get('advisor') and nni_config['advisor'].get('builtinAdvisorName'):
        dispatcher_name = nni_config['advisor']['builtinAdvisorName']
    else:
        # otherwise it should be a customized one
        return
    if dispatcher_name not in TUNERS_SUPPORTING_IMPORT_DATA:
        if dispatcher_name in TUNERS_NO_NEED_TO_IMPORT_DATA:
            print_warning("There is no need to import data for %s" % dispatcher_name)
            exit(0)
        else:
            print_error("%s does not support importing additional data" % dispatcher_name)
            exit(1)
[]
Please provide a description of the function:
def load_search_space(path):
    '''load search space content'''
    content = json.dumps(get_json_content(path))
    if not content:
        raise ValueError('searchSpace file should not be empty')
    return content
[]
Please provide a description of the function:
def update_experiment_profile(args, key, value):
    '''call restful server to update experiment profile'''
    nni_config = Config(get_config_filename(args))
    rest_port = nni_config.get_config('restServerPort')
    running, _ = check_rest_server_quick(rest_port)
    if running:
        response = rest_get(experiment_url(rest_port), REST_TIME_OUT)
        if response and check_response(response):
            experiment_profile = json.loads(response.text)
            experiment_profile['params'][key] = value
            response = rest_put(experiment_url(rest_port) + get_query_type(key),
                                json.dumps(experiment_profile), REST_TIME_OUT)
            if response and check_response(response):
                return response
    else:
        print_error('Restful server is not running...')
    return None
[]
Please provide a description of the function:
def import_data(args):
    '''import additional data to the experiment'''
    validate_file(args.filename)
    validate_dispatcher(args)
    content = load_search_space(args.filename)

    args.port = get_experiment_port(args)
    if args.port is not None:
        if import_data_to_restful_server(args, content):
            pass
        else:
            print_error('Import data failed!')
[]
Please provide a description of the function:
def import_data_to_restful_server(args, content):
    '''call restful server to import data to the experiment'''
    nni_config = Config(get_config_filename(args))
    rest_port = nni_config.get_config('restServerPort')
    running, _ = check_rest_server_quick(rest_port)
    if running:
        response = rest_post(import_data_url(rest_port), content, REST_TIME_OUT)
        if response and check_response(response):
            return response
    else:
        print_error('Restful server is not running...')
    return None
[]
Please provide a description of the function:
def setType(key, type):
    '''check key type'''
    return And(type, error=SCHEMA_TYPE_ERROR % (key, type.__name__))
[]
Please provide a description of the function:
def setChoice(key, *args):
    '''check choice'''
    return And(lambda n: n in args, error=SCHEMA_RANGE_ERROR % (key, str(args)))
[]
Please provide a description of the function:
def setNumberRange(key, keyType, start, end):
    '''check number range'''
    return And(
        And(keyType, error=SCHEMA_TYPE_ERROR % (key, keyType.__name__)),
        And(lambda n: start <= n <= end,
            error=SCHEMA_RANGE_ERROR % (key, '(%s,%s)' % (start, end))),
    )
[]
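A brief usage sketch with the schema package these helpers wrap ('port' and its bounds are example values):

    from schema import Schema, SchemaError

    port_rule = Schema(setNumberRange('port', int, 1024, 65535))
    port_rule.validate(8080)       # passes
    try:
        port_rule.validate(80)     # out of range
    except SchemaError as error:
        print(error)               # the SCHEMA_RANGE_ERROR message for 'port'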
Please provide a description of the function:def keras_dropout(layer, rate): '''keras dropout layer. ''' from keras import layers input_dim = len(layer.input.shape) if input_dim == 2: return layers.SpatialDropout1D(rate) elif input_dim == 3: return layers.SpatialDropout2D(rate) elif input_dim == 4: return layers.SpatialDropout3D(rate) else: return layers.Dropout(rate)
[]
Please provide a description of the function:def to_real_keras_layer(layer): ''' real keras layer. ''' from keras import layers if is_layer(layer, "Dense"): return layers.Dense(layer.units, input_shape=(layer.input_units,)) if is_layer(layer, "Conv"): return layers.Conv2D( layer.filters, layer.kernel_size, input_shape=layer.input.shape, padding="same", ) # padding if is_layer(layer, "Pooling"): return layers.MaxPool2D(2) if is_layer(layer, "BatchNormalization"): return layers.BatchNormalization(input_shape=layer.input.shape) if is_layer(layer, "Concatenate"): return layers.Concatenate() if is_layer(layer, "Add"): return layers.Add() if is_layer(layer, "Dropout"): return keras_dropout(layer, layer.rate) if is_layer(layer, "ReLU"): return layers.Activation("relu") if is_layer(layer, "Softmax"): return layers.Activation("softmax") if is_layer(layer, "Flatten"): return layers.Flatten() if is_layer(layer, "GlobalAveragePooling"): return layers.GlobalAveragePooling2D()
[]
Please provide a description of the function:def is_layer(layer, layer_type): '''judge the layer type. Returns: boolean -- True or False ''' if layer_type == "Input": return isinstance(layer, StubInput) elif layer_type == "Conv": return isinstance(layer, StubConv) elif layer_type == "Dense": return isinstance(layer, (StubDense,)) elif layer_type == "BatchNormalization": return isinstance(layer, (StubBatchNormalization,)) elif layer_type == "Concatenate": return isinstance(layer, (StubConcatenate,)) elif layer_type == "Add": return isinstance(layer, (StubAdd,)) elif layer_type == "Pooling": return isinstance(layer, StubPooling) elif layer_type == "Dropout": return isinstance(layer, (StubDropout,)) elif layer_type == "Softmax": return isinstance(layer, (StubSoftmax,)) elif layer_type == "ReLU": return isinstance(layer, (StubReLU,)) elif layer_type == "Flatten": return isinstance(layer, (StubFlatten,)) elif layer_type == "GlobalAveragePooling": return isinstance(layer, StubGlobalPooling)
[]
Please provide a description of the function:def layer_description_extractor(layer, node_to_id): '''get layer description. ''' layer_input = layer.input layer_output = layer.output if layer_input is not None: if isinstance(layer_input, Iterable): layer_input = list(map(lambda x: node_to_id[x], layer_input)) else: layer_input = node_to_id[layer_input] if layer_output is not None: layer_output = node_to_id[layer_output] if isinstance(layer, StubConv): return ( type(layer).__name__, layer_input, layer_output, layer.input_channel, layer.filters, layer.kernel_size, layer.stride, layer.padding, ) elif isinstance(layer, (StubDense,)): return [ type(layer).__name__, layer_input, layer_output, layer.input_units, layer.units, ] elif isinstance(layer, (StubBatchNormalization,)): return (type(layer).__name__, layer_input, layer_output, layer.num_features) elif isinstance(layer, (StubDropout,)): return (type(layer).__name__, layer_input, layer_output, layer.rate) elif isinstance(layer, StubPooling): return ( type(layer).__name__, layer_input, layer_output, layer.kernel_size, layer.stride, layer.padding, ) else: return (type(layer).__name__, layer_input, layer_output)
[]
Please provide a description of the function:def layer_description_builder(layer_information, id_to_node): '''build layer from description. ''' # pylint: disable=W0123 layer_type = layer_information[0] layer_input_ids = layer_information[1] if isinstance(layer_input_ids, Iterable): layer_input = list(map(lambda x: id_to_node[x], layer_input_ids)) else: layer_input = id_to_node[layer_input_ids] layer_output = id_to_node[layer_information[2]] if layer_type.startswith("StubConv"): input_channel = layer_information[3] filters = layer_information[4] kernel_size = layer_information[5] stride = layer_information[6] return eval(layer_type)( input_channel, filters, kernel_size, stride, layer_input, layer_output ) elif layer_type.startswith("StubDense"): input_units = layer_information[3] units = layer_information[4] return eval(layer_type)(input_units, units, layer_input, layer_output) elif layer_type.startswith("StubBatchNormalization"): num_features = layer_information[3] return eval(layer_type)(num_features, layer_input, layer_output) elif layer_type.startswith("StubDropout"): rate = layer_information[3] return eval(layer_type)(rate, layer_input, layer_output) elif layer_type.startswith("StubPooling"): kernel_size = layer_information[3] stride = layer_information[4] padding = layer_information[5] return eval(layer_type)(kernel_size, stride, padding, layer_input, layer_output) else: return eval(layer_type)(layer_input, layer_output)
[]
Please provide a description of the function:
def layer_width(layer):
    '''get layer width.'''
    if is_layer(layer, "Dense"):
        return layer.units
    if is_layer(layer, "Conv"):
        return layer.filters
    raise TypeError("The layer should be either Dense or Conv layer.")
[]
Please provide a description of the function:def define_params(self): ''' Define parameters. ''' input_dim = self.input_dim hidden_dim = self.hidden_dim prefix = self.name self.w_matrix = tf.Variable(tf.random_normal([input_dim, 3 * hidden_dim], stddev=0.1), name='/'.join([prefix, 'W'])) self.U = tf.Variable(tf.random_normal([hidden_dim, 3 * hidden_dim], stddev=0.1), name='/'.join([prefix, 'U'])) self.bias = tf.Variable(tf.random_normal([1, 3 * hidden_dim], stddev=0.1), name='/'.join([prefix, 'b'])) return self
[]
Please provide a description of the function:def build(self, x, h, mask=None): ''' Build the GRU cell. ''' xw = tf.split(tf.matmul(x, self.w_matrix) + self.bias, 3, 1) hu = tf.split(tf.matmul(h, self.U), 3, 1) r = tf.sigmoid(xw[0] + hu[0]) z = tf.sigmoid(xw[1] + hu[1]) h1 = tf.tanh(xw[2] + r * hu[2]) next_h = h1 * (1 - z) + h * z if mask is not None: next_h = next_h * mask + h * (1 - mask) return next_h
[]
Please provide a description of the function:def build_sequence(self, xs, masks, init, is_left_to_right): ''' Build GRU sequence. ''' states = [] last = init if is_left_to_right: for i, xs_i in enumerate(xs): h = self.build(xs_i, last, masks[i]) states.append(h) last = h else: for i in range(len(xs) - 1, -1, -1): h = self.build(xs[i], last, masks[i]) states.insert(0, h) last = h return states
[]
Please provide a description of the function:
def conv2d(x_input, w_matrix):
    return tf.nn.conv2d(x_input, w_matrix, strides=[1, 1, 1, 1], padding='SAME')
[ "conv2d returns a 2d convolution layer with full stride." ]
Please provide a description of the function:
def max_pool(x_input, pool_size):
    return tf.nn.max_pool(x_input, ksize=[1, pool_size, pool_size, 1],
                          strides=[1, pool_size, pool_size, 1], padding='SAME')
[ "max_pool downsamples a feature map by 2X." ]
Please provide a description of the function:def main(params): ''' Main function, build mnist network, run and send result to NNI. ''' # Import data mnist = download_mnist_retry(params['data_dir']) print('Mnist download data done.') logger.debug('Mnist download data done.') # Create the model # Build the graph for the deep net mnist_network = MnistNetwork(channel_1_num=params['channel_1_num'], channel_2_num=params['channel_2_num'], pool_size=params['pool_size']) mnist_network.build_network() logger.debug('Mnist build network done.') # Write log graph_location = tempfile.mkdtemp() logger.debug('Saving graph to: %s', graph_location) train_writer = tf.summary.FileWriter(graph_location) train_writer.add_graph(tf.get_default_graph()) test_acc = 0.0 with tf.Session() as sess: sess.run(tf.global_variables_initializer()) batch_num = nni.choice(50, 250, 500, name='batch_num') for i in range(batch_num): batch = mnist.train.next_batch(batch_num) dropout_rate = nni.choice(1, 5, name='dropout_rate') mnist_network.train_step.run(feed_dict={mnist_network.images: batch[0], mnist_network.labels: batch[1], mnist_network.keep_prob: dropout_rate} ) if i % 100 == 0: test_acc = mnist_network.accuracy.eval( feed_dict={mnist_network.images: mnist.test.images, mnist_network.labels: mnist.test.labels, mnist_network.keep_prob: 1.0}) nni.report_intermediate_result(test_acc) logger.debug('test accuracy %g', test_acc) logger.debug('Pipe send intermediate result done.') test_acc = mnist_network.accuracy.eval( feed_dict={mnist_network.images: mnist.test.images, mnist_network.labels: mnist.test.labels, mnist_network.keep_prob: 1.0}) nni.report_final_result(test_acc) logger.debug('Final result is %g', test_acc) logger.debug('Send final result done.')
[]
Please provide a description of the function:
def build_net(self, is_training):
    cfg = self.cfg
    with tf.device('/cpu:0'):
        word_embed = tf.get_variable(
            name='word_embed', initializer=self.embed, dtype=tf.float32, trainable=False)
        char_embed = tf.get_variable(name='char_embed',
                                     shape=[cfg.char_vcb_size, cfg.char_embed_dim],
                                     dtype=tf.float32)

    # [query_length, batch_size]
    self.query_word = tf.placeholder(dtype=tf.int32, shape=[None, None], name='query_word')
    self.query_mask = tf.placeholder(dtype=tf.float32, shape=[None, None], name='query_mask')
    # [batch_size]
    self.query_lengths = tf.placeholder(dtype=tf.int32, shape=[None], name='query_lengths')

    # [passage_length, batch_size]
    self.passage_word = tf.placeholder(dtype=tf.int32, shape=[None, None], name='passage_word')
    self.passage_mask = tf.placeholder(dtype=tf.float32, shape=[None, None], name='passage_mask')
    # [batch_size]
    self.passage_lengths = tf.placeholder(dtype=tf.int32, shape=[None], name='passage_lengths')

    if is_training:
        self.answer_begin = tf.placeholder(dtype=tf.int32, shape=[None], name='answer_begin')
        self.answer_end = tf.placeholder(dtype=tf.int32, shape=[None], name='answer_end')

    self.query_char_ids = tf.placeholder(dtype=tf.int32,
                                         shape=[self.cfg.max_char_length, None, None],
                                         name='query_char_ids')
    # sequence_length, batch_size
    self.query_char_lengths = tf.placeholder(dtype=tf.int32, shape=[None, None],
                                             name='query_char_lengths')
    self.passage_char_ids = tf.placeholder(dtype=tf.int32,
                                           shape=[self.cfg.max_char_length, None, None],
                                           name='passage_char_ids')
    # sequence_length, batch_size
    self.passage_char_lengths = tf.placeholder(dtype=tf.int32, shape=[None, None],
                                               name='passage_char_lengths')

    query_char_states = self.build_char_states(char_embed=char_embed,
                                               is_training=is_training,
                                               reuse=False,
                                               char_ids=self.query_char_ids,
                                               char_lengths=self.query_char_lengths)
    passage_char_states = self.build_char_states(char_embed=char_embed,
                                                 is_training=is_training,
                                                 reuse=True,
                                                 char_ids=self.passage_char_ids,
                                                 char_lengths=self.passage_char_lengths)

    with tf.variable_scope("encoding") as scope:
        query_states = tf.concat([tf.nn.embedding_lookup(
            word_embed, self.query_word), query_char_states], axis=2)
        scope.reuse_variables()
        passage_states = tf.concat([tf.nn.embedding_lookup(
            word_embed, self.passage_word), passage_char_states], axis=2)
    passage_states = tf.transpose(passage_states, perm=[1, 0, 2])
    query_states = tf.transpose(query_states, perm=[1, 0, 2])
    self.passage_states = passage_states
    self.query_states = query_states

    output, output2 = graph_to_network(passage_states, query_states,
                                       self.passage_lengths, self.query_lengths,
                                       self.graph, self.cfg.dropout,
                                       is_training, num_heads=cfg.num_heads,
                                       rnn_units=cfg.rnn_units)

    passage_att_mask = self.passage_mask
    batch_size_x = tf.shape(self.query_lengths)
    answer_h = tf.zeros(tf.concat([batch_size_x,
                                   tf.constant([cfg.ptr_dim], dtype=tf.int32)], axis=0))

    answer_context = tf.reduce_mean(output2, axis=1)

    query_init_w = tf.get_variable(
        'query_init_w', shape=[output2.get_shape().as_list()[-1], cfg.ptr_dim])
    self.query_init = query_init_w
    answer_context = tf.matmul(answer_context, query_init_w)

    output = tf.transpose(output, perm=[1, 0, 2])

    with tf.variable_scope('answer_ptr_layer'):
        ptr_att = DotAttention('ptr',
                               hidden_dim=cfg.ptr_dim,
                               is_vanilla=self.cfg.att_is_vanilla,
                               is_identity_transform=self.cfg.att_is_id,
                               need_padding=self.cfg.att_need_padding)
        answer_pre_compute = ptr_att.get_pre_compute(output)
        ptr_gru = XGRUCell(hidden_dim=cfg.ptr_dim)
        begin_prob, begin_logits = ptr_att.get_prob(output, answer_context,
                                                    passage_att_mask,
                                                    answer_pre_compute, True)
        att_state = ptr_att.get_att(output, begin_prob)
        (_, answer_h) = ptr_gru.call(inputs=att_state, state=answer_h)
        answer_context = answer_h
        end_prob, end_logits = ptr_att.get_prob(output, answer_context,
                                                passage_att_mask,
                                                answer_pre_compute, True)

    self.begin_prob = tf.transpose(begin_prob, perm=[1, 0])
    self.end_prob = tf.transpose(end_prob, perm=[1, 0])
    begin_logits = tf.transpose(begin_logits, perm=[1, 0])
    end_logits = tf.transpose(end_logits, perm=[1, 0])

    if is_training:
        def label_smoothing(inputs, masks, epsilon=0.1):
            """Modify target for label smoothing."""
            epsilon = cfg.labelsmoothing
            num_of_channel = tf.shape(inputs)[-1]  # number of channels
            inputs = tf.cast(inputs, tf.float32)
            return (((1 - epsilon) * inputs) +
                    (epsilon / tf.cast(num_of_channel, tf.float32))) * masks

        cost1 = tf.reduce_mean(
            tf.losses.softmax_cross_entropy(label_smoothing(
                tf.one_hot(self.answer_begin, depth=tf.shape(self.passage_word)[0]),
                tf.transpose(self.passage_mask, perm=[1, 0])), begin_logits))
        cost2 = tf.reduce_mean(
            tf.losses.softmax_cross_entropy(label_smoothing(
                tf.one_hot(self.answer_end, depth=tf.shape(self.passage_word)[0]),
                tf.transpose(self.passage_mask, perm=[1, 0])), end_logits))
        reg_ws = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        l2_loss = tf.reduce_sum(reg_ws)
        loss = cost1 + cost2 + l2_loss
        self.loss = loss

        optimizer = tf.train.AdamOptimizer(learning_rate=cfg.learning_rate)
        self.train_op = optimizer.minimize(self.loss)

    return tf.stack([self.begin_prob, self.end_prob])
[ "Build the whole neural network for the QA model.", "Modify target for label smoothing." ]
Please provide a description of the function:
def check_output_command(file_path, head=None, tail=None):
    '''call check_output command to read content from a file'''
    if os.path.exists(file_path):
        if sys.platform == 'win32':
            cmds = ['powershell.exe', 'type', file_path]
            if head:
                cmds += ['|', 'select', '-first', str(head)]
            elif tail:
                cmds += ['|', 'select', '-last', str(tail)]
            return check_output(cmds, shell=True).decode('utf-8')
        else:
            cmds = ['cat', file_path]
            if head:
                cmds = ['head', '-' + str(head), file_path]
            elif tail:
                cmds = ['tail', '-' + str(tail), file_path]
            return check_output(cmds, shell=False).decode('utf-8')
    else:
        print_error('{0} does not exist!'.format(file_path))
        exit(1)
[]
Please provide a description of the function:
def kill_command(pid):
    '''kill command'''
    if sys.platform == 'win32':
        process = psutil.Process(pid=pid)
        process.send_signal(signal.CTRL_BREAK_EVENT)
    else:
        cmds = ['kill', str(pid)]
        call(cmds)
[]
Please provide a description of the function:
def install_package_command(package_name):
    '''install python package from pip'''
    # TODO refactor python logic
    if sys.platform == "win32":
        cmds = 'python -m pip install --user {0}'.format(package_name)
    else:
        cmds = 'python3 -m pip install --user {0}'.format(package_name)
    call(cmds, shell=True)
[]
Please provide a description of the function:
def install_requirements_command(requirements_path):
    '''install requirements.txt'''
    cmds = 'cd ' + requirements_path + ' && {0} -m pip install --user -r requirements.txt'
    # TODO refactor python logic
    if sys.platform == "win32":
        cmds = cmds.format('python')
    else:
        cmds = cmds.format('python3')
    call(cmds, shell=True)
[]
Please provide a description of the function:def get_params(): ''' Get parameters from command line ''' parser = argparse.ArgumentParser() parser.add_argument("--data_dir", type=str, default='/tmp/tensorflow/mnist/input_data', help="data directory") parser.add_argument("--dropout_rate", type=float, default=0.5, help="dropout rate") parser.add_argument("--channel_1_num", type=int, default=32) parser.add_argument("--channel_2_num", type=int, default=64) parser.add_argument("--conv_size", type=int, default=5) parser.add_argument("--pool_size", type=int, default=2) parser.add_argument("--hidden_size", type=int, default=1024) parser.add_argument("--learning_rate", type=float, default=1e-4) parser.add_argument("--batch_num", type=int, default=2700) parser.add_argument("--batch_size", type=int, default=32) args, _ = parser.parse_known_args() return args
[]
Please provide a description of the function:def build_network(self): ''' Building network for mnist ''' # Reshape to use within a convolutional neural net. # Last dimension is for "features" - there is only one here, since images are # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc. with tf.name_scope('reshape'): try: input_dim = int(math.sqrt(self.x_dim)) except: print( 'input dim cannot be sqrt and reshape. input dim: ' + str(self.x_dim)) logger.debug( 'input dim cannot be sqrt and reshape. input dim: %s', str(self.x_dim)) raise x_image = tf.reshape(self.images, [-1, input_dim, input_dim, 1]) # First convolutional layer - maps one grayscale image to 32 feature maps. with tf.name_scope('conv1'): w_conv1 = weight_variable( [self.conv_size, self.conv_size, 1, self.channel_1_num]) b_conv1 = bias_variable([self.channel_1_num]) h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1) # Pooling layer - downsamples by 2X. with tf.name_scope('pool1'): h_pool1 = max_pool(h_conv1, self.pool_size) # Second convolutional layer -- maps 32 feature maps to 64. with tf.name_scope('conv2'): w_conv2 = weight_variable([self.conv_size, self.conv_size, self.channel_1_num, self.channel_2_num]) b_conv2 = bias_variable([self.channel_2_num]) h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2) # Second pooling layer. with tf.name_scope('pool2'): h_pool2 = max_pool(h_conv2, self.pool_size) # Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image # is down to 7x7x64 feature maps -- maps this to 1024 features. last_dim = int(input_dim / (self.pool_size * self.pool_size)) with tf.name_scope('fc1'): w_fc1 = weight_variable( [last_dim * last_dim * self.channel_2_num, self.hidden_size]) b_fc1 = bias_variable([self.hidden_size]) h_pool2_flat = tf.reshape( h_pool2, [-1, last_dim * last_dim * self.channel_2_num]) h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1) # Dropout - controls the complexity of the model, prevents co-adaptation of features. with tf.name_scope('dropout'): h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob) # Map the 1024 features to 10 classes, one for each digit with tf.name_scope('fc2'): w_fc2 = weight_variable([self.hidden_size, self.y_dim]) b_fc2 = bias_variable([self.y_dim]) y_conv = tf.matmul(h_fc1_drop, w_fc2) + b_fc2 with tf.name_scope('loss'): cross_entropy = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(labels=self.labels, logits=y_conv)) with tf.name_scope('adam_optimizer'): self.train_step = tf.train.AdamOptimizer( self.learning_rate).minimize(cross_entropy) with tf.name_scope('accuracy'): correct_prediction = tf.equal( tf.argmax(y_conv, 1), tf.argmax(self.labels, 1)) self.accuracy = tf.reduce_mean( tf.cast(correct_prediction, tf.float32))
[]
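The arithmetic behind last_dim above, for the default MNIST configuration (28x28 inputs, two 2x poolings, channel_2_num=64):

    input_dim = 28
    pool_size = 2
    last_dim = input_dim // (pool_size * pool_size)   # 28 -> 14 -> 7
    fc1_inputs = last_dim * last_dim * 64
    print(last_dim, fc1_inputs)                       # 7 3136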
Please provide a description of the function:
def get_experiment_time(port):
    '''get the startTime and endTime of an experiment'''
    response = rest_get(experiment_url(port), REST_TIME_OUT)
    if response and check_response(response):
        content = convert_time_stamp_to_date(json.loads(response.text))
        return content.get('startTime'), content.get('endTime')
    return None, None
[]
Please provide a description of the function:
def get_experiment_status(port):
    '''get the status of an experiment'''
    result, response = check_rest_server_quick(port)
    if result:
        return json.loads(response.text).get('status')
    return None
[]
Please provide a description of the function:def update_experiment(): '''Update the experiment status in config file''' experiment_config = Experiments() experiment_dict = experiment_config.get_all_experiments() if not experiment_dict: return None for key in experiment_dict.keys(): if isinstance(experiment_dict[key], dict): if experiment_dict[key].get('status') != 'STOPPED': nni_config = Config(experiment_dict[key]['fileName']) rest_pid = nni_config.get_config('restServerPid') if not detect_process(rest_pid): experiment_config.update_experiment(key, 'status', 'STOPPED') continue rest_port = nni_config.get_config('restServerPort') startTime, endTime = get_experiment_time(rest_port) if startTime: experiment_config.update_experiment(key, 'startTime', startTime) if endTime: experiment_config.update_experiment(key, 'endTime', endTime) status = get_experiment_status(rest_port) if status: experiment_config.update_experiment(key, 'status', status)
[]
Please provide a description of the function:def check_experiment_id(args): '''check if the id is valid ''' update_experiment() experiment_config = Experiments() experiment_dict = experiment_config.get_all_experiments() if not experiment_dict: print_normal('There is no experiment running...') return None if not args.id: running_experiment_list = [] for key in experiment_dict.keys(): if isinstance(experiment_dict[key], dict): if experiment_dict[key].get('status') != 'STOPPED': running_experiment_list.append(key) elif isinstance(experiment_dict[key], list): # if the config file is old version, remove the configuration from file experiment_config.remove_experiment(key) if len(running_experiment_list) > 1: print_error('There are multiple experiments, please set the experiment id...') experiment_information = "" for key in running_experiment_list: experiment_information += (EXPERIMENT_DETAIL_FORMAT % (key, experiment_dict[key]['status'], \ experiment_dict[key]['port'], experiment_dict[key].get('platform'), experiment_dict[key]['startTime'], experiment_dict[key]['endTime'])) print(EXPERIMENT_INFORMATION_FORMAT % experiment_information) exit(1) elif not running_experiment_list: print_error('There is no experiment running!') return None else: return running_experiment_list[0] if experiment_dict.get(args.id): return args.id else: print_error('Id not correct!') return None
[]
Please provide a description of the function:def parse_ids(args): '''Parse the arguments for nnictl stop 1.If there is an id specified, return the corresponding id 2.If there is no id specified, and there is an experiment running, return the id, or return Error 3.If the id matches an experiment, nnictl will return the id. 4.If the id ends with *, nnictl will match all ids matchs the regular 5.If the id does not exist but match the prefix of an experiment id, nnictl will return the matched id 6.If the id does not exist but match multiple prefix of the experiment ids, nnictl will give id information ''' update_experiment() experiment_config = Experiments() experiment_dict = experiment_config.get_all_experiments() if not experiment_dict: print_normal('Experiment is not running...') return None result_list = [] running_experiment_list = [] for key in experiment_dict.keys(): if isinstance(experiment_dict[key], dict): if experiment_dict[key].get('status') != 'STOPPED': running_experiment_list.append(key) elif isinstance(experiment_dict[key], list): # if the config file is old version, remove the configuration from file experiment_config.remove_experiment(key) if not args.id: if len(running_experiment_list) > 1: print_error('There are multiple experiments, please set the experiment id...') experiment_information = "" for key in running_experiment_list: experiment_information += (EXPERIMENT_DETAIL_FORMAT % (key, experiment_dict[key]['status'], \ experiment_dict[key]['port'], experiment_dict[key].get('platform'), experiment_dict[key]['startTime'], experiment_dict[key]['endTime'])) print(EXPERIMENT_INFORMATION_FORMAT % experiment_information) exit(1) else: result_list = running_experiment_list elif args.id == 'all': result_list = running_experiment_list elif args.id.endswith('*'): for id in running_experiment_list: if id.startswith(args.id[:-1]): result_list.append(id) elif args.id in running_experiment_list: result_list.append(args.id) else: for id in running_experiment_list: if id.startswith(args.id): result_list.append(id) if len(result_list) > 1: print_error(args.id + ' is ambiguous, please choose ' + ' '.join(result_list) ) return None if not result_list and args.id: print_error('There are no experiments matched, please set correct experiment id...') elif not result_list: print_error('There is no experiment running...') return result_list
[]
Please provide a description of the function:
def get_config_filename(args):
    '''get the file name of config file'''
    experiment_id = check_experiment_id(args)
    if experiment_id is None:
        print_error('Please set the experiment id!')
        exit(1)
    experiment_config = Experiments()
    experiment_dict = experiment_config.get_all_experiments()
    return experiment_dict[experiment_id]['fileName']
[]
Please provide a description of the function:
def convert_time_stamp_to_date(content):
    '''Convert time stamp to date time format'''
    start_time_stamp = content.get('startTime')
    end_time_stamp = content.get('endTime')
    if start_time_stamp:
        start_time = datetime.datetime.utcfromtimestamp(
            start_time_stamp // 1000).strftime("%Y/%m/%d %H:%M:%S")
        content['startTime'] = str(start_time)
    if end_time_stamp:
        end_time = datetime.datetime.utcfromtimestamp(
            end_time_stamp // 1000).strftime("%Y/%m/%d %H:%M:%S")
        content['endTime'] = str(end_time)
    return content
[]
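For example (the timestamps are made up), the REST API's millisecond epochs become UTC date strings in place:

    content = {'startTime': 1560000000000, 'endTime': None}
    convert_time_stamp_to_date(content)
    # {'startTime': '2019/06/08 13:20:00', 'endTime': None}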
Please provide a description of the function:
def check_rest(args):
    '''check if restful server is running'''
    nni_config = Config(get_config_filename(args))
    rest_port = nni_config.get_config('restServerPort')
    running, _ = check_rest_server_quick(rest_port)
    if running:
        print_normal('Restful server is running...')
    else:
        print_normal('Restful server is not running...')
[]
Please provide a description of the function:
def stop_experiment(args):
    '''Stop the experiment which is running'''
    experiment_id_list = parse_ids(args)
    if experiment_id_list:
        experiment_config = Experiments()
        experiment_dict = experiment_config.get_all_experiments()
        for experiment_id in experiment_id_list:
            print_normal('Stopping experiment %s' % experiment_id)
            nni_config = Config(experiment_dict[experiment_id]['fileName'])
            rest_port = nni_config.get_config('restServerPort')
            rest_pid = nni_config.get_config('restServerPid')
            if rest_pid:
                kill_command(rest_pid)
                tensorboard_pid_list = nni_config.get_config('tensorboardPidList')
                if tensorboard_pid_list:
                    for tensorboard_pid in tensorboard_pid_list:
                        try:
                            kill_command(tensorboard_pid)
                        except Exception as exception:
                            print_error(exception)
                    nni_config.set_config('tensorboardPidList', [])
            print_normal('Stop experiment success!')
            experiment_config.update_experiment(experiment_id, 'status', 'STOPPED')
            time_now = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
            experiment_config.update_experiment(experiment_id, 'endTime', str(time_now))
[]
Please provide a description of the function:def trial_ls(args): '''List trial''' nni_config = Config(get_config_filename(args)) rest_port = nni_config.get_config('restServerPort') rest_pid = nni_config.get_config('restServerPid') if not detect_process(rest_pid): print_error('Experiment is not running...') return running, response = check_rest_server_quick(rest_port) if running: response = rest_get(trial_jobs_url(rest_port), REST_TIME_OUT) if response and check_response(response): content = json.loads(response.text) for index, value in enumerate(content): content[index] = convert_time_stamp_to_date(value) print(json.dumps(content, indent=4, sort_keys=True, separators=(',', ':'))) else: print_error('List trial failed...') else: print_error('Restful server is not running...')
[]
Please provide a description of the function:
def trial_kill(args):
    '''Kill a trial by trial job id'''
    nni_config = Config(get_config_filename(args))
    rest_port = nni_config.get_config('restServerPort')
    rest_pid = nni_config.get_config('restServerPid')
    if not detect_process(rest_pid):
        print_error('Experiment is not running...')
        return
    running, _ = check_rest_server_quick(rest_port)
    if running:
        response = rest_delete(trial_job_id_url(rest_port, args.id), REST_TIME_OUT)
        if response and check_response(response):
            print(response.text)
        else:
            print_error('Kill trial job failed...')
    else:
        print_error('Restful server is not running...')
[]
Please provide a description of the function:def list_experiment(args): '''Get experiment information''' nni_config = Config(get_config_filename(args)) rest_port = nni_config.get_config('restServerPort') rest_pid = nni_config.get_config('restServerPid') if not detect_process(rest_pid): print_error('Experiment is not running...') return running, _ = check_rest_server_quick(rest_port) if running: response = rest_get(experiment_url(rest_port), REST_TIME_OUT) if response and check_response(response): content = convert_time_stamp_to_date(json.loads(response.text)) print(json.dumps(content, indent=4, sort_keys=True, separators=(',', ':'))) else: print_error('List experiment failed...') else: print_error('Restful server is not running...')
[]
Please provide a description of the function:
def experiment_status(args):
    '''Show the status of experiment'''
    nni_config = Config(get_config_filename(args))
    rest_port = nni_config.get_config('restServerPort')
    result, response = check_rest_server_quick(rest_port)
    if not result:
        print_normal('Restful server is not running...')
    else:
        print(json.dumps(json.loads(response.text), indent=4,
                         sort_keys=True, separators=(',', ':')))
[]
Please provide a description of the function:
def log_internal(args, filetype):
    '''internal function to call get_log_content'''
    file_name = get_config_filename(args)
    if filetype == 'stdout':
        file_full_path = os.path.join(NNICTL_HOME_DIR, file_name, 'stdout')
    else:
        file_full_path = os.path.join(NNICTL_HOME_DIR, file_name, 'stderr')
    print(check_output_command(file_full_path, head=args.head, tail=args.tail))
[]
Please provide a description of the function:
def log_trial(args):
    '''get trial log path'''
    trial_id_path_dict = {}
    nni_config = Config(get_config_filename(args))
    rest_port = nni_config.get_config('restServerPort')
    rest_pid = nni_config.get_config('restServerPid')
    if not detect_process(rest_pid):
        print_error('Experiment is not running...')
        return
    running, response = check_rest_server_quick(rest_port)
    if running:
        response = rest_get(trial_jobs_url(rest_port), REST_TIME_OUT)
        if response and check_response(response):
            content = json.loads(response.text)
            for trial in content:
                trial_id_path_dict[trial['id']] = trial['logPath']
    else:
        print_error('Restful server is not running...')
        exit(1)
    if args.id:
        if args.trial_id:
            if trial_id_path_dict.get(args.trial_id):
                print_normal('id:' + args.trial_id + ' path:' + trial_id_path_dict[args.trial_id])
            else:
                print_error('trial id is not valid!')
                exit(1)
        else:
            print_error('please specify the trial id!')
            exit(1)
    else:
        for key in trial_id_path_dict:
            print('id:' + key + ' path:' + trial_id_path_dict[key])
[]
Please provide a description of the function:
def webui_url(args):
    '''show the url of web ui'''
    nni_config = Config(get_config_filename(args))
    print_normal('{0} {1}'.format('Web UI url:', ' '.join(nni_config.get_config('webuiUrl'))))
[]
Please provide a description of the function:def experiment_list(args): '''get the information of all experiments''' experiment_config = Experiments() experiment_dict = experiment_config.get_all_experiments() if not experiment_dict: print('There is no experiment running...') exit(1) update_experiment() experiment_id_list = [] if args.all and args.all == 'all': for key in experiment_dict.keys(): experiment_id_list.append(key) else: for key in experiment_dict.keys(): if experiment_dict[key]['status'] != 'STOPPED': experiment_id_list.append(key) if not experiment_id_list: print_warning('There is no experiment running...\nYou can use \'nnictl experiment list all\' to list all stopped experiments!') experiment_information = "" for key in experiment_id_list: experiment_information += (EXPERIMENT_DETAIL_FORMAT % (key, experiment_dict[key]['status'], experiment_dict[key]['port'],\ experiment_dict[key].get('platform'), experiment_dict[key]['startTime'], experiment_dict[key]['endTime'])) print(EXPERIMENT_INFORMATION_FORMAT % experiment_information)
[]
Please provide a description of the function:
def get_time_interval(time1, time2):
    '''get the interval of two times'''
    try:
        # convert time to timestamp
        time1 = time.mktime(time.strptime(time1, '%Y/%m/%d %H:%M:%S'))
        time2 = time.mktime(time.strptime(time2, '%Y/%m/%d %H:%M:%S'))
        # use total_seconds() so whole days are counted; timedelta.seconds
        # alone would silently drop them
        seconds = int((datetime.datetime.fromtimestamp(time2)
                       - datetime.datetime.fromtimestamp(time1)).total_seconds())
        # convert seconds to day:hour:minute:second
        days = seconds // 86400
        seconds %= 86400
        hours = seconds // 3600
        seconds %= 3600
        minutes = seconds // 60
        seconds %= 60
        return '%dd %dh %dm %ds' % (days, hours, minutes, seconds)
    except:
        return 'N/A'
[]
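Usage sketch with made-up timestamps (this relies on the total_seconds() fix above so whole days are counted):

    print(get_time_interval('2019/06/08 12:00:00', '2019/06/09 13:05:30'))
    # 1d 1h 5m 30s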
Please provide a description of the function:def show_experiment_info(): '''show experiment information in monitor''' experiment_config = Experiments() experiment_dict = experiment_config.get_all_experiments() if not experiment_dict: print('There is no experiment running...') exit(1) update_experiment() experiment_id_list = [] for key in experiment_dict.keys(): if experiment_dict[key]['status'] != 'STOPPED': experiment_id_list.append(key) if not experiment_id_list: print_warning('There is no experiment running...') return for key in experiment_id_list: print(EXPERIMENT_MONITOR_INFO % (key, experiment_dict[key]['status'], experiment_dict[key]['port'], \ experiment_dict[key].get('platform'), experiment_dict[key]['startTime'], get_time_interval(experiment_dict[key]['startTime'], experiment_dict[key]['endTime']))) print(TRIAL_MONITOR_HEAD) running, response = check_rest_server_quick(experiment_dict[key]['port']) if running: response = rest_get(trial_jobs_url(experiment_dict[key]['port']), REST_TIME_OUT) if response and check_response(response): content = json.loads(response.text) for index, value in enumerate(content): content[index] = convert_time_stamp_to_date(value) print(TRIAL_MONITOR_CONTENT % (content[index].get('id'), content[index].get('startTime'), content[index].get('endTime'), content[index].get('status'))) print(TRIAL_MONITOR_TAIL)
[]
Please provide a description of the function:def monitor_experiment(args): '''monitor the experiment''' if args.time <= 0: print_error('please input a positive integer as time interval, the unit is second.') exit(1) while True: try: os.system('clear') update_experiment() show_experiment_info() time.sleep(args.time) except KeyboardInterrupt: exit(0) except Exception as exception: print_error(exception) exit(1)
[]
Please provide a description of the function:def parse_trial_data(content): trial_records = [] for trial_data in content: for phase_i in range(len(trial_data['hyperParameters'])): hparam = json.loads(trial_data['hyperParameters'][phase_i])['parameters'] hparam['id'] = trial_data['id'] if 'finalMetricData' in trial_data.keys() and phase_i < len(trial_data['finalMetricData']): reward = json.loads(trial_data['finalMetricData'][phase_i]['data']) if isinstance(reward, (float, int)): dict_tmp = {**hparam, **{'reward': reward}} elif isinstance(reward, dict): dict_tmp = {**hparam, **reward} else: raise ValueError("Invalid finalMetricsData format: {}/{}".format(type(reward), reward)) else: dict_tmp = hparam trial_records.append(dict_tmp) return trial_records
[ "output: List[Dict]" ]
Please provide a description of the function:def export_trials_data(args):
    nni_config = Config(get_config_filename(args))
    rest_port = nni_config.get_config('restServerPort')
    rest_pid = nni_config.get_config('restServerPid')
    if not detect_process(rest_pid):
        print_error('Experiment is not running...')
        return
    running, response = check_rest_server_quick(rest_port)
    if running:
        response = rest_get(trial_jobs_url(rest_port), REST_TIME_OUT)
        if response is not None and check_response(response):
            content = json.loads(response.text)
            records = parse_trial_data(content)
            if args.type == 'json':
                json_records = []
                for trial in records:
                    value = trial.pop('reward', None)
                    trial_id = trial.pop('id', None)
                    json_records.append({'parameter': trial, 'value': value, 'id': trial_id})
            with open(args.path, 'w') as file:
                if args.type == 'csv':
                    # sort the union of keys so the csv column order is deterministic
                    writer = csv.DictWriter(file, sorted(set.union(*[set(r.keys()) for r in records])))
                    writer.writeheader()
                    writer.writerows(records)
                else:
                    json.dump(json_records, file)
        else:
            print_error('Export failed...')
    else:
        print_error('Restful server is not running')
[ "export experiment metadata to csv\n " ]
Please provide a description of the function:def copy_remote_directory_to_local(sftp, remote_path, local_path):
    '''copy remote directory to local machine'''
    try:
        os.makedirs(local_path, exist_ok=True)
        # use the entry attributes to tell directories from files; unlike probing
        # with listdir() and catching the error, this also handles empty remote
        # directories correctly (requires the standard-library stat module)
        for entry in sftp.listdir_attr(remote_path):
            remote_full_path = os.path.join(remote_path, entry.filename)
            local_full_path = os.path.join(local_path, entry.filename)
            if stat.S_ISDIR(entry.st_mode):
                copy_remote_directory_to_local(sftp, remote_full_path, local_full_path)
            else:
                sftp.get(remote_full_path, local_full_path)
    except Exception:
        pass
[]
Please provide a description of the function:def create_ssh_sftp_client(host_ip, port, username, password):
    '''create ssh client'''
    try:
        check_environment()
        import paramiko
        # paramiko.Transport takes a single (host, port) tuple or socket;
        # passing port as a second positional argument would set default_window_size
        conn = paramiko.Transport((host_ip, port))
        conn.connect(username=username, password=password)
        sftp = paramiko.SFTPClient.from_transport(conn)
        return sftp
    except Exception as exception:
        print_error('Create ssh client error %s\n' % exception)
[]
Please provide a description of the function:def json2space(x, oldy=None, name=NodeType.Root.value):
    y = list()
    if isinstance(x, dict):
        if NodeType.Type.value in x.keys():
            _type = x[NodeType.Type.value]
            name = name + '-' + _type
            if _type == 'choice':
                if oldy is not None:
                    _index = oldy[NodeType.Index.value]
                    y += json2space(x[NodeType.Value.value][_index],
                                    oldy[NodeType.Value.value], name=name+'[%d]' % _index)
                else:
                    y += json2space(x[NodeType.Value.value], None, name=name)
            y.append(name)
        else:
            for key in x.keys():
                y += json2space(x[key], (oldy[key] if oldy is not None else None),
                                name+"[%s]" % str(key))
    elif isinstance(x, list):
        for i, x_i in enumerate(x):
            y += json2space(x_i, (oldy[i] if oldy is not None else None), name+"[%d]" % i)
    return y
[ "Change search space from json format to hyperopt format\n " ]
Please provide a description of the function:def json2paramater(x, is_rand, random_state, oldy=None, Rand=False, name=NodeType.Root.value):
    if isinstance(x, dict):
        if NodeType.Type.value in x.keys():
            _type = x[NodeType.Type.value]
            _value = x[NodeType.Value.value]
            name = name + '-' + _type
            Rand |= is_rand[name]
            if Rand:
                if _type == 'choice':
                    _index = random_state.randint(len(_value))
                    y = {
                        NodeType.Index.value: _index,
                        NodeType.Value.value: json2paramater(x[NodeType.Value.value][_index],
                                                             is_rand,
                                                             random_state,
                                                             None,
                                                             Rand,
                                                             name=name+"[%d]" % _index)
                    }
                else:
                    y = eval('parameter_expressions.' + _type)(*(_value + [random_state]))
            else:
                y = copy.deepcopy(oldy)
        else:
            y = dict()
            for key in x.keys():
                y[key] = json2paramater(x[key], is_rand, random_state,
                                        oldy[key] if oldy is not None else None,
                                        Rand, name + "[%s]" % str(key))
    elif isinstance(x, list):
        y = list()
        for i, x_i in enumerate(x):
            y.append(json2paramater(x_i, is_rand, random_state,
                                    oldy[i] if oldy is not None else None,
                                    Rand, name + "[%d]" % i))
    else:
        y = copy.deepcopy(x)
    return y
[ "Json to pramaters.\n " ]
Please provide a description of the function:def _split_index(params): result = {} for key in params: if isinstance(params[key], dict): value = params[key]['_value'] else: value = params[key] result[key] = value return result
[ "Delete index information from params\n\n Parameters\n ----------\n params : dict\n\n Returns\n -------\n result : dict\n " ]
Please provide a description of the function:def mutation(self, config=None, info=None, save_dir=None): self.result = None self.config = config self.restore_dir = self.save_dir self.save_dir = save_dir self.info = info
[ "\n Parameters\n ----------\n config : str\n info : str\n save_dir : str\n " ]
Please provide a description of the function:def update_search_space(self, search_space): self.searchspace_json = search_space self.space = json2space(self.searchspace_json) self.random_state = np.random.RandomState() self.population = [] is_rand = dict() for item in self.space: is_rand[item] = True for _ in range(self.population_size): config = json2paramater( self.searchspace_json, is_rand, self.random_state) self.population.append(Individual(config=config))
[ "Update search space. \n Search_space contains the information that user pre-defined.\n\n Parameters\n ----------\n search_space : dict\n " ]
Please provide a description of the function:def generate_parameters(self, parameter_id): if not self.population: raise RuntimeError('The population is empty') pos = -1 for i in range(len(self.population)): if self.population[i].result is None: pos = i break if pos != -1: indiv = copy.deepcopy(self.population[pos]) self.population.pop(pos) total_config = indiv.config else: random.shuffle(self.population) if self.population[0].result < self.population[1].result: self.population[0] = self.population[1] # mutation space = json2space(self.searchspace_json, self.population[0].config) is_rand = dict() mutation_pos = space[random.randint(0, len(space)-1)] for i in range(len(self.space)): is_rand[self.space[i]] = (self.space[i] == mutation_pos) config = json2paramater( self.searchspace_json, is_rand, self.random_state, self.population[0].config) self.population.pop(1) # remove "_index" from config and save params-id total_config = config self.total_data[parameter_id] = total_config config = _split_index(total_config) return config
[ "Returns a dict of trial (hyper-)parameters, as a serializable object.\n\n Parameters\n ----------\n parameter_id : int\n \n Returns\n -------\n config : dict\n " ]
Please provide a description of the function:def receive_trial_result(self, parameter_id, parameters, value):
    '''Record the result from a trial

    Parameters
    ----------
    parameters: dict
    value : dict/float
        if value is dict, it should have "default" key.
        value is final metrics of the trial.
    '''
    reward = extract_scalar_reward(value)
    if parameter_id not in self.total_data:
        raise RuntimeError('Received parameter_id not in total_data.')
    # restore the parameters that contain "_index"
    params = self.total_data[parameter_id]

    if self.optimize_mode == OptimizeMode.Minimize:
        reward = -reward

    indiv = Individual(config=params, result=reward)
    self.population.append(indiv)
[]
Please provide a description of the function:def get_json_content(file_path): try: with open(file_path, 'r') as file: return json.load(file) except TypeError as err: print('Error: ', err) return None
[ "Load json file content\n \n Parameters\n ----------\n file_path:\n path to the file\n \n Raises\n ------\n TypeError\n Error with the file path\n " ]
Please provide a description of the function:def generate_pcs(nni_search_space_content):
    categorical_dict = {}
    search_space = nni_search_space_content
    with open('param_config_space.pcs', 'w') as pcs_fd:
        if isinstance(search_space, dict):
            for key in search_space.keys():
                if isinstance(search_space[key], dict):
                    try:
                        if search_space[key]['_type'] == 'choice':
                            choice_len = len(search_space[key]['_value'])
                            pcs_fd.write('%s categorical {%s} [%s]\n' % (
                                key,
                                json.dumps(list(range(choice_len)))[1:-1],
                                json.dumps(0)))
                            if key in categorical_dict:
                                raise RuntimeError('%s has already existed, please make sure search space has no duplicate key.' % key)
                            categorical_dict[key] = search_space[key]['_value']
                        elif search_space[key]['_type'] == 'randint':
                            # TODO: support lower bound in randint
                            pcs_fd.write('%s integer [0, %d] [%d]\n' % (
                                key,
                                search_space[key]['_value'][0],
                                search_space[key]['_value'][0]))
                        elif search_space[key]['_type'] == 'uniform':
                            pcs_fd.write('%s real %s [%s]\n' % (
                                key,
                                json.dumps(search_space[key]['_value']),
                                json.dumps(search_space[key]['_value'][0])))
                        elif search_space[key]['_type'] == 'loguniform':
                            # use np.round here to ensure that the rounded default value is in
                            # the range; it will be rounded again by the ConfigSpace package
                            search_space[key]['_value'] = list(np.round(np.log(search_space[key]['_value']), 10))
                            pcs_fd.write('%s real %s [%s]\n' % (
                                key,
                                json.dumps(search_space[key]['_value']),
                                json.dumps(search_space[key]['_value'][0])))
                        elif search_space[key]['_type'] == 'quniform' \
                                and search_space[key]['_value'][2] == 1:
                            pcs_fd.write('%s integer [%d, %d] [%d]\n' % (
                                key,
                                search_space[key]['_value'][0],
                                search_space[key]['_value'][1],
                                search_space[key]['_value'][0]))
                        else:
                            raise RuntimeError('unsupported _type %s' % search_space[key]['_type'])
                    # catch lookup/type errors only, so the deliberate RuntimeErrors
                    # above are not masked by the generic message
                    except (KeyError, IndexError, TypeError) as err:
                        raise RuntimeError('_type or _value error.') from err
                else:
                    raise RuntimeError('incorrect search space.')
            return categorical_dict
    return None
[ "Generate the Parameter Configuration Space (PCS) which defines the \n legal ranges of the parameters to be optimized and their default values.\n \n Generally, the format is:\n # parameter_name categorical {value_1, ..., value_N} [default value]\n # parameter_name ordinal {value_1, ..., value_N} [default value]\n # parameter_name integer [min_value, max_value] [default value]\n # parameter_name integer [min_value, max_value] [default value] log\n # parameter_name real [min_value, max_value] [default value]\n # parameter_name real [min_value, max_value] [default value] log\n \n Reference: https://automl.github.io/SMAC3/stable/options.html\n\n Parameters\n ----------\n nni_search_space_content: search_space\n The search space in this experiment in nni\n \n Returns\n -------\n Parameter Configuration Space (PCS)\n the legal ranges of the parameters to be optimized and their default values\n\n Raises\n ------\n RuntimeError\n unsupported type or value error or incorrect search space\n " ]
Please provide a description of the function:def generate_scenario(ss_content): with open('scenario.txt', 'w') as sce_fd: sce_fd.write('deterministic = 0\n') #sce_fd.write('output_dir = \n') sce_fd.write('paramfile = param_config_space.pcs\n') sce_fd.write('run_obj = quality\n') return generate_pcs(ss_content)
[ "Generate the scenario. The scenario-object (smac.scenario.scenario.Scenario) is used to configure SMAC and \n can be constructed either by providing an actual scenario-object, or by specifing the options in a scenario file.\n \n Reference: https://automl.github.io/SMAC3/stable/options.html\n\n The format of the scenario file is one option per line:\n OPTION1 = VALUE1\n OPTION2 = VALUE2\n ...\n\n Parameters\n ----------\n abort_on_first_run_crash: bool\n If true, SMAC will abort if the first run of the target algorithm crashes. Default: True, \n because trials reported to nni tuner would always in success state\n algo: function\n Specifies the target algorithm call that SMAC will optimize. Interpreted as a bash-command.\n Not required by tuner, but required by nni's training service for running trials\n always_race_default:\n Race new incumbents always against default configuration\n cost_for_crash:\n Defines the cost-value for crashed runs on scenarios with quality as run-obj. Default: 2147483647.0.\n Trials reported to nni tuner would always in success state\n cutoff_time:\n Maximum runtime, after which the target algorithm is cancelled. `Required if *run_obj* is runtime`\n deterministic: bool\n If true, the optimization process will be repeatable.\n execdir:\n Specifies the path to the execution-directory. Default: .\n Trials are executed by nni's training service\n feature_file:\n Specifies the file with the instance-features.\n No features specified or feature file is not supported\n initial_incumbent:\n DEFAULT is the default from the PCS. Default: DEFAULT. Must be from: [‘DEFAULT’, ‘RANDOM’].\n input_psmac_dirs:\n For parallel SMAC, multiple output-directories are used.\n Parallelism is supported by nni\n instance_file:\n Specifies the file with the training-instances. Not supported\n intensification_percentage:\n The fraction of time to be used on intensification (versus choice of next Configurations). Default: 0.5.\n Not supported, trials are controlled by nni's training service and kill be assessor\n maxR: int\n Maximum number of calls per configuration. Default: 2000.\n memory_limit:\n Maximum available memory the target algorithm can occupy before being cancelled.\n minR: int\n Minimum number of calls per configuration. Default: 1.\n output_dir:\n Specifies the output-directory for all emerging files, such as logging and results.\n Default: smac3-output_2018-01-22_15:05:56_807070.\n overall_obj:\n \tPARX, where X is an integer defining the penalty imposed on timeouts (i.e. runtimes that exceed the cutoff-time).\n Timeout is not supported\n paramfile:\n Specifies the path to the PCS-file.\n run_obj:\n Defines what metric to optimize. When optimizing runtime, cutoff_time is required as well.\n Must be from: [‘runtime’, ‘quality’].\n runcount_limit: int\n Maximum number of algorithm-calls during optimization. Default: inf.\n Use default because this is controlled by nni\n shared_model:\n Whether to run SMAC in parallel mode. Parallelism is supported by nni\n test_instance_file:\n Specifies the file with the test-instances. Instance is not supported\n tuner-timeout:\n Maximum amount of CPU-time used for optimization. Not supported\n wallclock_limit: int\n Maximum amount of wallclock-time used for optimization. 
Default: inf.\n Use default because this is controlled by nni\n\n Returns\n -------\n Scenario:\n The scenario-object (smac.scenario.scenario.Scenario) is used to configure SMAC and can be constructed\n either by providing an actual scenario-object, or by specifing the options in a scenario file\n " ]
Please provide a description of the function:def load_data(train_path='./data/regression.train', test_path='./data/regression.test'): ''' Load or create dataset ''' print('Load data...') df_train = pd.read_csv(train_path, header=None, sep='\t') df_test = pd.read_csv(test_path, header=None, sep='\t') num = len(df_train) split_num = int(0.9 * num) y_train = df_train[0].values y_test = df_test[0].values y_eval = y_train[split_num:] y_train = y_train[:split_num] X_train = df_train.drop(0, axis=1).values X_test = df_test.drop(0, axis=1).values X_eval = X_train[split_num:, :] X_train = X_train[:split_num, :] # create dataset for lightgbm lgb_train = lgb.Dataset(X_train, y_train) lgb_eval = lgb.Dataset(X_eval, y_eval, reference=lgb_train) return lgb_train, lgb_eval, X_test, y_test
[]
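A minimal training sketch, assuming the default regression data files exist at the stated paths and lightgbm is installed:

import lightgbm as lgb
lgb_train, lgb_eval, X_test, y_test = load_data()
params = {'objective': 'regression', 'metric': 'l2', 'verbose': -1}
booster = lgb.train(params, lgb_train, num_boost_round=20, valid_sets=[lgb_eval])
predictions = booster.predict(X_test)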
Please provide a description of the function:def layer_distance(a, b): # pylint: disable=unidiomatic-typecheck if type(a) != type(b): return 1.0 if is_layer(a, "Conv"): att_diff = [ (a.filters, b.filters), (a.kernel_size, b.kernel_size), (a.stride, b.stride), ] return attribute_difference(att_diff) if is_layer(a, "Pooling"): att_diff = [ (a.padding, b.padding), (a.kernel_size, b.kernel_size), (a.stride, b.stride), ] return attribute_difference(att_diff) return 0.0
[ "The distance between two layers." ]
Please provide a description of the function:def attribute_difference(att_diff): ''' The attribute distance. ''' ret = 0 for a_value, b_value in att_diff: if max(a_value, b_value) == 0: ret += 0 else: ret += abs(a_value - b_value) * 1.0 / max(a_value, b_value) return ret * 1.0 / len(att_diff)
[]
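A worked example with hypothetical attribute pairs (assumes attribute_difference is in scope):

# (32, 64) -> |32 - 64| / 64 = 0.5; (3, 3) -> 0.0; mean = 0.25
print(attribute_difference([(32, 64), (3, 3)]))  # expected: 0.25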
Please provide a description of the function:def layers_distance(list_a, list_b): len_a = len(list_a) len_b = len(list_b) f = np.zeros((len_a + 1, len_b + 1)) f[-1][-1] = 0 for i in range(-1, len_a): f[i][-1] = i + 1 for j in range(-1, len_b): f[-1][j] = j + 1 for i in range(len_a): for j in range(len_b): f[i][j] = min( f[i][j - 1] + 1, f[i - 1][j] + 1, f[i - 1][j - 1] + layer_distance(list_a[i], list_b[j]), ) return f[len_a - 1][len_b - 1]
[ "The distance between the layers of two neural networks." ]
Please provide a description of the function:def skip_connection_distance(a, b): if a[2] != b[2]: return 1.0 len_a = abs(a[1] - a[0]) len_b = abs(b[1] - b[0]) return (abs(a[0] - b[0]) + abs(len_a - len_b)) / (max(a[0], b[0]) + max(len_a, len_b))
[ "The distance between two skip-connections." ]
Please provide a description of the function:def skip_connections_distance(list_a, list_b): distance_matrix = np.zeros((len(list_a), len(list_b))) for i, a in enumerate(list_a): for j, b in enumerate(list_b): distance_matrix[i][j] = skip_connection_distance(a, b) return distance_matrix[linear_sum_assignment(distance_matrix)].sum() + abs( len(list_a) - len(list_b) )
[ "The distance between the skip-connections of two neural networks." ]
Please provide a description of the function:def edit_distance(x, y): ret = layers_distance(x.layers, y.layers) ret += Constant.KERNEL_LAMBDA * skip_connections_distance( x.skip_connections, y.skip_connections ) return ret
[ "The distance between two neural networks.\n Args:\n x: An instance of NetworkDescriptor.\n y: An instance of NetworkDescriptor\n Returns:\n The edit-distance between x and y.\n " ]
Please provide a description of the function:def edit_distance_matrix(train_x, train_y=None): if train_y is None: ret = np.zeros((train_x.shape[0], train_x.shape[0])) for x_index, x in enumerate(train_x): for y_index, y in enumerate(train_x): if x_index == y_index: ret[x_index][y_index] = 0 elif x_index < y_index: ret[x_index][y_index] = edit_distance(x, y) else: ret[x_index][y_index] = ret[y_index][x_index] return ret ret = np.zeros((train_x.shape[0], train_y.shape[0])) for x_index, x in enumerate(train_x): for y_index, y in enumerate(train_y): ret[x_index][y_index] = edit_distance(x, y) return ret
[ "Calculate the edit distance.\n Args:\n train_x: A list of neural architectures.\n train_y: A list of neural architectures.\n Returns:\n An edit-distance matrix.\n " ]
Please provide a description of the function:def vector_distance(a, b): a = np.array(a) b = np.array(b) return np.linalg.norm(a - b)
[ "The Euclidean distance between two vectors." ]
Please provide a description of the function:def bourgain_embedding_matrix(distance_matrix):
    distance_matrix = np.array(distance_matrix)
    n = len(distance_matrix)
    if n == 1:
        return distance_matrix
    np.random.seed(123)
    distort_elements = []
    r = range(n)
    k = int(math.ceil(math.log(n) / math.log(2) - 1))
    t = int(math.ceil(math.log(n)))
    counter = 0
    for i in range(0, k + 1):
        # use fresh loop variables: the original shadowed both t and s,
        # which corrupted the inner-loop bound after the first outer iteration
        for t_index in range(t):
            s = np.random.choice(r, 2 ** i)
            for j in r:
                d = min([distance_matrix[j][s_index] for s_index in s])
                counter += len(s)
                if i == 0 and t_index == 0:
                    distort_elements.append([d])
                else:
                    distort_elements[j].append(d)
    return rbf_kernel(distort_elements, distort_elements)
[ "Use Bourgain algorithm to embed the neural architectures based on their edit-distance.\n Args:\n distance_matrix: A matrix of edit-distances.\n Returns:\n A matrix of distances after embedding.\n " ]
Please provide a description of the function:def contain(descriptors, target_descriptor): for descriptor in descriptors: if edit_distance(descriptor, target_descriptor) < 1e-5: return True return False
[ "Check if the target descriptor is in the descriptors." ]
Please provide a description of the function:def fit(self, train_x, train_y): if self.first_fitted: self.incremental_fit(train_x, train_y) else: self.first_fit(train_x, train_y)
[ " Fit the regressor with more data.\n Args:\n train_x: A list of NetworkDescriptor.\n train_y: A list of metric values.\n " ]
Please provide a description of the function:def incremental_fit(self, train_x, train_y): if not self._first_fitted: raise ValueError("The first_fit function needs to be called first.") train_x, train_y = np.array(train_x), np.array(train_y) # Incrementally compute K up_right_k = edit_distance_matrix(self._x, train_x) down_left_k = np.transpose(up_right_k) down_right_k = edit_distance_matrix(train_x) up_k = np.concatenate((self._distance_matrix, up_right_k), axis=1) down_k = np.concatenate((down_left_k, down_right_k), axis=1) temp_distance_matrix = np.concatenate((up_k, down_k), axis=0) k_matrix = bourgain_embedding_matrix(temp_distance_matrix) diagonal = np.diag_indices_from(k_matrix) diagonal = (diagonal[0][-len(train_x) :], diagonal[1][-len(train_x) :]) k_matrix[diagonal] += self.alpha try: self._l_matrix = cholesky(k_matrix, lower=True) # Line 2 except LinAlgError: return self self._x = np.concatenate((self._x, train_x), axis=0) self._y = np.concatenate((self._y, train_y), axis=0) self._distance_matrix = temp_distance_matrix self._alpha_vector = cho_solve((self._l_matrix, True), self._y) # Line 3 return self
[ " Incrementally fit the regressor. " ]
Please provide a description of the function:def first_fit(self, train_x, train_y): train_x, train_y = np.array(train_x), np.array(train_y) self._x = np.copy(train_x) self._y = np.copy(train_y) self._distance_matrix = edit_distance_matrix(self._x) k_matrix = bourgain_embedding_matrix(self._distance_matrix) k_matrix[np.diag_indices_from(k_matrix)] += self.alpha self._l_matrix = cholesky(k_matrix, lower=True) # Line 2 self._alpha_vector = cho_solve((self._l_matrix, True), self._y) # Line 3 self._first_fitted = True return self
[ " Fit the regressor for the first time. " ]
Please provide a description of the function:def predict(self, train_x):
    k_trans = np.exp(-np.power(edit_distance_matrix(train_x, self._x), 2))
    y_mean = k_trans.dot(self._alpha_vector)  # Line 4 (y_mean = f_star)

    # compute inverse K_inv of K based on its Cholesky
    # decomposition L and its inverse L_inv
    l_inv = solve_triangular(self._l_matrix.T, np.eye(self._l_matrix.shape[0]))
    k_inv = l_inv.dot(l_inv.T)
    # Compute variance of predictive distribution
    # (plain float: the np.float alias was removed from modern NumPy)
    y_var = np.ones(len(train_x), dtype=float)
    y_var -= np.einsum("ij,ij->i", np.dot(k_trans, k_inv), k_trans)

    # Check if any of the variances is negative because of
    # numerical issues. If yes: set the variance to 0.
    y_var_negative = y_var < 0
    if np.any(y_var_negative):
        y_var[y_var_negative] = 0.0
    return y_mean, np.sqrt(y_var)
[ "Predict the result.\n Args:\n train_x: A list of NetworkDescriptor.\n Returns:\n y_mean: The predicted mean.\n y_std: The predicted standard deviation.\n " ]