Please provide a description of the function:def generate(self, descriptors): model_ids = self.search_tree.adj_list.keys() target_graph = None father_id = None descriptors = deepcopy(descriptors) elem_class = Elem if self.optimizemode is OptimizeMode.Maximize: elem_class = ReverseElem # Initialize the priority queue. pq = PriorityQueue() temp_list = [] for model_id in model_ids: metric_value = self.searcher.get_metric_value_by_id(model_id) temp_list.append((metric_value, model_id)) temp_list = sorted(temp_list) for metric_value, model_id in temp_list: graph = self.searcher.load_model_by_id(model_id) graph.clear_operation_history() graph.clear_weights() pq.put(elem_class(metric_value, model_id, graph)) t = 1.0 t_min = self.t_min alpha = 0.9 opt_acq = self._get_init_opt_acq_value() while not pq.empty() and t > t_min: elem = pq.get() if self.optimizemode is OptimizeMode.Maximize: temp_exp = min((elem.metric_value - opt_acq) / t, 1.0) else: temp_exp = min((opt_acq - elem.metric_value) / t, 1.0) ap = math.exp(temp_exp) if ap >= random.uniform(0, 1): for temp_graph in transform(elem.graph): if contain(descriptors, temp_graph.extract_descriptor()): continue temp_acq_value = self.acq(temp_graph) pq.put(elem_class(temp_acq_value, elem.father_id, temp_graph)) descriptors.append(temp_graph.extract_descriptor()) if self._accept_new_acq_value(opt_acq, temp_acq_value): opt_acq = temp_acq_value father_id = elem.father_id target_graph = deepcopy(temp_graph) t *= alpha # Did not found a not duplicated architecture if father_id is None: return None, None nm_graph = self.searcher.load_model_by_id(father_id) for args in target_graph.operation_history: getattr(nm_graph, args[0])(*list(args[1:])) return nm_graph, father_id
[ "Generate new architecture.\n Args:\n descriptors: All the searched neural architectures.\n Returns:\n graph: An instance of Graph. A morphed neural network with weights.\n father_id: The father node ID in the search tree.\n " ]
Please provide a description of the function:def acq(self, graph): ''' estimate the value of generated graph ''' mean, std = self.gpr.predict(np.array([graph.extract_descriptor()])) if self.optimizemode is OptimizeMode.Maximize: return mean + self.beta * std return mean - self.beta * std
[]
Please provide a description of the function:def add_child(self, u, v): ''' add child to search tree itself. Arguments: u {int} -- father id v {int} -- child id ''' if u == -1: self.root = v self.adj_list[v] = [] return if v not in self.adj_list[u]: self.adj_list[u].append(v) if v not in self.adj_list: self.adj_list[v] = []
[]
Please provide a description of the function:def get_dict(self, u=None): if u is None: return self.get_dict(self.root) children = [] for v in self.adj_list[u]: children.append(self.get_dict(v)) ret = {"name": u, "children": children} return ret
[ " A recursive function to return the content of the tree in a dict." ]
Please provide a description of the function:def train_with_graph(p_graph, qp_pairs, dev_qp_pairs): ''' Train a network from a specific graph. ''' global sess with tf.Graph().as_default(): train_model = GAG(cfg, embed, p_graph) train_model.build_net(is_training=True) tf.get_variable_scope().reuse_variables() dev_model = GAG(cfg, embed, p_graph) dev_model.build_net(is_training=False) with tf.Session() as sess: if restore_path is not None: restore_mapping = dict(zip(restore_shared, restore_shared)) logger.debug('init shared variables from {}, restore_scopes: {}'.format(restore_path, restore_shared)) init_from_checkpoint(restore_path, restore_mapping) logger.debug('init variables') logger.debug(sess.run(tf.report_uninitialized_variables())) init = tf.global_variables_initializer() sess.run(init) # writer = tf.summary.FileWriter('%s/graph/'%execution_path, sess.graph) logger.debug('assign to graph') saver = tf.train.Saver() train_loss = None bestacc = 0 patience = 5 patience_increase = 2 improvement_threshold = 0.995 for epoch in range(max_epoch): logger.debug('begin to train') train_batches = data.get_batches(qp_pairs, cfg.batch_size) train_loss = run_epoch(train_batches, train_model, True) logger.debug('epoch ' + str(epoch) + ' loss: ' + str(train_loss)) dev_batches = list(data.get_batches( dev_qp_pairs, cfg.batch_size)) _, position1, position2, ids, contexts = run_epoch( dev_batches, dev_model, False) answers = generate_predict_json( position1, position2, ids, contexts) if save_path is not None: logger.info('save prediction file to {}'.format(save_path)) with open(os.path.join(save_path, 'epoch%d.prediction' % epoch), 'w') as file: json.dump(answers, file) else: answers = json.dumps(answers) answers = json.loads(answers) iter = epoch + 1 acc = evaluate.evaluate_with_predictions( args.dev_file, answers) logger.debug('Send intermediate acc: %s', str(acc)) nni.report_intermediate_result(acc) logger.debug('Send intermediate result done.') if acc > bestacc: if acc * improvement_threshold > bestacc: patience = max(patience, iter * patience_increase) bestacc = acc if save_path is not None: logger.info('save model & prediction to {}'.format(save_path)) saver.save(sess, os.path.join(save_path, 'epoch%d.model' % epoch)) with open(os.path.join(save_path, 'epoch%d.score' % epoch), 'wb') as file: pickle.dump( (position1, position2, ids, contexts), file) logger.debug('epoch %d acc %g bestacc %g' % (epoch, acc, bestacc)) if patience <= iter: break logger.debug('save done.') return train_loss, bestacc
[]
Please provide a description of the function:def generate_multiple_parameters(self, parameter_id_list): result = [] for parameter_id in parameter_id_list: try: _logger.debug("generating param for {}".format(parameter_id)) res = self.generate_parameters(parameter_id) except nni.NoMoreTrialError: return result result.append(res) return result
[ "Returns multiple sets of trial (hyper-)parameters, as iterable of serializable objects.\n Call 'generate_parameters()' by 'count' times by default.\n User code must override either this function or 'generate_parameters()'.\n If there's no more trial, user should raise nni.NoMoreTrialError exception in generate_parameters().\n If so, this function will only return sets of trial (hyper-)parameters that have already been collected.\n parameter_id_list: list of int\n " ]
Please provide a description of the function:def graph_loads(graph_json): ''' Load graph ''' layers = [] for layer in graph_json['layers']: layer_info = Layer(layer['graph_type'], layer['input'], layer['output'], layer['size'], layer['hash_id']) layer_info.is_delete = layer['is_delete'] _logger.debug('append layer {}'.format(layer_info)) layers.append(layer_info) graph = Graph(graph_json['max_layer_num'], graph_json['min_layer_num'], [], [], []) graph.layers = layers _logger.debug('graph {} loaded'.format(graph)) return graph
[]
Please provide a description of the function:def update_hash(self, layers: Iterable): if self.graph_type == LayerType.input.value: return hasher = hashlib.md5() hasher.update(LayerType(self.graph_type).name.encode('ascii')) hasher.update(str(self.size).encode('ascii')) for i in self.input: if layers[i].hash_id is None: raise ValueError('Hash id of layer {}: {} not generated!'.format(i, layers[i])) hasher.update(layers[i].hash_id.encode('ascii')) self.hash_id = hasher.hexdigest()
[ "\n Calculation of `hash_id` of Layer. Which is determined by the properties of itself, and the `hash_id`s of input layers\n " ]
Please provide a description of the function:def update_hash(self): _logger.debug('update hash') layer_in_cnt = [len(layer.input) for layer in self.layers] topo_queue = deque([i for i, layer in enumerate(self.layers) if not layer.is_delete and layer.graph_type == LayerType.input.value]) while topo_queue: layer_i = topo_queue.pop() self.layers[layer_i].update_hash(self.layers) for layer_j in self.layers[layer_i].output: layer_in_cnt[layer_j] -= 1 if layer_in_cnt[layer_j] == 0: topo_queue.appendleft(layer_j)
[ "\n update hash id of each layer, in topological order/recursively\n hash id will be used in weight sharing\n " ]
Please provide a description of the function:def init_logger(logger_file_path, log_level_name='info'): log_level = log_level_map.get(log_level_name, logging.INFO) logger_file = open(logger_file_path, 'w') fmt = '[%(asctime)s] %(levelname)s (%(name)s/%(threadName)s) %(message)s' logging.Formatter.converter = time.localtime formatter = logging.Formatter(fmt, _time_format) handler = logging.StreamHandler(logger_file) handler.setFormatter(formatter) root_logger = logging.getLogger() root_logger.addHandler(handler) root_logger.setLevel(log_level) # these modules are too verbose logging.getLogger('matplotlib').setLevel(log_level) sys.stdout = _LoggerFileWrapper(logger_file)
[ "Initialize root logger.\n This will redirect anything from logging.getLogger() as well as stdout to specified file.\n logger_file_path: path of logger file (path-like object).\n " ]
Please provide a description of the function:def create_mnist_model(hyper_params, input_shape=(H, W, 1), num_classes=NUM_CLASSES): ''' Create simple convolutional model ''' layers = [ Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape), Conv2D(64, (3, 3), activation='relu'), MaxPooling2D(pool_size=(2, 2)), Flatten(), Dense(100, activation='relu'), Dense(num_classes, activation='softmax') ] model = Sequential(layers) if hyper_params['optimizer'] == 'Adam': optimizer = keras.optimizers.Adam(lr=hyper_params['learning_rate']) else: optimizer = keras.optimizers.SGD(lr=hyper_params['learning_rate'], momentum=0.9) model.compile(loss=keras.losses.categorical_crossentropy, optimizer=optimizer, metrics=['accuracy']) return model
[]
Please provide a description of the function:def load_mnist_data(args): ''' Load MNIST dataset ''' (x_train, y_train), (x_test, y_test) = mnist.load_data() x_train = (np.expand_dims(x_train, -1).astype(np.float) / 255.)[:args.num_train] x_test = (np.expand_dims(x_test, -1).astype(np.float) / 255.)[:args.num_test] y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)[:args.num_train] y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)[:args.num_test] LOG.debug('x_train shape: %s', (x_train.shape,)) LOG.debug('x_test shape: %s', (x_test.shape,)) return x_train, y_train, x_test, y_test
[]
Please provide a description of the function:def train(args, params): ''' Train model ''' x_train, y_train, x_test, y_test = load_mnist_data(args) model = create_mnist_model(params) # nni model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, verbose=1, validation_data=(x_test, y_test), callbacks=[SendMetrics(), TensorBoard(log_dir=TENSORBOARD_DIR)]) _, acc = model.evaluate(x_test, y_test, verbose=0) LOG.debug('Final result is: %g', acc) nni.report_final_result(acc)
[]
Please provide a description of the function:def on_epoch_end(self, epoch, logs={}): ''' Run on end of each epoch ''' LOG.debug(logs) nni.report_intermediate_result(logs["val_acc"])
[]
Please provide a description of the function:def get_all_config(self): '''get all of config values''' return json.dumps(self.config, indent=4, sort_keys=True, separators=(',', ':'))
[]
Please provide a description of the function:def set_config(self, key, value): '''set {key:value} paris to self.config''' self.config = self.read_file() self.config[key] = value self.write_file()
[]
Please provide a description of the function:def write_file(self): '''save config to local file''' if self.config: try: with open(self.config_file, 'w') as file: json.dump(self.config, file) except IOError as error: print('Error:', error) return
[]
Please provide a description of the function:def add_experiment(self, id, port, time, file_name, platform): '''set {key:value} paris to self.experiment''' self.experiments[id] = {} self.experiments[id]['port'] = port self.experiments[id]['startTime'] = time self.experiments[id]['endTime'] = 'N/A' self.experiments[id]['status'] = 'INITIALIZED' self.experiments[id]['fileName'] = file_name self.experiments[id]['platform'] = platform self.write_file()
[]
Please provide a description of the function:def update_experiment(self, id, key, value): '''Update experiment''' if id not in self.experiments: return False self.experiments[id][key] = value self.write_file() return True
[]
Please provide a description of the function:def remove_experiment(self, id): '''remove an experiment by id''' if id in self.experiments: self.experiments.pop(id) self.write_file()
[]
Please provide a description of the function:def write_file(self): '''save config to local file''' try: with open(self.experiment_file, 'w') as file: json.dump(self.experiments, file) except IOError as error: print('Error:', error) return
[]
Please provide a description of the function:def read_file(self): '''load config from local file''' if os.path.exists(self.experiment_file): try: with open(self.experiment_file, 'r') as file: return json.load(file) except ValueError: return {} return {}
[]
Please provide a description of the function:def load_from_file(path, fmt=None, is_training=True): ''' load data from file ''' if fmt is None: fmt = 'squad' assert fmt in ['squad', 'csv'], 'input format must be squad or csv' qp_pairs = [] if fmt == 'squad': with open(path) as data_file: data = json.load(data_file)['data'] for doc in data: for paragraph in doc['paragraphs']: passage = paragraph['context'] for qa_pair in paragraph['qas']: question = qa_pair['question'] qa_id = qa_pair['id'] if not is_training: qp_pairs.append( {'passage': passage, 'question': question, 'id': qa_id}) else: for answer in qa_pair['answers']: answer_begin = int(answer['answer_start']) answer_end = answer_begin + len(answer['text']) qp_pairs.append({'passage': passage, 'question': question, 'id': qa_id, 'answer_begin': answer_begin, 'answer_end': answer_end}) else: with open(path, newline='') as csvfile: reader = csv.reader(csvfile, delimiter='\t') line_num = 0 for row in reader: qp_pairs.append( {'passage': row[1], 'question': row[0], 'id': line_num}) line_num += 1 return qp_pairs
[]
Please provide a description of the function:def tokenize(qp_pair, tokenizer=None, is_training=False): ''' tokenize function. ''' question_tokens = tokenizer.tokenize(qp_pair['question']) passage_tokens = tokenizer.tokenize(qp_pair['passage']) if is_training: question_tokens = question_tokens[:300] passage_tokens = passage_tokens[:300] passage_tokens.insert( 0, {'word': '<BOS>', 'original_text': '<BOS>', 'char_begin': 0, 'char_end': 0}) passage_tokens.append( {'word': '<EOS>', 'original_text': '<EOS>', 'char_begin': 0, 'char_end': 0}) qp_pair['question_tokens'] = question_tokens qp_pair['passage_tokens'] = passage_tokens
[]
Please provide a description of the function:def collect_vocab(qp_pairs): ''' Build the vocab from corpus. ''' vocab = set() for qp_pair in qp_pairs: for word in qp_pair['question_tokens']: vocab.add(word['word']) for word in qp_pair['passage_tokens']: vocab.add(word['word']) return vocab
[]
Please provide a description of the function:def shuffle_step(entries, step): ''' Shuffle the step ''' answer = [] for i in range(0, len(entries), step): sub = entries[i:i+step] shuffle(sub) answer += sub return answer
[]
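A small illustration of the windowed shuffle above; the helper is restated locally so the snippet runs on its own.

```python
from random import shuffle

def shuffle_step(entries, step):
    answer = []
    for i in range(0, len(entries), step):
        sub = entries[i:i + step]
        shuffle(sub)                 # shuffle only within the current window
        answer += sub
    return answer

print(shuffle_step(list(range(9)), 3))
# e.g. [2, 0, 1, 5, 3, 4, 7, 8, 6] -- each element stays inside its block of 3
```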
Please provide a description of the function:def get_batches(qp_pairs, batch_size, need_sort=True): ''' Get batches data and shuffle. ''' if need_sort: qp_pairs = sorted(qp_pairs, key=lambda qp: ( len(qp['passage_tokens']), qp['id']), reverse=True) batches = [{'qp_pairs': qp_pairs[i:(i + batch_size)]} for i in range(0, len(qp_pairs), batch_size)] shuffle(batches) return batches
[]
Please provide a description of the function:def get_char_input(data, char_dict, max_char_length): ''' Get char input. ''' batch_size = len(data) sequence_length = max(len(d) for d in data) char_id = np.zeros((max_char_length, sequence_length, batch_size), dtype=np.int32) char_lengths = np.zeros((sequence_length, batch_size), dtype=np.float32) for batch_idx in range(0, min(len(data), batch_size)): batch_data = data[batch_idx] for sample_idx in range(0, min(len(batch_data), sequence_length)): word = batch_data[sample_idx]['word'] char_lengths[sample_idx, batch_idx] = min( len(word), max_char_length) for i in range(0, min(len(word), max_char_length)): char_id[i, sample_idx, batch_idx] = get_id(char_dict, word[i]) return char_id, char_lengths
[]
Please provide a description of the function:def get_word_input(data, word_dict, embed, embed_dim): ''' Get word input. ''' batch_size = len(data) max_sequence_length = max(len(d) for d in data) sequence_length = max_sequence_length word_input = np.zeros((max_sequence_length, batch_size, embed_dim), dtype=np.float32) ids = np.zeros((sequence_length, batch_size), dtype=np.int32) masks = np.zeros((sequence_length, batch_size), dtype=np.float32) lengths = np.zeros([batch_size], dtype=np.int32) for batch_idx in range(0, min(len(data), batch_size)): batch_data = data[batch_idx] lengths[batch_idx] = len(batch_data) for sample_idx in range(0, min(len(batch_data), sequence_length)): word = batch_data[sample_idx]['word'].lower() if word in word_dict.keys(): word_input[sample_idx, batch_idx] = embed[word_dict[word]] ids[sample_idx, batch_idx] = word_dict[word] masks[sample_idx, batch_idx] = 1 word_input = np.reshape(word_input, (-1, embed_dim)) return word_input, ids, masks, lengths
[]
Please provide a description of the function:def get_word_index(tokens, char_index): ''' Given word return word index. ''' for (i, token) in enumerate(tokens): if token['char_end'] == 0: continue if token['char_begin'] <= char_index and char_index <= token['char_end']: return i return 0
[]
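A worked example of the char-to-word index mapping above; the two hand-written tokens are illustrative.

```python
tokens = [
    {'word': 'hello', 'char_begin': 0, 'char_end': 5},
    {'word': 'world', 'char_begin': 7, 'char_end': 12},
]
print(get_word_index(tokens, 8))   # 1 -- character index 8 falls inside 'world'
print(get_word_index(tokens, 6))   # 0 -- no token covers index 6, so the default is returned
```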
Please provide a description of the function:def get_answer_begin_end(data): ''' Get answer's index of begin and end. ''' begin = [] end = [] for qa_pair in data: tokens = qa_pair['passage_tokens'] char_begin = qa_pair['answer_begin'] char_end = qa_pair['answer_end'] word_begin = get_word_index(tokens, char_begin) word_end = get_word_index(tokens, char_end) begin.append(word_begin) end.append(word_end) return np.asarray(begin), np.asarray(end)
[]
Please provide a description of the function:def get_buckets(min_length, max_length, bucket_count): ''' Get bucket by length. ''' if bucket_count <= 0: return [max_length] unit_length = int((max_length - min_length) // (bucket_count)) buckets = [min_length + unit_length * (i + 1) for i in range(0, bucket_count)] buckets[-1] = max_length return buckets
[]
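A quick worked example of the bucketing above; the function is restated so the snippet is self-contained.

```python
def get_buckets(min_length, max_length, bucket_count):
    if bucket_count <= 0:
        return [max_length]
    unit_length = int((max_length - min_length) // bucket_count)
    buckets = [min_length + unit_length * (i + 1) for i in range(0, bucket_count)]
    buckets[-1] = max_length      # the last bucket always covers max_length
    return buckets

print(get_buckets(10, 100, 3))    # [40, 70, 100]
print(get_buckets(10, 100, 0))    # [100]
```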
Please provide a description of the function:def tokenize(self, text): ''' tokenize function in Tokenizer. ''' start = -1 tokens = [] for i, character in enumerate(text): if character == ' ' or character == '\t': if start >= 0: word = text[start:i] tokens.append({ 'word': word, 'original_text': word, 'char_begin': start, 'char_end': i}) start = -1 else: if start < 0: start = i if start >= 0: tokens.append({ 'word': text[start:len(text)], 'original_text': text[start:len(text)], 'char_begin': start, 'char_end': len(text) }) return tokens
[]
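What the whitespace tokenizer above produces for a short string. Instantiating `Tokenizer()` with no arguments is an assumption about the class, not something stated in the source.

```python
tokens = Tokenizer().tokenize('hello  world')   # hypothetical constructor call
# [{'word': 'hello', 'original_text': 'hello', 'char_begin': 0, 'char_end': 5},
#  {'word': 'world', 'original_text': 'world', 'char_begin': 7, 'char_end': 12}]
```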
Please provide a description of the function:def generate_new_id(self): self.events.append(Event()) indiv_id = self.indiv_counter self.indiv_counter += 1 return indiv_id
[ "\n generate new id and event hook for new Individual\n " ]
Please provide a description of the function:def init_population(self, population_size, graph_max_layer, graph_min_layer): population = [] graph = Graph(max_layer_num=graph_max_layer, min_layer_num=graph_min_layer, inputs=[Layer(LayerType.input.value, output=[4, 5], size='x'), Layer(LayerType.input.value, output=[4, 5], size='y')], output=[Layer(LayerType.output.value, inputs=[4], size='x'), Layer(LayerType.output.value, inputs=[5], size='y')], hide=[Layer(LayerType.attention.value, inputs=[0, 1], output=[2]), Layer(LayerType.attention.value, inputs=[1, 0], output=[3])]) for _ in range(population_size): graph_tmp = copy.deepcopy(graph) graph_tmp.mutation() population.append(Individual(indiv_id=self.generate_new_id(), graph_cfg=graph_tmp, result=None)) return population
[ "\n initialize populations for evolution tuner\n " ]
Please provide a description of the function:def generate_parameters(self, parameter_id): logger.debug('acquiring lock for param {}'.format(parameter_id)) self.thread_lock.acquire() logger.debug('lock for current thread acquired') if not self.population: logger.debug("the len of poplution lower than zero.") raise Exception('The population is empty') pos = -1 for i in range(len(self.population)): if self.population[i].result is None: pos = i break if pos != -1: indiv = copy.deepcopy(self.population[pos]) self.population.pop(pos) graph_param = json.loads(graph_dumps(indiv.config)) else: random.shuffle(self.population) if self.population[0].result < self.population[1].result: self.population[0] = self.population[1] indiv = copy.deepcopy(self.population[0]) self.population.pop(1) indiv.mutation(indiv_id = self.generate_new_id()) graph_param = json.loads(graph_dumps(indiv.config)) param_json = { 'graph': graph_param, 'restore_dir': self.save_dir(indiv.parent_id), 'save_dir': self.save_dir(indiv.indiv_id), 'shared_id': list(indiv.shared_ids) if indiv.parent_id is not None else None, } logger.debug('generate_parameter return value is:') logger.debug(param_json) logger.debug('releasing lock') self.thread_lock.release() if indiv.parent_id is not None: logger.debug("new trial {} pending on parent experiment {}".format(indiv.indiv_id, indiv.parent_id)) self.events[indiv.parent_id].wait() logger.debug("trial {} ready".format(indiv.indiv_id)) return param_json
[ "Returns a set of trial graph config, as a serializable object.\n An example configuration:\n ```json\n {\n \"shared_id\": [\n \"4a11b2ef9cb7211590dfe81039b27670\",\n \"370af04de24985e5ea5b3d72b12644c9\",\n \"11f646e9f650f5f3fedc12b6349ec60f\",\n \"0604e5350b9c734dd2d770ee877cfb26\",\n \"6dbeb8b022083396acb721267335f228\",\n \"ba55380d6c84f5caeb87155d1c5fa654\"\n ],\n \"graph\": {\n \"layers\": [\n ...\n {\n \"hash_id\": \"ba55380d6c84f5caeb87155d1c5fa654\",\n \"is_delete\": false,\n \"size\": \"x\",\n \"graph_type\": 0,\n \"output\": [\n 6\n ],\n \"output_size\": 1,\n \"input\": [\n 7,\n 1\n ],\n \"input_size\": 2\n },\n ...\n ]\n },\n \"restore_dir\": \"/mnt/nfs/nni/ga_squad/87\",\n \"save_dir\": \"/mnt/nfs/nni/ga_squad/95\"\n }\n ```\n `restore_dir` means the path in which to load the previous trained model weights. if null, init from stratch.\n `save_dir` means the path to save trained model for current trial.\n `graph` is the configuration of model network.\n Note: each configuration of layers has a `hash_id` property,\n which tells tuner & trial code whether to share trained weights or not.\n `shared_id` is the hash_id of layers that should be shared with previously trained model.\n " ]
Please provide a description of the function:def receive_trial_result(self, parameter_id, parameters, value): ''' Record an observation of the objective function parameter_id : int parameters : dict of parameters value: final metrics of the trial, including reward ''' logger.debug('acquiring lock for param {}'.format(parameter_id)) self.thread_lock.acquire() logger.debug('lock for current acquired') reward = extract_scalar_reward(value) if self.optimize_mode is OptimizeMode.Minimize: reward = -reward logger.debug('receive trial result is:\n') logger.debug(str(parameters)) logger.debug(str(reward)) indiv = Individual(indiv_id=int(os.path.split(parameters['save_dir'])[1]), graph_cfg=graph_loads(parameters['graph']), result=reward) self.population.append(indiv) logger.debug('releasing lock') self.thread_lock.release() self.events[indiv.indiv_id].set()
[]
Please provide a description of the function:def _update_data(self, trial_job_id, trial_history): if trial_job_id not in self.running_history: self.running_history[trial_job_id] = [] self.running_history[trial_job_id].extend(trial_history[len(self.running_history[trial_job_id]):])
[ "update data\n\n Parameters\n ----------\n trial_job_id: int\n trial job id\n trial_history: list\n The history performance matrix of each trial\n " ]
Please provide a description of the function:def trial_end(self, trial_job_id, success): if trial_job_id in self.running_history: if success: cnt = 0 history_sum = 0 self.completed_avg_history[trial_job_id] = [] for each in self.running_history[trial_job_id]: cnt += 1 history_sum += each self.completed_avg_history[trial_job_id].append(history_sum / cnt) self.running_history.pop(trial_job_id) else: logger.warning('trial_end: trial_job_id is not in running_history')
[ "trial_end\n \n Parameters\n ----------\n trial_job_id: int\n trial job id\n success: bool\n True if succssfully finish the experiment, False otherwise\n " ]
Please provide a description of the function:def assess_trial(self, trial_job_id, trial_history): curr_step = len(trial_history) if curr_step < self.start_step: return AssessResult.Good try: num_trial_history = [float(ele) for ele in trial_history] except (TypeError, ValueError) as error: logger.warning('incorrect data type or value:') logger.exception(error) except Exception as error: logger.warning('unrecognized exception in medianstop_assessor:') logger.exception(error) self._update_data(trial_job_id, num_trial_history) if self.high_better: best_history = max(num_trial_history) else: best_history = min(num_trial_history) avg_array = [] for id in self.completed_avg_history: if len(self.completed_avg_history[id]) >= curr_step: avg_array.append(self.completed_avg_history[id][curr_step - 1]) if len(avg_array) > 0: avg_array.sort() if self.high_better: median = avg_array[(len(avg_array)-1) // 2] return AssessResult.Bad if best_history < median else AssessResult.Good else: median = avg_array[len(avg_array) // 2] return AssessResult.Bad if best_history > median else AssessResult.Good else: return AssessResult.Good
[ "assess_trial\n \n Parameters\n ----------\n trial_job_id: int\n trial job id\n trial_history: list\n The history performance matrix of each trial\n\n Returns\n -------\n bool\n AssessResult.Good or AssessResult.Bad\n\n Raises\n ------\n Exception\n unrecognize exception in medianstop_assessor\n " ]
Please provide a description of the function:def copyHdfsDirectoryToLocal(hdfsDirectory, localDirectory, hdfsClient): '''Copy directory from HDFS to local''' if not os.path.exists(localDirectory): os.makedirs(localDirectory) try: listing = hdfsClient.list_status(hdfsDirectory) except Exception as exception: nni_log(LogType.Error, 'List hdfs directory {0} error: {1}'.format(hdfsDirectory, str(exception))) raise exception for f in listing: if f.type == 'DIRECTORY': subHdfsDirectory = posixpath.join(hdfsDirectory, f.pathSuffix) subLocalDirectory = os.path.join(localDirectory, f.pathSuffix) copyHdfsDirectoryToLocal(subHdfsDirectory, subLocalDirectory, hdfsClient) elif f.type == 'FILE': hdfsFilePath = posixpath.join(hdfsDirectory, f.pathSuffix) localFilePath = os.path.join(localDirectory, f.pathSuffix) copyHdfsFileToLocal(hdfsFilePath, localFilePath, hdfsClient) else: raise AssertionError('unexpected type {}'.format(f.type))
[]
Please provide a description of the function:def copyHdfsFileToLocal(hdfsFilePath, localFilePath, hdfsClient, override=True): '''Copy file from HDFS to local''' if not hdfsClient.exists(hdfsFilePath): raise Exception('HDFS file {} does not exist!'.format(hdfsFilePath)) try: file_status = hdfsClient.get_file_status(hdfsFilePath) if file_status.type != 'FILE': raise Exception('HDFS file path {} is not a file'.format(hdfsFilePath)) except Exception as exception: nni_log(LogType.Error, 'Get hdfs file {0} status error: {1}'.format(hdfsFilePath, str(exception))) raise exception if os.path.exists(localFilePath) and override: os.remove(localFilePath) try: hdfsClient.copy_to_local(hdfsFilePath, localFilePath) except Exception as exception: nni_log(LogType.Error, 'Copy hdfs file {0} to {1} error: {2}'.format(hdfsFilePath, localFilePath, str(exception))) raise exception nni_log(LogType.Info, 'Successfully copied hdfs file {0} to {1}, {2} bytes'.format(hdfsFilePath, localFilePath, file_status.length))
[]
Please provide a description of the function:def copyDirectoryToHdfs(localDirectory, hdfsDirectory, hdfsClient): '''Copy directory from local to HDFS''' if not os.path.exists(localDirectory): raise Exception('Local Directory does not exist!') hdfsClient.mkdirs(hdfsDirectory) result = True for file in os.listdir(localDirectory): file_path = os.path.join(localDirectory, file) if os.path.isdir(file_path): hdfs_directory = os.path.join(hdfsDirectory, file) try: result = result and copyDirectoryToHdfs(file_path, hdfs_directory, hdfsClient) except Exception as exception: nni_log(LogType.Error, 'Copy local directory {0} to hdfs directory {1} error: {2}'.format(file_path, hdfs_directory, str(exception))) result = False else: hdfs_file_path = os.path.join(hdfsDirectory, file) try: result = result and copyFileToHdfs(file_path, hdfs_file_path, hdfsClient) except Exception as exception: nni_log(LogType.Error, 'Copy local file {0} to hdfs {1} error: {2}'.format(file_path, hdfs_file_path, str(exception))) result = False return result
[]
Please provide a description of the function:def copyFileToHdfs(localFilePath, hdfsFilePath, hdfsClient, override=True): '''Copy a local file to HDFS directory''' if not os.path.exists(localFilePath): raise Exception('Local file Path does not exist!') if os.path.isdir(localFilePath): raise Exception('localFile should not a directory!') if hdfsClient.exists(hdfsFilePath): if override: hdfsClient.delete(hdfsFilePath) else: return False try: hdfsClient.copy_from_local(localFilePath, hdfsFilePath) return True except Exception as exception: nni_log(LogType.Error, 'Copy local file {0} to hdfs file {1} error: {2}'.format(localFilePath, hdfsFilePath, str(exception))) return False
[]
Please provide a description of the function:def load_data(): '''Load dataset, use boston dataset''' boston = load_boston() X_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=99, test_size=0.25) #normalize data ss_X = StandardScaler() ss_y = StandardScaler() X_train = ss_X.fit_transform(X_train) X_test = ss_X.transform(X_test) y_train = ss_y.fit_transform(y_train[:, None])[:,0] y_test = ss_y.transform(y_test[:, None])[:,0] return X_train, X_test, y_train, y_test
[]
Please provide a description of the function:def get_model(PARAMS): '''Get model according to parameters''' model_dict = { 'LinearRegression': LinearRegression(), 'SVR': SVR(), 'KNeighborsRegressor': KNeighborsRegressor(), 'DecisionTreeRegressor': DecisionTreeRegressor() } if not model_dict.get(PARAMS['model_name']): LOG.exception('Not supported model!') exit(1) model = model_dict[PARAMS['model_name']] try: if PARAMS['model_name'] == 'SVR': model.kernel = PARAMS['svr_kernel'] elif PARAMS['model_name'] == 'KNeighborsRegressor': model.weights = PARAMS['knr_weights'] except Exception as exception: LOG.exception(exception) raise return model
[]
Please provide a description of the function:def run(X_train, X_test, y_train, y_test, PARAMS): '''Train model and predict result''' model = get_model(PARAMS) model.fit(X_train, y_train) predict_y = model.predict(X_test) score = r2_score(y_test, predict_y) LOG.debug('r2 score: %s' % score) nni.report_final_result(score)
[]
Please provide a description of the function:def add_skip_connection(self, u, v, connection_type): if connection_type not in [self.CONCAT_CONNECT, self.ADD_CONNECT]: raise ValueError( "connection_type should be NetworkDescriptor.CONCAT_CONNECT " "or NetworkDescriptor.ADD_CONNECT." ) self.skip_connections.append((u, v, connection_type))
[ " Add a skip-connection to the descriptor.\n Args:\n u: Number of convolutional layers before the starting point.\n v: Number of convolutional layers before the ending point.\n connection_type: Must be either CONCAT_CONNECT or ADD_CONNECT.\n " ]
Please provide a description of the function:def to_json(self): ''' NetworkDescriptor to json representation ''' skip_list = [] for u, v, connection_type in self.skip_connections: skip_list.append({"from": u, "to": v, "type": connection_type}) return {"node_list": self.layers, "skip_list": skip_list}
[]
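How the two NetworkDescriptor methods above fit together; the no-argument constructor and the empty starting layer list are assumptions for illustration.

```python
descriptor = NetworkDescriptor()                   # hypothetical constructor call
descriptor.add_skip_connection(1, 3, NetworkDescriptor.ADD_CONNECT)
print(descriptor.to_json())
# {'node_list': [], 'skip_list': [{'from': 1, 'to': 3, 'type': NetworkDescriptor.ADD_CONNECT}]}
```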
Please provide a description of the function:def add_layer(self, layer, input_node_id): if isinstance(input_node_id, Iterable): layer.input = list(map(lambda x: self.node_list[x], input_node_id)) output_node_id = self._add_node(Node(layer.output_shape)) for node_id in input_node_id: self._add_edge(layer, node_id, output_node_id) else: layer.input = self.node_list[input_node_id] output_node_id = self._add_node(Node(layer.output_shape)) self._add_edge(layer, input_node_id, output_node_id) layer.output = self.node_list[output_node_id] return output_node_id
[ "Add a layer to the Graph.\n Args:\n layer: An instance of the subclasses of StubLayer in layers.py.\n input_node_id: An integer. The ID of the input node of the layer.\n Returns:\n output_node_id: An integer. The ID of the output node of the layer.\n " ]
Please provide a description of the function:def _add_node(self, node): node_id = len(self.node_list) self.node_to_id[node] = node_id self.node_list.append(node) self.adj_list[node_id] = [] self.reverse_adj_list[node_id] = [] return node_id
[ "Add a new node to node_list and give the node an ID.\n Args:\n node: An instance of Node.\n Returns:\n node_id: An integer.\n " ]
Please provide a description of the function:def _add_edge(self, layer, input_id, output_id): if layer in self.layer_to_id: layer_id = self.layer_to_id[layer] if input_id not in self.layer_id_to_input_node_ids[layer_id]: self.layer_id_to_input_node_ids[layer_id].append(input_id) if output_id not in self.layer_id_to_output_node_ids[layer_id]: self.layer_id_to_output_node_ids[layer_id].append(output_id) else: layer_id = len(self.layer_list) self.layer_list.append(layer) self.layer_to_id[layer] = layer_id self.layer_id_to_input_node_ids[layer_id] = [input_id] self.layer_id_to_output_node_ids[layer_id] = [output_id] self.adj_list[input_id].append((output_id, layer_id)) self.reverse_adj_list[output_id].append((input_id, layer_id))
[ "Add a new layer to the graph. The nodes should be created in advance." ]
Please provide a description of the function:def _redirect_edge(self, u_id, v_id, new_v_id): layer_id = None for index, edge_tuple in enumerate(self.adj_list[u_id]): if edge_tuple[0] == v_id: layer_id = edge_tuple[1] self.adj_list[u_id][index] = (new_v_id, layer_id) self.layer_list[layer_id].output = self.node_list[new_v_id] break for index, edge_tuple in enumerate(self.reverse_adj_list[v_id]): if edge_tuple[0] == u_id: layer_id = edge_tuple[1] self.reverse_adj_list[v_id].remove(edge_tuple) break self.reverse_adj_list[new_v_id].append((u_id, layer_id)) for index, value in enumerate(self.layer_id_to_output_node_ids[layer_id]): if value == v_id: self.layer_id_to_output_node_ids[layer_id][index] = new_v_id break
[ "Redirect the layer to a new node.\n Change the edge originally from `u_id` to `v_id` into an edge from `u_id` to `new_v_id`\n while keeping all other property of the edge the same.\n " ]
Please provide a description of the function:def _replace_layer(self, layer_id, new_layer): old_layer = self.layer_list[layer_id] new_layer.input = old_layer.input new_layer.output = old_layer.output new_layer.output.shape = new_layer.output_shape self.layer_list[layer_id] = new_layer self.layer_to_id[new_layer] = layer_id self.layer_to_id.pop(old_layer)
[ "Replace the layer with a new layer." ]
Please provide a description of the function:def topological_order(self): q = Queue() in_degree = {} for i in range(self.n_nodes): in_degree[i] = 0 for u in range(self.n_nodes): for v, _ in self.adj_list[u]: in_degree[v] += 1 for i in range(self.n_nodes): if in_degree[i] == 0: q.put(i) order_list = [] while not q.empty(): u = q.get() order_list.append(u) for v, _ in self.adj_list[u]: in_degree[v] -= 1 if in_degree[v] == 0: q.put(v) return order_list
[ "Return the topological order of the node IDs from the input node to the output node." ]
Please provide a description of the function:def _get_pooling_layers(self, start_node_id, end_node_id): layer_list = [] node_list = [start_node_id] assert self._depth_first_search(end_node_id, layer_list, node_list) ret = [] for layer_id in layer_list: layer = self.layer_list[layer_id] if is_layer(layer, "Pooling"): ret.append(layer) elif is_layer(layer, "Conv") and layer.stride != 1: ret.append(layer) return ret
[ "Given two node IDs, return all the pooling layers between them." ]
Please provide a description of the function:def _depth_first_search(self, target_id, layer_id_list, node_list): assert len(node_list) <= self.n_nodes u = node_list[-1] if u == target_id: return True for v, layer_id in self.adj_list[u]: layer_id_list.append(layer_id) node_list.append(v) if self._depth_first_search(target_id, layer_id_list, node_list): return True layer_id_list.pop() node_list.pop() return False
[ "Search for all the layers and nodes down the path.\n A recursive function to search all the layers and nodes between the node in the node_list\n and the node with target_id." ]
Please provide a description of the function:def _search(self, u, start_dim, total_dim, n_add): if (u, start_dim, total_dim, n_add) in self.vis: return self.vis[(u, start_dim, total_dim, n_add)] = True for v, layer_id in self.adj_list[u]: layer = self.layer_list[layer_id] if is_layer(layer, "Conv"): new_layer = wider_next_conv( layer, start_dim, total_dim, n_add, self.weighted ) self._replace_layer(layer_id, new_layer) elif is_layer(layer, "Dense"): new_layer = wider_next_dense( layer, start_dim, total_dim, n_add, self.weighted ) self._replace_layer(layer_id, new_layer) elif is_layer(layer, "BatchNormalization"): new_layer = wider_bn(layer, start_dim, total_dim, n_add, self.weighted) self._replace_layer(layer_id, new_layer) self._search(v, start_dim, total_dim, n_add) elif is_layer(layer, "Concatenate"): if self.layer_id_to_input_node_ids[layer_id][1] == u: # u is on the right of the concat # next_start_dim += next_total_dim - total_dim left_dim = self._upper_layer_width( self.layer_id_to_input_node_ids[layer_id][0] ) next_start_dim = start_dim + left_dim next_total_dim = total_dim + left_dim else: next_start_dim = start_dim next_total_dim = total_dim + self._upper_layer_width( self.layer_id_to_input_node_ids[layer_id][1] ) self._search(v, next_start_dim, next_total_dim, n_add) else: self._search(v, start_dim, total_dim, n_add) for v, layer_id in self.reverse_adj_list[u]: layer = self.layer_list[layer_id] if is_layer(layer, "Conv"): new_layer = wider_pre_conv(layer, n_add, self.weighted) self._replace_layer(layer_id, new_layer) elif is_layer(layer, "Dense"): new_layer = wider_pre_dense(layer, n_add, self.weighted) self._replace_layer(layer_id, new_layer) elif is_layer(layer, "Concatenate"): continue else: self._search(v, start_dim, total_dim, n_add)
[ "Search the graph for all the layers to be widened caused by an operation.\n It is an recursive function with duplication check to avoid deadlock.\n It searches from a starting node u until the corresponding layers has been widened.\n Args:\n u: The starting node ID.\n start_dim: The position to insert the additional dimensions.\n total_dim: The total number of dimensions the layer has before widening.\n n_add: The number of dimensions to add.\n " ]
Please provide a description of the function:def to_deeper_model(self, target_id, new_layer): self.operation_history.append(("to_deeper_model", target_id, new_layer)) input_id = self.layer_id_to_input_node_ids[target_id][0] output_id = self.layer_id_to_output_node_ids[target_id][0] if self.weighted: if is_layer(new_layer, "Dense"): init_dense_weight(new_layer) elif is_layer(new_layer, "Conv"): init_conv_weight(new_layer) elif is_layer(new_layer, "BatchNormalization"): init_bn_weight(new_layer) self._insert_new_layers([new_layer], input_id, output_id)
[ "Insert a relu-conv-bn block after the target block.\n Args:\n target_id: A convolutional layer ID. The new block should be inserted after the block.\n new_layer: An instance of StubLayer subclasses.\n " ]
Please provide a description of the function:def to_wider_model(self, pre_layer_id, n_add): self.operation_history.append(("to_wider_model", pre_layer_id, n_add)) pre_layer = self.layer_list[pre_layer_id] output_id = self.layer_id_to_output_node_ids[pre_layer_id][0] dim = layer_width(pre_layer) self.vis = {} self._search(output_id, dim, dim, n_add) # Update the tensor shapes. for u in self.topological_order: for v, layer_id in self.adj_list[u]: self.node_list[v].shape = self.layer_list[layer_id].output_shape
[ "Widen the last dimension of the output of the pre_layer.\n Args:\n pre_layer_id: The ID of a convolutional layer or dense layer.\n n_add: The number of dimensions to add.\n " ]
Please provide a description of the function:def _insert_new_layers(self, new_layers, start_node_id, end_node_id): new_node_id = self._add_node(deepcopy(self.node_list[end_node_id])) temp_output_id = new_node_id for layer in new_layers[:-1]: temp_output_id = self.add_layer(layer, temp_output_id) self._add_edge(new_layers[-1], temp_output_id, end_node_id) new_layers[-1].input = self.node_list[temp_output_id] new_layers[-1].output = self.node_list[end_node_id] self._redirect_edge(start_node_id, end_node_id, new_node_id)
[ "Insert the new_layers after the node with start_node_id." ]
Please provide a description of the function:def to_add_skip_model(self, start_id, end_id): self.operation_history.append(("to_add_skip_model", start_id, end_id)) filters_end = self.layer_list[end_id].output.shape[-1] filters_start = self.layer_list[start_id].output.shape[-1] start_node_id = self.layer_id_to_output_node_ids[start_id][0] pre_end_node_id = self.layer_id_to_input_node_ids[end_id][0] end_node_id = self.layer_id_to_output_node_ids[end_id][0] skip_output_id = self._insert_pooling_layer_chain(start_node_id, end_node_id) # Add the conv layer new_conv_layer = get_conv_class(self.n_dim)(filters_start, filters_end, 1) skip_output_id = self.add_layer(new_conv_layer, skip_output_id) # Add the add layer. add_input_node_id = self._add_node(deepcopy(self.node_list[end_node_id])) add_layer = StubAdd() self._redirect_edge(pre_end_node_id, end_node_id, add_input_node_id) self._add_edge(add_layer, add_input_node_id, end_node_id) self._add_edge(add_layer, skip_output_id, end_node_id) add_layer.input = [ self.node_list[add_input_node_id], self.node_list[skip_output_id], ] add_layer.output = self.node_list[end_node_id] self.node_list[end_node_id].shape = add_layer.output_shape # Set weights to the additional conv layer. if self.weighted: filter_shape = (1,) * self.n_dim weights = np.zeros((filters_end, filters_start) + filter_shape) bias = np.zeros(filters_end) new_conv_layer.set_weights( (add_noise(weights, np.array([0, 1])), add_noise(bias, np.array([0, 1]))) )
[ "Add a weighted add skip-connection from after start node to end node.\n Args:\n start_id: The convolutional layer ID, after which to start the skip-connection.\n end_id: The convolutional layer ID, after which to end the skip-connection.\n " ]
Please provide a description of the function:def to_concat_skip_model(self, start_id, end_id): self.operation_history.append(("to_concat_skip_model", start_id, end_id)) filters_end = self.layer_list[end_id].output.shape[-1] filters_start = self.layer_list[start_id].output.shape[-1] start_node_id = self.layer_id_to_output_node_ids[start_id][0] pre_end_node_id = self.layer_id_to_input_node_ids[end_id][0] end_node_id = self.layer_id_to_output_node_ids[end_id][0] skip_output_id = self._insert_pooling_layer_chain(start_node_id, end_node_id) concat_input_node_id = self._add_node(deepcopy(self.node_list[end_node_id])) self._redirect_edge(pre_end_node_id, end_node_id, concat_input_node_id) concat_layer = StubConcatenate() concat_layer.input = [ self.node_list[concat_input_node_id], self.node_list[skip_output_id], ] concat_output_node_id = self._add_node(Node(concat_layer.output_shape)) self._add_edge(concat_layer, concat_input_node_id, concat_output_node_id) self._add_edge(concat_layer, skip_output_id, concat_output_node_id) concat_layer.output = self.node_list[concat_output_node_id] self.node_list[concat_output_node_id].shape = concat_layer.output_shape # Add the concatenate layer. new_conv_layer = get_conv_class(self.n_dim)( filters_start + filters_end, filters_end, 1 ) self._add_edge(new_conv_layer, concat_output_node_id, end_node_id) new_conv_layer.input = self.node_list[concat_output_node_id] new_conv_layer.output = self.node_list[end_node_id] self.node_list[end_node_id].shape = new_conv_layer.output_shape if self.weighted: filter_shape = (1,) * self.n_dim weights = np.zeros((filters_end, filters_end) + filter_shape) for i in range(filters_end): filter_weight = np.zeros((filters_end,) + filter_shape) center_index = (i,) + (0,) * self.n_dim filter_weight[center_index] = 1 weights[i, ...] = filter_weight weights = np.concatenate( (weights, np.zeros((filters_end, filters_start) + filter_shape)), axis=1 ) bias = np.zeros(filters_end) new_conv_layer.set_weights( (add_noise(weights, np.array([0, 1])), add_noise(bias, np.array([0, 1]))) )
[ "Add a weighted add concatenate connection from after start node to end node.\n Args:\n start_id: The convolutional layer ID, after which to start the skip-connection.\n end_id: The convolutional layer ID, after which to end the skip-connection.\n " ]
Please provide a description of the function:def extract_descriptor(self): main_chain = self.get_main_chain() index_in_main_chain = {} for index, u in enumerate(main_chain): index_in_main_chain[u] = index ret = NetworkDescriptor() for u in main_chain: for v, layer_id in self.adj_list[u]: if v not in index_in_main_chain: continue layer = self.layer_list[layer_id] copied_layer = copy(layer) copied_layer.weights = None ret.add_layer(deepcopy(copied_layer)) for u in index_in_main_chain: for v, layer_id in self.adj_list[u]: if v not in index_in_main_chain: temp_u = u temp_v = v temp_layer_id = layer_id skip_type = None while not (temp_v in index_in_main_chain and temp_u in index_in_main_chain): if is_layer(self.layer_list[temp_layer_id], "Concatenate"): skip_type = NetworkDescriptor.CONCAT_CONNECT if is_layer(self.layer_list[temp_layer_id], "Add"): skip_type = NetworkDescriptor.ADD_CONNECT temp_u = temp_v temp_v, temp_layer_id = self.adj_list[temp_v][0] ret.add_skip_connection( index_in_main_chain[u], index_in_main_chain[temp_u], skip_type ) elif index_in_main_chain[v] - index_in_main_chain[u] != 1: skip_type = None if is_layer(self.layer_list[layer_id], "Concatenate"): skip_type = NetworkDescriptor.CONCAT_CONNECT if is_layer(self.layer_list[layer_id], "Add"): skip_type = NetworkDescriptor.ADD_CONNECT ret.add_skip_connection( index_in_main_chain[u], index_in_main_chain[v], skip_type ) return ret
[ "Extract the the description of the Graph as an instance of NetworkDescriptor." ]
Please provide a description of the function:def clear_weights(self): ''' clear weights of the graph ''' self.weighted = False for layer in self.layer_list: layer.weights = None
[]
Please provide a description of the function:def get_main_chain_layers(self): main_chain = self.get_main_chain() ret = [] for u in main_chain: for v, layer_id in self.adj_list[u]: if v in main_chain and u in main_chain: ret.append(layer_id) return ret
[ "Return a list of layer IDs in the main chain." ]
Please provide a description of the function:def get_main_chain(self): pre_node = {} distance = {} for i in range(self.n_nodes): distance[i] = 0 pre_node[i] = i for i in range(self.n_nodes - 1): for u in range(self.n_nodes): for v, _ in self.adj_list[u]: if distance[u] + 1 > distance[v]: distance[v] = distance[u] + 1 pre_node[v] = u temp_id = 0 for i in range(self.n_nodes): if distance[i] > distance[temp_id]: temp_id = i ret = [] for i in range(self.n_nodes + 5): ret.append(temp_id) if pre_node[temp_id] == temp_id: break temp_id = pre_node[temp_id] assert temp_id == pre_node[temp_id] ret.reverse() return ret
[ "Returns the main chain node ID list." ]
Please provide a description of the function:def run(self): _logger.info('Start dispatcher') if dispatcher_env_vars.NNI_MODE == 'resume': self.load_checkpoint() while True: command, data = receive() if data: data = json_tricks.loads(data) if command is None or command is CommandType.Terminate: break if multi_thread_enabled(): result = self.pool.map_async(self.process_command_thread, [(command, data)]) self.thread_results.append(result) if any([thread_result.ready() and not thread_result.successful() for thread_result in self.thread_results]): _logger.debug('Caught thread exception') break else: self.enqueue_command(command, data) if self.worker_exceptions: break _logger.info('Dispatcher exiting...') self.stopping = True if multi_thread_enabled(): self.pool.close() self.pool.join() else: self.default_worker.join() self.assessor_worker.join() _logger.info('Terminated by NNI manager')
[ "Run the tuner.\n This function will never return unless raise.\n " ]
Please provide a description of the function:def command_queue_worker(self, command_queue): while True: try: # set timeout to ensure self.stopping is checked periodically command, data = command_queue.get(timeout=3) try: self.process_command(command, data) except Exception as e: _logger.exception(e) self.worker_exceptions.append(e) break except Empty: pass if self.stopping and (_worker_fast_exit_on_terminate or command_queue.empty()): break
[ "Process commands in command queues.\n " ]
Please provide a description of the function:def enqueue_command(self, command, data): if command == CommandType.TrialEnd or (command == CommandType.ReportMetricData and data['type'] == 'PERIODICAL'): self.assessor_command_queue.put((command, data)) else: self.default_command_queue.put((command, data)) qsize = self.default_command_queue.qsize() if qsize >= QUEUE_LEN_WARNING_MARK: _logger.warning('default queue length: %d', qsize) qsize = self.assessor_command_queue.qsize() if qsize >= QUEUE_LEN_WARNING_MARK: _logger.warning('assessor queue length: %d', qsize)
[ "Enqueue command into command queues\n " ]
Please provide a description of the function:def process_command_thread(self, request): command, data = request if multi_thread_enabled(): try: self.process_command(command, data) except Exception as e: _logger.exception(str(e)) raise else: pass
[ "Worker thread to process a command.\n " ]
Please provide a description of the function:def match_val_type(vals, vals_bounds, vals_types): ''' Update values in the array, to match their corresponding type ''' vals_new = [] for i, _ in enumerate(vals_types): if vals_types[i] == "discrete_int": # Find the closest integer in the array, vals_bounds vals_new.append(min(vals_bounds[i], key=lambda x: abs(x - vals[i]))) elif vals_types[i] == "range_int": # Round down to the nearest integer vals_new.append(math.floor(vals[i])) elif vals_types[i] == "range_continuous": # Don't do any processing for continuous numbers vals_new.append(vals[i]) else: return None return vals_new
[]
Please provide a description of the function:def rand(x_bounds, x_types): ''' Random generate variable value within their bounds ''' outputs = [] for i, _ in enumerate(x_bounds): if x_types[i] == "discrete_int": temp = x_bounds[i][random.randint(0, len(x_bounds[i]) - 1)] outputs.append(temp) elif x_types[i] == "range_int": temp = random.randint(x_bounds[i][0], x_bounds[i][1]) outputs.append(temp) elif x_types[i] == "range_continuous": temp = random.uniform(x_bounds[i][0], x_bounds[i][1]) outputs.append(temp) else: return None return outputs
[]
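A hypothetical call to the two helpers above, with an illustrative search space of one discrete, one integer-range, and one continuous-range variable:

```python
x_bounds = [[16, 32, 64], [1, 10], [0.0, 1.0]]
x_types = ['discrete_int', 'range_int', 'range_continuous']

candidate = rand(x_bounds, x_types)                  # e.g. [64, 7, 0.48...]
snapped = match_val_type([30.2, 3.7, 0.5], x_bounds, x_types)
print(snapped)                                       # [32, 3, 0.5]
```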
Please provide a description of the function:def to_wider_graph(graph): ''' wider graph ''' weighted_layer_ids = graph.wide_layer_ids() weighted_layer_ids = list( filter(lambda x: graph.layer_list[x].output.shape[-1], weighted_layer_ids) ) wider_layers = sample(weighted_layer_ids, 1) for layer_id in wider_layers: layer = graph.layer_list[layer_id] if is_layer(layer, "Conv"): n_add = layer.filters else: n_add = layer.units graph.to_wider_model(layer_id, n_add) return graph
[]
Please provide a description of the function:def to_skip_connection_graph(graph): ''' skip connection graph ''' # The last conv layer cannot be widen since wider operator cannot be done over the two sides of flatten. weighted_layer_ids = graph.skip_connection_layer_ids() valid_connection = [] for skip_type in sorted([NetworkDescriptor.ADD_CONNECT, NetworkDescriptor.CONCAT_CONNECT]): for index_a in range(len(weighted_layer_ids)): for index_b in range(len(weighted_layer_ids))[index_a + 1 :]: valid_connection.append((index_a, index_b, skip_type)) if not valid_connection: return graph for index_a, index_b, skip_type in sample(valid_connection, 1): a_id = weighted_layer_ids[index_a] b_id = weighted_layer_ids[index_b] if skip_type == NetworkDescriptor.ADD_CONNECT: graph.to_add_skip_model(a_id, b_id) else: graph.to_concat_skip_model(a_id, b_id) return graph
[]
Please provide a description of the function:def create_new_layer(layer, n_dim): ''' create new layer for the graph ''' input_shape = layer.output.shape dense_deeper_classes = [StubDense, get_dropout_class(n_dim), StubReLU] conv_deeper_classes = [get_conv_class(n_dim), get_batch_norm_class(n_dim), StubReLU] if is_layer(layer, "ReLU"): conv_deeper_classes = [get_conv_class(n_dim), get_batch_norm_class(n_dim)] dense_deeper_classes = [StubDense, get_dropout_class(n_dim)] elif is_layer(layer, "Dropout"): dense_deeper_classes = [StubDense, StubReLU] elif is_layer(layer, "BatchNormalization"): conv_deeper_classes = [get_conv_class(n_dim), StubReLU] layer_class = None if len(input_shape) == 1: # It is in the dense layer part. layer_class = sample(dense_deeper_classes, 1)[0] else: # It is in the conv layer part. layer_class = sample(conv_deeper_classes, 1)[0] if layer_class == StubDense: new_layer = StubDense(input_shape[0], input_shape[0]) elif layer_class == get_dropout_class(n_dim): new_layer = layer_class(Constant.DENSE_DROPOUT_RATE) elif layer_class == get_conv_class(n_dim): new_layer = layer_class( input_shape[-1], input_shape[-1], sample((1, 3, 5), 1)[0], stride=1 ) elif layer_class == get_batch_norm_class(n_dim): new_layer = layer_class(input_shape[-1]) elif layer_class == get_pooling_class(n_dim): new_layer = layer_class(sample((1, 3, 5), 1)[0]) else: new_layer = layer_class() return new_layer
[]
Please provide a description of the function:def to_deeper_graph(graph): ''' deeper graph ''' weighted_layer_ids = graph.deep_layer_ids() if len(weighted_layer_ids) >= Constant.MAX_LAYERS: return None deeper_layer_ids = sample(weighted_layer_ids, 1) for layer_id in deeper_layer_ids: layer = graph.layer_list[layer_id] new_layer = create_new_layer(layer, graph.n_dim) graph.to_deeper_model(layer_id, new_layer) return graph
[]
Please provide a description of the function:def legal_graph(graph): '''judge if a graph is legal or not. ''' descriptor = graph.extract_descriptor() skips = descriptor.skip_connections if len(skips) != len(set(skips)): return False return True
[]
Please provide a description of the function:def transform(graph): '''core transform function for graph. ''' graphs = [] for _ in range(Constant.N_NEIGHBOURS * 2): random_num = randrange(3) temp_graph = None if random_num == 0: temp_graph = to_deeper_graph(deepcopy(graph)) elif random_num == 1: temp_graph = to_wider_graph(deepcopy(graph)) elif random_num == 2: temp_graph = to_skip_connection_graph(deepcopy(graph)) if temp_graph is not None and temp_graph.size() <= Constant.MAX_MODEL_SIZE: graphs.append(temp_graph) if len(graphs) >= Constant.N_NEIGHBOURS: break return graphs
[]
Please provide a description of the function:def uniform(low, high, random_state): ''' low: a float that represents the lower bound high: a float that represents the upper bound random_state: an object of numpy.random.RandomState ''' assert high > low, 'Upper bound must be larger than lower bound' return random_state.uniform(low, high)
[]
Please provide a description of the function:def quniform(low, high, q, random_state): ''' low: a float that represents the lower bound high: a float that represents the upper bound q: sample step random_state: an object of numpy.random.RandomState ''' return np.round(uniform(low, high, random_state) / q) * q
[]
Please provide a description of the function:def loguniform(low, high, random_state): ''' low: a float that represents the lower bound high: a float that represents the upper bound random_state: an object of numpy.random.RandomState ''' assert low > 0, 'Lower bound must be positive' return np.exp(uniform(np.log(low), np.log(high), random_state))
[]
Please provide a description of the function:def qloguniform(low, high, q, random_state): ''' low: a float that represents the lower bound high: a float that represents the upper bound q: sample step random_state: an object of numpy.random.RandomState ''' return np.round(loguniform(low, high, random_state) / q) * q
[]
Please provide a description of the function:def qnormal(mu, sigma, q, random_state):
    '''
    mu: float or array_like of floats
    sigma: float or array_like of floats
    q: sample step
    random_state: an object of numpy.random.RandomState
    '''
    return np.round(normal(mu, sigma, random_state) / q) * q
[]
Please provide a description of the function:def lognormal(mu, sigma, random_state):
    '''
    mu: float or array_like of floats
    sigma: float or array_like of floats
    random_state: an object of numpy.random.RandomState
    '''
    return np.exp(normal(mu, sigma, random_state))
[]
Please provide a description of the function:def qlognormal(mu, sigma, q, random_state):
    '''
    mu: float or array_like of floats
    sigma: float or array_like of floats
    q: sample step
    random_state: an object of numpy.random.RandomState
    '''
    return np.round(lognormal(mu, sigma, random_state) / q) * q
[]
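The normal-family helpers above compose in the same way: a lognormal draw is just exp of a normal draw, and the q-variants round the result to a multiple of q. A small sketch using numpy directly instead of the helpers:

import numpy as np

rng = np.random.RandomState(42)
z = rng.normal(0.0, 1.0)                 # normal draw
x = np.exp(z)                            # corresponding lognormal draw
q = 0.5
print(z, x, np.round(x / q) * q)         # the quantized value is a multiple of 0.5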
Please provide a description of the function:def predict(parameters_value, regressor_gp):
    ''' Predict by Gaussian Process Model '''
    parameters_value = numpy.array(parameters_value).reshape(-1, len(parameters_value))
    mu, sigma = regressor_gp.predict(parameters_value, return_std=True)
    return mu[0], sigma[0]
[]
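The predict helper above assumes regressor_gp exposes scikit-learn's GaussianProcessRegressor interface, where predict(..., return_std=True) returns the posterior mean and standard deviation. A sketch of how such a regressor might be fitted and queried; the toy hyper-parameter points and metrics below are purely illustrative:

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor

X = np.array([[0.1, 32], [0.01, 64], [0.001, 128]])   # e.g. (learning rate, batch size) pairs
y = np.array([0.71, 0.82, 0.78])                       # observed metrics for those points

regressor_gp = GaussianProcessRegressor().fit(X, y)
point = np.array([0.05, 64]).reshape(-1, 2)
mu, sigma = regressor_gp.predict(point, return_std=True)
print(mu[0], sigma[0])                                  # predicted mean and uncertainty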
Please provide a description of the function:def rest_get(url, timeout):
    '''Call rest get method'''
    try:
        response = requests.get(url, timeout=timeout)
        return response
    except Exception as e:
        print('Got exception {0} when sending http get to url {1}'.format(str(e), url))
        return None
[]
Please provide a description of the function:def rest_post(url, data, timeout, rethrow_exception=False):
    '''Call rest post method'''
    try:
        response = requests.post(url,
                                 headers={'Accept': 'application/json', 'Content-Type': 'application/json'},
                                 data=data, timeout=timeout)
        return response
    except Exception as e:
        if rethrow_exception is True:
            raise
        print('Got exception {0} when sending http post to url {1}'.format(str(e), url))
        return None
[]
Please provide a description of the function:def rest_put(url, data, timeout):
    '''Call rest put method'''
    try:
        response = requests.put(url,
                                headers={'Accept': 'application/json', 'Content-Type': 'application/json'},
                                data=data, timeout=timeout)
        return response
    except Exception as e:
        print('Got exception {0} when sending http put to url {1}'.format(str(e), url))
        return None
[]
Please provide a description of the function:def rest_delete(url, timeout):
    '''Call rest delete method'''
    try:
        response = requests.delete(url, timeout=timeout)
        return response
    except Exception as e:
        print('Got exception {0} when sending http delete to url {1}'.format(str(e), url))
        return None
[]
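The four REST helpers above are thin wrappers over requests that add a timeout and swallow exceptions. A hedged usage sketch is shown below; the endpoint URL and payload are hypothetical and only illustrate how the helpers fit together, they are not part of NNI's REST API:

import json

base_url = 'http://localhost:8080/api/v1/experiment'   # hypothetical endpoint

response = rest_post(base_url, json.dumps({'status': 'RUNNING'}), timeout=20)
if response is not None and response.status_code == 200:
    print(rest_get(base_url, timeout=20).text)
else:
    print('POST failed or endpoint unavailable')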
Please provide a description of the function:def trial_end(self, trial_job_id, success):
    if success:
        if self.set_best_performance:
            self.completed_best_performance = max(self.completed_best_performance, self.trial_history[-1])
        else:
            self.set_best_performance = True
            self.completed_best_performance = self.trial_history[-1]
        logger.info('Updated completed best performance, trial job id: %s', trial_job_id)
    else:
        logger.info('No need to update, trial job id: %s', trial_job_id)
[ "update the best performance of completed trial job\n \n Parameters\n ----------\n trial_job_id: int\n trial job id\n success: bool\n True if succssfully finish the experiment, False otherwise\n " ]
Please provide a description of the function:def assess_trial(self, trial_job_id, trial_history):
    self.trial_job_id = trial_job_id
    self.trial_history = trial_history
    if not self.set_best_performance:
        return AssessResult.Good
    curr_step = len(trial_history)
    if curr_step < self.start_step:
        return AssessResult.Good

    if trial_job_id in self.last_judgment_num.keys() and curr_step - self.last_judgment_num[trial_job_id] < self.gap:
        return AssessResult.Good
    self.last_judgment_num[trial_job_id] = curr_step

    try:
        start_time = datetime.datetime.now()
        # Predict the final result
        curvemodel = CurveModel(self.target_pos)
        predict_y = curvemodel.predict(trial_history)
        logger.info('Prediction done. Trial job id = %s. Predicted value = %s', trial_job_id, predict_y)
        if predict_y is None:
            logger.info('Wait for more information to predict precisely')
            return AssessResult.Good
        standard_performance = self.completed_best_performance * self.threshold

        end_time = datetime.datetime.now()
        if (end_time - start_time).seconds > 60:
            logger.warning('Curve Fitting Assessor runtime exceeds 60s. Trial id = %s, trial history = %s',
                           self.trial_job_id, self.trial_history)

        if self.higher_better:
            if predict_y > standard_performance:
                return AssessResult.Good
            return AssessResult.Bad
        else:
            if predict_y < standard_performance:
                return AssessResult.Good
            return AssessResult.Bad

    except Exception as exception:
        logger.exception('Unrecognized exception in curvefitting_assessor: %s', exception)
[ "assess whether a trial should be early stop by curve fitting algorithm\n\n Parameters\n ----------\n trial_job_id: int\n trial job id\n trial_history: list\n The history performance matrix of each trial\n\n Returns\n -------\n bool\n AssessResult.Good or AssessResult.Bad\n\n Raises\n ------\n Exception\n unrecognize exception in curvefitting_assessor\n " ]
Please provide a description of the function:def handle_initialize(self, data):
    ''' data is search space '''
    self.tuner.update_search_space(data)
    send(CommandType.Initialized, '')
    return True
[]
Please provide a description of the function:def generate_parameters(self, parameter_id):
    if not self.history:
        self.init_search()

    new_father_id = None
    generated_graph = None
    if not self.training_queue:
        new_father_id, generated_graph = self.generate()
        new_model_id = self.model_count
        self.model_count += 1
        self.training_queue.append((generated_graph, new_father_id, new_model_id))
        self.descriptors.append(generated_graph.extract_descriptor())

    graph, father_id, model_id = self.training_queue.pop(0)

    # from graph to json
    json_model_path = os.path.join(self.path, str(model_id) + ".json")
    json_out = graph_to_json(graph, json_model_path)
    self.total_data[parameter_id] = (json_out, father_id, model_id)

    return json_out
[ "\n Returns a set of trial neural architecture, as a serializable object.\n\n Parameters\n ----------\n parameter_id : int\n " ]
Please provide a description of the function:def receive_trial_result(self, parameter_id, parameters, value):
    reward = extract_scalar_reward(value)

    if parameter_id not in self.total_data:
        raise RuntimeError("Received parameter_id not in total_data.")

    (_, father_id, model_id) = self.total_data[parameter_id]

    graph = self.bo.searcher.load_model_by_id(model_id)

    # to use the value and graph
    self.add_model(reward, model_id)
    self.update(father_id, graph, reward, model_id)
[ " Record an observation of the objective function.\n \n Parameters\n ----------\n parameter_id : int\n parameters : dict\n value : dict/float\n if value is dict, it should have \"default\" key.\n " ]
Please provide a description of the function:def init_search(self):
    if self.verbose:
        logger.info("Initializing search.")
    for generator in self.generators:
        graph = generator(self.n_classes, self.input_shape).generate(
            self.default_model_len, self.default_model_width
        )
        model_id = self.model_count
        self.model_count += 1
        self.training_queue.append((graph, -1, model_id))
        self.descriptors.append(graph.extract_descriptor())

    if self.verbose:
        logger.info("Initialization finished.")
[ "Call the generators to generate the initial architectures for the search." ]
Please provide a description of the function:def generate(self):
    generated_graph, new_father_id = self.bo.generate(self.descriptors)
    if new_father_id is None:
        new_father_id = 0
        generated_graph = self.generators[0](
            self.n_classes, self.input_shape
        ).generate(self.default_model_len, self.default_model_width)

    return new_father_id, generated_graph
[ "Generate the next neural architecture.\n\n Returns\n -------\n other_info: any object\n Anything to be saved in the training queue together with the architecture.\n generated_graph: Graph\n An instance of Graph.\n " ]
Please provide a description of the function:def update(self, other_info, graph, metric_value, model_id):
    father_id = other_info
    self.bo.fit([graph.extract_descriptor()], [metric_value])
    self.bo.add_child(father_id, model_id)
[ " Update the controller with evaluation result of a neural architecture.\n\n Parameters\n ----------\n other_info: any object\n In our case it is the father ID in the search tree.\n graph: Graph\n An instance of Graph. The trained neural architecture.\n metric_value: float\n The final evaluated metric value.\n model_id: int\n " ]
Please provide a description of the function:def add_model(self, metric_value, model_id):
    if self.verbose:
        logger.info("Saving model.")

    # Update best_model text file
    ret = {"model_id": model_id, "metric_value": metric_value}
    self.history.append(ret)
    if model_id == self.get_best_model_id():
        with open(os.path.join(self.path, "best_model.txt"), "w") as file:
            file.write("best model: " + str(model_id))
    return ret
[ " Add model to the history, x_queue and y_queue\n\n Parameters\n ----------\n metric_value : float\n graph : dict\n model_id : int\n\n Returns\n -------\n model : dict\n " ]