4628,"def _load_neighbors_from_external_source(self) -> None:
|
""""""
|
Loads the neighbors of the node from the igraph `Graph` instance that is
|
wrapped by the graph that has this node.
|
""""""
|
graph: IGraphWrapper = self._graph
|
ig_vertex: IGraphVertex = graph.wrapped_graph.vs[self._igraph_index]
|
ig_neighbors: List[IGraphVertex] = ig_vertex.neighbors()
|
for ig_neighbor in ig_neighbors:
|
try:
|
name: str = ig_neighbor[""name""]
|
except KeyError:
|
name: str = str(ig_neighbor.index)
|
try:
|
external_id: Optional[str] = ig_neighbor[""external_id""]
|
except KeyError:
|
external_id: Optional[str] = None
|
neighbor: IGraphNode = graph.nodes.get_node_by_name(name,
|
can_validate_and_load=True,
|
external_id=external_id)
|
graph.add_edge(self, neighbor)"
|
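The KeyError fallbacks in this record rely on python-igraph raising KeyError when a vertex attribute does not exist on the graph. A minimal standalone sketch of that pattern follows; it uses only the public igraph API, and the wrapper classes (IGraphWrapper, IGraphNode) from the record are not involved.

    import igraph

    g = igraph.Graph.Ring(4)                  # tiny ring graph with no vertex attributes

    for neighbor in g.vs[0].neighbors():      # Vertex objects, as in the record above
        try:
            name = neighbor["name"]           # raises KeyError: no "name" attribute exists
        except KeyError:
            name = str(neighbor.index)        # fall back to the numeric vertex index
        print(name)                           # prints "1" and "3" for this graph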
4629,"def _create_node(self, index: int, name: str, external_id: Optional[str] = None) -> IGraphNode:
|
""""""
|
Returns a new `IGraphNode` instance with the given index and name.
|
Arguments:
|
index (int): The index of the node to create.
|
name (str): The name of the node to create.
|
external_id (Optional[str]): The external ID of the node.
|
""""""
|
return IGraphNode(graph=self._graph, index=index, name=name, external_id=external_id)"
|
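For context, a caller passes the igraph vertex index plus the resolved name and optional external ID as keyword arguments. A hypothetical call site (the enclosing node-collection class and variable names are assumptions, not part of this record):

    # hypothetical call site; ig_vertex and the surrounding class are assumed
    node = self._create_node(index=ig_vertex.index,
                             name="protein_1234",
                             external_id=None)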
4630,"def parse(self):
|
""""""Parse the table data string into records.""""""
|
self.parse_fields()
|
records = []
|
for line in self.t['data'].split('\n'):
|
if EMPTY_ROW.match(line):
|
continue
|
row = [self.autoconvert(line[start_field:end_field+1])
|
for start_field, end_field in self.fields]
|
records.append(tuple(row))
|
self.records = records"
|
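The slicing in parse() treats each (start_field, end_field) pair as inclusive column bounds, hence the end_field+1 in the slice. A standalone sketch with hypothetical data, using strip() as a stand-in for the class's autoconvert():

    # hypothetical fixed-width row and column ranges, not taken from the record
    line   = "alanine      A    71.08"
    fields = [(0, 12), (13, 16), (18, 22)]   # (first, last) column of each field

    row = [line[start:end + 1].strip() for start, end in fields]
    print(row)   # ['alanine', 'A', '71.08']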
4631,"def recarray(self):
|
""""""Return a recarray from the (parsed) string.""""""
|
if self.records is None:
|
self.parse()
|
try:
|
# simple (should this also be subjected to convert.to_int64() ?)
|
return numpy.rec.fromrecords(self.records, names=self.names)
|
except ValueError:
|
# complicated because fromrecords cannot deal with records of lists
|
# Quick hack: use objects for lists etc (instead of building the proper
|
# data types (see docs for numpy.dtype , eg dtype('coord', (float, 3)) )
|
D = numpy.empty(len(self.records[0]), dtype=object) # number of fields from first record
|
types = numpy.array([map(type, r) for r in self.records]) # types of all fields
|
for icol, isSame in enumerate([numpy.all(col) for col in types.T]):
|
if isSame:
|
D[icol] = types[0][icol]
|
else:
|
D[icol] = object
|
dtype = numpy.dtype(zip(self.names, D))
|
# from numpy.rec.records
|
# TODO: this is not working properly yet; for instance, text fields
|
# are reduced to length 0 (<U0) and the final convert.to_int64 dies
|
# with '<U0'*** TypeError: TypeError('data type not understood',)
|
retval = numpy.array(self.records, dtype=dtype)
|
res = retval.view(numpy.recarray)
|
## res.dtype = numpy.dtype((numpy.rec.record, res.dtype)) # fails -- ARGH, this makes it a recarray
|
return convert.to_int64(res)"
|
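The simple branch hands the parsed tuples straight to numpy.rec.fromrecords; the except branch is the author's own work-around for rows that contain lists and, per the TODO comment, is not fully working (it also relies on Python 2 semantics, where map() and zip() return lists). A minimal sketch of the simple path with hypothetical records:

    # hypothetical parsed rows and field names, not taken from the record
    import numpy

    records = [(1, "ALA", 71.08), (2, "GLY", 57.05)]
    ra = numpy.rec.fromrecords(records, names="resid,resname,mass")

    print(ra.resname)    # ['ALA' 'GLY']
    print(ra[0].mass)    # 71.08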
4632,"def parse_fields(self):
|
""""""Determine the start and end columns and names of the fields.""""""
|
rule = self.t['toprule'].rstrip() # keep leading space for correct columns!!
|
if not (rule == self.t['midrule'].rstrip() and rule == self.t['botrule'].rstrip()):
|
raise ParseError(""Table rules differ from each other (check white space)."")
|
names = self.t['fields'].split()
|
nfields = len(rule.split())
|
if nfields != len(names):
|
raise ParseError(""number of field names (%d) does not match number of fields (%d)""
|
% (nfields, len(names)))
|
fields = [] # list of tuples (first,last) column of the field
|
ifield = 0
|
is_field = rule.startswith('=') # state
|
len_rule = len(rule)
|
start_field = 0
|
end_field = 0
|
for c in xrange(len_rule):
|
char = rule[c]
|
if not is_field and char == '=':
|
start_field = c
|
is_field = True
|
if is_field and (char == ' ' or c == len_rule-1):
|
# finished field
|
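This record is cut off inside the column-scanning loop. The loop walks the '=' rule line character by character, opening a field when a run of '=' begins and closing it on the following blank (or at the end of the line). A standalone sketch of the same idea, not the missing original code:

    # standalone sketch of scanning a reST simple-table rule line
    def field_columns(rule):
        fields = []
        start = None
        for c, char in enumerate(rule):
            if char == '=' and start is None:
                start = c                      # entering a run of '='
            elif char != '=' and start is not None:
                fields.append((start, c - 1))  # leaving a run: last '=' was at c-1
                start = None
        if start is not None:                  # rule ended inside a run of '='
            fields.append((start, len(rule) - 1))
        return fields

    print(field_columns("=======  ====  ====="))   # [(0, 6), (9, 12), (15, 19)]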