file_path
stringlengths 20
202
| content
stringlengths 9
3.85M
| size
int64 9
3.85M
| lang
stringclasses 9
values | avg_line_length
float64 3.33
100
| max_line_length
int64 8
993
| alphanum_fraction
float64 0.26
0.93
|
---|---|---|---|---|---|---|
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/translate/pddl_parser/lisp_parser.py
|
__all__ = ["ParseError", "parse_nested_list"]
class ParseError(Exception):
    """Error raised when the PDDL (Lisp) input is malformed."""

    def __init__(self, value):
        # Keep the message on .value; __str__ echoes it back verbatim.
        self.value = value

    def __str__(self):
        return self.value
# Basic functions for parsing PDDL (Lisp) files.
def parse_nested_list(input_file):
    """Parse a PDDL file into its single top-level s-expression.

    Returns nested Python lists of lowercased token strings. Raises
    ParseError if the file does not start with "(" or has trailing tokens.
    """
    tokens = tokenize(input_file)
    first = next(tokens)
    if first != "(":
        raise ParseError("Expected '(', got %s." % first)
    result = list(parse_list_aux(tokens))
    # The token stream must now be exhausted.
    leftovers = list(tokens)
    if leftovers:
        raise ParseError("Unexpected token: %s." % leftovers[0])
    return result
def tokenize(input):
    """Yield lowercased tokens from an iterable of text lines.

    ";" starts a comment running to end of line. Parentheses become
    separate tokens and "?" is split off so variables tokenize cleanly.
    Non-ASCII characters outside comments raise ParseError.
    """
    for line in input:
        line = line.split(";", 1)[0] # Strip comments.
        try:
            line.encode("ascii")
        except UnicodeEncodeError:
            raise ParseError("Non-ASCII character outside comment: %s"
                             % line[0:-1])
        padded = (line.replace("(", " ( ")
                      .replace(")", " ) ")
                      .replace("?", " ?"))
        for token in padded.split():
            yield token.lower()
def parse_list_aux(tokenstream):
    """Yield the elements of a parenthesized list.

    The opening "(" has already been consumed by the caller. Nested
    "(...)" groups are returned as nested Python lists. Raises ParseError
    if the stream runs out before the matching ")".
    """
    for token in tokenstream:
        if token == ")":
            return
        if token == "(":
            yield list(parse_list_aux(tokenstream))
        else:
            yield token
    # Loop fell through: stream exhausted without a closing paren.
    raise ParseError("Missing ')'")
| 1,422 |
Python
| 30.622222 | 78 | 0.555556 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/translate/pddl_parser/parsing_functions.py
|
from __future__ import print_function
import sys
import graph
import pddl
def parse_typed_list(alist, only_variables=False,
                     constructor=pddl.TypedObject,
                     default_type="object"):
    """Parse a PDDL typed list such as ["?x", "?y", "-", "block", "?z"].

    Items before each "-" get the type that follows it; trailing items
    without a "-" get default_type. Each (item, type) pair is fed to
    `constructor`. With only_variables=True, every item must start
    with "?".
    """
    result = []
    remaining = alist
    while remaining:
        if "-" in remaining:
            dash = remaining.index("-")
            items = remaining[:dash]
            _type = remaining[dash + 1]
            remaining = remaining[dash + 2:]
        else:
            # No type separator left: everything gets the default type.
            items = remaining
            _type = default_type
            remaining = []
        for item in items:
            assert not only_variables or item.startswith("?"), \
                "Expected item to be a variable: %s in (%s)" % (
                    item, " ".join(items))
            result.append(constructor(item, _type))
    return result
def set_supertypes(type_list):
    """Fill in supertype_names for every type via the transitive closure
    of the direct child->parent (basetype) edges.

    TODO: This is a two-stage construction, which is perhaps not a great
    idea. Might need more thought in the future.
    """
    by_name = {}
    edges = []
    for t in type_list:
        t.supertype_names = []
        by_name[t.name] = t
        if t.basetype_name:
            edges.append((t.name, t.basetype_name))
    for descendant_name, ancestor_name in graph.transitive_closure(edges):
        by_name[descendant_name].supertype_names.append(ancestor_name)
def parse_predicate(alist):
    """Build a pddl.Predicate from (name ?arg1 ... - type ...)."""
    arguments = parse_typed_list(alist[1:], only_variables=True)
    return pddl.Predicate(alist[0], arguments)
def parse_function(alist, type_name):
    """Build a pddl.Function from (name args...) with result type `type_name`."""
    arguments = parse_typed_list(alist[1:])
    return pddl.Function(alist[0], arguments, type_name)
def parse_condition(alist, type_dict, predicate_dict):
    """Parse a condition, then normalize: unique variable names + simplification."""
    parsed = parse_condition_aux(alist, False, type_dict, predicate_dict)
    return parsed.uniquify_variables({}).simplified()
def parse_condition_aux(alist, negated, type_dict, predicate_dict):
    """Parse a PDDL condition. The condition is translated into NNF on the fly.

    `negated` tracks whether an odd number of enclosing "not"s applies to
    the current subexpression; negation is pushed inward (no Negation
    nodes are built) by dualizing and/or and forall/exists below.
    """
    tag = alist[0]
    if tag in ("and", "or", "not", "imply"):
        args = alist[1:]
        if tag == "imply":
            assert len(args) == 2
        if tag == "not":
            assert len(args) == 1
            # Recurse with the polarity flipped.
            return parse_condition_aux(
                args[0], not negated, type_dict, predicate_dict)
    elif tag in ("forall", "exists"):
        parameters = parse_typed_list(alist[1])
        args = alist[2:]
        assert len(args) == 1
    else:
        # Atomic formula; `negated` decides Atom vs NegatedAtom.
        return parse_literal(alist, type_dict, predicate_dict, negated=negated)
    if tag == "imply":
        # (imply A B) == (or (not A) B); polarity is folded into the parts.
        parts = [parse_condition_aux(
            args[0], not negated, type_dict, predicate_dict),
            parse_condition_aux(
                args[1], negated, type_dict, predicate_dict)]
        tag = "or"
    else:
        parts = [parse_condition_aux(part, negated, type_dict, predicate_dict)
                 for part in args]
    # De Morgan / quantifier duality: under negation "and" becomes "or"
    # and "forall" becomes "exists" (and vice versa).
    if tag == "and" and not negated or tag == "or" and negated:
        return pddl.Conjunction(parts)
    elif tag == "or" and not negated or tag == "and" and negated:
        return pddl.Disjunction(parts)
    elif tag == "forall" and not negated or tag == "exists" and negated:
        return pddl.UniversalCondition(parameters, parts)
    elif tag == "exists" and not negated or tag == "forall" and negated:
        return pddl.ExistentialCondition(parameters, parts)
def parse_literal(alist, type_dict, predicate_dict, negated=False):
    """Parse a (possibly negated) literal into a pddl Atom/NegatedAtom.

    The id returned by _get_predicate_id_and_arity is deliberately
    recomputed below: in this fork a name that is a declared type always
    maps to the internal type predicate; the call is still needed for the
    existence and arity checks (which raise SystemExit).
    """
    if alist[0] == "not":
        assert len(alist) == 2
        alist = alist[1]
        negated = not negated
    # Validates that the name is a declared predicate or type and is used
    # with the right arity.
    pred_id, arity = _get_predicate_id_and_arity(
        alist[0], type_dict, predicate_dict)
    if arity != len(alist) - 1:
        raise SystemExit("predicate used with wrong arity: (%s)"
                         % " ".join(alist))
    text = alist[0]
    the_type = type_dict.get(text)
    if the_type is not None:
        # Name denotes a type: use the internal type predicate.
        pred_id = the_type.get_predicate_name()
    else:
        pred_id = text
    if negated:
        return pddl.NegatedAtom(pred_id, alist[1:])
    else:
        return pddl.Atom(pred_id, alist[1:])
# One-shot flag so the type/predicate name-clash warning is printed once.
SEEN_WARNING_TYPE_PREDICATE_NAME_CLASH = False
def _get_predicate_id_and_arity(text, type_dict, predicate_dict):
    """Resolve `text` to (predicate id, arity).

    Predicates win over types on a name clash (with a one-time warning on
    stderr); a plain type resolves to its internal type predicate with
    arity 1. Raises SystemExit if the name is neither.
    """
    global SEEN_WARNING_TYPE_PREDICATE_NAME_CLASH
    the_type = type_dict.get(text)
    the_predicate = predicate_dict.get(text)
    if the_type is None and the_predicate is None:
        raise SystemExit("Undeclared predicate: %s" % text)
    elif the_predicate is not None:
        if the_type is not None and not SEEN_WARNING_TYPE_PREDICATE_NAME_CLASH:
            msg = ("Warning: name clash between type and predicate %r.\n"
                   "Interpreting as predicate in conditions.") % text
            print(msg, file=sys.stderr)
            SEEN_WARNING_TYPE_PREDICATE_NAME_CLASH = True
        return the_predicate.name, the_predicate.get_arity()
    else:
        assert the_type is not None
        return the_type.get_predicate_name(), 1
def parse_effects(alist, result, type_dict, predicate_dict):
    """Parse a PDDL effect (any combination of simple, conjunctive, conditional, and universal).

    Normalized non-cost effects are appended to `result`. Returns the
    action-cost expression if one is present, else None.
    """
    parsed = parse_effect(alist, type_dict, predicate_dict)
    cost_eff, rest_effect = parsed.normalize().extract_cost()
    add_effect(rest_effect, result)
    return cost_eff.effect if cost_eff else None
def add_effect(tmp_effect, result):
    """tmp_effect has the following structure:
    [ConjunctiveEffect] [UniversalEffect] [ConditionalEffect] SimpleEffect.

    Flattens that structure into pddl.Effect objects appended to `result`.
    """
    if isinstance(tmp_effect, pddl.ConjunctiveEffect):
        # Recurse into each conjunct separately.
        for effect in tmp_effect.effects:
            add_effect(effect, result)
        return
    else:
        parameters = []
        condition = pddl.Truth()
        if isinstance(tmp_effect, pddl.UniversalEffect):
            parameters = tmp_effect.parameters
            if isinstance(tmp_effect.effect, pddl.ConditionalEffect):
                condition = tmp_effect.effect.condition
                assert isinstance(tmp_effect.effect.effect, pddl.SimpleEffect)
                effect = tmp_effect.effect.effect.effect
            else:
                assert isinstance(tmp_effect.effect, pddl.SimpleEffect)
                effect = tmp_effect.effect.effect
        elif isinstance(tmp_effect, pddl.ConditionalEffect):
            condition = tmp_effect.condition
            assert isinstance(tmp_effect.effect, pddl.SimpleEffect)
            effect = tmp_effect.effect.effect
        else:
            assert isinstance(tmp_effect, pddl.SimpleEffect)
            effect = tmp_effect.effect
        assert isinstance(effect, pddl.Literal)
        # Check for contradictory effects
        condition = condition.simplified()
        new_effect = pddl.Effect(parameters, condition, effect)
        contradiction = pddl.Effect(parameters, condition, effect.negate())
        if contradiction not in result:
            result.append(new_effect)
        else:
            # We use add-after-delete semantics, keep positive effect
            if isinstance(contradiction.literal, pddl.NegatedAtom):
                result.remove(contradiction)
                result.append(new_effect)
def parse_effect(alist, type_dict, predicate_dict):
    """Recursively parse one effect s-expression into pddl effect objects."""
    tag = alist[0]
    if tag == "and":
        subeffects = [parse_effect(part, type_dict, predicate_dict)
                      for part in alist[1:]]
        return pddl.ConjunctiveEffect(subeffects)
    if tag == "forall":
        assert len(alist) == 3
        params = parse_typed_list(alist[1])
        inner = parse_effect(alist[2], type_dict, predicate_dict)
        return pddl.UniversalEffect(params, inner)
    if tag == "when":
        assert len(alist) == 3
        guard = parse_condition(
            alist[1], type_dict, predicate_dict)
        inner = parse_effect(alist[2], type_dict, predicate_dict)
        return pddl.ConditionalEffect(guard, inner)
    if tag == "increase":
        assert len(alist) == 3
        assert alist[1] == ['total-cost']
        return pddl.CostEffect(parse_assignment(alist))
    # We pass in {} instead of type_dict here because types must
    # be static predicates, so cannot be the target of an effect.
    return pddl.SimpleEffect(parse_literal(alist, {}, predicate_dict))
def parse_expression(exp):
    """Parse a numeric expression token or s-expression.

    Lists become PrimitiveNumericExpressions; digit-only strings (dots
    ignored) become NumericConstants; other names become parameter-free
    fluents. Negative numbers raise ValueError.
    """
    if isinstance(exp, list):
        symbol, operands = exp[0], exp[1:]
        return pddl.PrimitiveNumericExpression(symbol, operands)
    if exp.replace(".", "").isdigit():
        return pddl.NumericConstant(float(exp))
    if exp[0] == "-":
        raise ValueError("Negative numbers are not supported")
    return pddl.PrimitiveNumericExpression(exp, [])
def parse_assignment(alist):
    """Parse (= head exp) or (increase head exp) into an f-expression object."""
    assert len(alist) == 3
    op, raw_head, raw_exp = alist
    head = parse_expression(raw_head)
    exp = parse_expression(raw_exp)
    if op == "=":
        return pddl.Assign(head, exp)
    if op == "increase":
        return pddl.Increase(head, exp)
    assert False, "Assignment operator not supported."
def parse_action(alist, type_dict, predicate_dict):
    """Parse an (:action ...) s-expression into a pddl.Action.

    :parameters and :precondition are optional sections; :effect is
    mandatory. Returns None if the action has no (non-cost) effects.
    """
    iterator = iter(alist)
    action_tag = next(iterator)
    assert action_tag == ":action"
    name = next(iterator)
    parameters_tag_opt = next(iterator)
    if parameters_tag_opt == ":parameters":
        parameters = parse_typed_list(next(iterator),
                                      only_variables=True)
        precondition_tag_opt = next(iterator)
    else:
        # No :parameters section; the token we read belongs to the next one.
        parameters = []
        precondition_tag_opt = parameters_tag_opt
    if precondition_tag_opt == ":precondition":
        precondition_list = next(iterator)
        if not precondition_list:
            # Note that :precondition () is allowed in PDDL.
            precondition = pddl.Conjunction([])
        else:
            precondition = parse_condition(
                precondition_list, type_dict, predicate_dict)
        effect_tag = next(iterator)
    else:
        precondition = pddl.Conjunction([])
        effect_tag = precondition_tag_opt
    assert effect_tag == ":effect"
    effect_list = next(iterator)
    eff = []
    if effect_list:
        try:
            # Fills `eff`; returns the cost expression (or None).
            cost = parse_effects(
                effect_list, eff, type_dict, predicate_dict)
        except ValueError as e:
            raise SystemExit("Error in Action %s\nReason: %s." % (name, e))
    for rest in iterator:
        assert False, rest
    if eff:
        return pddl.Action(name, parameters, len(parameters),
                           precondition, eff, cost)
    else:
        return None
def parse_axiom(alist, type_dict, predicate_dict):
    """Parse (:derived (pred ...) condition) into a pddl.Axiom."""
    assert len(alist) == 3
    assert alist[0] == ":derived"
    head = parse_predicate(alist[1])
    body = parse_condition(
        alist[2], type_dict, predicate_dict)
    return pddl.Axiom(head.name, head.arguments,
                      len(head.arguments), body)
def parse_task(domain_pddl, task_pddl):
    """Combine parsed domain and problem s-expressions into a pddl.Task."""
    domain_name, domain_requirements, types, type_dict, constants, predicates, predicate_dict, functions, actions, axioms \
        = parse_domain_pddl(domain_pddl)
    task_name, task_domain_name, task_requirements, objects, init, goal, use_metric = parse_task_pddl(task_pddl, type_dict, predicate_dict)
    assert domain_name == task_domain_name
    # Merge requirements from both files (deduplicated, sorted).
    requirements = pddl.Requirements(sorted(set(
        domain_requirements.requirements +
        task_requirements.requirements)))
    objects = constants + objects
    check_for_duplicates(
        [o.name for o in objects],
        errmsg="error: duplicate object %r",
        finalmsg="please check :constants and :objects definitions")
    # Every object is trivially equal to itself.
    init += [pddl.Atom("=", (obj.name, obj.name)) for obj in objects]
    return pddl.Task(
        domain_name, task_name, requirements, types, objects,
        predicates, functions, init, goal, actions, axioms, use_metric)
def parse_domain_pddl(domain_pddl):
    """Generator that parses a (define (domain ...) ...) s-expression.

    Yields, in order: domain name, requirements, types, type dict,
    constants, predicates, predicate dict, functions, actions, axioms.
    """
    iterator = iter(domain_pddl)
    define_tag = next(iterator)
    assert define_tag == "define"
    domain_line = next(iterator)
    assert domain_line[0] == "domain" and len(domain_line) == 2
    yield domain_line[1]
    ## We allow an arbitrary order of the requirement, types, constants,
    ## predicates and functions specification. The PDDL BNF is more strict on
    ## this, so we print a warning if it is violated.
    requirements = pddl.Requirements([":strips"])
    the_types = [pddl.Type("object")]
    constants, the_predicates, the_functions = [], [], []
    correct_order = [":requirements", ":types", ":constants", ":predicates",
                     ":functions"]
    seen_fields = []
    first_action = None
    for opt in iterator:
        field = opt[0]
        if field not in correct_order:
            # First action/axiom entry: the header sections are over.
            first_action = opt
            break
        if field in seen_fields:
            raise SystemExit("Error in domain specification\n" +
                             "Reason: two '%s' specifications." % field)
        if (seen_fields and
            correct_order.index(seen_fields[-1]) > correct_order.index(field)):
            msg = "\nWarning: %s specification not allowed here (cf. PDDL BNF)" % field
            print(msg, file=sys.stderr)
        seen_fields.append(field)
        if field == ":requirements":
            requirements = pddl.Requirements(opt[1:])
        elif field == ":types":
            the_types.extend(parse_typed_list(
                opt[1:], constructor=pddl.Type))
        elif field == ":constants":
            constants = parse_typed_list(opt[1:])
        elif field == ":predicates":
            the_predicates = [parse_predicate(entry)
                              for entry in opt[1:]]
            # Built-in equality predicate over any two objects.
            the_predicates += [pddl.Predicate("=", [
                pddl.TypedObject("?x", "object"),
                pddl.TypedObject("?y", "object")])]
        elif field == ":functions":
            the_functions = parse_typed_list(
                opt[1:],
                constructor=parse_function,
                default_type="number")
    set_supertypes(the_types)
    yield requirements
    yield the_types
    type_dict = {type.name: type for type in the_types}
    yield type_dict
    yield constants
    yield the_predicates
    predicate_dict = {pred.name: pred for pred in the_predicates}
    yield predicate_dict
    yield the_functions
    entries = []
    if first_action is not None:
        # Re-include the entry that terminated the header loop above.
        entries.append(first_action)
    entries.extend(iterator)
    the_axioms = []
    the_actions = []
    for entry in entries:
        if entry[0] == ":derived":
            axiom = parse_axiom(entry, type_dict, predicate_dict)
            the_axioms.append(axiom)
        else:
            action = parse_action(entry, type_dict, predicate_dict)
            if action is not None:
                the_actions.append(action)
    yield the_actions
    yield the_axioms
def parse_task_pddl(task_pddl, type_dict, predicate_dict):
    """Generator that parses a (define (problem ...) ...) s-expression.

    Yields, in order: problem name, domain name, requirements, objects,
    initial state, goal condition, use_metric flag.
    """
    iterator = iter(task_pddl)
    define_tag = next(iterator)
    assert define_tag == "define"
    problem_line = next(iterator)
    assert problem_line[0] == "problem" and len(problem_line) == 2
    yield problem_line[1]
    domain_line = next(iterator)
    assert domain_line[0] == ":domain" and len(domain_line) == 2
    yield domain_line[1]
    requirements_opt = next(iterator)
    if requirements_opt[0] == ":requirements":
        requirements = requirements_opt[1:]
        objects_opt = next(iterator)
    else:
        requirements = []
        objects_opt = requirements_opt
    yield pddl.Requirements(requirements)
    if objects_opt[0] == ":objects":
        yield parse_typed_list(objects_opt[1:])
        init = next(iterator)
    else:
        yield []
        init = objects_opt
    assert init[0] == ":init"
    initial = []
    initial_true = set()
    initial_false = set()
    initial_assignments = dict()
    for fact in init[1:]:
        if fact[0] == "=":
            # Numeric fluent initialization, e.g. (= (total-cost) 0).
            try:
                assignment = parse_assignment(fact)
            except ValueError as e:
                raise SystemExit("Error in initial state specification\n" +
                                 "Reason: %s." % e)
            if not isinstance(assignment.expression,
                              pddl.NumericConstant):
                raise SystemExit("Illegal assignment in initial state " +
                                 "specification:\n%s" % assignment)
            if assignment.fluent in initial_assignments:
                prev = initial_assignments[assignment.fluent]
                # Duplicate identical assignment: warn; conflicting: abort.
                if assignment.expression == prev.expression:
                    print("Warning: %s is specified twice" % assignment,
                          "in initial state specification")
                else:
                    raise SystemExit("Error in initial state specification\n" +
                                     "Reason: conflicting assignment for " +
                                     "%s." % assignment.fluent)
            else:
                initial_assignments[assignment.fluent] = assignment
                initial.append(assignment)
        elif fact[0] == "not":
            # Negative literals are tracked only for consistency checking;
            # they are not added to the returned initial state.
            atom = pddl.Atom(fact[1][0], fact[1][1:])
            check_atom_consistency(atom, initial_false, initial_true, False)
            initial_false.add(atom)
        else:
            atom = pddl.Atom(fact[0], fact[1:])
            check_atom_consistency(atom, initial_true, initial_false)
            initial_true.add(atom)
    initial.extend(initial_true)
    yield initial
    goal = next(iterator)
    assert goal[0] == ":goal" and len(goal) == 2
    yield parse_condition(goal[1], type_dict, predicate_dict)
    use_metric = False
    for entry in iterator:
        if entry[0] == ":metric":
            # Only minimization of total-cost is supported.
            if entry[1] == "minimize" and entry[2][0] == "total-cost":
                use_metric = True
            else:
                assert False, "Unknown metric."
    yield use_metric
    for entry in iterator:
        assert False, entry
def check_atom_consistency(atom, same_truth_value, other_truth_value, atom_is_true=True):
    """Abort if `atom` appears with both truth values; warn on duplicates."""
    if atom in other_truth_value:
        raise SystemExit("Error in initial state specification\n" +
                         "Reason: %s is true and false." % atom)
    if atom in same_truth_value:
        # Show the literal with its stated polarity in the warning.
        shown = atom if atom_is_true else atom.negate()
        print("Warning: %s is specified twice in initial state specification" % shown)
def check_for_duplicates(elements, errmsg, finalmsg):
    """Raise SystemExit listing every repeated element (formatted with
    errmsg, one line each, followed by finalmsg); return silently if
    all elements are unique."""
    seen = set()
    problems = []
    for element in elements:
        if element in seen:
            problems.append(errmsg % element)
        seen.add(element)
    if problems:
        raise SystemExit("\n".join(problems) + "\n" + finalmsg)
| 19,159 |
Python
| 36.131783 | 139 | 0.598204 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/translate/pddl_parser/__init__.py
|
from .pddl_file import open
| 28 |
Python
| 13.499993 | 27 | 0.785714 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/translate/pddl_parser/pddl_file.py
|
import options
from . import lisp_parser
from . import parsing_functions
file_open = open
def parse_pddl_file(type, filename):
    """Open and parse `filename` into a nested token list.

    `type` is only used in error messages ("domain" or "task").
    Exits via SystemExit on I/O or parse errors.
    """
    try:
        # The builtin open function is shadowed by this module's open function.
        # We use the Latin-1 encoding (which allows a superset of ASCII, of the
        # Latin-* encodings and of UTF-8) to allow special characters in
        # comments. In all other parts, we later validate that only ASCII is
        # used.
        # Bug fix: close the file handle deterministically instead of
        # leaking it; parse_nested_list consumes the whole file eagerly,
        # so closing afterwards is safe.
        with file_open(filename) as f:  # , encoding='ISO-8859-1')
            return lisp_parser.parse_nested_list(f)
    except OSError as e:
        raise SystemExit("Error: Could not read file: %s\nReason: %s." %
                         (e.filename, e))
    except lisp_parser.ParseError as e:
        raise SystemExit("Error: Could not parse %s file: %s\nReason: %s." %
                         (type, filename, e))
def open(domain_filename=None, task_filename=None):
    """Parse the domain and task files (defaulting to the paths configured
    in the options module) and return the combined pddl task.

    Note: deliberately shadows the builtin open (see file_open above).
    """
    task_path = task_filename or options.task
    domain_path = domain_filename or options.domain
    domain_pddl = parse_pddl_file("domain", domain_path)
    task_pddl = parse_pddl_file("task", task_path)
    return parsing_functions.parse_task(domain_pddl, task_pddl)
| 1,204 |
Python
| 35.51515 | 92 | 0.657807 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/translate/pddl/pddl_types.py
|
# Renamed from types.py to avoid clash with stdlib module.
# In the future, use explicitly relative imports or absolute
# imports as a better solution.
import itertools
def _get_type_predicate_name(type_name):
# PDDL allows mixing types and predicates, but some PDDL files
# have name collisions between types and predicates. We want to
# support both the case where such name collisions occur and the
# case where types are used as predicates.
#
# We internally give types predicate names that cannot be confused
# with non-type predicates. When the input uses a PDDL type as a
# predicate, we automatically map it to this internal name.
return "type@%s" % type_name
class Type:
    """A PDDL type with an optional base (super) type name."""

    def __init__(self, name, basetype_name=None):
        self.name = name
        self.basetype_name = basetype_name

    def __str__(self):
        return self.name

    def __repr__(self):
        return "Type(%s, %s)" % (self.name, self.basetype_name)

    def get_predicate_name(self):
        # Internal predicate representing membership in this type.
        return _get_type_predicate_name(self.name)
class TypedObject:
    """An object or variable together with its PDDL type name.

    Hashable and compared by (name, type_name).
    """

    def __init__(self, name, type_name):
        self.name = name
        self.type_name = type_name

    def __hash__(self):
        return hash((self.name, self.type_name))

    def __eq__(self, other):
        return self.name == other.name and self.type_name == other.type_name

    def __ne__(self, other):
        return not self == other

    def __str__(self):
        return "%s: %s" % (self.name, self.type_name)

    def __repr__(self):
        return "<TypedObject %s: %s>" % (self.name, self.type_name)

    def uniquify_name(self, type_map, renamings):
        """Return self if the name is free in type_map; otherwise register
        a numbered rename in `renamings` and return a renamed copy."""
        if self.name not in type_map:
            type_map[self.name] = self.type_name
            return self
        for suffix in itertools.count(1):
            candidate = self.name + str(suffix)
            if candidate not in type_map:
                renamings[self.name] = candidate
                type_map[candidate] = self.type_name
                return TypedObject(candidate, self.type_name)

    def get_atom(self):
        # TODO: Resolve cyclic import differently.
        from . import conditions
        predicate_name = _get_type_predicate_name(self.type_name)
        return conditions.Atom(predicate_name, [self.name])
| 2,290 |
Python
| 31.267605 | 76 | 0.625764 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/translate/pddl/conditions.py
|
# Conditions (of any type) are immutable, because they need to
# be hashed occasionally. Immutability also allows more efficient comparison
# based on a precomputed hash value.
#
# Careful: Most other classes (e.g. Effects, Axioms, Actions) are not!
class Condition:
    """Abstract base class for all PDDL conditions.

    Conditions are immutable because they are hashed; the hash is
    precomputed in __init__ for fast comparison (see file header comment).
    """
    def __init__(self, parts):
        #self.parts = tuple(parts)
        # Deduplicate parts while preserving their order.
        unique_parts = []
        for part in parts:
            if part not in unique_parts:
                unique_parts.append(part)
        self.parts = tuple(unique_parts)
        self.hash = hash((self.__class__, self.parts))
    def __hash__(self):
        return self.hash
    def __ne__(self, other):
        return not self == other
    def __lt__(self, other):
        # NOTE(review): ordering by hash is arbitrary but deterministic
        # within one run; presumably only used for stable sorting.
        return self.hash < other.hash
    def __le__(self, other):
        return self.hash <= other.hash
    def dump(self, indent=" "):
        # Pretty-print the condition tree to stdout.
        print("%s%s" % (indent, self._dump()))
        for part in self.parts:
            part.dump(indent + " ")
    def _dump(self):
        return self.__class__.__name__
    def _postorder_visit(self, method_name, *args):
        # Visit children first, then dispatch to self.<method_name> if
        # defined, falling back to _propagate (rebuild with new parts).
        part_results = [part._postorder_visit(method_name, *args)
                        for part in self.parts]
        method = getattr(self, method_name, self._propagate)
        return method(part_results, *args)
    def _propagate(self, parts, *args):
        return self.change_parts(parts)
    def simplified(self):
        return self._postorder_visit("_simplified")
    def relaxed(self):
        return self._postorder_visit("_relaxed")
    def untyped(self):
        return self._postorder_visit("_untyped")
    def uniquify_variables(self, type_map, renamings={}):
        # Cannot used _postorder_visit because this requires preorder
        # for quantified effects.
        if not self.parts:
            return self
        else:
            return self.__class__([part.uniquify_variables(type_map, renamings)
                                   for part in self.parts])
    def to_untyped_strips(self):
        raise ValueError("Not a STRIPS condition: %s" % self.__class__.__name__)
    def instantiate(self, var_mapping, init_facts, fluent_facts, result):
        raise ValueError("Cannot instantiate condition: not normalized")
    def free_variables(self):
        # Union of the free variables of all parts.
        result = set()
        for part in self.parts:
            result |= part.free_variables()
        return result
    def has_disjunction(self):
        for part in self.parts:
            if part.has_disjunction():
                return True
        return False
    def has_existential_part(self):
        for part in self.parts:
            if part.has_existential_part():
                return True
        return False
    def has_universal_part(self):
        for part in self.parts:
            if part.has_universal_part():
                return True
        return False
class ConstantCondition(Condition):
    """Base class for the parameterless constant conditions Truth/Falsity."""
    # Defining __eq__ blocks inheritance of __hash__, so must set it explicitly.
    __hash__ = Condition.__hash__
    parts = ()
    def __init__(self):
        self.hash = hash(self.__class__)
    def change_parts(self, parts):
        # Constants have no parts to replace.
        return self
    def __eq__(self, other):
        # All instances of the same constant class are interchangeable.
        return self.__class__ is other.__class__
class Impossible(Exception):
    """Raised during instantiation when a condition can never be satisfied."""
class Falsity(ConstantCondition):
    """The constant-false condition."""
    def instantiate(self, var_mapping, init_facts, fluent_facts, result):
        # A false condition can never be satisfied.
        raise Impossible()
    def negate(self):
        return Truth()
class Truth(ConstantCondition):
    """The constant-true condition."""
    def to_untyped_strips(self):
        return []
    def instantiate(self, var_mapping, init_facts, fluent_facts, result):
        # Always satisfied; contributes nothing to the result.
        pass
    def negate(self):
        return Falsity()
class JunctorCondition(Condition):
    """Base class for Conjunction and Disjunction."""
    # Defining __eq__ blocks inheritance of __hash__, so must set it explicitly.
    __hash__ = Condition.__hash__
    def __eq__(self, other):
        # Compare hash first for speed reasons.
        return (self.hash == other.hash and
                self.__class__ is other.__class__ and
                self.parts == other.parts)
    def change_parts(self, parts):
        return self.__class__(parts)
class Conjunction(JunctorCondition):
    """Logical AND over self.parts."""
    def _simplified(self, parts):
        # Flatten nested conjunctions, drop Truth parts, and collapse to
        # Falsity if any part is false.
        result_parts = []
        for part in parts:
            if isinstance(part, Conjunction):
                result_parts += part.parts
            elif isinstance(part, Falsity):
                return Falsity()
            elif not isinstance(part, Truth):
                result_parts.append(part)
        if not result_parts:
            return Truth()
        if len(result_parts) == 1:
            return result_parts[0]
        return Conjunction(result_parts)
    def to_untyped_strips(self):
        # Concatenate the STRIPS literals of all parts.
        result = []
        for part in self.parts:
            result += part.to_untyped_strips()
        return result
    def instantiate(self, var_mapping, init_facts, fluent_facts, result):
        assert not result, "Condition not simplified"
        for part in self.parts:
            part.instantiate(var_mapping, init_facts, fluent_facts, result)
    def negate(self):
        # De Morgan: not(A and B) == (not A) or (not B).
        return Disjunction([p.negate() for p in self.parts])
class Disjunction(JunctorCondition):
    """Logical OR over self.parts."""
    def _simplified(self, parts):
        # Flatten nested disjunctions, drop Falsity parts, and collapse
        # to Truth if any part is true.
        result_parts = []
        for part in parts:
            if isinstance(part, Disjunction):
                result_parts += part.parts
            elif isinstance(part, Truth):
                return Truth()
            elif not isinstance(part, Falsity):
                result_parts.append(part)
        if not result_parts:
            return Falsity()
        if len(result_parts) == 1:
            return result_parts[0]
        return Disjunction(result_parts)
    def negate(self):
        # De Morgan: not(A or B) == (not A) and (not B).
        return Conjunction([p.negate() for p in self.parts])
    def has_disjunction(self):
        return True
class QuantifiedCondition(Condition):
    """Base class for forall/exists conditions over typed parameters."""
    # Defining __eq__ blocks inheritance of __hash__, so must set it explicitly.
    __hash__ = Condition.__hash__
    def __init__(self, parameters, parts):
        self.parameters = tuple(parameters)
        self.parts = tuple(parts)
        self.hash = hash((self.__class__, self.parameters, self.parts))
    def __eq__(self, other):
        # Compare hash first for speed reasons.
        return (self.hash == other.hash and
                self.__class__ is other.__class__ and
                self.parameters == other.parameters and
                self.parts == other.parts)
    def _dump(self, indent=" "):
        arglist = ", ".join(map(str, self.parameters))
        return "%s %s" % (self.__class__.__name__, arglist)
    def _simplified(self, parts):
        # A quantifier over a constant condition is that constant.
        if isinstance(parts[0], ConstantCondition):
            return parts[0]
        else:
            return self._propagate(parts)
    def uniquify_variables(self, type_map, renamings={}):
        renamings = dict(renamings) # Create a copy.
        new_parameters = [par.uniquify_name(type_map, renamings)
                          for par in self.parameters]
        new_parts = (self.parts[0].uniquify_variables(type_map, renamings),)
        return self.__class__(new_parameters, new_parts)
    def free_variables(self):
        # Bound parameters are not free.
        result = Condition.free_variables(self)
        for par in self.parameters:
            result.discard(par.name)
        return result
    def change_parts(self, parts):
        return self.__class__(self.parameters, parts)
class UniversalCondition(QuantifiedCondition):
    """A forall-quantified condition."""
    def _untyped(self, parts):
        # forall x: P  ==>  forall x: (not type(x)) or P
        type_literals = [par.get_atom().negate() for par in self.parameters]
        return UniversalCondition(self.parameters,
                                  [Disjunction(type_literals + parts)])
    def negate(self):
        return ExistentialCondition(self.parameters, [p.negate() for p in self.parts])
    def has_universal_part(self):
        return True
class ExistentialCondition(QuantifiedCondition):
    """An exists-quantified condition."""
    def _untyped(self, parts):
        # exists x: P  ==>  exists x: type(x) and P
        type_literals = [par.get_atom() for par in self.parameters]
        return ExistentialCondition(self.parameters,
                                    [Conjunction(type_literals + parts)])
    def negate(self):
        return UniversalCondition(self.parameters, [p.negate() for p in self.parts])
    def instantiate(self, var_mapping, init_facts, fluent_facts, result):
        assert not result, "Condition not simplified"
        self.parts[0].instantiate(var_mapping, init_facts, fluent_facts, result)
    def has_existential_part(self):
        return True
class Literal(Condition):
    """Base class for Atom/NegatedAtom: a predicate applied to arguments."""
    # Defining __eq__ blocks inheritance of __hash__, so must set it explicitly.
    __hash__ = Condition.__hash__
    parts = []
    __slots__ = ["predicate", "args", "hash"]
    def __init__(self, predicate, args):
        self.predicate = predicate
        self.args = tuple(args)
        self.hash = hash((self.__class__, self.predicate, self.args))
    def __eq__(self, other):
        # Compare hash first for speed reasons.
        return (self.hash == other.hash and
                self.__class__ is other.__class__ and
                self.predicate == other.predicate and
                self.args == other.args)
    def __ne__(self, other):
        return not self == other
    @property
    def key(self):
        # Sort key: order literals by predicate name, then arguments.
        return str(self.predicate), self.args
    def __lt__(self, other):
        return self.key < other.key
    def __le__(self, other):
        return self.key <= other.key
    def __str__(self):
        return "%s %s(%s)" % (self.__class__.__name__, self.predicate,
                              ", ".join(map(str, self.args)))
    def __repr__(self):
        return '<%s>' % self
    def _dump(self):
        return str(self)
    def change_parts(self, parts):
        # Literals have no sub-conditions.
        return self
    def uniquify_variables(self, type_map, renamings={}):
        return self.rename_variables(renamings)
    def rename_variables(self, renamings):
        new_args = tuple(renamings.get(arg, arg) for arg in self.args)
        return self.__class__(self.predicate, new_args)
    def replace_argument(self, position, new_arg):
        new_args = list(self.args)
        new_args[position] = new_arg
        return self.__class__(self.predicate, new_args)
    def free_variables(self):
        # Variables are identified by a leading "?".
        return {arg for arg in self.args if arg[0] == "?"}
class Atom(Literal):
    """A positive literal."""
    negated = False
    def to_untyped_strips(self):
        return [self]
    def instantiate(self, var_mapping, init_facts, fluent_facts, result):
        args = [var_mapping.get(arg, arg) for arg in self.args]
        atom = Atom(self.predicate, args)
        if atom in fluent_facts:
            result.append(atom)
        elif atom not in init_facts:
            # Ground atom is statically false: condition cannot hold.
            raise Impossible()
    def negate(self):
        return NegatedAtom(self.predicate, self.args)
    def positive(self):
        return self
class NegatedAtom(Literal):
    """A negative literal."""
    negated = True
    def _relaxed(self, parts):
        # The relaxation drops negative conditions entirely.
        return Truth()
    def instantiate(self, var_mapping, init_facts, fluent_facts, result):
        args = [var_mapping.get(arg, arg) for arg in self.args]
        atom = Atom(self.predicate, args)
        if atom in fluent_facts:
            result.append(NegatedAtom(self.predicate, args))
        elif atom in init_facts:
            # Ground atom is statically true: its negation cannot hold.
            raise Impossible()
    def negate(self):
        return Atom(self.predicate, self.args)
    positive = negate
| 11,281 |
Python
| 36.85906 | 86 | 0.595869 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/translate/pddl/__init__.py
|
from .pddl_types import Type
from .pddl_types import TypedObject
from .tasks import Task
from .tasks import Requirements
from .predicates import Predicate
from .functions import Function
from .actions import Action
from .actions import PropositionalAction
from .axioms import Axiom
from .axioms import PropositionalAxiom
from .conditions import Literal
from .conditions import Atom
from .conditions import NegatedAtom
from .conditions import Falsity
from .conditions import Truth
from .conditions import Conjunction
from .conditions import Disjunction
from .conditions import UniversalCondition
from .conditions import ExistentialCondition
from .effects import ConditionalEffect
from .effects import ConjunctiveEffect
from .effects import CostEffect
from .effects import Effect
from .effects import SimpleEffect
from .effects import UniversalEffect
from .f_expression import Assign
from .f_expression import Increase
from .f_expression import NumericConstant
from .f_expression import PrimitiveNumericExpression
| 1,020 |
Python
| 25.86842 | 52 | 0.844118 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/translate/pddl/effects.py
|
from . import conditions
def cartesian_product(*sequences):
    # TODO: Also exists in tools.py outside the pddl package (defined slightly
    # differently). Not good. Need proper import paths.
    """Yield tuples of the cartesian product of `sequences`.

    The first sequence varies fastest; an empty argument list yields one
    empty tuple.
    """
    if not sequences:
        yield ()
        return
    head, tail = sequences[0], sequences[1:]
    for suffix in cartesian_product(*tail):
        for element in head:
            yield (element,) + suffix
class Effect:
    """An action effect: a literal, optionally guarded by a condition and
    universally quantified over parameters."""
    def __init__(self, parameters, condition, literal):
        # parameters: quantified variables; condition: guard condition
        # object; literal: the effect literal itself.
        self.parameters = parameters
        self.condition = condition
        self.literal = literal
    def __eq__(self, other):
        # Structural equality over all three components (used e.g. to
        # detect contradictory effects in parsing_functions.add_effect).
        return (self.__class__ is other.__class__ and
                self.parameters == other.parameters and
                self.condition == other.condition and
                self.literal == other.literal)
    def dump(self):
        """Pretty-print the effect to stdout, indenting nested structure."""
        indent = " "
        if self.parameters:
            print("%sforall %s" % (indent, ", ".join(map(str, self.parameters))))
            indent += " "
        if self.condition != conditions.Truth():
            print("%sif" % indent)
            self.condition.dump(indent + " ")
            print("%sthen" % indent)
            indent += " "
        print("%s%s" % (indent, self.literal))
    def copy(self):
        # Shallow copy: parameter/condition/literal objects are shared.
        return Effect(self.parameters, self.condition, self.literal)
def uniquify_variables(self, type_map):
renamings = {}
self.parameters = [par.uniquify_name(type_map, renamings)
for par in self.parameters]
self.condition = self.condition.uniquify_variables(type_map, renamings)
self.literal = self.literal.rename_variables(renamings)
def instantiate(self, var_mapping, init_facts, fluent_facts,
objects_by_type, result, predicate_to_atoms):
if self.parameters:
var_mapping = var_mapping.copy() # Will modify this.
if isinstance(self.condition, conditions.Truth):
facts = []
elif isinstance(self.condition, conditions.Atom):
facts = [self.condition]
elif isinstance(self.condition, conditions.Condition):
facts = self.condition.parts
else:
raise NotImplementedError(self.condition)
for fact in facts:
if ({p.name for p in self.parameters} <= set(fact.args)) and \
(fact.predicate in predicate_to_atoms):
# caelan: special case where consider the dual
for atom in predicate_to_atoms[fact.predicate]:
mapping = dict(zip(fact.args, atom.args))
for par in self.parameters:
var_mapping[par.name] = mapping[par.name]
self._instantiate(var_mapping, init_facts, fluent_facts, result)
break
else:
object_lists = [objects_by_type.get(par.type_name, [])
for par in self.parameters]
for object_tuple in cartesian_product(*object_lists):
for (par, obj) in zip(self.parameters, object_tuple):
var_mapping[par.name] = obj
self._instantiate(var_mapping, init_facts, fluent_facts, result)
else:
self._instantiate(var_mapping, init_facts, fluent_facts, result)
def _instantiate(self, var_mapping, init_facts, fluent_facts, result):
condition = []
try:
self.condition.instantiate(var_mapping, init_facts, fluent_facts, condition)
except conditions.Impossible:
return
effects = []
self.literal.instantiate(var_mapping, init_facts, fluent_facts, effects)
assert len(effects) <= 1
if effects:
result.append((condition, effects[0], self, var_mapping.copy()))
def relaxed(self):
if self.literal.negated:
return None
else:
return Effect(self.parameters, self.condition.relaxed(), self.literal)
def simplified(self):
return Effect(self.parameters, self.condition.simplified(), self.literal)
class ConditionalEffect:
    """Effect applied only when ``condition`` holds (PDDL ``when``)."""
    def __init__(self, condition, effect):
        if isinstance(effect, ConditionalEffect):
            # Collapse nested conditionals into a single conjoined condition.
            self.condition = conditions.Conjunction([condition, effect.condition])
            self.effect = effect.effect
        else:
            self.condition = condition
            self.effect = effect
    def dump(self, indent=" "):
        print("%sif" % (indent))
        self.condition.dump(indent + " ")
        print("%sthen" % (indent))
        self.effect.dump(indent + " ")
    def normalize(self):
        """Push this conditional inwards so conditionals sit directly above
        simple effects in the normalized tree."""
        norm_effect = self.effect.normalize()
        if isinstance(norm_effect, ConjunctiveEffect):
            # (if C (and e1 ... en)) -> (and (if C e1) ... (if C en))
            new_effects = []
            for effect in norm_effect.effects:
                assert isinstance(effect, SimpleEffect) or isinstance(effect, ConditionalEffect)
                new_effects.append(ConditionalEffect(self.condition, effect))
            return ConjunctiveEffect(new_effects)
        elif isinstance(norm_effect, UniversalEffect):
            # (if C (forall X e)) -> (forall X (if C e)).
            # NOTE(review): assumes C does not mention the quantified
            # variables — presumably guaranteed by variable uniquification;
            # confirm against uniquify_variables.
            child = norm_effect.effect
            cond_effect = ConditionalEffect(self.condition, child)
            return UniversalEffect(norm_effect.parameters, cond_effect)
        else:
            return ConditionalEffect(self.condition, norm_effect)
    def extract_cost(self):
        # Cost effects do not occur below conditionals, so nothing to extract.
        return None, self
class UniversalEffect:
    """Effect quantified over ``parameters`` (PDDL ``forall``)."""
    def __init__(self, parameters, effect):
        if isinstance(effect, UniversalEffect):
            # Merge directly nested quantifiers into one parameter list.
            self.parameters = parameters + effect.parameters
            self.effect = effect.effect
        else:
            self.parameters = parameters
            self.effect = effect
    def dump(self, indent=" "):
        print("%sforall %s" % (indent, ", ".join(map(str, self.parameters))))
        self.effect.dump(indent + " ")
    def normalize(self):
        """Push the quantifier inwards over conjunctions:
        (forall X (and e1 ... en)) -> (and (forall X e1) ... (forall X en))."""
        norm_effect = self.effect.normalize()
        if isinstance(norm_effect, ConjunctiveEffect):
            new_effects = []
            for effect in norm_effect.effects:
                assert isinstance(effect, SimpleEffect) or isinstance(effect, ConditionalEffect)\
                    or isinstance(effect, UniversalEffect)
                new_effects.append(UniversalEffect(self.parameters, effect))
            return ConjunctiveEffect(new_effects)
        else:
            return UniversalEffect(self.parameters, norm_effect)
    def extract_cost(self):
        # Cost effects do not occur below quantifiers, so nothing to extract.
        return None, self
class ConjunctiveEffect:
    """Conjunction of effects; nested conjunctions are flattened on creation."""
    def __init__(self, effects):
        flat = []
        for eff in effects:
            if isinstance(eff, ConjunctiveEffect):
                flat.extend(eff.effects)
            else:
                flat.append(eff)
        self.effects = flat
    def dump(self, indent=" "):
        print("%sand" % (indent))
        for eff in self.effects:
            eff.dump(indent + " ")
    def normalize(self):
        # Normalize each conjunct; the constructor re-flattens the result.
        return ConjunctiveEffect([eff.normalize() for eff in self.effects])
    def extract_cost(self):
        """Split off the cost effect (if any) from the ordinary effects."""
        cost_effect = None
        kept = []
        for eff in self.effects:
            if isinstance(eff, CostEffect):
                cost_effect = eff
            else:
                kept.append(eff)
        return cost_effect, ConjunctiveEffect(kept)
class SimpleEffect:
    """Leaf of the effect tree wrapping a single literal; already normalized."""
    def __init__(self, effect):
        self.effect = effect
    def dump(self, indent=" "):
        print(indent + str(self.effect))
    def normalize(self):
        # A simple effect is its own normal form.
        return self
    def extract_cost(self):
        # Not a cost effect.
        return None, self
class CostEffect:
    """Leaf wrapping an action-cost assignment (e.g. increase total-cost)."""
    def __init__(self, effect):
        self.effect = effect
    def dump(self, indent=" "):
        print(indent + str(self.effect))
    def normalize(self):
        # A cost effect is its own normal form.
        return self
    def extract_cost(self):
        # This only happens if an action has no effect apart from the cost effect.
        return self, None
| 8,106 |
Python
| 40.152284 | 97 | 0.581545 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/translate/pddl/actions.py
|
import copy
from . import conditions
class Action:
    """A PDDL action schema: parameters, precondition, effect list, cost."""
    def __init__(self, name, parameters, num_external_parameters,
                 precondition, effects, cost):
        assert 0 <= num_external_parameters <= len(parameters)
        self.name = name
        self.parameters = parameters
        # num_external_parameters denotes how many of the parameters
        # are "external", i.e., should be part of the grounded action
        # name. Usually all parameters are external, but "invisible"
        # parameters can be created when compiling away existential
        # quantifiers in conditions.
        self.num_external_parameters = num_external_parameters
        self.precondition = precondition
        self.effects = effects
        self.cost = cost
        self.uniquify_variables() # TODO: uniquify variables in cost?
    def __repr__(self):
        return "<Action %r at %#x>" % (self.name, id(self))
    def dump(self):
        """Print a human-readable description of the schema."""
        print("%s(%s)" % (self.name, ", ".join(map(str, self.parameters))))
        print("Precondition:")
        self.precondition.dump()
        print("Effects:")
        for eff in self.effects:
            eff.dump()
        print("Cost:")
        if(self.cost):
            self.cost.dump()
        else:
            print(" None")
    def uniquify_variables(self):
        # Give bound variables unique names so later transformations cannot
        # accidentally capture them.
        self.type_map = {par.name: par.type_name for par in self.parameters}
        self.precondition = self.precondition.uniquify_variables(self.type_map)
        for effect in self.effects:
            effect.uniquify_variables(self.type_map)
    def relaxed(self):
        # Delete relaxation: drop delete effects (relaxed() returns None for
        # them) and relax the precondition.
        new_effects = []
        for eff in self.effects:
            relaxed_eff = eff.relaxed()
            if relaxed_eff:
                new_effects.append(relaxed_eff)
        return Action(self.name, self.parameters, self.num_external_parameters,
                      self.precondition.relaxed().simplified(),
                      new_effects, self.cost)
    def untyped(self):
        # We do not actually remove the types from the parameter lists,
        # just additionally incorporate them into the conditions.
        # Maybe not very nice.
        result = copy.copy(self)
        parameter_atoms = [par.to_untyped_strips() for par in self.parameters]
        new_precondition = self.precondition.untyped()
        result.precondition = conditions.Conjunction(parameter_atoms + [new_precondition])
        result.effects = [eff.untyped() for eff in self.effects]
        return result
    def instantiate(self, var_mapping, init_facts, init_assignments,
                    fluent_facts, objects_by_type, metric, predicate_to_atoms):
        """Return a PropositionalAction which corresponds to the instantiation of
        this action with the arguments in var_mapping. Only fluent parts of the
        conditions (those in fluent_facts) are included. init_facts are evaluated
        while instantiating.
        Precondition and effect conditions must be normalized for this to work.
        Returns None if var_mapping does not correspond to a valid instantiation
        (because it has impossible preconditions or an empty effect list.)"""
        # Only external parameters appear in the grounded action name.
        arg_list = [var_mapping[par.name]
                    for par in self.parameters[:self.num_external_parameters]]
        name = "(%s %s)" % (self.name, " ".join(arg_list))
        precondition = []
        try:
            self.precondition.instantiate(var_mapping, init_facts,
                                          fluent_facts, precondition)
        except conditions.Impossible:
            # The precondition can never hold under this binding.
            return None
        effects = []
        for eff in self.effects:
            eff.instantiate(var_mapping, init_facts, fluent_facts,
                            objects_by_type, effects, predicate_to_atoms)
        if effects:
            if metric:
                if self.cost is None:
                    cost = 0
                else:
                    cost = int(self.cost.instantiate(
                        var_mapping, init_assignments).expression.value)
            else:
                # Without a metric, every action gets unit cost.
                cost = 1
            return PropositionalAction(name, precondition, effects, cost, self, var_mapping)
        else:
            return None
class PropositionalAction:
    """A grounded action with separated add and delete effect lists."""
    def __init__(self, name, precondition, effects, cost, action=None, var_mapping=None):
        self.name = name
        self.precondition = precondition
        self.add_effects = []
        self.del_effects = []
        # Raw (condition, literal, schema effect, binding) tuples as produced
        # by Effect.instantiate.
        self.effect_mappings = effects
        for condition, effect, _, _ in effects:
            if not effect.negated:
                self.add_effects.append((condition, effect))
        # Warning: This is O(N^2), could be turned into O(N).
        # But that might actually harm performance, since there are
        # usually few effects.
        # TODO: Measure this in critical domains, then use sets if acceptable.
        for condition, effect, _, _ in effects:
            # A delete effect whose positive counterpart is added under the
            # same condition is dropped (add wins).
            if effect.negated and (condition, effect.negate()) not in self.add_effects:
                self.del_effects.append((condition, effect.negate()))
        self.cost = cost
        self.action = action
        self.var_mapping = var_mapping
    def __repr__(self):
        return "<PropositionalAction %r at %#x>" % (self.name, id(self))
    def dump(self):
        """Print preconditions, conditional add/delete effects and the cost."""
        print(self.name)
        for fact in self.precondition:
            print("PRE: %s" % fact)
        for cond, fact in self.add_effects:
            print("ADD: %s -> %s" % (", ".join(map(str, cond)), fact))
        for cond, fact in self.del_effects:
            print("DEL: %s -> %s" % (", ".join(map(str, cond)), fact))
        print("cost:", self.cost)
| 5,646 |
Python
| 40.522059 | 92 | 0.59334 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/translate/pddl/f_expression.py
|
from __future__ import print_function
import math
COST_SCALE = 1
class FunctionalExpression(object):
    """Base class for numeric (function) expressions in PDDL."""
    def __init__(self, parts):
        self.parts = tuple(parts)
    def dump(self, indent=" "):
        print("%s%s" % (indent, self._dump()))
        for child in self.parts:
            child.dump(indent + " ")
    def _dump(self):
        # Subclasses override this with a richer one-line description.
        return self.__class__.__name__
    def instantiate(self, var_mapping, init_facts):
        # Only normalized subclasses (constants/PNEs) can be grounded.
        raise ValueError("Cannot instantiate condition: not normalized")
class NumericConstant(FunctionalExpression):
    """A literal number, scaled by COST_SCALE and rounded up to an integer."""
    parts = ()
    def __init__(self, value):
        # Fractional inputs are accepted and rounded up after scaling.
        self.value = int(math.ceil(COST_SCALE * float(value)))
    def __eq__(self, other):
        return self.__class__ == other.__class__ and self.value == other.value
    def __str__(self):
        return "{} {}".format(self.__class__.__name__, self.value)
    def _dump(self):
        return str(self)
    def instantiate(self, var_mapping, init_facts):
        # Constants ground to themselves.
        return self
class PrimitiveNumericExpression(FunctionalExpression):
    """An applied function symbol, e.g. (total-cost) or (distance ?a ?b)."""
    parts = ()
    def __init__(self, symbol, args):
        self.symbol = symbol
        self.args = tuple(args)
        # Precompute the hash: instances are immutable and used as dict keys.
        self.hash = hash((self.__class__, self.symbol, self.args))
    def __hash__(self):
        return self.hash
    def __eq__(self, other):
        return (self.__class__ == other.__class__ and self.symbol == other.symbol
                and self.args == other.args)
    def __str__(self):
        return "%s %s(%s)" % ("PNE", self.symbol, ", ".join(map(str, self.args)))
    def dump(self, indent=" "):
        print("%s%s" % (indent, self._dump()))
    def _dump(self):
        return str(self)
    def instantiate(self, var_mapping, init_assignments):
        """Ground the arguments and return the value assigned in the
        initial state; raises AssertionError if no assignment exists."""
        args = [var_mapping.get(arg, arg) for arg in self.args]
        pne = PrimitiveNumericExpression(self.symbol, args)
        assert self.symbol != "total-cost"
        # We know this expression is constant. Substitute it by corresponding
        # initialization from task.
        result = init_assignments.get(pne)
        assert result is not None, "Could not find instantiation for PNE: %r" % (str(pne),)
        return result
class FunctionAssignment:
    """Base class for numeric effects assigning ``expression`` to ``fluent``."""
    def __init__(self, fluent, expression):
        self.fluent = fluent
        self.expression = expression
    def __str__(self):
        return "%s %s %s" % (self.__class__.__name__, self.fluent, self.expression)
    def dump(self, indent=" "):
        print("%s%s" % (indent, self._dump()))
        self.fluent.dump(indent + " ")
        self.expression.dump(indent + " ")
    def _dump(self):
        return self.__class__.__name__
    def instantiate(self, var_mapping, init_facts):
        """Return a grounded copy; only normalized expressions are allowed."""
        if not (isinstance(self.expression, PrimitiveNumericExpression) or
                isinstance(self.expression, NumericConstant)):
            raise ValueError("Cannot instantiate assignment: not normalized")
        # We know that this assignment is a cost effect of an action (for initial state
        # assignments, "instantiate" is not called). Hence, we know that the fluent is
        # the 0-ary "total-cost" which does not need to be instantiated
        assert self.fluent.symbol == "total-cost"
        fluent = self.fluent
        expression = self.expression.instantiate(var_mapping, init_facts)
        return self.__class__(fluent, expression)
class Assign(FunctionAssignment):
    """Assignment effect: ``fluent := expression``."""
    def __str__(self):
        return "{} := {}".format(self.fluent, self.expression)
class Increase(FunctionAssignment):
    # Additive update (fluent += expression); shares all behavior with
    # FunctionAssignment and is distinguished only by its class.
    pass
| 3,623 |
Python
| 38.391304 | 91 | 0.608888 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/translate/pddl/tasks.py
|
from . import axioms
from . import predicates
class Task:
    """Container for a fully parsed PDDL planning task."""
    def __init__(self, domain_name, task_name, requirements,
                 types, objects, predicates, functions, init, goal,
                 actions, axioms, use_metric):
        self.domain_name = domain_name
        self.task_name = task_name
        self.requirements = requirements
        self.types = types
        self.objects = objects
        self.predicates = predicates
        self.functions = functions
        self.init = init
        self.goal = goal
        self.actions = actions
        self.axioms = axioms
        # Counter used to generate unique names for new derived predicates.
        self.axiom_counter = 0
        self.use_min_cost_metric = use_metric
    def add_axiom(self, parameters, condition):
        """Create, register and return a fresh derived-predicate axiom."""
        name = "new-axiom@%d" % self.axiom_counter
        self.axiom_counter += 1
        new_axiom = axioms.Axiom(name, parameters, len(parameters), condition)
        self.predicates.append(predicates.Predicate(name, parameters))
        self.axioms.append(new_axiom)
        return new_axiom
    def dump(self):
        """Print a human-readable summary of the whole task."""
        print("Problem %s: %s [%s]" % (
            self.domain_name, self.task_name, self.requirements))
        simple_sections = (("Types:", self.types),
                           ("Objects:", self.objects),
                           ("Predicates:", self.predicates),
                           ("Functions:", self.functions),
                           ("Init:", self.init))
        for heading, items in simple_sections:
            print(heading)
            for item in items:
                print(" %s" % item)
        print("Goal:")
        self.goal.dump()
        print("Actions:")
        for action in self.actions:
            action.dump()
        if self.axioms:
            print("Axioms:")
            for axiom in self.axioms:
                axiom.dump()
class Requirements:
    """Validated collection of PDDL requirement flags (e.g. ':strips')."""
    def __init__(self, requirements):
        self.requirements = requirements
        supported = (
            ":strips", ":adl", ":typing", ":negation", ":equality",
            ":negative-preconditions", ":disjunctive-preconditions",
            ":existential-preconditions", ":universal-preconditions",
            ":quantified-preconditions", ":conditional-effects",
            ":derived-predicates", ":action-costs")
        for req in requirements:
            # Unsupported flags abort translation early.
            assert req in supported, req
    def __str__(self):
        return ", ".join(self.requirements)
| 2,414 |
Python
| 33.014084 | 74 | 0.555095 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/translate/pddl/axioms.py
|
from __future__ import print_function
from . import conditions
class Axiom:
    """Schema for a derived predicate: ``condition`` implies the head atom."""
    def __init__(self, name, parameters, num_external_parameters, condition):
        # For an explanation of num_external_parameters, see the
        # related Action class. Note that num_external_parameters
        # always equals the arity of the derived predicate.
        assert 0 <= num_external_parameters <= len(parameters)
        self.name = name
        self.parameters = parameters
        self.num_external_parameters = num_external_parameters
        self.condition = condition
        self.uniquify_variables()
    def dump(self):
        args = map(str, self.parameters[:self.num_external_parameters])
        print("Axiom %s(%s)" % (self.name, ", ".join(args)))
        self.condition.dump()
    def uniquify_variables(self):
        # Rename bound variables so they are unique within this axiom.
        self.type_map = {par.name: par.type_name for par in self.parameters}
        self.condition = self.condition.uniquify_variables(self.type_map)
    def instantiate(self, var_mapping, init_facts, fluent_facts):
        # The comments for Action.instantiate apply accordingly.
        arg_list = [self.name] + [
            var_mapping[par.name]
            for par in self.parameters[:self.num_external_parameters]]
        name = "(%s)" % " ".join(arg_list)
        condition = []
        try:
            self.condition.instantiate(var_mapping, init_facts, fluent_facts, condition)
        except conditions.Impossible:
            # Statically impossible: drop this ground instance.
            return None
        effect_args = [var_mapping.get(arg.name, arg.name)
                       for arg in self.parameters[:self.num_external_parameters]]
        effect = conditions.Atom(self.name, effect_args)
        return PropositionalAxiom(name, condition, effect, self, var_mapping)
class PropositionalAxiom:
    """Ground axiom instance: condition literals imply one effect atom."""
    def __init__(self, name, condition, effect, axiom=None, var_mapping=None):
        self.name = name
        self.condition = condition
        self.effect = effect
        self.axiom = axiom
        self.var_mapping = var_mapping
    def clone(self):
        # Copy with a fresh condition list so the clone can be mutated safely.
        return PropositionalAxiom(self.name, list(self.condition),
                                  self.effect, self.axiom, self.var_mapping)
    def dump(self):
        """Print the axiom as NAME, its PRE lines and the EFF line."""
        if self.effect.negated:
            print("not", end=' ')
        print(self.name)
        for fact in self.condition:
            print("PRE: %s" % fact)
        print("EFF: %s" % self.effect)
    @property
    def key(self):
        # Identity for ordering/equality: everything except the schema links.
        return (self.name, self.condition, self.effect)
    def __lt__(self, other):
        return self.key < other.key
    def __le__(self, other):
        return self.key <= other.key
    def __eq__(self, other):
        return self.key == other.key
    def __repr__(self):
        return '<PropositionalAxiom %s %s -> %s>' % (
            self.name, self.condition, self.effect)
| 2,793 |
Python
| 33.493827 | 109 | 0.616541 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/translate/pddl/functions.py
|
class Function:
    """Declaration of a numeric PDDL function (fluent)."""
    def __init__(self, name, arguments, type_name):
        self.name = name
        self.arguments = arguments
        # Only numeric fluents are supported by this translator.
        if type_name != "number":
            raise SystemExit("Error: object fluents not supported\n" +
                             "(function %s has type %s)" % (name, type_name))
        self.type_name = type_name
    def __str__(self):
        arg_str = ", ".join(map(str, self.arguments))
        result = "%s(%s)" % (self.name, arg_str)
        if self.type_name:
            result += ": %s" % self.type_name
        return result
| 542 |
Python
| 35.199998 | 77 | 0.523985 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/translate/pddl/predicates.py
|
class Predicate:
    """Declaration of a PDDL predicate: a name plus its argument list."""
    def __init__(self, name, arguments):
        self.name = name
        self.arguments = arguments
    def __str__(self):
        arg_str = ", ".join(map(str, self.arguments))
        return "{}({})".format(self.name, arg_str)
    def get_arity(self):
        # Number of arguments the predicate takes.
        return len(self.arguments)
| 278 |
Python
| 24.363634 | 74 | 0.568345 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/translate/tests/test_scripts.py
|
import os.path
import subprocess
import sys
# Locations derived from this test file's position in the repository.
DIR = os.path.dirname(os.path.abspath(__file__))
TRANSLATE_DIR = os.path.dirname(DIR)
REPO = os.path.abspath(os.path.join(DIR, "..", "..", ".."))
BENCHMARKS = os.path.join(REPO, "misc", "tests", "benchmarks")
# A small sample task used to smoke-test every translator entry point.
DOMAIN = os.path.join(BENCHMARKS, "gripper", "domain.pddl")
PROBLEM = os.path.join(BENCHMARKS, "gripper", "prob01.pddl")
SCRIPTS = [
    "build_model.py",
    "graph.py",
    "instantiate.py",
    "invariant_finder.py",
    "normalize.py",
    "pddl_to_prolog.py",
    "translate.py",
]
def test_scripts():
    """Smoke test: each translator script must exit with status 0 on the
    sample gripper task."""
    for script in SCRIPTS:
        script = os.path.join(TRANSLATE_DIR, script)
        folder, filename = os.path.split(script)
        # check_call raises on non-zero exit, so the assert is belt-and-braces.
        assert subprocess.check_call([sys.executable, filename, DOMAIN, PROBLEM], cwd=folder) == 0
| 790 |
Python
| 29.423076 | 98 | 0.653165 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/translate/tests/test_normalization.py
|
from io import StringIO
import pddl
from pddl_to_prolog import Rule, PrologProgram
def test_normalization():
    """Normalizing a small Prolog program must produce exactly the expected
    (sorted) rule/fact dump, including generated @object typing rules."""
    prog = PrologProgram()
    prog.add_fact(pddl.Atom("at", ["foo", "bar"]))
    prog.add_fact(pddl.Atom("truck", ["bollerwagen"]))
    prog.add_fact(pddl.Atom("truck", ["segway"]))
    # Unsafe rule: ?Y occurs only in the head.
    prog.add_rule(Rule([pddl.Atom("truck", ["?X"])], pddl.Atom("at", ["?X", "?Y"])))
    # "X" (no "?") is a constant here, leaving head variable ?X unbound.
    prog.add_rule(Rule([pddl.Atom("truck", ["X"]), pddl.Atom("location", ["?Y"])],
                       pddl.Atom("at", ["?X", "?Y"])))
    # Head repeats ?X; normalization introduces an equality conjunct.
    prog.add_rule(Rule([pddl.Atom("truck", ["?X"]), pddl.Atom("location", ["?Y"])],
                       pddl.Atom("at", ["?X", "?X"])))
    # Repeated variables in the body atom also become equality conjuncts.
    prog.add_rule(Rule([pddl.Atom("p", ["?Y", "?Z", "?Y", "?Z"])],
                       pddl.Atom("q", ["?Y", "?Y"])))
    prog.add_rule(Rule([], pddl.Atom("foo", [])))
    prog.add_rule(Rule([], pddl.Atom("bar", ["X"])))
    prog.normalize()
    output = StringIO()
    prog.dump(file=output)
    # Sort the dump lines so the comparison is independent of rule order.
    sorted_output = "\n".join(sorted(output.getvalue().splitlines()))
    assert sorted_output == """\
Atom @object(bar).
Atom @object(bollerwagen).
Atom @object(foo).
Atom @object(segway).
Atom at(foo, bar).
Atom bar(X).
Atom foo().
Atom truck(bollerwagen).
Atom truck(segway).
none Atom at(?X, ?X@0) :- Atom truck(?X), Atom location(?Y), Atom =(?X, ?X@0).
none Atom at(?X, ?Y) :- Atom truck(?X), Atom @object(?Y).
none Atom at(?X, ?Y) :- Atom truck(X), Atom location(?Y), Atom @object(?X).
none Atom q(?Y, ?Y@0) :- Atom p(?Y, ?Z, ?Y, ?Z), Atom =(?Y, ?Y@0), Atom =(?Y, ?Y@1), Atom =(?Z, ?Z@2)."""
| 1,535 |
Python
| 39.421052 | 105 | 0.55114 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/planner.cc
|
#include "command_line.h"
#include "option_parser.h"
#include "search_engine.h"
#include "options/registries.h"
#include "tasks/root_task.h"
#include "task_utils/task_properties.h"
#include "../utils/logging.h"
#include "utils/system.h"
#include "utils/timer.h"
#include <iostream>
using namespace std;
using utils::ExitCode;
// Planner entry point: read the translated task from stdin (unless only
// "--help" is requested), parse the command line into a search engine,
// run the search, and report timings plus a descriptive exit code.
int main(int argc, const char **argv) {
    utils::register_event_handlers();
    if (argc < 2) {
        utils::g_log << usage(argv[0]) << endl;
        utils::exit_with(ExitCode::SEARCH_INPUT_ERROR);
    }
    bool unit_cost = false;
    if (static_cast<string>(argv[1]) != "--help") {
        utils::g_log << "reading input..." << endl;
        tasks::read_root_task(cin);
        utils::g_log << "done reading input!" << endl;
        TaskProxy task_proxy(*tasks::g_root_task);
        // unit_cost feeds into command-line parsing below.
        unit_cost = task_properties::is_unit_cost(task_proxy);
    }
    shared_ptr<SearchEngine> engine;
    // The command line is parsed twice: once in dry-run mode, to
    // check for simple input errors, and then in normal mode.
    try {
        options::Registry registry(*options::RawRegistry::instance());
        parse_cmd_line(argc, argv, registry, true, unit_cost);
        engine = parse_cmd_line(argc, argv, registry, false, unit_cost);
    } catch (const ArgError &error) {
        error.print();
        usage(argv[0]);
        utils::exit_with(ExitCode::SEARCH_INPUT_ERROR);
    } catch (const OptionParserError &error) {
        error.print();
        usage(argv[0]);
        utils::exit_with(ExitCode::SEARCH_INPUT_ERROR);
    } catch (const ParseError &error) {
        error.print();
        utils::exit_with(ExitCode::SEARCH_INPUT_ERROR);
    }
    utils::Timer search_timer;
    engine->search();
    search_timer.stop();
    utils::g_timer.stop();
    engine->save_plan_if_necessary();
    engine->print_statistics();
    utils::g_log << "Search time: " << search_timer << endl;
    utils::g_log << "Total time: " << utils::g_timer << endl;
    // Distinguish "plan found" from "search exhausted without a plan".
    ExitCode exitcode = engine->found_solution()
        ? ExitCode::SUCCESS
        : ExitCode::SEARCH_UNSOLVED_INCOMPLETE;
    utils::report_exit_code_reentrant(exitcode);
    return static_cast<int>(exitcode);
}
| 2,200 |
C++
| 30 | 72 | 0.623182 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/evaluator.cc
|
#include "evaluator.h"
#include "option_parser.h"
#include "plugin.h"
#include "utils/logging.h"
#include "utils/system.h"
#include <cassert>
using namespace std;
// Construct an evaluator with a human-readable description and flags
// controlling how search components may use it (minima reporting,
// open-list boosting, evaluation counting).
Evaluator::Evaluator(const string &description,
                     bool use_for_reporting_minima,
                     bool use_for_boosting,
                     bool use_for_counting_evaluations)
    : description(description),
      use_for_reporting_minima(use_for_reporting_minima),
      use_for_boosting(use_for_boosting),
      use_for_counting_evaluations(use_for_counting_evaluations) {
}
// Default: dead-end reports are trusted; subclasses may override.
bool Evaluator::dead_ends_are_reliable() const {
    return true;
}
// Log the evaluator's value for the initial state (only called when the
// evaluator participates in minima reporting).
void Evaluator::report_value_for_initial_state(const EvaluationResult &result) const {
    assert(use_for_reporting_minima);
    utils::g_log << "Initial heuristic value for " << description << ": ";
    if (result.is_infinite())
        utils::g_log << "infinity";
    else
        utils::g_log << result.get_evaluator_value();
    utils::g_log << endl;
}
// Log a newly found best (lowest) value for this evaluator.
void Evaluator::report_new_minimum_value(const EvaluationResult &result) const {
    assert(use_for_reporting_minima);
    utils::g_log << "New best heuristic value for " << description << ": "
                 << result.get_evaluator_value() << endl;
}
const string &Evaluator::get_description() const {
    return description;
}
bool Evaluator::is_used_for_reporting_minima() const {
    return use_for_reporting_minima;
}
bool Evaluator::is_used_for_boosting() const {
    return use_for_boosting;
}
bool Evaluator::is_used_for_counting_evaluations() const {
    return use_for_counting_evaluations;
}
// Caching is opt-in for subclasses; the base class caches nothing.
bool Evaluator::does_cache_estimates() const {
    return false;
}
bool Evaluator::is_estimate_cached(const State &) const {
    return false;
}
// Calling this on a non-caching evaluator is a programming error.
int Evaluator::get_cached_estimate(const State &) const {
    ABORT("Called get_cached_estimate when estimate is not cached.");
}
// Registers the "Evaluator" plugin type with the option parser; the long
// string below is user-facing documentation shown in the generated docs.
static PluginTypePlugin<Evaluator> _type_plugin(
    "Evaluator",
    "An evaluator specification is either a newly created evaluator "
    "instance or an evaluator that has been defined previously. "
    "This page describes how one can specify a new evaluator instance. "
    "For re-using evaluators, see OptionSyntax#Evaluator_Predefinitions.\n\n"
    "If the evaluator is a heuristic, "
    "definitions of //properties// in the descriptions below:\n\n"
    " * **admissible:** h(s) <= h*(s) for all states s\n"
    " * **consistent:** h(s) <= c(s, s') + h(s') for all states s "
    "connected to states s' by an action with cost c(s, s')\n"
    " * **safe:** h(s) = infinity is only true for states "
    "with h*(s) = infinity\n"
    " * **preferred operators:** this heuristic identifies "
    "preferred operators ",
    "evaluator", "heuristic");
| 2,745 |
C++
| 30.204545 | 86 | 0.666302 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/per_state_bitset.cc
|
#include "per_state_bitset.h"
using namespace std;
// Number of blocks needed to store num_bits bits (rounded up).
int BitsetMath::compute_num_blocks(size_t num_bits) {
    return (num_bits + bits_per_block - 1) / bits_per_block;
}
// Index of the block that contains bit position pos.
size_t BitsetMath::block_index(size_t pos) {
    return pos / bits_per_block;
}
// Offset of bit pos within its block.
size_t BitsetMath::bit_index(size_t pos) {
    return pos % bits_per_block;
}
// Mask with only the bit corresponding to pos set within its block.
BitsetMath::Block BitsetMath::bit_mask(size_t pos) {
    return Block(1) << bit_index(pos);
}
// Lightweight non-owning view of a packed bit vector stored in `data`.
BitsetView::BitsetView(ArrayView<BitsetMath::Block> data, int num_bits) :
    data(data), num_bits(num_bits) {}
// Set bit `index` to 1.
void BitsetView::set(int index) {
    assert(index >= 0 && index < num_bits);
    int block_index = BitsetMath::block_index(index);
    data[block_index] |= BitsetMath::bit_mask(index);
}
// Clear bit `index`.
void BitsetView::reset(int index) {
    assert(index >= 0 && index < num_bits);
    int block_index = BitsetMath::block_index(index);
    data[block_index] &= ~BitsetMath::bit_mask(index);
}
// Clear all bits.
void BitsetView::reset() {
    for (int i = 0; i < data.size(); ++i) {
        data[i] = BitsetMath::zeros;
    }
}
// Return whether bit `index` is set.
bool BitsetView::test(int index) const {
    assert(index >= 0 && index < num_bits);
    int block_index = BitsetMath::block_index(index);
    return (data[block_index] & BitsetMath::bit_mask(index)) != 0;
}
// In-place bitwise AND with an equally sized view.
void BitsetView::intersect(const BitsetView &other) {
    assert(num_bits == other.num_bits);
    for (int i = 0; i < data.size(); ++i) {
        data[i] &= other.data[i];
    }
}
// Number of bits (not blocks) in the view.
int BitsetView::size() const {
    return num_bits;
}
// Pack a vector<bool> into the block representation used by BitsetView.
static vector<BitsetMath::Block> pack_bit_vector(const vector<bool> &bits) {
    int num_bits = bits.size();
    int num_blocks = BitsetMath::compute_num_blocks(num_bits);
    vector<BitsetMath::Block> packed_bits(num_blocks, 0);
    // Reuse BitsetView::set to place each bit in the right block.
    BitsetView bitset_view(ArrayView<BitsetMath::Block>(packed_bits.data(), num_blocks), num_bits);
    for (int i = 0; i < num_bits; ++i) {
        if (bits[i]) {
            bitset_view.set(i);
        }
    }
    return packed_bits;
}
// Per-state storage of fixed-width bitsets, each initialized to default_bits.
PerStateBitset::PerStateBitset(const vector<bool> &default_bits)
    : num_bits_per_entry(default_bits.size()),
      data(pack_bit_vector(default_bits)) {
}
// Return a mutable view onto the bitset stored for the given state.
BitsetView PerStateBitset::operator[](const State &state) {
    return BitsetView(data[state], num_bits_per_entry);
}
| 2,233 |
C++
| 25.282353 | 99 | 0.642185 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/task_proxy.h
|
#ifndef TASK_PROXY_H
#define TASK_PROXY_H
#include "abstract_task.h"
#include "operator_id.h"
#include "state_id.h"
#include "task_id.h"
#include "algorithms/int_packer.h"
#include "utils/collections.h"
#include "utils/hash.h"
#include "utils/system.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <string>
#include <vector>
class AxiomsProxy;
class ConditionsProxy;
class EffectProxy;
class EffectConditionsProxy;
class EffectsProxy;
class FactProxy;
class FactsProxy;
class GoalsProxy;
class OperatorProxy;
class OperatorsProxy;
class PreconditionsProxy;
class State;
class StateRegistry;
class TaskProxy;
class VariableProxy;
class VariablesProxy;
namespace causal_graph {
class CausalGraph;
}
using PackedStateBin = int_packer::IntPacker::Bin;
/*
Overview of the task interface.
The task interface is divided into two parts: a set of proxy classes
for accessing task information (TaskProxy, OperatorProxy, etc.) and
task implementations (subclasses of AbstractTask). Each proxy class
knows which AbstractTask it belongs to and uses its methods to retrieve
information about the task. RootTask is the AbstractTask that
encapsulates the unmodified original task that the planner received
as input.
Example code for creating a new task object and accessing its operators:
TaskProxy task_proxy(*g_root_task());
for (OperatorProxy op : task->get_operators())
utils::g_log << op.get_name() << endl;
Since proxy classes only store a reference to the AbstractTask and
some indices, they can be copied cheaply.
In addition to the lightweight proxy classes, the task interface
consists of the State class, which is used to hold state information
for TaskProxy tasks. The State class contains packed or unpacked state data
and shares the ownership with its copies, so it is cheap to copy but
expensive to create. If performance is absolutely critical, the values of a
state can be unpacked and accessed as a vector<int>.
For now, heuristics work with a TaskProxy that can represent a transformed
view of the original task. The search algorithms work on the unmodified root
task. We therefore need to do two conversions between the search and the
heuristics: converting states of the root task to states of the task used in
the heuristic computation and converting operators of the task used by the
heuristic to operators of the task used by the search for reporting preferred
operators.
These conversions are done by the Heuristic base class with
Heuristic::convert_ancestor_state() and Heuristic::set_preferred().
int FantasyHeuristic::compute_heuristic(const State &ancestor_state) {
State state = convert_ancestor_state(ancestor_state);
set_preferred(task->get_operators()[42]);
int sum = 0;
for (FactProxy fact : state)
sum += fact.get_value();
return sum;
}
For helper functions that work on task related objects, please see the
task_properties.h module.
*/
/*
Basic iterator support for proxy collections.
*/
// Generic input iterator over any proxy collection that provides
// operator[] and size(); values are returned by value since proxies
// are cheap to copy.
template<typename ProxyCollection>
class ProxyIterator {
    /* We store a pointer to collection instead of a reference
       because iterators have to be copy assignable. */
    const ProxyCollection *collection;
    std::size_t pos;
public:
    using iterator_category = std::input_iterator_tag;
    using value_type = typename ProxyCollection::ItemType;
    using difference_type = int;
    using pointer = const value_type *;
    using reference = value_type;
    ProxyIterator(const ProxyCollection &collection, std::size_t pos)
        : collection(&collection), pos(pos) {
    }
    reference operator*() const {
        return (*collection)[pos];
    }
    // Post-increment: return the current element, then advance.
    value_type operator++(int) {
        value_type value(**this);
        ++(*this);
        return value;
    }
    ProxyIterator &operator++() {
        ++pos;
        return *this;
    }
    bool operator==(const ProxyIterator &other) const {
        // Comparing iterators of different collections is undefined.
        assert(collection == other.collection);
        return pos == other.pos;
    }
    bool operator!=(const ProxyIterator &other) const {
        return !(*this == other);
    }
};
// Free begin()/end() so proxy collections work with range-based for loops.
template<class ProxyCollection>
inline ProxyIterator<ProxyCollection> begin(ProxyCollection &collection) {
    return ProxyIterator<ProxyCollection>(collection, 0);
}
template<class ProxyCollection>
inline ProxyIterator<ProxyCollection> end(ProxyCollection &collection) {
    return ProxyIterator<ProxyCollection>(collection, collection.size());
}
// Cheap handle to one (variable, value) fact of an AbstractTask.
class FactProxy {
    const AbstractTask *task;
    FactPair fact;
public:
    FactProxy(const AbstractTask &task, int var_id, int value);
    FactProxy(const AbstractTask &task, const FactPair &fact);
    ~FactProxy() = default;
    VariableProxy get_variable() const;
    int get_value() const {
        return fact.value;
    }
    FactPair get_pair() const {
        return fact;
    }
    std::string get_name() const {
        return task->get_fact_name(fact);
    }
    bool operator==(const FactProxy &other) const {
        // Facts from different tasks must not be compared.
        assert(task == other.task);
        return fact == other.fact;
    }
    bool operator!=(const FactProxy &other) const {
        return !(*this == other);
    }
    // True if this fact and `other` can never hold simultaneously.
    bool is_mutex(const FactProxy &other) const {
        return task->are_facts_mutex(fact, other.fact);
    }
};
/*
  Iterator over all facts of a task: by increasing variable ID and,
  within a variable, by increasing value. Used by FactsProxy below.
*/
class FactsProxyIterator {
    const AbstractTask *task;
    int var_id;
    int value;
public:
    FactsProxyIterator(const AbstractTask &task, int var_id, int value)
        : task(&task), var_id(var_id), value(value) {}
    ~FactsProxyIterator() = default;

    FactProxy operator*() const {
        return FactProxy(*task, var_id, value);
    }

    FactsProxyIterator &operator++() {
        assert(var_id < task->get_num_variables());
        int num_facts = task->get_variable_domain_size(var_id);
        assert(value < num_facts);
        ++value;
        // Past the last value: wrap to the first value of the next variable.
        if (value == num_facts) {
            ++var_id;
            value = 0;
        }
        return *this;
    }

    bool operator==(const FactsProxyIterator &other) const {
        assert(task == other.task);
        return var_id == other.var_id && value == other.value;
    }

    bool operator!=(const FactsProxyIterator &other) const {
        return !(*this == other);
    }
};
/*
  Proxy class for the collection of all facts of a task.

  We don't implement size() because it would not be constant-time.

  FactsProxy supports iteration, e.g. for range-based for loops. This
  iterates over all facts in order of increasing variable ID, and in
  order of increasing value for each variable.
*/
class FactsProxy {
    const AbstractTask *task;
public:
    explicit FactsProxy(const AbstractTask &task)
        : task(&task) {}
    ~FactsProxy() = default;

    FactsProxyIterator begin() const {
        return FactsProxyIterator(*task, 0, 0);
    }

    // End position: value 0 of the one-past-the-last variable, matching
    // the wrap-around in FactsProxyIterator::operator++.
    FactsProxyIterator end() const {
        return FactsProxyIterator(*task, task->get_num_variables(), 0);
    }
};
// Abstract base for fact collections that act as conditions
// (operator preconditions, effect conditions, goals).
class ConditionsProxy {
protected:
    const AbstractTask *task;
public:
    using ItemType = FactProxy;

    explicit ConditionsProxy(const AbstractTask &task)
        : task(&task) {}
    virtual ~ConditionsProxy() = default;

    virtual std::size_t size() const = 0;
    virtual FactProxy operator[](std::size_t index) const = 0;

    bool empty() const {
        return size() == 0;
    }
};
// Lightweight handle to one state variable of an AbstractTask.
class VariableProxy {
    const AbstractTask *task;
    int id;
public:
    VariableProxy(const AbstractTask &task, int id)
        : task(&task), id(id) {}
    ~VariableProxy() = default;

    // Comparing variables of different tasks is an error (checked by assert).
    bool operator==(const VariableProxy &other) const {
        assert(task == other.task);
        return id == other.id;
    }

    bool operator!=(const VariableProxy &other) const {
        return !(*this == other);
    }

    int get_id() const {
        return id;
    }

    std::string get_name() const {
        return task->get_variable_name(id);
    }

    int get_domain_size() const {
        return task->get_variable_domain_size(id);
    }

    FactProxy get_fact(int index) const {
        assert(index < get_domain_size());
        return FactProxy(*task, id, index);
    }

    // A variable is derived iff it has an axiom layer (i.e. layer != -1).
    bool is_derived() const {
        int axiom_layer = task->get_variable_axiom_layer(id);
        return axiom_layer != -1;
    }

    int get_axiom_layer() const {
        int axiom_layer = task->get_variable_axiom_layer(id);
        /*
          This should only be called for derived variables.
          Non-derived variables have axiom_layer == -1.
          Use var.is_derived() to check.
        */
        assert(axiom_layer >= 0);
        return axiom_layer;
    }

    int get_default_axiom_value() const {
        assert(is_derived());
        return task->get_variable_default_axiom_value(id);
    }
};
// Proxy for the collection of all variables of a task.
class VariablesProxy {
    const AbstractTask *task;
public:
    using ItemType = VariableProxy;

    explicit VariablesProxy(const AbstractTask &task)
        : task(&task) {}
    ~VariablesProxy() = default;

    std::size_t size() const {
        return task->get_num_variables();
    }

    VariableProxy operator[](std::size_t index) const {
        assert(index < size());
        return VariableProxy(*task, index);
    }

    // All facts of all variables; see FactsProxy for the iteration order.
    FactsProxy get_facts() const {
        return FactsProxy(*task);
    }
};
// Preconditions of one operator or axiom, viewed as a fact collection.
class PreconditionsProxy : public ConditionsProxy {
    int op_index;
    bool is_axiom;
public:
    PreconditionsProxy(const AbstractTask &task, int op_index, bool is_axiom)
        : ConditionsProxy(task), op_index(op_index), is_axiom(is_axiom) {}
    ~PreconditionsProxy() = default;

    std::size_t size() const override {
        return task->get_num_operator_preconditions(op_index, is_axiom);
    }

    FactProxy operator[](std::size_t fact_index) const override {
        assert(fact_index < size());
        return FactProxy(*task, task->get_operator_precondition(
                             op_index, fact_index, is_axiom));
    }
};
// Conditions of one (conditional) effect of an operator or axiom,
// viewed as a fact collection.
class EffectConditionsProxy : public ConditionsProxy {
    int op_index;
    int eff_index;
    bool is_axiom;
public:
    EffectConditionsProxy(
        const AbstractTask &task, int op_index, int eff_index, bool is_axiom)
        : ConditionsProxy(task), op_index(op_index), eff_index(eff_index), is_axiom(is_axiom) {}
    ~EffectConditionsProxy() = default;

    std::size_t size() const override {
        return task->get_num_operator_effect_conditions(op_index, eff_index, is_axiom);
    }

    FactProxy operator[](std::size_t index) const override {
        assert(index < size());
        return FactProxy(*task, task->get_operator_effect_condition(
                             op_index, eff_index, index, is_axiom));
    }
};
// One (possibly conditional) effect of an operator or axiom.
class EffectProxy {
    const AbstractTask *task;
    int op_index;
    int eff_index;
    bool is_axiom;
public:
    EffectProxy(const AbstractTask &task, int op_index, int eff_index, bool is_axiom)
        : task(&task), op_index(op_index), eff_index(eff_index), is_axiom(is_axiom) {}
    ~EffectProxy() = default;

    // Conditions under which the effect fires (empty for unconditional effects).
    EffectConditionsProxy get_conditions() const {
        return EffectConditionsProxy(*task, op_index, eff_index, is_axiom);
    }

    // The fact the effect sets when it fires.
    FactProxy get_fact() const {
        return FactProxy(*task, task->get_operator_effect(
                             op_index, eff_index, is_axiom));
    }
};
// Collection of all effects of one operator or axiom.
class EffectsProxy {
    const AbstractTask *task;
    int op_index;
    bool is_axiom;
public:
    using ItemType = EffectProxy;

    EffectsProxy(const AbstractTask &task, int op_index, bool is_axiom)
        : task(&task), op_index(op_index), is_axiom(is_axiom) {}
    ~EffectsProxy() = default;

    std::size_t size() const {
        return task->get_num_operator_effects(op_index, is_axiom);
    }

    EffectProxy operator[](std::size_t eff_index) const {
        assert(eff_index < size());
        return EffectProxy(*task, op_index, eff_index, is_axiom);
    }
};
// Lightweight handle to one operator or axiom of a task.
class OperatorProxy {
    const AbstractTask *task;
    int index;
    bool is_an_axiom;
public:
    OperatorProxy(const AbstractTask &task, int index, bool is_axiom)
        : task(&task), index(index), is_an_axiom(is_axiom) {}
    ~OperatorProxy() = default;

    // Comparing operators of different tasks is an error (checked by assert).
    bool operator==(const OperatorProxy &other) const {
        assert(task == other.task);
        return index == other.index && is_an_axiom == other.is_an_axiom;
    }

    bool operator!=(const OperatorProxy &other) const {
        return !(*this == other);
    }

    PreconditionsProxy get_preconditions() const {
        return PreconditionsProxy(*task, index, is_an_axiom);
    }

    EffectsProxy get_effects() const {
        return EffectsProxy(*task, index, is_an_axiom);
    }

    int get_cost() const {
        return task->get_operator_cost(index, is_an_axiom);
    }

    bool is_axiom() const {
        return is_an_axiom;
    }

    std::string get_name() const {
        return task->get_operator_name(index, is_an_axiom);
    }

    int get_id() const {
        return index;
    }

    /*
      Eventually, this method should perhaps not be part of OperatorProxy but
      live in a class that handles the task transformation and knows about both
      the original and the transformed task.
    */
    OperatorID get_ancestor_operator_id(const AbstractTask *ancestor_task) const {
        assert(!is_an_axiom);
        return OperatorID(task->convert_operator_index(index, ancestor_task));
    }
};
// Collection of all (non-axiom) operators of a task.
class OperatorsProxy {
    const AbstractTask *task;
public:
    using ItemType = OperatorProxy;

    explicit OperatorsProxy(const AbstractTask &task)
        : task(&task) {}
    ~OperatorsProxy() = default;

    std::size_t size() const {
        return task->get_num_operators();
    }

    bool empty() const {
        return size() == 0;
    }

    OperatorProxy operator[](std::size_t index) const {
        assert(index < size());
        return OperatorProxy(*task, index, false);
    }

    // Convenience overload for OperatorID-based lookup.
    OperatorProxy operator[](OperatorID id) const {
        return (*this)[id.get_index()];
    }
};
// Collection of all axioms of a task; items are OperatorProxy objects
// constructed with is_axiom == true.
class AxiomsProxy {
    const AbstractTask *task;
public:
    using ItemType = OperatorProxy;

    explicit AxiomsProxy(const AbstractTask &task)
        : task(&task) {}
    ~AxiomsProxy() = default;

    std::size_t size() const {
        return task->get_num_axioms();
    }

    bool empty() const {
        return size() == 0;
    }

    OperatorProxy operator[](std::size_t index) const {
        assert(index < size());
        return OperatorProxy(*task, index, true);
    }
};
// The goal condition of the task, viewed as a fact collection.
class GoalsProxy : public ConditionsProxy {
public:
    explicit GoalsProxy(const AbstractTask &task)
        : ConditionsProxy(task) {}
    ~GoalsProxy() = default;

    std::size_t size() const override {
        return task->get_num_goals();
    }

    FactProxy operator[](std::size_t index) const override {
        assert(index < size());
        return FactProxy(*task, task->get_goal_fact(index));
    }
};
bool does_fire(const EffectProxy &effect, const State &state);
/*
  A full variable assignment of a task. A State is either registered
  (backed by packed data in a StateRegistry) or unregistered (backed by
  an unpacked value vector); it may hold both representations at once.
*/
class State {
    /*
      TODO: We want to try out two things:
        1. having StateID and num_variables next to each other, so that they
           can fit into one 8-byte-aligned block.
        2. removing num_variables altogether, getting it on demand.
           It is not used in (very) performance-critical code.
      Perhaps 2) has no positive influence after 1) because we would just have
      a 4-byte gap at the end of the object. But it would probably be nice to
      have fewer attributes regardless.
    */
    const AbstractTask *task;
    // Only set for registered states; nullptr otherwise.
    const StateRegistry *registry;
    StateID id;
    // Packed representation; only set for registered states.
    const PackedStateBin *buffer;
    /*
      values is mutable because we think of it as a redundant representation
      of the state's contents, a kind of cache. One could argue for doing this
      differently (for example because some methods require unpacked data, so
      in some sense its presence changes the "logical state" from the program
      perspective. It is a bit weird to have a const function like unpack that
      is only called for its side effect on the object. But we decided to use
      const here to mean "const from the perspective of the state space
      semantics of the state".
    */
    mutable std::shared_ptr<std::vector<int>> values;
    const int_packer::IntPacker *state_packer;
    int num_variables;
public:
    using ItemType = FactProxy;

    // Construct a registered state with only packed data.
    State(const AbstractTask &task, const StateRegistry &registry, StateID id,
          const PackedStateBin *buffer);
    // Construct a registered state with packed and unpacked data.
    State(const AbstractTask &task, const StateRegistry &registry, StateID id,
          const PackedStateBin *buffer, std::vector<int> &&values);
    // Construct a state with only unpacked data.
    State(const AbstractTask &task, std::vector<int> &&values);

    bool operator==(const State &other) const;
    bool operator!=(const State &other) const;

    /* Generate unpacked data if it is not available yet. Calling the function
       on a state that already has unpacked data has no effect. */
    void unpack() const;

    std::size_t size() const;
    FactProxy operator[](std::size_t var_id) const;
    FactProxy operator[](VariableProxy var) const;

    TaskProxy get_task() const;

    /* Return a pointer to the registry in which this state is registered.
       If the state is not registered, return nullptr. */
    const StateRegistry *get_registry() const;
    /* Return the ID of the state within its registry. If the state is not
       registered, return StateID::no_state. */
    StateID get_id() const;

    /* Access the unpacked values. Accessing the unpacked values in a state
       that doesn't have them is an error. Use unpack() to ensure the data
       exists. */
    const std::vector<int> &get_unpacked_values() const;

    /* Access the packed values. Accessing packed values on states that do
       not have them (unregistered states) is an error. */
    const PackedStateBin *get_buffer() const;

    /*
      Create a successor state with the given operator. The operator is assumed
      to be applicable and the precondition is not checked. This will create an
      unpacked, unregistered successor. If you need registered successors, use
      the methods of StateRegistry.
      Using this method on states without unpacked values is an error. Use
      unpack() to ensure the data exists.
    */
    State get_unregistered_successor(const OperatorProxy &op) const;
};
namespace utils {
// Hash support: feed a state's unpacked values into the hash computation.
inline void feed(HashState &hash_state, const State &state) {
    /*
      Hashing a state without unpacked data will result in an error.
      We don't want to unpack states implicitly, so this rules out the option
      of unpacking the states here on demand. Mixing hashes from packed and
      unpacked states would lead to logically equal states with different
      hashes. Hashing packed (and therefore registered) states also seems like
      a performance error because it's much cheaper to hash the state IDs
      instead.
    */
    feed(hash_state, state.get_unpacked_values());
}
}
/*
  Central access point to all parts of a task (variables, operators,
  axioms, goals, states). Cheap to copy: stores only a task pointer.
*/
class TaskProxy {
    const AbstractTask *task;
public:
    explicit TaskProxy(const AbstractTask &task)
        : task(&task) {}
    ~TaskProxy() = default;

    TaskID get_id() const {
        return TaskID(task);
    }

    // Register *subscriber* to be notified when the underlying task dies.
    void subscribe_to_task_destruction(subscriber::Subscriber<AbstractTask> *subscriber) const {
        task->subscribe(subscriber);
    }

    VariablesProxy get_variables() const {
        return VariablesProxy(*task);
    }

    OperatorsProxy get_operators() const {
        return OperatorsProxy(*task);
    }

    AxiomsProxy get_axioms() const {
        return AxiomsProxy(*task);
    }

    GoalsProxy get_goals() const {
        return GoalsProxy(*task);
    }

    // Construct an unregistered state from unpacked values.
    State create_state(std::vector<int> &&state_values) const {
        return State(*task, std::move(state_values));
    }

    // This method is meant to be called only by the state registry.
    State create_state(
        const StateRegistry &registry, StateID id,
        const PackedStateBin *buffer) const {
        return State(*task, registry, id, buffer);
    }

    // This method is meant to be called only by the state registry.
    State create_state(
        const StateRegistry &registry, StateID id,
        const PackedStateBin *buffer, std::vector<int> &&state_values) const {
        return State(*task, registry, id, buffer, std::move(state_values));
    }

    State get_initial_state() const {
        return create_state(task->get_initial_state_values());
    }

    /*
      Convert a state from an ancestor task into a state of this task.
      The given state has to belong to a task that is an ancestor of
      this task in the sense that this task is the result of a sequence
      of task transformations on the ancestor task. If this is not the
      case, the function aborts.

      Eventually, this method should perhaps not be part of TaskProxy but live
      in a class that handles the task transformation and knows about both the
      original and the transformed task.
    */
    State convert_ancestor_state(const State &ancestor_state) const {
        TaskProxy ancestor_task_proxy = ancestor_state.get_task();
        // Create a copy of the state values for the new state.
        ancestor_state.unpack();
        std::vector<int> state_values = ancestor_state.get_unpacked_values();
        task->convert_ancestor_state_values(
            state_values, ancestor_task_proxy.task);
        return create_state(std::move(state_values));
    }

    const causal_graph::CausalGraph &get_causal_graph() const;
};
// Out-of-line FactProxy definitions (they need VariableProxy to be complete).
inline FactProxy::FactProxy(const AbstractTask &task, const FactPair &fact)
    : task(&task), fact(fact) {
    assert(fact.var >= 0);
    assert(fact.var < task.get_num_variables());
    assert(fact.value >= 0);
    assert(fact.value < get_variable().get_domain_size());
}

inline FactProxy::FactProxy(const AbstractTask &task, int var_id, int value)
    : FactProxy(task, FactPair(var_id, value)) {
}

inline VariableProxy FactProxy::get_variable() const {
    return VariableProxy(*task, fact.var);
}
inline bool does_fire(const EffectProxy &effect, const State &state) {
for (FactProxy condition : effect.get_conditions()) {
if (state[condition.get_variable()] != condition)
return false;
}
return true;
}
inline bool State::operator==(const State &other) const {
    assert(task == other.task);
    if (registry != other.registry) {
        std::cerr << "Comparing registered states with unregistered states "
                  << "or registered states from different registries is "
                  << "treated as an error because it is likely not "
                  << "intentional."
                  << std::endl;
        utils::exit_with(utils::ExitCode::SEARCH_CRITICAL_ERROR);
    }
    if (registry) {
        // Registered states from the same registry are equal iff IDs match.
        return id == other.id;
    }
    // Both states are unregistered: compare the unpacked value vectors.
    assert(values);
    assert(other.values);
    return *values == *other.values;
}

inline bool State::operator!=(const State &other) const {
    return !(*this == other);
}
// Populate the unpacked value cache from the packed buffer.
// No-op if the unpacked values already exist (idempotent).
inline void State::unpack() const {
    if (!values) {
        int num_variables = size();
        /*
          A micro-benchmark in issue348 showed that constructing the vector
          in the required size and then assigning values was faster than the
          more obvious reserve/push_back. Although, the benchmark did not
          profile this specific code.

          We might consider a bulk-unpack method in state_packer that could be
          more efficient. (One can imagine state packer to have extra data
          structures that exploit sequentially unpacking each entry, by doing
          things bin by bin.)
        */
        values = std::make_shared<std::vector<int>>(num_variables);
        for (int var = 0; var < num_variables; ++var) {
            (*values)[var] = state_packer->get(buffer, var);
        }
    }
}
inline std::size_t State::size() const {
    return num_variables;
}

inline FactProxy State::operator[](std::size_t var_id) const {
    assert(var_id < size());
    // Prefer the unpacked cache; otherwise decode from the packed buffer.
    if (values)
        return FactProxy(*task, var_id, (*values)[var_id]);
    assert(buffer);
    assert(state_packer);
    return FactProxy(*task, var_id, state_packer->get(buffer, var_id));
}

inline FactProxy State::operator[](VariableProxy var) const {
    return (*this)[var.get_id()];
}

inline TaskProxy State::get_task() const {
    return TaskProxy(*task);
}

inline const StateRegistry *State::get_registry() const {
    return registry;
}

inline StateID State::get_id() const {
    return id;
}
// Packed data accessor; exits with an error for unregistered states.
inline const PackedStateBin *State::get_buffer() const {
    /*
      TODO: we should profile what happens if we #ifndef NDEBUG this test here
      and in other places (e.g. the next method). The 'if' itself is probably
      not costly, but the 'cerr <<' stuff might prevent inlining.
    */
    if (!buffer) {
        std::cerr << "Accessing the packed values of an unregistered state is "
                  << "treated as an error."
                  << std::endl;
        utils::exit_with(utils::ExitCode::SEARCH_CRITICAL_ERROR);
    }
    return buffer;
}

// Unpacked data accessor; exits with an error if unpack() was never called.
inline const std::vector<int> &State::get_unpacked_values() const {
    if (!values) {
        std::cerr << "Accessing the unpacked values of a state without "
                  << "unpacking them first is treated as an error. Please "
                  << "use State::unpack first."
                  << std::endl;
        utils::exit_with(utils::ExitCode::SEARCH_CRITICAL_ERROR);
    }
    return *values;
}
#endif
| 25,790 |
C
| 29.094516 | 96 | 0.651066 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/open_list_factory.h
|
#ifndef OPEN_LIST_FACTORY_H
#define OPEN_LIST_FACTORY_H
#include "open_list.h"
#include <memory>
// Abstract factory for open lists; concrete subclasses produce the
// state- and edge-based open list variants.
class OpenListFactory {
public:
    OpenListFactory() = default;
    virtual ~OpenListFactory() = default;
    // Factories are not copyable.
    OpenListFactory(const OpenListFactory &) = delete;

    virtual std::unique_ptr<StateOpenList> create_state_open_list() = 0;
    virtual std::unique_ptr<EdgeOpenList> create_edge_open_list() = 0;

    /*
      The following template receives manual specializations (in the
      cc file) for the open list types we want to support. It is
      intended for templatized callers, e.g. the constructor of
      AlternationOpenList.
    */
    template<typename T>
    std::unique_ptr<OpenList<T>> create_open_list();
};
#endif
| 738 |
C
| 23.633333 | 72 | 0.696477 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/search_engine.h
|
#ifndef SEARCH_ENGINE_H
#define SEARCH_ENGINE_H
#include "operator_cost.h"
#include "operator_id.h"
#include "plan_manager.h"
#include "search_progress.h"
#include "search_space.h"
#include "search_statistics.h"
#include "state_registry.h"
#include "task_proxy.h"
#include <vector>
namespace options {
class OptionParser;
class Options;
}
namespace ordered_set {
template<typename T>
class OrderedSet;
}
namespace successor_generator {
class SuccessorGenerator;
}
namespace utils {
enum class Verbosity;
}
// Outcome of a search step: still running, time limit reached,
// search failed/exhausted, or a plan was found.
enum SearchStatus {IN_PROGRESS, TIMEOUT, FAILED, SOLVED};

/*
  Common base class of all search engines. Subclasses implement
  initialize() and step().
*/
class SearchEngine {
    SearchStatus status;
    bool solution_found;
    Plan plan;
protected:
    // Hold a reference to the task implementation and pass it to objects that need it.
    const std::shared_ptr<AbstractTask> task;
    // Use task_proxy to access task information.
    TaskProxy task_proxy;
    PlanManager plan_manager;
    StateRegistry state_registry;
    const successor_generator::SuccessorGenerator &successor_generator;
    SearchSpace search_space;
    SearchProgress search_progress;
    SearchStatistics statistics;
    // NOTE(review): bound appears to be a plan-cost bound (see set_bound) -- confirm usage in subclasses.
    int bound;
    OperatorCost cost_type;
    bool is_unit_cost;
    double max_time;
    const utils::Verbosity verbosity;

    virtual void initialize() {}
    // Perform one unit of search work and report the resulting status.
    virtual SearchStatus step() = 0;

    void set_plan(const Plan &plan);
    bool check_goal_and_set_plan(const State &state);
    int get_adjusted_cost(const OperatorProxy &op) const;
public:
    SearchEngine(const options::Options &opts);
    virtual ~SearchEngine();
    virtual void print_statistics() const = 0;
    virtual void save_plan_if_necessary();
    bool found_solution() const;
    SearchStatus get_status() const;
    const Plan &get_plan() const;
    // Main entry point of the search.
    void search();
    const SearchStatistics &get_statistics() const {return statistics;}
    void set_bound(int b) {bound = b;}
    int get_bound() {return bound;}
    PlanManager &get_plan_manager() {return plan_manager;}

    /* The following three methods should become functions as they
       do not require access to private/protected class members. */
    static void add_pruning_option(options::OptionParser &parser);
    static void add_options_to_parser(options::OptionParser &parser);
    static void add_succ_order_options(options::OptionParser &parser);
};
/*
Print evaluator values of all evaluators evaluated in the evaluation context.
*/
extern void print_initial_evaluator_values(const EvaluationContext &eval_context);
extern void collect_preferred_operators(
EvaluationContext &eval_context, Evaluator *preferred_operator_evaluator,
ordered_set::OrderedSet<OperatorID> &preferred_operators);
#endif
| 2,673 |
C
| 27.446808 | 87 | 0.731762 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/state_registry.cc
|
#include "state_registry.h"
#include "per_state_information.h"
#include "task_proxy.h"
#include "task_utils/task_properties.h"
#include "utils/logging.h"
using namespace std;
// Set up the state packer, axiom evaluator, packed-data pool and the
// hash set that deduplicates registered states.
StateRegistry::StateRegistry(const TaskProxy &task_proxy)
    : task_proxy(task_proxy),
      state_packer(task_properties::g_state_packers[task_proxy]),
      axiom_evaluator(g_axiom_evaluators[task_proxy]),
      num_variables(task_proxy.get_variables().size()),
      state_data_pool(get_bins_per_state()),
      registered_states(
          StateIDSemanticHash(state_data_pool, get_bins_per_state()),
          StateIDSemanticEqual(state_data_pool, get_bins_per_state())) {
}
StateID StateRegistry::insert_id_or_pop_state() {
    /*
      Attempt to insert a StateID for the last state of state_data_pool
      if none is present yet. If this fails (another entry for this state
      is present), we have to remove the duplicate entry from the
      state data pool.
    */
    StateID candidate_id(state_data_pool.size() - 1);
    pair<int, bool> insertion = registered_states.insert(candidate_id.value);
    if (!insertion.second) {
        // Duplicate state: drop the buffer we just appended.
        state_data_pool.pop_back();
    }
    assert(registered_states.size() == static_cast<int>(state_data_pool.size()));
    return StateID(insertion.first);
}
// Wrap the packed buffer stored for *id* in a registered State object.
State StateRegistry::lookup_state(StateID id) const {
    return task_proxy.create_state(*this, id, state_data_pool[id.value]);
}
// Lazily pack and register the task's initial state on first access;
// later calls return the cached copy.
const State &StateRegistry::get_initial_state() {
    if (!cached_initial_state) {
        int num_bins = get_bins_per_state();
        unique_ptr<PackedStateBin[]> buffer(new PackedStateBin[num_bins]);
        // Avoid garbage values in half-full bins.
        fill_n(buffer.get(), num_bins, 0);
        State initial_state = task_proxy.get_initial_state();
        for (size_t i = 0; i < initial_state.size(); ++i) {
            state_packer.set(buffer.get(), i, initial_state[i].get_value());
        }
        state_data_pool.push_back(buffer.get());
        StateID id = insert_id_or_pop_state();
        cached_initial_state = utils::make_unique_ptr<State>(lookup_state(id));
    }
    return *cached_initial_state;
}
//TODO it would be nice to move the actual state creation (and operator application)
// out of the StateRegistry. This could for example be done by global functions
// operating on state buffers (PackedStateBin *).
State StateRegistry::get_successor_state(const State &predecessor, const OperatorProxy &op) {
assert(!op.is_axiom());
state_data_pool.push_back(predecessor.get_buffer());
PackedStateBin *buffer = state_data_pool[state_data_pool.size() - 1];
/* Experiments for issue348 showed that for tasks with axioms it's faster
to compute successor states using unpacked data. */
if (task_properties::has_axioms(task_proxy)) {
predecessor.unpack();
vector<int> new_values = predecessor.get_unpacked_values();
for (EffectProxy effect : op.get_effects()) {
if (does_fire(effect, predecessor)) {
FactPair effect_pair = effect.get_fact().get_pair();
new_values[effect_pair.var] = effect_pair.value;
}
}
axiom_evaluator.evaluate(new_values);
for (size_t i = 0; i < new_values.size(); ++i) {
state_packer.set(buffer, i, new_values[i]);
}
StateID id = insert_id_or_pop_state();
return task_proxy.create_state(*this, id, buffer, move(new_values));
} else {
for (EffectProxy effect : op.get_effects()) {
if (does_fire(effect, predecessor)) {
FactPair effect_pair = effect.get_fact().get_pair();
state_packer.set(buffer, effect_pair.var, effect_pair.value);
}
}
StateID id = insert_id_or_pop_state();
return task_proxy.create_state(*this, id, buffer);
}
}
int StateRegistry::get_bins_per_state() const {
return state_packer.get_num_bins();
}
int StateRegistry::get_state_size_in_bytes() const {
return get_bins_per_state() * sizeof(PackedStateBin);
}
void StateRegistry::print_statistics() const {
utils::g_log << "Number of registered states: " << size() << endl;
registered_states.print_statistics();
}
| 4,280 |
C++
| 37.918181 | 93 | 0.643692 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/pruning_method.h
|
#ifndef PRUNING_METHOD_H
#define PRUNING_METHOD_H
#include "operator_id.h"
#include "task_proxy.h"
#include <memory>
#include <vector>
class AbstractTask;
class PruningMethod {
protected:
std::shared_ptr<AbstractTask> task;
public:
PruningMethod();
virtual ~PruningMethod() = default;
virtual void initialize(const std::shared_ptr<AbstractTask> &task);
/* This method must not be called for goal states. This can be checked
with assertions in derived classes. */
virtual void prune_operators(const State &state,
std::vector<OperatorID> &op_ids) = 0;
virtual void print_statistics() const = 0;
};
#endif
| 680 |
C
| 20.967741 | 74 | 0.675 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/per_task_information.h
|
#ifndef PER_TASK_INFORMATION_H
#define PER_TASK_INFORMATION_H
#include "task_proxy.h"
#include "algorithms/subscriber.h"
#include "utils/hash.h"
#include "utils/memory.h"
#include <functional>
/*
A PerTaskInformation<T> acts like a HashMap<TaskID, T>
with two main differences:
(1) If an entry is accessed that does not exist yet, it is created using a
factory function that is passed to the PerTaskInformation in its
constructor.
(2) If a task is destroyed, its associated data in all PerTaskInformation
objects is automatically destroyed as well.
*/
template<class Entry>
class PerTaskInformation : public subscriber::Subscriber<AbstractTask> {
/*
EntryConstructor is the type of a function that can create objects for
a given task if the PerTaskInformation is accessed for a task that has no
associated entry yet. It receives a TaskProxy instead of the AbstractTask
because AbstractTask is an internal implementation detail as far as other
classes are concerned. It should return a unique_ptr to the newly created
object.
*/
using EntryConstructor = std::function<std::unique_ptr<Entry>(const TaskProxy &)>;
EntryConstructor entry_constructor;
utils::HashMap<TaskID, std::unique_ptr<Entry>> entries;
public:
/*
If no entry_constructor is passed to the PerTaskInformation explicitly,
we assume the class Entry has a constructor that takes a single TaskProxy
parameter.
*/
PerTaskInformation()
: entry_constructor(
[](const TaskProxy &task_proxy) {
return utils::make_unique_ptr<Entry>(task_proxy);
}) {
}
explicit PerTaskInformation(EntryConstructor entry_constructor)
: entry_constructor(entry_constructor) {
}
Entry &operator[](const TaskProxy &task_proxy) {
TaskID id = task_proxy.get_id();
const auto &it = entries.find(id);
if (it == entries.end()) {
entries[id] = entry_constructor(task_proxy);
task_proxy.subscribe_to_task_destruction(this);
}
return *entries[id];
}
virtual void notify_service_destroyed(const AbstractTask *task) override {
TaskID id = TaskProxy(*task).get_id();
entries.erase(id);
}
};
#endif
| 2,312 |
C
| 32.521739 | 86 | 0.679066 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/option_parser.h
|
#ifndef OPTION_PARSER_H
#define OPTION_PARSER_H
/*
TODO: This file was intended to be a temporary stub. We should think
of getting rid of it. This would entail including "options/options.h"
and "options/option_parser.h" instead and explicitly using the options
namespace. See also option_parser_util.h, plugin.h and issue588 for a
discussion.
*/
#include "options/errors.h"
#include "options/option_parser.h"
using options::Bounds;
using options::OptionParser;
using options::OptionParserError;
using options::Options;
using options::ParseError;
#endif
| 566 |
C
| 25.999999 | 72 | 0.768551 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/search_space.cc
|
#include "search_space.h"
#include "search_node_info.h"
#include "task_proxy.h"
#include "task_utils/task_properties.h"
#include "utils/logging.h"
#include <cassert>
using namespace std;
SearchNode::SearchNode(const State &state, SearchNodeInfo &info)
: state(state), info(info) {
assert(state.get_id() != StateID::no_state);
}
const State &SearchNode::get_state() const {
return state;
}
bool SearchNode::is_open() const {
return info.status == SearchNodeInfo::OPEN;
}
bool SearchNode::is_closed() const {
return info.status == SearchNodeInfo::CLOSED;
}
bool SearchNode::is_dead_end() const {
return info.status == SearchNodeInfo::DEAD_END;
}
bool SearchNode::is_new() const {
return info.status == SearchNodeInfo::NEW;
}
int SearchNode::get_g() const {
assert(info.g >= 0);
return info.g;
}
// Return the accumulated real (unadjusted) path cost of this node.
int SearchNode::get_real_g() const {
    return info.real_g;
}
// Open this node as the initial state: zero cost, no parent, no
// creating operator.
void SearchNode::open_initial() {
    assert(info.status == SearchNodeInfo::NEW);
    info.status = SearchNodeInfo::OPEN;
    info.g = 0;
    info.real_g = 0;
    info.parent_state_id = StateID::no_state;
    info.creating_operator = OperatorID::no_operator;
}
// Open a NEW node reached from parent_node via parent_op.
// adjusted_cost is the operator cost under the configured cost type;
// real_g always accumulates the operator's true cost.
void SearchNode::open(const SearchNode &parent_node,
                      const OperatorProxy &parent_op,
                      int adjusted_cost) {
    assert(info.status == SearchNodeInfo::NEW);
    info.status = SearchNodeInfo::OPEN;
    info.g = parent_node.info.g + adjusted_cost;
    info.real_g = parent_node.info.real_g + parent_op.get_cost();
    info.parent_state_id = parent_node.get_state().get_id();
    info.creating_operator = OperatorID(parent_op.get_id());
}
// Reopen an OPEN or CLOSED node with a (presumably cheaper) new parent.
void SearchNode::reopen(const SearchNode &parent_node,
                        const OperatorProxy &parent_op,
                        int adjusted_cost) {
    assert(info.status == SearchNodeInfo::OPEN ||
           info.status == SearchNodeInfo::CLOSED);
    // The latter possibility is for inconsistent heuristics, which
    // may require reopening closed nodes.
    info.status = SearchNodeInfo::OPEN;
    info.g = parent_node.info.g + adjusted_cost;
    info.real_g = parent_node.info.real_g + parent_op.get_cost();
    info.parent_state_id = parent_node.get_state().get_id();
    info.creating_operator = OperatorID(parent_op.get_id());
}
// like reopen, except doesn't change status
void SearchNode::update_parent(const SearchNode &parent_node,
                               const OperatorProxy &parent_op,
                               int adjusted_cost) {
    assert(info.status == SearchNodeInfo::OPEN ||
           info.status == SearchNodeInfo::CLOSED);
    // The latter possibility is for inconsistent heuristics, which
    // may require reopening closed nodes.
    info.g = parent_node.info.g + adjusted_cost;
    info.real_g = parent_node.info.real_g + parent_op.get_cost();
    info.parent_state_id = parent_node.get_state().get_id();
    info.creating_operator = OperatorID(parent_op.get_id());
}
// Mark an OPEN node as expanded.
void SearchNode::close() {
    assert(info.status == SearchNodeInfo::OPEN);
    info.status = SearchNodeInfo::CLOSED;
}
// Mark this node as a dead end; any previous status is overwritten.
void SearchNode::mark_as_dead_end() {
    info.status = SearchNodeInfo::DEAD_END;
}
// Debug output: print this node's state, plus the operator and parent
// state that created it (if any).
void SearchNode::dump(const TaskProxy &task_proxy) const {
    utils::g_log << state.get_id() << ": ";
    task_properties::dump_fdr(state);
    if (info.creating_operator != OperatorID::no_operator) {
        OperatorsProxy operators = task_proxy.get_operators();
        OperatorProxy op = operators[info.creating_operator.get_index()];
        utils::g_log << " created by " << op.get_name()
                     << " from " << info.parent_state_id << endl;
    } else {
        utils::g_log << " no parent" << endl;
    }
}
// A SearchSpace stores one SearchNodeInfo per registered state of the
// given registry.
SearchSpace::SearchSpace(StateRegistry &state_registry)
    : state_registry(state_registry) {
}
// Return a (lightweight) SearchNode wrapping the info stored for state.
SearchNode SearchSpace::get_node(const State &state) {
    return SearchNode(state, search_node_infos[state]);
}
// Reconstruct the operator sequence leading from the initial state to
// goal_state by following parent pointers, then reverse it into "path".
// Precondition: path is empty and goal_state belongs to our registry.
void SearchSpace::trace_path(const State &goal_state,
                             vector<OperatorID> &path) const {
    State current_state = goal_state;
    assert(current_state.get_registry() == &state_registry);
    assert(path.empty());
    for (;;) {
        const SearchNodeInfo &info = search_node_infos[current_state];
        if (info.creating_operator == OperatorID::no_operator) {
            // Only the initial state has no creating operator.
            assert(info.parent_state_id == StateID::no_state);
            break;
        }
        path.push_back(info.creating_operator);
        current_state = state_registry.lookup_state(info.parent_state_id);
    }
    reverse(path.begin(), path.end());
}
// Debug output: print every registered state together with the operator
// and parent state that created it (if any).
void SearchSpace::dump(const TaskProxy &task_proxy) const {
    OperatorsProxy operators = task_proxy.get_operators();
    for (StateID id : state_registry) {
        /* The body duplicates SearchNode::dump() but we cannot create
           a search node without discarding the const qualifier. */
        State state = state_registry.lookup_state(id);
        const SearchNodeInfo &node_info = search_node_infos[state];
        utils::g_log << id << ": ";
        task_properties::dump_fdr(state);
        if (node_info.creating_operator != OperatorID::no_operator &&
            node_info.parent_state_id != StateID::no_state) {
            OperatorProxy op = operators[node_info.creating_operator.get_index()];
            utils::g_log << " created by " << op.get_name()
                         << " from " << node_info.parent_state_id << endl;
        } else {
            // Wording matches SearchNode::dump(), whose body this
            // duplicates (previously the inconsistent "has no parent").
            utils::g_log << " no parent" << endl;
        }
    }
}
// Forward statistics reporting to the underlying state registry.
void SearchSpace::print_statistics() const {
    state_registry.print_statistics();
}
| 5,595 |
C++
| 32.710843 | 82 | 0.635031 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/search_progress.h
|
#ifndef SEARCH_PROGRESS_H
#define SEARCH_PROGRESS_H
#include <unordered_map>
class EvaluationContext;
class Evaluator;
namespace utils {
enum class Verbosity;
}
/*
This class helps track search progress.
Evaluators can be configured to be used for reporting new minima, boosting
open lists, or both. This class maintains a record of minimum evaluator
values for evaluators that are used for either of these two things.
*/
class SearchProgress {
    // Controls how much progress information is printed.
    const utils::Verbosity verbosity;
    // Best (lowest) value seen so far for each tracked evaluator.
    std::unordered_map<const Evaluator *, int> min_values;
    // Record "value" for "evaluator"; returns true iff it is a new minimum
    // (including the first value ever recorded for that evaluator).
    bool process_evaluator_value(const Evaluator *evaluator, int value);
public:
    explicit SearchProgress(utils::Verbosity verbosity);
    ~SearchProgress() = default;
    /*
      Call the following function after each state evaluation.
      It returns true if the evaluation context contains a new minimum value
      for at least one evaluator used for boosting.
      It also prints one line of output for all evaluators used for reporting
      minima that have a new minimum value in the given evaluation context.
      In both cases this includes the situation where the evaluator in question
      has not been evaluated previously, e.g., after evaluating the initial
      state.
    */
    bool check_progress(const EvaluationContext &eval_context);
};
#endif
| 1,338 |
C
| 26.32653 | 79 | 0.740658 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/search_engine.cc
|
#include "search_engine.h"
#include "evaluation_context.h"
#include "evaluator.h"
#include "option_parser.h"
#include "plugin.h"
#include "algorithms/ordered_set.h"
#include "task_utils/successor_generator.h"
#include "task_utils/task_properties.h"
#include "tasks/root_task.h"
#include "utils/countdown_timer.h"
#include "utils/logging.h"
#include "utils/rng_options.h"
#include "utils/system.h"
#include "utils/timer.h"
#include <cassert>
#include <iostream>
#include <limits>
using namespace std;
using utils::ExitCode;
class PruningMethod;
// Fetch the successor generator for the given task from the global
// per-task store (constructing it on first access) and log how much
// time and peak memory the construction cost.
successor_generator::SuccessorGenerator &get_successor_generator(const TaskProxy &task_proxy) {
    utils::g_log << "Building successor generator..." << flush;
    const int memory_before_kb = utils::get_peak_memory_in_kb();
    utils::Timer construction_timer;
    successor_generator::SuccessorGenerator &generator =
        successor_generator::g_successor_generators[task_proxy];
    construction_timer.stop();
    utils::g_log << "done!" << endl;
    const int memory_after_kb = utils::get_peak_memory_in_kb();
    utils::g_log << "peak memory difference for successor generator creation: "
                 << memory_after_kb - memory_before_kb << " KB" << endl
                 << "time for successor generation creation: "
                 << construction_timer << endl;
    return generator;
}
// Construct a search engine from parsed options. Note: member
// initialization order follows the member declaration order in
// search_engine.h (not visible here); do not reorder this list.
SearchEngine::SearchEngine(const Options &opts)
    : status(IN_PROGRESS),
      solution_found(false),
      task(tasks::g_root_task),
      task_proxy(*task),
      state_registry(task_proxy),
      successor_generator(get_successor_generator(task_proxy)),
      search_space(state_registry),
      search_progress(opts.get<utils::Verbosity>("verbosity")),
      statistics(opts.get<utils::Verbosity>("verbosity")),
      cost_type(opts.get<OperatorCost>("cost_type")),
      is_unit_cost(task_properties::is_unit_cost(task_proxy)),
      max_time(opts.get<double>("max_time")),
      verbosity(opts.get<utils::Verbosity>("verbosity")) {
    // A negative cost bound is meaningless; fail fast on bad input.
    if (opts.get<int>("bound") < 0) {
        cerr << "error: negative cost bound " << opts.get<int>("bound") << endl;
        utils::exit_with(ExitCode::SEARCH_INPUT_ERROR);
    }
    bound = opts.get<int>("bound");
    task_properties::print_variable_statistics(task_proxy);
}
SearchEngine::~SearchEngine() {
}
// True iff set_plan() has been called with a solution.
bool SearchEngine::found_solution() const {
    return solution_found;
}
// Current search status (IN_PROGRESS, SOLVED, FAILED, TIMEOUT, ...).
SearchStatus SearchEngine::get_status() const {
    return status;
}
// Return the stored plan; only valid after a solution was found.
const Plan &SearchEngine::get_plan() const {
    assert(solution_found);
    return plan;
}
// Store a solution plan and mark the search as successful.
void SearchEngine::set_plan(const Plan &p) {
    solution_found = true;
    plan = p;
}
// Main driver: initialize, then repeat single search steps until the
// search terminates or the time limit expires. The timeout is only
// checked between steps, so the actual runtime can exceed max_time.
void SearchEngine::search() {
    initialize();
    utils::CountdownTimer timer(max_time);
    while (status == IN_PROGRESS) {
        status = step();
        if (timer.is_expired()) {
            utils::g_log << "Time limit reached. Abort search." << endl;
            status = TIMEOUT;
            break;
        }
    }
    // TODO: Revise when and which search times are logged.
    utils::g_log << "Actual search time: " << timer.get_elapsed_time() << endl;
}
// If state is a goal state, trace and store the plan leading to it and
// return true; otherwise return false.
bool SearchEngine::check_goal_and_set_plan(const State &state) {
    if (task_properties::is_goal_state(task_proxy, state)) {
        utils::g_log << "Solution found!" << endl;
        Plan plan;
        search_space.trace_path(state, plan);
        set_plan(plan);
        return true;
    }
    return false;
}
// Write the plan to disk if a solution was found; no-op otherwise.
void SearchEngine::save_plan_if_necessary() {
    if (found_solution()) {
        plan_manager.save_plan(get_plan(), task_proxy);
    }
}
// Operator cost under the configured cost type (e.g. unit costs).
int SearchEngine::get_adjusted_cost(const OperatorProxy &op) const {
    return get_adjusted_action_cost(op, cost_type, is_unit_cost);
}
/* TODO: merge this into add_options_to_parser when all search
   engines support pruning.
   Method doesn't belong here because it's only useful for certain derived classes.
   TODO: Figure out where it belongs and move it there. */
// Register the "pruning" option (default: no pruning).
void SearchEngine::add_pruning_option(OptionParser &parser) {
    parser.add_option<shared_ptr<PruningMethod>>(
        "pruning",
        "Pruning methods can prune or reorder the set of applicable operators in "
        "each state and thereby influence the number and order of successor states "
        "that are considered.",
        "null()");
}
// Register the options shared by all search engines: cost_type, bound,
// max_time and verbosity.
void SearchEngine::add_options_to_parser(OptionParser &parser) {
    ::add_cost_type_option_to_parser(parser);
    parser.add_option<int>(
        "bound",
        "exclusive depth bound on g-values. Cutoffs are always performed according to "
        "the real cost, regardless of the cost_type parameter", "infinity");
    parser.add_option<double>(
        "max_time",
        "maximum time in seconds the search is allowed to run for. The "
        "timeout is only checked after each complete search step "
        "(usually a node expansion), so the actual runtime can be arbitrarily "
        "longer. Therefore, this parameter should not be used for time-limiting "
        "experiments. Timed-out searches are treated as failed searches, "
        "just like incomplete search algorithms that exhaust their search space.",
        "infinity");
    utils::add_verbosity_option_to_parser(parser);
}
/* Method doesn't belong here because it's only useful for certain derived classes.
   TODO: Figure out where it belongs and move it there. */
// Register successor-ordering options (randomization, preferred-first).
// Note: removed an unused local "vector<string> options;" that was never
// read or written.
void SearchEngine::add_succ_order_options(OptionParser &parser) {
    parser.add_option<bool>(
        "randomize_successors",
        "randomize the order in which successors are generated",
        "false");
    parser.add_option<bool>(
        "preferred_successors_first",
        "consider preferred operators first",
        "false");
    parser.document_note(
        "Successor ordering",
        "When using randomize_successors=true and "
        "preferred_successors_first=true, randomization happens before "
        "preferred operators are moved to the front.");
    utils::add_rng_options(parser);
}
// Report the initial-state value of every cached evaluator that is
// configured for minimum reporting.
void print_initial_evaluator_values(const EvaluationContext &eval_context) {
    eval_context.get_cache().for_each_evaluator_result(
        [] (const Evaluator *eval, const EvaluationResult &result) {
            if (eval->is_used_for_reporting_minima()) {
                eval->report_value_for_initial_state(result);
            }
        }
        );
}
// Registers the SearchEngine plugin type with the option system.
static PluginTypePlugin<SearchEngine> _type_plugin(
    "SearchEngine",
    // TODO: Replace empty string by synopsis for the wiki page.
    "");
// Collect the preferred operators reported by the given evaluator into
// "preferred_operators", unless the evaluator value is infinite
// (i.e. the state is a recognized dead end).
void collect_preferred_operators(
    EvaluationContext &eval_context,
    Evaluator *preferred_operator_evaluator,
    ordered_set::OrderedSet<OperatorID> &preferred_operators) {
    if (!eval_context.is_evaluator_value_infinite(preferred_operator_evaluator)) {
        for (OperatorID op_id : eval_context.get_preferred_operators(preferred_operator_evaluator)) {
            preferred_operators.insert(op_id);
        }
    }
}
| 6,989 |
C++
| 33.95 | 101 | 0.665045 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/axioms.cc
|
#include "axioms.h"
#include "task_utils/task_properties.h"
#include "utils/memory.h"
#include <algorithm>
#include <cassert>
#include <iostream>
#include <unordered_map>
#include <vector>
using namespace std;
// Build the axiom evaluation data structures for the given task:
// per-(variable,value) literals, Horn-style rules (one per non-default
// axiom effect), negation-by-failure info per axiom layer, and the
// derived variables' default values. Everything is skipped when the
// task has no axioms.
AxiomEvaluator::AxiomEvaluator(const TaskProxy &task_proxy) {
    task_has_axioms = task_properties::has_axioms(task_proxy);
    if (task_has_axioms) {
        VariablesProxy variables = task_proxy.get_variables();
        AxiomsProxy axioms = task_proxy.get_axioms();
        // Initialize literals
        for (VariableProxy var : variables)
            axiom_literals.emplace_back(var.get_domain_size());
        // Initialize rules
        // Since we are skipping some axioms, we cannot access them through
        // their id position directly.
        vector<int> axiom_id_to_position(axioms.size(), -1);
        for (OperatorProxy axiom : axioms) {
            // Axioms are assumed to have exactly one (conditional) effect.
            assert(axiom.get_effects().size() == 1);
            EffectProxy cond_effect = axiom.get_effects()[0];
            FactPair effect = cond_effect.get_fact().get_pair();
            int num_conditions = cond_effect.get_conditions().size();
            // Ignore axioms which set the variable to its default value.
            if (effect.value != variables[effect.var].get_default_axiom_value()) {
                AxiomLiteral *eff_literal = &axiom_literals[effect.var][effect.value];
                axiom_id_to_position[axiom.get_id()] = rules.size();
                rules.emplace_back(
                    num_conditions, effect.var, effect.value, eff_literal);
            }
        }
        // Cross-reference rules and literals
        for (OperatorProxy axiom : axioms) {
            // Ignore axioms which set the variable to its default value.
            int position = axiom_id_to_position[axiom.get_id()];
            if (position != -1) {
                EffectProxy effect = axiom.get_effects()[0];
                for (FactProxy condition : effect.get_conditions()) {
                    int var_id = condition.get_variable().get_id();
                    int val = condition.get_value();
                    AxiomRule *rule = &rules[position];
                    axiom_literals[var_id][val].condition_of.push_back(rule);
                }
            }
        }
        // Initialize negation-by-failure information
        int last_layer = -1;
        for (VariableProxy var : variables) {
            if (var.is_derived()) {
                last_layer = max(last_layer, var.get_axiom_layer());
            }
        }
        nbf_info_by_layer.resize(last_layer + 1);
        for (VariableProxy var : variables) {
            if (var.is_derived()) {
                int layer = var.get_axiom_layer();
                // NBF info is not needed for the last layer (see evaluate()).
                if (layer != last_layer) {
                    int var_id = var.get_id();
                    int nbf_value = var.get_default_axiom_value();
                    AxiomLiteral *nbf_literal = &axiom_literals[var_id][nbf_value];
                    nbf_info_by_layer[layer].emplace_back(var_id, nbf_literal);
                }
            }
        }
        // default_values holds the default axiom value for derived
        // variables and -1 as a marker for non-derived ones.
        default_values.reserve(variables.size());
        for (VariableProxy var : variables) {
            if (var.is_derived())
                default_values.emplace_back(var.get_default_axiom_value());
            else
                default_values.emplace_back(-1);
        }
    }
}
// Evaluate all axioms on "state" in place: reset derived variables to
// their defaults, then compute the fixpoint layer by layer, applying
// Horn rules and negation-by-failure rules. No-op for axiom-free tasks.
void AxiomEvaluator::evaluate(vector<int> &state) {
    if (!task_has_axioms)
        return;
    assert(queue.empty());
    // Seed the queue with the literals of all non-derived variables and
    // reset every derived variable to its default value.
    for (size_t var_id = 0; var_id < default_values.size(); ++var_id) {
        int default_value = default_values[var_id];
        if (default_value != -1) {
            state[var_id] = default_value;
        } else {
            int value = state[var_id];
            queue.push_back(&axiom_literals[var_id][value]);
        }
    }
    for (AxiomRule &rule : rules) {
        rule.unsatisfied_conditions = rule.condition_count;
        /*
          TODO: In a perfect world, trivial axioms would have been
          compiled away, and we could have the following assertion
          instead of the following block.
          assert(rule.condition_count != 0);
        */
        if (rule.condition_count == 0) {
            /*
              NOTE: This duplicates code from the main loop below.
              I don't mind because this is (hopefully!) going away
              some time.
            */
            int var_no = rule.effect_var;
            int val = rule.effect_val;
            if (state[var_no] != val) {
                state[var_no] = val;
                queue.push_back(rule.effect_literal);
            }
        }
    }
    for (size_t layer_no = 0; layer_no < nbf_info_by_layer.size(); ++layer_no) {
        // Apply Horn rules.
        while (!queue.empty()) {
            const AxiomLiteral *curr_literal = queue.back();
            queue.pop_back();
            for (size_t i = 0; i < curr_literal->condition_of.size(); ++i) {
                AxiomRule *rule = curr_literal->condition_of[i];
                // Rule fires once all of its conditions are satisfied.
                if (--rule->unsatisfied_conditions == 0) {
                    int var_no = rule->effect_var;
                    int val = rule->effect_val;
                    if (state[var_no] != val) {
                        state[var_no] = val;
                        queue.push_back(rule->effect_literal);
                    }
                }
            }
        }
        /*
          Apply negation by failure rules. Skip this in last iteration
          to save some time (see issue420, msg3058).
        */
        if (layer_no != nbf_info_by_layer.size() - 1) {
            const vector<NegationByFailureInfo> &nbf_info = nbf_info_by_layer[layer_no];
            for (size_t i = 0; i < nbf_info.size(); ++i) {
                int var_no = nbf_info[i].var_no;
                // Verify that variable is derived.
                assert(default_values[var_no] != -1);
                if (state[var_no] == default_values[var_no])
                    queue.push_back(nbf_info[i].literal);
            }
        }
    }
}
// Global store holding one AxiomEvaluator per task (via PerTaskInformation).
PerTaskInformation<AxiomEvaluator> g_axiom_evaluators;
| 6,101 |
C++
| 36.435583 | 88 | 0.529585 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/search_statistics.h
|
#ifndef SEARCH_STATISTICS_H
#define SEARCH_STATISTICS_H
/*
This class keeps track of search statistics.
It keeps counters for expanded, generated and evaluated states (and
some other statistics) and provides uniform output for all search
methods.
*/
namespace utils {
enum class Verbosity;
}
class SearchStatistics {
    // Controls how much statistics output is printed.
    const utils::Verbosity verbosity;
    // General statistics
    int expanded_states;  // no states for which successors were generated
    int evaluated_states; // no states for which h fn was computed
    int evaluations;      // no of heuristic evaluations performed
    int generated_states; // no states created in total (plus those removed since already in close list)
    int reopened_states;  // no of *closed* states which we reopened
    int dead_end_states;
    int generated_ops;    // no of operators that were returned as applicable
    // Statistics related to f values
    int lastjump_f_value; //f value obtained in the last jump
    int lastjump_expanded_states; // same guy but at point where the last jump in the open list
    int lastjump_reopened_states; // occurred (jump == f-value of the first node in the queue increases)
    int lastjump_evaluated_states;
    int lastjump_generated_states;
    // Print one progress line describing the statistics at the last jump.
    void print_f_line() const;
public:
    explicit SearchStatistics(utils::Verbosity verbosity);
    ~SearchStatistics() = default;
    // Methods that update statistics.
    void inc_expanded(int inc = 1) {expanded_states += inc;}
    void inc_evaluated_states(int inc = 1) {evaluated_states += inc;}
    void inc_generated(int inc = 1) {generated_states += inc;}
    void inc_reopened(int inc = 1) {reopened_states += inc;}
    void inc_generated_ops(int inc = 1) {generated_ops += inc;}
    void inc_evaluations(int inc = 1) {evaluations += inc;}
    void inc_dead_ends(int inc = 1) {dead_end_states += inc;}
    // Methods that access statistics.
    int get_expanded() const {return expanded_states;}
    int get_evaluated_states() const {return evaluated_states;}
    int get_evaluations() const {return evaluations;}
    int get_generated() const {return generated_states;}
    int get_reopened() const {return reopened_states;}
    int get_generated_ops() const {return generated_ops;}
    /*
      Call the following method with the f value of every expanded
      state. It will notice "jumps" (i.e., when the expanded f value
      is the highest f value encountered so far), print some
      statistics on jumps, and keep track of expansions etc. up to the
      last jump.
      Statistics until the final jump are often useful to report in
      A*-style searches because they are not affected by tie-breaking
      as the overall statistics. (With a non-random, admissible and
      consistent heuristic, the number of expanded, evaluated and
      generated states until the final jump is fully determined by the
      state space and heuristic, independently of things like the
      order in which successors are generated or the tie-breaking
      performed by the open list.)
    */
    void report_f_value_progress(int f);
    void print_checkpoint_line(int g) const;
    // output
    void print_basic_statistics() const;
    void print_detailed_statistics() const;
};
#endif
| 3,259 |
C
| 38.277108 | 104 | 0.706965 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/state_id.h
|
#ifndef STATE_ID_H
#define STATE_ID_H
#include <iostream>
// For documentation on classes relevant to storing and working with registered
// states see the file state_registry.h.
// Lightweight, copyable handle identifying a state within a
// StateRegistry. Only the registry (and the per-state containers that
// are friends below) may construct valid IDs.
class StateID {
    friend class StateRegistry;
    friend std::ostream &operator<<(std::ostream &os, StateID id);
    template<typename>
    friend class PerStateInformation;
    template<typename>
    friend class PerStateArray;
    friend class PerStateBitset;
    // Index into the owning registry; -1 encodes "no state".
    int value;
    explicit StateID(int value_)
        : value(value_) {
    }
    // No implementation to prevent default construction
    StateID();
public:
    ~StateID() {
    }
    // Sentinel ID used e.g. for the initial state's (missing) parent.
    static const StateID no_state;
    bool operator==(const StateID &other) const {
        return value == other.value;
    }
    bool operator!=(const StateID &other) const {
        return !(*this == other);
    }
};
#endif
| 864 |
C
| 19.595238 | 79 | 0.659722 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/per_state_information.h
|
#ifndef PER_STATE_INFORMATION_H
#define PER_STATE_INFORMATION_H
#include "state_registry.h"
#include "algorithms/segmented_vector.h"
#include "algorithms/subscriber.h"
#include "utils/collections.h"
#include <cassert>
#include <iostream>
#include <unordered_map>
/*
PerStateInformation is used to associate information with states.
PerStateInformation<Entry> logically behaves somewhat like an unordered map
from states to objects of class Entry. However, lookup of unknown states is
supported and leads to insertion of a default value (similar to the
defaultdict class in Python).
For example, search algorithms can use it to associate g values or create
operators with a state.
Implementation notes: PerStateInformation is essentially implemented as a
kind of two-level map:
1. Find the correct SegmentedVector for the registry of the given state.
2. Look up the associated entry in the SegmentedVector based on the ID of
the state.
It is common in many use cases that we look up information for states from
the same registry in sequence. Therefore, to make step 1. more efficient, we
remember (in "cached_registry" and "cached_entries") the results of the
previous lookup and reuse it on consecutive lookups for the same registry.
A PerStateInformation object subscribes to every StateRegistry for which it
stores information. Once a StateRegistry is destroyed, it notifies all
subscribed objects, which in turn destroy all information stored for states
in that registry.
*/
template<class Entry>
class PerStateInformation : public subscriber::Subscriber<StateRegistry> {
    // Value returned/stored for states that have no explicit entry yet.
    const Entry default_value;
    using EntryVectorMap = std::unordered_map<const StateRegistry *,
                                              segmented_vector::SegmentedVector<Entry> * >;
    // One entry vector per registry we have seen (owned by this object).
    EntryVectorMap entries_by_registry;
    // Single-slot cache of the most recently used registry/vector pair.
    mutable const StateRegistry *cached_registry;
    mutable segmented_vector::SegmentedVector<Entry> *cached_entries;
    /*
      Returns the SegmentedVector associated with the given StateRegistry.
      If no vector is associated with this registry yet, an empty one is created.
      Both the registry and the returned vector are cached to speed up
      consecutive calls with the same registry.
    */
    segmented_vector::SegmentedVector<Entry> *get_entries(const StateRegistry *registry) {
        if (cached_registry != registry) {
            cached_registry = registry;
            auto it = entries_by_registry.find(registry);
            if (it == entries_by_registry.end()) {
                cached_entries = new segmented_vector::SegmentedVector<Entry>();
                entries_by_registry[registry] = cached_entries;
                // Get notified when the registry dies (see notify_service_destroyed).
                registry->subscribe(this);
            } else {
                cached_entries = it->second;
            }
        }
        assert(cached_registry == registry && cached_entries == entries_by_registry[registry]);
        return cached_entries;
    }
    /*
      Returns the SegmentedVector associated with the given StateRegistry.
      Returns nullptr, if no vector is associated with this registry yet.
      Otherwise, both the registry and the returned vector are cached to speed
      up consecutive calls with the same registry.
    */
    const segmented_vector::SegmentedVector<Entry> *get_entries(const StateRegistry *registry) const {
        if (cached_registry != registry) {
            const auto it = entries_by_registry.find(registry);
            if (it == entries_by_registry.end()) {
                return nullptr;
            } else {
                cached_registry = registry;
                cached_entries = const_cast<segmented_vector::SegmentedVector<Entry> *>(it->second);
            }
        }
        assert(cached_registry == registry);
        return cached_entries;
    }
public:
    PerStateInformation()
        : default_value(),
          cached_registry(nullptr),
          cached_entries(nullptr) {
    }
    explicit PerStateInformation(const Entry &default_value_)
        : default_value(default_value_),
          cached_registry(nullptr),
          cached_entries(nullptr) {
    }
    PerStateInformation(const PerStateInformation<Entry> &) = delete;
    PerStateInformation &operator=(const PerStateInformation<Entry> &) = delete;
    virtual ~PerStateInformation() override {
        for (auto it : entries_by_registry) {
            delete it.second;
        }
    }
    // Mutable lookup: creates the entry (with default_value) on demand.
    // The state must be registered.
    Entry &operator[](const State &state) {
        const StateRegistry *registry = state.get_registry();
        if (!registry) {
            std::cerr << "Tried to access per-state information with an "
                      << "unregistered state." << std::endl;
            utils::exit_with(utils::ExitCode::SEARCH_CRITICAL_ERROR);
        }
        segmented_vector::SegmentedVector<Entry> *entries = get_entries(registry);
        int state_id = state.get_id().value;
        assert(state.get_id() != StateID::no_state);
        size_t virtual_size = registry->size();
        assert(utils::in_bounds(state_id, *registry));
        // Grow lazily to the registry's current size.
        if (entries->size() < virtual_size) {
            entries->resize(virtual_size, default_value);
        }
        return (*entries)[state_id];
    }
    // Const lookup: never allocates; returns default_value for states
    // without an explicit entry.
    const Entry &operator[](const State &state) const {
        const StateRegistry *registry = state.get_registry();
        if (!registry) {
            std::cerr << "Tried to access per-state information with an "
                      << "unregistered state." << std::endl;
            utils::exit_with(utils::ExitCode::SEARCH_CRITICAL_ERROR);
        }
        const segmented_vector::SegmentedVector<Entry> *entries = get_entries(registry);
        if (!entries) {
            return default_value;
        }
        int state_id = state.get_id().value;
        assert(state.get_id() != StateID::no_state);
        assert(utils::in_bounds(state_id, *registry));
        int num_entries = entries->size();
        if (state_id >= num_entries) {
            return default_value;
        }
        return (*entries)[state_id];
    }
    // Called by a dying StateRegistry: drop all information stored for
    // its states and invalidate the cache if it pointed there.
    virtual void notify_service_destroyed(const StateRegistry *registry) override {
        delete entries_by_registry[registry];
        entries_by_registry.erase(registry);
        if (registry == cached_registry) {
            cached_registry = nullptr;
            cached_entries = nullptr;
        }
    }
};
#endif
| 6,390 |
C
| 38.208589 | 102 | 0.648513 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/command_line.cc
|
#include "command_line.h"
#include "option_parser.h"
#include "plan_manager.h"
#include "search_engine.h"
#include "options/doc_printer.h"
#include "options/predefinitions.h"
#include "options/registries.h"
#include "utils/strings.h"
#include <algorithm>
#include <vector>
using namespace std;
// Exception-style error object for malformed command-line arguments.
ArgError::ArgError(const string &msg)
    : msg(msg) {
}
// Print the error message to stderr.
void ArgError::print() const {
    cerr << "argument error: " << msg << endl;
}
// Normalize a command-line token for parsing: newlines become spaces
// and all characters are lower-cased. (Filenames must NOT be passed
// through this function; see parse_cmd_line_aux.)
static std::string sanitize_arg_string(std::string s) {
    for (char &c : s) {
        if (c == '\n')
            c = ' ';
    }
    transform(s.begin(), s.end(), s.begin(), ::tolower);
    return s;
}
// Parse "value" as an int; on failure throw ArgError mentioning the
// option "name" it belongs to.
static int parse_int_arg(const string &name, const string &value) {
    try {
        return stoi(value);
    } catch (invalid_argument &) {
        throw ArgError("argument for " + name + " must be an integer");
    } catch (out_of_range &) {
        throw ArgError("argument for " + name + " is out of range");
    }
}
/*
  Parse the (pre-filtered) argument list and construct the configured
  search engine. Recognized options: --search, --help (dry run only),
  --internal-plan-file, --internal-previous-portfolio-plans, and registry
  predefinitions (e.g. --evaluator, --landmarks). Throws ArgError on
  malformed input.
*/
static shared_ptr<SearchEngine> parse_cmd_line_aux(
    const vector<string> &args, options::Registry &registry, bool dry_run) {
    string plan_filename = "sas_plan";
    int num_previously_generated_plans = 0;
    bool is_part_of_anytime_portfolio = false;
    options::Predefinitions predefinitions;
    shared_ptr<SearchEngine> engine;
    /*
      Note that we don't sanitize all arguments beforehand because filenames
      should remain as-is (no conversion to lower-case, no conversion of
      newlines to spaces).
    */
    // TODO: Remove code duplication.
    for (size_t i = 0; i < args.size(); ++i) {
        string arg = sanitize_arg_string(args[i]);
        bool is_last = (i == args.size() - 1);
        if (arg == "--search") {
            if (is_last)
                throw ArgError("missing argument after --search");
            ++i;
            OptionParser parser(sanitize_arg_string(args[i]), registry,
                                predefinitions, dry_run);
            engine = parser.start_parsing<shared_ptr<SearchEngine>>();
        } else if (arg == "--help" && dry_run) {
            cout << "Help:" << endl;
            bool txt2tags = false;
            vector<string> plugin_names;
            for (size_t j = i + 1; j < args.size(); ++j) {
                string help_arg = sanitize_arg_string(args[j]);
                if (help_arg == "--txt2tags") {
                    txt2tags = true;
                } else {
                    plugin_names.push_back(help_arg);
                }
            }
            unique_ptr<options::DocPrinter> doc_printer;
            if (txt2tags)
                doc_printer = utils::make_unique_ptr<options::Txt2TagsPrinter>(
                    cout, registry);
            else
                doc_printer = utils::make_unique_ptr<options::PlainPrinter>(
                    cout, registry);
            if (plugin_names.empty()) {
                doc_printer->print_all();
            } else {
                for (const string &name : plugin_names) {
                    doc_printer->print_plugin(name);
                }
            }
            cout << "Help output finished." << endl;
            exit(0);
        } else if (arg == "--internal-plan-file") {
            if (is_last)
                throw ArgError("missing argument after --internal-plan-file");
            ++i;
            // Use the raw (unsanitized) argument: it is a filename.
            plan_filename = args[i];
        } else if (arg == "--internal-previous-portfolio-plans") {
            if (is_last)
                throw ArgError("missing argument after --internal-previous-portfolio-plans");
            ++i;
            is_part_of_anytime_portfolio = true;
            num_previously_generated_plans = parse_int_arg(arg, args[i]);
            // Zero is allowed (no plans generated yet); only reject
            // negative counters. The old message ("must be positive")
            // contradicted this check.
            if (num_previously_generated_plans < 0)
                throw ArgError("argument for --internal-previous-portfolio-plans must be non-negative");
        } else if (utils::startswith(arg, "--") &&
                   registry.is_predefinition(arg.substr(2))) {
            if (is_last)
                throw ArgError("missing argument after " + arg);
            ++i;
            registry.handle_predefinition(arg.substr(2),
                                          sanitize_arg_string(args[i]),
                                          predefinitions, dry_run);
        } else {
            throw ArgError("unknown option " + arg);
        }
    }
    if (engine) {
        PlanManager &plan_manager = engine->get_plan_manager();
        plan_manager.set_plan_filename(plan_filename);
        plan_manager.set_num_previously_generated_plans(num_previously_generated_plans);
        plan_manager.set_is_part_of_anytime_portfolio(is_part_of_anytime_portfolio);
    }
    return engine;
}
// Filter the raw argument vector and hand the result to
// parse_cmd_line_aux. The guards --if-unit-cost / --if-non-unit-cost /
// --always toggle whether subsequent arguments are kept and are
// themselves dropped from the filtered list.
shared_ptr<SearchEngine> parse_cmd_line(
    int argc, const char **argv, options::Registry &registry, bool dry_run, bool is_unit_cost) {
    vector<string> selected_args;
    bool keep_following = true;
    for (int i = 1; i < argc; ++i) {
        const string token = sanitize_arg_string(argv[i]);
        if (token == "--if-unit-cost") {
            keep_following = is_unit_cost;
            continue;
        }
        if (token == "--if-non-unit-cost") {
            keep_following = !is_unit_cost;
            continue;
        }
        if (token == "--always") {
            keep_following = true;
            continue;
        }
        if (keep_following) {
            // We use the unsanitized arguments because sanitizing is inappropriate for things like filenames.
            selected_args.push_back(argv[i]);
        }
    }
    return parse_cmd_line_aux(selected_args, registry, dry_run);
}
// Build the command-line usage/help text shown for invocation errors.
string usage(const string &progname) {
    return "usage: \n" +
           progname + " [OPTIONS] --search SEARCH < OUTPUT\n\n"
           "* SEARCH (SearchEngine): configuration of the search algorithm\n"
           "* OUTPUT (filename): translator output\n\n"
           "Options:\n"
           "--help [NAME]\n"
           "    Prints help for all heuristics, open lists, etc. called NAME.\n"
           "    Without parameter: prints help for everything available\n"
           "--landmarks LANDMARKS_PREDEFINITION\n"
           "    Predefines a set of landmarks that can afterwards be referenced\n"
           "    by the name that is specified in the definition.\n"
           "--evaluator EVALUATOR_PREDEFINITION\n"
           "    Predefines an evaluator that can afterwards be referenced\n"
           "    by the name that is specified in the definition.\n"
           "--internal-plan-file FILENAME\n"
           "    Plan will be output to a file called FILENAME\n\n"
           "--internal-previous-portfolio-plans COUNTER\n"
           "    This planner call is part of a portfolio which already created\n"
           "    plan files FILENAME.1 up to FILENAME.COUNTER.\n"
           "    Start enumerating plan files with COUNTER+1, i.e. FILENAME.COUNTER+1\n\n"
           "See http://www.fast-downward.org/ for details.";
}
| 6,811 |
C++
| 37.704545 | 110 | 0.560123 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/state_id.cc
|
#include "state_id.h"
#include <ostream>
using namespace std;
// Sentinel value meaning "no state" (e.g. the initial state's parent).
const StateID StateID::no_state = StateID(-1);
// Print a StateID as "#<value>" for debugging/log output.
ostream &operator<<(ostream &os, StateID id) {
    os << "#" << id.value;
    return os;
}
| 204 |
C++
| 14.76923 | 46 | 0.637255 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/heuristic.h
|
#ifndef HEURISTIC_H
#define HEURISTIC_H
#include "evaluator.h"
#include "operator_id.h"
#include "per_state_information.h"
#include "task_proxy.h"
#include "algorithms/ordered_set.h"
#include <memory>
#include <vector>
class TaskProxy;
namespace options {
class OptionParser;
class Options;
}
class Heuristic : public Evaluator {
    // Cache entry: heuristic value plus a "dirty" flag packed into 32 bits.
    struct HEntry {
        /* dirty is conceptually a bool, but Visual C++ does not support
           packing ints and bools together in a bitfield. */
        int h : 31;
        unsigned int dirty : 1;
        HEntry(int h, bool dirty)
            : h(h), dirty(dirty) {
        }
    };
    static_assert(sizeof(HEntry) == 4, "HEntry has unexpected size.");
    /*
      TODO: We might want to get rid of the preferred_operators
      attribute. It is currently only used by compute_result() and the
      methods it calls (compute_heuristic() directly, further methods
      indirectly), and we could e.g. change this by having
      compute_heuristic return an EvaluationResult object.

      If we do this, we should be mindful of the cost incurred by not
      being able to reuse the data structure from one iteration to the
      next, but this seems to be the only potential downside.
    */
    ordered_set::OrderedSet<OperatorID> preferred_operators;
protected:
    /*
      Cache for saving h values
      Before accessing this cache always make sure that the cache_evaluator_values
      flag is set to true - as soon as the cache is accessed it will create
      entries for all existing states
    */
    PerStateInformation<HEntry> heuristic_cache;
    bool cache_evaluator_values;
    // Hold a reference to the task implementation and pass it to objects that need it.
    const std::shared_ptr<AbstractTask> task;
    // Use task_proxy to access task information.
    TaskProxy task_proxy;
    // Special return values of compute_heuristic().
    enum {DEAD_END = -1, NO_VALUE = -2};
    // Compute the heuristic value for the given (ancestor-task) state.
    virtual int compute_heuristic(const State &ancestor_state) = 0;
    /*
      Usage note: Marking the same operator as preferred multiple times
      is OK -- it will only appear once in the list of preferred
      operators for this heuristic.
    */
    void set_preferred(const OperatorProxy &op);
    // Map a state of an ancestor task to the corresponding state of "task".
    State convert_ancestor_state(const State &ancestor_state) const;
public:
    explicit Heuristic(const options::Options &opts);
    virtual ~Heuristic() override;
    virtual void get_path_dependent_evaluators(
        std::set<Evaluator *> & /*evals*/) override {
    }
    static void add_options_to_parser(options::OptionParser &parser);
    virtual EvaluationResult compute_result(
        EvaluationContext &eval_context) override;
    virtual bool does_cache_estimates() const override;
    virtual bool is_estimate_cached(const State &state) const override;
    virtual int get_cached_estimate(const State &state) const override;
};
#endif
| 2,856 |
C
| 29.393617 | 87 | 0.690126 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/per_state_bitset.h
|
#ifndef PER_STATE_BITSET_H
#define PER_STATE_BITSET_H
#include "per_state_array.h"
#include <vector>
// Static helpers for treating a flat array of unsigned-int blocks as a bitset.
class BitsetMath {
public:
    using Block = unsigned int;
    static_assert(
        !std::numeric_limits<Block>::is_signed,
        "Block type must be unsigned");
    static const Block zeros = Block(0);
    // MSVC's bitwise negation always returns a signed type.
    static const Block ones = Block(~Block(0));
    static const int bits_per_block = std::numeric_limits<Block>::digits;
    // Number of blocks needed to hold num_bits bits (rounded up).
    static int compute_num_blocks(std::size_t num_bits);
    // Map a global bit position to its block / bit-within-block / mask.
    static std::size_t block_index(std::size_t pos);
    static std::size_t bit_index(std::size_t pos);
    static Block bit_mask(std::size_t pos);
};
// Non-owning view over a block array interpreted as a bitset of num_bits bits.
class BitsetView {
    ArrayView<BitsetMath::Block> data;
    int num_bits;
public:
    BitsetView(ArrayView<BitsetMath::Block> data, int num_bits);
    BitsetView(const BitsetView &other) = default;
    BitsetView &operator=(const BitsetView &other) = default;
    void set(int index);
    void reset(int index);
    void reset();   // clear all bits
    bool test(int index) const;
    void intersect(const BitsetView &other);   // in-place bitwise AND
    int size() const;   // number of bits in the view
};
// Associates one fixed-width bitset with every registered state,
// backed by a PerStateArray of bitset blocks.
class PerStateBitset {
    int num_bits_per_entry;
    PerStateArray<BitsetMath::Block> data;
public:
    // default_bits supplies both the width and the initial bit pattern.
    explicit PerStateBitset(const std::vector<bool> &default_bits);
    PerStateBitset(const PerStateBitset &) = delete;
    PerStateBitset &operator=(const PerStateBitset &) = delete;
    BitsetView operator[](const State &state);
};
#endif
| 1,492 |
C
| 24.305084 | 73 | 0.683646 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/operator_cost.h
|
#ifndef OPERATOR_COST_H
#define OPERATOR_COST_H
class OperatorProxy;
namespace options {
class OptionParser;
}
// Cost adjustment modes: real cost, unit cost, or cost + 1 (unit tasks stay 1).
enum OperatorCost {NORMAL = 0, ONE = 1, PLUSONE = 2, MAX_OPERATOR_COST};
// Returns op's cost adjusted per cost_type; axioms always cost 0 (see .cc).
int get_adjusted_action_cost(const OperatorProxy &op, OperatorCost cost_type, bool is_unit_cost);
// Registers the "cost_type" enum option on the parser.
void add_cost_type_option_to_parser(options::OptionParser &parser);
#endif
| 362 |
C
| 21.687499 | 97 | 0.762431 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/search_statistics.cc
|
#include "search_statistics.h"
#include "utils/logging.h"
#include "utils/timer.h"
#include "utils/system.h"
#include <iostream>
using namespace std;
// All counters start at zero; lastjump_f_value uses -1 as the
// "no f value reported yet" sentinel (see report_f_value_progress).
SearchStatistics::SearchStatistics(utils::Verbosity verbosity)
    : verbosity(verbosity) {
    expanded_states = 0;
    reopened_states = 0;
    evaluated_states = 0;
    evaluations = 0;
    generated_states = 0;
    dead_end_states = 0;
    generated_ops = 0;
    lastjump_expanded_states = 0;
    lastjump_reopened_states = 0;
    lastjump_evaluated_states = 0;
    lastjump_generated_states = 0;
    lastjump_f_value = -1;
}
/*
  Record a new highest f value and print a progress line for it.
  Calls that do not exceed the previously reported f value are ignored.
*/
void SearchStatistics::report_f_value_progress(int f) {
    if (f <= lastjump_f_value)
        return;
    lastjump_f_value = f;
    print_f_line();
    // Snapshot current counters so "until last jump" statistics are accurate.
    lastjump_expanded_states = expanded_states;
    lastjump_reopened_states = reopened_states;
    lastjump_evaluated_states = evaluated_states;
    lastjump_generated_states = generated_states;
}
// Print the "f = ..." progress line, suppressed below NORMAL verbosity.
void SearchStatistics::print_f_line() const {
    if (verbosity < utils::Verbosity::NORMAL)
        return;
    utils::g_log << "f = " << lastjump_f_value << ", ";
    print_basic_statistics();
    utils::g_log << endl;
}
// Print a "g=..." checkpoint line, suppressed below NORMAL verbosity.
void SearchStatistics::print_checkpoint_line(int g) const {
    if (verbosity < utils::Verbosity::NORMAL)
        return;
    utils::g_log << "g=" << g << ", ";
    print_basic_statistics();
    utils::g_log << endl;
}
// Shared tail of the progress lines: evaluated/expanded counts,
// plus reopened count only when reopening actually happened.
void SearchStatistics::print_basic_statistics() const {
    utils::g_log << evaluated_states << " evaluated, "
                 << expanded_states << " expanded";
    if (reopened_states > 0) {
        utils::g_log << ", " << reopened_states << " reopened";
    }
}
// Final end-of-search summary. The "until last jump" section is only
// meaningful if at least one f value was reported (sentinel is -1).
void SearchStatistics::print_detailed_statistics() const {
    utils::g_log << "Expanded " << expanded_states << " state(s)." << endl;
    utils::g_log << "Reopened " << reopened_states << " state(s)." << endl;
    utils::g_log << "Evaluated " << evaluated_states << " state(s)." << endl;
    utils::g_log << "Evaluations: " << evaluations << endl;
    utils::g_log << "Generated " << generated_states << " state(s)." << endl;
    utils::g_log << "Dead ends: " << dead_end_states << " state(s)." << endl;
    if (lastjump_f_value >= 0) {
        utils::g_log << "Expanded until last jump: "
                     << lastjump_expanded_states << " state(s)." << endl;
        utils::g_log << "Reopened until last jump: "
                     << lastjump_reopened_states << " state(s)." << endl;
        utils::g_log << "Evaluated until last jump: "
                     << lastjump_evaluated_states << " state(s)." << endl;
        utils::g_log << "Generated until last jump: "
                     << lastjump_generated_states << " state(s)." << endl;
    }
}
| 2,751 |
C++
| 31.37647 | 77 | 0.575791 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/search_node_info.h
|
#ifndef SEARCH_NODE_INFO_H
#define SEARCH_NODE_INFO_H
#include "operator_id.h"
#include "state_id.h"
// For documentation on classes relevant to storing and working with registered
// states see the file state_registry.h.
// Per-state bookkeeping for the search space: status, g values and the
// parent pointer (state + creating operator). status and g share one
// machine word via bitfields to keep the struct small.
struct SearchNodeInfo {
    enum NodeStatus {NEW = 0, OPEN = 1, CLOSED = 2, DEAD_END = 3};
    unsigned int status : 2;
    int g : 30;
    StateID parent_state_id;
    OperatorID creating_operator;
    // g under the task's real cost function (g may be cost-adjusted).
    int real_g;
    SearchNodeInfo()
        : status(NEW), g(-1), parent_state_id(StateID::no_state),
          creating_operator(-1), real_g(-1) {
    }
};
#endif
| 592 |
C
| 21.807691 | 79 | 0.653716 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/axioms.h
|
#ifndef AXIOMS_H
#define AXIOMS_H
#include "per_task_information.h"
#include "task_proxy.h"
#include <memory>
#include <vector>
/*
  Evaluates derived variables (axioms) on a state: given the values of
  the basic variables, computes the values of all derived variables by
  layered rule application with negation-by-failure defaults.
*/
class AxiomEvaluator {
    struct AxiomRule;
    struct AxiomLiteral {
        // All rules in whose condition this literal occurs.
        std::vector<AxiomRule *> condition_of;
    };
    struct AxiomRule {
        int condition_count;
        int unsatisfied_conditions;
        int effect_var;
        int effect_val;
        AxiomLiteral *effect_literal;
        AxiomRule(int cond_count, int eff_var, int eff_val, AxiomLiteral *eff_literal)
            : condition_count(cond_count), unsatisfied_conditions(cond_count),
              effect_var(eff_var), effect_val(eff_val), effect_literal(eff_literal) {
        }
    };
    struct NegationByFailureInfo {
        int var_no;
        AxiomLiteral *literal;
        NegationByFailureInfo(int var, AxiomLiteral *lit)
            : var_no(var), literal(lit) {}
    };
    bool task_has_axioms;
    std::vector<std::vector<AxiomLiteral>> axiom_literals;
    std::vector<AxiomRule> rules;
    // One entry per axiom layer, evaluated layer by layer.
    std::vector<std::vector<NegationByFailureInfo>> nbf_info_by_layer;
    /*
      default_values stores the default (negation by failure) values
      for all derived variables, i.e., the value that a derived
      variable holds by default if no derivation rule triggers.
      This is indexed by variable number and set to -1 for non-derived
      variables, so can also be used to test if a variable is derived.
      We have our own copy of the data to avoid going through the task
      interface in the time-critical evaluate method.
    */
    std::vector<int> default_values;
    /*
      The queue is an instance variable rather than a local variable
      to reduce reallocation effort. See issue420.
    */
    std::vector<const AxiomLiteral *> queue;
    template<typename Values, typename Accessor>
    void evaluate_aux(Values &values, const Accessor &accessor);
public:
    explicit AxiomEvaluator(const TaskProxy &task_proxy);
    // Updates the derived-variable entries of state in place.
    void evaluate(std::vector<int> &state);
};
extern PerTaskInformation<AxiomEvaluator> g_axiom_evaluators;
#endif
| 2,082 |
C
| 29.632353 | 86 | 0.674352 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/task_proxy.cc
|
#include "task_proxy.h"
#include "axioms.h"
#include "state_registry.h"
#include "task_utils/causal_graph.h"
#include "task_utils/task_properties.h"
#include <iostream>
using namespace std;
// Registered state backed only by a packed buffer; unpacked values are
// not available until provided by the second constructor.
State::State(const AbstractTask &task, const StateRegistry &registry,
             StateID id, const PackedStateBin *buffer)
    : task(&task), registry(&registry), id(id), buffer(buffer), values(nullptr),
      state_packer(&registry.get_state_packer()),
      num_variables(registry.get_num_variables()) {
    assert(id != StateID::no_state);
    assert(buffer);
    assert(num_variables == task.get_num_variables());
}
// Registered state that additionally carries unpacked values (shared,
// so copies of the State share one vector).
State::State(const AbstractTask &task, const StateRegistry &registry,
             StateID id, const PackedStateBin *buffer,
             vector<int> &&values)
    : State(task, registry, id, buffer) {
    assert(num_variables == static_cast<int>(values.size()));
    this->values = make_shared<vector<int>>(move(values));
}
// Unregistered state: no registry, no ID, no packed buffer; only values.
State::State(const AbstractTask &task, vector<int> &&values)
    : task(&task), registry(nullptr), id(StateID::no_state), buffer(nullptr),
      values(make_shared<vector<int>>(move(values))),
      state_packer(nullptr), num_variables(this->values->size()) {
    assert(num_variables == task.get_num_variables());
}
// Apply op to this state and return the (unregistered) successor.
// Requires unpacked values; axioms are re-evaluated after the effects.
State State::get_unregistered_successor(const OperatorProxy &op) const {
    assert(!op.is_axiom());
    assert(task_properties::is_applicable(op, *this));
    assert(values);
    vector<int> new_values = get_unpacked_values();
    for (EffectProxy effect : op.get_effects()) {
        if (does_fire(effect, *this)) {
            FactPair effect_fact = effect.get_fact().get_pair();
            new_values[effect_fact.var] = effect_fact.value;
        }
    }
    if (task->get_num_axioms() > 0) {
        AxiomEvaluator &axiom_evaluator = g_axiom_evaluators[TaskProxy(*task)];
        axiom_evaluator.evaluate(new_values);
    }
    return State(*task, move(new_values));
}
const causal_graph::CausalGraph &TaskProxy::get_causal_graph() const {
    return causal_graph::get_causal_graph(task);
}
| 2,062 |
C++
| 32.819672 | 80 | 0.660524 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/abstract_task.cc
|
#include "abstract_task.h"
#include "per_task_information.h"
#include "plugin.h"
#include <iostream>
using namespace std;
// Sentinel for "no fact": both variable and value are -1.
const FactPair FactPair::no_fact = FactPair(-1, -1);
// Print a fact as "var=value" for debugging/log output.
ostream &operator<<(ostream &os, const FactPair &fact_pair) {
    os << fact_pair.var << "=" << fact_pair.value;
    return os;
}
static PluginTypePlugin<AbstractTask> _type_plugin(
    "AbstractTask",
    // TODO: Replace empty string by synopsis for the wiki page.
    "");
| 457 |
C++
| 20.809523 | 64 | 0.678337 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/search_progress.cc
|
#include "search_progress.h"
#include "evaluation_context.h"
#include "evaluator.h"
#include "../utils/logging.h"
#include <iostream>
#include <string>
using namespace std;
// min_values starts empty; minima are recorded lazily per evaluator.
SearchProgress::SearchProgress(utils::Verbosity verbosity)
    : verbosity(verbosity) {
}
/*
  Record the given evaluator value and report whether it is a new
  minimum. An evaluator seen for the first time always counts as a
  new minimum.
*/
bool SearchProgress::process_evaluator_value(const Evaluator *evaluator, int value) {
    auto insertion = min_values.insert(make_pair(evaluator, value));
    if (insertion.second) {
        // First value for this evaluator: insert() stored it already.
        return true;
    }
    int &min_value = insertion.first->second;
    if (value >= min_value)
        return false;
    min_value = value;
    return true;
}
// Scan all cached evaluator results of eval_context; report new minima
// (at NORMAL verbosity) and return true iff any boosting evaluator
// reached a new minimum, signalling the search to boost its open lists.
bool SearchProgress::check_progress(const EvaluationContext &eval_context) {
    bool boost = false;
    eval_context.get_cache().for_each_evaluator_result(
        [this, &boost](const Evaluator *eval, const EvaluationResult &result) {
            if (eval->is_used_for_reporting_minima() || eval->is_used_for_boosting()) {
                if (process_evaluator_value(eval, result.get_evaluator_value())) {
                    if (verbosity >= utils::Verbosity::NORMAL &&
                        eval->is_used_for_reporting_minima()) {
                        eval->report_new_minimum_value(result);
                    }
                    if (eval->is_used_for_boosting()) {
                        boost = true;
                    }
                }
            }
        }
        );
    return boost;
}
| 1,819 |
C++
| 29.847457 | 87 | 0.581638 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/evaluation_context.cc
|
#include "evaluation_context.h"
#include "evaluation_result.h"
#include "evaluator.h"
#include "search_statistics.h"
#include <cassert>
using namespace std;
// Primary constructor: all other constructors delegate here.
EvaluationContext::EvaluationContext(
    const EvaluatorCache &cache, const State &state, int g_value,
    bool is_preferred, SearchStatistics *statistics,
    bool calculate_preferred)
    : cache(cache),
      state(state),
      g_value(g_value),
      preferred(is_preferred),
      statistics(statistics),
      calculate_preferred(calculate_preferred) {
}
// Copy another context's cache and state but override g/preferred info.
EvaluationContext::EvaluationContext(
    const EvaluationContext &other, int g_value,
    bool is_preferred, SearchStatistics *statistics, bool calculate_preferred)
    : EvaluationContext(other.cache, other.state, g_value, is_preferred,
                        statistics, calculate_preferred) {
}
// Fresh (empty) cache variant.
EvaluationContext::EvaluationContext(
    const State &state, int g_value, bool is_preferred,
    SearchStatistics *statistics, bool calculate_preferred)
    : EvaluationContext(EvaluatorCache(), state, g_value, is_preferred,
                        statistics, calculate_preferred) {
}
// Variant without g/preferred info; g_value is set to the INVALID
// sentinel and must not be queried (see get_g_value/is_preferred).
EvaluationContext::EvaluationContext(
    const State &state,
    SearchStatistics *statistics, bool calculate_preferred)
    : EvaluationContext(EvaluatorCache(), state, INVALID, false,
                        statistics, calculate_preferred) {
}
// Return the (memoized) evaluation result for evaluator: compute it on
// first access, cache it, and count the evaluation in the statistics
// when the evaluator opts in and actually performed a computation.
const EvaluationResult &EvaluationContext::get_result(Evaluator *evaluator) {
    EvaluationResult &result = cache[evaluator];
    if (result.is_uninitialized()) {
        result = evaluator->compute_result(*this);
        if (statistics &&
            evaluator->is_used_for_counting_evaluations() &&
            result.get_count_evaluation()) {
            statistics->inc_evaluations();
        }
    }
    return result;
}
const EvaluatorCache &EvaluationContext::get_cache() const {
    return cache;
}
const State &EvaluationContext::get_state() const {
    return state;
}
// g_value/preferred may only be queried when the context was built with
// g information (asserted against the INVALID sentinel).
int EvaluationContext::get_g_value() const {
    assert(g_value != INVALID);
    return g_value;
}
bool EvaluationContext::is_preferred() const {
    assert(g_value != INVALID);
    return preferred;
}
bool EvaluationContext::is_evaluator_value_infinite(Evaluator *eval) {
    return get_result(eval).is_infinite();
}
// Like get_evaluator_value_or_infinity, but the caller promises the
// value is finite (checked by assertion).
int EvaluationContext::get_evaluator_value(Evaluator *eval) {
    int h = get_result(eval).get_evaluator_value();
    assert(h != EvaluationResult::INFTY);
    return h;
}
int EvaluationContext::get_evaluator_value_or_infinity(Evaluator *eval) {
    return get_result(eval).get_evaluator_value();
}
const vector<OperatorID> &
EvaluationContext::get_preferred_operators(Evaluator *eval) {
    return get_result(eval).get_preferred_operators();
}
bool EvaluationContext::get_calculate_preferred() const {
    return calculate_preferred;
}
| 2,809 |
C++
| 27.383838 | 78 | 0.698113 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/evaluation_result.h
|
#ifndef EVALUATION_RESULT_H
#define EVALUATION_RESULT_H
#include "operator_id.h"
#include <limits>
#include <vector>
// Value object holding one evaluator's output for one state: the
// numeric estimate, optional preferred operators, and whether the
// value was freshly computed (for statistics).
class EvaluationResult {
    static const int UNINITIALIZED = -2;
    int evaluator_value;
    std::vector<OperatorID> preferred_operators;
    // NOTE(review): not initialized by the default constructor (see the
    // .cc file) — read only after set_count_evaluation() was called.
    bool count_evaluation;
public:
    // "INFINITY" is an ISO C99 macro and "INFINITE" is a macro in windows.h.
    static const int INFTY;
    EvaluationResult();
    /* TODO: Can we do without this "uninitialized" business?
       One reason why it currently exists is to simplify the
       implementation of the EvaluationContext class, where we don't
       want to perform two separate map operations in the case where
       we determine that an entry doesn't yet exist (lookup) and hence
       needs to be created (insertion). This can be avoided most
       easily if we have a default constructor for EvaluationResult
       and if it's easy to test if a given object has just been
       default-constructed.
    */
    /*
      TODO: Can we get rid of count_evaluation?
      The EvaluationContext needs to know (for statistics) if the
      heuristic actually computed the heuristic value or just looked it
      up in a cache. Currently this information is passed over the
      count_evaluation flag, which is somewhat awkward.
    */
    bool is_uninitialized() const;
    bool is_infinite() const;
    int get_evaluator_value() const;
    bool get_count_evaluation() const;
    const std::vector<OperatorID> &get_preferred_operators() const;
    void set_evaluator_value(int value);
    void set_preferred_operators(std::vector<OperatorID> &&preferred_operators);
    void set_count_evaluation(bool count_eval);
};
#endif
| 1,709 |
C
| 31.26415 | 80 | 0.705676 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/per_state_array.h
|
#ifndef PER_STATE_ARRAY_H
#define PER_STATE_ARRAY_H
#include "per_state_information.h"
#include <cassert>
#include <unordered_map>
// Non-owning, bounds-checked (in debug builds) view over a contiguous
// array of T; cheap to copy, does not manage the underlying storage.
template<class T>
class ArrayView {
    T *p;
    int size_;
public:
    ArrayView(T *p, int size) : p(p), size_(size) {}
    ArrayView(const ArrayView<T> &other) = default;
    ArrayView<T> &operator=(const ArrayView<T> &other) = default;
    T &operator[](int index) {
        assert(index >= 0 && index < size_);
        return p[index];
    }
    const T &operator[](int index) const {
        assert(index >= 0 && index < size_);
        return p[index];
    }
    int size() const {
        return size_;
    }
};
/*
  PerStateArray is used to associate array-like information with states.
  PerStateArray<Entry> logically behaves somewhat like an unordered map
  from states to equal-length arrays of class Entry. However, lookup of
  unknown states is supported and leads to insertion of a default value
  (similar to the defaultdict class in Python).
  The implementation is similar to the one of PerStateInformation, which
  also contains more documentation.
*/
template<class Element>
class PerStateArray : public subscriber::Subscriber<StateRegistry> {
    const std::vector<Element> default_array;
    using EntryArrayVectorMap = std::unordered_map<const StateRegistry *,
                                                   segmented_vector::SegmentedArrayVector<Element> *>;
    EntryArrayVectorMap entry_arrays_by_registry;
    // One-entry cache of the last registry looked up; mutable so the
    // const overload of get_entries can refresh it.
    mutable const StateRegistry *cached_registry;
    mutable segmented_vector::SegmentedArrayVector<Element> *cached_entries;
    // Return the entry storage for registry, creating it (and
    // subscribing to the registry's destruction) on first use.
    segmented_vector::SegmentedArrayVector<Element> *get_entries(const StateRegistry *registry) {
        if (cached_registry != registry) {
            cached_registry = registry;
            auto it = entry_arrays_by_registry.find(registry);
            if (it == entry_arrays_by_registry.end()) {
                cached_entries = new segmented_vector::SegmentedArrayVector<Element>(
                    default_array.size());
                entry_arrays_by_registry[registry] = cached_entries;
                registry->subscribe(this);
            } else {
                cached_entries = it->second;
            }
        }
        assert(cached_registry == registry && cached_entries == entry_arrays_by_registry[registry]);
        return cached_entries;
    }
    // Const lookup: never creates storage; returns nullptr for an
    // unknown registry. Only the mutable cache members are updated.
    const segmented_vector::SegmentedArrayVector<Element> *get_entries(
        const StateRegistry *registry) const {
        if (cached_registry != registry) {
            const auto it = entry_arrays_by_registry.find(registry);
            if (it == entry_arrays_by_registry.end()) {
                return nullptr;
            } else {
                cached_registry = registry;
                cached_entries = const_cast<segmented_vector::SegmentedArrayVector<Element> *>(
                    it->second);
            }
        }
        assert(cached_registry == registry);
        return cached_entries;
    }
public:
    explicit PerStateArray(const std::vector<Element> &default_array)
        : default_array(default_array),
          cached_registry(nullptr),
          cached_entries(nullptr) {
    }
    PerStateArray(const PerStateArray<Element> &) = delete;
    PerStateArray &operator=(const PerStateArray<Element> &) = delete;
    virtual ~PerStateArray() override {
        for (auto it : entry_arrays_by_registry) {
            delete it.second;
        }
    }
    // Grow the storage lazily to the registry's current size, then hand
    // out a view onto this state's row.
    ArrayView<Element> operator[](const State &state) {
        const StateRegistry *registry = state.get_registry();
        if (!registry) {
            std::cerr << "Tried to access per-state array with an unregistered "
                      << "state." << std::endl;
            utils::exit_with(utils::ExitCode::SEARCH_CRITICAL_ERROR);
        }
        segmented_vector::SegmentedArrayVector<Element> *entries = get_entries(registry);
        int state_id = state.get_id().value;
        assert(state.get_id() != StateID::no_state);
        size_t virtual_size = registry->size();
        assert(utils::in_bounds(state_id, *registry));
        if (entries->size() < virtual_size) {
            entries->resize(virtual_size, default_array.data());
        }
        return ArrayView<Element>((*entries)[state_id], default_array.size());
    }
    ArrayView<Element> operator[](const State &) const {
        ABORT("PerStateArray::operator[] const not implemented. "
              "See source code for more information.");
        /*
          This method is not implemented because it is currently not used and
          would require quite a bit of boilerplate, introducing a ConstArrayView
          class similar to ArrayView. If you need it, it should be easy to
          implement based on PerStateInformation:operator[] const. This method
          should return a ConstArrayView<Element>.
        */
    }
    // Subscriber callback: drop the storage for a registry that is
    // being destroyed and invalidate the cache if it pointed there.
    virtual void notify_service_destroyed(const StateRegistry *registry) override {
        delete entry_arrays_by_registry[registry];
        entry_arrays_by_registry.erase(registry);
        if (registry == cached_registry) {
            cached_registry = nullptr;
            cached_entries = nullptr;
        }
    }
};
#endif
| 5,220 |
C
| 34.760274 | 102 | 0.622222 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/pruning_method.cc
|
#include "pruning_method.h"
#include "plugin.h"
#include <cassert>
using namespace std;
PruningMethod::PruningMethod()
    : task(nullptr) {
}
// One-time binding of the task; must not be called twice (asserted).
void PruningMethod::initialize(const shared_ptr<AbstractTask> &task_) {
    assert(!task);
    task = task_;
}
static PluginTypePlugin<PruningMethod> _type_plugin(
"PruningMethod",
"Prune or reorder applicable operators.");
| 381 |
C++
| 17.190475 | 71 | 0.706037 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/task_id.h
|
#ifndef TASK_ID_H
#define TASK_ID_H
#include "utils/hash.h"
#include <cstdint>
#include <functional>
class AbstractTask;
/*
A TaskID uniquely identifies a task (for unordered_maps and comparison)
without publicly exposing the internal AbstractTask pointer.
*/
class TaskID {
    // The task's address reinterpreted as an integer; uniqueness follows
    // from object-address uniqueness.
    const std::uintptr_t value;
public:
    explicit TaskID(const AbstractTask *task)
        : value(reinterpret_cast<uintptr_t>(task)) {
    }
    TaskID() = delete;
    bool operator==(const TaskID &other) const {
        return value == other.value;
    }
    bool operator!=(const TaskID &other) const {
        return !(*this == other);
    }
    std::uint64_t hash() const {
        return value;
    }
};
namespace utils {
// Hook TaskID into the project's hashing framework.
inline void feed(HashState &hash_state, TaskID id) {
    feed(hash_state, id.hash());
}
}
#endif
| 817 |
C
| 17.590909 | 73 | 0.648715 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/evaluation_result.cc
|
#include "evaluation_result.h"
using namespace std;
const int EvaluationResult::INFTY = numeric_limits<int>::max();
/*
  Default-construct a result in the "uninitialized" state.
  count_evaluation is initialized explicitly: the original left it
  indeterminate, so get_count_evaluation() invoked undefined behavior
  whenever it ran before set_count_evaluation() (e.g. on a result whose
  producer never set the flag).
*/
EvaluationResult::EvaluationResult()
    : evaluator_value(UNINITIALIZED), count_evaluation(false) {
}
// True until set_evaluator_value() is called (sentinel comparison).
bool EvaluationResult::is_uninitialized() const {
    return evaluator_value == UNINITIALIZED;
}
bool EvaluationResult::is_infinite() const {
    return evaluator_value == INFTY;
}
int EvaluationResult::get_evaluator_value() const {
    return evaluator_value;
}
const vector<OperatorID> &EvaluationResult::get_preferred_operators() const {
    return preferred_operators;
}
bool EvaluationResult::get_count_evaluation() const {
    return count_evaluation;
}
void EvaluationResult::set_evaluator_value(int value) {
    evaluator_value = value;
}
// Takes ownership of the vector (move).
void EvaluationResult::set_preferred_operators(
    vector<OperatorID> &&preferred_ops) {
    preferred_operators = move(preferred_ops);
}
void EvaluationResult::set_count_evaluation(bool count_eval) {
    count_evaluation = count_eval;
}
| 987 |
C++
| 22.523809 | 77 | 0.744681 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/plugin.h
|
#ifndef PLUGIN_H
#define PLUGIN_H
/*
TODO: This file was intended to be a temporary stub. We should think
of getting rid of it. This would entail including "options/plugin.h"
instead and explicitly using the options namespace. See also option_parser.h,
option_parser_util.h and issue588 for a discussion.
*/
#include "options/plugin.h"
using options::Plugin;
using options::PluginGroupPlugin;
using options::PluginTypePlugin;
#endif
| 444 |
C
| 25.176469 | 79 | 0.768018 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/open_list.h
|
#ifndef OPEN_LIST_H
#define OPEN_LIST_H
#include <set>
#include "evaluation_context.h"
#include "operator_id.h"
class StateID;
// Abstract open list: stores search entries ordered by evaluator
// values, optionally restricted to preferred successors.
template<class Entry>
class OpenList {
    bool only_preferred;
protected:
    /*
      Insert an entry into the open list. This is called by insert, so
      see comments there. This method will not be called if
      is_dead_end() is true or if only_preferred is true and the entry
      to be inserted is not preferred. Hence, these conditions need
      not be checked by the implementation.
    */
    virtual void do_insertion(EvaluationContext &eval_context,
                              const Entry &entry) = 0;
public:
    explicit OpenList(bool preferred_only = false);
    virtual ~OpenList() = default;
    /*
      Insert an entry into the open list.
      This method may be called with entries that the open list does
      not want to insert, e.g. because they have an infinite estimate
      or because they are non-preferred successor and the open list
      only wants preferred successors. In this case, the open list
      will remain unchanged.
      This method will often compute heuristic estimates as a side
      effect, which are cached in the EvaluationContext object that
      is passed in.
      Implementation note: uses the template method pattern, with
      do_insertion performing the bulk of the work. See comments for
      do_insertion.
    */
    void insert(EvaluationContext &eval_context, const Entry &entry);
    /*
      Remove and return the entry that should be expanded next.
    */
    virtual Entry remove_min() = 0;
    // Return true if the open list is empty.
    virtual bool empty() const = 0;
    /*
      Remove all elements from the open list.
      TODO: This method might eventually go away, instead using a
      mechanism for generating new open lists at a higher level.
      This is currently only used by enforced hill-climbing.
    */
    virtual void clear() = 0;
    /*
      Called when the search algorithm wants to "boost" open lists
      using preferred successors.
      The default implementation does nothing. The main use case for
      this is for alternation open lists.
      TODO: Might want to change the name of the method to something
      generic that reflects when it is called rather than how one
      particular open list handles the event. I think this is called
      on "search progress", so we should verify if this is indeed the
      case, clarify what exactly that means, and then rename the
      method to reflect this (e.g. with a name like
      "on_search_progress").
    */
    virtual void boost_preferred();
    /*
      Add all path-dependent evaluators that this open lists uses (directly or
      indirectly) into the result set.
      TODO: This method can probably go away at some point.
    */
    virtual void get_path_dependent_evaluators(
        std::set<Evaluator *> &evals) = 0;
    /*
      Accessor method for only_preferred.
      The only use case for this at the moment is for alternation open
      lists, which boost those sublists which only include preferred
      entries.
      TODO: Is this sufficient reason to have this method? We could
      get rid of it if instead AlternationOpenList would be passed
      information upon construction which lists should be boosted on
      progress. This could also make the code more general (it would
      be easy to boost different lists by different amounts, and
      boosting would not have to be tied to preferredness). We should
      discuss the best way to proceed here.
    */
    bool only_contains_preferred_entries() const;
    /*
      is_dead_end and is_reliable_dead_end return true if the state
      associated with the passed-in evaluation context is deemed a
      dead end by the open list.
      The difference between the two methods is that
      is_reliable_dead_end must guarantee that the associated state is
      actually unsolvable, i.e., it must not believe the claims of
      unsafe heuristics.
      Like OpenList::insert, the methods usually evaluate heuristic
      values, which are then cached in eval_context as a side effect.
    */
    virtual bool is_dead_end(EvaluationContext &eval_context) const = 0;
    virtual bool is_reliable_dead_end(
        EvaluationContext &eval_context) const = 0;
};
using StateOpenListEntry = StateID;
using EdgeOpenListEntry = std::pair<StateID, OperatorID>;
using StateOpenList = OpenList<StateOpenListEntry>;
using EdgeOpenList = OpenList<EdgeOpenListEntry>;
template<class Entry>
OpenList<Entry>::OpenList(bool only_preferred)
    : only_preferred(only_preferred) {
}
// Default boost: no-op (only alternation lists override this).
template<class Entry>
void OpenList<Entry>::boost_preferred() {
}
/*
  Insert an entry unless it is filtered out: a preferred-only list
  drops non-preferred entries, and recognized dead ends are never
  inserted. Evaluation order is significant — the cheap preferredness
  test runs before the (possibly heuristic-evaluating) dead-end test.
*/
template<class Entry>
void OpenList<Entry>::insert(
    EvaluationContext &eval_context, const Entry &entry) {
    if (only_preferred && !eval_context.is_preferred())
        return;
    if (is_dead_end(eval_context))
        return;
    do_insertion(eval_context, entry);
}
// Accessor for the preferred-only flag (see class comment for use case).
template<class Entry>
bool OpenList<Entry>::only_contains_preferred_entries() const {
    return only_preferred;
}
#endif
| 5,134 |
C
| 31.09375 | 78 | 0.695754 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/evaluator.h
|
#ifndef EVALUATOR_H
#define EVALUATOR_H
#include "evaluation_result.h"
#include <set>
class EvaluationContext;
class State;
// Abstract base for everything that maps a state to a numeric estimate
// (heuristics, g-evaluators, combinators, ...).
class Evaluator {
    const std::string description;
    // Flags controlling how searches use this evaluator for reporting,
    // open-list boosting and evaluation counting.
    const bool use_for_reporting_minima;
    const bool use_for_boosting;
    const bool use_for_counting_evaluations;
public:
    Evaluator(
        const std::string &description = "<none>",
        bool use_for_reporting_minima = false,
        bool use_for_boosting = false,
        bool use_for_counting_evaluations = false);
    virtual ~Evaluator() = default;
    /*
      dead_ends_are_reliable should return true if the evaluator is
      "safe", i.e., infinite estimates can be trusted.
      The default implementation returns true.
    */
    virtual bool dead_ends_are_reliable() const;
    /*
      get_path_dependent_evaluators should insert all path-dependent
      evaluators that this evaluator directly or indirectly depends on
      into the result set, including itself if necessary.
      The two notify methods below will be called for these and only
      these evaluators. In other words, "path-dependent" for our
      purposes means "needs to be notified of the initial state and
      state transitions".
    */
    virtual void get_path_dependent_evaluators(
        std::set<Evaluator *> &evals) = 0;
    virtual void notify_initial_state(const State & /*initial_state*/) {
    }
    virtual void notify_state_transition(
        const State & /*parent_state*/,
        OperatorID /*op_id*/,
        const State & /*state*/) {
    }
    /*
      compute_result should compute the estimate and possibly
      preferred operators for the given evaluation context and return
      them as an EvaluationResult instance.
      It should not add the result to the evaluation context -- this
      is done automatically elsewhere.
      The passed-in evaluation context is not const because this
      evaluator might depend on other evaluators, in which case their
      results will be stored in the evaluation context as a side
      effect (to make sure they are only computed once).
      TODO: We should make sure that evaluators don't directly call
      compute_result for their subevaluators, as this could circumvent
      the caching mechanism provided by the EvaluationContext. The
      compute_result method should only be called by
      EvaluationContext. We need to think of a clean way to achieve
      this.
    */
    virtual EvaluationResult compute_result(
        EvaluationContext &eval_context) = 0;
    void report_value_for_initial_state(const EvaluationResult &result) const;
    void report_new_minimum_value(const EvaluationResult &result) const;
    const std::string &get_description() const;
    bool is_used_for_reporting_minima() const;
    bool is_used_for_boosting() const;
    bool is_used_for_counting_evaluations() const;
    // Estimate-caching interface; defaults indicate "no caching".
    virtual bool does_cache_estimates() const;
    virtual bool is_estimate_cached(const State &state) const;
    /*
      Calling get_cached_estimate is only allowed if an estimate for
      the given state is cached, i.e., is_estimate_cached returns true.
    */
    virtual int get_cached_estimate(const State &state) const;
};
#endif
| 3,220 |
C
| 32.206185 | 78 | 0.695342 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/search_space.h
|
#ifndef SEARCH_SPACE_H
#define SEARCH_SPACE_H
#include "operator_cost.h"
#include "per_state_information.h"
#include "search_node_info.h"
#include <vector>
class OperatorProxy;
class State;
class TaskProxy;
// Lightweight handle pairing a state with its mutable search-space
// bookkeeping (status, g values, parent link). Obtained via
// SearchSpace::get_node.
class SearchNode {
    State state;
    SearchNodeInfo &info;
public:
    SearchNode(const State &state, SearchNodeInfo &info);
    const State &get_state() const;
    bool is_new() const;
    bool is_open() const;
    bool is_closed() const;
    bool is_dead_end() const;
    int get_g() const;
    int get_real_g() const;
    // Mark the initial state as open with g = 0.
    void open_initial();
    void open(const SearchNode &parent_node,
              const OperatorProxy &parent_op,
              int adjusted_cost);
    void reopen(const SearchNode &parent_node,
                const OperatorProxy &parent_op,
                int adjusted_cost);
    void update_parent(const SearchNode &parent_node,
                       const OperatorProxy &parent_op,
                       int adjusted_cost);
    void close();
    void mark_as_dead_end();
    void dump(const TaskProxy &task_proxy) const;
};
// Owns the per-state SearchNodeInfo storage and reconstructs solution
// paths by following parent links backwards from a goal state.
class SearchSpace {
    PerStateInformation<SearchNodeInfo> search_node_infos;
    StateRegistry &state_registry;
public:
    explicit SearchSpace(StateRegistry &state_registry);
    SearchNode get_node(const State &state);
    // Fill path with the operator sequence leading to goal_state.
    void trace_path(const State &goal_state,
                    std::vector<OperatorID> &path) const;
    void dump(const TaskProxy &task_proxy) const;
    void print_statistics() const;
};
#endif
| 1,502 |
C
| 22.484375 | 58 | 0.648469 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/operator_cost.cc
|
#include "operator_cost.h"
#include "option_parser.h"
#include "task_proxy.h"
#include "utils/system.h"
#include <cstdlib>
#include <vector>
using namespace std;
/*
  Map a raw operator cost to its value under the selected
  cost-adjustment policy (NORMAL / ONE / PLUSONE).
*/
static int get_adjusted_action_cost(int cost, OperatorCost cost_type, bool is_unit_cost) {
    if (cost_type == NORMAL)
        return cost;
    if (cost_type == ONE)
        return 1;
    if (cost_type == PLUSONE)
        return is_unit_cost ? 1 : cost + 1;
    ABORT("Unknown cost type");
}
// Adjusted cost of an operator; axioms always count as cost 0,
// independent of the cost type.
int get_adjusted_action_cost(const OperatorProxy &op, OperatorCost cost_type, bool is_unit_cost) {
    if (op.is_axiom())
        return 0;
    return get_adjusted_action_cost(op.get_cost(), cost_type, is_unit_cost);
}
/*
  Register the "cost_type" enum option on the given parser, documenting
  every OperatorCost value for the generated help output.
*/
void add_cost_type_option_to_parser(OptionParser &parser) {
    vector<string> names;
    vector<string> docs;
    auto add_value = [&](const string &name, const string &doc) {
        names.push_back(name);
        docs.push_back(doc);
    };
    add_value("NORMAL",
              "all actions are accounted for with their real cost");
    add_value("ONE",
              "all actions are accounted for as unit cost");
    add_value("PLUSONE",
              "all actions are accounted for as their real cost + 1 "
              "(except if all actions have original cost 1, "
              "in which case cost 1 is used). "
              "This is the behaviour known for the heuristics of the LAMA planner. "
              "This is intended to be used by the heuristics, not search engines, "
              "but is supported for both.");
    parser.add_enum_option<OperatorCost>(
        "cost_type",
        names,
        "Operator cost adjustment type. "
        "No matter what this setting is, axioms will always be considered "
        "as actions of cost 0 by the heuristics that treat axioms as actions.",
        "NORMAL",
        docs);
}
| 1,875 |
C++
| 29.754098 | 98 | 0.6336 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/search_node_info.cc
|
#include "search_node_info.h"
// Compile-time sanity check on SearchNodeInfo's memory layout: the raw
// member payload is three ints plus a StateID, and the compiler is
// assumed to add (info_bytes % pointer_bytes) bytes of padding.
static const int pointer_bytes = sizeof(void *);
static const int info_bytes = 3 * sizeof(int) + sizeof(StateID);
static const int padding_bytes = info_bytes % pointer_bytes;
static_assert(
    sizeof(SearchNodeInfo) == info_bytes + padding_bytes,
    "The size of SearchNodeInfo is larger than expected. This probably means "
    "that packing two fields into one integer using bitfields is not supported.");
| 442 |
C++
| 39.272724 | 82 | 0.737557 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/abstract_task.h
|
#ifndef ABSTRACT_TASK_H
#define ABSTRACT_TASK_H
#include "operator_id.h"
#include "algorithms/subscriber.h"
#include "utils/hash.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
namespace options {
class Options;
}
/*
  A (variable, value) pair identifying a single fact of a planning task.
*/
struct FactPair {
    int var;
    int value;
    FactPair(int var, int value)
        : var(var), value(value) {
    }
    // Lexicographic order: first by variable, then by value.
    bool operator<(const FactPair &other) const {
        if (var != other.var)
            return var < other.var;
        return value < other.value;
    }
    bool operator==(const FactPair &other) const {
        return var == other.var && value == other.value;
    }
    bool operator!=(const FactPair &other) const {
        return !(*this == other);
    }
    /*
      This special object represents "no such fact". E.g., functions
      that search a fact can return "no_fact" when no matching fact is
      found.
    */
    static const FactPair no_fact;
};
std::ostream &operator<<(std::ostream &os, const FactPair &fact_pair);
namespace utils {
// Hash support: fold a FactPair's components into the given hash state.
inline void feed(HashState &hash_state, const FactPair &fact) {
    feed(hash_state, fact.var);
    feed(hash_state, fact.value);
}
}
/*
  Abstract base class describing a planning task via pure virtual
  accessors for variables, facts, mutexes, operators, axioms, goals and
  the initial state. Tasks may wrap other tasks; the convert_* methods
  translate between a task and its ancestors.
*/
class AbstractTask : public subscriber::SubscriberService<AbstractTask> {
public:
    AbstractTask() = default;
    virtual ~AbstractTask() override = default;
    virtual int get_num_variables() const = 0;
    virtual std::string get_variable_name(int var) const = 0;
    virtual int get_variable_domain_size(int var) const = 0;
    virtual int get_variable_axiom_layer(int var) const = 0;
    virtual int get_variable_default_axiom_value(int var) const = 0;
    virtual std::string get_fact_name(const FactPair &fact) const = 0;
    virtual bool are_facts_mutex(const FactPair &fact1, const FactPair &fact2) const = 0;
    // Operator/axiom accessors: is_axiom selects which set the index refers to.
    virtual int get_operator_cost(int index, bool is_axiom) const = 0;
    virtual std::string get_operator_name(int index, bool is_axiom) const = 0;
    virtual int get_num_operators() const = 0;
    virtual int get_num_operator_preconditions(int index, bool is_axiom) const = 0;
    virtual FactPair get_operator_precondition(
        int op_index, int fact_index, bool is_axiom) const = 0;
    virtual int get_num_operator_effects(int op_index, bool is_axiom) const = 0;
    virtual int get_num_operator_effect_conditions(
        int op_index, int eff_index, bool is_axiom) const = 0;
    virtual FactPair get_operator_effect_condition(
        int op_index, int eff_index, int cond_index, bool is_axiom) const = 0;
    virtual FactPair get_operator_effect(
        int op_index, int eff_index, bool is_axiom) const = 0;
    /*
      Convert an operator index from this task, C (child), into an operator index
      from an ancestor task A (ancestor). Task A has to be an ancestor of C in
      the sense that C is the result of a sequence of task transformations on A.
    */
    virtual int convert_operator_index(
        int index, const AbstractTask *ancestor_task) const = 0;
    virtual int get_num_axioms() const = 0;
    virtual int get_num_goals() const = 0;
    virtual FactPair get_goal_fact(int index) const = 0;
    virtual std::vector<int> get_initial_state_values() const = 0;
    /*
      Convert state values from an ancestor task A (ancestor) into
      state values from this task, C (child). Task A has to be an
      ancestor of C in the sense that C is the result of a sequence of
      task transformations on A.
      The values are converted in-place to avoid unnecessary copies. If a
      subclass needs to create a new vector, e.g., because the size changes,
      it should create the new vector in a local variable and then swap it with
      the parameter.
    */
    virtual void convert_ancestor_state_values(
        std::vector<int> &values,
        const AbstractTask *ancestor_task) const = 0;
};
#endif
| 3,837 |
C
| 33.576576 | 89 | 0.674746 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/command_line.h
|
#ifndef COMMAND_LINE_H
#define COMMAND_LINE_H
#include "utils/exceptions.h"
#include <memory>
#include <string>
namespace options {
class Registry;
}
class SearchEngine;
// Exception raised for invalid command-line arguments.
class ArgError : public utils::Exception {
    std::string msg;
public:
    explicit ArgError(const std::string &msg);
    virtual void print() const override;
};
extern std::shared_ptr<SearchEngine> parse_cmd_line(
int argc, const char **argv, options::Registry ®istry, bool dry_run,
bool is_unit_cost);
extern std::string usage(const std::string &progname);
#endif
| 557 |
C
| 17.599999 | 75 | 0.718133 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/heuristic.cc
|
#include "heuristic.h"
#include "evaluation_context.h"
#include "evaluation_result.h"
#include "option_parser.h"
#include "plugin.h"
#include "task_utils/task_properties.h"
#include "tasks/cost_adapted_task.h"
#include "tasks/root_task.h"
#include <cassert>
#include <cstdlib>
#include <limits>
using namespace std;
// Construct from parsed options: sets up the per-state estimate cache
// and the (possibly transformed, see "transform" option) task proxy.
Heuristic::Heuristic(const Options &opts)
    : Evaluator(opts.get_unparsed_config(), true, true, true),
      heuristic_cache(HEntry(NO_VALUE, true)), //TODO: is true really a good idea here?
      cache_evaluator_values(opts.get<bool>("cache_estimates")),
      task(opts.get<shared_ptr<AbstractTask>>("transform")),
      task_proxy(*task) {
}
// Defined out of line; nothing to release explicitly.
Heuristic::~Heuristic() {
}
// Record op as preferred, translated to its operator ID in the root task.
void Heuristic::set_preferred(const OperatorProxy &op) {
    preferred_operators.insert(op.get_ancestor_operator_id(tasks::g_root_task.get()));
}
// Translate a state of an ancestor task into a state of this
// heuristic's (possibly transformed) task.
State Heuristic::convert_ancestor_state(const State &ancestor_state) const {
    return task_proxy.convert_ancestor_state(ancestor_state);
    // Historical implementation kept for reference:
    //State Heuristic::convert_global_state(const GlobalState &global_state) const {
    //    vector<int> values = global_state.get_values();
    //    for (const VariableProxy var : task_proxy.get_variables()) {
    //        if (var.is_derived()) {
    //            // Caelan: resets axiom variables to their default value
    //            values[var.get_id()] = var.get_default_axiom_value();
    //        }
    //    }
    //    State state(*g_root_task(), move(values));
    //    return task_proxy.convert_ancestor_state(state);
    //}
}
// Register the options shared by all heuristics ("transform" and
// "cache_estimates") on the given parser.
void Heuristic::add_options_to_parser(OptionParser &parser) {
    parser.add_option<shared_ptr<AbstractTask>>(
        "transform",
        "Optional task transformation for the heuristic."
        " Currently, adapt_costs() and no_transform() are available.",
        "no_transform()");
    parser.add_option<bool>("cache_estimates", "cache heuristic estimates", "true");
}
/*
  Template method around compute_heuristic(): consults/updates the
  per-state estimate cache, collects preferred operators and translates
  the DEAD_END marker into an infinite evaluation result.
*/
EvaluationResult Heuristic::compute_result(EvaluationContext &eval_context) {
    EvaluationResult result;
    assert(preferred_operators.empty());
    const State &state = eval_context.get_state();
    bool calculate_preferred = eval_context.get_calculate_preferred();
    int heuristic = NO_VALUE;
    // Reuse a clean cached estimate, but only when no preferred operators
    // are requested (the cache stores only the h value, not the operators).
    if (!calculate_preferred && cache_evaluator_values &&
        heuristic_cache[state].h != NO_VALUE && !heuristic_cache[state].dirty) {
        heuristic = heuristic_cache[state].h;
        result.set_count_evaluation(false);
    } else {
        heuristic = compute_heuristic(state);
        if (cache_evaluator_values) {
            heuristic_cache[state] = HEntry(heuristic, false);
        }
        result.set_count_evaluation(true);
    }
    assert(heuristic == DEAD_END || heuristic >= 0);
    if (heuristic == DEAD_END) {
        /*
          It is permissible to mark preferred operators for dead-end
          states (thus allowing a heuristic to mark them on-the-fly
          before knowing the final result), but if it turns out we
          have a dead end, we don't want to actually report any
          preferred operators.
        */
        preferred_operators.clear();
        heuristic = EvaluationResult::INFTY;
    }
    /*TaskProxy global_task_proxy = TaskProxy(*g_root_task());
    State global_state(*g_root_task(), state.get_values());
    OperatorsProxy global_operators = global_task_proxy.get_operators();
    ordered_set::OrderedSet<OperatorID> applicable_preferred;
    for (OperatorID op_id : preferred_operators) {
        if (task_properties::is_applicable(global_operators[op_id], global_state)) {
            // Caelan: prune preferred operators that are not applicable
            applicable_preferred.insert(op_id);
        }
    }
    result.set_preferred_operators(applicable_preferred.pop_as_vector());
    preferred_operators.clear();*/
    //#ifndef NDEBUG
    //    TaskProxy global_task_proxy = state.get_task();
    //    OperatorsProxy global_operators = global_task_proxy.get_operators();
    //    if (heuristic != EvaluationResult::INFTY) {
    //        for (OperatorID op_id : preferred_operators)
    //            assert(task_properties::is_applicable(global_operators[op_id], state));
    //    }
    //#endif
    result.set_evaluator_value(heuristic);
    result.set_preferred_operators(preferred_operators.pop_as_vector());
    assert(preferred_operators.empty());
    return result;
}
// True iff this heuristic was configured with cache_estimates=true.
bool Heuristic::does_cache_estimates() const {
    return cache_evaluator_values;
}
// True iff a (possibly dirty) estimate for state is present in the cache.
bool Heuristic::is_estimate_cached(const State &state) const {
    return heuristic_cache[state].h != NO_VALUE;
}
// Cached estimate for state; only legal if is_estimate_cached(state) holds.
int Heuristic::get_cached_estimate(const State &state) const {
    assert(is_estimate_cached(state));
    return heuristic_cache[state].h;
}
| 4,629 |
C++
| 33.81203 | 87 | 0.672067 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/evaluator_cache.cc
|
#include "evaluator_cache.h"
using namespace std;
// Result slot for eval; default-constructed on first access.
EvaluationResult &EvaluatorCache::operator[](Evaluator *eval) {
    return eval_results[eval];
}
| 150 |
C++
| 15.777776 | 63 | 0.74 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/operator_id.h
|
#ifndef OPERATOR_ID_H
#define OPERATOR_ID_H
#include "utils/hash.h"
#include <iostream>
/*
OperatorIDs are used to define an operator that belongs to a given
planning task. These IDs are meant to be compact and efficient to use.
They can be thought of as a type-safe replacement for "int" for the
purpose of referring to an operator.
Because of their efficiency requirements, they do *not* store which
task they belong to, and it is the user's responsibility not to mix
OperatorIDs that belong to different tasks.
OperatorIDs can only refer to *operators*, not to *axioms*. This is
by design: using OperatorID clearly communicates that only operators
are appropriate in a given place and it is an error to use an axiom.
We also considered introducing a class that can refer to operators or
axioms (suggested names were OperatorOrAxiomID and ActionID, introducing
the convention that "action" stands for "operator or axiom"), but so
far we have not found a use case for it.
*/
// Compact, type-safe index of an operator within one planning task.
// See the file comment above for usage constraints.
class OperatorID {
    int value;  // plain operator index within its task
public:
    explicit OperatorID(int index)
        : value(index) {
    }
    // Underlying integer index.
    int get_index() const {
        return value;
    }
    bool operator==(const OperatorID &other) const {
        return value == other.value;
    }
    bool operator!=(const OperatorID &other) const {
        return value != other.value;
    }
    // Hash value; simply the index itself.
    int hash() const {
        return value;
    }
    // Sentinel meaning "no operator" (defined in operator_id.cc).
    static const OperatorID no_operator;
};
std::ostream &operator<<(std::ostream &os, OperatorID id);
namespace utils {
// Hash support: feed an OperatorID via its integer hash value.
inline void feed(HashState &hash_state, OperatorID id) {
    feed(hash_state, id.hash());
}
}
#endif
| 1,644 |
C
| 25.532258 | 74 | 0.699513 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/operator_id.cc
|
#include "operator_id.h"
#include <ostream>
using namespace std;
// Stream an OperatorID as "op<index>", e.g. "op3".
ostream &operator<<(ostream &os, OperatorID id) {
    return os << "op" << id.get_index();
}
// Sentinel instance representing "no operator".
const OperatorID OperatorID::no_operator = OperatorID(-1);
| 229 |
C++
| 16.692306 | 58 | 0.668122 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/plan_manager.h
|
#ifndef PLAN_MANAGER_H
#define PLAN_MANAGER_H
#include <string>
#include <vector>
class OperatorID;
class TaskProxy;
using Plan = std::vector<OperatorID>;
// Writes found plans to disk and keeps track of how many were written.
class PlanManager {
    std::string plan_filename;            // base output filename
    int num_previously_generated_plans;   // used to number subsequent plans
    bool is_part_of_anytime_portfolio;    // forces numbered plan files
public:
    PlanManager();
    void set_plan_filename(const std::string &plan_filename);
    void set_num_previously_generated_plans(int num_previously_generated_plans);
    void set_is_part_of_anytime_portfolio(bool is_part_of_anytime_portfolio);
    /*
      Set generates_multiple_plan_files to true if the planner can find more than
      one plan and should number the plans as FILENAME.1, ..., FILENAME.n.
    */
    void save_plan(
        const Plan &plan, const TaskProxy &task_proxy,
        bool generates_multiple_plan_files = false);
};
extern int calculate_plan_cost(const Plan &plan, const TaskProxy &task_proxy);
#endif
| 929 |
C
| 25.571428 | 81 | 0.711518 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/evaluator_cache.h
|
#ifndef EVALUATOR_CACHE_H
#define EVALUATOR_CACHE_H
#include "evaluation_result.h"
#include <unordered_map>
class Evaluator;
using EvaluationResults = std::unordered_map<Evaluator *, EvaluationResult>;
/*
  Store evaluation results for evaluators.
*/
class EvaluatorCache {
    EvaluationResults eval_results;
public:
    // Result slot for eval; default-constructed on first access.
    EvaluationResult &operator[](Evaluator *eval);
    // Invoke callback(evaluator, result) for every cached entry
    // (iteration order is unspecified).
    template<class Callback>
    void for_each_evaluator_result(const Callback &callback) const {
        for (const auto &element : eval_results) {
            const Evaluator *eval = element.first;
            const EvaluationResult &result = element.second;
            callback(eval, result);
        }
    }
};
#endif
| 700 |
C
| 20.906249 | 76 | 0.688571 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/plan_manager.cc
|
#include "plan_manager.h"
#include "task_proxy.h"
#include "task_utils/task_properties.h"
#include "utils/logging.h"
#include <fstream>
#include <iostream>
#include <sstream>
using namespace std;
// Sum the costs of all operators in the given plan.
int calculate_plan_cost(const Plan &plan, const TaskProxy &task_proxy) {
    OperatorsProxy operators = task_proxy.get_operators();
    int total_cost = 0;
    for (OperatorID step : plan)
        total_cost += operators[step].get_cost();
    return total_cost;
}
// Default configuration: write to "sas_plan", no plans generated yet,
// not part of an anytime portfolio.
PlanManager::PlanManager()
    : plan_filename("sas_plan"),
      num_previously_generated_plans(0),
      is_part_of_anytime_portfolio(false) {
}
// Set the base filename used by save_plan (default: "sas_plan").
void PlanManager::set_plan_filename(const string &plan_filename_) {
    plan_filename = plan_filename_;
}
// Set the starting count used to number subsequent plan files.
void PlanManager::set_num_previously_generated_plans(int num_previously_generated_plans_) {
    num_previously_generated_plans = num_previously_generated_plans_;
}
// Mark this manager as part of an anytime portfolio; plan files are
// then always numbered.
void PlanManager::set_is_part_of_anytime_portfolio(bool is_part_of_anytime_portfolio_) {
    is_part_of_anytime_portfolio = is_part_of_anytime_portfolio_;
}
/*
  Write the given plan to disk (and echo each step to stdout), appending
  a "; cost = ..." comment line. Exits the process with
  SEARCH_INPUT_ERROR if the output file cannot be opened.
*/
void PlanManager::save_plan(
    const Plan &plan, const TaskProxy &task_proxy,
    bool generates_multiple_plan_files) {
    ostringstream filename;
    filename << plan_filename;
    int plan_number = num_previously_generated_plans + 1;
    // Number the file only when more than one plan can be produced.
    if (generates_multiple_plan_files || is_part_of_anytime_portfolio) {
        filename << "." << plan_number;
    } else {
        assert(plan_number == 1);
    }
    ofstream outfile(filename.str());
    if (outfile.rdstate() & ofstream::failbit) {
        cerr << "Failed to open plan file: " << filename.str() << endl;
        utils::exit_with(utils::ExitCode::SEARCH_INPUT_ERROR);
    }
    OperatorsProxy operators = task_proxy.get_operators();
    for (OperatorID op_id : plan) {
        cout << operators[op_id].get_name() << " (" << operators[op_id].get_cost() << ")" << endl;
        outfile << "(" << operators[op_id].get_name() << ")" << endl;
    }
    int plan_cost = calculate_plan_cost(plan, task_proxy);
    bool is_unit_cost = task_properties::is_unit_cost(task_proxy);
    outfile << "; cost = " << plan_cost << " ("
            << (is_unit_cost ? "unit cost" : "general cost") << ")" << endl;
    outfile.close();
    utils::g_log << "Plan length: " << plan.size() << " step(s)." << endl;
    utils::g_log << "Plan cost: " << plan_cost << endl;
    ++num_previously_generated_plans;
}
| 2,398 |
C++
| 32.319444 | 98 | 0.638866 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/state_registry.h
|
#ifndef STATE_REGISTRY_H
#define STATE_REGISTRY_H
#include "abstract_task.h"
#include "axioms.h"
#include "state_id.h"
#include "algorithms/int_hash_set.h"
#include "algorithms/int_packer.h"
#include "algorithms/segmented_vector.h"
#include "algorithms/subscriber.h"
#include "utils/hash.h"
#include <set>
/*
Overview of classes relevant to storing and working with registered states.
State
Objects of this class can represent registered or unregistered states.
Registered states contain a pointer to the StateRegistry that created them
and the ID they have there. Using this data, states can be used to index
PerStateInformation objects.
In addition, registered states have a pointer to the packed data of a state
that is stored in their registry. Values of the state can be accessed
through this pointer. For situations where a state's values have to be
accessed a lot, the state's data can be unpacked. The unpacked data is
stored in a vector<int> to which the state maintains a shared pointer.
Unregistered states contain only this unpacked data. Compared to registered
states, they are not guaranteed to be reachable and use no form of duplicate
detection.
Copying states is relatively cheap because the actual data does not have to
be copied.
StateID
StateIDs identify states within a state registry.
If the registry is known, the ID is sufficient to look up the state, which
is why IDs are intended for long term storage (e.g. in open lists).
Internally, a StateID is just an integer, so it is cheap to store and copy.
PackedStateBin (currently the same as unsigned int)
The actual state data is internally represented as a PackedStateBin array.
Each PackedStateBin can contain the values of multiple variables.
To minimize allocation overhead, the implementation stores the data of many
such states in a single large array (see SegmentedArrayVector).
PackedStateBin arrays are never manipulated directly but through
the task's state packer (see IntPacker).
-------------
StateRegistry
The StateRegistry allows to create states giving them an ID. IDs from
different state registries must not be mixed.
The StateRegistry also stores the actual state data in a memory friendly way.
It uses the following class:
SegmentedArrayVector<PackedStateBin>
This class is used to store the actual (packed) state data for all states
while avoiding dynamically allocating each state individually.
The index within this vector corresponds to the ID of the state.
PerStateInformation<T>
Associates a value of type T with every state in a given StateRegistry.
Can be thought of as a very compactly implemented map from State to T.
References stay valid as long as the state registry exists. Memory usage is
essentially the same as a vector<T> whose size is the number of states in
the registry.
---------------
Usage example 1
---------------
Problem:
A search node contains a state together with some information about how this
state was reached and the status of the node. The state data is already
stored and should not be duplicated. Open lists should in theory store search
nodes but we want to keep the amount of data stored in the open list to a
minimum.
Solution:
SearchNodeInfo
Remaining part of a search node besides the state that needs to be stored.
SearchNode
A SearchNode combines a StateID, a reference to a SearchNodeInfo and
OperatorCost. It is generated for easier access and not intended for long
    term storage. The state data is only stored once and can be accessed
through the StateID.
SearchSpace
The SearchSpace uses PerStateInformation<SearchNodeInfo> to map StateIDs to
SearchNodeInfos. The open lists only have to store StateIDs which can be
used to look up a search node in the SearchSpace on demand.
---------------
Usage example 2
---------------
Problem:
In the LMcount heuristic each state should store which landmarks are
already reached when this state is reached. This should only require
additional memory when the LMcount heuristic is used.
Solution:
The heuristic object uses an attribute of type PerStateBitset to store for each
state and each landmark whether it was reached in this state.
*/
namespace int_packer {
class IntPacker;
}
using PackedStateBin = int_packer::IntPacker::Bin;
/*
  Registry that assigns consecutive StateIDs to states and stores their
  packed data; see the file comment above for the big picture.
*/
class StateRegistry : public subscriber::SubscriberService<StateRegistry> {
    struct StateIDSemanticHash {
        const segmented_vector::SegmentedArrayVector<PackedStateBin> &state_data_pool;
        int state_size;
        StateIDSemanticHash(
            const segmented_vector::SegmentedArrayVector<PackedStateBin> &state_data_pool,
            int state_size)
            : state_data_pool(state_data_pool),
              state_size(state_size) {
        }
        // Hash the packed state data stored behind the given ID.
        int_hash_set::HashType operator()(int id) const {
            const PackedStateBin *data = state_data_pool[id];
            utils::HashState hash_state;
            for (int i = 0; i < state_size; ++i) {
                hash_state.feed(data[i]);
            }
            return hash_state.get_hash32();
        }
    };
    struct StateIDSemanticEqual {
        const segmented_vector::SegmentedArrayVector<PackedStateBin> &state_data_pool;
        int state_size;
        StateIDSemanticEqual(
            const segmented_vector::SegmentedArrayVector<PackedStateBin> &state_data_pool,
            int state_size)
            : state_data_pool(state_data_pool),
              state_size(state_size) {
        }
        // Compare the packed state data behind two IDs bin by bin.
        bool operator()(int lhs, int rhs) const {
            const PackedStateBin *lhs_data = state_data_pool[lhs];
            const PackedStateBin *rhs_data = state_data_pool[rhs];
            return std::equal(lhs_data, lhs_data + state_size, rhs_data);
        }
    };
    /*
      Hash set of StateIDs used to detect states that are already registered in
      this registry and find their IDs. States are compared/hashed semantically,
      i.e. the actual state data is compared, not the memory location.
    */
    using StateIDSet = int_hash_set::IntHashSet<StateIDSemanticHash, StateIDSemanticEqual>;
    TaskProxy task_proxy;
    const int_packer::IntPacker &state_packer;
    AxiomEvaluator &axiom_evaluator;
    const int num_variables;
    segmented_vector::SegmentedArrayVector<PackedStateBin> state_data_pool;
    StateIDSet registered_states;
    std::unique_ptr<State> cached_initial_state;
    StateID insert_id_or_pop_state();
    int get_bins_per_state() const;
public:
    explicit StateRegistry(const TaskProxy &task_proxy);
    const TaskProxy &get_task_proxy() const {
        return task_proxy;
    }
    int get_num_variables() const {
        return num_variables;
    }
    const int_packer::IntPacker &get_state_packer() const {
        return state_packer;
    }
    /*
      Returns the state that was registered at the given ID. The ID must refer
      to a state in this registry. Do not mix IDs from different registries.
    */
    State lookup_state(StateID id) const;
    /*
      Returns a reference to the initial state and registers it if this was not
      done before. The result is cached internally so subsequent calls are cheap.
    */
    const State &get_initial_state();
    /*
      Returns the state that results from applying op to predecessor and
      registers it if this was not done before. This is an expensive operation
      as it includes duplicate checking.
    */
    State get_successor_state(const State &predecessor, const OperatorProxy &op);
    /*
      Returns the number of states registered so far.
    */
    size_t size() const {
        return registered_states.size();
    }
    int get_state_size_in_bytes() const;
    void print_statistics() const;
    // NOTE(review): std::iterator is deprecated since C++17; consider
    // declaring the iterator member typedefs directly.
    class const_iterator : public std::iterator<
                               std::forward_iterator_tag, StateID> {
        /*
          We intentionally omit parts of the forward iterator concept
          (e.g. default construction, copy assignment, post-increment)
          to reduce boilerplate. Supported compilers may complain about
          this, in which case we will add the missing methods.
        */
        friend class StateRegistry;
        const StateRegistry &registry;
        StateID pos;
        const_iterator(const StateRegistry &registry, size_t start)
            : registry(registry), pos(start) {
            utils::unused_variable(this->registry);
        }
    public:
        const_iterator &operator++() {
            ++pos.value;
            return *this;
        }
        bool operator==(const const_iterator &rhs) {
            assert(&registry == &rhs.registry);
            return pos == rhs.pos;
        }
        bool operator!=(const const_iterator &rhs) {
            return !(*this == rhs);
        }
        StateID operator*() {
            return pos;
        }
        StateID *operator->() {
            return &pos;
        }
    };
    const_iterator begin() const {
        return const_iterator(*this, 0);
    }
    const_iterator end() const {
        return const_iterator(*this, size());
    }
};
#endif
| 9,317 |
C
| 33.768657 | 91 | 0.676291 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/open_list_factory.cc
|
#include "open_list_factory.h"
#include "plugin.h"
using namespace std;
// Explicit specialization: a generic request for a state open list is
// forwarded to the factory's state-open-list creator.
template<>
unique_ptr<StateOpenList> OpenListFactory::create_open_list() {
    return create_state_open_list();
}
// Explicit specialization: likewise for edge open lists.
template<>
unique_ptr<EdgeOpenList> OpenListFactory::create_open_list() {
    return create_edge_open_list();
}
// Register the "OpenList" plugin type with the option/plugin system.
static PluginTypePlugin<OpenListFactory> _type_plugin(
    "OpenList",
    // TODO: Replace empty string by synopsis for the wiki page.
    "");
| 450 |
C++
| 18.608695 | 64 | 0.713333 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/evaluation_context.h
|
#ifndef EVALUATION_CONTEXT_H
#define EVALUATION_CONTEXT_H
#include "evaluation_result.h"
#include "evaluator_cache.h"
#include "operator_id.h"
#include "task_proxy.h"
#include <unordered_map>
class Evaluator;
class SearchStatistics;
/*
TODO: Now that we have an explicit EvaluationResult class, it's
perhaps not such a great idea to duplicate all its access methods
like "get_evaluator_value()" etc. on EvaluationContext. Might be a
simpler interface to just give EvaluationContext an operator[]
method or other simple way of accessing a given EvaluationResult
and then use the methods of the result directly.
*/
/*
EvaluationContext has two main purposes:
1. It packages up the information that evaluators and open lists
need in order to perform an evaluation: the state, the g value of
the node, and whether it was reached by a preferred operator.
2. It caches computed evaluator values and preferred operators for
the current evaluation so that they do not need to be computed
multiple times just because they appear in multiple contexts,
and also so that we don't need to know a priori which evaluators
need to be evaluated throughout the evaluation process.
For example, our current implementation of A* search uses the
evaluator value h at least three times: twice for its
tie-breaking open list based on <g + h, h> and a third time for
its "progress evaluator" that produces output whenever we reach a
new best f value.
*/
class EvaluationContext {
    EvaluatorCache cache;  // per-evaluator results memoized for this evaluation
    State state;
    int g_value;  // INVALID when constructed without a g value
    bool preferred;
    SearchStatistics *statistics;  // may be nullptr (see constructors)
    bool calculate_preferred;
    static const int INVALID = -1;
    // Private constructor shared by the public ones (see evaluation_context.cc).
    EvaluationContext(
        const EvaluatorCache &cache, const State &state, int g_value,
        bool is_preferred, SearchStatistics *statistics,
        bool calculate_preferred);
public:
    /*
      Copy existing heuristic cache and use it to look up heuristic values.
      Used for example by lazy search.
      TODO: Can we reuse caches? Can we move them instead of copying them?
    */
    EvaluationContext(
        const EvaluationContext &other,
        int g_value, bool is_preferred, SearchStatistics *statistics,
        bool calculate_preferred = false);
    /*
      Create new heuristic cache for caching heuristic values. Used for example
      by eager search.
    */
    EvaluationContext(
        const State &state, int g_value, bool is_preferred,
        SearchStatistics *statistics, bool calculate_preferred = false);
    /*
      Use the following constructor when you don't care about g values,
      preferredness (and statistics), e.g. when sampling states for heuristics.
      This constructor sets g_value to -1 and checks that neither get_g_value()
      nor is_preferred() are called for objects constructed with it.
      TODO: In the long term we might want to separate how path-dependent and
      path-independent evaluators are evaluated. This change would remove
      the need to store the g value and preferredness for evaluation
      contexts that don't need this information.
    */
    EvaluationContext(
        const State &state,
        SearchStatistics *statistics = nullptr, bool calculate_preferred = false);
    // Evaluate eval on this context; the result is cached after the first call.
    const EvaluationResult &get_result(Evaluator *eval);
    const EvaluatorCache &get_cache() const;
    const State &get_state() const;
    int get_g_value() const;
    bool is_preferred() const;
    /*
      Use get_evaluator_value() to query finite evaluator values. It
      is an error (guarded by an assertion) to call this method for
      states with infinite evaluator values, because such states often
      need to be treated specially and we want to catch cases where we
      forget to do this.
      In cases where finite and infinite evaluator values can be
      treated uniformly, use get_evaluator_value_or_infinity(), which
      returns numeric_limits<int>::max() for infinite estimates.
    */
    bool is_evaluator_value_infinite(Evaluator *eval);
    int get_evaluator_value(Evaluator *eval);
    int get_evaluator_value_or_infinity(Evaluator *eval);
    const std::vector<OperatorID> &get_preferred_operators(Evaluator *eval);
    bool get_calculate_preferred() const;
};
#endif
| 4,313 |
C
| 36.189655 | 82 | 0.713425 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/cegar/abstract_search.cc
|
#include "abstract_search.h"
#include "abstract_state.h"
#include "transition_system.h"
#include "utils.h"
#include "../utils/memory.h"
#include <cassert>
using namespace std;
namespace cegar {
// operator_costs maps abstract operator IDs to their costs;
// search_info starts sized for a single abstract state.
AbstractSearch::AbstractSearch(
    const vector<int> &operator_costs)
    : operator_costs(operator_costs),
      search_info(1) {
}
// Discard queued entries and (re)initialize one info record per state.
void AbstractSearch::reset(int num_states) {
    open_queue.clear();
    search_info.resize(num_states);
    for (auto &state_info : search_info)
        state_info.reset();
}
/*
  Walk the incoming transitions backwards from goal_id to init_id and
  build the corresponding solution trace front to back.
*/
unique_ptr<Solution> AbstractSearch::extract_solution(int init_id, int goal_id) const {
    unique_ptr<Solution> solution = utils::make_unique_ptr<Solution>();
    int current_id = goal_id;
    while (current_id != init_id) {
        const Transition &prev = search_info[current_id].get_incoming_transition();
        solution->emplace_front(prev.op_id, current_id);
        assert(prev.target_id != current_id);  // guard against self-loops
        current_id = prev.target_id;
    }
    return solution;
}
void AbstractSearch::update_goal_distances(const Solution &solution) {
    /*
      Originally, we only updated the goal distances of states that are part of
      the trace (see Seipp and Helmert, JAIR 2018). The code below generalizes
      this idea and potentially updates the goal distances of all states.
      Let C* be the cost of the trace and g(s) be the g value of states s when
      A* finds the trace. Then for all states s with g(s) < INF (i.e., s has
      been reached by the search), C*-g(s) is a lower bound on the goal
      distance. This is the case since
      g(s) >= g*(s) [1]
      and
      f*(s) >= C* (optimality of A* with an admissible heuristic)
      ==> g*(s) + h*(s) >= C* (definition of f values)
      ==> g(s) + h*(s) >= C* (using [1])
      ==> h*(s) >= C* - g(s) (arithmetic)
      Together with our existing lower bound h*(s) >= h(s), i.e., the h values
      from the last iteration, for each abstract state s with g(s) < INF, we
      can set h(s) = max(h(s), C*-g(s)).
    */
    // Compute C*, the cost of the given trace.
    int solution_cost = 0;
    for (const Transition &transition : solution) {
        solution_cost += operator_costs[transition.op_id];
    }
    // Raise h(s) to C* - g(s) for every reached state s.
    for (auto &info : search_info) {
        if (info.get_g_value() < INF) {
            int new_h = max(info.get_h_value(), solution_cost - info.get_g_value());
            info.increase_h_value_to(new_h);
        }
    }
}
/*
  Run A* from init_id towards goal_ids over the given transitions.
  Returns the solution trace if one exists; otherwise marks the initial
  state as unsolvable (h = INF) and returns nullptr.
*/
unique_ptr<Solution> AbstractSearch::find_solution(
    const vector<Transitions> &transitions,
    int init_id,
    const Goals &goal_ids) {
    reset(transitions.size());
    search_info[init_id].decrease_g_value_to(0);
    open_queue.push(search_info[init_id].get_h_value(), init_id);
    int goal_id = astar_search(transitions, goal_ids);
    open_queue.clear();
    bool has_found_solution = (goal_id != UNDEFINED);
    if (has_found_solution) {
        unique_ptr<Solution> solution = extract_solution(init_id, goal_id);
        update_goal_distances(*solution);
        return solution;
    } else {
        search_info[init_id].increase_h_value_to(INF);
    }
    return nullptr;
}
/*
  A* over the abstract transition system. Return the ID of the first goal
  state popped from the open list, or UNDEFINED if the open list runs empty
  without reaching a goal.
*/
int AbstractSearch::astar_search(
    const vector<Transitions> &transitions, const Goals &goals) {
    while (!open_queue.empty()) {
        pair<int, int> top_pair = open_queue.pop();
        int old_f = top_pair.first;
        int state_id = top_pair.second;

        const int g = search_info[state_id].get_g_value();
        assert(0 <= g && g < INF);
        int new_f = g + search_info[state_id].get_h_value();
        /* Recomputing f can only lower it (g may have decreased since this
           entry was pushed); skip stale entries. */
        assert(new_f <= old_f);
        if (new_f < old_f)
            continue;
        if (goals.count(state_id)) {
            return state_id;
        }
        assert(utils::in_bounds(state_id, transitions));
        for (const Transition &transition : transitions[state_id]) {
            int op_id = transition.op_id;
            int succ_id = transition.target_id;

            assert(utils::in_bounds(op_id, operator_costs));
            const int op_cost = operator_costs[op_id];
            assert(op_cost >= 0);
            // Avoid integer overflow for operators with infinite cost.
            int succ_g = (op_cost == INF) ? INF : g + op_cost;
            assert(succ_g >= 0);

            if (succ_g < search_info[succ_id].get_g_value()) {
                search_info[succ_id].decrease_g_value_to(succ_g);
                int h = search_info[succ_id].get_h_value();
                // Never expand states already known to be dead ends.
                if (h == INF)
                    continue;
                int f = succ_g + h;
                assert(f >= 0);
                assert(f != INF);
                open_queue.push(f, succ_id);
                // Remember the predecessor for extract_solution.
                search_info[succ_id].set_incoming_transition(Transition(op_id, state_id));
            }
        }
    }
    return UNDEFINED;
}
// Return the current heuristic estimate of the given abstract state.
int AbstractSearch::get_h_value(int state_id) const {
    assert(utils::in_bounds(state_id, search_info));
    return search_info[state_id].get_h_value();
}

/* "Set" is one-directional: the call forwards to increase_h_value_to, so
   h values are only ever raised, never lowered. */
void AbstractSearch::set_h_value(int state_id, int h) {
    assert(utils::in_bounds(state_id, search_info));
    search_info[state_id].increase_h_value_to(h);
}
/*
  After state v is split into v1 and v2, both children inherit v's h value
  (a valid lower bound for each of them). The resize adds room for exactly
  one extra entry — presumably one child reuses v's slot while the other
  gets a fresh ID (consistent with TransitionSystem::rewire, which also
  grows its vectors by one per split); TODO(review): confirm at the caller.
*/
void AbstractSearch::copy_h_value_to_children(int v, int v1, int v2) {
    int h = get_h_value(v);
    search_info.resize(search_info.size() + 1);
    set_h_value(v1, h);
    set_h_value(v2, h);
}
/*
  Dijkstra's algorithm: compute the cost of a cheapest path from any state
  in start_ids to every state of the transition system. Unreached states
  keep distance INF.
*/
vector<int> compute_distances(
    const vector<Transitions> &transitions,
    const vector<int> &costs,
    const unordered_set<int> &start_ids) {
    vector<int> distances(transitions.size(), INF);
    priority_queues::AdaptiveQueue<int> open_queue;
    for (int goal_id : start_ids) {
        distances[goal_id] = 0;
        open_queue.push(0, goal_id);
    }
    while (!open_queue.empty()) {
        pair<int, int> top_pair = open_queue.pop();
        int old_g = top_pair.first;
        int state_id = top_pair.second;

        const int g = distances[state_id];
        assert(0 <= g && g < INF);
        assert(g <= old_g);
        // Skip stale queue entries for states since reached more cheaply.
        if (g < old_g)
            continue;
        assert(utils::in_bounds(state_id, transitions));
        for (const Transition &transition : transitions[state_id]) {
            const int op_cost = costs[transition.op_id];
            assert(op_cost >= 0);
            // Avoid integer overflow for operators with infinite cost.
            int succ_g = (op_cost == INF) ? INF : g + op_cost;
            assert(succ_g >= 0);
            int succ_id = transition.target_id;
            if (succ_g < distances[succ_id]) {
                distances[succ_id] = succ_g;
                open_queue.push(succ_g, succ_id);
            }
        }
    }
    return distances;
}
}
| 6,437 |
C++
| 32.357513 | 90 | 0.580861 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/cegar/refinement_hierarchy.h
|
#ifndef CEGAR_REFINEMENT_HIERARCHY_H
#define CEGAR_REFINEMENT_HIERARCHY_H
#include "types.h"
#include <cassert>
#include <memory>
#include <ostream>
#include <utility>
#include <vector>
class AbstractTask;
class State;
namespace cegar {
class Node;
/*
  This class stores the refinement hierarchy of a Cartesian
  abstraction. The hierarchy forms a DAG with inner nodes for each
  split and leaf nodes for the abstract states.

  It is used for efficient lookup of abstract states during search.

  Inner nodes correspond to abstract states that have been split (or
  helper nodes, see below). Leaf nodes correspond to the current
  (unsplit) states in an abstraction. The use of helper nodes makes
  this structure a directed acyclic graph (instead of a tree).
*/
class RefinementHierarchy {
    std::shared_ptr<AbstractTask> task;
    // Node pool; NodeIDs index into this vector.
    std::vector<Node> nodes;

    NodeID add_node(int state_id);
    // Descend from the root following the variable values of "state".
    NodeID get_node_id(const State &state) const;

public:
    explicit RefinementHierarchy(const std::shared_ptr<AbstractTask> &task);

    /*
      Update the split tree for the new split. Additionally to the left
      and right child nodes add |values|-1 helper nodes that all have
      the right child as their right child and the next helper node as
      their left child.
    */
    std::pair<NodeID, NodeID> split(
        NodeID node_id, int var, const std::vector<int> &values,
        int left_state_id, int right_state_id);

    // Map a concrete state to the ID of the abstract state containing it.
    int get_abstract_state_id(const State &state) const;
};
// One node of the refinement hierarchy: either a leaf standing for an
// unsplit abstract state, or an inner node recording a split on var/value.
class Node {
    /*
      While right_child is always the node of a (possibly split)
      abstract state, left_child may be a helper node. We add helper
      nodes to the hierarchy to allow for efficient lookup in case more
      than one fact is split off a state.
    */
    NodeID left_child;
    NodeID right_child;

    /* Before splitting the corresponding state for var and value, both
       members hold UNDEFINED. */
    int var;
    int value;

    // When splitting the corresponding state, we change this value to UNDEFINED.
    int state_id;

    // Sanity check used in assertions (defined in the .cc file).
    bool information_is_valid() const;

public:
    explicit Node(int state_id);

    bool is_split() const;

    // Turn this leaf into an inner node that splits on var == value.
    void split(int var, int value, NodeID left_child, NodeID right_child);

    int get_var() const {
        assert(is_split());
        return var;
    }

    // Follow the branch containing the given value of the split variable.
    NodeID get_child(int value) const {
        assert(is_split());
        if (value == this->value)
            return right_child;
        return left_child;
    }

    // Only leaves correspond to (unsplit) abstract states.
    int get_state_id() const {
        assert(!is_split());
        return state_id;
    }

    friend std::ostream &operator<<(std::ostream &os, const Node &node);
};
}
#endif
| 2,676 |
C
| 24.990291 | 81 | 0.676756 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/cegar/additive_cartesian_heuristic.h
|
#ifndef CEGAR_ADDITIVE_CARTESIAN_HEURISTIC_H
#define CEGAR_ADDITIVE_CARTESIAN_HEURISTIC_H
#include "../heuristic.h"
#include <vector>
namespace cegar {
class CartesianHeuristicFunction;
/*
  Store CartesianHeuristicFunctions and compute overall heuristic by
  summing all of their values.
*/
class AdditiveCartesianHeuristic : public Heuristic {
    // One heuristic function per generated subtask; summed in compute_heuristic.
    const std::vector<CartesianHeuristicFunction> heuristic_functions;

protected:
    // Returns DEAD_END if any component reports an infinite estimate.
    virtual int compute_heuristic(const State &ancestor_state) override;

public:
    explicit AdditiveCartesianHeuristic(const options::Options &opts);
};
}
#endif
| 599 |
C
| 21.222221 | 72 | 0.78798 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/cegar/transition_system.h
|
#ifndef CEGAR_TRANSITION_SYSTEM_H
#define CEGAR_TRANSITION_SYSTEM_H
#include "types.h"
#include <vector>
struct FactPair;
class OperatorsProxy;
namespace cegar {
/*
  Rewire transitions after each split.
*/
class TransitionSystem {
    const std::vector<std::vector<FactPair>> preconditions_by_operator;
    const std::vector<std::vector<FactPair>> postconditions_by_operator;

    // Transitions from and to other abstract states.
    std::vector<Transitions> incoming;
    std::vector<Transitions> outgoing;

    // Store self-loops (operator indices) separately to save space.
    std::vector<Loops> loops;

    // Running counters, kept in sync by the add_/rewire_ methods.
    int num_non_loops;
    int num_loops;

    void enlarge_vectors_by_one();

    // Add self-loops to single abstract state in trivial abstraction.
    void add_loops_in_trivial_abstraction();

    // Value of var in the operator's pre-/postcondition, or UNDEFINED.
    int get_precondition_value(int op_id, int var) const;
    int get_postcondition_value(int op_id, int var) const;

    void add_transition(int src_id, int op_id, int target_id);
    void add_loop(int state_id, int op_id);

    void rewire_incoming_transitions(
        const Transitions &old_incoming, const AbstractStates &states,
        const AbstractState &v1, const AbstractState &v2, int var);
    void rewire_outgoing_transitions(
        const Transitions &old_outgoing, const AbstractStates &states,
        const AbstractState &v1, const AbstractState &v2, int var);
    void rewire_loops(
        const Loops &old_loops,
        const AbstractState &v1, const AbstractState &v2, int var);

public:
    explicit TransitionSystem(const OperatorsProxy &ops);

    // Update transition system after v has been split for var into v1 and v2.
    void rewire(
        const AbstractStates &states, int v_id,
        const AbstractState &v1, const AbstractState &v2, int var);

    const std::vector<Transitions> &get_incoming_transitions() const;
    const std::vector<Transitions> &get_outgoing_transitions() const;
    const std::vector<Loops> &get_loops() const;

    int get_num_states() const;
    int get_num_operators() const;
    int get_num_non_loops() const;
    int get_num_loops() const;

    void print_statistics() const;
};
}
#endif
| 2,167 |
C
| 29.111111 | 78 | 0.700969 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/cegar/utils_landmarks.h
|
#ifndef CEGAR_UTILS_LANDMARKS_H
#define CEGAR_UTILS_LANDMARKS_H
#include <memory>
#include <unordered_map>
#include <vector>
class AbstractTask;
struct FactPair;
namespace landmarks {
class LandmarkGraph;
}
namespace cegar {
// Maps a variable ID to a set of values of that variable.
using VarToValues = std::unordered_map<int, std::vector<int>>;

// Build the landmark graph for the given task.
extern std::shared_ptr<landmarks::LandmarkGraph> get_landmark_graph(
    const std::shared_ptr<AbstractTask> &task);

// Extract the fact landmarks stored in the graph.
extern std::vector<FactPair> get_fact_landmarks(
    const landmarks::LandmarkGraph &graph);

/*
  Do a breadth-first search through the landmark graph ignoring
  duplicates. Start at the node for the given fact and collect for each
  variable the facts that have to be made true before the given fact
  can be true for the first time.
*/
extern VarToValues get_prev_landmarks(
    const landmarks::LandmarkGraph &graph, const FactPair &fact);
}
#endif
| 864 |
C
| 24.441176 | 71 | 0.760417 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/cegar/additive_cartesian_heuristic.cc
|
#include "additive_cartesian_heuristic.h"
#include "cartesian_heuristic_function.h"
#include "cost_saturation.h"
#include "types.h"
#include "utils.h"
#include "../option_parser.h"
#include "../plugin.h"
#include "../utils/logging.h"
#include "../utils/markup.h"
#include "../utils/rng.h"
#include "../utils/rng_options.h"
#include <cassert>
using namespace std;
namespace cegar {
// Build one Cartesian heuristic function per generated subtask, using
// saturated cost partitioning (CostSaturation) to distribute operator costs.
static vector<CartesianHeuristicFunction> generate_heuristic_functions(
    const options::Options &opts) {
    utils::g_log << "Initializing additive Cartesian heuristic..." << endl;
    vector<shared_ptr<SubtaskGenerator>> subtask_generators =
        opts.get_list<shared_ptr<SubtaskGenerator>>("subtasks");
    shared_ptr<utils::RandomNumberGenerator> rng =
        utils::parse_rng_from_options(opts);
    CostSaturation cost_saturation(
        subtask_generators,
        opts.get<int>("max_states"),
        opts.get<int>("max_transitions"),
        opts.get<double>("max_time"),
        opts.get<bool>("use_general_costs"),
        opts.get<PickSplit>("pick"),
        *rng,
        opts.get<bool>("debug"));
    return cost_saturation.generate_heuristic_functions(
        opts.get<shared_ptr<AbstractTask>>("transform"));
}
// All abstractions are built eagerly here, during construction.
AdditiveCartesianHeuristic::AdditiveCartesianHeuristic(
    const options::Options &opts)
    : Heuristic(opts),
      heuristic_functions(generate_heuristic_functions(opts)) {
}
int AdditiveCartesianHeuristic::compute_heuristic(const State &ancestor_state) {
State state = convert_ancestor_state(ancestor_state);
int sum_h = 0;
for (const CartesianHeuristicFunction &function : heuristic_functions) {
int value = function.get_value(state);
assert(value >= 0);
if (value == INF)
return DEAD_END;
sum_h += value;
}
assert(sum_h >= 0);
return sum_h;
}
// Command-line integration: document the heuristic, register its options,
// and construct an AdditiveCartesianHeuristic from parsed options.
static shared_ptr<Heuristic> _parse(OptionParser &parser) {
    parser.document_synopsis(
        "Additive CEGAR heuristic",
        "See the paper introducing Counterexample-guided Abstraction "
        "Refinement (CEGAR) for classical planning:" +
        utils::format_conference_reference(
            {"Jendrik Seipp", "Malte Helmert"},
            "Counterexample-guided Cartesian Abstraction Refinement",
            "https://ai.dmi.unibas.ch/papers/seipp-helmert-icaps2013.pdf",
            "Proceedings of the 23rd International Conference on Automated "
            "Planning and Scheduling (ICAPS 2013)",
            "347-351",
            "AAAI Press",
            "2013") +
        "and the paper showing how to make the abstractions additive:" +
        utils::format_conference_reference(
            {"Jendrik Seipp", "Malte Helmert"},
            "Diverse and Additive Cartesian Abstraction Heuristics",
            "https://ai.dmi.unibas.ch/papers/seipp-helmert-icaps2014.pdf",
            "Proceedings of the 24th International Conference on "
            "Automated Planning and Scheduling (ICAPS 2014)",
            "289-297",
            "AAAI Press",
            "2014") +
        "For more details on Cartesian CEGAR and saturated cost partitioning, "
        "see the journal paper" +
        utils::format_journal_reference(
            {"Jendrik Seipp", "Malte Helmert"},
            "Counterexample-Guided Cartesian Abstraction Refinement for "
            "Classical Planning",
            "https://ai.dmi.unibas.ch/papers/seipp-helmert-jair2018.pdf",
            "Journal of Artificial Intelligence Research",
            "62",
            "535-577",
            "2018"));
    parser.document_language_support("action costs", "supported");
    parser.document_language_support("conditional effects", "not supported");
    parser.document_language_support("axioms", "not supported");
    parser.document_property("admissible", "yes");
    parser.document_property("consistent", "yes");
    parser.document_property("safe", "yes");
    parser.document_property("preferred operators", "no");
    parser.add_list_option<shared_ptr<SubtaskGenerator>>(
        "subtasks",
        "subtask generators",
        "[landmarks(),goals()]");
    parser.add_option<int>(
        "max_states",
        "maximum sum of abstract states over all abstractions",
        "infinity",
        Bounds("1", "infinity"));
    parser.add_option<int>(
        "max_transitions",
        "maximum sum of real transitions (excluding self-loops) over "
        " all abstractions",
        "1M",
        Bounds("0", "infinity"));
    parser.add_option<double>(
        "max_time",
        "maximum time in seconds for building abstractions",
        "infinity",
        Bounds("0.0", "infinity"));
    // The order of these strings must match the PickSplit enumerators.
    vector<string> pick_strategies;
    pick_strategies.push_back("RANDOM");
    pick_strategies.push_back("MIN_UNWANTED");
    pick_strategies.push_back("MAX_UNWANTED");
    pick_strategies.push_back("MIN_REFINED");
    pick_strategies.push_back("MAX_REFINED");
    pick_strategies.push_back("MIN_HADD");
    pick_strategies.push_back("MAX_HADD");
    parser.add_enum_option<PickSplit>(
        "pick", pick_strategies, "split-selection strategy", "MAX_REFINED");
    parser.add_option<bool>(
        "use_general_costs",
        "allow negative costs in cost partitioning",
        "true");
    parser.add_option<bool>(
        "debug",
        "print debugging output",
        "false");
    Heuristic::add_options_to_parser(parser);
    utils::add_rng_options(parser);
    Options opts = parser.parse();
    if (parser.dry_run())
        return nullptr;
    return make_shared<AdditiveCartesianHeuristic>(opts);
}

// Registers this heuristic under the name "cegar".
static Plugin<Evaluator> _plugin("cegar", _parse);
}
| 5,620 |
C++
| 35.5 | 80 | 0.637011 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/cegar/split_selector.h
|
#ifndef CEGAR_SPLIT_SELECTOR_H
#define CEGAR_SPLIT_SELECTOR_H
#include "../task_proxy.h"
#include <memory>
#include <vector>
namespace additive_heuristic {
class AdditiveHeuristic;
}
namespace utils {
class RandomNumberGenerator;
}
namespace cegar {
class AbstractState;
// Strategies for selecting a split in case there are multiple possibilities.
enum class PickSplit {
    RANDOM,
    // Number of values that land in the state whose h-value is probably raised.
    MIN_UNWANTED,
    MAX_UNWANTED,
    // Refinement: - (remaining_values / original_domain_size)
    MIN_REFINED,
    MAX_REFINED,
    // Compare the h^add(s_0) values of the facts.
    MIN_HADD,
    MAX_HADD
};

// A candidate refinement: split the given values of var_id off a state.
struct Split {
    const int var_id;
    const std::vector<int> values;

    Split(int var_id, std::vector<int> &&values)
        : var_id(var_id), values(move(values)) {
    }
};

/*
  Select split in case there are multiple possible splits.
*/
class SplitSelector {
    const std::shared_ptr<AbstractTask> task;
    const TaskProxy task_proxy;
    // Presumably only needed for the h^add strategies — confirm in the .cc file.
    std::unique_ptr<additive_heuristic::AdditiveHeuristic> additive_heuristic;
    const PickSplit pick;

    int get_num_unwanted_values(const AbstractState &state, const Split &split) const;
    double get_refinedness(const AbstractState &state, int var_id) const;
    int get_hadd_value(int var_id, int value) const;
    int get_min_hadd_value(int var_id, const std::vector<int> &values) const;
    int get_max_hadd_value(int var_id, const std::vector<int> &values) const;

    // Score a split according to the chosen strategy (higher is better).
    double rate_split(const AbstractState &state, const Split &split) const;

public:
    SplitSelector(const std::shared_ptr<AbstractTask> &task, PickSplit pick);
    ~SplitSelector();

    const Split &pick_split(
        const AbstractState &state,
        const std::vector<Split> &splits,
        utils::RandomNumberGenerator &rng) const;
};
}
#endif
| 1,869 |
C
| 23.933333 | 86 | 0.697699 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/cegar/transition_system.cc
|
#include "transition_system.h"
#include "abstract_state.h"
#include "transition.h"
#include "utils.h"
#include "../task_proxy.h"
#include "../task_utils/task_properties.h"
#include "../utils/logging.h"
#include <algorithm>
#include <map>
using namespace std;
namespace cegar {
// Collect each operator's preconditions, sorted by variable so that
// lookup_value can use the ordering for early exit.
static vector<vector<FactPair>> get_preconditions_by_operator(
    const OperatorsProxy &ops) {
    vector<vector<FactPair>> preconditions_by_operator;
    preconditions_by_operator.reserve(ops.size());
    for (OperatorProxy op : ops) {
        vector<FactPair> preconditions = task_properties::get_fact_pairs(op.get_preconditions());
        sort(preconditions.begin(), preconditions.end());
        preconditions_by_operator.push_back(move(preconditions));
    }
    return preconditions_by_operator;
}
// Compute the operator's postconditions: effects, plus preconditions on
// variables the operator does not change (effects overwrite preconditions).
static vector<FactPair> get_postconditions(
    const OperatorProxy &op) {
    // Use map to obtain sorted postconditions.
    map<int, int> var_to_post;
    for (FactProxy fact : op.get_preconditions()) {
        var_to_post[fact.get_variable().get_id()] = fact.get_value();
    }
    for (EffectProxy effect : op.get_effects()) {
        FactPair fact = effect.get_fact().get_pair();
        var_to_post[fact.var] = fact.value;
    }
    vector<FactPair> postconditions;
    postconditions.reserve(var_to_post.size());
    for (const pair<const int, int> &fact : var_to_post) {
        postconditions.emplace_back(fact.first, fact.second);
    }
    return postconditions;
}

// Postconditions for all operators, indexed by operator ID.
static vector<vector<FactPair>> get_postconditions_by_operator(
    const OperatorsProxy &ops) {
    vector<vector<FactPair>> postconditions_by_operator;
    postconditions_by_operator.reserve(ops.size());
    for (OperatorProxy op : ops) {
        postconditions_by_operator.push_back(get_postconditions(op));
    }
    return postconditions_by_operator;
}
static int lookup_value(const vector<FactPair> &facts, int var) {
assert(is_sorted(facts.begin(), facts.end()));
for (const FactPair &fact : facts) {
if (fact.var == var) {
return fact.value;
} else if (fact.var > var) {
return UNDEFINED;
}
}
return UNDEFINED;
}
// Erase all transitions leading to state_id (erase-remove idiom). The
// assert documents that callers only invoke this when at least one such
// transition exists.
static void remove_transitions_with_given_target(
    Transitions &transitions, int state_id) {
    auto new_end = remove_if(
        transitions.begin(), transitions.end(),
        [state_id](const Transition &t) {return t.target_id == state_id;});
    assert(new_end != transitions.end());
    transitions.erase(new_end, transitions.end());
}
// Start with the trivial one-state abstraction where every operator loops.
TransitionSystem::TransitionSystem(const OperatorsProxy &ops)
    : preconditions_by_operator(get_preconditions_by_operator(ops)),
      postconditions_by_operator(get_postconditions_by_operator(ops)),
      num_non_loops(0),
      num_loops(0) {
    add_loops_in_trivial_abstraction();
}

// Value that op_id requires for var, or UNDEFINED if unconstrained.
int TransitionSystem::get_precondition_value(int op_id, int var) const {
    return lookup_value(preconditions_by_operator[op_id], var);
}

// Value that var has after applying op_id, or UNDEFINED if untouched.
int TransitionSystem::get_postcondition_value(int op_id, int var) const {
    return lookup_value(postconditions_by_operator[op_id], var);
}

// Make room for one additional abstract state in all per-state vectors.
void TransitionSystem::enlarge_vectors_by_one() {
    int new_num_states = get_num_states() + 1;
    outgoing.resize(new_num_states);
    incoming.resize(new_num_states);
    loops.resize(new_num_states);
}

// In the one-state abstraction every operator is a self-loop.
void TransitionSystem::add_loops_in_trivial_abstraction() {
    assert(get_num_states() == 0);
    enlarge_vectors_by_one();
    int init_id = 0;
    for (int i = 0; i < get_num_operators(); ++i) {
        add_loop(init_id, i);
    }
}

// Record a non-loop transition src --op--> target in both adjacency lists.
void TransitionSystem::add_transition(int src_id, int op_id, int target_id) {
    assert(src_id != target_id);
    outgoing[src_id].emplace_back(op_id, target_id);
    incoming[target_id].emplace_back(op_id, src_id);
    ++num_non_loops;
}

// Record a self-loop (only the operator index is stored).
void TransitionSystem::add_loop(int state_id, int op_id) {
    assert(utils::in_bounds(state_id, loops));
    loops[state_id].push_back(op_id);
    ++num_loops;
}
void TransitionSystem::rewire_incoming_transitions(
    const Transitions &old_incoming, const AbstractStates &states,
    const AbstractState &v1, const AbstractState &v2, int var) {
    /* State v has been split into v1 and v2. Now for all transitions
       u->v we need to add transitions u->v1, u->v2, or both. */
    int v1_id = v1.get_id();
    int v2_id = v2.get_id();

    /* First drop the stale u->v entries from the predecessors' outgoing
       lists. NOTE: in an *incoming* transition, target_id stores the
       source state u. The removal matches against v1_id, which implies
       v1 reuses v's old ID — TODO(review): confirm at the call site. */
    unordered_set<int> updated_states;
    for (const Transition &transition : old_incoming) {
        int u_id = transition.target_id;
        bool is_new_state = updated_states.insert(u_id).second;
        if (is_new_state) {
            remove_transitions_with_given_target(outgoing[u_id], v1_id);
        }
    }
    num_non_loops -= old_incoming.size();

    // Re-add each old transition towards the child(ren) it can reach.
    for (const Transition &transition : old_incoming) {
        int op_id = transition.op_id;
        int u_id = transition.target_id;
        const AbstractState &u = *states[u_id];
        int post = get_postcondition_value(op_id, var);
        if (post == UNDEFINED) {
            // op has no precondition and no effect on var.
            bool u_and_v1_intersect = u.domain_subsets_intersect(v1, var);
            if (u_and_v1_intersect) {
                add_transition(u_id, op_id, v1_id);
            }
            /* If u and v1 don't intersect, we must add the other transition
               and can avoid an intersection test. */
            if (!u_and_v1_intersect || u.domain_subsets_intersect(v2, var)) {
                add_transition(u_id, op_id, v2_id);
            }
        } else if (v1.contains(var, post)) {
            // op can only end in v1.
            add_transition(u_id, op_id, v1_id);
        } else {
            // op can only end in v2.
            assert(v2.contains(var, post));
            add_transition(u_id, op_id, v2_id);
        }
    }
}
void TransitionSystem::rewire_outgoing_transitions(
    const Transitions &old_outgoing, const AbstractStates &states,
    const AbstractState &v1, const AbstractState &v2, int var) {
    /* State v has been split into v1 and v2. Now for all transitions
       v->w we need to add transitions v1->w, v2->w, or both. */
    int v1_id = v1.get_id();
    int v2_id = v2.get_id();

    // Drop the stale v->w entries from the successors' incoming lists.
    unordered_set<int> updated_states;
    for (const Transition &transition : old_outgoing) {
        int w_id = transition.target_id;
        bool is_new_state = updated_states.insert(w_id).second;
        if (is_new_state) {
            remove_transitions_with_given_target(incoming[w_id], v1_id);
        }
    }
    num_non_loops -= old_outgoing.size();

    // Re-add each old transition from the child(ren) it can start in.
    for (const Transition &transition : old_outgoing) {
        int op_id = transition.op_id;
        int w_id = transition.target_id;
        const AbstractState &w = *states[w_id];
        int pre = get_precondition_value(op_id, var);
        int post = get_postcondition_value(op_id, var);
        if (post == UNDEFINED) {
            assert(pre == UNDEFINED);
            // op has no precondition and no effect on var.
            bool v1_and_w_intersect = v1.domain_subsets_intersect(w, var);
            if (v1_and_w_intersect) {
                add_transition(v1_id, op_id, w_id);
            }
            /* If v1 and w don't intersect, we must add the other transition
               and can avoid an intersection test. */
            if (!v1_and_w_intersect || v2.domain_subsets_intersect(w, var)) {
                add_transition(v2_id, op_id, w_id);
            }
        } else if (pre == UNDEFINED) {
            // op has no precondition, but an effect on var.
            add_transition(v1_id, op_id, w_id);
            add_transition(v2_id, op_id, w_id);
        } else if (v1.contains(var, pre)) {
            // op can only start in v1.
            add_transition(v1_id, op_id, w_id);
        } else {
            // op can only start in v2.
            assert(v2.contains(var, pre));
            add_transition(v2_id, op_id, w_id);
        }
    }
}
void TransitionSystem::rewire_loops(
    const Loops &old_loops, const AbstractState &v1, const AbstractState &v2, int var) {
    /* State v has been split into v1 and v2. Now for all self-loops
       v->v we need to add one or two of the transitions v1->v1, v1->v2,
       v2->v1 and v2->v2. Case analysis on the operator's pre- and
       postcondition for the split variable. */
    int v1_id = v1.get_id();
    int v2_id = v2.get_id();
    for (int op_id : old_loops) {
        int pre = get_precondition_value(op_id, var);
        int post = get_postcondition_value(op_id, var);
        if (pre == UNDEFINED) {
            // op has no precondition on var --> it must start in v1 and v2.
            if (post == UNDEFINED) {
                // op has no effect on var --> it must end in v1 and v2.
                add_loop(v1_id, op_id);
                add_loop(v2_id, op_id);
            } else if (v2.contains(var, post)) {
                // op must end in v2.
                add_transition(v1_id, op_id, v2_id);
                add_loop(v2_id, op_id);
            } else {
                // op must end in v1.
                assert(v1.contains(var, post));
                add_loop(v1_id, op_id);
                add_transition(v2_id, op_id, v1_id);
            }
        } else if (v1.contains(var, pre)) {
            // op must start in v1.
            assert(post != UNDEFINED);
            if (v1.contains(var, post)) {
                // op must end in v1.
                add_loop(v1_id, op_id);
            } else {
                // op must end in v2.
                assert(v2.contains(var, post));
                add_transition(v1_id, op_id, v2_id);
            }
        } else {
            // op must start in v2.
            assert(v2.contains(var, pre));
            assert(post != UNDEFINED);
            if (v1.contains(var, post)) {
                // op must end in v1.
                add_transition(v2_id, op_id, v1_id);
            } else {
                // op must end in v2.
                assert(v2.contains(var, post));
                add_loop(v2_id, op_id);
            }
        }
    }
    // The add_loop calls above already incremented num_loops for kept loops.
    num_loops -= old_loops.size();
}
// Entry point called after each refinement: detach v's transitions, grow
// the per-state vectors, and redistribute the transitions among v1 and v2.
void TransitionSystem::rewire(
    const AbstractStates &states, int v_id,
    const AbstractState &v1, const AbstractState &v2, int var) {
    // Retrieve old transitions and make space for new transitions.
    Transitions old_incoming = move(incoming[v_id]);
    Transitions old_outgoing = move(outgoing[v_id]);
    Loops old_loops = move(loops[v_id]);
    enlarge_vectors_by_one();

    int v1_id = v1.get_id();
    int v2_id = v2.get_id();
    // Only used in assertions; silence warnings in release builds.
    utils::unused_variable(v1_id);
    utils::unused_variable(v2_id);
    assert(incoming[v1_id].empty() && outgoing[v1_id].empty() && loops[v1_id].empty());
    assert(incoming[v2_id].empty() && outgoing[v2_id].empty() && loops[v2_id].empty());

    // Remove old transitions and add new transitions.
    rewire_incoming_transitions(old_incoming, states, v1, v2, var);
    rewire_outgoing_transitions(old_outgoing, states, v1, v2, var);
    rewire_loops(old_loops, v1, v2, var);
}
const vector<Transitions> &TransitionSystem::get_incoming_transitions() const {
    return incoming;
}

const vector<Transitions> &TransitionSystem::get_outgoing_transitions() const {
    return outgoing;
}

const vector<Loops> &TransitionSystem::get_loops() const {
    return loops;
}

// All three per-state vectors always hold one entry per abstract state.
int TransitionSystem::get_num_states() const {
    assert(incoming.size() == outgoing.size());
    assert(loops.size() == outgoing.size());
    return outgoing.size();
}

int TransitionSystem::get_num_operators() const {
    return preconditions_by_operator.size();
}

int TransitionSystem::get_num_non_loops() const {
    return num_non_loops;
}

int TransitionSystem::get_num_loops() const {
    return num_loops;
}

// Log transition counts; in debug builds, cross-check the running
// counters against a full recount.
void TransitionSystem::print_statistics() const {
    int total_incoming_transitions = 0;
    int total_outgoing_transitions = 0;
    int total_loops = 0;
    for (int state_id = 0; state_id < get_num_states(); ++state_id) {
        total_incoming_transitions += incoming[state_id].size();
        total_outgoing_transitions += outgoing[state_id].size();
        total_loops += loops[state_id].size();
    }
    assert(total_outgoing_transitions == total_incoming_transitions);
    assert(get_num_loops() == total_loops);
    assert(get_num_non_loops() == total_outgoing_transitions);
    utils::g_log << "Looping transitions: " << total_loops << endl;
    utils::g_log << "Non-looping transitions: " << total_outgoing_transitions << endl;
}
}
| 12,321 |
C++
| 34.924198 | 97 | 0.60263 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/cegar/cartesian_set.cc
|
#include "cartesian_set.h"
#include <sstream>
using namespace std;
namespace cegar {
// Start with the full Cartesian set: every value of every variable allowed.
CartesianSet::CartesianSet(const vector<int> &domain_sizes) {
    domain_subsets.reserve(domain_sizes.size());
    for (int domain_size : domain_sizes) {
        Bitset domain(domain_size);
        domain.set();
        domain_subsets.push_back(move(domain));
    }
}

// Allow a single value for var.
void CartesianSet::add(int var, int value) {
    domain_subsets[var].set(value);
}

// Disallow a single value for var.
void CartesianSet::remove(int var, int value) {
    domain_subsets[var].reset(value);
}

// Restrict var to exactly one value.
void CartesianSet::set_single_value(int var, int value) {
    remove_all(var);
    add(var, value);
}

// Allow every value of var's domain.
void CartesianSet::add_all(int var) {
    domain_subsets[var].set();
}

// Disallow every value of var's domain.
void CartesianSet::remove_all(int var) {
    domain_subsets[var].reset();
}

// Number of values currently allowed for var.
int CartesianSet::count(int var) const {
    return domain_subsets[var].count();
}

// Do the two sets share at least one value for var?
bool CartesianSet::intersects(const CartesianSet &other, int var) const {
    return domain_subsets[var].intersects(other.domain_subsets[var]);
}

// True iff other's allowed values are contained in ours for every variable.
bool CartesianSet::is_superset_of(const CartesianSet &other) const {
    int num_vars = domain_subsets.size();
    for (int var = 0; var < num_vars; ++var) {
        if (!other.domain_subsets[var].is_subset_of(domain_subsets[var]))
            return false;
    }
    return true;
}
// Print only variables that are actually restricted (proper subsets of
// their domain), e.g. "<1={0,2},3={5}>"; unrestricted variables are omitted.
ostream &operator<<(ostream &os, const CartesianSet &cartesian_set) {
    int num_vars = cartesian_set.domain_subsets.size();
    string var_sep;
    os << "<";
    for (int var = 0; var < num_vars; ++var) {
        const Bitset &domain = cartesian_set.domain_subsets[var];
        vector<int> values;
        for (size_t value = 0; value < domain.size(); ++value) {
            if (domain[value])
                values.push_back(value);
        }
        // An empty subset would make the Cartesian set empty.
        assert(!values.empty());
        if (values.size() < domain.size()) {
            os << var_sep << var << "={";
            string value_sep;
            for (int value : values) {
                os << value_sep << value;
                value_sep = ",";
            }
            os << "}";
            var_sep = ",";
        }
    }
    return os << ">";
}
}
| 2,116 |
C++
| 25.135802 | 73 | 0.587429 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/cegar/transition.h
|
#ifndef CEGAR_TRANSITION_H
#define CEGAR_TRANSITION_H
#include <iostream>
namespace cegar {
/*
  A labeled edge of the abstract transition system: applying operator
  op_id leads to abstract state target_id. NOTE(review): in incoming-
  transition lists, target_id appears to store the *source* state (see
  TransitionSystem::add_transition) — keep that in mind when reading it.
*/
struct Transition {
    int op_id;
    int target_id;

    Transition(int op_id, int target_id)
        : op_id(op_id),
          target_id(target_id) {
    }

    bool operator==(const Transition &other) const {
        return op_id == other.op_id && target_id == other.target_id;
    }

    // Printed as "[op_id,target_id]".
    friend std::ostream &operator<<(std::ostream &os, const Transition &t) {
        return os << "[" << t.op_id << "," << t.target_id << "]";
    }
};
#endif
| 545 |
C
| 19.222222 | 76 | 0.577982 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/cegar/subtask_generators.cc
|
#include "subtask_generators.h"
#include "utils.h"
#include "utils_landmarks.h"
#include "../option_parser.h"
#include "../plugin.h"
#include "../heuristics/additive_heuristic.h"
#include "../landmarks/landmark_graph.h"
#include "../task_utils/task_properties.h"
#include "../tasks/domain_abstracted_task_factory.h"
#include "../tasks/modified_goals_task.h"
#include "../utils/logging.h"
#include "../utils/rng.h"
#include "../utils/rng_options.h"
#include <algorithm>
#include <cassert>
#include <iostream>
#include <string>
#include <unordered_set>
#include <vector>
using namespace std;
namespace cegar {
// Comparator ordering facts by increasing h^add value, computed once
// from the task's initial state at construction time.
class SortFactsByIncreasingHaddValues {
    // Can't store as unique_ptr since the class needs copy-constructor.
    shared_ptr<additive_heuristic::AdditiveHeuristic> hadd;

    int get_cost(const FactPair &fact) {
        return hadd->get_cost_for_cegar(fact.var, fact.value);
    }

public:
    explicit SortFactsByIncreasingHaddValues(
        const shared_ptr<AbstractTask> &task)
        : hadd(create_additive_heuristic(task)) {
        TaskProxy task_proxy(*task);
        hadd->compute_heuristic_for_cegar(task_proxy.get_initial_state());
    }

    bool operator()(const FactPair &a, const FactPair &b) {
        return get_cost(a) < get_cost(b);
    }
};
// Drop facts that already hold in the initial state (presumably because
// subtasks for already-satisfied facts are useless — confirm with callers).
static void remove_initial_state_facts(
    const TaskProxy &task_proxy, Facts &facts) {
    State initial_state = task_proxy.get_initial_state();
    facts.erase(remove_if(facts.begin(), facts.end(), [&](FactPair fact) {
        return initial_state[fact.var].get_value() == fact.value;
    }), facts.end());
}

// Reorder "facts" in place according to fact_order.
static void order_facts(
    const shared_ptr<AbstractTask> &task,
    FactOrder fact_order,
    vector<FactPair> &facts,
    utils::RandomNumberGenerator &rng) {
    utils::g_log << "Sort " << facts.size() << " facts" << endl;
    switch (fact_order) {
    case FactOrder::ORIGINAL:
        // Nothing to do.
        break;
    case FactOrder::RANDOM:
        rng.shuffle(facts);
        break;
    case FactOrder::HADD_UP:
    case FactOrder::HADD_DOWN:
        sort(facts.begin(), facts.end(), SortFactsByIncreasingHaddValues(task));
        if (fact_order == FactOrder::HADD_DOWN)
            reverse(facts.begin(), facts.end());
        break;
    default:
        // NOTE(review): the message says "task order" but fact_order is
        // the fact ordering strategy.
        cerr << "Invalid task order: " << static_cast<int>(fact_order) << endl;
        utils::exit_with(utils::ExitCode::SEARCH_INPUT_ERROR);
    }
}

// Remove trivially-satisfied facts, then order the rest. Mutates "facts"
// in place and returns a copy of the result.
static Facts filter_and_order_facts(
    const shared_ptr<AbstractTask> &task,
    FactOrder fact_order,
    Facts &facts,
    utils::RandomNumberGenerator &rng) {
    TaskProxy task_proxy(*task);
    remove_initial_state_facts(task_proxy, facts);
    order_facts(task, fact_order, facts, rng);
    return facts;
}
// "copies" controls how many times the original task is handed out.
TaskDuplicator::TaskDuplicator(const Options &opts)
    : num_copies(opts.get<int>("copies")) {
}
// Hand out num_copies references to the same (unchanged) task.
SharedTasks TaskDuplicator::get_subtasks(
    const shared_ptr<AbstractTask> &task) const {
    // The fill constructor copies the shared_ptr num_copies times.
    return SharedTasks(num_copies, task);
}
GoalDecomposition::GoalDecomposition(const Options &opts)
    : fact_order(opts.get<FactOrder>("order")),
      rng(utils::parse_rng_from_options(opts)) {
}

// Create one subtask per goal fact, each keeping only that single goal.
SharedTasks GoalDecomposition::get_subtasks(
    const shared_ptr<AbstractTask> &task) const {
    SharedTasks subtasks;
    TaskProxy task_proxy(*task);
    Facts goal_facts = task_properties::get_fact_pairs(task_proxy.get_goals());
    filter_and_order_facts(task, fact_order, goal_facts, *rng);
    for (const FactPair &goal : goal_facts) {
        shared_ptr<AbstractTask> subtask =
            make_shared<extra_tasks::ModifiedGoalsTask>(task, Facts {goal});
        subtasks.push_back(subtask);
    }
    return subtasks;
}
// Store the fact-ordering strategy, whether to combine predecessor
// landmark facts via domain abstraction, and the RNG.
LandmarkDecomposition::LandmarkDecomposition(const Options &opts)
    : fact_order(opts.get<FactOrder>("order")),
      combine_facts(opts.get<bool>("combine_facts")),
      rng(utils::parse_rng_from_options(opts)) {
}
/*
  Build a domain-abstracted version of "parent": for each variable,
  the values of landmarks that precede "fact" in the landmark graph
  are merged into one group (only when at least two values would be
  combined, since smaller groups abstract nothing).
*/
shared_ptr<AbstractTask> LandmarkDecomposition::build_domain_abstracted_task(
    const shared_ptr<AbstractTask> &parent,
    const landmarks::LandmarkGraph &landmark_graph,
    const FactPair &fact) const {
    // Only called when the "combine_facts" option is active.
    assert(combine_facts);
    extra_tasks::VarToGroups value_groups;
    for (auto &pair : get_prev_landmarks(landmark_graph, fact)) {
        int var = pair.first;
        vector<int> &group = pair.second;
        if (group.size() >= 2)
            value_groups[var].push_back(group);
    }
    return extra_tasks::build_domain_abstracted_task(parent, value_groups);
}
SharedTasks LandmarkDecomposition::get_subtasks(
    const shared_ptr<AbstractTask> &task) const {
    SharedTasks subtasks;
    // Compute fact landmarks (h^m graph with m=1, see get_landmark_graph),
    // drop those holding initially, and order the rest.
    shared_ptr<landmarks::LandmarkGraph> landmark_graph =
        get_landmark_graph(task);
    Facts landmark_facts = get_fact_landmarks(*landmark_graph);
    filter_and_order_facts(task, fact_order, landmark_facts, *rng);
    for (const FactPair &landmark : landmark_facts) {
        // Subtask: reach this single landmark fact.
        shared_ptr<AbstractTask> subtask =
            make_shared<extra_tasks::ModifiedGoalsTask>(task, Facts {landmark});
        if (combine_facts) {
            // Optionally coarsen the domains of predecessor landmarks.
            subtask = build_domain_abstracted_task(
                subtask, *landmark_graph, landmark);
        }
        subtasks.push_back(subtask);
    }
    return subtasks;
}
// Option-parser factory for TaskDuplicator ("copies" identical subtasks).
static shared_ptr<SubtaskGenerator> _parse_original(OptionParser &parser) {
    parser.add_option<int>(
        "copies",
        "number of task copies",
        "1",
        Bounds("1", "infinity"));
    Options opts = parser.parse();
    if (parser.dry_run())
        return nullptr;
    return make_shared<TaskDuplicator>(opts);
}
// Add the shared "order" enum option (plus RNG options) used by the
// goal and landmark decompositions.
static void add_fact_order_option(OptionParser &parser) {
    vector<string> fact_orders = {
        "ORIGINAL", "RANDOM", "HADD_UP", "HADD_DOWN"};
    parser.add_enum_option<FactOrder>(
        "order",
        fact_orders,
        "ordering of goal or landmark facts",
        "HADD_DOWN");
    utils::add_rng_options(parser);
}
// Option-parser factory for GoalDecomposition.
static shared_ptr<SubtaskGenerator> _parse_goals(OptionParser &parser) {
    add_fact_order_option(parser);
    Options opts = parser.parse();
    if (parser.dry_run())
        return nullptr;
    return make_shared<GoalDecomposition>(opts);
}
// Option-parser factory for LandmarkDecomposition.
static shared_ptr<SubtaskGenerator> _parse_landmarks(OptionParser &parser) {
    add_fact_order_option(parser);
    parser.add_option<bool>(
        "combine_facts",
        "combine landmark facts with domain abstraction",
        "true");
    Options opts = parser.parse();
    if (parser.dry_run())
        return nullptr;
    return make_shared<LandmarkDecomposition>(opts);
}
// Register the three generators and the SubtaskGenerator plugin type
// with the planner's plugin system.
static Plugin<SubtaskGenerator> _plugin_original(
    "original", _parse_original);
static Plugin<SubtaskGenerator> _plugin_goals(
    "goals", _parse_goals);
static Plugin<SubtaskGenerator> _plugin_landmarks(
    "landmarks", _parse_landmarks);
static PluginTypePlugin<SubtaskGenerator> _type_plugin(
    "SubtaskGenerator",
    "Subtask generator (used by the CEGAR heuristic).");
}
| 7,171 |
C++
| 30.318777 | 87 | 0.659601 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/cegar/abstract_state.h
|
#ifndef CEGAR_ABSTRACT_STATE_H
#define CEGAR_ABSTRACT_STATE_H
#include "cartesian_set.h"
#include "types.h"
#include <vector>
class ConditionsProxy;
struct FactPair;
class OperatorProxy;
class State;
class TaskProxy;
namespace cegar {
class Node;
/*
Store the Cartesian set and the ID of the node in the refinement hierarchy
for an abstract state.
*/
class AbstractState {
    // Unique ID of this abstract state; IDs are consecutive.
    int state_id;
    // This state's node in the refinement hierarchy.
    NodeID node_id;
    // For each variable, the subset of its domain covered by this state.
    CartesianSet cartesian_set;
public:
    AbstractState(int state_id, NodeID node_id, CartesianSet &&cartesian_set);
    // Abstract states are handled by reference/pointer only.
    AbstractState(const AbstractState &) = delete;
    bool domain_subsets_intersect(const AbstractState &other, int var) const;
    // Return the size of var's abstract domain for this state.
    int count(int var) const;
    bool contains(int var, int value) const;
    // Return the Cartesian set in which applying "op" can lead to this state.
    CartesianSet regress(const OperatorProxy &op) const;
    /*
      Separate the "wanted" values from the other values in the abstract domain
      and return the resulting two new Cartesian sets.
    */
    std::pair<CartesianSet, CartesianSet> split_domain(
        int var, const std::vector<int> &wanted) const;
    // Superset tests against another abstract state, a concrete state,
    // or a conjunction of facts.
    bool includes(const AbstractState &other) const;
    bool includes(const State &concrete_state) const;
    bool includes(const std::vector<FactPair> &facts) const;
    // IDs are consecutive, so they can be used to index states in vectors.
    int get_id() const;
    NodeID get_node_id() const;
    // Debug output: state ID followed by its Cartesian set.
    friend std::ostream &operator<<(std::ostream &os, const AbstractState &state) {
        return os << "#" << state.get_id() << state.cartesian_set;
    }
    // Create the initial, unrefined abstract state.
    static std::unique_ptr<AbstractState> get_trivial_abstract_state(
        const std::vector<int> &domain_sizes);
};
}
#endif
| 1,912 |
C
| 25.569444 | 83 | 0.700314 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/cegar/cartesian_heuristic_function.cc
|
#include "cartesian_heuristic_function.h"
#include "refinement_hierarchy.h"
#include "../utils/collections.h"
using namespace std;
namespace cegar {
// Take ownership of one abstraction's refinement hierarchy and its
// per-abstract-state heuristic values.
CartesianHeuristicFunction::CartesianHeuristicFunction(
    unique_ptr<RefinementHierarchy> &&hierarchy,
    vector<int> &&h_values)
    : refinement_hierarchy(move(hierarchy)),
      h_values(move(h_values)) {
}
// Map the concrete state to its abstract state via the refinement
// hierarchy and return the stored heuristic value for it.
int CartesianHeuristicFunction::get_value(const State &state) const {
    int abstract_state_id = refinement_hierarchy->get_abstract_state_id(state);
    assert(utils::in_bounds(abstract_state_id, h_values));
    return h_values[abstract_state_id];
}
}
| 620 |
C++
| 25.999999 | 79 | 0.737097 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/cegar/utils_landmarks.cc
|
#include "utils_landmarks.h"
#include "../option_parser.h"
#include "../landmarks/landmark_factory_h_m.h"
#include "../landmarks/landmark_graph.h"
#include "../utils/memory.h"
#include <algorithm>
using namespace std;
using namespace landmarks;
namespace cegar {
static FactPair get_fact(const LandmarkNode &node) {
/* We assume that the given LandmarkNodes are from an h^m landmark
graph with m=1. */
assert(node.facts.size() == 1);
return node.facts[0];
}
shared_ptr<LandmarkGraph> get_landmark_graph(const shared_ptr<AbstractTask> &task) {
Options hm_opts;
hm_opts.set<int>("m", 1);
hm_opts.set<bool>("only_causal_landmarks", false);
hm_opts.set<bool>("conjunctive_landmarks", false);
hm_opts.set<bool>("use_orders", true);
LandmarkFactoryHM lm_graph_factory(hm_opts);
return lm_graph_factory.compute_lm_graph(task);
}
vector<FactPair> get_fact_landmarks(const LandmarkGraph &graph) {
    // Extract the single fact of every landmark node, sorted for
    // deterministic downstream processing.
    const LandmarkGraph::Nodes &nodes = graph.get_nodes();
    vector<FactPair> facts;
    facts.reserve(nodes.size());
    for (const auto &node : nodes)
        facts.push_back(get_fact(*node));
    sort(facts.begin(), facts.end());
    return facts;
}
/*
  Collect all ancestor landmarks of "fact" in the landmark graph and
  group their values by variable. Traversal is an iterative depth-first
  search over the "parents" edges; the closed set guards against
  revisiting shared ancestors.
*/
VarToValues get_prev_landmarks(const LandmarkGraph &graph, const FactPair &fact) {
    VarToValues groups;
    LandmarkNode *node = graph.get_landmark(fact);
    assert(node);
    vector<const LandmarkNode *> open;
    unordered_set<const LandmarkNode *> closed;
    // Seed the open list with the direct parents of "fact".
    for (const auto &parent_and_edge : node->parents) {
        const LandmarkNode *parent = parent_and_edge.first;
        open.push_back(parent);
    }
    while (!open.empty()) {
        const LandmarkNode *ancestor = open.back();
        open.pop_back();
        if (closed.find(ancestor) != closed.end())
            continue;
        closed.insert(ancestor);
        // Record the ancestor's fact, grouped by variable.
        FactPair ancestor_fact = get_fact(*ancestor);
        groups[ancestor_fact.var].push_back(ancestor_fact.value);
        for (const auto &parent_and_edge : ancestor->parents) {
            const LandmarkNode *parent = parent_and_edge.first;
            open.push_back(parent);
        }
    }
    return groups;
}
}
| 2,153 |
C++
| 29.338028 | 84 | 0.653971 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/cegar/cartesian_set.h
|
#ifndef CEGAR_CARTESIAN_SET_H
#define CEGAR_CARTESIAN_SET_H
#include "../algorithms/dynamic_bitset.h"
#include <ostream>
#include <vector>
namespace cegar {
using Bitset = dynamic_bitset::DynamicBitset<unsigned short>;
/*
For each variable store a subset of its domain.
The underlying data structure is a vector of bitsets.
*/
class CartesianSet {
std::vector<Bitset> domain_subsets;
public:
explicit CartesianSet(const std::vector<int> &domain_sizes);
void add(int var, int value);
void set_single_value(int var, int value);
void remove(int var, int value);
void add_all(int var);
void remove_all(int var);
bool test(int var, int value) const {
return domain_subsets[var][value];
}
int count(int var) const;
bool intersects(const CartesianSet &other, int var) const;
bool is_superset_of(const CartesianSet &other) const;
friend std::ostream &operator<<(
std::ostream &os, const CartesianSet &cartesian_set);
};
}
#endif
| 1,004 |
C
| 22.372093 | 64 | 0.693227 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/cegar/subtask_generators.h
|
#ifndef CEGAR_SUBTASK_GENERATORS_H
#define CEGAR_SUBTASK_GENERATORS_H
#include <memory>
#include <vector>
class AbstractTask;
struct FactPair;
namespace landmarks {
class LandmarkGraph;
}
namespace options {
class Options;
}
namespace utils {
class RandomNumberGenerator;
}
namespace cegar {
using Facts = std::vector<FactPair>;
using SharedTasks = std::vector<std::shared_ptr<AbstractTask>>;
enum class FactOrder {
ORIGINAL,
RANDOM,
HADD_UP,
HADD_DOWN
};
/*
Create focused subtasks.
*/
class SubtaskGenerator {
public:
virtual SharedTasks get_subtasks(
const std::shared_ptr<AbstractTask> &task) const = 0;
virtual ~SubtaskGenerator() = default;
};
/*
Return copies of the original task.
*/
class TaskDuplicator : public SubtaskGenerator {
int num_copies;
public:
explicit TaskDuplicator(const options::Options &opts);
virtual SharedTasks get_subtasks(
const std::shared_ptr<AbstractTask> &task) const override;
};
/*
Use ModifiedGoalsTask to return a subtask for each goal fact.
*/
class GoalDecomposition : public SubtaskGenerator {
FactOrder fact_order;
std::shared_ptr<utils::RandomNumberGenerator> rng;
public:
explicit GoalDecomposition(const options::Options &opts);
virtual SharedTasks get_subtasks(
const std::shared_ptr<AbstractTask> &task) const override;
};
/*
Nest ModifiedGoalsTask and DomainAbstractedTask to return subtasks
focussing on a single landmark fact.
*/
class LandmarkDecomposition : public SubtaskGenerator {
FactOrder fact_order;
bool combine_facts;
std::shared_ptr<utils::RandomNumberGenerator> rng;
/* Perform domain abstraction by combining facts that have to be
achieved before a given landmark can be made true. */
std::shared_ptr<AbstractTask> build_domain_abstracted_task(
const std::shared_ptr<AbstractTask> &parent,
const landmarks::LandmarkGraph &landmark_graph,
const FactPair &fact) const;
public:
explicit LandmarkDecomposition(const options::Options &opts);
virtual SharedTasks get_subtasks(
const std::shared_ptr<AbstractTask> &task) const override;
};
}
#endif
| 2,182 |
C
| 21.050505 | 68 | 0.725023 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/cegar/cost_saturation.h
|
#ifndef CEGAR_COST_SATURATION_H
#define CEGAR_COST_SATURATION_H
#include "refinement_hierarchy.h"
#include "split_selector.h"
#include <memory>
#include <vector>
namespace utils {
class CountdownTimer;
class Duration;
class RandomNumberGenerator;
}
namespace cegar {
class CartesianHeuristicFunction;
class SubtaskGenerator;
/*
Get subtasks from SubtaskGenerators, reduce their costs by wrapping
them in ModifiedOperatorCostsTasks, compute Abstractions, move
RefinementHierarchies from Abstractions to
CartesianHeuristicFunctions, allow extracting
CartesianHeuristicFunctions into AdditiveCartesianHeuristic.
*/
class CostSaturation {
const std::vector<std::shared_ptr<SubtaskGenerator>> subtask_generators;
const int max_states;
const int max_non_looping_transitions;
const double max_time;
const bool use_general_costs;
const PickSplit pick_split;
utils::RandomNumberGenerator &rng;
const bool debug;
std::vector<CartesianHeuristicFunction> heuristic_functions;
std::vector<int> remaining_costs;
int num_abstractions;
int num_states;
int num_non_looping_transitions;
void reset(const TaskProxy &task_proxy);
void reduce_remaining_costs(const std::vector<int> &saturated_costs);
std::shared_ptr<AbstractTask> get_remaining_costs_task(
std::shared_ptr<AbstractTask> &parent) const;
bool state_is_dead_end(const State &state) const;
void build_abstractions(
const std::vector<std::shared_ptr<AbstractTask>> &subtasks,
const utils::CountdownTimer &timer,
std::function<bool()> should_abort);
void print_statistics(utils::Duration init_time) const;
public:
CostSaturation(
const std::vector<std::shared_ptr<SubtaskGenerator>> &subtask_generators,
int max_states,
int max_non_looping_transitions,
double max_time,
bool use_general_costs,
PickSplit pick_split,
utils::RandomNumberGenerator &rng,
bool debug);
std::vector<CartesianHeuristicFunction> generate_heuristic_functions(
const std::shared_ptr<AbstractTask> &task);
};
}
#endif
| 2,138 |
C
| 29.12676 | 81 | 0.731057 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/cegar/cost_saturation.cc
|
#include "cost_saturation.h"
#include "abstract_state.h"
#include "abstraction.h"
#include "cartesian_heuristic_function.h"
#include "cegar.h"
#include "refinement_hierarchy.h"
#include "subtask_generators.h"
#include "transition_system.h"
#include "utils.h"
#include "../task_utils/task_properties.h"
#include "../tasks/modified_operator_costs_task.h"
#include "../utils/countdown_timer.h"
#include "../utils/logging.h"
#include "../utils/memory.h"
#include <algorithm>
#include <cassert>
using namespace std;
namespace cegar {
/*
We reserve some memory to be able to recover from out-of-memory
situations gracefully. When the memory runs out, we stop refining and
start the next refinement or the search. Due to memory fragmentation
the memory used for building the abstraction (states, transitions,
etc.) often can't be reused for things that require big continuous
blocks of memory. It is for this reason that we require such a large
amount of memory padding.
*/
static const int memory_padding_in_mb = 75;
static vector<int> compute_saturated_costs(
const TransitionSystem &transition_system,
const vector<int> &g_values,
const vector<int> &h_values,
bool use_general_costs) {
const int min_cost = use_general_costs ? -INF : 0;
vector<int> saturated_costs(transition_system.get_num_operators(), min_cost);
assert(g_values.size() == h_values.size());
int num_states = h_values.size();
for (int state_id = 0; state_id < num_states; ++state_id) {
int g = g_values[state_id];
int h = h_values[state_id];
/*
No need to maintain goal distances of unreachable (g == INF)
and dead end states (h == INF).
Note that the "succ_h == INF" test below is sufficient for
ignoring dead end states. The "h == INF" test is a speed
optimization.
*/
if (g == INF || h == INF)
continue;
for (const Transition &transition:
transition_system.get_outgoing_transitions()[state_id]) {
int op_id = transition.op_id;
int succ_id = transition.target_id;
int succ_h = h_values[succ_id];
if (succ_h == INF)
continue;
int needed = h - succ_h;
saturated_costs[op_id] = max(saturated_costs[op_id], needed);
}
if (use_general_costs) {
/* To prevent negative cost cycles, all operators inducing
self-loops must have non-negative costs. */
for (int op_id : transition_system.get_loops()[state_id]) {
saturated_costs[op_id] = max(saturated_costs[op_id], 0);
}
}
}
return saturated_costs;
}
CostSaturation::CostSaturation(
const vector<shared_ptr<SubtaskGenerator>> &subtask_generators,
int max_states,
int max_non_looping_transitions,
double max_time,
bool use_general_costs,
PickSplit pick_split,
utils::RandomNumberGenerator &rng,
bool debug)
: subtask_generators(subtask_generators),
max_states(max_states),
max_non_looping_transitions(max_non_looping_transitions),
max_time(max_time),
use_general_costs(use_general_costs),
pick_split(pick_split),
rng(rng),
debug(debug),
num_abstractions(0),
num_states(0),
num_non_looping_transitions(0) {
}
vector<CartesianHeuristicFunction> CostSaturation::generate_heuristic_functions(
const shared_ptr<AbstractTask> &task) {
// For simplicity this is a member object. Make sure it is in a valid state.
assert(heuristic_functions.empty());
utils::CountdownTimer timer(max_time);
TaskProxy task_proxy(*task);
task_properties::verify_no_axioms(task_proxy);
task_properties::verify_no_conditional_effects(task_proxy);
reset(task_proxy);
State initial_state = TaskProxy(*task).get_initial_state();
function<bool()> should_abort =
[&] () {
return num_states >= max_states ||
num_non_looping_transitions >= max_non_looping_transitions ||
timer.is_expired() ||
!utils::extra_memory_padding_is_reserved() ||
state_is_dead_end(initial_state);
};
utils::reserve_extra_memory_padding(memory_padding_in_mb);
for (const shared_ptr<SubtaskGenerator> &subtask_generator : subtask_generators) {
SharedTasks subtasks = subtask_generator->get_subtasks(task);
build_abstractions(subtasks, timer, should_abort);
if (should_abort())
break;
}
if (utils::extra_memory_padding_is_reserved())
utils::release_extra_memory_padding();
print_statistics(timer.get_elapsed_time());
vector<CartesianHeuristicFunction> functions;
swap(heuristic_functions, functions);
return functions;
}
void CostSaturation::reset(const TaskProxy &task_proxy) {
remaining_costs = task_properties::get_operator_costs(task_proxy);
num_abstractions = 0;
num_states = 0;
}
void CostSaturation::reduce_remaining_costs(
const vector<int> &saturated_costs) {
assert(remaining_costs.size() == saturated_costs.size());
for (size_t i = 0; i < remaining_costs.size(); ++i) {
int &remaining = remaining_costs[i];
const int &saturated = saturated_costs[i];
assert(saturated <= remaining);
/* Since we ignore transitions from states s with h(s)=INF, all
saturated costs (h(s)-h(s')) are finite or -INF. */
assert(saturated != INF);
if (remaining == INF) {
// INF - x = INF for finite values x.
} else if (saturated == -INF) {
remaining = INF;
} else {
remaining -= saturated;
}
assert(remaining >= 0);
}
}
shared_ptr<AbstractTask> CostSaturation::get_remaining_costs_task(
shared_ptr<AbstractTask> &parent) const {
vector<int> costs = remaining_costs;
return make_shared<extra_tasks::ModifiedOperatorCostsTask>(
parent, move(costs));
}
bool CostSaturation::state_is_dead_end(const State &state) const {
for (const CartesianHeuristicFunction &function : heuristic_functions) {
if (function.get_value(state) == INF)
return true;
}
return false;
}
void CostSaturation::build_abstractions(
const vector<shared_ptr<AbstractTask>> &subtasks,
const utils::CountdownTimer &timer,
function<bool()> should_abort) {
int rem_subtasks = subtasks.size();
for (shared_ptr<AbstractTask> subtask : subtasks) {
subtask = get_remaining_costs_task(subtask);
assert(num_states < max_states);
CEGAR cegar(
subtask,
max(1, (max_states - num_states) / rem_subtasks),
max(1, (max_non_looping_transitions - num_non_looping_transitions) /
rem_subtasks),
timer.get_remaining_time() / rem_subtasks,
pick_split,
rng,
debug);
unique_ptr<Abstraction> abstraction = cegar.extract_abstraction();
++num_abstractions;
num_states += abstraction->get_num_states();
num_non_looping_transitions += abstraction->get_transition_system().get_num_non_loops();
assert(num_states <= max_states);
vector<int> costs = task_properties::get_operator_costs(TaskProxy(*subtask));
vector<int> init_distances = compute_distances(
abstraction->get_transition_system().get_outgoing_transitions(),
costs,
{abstraction->get_initial_state().get_id()});
vector<int> goal_distances = compute_distances(
abstraction->get_transition_system().get_incoming_transitions(),
costs,
abstraction->get_goals());
vector<int> saturated_costs = compute_saturated_costs(
abstraction->get_transition_system(),
init_distances,
goal_distances,
use_general_costs);
heuristic_functions.emplace_back(
abstraction->extract_refinement_hierarchy(),
move(goal_distances));
reduce_remaining_costs(saturated_costs);
if (should_abort())
break;
--rem_subtasks;
}
}
void CostSaturation::print_statistics(utils::Duration init_time) const {
utils::g_log << "Done initializing additive Cartesian heuristic" << endl;
utils::g_log << "Time for initializing additive Cartesian heuristic: "
<< init_time << endl;
utils::g_log << "Cartesian abstractions built: " << num_abstractions << endl;
utils::g_log << "Cartesian states: " << num_states << endl;
utils::g_log << "Total number of non-looping transitions: "
<< num_non_looping_transitions << endl;
utils::g_log << endl;
}
}
| 8,810 |
C++
| 33.688976 | 96 | 0.628263 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/cegar/abstract_state.cc
|
#include "abstract_state.h"
#include "refinement_hierarchy.h"
#include "utils.h"
#include "../utils/memory.h"
#include <algorithm>
#include <cassert>
#include <unordered_set>
using namespace std;
namespace cegar {
AbstractState::AbstractState(
int state_id, NodeID node_id, CartesianSet &&cartesian_set)
: state_id(state_id),
node_id(node_id),
cartesian_set(move(cartesian_set)) {
}
int AbstractState::count(int var) const {
return cartesian_set.count(var);
}
bool AbstractState::contains(int var, int value) const {
return cartesian_set.test(var, value);
}
pair<CartesianSet, CartesianSet> AbstractState::split_domain(
int var, const vector<int> &wanted) const {
int num_wanted = wanted.size();
utils::unused_variable(num_wanted);
// We can only refine for variables with at least two values.
assert(num_wanted >= 1);
assert(cartesian_set.count(var) > num_wanted);
CartesianSet v1_cartesian_set(cartesian_set);
CartesianSet v2_cartesian_set(cartesian_set);
v2_cartesian_set.remove_all(var);
for (int value : wanted) {
// The wanted value has to be in the set of possible values.
assert(cartesian_set.test(var, value));
// In v1 var can have all of the previous values except the wanted ones.
v1_cartesian_set.remove(var, value);
// In v2 var can only have the wanted values.
v2_cartesian_set.add(var, value);
}
assert(v1_cartesian_set.count(var) == cartesian_set.count(var) - num_wanted);
assert(v2_cartesian_set.count(var) == num_wanted);
return make_pair(v1_cartesian_set, v2_cartesian_set);
}
CartesianSet AbstractState::regress(const OperatorProxy &op) const {
CartesianSet regression = cartesian_set;
for (EffectProxy effect : op.get_effects()) {
int var_id = effect.get_fact().get_variable().get_id();
regression.add_all(var_id);
}
for (FactProxy precondition : op.get_preconditions()) {
int var_id = precondition.get_variable().get_id();
regression.set_single_value(var_id, precondition.get_value());
}
return regression;
}
bool AbstractState::domain_subsets_intersect(const AbstractState &other, int var) const {
return cartesian_set.intersects(other.cartesian_set, var);
}
bool AbstractState::includes(const State &concrete_state) const {
for (FactProxy fact : concrete_state) {
if (!cartesian_set.test(fact.get_variable().get_id(), fact.get_value()))
return false;
}
return true;
}
bool AbstractState::includes(const vector<FactPair> &facts) const {
for (const FactPair &fact : facts) {
if (!cartesian_set.test(fact.var, fact.value))
return false;
}
return true;
}
bool AbstractState::includes(const AbstractState &other) const {
return cartesian_set.is_superset_of(other.cartesian_set);
}
int AbstractState::get_id() const {
return state_id;
}
NodeID AbstractState::get_node_id() const {
return node_id;
}
unique_ptr<AbstractState> AbstractState::get_trivial_abstract_state(
const vector<int> &domain_sizes) {
return utils::make_unique_ptr<AbstractState>(0, 0, CartesianSet(domain_sizes));
}
}
| 3,202 |
C++
| 28.934579 | 89 | 0.681137 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/cegar/refinement_hierarchy.cc
|
#include "refinement_hierarchy.h"
#include "../task_proxy.h"
using namespace std;
namespace cegar {
Node::Node(int state_id)
: left_child(UNDEFINED),
right_child(UNDEFINED),
var(UNDEFINED),
value(UNDEFINED),
state_id(state_id) {
assert(state_id != UNDEFINED);
assert(!is_split());
}
bool Node::information_is_valid() const {
return (left_child == UNDEFINED && right_child == UNDEFINED &&
var == UNDEFINED && value == UNDEFINED && state_id != UNDEFINED) ||
(left_child != UNDEFINED && right_child != UNDEFINED &&
var != UNDEFINED && value != UNDEFINED && state_id == UNDEFINED);
}
bool Node::is_split() const {
assert(information_is_valid());
return left_child != UNDEFINED;
}
void Node::split(int var, int value, NodeID left_child, NodeID right_child) {
this->var = var;
this->value = value;
this->left_child = left_child;
this->right_child = right_child;
state_id = UNDEFINED;
assert(is_split());
}
ostream &operator<<(ostream &os, const Node &node) {
return os << "<Node: var=" << node.var << " value=" << node.value
<< " state=" << node.state_id << " left=" << node.left_child
<< " right=" << node.right_child << ">";
}
RefinementHierarchy::RefinementHierarchy(const shared_ptr<AbstractTask> &task)
: task(task) {
nodes.emplace_back(0);
}
NodeID RefinementHierarchy::add_node(int state_id) {
NodeID node_id = nodes.size();
nodes.emplace_back(state_id);
return node_id;
}
// Descend from the root, at each split node following the child that
// matches the state's value for the split variable, until reaching a leaf.
NodeID RefinementHierarchy::get_node_id(const State &state) const {
    NodeID id = 0;
    while (nodes[id].is_split()) {
        const Node &node = nodes[id];
        id = node.get_child(state[node.get_var()].get_value());
    }
    return id;
}
/*
  Turn leaf "node_id" into a chain of helper nodes, one per value in
  "values", all splitting on "var". Each helper routes its value towards
  the shared right child (presumably the state holding the wanted
  values — confirm against Node::get_child); the final helper represents
  the left state with the remaining values. Returns the pair
  (last helper node, right child node).
*/
pair<NodeID, NodeID> RefinementHierarchy::split(
    NodeID node_id, int var, const vector<int> &values, int left_state_id, int right_state_id) {
    NodeID helper_id = node_id;
    NodeID right_child_id = add_node(right_state_id);
    for (int value : values) {
        NodeID new_helper_id = add_node(left_state_id);
        nodes[helper_id].split(var, value, new_helper_id, right_child_id);
        helper_id = new_helper_id;
    }
    return make_pair(helper_id, right_child_id);
}
int RefinementHierarchy::get_abstract_state_id(const State &state) const {
TaskProxy subtask_proxy(*task);
State subtask_state = subtask_proxy.convert_ancestor_state(state);
return nodes[get_node_id(subtask_state)].get_state_id();
}
}
| 2,528 |
C++
| 28.406976 | 96 | 0.627769 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/cegar/utils.cc
|
#include "utils.h"
#include "../option_parser.h"
#include "../heuristics/additive_heuristic.h"
#include "../task_utils/task_properties.h"
#include "../utils/memory.h"
#include <algorithm>
#include <cassert>
#include <unordered_map>
using namespace std;
namespace cegar {
unique_ptr<additive_heuristic::AdditiveHeuristic> create_additive_heuristic(
const shared_ptr<AbstractTask> &task) {
Options opts;
opts.set<shared_ptr<AbstractTask>>("transform", task);
opts.set<bool>("cache_estimates", false);
return utils::make_unique_ptr<additive_heuristic::AdditiveHeuristic>(opts);
}
static bool operator_applicable(
    const OperatorProxy &op, const utils::HashSet<FactProxy> &facts) {
    // Applicable iff every precondition fact is in the given fact set.
    const auto preconditions = op.get_preconditions();
    return all_of(
        preconditions.begin(), preconditions.end(),
        [&facts](const FactProxy &precondition) {
            return facts.count(precondition) != 0;
        });
}
static bool operator_achieves_fact(
const OperatorProxy &op, const FactProxy &fact) {
for (EffectProxy effect : op.get_effects()) {
if (effect.get_fact() == fact)
return true;
}
return false;
}
static utils::HashSet<FactProxy> compute_possibly_before_facts(
const TaskProxy &task, const FactProxy &last_fact) {
utils::HashSet<FactProxy> pb_facts;
// Add facts from initial state.
for (FactProxy fact : task.get_initial_state())
pb_facts.insert(fact);
// Until no more facts can be added:
size_t last_num_reached = 0;
/*
Note: This can be done more efficiently by maintaining the number
of unsatisfied preconditions for each operator and a queue of
unhandled effects.
TODO: Find out if this code is time critical, and change it if it
is.
*/
while (last_num_reached != pb_facts.size()) {
last_num_reached = pb_facts.size();
for (OperatorProxy op : task.get_operators()) {
// Ignore operators that achieve last_fact.
if (operator_achieves_fact(op, last_fact))
continue;
// Add all facts that are achieved by an applicable operator.
if (operator_applicable(op, pb_facts)) {
for (EffectProxy effect : op.get_effects()) {
pb_facts.insert(effect.get_fact());
}
}
}
}
return pb_facts;
}
utils::HashSet<FactProxy> get_relaxed_possible_before(
const TaskProxy &task, const FactProxy &fact) {
utils::HashSet<FactProxy> reachable_facts =
compute_possibly_before_facts(task, fact);
reachable_facts.insert(fact);
return reachable_facts;
}
vector<int> get_domain_sizes(const TaskProxy &task) {
    // Collect each variable's domain size, indexed by variable ID.
    VariablesProxy variables = task.get_variables();
    vector<int> domain_sizes;
    domain_sizes.reserve(variables.size());
    for (VariableProxy var : variables)
        domain_sizes.push_back(var.get_domain_size());
    return domain_sizes;
}
}
| 2,807 |
C++
| 29.521739 | 79 | 0.646598 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/cegar/utils.h
|
#ifndef CEGAR_UTILS_H
#define CEGAR_UTILS_H
#include "../task_proxy.h"
#include "../utils/hash.h"
#include <memory>
#include <unordered_set>
#include <utility>
#include <vector>
class AbstractTask;
namespace additive_heuristic {
class AdditiveHeuristic;
}
namespace cegar {
extern std::unique_ptr<additive_heuristic::AdditiveHeuristic>
create_additive_heuristic(const std::shared_ptr<AbstractTask> &task);
/*
The set of relaxed-reachable facts is the possibly-before set of facts that
can be reached in the delete-relaxation before 'fact' is reached the first
time, plus 'fact' itself.
*/
extern utils::HashSet<FactProxy> get_relaxed_possible_before(
const TaskProxy &task, const FactProxy &fact);
extern std::vector<int> get_domain_sizes(const TaskProxy &task);
}
/*
TODO: Our proxy classes are meant to be temporary objects and as such
shouldn't be stored in containers. Once we find a way to avoid
storing them in containers, we should remove this hashing function.
*/
namespace utils {
inline void feed(HashState &hash_state, const FactProxy &fact) {
feed(hash_state, fact.get_pair());
}
}
#endif
| 1,131 |
C
| 23.608695 | 77 | 0.747126 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/cegar/abstract_search.h
|
#ifndef CEGAR_ABSTRACT_SEARCH_H
#define CEGAR_ABSTRACT_SEARCH_H

#include "transition.h"
#include "types.h"

#include "../algorithms/priority_queues.h"

/* Include what we use: this header relies on assert, numeric_limits and
   unordered_set directly, so it must not depend on transitive includes. */
#include <cassert>
#include <deque>
#include <limits>
#include <memory>
#include <unordered_set>
#include <vector>

namespace cegar {
using Solution = std::deque<Transition>;

/*
  Find abstract solutions using A*.
*/
class AbstractSearch {
    /* Per-state search bookkeeping: g- and h-value plus the incoming
       transition, which is needed to extract the solution path. */
    class AbstractSearchInfo {
        int g;
        int h;
        Transition incoming_transition;
    public:
        AbstractSearchInfo()
            : h(0),
              incoming_transition(UNDEFINED, UNDEFINED) {
            reset();
        }

        // Forget g-value and incoming transition; keep h (h-values only grow).
        void reset() {
            g = std::numeric_limits<int>::max();
            incoming_transition = Transition(UNDEFINED, UNDEFINED);
        }

        void decrease_g_value_to(int new_g) {
            assert(new_g <= g);
            g = new_g;
        }

        int get_g_value() const {
            return g;
        }

        void increase_h_value_to(int new_h) {
            assert(new_h >= h);
            h = new_h;
        }

        int get_h_value() const {
            return h;
        }

        void set_incoming_transition(const Transition &transition) {
            incoming_transition = transition;
        }

        const Transition &get_incoming_transition() const {
            assert(incoming_transition.op_id != UNDEFINED &&
                   incoming_transition.target_id != UNDEFINED);
            return incoming_transition;
        }
    };

    const std::vector<int> operator_costs;

    // Keep data structures around to avoid reallocating them.
    priority_queues::AdaptiveQueue<int> open_queue;
    std::vector<AbstractSearchInfo> search_info;

    void reset(int num_states);
    void set_h_value(int state_id, int h);
    std::unique_ptr<Solution> extract_solution(int init_id, int goal_id) const;
    void update_goal_distances(const Solution &solution);
    int astar_search(
        const std::vector<Transitions> &transitions,
        const Goals &goals);

public:
    explicit AbstractSearch(const std::vector<int> &operator_costs);

    // Return an abstract path from init_id to one of goal_ids, or nullptr
    // if no goal state is reachable.
    std::unique_ptr<Solution> find_solution(
        const std::vector<Transitions> &transitions,
        int init_id,
        const Goals &goal_ids);

    int get_h_value(int state_id) const;
    void copy_h_value_to_children(int v, int v1, int v2);
};

// Compute goal distances from start_ids over the given transitions.
std::vector<int> compute_distances(
    const std::vector<Transitions> &transitions,
    const std::vector<int> &costs,
    const std::unordered_set<int> &start_ids);
}

#endif
| 2,494 |
C
| 24.721649 | 79 | 0.603047 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/cegar/split_selector.cc
|
#include "split_selector.h"
#include "abstract_state.h"
#include "utils.h"
#include "../heuristics/additive_heuristic.h"
#include "../utils/logging.h"
#include "../utils/rng.h"
#include <cassert>
#include <iostream>
#include <limits>
using namespace std;
namespace cegar {
// Precompute additive-heuristic values only for the pick strategies that
// actually consult them (MIN_HADD / MAX_HADD).
SplitSelector::SplitSelector(
    const shared_ptr<AbstractTask> &task,
    PickSplit pick)
    : task(task),
      task_proxy(*task),
      pick(pick) {
    if (pick == PickSplit::MIN_HADD || pick == PickSplit::MAX_HADD) {
        additive_heuristic = create_additive_heuristic(task);
        additive_heuristic->compute_heuristic_for_cegar(
            task_proxy.get_initial_state());
    }
}
// Define here to avoid include in header.
// (Presumably the additive_heuristic member's type is incomplete in the
// header, so its destructor must be instantiated here — confirm.)
SplitSelector::~SplitSelector() {
}
// Return how many values the split removes from the state's domain of
// split.var_id, i.e., the values that are *not* kept.
int SplitSelector::get_num_unwanted_values(
    const AbstractState &state, const Split &split) const {
    /* Cast the unsigned size() to int before subtracting so the arithmetic
       stays in signed ints. The original "int - size_t" promoted the
       subtraction to unsigned, which would wrap around instead of yielding
       a (detectably negative) value if values.size() ever exceeded count. */
    int num_unwanted_values =
        state.count(split.var_id) - static_cast<int>(split.values.size());
    assert(num_unwanted_values >= 1);
    return num_unwanted_values;
}
// Rate a variable by how much of its domain the abstract state still
// contains: results lie in [-1, 0), and values closer to 0 indicate a
// less refined (relatively larger) remaining domain.
double SplitSelector::get_refinedness(const AbstractState &state, int var_id) const {
    const double domain_size = task_proxy.get_variables()[var_id].get_domain_size();
    assert(domain_size >= 2);
    const double active_values = state.count(var_id);
    assert(2 <= active_values && active_values <= domain_size);
    const double result = -(active_values / domain_size);
    assert(-1.0 <= result && result < 0.0);
    return result;
}
// Look up the additive-heuristic cost of the fact var_id=value.
int SplitSelector::get_hadd_value(int var_id, int value) const {
    assert(additive_heuristic);
    int hadd = additive_heuristic->get_cost_for_cegar(var_id, value);
    // -1 would mean the fact is unreachable in the relaxation.
    assert(hadd != -1);
    return hadd;
}
// Return the smallest additive-heuristic value among the given values of
// the variable var_id.
int SplitSelector::get_min_hadd_value(int var_id, const vector<int> &values) const {
    int result = numeric_limits<int>::max();
    for (int value : values) {
        int hadd = get_hadd_value(var_id, value);
        result = (hadd < result) ? hadd : result;
    }
    return result;
}
// Return the largest additive-heuristic value among the given values of
// the variable var_id.
int SplitSelector::get_max_hadd_value(int var_id, const vector<int> &values) const {
    int result = -1;
    for (int value : values) {
        int hadd = get_hadd_value(var_id, value);
        result = (hadd > result) ? hadd : result;
    }
    return result;
}
// Assign a numerical rating to a candidate split according to the configured
// strategy; pick_split() prefers higher ratings, so "MIN_*" strategies
// negate their measure.
double SplitSelector::rate_split(const AbstractState &state, const Split &split) const {
    int var_id = split.var_id;
    const vector<int> &values = split.values;
    double rating;
    switch (pick) {
    case PickSplit::MIN_UNWANTED:
        rating = -get_num_unwanted_values(state, split);
        break;
    case PickSplit::MAX_UNWANTED:
        rating = get_num_unwanted_values(state, split);
        break;
    case PickSplit::MIN_REFINED:
        rating = -get_refinedness(state, var_id);
        break;
    case PickSplit::MAX_REFINED:
        rating = get_refinedness(state, var_id);
        break;
    case PickSplit::MIN_HADD:
        rating = -get_min_hadd_value(var_id, values);
        break;
    case PickSplit::MAX_HADD:
        rating = get_max_hadd_value(var_id, values);
        break;
    default:
        // RANDOM is handled in pick_split() and must not reach this point.
        utils::g_log << "Invalid pick strategy: " << static_cast<int>(pick) << endl;
        utils::exit_with(utils::ExitCode::SEARCH_INPUT_ERROR);
    }
    return rating;
}
// Choose one of the candidate splits according to the configured strategy.
// With a single candidate there is nothing to decide; RANDOM draws
// uniformly; every other strategy takes the best-rated split (first
// maximum wins on ties).
const Split &SplitSelector::pick_split(const AbstractState &state,
                                       const vector<Split> &splits,
                                       utils::RandomNumberGenerator &rng) const {
    assert(!splits.empty());

    if (splits.size() == 1)
        return splits[0];

    if (pick == PickSplit::RANDOM)
        return *rng.choose(splits);

    const Split *best = nullptr;
    double best_rating = numeric_limits<double>::lowest();
    for (const Split &candidate : splits) {
        double candidate_rating = rate_split(state, candidate);
        if (candidate_rating > best_rating) {
            best_rating = candidate_rating;
            best = &candidate;
        }
    }
    assert(best);
    return *best;
}
}
| 4,049 |
C++
| 28.562044 | 88 | 0.618671 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/cegar/cartesian_heuristic_function.h
|
#ifndef CEGAR_CARTESIAN_HEURISTIC_FUNCTION_H
#define CEGAR_CARTESIAN_HEURISTIC_FUNCTION_H

#include <memory>
#include <vector>

class State;

namespace cegar {
class RefinementHierarchy;

/*
  Store RefinementHierarchy and heuristic values for looking up abstract state
  IDs and corresponding heuristic values efficiently.
*/
class CartesianHeuristicFunction {
    // Avoid const to enable moving.
    std::unique_ptr<RefinementHierarchy> refinement_hierarchy;
    // Heuristic value per abstract state (indexed by abstract state ID,
    // judging from the class comment above).
    std::vector<int> h_values;

public:
    CartesianHeuristicFunction(
        std::unique_ptr<RefinementHierarchy> &&hierarchy,
        std::vector<int> &&h_values);

    // Move-only: the refinement hierarchy has a single owner.
    CartesianHeuristicFunction(const CartesianHeuristicFunction &) = delete;
    CartesianHeuristicFunction(CartesianHeuristicFunction &&) = default;

    // Return the heuristic value of the abstract state that contains "state".
    int get_value(const State &state) const;
};
}

#endif
| 839 |
C
| 24.454545 | 78 | 0.756853 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/cegar/types.h
|
#ifndef CEGAR_TYPES_H
#define CEGAR_TYPES_H

#include <limits>
#include <memory>
#include <unordered_set>
#include <vector>

// Shared type aliases and constants for the Cartesian-abstraction (CEGAR) code.
namespace cegar {
class AbstractState;
struct Transition;

using AbstractStates = std::vector<std::unique_ptr<AbstractState>>;
// Goal states are identified by their abstract state IDs.
using Goals = std::unordered_set<int>;
using NodeID = int;
// Presumably operator IDs of self-looping transitions — confirm in the
// transition system code.
using Loops = std::vector<int>;
using Transitions = std::vector<Transition>;

// Sentinel for "no state / operator / node".
const int UNDEFINED = -1;

// Positive infinity. The name "INFINITY" is taken by an ISO C99 macro.
const int INF = std::numeric_limits<int>::max();
}

#endif
| 547 |
C
| 20.076922 | 71 | 0.73309 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/cegar/abstraction.cc
|
#include "abstraction.h"
#include "abstract_state.h"
#include "refinement_hierarchy.h"
#include "transition.h"
#include "transition_system.h"
#include "utils.h"
#include "../task_utils/task_properties.h"
#include "../utils/logging.h"
#include "../utils/math.h"
#include "../utils/memory.h"
#include <algorithm>
#include <cassert>
#include <iostream>
#include <unordered_map>
using namespace std;
namespace cegar {
// Start from the trivial abstraction whose single abstract state covers the
// whole concrete state space. (TaskProxy is constructed several times here;
// presumably it is a cheap wrapper around the task — confirm.)
Abstraction::Abstraction(const shared_ptr<AbstractTask> &task, bool debug)
    : transition_system(utils::make_unique_ptr<TransitionSystem>(TaskProxy(*task).get_operators())),
      concrete_initial_state(TaskProxy(*task).get_initial_state()),
      goal_facts(task_properties::get_fact_pairs(TaskProxy(*task).get_goals())),
      refinement_hierarchy(utils::make_unique_ptr<RefinementHierarchy>(task)),
      debug(debug) {
    initialize_trivial_abstraction(get_domain_sizes(TaskProxy(*task)));
}
Abstraction::~Abstraction() {
}

// Trivial accessors.
const AbstractState &Abstraction::get_initial_state() const {
    return *states[init_id];
}

int Abstraction::get_num_states() const {
    return states.size();
}

const Goals &Abstraction::get_goals() const {
    return goals;
}

const AbstractState &Abstraction::get_state(int state_id) const {
    return *states[state_id];
}

const TransitionSystem &Abstraction::get_transition_system() const {
    return *transition_system;
}

// Hand over ownership of the refinement hierarchy. The abstraction must not
// be refined further afterwards.
unique_ptr<RefinementHierarchy> Abstraction::extract_refinement_hierarchy() {
    assert(refinement_hierarchy);
    return move(refinement_hierarchy);
}
// Turn every current abstract state into a goal state, discarding the
// previous goal set.
void Abstraction::mark_all_states_as_goals() {
    goals.clear();
    for (const auto &state : states)
        goals.insert(state->get_id());
}
// Create the single abstract state that contains all concrete states.
void Abstraction::initialize_trivial_abstraction(const vector<int> &domain_sizes) {
    unique_ptr<AbstractState> init_state =
        AbstractState::get_trivial_abstract_state(domain_sizes);
    init_id = init_state->get_id();
    // The trivial state covers everything, so it is also a goal state.
    goals.insert(init_state->get_id());
    states.push_back(move(init_state));
}
/*
  Split "state" into two children: one that keeps exactly the "wanted"
  values of "var" (v2) and one that keeps the remaining values (v1).
  Updates the refinement hierarchy, the initial/goal bookkeeping and the
  transition system. Returns the IDs of the two children.
*/
pair<int, int> Abstraction::refine(
    const AbstractState &state, int var, const vector<int> &wanted) {
    if (debug)
        utils::g_log << "Refine " << state << " for " << var << "=" << wanted << endl;

    int v_id = state.get_id();
    // Reuse state ID from obsolete parent to obtain consecutive IDs.
    int v1_id = v_id;
    int v2_id = get_num_states();

    // Update refinement hierarchy.
    pair<NodeID, NodeID> node_ids = refinement_hierarchy->split(
        state.get_node_id(), var, wanted, v1_id, v2_id);

    pair<CartesianSet, CartesianSet> cartesian_sets =
        state.split_domain(var, wanted);

    unique_ptr<AbstractState> v1 = utils::make_unique_ptr<AbstractState>(
        v1_id, node_ids.first, move(cartesian_sets.first));
    unique_ptr<AbstractState> v2 = utils::make_unique_ptr<AbstractState>(
        v2_id, node_ids.second, move(cartesian_sets.second));
    assert(state.includes(*v1));
    assert(state.includes(*v2));

    /*
      Due to the way we split the state into v1 and v2, v2 is never the new
      initial state and v1 is never a goal state.
    */
    if (state.get_id() == init_id) {
        if (v1->includes(concrete_initial_state)) {
            assert(!v2->includes(concrete_initial_state));
            init_id = v1_id;
        } else {
            assert(v2->includes(concrete_initial_state));
            init_id = v2_id;
        }
        if (debug) {
            utils::g_log << "New init state #" << init_id << ": " << get_state(init_id)
                         << endl;
        }
    }
    if (goals.count(v_id)) {
        // The parent was a goal state: keep exactly the children that still
        // include all goal facts.
        goals.erase(v_id);
        if (v1->includes(goal_facts)) {
            goals.insert(v1_id);
        }
        if (v2->includes(goal_facts)) {
            goals.insert(v2_id);
        }
        if (debug) {
            utils::g_log << "Goal states: " << goals.size() << endl;
        }
    }

    transition_system->rewire(states, v_id, *v1, *v2, var);

    // v1 replaces the parent in place; v2 gets the next free slot.
    states[v1_id] = move(v1);
    assert(static_cast<int>(states.size()) == v2_id);
    states.push_back(move(v2));

    return {
        v1_id, v2_id
    };
}
// Log the number of states and goal states plus transition-system statistics.
void Abstraction::print_statistics() const {
    utils::g_log << "States: " << get_num_states() << endl;
    utils::g_log << "Goal states: " << goals.size() << endl;
    transition_system->print_statistics();
}
}
| 4,318 |
C++
| 28.786207 | 100 | 0.627142 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/cegar/cegar.cc
|
#include "cegar.h"
#include "abstraction.h"
#include "abstract_state.h"
#include "cartesian_set.h"
#include "transition_system.h"
#include "utils.h"
#include "../task_utils/task_properties.h"
#include "../utils/language.h"
#include "../utils/logging.h"
#include "../utils/math.h"
#include "../utils/memory.h"
#include <algorithm>
#include <cassert>
#include <iostream>
#include <unordered_map>
using namespace std;
namespace cegar {
// Build the Cartesian set corresponding to the given conditions
// (operator preconditions or task goals): each mentioned variable is
// restricted to the single required value.
static CartesianSet get_cartesian_set(
    const vector<int> &domain_sizes, const ConditionsProxy &conditions) {
    CartesianSet result(domain_sizes);
    for (FactProxy fact : conditions) {
        result.set_single_value(
            fact.get_variable().get_id(), fact.get_value());
    }
    return result;
}
/*
  A flaw: the point where the concrete trace stopped matching the abstract
  solution, together with the Cartesian set we would have needed to be in
  to continue following it.

  Fix: the reference members were corrupted by HTML-entity decoding
  ("&current..." had become "¤t..."), which does not compile; the
  ampersands are restored here.
*/
struct Flaw {
    // Last concrete and abstract state reached while tracing solution.
    State concrete_state;
    const AbstractState &current_abstract_state;
    // Hypothetical Cartesian set we would have liked to reach.
    CartesianSet desired_cartesian_set;

    Flaw(
        State &&concrete_state,
        const AbstractState &current_abstract_state,
        CartesianSet &&desired_cartesian_set)
        : concrete_state(move(concrete_state)),
          current_abstract_state(current_abstract_state),
          desired_cartesian_set(move(desired_cartesian_set)) {
        assert(current_abstract_state.includes(this->concrete_state));
    }

    vector<Split> get_possible_splits() const {
        vector<Split> splits;
        /*
          For each fact in the concrete state that is not contained in the
          desired abstract state, loop over all values in the domain of the
          corresponding variable. The values that are in both the current and
          the desired abstract state are the "wanted" ones, i.e., the ones that
          we want to split off.
        */
        for (FactProxy wanted_fact_proxy : concrete_state) {
            FactPair fact = wanted_fact_proxy.get_pair();
            if (!desired_cartesian_set.test(fact.var, fact.value)) {
                VariableProxy var = wanted_fact_proxy.get_variable();
                int var_id = var.get_id();
                vector<int> wanted;
                for (int value = 0; value < var.get_domain_size(); ++value) {
                    if (current_abstract_state.contains(var_id, value) &&
                        desired_cartesian_set.test(var_id, value)) {
                        wanted.push_back(value);
                    }
                }
                assert(!wanted.empty());
                splits.emplace_back(var_id, move(wanted));
            }
        }
        assert(!splits.empty());
        return splits;
    }
};
// Build the abstraction eagerly in the constructor: run the refinement loop
// until a concrete solution is found or one of the limits is hit.
CEGAR::CEGAR(
    const shared_ptr<AbstractTask> &task,
    int max_states,
    int max_non_looping_transitions,
    double max_time,
    PickSplit pick,
    utils::RandomNumberGenerator &rng,
    bool debug)
    : task_proxy(*task),
      domain_sizes(get_domain_sizes(task_proxy)),
      max_states(max_states),
      max_non_looping_transitions(max_non_looping_transitions),
      split_selector(task, pick),
      abstraction(utils::make_unique_ptr<Abstraction>(task, debug)),
      abstract_search(task_properties::get_operator_costs(task_proxy)),
      timer(max_time),
      debug(debug) {
    assert(max_states >= 1);
    utils::g_log << "Start building abstraction." << endl;
    utils::g_log << "Maximum number of states: " << max_states << endl;
    utils::g_log << "Maximum number of transitions: "
                 << max_non_looping_transitions << endl;
    refinement_loop(rng);
    utils::g_log << "Done building abstraction." << endl;
    utils::g_log << "Time for building abstraction: " << timer.get_elapsed_time() << endl;
    print_statistics();
}
// Defined out of line — presumably so the unique_ptr member sees the
// complete Abstraction type here; confirm against the header.
CEGAR::~CEGAR() {
}

// Hand over ownership of the abstraction; the CEGAR object must not be
// used afterwards.
unique_ptr<Abstraction> CEGAR::extract_abstraction() {
    assert(abstraction);
    return move(abstraction);
}
/*
  Refine the (still trivial) abstraction so that, for every variable, the
  values whose facts are not relaxed-reachable before the goal fact are
  split off from the initial state. Afterwards mark all states as goals.
  See refinement_loop() for why this is called for all single-goal subtasks.
*/
void CEGAR::separate_facts_unreachable_before_goal() {
    assert(abstraction->get_goals().size() == 1);
    assert(abstraction->get_num_states() == 1);
    assert(task_proxy.get_goals().size() == 1);
    FactProxy goal = task_proxy.get_goals()[0];
    utils::HashSet<FactProxy> reachable_facts = get_relaxed_possible_before(
        task_proxy, goal);
    for (VariableProxy var : task_proxy.get_variables()) {
        // Refining costs states/transitions, so respect the limits here too.
        if (!may_keep_refining())
            break;
        int var_id = var.get_id();
        vector<int> unreachable_values;
        for (int value = 0; value < var.get_domain_size(); ++value) {
            FactProxy fact = var.get_fact(value);
            if (reachable_facts.count(fact) == 0)
                unreachable_values.push_back(value);
        }
        if (!unreachable_values.empty())
            abstraction->refine(abstraction->get_initial_state(), var_id, unreachable_values);
    }
    abstraction->mark_all_states_as_goals();
}
// Check all limits on the refinement process; log and return false for the
// first limit that is hit (states, transitions, time, memory), otherwise
// allow further refinement.
bool CEGAR::may_keep_refining() const {
    if (abstraction->get_num_states() >= max_states) {
        utils::g_log << "Reached maximum number of states." << endl;
        return false;
    }
    if (abstraction->get_transition_system().get_num_non_loops() >=
        max_non_looping_transitions) {
        utils::g_log << "Reached maximum number of transitions." << endl;
        return false;
    }
    if (timer.is_expired()) {
        utils::g_log << "Reached time limit." << endl;
        return false;
    }
    if (!utils::extra_memory_padding_is_reserved()) {
        utils::g_log << "Reached memory limit." << endl;
        return false;
    }
    return true;
}
// Main CEGAR loop: find an abstract solution, trace it in the concrete task,
// split the abstract state at the first flaw; repeat until a concrete
// solution is found, the abstract task is unsolvable, or a limit is hit.
void CEGAR::refinement_loop(utils::RandomNumberGenerator &rng) {
    /*
      For landmark tasks we have to map all states in which the
      landmark might have been achieved to arbitrary abstract goal
      states. For the other types of subtasks our method won't find
      unreachable facts, but calling it unconditionally for subtasks
      with one goal doesn't hurt and simplifies the implementation.
    */
    if (task_proxy.get_goals().size() == 1) {
        separate_facts_unreachable_before_goal();
    }

    utils::Timer find_trace_timer(false);
    utils::Timer find_flaw_timer(false);
    utils::Timer refine_timer(false);
    while (may_keep_refining()) {
        find_trace_timer.resume();
        unique_ptr<Solution> solution = abstract_search.find_solution(
            abstraction->get_transition_system().get_outgoing_transitions(),
            abstraction->get_initial_state().get_id(),
            abstraction->get_goals());
        find_trace_timer.stop();
        if (!solution) {
            utils::g_log << "Abstract task is unsolvable." << endl;
            break;
        }

        find_flaw_timer.resume();
        unique_ptr<Flaw> flaw = find_flaw(*solution);
        find_flaw_timer.stop();
        if (!flaw) {
            utils::g_log << "Found concrete solution during refinement." << endl;
            break;
        }

        refine_timer.resume();
        const AbstractState &abstract_state = flaw->current_abstract_state;
        int state_id = abstract_state.get_id();
        vector<Split> splits = flaw->get_possible_splits();
        const Split &split = split_selector.pick_split(abstract_state, splits, rng);
        auto new_state_ids = abstraction->refine(abstract_state, split.var_id, split.values);
        // Since h-values only increase we can assign the h-value to the children.
        abstract_search.copy_h_value_to_children(
            state_id, new_state_ids.first, new_state_ids.second);
        refine_timer.stop();

        // Progress report every 1000 states.
        if (abstraction->get_num_states() % 1000 == 0) {
            utils::g_log << abstraction->get_num_states() << "/" << max_states << " states, "
                         << abstraction->get_transition_system().get_num_non_loops() << "/"
                         << max_non_looping_transitions << " transitions" << endl;
        }
    }
    utils::g_log << "Time for finding abstract traces: " << find_trace_timer << endl;
    utils::g_log << "Time for finding flaws: " << find_flaw_timer << endl;
    utils::g_log << "Time for splitting states: " << refine_timer << endl;
}
/*
  Trace the abstract solution in the concrete task. Three flaw kinds can
  occur: the concrete successor leaves the abstract path ("paths deviate"),
  the operator is inapplicable in the concrete state, or the final concrete
  state is not a goal. Return nullptr if the trace is a concrete solution.
*/
unique_ptr<Flaw> CEGAR::find_flaw(const Solution &solution) {
    if (debug)
        utils::g_log << "Check solution:" << endl;

    const AbstractState *abstract_state = &abstraction->get_initial_state();
    State concrete_state = task_proxy.get_initial_state();
    assert(abstract_state->includes(concrete_state));

    if (debug)
        utils::g_log << " Initial abstract state: " << *abstract_state << endl;

    for (const Transition &step : solution) {
        // Abort tracing when memory runs low; the caller re-checks limits.
        if (!utils::extra_memory_padding_is_reserved())
            break;
        OperatorProxy op = task_proxy.get_operators()[step.op_id];
        const AbstractState *next_abstract_state = &abstraction->get_state(step.target_id);
        if (task_properties::is_applicable(op, concrete_state)) {
            if (debug)
                utils::g_log << " Move to " << *next_abstract_state << " with "
                             << op.get_name() << endl;
            State next_concrete_state = concrete_state.get_unregistered_successor(op);
            if (!next_abstract_state->includes(next_concrete_state)) {
                if (debug)
                    utils::g_log << " Paths deviate." << endl;
                return utils::make_unique_ptr<Flaw>(
                    move(concrete_state),
                    *abstract_state,
                    next_abstract_state->regress(op));
            }
            abstract_state = next_abstract_state;
            concrete_state = move(next_concrete_state);
        } else {
            if (debug)
                utils::g_log << " Operator not applicable: " << op.get_name() << endl;
            return utils::make_unique_ptr<Flaw>(
                move(concrete_state),
                *abstract_state,
                get_cartesian_set(domain_sizes, op.get_preconditions()));
        }
    }
    assert(abstraction->get_goals().count(abstract_state->get_id()));
    if (task_properties::is_goal_state(task_proxy, concrete_state)) {
        // We found a concrete solution.
        return nullptr;
    } else {
        if (debug)
            utils::g_log << " Goal test failed." << endl;
        return utils::make_unique_ptr<Flaw>(
            move(concrete_state),
            *abstract_state,
            get_cartesian_set(domain_sizes, task_proxy.get_goals()));
    }
}
// Log abstraction statistics and the h-value of the abstract initial state.
void CEGAR::print_statistics() {
    abstraction->print_statistics();
    int init_id = abstraction->get_initial_state().get_id();
    utils::g_log << "Initial h value: " << abstract_search.get_h_value(init_id) << endl;
    utils::g_log << endl;
}
}
| 10,751 |
C++
| 37.815884 | 105 | 0.602456 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/cegar/cegar.h
|
#ifndef CEGAR_CEGAR_H
#define CEGAR_CEGAR_H

#include "abstract_search.h"
#include "split_selector.h"

#include "../task_proxy.h"

#include "../utils/countdown_timer.h"

#include <memory>

namespace utils {
class RandomNumberGenerator;
}

namespace cegar {
class Abstraction;
struct Flaw;

/*
  Iteratively refine a Cartesian abstraction with counterexample-guided
  abstraction refinement (CEGAR).

  Store the abstraction, use AbstractSearch to find abstract solutions, find
  flaws, use SplitSelector to select splits in case of ambiguities and break
  spurious solutions.
*/
class CEGAR {
    const TaskProxy task_proxy;
    const std::vector<int> domain_sizes;
    // Hard limits on the size of the abstraction.
    const int max_states;
    const int max_non_looping_transitions;
    const SplitSelector split_selector;

    // Owned until extract_abstraction() is called.
    std::unique_ptr<Abstraction> abstraction;
    AbstractSearch abstract_search;

    // Limit the time for building the abstraction.
    utils::CountdownTimer timer;

    const bool debug;

    bool may_keep_refining() const;

    /*
      Map all states that can only be reached after reaching the goal
      fact to arbitrary goal states.

      We need this method only for landmark subtasks, but calling it
      for other subtasks with a single goal fact doesn't hurt and
      simplifies the implementation.
    */
    void separate_facts_unreachable_before_goal();

    /* Try to convert the abstract solution into a concrete trace. Return the
       first encountered flaw or nullptr if there is no flaw. */
    std::unique_ptr<Flaw> find_flaw(const Solution &solution);

    // Build abstraction.
    void refinement_loop(utils::RandomNumberGenerator &rng);

    void print_statistics();

public:
    CEGAR(
        const std::shared_ptr<AbstractTask> &task,
        int max_states,
        int max_non_looping_transitions,
        double max_time,
        PickSplit pick,
        utils::RandomNumberGenerator &rng,
        bool debug);
    ~CEGAR();

    CEGAR(const CEGAR &) = delete;

    std::unique_ptr<Abstraction> extract_abstraction();
};
}

#endif
| 2,044 |
C
| 23.638554 | 77 | 0.702055 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/cegar/abstraction.h
|
#ifndef CEGAR_ABSTRACTION_H
#define CEGAR_ABSTRACTION_H

#include "types.h"

#include "../task_proxy.h"

#include "../utils/collections.h"

#include <memory>
#include <vector>

namespace cegar {
class AbstractState;
class RefinementHierarchy;
class TransitionSystem;

/*
  Store the set of AbstractStates, use AbstractSearch to find abstract
  solutions, find flaws, use SplitSelector to select splits in case of
  ambiguities, break spurious solutions and maintain the
  RefinementHierarchy.
*/
class Abstraction {
    const std::unique_ptr<TransitionSystem> transition_system;
    // Cached for re-deciding which child becomes the initial state on a split.
    const State concrete_initial_state;
    const std::vector<FactPair> goal_facts;

    // All (as of yet unsplit) abstract states.
    AbstractStates states;
    // State ID of abstract initial state.
    int init_id;
    // Abstract goal states. Only landmark tasks can have multiple goal states.
    Goals goals;

    /* DAG with inner nodes for all split states and leaves for all
       current states. */
    std::unique_ptr<RefinementHierarchy> refinement_hierarchy;

    const bool debug;

    void initialize_trivial_abstraction(const std::vector<int> &domain_sizes);

public:
    Abstraction(const std::shared_ptr<AbstractTask> &task, bool debug);
    ~Abstraction();

    Abstraction(const Abstraction &) = delete;

    int get_num_states() const;
    const AbstractState &get_initial_state() const;
    const Goals &get_goals() const;
    const AbstractState &get_state(int state_id) const;
    const TransitionSystem &get_transition_system() const;

    std::unique_ptr<RefinementHierarchy> extract_refinement_hierarchy();

    /* Needed for CEGAR::separate_facts_unreachable_before_goal(). */
    void mark_all_states_as_goals();

    // Split state into two child states; returns the IDs of the children.
    std::pair<int, int> refine(
        const AbstractState &state, int var, const std::vector<int> &wanted);

    void print_statistics() const;
};
}

#endif
| 1,925 |
C
| 26.913043 | 79 | 0.717403 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/search_engines/plugin_lazy_greedy.cc
|
#include "lazy_search.h"
#include "search_common.h"
#include "../option_parser.h"
#include "../plugin.h"
using namespace std;
namespace plugin_lazy_greedy {
static const string DEFAULT_LAZY_BOOST = "1000";

// Plugin front end: document and parse the command-line options for lazy
// greedy best-first search and construct the LazySearch engine.
static shared_ptr<SearchEngine> _parse(OptionParser &parser) {
    parser.document_synopsis("Greedy search (lazy)", "");
    parser.document_note(
        "Open lists",
        "In most cases, lazy greedy best first search uses "
        "an alternation open list with one queue for each evaluator. "
        "If preferred operator evaluators are used, it adds an "
        "extra queue for each of these evaluators that includes "
        "only the nodes that are generated with a preferred operator. "
        "If only one evaluator and no preferred operator evaluator is used, "
        "the search does not use an alternation open list "
        "but a standard open list with only one queue.");
    parser.document_note(
        "Equivalent statements using general lazy search",
        "\n```\n--evaluator h2=eval2\n"
        "--search lazy_greedy([eval1, h2], preferred=h2, boost=100)\n```\n"
        "is equivalent to\n"
        "```\n--evaluator h1=eval1 --heuristic h2=eval2\n"
        "--search lazy(alt([single(h1), single(h1, pref_only=true), single(h2),\n"
        "          single(h2, pref_only=true)], boost=100),\n"
        "              preferred=h2)\n```\n"
        "------------------------------------------------------------\n"
        "```\n--search lazy_greedy([eval1, eval2], boost=100)\n```\n"
        "is equivalent to\n"
        "```\n--search lazy(alt([single(eval1), single(eval2)], boost=100))\n```\n"
        "------------------------------------------------------------\n"
        "```\n--evaluator h1=eval1\n--search lazy_greedy(h1, preferred=h1)\n```\n"
        "is equivalent to\n"
        "```\n--evaluator h1=eval1\n"
        "--search lazy(alt([single(h1), single(h1, pref_only=true)], boost=1000),\n"
        "              preferred=h1)\n```\n"
        "------------------------------------------------------------\n"
        "```\n--search lazy_greedy(eval1)\n```\n"
        "is equivalent to\n"
        "```\n--search lazy(single(eval1))\n```\n",
        true);
    parser.add_list_option<shared_ptr<Evaluator>>("evals", "evaluators");
    parser.add_list_option<shared_ptr<Evaluator>>(
        "preferred",
        "use preferred operators of these evaluators", "[]");
    parser.add_option<bool>("reopen_closed",
                            "reopen closed nodes", "false");
    parser.add_option<int>(
        "boost",
        "boost value for alternation queues that are restricted "
        "to preferred operator nodes",
        DEFAULT_LAZY_BOOST);
    SearchEngine::add_succ_order_options(parser);
    SearchEngine::add_options_to_parser(parser);
    Options opts = parser.parse();

    shared_ptr<lazy_search::LazySearch> engine;
    // Only construct the engine when actually running (not in dry-run mode).
    if (!parser.dry_run()) {
        opts.set("open", search_common::create_greedy_open_list_factory(opts));
        engine = make_shared<lazy_search::LazySearch>(opts);
        // TODO: The following two lines look fishy. See similar comment in _parse.
        vector<shared_ptr<Evaluator>> preferred_list = opts.get_list<shared_ptr<Evaluator>>("preferred");
        engine->set_preferred_operator_evaluators(preferred_list);
    }
    return engine;
}

static Plugin<SearchEngine> _plugin("lazy_greedy", _parse);
}
| 3,420 |
C++
| 43.428571 | 105 | 0.578655 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/search_engines/iterated_search.cc
|
#include "iterated_search.h"
#include "../option_parser.h"
#include "../plugin.h"
#include "../utils/logging.h"
#include <iostream>
using namespace std;
namespace iterated_search {
/*
  All configuration comes from opts; the per-phase sub-engines in
  "engine_configs" are parsed lazily, one per phase, in get_search_engine().

  Fix: the reference parameter had been corrupted by HTML-entity decoding
  ("&registry" had become "®istry"), which does not compile; the
  ampersand is restored here.
*/
IteratedSearch::IteratedSearch(const Options &opts, options::Registry &registry,
                               const options::Predefinitions &predefinitions)
    : SearchEngine(opts),
      engine_configs(opts.get_list<ParseTree>("engine_configs")),
      registry(registry),
      predefinitions(predefinitions),
      pass_bound(opts.get<bool>("pass_bound")),
      repeat_last_phase(opts.get<bool>("repeat_last")),
      continue_on_fail(opts.get<bool>("continue_on_fail")),
      continue_on_solve(opts.get<bool>("continue_on_solve")),
      phase(0),
      last_phase_found_solution(false),
      best_bound(bound),
      iterated_found_solution(false) {
}
// Parse and instantiate the sub-engine for the given phase index, logging
// its configuration.
shared_ptr<SearchEngine> IteratedSearch::get_search_engine(
    int engine_configs_index) {
    OptionParser parser(engine_configs[engine_configs_index], registry, predefinitions, false);
    shared_ptr<SearchEngine> engine(parser.start_parsing<shared_ptr<SearchEngine>>());

    ostringstream stream;
    kptree::print_tree_bracketed(engine_configs[engine_configs_index], stream);
    utils::g_log << "Starting search: " << stream.str() << endl;

    return engine;
}
// Return the engine for the next phase, or nullptr when the iterated
// search should stop.
shared_ptr<SearchEngine> IteratedSearch::create_current_phase() {
    int num_phases = engine_configs.size();
    if (phase >= num_phases) {
        /* We've gone through all searches. We continue if
           repeat_last_phase is true, but *not* if we didn't find a
           solution the last time around, since then this search would
           just behave the same way again (assuming determinism, which
           we might not actually have right now, but strive for). So
           this overrides continue_on_fail.
        */
        if (repeat_last_phase && last_phase_found_solution) {
            return get_search_engine(engine_configs.size() - 1);
        } else {
            return nullptr;
        }
    }

    return get_search_engine(phase);
}
/*
  Run one full phase: create its engine, search, harvest the plan (keeping
  and saving only strictly better plans) and fold the phase's statistics
  into the cumulative statistics.

  Fix: the reference declaration had been corrupted by HTML-entity decoding
  ("&current_stats" had become "¤t_stats"), which does not compile;
  the ampersand is restored here.
*/
SearchStatus IteratedSearch::step() {
    shared_ptr<SearchEngine> current_search = create_current_phase();
    if (!current_search) {
        return found_solution() ? SOLVED : FAILED;
    }
    if (pass_bound) {
        current_search->set_bound(best_bound);
    }
    ++phase;
    current_search->search();

    Plan found_plan;
    int plan_cost = 0;
    last_phase_found_solution = current_search->found_solution();
    if (last_phase_found_solution) {
        iterated_found_solution = true;
        found_plan = current_search->get_plan();
        plan_cost = calculate_plan_cost(found_plan, task_proxy);
        // Only keep (and save) plans that improve on the best bound.
        if (plan_cost < best_bound) {
            plan_manager.save_plan(found_plan, task_proxy, true);
            best_bound = plan_cost;
            set_plan(found_plan);
        }
    }
    current_search->print_statistics();

    // Accumulate this phase's statistics into the cumulative ones.
    const SearchStatistics &current_stats = current_search->get_statistics();
    statistics.inc_expanded(current_stats.get_expanded());
    statistics.inc_evaluated_states(current_stats.get_evaluated_states());
    statistics.inc_evaluations(current_stats.get_evaluations());
    statistics.inc_generated(current_stats.get_generated());
    statistics.inc_generated_ops(current_stats.get_generated_ops());
    statistics.inc_reopened(current_stats.get_reopened());

    return step_return_value();
}
// Decide whether to run another phase, based on whether the last phase
// found a solution and on the continue_on_solve/continue_on_fail options.
SearchStatus IteratedSearch::step_return_value() {
    if (iterated_found_solution)
        utils::g_log << "Best solution cost so far: " << best_bound << endl;

    if (last_phase_found_solution) {
        if (continue_on_solve) {
            utils::g_log << "Solution found - keep searching" << endl;
            return IN_PROGRESS;
        } else {
            utils::g_log << "Solution found - stop searching" << endl;
            return SOLVED;
        }
    } else {
        if (continue_on_fail) {
            utils::g_log << "No solution found - keep searching" << endl;
            return IN_PROGRESS;
        } else {
            utils::g_log << "No solution found - stop searching" << endl;
            // An earlier phase may still have found a solution.
            return iterated_found_solution ? SOLVED : FAILED;
        }
    }
}
// Log the statistics accumulated over all phases.
void IteratedSearch::print_statistics() const {
    utils::g_log << "Cumulative statistics:" << endl;
    statistics.print_detailed_statistics();
}
// Intentionally a no-op (overrides the base-class behavior).
void IteratedSearch::save_plan_if_necessary() {
    // We don't need to save here, as we automatically save after
    // each successful search iteration.
}
// Plugin front end: document and parse the options for iterated search;
// in dry-run mode, also validate that each sub-engine configuration parses.
static shared_ptr<SearchEngine> _parse(OptionParser &parser) {
    parser.document_synopsis("Iterated search", "");
    parser.document_note(
        "Note 1",
        "We don't cache heuristic values between search iterations at"
        " the moment. If you perform a LAMA-style iterative search,"
        " heuristic values will be computed multiple times.");
    parser.document_note(
        "Note 2",
        "The configuration\n```\n"
        "--search \"iterated([lazy_wastar(merge_and_shrink(),w=10), "
        "lazy_wastar(merge_and_shrink(),w=5), lazy_wastar(merge_and_shrink(),w=3), "
        "lazy_wastar(merge_and_shrink(),w=2), lazy_wastar(merge_and_shrink(),w=1)])\"\n"
        "```\nwould perform the preprocessing phase of the merge and shrink heuristic "
        "5 times (once before each iteration).\n\n"
        "To avoid this, use heuristic predefinition, which avoids duplicate "
        "preprocessing, as follows:\n```\n"
        "--evaluator \"h=merge_and_shrink()\" --search "
        "\"iterated([lazy_wastar(h,w=10), lazy_wastar(h,w=5), lazy_wastar(h,w=3), "
        "lazy_wastar(h,w=2), lazy_wastar(h,w=1)])\"\n"
        "```");
    parser.document_note(
        "Note 3",
        "If you reuse the same landmark count heuristic "
        "(using heuristic predefinition) between iterations, "
        "the path data (that is, landmark status for each visited state) "
        "will be saved between iterations.");
    parser.add_list_option<ParseTree>("engine_configs",
                                      "list of search engines for each phase");
    parser.add_option<bool>(
        "pass_bound",
        "use bound from previous search. The bound is the real cost "
        "of the plan found before, regardless of the cost_type parameter.",
        "true");
    parser.add_option<bool>("repeat_last",
                            "repeat last phase of search",
                            "false");
    parser.add_option<bool>("continue_on_fail",
                            "continue search after no solution found",
                            "false");
    parser.add_option<bool>("continue_on_solve",
                            "continue search after solution found",
                            "true");
    SearchEngine::add_options_to_parser(parser);
    Options opts = parser.parse();

    opts.verify_list_non_empty<ParseTree>("engine_configs");

    if (parser.help_mode()) {
        return nullptr;
    } else if (parser.dry_run()) {
        // Check if the supplied search engines can be parsed.
        for (const ParseTree &config : opts.get_list<ParseTree>("engine_configs")) {
            OptionParser test_parser(config, parser.get_registry(),
                                     parser.get_predefinitions(), true);
            test_parser.start_parsing<shared_ptr<SearchEngine>>();
        }
        return nullptr;
    } else {
        return make_shared<IteratedSearch>(opts, parser.get_registry(),
                                           parser.get_predefinitions());
    }
}

static Plugin<SearchEngine> _plugin("iterated", _parse);
}
| 7,616 |
C++
| 37.469697 | 95 | 0.614758 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.