makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_scoring_function_total_order.cc
#include "merge_scoring_function_total_order.h"
#include "factored_transition_system.h"
#include "transition_system.h"
#include "../task_proxy.h"
#include "../options/option_parser.h"
#include "../options/options.h"
#include "../options/plugin.h"
#include "../utils/logging.h"
#include "../utils/markup.h"
#include "../utils/rng.h"
#include "../utils/rng_options.h"
#include <cassert>
using namespace std;
namespace merge_and_shrink {
MergeScoringFunctionTotalOrder::MergeScoringFunctionTotalOrder(
const options::Options &options)
: atomic_ts_order(options.get<AtomicTSOrder>("atomic_ts_order")),
product_ts_order(options.get<ProductTSOrder>("product_ts_order")),
atomic_before_product(options.get<bool>("atomic_before_product")),
random_seed(options.get<int>("random_seed")),
rng(utils::parse_rng_from_options(options)) {
}
vector<double> MergeScoringFunctionTotalOrder::compute_scores(
const FactoredTransitionSystem &,
const vector<pair<int, int>> &merge_candidates) {
assert(initialized);
vector<double> scores;
scores.reserve(merge_candidates.size());
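    // A candidate's score is its position in the precomputed total order;
    // score-based filtering prefers lower scores, so pairs appearing earlier
    // in the order are preferred.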
for (size_t candidate_index = 0; candidate_index < merge_candidates.size();
++candidate_index) {
pair<int, int> merge_candidate = merge_candidates[candidate_index];
int ts_index1 = merge_candidate.first;
int ts_index2 = merge_candidate.second;
for (size_t merge_candidate_order_index = 0;
merge_candidate_order_index < merge_candidate_order.size();
++merge_candidate_order_index) {
pair<int, int> other_candidate =
merge_candidate_order[merge_candidate_order_index];
if ((other_candidate.first == ts_index1 &&
other_candidate.second == ts_index2) ||
(other_candidate.second == ts_index1 &&
other_candidate.first == ts_index2)) {
// use the index in the merge candidate order as score
scores.push_back(merge_candidate_order_index);
break;
}
}
// We must have inserted a score for the current candidate.
assert(scores.size() == candidate_index + 1);
}
return scores;
}
void MergeScoringFunctionTotalOrder::initialize(const TaskProxy &task_proxy) {
initialized = true;
int num_variables = task_proxy.get_variables().size();
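    // A task with n variables yields n atomic transition systems, and merging
    // them down to a single system creates at most n - 1 products, hence the
    // bound of 2n - 1 transition system indices.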
int max_transition_system_count = num_variables * 2 - 1;
vector<int> transition_system_order;
transition_system_order.reserve(max_transition_system_count);
// Compute the order in which atomic transition systems are considered
vector<int> atomic_tso;
atomic_tso.reserve(num_variables);
for (int i = 0; i < num_variables; ++i) {
atomic_tso.push_back(i);
}
if (atomic_ts_order == AtomicTSOrder::LEVEL) {
reverse(atomic_tso.begin(), atomic_tso.end());
} else if (atomic_ts_order == AtomicTSOrder::RANDOM) {
rng->shuffle(atomic_tso);
}
// Compute the order in which product transition systems are considered
vector<int> product_tso;
for (int i = num_variables; i < max_transition_system_count; ++i) {
product_tso.push_back(i);
}
if (product_ts_order == ProductTSOrder::NEW_TO_OLD) {
reverse(product_tso.begin(), product_tso.end());
} else if (product_ts_order == ProductTSOrder::RANDOM) {
rng->shuffle(product_tso);
}
// Put the orders in the correct order
if (atomic_before_product) {
transition_system_order.insert(transition_system_order.end(),
atomic_tso.begin(),
atomic_tso.end());
transition_system_order.insert(transition_system_order.end(),
product_tso.begin(),
product_tso.end());
} else {
transition_system_order.insert(transition_system_order.end(),
product_tso.begin(),
product_tso.end());
transition_system_order.insert(transition_system_order.end(),
atomic_tso.begin(),
atomic_tso.end());
}
merge_candidate_order.reserve(max_transition_system_count *
max_transition_system_count / 2);
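    // There are k * (k - 1) / 2 unordered pairs of k transition systems;
    // k * k / 2 slightly over-approximates this for the reservation.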
for (size_t i = 0; i < transition_system_order.size(); ++i) {
for (size_t j = i + 1; j < transition_system_order.size(); ++j) {
merge_candidate_order.emplace_back(
transition_system_order[i], transition_system_order[j]);
}
}
}
string MergeScoringFunctionTotalOrder::name() const {
return "total order";
}
void MergeScoringFunctionTotalOrder::dump_function_specific_options() const {
utils::g_log << "Atomic transition system order: ";
switch (atomic_ts_order) {
case AtomicTSOrder::REVERSE_LEVEL:
utils::g_log << "reverse level";
break;
case AtomicTSOrder::LEVEL:
utils::g_log << "level";
break;
case AtomicTSOrder::RANDOM:
utils::g_log << "random";
break;
}
utils::g_log << endl;
utils::g_log << "Product transition system order: ";
switch (product_ts_order) {
case ProductTSOrder::OLD_TO_NEW:
utils::g_log << "old to new";
break;
case ProductTSOrder::NEW_TO_OLD:
utils::g_log << "new to old";
break;
case ProductTSOrder::RANDOM:
utils::g_log << "random";
break;
}
utils::g_log << endl;
utils::g_log << "Consider " << (atomic_before_product ?
"atomic before product" : "product before atomic")
<< " transition systems" << endl;
utils::g_log << "Random seed: " << random_seed << endl;
}
void MergeScoringFunctionTotalOrder::add_options_to_parser(
options::OptionParser &parser) {
vector<string> atomic_ts_order;
vector<string> atomic_ts_order_documentation;
atomic_ts_order.push_back("reverse_level");
atomic_ts_order_documentation.push_back(
"the variable order of Fast Downward");
atomic_ts_order.push_back("level");
atomic_ts_order_documentation.push_back("opposite of reverse_level");
atomic_ts_order.push_back("random");
atomic_ts_order_documentation.push_back("a randomized order");
parser.add_enum_option<AtomicTSOrder>(
"atomic_ts_order",
atomic_ts_order,
"The order in which atomic transition systems are considered when "
"considering pairs of potential merges.",
"reverse_level",
atomic_ts_order_documentation);
vector<string> product_ts_order;
vector<string> product_ts_order_documentation;
product_ts_order.push_back("old_to_new");
product_ts_order_documentation.push_back(
"consider composite transition systems from most recent to oldest, "
"that is in decreasing index order");
product_ts_order.push_back("new_to_old");
product_ts_order_documentation.push_back("opposite of old_to_new");
product_ts_order.push_back("random");
product_ts_order_documentation.push_back("a randomized order");
parser.add_enum_option<ProductTSOrder>(
"product_ts_order",
product_ts_order,
"The order in which product transition systems are considered when "
"considering pairs of potential merges.",
"new_to_old",
product_ts_order_documentation);
parser.add_option<bool>(
"atomic_before_product",
"Consider atomic transition systems before composite ones iff true.",
"false");
utils::add_rng_options(parser);
}
static shared_ptr<MergeScoringFunction> _parse(options::OptionParser &parser) {
parser.document_synopsis(
"Total order",
"This scoring function computes a total order on the merge candidates, "
"based on the specified options. The score for each merge candidate "
        "corresponds to its position in the order. This scoring function is "
        "mainly intended for tie-breaking, and was introduced in the "
"following paper:"
+ utils::format_conference_reference(
{"Silvan Sievers", "Martin Wehrle", "Malte Helmert"},
"An Analysis of Merge Strategies for Merge-and-Shrink Heuristics",
"https://ai.dmi.unibas.ch/papers/sievers-et-al-icaps2016.pdf",
"Proceedings of the 26th International Conference on Automated "
"Planning and Scheduling (ICAPS 2016)",
"294-298",
"AAAI Press",
"2016") +
"Furthermore, using the atomic_ts_order option, this scoring function, "
"if used alone in a score based filtering merge selector, can be used "
"to emulate the corresponding (precomputed) linear merge strategies "
"reverse level/level (independently of the other options).");
MergeScoringFunctionTotalOrder::add_options_to_parser(parser);
options::Options options = parser.parse();
if (parser.dry_run())
return nullptr;
else
return make_shared<MergeScoringFunctionTotalOrder>(options);
}
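/*
  Example configuration (a sketch combining the options defined above):
  total_order(atomic_ts_order=reverse_level, product_ts_order=new_to_old,
              atomic_before_product=false)
*/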
static options::Plugin<MergeScoringFunction> _plugin("total_order", _parse);
}
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_strategy_factory_sccs.h
#ifndef MERGE_AND_SHRINK_MERGE_STRATEGY_FACTORY_SCCS_H
#define MERGE_AND_SHRINK_MERGE_STRATEGY_FACTORY_SCCS_H
#include "merge_strategy_factory.h"
namespace options {
class Options;
}
namespace merge_and_shrink {
class MergeTreeFactory;
class MergeSelector;
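/*
  Order in which the SCCs of the causal graph are considered for merging:
  following a topological order of the causal graph (or its reverse), or by
  decreasing/increasing SCC size (a reading inferred from the option names).
*/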
enum class OrderOfSCCs {
TOPOLOGICAL,
REVERSE_TOPOLOGICAL,
DECREASING,
INCREASING
};
class MergeStrategyFactorySCCs : public MergeStrategyFactory {
OrderOfSCCs order_of_sccs;
std::shared_ptr<MergeTreeFactory> merge_tree_factory;
std::shared_ptr<MergeSelector> merge_selector;
protected:
virtual std::string name() const override;
virtual void dump_strategy_specific_options() const override;
public:
MergeStrategyFactorySCCs(const options::Options &options);
virtual ~MergeStrategyFactorySCCs() override = default;
virtual std::unique_ptr<MergeStrategy> compute_merge_strategy(
const TaskProxy &task_proxy,
const FactoredTransitionSystem &fts) override;
virtual bool requires_init_distances() const override;
virtual bool requires_goal_distances() const override;
};
}
#endif
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_tree_factory_linear.cc
#include "merge_tree_factory_linear.h"
#include "factored_transition_system.h"
#include "merge_tree.h"
#include "transition_system.h"
#include "../task_proxy.h"
#include "../options/option_parser.h"
#include "../options/options.h"
#include "../options/plugin.h"
#include "../utils/markup.h"
#include "../utils/rng_options.h"
#include "../utils/system.h"
#include <algorithm>
using namespace std;
namespace merge_and_shrink {
MergeTreeFactoryLinear::MergeTreeFactoryLinear(const options::Options &options)
: MergeTreeFactory(options),
variable_order_type(
options.get<variable_order_finder::VariableOrderType>("variable_order")) {
}
unique_ptr<MergeTree> MergeTreeFactoryLinear::compute_merge_tree(
const TaskProxy &task_proxy) {
variable_order_finder::VariableOrderFinder vof(task_proxy, variable_order_type);
MergeTreeNode *root = new MergeTreeNode(vof.next());
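    // Build a left-deep (linear) merge tree: the tree constructed so far
    // always becomes the left child of a new root, and the next atomic
    // transition system its right child.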
while (!vof.done()) {
MergeTreeNode *right_child = new MergeTreeNode(vof.next());
root = new MergeTreeNode(root, right_child);
}
return utils::make_unique_ptr<MergeTree>(
root, rng, update_option);
}
unique_ptr<MergeTree> MergeTreeFactoryLinear::compute_merge_tree(
const TaskProxy &task_proxy,
const FactoredTransitionSystem &fts,
const vector<int> &indices_subset) {
/*
Compute a mapping from state variables to transition system indices
that contain those variables. Also set all indices not contained in
indices_subset to "used".
*/
int num_vars = task_proxy.get_variables().size();
int num_ts = fts.get_size();
vector<int> var_to_ts_index(num_vars, -1);
vector<bool> used_ts_indices(num_ts, true);
for (int ts_index : fts) {
bool use_ts_index =
find(indices_subset.begin(), indices_subset.end(),
ts_index) != indices_subset.end();
if (use_ts_index) {
used_ts_indices[ts_index] = false;
}
const vector<int> &vars =
fts.get_transition_system(ts_index).get_incorporated_variables();
for (int var : vars) {
var_to_ts_index[var] = ts_index;
}
}
/*
Compute the merge tree, using transition systems corresponding to
variables in order given by the variable order finder, implicitly
skipping all indices not in indices_subset, because these have been set
to "used" above.
*/
variable_order_finder::VariableOrderFinder vof(task_proxy, variable_order_type);
int next_var = vof.next();
int ts_index = var_to_ts_index[next_var];
assert(ts_index != -1);
// find the first valid ts index
while (used_ts_indices[ts_index]) {
assert(!vof.done());
next_var = vof.next();
ts_index = var_to_ts_index[next_var];
assert(ts_index != -1);
}
used_ts_indices[ts_index] = true;
MergeTreeNode *root = new MergeTreeNode(ts_index);
while (!vof.done()) {
next_var = vof.next();
ts_index = var_to_ts_index[next_var];
assert(ts_index != -1);
if (!used_ts_indices[ts_index]) {
used_ts_indices[ts_index] = true;
MergeTreeNode *right_child = new MergeTreeNode(ts_index);
root = new MergeTreeNode(root, right_child);
}
}
return utils::make_unique_ptr<MergeTree>(
root, rng, update_option);
}
string MergeTreeFactoryLinear::name() const {
return "linear";
}
void MergeTreeFactoryLinear::dump_tree_specific_options() const {
dump_variable_order_type(variable_order_type);
}
void MergeTreeFactoryLinear::add_options_to_parser(
options::OptionParser &parser) {
MergeTreeFactory::add_options_to_parser(parser);
vector<string> merge_strategies;
merge_strategies.push_back("CG_GOAL_LEVEL");
merge_strategies.push_back("CG_GOAL_RANDOM");
merge_strategies.push_back("GOAL_CG_LEVEL");
merge_strategies.push_back("RANDOM");
merge_strategies.push_back("LEVEL");
merge_strategies.push_back("REVERSE_LEVEL");
parser.add_enum_option<variable_order_finder::VariableOrderType>(
"variable_order", merge_strategies,
"the order in which atomic transition systems are merged",
"CG_GOAL_LEVEL");
}
static shared_ptr<MergeTreeFactory> _parse(options::OptionParser &parser) {
MergeTreeFactoryLinear::add_options_to_parser(parser);
parser.document_synopsis(
"Linear merge trees",
"These merge trees implement several linear merge orders, which "
"are described in the paper:" + utils::format_conference_reference(
{"Malte Helmert", "Patrik Haslum", "Joerg Hoffmann"},
"Flexible Abstraction Heuristics for Optimal Sequential Planning",
"https://ai.dmi.unibas.ch/papers/helmert-et-al-icaps2007.pdf",
"Proceedings of the Seventeenth International Conference on"
" Automated Planning and Scheduling (ICAPS 2007)",
"176-183",
"AAAI Press",
"2007"));
options::Options opts = parser.parse();
if (parser.dry_run())
return nullptr;
else
return make_shared<MergeTreeFactoryLinear>(opts);
}
static options::Plugin<MergeTreeFactory> _plugin("linear", _parse);
}
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_strategy.cc
#include "merge_strategy.h"
using namespace std;
namespace merge_and_shrink {
MergeStrategy::MergeStrategy(
const FactoredTransitionSystem &fts)
: fts(fts) {
}
}
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_and_shrink_algorithm.cc
#include "merge_and_shrink_algorithm.h"
#include "distances.h"
#include "factored_transition_system.h"
#include "fts_factory.h"
#include "label_reduction.h"
#include "labels.h"
#include "merge_and_shrink_representation.h"
#include "merge_strategy.h"
#include "merge_strategy_factory.h"
#include "shrink_strategy.h"
#include "transition_system.h"
#include "types.h"
#include "utils.h"
#include "../options/option_parser.h"
#include "../options/options.h"
#include "../task_utils/task_properties.h"
#include "../utils/countdown_timer.h"
#include "../utils/logging.h"
#include "../utils/markup.h"
#include "../utils/math.h"
#include "../utils/system.h"
#include "../utils/timer.h"
#include <cassert>
#include <iostream>
#include <string>
#include <utility>
#include <vector>
using namespace std;
using options::Bounds;
using options::OptionParser;
using options::Options;
using utils::ExitCode;
namespace merge_and_shrink {
static void log_progress(const utils::Timer &timer, const string &msg) {
utils::g_log << "M&S algorithm timer: " << timer << " (" << msg << ")" << endl;
}
MergeAndShrinkAlgorithm::MergeAndShrinkAlgorithm(const Options &opts) :
merge_strategy_factory(opts.get<shared_ptr<MergeStrategyFactory>>("merge_strategy")),
shrink_strategy(opts.get<shared_ptr<ShrinkStrategy>>("shrink_strategy")),
label_reduction(opts.get<shared_ptr<LabelReduction>>("label_reduction", nullptr)),
max_states(opts.get<int>("max_states")),
max_states_before_merge(opts.get<int>("max_states_before_merge")),
shrink_threshold_before_merge(opts.get<int>("threshold_before_merge")),
prune_unreachable_states(opts.get<bool>("prune_unreachable_states")),
prune_irrelevant_states(opts.get<bool>("prune_irrelevant_states")),
verbosity(opts.get<utils::Verbosity>("verbosity")),
main_loop_max_time(opts.get<double>("main_loop_max_time")),
starting_peak_memory(0) {
assert(max_states_before_merge > 0);
assert(max_states >= max_states_before_merge);
assert(shrink_threshold_before_merge <= max_states_before_merge);
}
void MergeAndShrinkAlgorithm::report_peak_memory_delta(bool final) const {
if (final)
utils::g_log << "Final";
else
utils::g_log << "Current";
utils::g_log << " peak memory increase of merge-and-shrink algorithm: "
<< utils::get_peak_memory_in_kb() - starting_peak_memory << " KB"
<< endl;
}
void MergeAndShrinkAlgorithm::dump_options() const {
if (verbosity >= utils::Verbosity::NORMAL) {
if (merge_strategy_factory) { // deleted after merge strategy extraction
merge_strategy_factory->dump_options();
utils::g_log << endl;
}
utils::g_log << "Options related to size limits and shrinking: " << endl;
utils::g_log << "Transition system size limit: " << max_states << endl
<< "Transition system size limit right before merge: "
<< max_states_before_merge << endl;
utils::g_log << "Threshold to trigger shrinking right before merge: "
<< shrink_threshold_before_merge << endl;
utils::g_log << endl;
utils::g_log << "Pruning unreachable states: "
<< (prune_unreachable_states ? "yes" : "no") << endl;
utils::g_log << "Pruning irrelevant states: "
<< (prune_irrelevant_states ? "yes" : "no") << endl;
utils::g_log << endl;
if (label_reduction) {
label_reduction->dump_options();
} else {
utils::g_log << "Label reduction disabled" << endl;
}
utils::g_log << endl;
utils::g_log << "Main loop max time in seconds: " << main_loop_max_time << endl;
utils::g_log << endl;
}
}
void MergeAndShrinkAlgorithm::warn_on_unusual_options() const {
string dashes(79, '=');
if (!label_reduction) {
utils::g_log << dashes << endl
<< "WARNING! You did not enable label reduction.\nThis may "
"drastically reduce the performance of merge-and-shrink!"
<< endl << dashes << endl;
} else if (label_reduction->reduce_before_merging() && label_reduction->reduce_before_shrinking()) {
utils::g_log << dashes << endl
<< "WARNING! You set label reduction to be applied twice in each merge-and-shrink\n"
"iteration, both before shrinking and merging. This double computation effort\n"
"does not pay off for most configurations!"
<< endl << dashes << endl;
} else {
if (label_reduction->reduce_before_shrinking() &&
(shrink_strategy->get_name() == "f-preserving"
|| shrink_strategy->get_name() == "random")) {
utils::g_log << dashes << endl
<< "WARNING! Bucket-based shrink strategies such as f-preserving random perform\n"
"best if used with label reduction before merging, not before shrinking!"
<< endl << dashes << endl;
}
if (label_reduction->reduce_before_merging() &&
shrink_strategy->get_name() == "bisimulation") {
utils::g_log << dashes << endl
<< "WARNING! Shrinking based on bisimulation performs best if used with label\n"
"reduction before shrinking, not before merging!"
<< endl << dashes << endl;
}
}
if (!prune_unreachable_states || !prune_irrelevant_states) {
utils::g_log << dashes << endl
<< "WARNING! Pruning is (partially) turned off!\nThis may "
"drastically reduce the performance of merge-and-shrink!"
<< endl << dashes << endl;
}
}
bool MergeAndShrinkAlgorithm::ran_out_of_time(
const utils::CountdownTimer &timer) const {
if (timer.is_expired()) {
if (verbosity >= utils::Verbosity::NORMAL) {
utils::g_log << "Ran out of time, stopping computation." << endl;
utils::g_log << endl;
}
return true;
}
return false;
}
void MergeAndShrinkAlgorithm::main_loop(
FactoredTransitionSystem &fts,
const TaskProxy &task_proxy) {
utils::CountdownTimer timer(main_loop_max_time);
if (verbosity >= utils::Verbosity::NORMAL) {
utils::g_log << "Starting main loop ";
if (main_loop_max_time == numeric_limits<double>::infinity()) {
utils::g_log << "without a time limit." << endl;
} else {
utils::g_log << "with a time limit of "
<< main_loop_max_time << "s." << endl;
}
}
int maximum_intermediate_size = 0;
for (int i = 0; i < fts.get_size(); ++i) {
int size = fts.get_transition_system(i).get_size();
if (size > maximum_intermediate_size) {
maximum_intermediate_size = size;
}
}
if (label_reduction) {
label_reduction->initialize(task_proxy);
}
unique_ptr<MergeStrategy> merge_strategy =
merge_strategy_factory->compute_merge_strategy(task_proxy, fts);
merge_strategy_factory = nullptr;
auto log_main_loop_progress = [&timer](const string &msg) {
utils::g_log << "M&S algorithm main loop timer: "
<< timer.get_elapsed_time()
<< " (" << msg << ")" << endl;
};
int iteration_counter = 0;
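    /*
      Each iteration selects the next merge pair, optionally reduces labels,
      shrinks the two factors, optionally reduces labels again, merges, and
      finally prunes the product, checking the time limit between steps.
    */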
while (fts.get_num_active_entries() > 1) {
// Choose next transition systems to merge
pair<int, int> merge_indices = merge_strategy->get_next();
if (ran_out_of_time(timer)) {
break;
}
int merge_index1 = merge_indices.first;
int merge_index2 = merge_indices.second;
assert(merge_index1 != merge_index2);
if (verbosity >= utils::Verbosity::NORMAL) {
utils::g_log << "Next pair of indices: ("
<< merge_index1 << ", " << merge_index2 << ")" << endl;
if (verbosity >= utils::Verbosity::VERBOSE) {
fts.statistics(merge_index1);
fts.statistics(merge_index2);
}
log_main_loop_progress("after computation of next merge");
}
// Label reduction (before shrinking)
if (label_reduction && label_reduction->reduce_before_shrinking()) {
bool reduced = label_reduction->reduce(merge_indices, fts, verbosity);
if (verbosity >= utils::Verbosity::NORMAL && reduced) {
log_main_loop_progress("after label reduction");
}
}
if (ran_out_of_time(timer)) {
break;
}
// Shrinking
bool shrunk = shrink_before_merge_step(
fts,
merge_index1,
merge_index2,
max_states,
max_states_before_merge,
shrink_threshold_before_merge,
*shrink_strategy,
verbosity);
if (verbosity >= utils::Verbosity::NORMAL && shrunk) {
log_main_loop_progress("after shrinking");
}
if (ran_out_of_time(timer)) {
break;
}
// Label reduction (before merging)
if (label_reduction && label_reduction->reduce_before_merging()) {
bool reduced = label_reduction->reduce(merge_indices, fts, verbosity);
if (verbosity >= utils::Verbosity::NORMAL && reduced) {
log_main_loop_progress("after label reduction");
}
}
if (ran_out_of_time(timer)) {
break;
}
// Merging
int merged_index = fts.merge(merge_index1, merge_index2, verbosity);
int abs_size = fts.get_transition_system(merged_index).get_size();
if (abs_size > maximum_intermediate_size) {
maximum_intermediate_size = abs_size;
}
if (verbosity >= utils::Verbosity::NORMAL) {
if (verbosity >= utils::Verbosity::VERBOSE) {
fts.statistics(merged_index);
}
log_main_loop_progress("after merging");
}
if (ran_out_of_time(timer)) {
break;
}
// Pruning
if (prune_unreachable_states || prune_irrelevant_states) {
bool pruned = prune_step(
fts,
merged_index,
prune_unreachable_states,
prune_irrelevant_states,
verbosity);
if (verbosity >= utils::Verbosity::NORMAL && pruned) {
if (verbosity >= utils::Verbosity::VERBOSE) {
fts.statistics(merged_index);
}
log_main_loop_progress("after pruning");
}
}
/*
NOTE: both the shrink strategy classes and the construction
of the composite transition system require the input
transition systems to be non-empty, i.e. the initial state
not to be pruned/not to be evaluated as infinity.
*/
if (!fts.is_factor_solvable(merged_index)) {
if (verbosity >= utils::Verbosity::NORMAL) {
utils::g_log << "Abstract problem is unsolvable, stopping "
"computation. " << endl << endl;
}
break;
}
if (ran_out_of_time(timer)) {
break;
}
// End-of-iteration output.
if (verbosity >= utils::Verbosity::VERBOSE) {
report_peak_memory_delta();
}
if (verbosity >= utils::Verbosity::NORMAL) {
utils::g_log << endl;
}
++iteration_counter;
}
utils::g_log << "End of merge-and-shrink algorithm, statistics:" << endl;
utils::g_log << "Main loop runtime: " << timer.get_elapsed_time() << endl;
utils::g_log << "Maximum intermediate abstraction size: "
<< maximum_intermediate_size << endl;
shrink_strategy = nullptr;
label_reduction = nullptr;
}
FactoredTransitionSystem MergeAndShrinkAlgorithm::build_factored_transition_system(
const TaskProxy &task_proxy) {
if (starting_peak_memory) {
cerr << "Calling build_factored_transition_system twice is not "
<< "supported!" << endl;
utils::exit_with(utils::ExitCode::SEARCH_CRITICAL_ERROR);
}
starting_peak_memory = utils::get_peak_memory_in_kb();
utils::Timer timer;
utils::g_log << "Running merge-and-shrink algorithm..." << endl;
task_properties::verify_no_axioms(task_proxy);
dump_options();
warn_on_unusual_options();
utils::g_log << endl;
const bool compute_init_distances =
shrink_strategy->requires_init_distances() ||
merge_strategy_factory->requires_init_distances() ||
prune_unreachable_states;
const bool compute_goal_distances =
shrink_strategy->requires_goal_distances() ||
merge_strategy_factory->requires_goal_distances() ||
prune_irrelevant_states;
FactoredTransitionSystem fts =
create_factored_transition_system(
task_proxy,
compute_init_distances,
compute_goal_distances,
verbosity);
if (verbosity >= utils::Verbosity::NORMAL) {
log_progress(timer, "after computation of atomic factors");
}
/*
Prune all atomic factors according to the chosen options. Stop early if
one factor is unsolvable.
TODO: think about if we can prune already while creating the atomic FTS.
*/
bool pruned = false;
bool unsolvable = false;
for (int index = 0; index < fts.get_size(); ++index) {
assert(fts.is_active(index));
if (prune_unreachable_states || prune_irrelevant_states) {
bool pruned_factor = prune_step(
fts,
index,
prune_unreachable_states,
prune_irrelevant_states,
verbosity);
pruned = pruned || pruned_factor;
}
if (!fts.is_factor_solvable(index)) {
utils::g_log << "Atomic FTS is unsolvable, stopping computation." << endl;
unsolvable = true;
break;
}
}
if (verbosity >= utils::Verbosity::NORMAL) {
if (pruned) {
log_progress(timer, "after pruning atomic factors");
}
utils::g_log << endl;
}
if (!unsolvable && main_loop_max_time > 0) {
main_loop(fts, task_proxy);
}
const bool final = true;
report_peak_memory_delta(final);
utils::g_log << "Merge-and-shrink algorithm runtime: " << timer << endl;
utils::g_log << endl;
return fts;
}
void add_merge_and_shrink_algorithm_options_to_parser(OptionParser &parser) {
// Merge strategy option.
parser.add_option<shared_ptr<MergeStrategyFactory>>(
"merge_strategy",
"See detailed documentation for merge strategies. "
"We currently recommend SCC-DFP, which can be achieved using "
"{{{merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector="
"score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order"
"]))}}}");
// Shrink strategy option.
parser.add_option<shared_ptr<ShrinkStrategy>>(
"shrink_strategy",
"See detailed documentation for shrink strategies. "
"We currently recommend non-greedy shrink_bisimulation, which can be "
"achieved using {{{shrink_strategy=shrink_bisimulation(greedy=false)}}}");
// Label reduction option.
parser.add_option<shared_ptr<LabelReduction>>(
"label_reduction",
"See detailed documentation for labels. There is currently only "
"one 'option' to use label_reduction, which is {{{label_reduction=exact}}} "
"Also note the interaction with shrink strategies.",
OptionParser::NONE);
// Pruning options.
parser.add_option<bool>(
"prune_unreachable_states",
"If true, prune abstract states unreachable from the initial state.",
"true");
parser.add_option<bool>(
"prune_irrelevant_states",
"If true, prune abstract states from which no goal state can be "
"reached.",
"true");
add_transition_system_size_limit_options_to_parser(parser);
/*
silent: no output during construction, only starting and final statistics
normal: basic output during construction, starting and final statistics
verbose: full output during construction, starting and final statistics
debug: full output with additional debug output
*/
utils::add_verbosity_option_to_parser(parser);
parser.add_option<double>(
"main_loop_max_time",
"A limit in seconds on the runtime of the main loop of the algorithm. "
"If the limit is exceeded, the algorithm terminates, potentially "
"returning a factored transition system with several factors. Also "
"note that the time limit is only checked between transformations "
"of the main loop, but not during, so it can be exceeded if a "
"transformation is runtime-intense.",
"infinity",
Bounds("0.0", "infinity"));
}
void add_transition_system_size_limit_options_to_parser(OptionParser &parser) {
parser.add_option<int>(
"max_states",
"maximum transition system size allowed at any time point.",
"-1",
Bounds("-1", "infinity"));
parser.add_option<int>(
"max_states_before_merge",
"maximum transition system size allowed for two transition systems "
"before being merged to form the synchronized product.",
"-1",
Bounds("-1", "infinity"));
parser.add_option<int>(
"threshold_before_merge",
"If a transition system, before being merged, surpasses this soft "
"transition system size limit, the shrink strategy is called to "
"possibly shrink the transition system.",
"-1",
Bounds("-1", "infinity"));
}
void handle_shrink_limit_options_defaults(Options &opts) {
int max_states = opts.get<int>("max_states");
int max_states_before_merge = opts.get<int>("max_states_before_merge");
int threshold = opts.get<int>("threshold_before_merge");
// If none of the two state limits has been set: set default limit.
if (max_states == -1 && max_states_before_merge == -1) {
max_states = 50000;
}
// If exactly one of the max_states options has been set, set the other
// so that it imposes no further limits.
if (max_states_before_merge == -1) {
max_states_before_merge = max_states;
} else if (max_states == -1) {
int n = max_states_before_merge;
if (utils::is_product_within_limit(n, n, INF)) {
max_states = n * n;
} else {
max_states = INF;
}
}
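    // Example: max_states_before_merge = 1000 with max_states unset yields
    // max_states = 1000 * 1000 = 1,000,000 (or INF if the product overflows).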
if (max_states_before_merge > max_states) {
utils::g_log << "warning: max_states_before_merge exceeds max_states, "
<< "correcting." << endl;
max_states_before_merge = max_states;
}
if (max_states < 1) {
cerr << "error: transition system size must be at least 1" << endl;
utils::exit_with(ExitCode::SEARCH_INPUT_ERROR);
}
if (max_states_before_merge < 1) {
cerr << "error: transition system size before merge must be at least 1"
<< endl;
utils::exit_with(ExitCode::SEARCH_INPUT_ERROR);
}
if (threshold == -1) {
threshold = max_states;
}
if (threshold < 1) {
cerr << "error: threshold must be at least 1" << endl;
utils::exit_with(ExitCode::SEARCH_INPUT_ERROR);
}
if (threshold > max_states) {
utils::g_log << "warning: threshold exceeds max_states, correcting" << endl;
threshold = max_states;
}
opts.set<int>("max_states", max_states);
opts.set<int>("max_states_before_merge", max_states_before_merge);
opts.set<int>("threshold_before_merge", threshold);
}
}
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/shrink_bisimulation.cc
#include "shrink_bisimulation.h"
#include "distances.h"
#include "factored_transition_system.h"
#include "label_equivalence_relation.h"
#include "transition_system.h"
#include "../option_parser.h"
#include "../plugin.h"
#include "../utils/collections.h"
#include "../utils/logging.h"
#include "../utils/markup.h"
#include "../utils/system.h"
#include <algorithm>
#include <cassert>
#include <limits>
#include <iostream>
#include <memory>
#include <unordered_map>
using namespace std;
namespace merge_and_shrink {
/* A successor signature characterizes the behaviour of an abstract
state in so far as bisimulation cares about it. States with
identical successor signature are not distinguished by
bisimulation.
Each entry in the vector is a pair of (label group ID, equivalence class of
successor). The bisimulation algorithm requires that the vector is
sorted and uniquified. */
using SuccessorSignature = vector<pair<int, int>>;
/*
    As we use SENTINEL = numeric_limits<int>::max() as a sentinel signature and
irrelevant states have a distance of INF = numeric_limits<int>::max(), we
use INF - 1 as the distance value for all irrelevant states. This guarantees
that also irrelevant states are always ordered before the sentinel.
*/
const int SENTINEL = numeric_limits<int>::max();
const int IRRELEVANT = SENTINEL - 1;
/*
The following class encodes all we need to know about a state for
bisimulation: its h value, which equivalence class ("group") it currently
belongs to, its successor signature (see above), and what the original
state is.
*/
struct Signature {
int h_and_goal; // -1 for goal states; h value for non-goal states
int group;
SuccessorSignature succ_signature;
int state;
Signature(int h, bool is_goal, int group_,
const SuccessorSignature &succ_signature_,
int state_)
: group(group_), succ_signature(succ_signature_), state(state_) {
if (is_goal) {
assert(h == 0);
h_and_goal = -1;
} else {
h_and_goal = h;
}
}
bool operator<(const Signature &other) const {
if (h_and_goal != other.h_and_goal)
return h_and_goal < other.h_and_goal;
if (group != other.group)
return group < other.group;
if (succ_signature != other.succ_signature)
return succ_signature < other.succ_signature;
return state < other.state;
}
void dump() const {
utils::g_log << "Signature(h_and_goal = " << h_and_goal
<< ", group = " << group
<< ", state = " << state
<< ", succ_sig = [";
for (size_t i = 0; i < succ_signature.size(); ++i) {
if (i)
utils::g_log << ", ";
utils::g_log << "(" << succ_signature[i].first
<< "," << succ_signature[i].second
<< ")";
}
utils::g_log << "])" << endl;
}
};
ShrinkBisimulation::ShrinkBisimulation(const Options &opts)
: greedy(opts.get<bool>("greedy")),
at_limit(opts.get<AtLimit>("at_limit")) {
}
int ShrinkBisimulation::initialize_groups(
const TransitionSystem &ts,
const Distances &distances,
vector<int> &state_to_group) const {
/* Group 0 holds all goal states.
Each other group holds all states with one particular h value.
Note that some goal state *must* exist because irrelevant and
unreachable states are pruned before we shrink and we never
perform the shrinking if that pruning shows that the problem is
unsolvable.
*/
typedef unordered_map<int, int> GroupMap;
GroupMap h_to_group;
int num_groups = 1; // Group 0 is for goal states.
for (int state = 0; state < ts.get_size(); ++state) {
int h = distances.get_goal_distance(state);
if (h == INF) {
h = IRRELEVANT;
}
if (ts.is_goal_state(state)) {
assert(h == 0);
state_to_group[state] = 0;
} else {
pair<GroupMap::iterator, bool> result = h_to_group.insert(
make_pair(h, num_groups));
state_to_group[state] = result.first->second;
if (result.second) {
// We inserted a new element => a new group was started.
++num_groups;
}
}
}
return num_groups;
}
void ShrinkBisimulation::compute_signatures(
const TransitionSystem &ts,
const Distances &distances,
vector<Signature> &signatures,
const vector<int> &state_to_group) const {
assert(signatures.empty());
// Step 1: Compute bare state signatures (without transition information).
signatures.push_back(Signature(-2, false, -1, SuccessorSignature(), -1));
for (int state = 0; state < ts.get_size(); ++state) {
int h = distances.get_goal_distance(state);
if (h == INF) {
h = IRRELEVANT;
}
Signature signature(h, ts.is_goal_state(state),
state_to_group[state], SuccessorSignature(),
state);
signatures.push_back(signature);
}
signatures.push_back(Signature(SENTINEL, false, -1, SuccessorSignature(), -1));
// Step 2: Add transition information.
int label_group_counter = 0;
/*
Note that the final result of the bisimulation may depend on the
order in which transitions are considered below.
If label groups were sorted (every group by increasing label numbers,
groups by smallest label number), then the following configuration
gives a different result on parcprinter-08-strips:p06.pddl:
astar(merge_and_shrink(
merge_strategy=merge_stateless(merge_selector=
score_based_filtering(scoring_functions=[goal_relevance,dfp,
total_order])),
shrink_strategy=shrink_bisimulation(greedy=false),
label_reduction=exact(before_shrinking=true,before_merging=false),
max_states=50000,threshold_before_merge=1))
The same behavioral difference can be obtained even without modifying
the merge-and-shrink code, using the two revisions c66ee00a250a and
d2e317621f2c. Running the above config, adapted to the old syntax,
yields the same difference:
astar(merge_and_shrink(merge_strategy=merge_dfp,
shrink_strategy=shrink_bisimulation(greedy=false,max_states=50000,
threshold=1),
label_reduction=exact(before_shrinking=true,before_merging=false)))
*/
for (GroupAndTransitions gat : ts) {
const LabelGroup &label_group = gat.label_group;
const vector<Transition> &transitions = gat.transitions;
for (const Transition &transition : transitions) {
assert(signatures[transition.src + 1].state == transition.src);
bool skip_transition = false;
if (greedy) {
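                // Greedy bisimulation only considers transitions that lie on
                // an optimal path to the goal, i.e. transitions with
                // target_h + cost == src_h; all others are skipped.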
int src_h = distances.get_goal_distance(transition.src);
int target_h = distances.get_goal_distance(transition.target);
if (src_h == INF || target_h == INF) {
// We skip transitions connected to an irrelevant state.
skip_transition = true;
} else {
int cost = label_group.get_cost();
assert(target_h + cost >= src_h);
skip_transition = (target_h + cost != src_h);
}
}
if (!skip_transition) {
int target_group = state_to_group[transition.target];
assert(target_group != -1 && target_group != SENTINEL);
signatures[transition.src + 1].succ_signature.push_back(
make_pair(label_group_counter, target_group));
}
}
++label_group_counter;
}
/* Step 3: Canonicalize the representation. The resulting
signatures must satisfy the following properties:
1. Signature::operator< defines a total order with the correct
sentinels at the start and end. The signatures vector is
sorted according to that order.
2. Goal states come before non-goal states, and low-h states come
before high-h states.
3. States that currently fall into the same group form contiguous
subsequences.
4. Two signatures compare equal according to Signature::operator<
iff we don't want to distinguish their states in the current
bisimulation round.
*/
for (size_t i = 0; i < signatures.size(); ++i) {
SuccessorSignature &succ_sig = signatures[i].succ_signature;
::sort(succ_sig.begin(), succ_sig.end());
succ_sig.erase(::unique(succ_sig.begin(), succ_sig.end()),
succ_sig.end());
}
::sort(signatures.begin(), signatures.end());
}
StateEquivalenceRelation ShrinkBisimulation::compute_equivalence_relation(
const TransitionSystem &ts,
const Distances &distances,
int target_size) const {
assert(distances.are_goal_distances_computed());
int num_states = ts.get_size();
vector<int> state_to_group(num_states);
vector<Signature> signatures;
signatures.reserve(num_states + 2);
int num_groups = initialize_groups(ts, distances, state_to_group);
// utils::g_log << "number of initial groups: " << num_groups << endl;
// TODO: We currently violate this; see issue250
// assert(num_groups <= target_size);
bool stable = false;
bool stop_requested = false;
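    /*
      Standard partition refinement: repeatedly split groups of states whose
      successor signatures differ, until the relation is stable or the size
      limit is reached.
    */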
while (!stable && !stop_requested && num_groups < target_size) {
stable = true;
signatures.clear();
compute_signatures(ts, distances, signatures, state_to_group);
// Verify size of signatures and presence of sentinels.
assert(static_cast<int>(signatures.size()) == num_states + 2);
assert(signatures[0].h_and_goal == -2);
assert(signatures[num_states + 1].h_and_goal == SENTINEL);
int sig_start = 1; // Skip over initial sentinel.
while (true) {
int h_and_goal = signatures[sig_start].h_and_goal;
if (h_and_goal == SENTINEL) {
// We have hit the end sentinel.
assert(sig_start + 1 == static_cast<int>(signatures.size()));
break;
}
// Compute the number of groups needed after splitting.
int num_old_groups = 0;
int num_new_groups = 0;
int sig_end;
for (sig_end = sig_start; true; ++sig_end) {
if (signatures[sig_end].h_and_goal != h_and_goal) {
break;
}
const Signature &prev_sig = signatures[sig_end - 1];
const Signature &curr_sig = signatures[sig_end];
if (sig_end == sig_start) {
assert(prev_sig.group != curr_sig.group);
}
if (prev_sig.group != curr_sig.group) {
++num_old_groups;
++num_new_groups;
} else if (prev_sig.succ_signature != curr_sig.succ_signature) {
++num_new_groups;
}
}
assert(sig_end > sig_start);
if (at_limit == AtLimit::RETURN &&
num_groups - num_old_groups + num_new_groups > target_size) {
/* Can't split the group (or the set of groups for
this h value) -- would exceed bound on abstract
state number.
*/
stop_requested = true;
break;
} else if (num_new_groups != num_old_groups) {
// Split into new groups.
stable = false;
int new_group_no = -1;
for (int i = sig_start; i < sig_end; ++i) {
const Signature &prev_sig = signatures[i - 1];
const Signature &curr_sig = signatures[i];
if (prev_sig.group != curr_sig.group) {
// Start first group of a block; keep old group no.
new_group_no = curr_sig.group;
} else if (prev_sig.succ_signature
!= curr_sig.succ_signature) {
new_group_no = num_groups++;
assert(num_groups <= target_size);
}
assert(new_group_no != -1);
state_to_group[curr_sig.state] = new_group_no;
if (num_groups == target_size)
break;
}
if (num_groups == target_size)
break;
}
sig_start = sig_end;
}
}
/* Reduce memory pressure before generating the equivalence
relation since this is one of the code parts relevant to peak
memory. */
utils::release_vector_memory(signatures);
// Generate final result.
StateEquivalenceRelation equivalence_relation;
equivalence_relation.resize(num_groups);
for (int state = 0; state < num_states; ++state) {
int group = state_to_group[state];
if (group != -1) {
assert(group >= 0 && group < num_groups);
equivalence_relation[group].push_front(state);
}
}
return equivalence_relation;
}
string ShrinkBisimulation::name() const {
return "bisimulation";
}
void ShrinkBisimulation::dump_strategy_specific_options() const {
utils::g_log << "Bisimulation type: " << (greedy ? "greedy" : "exact") << endl;
utils::g_log << "At limit: ";
if (at_limit == AtLimit::RETURN) {
utils::g_log << "return";
} else if (at_limit == AtLimit::USE_UP) {
utils::g_log << "use up limit";
} else {
ABORT("Unknown setting for at_limit.");
}
utils::g_log << endl;
}
static shared_ptr<ShrinkStrategy> _parse(OptionParser &parser) {
parser.document_synopsis(
"Bismulation based shrink strategy",
"This shrink strategy implements the algorithm described in"
" the paper:" + utils::format_conference_reference(
{"Raz Nissim", "Joerg Hoffmann", "Malte Helmert"},
"Computing Perfect Heuristics in Polynomial Time: On Bisimulation"
" and Merge-and-Shrink Abstractions in Optimal Planning.",
"https://ai.dmi.unibas.ch/papers/nissim-et-al-ijcai2011.pdf",
"Proceedings of the Twenty-Second International Joint Conference"
" on Artificial Intelligence (IJCAI 2011)",
"1983-1990",
"AAAI Press",
"2011"));
parser.document_note(
"shrink_bisimulation(greedy=true)",
"Combine this with the merge-and-shrink options max_states=infinity "
"and threshold_before_merge=1 and with the linear merge strategy "
"reverse_level to obtain the variant 'greedy bisimulation without size "
"limit', called M&S-gop in the IJCAI 2011 paper. "
"When we last ran experiments on interaction of shrink strategies "
"with label reduction, this strategy performed best when used with "
"label reduction before shrinking (and no label reduction before "
"merging).");
parser.document_note(
"shrink_bisimulation(greedy=false)",
"Combine this with the merge-and-shrink option max_states=N (where N "
"is a numerical parameter for which sensible values include 1000, "
"10000, 50000, 100000 and 200000) and with the linear merge strategy "
"reverse_level to obtain the variant 'exact bisimulation with a size "
"limit', called DFP-bop in the IJCAI 2011 paper. "
"When we last ran experiments on interaction of shrink strategies "
"with label reduction, this strategy performed best when used with "
"label reduction before shrinking (and no label reduction before "
"merging).");
parser.add_option<bool>("greedy", "use greedy bisimulation", "false");
vector<string> at_limit;
at_limit.push_back("RETURN");
at_limit.push_back("USE_UP");
parser.add_enum_option<AtLimit>(
"at_limit", at_limit,
"what to do when the size limit is hit", "RETURN");
Options opts = parser.parse();
if (parser.help_mode())
return nullptr;
if (parser.dry_run())
return nullptr;
else
return make_shared<ShrinkBisimulation>(opts);
}
static Plugin<ShrinkStrategy> _plugin("shrink_bisimulation", _parse);
}
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/shrink_bisimulation.h
#ifndef MERGE_AND_SHRINK_SHRINK_BISIMULATION_H
#define MERGE_AND_SHRINK_SHRINK_BISIMULATION_H
#include "shrink_strategy.h"
namespace options {
class Options;
}
namespace merge_and_shrink {
struct Signature;
enum class AtLimit {
RETURN,
USE_UP
};
class ShrinkBisimulation : public ShrinkStrategy {
const bool greedy;
const AtLimit at_limit;
void compute_abstraction(
const TransitionSystem &ts,
const Distances &distances,
int target_size,
StateEquivalenceRelation &equivalence_relation) const;
int initialize_groups(
const TransitionSystem &ts,
const Distances &distances,
std::vector<int> &state_to_group) const;
void compute_signatures(
const TransitionSystem &ts,
const Distances &distances,
std::vector<Signature> &signatures,
const std::vector<int> &state_to_group) const;
protected:
virtual void dump_strategy_specific_options() const override;
virtual std::string name() const override;
public:
explicit ShrinkBisimulation(const options::Options &opts);
virtual ~ShrinkBisimulation() override = default;
virtual StateEquivalenceRelation compute_equivalence_relation(
const TransitionSystem &ts,
const Distances &distances,
int target_size) const override;
virtual bool requires_init_distances() const override {
return false;
}
virtual bool requires_goal_distances() const override {
return true;
}
};
}
#endif
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/labels.cc
#include "labels.h"
#include "types.h"
#include "../utils/collections.h"
#include "../utils/logging.h"
#include "../utils/memory.h"
#include <cassert>
#include <iostream>
using namespace std;
namespace merge_and_shrink {
Labels::Labels(vector<unique_ptr<Label>> &&labels)
: labels(move(labels)),
max_size(0) {
if (!this->labels.empty()) {
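        /*
          Each label reduction replaces at least two old labels with one new
          label, so starting from n labels, at most n - 1 new labels can ever
          be created, for a maximum of 2n - 1 labels overall.
        */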
max_size = this->labels.size() * 2 - 1;
}
}
void Labels::reduce_labels(const vector<int> &old_label_nos) {
/*
Even though we currently only support exact label reductions where
reduced labels are of equal cost, to support non-exact label reductions,
we compute the cost of the new label as the minimum cost of all old
labels reduced to it to satisfy admissibility.
*/
int new_label_cost = INF;
for (size_t i = 0; i < old_label_nos.size(); ++i) {
int old_label_no = old_label_nos[i];
int cost = get_label_cost(old_label_no);
if (cost < new_label_cost) {
new_label_cost = cost;
}
labels[old_label_no] = nullptr;
}
labels.push_back(utils::make_unique_ptr<Label>(new_label_cost));
}
bool Labels::is_current_label(int label_no) const {
assert(utils::in_bounds(label_no, labels));
return labels[label_no] != nullptr;
}
int Labels::get_label_cost(int label_no) const {
assert(labels[label_no]);
return labels[label_no]->get_cost();
}
void Labels::dump_labels() const {
utils::g_log << "active labels:" << endl;
for (size_t label_no = 0; label_no < labels.size(); ++label_no) {
if (labels[label_no]) {
utils::g_log << "label " << label_no
<< ", cost " << labels[label_no]->get_cost()
<< endl;
}
}
}
}
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_strategy_precomputed.cc
#include "merge_strategy_precomputed.h"
#include "factored_transition_system.h"
#include "merge_tree.h"
#include <cassert>
using namespace std;
namespace merge_and_shrink {
MergeStrategyPrecomputed::MergeStrategyPrecomputed(
const FactoredTransitionSystem &fts, unique_ptr<MergeTree> merge_tree)
: MergeStrategy(fts), merge_tree(move(merge_tree)) {
}
pair<int, int> MergeStrategyPrecomputed::get_next() {
assert(!merge_tree->done());
int next_merge_index = fts.get_size();
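    // The product of the merge is appended to the FTS, so its index equals
    // the current size of the FTS.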
pair<int, int> next_merge = merge_tree->get_next_merge(next_merge_index);
assert(fts.is_active(next_merge.first));
assert(fts.is_active(next_merge.second));
return next_merge;
}
}
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_strategy_sccs.h
#ifndef MERGE_AND_SHRINK_MERGE_STRATEGY_SCCS_H
#define MERGE_AND_SHRINK_MERGE_STRATEGY_SCCS_H
#include "merge_strategy.h"
#include <memory>
#include <vector>
class TaskProxy;
namespace merge_and_shrink {
class MergeSelector;
class MergeTreeFactory;
class MergeTree;
class MergeStrategySCCs : public MergeStrategy {
const TaskProxy &task_proxy;
std::shared_ptr<MergeTreeFactory> merge_tree_factory;
std::shared_ptr<MergeSelector> merge_selector;
std::vector<std::vector<int>> non_singleton_cg_sccs;
std::vector<int> indices_of_merged_sccs;
// Active "merge strategies" while merging a set of indices
std::unique_ptr<MergeTree> current_merge_tree;
std::vector<int> current_ts_indices;
public:
MergeStrategySCCs(
const FactoredTransitionSystem &fts,
const TaskProxy &task_proxy,
const std::shared_ptr<MergeTreeFactory> &merge_tree_factory,
const std::shared_ptr<MergeSelector> &merge_selector,
std::vector<std::vector<int>> non_singleton_cg_sccs,
std::vector<int> indices_of_merged_sccs);
virtual ~MergeStrategySCCs() override;
virtual std::pair<int, int> get_next() override;
};
}
#endif
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/shrink_bucket_based.h
#ifndef MERGE_AND_SHRINK_SHRINK_BUCKET_BASED_H
#define MERGE_AND_SHRINK_SHRINK_BUCKET_BASED_H
#include "shrink_strategy.h"
#include <memory>
#include <vector>
namespace options {
class OptionParser;
class Options;
}
namespace utils {
class RandomNumberGenerator;
}
namespace merge_and_shrink {
/* A base class for bucket-based shrink strategies.
A bucket-based strategy partitions the states into an ordered
vector of buckets, from low to high priority, and then abstracts
them to a given target size according to the following rules:
Repeat until we respect the target size:
If any bucket still contains two states:
Combine two random states from the non-singleton bucket
with the lowest priority.
Otherwise:
Combine the two lowest-priority buckets.
For the (usual) case where the target size is larger than the
number of buckets, this works out in such a way that the
high-priority buckets are not abstracted at all, the low-priority
buckets are abstracted by combining all states in each bucket, and
(up to) one bucket "in the middle" is partially abstracted.
*/
class ShrinkBucketBased : public ShrinkStrategy {
protected:
using Bucket = std::vector<int>;
std::shared_ptr<utils::RandomNumberGenerator> rng;
private:
StateEquivalenceRelation compute_abstraction(
const std::vector<Bucket> &buckets,
int target_size) const;
protected:
virtual std::vector<Bucket> partition_into_buckets(
const TransitionSystem &ts,
        const Distances &distances) const = 0;
public:
explicit ShrinkBucketBased(const options::Options &opts);
virtual ~ShrinkBucketBased() override = default;
virtual StateEquivalenceRelation compute_equivalence_relation(
const TransitionSystem &ts,
const Distances &distances,
int target_size) const override;
static void add_options_to_parser(options::OptionParser &parser);
};
}
#endif
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_and_shrink_heuristic.h
#ifndef MERGE_AND_SHRINK_MERGE_AND_SHRINK_HEURISTIC_H
#define MERGE_AND_SHRINK_MERGE_AND_SHRINK_HEURISTIC_H
#include "../heuristic.h"
#include <memory>
namespace utils {
enum class Verbosity;
}
namespace merge_and_shrink {
class FactoredTransitionSystem;
class MergeAndShrinkRepresentation;
class MergeAndShrinkHeuristic : public Heuristic {
const utils::Verbosity verbosity;
// The final merge-and-shrink representations, storing goal distances.
std::vector<std::unique_ptr<MergeAndShrinkRepresentation>> mas_representations;
void extract_factor(FactoredTransitionSystem &fts, int index);
bool extract_unsolvable_factor(FactoredTransitionSystem &fts);
void extract_nontrivial_factors(FactoredTransitionSystem &fts);
void extract_factors(FactoredTransitionSystem &fts);
protected:
virtual int compute_heuristic(const State &ancestor_state) override;
public:
explicit MergeAndShrinkHeuristic(const options::Options &opts);
};
}
#endif
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_scoring_function_miasm_utils.cc
#include "merge_scoring_function_miasm_utils.h"
#include "distances.h"
#include "factored_transition_system.h"
#include "shrink_strategy.h"
#include "transition_system.h"
#include "utils.h"
#include "../utils/logging.h"
#include "../utils/memory.h"
#include <algorithm>
using namespace std;
namespace merge_and_shrink {
/*
Compute a state equivalence relation for the given transition system with
the given shrink strategy, respecting the given size limit new_size. If the
result of applying it actually reduced the size of the transition system,
copy the transition system, apply the state equivalence relation to it and
return the result. Return nullptr otherwise.
*/
unique_ptr<TransitionSystem> copy_and_shrink_ts(
const TransitionSystem &ts,
const Distances &distances,
const ShrinkStrategy &shrink_strategy,
int new_size,
utils::Verbosity verbosity) {
/*
TODO: think about factoring out common logic of this function and the
function shrink_factor in utils.cc
*/
StateEquivalenceRelation equivalence_relation =
shrink_strategy.compute_equivalence_relation(ts, distances, new_size);
// TODO: We currently violate this; see issue250
//assert(equivalence_relation.size() <= target_size);
int new_num_states = equivalence_relation.size();
if (new_num_states < ts.get_size()) {
/*
If we actually shrink the transition system, we first need to copy it,
then shrink it and return it.
*/
vector<int> abstraction_mapping = compute_abstraction_mapping(
ts.get_size(), equivalence_relation);
unique_ptr<TransitionSystem> ts_copy =
utils::make_unique_ptr<TransitionSystem>(ts);
ts_copy->apply_abstraction(
equivalence_relation, abstraction_mapping, verbosity);
return ts_copy;
} else {
return nullptr;
}
}
unique_ptr<TransitionSystem> shrink_before_merge_externally(
const FactoredTransitionSystem &fts,
int index1,
int index2,
const ShrinkStrategy &shrink_strategy,
int max_states,
int max_states_before_merge,
int shrink_threshold_before_merge) {
const TransitionSystem &original_ts1 = fts.get_transition_system(index1);
const TransitionSystem &original_ts2 = fts.get_transition_system(index2);
/*
Determine size limits and if shrinking is necessary or possible as done
in the merge-and-shrink loop.
*/
pair<int, int> new_sizes = compute_shrink_sizes(
original_ts1.get_size(),
original_ts2.get_size(),
max_states_before_merge,
max_states);
bool must_shrink_ts1 = original_ts1.get_size() > min(new_sizes.first, shrink_threshold_before_merge);
bool must_shrink_ts2 = original_ts2.get_size() > min(new_sizes.second, shrink_threshold_before_merge);
/*
If we need to shrink, copy_and_shrink_ts will take care of computing
a copy, shrinking it, and returning it. (In cases where shrinking is
only triggered due to the threshold being passed but no perfect
shrinking is possible, the method returns a null pointer.)
*/
utils::Verbosity verbosity = utils::Verbosity::SILENT;
unique_ptr<TransitionSystem> ts1 = nullptr;
if (must_shrink_ts1) {
ts1 = copy_and_shrink_ts(
original_ts1,
fts.get_distances(index1),
shrink_strategy,
new_sizes.first,
verbosity);
}
unique_ptr<TransitionSystem> ts2 = nullptr;
if (must_shrink_ts2) {
ts2 = copy_and_shrink_ts(
original_ts2,
fts.get_distances(index2),
shrink_strategy,
new_sizes.second,
verbosity);
}
/*
Return the product, using either the original transition systems or
the copied and shrunk ones.
*/
return TransitionSystem::merge(
fts.get_labels(),
(ts1 ? *ts1 : original_ts1),
(ts2 ? *ts2 : original_ts2),
verbosity);
}
}
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_selector.h
#ifndef MERGE_AND_SHRINK_MERGE_SELECTOR_H
#define MERGE_AND_SHRINK_MERGE_SELECTOR_H
#include <string>
#include <vector>
class TaskProxy;
namespace merge_and_shrink {
class FactoredTransitionSystem;
class MergeSelector {
protected:
virtual std::string name() const = 0;
virtual void dump_specific_options() const {}
std::vector<std::pair<int, int>> compute_merge_candidates(
const FactoredTransitionSystem &fts,
const std::vector<int> &indices_subset) const;
public:
MergeSelector() = default;
virtual ~MergeSelector() = default;
virtual std::pair<int, int> select_merge(
const FactoredTransitionSystem &fts,
const std::vector<int> &indices_subset = std::vector<int>()) const = 0;
virtual void initialize(const TaskProxy &task_proxy) = 0;
void dump_options() const;
virtual bool requires_init_distances() const = 0;
virtual bool requires_goal_distances() const = 0;
};
}
#endif
| 953 |
C
| 28.812499 | 79 | 0.701994 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/label_equivalence_relation.h
|
#ifndef MERGE_AND_SHRINK_LABEL_EQUIVALENCE_RELATION_H
#define MERGE_AND_SHRINK_LABEL_EQUIVALENCE_RELATION_H
#include "types.h"
#include <list>
#include <unordered_set>
#include <vector>
namespace merge_and_shrink {
class Labels;
using LabelIter = std::list<int>::iterator;
using LabelConstIter = std::list<int>::const_iterator;
class LabelGroup {
/*
A label group contains a set of locally equivalent labels, possibly of
different cost, and stores the minimum cost of all labels of the group.
*/
std::list<int> labels;
int cost;
public:
LabelGroup() : cost(INF) {
}
void set_cost(int cost_) {
cost = cost_;
}
LabelIter insert(int label) {
return labels.insert(labels.end(), label);
}
void erase(LabelIter pos) {
labels.erase(pos);
}
void clear() {
labels.clear();
}
LabelConstIter begin() const {
return labels.begin();
}
LabelConstIter end() const {
return labels.end();
}
bool empty() const {
return labels.empty();
}
int get_cost() const {
return cost;
}
};
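/*
  Usage sketch (hypothetical labels and costs), mirroring how
  LabelEquivalenceRelation maintains its groups:
    LabelGroup group;
    LabelIter it = group.insert(3);  // add label 3, keep its iterator
    group.insert(7);                 // add label 7
    group.set_cost(2);               // caller tracks the minimum label cost
    group.erase(it);                 // remove label 3 via the stored iterator
*/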
class LabelEquivalenceRelation {
/*
This class groups labels together and allows easy access to the group
and position within a group for every label. It is used by the class
TransitionSystem to group locally equivalent labels. Label groups
have implicit IDs defined by their index in grouped_labels.
*/
const Labels &labels;
std::vector<LabelGroup> grouped_labels;
/* Maps each label to its group's ID (index in grouped_labels) and its
iterator within the group. */
std::vector<std::pair<int, LabelIter>> label_to_positions;
void add_label_to_group(int group_id, int label_no);
public:
LabelEquivalenceRelation(
const Labels &labels, const std::vector<std::vector<int>> &label_groups);
/*
NOTE: we need a custom copy constructor here because we need to fill
label_to_positions with correct LabelIter objects that point to the
copied LabelGroup objects rather than to those of the given
LabelEquivalenceRelation other.
*/
LabelEquivalenceRelation(const LabelEquivalenceRelation &other);
/*
The given label mappings (from label reduction) contain the new label
and the old labels that were reduced to the new one.
If affected_group_ids is not given, then all old labels must have been
in the same group before, and the new labels are added to this group.
Otherwise, all old labels are removed from their group(s) and the new
label is added to a new group. Furthermore, the costs of the affected
groups are recomputed.
*/
void apply_label_mapping(
const std::vector<std::pair<int, std::vector<int>>> &label_mapping,
const std::unordered_set<int> *affected_group_ids = nullptr);
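    /*
      Example (hypothetical labels): with groups 0 = {0, 1} and 1 = {2, 3},
      applying the mapping {4 -> {0, 1}} without affected_group_ids adds
      label 4 to group 0 and removes labels 0 and 1, leaving group 0 = {4}.
      With affected_group_ids = {0, 1} and the mapping {4 -> {1, 2}},
      label 4 forms a new group, labels 1 and 2 are removed from their
      groups, and the costs of groups 0 and 1 are recomputed.
    */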
// Moves all labels from one group into the other.
void move_group_into_group(int from_group_id, int to_group_id);
int add_label_group(const std::vector<int> &new_labels);
bool is_empty_group(int group_id) const {
return grouped_labels[group_id].empty();
}
int get_group_id(int label_no) const {
return label_to_positions[label_no].first;
}
int get_size() const {
return grouped_labels.size();
}
const LabelGroup &get_group(int group_id) const {
return grouped_labels.at(group_id);
}
};
}
#endif
| 3,474 |
C
| 27.252032 | 81 | 0.661773 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_strategy_factory_stateless.h
|
#ifndef MERGE_AND_SHRINK_MERGE_STRATEGY_FACTORY_STATELESS_H
#define MERGE_AND_SHRINK_MERGE_STRATEGY_FACTORY_STATELESS_H
#include "merge_strategy_factory.h"
namespace options {
class Options;
}
namespace merge_and_shrink {
class MergeSelector;
class MergeStrategyFactoryStateless : public MergeStrategyFactory {
std::shared_ptr<MergeSelector> merge_selector;
protected:
virtual std::string name() const override;
virtual void dump_strategy_specific_options() const override;
public:
explicit MergeStrategyFactoryStateless(options::Options &options);
virtual ~MergeStrategyFactoryStateless() override = default;
virtual std::unique_ptr<MergeStrategy> compute_merge_strategy(
const TaskProxy &task_proxy,
const FactoredTransitionSystem &fts) override;
virtual bool requires_init_distances() const override;
virtual bool requires_goal_distances() const override;
};
}
#endif
| 923 |
C
| 30.862068 | 70 | 0.772481 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_scoring_function_miasm.h
|
#ifndef MERGE_AND_SHRINK_MERGE_SCORING_FUNCTION_MIASM_H
#define MERGE_AND_SHRINK_MERGE_SCORING_FUNCTION_MIASM_H
#include "merge_scoring_function.h"
#include <memory>
namespace options {
class Options;
}
namespace merge_and_shrink {
class ShrinkStrategy;
class MergeScoringFunctionMIASM : public MergeScoringFunction {
std::shared_ptr<ShrinkStrategy> shrink_strategy;
const int max_states;
const int max_states_before_merge;
const int shrink_threshold_before_merge;
protected:
virtual std::string name() const override;
public:
explicit MergeScoringFunctionMIASM(const options::Options &options);
virtual ~MergeScoringFunctionMIASM() override = default;
virtual std::vector<double> compute_scores(
const FactoredTransitionSystem &fts,
const std::vector<std::pair<int, int>> &merge_candidates) override;
virtual bool requires_init_distances() const override {
return true;
}
virtual bool requires_goal_distances() const override {
return true;
}
};
}
#endif
| 1,043 |
C
| 25.76923 | 75 | 0.732502 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/label_equivalence_relation.cc
|
#include "label_equivalence_relation.h"
#include "labels.h"
#include <cassert>
using namespace std;
namespace merge_and_shrink {
LabelEquivalenceRelation::LabelEquivalenceRelation(
const Labels &labels, const vector<vector<int>> &label_groups)
: labels(labels) {
/* In the worst case, each label forms a singleton group, and thus with
label reduction, we could have labels.get_max_size() many groups. */
grouped_labels.reserve(labels.get_max_size());
label_to_positions.resize(labels.get_max_size());
for (const vector<int> &label_group : label_groups) {
add_label_group(label_group);
}
}
LabelEquivalenceRelation::LabelEquivalenceRelation(
const LabelEquivalenceRelation &other)
: labels(other.labels) {
// For the reserve call, see the comment in the constructor above.
grouped_labels.reserve(labels.get_max_size());
/*
      Note that we do not copy label_to_positions because copying
      potentially uninitialized iterators causes problems in debug mode.
This also means that label_to_positions contains uninitialized values
at all positions corresponding to already reduced labels (inactive
labels).
*/
label_to_positions.resize(other.label_to_positions.size());
for (size_t other_group_id = 0;
other_group_id < other.grouped_labels.size();
++other_group_id) {
// Add a new empty label group.
int group_id = grouped_labels.size();
assert(group_id == static_cast<int>(other_group_id));
grouped_labels.push_back(LabelGroup());
LabelGroup &label_group = grouped_labels.back();
/*
Go over the other label group, add all labels to this group.
To obtain exact copies of the label groups with the same cost, we do
not use add_label_to_group, which would recompute costs based on
given labels and leave cost=infinity for empty groups, but we
manually set the group's cost to match the other group's cost.
*/
const LabelGroup &other_label_group =
other.grouped_labels[other_group_id];
for (int other_label_no : other_label_group) {
LabelIter label_it = label_group.insert(other_label_no);
assert(*label_it == other_label_no);
label_to_positions[other_label_no] = make_pair(group_id, label_it);
}
label_group.set_cost(other_label_group.get_cost());
}
}
void LabelEquivalenceRelation::add_label_to_group(int group_id,
int label_no) {
LabelIter label_it = grouped_labels[group_id].insert(label_no);
label_to_positions[label_no] = make_pair(group_id, label_it);
int label_cost = labels.get_label_cost(label_no);
if (label_cost < grouped_labels[group_id].get_cost())
grouped_labels[group_id].set_cost(label_cost);
}
void LabelEquivalenceRelation::apply_label_mapping(
const vector<pair<int, vector<int>>> &label_mapping,
const unordered_set<int> *affected_group_ids) {
for (const pair<int, vector<int>> &mapping : label_mapping) {
int new_label_no = mapping.first;
const vector<int> &old_label_nos = mapping.second;
// Add new label to group
int canonical_group_id = get_group_id(old_label_nos.front());
if (!affected_group_ids) {
add_label_to_group(canonical_group_id, new_label_no);
} else {
add_label_group({new_label_no});
}
// Remove old labels from group
for (int old_label_no : old_label_nos) {
if (!affected_group_ids) {
assert(canonical_group_id == get_group_id(old_label_no));
}
LabelIter label_it = label_to_positions[old_label_no].second;
grouped_labels[get_group_id(old_label_no)].erase(label_it);
}
}
if (affected_group_ids) {
// Recompute the cost of all affected label groups.
const unordered_set<int> &group_ids = *affected_group_ids;
for (int group_id : group_ids) {
LabelGroup &label_group = grouped_labels[group_id];
// Setting cost to infinity for empty groups does not hurt.
label_group.set_cost(INF);
for (int label_no : label_group) {
int cost = labels.get_label_cost(label_no);
if (cost < label_group.get_cost()) {
label_group.set_cost(cost);
}
}
}
}
}
void LabelEquivalenceRelation::move_group_into_group(
int from_group_id, int to_group_id) {
assert(!is_empty_group(from_group_id));
assert(!is_empty_group(to_group_id));
LabelGroup &from_group = grouped_labels[from_group_id];
for (int label_no : from_group) {
add_label_to_group(to_group_id, label_no);
}
from_group.clear();
}
int LabelEquivalenceRelation::add_label_group(const vector<int> &new_labels) {
int new_group_id = grouped_labels.size();
grouped_labels.push_back(LabelGroup());
for (int label_no : new_labels) {
add_label_to_group(new_group_id, label_no);
}
return new_group_id;
}
}
| 5,185 |
C++
| 37.414815 | 79 | 0.627965 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/utils.cc
|
#include "utils.h"
#include "distances.h"
#include "factored_transition_system.h"
#include "shrink_strategy.h"
#include "transition_system.h"
#include "../utils/logging.h"
#include "../utils/math.h"
#include <algorithm>
#include <cassert>
#include <cmath>
using namespace std;
namespace merge_and_shrink {
pair<int, int> compute_shrink_sizes(
int size1,
int size2,
int max_states_before_merge,
int max_states_after_merge) {
// Bound both sizes by max allowed size before merge.
int new_size1 = min(size1, max_states_before_merge);
int new_size2 = min(size2, max_states_before_merge);
if (!utils::is_product_within_limit(
new_size1, new_size2, max_states_after_merge)) {
int balanced_size = int(sqrt(max_states_after_merge));
if (new_size1 <= balanced_size) {
// Size of the first transition system is small enough. Use whatever
// is left for the second transition system.
new_size2 = max_states_after_merge / new_size1;
} else if (new_size2 <= balanced_size) {
// Inverted case as before.
new_size1 = max_states_after_merge / new_size2;
} else {
// Both transition systems are too big. We set both target sizes
// to balanced_size. An alternative would be to set one to
// N1 = balanced_size and the other to N2 = max_states_after_merge /
// balanced_size, to get closer to the allowed maximum.
// However, this would make little difference (N2 would
// always be N1, N1 + 1 or N1 + 2), and our solution has the
// advantage of treating the transition systems symmetrically.
new_size1 = balanced_size;
new_size2 = balanced_size;
}
}
assert(new_size1 <= size1 && new_size2 <= size2);
assert(new_size1 <= max_states_before_merge);
assert(new_size2 <= max_states_before_merge);
assert(new_size1 * new_size2 <= max_states_after_merge);
return make_pair(new_size1, new_size2);
}
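/*
  Worked example for the balancing rule above (hypothetical numbers):
  with size1 = size2 = 1000 and max_states_after_merge = 50000, the
  product 1000 * 1000 exceeds the limit and both sizes exceed
  balanced_size = int(sqrt(50000)) = 223, so both targets become 223
  (223 * 223 = 49729 <= 50000). With size1 = 100 instead, only the
  second factor is shrunk, to 50000 / 100 = 500 states.
*/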
/*
This method checks if the transition system of the factor at index violates
the size limit given via new_size (e.g. as computed by compute_shrink_sizes)
or the threshold shrink_threshold_before_merge that triggers shrinking even
if the size limit is not violated. If so, trigger the shrinking process.
Return true iff the factor was actually shrunk.
*/
bool shrink_factor(
FactoredTransitionSystem &fts,
int index,
int new_size,
int shrink_threshold_before_merge,
const ShrinkStrategy &shrink_strategy,
utils::Verbosity verbosity) {
/*
TODO: think about factoring out common logic of this function and the
function copy_and_shrink_ts in merge_scoring_function_miasm_utils.cc.
*/
const TransitionSystem &ts = fts.get_transition_system(index);
int num_states = ts.get_size();
if (num_states > min(new_size, shrink_threshold_before_merge)) {
if (verbosity >= utils::Verbosity::VERBOSE) {
utils::g_log << ts.tag() << "current size: " << num_states;
if (new_size < num_states)
utils::g_log << " (new size limit: " << new_size;
else
utils::g_log << " (shrink threshold: " << shrink_threshold_before_merge;
utils::g_log << ")" << endl;
}
const Distances &distances = fts.get_distances(index);
StateEquivalenceRelation equivalence_relation =
shrink_strategy.compute_equivalence_relation(ts, distances, new_size);
// TODO: We currently violate this; see issue250
//assert(equivalence_relation.size() <= target_size);
return fts.apply_abstraction(index, equivalence_relation, verbosity);
}
return false;
}
bool shrink_before_merge_step(
FactoredTransitionSystem &fts,
int index1,
int index2,
int max_states,
int max_states_before_merge,
int shrink_threshold_before_merge,
const ShrinkStrategy &shrink_strategy,
utils::Verbosity verbosity) {
/*
Compute the size limit for both transition systems as imposed by
max_states and max_states_before_merge.
*/
pair<int, int> new_sizes = compute_shrink_sizes(
fts.get_transition_system(index1).get_size(),
fts.get_transition_system(index2).get_size(),
max_states_before_merge,
max_states);
/*
For both transition systems, possibly compute and apply an
abstraction.
      TODO: we could make better use of the given limit by increasing the
      size limit for the second shrinking if the first factor was shrunk
      more than required.
*/
bool shrunk1 = shrink_factor(
fts,
index1,
new_sizes.first,
shrink_threshold_before_merge,
shrink_strategy,
verbosity);
if (verbosity >= utils::Verbosity::VERBOSE && shrunk1) {
fts.statistics(index1);
}
bool shrunk2 = shrink_factor(
fts,
index2,
new_sizes.second,
shrink_threshold_before_merge,
shrink_strategy,
verbosity);
if (verbosity >= utils::Verbosity::VERBOSE && shrunk2) {
fts.statistics(index2);
}
return shrunk1 || shrunk2;
}
bool prune_step(
FactoredTransitionSystem &fts,
int index,
bool prune_unreachable_states,
bool prune_irrelevant_states,
utils::Verbosity verbosity) {
assert(prune_unreachable_states || prune_irrelevant_states);
const TransitionSystem &ts = fts.get_transition_system(index);
const Distances &distances = fts.get_distances(index);
int num_states = ts.get_size();
StateEquivalenceRelation state_equivalence_relation;
state_equivalence_relation.reserve(num_states);
int unreachable_count = 0;
int irrelevant_count = 0;
int dead_count = 0;
for (int state = 0; state < num_states; ++state) {
/* If pruning both unreachable and irrelevant states, a state which is
dead is counted for both statistics! */
bool prune_state = false;
if (prune_unreachable_states) {
assert(distances.are_init_distances_computed());
if (distances.get_init_distance(state) == INF) {
++unreachable_count;
prune_state = true;
}
}
if (prune_irrelevant_states) {
assert(distances.are_goal_distances_computed());
if (distances.get_goal_distance(state) == INF) {
++irrelevant_count;
prune_state = true;
}
}
if (prune_state) {
++dead_count;
} else {
StateEquivalenceClass state_equivalence_class;
state_equivalence_class.push_front(state);
state_equivalence_relation.push_back(state_equivalence_class);
}
}
if (verbosity >= utils::Verbosity::VERBOSE &&
(unreachable_count || irrelevant_count)) {
utils::g_log << ts.tag()
<< "unreachable: " << unreachable_count << " states, "
<< "irrelevant: " << irrelevant_count << " states ("
<< "total dead: " << dead_count << " states)" << endl;
}
return fts.apply_abstraction(index, state_equivalence_relation, verbosity);
}
vector<int> compute_abstraction_mapping(
int num_states,
const StateEquivalenceRelation &equivalence_relation) {
vector<int> abstraction_mapping(num_states, PRUNED_STATE);
for (size_t class_no = 0; class_no < equivalence_relation.size(); ++class_no) {
const StateEquivalenceClass &state_equivalence_class =
equivalence_relation[class_no];
for (int state : state_equivalence_class) {
assert(abstraction_mapping[state] == PRUNED_STATE);
abstraction_mapping[state] = class_no;
}
}
return abstraction_mapping;
}
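/*
  Example (hypothetical relation): for num_states = 4 and the equivalence
  relation {{0, 2}, {1}}, states 0 and 2 map to abstract state 0, state 1
  maps to abstract state 1, and state 3, which occurs in no class, stays
  PRUNED_STATE. The resulting mapping is [0, 1, 0, PRUNED_STATE].
*/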
bool is_goal_relevant(const TransitionSystem &ts) {
int num_states = ts.get_size();
for (int state = 0; state < num_states; ++state) {
if (!ts.is_goal_state(state)) {
return true;
}
}
return false;
}
}
| 8,123 |
C++
| 35.594594 | 88 | 0.623661 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/utils.h
|
#ifndef MERGE_AND_SHRINK_UTILS_H
#define MERGE_AND_SHRINK_UTILS_H
#include "types.h"
#include <memory>
#include <vector>
namespace utils {
enum class Verbosity;
}
namespace merge_and_shrink {
class FactoredTransitionSystem;
class ShrinkStrategy;
class TransitionSystem;
/*
Compute target sizes for shrinking two transition systems with sizes size1
and size2 before they are merged. Use the following rules:
1) Right before merging, the transition systems may have at most
max_states_before_merge states.
  2) Right after merging, the product may have at most max_states_after_merge
states.
3) Transition systems are shrunk as little as necessary to satisfy the above
constraints. (If possible, neither is shrunk at all.)
There is often a Pareto frontier of solutions following these rules. In this
case, balanced solutions (where the target sizes are close to each other)
are preferred over less balanced ones.
*/
extern std::pair<int, int> compute_shrink_sizes(
int size1,
int size2,
int max_states_before_merge,
int max_states_after_merge);
/*
This function first determines if any of the two factors at indices index1
and index2 must be shrunk according to the given size limits max_states and
max_states_before_merge, using the function compute_shrink_sizes (see above).
If not, then the function further checks if any of the two factors has a
  size larger than shrink_threshold_before_merge, in which case shrinking is
still triggered.
If shrinking is triggered, apply the abstraction to the two factors
within the factored transition system. Return true iff at least one of the
factors was shrunk.
*/
extern bool shrink_before_merge_step(
FactoredTransitionSystem &fts,
int index1,
int index2,
int max_states,
int max_states_before_merge,
int shrink_threshold_before_merge,
const ShrinkStrategy &shrink_strategy,
utils::Verbosity verbosity);
/*
Prune unreachable and/or irrelevant states of the factor at index. This
requires that init and/or goal distances have been computed accordingly.
Return true iff any states have been pruned.
TODO: maybe this functionality belongs to a new class PruneStrategy.
*/
extern bool prune_step(
FactoredTransitionSystem &fts,
int index,
bool prune_unreachable_states,
bool prune_irrelevant_states,
utils::Verbosity verbosity);
/*
Compute the abstraction mapping based on the given state equivalence
relation.
*/
extern std::vector<int> compute_abstraction_mapping(
int num_states,
const StateEquivalenceRelation &equivalence_relation);
extern bool is_goal_relevant(const TransitionSystem &ts);
}
#endif
| 2,687 |
C
| 30.623529 | 79 | 0.756978 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/types.cc
|
#include "types.h"
#include <limits>
using namespace std;
namespace merge_and_shrink {
const int INF = numeric_limits<int>::max();
const int MINUSINF = numeric_limits<int>::min();
const int PRUNED_STATE = -1;
}
| 214 |
C++
| 16.916665 | 48 | 0.705607 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_scoring_function_single_random.h
|
#ifndef MERGE_AND_SHRINK_MERGE_SCORING_FUNCTION_SINGLE_RANDOM_H
#define MERGE_AND_SHRINK_MERGE_SCORING_FUNCTION_SINGLE_RANDOM_H
#include "merge_scoring_function.h"
#include <memory>
namespace options {
class Options;
}
namespace utils {
class RandomNumberGenerator;
}
namespace merge_and_shrink {
class TransitionSystem;
class MergeScoringFunctionSingleRandom : public MergeScoringFunction {
int random_seed; // only for dump options
std::shared_ptr<utils::RandomNumberGenerator> rng;
protected:
virtual std::string name() const override;
virtual void dump_function_specific_options() const override;
public:
explicit MergeScoringFunctionSingleRandom(const options::Options &options);
virtual ~MergeScoringFunctionSingleRandom() override = default;
virtual std::vector<double> compute_scores(
const FactoredTransitionSystem &fts,
const std::vector<std::pair<int, int>> &merge_candidates) override;
virtual bool requires_init_distances() const override {
return false;
}
virtual bool requires_goal_distances() const override {
return false;
}
};
}
#endif
| 1,138 |
C
| 26.119047 | 79 | 0.744288 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_scoring_function_miasm.cc
|
#include "merge_scoring_function_miasm.h"
#include "distances.h"
#include "factored_transition_system.h"
#include "merge_and_shrink_algorithm.h"
#include "shrink_strategy.h"
#include "transition_system.h"
#include "merge_scoring_function_miasm_utils.h"
#include "../options/option_parser.h"
#include "../options/options.h"
#include "../options/plugin.h"
#include "../utils/logging.h"
#include "../utils/markup.h"
using namespace std;
namespace merge_and_shrink {
MergeScoringFunctionMIASM::MergeScoringFunctionMIASM(
const options::Options &options)
: shrink_strategy(options.get<shared_ptr<ShrinkStrategy>>("shrink_strategy")),
max_states(options.get<int>("max_states")),
max_states_before_merge(options.get<int>("max_states_before_merge")),
shrink_threshold_before_merge(options.get<int>("threshold_before_merge")) {
}
vector<double> MergeScoringFunctionMIASM::compute_scores(
const FactoredTransitionSystem &fts,
const vector<pair<int, int>> &merge_candidates) {
vector<double> scores;
scores.reserve(merge_candidates.size());
for (pair<int, int> merge_candidate : merge_candidates) {
int index1 = merge_candidate.first;
int index2 = merge_candidate.second;
unique_ptr<TransitionSystem> product = shrink_before_merge_externally(
fts,
index1,
index2,
*shrink_strategy,
max_states,
max_states_before_merge,
shrink_threshold_before_merge);
// Compute distances for the product and count the alive states.
unique_ptr<Distances> distances = utils::make_unique_ptr<Distances>(*product);
const bool compute_init_distances = true;
const bool compute_goal_distances = true;
const utils::Verbosity verbosity = utils::Verbosity::SILENT;
distances->compute_distances(compute_init_distances, compute_goal_distances, verbosity);
int num_states = product->get_size();
int alive_states_count = 0;
for (int state = 0; state < num_states; ++state) {
if (distances->get_init_distance(state) != INF &&
distances->get_goal_distance(state) != INF) {
++alive_states_count;
}
}
/*
Compute the score as the ratio of alive states of the product
compared to the number of states of the full product.
*/
assert(num_states);
double score = static_cast<double>(alive_states_count) /
static_cast<double>(num_states);
scores.push_back(score);
}
return scores;
}
string MergeScoringFunctionMIASM::name() const {
return "miasm";
}
static shared_ptr<MergeScoringFunction> _parse(options::OptionParser &parser) {
parser.document_synopsis(
"MIASM",
"This scoring function favors merging transition systems such that in "
"their product, there are many dead states, which can then be pruned "
"without sacrificing information. In particular, the score it assigns "
"to a product is the ratio of alive states to the total number of "
"states. To compute this score, this class thus computes the product "
"of all pairs of transition systems, potentially copying and shrinking "
"the transition systems before if otherwise their product would exceed "
"the specified size limits. A stateless merge strategy using this "
"scoring function is called dyn-MIASM (nowadays also called sbMIASM "
"for score-based MIASM) and is described in the following paper:"
+ utils::format_conference_reference(
{"Silvan Sievers", "Martin Wehrle", "Malte Helmert"},
"An Analysis of Merge Strategies for Merge-and-Shrink Heuristics",
"https://ai.dmi.unibas.ch/papers/sievers-et-al-icaps2016.pdf",
"Proceedings of the 26th International Conference on Planning and "
"Scheduling (ICAPS 2016)",
"2358-2366",
"AAAI Press",
"2016"));
parser.document_note(
"Note",
"To obtain the configurations called dyn-MIASM described in the paper, "
"use the following configuration of the merge-and-shrink heuristic "
"and adapt the tie-breaking criteria of {{{total_order}}} as desired:\n "
"{{{\nmerge_and_shrink(merge_strategy=merge_stateless(merge_selector="
"score_based_filtering(scoring_functions=[sf_miasm(shrink_strategy="
"shrink_bisimulation(greedy=false),max_states=50000,"
"threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,"
"product_ts_order=new_to_old,atomic_before_product=true)])),"
"shrink_strategy=shrink_bisimulation(greedy=false),label_reduction="
"exact(before_shrinking=true,before_merging=false),max_states=50000,"
"threshold_before_merge=1)\n}}}");
parser.document_note(
"Note",
"Unless you know what you are doing, we recommend using the same "
"options related to shrinking for {{{sf_miasm}}} as for {{{"
"merge_and_shrink}}}, i.e. the options {{{shrink_strategy}}}, {{{"
"max_states}}}, and {{{threshold_before_merge}}} should be set "
"identically. Furthermore, as this scoring function maximizes the "
"amount of possible pruning, merge-and-shrink should be configured to "
"use full pruning, i.e. {{{prune_unreachable_states=true}}} and {{{"
"prune_irrelevant_states=true}}} (the default).");
// TODO: use shrink strategy and limit options from MergeAndShrinkHeuristic
// instead of having the identical options here again.
parser.add_option<shared_ptr<ShrinkStrategy>>(
"shrink_strategy",
"We recommend setting this to match the shrink strategy configuration "
"given to {{{merge_and_shrink}}}, see note below.");
add_transition_system_size_limit_options_to_parser(parser);
options::Options options = parser.parse();
if (parser.help_mode()) {
return nullptr;
}
handle_shrink_limit_options_defaults(options);
if (parser.dry_run()) {
return nullptr;
} else {
return make_shared<MergeScoringFunctionMIASM>(options);
}
}
static options::Plugin<MergeScoringFunction> _plugin("sf_miasm", _parse);
}
| 6,323 |
C++
| 42.315068 | 96 | 0.659023 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/factored_transition_system.cc
|
#include "factored_transition_system.h"
#include "distances.h"
#include "labels.h"
#include "merge_and_shrink_representation.h"
#include "transition_system.h"
#include "utils.h"
#include "../utils/collections.h"
#include "../utils/logging.h"
#include "../utils/memory.h"
#include "../utils/system.h"
#include <cassert>
using namespace std;
namespace merge_and_shrink {
FTSConstIterator::FTSConstIterator(
const FactoredTransitionSystem &fts,
bool end)
: fts(fts), current_index((end ? fts.get_size() : 0)) {
next_valid_index();
}
void FTSConstIterator::next_valid_index() {
while (current_index < fts.get_size()
&& !fts.is_active(current_index)) {
++current_index;
}
}
void FTSConstIterator::operator++() {
++current_index;
next_valid_index();
}
FactoredTransitionSystem::FactoredTransitionSystem(
unique_ptr<Labels> labels,
vector<unique_ptr<TransitionSystem>> &&transition_systems,
vector<unique_ptr<MergeAndShrinkRepresentation>> &&mas_representations,
vector<unique_ptr<Distances>> &&distances,
const bool compute_init_distances,
const bool compute_goal_distances,
utils::Verbosity verbosity)
: labels(move(labels)),
transition_systems(move(transition_systems)),
mas_representations(move(mas_representations)),
distances(move(distances)),
compute_init_distances(compute_init_distances),
compute_goal_distances(compute_goal_distances),
num_active_entries(this->transition_systems.size()) {
for (size_t index = 0; index < this->transition_systems.size(); ++index) {
if (compute_init_distances || compute_goal_distances) {
this->distances[index]->compute_distances(
compute_init_distances, compute_goal_distances, verbosity);
}
assert(is_component_valid(index));
}
}
FactoredTransitionSystem::FactoredTransitionSystem(FactoredTransitionSystem &&other)
: labels(move(other.labels)),
transition_systems(move(other.transition_systems)),
mas_representations(move(other.mas_representations)),
distances(move(other.distances)),
compute_init_distances(move(other.compute_init_distances)),
compute_goal_distances(move(other.compute_goal_distances)),
num_active_entries(move(other.num_active_entries)) {
/*
This is just a default move constructor. Unfortunately Visual
Studio does not support "= default" for move construction or
move assignment as of this writing.
*/
}
FactoredTransitionSystem::~FactoredTransitionSystem() {
}
void FactoredTransitionSystem::assert_index_valid(int index) const {
assert(utils::in_bounds(index, transition_systems));
assert(utils::in_bounds(index, mas_representations));
assert(utils::in_bounds(index, distances));
if (!(transition_systems[index] && mas_representations[index] && distances[index]) &&
!(!transition_systems[index] && !mas_representations[index] && !distances[index])) {
cerr << "Factor at index is in an inconsistent state!" << endl;
utils::exit_with(utils::ExitCode::SEARCH_CRITICAL_ERROR);
}
}
bool FactoredTransitionSystem::is_component_valid(int index) const {
assert(is_active(index));
if (compute_init_distances && !distances[index]->are_init_distances_computed()) {
return false;
}
if (compute_goal_distances && !distances[index]->are_goal_distances_computed()) {
return false;
}
return transition_systems[index]->are_transitions_sorted_unique() &&
transition_systems[index]->in_sync_with_label_equivalence_relation();
}
void FactoredTransitionSystem::assert_all_components_valid() const {
for (size_t index = 0; index < transition_systems.size(); ++index) {
if (transition_systems[index]) {
assert(is_component_valid(index));
}
}
}
void FactoredTransitionSystem::apply_label_mapping(
const vector<pair<int, vector<int>>> &label_mapping,
int combinable_index) {
assert_all_components_valid();
for (const auto &new_label_old_labels : label_mapping) {
assert(new_label_old_labels.first == labels->get_size());
labels->reduce_labels(new_label_old_labels.second);
}
for (size_t i = 0; i < transition_systems.size(); ++i) {
if (transition_systems[i]) {
transition_systems[i]->apply_label_reduction(
label_mapping, static_cast<int>(i) != combinable_index);
}
}
assert_all_components_valid();
}
bool FactoredTransitionSystem::apply_abstraction(
int index,
const StateEquivalenceRelation &state_equivalence_relation,
utils::Verbosity verbosity) {
assert(is_component_valid(index));
int new_num_states = state_equivalence_relation.size();
if (new_num_states == transition_systems[index]->get_size()) {
return false;
}
vector<int> abstraction_mapping = compute_abstraction_mapping(
transition_systems[index]->get_size(), state_equivalence_relation);
transition_systems[index]->apply_abstraction(
state_equivalence_relation, abstraction_mapping, verbosity);
if (compute_init_distances || compute_goal_distances) {
distances[index]->apply_abstraction(
state_equivalence_relation,
compute_init_distances,
compute_goal_distances,
verbosity);
}
mas_representations[index]->apply_abstraction_to_lookup_table(
abstraction_mapping);
/* If distances need to be recomputed, this already happened in the
Distances object. */
assert(is_component_valid(index));
return true;
}
int FactoredTransitionSystem::merge(
int index1,
int index2,
utils::Verbosity verbosity) {
assert(is_component_valid(index1));
assert(is_component_valid(index2));
transition_systems.push_back(
TransitionSystem::merge(
*labels,
*transition_systems[index1],
*transition_systems[index2],
verbosity));
distances[index1] = nullptr;
distances[index2] = nullptr;
transition_systems[index1] = nullptr;
transition_systems[index2] = nullptr;
mas_representations.push_back(
utils::make_unique_ptr<MergeAndShrinkRepresentationMerge>(
move(mas_representations[index1]),
move(mas_representations[index2])));
mas_representations[index1] = nullptr;
mas_representations[index2] = nullptr;
const TransitionSystem &new_ts = *transition_systems.back();
distances.push_back(utils::make_unique_ptr<Distances>(new_ts));
int new_index = transition_systems.size() - 1;
// Restore the invariant that distances are computed.
if (compute_init_distances || compute_goal_distances) {
distances[new_index]->compute_distances(
compute_init_distances, compute_goal_distances, verbosity);
}
--num_active_entries;
assert(is_component_valid(new_index));
return new_index;
}
pair<unique_ptr<MergeAndShrinkRepresentation>, unique_ptr<Distances>>
FactoredTransitionSystem::extract_factor(int index) {
assert(is_component_valid(index));
return make_pair(move(mas_representations[index]),
move(distances[index]));
}
void FactoredTransitionSystem::statistics(int index) const {
assert(is_component_valid(index));
const TransitionSystem &ts = *transition_systems[index];
ts.statistics();
const Distances &dist = *distances[index];
dist.statistics();
}
void FactoredTransitionSystem::dump(int index) const {
assert_index_valid(index);
transition_systems[index]->dump_labels_and_transitions();
mas_representations[index]->dump();
}
void FactoredTransitionSystem::dump() const {
for (int index : *this) {
dump(index);
}
}
bool FactoredTransitionSystem::is_factor_solvable(int index) const {
assert(is_component_valid(index));
return transition_systems[index]->is_solvable(*distances[index]);
}
bool FactoredTransitionSystem::is_factor_trivial(int index) const {
assert(is_component_valid(index));
if (!mas_representations[index]->is_total()) {
return false;
}
const TransitionSystem &ts = *transition_systems[index];
for (int state = 0; state < ts.get_size(); ++state) {
if (!ts.is_goal_state(state)) {
return false;
}
}
return true;
}
bool FactoredTransitionSystem::is_active(int index) const {
assert_index_valid(index);
return transition_systems[index] != nullptr;
}
}
| 8,533 |
C++
| 33.550607 | 92 | 0.677019 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_scoring_function_miasm_utils.h
|
#ifndef MERGE_AND_SHRINK_MERGE_SCORING_FUNCTION_MIASM_UTILS_H
#define MERGE_AND_SHRINK_MERGE_SCORING_FUNCTION_MIASM_UTILS_H
#include <memory>
namespace merge_and_shrink {
class FactoredTransitionSystem;
class ShrinkStrategy;
class TransitionSystem;
/*
Copy the two transition systems at the given indices, possibly shrink them
according to the same rules as merge-and-shrink does, and return their
product.
*/
extern std::unique_ptr<TransitionSystem> shrink_before_merge_externally(
const FactoredTransitionSystem &fts,
int index1,
int index2,
const ShrinkStrategy &shrink_strategy,
int max_states,
int max_states_before_merge,
int shrink_threshold_before_merge);
}
#endif
| 711 |
C
| 25.370369 | 76 | 0.767932 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_scoring_function_goal_relevance.h
|
#ifndef MERGE_AND_SHRINK_MERGE_SCORING_FUNCTION_GOAL_RELEVANCE_H
#define MERGE_AND_SHRINK_MERGE_SCORING_FUNCTION_GOAL_RELEVANCE_H
#include "merge_scoring_function.h"
namespace merge_and_shrink {
class MergeScoringFunctionGoalRelevance : public MergeScoringFunction {
protected:
virtual std::string name() const override;
public:
MergeScoringFunctionGoalRelevance() = default;
virtual ~MergeScoringFunctionGoalRelevance() override = default;
virtual std::vector<double> compute_scores(
const FactoredTransitionSystem &fts,
const std::vector<std::pair<int, int>> &merge_candidates) override;
virtual bool requires_init_distances() const override {
return false;
}
virtual bool requires_goal_distances() const override {
return false;
}
};
}
#endif
| 815 |
C
| 28.142856 | 75 | 0.736196 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_scoring_function_total_order.h
|
#ifndef MERGE_AND_SHRINK_MERGE_SCORING_FUNCTION_TOTAL_ORDER_H
#define MERGE_AND_SHRINK_MERGE_SCORING_FUNCTION_TOTAL_ORDER_H
#include "merge_scoring_function.h"
#include <memory>
namespace options {
class OptionParser;
class Options;
}
namespace utils {
class RandomNumberGenerator;
}
namespace merge_and_shrink {
class MergeScoringFunctionTotalOrder : public MergeScoringFunction {
enum class AtomicTSOrder {
REVERSE_LEVEL,
LEVEL,
RANDOM
};
AtomicTSOrder atomic_ts_order;
enum class ProductTSOrder {
OLD_TO_NEW,
NEW_TO_OLD,
RANDOM
};
ProductTSOrder product_ts_order;
bool atomic_before_product;
int random_seed; // only for dump options
std::shared_ptr<utils::RandomNumberGenerator> rng;
std::vector<std::pair<int, int>> merge_candidate_order;
protected:
virtual std::string name() const override;
virtual void dump_function_specific_options() const override;
public:
explicit MergeScoringFunctionTotalOrder(const options::Options &options);
virtual ~MergeScoringFunctionTotalOrder() override = default;
virtual std::vector<double> compute_scores(
const FactoredTransitionSystem &fts,
const std::vector<std::pair<int, int>> &merge_candidates) override;
virtual void initialize(const TaskProxy &task_proxy) override;
static void add_options_to_parser(options::OptionParser &parser);
virtual bool requires_init_distances() const override {
return false;
}
virtual bool requires_goal_distances() const override {
return false;
}
};
}
#endif
| 1,610 |
C
| 26.775862 | 77 | 0.711801 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_and_shrink_algorithm.h
|
#ifndef MERGE_AND_SHRINK_MERGE_AND_SHRINK_ALGORITHM_H
#define MERGE_AND_SHRINK_MERGE_AND_SHRINK_ALGORITHM_H
#include <memory>
class TaskProxy;
namespace options {
class OptionParser;
class Options;
}
namespace utils {
class CountdownTimer;
enum class Verbosity;
}
namespace merge_and_shrink {
class FactoredTransitionSystem;
class LabelReduction;
class MergeStrategyFactory;
class ShrinkStrategy;
class MergeAndShrinkAlgorithm {
// TODO: when the option parser supports it, the following should become
// unique pointers.
std::shared_ptr<MergeStrategyFactory> merge_strategy_factory;
std::shared_ptr<ShrinkStrategy> shrink_strategy;
std::shared_ptr<LabelReduction> label_reduction;
// Options for shrinking
// Hard limit: the maximum size of a transition system at any point.
const int max_states;
// Hard limit: the maximum size of a transition system before being merged.
const int max_states_before_merge;
/* A soft limit for triggering shrinking even if the hard limits
max_states and max_states_before_merge are not violated. */
const int shrink_threshold_before_merge;
// Options for pruning
const bool prune_unreachable_states;
const bool prune_irrelevant_states;
const utils::Verbosity verbosity;
const double main_loop_max_time;
long starting_peak_memory;
void report_peak_memory_delta(bool final = false) const;
void dump_options() const;
void warn_on_unusual_options() const;
bool ran_out_of_time(const utils::CountdownTimer &timer) const;
void statistics(int maximum_intermediate_size) const;
void main_loop(
FactoredTransitionSystem &fts,
const TaskProxy &task_proxy);
public:
explicit MergeAndShrinkAlgorithm(const options::Options &opts);
FactoredTransitionSystem build_factored_transition_system(const TaskProxy &task_proxy);
};
extern void add_merge_and_shrink_algorithm_options_to_parser(options::OptionParser &parser);
extern void add_transition_system_size_limit_options_to_parser(options::OptionParser &parser);
extern void handle_shrink_limit_options_defaults(options::Options &opts);
}
#endif
| 2,156 |
C
| 30.720588 | 94 | 0.752319 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/fts_factory.h
|
#ifndef MERGE_AND_SHRINK_FTS_FACTORY_H
#define MERGE_AND_SHRINK_FTS_FACTORY_H
/*
Factory for factored transition systems.
Takes a planning task and produces a factored transition system that
represents the planning task. This provides the main bridge from
planning tasks to the concepts on which merge-and-shrink abstractions
are based (transition systems, labels, etc.). The "internal" classes of
merge-and-shrink should not need to know about planning task concepts.
*/
class TaskProxy;
namespace utils {
enum class Verbosity;
}
namespace merge_and_shrink {
class FactoredTransitionSystem;
extern FactoredTransitionSystem create_factored_transition_system(
const TaskProxy &task_proxy,
bool compute_init_distances,
bool compute_goal_distances,
utils::Verbosity verbosity);
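/*
  Usage sketch (hypothetical arguments): building the initial factored
  transition system with both init and goal distances computed:
    FactoredTransitionSystem fts = create_factored_transition_system(
        task_proxy, true, true, utils::Verbosity::SILENT);
*/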
}
#endif
| 819 |
C
| 25.451612 | 73 | 0.777778 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_and_shrink_representation.h
|
#ifndef MERGE_AND_SHRINK_MERGE_AND_SHRINK_REPRESENTATION_H
#define MERGE_AND_SHRINK_MERGE_AND_SHRINK_REPRESENTATION_H
#include <memory>
#include <vector>
class State;
namespace merge_and_shrink {
class Distances;
class MergeAndShrinkRepresentation {
protected:
int domain_size;
public:
explicit MergeAndShrinkRepresentation(int domain_size);
virtual ~MergeAndShrinkRepresentation() = 0;
int get_domain_size() const;
// Store distances instead of abstract state numbers.
virtual void set_distances(const Distances &) = 0;
virtual void apply_abstraction_to_lookup_table(
const std::vector<int> &abstraction_mapping) = 0;
/*
Return the value that state is mapped to. This is either an abstract
state (if set_distances has not been called) or a distance (if it has).
If the represented function is not total, the returned value is DEAD_END
if the abstract state is PRUNED_STATE or if the (distance) value is INF.
*/
virtual int get_value(const State &state) const = 0;
/* Return true iff the represented function is total, i.e., does not map
to PRUNED_STATE. */
virtual bool is_total() const = 0;
virtual void dump() const = 0;
};
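/*
  Usage sketch (hypothetical, after extracting a factor from a factored
  transition system): turning the representation into a heuristic lookup:
    representation->set_distances(distances);
    int h = representation->get_value(state);  // distance or DEAD_END
*/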
class MergeAndShrinkRepresentationLeaf : public MergeAndShrinkRepresentation {
const int var_id;
std::vector<int> lookup_table;
public:
MergeAndShrinkRepresentationLeaf(int var_id, int domain_size);
virtual ~MergeAndShrinkRepresentationLeaf() = default;
virtual void set_distances(const Distances &) override;
virtual void apply_abstraction_to_lookup_table(
const std::vector<int> &abstraction_mapping) override;
virtual int get_value(const State &state) const override;
virtual bool is_total() const override;
virtual void dump() const override;
};
class MergeAndShrinkRepresentationMerge : public MergeAndShrinkRepresentation {
std::unique_ptr<MergeAndShrinkRepresentation> left_child;
std::unique_ptr<MergeAndShrinkRepresentation> right_child;
std::vector<std::vector<int>> lookup_table;
public:
MergeAndShrinkRepresentationMerge(
std::unique_ptr<MergeAndShrinkRepresentation> left_child,
std::unique_ptr<MergeAndShrinkRepresentation> right_child);
virtual ~MergeAndShrinkRepresentationMerge() = default;
virtual void set_distances(const Distances &distances) override;
virtual void apply_abstraction_to_lookup_table(
const std::vector<int> &abstraction_mapping) override;
virtual int get_value(const State &state) const override;
virtual bool is_total() const override;
virtual void dump() const override;
};
}
#endif
| 2,662 |
C
| 34.039473 | 79 | 0.732156 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/shrink_strategy.h
|
#ifndef MERGE_AND_SHRINK_SHRINK_STRATEGY_H
#define MERGE_AND_SHRINK_SHRINK_STRATEGY_H
#include "types.h"
#include <string>
#include <vector>
namespace merge_and_shrink {
class Distances;
class TransitionSystem;
class ShrinkStrategy {
protected:
virtual std::string name() const = 0;
virtual void dump_strategy_specific_options() const = 0;
public:
ShrinkStrategy() = default;
virtual ~ShrinkStrategy() = default;
/*
Compute a state equivalence relation over the states of the given
transition system such that its new number of states after abstracting
it according to this equivalence relation is at most target_size
(currently violated; see issue250). dist must be the distances
information associated with the given transition system.
Note that if target_size equals the current size of the transition system,
the shrink strategy is not required to compute an equivalence relation
that results in actually shrinking the size of the transition system.
However, it may attempt to e.g. compute an equivalence relation that
results in shrinking the transition system in an information-preserving
way.
*/
virtual StateEquivalenceRelation compute_equivalence_relation(
const TransitionSystem &ts,
const Distances &distances,
int target_size) const = 0;
virtual bool requires_init_distances() const = 0;
virtual bool requires_goal_distances() const = 0;
void dump_options() const;
std::string get_name() const;
};
}
#endif
| 1,561 |
C
| 31.541666 | 80 | 0.724536 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_strategy_factory_sccs.cc
|
#include "merge_strategy_factory_sccs.h"
#include "merge_strategy_sccs.h"
#include "merge_selector.h"
#include "merge_tree_factory.h"
#include "transition_system.h"
#include "../task_proxy.h"
#include "../algorithms/sccs.h"
#include "../options/option_parser.h"
#include "../options/options.h"
#include "../options/plugin.h"
#include "../task_utils/causal_graph.h"
#include "../utils/logging.h"
#include "../utils/markup.h"
#include "../utils/system.h"
#include <algorithm>
#include <cassert>
#include <iostream>
using namespace std;
namespace merge_and_shrink {
bool compare_sccs_increasing(const vector<int> &lhs, const vector<int> &rhs) {
return lhs.size() < rhs.size();
}
bool compare_sccs_decreasing(const vector<int> &lhs, const vector<int> &rhs) {
return lhs.size() > rhs.size();
}
MergeStrategyFactorySCCs::MergeStrategyFactorySCCs(const options::Options &options)
: order_of_sccs(options.get<OrderOfSCCs>("order_of_sccs")),
merge_tree_factory(nullptr),
merge_selector(nullptr) {
if (options.contains("merge_tree")) {
merge_tree_factory = options.get<shared_ptr<MergeTreeFactory>>("merge_tree");
}
if (options.contains("merge_selector")) {
merge_selector = options.get<shared_ptr<MergeSelector>>("merge_selector");
}
}
unique_ptr<MergeStrategy> MergeStrategyFactorySCCs::compute_merge_strategy(
const TaskProxy &task_proxy,
const FactoredTransitionSystem &fts) {
VariablesProxy vars = task_proxy.get_variables();
int num_vars = vars.size();
// Compute SCCs of the causal graph.
vector<vector<int>> cg;
cg.reserve(num_vars);
for (VariableProxy var : vars) {
const vector<int> &successors =
task_proxy.get_causal_graph().get_successors(var.get_id());
cg.push_back(successors);
}
vector<vector<int>> sccs(sccs::compute_maximal_sccs(cg));
// Put the SCCs in the desired order.
switch (order_of_sccs) {
case OrderOfSCCs::TOPOLOGICAL:
// SCCs are computed in topological order.
break;
case OrderOfSCCs::REVERSE_TOPOLOGICAL:
// SCCs are computed in topological order.
reverse(sccs.begin(), sccs.end());
break;
case OrderOfSCCs::DECREASING:
sort(sccs.begin(), sccs.end(), compare_sccs_decreasing);
break;
case OrderOfSCCs::INCREASING:
sort(sccs.begin(), sccs.end(), compare_sccs_increasing);
break;
}
/*
Compute the indices at which the merged SCCs can be found when all
SCCs have been merged.
*/
int index = num_vars - 1;
utils::g_log << "SCCs of the causal graph:" << endl;
vector<vector<int>> non_singleton_cg_sccs;
vector<int> indices_of_merged_sccs;
indices_of_merged_sccs.reserve(sccs.size());
for (const vector<int> &scc : sccs) {
utils::g_log << scc << endl;
int scc_size = scc.size();
if (scc_size == 1) {
indices_of_merged_sccs.push_back(scc.front());
} else {
index += scc_size - 1;
indices_of_merged_sccs.push_back(index);
non_singleton_cg_sccs.push_back(scc);
}
}
if (sccs.size() == 1) {
utils::g_log << "Only one single SCC" << endl;
}
if (static_cast<int>(sccs.size()) == num_vars) {
utils::g_log << "Only singleton SCCs" << endl;
assert(non_singleton_cg_sccs.empty());
}
if (merge_selector) {
merge_selector->initialize(task_proxy);
}
return utils::make_unique_ptr<MergeStrategySCCs>(
fts,
task_proxy,
merge_tree_factory,
merge_selector,
move(non_singleton_cg_sccs),
move(indices_of_merged_sccs));
}
bool MergeStrategyFactorySCCs::requires_init_distances() const {
if (merge_tree_factory) {
return merge_tree_factory->requires_init_distances();
} else {
return merge_selector->requires_init_distances();
}
}
bool MergeStrategyFactorySCCs::requires_goal_distances() const {
if (merge_tree_factory) {
return merge_tree_factory->requires_goal_distances();
} else {
return merge_selector->requires_goal_distances();
}
}
void MergeStrategyFactorySCCs::dump_strategy_specific_options() const {
utils::g_log << "Merge order of sccs: ";
switch (order_of_sccs) {
case OrderOfSCCs::TOPOLOGICAL:
utils::g_log << "topological";
break;
case OrderOfSCCs::REVERSE_TOPOLOGICAL:
utils::g_log << "reverse topological";
break;
case OrderOfSCCs::DECREASING:
utils::g_log << "decreasing";
break;
case OrderOfSCCs::INCREASING:
utils::g_log << "increasing";
break;
}
utils::g_log << endl;
utils::g_log << "Merge strategy for merging within sccs: " << endl;
if (merge_tree_factory) {
merge_tree_factory->dump_options();
}
if (merge_selector) {
merge_selector->dump_options();
}
}
string MergeStrategyFactorySCCs::name() const {
return "sccs";
}
static shared_ptr<MergeStrategyFactory> _parse(options::OptionParser &parser) {
parser.document_synopsis(
"Merge strategy SSCs",
"This merge strategy implements the algorithm described in the paper "
+ utils::format_conference_reference(
{"Silvan Sievers", "Martin Wehrle", "Malte Helmert"},
"An Analysis of Merge Strategies for Merge-and-Shrink Heuristics",
"https://ai.dmi.unibas.ch/papers/sievers-et-al-icaps2016.pdf",
"Proceedings of the 26th International Conference on Planning and "
"Scheduling (ICAPS 2016)",
"2358-2366",
"AAAI Press",
"2016") +
"In a nutshell, it computes the maximal SCCs of the causal graph, "
"obtaining a partitioning of the task's variables. Every such "
"partition is then merged individually, using the specified fallback "
"merge strategy, considering the SCCs in a configurable order. "
"Afterwards, all resulting composite abstractions are merged to form "
"the final abstraction, again using the specified fallback merge "
"strategy and the configurable order of the SCCs.");
vector<string> order_of_sccs;
order_of_sccs.push_back("topological");
order_of_sccs.push_back("reverse_topological");
order_of_sccs.push_back("decreasing");
order_of_sccs.push_back("increasing");
parser.add_enum_option<OrderOfSCCs>(
"order_of_sccs",
order_of_sccs,
"choose an ordering of the SCCs: topological/reverse_topological or "
"decreasing/increasing in the size of the SCCs. The former two options "
"refer to the directed graph where each obtained SCC is a "
"'supervertex'. For the latter two options, the tie-breaking is to "
"use the topological order according to that same graph of SCC "
"supervertices.",
"topological");
parser.add_option<shared_ptr<MergeTreeFactory>>(
"merge_tree",
"the fallback merge strategy to use if a precomputed strategy should "
"be used.",
options::OptionParser::NONE);
parser.add_option<shared_ptr<MergeSelector>>(
"merge_selector",
"the fallback merge strategy to use if a stateless strategy should "
"be used.",
options::OptionParser::NONE);
options::Options options = parser.parse();
if (parser.help_mode()) {
return nullptr;
} else if (parser.dry_run()) {
bool merge_tree = options.contains("merge_tree");
bool merge_selector = options.contains("merge_selector");
if ((merge_tree && merge_selector) || (!merge_tree && !merge_selector)) {
cerr << "You have to specify exactly one of the options merge_tree "
"and merge_selector!" << endl;
utils::exit_with(utils::ExitCode::SEARCH_INPUT_ERROR);
}
return nullptr;
} else {
return make_shared<MergeStrategyFactorySCCs>(options);
}
}
static options::Plugin<MergeStrategyFactory> _plugin("merge_sccs", _parse);
}
| 8,099 |
C++
| 33.913793 | 85 | 0.63514 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/shrink_random.cc
|
#include "shrink_random.h"
#include "factored_transition_system.h"
#include "transition_system.h"
#include "../option_parser.h"
#include "../plugin.h"
#include <cassert>
#include <memory>
using namespace std;
namespace merge_and_shrink {
ShrinkRandom::ShrinkRandom(const Options &opts)
: ShrinkBucketBased(opts) {
}
vector<ShrinkBucketBased::Bucket> ShrinkRandom::partition_into_buckets(
const TransitionSystem &ts,
const Distances &) const {
vector<Bucket> buckets;
buckets.resize(1);
Bucket &big_bucket = buckets.back();
big_bucket.reserve(ts.get_size());
int num_states = ts.get_size();
for (int state = 0; state < num_states; ++state)
big_bucket.push_back(state);
assert(!big_bucket.empty());
return buckets;
}
string ShrinkRandom::name() const {
return "random";
}
static shared_ptr<ShrinkStrategy> _parse(OptionParser &parser) {
parser.document_synopsis("Random", "");
ShrinkBucketBased::add_options_to_parser(parser);
Options opts = parser.parse();
if (parser.help_mode())
return nullptr;
if (parser.dry_run())
return nullptr;
else
return make_shared<ShrinkRandom>(opts);
}
static Plugin<ShrinkStrategy> _plugin("shrink_random", _parse);
}
| 1,260 |
C++
| 23.25 | 71 | 0.678571 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_strategy_sccs.cc
|
#include "merge_strategy_sccs.h"
#include "factored_transition_system.h"
#include "merge_selector.h"
#include "merge_tree.h"
#include "merge_tree_factory.h"
#include "transition_system.h"
#include <algorithm>
#include <cassert>
#include <iostream>
using namespace std;
namespace merge_and_shrink {
MergeStrategySCCs::MergeStrategySCCs(
const FactoredTransitionSystem &fts,
const TaskProxy &task_proxy,
const shared_ptr<MergeTreeFactory> &merge_tree_factory,
const shared_ptr<MergeSelector> &merge_selector,
vector<vector<int>> non_singleton_cg_sccs,
vector<int> indices_of_merged_sccs)
: MergeStrategy(fts),
task_proxy(task_proxy),
merge_tree_factory(merge_tree_factory),
merge_selector(merge_selector),
non_singleton_cg_sccs(move(non_singleton_cg_sccs)),
indices_of_merged_sccs(move(indices_of_merged_sccs)),
current_merge_tree(nullptr) {
}
MergeStrategySCCs::~MergeStrategySCCs() {
}
pair<int, int> MergeStrategySCCs::get_next() {
    // If we do not have a current set of indices to finish merging, we
    // either start merging the next SCC or, if all SCCs have been merged,
    // merge their representatives.
if (current_ts_indices.empty()) {
// Get the next indices we need to merge
if (non_singleton_cg_sccs.empty()) {
assert(indices_of_merged_sccs.size() > 1);
current_ts_indices = move(indices_of_merged_sccs);
} else {
            vector<int> &current_scc = non_singleton_cg_sccs.front();
assert(current_scc.size() > 1);
current_ts_indices = move(current_scc);
non_singleton_cg_sccs.erase(non_singleton_cg_sccs.begin());
}
// If using a merge tree factory, compute a merge tree for this set
if (merge_tree_factory) {
current_merge_tree = merge_tree_factory->compute_merge_tree(
task_proxy, fts, current_ts_indices);
}
} else {
// Add the most recent merge to the current indices set
current_ts_indices.push_back(fts.get_size() - 1);
}
// Select the next merge for the current set of indices, either using the
// tree or the selector.
    pair<int, int> next_pair;
int merged_ts_index = fts.get_size();
if (current_merge_tree) {
assert(!current_merge_tree->done());
next_pair = current_merge_tree->get_next_merge(merged_ts_index);
if (current_merge_tree->done()) {
current_merge_tree = nullptr;
}
} else {
assert(merge_selector);
next_pair = merge_selector->select_merge(fts, current_ts_indices);
}
// Remove the two merged indices from the current set of indices.
for (vector<int>::iterator it = current_ts_indices.begin();
it != current_ts_indices.end();) {
if (*it == next_pair.first || *it == next_pair.second) {
it = current_ts_indices.erase(it);
} else {
++it;
}
}
return next_pair;
}
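/*
  Example trace (hypothetical task with variables 0..4): given
  non_singleton_cg_sccs = {{0, 1}, {3, 4}} and
  indices_of_merged_sccs = {5, 2, 6}, the strategy first merges 0 with 1
  (yielding index 5), then 3 with 4 (yielding index 6), and finally
  merges the SCC representatives 5, 2 and 6.
*/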
}
| 2,967 |
C++
| 33.114942 | 77 | 0.630266 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/types.h
|
#ifndef MERGE_AND_SHRINK_TYPES_H
#define MERGE_AND_SHRINK_TYPES_H
#include <forward_list>
#include <list>
#include <vector>
namespace merge_and_shrink {
// Positive infinity. The name "INFINITY" is taken by an ISO C99 macro.
extern const int INF;
extern const int MINUSINF;
extern const int PRUNED_STATE;
/*
An equivalence class is a set of abstract states that shall be
mapped (shrunk) to the same abstract state.
An equivalence relation is a partitioning of states into
equivalence classes. It may omit certain states entirely; these
will be dropped completely and receive an h value of infinity.
*/
using StateEquivalenceClass = std::forward_list<int>;
using StateEquivalenceRelation = std::vector<StateEquivalenceClass>;
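/*
  Construction sketch (hypothetical states): shrinking states 0 and 2 into
  one abstract state, keeping state 1 separate and dropping state 3:
    StateEquivalenceRelation relation;
    relation.push_back({0, 2});
    relation.push_back({1});
*/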
}
#endif
| 749 |
C
| 26.777777 | 71 | 0.771696 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_tree_factory.cc
|
#include "merge_tree_factory.h"
#include "merge_tree.h"
#include "../options/option_parser.h"
#include "../options/plugin.h"
#include "../utils/logging.h"
#include "../utils/rng_options.h"
#include "../utils/system.h"
#include <iostream>
using namespace std;
namespace merge_and_shrink {
MergeTreeFactory::MergeTreeFactory(const options::Options &options)
: rng(utils::parse_rng_from_options(options)),
update_option(options.get<UpdateOption>("update_option")) {
}
void MergeTreeFactory::dump_options() const {
utils::g_log << "Merge tree options: " << endl;
utils::g_log << "Type: " << name() << endl;
utils::g_log << "Update option: ";
switch (update_option) {
case UpdateOption::USE_FIRST:
utils::g_log << "use first";
break;
case UpdateOption::USE_SECOND:
utils::g_log << "use second";
break;
case UpdateOption::USE_RANDOM:
utils::g_log << "use random";
break;
}
utils::g_log << endl;
dump_tree_specific_options();
}
unique_ptr<MergeTree> MergeTreeFactory::compute_merge_tree(
const TaskProxy &,
const FactoredTransitionSystem &,
const vector<int> &) {
cerr << "This merge tree does not support being computed on a subset "
"of indices for a given factored transition system!" << endl;
utils::exit_with(utils::ExitCode::SEARCH_CRITICAL_ERROR);
}
void MergeTreeFactory::add_options_to_parser(options::OptionParser &parser) {
utils::add_rng_options(parser);
vector<string> update_option;
update_option.push_back("use_first");
update_option.push_back("use_second");
update_option.push_back("use_random");
parser.add_enum_option<UpdateOption>(
"update_option",
update_option,
"When the merge tree is used within another merge strategy, how "
"should it be updated when a merge different to a merge from the "
"tree is performed: choose among use_first, use_second, and "
"use_random to choose which node of the tree should survive and "
"represent the new merged index. Specify use_first (use_second) to "
"let the node represententing the index that would have been merged "
"earlier (later) survive. use_random chooses a random node.",
"use_random");
}
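/*
  Illustration (hypothetical, sketching the update_option semantics
  documented above): suppose the precomputed tree plans to merge index 2
  with index 5 and the result later with index 7, but the enclosing strategy
  merges 5 with 7 first. With use_first, the tree node of index 5 (merged
  earlier in the tree) survives and represents the new product; with
  use_second, the node of index 7 survives; use_random picks one at random.
*/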
static options::PluginTypePlugin<MergeTreeFactory> _type_plugin(
"MergeTree",
"This page describes the available merge trees that can be used to "
"precompute a merge strategy, either for the entire task or a given "
"subset of transition systems of a given factored transition system.\n"
"Merge trees are typically used in the merge strategy of type "
"'precomputed', but they can also be used as fallback merge strategies in "
"'combined' merge strategies.");
}
| 2,787 |
C++
| 34.743589 | 79 | 0.672049 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_scoring_function_single_random.cc
|
#include "merge_scoring_function_single_random.h"
#include "types.h"
#include "../options/option_parser.h"
#include "../options/options.h"
#include "../options/plugin.h"
#include "../utils/logging.h"
#include "../utils/rng.h"
#include "../utils/rng_options.h"
#include <cassert>
using namespace std;
namespace merge_and_shrink {
MergeScoringFunctionSingleRandom::MergeScoringFunctionSingleRandom(
const options::Options &options)
: random_seed(options.get<int>("random_seed")),
rng(utils::parse_rng_from_options(options)) {
}
vector<double> MergeScoringFunctionSingleRandom::compute_scores(
const FactoredTransitionSystem &,
const vector<pair<int, int>> &merge_candidates) {
int chosen_index = (*rng)(merge_candidates.size());
vector<double> scores;
scores.reserve(merge_candidates.size());
for (size_t candidate_index = 0; candidate_index < merge_candidates.size();
++candidate_index) {
if (static_cast<int>(candidate_index) == chosen_index) {
scores.push_back(0);
} else {
scores.push_back(INF);
}
}
return scores;
}
string MergeScoringFunctionSingleRandom::name() const {
return "single random";
}
void MergeScoringFunctionSingleRandom::dump_function_specific_options() const {
utils::g_log << "Random seed: " << random_seed << endl;
}
static shared_ptr<MergeScoringFunction> _parse(options::OptionParser &parser) {
parser.document_synopsis(
"Single random",
"This scoring function assigns exactly one merge candidate a score of "
"0, chosen randomly, and infinity to all others.");
utils::add_rng_options(parser);
options::Options options = parser.parse();
if (parser.dry_run())
return nullptr;
else
return make_shared<MergeScoringFunctionSingleRandom>(options);
}
static options::Plugin<MergeScoringFunction> _plugin("single_random", _parse);
}
| 1,932 |
C++
| 28.738461 | 79 | 0.684783 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/shrink_bucket_based.cc
|
#include "shrink_bucket_based.h"
#include "../utils/logging.h"
#include "../utils/rng.h"
#include "../utils/rng_options.h"
#include <cassert>
#include <iostream>
#include <vector>
using namespace std;
namespace merge_and_shrink {
ShrinkBucketBased::ShrinkBucketBased(const options::Options &opts)
: rng(utils::parse_rng_from_options(opts)) {
}
void ShrinkBucketBased::add_options_to_parser(options::OptionParser &parser) {
utils::add_rng_options(parser);
}
StateEquivalenceRelation ShrinkBucketBased::compute_abstraction(
const vector<Bucket> &buckets, int target_size) const {
bool show_combine_buckets_warning = true;
StateEquivalenceRelation equiv_relation;
equiv_relation.reserve(target_size);
size_t num_states_to_go = 0;
for (size_t bucket_no = 0; bucket_no < buckets.size(); ++bucket_no)
num_states_to_go += buckets[bucket_no].size();
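    /*
      Worked example (hypothetical numbers): with target_size = 4 and two
      buckets of sizes 2 and 3, the first bucket gets a budget of
      4 - 0 - 3 = 1 and is collapsed into a single group, while the second
      bucket gets a budget of 4 - 1 - 0 = 3 and each of its states becomes
      a singleton group, for 4 groups in total.
    */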
for (size_t bucket_no = 0; bucket_no < buckets.size(); ++bucket_no) {
const vector<int> &bucket = buckets[bucket_no];
int states_used_up = static_cast<int>(equiv_relation.size());
int remaining_state_budget = target_size - states_used_up;
num_states_to_go -= bucket.size();
int budget_for_this_bucket = remaining_state_budget - num_states_to_go;
if (budget_for_this_bucket >= static_cast<int>(bucket.size())) {
// Each state in bucket can become a singleton group.
for (size_t i = 0; i < bucket.size(); ++i) {
StateEquivalenceClass group;
group.push_front(bucket[i]);
equiv_relation.push_back(group);
}
} else if (budget_for_this_bucket <= 1) {
// The whole bucket must form one group.
int remaining_buckets = buckets.size() - bucket_no;
if (remaining_state_budget >= remaining_buckets) {
equiv_relation.push_back(StateEquivalenceClass());
} else {
if (bucket_no == 0)
equiv_relation.push_back(StateEquivalenceClass());
if (show_combine_buckets_warning) {
show_combine_buckets_warning = false;
utils::g_log << "Very small node limit, must combine buckets."
<< endl;
}
}
StateEquivalenceClass &group = equiv_relation.back();
group.insert_after(group.before_begin(), bucket.begin(), bucket.end());
} else {
// Complicated case: must combine until bucket budget is met.
// First create singleton groups.
vector<StateEquivalenceClass> groups(bucket.size());
for (size_t i = 0; i < bucket.size(); ++i)
groups[i].push_front(bucket[i]);
// Then combine groups until required size is reached.
assert(budget_for_this_bucket >= 2 &&
budget_for_this_bucket < static_cast<int>(groups.size()));
while (static_cast<int>(groups.size()) > budget_for_this_bucket) {
auto it1 = rng->choose(groups);
auto it2 = it1;
while (it1 == it2) {
it2 = rng->choose(groups);
}
it1->splice_after(it1->before_begin(), *it2);
swap(*it2, groups.back());
assert(groups.back().empty());
groups.pop_back();
}
// Finally add these groups to the result.
for (size_t i = 0; i < groups.size(); ++i) {
equiv_relation.push_back(StateEquivalenceClass());
equiv_relation.back().swap(groups[i]);
}
}
}
return equiv_relation;
}
StateEquivalenceRelation ShrinkBucketBased::compute_equivalence_relation(
const TransitionSystem &ts,
const Distances &distances,
int target_size) const {
vector<Bucket> buckets = partition_into_buckets(ts, distances);
return compute_abstraction(buckets, target_size);
}
}
| 4,024 |
C++
| 38.460784 | 83 | 0.578777 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/fts_factory.cc
|
#include "fts_factory.h"
#include "distances.h"
#include "factored_transition_system.h"
#include "label_equivalence_relation.h"
#include "labels.h"
#include "merge_and_shrink_representation.h"
#include "transition_system.h"
#include "types.h"
#include "../task_proxy.h"
#include "../utils/collections.h"
#include "../utils/logging.h"
#include "../utils/memory.h"
#include <algorithm>
#include <cassert>
#include <unordered_map>
#include <vector>
using namespace std;
namespace merge_and_shrink {
class FTSFactory {
const TaskProxy &task_proxy;
struct TransitionSystemData {
// The following two attributes are only used for statistics
int num_variables;
vector<int> incorporated_variables;
unique_ptr<LabelEquivalenceRelation> label_equivalence_relation;
vector<vector<int>> label_groups;
vector<vector<Transition>> transitions_by_group_id;
vector<bool> relevant_labels;
int num_states;
vector<bool> goal_states;
int init_state;
TransitionSystemData(TransitionSystemData &&other)
: num_variables(other.num_variables),
incorporated_variables(move(other.incorporated_variables)),
label_equivalence_relation(move(other.label_equivalence_relation)),
label_groups(move(other.label_groups)),
transitions_by_group_id(move(other.transitions_by_group_id)),
relevant_labels(move(other.relevant_labels)),
num_states(other.num_states),
goal_states(move(other.goal_states)),
init_state(other.init_state) {
}
TransitionSystemData() = default;
TransitionSystemData(TransitionSystemData &other) = delete;
TransitionSystemData &operator=(TransitionSystemData &other) = delete;
};
vector<TransitionSystemData> transition_system_data_by_var;
// see TODO in build_transitions()
    bool task_has_conditional_effects;
vector<unique_ptr<Label>> create_labels();
void build_state_data(VariableProxy var);
void initialize_transition_system_data(const Labels &labels);
bool is_relevant(int var_no, int label_no) const;
void mark_as_relevant(int var_no, int label_no);
unordered_map<int, int> compute_preconditions(OperatorProxy op);
void handle_operator_effect(
OperatorProxy op,
EffectProxy effect,
const unordered_map<int, int> &pre_val,
vector<bool> &has_effect_on_var,
vector<vector<Transition>> &transitions_by_var);
void handle_operator_precondition(
OperatorProxy op,
FactProxy precondition,
const vector<bool> &has_effect_on_var,
vector<vector<Transition>> &transitions_by_var);
void build_transitions_for_operator(OperatorProxy op);
void build_transitions_for_irrelevant_ops(VariableProxy variable);
void build_transitions();
vector<unique_ptr<TransitionSystem>> create_transition_systems(const Labels &labels);
vector<unique_ptr<MergeAndShrinkRepresentation>> create_mas_representations() const;
vector<unique_ptr<Distances>> create_distances(
const vector<unique_ptr<TransitionSystem>> &transition_systems) const;
public:
explicit FTSFactory(const TaskProxy &task_proxy);
~FTSFactory();
/*
Note: create() may only be called once. We don't worry about
misuse because the class is only used internally in this file.
*/
FactoredTransitionSystem create(
bool compute_init_distances,
bool compute_goal_distances,
utils::Verbosity verbosity);
};
FTSFactory::FTSFactory(const TaskProxy &task_proxy)
: task_proxy(task_proxy), task_has_conditional_effects(false) {
}
FTSFactory::~FTSFactory() {
}
vector<unique_ptr<Label>> FTSFactory::create_labels() {
vector<unique_ptr<Label>> result;
int num_ops = task_proxy.get_operators().size();
if (num_ops > 0) {
int max_num_labels = 2 * num_ops - 1;
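        /*
          Assumed rationale for the bound: label reduction combines two
          labels into a fresh one, so starting from num_ops labels at most
          num_ops - 1 combined labels can be added, giving 2 * num_ops - 1
          labels in total.
        */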
result.reserve(max_num_labels);
}
for (OperatorProxy op : task_proxy.get_operators()) {
result.push_back(utils::make_unique_ptr<Label>(op.get_cost()));
}
return result;
}
void FTSFactory::build_state_data(VariableProxy var) {
int var_id = var.get_id();
TransitionSystemData &ts_data = transition_system_data_by_var[var_id];
ts_data.init_state = task_proxy.get_initial_state()[var_id].get_value();
int range = task_proxy.get_variables()[var_id].get_domain_size();
ts_data.num_states = range;
int goal_value = -1;
GoalsProxy goals = task_proxy.get_goals();
for (FactProxy goal : goals) {
if (goal.get_variable().get_id() == var_id) {
assert(goal_value == -1);
goal_value = goal.get_value();
break;
}
}
ts_data.goal_states.resize(range, false);
for (int value = 0; value < range; ++value) {
if (value == goal_value || goal_value == -1) {
ts_data.goal_states[value] = true;
}
}
}
void FTSFactory::initialize_transition_system_data(const Labels &labels) {
VariablesProxy variables = task_proxy.get_variables();
int num_labels = task_proxy.get_operators().size();
transition_system_data_by_var.resize(variables.size());
for (VariableProxy var : variables) {
TransitionSystemData &ts_data = transition_system_data_by_var[var.get_id()];
ts_data.num_variables = variables.size();
ts_data.incorporated_variables.push_back(var.get_id());
ts_data.transitions_by_group_id.reserve(labels.get_max_size());
ts_data.relevant_labels.resize(num_labels, false);
build_state_data(var);
}
}
bool FTSFactory::is_relevant(int var_no, int label_no) const {
return transition_system_data_by_var[var_no].relevant_labels[label_no];
}
void FTSFactory::mark_as_relevant(int var_no, int label_no) {
transition_system_data_by_var[var_no].relevant_labels[label_no] = true;
}
unordered_map<int, int> FTSFactory::compute_preconditions(OperatorProxy op) {
unordered_map<int, int> pre_val;
for (FactProxy precondition : op.get_preconditions())
pre_val[precondition.get_variable().get_id()] =
precondition.get_value();
return pre_val;
}
void FTSFactory::handle_operator_effect(
OperatorProxy op,
EffectProxy effect,
const unordered_map<int, int> &pre_val,
vector<bool> &has_effect_on_var,
vector<vector<Transition>> &transitions_by_var) {
int label_no = op.get_id();
FactProxy fact = effect.get_fact();
VariableProxy var = fact.get_variable();
int var_no = var.get_id();
has_effect_on_var[var_no] = true;
int post_value = fact.get_value();
// Determine possible values that var can have when this
// operator is applicable.
int pre_value = -1;
auto pre_val_it = pre_val.find(var_no);
if (pre_val_it != pre_val.end())
pre_value = pre_val_it->second;
int pre_value_min, pre_value_max;
if (pre_value == -1) {
pre_value_min = 0;
pre_value_max = var.get_domain_size();
} else {
pre_value_min = pre_value;
pre_value_max = pre_value + 1;
}
/*
cond_effect_pre_value == x means that the effect has an
effect condition "var == x".
cond_effect_pre_value == -1 means no effect condition on var.
has_other_effect_cond is true iff there exists an effect
condition on a variable other than var.
*/
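    /*
      Worked example (hypothetical): for a variable v with domain {0, 1, 2},
      no precondition on v, and an effect "v := 1 if v == 0", the loops
      below generate the triggering transition (0, 1) plus the self-loops
      (1, 1) and (2, 2) for the values where the effect condition is false.
    */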
EffectConditionsProxy effect_conditions = effect.get_conditions();
int cond_effect_pre_value = -1;
bool has_other_effect_cond = false;
for (FactProxy condition : effect_conditions) {
if (condition.get_variable() == var) {
cond_effect_pre_value = condition.get_value();
} else {
has_other_effect_cond = true;
}
}
// Handle transitions that occur when the effect triggers.
for (int value = pre_value_min; value < pre_value_max; ++value) {
/*
Only add a transition if it is possible that the effect
triggers. We can rule out that the effect triggers if it has
a condition on var and this condition is not satisfied.
*/
if (cond_effect_pre_value == -1 || cond_effect_pre_value == value)
transitions_by_var[var_no].emplace_back(value, post_value);
}
// Handle transitions that occur when the effect does not trigger.
if (!effect_conditions.empty()) {
for (int value = pre_value_min; value < pre_value_max; ++value) {
/*
Add self-loop if the effect might not trigger.
If the effect has a condition on another variable, then
it can fail to trigger no matter which value var has.
If it only has a condition on var, then the effect
fails to trigger if this condition is false.
*/
if (has_other_effect_cond || value != cond_effect_pre_value)
transitions_by_var[var_no].emplace_back(value, value);
}
task_has_conditional_effects = true;
}
mark_as_relevant(var_no, label_no);
}
void FTSFactory::handle_operator_precondition(
OperatorProxy op,
FactProxy precondition,
const vector<bool> &has_effect_on_var,
vector<vector<Transition>> &transitions_by_var) {
int label_no = op.get_id();
int var_no = precondition.get_variable().get_id();
if (!has_effect_on_var[var_no]) {
int value = precondition.get_value();
transitions_by_var[var_no].emplace_back(value, value);
mark_as_relevant(var_no, label_no);
}
}
void FTSFactory::build_transitions_for_operator(OperatorProxy op) {
/*
- Mark op as relevant in the transition systems corresponding
to variables on which it has a precondition or effect.
- Add transitions induced by op in these transition systems.
*/
unordered_map<int, int> pre_val = compute_preconditions(op);
int num_variables = task_proxy.get_variables().size();
vector<bool> has_effect_on_var(task_proxy.get_variables().size(), false);
vector<vector<Transition>> transitions_by_var(num_variables);
for (EffectProxy effect : op.get_effects())
handle_operator_effect(op, effect, pre_val, has_effect_on_var, transitions_by_var);
/*
We must handle preconditions *after* effects because handling
the effects sets has_effect_on_var.
*/
for (FactProxy precondition : op.get_preconditions())
handle_operator_precondition(op, precondition, has_effect_on_var, transitions_by_var);
int label_no = op.get_id();
for (int var_no = 0; var_no < num_variables; ++var_no) {
if (!is_relevant(var_no, label_no)) {
/*
We do not want to add transitions of irrelevant labels here,
since they are handled together in a separate step.
*/
continue;
}
vector<Transition> &transitions = transitions_by_var[var_no];
/*
        TODO: Our method for generating transitions is only guaranteed
to generate sorted and unique transitions if the task has no
conditional effects.
*/
if (task_has_conditional_effects) {
utils::sort_unique(transitions);
} else {
assert(utils::is_sorted_unique(transitions));
}
vector<vector<Transition>> &existing_transitions_by_group_id =
transition_system_data_by_var[var_no].transitions_by_group_id;
vector<vector<int>> &label_groups = transition_system_data_by_var[var_no].label_groups;
assert(existing_transitions_by_group_id.size() == label_groups.size());
bool found_locally_equivalent_label_group = false;
for (size_t group_id = 0; group_id < existing_transitions_by_group_id.size(); ++group_id) {
const vector<Transition> &group_transitions = existing_transitions_by_group_id[group_id];
if (transitions == group_transitions) {
label_groups[group_id].push_back(label_no);
found_locally_equivalent_label_group = true;
break;
}
}
if (!found_locally_equivalent_label_group) {
existing_transitions_by_group_id.push_back(move(transitions));
label_groups.push_back({label_no});
}
}
}
void FTSFactory::build_transitions_for_irrelevant_ops(VariableProxy variable) {
int var_no = variable.get_id();
int num_states = variable.get_domain_size();
int num_labels = task_proxy.get_operators().size();
// Collect all irrelevant labels for this variable.
vector<int> irrelevant_labels;
for (int label_no = 0; label_no < num_labels; ++label_no) {
if (!is_relevant(var_no, label_no)) {
irrelevant_labels.push_back(label_no);
}
}
TransitionSystemData &ts_data = transition_system_data_by_var[var_no];
if (!irrelevant_labels.empty()) {
vector<Transition> transitions;
transitions.reserve(num_states);
for (int state = 0; state < num_states; ++state)
transitions.emplace_back(state, state);
ts_data.label_groups.push_back(move(irrelevant_labels));
ts_data.transitions_by_group_id.push_back(move(transitions));
}
}
void FTSFactory::build_transitions() {
/*
- Compute all transitions of all operators for all variables, grouping
transitions of locally equivalent labels for a given variable.
      - Compute relevant operator information as a side effect.
*/
for (OperatorProxy op : task_proxy.get_operators())
build_transitions_for_operator(op);
/*
Compute transitions of irrelevant operators for each variable only
once and put the labels into a single label group.
*/
for (VariableProxy variable : task_proxy.get_variables())
build_transitions_for_irrelevant_ops(variable);
}
vector<unique_ptr<TransitionSystem>> FTSFactory::create_transition_systems(const Labels &labels) {
// Create the actual TransitionSystem objects.
int num_variables = task_proxy.get_variables().size();
// We reserve space for the transition systems added later by merging.
vector<unique_ptr<TransitionSystem>> result;
assert(num_variables >= 1);
result.reserve(num_variables * 2 - 1);
for (int var_no = 0; var_no < num_variables; ++var_no) {
TransitionSystemData &ts_data = transition_system_data_by_var[var_no];
/* Construct the label equivalence relation from the previously
computed label groups. */
ts_data.label_equivalence_relation =
utils::make_unique_ptr<LabelEquivalenceRelation>(
labels, ts_data.label_groups);
result.push_back(utils::make_unique_ptr<TransitionSystem>(
ts_data.num_variables,
move(ts_data.incorporated_variables),
move(ts_data.label_equivalence_relation),
move(ts_data.transitions_by_group_id),
ts_data.num_states,
move(ts_data.goal_states),
ts_data.init_state
));
}
return result;
}
vector<unique_ptr<MergeAndShrinkRepresentation>> FTSFactory::create_mas_representations() const {
// Create the actual MergeAndShrinkRepresentation objects.
int num_variables = task_proxy.get_variables().size();
// We reserve space for the transition systems added later by merging.
vector<unique_ptr<MergeAndShrinkRepresentation>> result;
assert(num_variables >= 1);
result.reserve(num_variables * 2 - 1);
for (int var_no = 0; var_no < num_variables; ++var_no) {
int range = task_proxy.get_variables()[var_no].get_domain_size();
result.push_back(
utils::make_unique_ptr<MergeAndShrinkRepresentationLeaf>(var_no, range));
}
return result;
}
vector<unique_ptr<Distances>> FTSFactory::create_distances(
const vector<unique_ptr<TransitionSystem>> &transition_systems) const {
// Create the actual Distances objects.
int num_variables = task_proxy.get_variables().size();
// We reserve space for the transition systems added later by merging.
vector<unique_ptr<Distances>> result;
assert(num_variables >= 1);
result.reserve(num_variables * 2 - 1);
for (int var_no = 0; var_no < num_variables; ++var_no) {
result.push_back(
utils::make_unique_ptr<Distances>(*transition_systems[var_no]));
}
return result;
}
FactoredTransitionSystem FTSFactory::create(
const bool compute_init_distances,
const bool compute_goal_distances,
utils::Verbosity verbosity) {
if (verbosity >= utils::Verbosity::NORMAL) {
utils::g_log << "Building atomic transition systems... " << endl;
}
unique_ptr<Labels> labels = utils::make_unique_ptr<Labels>(create_labels());
initialize_transition_system_data(*labels);
build_transitions();
vector<unique_ptr<TransitionSystem>> transition_systems =
create_transition_systems(*labels);
vector<unique_ptr<MergeAndShrinkRepresentation>> mas_representations =
create_mas_representations();
vector<unique_ptr<Distances>> distances =
create_distances(transition_systems);
return FactoredTransitionSystem(
move(labels),
move(transition_systems),
move(mas_representations),
move(distances),
compute_init_distances,
compute_goal_distances,
verbosity);
}
FactoredTransitionSystem create_factored_transition_system(
const TaskProxy &task_proxy,
const bool compute_init_distances,
const bool compute_goal_distances,
utils::Verbosity verbosity) {
return FTSFactory(task_proxy).create(
compute_init_distances,
compute_goal_distances,
verbosity);
}
}
| 17,939 |
C++
| 37.170213 | 101 | 0.646915 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_selector.cc
|
#include "merge_selector.h"
#include "factored_transition_system.h"
#include "../options/plugin.h"
#include "../utils/logging.h"
#include <cassert>
#include <iostream>
using namespace std;
namespace merge_and_shrink {
vector<pair<int, int>> MergeSelector::compute_merge_candidates(
const FactoredTransitionSystem &fts,
const vector<int> &indices_subset) const {
vector<pair<int, int>> merge_candidates;
if (indices_subset.empty()) {
for (int ts_index1 = 0; ts_index1 < fts.get_size(); ++ts_index1) {
if (fts.is_active(ts_index1)) {
for (int ts_index2 = ts_index1 + 1; ts_index2 < fts.get_size();
++ts_index2) {
if (fts.is_active(ts_index2)) {
merge_candidates.emplace_back(ts_index1, ts_index2);
}
}
}
}
} else {
assert(indices_subset.size() > 1);
for (size_t i = 0; i < indices_subset.size(); ++i) {
int ts_index1 = indices_subset[i];
assert(fts.is_active(ts_index1));
for (size_t j = i + 1; j < indices_subset.size(); ++j) {
int ts_index2 = indices_subset[j];
assert(fts.is_active(ts_index2));
merge_candidates.emplace_back(ts_index1, ts_index2);
}
}
}
return merge_candidates;
}
void MergeSelector::dump_options() const {
utils::g_log << "Merge selector options:" << endl;
utils::g_log << "Name: " << name() << endl;
dump_specific_options();
}
static options::PluginTypePlugin<MergeSelector> _type_plugin(
"MergeSelector",
"This page describes the available merge selectors. They are used to "
"compute the next merge purely based on the state of the given factored "
"transition system. They are used in the merge strategy of type "
"'stateless', but they can also easily be used in different 'combined' "
"merged strategies.");
}
| 1,984 |
C++
| 32.644067 | 79 | 0.584173 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_strategy_stateless.cc
|
#include "merge_strategy_stateless.h"
#include "merge_selector.h"
using namespace std;
namespace merge_and_shrink {
MergeStrategyStateless::MergeStrategyStateless(
const FactoredTransitionSystem &fts,
const shared_ptr<MergeSelector> &merge_selector)
: MergeStrategy(fts),
merge_selector(merge_selector) {
}
pair<int, int> MergeStrategyStateless::get_next() {
return merge_selector->select_merge(fts);
}
}
| 431 |
C++
| 21.736841 | 52 | 0.74478 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_scoring_function_dfp.h
|
#ifndef MERGE_AND_SHRINK_MERGE_SCORING_FUNCTION_DFP_H
#define MERGE_AND_SHRINK_MERGE_SCORING_FUNCTION_DFP_H
#include "merge_scoring_function.h"
namespace merge_and_shrink {
class TransitionSystem;
class MergeScoringFunctionDFP : public MergeScoringFunction {
std::vector<int> compute_label_ranks(
const FactoredTransitionSystem &fts, int index) const;
protected:
virtual std::string name() const override;
public:
MergeScoringFunctionDFP() = default;
virtual ~MergeScoringFunctionDFP() override = default;
virtual std::vector<double> compute_scores(
const FactoredTransitionSystem &fts,
const std::vector<std::pair<int, int>> &merge_candidates) override;
virtual bool requires_init_distances() const override {
return false;
}
virtual bool requires_goal_distances() const override {
return true;
}
};
}
#endif
| 891 |
C
| 27.774193 | 75 | 0.722783 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/merge_and_shrink/merge_scoring_function_goal_relevance.cc
|
#include "merge_scoring_function_goal_relevance.h"
#include "factored_transition_system.h"
#include "transition_system.h"
#include "utils.h"
#include "../options/option_parser.h"
#include "../options/plugin.h"
using namespace std;
namespace merge_and_shrink {
vector<double> MergeScoringFunctionGoalRelevance::compute_scores(
const FactoredTransitionSystem &fts,
const vector<pair<int, int>> &merge_candidates) {
int num_ts = fts.get_size();
vector<bool> goal_relevant(num_ts, false);
for (int ts_index : fts) {
const TransitionSystem &ts = fts.get_transition_system(ts_index);
if (is_goal_relevant(ts)) {
goal_relevant[ts_index] = true;
}
}
vector<double> scores;
scores.reserve(merge_candidates.size());
for (pair<int, int> merge_candidate : merge_candidates) {
int ts_index1 = merge_candidate.first;
int ts_index2 = merge_candidate.second;
int score = INF;
if (goal_relevant[ts_index1] || goal_relevant[ts_index2]) {
score = 0;
}
scores.push_back(score);
}
return scores;
}
string MergeScoringFunctionGoalRelevance::name() const {
return "goal relevance";
}
static shared_ptr<MergeScoringFunction> _parse(options::OptionParser &parser) {
parser.document_synopsis(
"Goal relevance scoring",
"This scoring function assigns a merge candidate a value of 0 iff at "
"least one of the two transition systems of the merge candidate is "
"goal relevant in the sense that there is an abstract non-goal state. "
"All other candidates get a score of positive infinity.");
if (parser.dry_run())
return nullptr;
else
return make_shared<MergeScoringFunctionGoalRelevance>();
}
static options::Plugin<MergeScoringFunction> _plugin("goal_relevance", _parse);
}
| 1,867 |
C++
| 31.206896 | 79 | 0.670595 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/open_lists/best_first_open_list.cc
|
#include "best_first_open_list.h"
#include "../evaluator.h"
#include "../open_list.h"
#include "../option_parser.h"
#include "../plugin.h"
#include "../utils/memory.h"
#include <cassert>
#include <deque>
#include <map>
using namespace std;
namespace standard_scalar_open_list {
template<class Entry>
class BestFirstOpenList : public OpenList<Entry> {
typedef deque<Entry> Bucket;
map<int, Bucket> buckets;
int size;
shared_ptr<Evaluator> evaluator;
protected:
virtual void do_insertion(EvaluationContext &eval_context,
const Entry &entry) override;
public:
explicit BestFirstOpenList(const Options &opts);
BestFirstOpenList(const shared_ptr<Evaluator> &eval, bool preferred_only);
virtual ~BestFirstOpenList() override = default;
virtual Entry remove_min() override;
virtual bool empty() const override;
virtual void clear() override;
virtual void get_path_dependent_evaluators(set<Evaluator *> &evals) override;
virtual bool is_dead_end(
EvaluationContext &eval_context) const override;
virtual bool is_reliable_dead_end(
EvaluationContext &eval_context) const override;
};
template<class Entry>
BestFirstOpenList<Entry>::BestFirstOpenList(const Options &opts)
: OpenList<Entry>(opts.get<bool>("pref_only")),
size(0),
evaluator(opts.get<shared_ptr<Evaluator>>("eval")) {
}
template<class Entry>
BestFirstOpenList<Entry>::BestFirstOpenList(
const shared_ptr<Evaluator> &evaluator, bool preferred_only)
: OpenList<Entry>(preferred_only),
size(0),
evaluator(evaluator) {
}
template<class Entry>
void BestFirstOpenList<Entry>::do_insertion(
EvaluationContext &eval_context, const Entry &entry) {
int key = eval_context.get_evaluator_value(evaluator.get());
buckets[key].push_back(entry);
++size;
}
template<class Entry>
Entry BestFirstOpenList<Entry>::remove_min() {
assert(size > 0);
auto it = buckets.begin();
assert(it != buckets.end());
Bucket &bucket = it->second;
assert(!bucket.empty());
Entry result = bucket.front();
bucket.pop_front();
if (bucket.empty())
buckets.erase(it);
--size;
return result;
}
template<class Entry>
bool BestFirstOpenList<Entry>::empty() const {
return size == 0;
}
template<class Entry>
void BestFirstOpenList<Entry>::clear() {
buckets.clear();
size = 0;
}
template<class Entry>
void BestFirstOpenList<Entry>::get_path_dependent_evaluators(
set<Evaluator *> &evals) {
evaluator->get_path_dependent_evaluators(evals);
}
template<class Entry>
bool BestFirstOpenList<Entry>::is_dead_end(
EvaluationContext &eval_context) const {
return eval_context.is_evaluator_value_infinite(evaluator.get());
}
template<class Entry>
bool BestFirstOpenList<Entry>::is_reliable_dead_end(
EvaluationContext &eval_context) const {
return is_dead_end(eval_context) && evaluator->dead_ends_are_reliable();
}
BestFirstOpenListFactory::BestFirstOpenListFactory(
const Options &options)
: options(options) {
}
unique_ptr<StateOpenList>
BestFirstOpenListFactory::create_state_open_list() {
return utils::make_unique_ptr<BestFirstOpenList<StateOpenListEntry>>(options);
}
unique_ptr<EdgeOpenList>
BestFirstOpenListFactory::create_edge_open_list() {
return utils::make_unique_ptr<BestFirstOpenList<EdgeOpenListEntry>>(options);
}
static shared_ptr<OpenListFactory> _parse(OptionParser &parser) {
parser.document_synopsis(
"Best-first open list",
"Open list that uses a single evaluator and FIFO tiebreaking.");
parser.document_note(
"Implementation Notes",
"Elements with the same evaluator value are stored in double-ended "
"queues, called \"buckets\". The open list stores a map from evaluator "
"values to buckets. Pushing and popping from a bucket runs in constant "
"time. Therefore, inserting and removing an entry from the open list "
"takes time O(log(n)), where n is the number of buckets.");
parser.add_option<shared_ptr<Evaluator>>("eval", "evaluator");
parser.add_option<bool>(
"pref_only",
"insert only nodes generated by preferred operators", "false");
Options opts = parser.parse();
if (parser.dry_run())
return nullptr;
else
return make_shared<BestFirstOpenListFactory>(opts);
}
static Plugin<OpenListFactory> _plugin("single", _parse);
}
| 4,462 |
C++
| 28.169934 | 82 | 0.697221 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/open_lists/tiebreaking_open_list.cc
|
#include "tiebreaking_open_list.h"
#include "../evaluator.h"
#include "../open_list.h"
#include "../option_parser.h"
#include "../plugin.h"
#include "../utils/memory.h"
#include <cassert>
#include <deque>
#include <map>
#include <utility>
#include <vector>
using namespace std;
namespace tiebreaking_open_list {
template<class Entry>
class TieBreakingOpenList : public OpenList<Entry> {
using Bucket = deque<Entry>;
map<const vector<int>, Bucket> buckets;
int size;
vector<shared_ptr<Evaluator>> evaluators;
/*
If allow_unsafe_pruning is true, we ignore (don't insert) states
which the first evaluator considers a dead end, even if it is
not a safe heuristic.
*/
bool allow_unsafe_pruning;
int dimension() const;
protected:
virtual void do_insertion(EvaluationContext &eval_context,
const Entry &entry) override;
public:
explicit TieBreakingOpenList(const Options &opts);
virtual ~TieBreakingOpenList() override = default;
virtual Entry remove_min() override;
virtual bool empty() const override;
virtual void clear() override;
virtual void get_path_dependent_evaluators(set<Evaluator *> &evals) override;
virtual bool is_dead_end(
EvaluationContext &eval_context) const override;
virtual bool is_reliable_dead_end(
EvaluationContext &eval_context) const override;
};
template<class Entry>
TieBreakingOpenList<Entry>::TieBreakingOpenList(const Options &opts)
: OpenList<Entry>(opts.get<bool>("pref_only")),
size(0), evaluators(opts.get_list<shared_ptr<Evaluator>>("evals")),
allow_unsafe_pruning(opts.get<bool>("unsafe_pruning")) {
}
template<class Entry>
void TieBreakingOpenList<Entry>::do_insertion(
EvaluationContext &eval_context, const Entry &entry) {
vector<int> key;
key.reserve(evaluators.size());
for (const shared_ptr<Evaluator> &evaluator : evaluators)
key.push_back(eval_context.get_evaluator_value_or_infinity(evaluator.get()));
buckets[key].push_back(entry);
++size;
}
template<class Entry>
Entry TieBreakingOpenList<Entry>::remove_min() {
assert(size > 0);
    auto it = buckets.begin();
assert(it != buckets.end());
assert(!it->second.empty());
--size;
Entry result = it->second.front();
it->second.pop_front();
if (it->second.empty())
buckets.erase(it);
return result;
}
template<class Entry>
bool TieBreakingOpenList<Entry>::empty() const {
return size == 0;
}
template<class Entry>
void TieBreakingOpenList<Entry>::clear() {
buckets.clear();
size = 0;
}
template<class Entry>
int TieBreakingOpenList<Entry>::dimension() const {
return evaluators.size();
}
template<class Entry>
void TieBreakingOpenList<Entry>::get_path_dependent_evaluators(
set<Evaluator *> &evals) {
for (const shared_ptr<Evaluator> &evaluator : evaluators)
evaluator->get_path_dependent_evaluators(evals);
}
template<class Entry>
bool TieBreakingOpenList<Entry>::is_dead_end(
EvaluationContext &eval_context) const {
// TODO: Properly document this behaviour.
// If one safe heuristic detects a dead end, return true.
if (is_reliable_dead_end(eval_context))
return true;
// If the first heuristic detects a dead-end and we allow "unsafe
// pruning", return true.
if (allow_unsafe_pruning &&
eval_context.is_evaluator_value_infinite(evaluators[0].get()))
return true;
// Otherwise, return true if all heuristics agree this is a dead-end.
for (const shared_ptr<Evaluator> &evaluator : evaluators)
if (!eval_context.is_evaluator_value_infinite(evaluator.get()))
return false;
return true;
}
template<class Entry>
bool TieBreakingOpenList<Entry>::is_reliable_dead_end(
EvaluationContext &eval_context) const {
for (const shared_ptr<Evaluator> &evaluator : evaluators)
if (eval_context.is_evaluator_value_infinite(evaluator.get()) &&
evaluator->dead_ends_are_reliable())
return true;
return false;
}
TieBreakingOpenListFactory::TieBreakingOpenListFactory(const Options &options)
: options(options) {
}
unique_ptr<StateOpenList>
TieBreakingOpenListFactory::create_state_open_list() {
return utils::make_unique_ptr<TieBreakingOpenList<StateOpenListEntry>>(options);
}
unique_ptr<EdgeOpenList>
TieBreakingOpenListFactory::create_edge_open_list() {
return utils::make_unique_ptr<TieBreakingOpenList<EdgeOpenListEntry>>(options);
}
static shared_ptr<OpenListFactory> _parse(OptionParser &parser) {
parser.document_synopsis("Tie-breaking open list", "");
parser.add_list_option<shared_ptr<Evaluator>>("evals", "evaluators");
parser.add_option<bool>(
"pref_only",
"insert only nodes generated by preferred operators", "false");
parser.add_option<bool>(
"unsafe_pruning",
"allow unsafe pruning when the main evaluator regards a state a dead end",
"true");
Options opts = parser.parse();
opts.verify_list_non_empty<shared_ptr<Evaluator>>("evals");
if (parser.dry_run())
return nullptr;
else
return make_shared<TieBreakingOpenListFactory>(opts);
}
static Plugin<OpenListFactory> _plugin("tiebreaking", _parse);
}
| 5,348 |
C++
| 29.565714 | 85 | 0.690912 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/open_lists/tiebreaking_open_list.h
|
#ifndef OPEN_LISTS_TIEBREAKING_OPEN_LIST_H
#define OPEN_LISTS_TIEBREAKING_OPEN_LIST_H
#include "../open_list_factory.h"
#include "../option_parser_util.h"
namespace tiebreaking_open_list {
class TieBreakingOpenListFactory : public OpenListFactory {
Options options;
public:
explicit TieBreakingOpenListFactory(const Options &options);
virtual ~TieBreakingOpenListFactory() override = default;
virtual std::unique_ptr<StateOpenList> create_state_open_list() override;
virtual std::unique_ptr<EdgeOpenList> create_edge_open_list() override;
};
}
#endif
| 575 |
C
| 27.799999 | 77 | 0.763478 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/open_lists/pareto_open_list.h
|
#ifndef OPEN_LISTS_PARETO_OPEN_LIST_H
#define OPEN_LISTS_PARETO_OPEN_LIST_H
#include "../open_list_factory.h"
#include "../option_parser_util.h"
namespace pareto_open_list {
class ParetoOpenListFactory : public OpenListFactory {
Options options;
public:
explicit ParetoOpenListFactory(const Options &options);
virtual ~ParetoOpenListFactory() override = default;
virtual std::unique_ptr<StateOpenList> create_state_open_list() override;
virtual std::unique_ptr<EdgeOpenList> create_edge_open_list() override;
};
}
#endif
| 545 |
C
| 26.299999 | 77 | 0.750459 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/open_lists/type_based_open_list.cc
|
#include "type_based_open_list.h"
#include "../evaluator.h"
#include "../open_list.h"
#include "../option_parser.h"
#include "../plugin.h"
#include "../utils/collections.h"
#include "../utils/hash.h"
#include "../utils/markup.h"
#include "../utils/memory.h"
#include "../utils/rng.h"
#include "../utils/rng_options.h"
#include <memory>
#include <unordered_map>
#include <vector>
using namespace std;
namespace type_based_open_list {
template<class Entry>
class TypeBasedOpenList : public OpenList<Entry> {
shared_ptr<utils::RandomNumberGenerator> rng;
vector<shared_ptr<Evaluator>> evaluators;
using Key = vector<int>;
using Bucket = vector<Entry>;
vector<pair<Key, Bucket>> keys_and_buckets;
utils::HashMap<Key, int> key_to_bucket_index;
protected:
virtual void do_insertion(
EvaluationContext &eval_context, const Entry &entry) override;
public:
explicit TypeBasedOpenList(const Options &opts);
virtual ~TypeBasedOpenList() override = default;
virtual Entry remove_min() override;
virtual bool empty() const override;
virtual void clear() override;
virtual bool is_dead_end(EvaluationContext &eval_context) const override;
virtual bool is_reliable_dead_end(
EvaluationContext &eval_context) const override;
virtual void get_path_dependent_evaluators(set<Evaluator *> &evals) override;
};
template<class Entry>
void TypeBasedOpenList<Entry>::do_insertion(
EvaluationContext &eval_context, const Entry &entry) {
vector<int> key;
key.reserve(evaluators.size());
for (const shared_ptr<Evaluator> &evaluator : evaluators) {
key.push_back(
eval_context.get_evaluator_value_or_infinity(evaluator.get()));
}
auto it = key_to_bucket_index.find(key);
if (it == key_to_bucket_index.end()) {
key_to_bucket_index[key] = keys_and_buckets.size();
keys_and_buckets.push_back(make_pair(move(key), Bucket({entry})));
} else {
size_t bucket_index = it->second;
assert(utils::in_bounds(bucket_index, keys_and_buckets));
keys_and_buckets[bucket_index].second.push_back(entry);
}
}
template<class Entry>
TypeBasedOpenList<Entry>::TypeBasedOpenList(const Options &opts)
: rng(utils::parse_rng_from_options(opts)),
evaluators(opts.get_list<shared_ptr<Evaluator>>("evaluators")) {
}
template<class Entry>
Entry TypeBasedOpenList<Entry>::remove_min() {
size_t bucket_id = (*rng)(keys_and_buckets.size());
auto &key_and_bucket = keys_and_buckets[bucket_id];
const Key &min_key = key_and_bucket.first;
Bucket &bucket = key_and_bucket.second;
int pos = (*rng)(bucket.size());
Entry result = utils::swap_and_pop_from_vector(bucket, pos);
if (bucket.empty()) {
// Swap the empty bucket with the last bucket, then delete it.
key_to_bucket_index[keys_and_buckets.back().first] = bucket_id;
key_to_bucket_index.erase(min_key);
utils::swap_and_pop_from_vector(keys_and_buckets, bucket_id);
}
return result;
}
template<class Entry>
bool TypeBasedOpenList<Entry>::empty() const {
return keys_and_buckets.empty();
}
template<class Entry>
void TypeBasedOpenList<Entry>::clear() {
keys_and_buckets.clear();
key_to_bucket_index.clear();
}
template<class Entry>
bool TypeBasedOpenList<Entry>::is_dead_end(
EvaluationContext &eval_context) const {
// If one evaluator is sure we have a dead end, return true.
if (is_reliable_dead_end(eval_context))
return true;
// Otherwise, return true if all evaluators agree this is a dead-end.
for (const shared_ptr<Evaluator> &evaluator : evaluators) {
if (!eval_context.is_evaluator_value_infinite(evaluator.get()))
return false;
}
return true;
}
template<class Entry>
bool TypeBasedOpenList<Entry>::is_reliable_dead_end(
EvaluationContext &eval_context) const {
for (const shared_ptr<Evaluator> &evaluator : evaluators) {
if (evaluator->dead_ends_are_reliable() &&
eval_context.is_evaluator_value_infinite(evaluator.get()))
return true;
}
return false;
}
template<class Entry>
void TypeBasedOpenList<Entry>::get_path_dependent_evaluators(
set<Evaluator *> &evals) {
for (const shared_ptr<Evaluator> &evaluator : evaluators) {
evaluator->get_path_dependent_evaluators(evals);
}
}
TypeBasedOpenListFactory::TypeBasedOpenListFactory(
const Options &options)
: options(options) {
}
unique_ptr<StateOpenList>
TypeBasedOpenListFactory::create_state_open_list() {
return utils::make_unique_ptr<TypeBasedOpenList<StateOpenListEntry>>(options);
}
unique_ptr<EdgeOpenList>
TypeBasedOpenListFactory::create_edge_open_list() {
return utils::make_unique_ptr<TypeBasedOpenList<EdgeOpenListEntry>>(options);
}
static shared_ptr<OpenListFactory> _parse(OptionParser &parser) {
parser.document_synopsis(
"Type-based open list",
"Uses multiple evaluators to assign entries to buckets. "
"All entries in a bucket have the same evaluator values. "
"When retrieving an entry, a bucket is chosen uniformly at "
"random and one of the contained entries is selected "
"uniformly randomly. "
"The algorithm is based on" + utils::format_conference_reference(
{"Fan Xie", "Martin Mueller", "Robert Holte", "Tatsuya Imai"},
"Type-Based Exploration with Multiple Search Queues for"
" Satisficing Planning",
"http://www.aaai.org/ocs/index.php/AAAI/AAAI14/paper/view/8472/8705",
"Proceedings of the Twenty-Eigth AAAI Conference Conference"
" on Artificial Intelligence (AAAI 2014)",
"2395-2401",
"AAAI Press",
"2014"));
parser.add_list_option<shared_ptr<Evaluator>>(
"evaluators",
"Evaluators used to determine the bucket for each entry.");
utils::add_rng_options(parser);
Options opts = parser.parse();
opts.verify_list_non_empty<shared_ptr<Evaluator>>("evaluators");
if (parser.dry_run())
return nullptr;
else
return make_shared<TypeBasedOpenListFactory>(opts);
}
static Plugin<OpenListFactory> _plugin("type_based", _parse);
}
| 6,251 |
C++
| 32.433155 | 82 | 0.677812 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/open_lists/best_first_open_list.h
|
#ifndef OPEN_LISTS_BEST_FIRST_OPEN_LIST_H
#define OPEN_LISTS_BEST_FIRST_OPEN_LIST_H
#include "../open_list_factory.h"
#include "../option_parser_util.h"
/*
Open list indexed by a single int, using FIFO tie-breaking.
Implemented as a map from int to deques.
*/
namespace standard_scalar_open_list {
class BestFirstOpenListFactory : public OpenListFactory {
Options options;
public:
explicit BestFirstOpenListFactory(const Options &options);
virtual ~BestFirstOpenListFactory() override = default;
virtual std::unique_ptr<StateOpenList> create_state_open_list() override;
virtual std::unique_ptr<EdgeOpenList> create_edge_open_list() override;
};
}
#endif
| 685 |
C
| 24.407407 | 77 | 0.745985 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/open_lists/alternation_open_list.h
|
#ifndef OPEN_LISTS_ALTERNATION_OPEN_LIST_H
#define OPEN_LISTS_ALTERNATION_OPEN_LIST_H
#include "../open_list_factory.h"
#include "../option_parser_util.h"
namespace alternation_open_list {
class AlternationOpenListFactory : public OpenListFactory {
Options options;
public:
explicit AlternationOpenListFactory(const Options &options);
virtual ~AlternationOpenListFactory() override = default;
virtual std::unique_ptr<StateOpenList> create_state_open_list() override;
virtual std::unique_ptr<EdgeOpenList> create_edge_open_list() override;
};
}
#endif
| 575 |
C
| 27.799999 | 77 | 0.763478 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/open_lists/epsilon_greedy_open_list.cc
|
#include "epsilon_greedy_open_list.h"
#include "../evaluator.h"
#include "../open_list.h"
#include "../option_parser.h"
#include "../plugin.h"
#include "../utils/collections.h"
#include "../utils/markup.h"
#include "../utils/memory.h"
#include "../utils/rng.h"
#include "../utils/rng_options.h"
#include <algorithm>
#include <functional>
#include <limits>
#include <memory>
using namespace std;
namespace epsilon_greedy_open_list {
template<class Entry>
class EpsilonGreedyOpenList : public OpenList<Entry> {
shared_ptr<utils::RandomNumberGenerator> rng;
struct HeapNode {
int id;
int h;
Entry entry;
HeapNode(int id, int h, const Entry &entry)
: id(id), h(h), entry(entry) {
}
bool operator>(const HeapNode &other) const {
return make_pair(h, id) > make_pair(other.h, other.id);
}
};
vector<HeapNode> heap;
shared_ptr<Evaluator> evaluator;
double epsilon;
int size;
int next_id;
protected:
virtual void do_insertion(EvaluationContext &eval_context,
const Entry &entry) override;
public:
explicit EpsilonGreedyOpenList(const Options &opts);
virtual ~EpsilonGreedyOpenList() override = default;
virtual Entry remove_min() override;
virtual bool is_dead_end(
EvaluationContext &eval_context) const override;
virtual bool is_reliable_dead_end(
EvaluationContext &eval_context) const override;
virtual void get_path_dependent_evaluators(set<Evaluator *> &evals) override;
virtual bool empty() const override;
virtual void clear() override;
};
template<class HeapNode>
static void adjust_heap_up(vector<HeapNode> &heap, size_t pos) {
assert(utils::in_bounds(pos, heap));
while (pos != 0) {
size_t parent_pos = (pos - 1) / 2;
if (heap[pos] > heap[parent_pos]) {
break;
}
swap(heap[pos], heap[parent_pos]);
pos = parent_pos;
}
}
template<class Entry>
void EpsilonGreedyOpenList<Entry>::do_insertion(
EvaluationContext &eval_context, const Entry &entry) {
heap.emplace_back(
next_id++, eval_context.get_evaluator_value(evaluator.get()), entry);
push_heap(heap.begin(), heap.end(), greater<HeapNode>());
++size;
}
template<class Entry>
EpsilonGreedyOpenList<Entry>::EpsilonGreedyOpenList(const Options &opts)
: OpenList<Entry>(opts.get<bool>("pref_only")),
rng(utils::parse_rng_from_options(opts)),
evaluator(opts.get<shared_ptr<Evaluator>>("eval")),
epsilon(opts.get<double>("epsilon")),
size(0),
next_id(0) {
}
template<class Entry>
Entry EpsilonGreedyOpenList<Entry>::remove_min() {
assert(size > 0);
if ((*rng)() < epsilon) {
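        /*
          Remove a uniformly random entry: pick a random heap position,
          overwrite its key with -infinity, sift the node up to the root,
          and let the regular pop_heap below remove it.
        */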
int pos = (*rng)(size);
heap[pos].h = numeric_limits<int>::min();
adjust_heap_up(heap, pos);
}
pop_heap(heap.begin(), heap.end(), greater<HeapNode>());
HeapNode heap_node = heap.back();
heap.pop_back();
--size;
return heap_node.entry;
}
template<class Entry>
bool EpsilonGreedyOpenList<Entry>::is_dead_end(
EvaluationContext &eval_context) const {
return eval_context.is_evaluator_value_infinite(evaluator.get());
}
template<class Entry>
bool EpsilonGreedyOpenList<Entry>::is_reliable_dead_end(
EvaluationContext &eval_context) const {
return is_dead_end(eval_context) && evaluator->dead_ends_are_reliable();
}
template<class Entry>
void EpsilonGreedyOpenList<Entry>::get_path_dependent_evaluators(
set<Evaluator *> &evals) {
evaluator->get_path_dependent_evaluators(evals);
}
template<class Entry>
bool EpsilonGreedyOpenList<Entry>::empty() const {
return size == 0;
}
template<class Entry>
void EpsilonGreedyOpenList<Entry>::clear() {
heap.clear();
size = 0;
next_id = 0;
}
EpsilonGreedyOpenListFactory::EpsilonGreedyOpenListFactory(
const Options &options)
: options(options) {
}
unique_ptr<StateOpenList>
EpsilonGreedyOpenListFactory::create_state_open_list() {
return utils::make_unique_ptr<EpsilonGreedyOpenList<StateOpenListEntry>>(options);
}
unique_ptr<EdgeOpenList>
EpsilonGreedyOpenListFactory::create_edge_open_list() {
return utils::make_unique_ptr<EpsilonGreedyOpenList<EdgeOpenListEntry>>(options);
}
static shared_ptr<OpenListFactory> _parse(OptionParser &parser) {
parser.document_synopsis(
"Epsilon-greedy open list",
"Chooses an entry uniformly randomly with probability "
"'epsilon', otherwise it returns the minimum entry. "
"The algorithm is based on" + utils::format_conference_reference(
{"Richard Valenzano", "Nathan R. Sturtevant",
"Jonathan Schaeffer", "Fan Xie"},
"A Comparison of Knowledge-Based GBFS Enhancements and"
" Knowledge-Free Exploration",
"http://www.aaai.org/ocs/index.php/ICAPS/ICAPS14/paper/view/7943/8066",
"Proceedings of the Twenty-Fourth International Conference"
" on Automated Planning and Scheduling (ICAPS 2014)",
"375-379",
"AAAI Press",
"2014"));
parser.add_option<shared_ptr<Evaluator>>("eval", "evaluator");
parser.add_option<bool>(
"pref_only",
"insert only nodes generated by preferred operators", "false");
parser.add_option<double>(
"epsilon",
"probability for choosing the next entry randomly",
"0.2",
Bounds("0.0", "1.0"));
utils::add_rng_options(parser);
Options opts = parser.parse();
if (parser.dry_run()) {
return nullptr;
} else {
return make_shared<EpsilonGreedyOpenListFactory>(opts);
}
}
static Plugin<OpenListFactory> _plugin("epsilon_greedy", _parse);
}
| 5,716 |
C++
| 28.776042 | 86 | 0.655003 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/open_lists/epsilon_greedy_open_list.h
|
#ifndef OPEN_LISTS_EPSILON_GREEDY_OPEN_LIST_H
#define OPEN_LISTS_EPSILON_GREEDY_OPEN_LIST_H
#include "../open_list_factory.h"
#include "../option_parser_util.h"
/*
Epsilon-greedy open list based on Valenzano et al. (ICAPS 2014).
With probability epsilon the next entry is selected uniformly
randomly, otherwise the minimum entry is chosen. While the original
implementation by Valenzano et al. is based on buckets (personal
communication with the authors), this implementation stores entries
in a heap. It is usually desirable to let open lists break ties in
FIFO order. When using a heap, this can be achieved without using
significantly more time by assigning increasing IDs to new entries
and using the IDs as tiebreakers for entries with identical values.
On the other hand, FIFO tiebreaking induces a substantial worst-case
runtime penalty for bucket-based implementations. In the table below
we list the worst-case time complexities for the discussed
implementation strategies.
n: number of entries
m: number of buckets
Buckets Buckets (no FIFO) Heap
Insert entry O(log(m)) O(log(m)) O(log(n))
Remove random entry O(m + n) O(m) O(log(n))
Remove minimum entry O(log(m)) O(log(m)) O(log(n))
These results assume that the buckets are implemented as deques and
are stored in a sorted dictionary, mapping from evaluator values to
buckets. For inserting a new entry and removing the minimum entry the
bucket-based implementations need to find the correct bucket
(O(log(m))) and can then push or pop from one end of the deque
(O(1)). For returning a random entry, bucket-based implementations
need to loop over all buckets (O(m)) to find the one that contains
the randomly selected entry. If FIFO ordering is ignored, one can use
swap-and-pop to remove the entry in constant time. Otherwise, the
removal is linear in the number of entries in the bucket (O(n), since
there could be only one bucket).
*/
namespace epsilon_greedy_open_list {
class EpsilonGreedyOpenListFactory : public OpenListFactory {
Options options;
public:
explicit EpsilonGreedyOpenListFactory(const Options &options);
virtual ~EpsilonGreedyOpenListFactory() override = default;
virtual std::unique_ptr<StateOpenList> create_state_open_list() override;
virtual std::unique_ptr<EdgeOpenList> create_edge_open_list() override;
};
}
#endif
| 2,574 |
C
| 43.396551 | 77 | 0.707848 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/open_lists/alternation_open_list.cc
|
#include "alternation_open_list.h"
#include "../open_list.h"
#include "../option_parser.h"
#include "../plugin.h"
#include "../utils/memory.h"
#include "../utils/system.h"
#include <cassert>
#include <memory>
#include <vector>
using namespace std;
using utils::ExitCode;
namespace alternation_open_list {
template<class Entry>
class AlternationOpenList : public OpenList<Entry> {
vector<unique_ptr<OpenList<Entry>>> open_lists;
vector<int> priorities;
const int boost_amount;
protected:
virtual void do_insertion(EvaluationContext &eval_context,
const Entry &entry) override;
public:
explicit AlternationOpenList(const Options &opts);
virtual ~AlternationOpenList() override = default;
virtual Entry remove_min() override;
virtual bool empty() const override;
virtual void clear() override;
virtual void boost_preferred() override;
virtual void get_path_dependent_evaluators(
set<Evaluator *> &evals) override;
virtual bool is_dead_end(
EvaluationContext &eval_context) const override;
virtual bool is_reliable_dead_end(
EvaluationContext &eval_context) const override;
};
template<class Entry>
AlternationOpenList<Entry>::AlternationOpenList(const Options &opts)
: boost_amount(opts.get<int>("boost")) {
vector<shared_ptr<OpenListFactory>> open_list_factories(
opts.get_list<shared_ptr<OpenListFactory>>("sublists"));
open_lists.reserve(open_list_factories.size());
for (const auto &factory : open_list_factories)
open_lists.push_back(factory->create_open_list<Entry>());
priorities.resize(open_lists.size(), 0);
}
template<class Entry>
void AlternationOpenList<Entry>::do_insertion(
EvaluationContext &eval_context, const Entry &entry) {
for (const auto &sublist : open_lists)
sublist->insert(eval_context, entry);
}
template<class Entry>
Entry AlternationOpenList<Entry>::remove_min() {
int best = -1;
for (size_t i = 0; i < open_lists.size(); ++i) {
if (!open_lists[i]->empty() &&
(best == -1 || priorities[i] < priorities[best])) {
best = i;
}
}
assert(best != -1);
const auto &best_list = open_lists[best];
assert(!best_list->empty());
++priorities[best];
return best_list->remove_min();
}
template<class Entry>
bool AlternationOpenList<Entry>::empty() const {
for (const auto &sublist : open_lists)
if (!sublist->empty())
return false;
return true;
}
template<class Entry>
void AlternationOpenList<Entry>::clear() {
for (const auto &sublist : open_lists)
sublist->clear();
}
template<class Entry>
void AlternationOpenList<Entry>::boost_preferred() {
for (size_t i = 0; i < open_lists.size(); ++i)
if (open_lists[i]->only_contains_preferred_entries())
priorities[i] -= boost_amount;
}
template<class Entry>
void AlternationOpenList<Entry>::get_path_dependent_evaluators(
set<Evaluator *> &evals) {
for (const auto &sublist : open_lists)
sublist->get_path_dependent_evaluators(evals);
}
template<class Entry>
bool AlternationOpenList<Entry>::is_dead_end(
EvaluationContext &eval_context) const {
// If one sublist is sure we have a dead end, return true.
if (is_reliable_dead_end(eval_context))
return true;
// Otherwise, return true if all sublists agree this is a dead-end.
for (const auto &sublist : open_lists)
if (!sublist->is_dead_end(eval_context))
return false;
return true;
}
template<class Entry>
bool AlternationOpenList<Entry>::is_reliable_dead_end(
EvaluationContext &eval_context) const {
for (const auto &sublist : open_lists)
if (sublist->is_reliable_dead_end(eval_context))
return true;
return false;
}
AlternationOpenListFactory::AlternationOpenListFactory(const Options &options)
: options(options) {
}
unique_ptr<StateOpenList>
AlternationOpenListFactory::create_state_open_list() {
return utils::make_unique_ptr<AlternationOpenList<StateOpenListEntry>>(options);
}
unique_ptr<EdgeOpenList>
AlternationOpenListFactory::create_edge_open_list() {
return utils::make_unique_ptr<AlternationOpenList<EdgeOpenListEntry>>(options);
}
static shared_ptr<OpenListFactory> _parse(OptionParser &parser) {
parser.document_synopsis("Alternation open list",
"alternates between several open lists.");
parser.add_list_option<shared_ptr<OpenListFactory>>(
"sublists",
"open lists between which this one alternates");
parser.add_option<int>(
"boost",
"boost value for contained open lists that are restricted "
"to preferred successors",
"0");
Options opts = parser.parse();
opts.verify_list_non_empty<shared_ptr<OpenListFactory>>("sublists");
if (parser.dry_run())
return nullptr;
else
return make_shared<AlternationOpenListFactory>(opts);
}
static Plugin<OpenListFactory> _plugin("alt", _parse);
}
| 5,060 |
C++
| 29.305389 | 84 | 0.674901 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/open_lists/type_based_open_list.h
|
#ifndef OPEN_LISTS_TYPE_BASED_OPEN_LIST_H
#define OPEN_LISTS_TYPE_BASED_OPEN_LIST_H
#include "../open_list_factory.h"
#include "../option_parser_util.h"
/*
Type-based open list based on Xie et al. (AAAI 2014; see detailed
reference in plug-in documentation).
The original implementation uses a std::map for storing and looking
up buckets. Our implementation stores the buckets in a std::vector
and uses a std::unordered_map for looking up indexes in the vector.
In the table below we list the amortized worst-case time complexities
for the original implementation and the version below.
n = number of entries
m = number of buckets

                    Original    Code below
Insert entry        O(log(m))   O(1)
Remove entry        O(m)        O(1)    # both use swap+pop
*/
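/*
  A minimal sketch (not part of this header) of the swap+pop removal
  technique described above, using hypothetical member names; the real
  implementation lives in type_based_open_list.cc:

    std::vector<Bucket> buckets;                       // dense bucket storage
    std::unordered_map<Key, int> key_to_bucket_index;  // key -> vector index

    void remove_bucket(const Key &key) {
        int index = key_to_bucket_index[key];
        // Overwrite the freed slot with the last bucket, fix up that
        // bucket's index entry, then shrink the vector by one.
        std::swap(buckets[index], buckets.back());
        key_to_bucket_index[buckets[index].key] = index;
        buckets.pop_back();
        key_to_bucket_index.erase(key);
    }
*/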
namespace type_based_open_list {
class TypeBasedOpenListFactory : public OpenListFactory {
Options options;
public:
explicit TypeBasedOpenListFactory(const Options &options);
virtual ~TypeBasedOpenListFactory() override = default;
virtual std::unique_ptr<StateOpenList> create_state_open_list() override;
virtual std::unique_ptr<EdgeOpenList> create_edge_open_list() override;
};
}
#endif
| 1,243 |
C
| 30.099999 | 77 | 0.69992 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/src/search/open_lists/pareto_open_list.cc
|
#include "pareto_open_list.h"
#include "../evaluator.h"
#include "../open_list.h"
#include "../option_parser.h"
#include "../plugin.h"
#include "../utils/hash.h"
#include "../utils/memory.h"
#include "../utils/rng.h"
#include "../utils/rng_options.h"
#include <cassert>
#include <deque>
#include <set>
#include <unordered_map>
#include <utility>
#include <vector>
using namespace std;
namespace pareto_open_list {
template<class Entry>
class ParetoOpenList : public OpenList<Entry> {
shared_ptr<utils::RandomNumberGenerator> rng;
using Bucket = deque<Entry>;
using KeyType = vector<int>;
using BucketMap = utils::HashMap<KeyType, Bucket>;
using KeySet = set<KeyType>;
BucketMap buckets;
KeySet nondominated;
bool state_uniform_selection;
vector<shared_ptr<Evaluator>> evaluators;
bool dominates(const KeyType &v1, const KeyType &v2) const;
bool is_nondominated(
const KeyType &vec, KeySet &domination_candidates) const;
void remove_key(const KeyType &key);
protected:
virtual void do_insertion(EvaluationContext &eval_context,
const Entry &entry) override;
public:
explicit ParetoOpenList(const Options &opts);
virtual ~ParetoOpenList() override = default;
virtual Entry remove_min() override;
virtual bool empty() const override;
virtual void clear() override;
virtual void get_path_dependent_evaluators(set<Evaluator *> &evals) override;
virtual bool is_dead_end(
EvaluationContext &eval_context) const override;
virtual bool is_reliable_dead_end(
EvaluationContext &eval_context) const override;
static OpenList<Entry> *_parse(OptionParser &p);
};
template<class Entry>
ParetoOpenList<Entry>::ParetoOpenList(const Options &opts)
: OpenList<Entry>(opts.get<bool>("pref_only")),
rng(utils::parse_rng_from_options(opts)),
state_uniform_selection(opts.get<bool>("state_uniform_selection")),
evaluators(opts.get_list<shared_ptr<Evaluator>>("evals")) {
}
template<class Entry>
bool ParetoOpenList<Entry>::dominates(
const KeyType &v1, const KeyType &v2) const {
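    /*
      Strict Pareto dominance: v1 dominates v2 iff v1 is no worse in
      every component and strictly better (lower, since evaluator
      values are minimized) in at least one.
    */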
assert(v1.size() == v2.size());
bool are_different = false;
for (size_t i = 0; i < v1.size(); ++i) {
if (v1[i] > v2[i])
return false;
else if (v1[i] < v2[i])
are_different = true;
}
return are_different;
}
template<class Entry>
bool ParetoOpenList<Entry>::is_nondominated(
const KeyType &vec, KeySet &domination_candidates) const {
for (const KeyType &candidate : domination_candidates)
if (dominates(candidate, vec))
return false;
return true;
}
template<class Entry>
void ParetoOpenList<Entry>::remove_key(const KeyType &key) {
/*
We must copy the key because it is likely to live inside the
data structures from which we remove it here and hence becomes
invalid at that point.
*/
vector<int> copied_key(key);
nondominated.erase(copied_key);
buckets.erase(copied_key);
KeySet candidates;
for (const auto &bucket_pair : buckets) {
const KeyType &bucket_key = bucket_pair.first;
/*
If the estimate vector of the bucket is not already in the
set of nondominated estimate vectors and the vector was
previously dominated by key and the estimate vector is not
dominated by any vector from the set of nondominated
vectors, we add it to the candidates.
*/
if (!nondominated.count(bucket_key) &&
dominates(copied_key, bucket_key) &&
is_nondominated(bucket_key, nondominated))
candidates.insert(bucket_key);
}
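    /*
      Of the candidates, only those that are not dominated by another
      candidate belong to the new Pareto frontier.
    */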
for (const KeyType &candidate : candidates)
if (is_nondominated(candidate, candidates))
nondominated.insert(candidate);
}
template<class Entry>
void ParetoOpenList<Entry>::do_insertion(
EvaluationContext &eval_context, const Entry &entry) {
vector<int> key;
key.reserve(evaluators.size());
for (const shared_ptr<Evaluator> &evaluator : evaluators)
key.push_back(eval_context.get_evaluator_value_or_infinity(evaluator.get()));
Bucket &bucket = buckets[key];
bool newkey = bucket.empty();
bucket.push_back(entry);
if (newkey && is_nondominated(key, nondominated)) {
/*
Delete previously nondominated keys that are now dominated
by key.
Note: this requires that nondominated is a "normal"
set (no hash set) because then iterators are not
invalidated by erase(it).
*/
auto it = nondominated.begin();
while (it != nondominated.end()) {
if (dominates(key, *it)) {
auto tmp_it = it;
++it;
nondominated.erase(tmp_it);
} else {
++it;
}
}
// Insert new key.
nondominated.insert(key);
}
}
template<class Entry>
Entry ParetoOpenList<Entry>::remove_min() {
typename KeySet::iterator selected = nondominated.begin();
int seen = 0;
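    /*
      Single-pass weighted reservoir sampling: after each iteration,
      "selected" points to any key seen so far with probability
      proportional to its weight (numerator), since (*rng)(seen)
      draws uniformly from {0, ..., seen - 1}.
    */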
for (typename KeySet::iterator it = nondominated.begin();
it != nondominated.end(); ++it) {
        // Weight each bucket by its number of entries so that the
        // selection is uniform over entries (states), as documented
        // for the state_uniform_selection option.
        int numerator;
        if (state_uniform_selection)
            numerator = buckets[*it].size();
        else
            numerator = 1;
seen += numerator;
if ((*rng)(seen) < numerator)
selected = it;
}
Bucket &bucket = buckets[*selected];
Entry result = bucket.front();
bucket.pop_front();
if (bucket.empty())
remove_key(*selected);
return result;
}
template<class Entry>
bool ParetoOpenList<Entry>::empty() const {
return nondominated.empty();
}
template<class Entry>
void ParetoOpenList<Entry>::clear() {
buckets.clear();
nondominated.clear();
}
template<class Entry>
void ParetoOpenList<Entry>::get_path_dependent_evaluators(
set<Evaluator *> &evals) {
for (const shared_ptr<Evaluator> &evaluator : evaluators)
evaluator->get_path_dependent_evaluators(evals);
}
template<class Entry>
bool ParetoOpenList<Entry>::is_dead_end(
EvaluationContext &eval_context) const {
// TODO: Document this behaviour.
// If one safe heuristic detects a dead end, return true.
if (is_reliable_dead_end(eval_context))
return true;
// Otherwise, return true if all heuristics agree this is a dead-end.
for (const shared_ptr<Evaluator> &evaluator : evaluators)
if (!eval_context.is_evaluator_value_infinite(evaluator.get()))
return false;
return true;
}
template<class Entry>
bool ParetoOpenList<Entry>::is_reliable_dead_end(
EvaluationContext &eval_context) const {
for (const shared_ptr<Evaluator> &evaluator : evaluators)
if (eval_context.is_evaluator_value_infinite(evaluator.get()) &&
evaluator->dead_ends_are_reliable())
return true;
return false;
}
ParetoOpenListFactory::ParetoOpenListFactory(
const Options &options)
: options(options) {
}
unique_ptr<StateOpenList>
ParetoOpenListFactory::create_state_open_list() {
return utils::make_unique_ptr<ParetoOpenList<StateOpenListEntry>>(options);
}
unique_ptr<EdgeOpenList>
ParetoOpenListFactory::create_edge_open_list() {
return utils::make_unique_ptr<ParetoOpenList<EdgeOpenListEntry>>(options);
}
static shared_ptr<OpenListFactory> _parse(OptionParser &parser) {
parser.document_synopsis(
"Pareto open list",
"Selects one of the Pareto-optimal (regarding the sub-evaluators) "
"entries for removal.");
parser.add_list_option<shared_ptr<Evaluator>>("evals", "evaluators");
parser.add_option<bool>(
"pref_only",
"insert only nodes generated by preferred operators", "false");
parser.add_option<bool>(
"state_uniform_selection",
"When removing an entry, we select a non-dominated bucket "
"and return its oldest entry. If this option is false, we select "
"uniformly from the non-dominated buckets; if the option is true, "
"we weight the buckets with the number of entries.",
"false");
utils::add_rng_options(parser);
Options opts = parser.parse();
if (parser.dry_run())
return nullptr;
else
return make_shared<ParetoOpenListFactory>(opts);
}
static Plugin<OpenListFactory> _plugin("pareto", _parse);
}
| 8,406 |
C++
| 30.369403 | 85 | 0.652629 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue880/parser.py
|
#! /usr/bin/env python
import logging
import re
from lab.parser import Parser
class CommonParser(Parser):
def add_difference(self, diff, val1, val2):
def diff_func(content, props):
if props.get(val1) is None or props.get(val2) is None:
diff_val = None
else:
diff_val = props.get(val1) - props.get(val2)
props[diff] = diff_val
self.add_function(diff_func)
def _get_flags(self, flags_string):
flags = 0
for char in flags_string:
flags |= getattr(re, char)
return flags
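    # For example, _get_flags("MS") returns re.M | re.S, ready to be
    # passed as the flags argument of re.search() or re.findall().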
def add_repeated_pattern(
self, name, regex, file="run.log", required=False, type=int,
flags=""):
def find_all_occurences(content, props):
matches = re.findall(regex, content, flags=self._get_flags(flags))
if required and not matches:
logging.error("Pattern {0} not found in file {1}".format(regex, file))
props[name] = [type(m) for m in matches]
self.add_function(find_all_occurences, file=file)
def add_pattern(self, name, regex, file="run.log", required=False, type=int, flags=""):
Parser.add_pattern(self, name, regex, file=file, required=required, type=type, flags=flags)
def add_bottom_up_pattern(self, name, regex, file="run.log", required=True, type=int, flags=""):
def search_from_bottom(content, props):
reversed_content = "\n".join(reversed(content.splitlines()))
match = re.search(regex, reversed_content, flags=self._get_flags(flags))
if required and not match:
logging.error("Pattern {0} not found in file {1}".format(regex, file))
if match:
props[name] = type(match.group(1))
self.add_function(search_from_bottom, file=file)
def no_search(content, props):
if "search_start_time" not in props:
error = props.get("error")
if error is not None and error != "incomplete-search-found-no-plan":
props["error"] = "no-search-due-to-" + error
REFINEMENT_ATTRIBUTES = [
("time_for_finding_traces", r"Time for finding abstract traces: (.+)s"),
("time_for_finding_flaws", r"Time for finding flaws: (.+)s"),
("time_for_splitting_states", r"Time for splitting states: (.+)s"),
]
def compute_total_times(content, props):
for attribute, pattern in REFINEMENT_ATTRIBUTES:
props["total_" + attribute] = sum(props[attribute])
def add_time_analysis(content, props):
init_time = props.get("init_time")
if not init_time:
return
parts = []
parts.append("{init_time:.2f}:".format(**props))
for attribute, pattern in REFINEMENT_ATTRIBUTES:
time = props["total_" + attribute]
relative_time = time / init_time
parts.append("{:.2f} ({:.2f})".format(time, relative_time))
props["time_analysis"] = " ".join(parts)
def main():
parser = CommonParser()
parser.add_pattern("search_start_time", r"\[g=0, 1 evaluated, 0 expanded, t=(.+)s, \d+ KB\]", type=float)
parser.add_pattern("search_start_memory", r"\[g=0, 1 evaluated, 0 expanded, t=.+s, (\d+) KB\]", type=int)
parser.add_pattern("init_time", r"Time for initializing additive Cartesian heuristic: (.+)s", type=float)
parser.add_pattern("cartesian_states", r"^Cartesian states: (\d+)\n", type=int)
for attribute, pattern in REFINEMENT_ATTRIBUTES:
parser.add_repeated_pattern(attribute, pattern, type=float, required=False)
parser.add_function(no_search)
parser.add_function(compute_total_times)
parser.add_function(add_time_analysis)
parser.parse()
if __name__ == "__main__":
main()
| 3,743 |
Python
| 34.657143 | 109 | 0.617686 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue880/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import subprocess
import sys
from lab.experiment import ARGPARSER
from lab import tools
from downward.experiment import FastDownwardExperiment
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import ComparativeReport
from downward.reports.scatter import ScatterPlotReport
from relativescatter import RelativeScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
DEFAULT_OPTIMAL_SUITE = [
'agricola-opt18-strips', 'airport', 'barman-opt11-strips',
'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips',
'data-network-opt18-strips', 'depot', 'driverlog',
'elevators-opt08-strips', 'elevators-opt11-strips',
'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell',
'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips',
'logistics00', 'logistics98', 'miconic', 'movie', 'mprime',
'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips',
'openstacks-opt11-strips', 'openstacks-opt14-strips',
'openstacks-strips', 'organic-synthesis-opt18-strips',
'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips',
'parcprinter-opt11-strips', 'parking-opt11-strips',
'parking-opt14-strips', 'pathways-noneg', 'pegsol-08-strips',
'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers',
'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips',
'snake-opt18-strips', 'sokoban-opt08-strips',
'sokoban-opt11-strips', 'spider-opt18-strips', 'storage',
'termes-opt18-strips', 'tetris-opt14-strips',
'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp',
'transport-opt08-strips', 'transport-opt11-strips',
'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips',
'visitall-opt14-strips', 'woodworking-opt08-strips',
'woodworking-opt11-strips', 'zenotravel']
DEFAULT_SATISFICING_SUITE = [
'agricola-sat18-strips', 'airport', 'assembly',
'barman-sat11-strips', 'barman-sat14-strips', 'blocks',
'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl',
'childsnack-sat14-strips', 'citycar-sat14-adl',
'data-network-sat18-strips', 'depot', 'driverlog',
'elevators-sat08-strips', 'elevators-sat11-strips',
'flashfill-sat18-adl', 'floortile-sat11-strips',
'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid',
'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98',
'maintenance-sat14-adl', 'miconic', 'miconic-fulladl',
'miconic-simpleadl', 'movie', 'mprime', 'mystery',
'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks',
'openstacks-sat08-adl', 'openstacks-sat08-strips',
'openstacks-sat11-strips', 'openstacks-sat14-strips',
'openstacks-strips', 'optical-telegraphs',
'organic-synthesis-sat18-strips',
'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips',
'parcprinter-sat11-strips', 'parking-sat11-strips',
'parking-sat14-strips', 'pathways', 'pathways-noneg',
'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-large',
'psr-middle', 'psr-small', 'rovers', 'satellite',
'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule',
'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips',
'sokoban-sat11-strips', 'spider-sat18-strips', 'storage',
'termes-sat18-strips', 'tetris-sat14-strips',
'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp',
'transport-sat08-strips', 'transport-sat11-strips',
'transport-sat14-strips', 'trucks', 'trucks-strips',
'visitall-sat11-strips', 'visitall-sat14-strips',
'woodworking-sat08-strips', 'woodworking-sat11-strips',
'zenotravel']
def get_script():
"""Get file name of main script."""
return tools.get_script_path()
def get_script_dir():
"""Get directory of main script.
    Usually a relative directory (depends on how it was called by the user)."""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch")
def is_test_run():
return ARGS.test_run == "yes" or (
ARGS.test_run == "auto" and not is_running_on_cluster())
def get_algo_nick(revision, config_nick):
return "{revision}-{config_nick}".format(**locals())
class IssueConfig(object):
"""Hold information about a planner configuration.
See FastDownwardExperiment.add_algorithm() for documentation of the
constructor's options.
"""
def __init__(self, nick, component_options,
build_options=None, driver_options=None):
self.nick = nick
self.component_options = component_options
self.build_options = build_options
self.driver_options = driver_options
class IssueExperiment(FastDownwardExperiment):
"""Subclass of FastDownwardExperiment with some convenience features."""
DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"]
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"planner_memory",
"planner_time",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, revisions=None, configs=None, path=None, **kwargs):
"""
You can either specify both *revisions* and *configs* or none
of them. If they are omitted, you will need to call
exp.add_algorithm() manually.
If *revisions* is given, it must be a non-empty list of
revision identifiers, which specify which planner versions to
use in the experiment. The same versions are used for
translator, preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
If *configs* is given, it must be a non-empty list of
IssueConfig objects. ::
IssueExperiment(..., configs=[
IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
IssueConfig(
"lama", [],
driver_options=["--alias", "seq-sat-lama-2011"]),
])
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
"""
path = path or get_data_dir()
FastDownwardExperiment.__init__(self, path=path, **kwargs)
if (revisions and not configs) or (not revisions and configs):
raise ValueError(
"please provide either both or none of revisions and configs")
        for rev in revisions or []:
for config in configs:
self.add_algorithm(
get_algo_nick(rev, config.nick),
get_repo_base(),
rev,
config.component_options,
build_options=config.build_options,
driver_options=config.driver_options)
self._revisions = revisions
self._configs = configs
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't compare
revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If the
keyword argument *attributes* is not specified, a default list
of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = os.path.join(
self.eval_dir,
get_experiment_name() + "." + report.output_format)
self.add_report(report, outfile=outfile)
self.add_step(
'publish-absolute-report', subprocess.call, ['publish', outfile])
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revisions. Each report pairs up the runs of the same config and
lists the two absolute attribute values and their difference
for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareConfigsReport class.
If the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
compared_configs = []
for config in self._configs:
config_nick = config.nick
compared_configs.append(
("%s-%s" % (rev1, config_nick),
"%s-%s" % (rev2, config_nick),
"Diff (%s)" % config_nick))
report = ComparativeReport(compared_configs, **kwargs)
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.%s" % (
self.name, rev1, rev2, report.output_format))
report(self.eval_dir, outfile)
def publish_comparison_tables():
for rev1, rev2 in itertools.combinations(self._revisions, 2):
outfile = os.path.join(
self.eval_dir,
"%s-%s-%s-compare.html" % (self.name, rev1, rev2))
subprocess.call(["publish", outfile])
self.add_step("make-comparison-tables", make_comparison_tables)
self.add_step(
"publish-comparison-tables", publish_comparison_tables)
def add_scatter_plot_step(self, relative=False, attributes=None):
"""Add step creating (relative) scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revisions pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if relative:
report_class = RelativeScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-relative")
step_name = "make-relative-scatter-plots"
else:
report_class = ScatterPlotReport
scatter_dir = os.path.join(self.eval_dir, "scatter-absolute")
step_name = "make-absolute-scatter-plots"
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "{}-{}".format(rev1, config_nick)
algo2 = "{}-{}".format(rev2, config_nick)
report = report_class(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(
self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config in self._configs:
for rev1, rev2 in itertools.combinations(self._revisions, 2):
for attribute in self.get_supported_attributes(
config.nick, attributes):
make_scatter_plot(config.nick, rev1, rev2, attribute)
self.add_step(step_name, make_scatter_plots)
| 14,786 |
Python
| 36.435443 | 82 | 0.618355 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue880/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
BUILD = "release64"
REVISIONS = ["issue880-base", "issue880-v1"]
DRIVER_OPTIONS = ["--build", BUILD]
CONFIGS = [
IssueConfig(
nick + "-" + max_transitions_nick,
config,
build_options=[BUILD],
driver_options=DRIVER_OPTIONS)
for max_transitions_nick, max_transitions in [("1M", 1000000), ("10M", 10000000)]
for nick, config in [
("cegar-original", ["--search", "astar(cegar(subtasks=[original()], max_transitions={max_transitions}))".format(**locals())]),
("cegar-landmarks-goals", ["--search", "astar(cegar(max_transitions={max_transitions}))".format(**locals())]),
]
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="jendrik.seipp@unibas.ch",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = [
#"depot:p02.pddl",
"gripper:prob01.pddl"]
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
#exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser(os.path.join(DIR, "parser.py"))
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
REFINEMENT_ATTRIBUTES = [
"time_for_finding_traces",
"time_for_finding_flaws",
"time_for_splitting_states",
]
attributes = (
IssueExperiment.DEFAULT_TABLE_ATTRIBUTES +
["search_start_memory", "init_time", "time_analysis"] +
REFINEMENT_ATTRIBUTES +
["total_" + attr for attr in REFINEMENT_ATTRIBUTES])
#exp.add_absolute_report_step(attributes=attributes)
exp.add_comparison_table_step(attributes=attributes)
if len(REVISIONS) == 2:
for attribute in ["init_time", "expansions_until_last_jump", "total_time_for_splitting_states", "total_time_for_finding_traces"]:
for config in CONFIGS:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_algorithm=["{}-{}".format(rev, config.nick) for rev in REVISIONS],
get_category=lambda run1, run2: run1.get("domain")),
outfile="{}-{}-{}-{}-{}.png".format(exp.name, attribute, config.nick, *REVISIONS))
exp.run_steps()
| 2,796 |
Python
| 33.109756 | 134 | 0.670601 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue880/relativescatter.py
|
# -*- coding: utf-8 -*-
from collections import defaultdict
from matplotlib import ticker
from downward.reports.scatter import ScatterPlotReport
from downward.reports.plot import PlotReport, Matplotlib, MatplotlibPlot
# TODO: handle outliers
# TODO: this is mostly copied from ScatterMatplotlib (scatter.py)
class RelativeScatterMatplotlib(Matplotlib):
@classmethod
def _plot(cls, report, axes, categories, styles):
# Display grid
axes.grid(b=True, linestyle='-', color='0.75')
has_points = False
# Generate the scatter plots
for category, coords in sorted(categories.items()):
X, Y = zip(*coords)
axes.scatter(X, Y, s=42, label=category, **styles[category])
if X and Y:
has_points = True
if report.xscale == 'linear' or report.yscale == 'linear':
plot_size = report.missing_val * 1.01
else:
plot_size = report.missing_val * 1.25
# make 5 ticks above and below 1
yticks = []
tick_step = report.ylim_top**(1/5.0)
        for i in range(-5, 6):
yticks.append(tick_step**i)
axes.set_yticks(yticks)
axes.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
axes.set_xlim(report.xlim_left or -1, report.xlim_right or plot_size)
axes.set_ylim(report.ylim_bottom or -1, report.ylim_top or plot_size)
for axis in [axes.xaxis, axes.yaxis]:
MatplotlibPlot.change_axis_formatter(
axis,
report.missing_val if report.show_missing else None)
return has_points
class RelativeScatterPlotReport(ScatterPlotReport):
"""
Generate a scatter plot that shows a relative comparison of two
algorithms with regard to the given attribute. The attribute value
of algorithm 1 is shown on the x-axis and the relation to the value
of algorithm 2 on the y-axis.
"""
def __init__(self, show_missing=True, get_category=None, **kwargs):
ScatterPlotReport.__init__(self, show_missing, get_category, **kwargs)
if self.output_format == 'tex':
raise "not supported"
else:
self.writer = RelativeScatterMatplotlib
def _fill_categories(self, runs):
# We discard the *runs* parameter.
# Map category names to value tuples
categories = defaultdict(list)
self.ylim_bottom = 2
self.ylim_top = 0.5
self.xlim_left = float("inf")
for (domain, problem), runs in self.problem_runs.items():
if len(runs) != 2:
continue
run1, run2 = runs
assert (run1['algorithm'] == self.algorithms[0] and
run2['algorithm'] == self.algorithms[1])
val1 = run1.get(self.attribute)
val2 = run2.get(self.attribute)
if not val1 or not val2:
continue
category = self.get_category(run1, run2)
assert val1 > 0, (domain, problem, self.algorithms[0], val1)
assert val2 > 0, (domain, problem, self.algorithms[1], val2)
x = val1
y = val2 / float(val1)
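            # y > 1 means algorithm 2 has a higher value than
            # algorithm 1 on this task, y < 1 a lower one; the axis
            # limits are made symmetric around y = 1 below.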
categories[category].append((x, y))
self.ylim_top = max(self.ylim_top, y)
self.ylim_bottom = min(self.ylim_bottom, y)
self.xlim_left = min(self.xlim_left, x)
# center around 1
if self.ylim_bottom < 1:
self.ylim_top = max(self.ylim_top, 1 / float(self.ylim_bottom))
if self.ylim_top > 1:
self.ylim_bottom = min(self.ylim_bottom, 1 / float(self.ylim_top))
return categories
def _set_scales(self, xscale, yscale):
# ScatterPlot uses log-scaling on the x-axis by default.
PlotReport._set_scales(
self, xscale or self.attribute.scale or 'log', 'log')
| 3,867 |
Python
| 35.490566 | 78 | 0.598397 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue635/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
from common_setup import IssueConfig, IssueExperiment, is_test_run
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue635-base", "issue635-v1"]
CONFIGS = [
IssueConfig(
heuristic,
["--search", "astar({})".format(heuristic)],
driver_options=["--search-time-limit", "10m"])
for heuristic in ["hm(m=2)", "ipdb()", "cea()", "cg()"]
]
SUITE = [
'airport', 'barman-opt14-strips', 'blocks',
'childsnack-opt14-strips', 'depot', 'driverlog',
'elevators-opt11-strips', 'floortile-opt14-strips', 'freecell',
'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips',
'logistics00', 'logistics98', 'miconic', 'movie', 'mprime', 'mystery',
'nomystery-opt11-strips', 'openstacks-opt14-strips',
'openstacks-strips', 'parcprinter-opt11-strips',
'parking-opt14-strips', 'pathways-noneg', 'pegsol-opt11-strips',
'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers',
'satellite', 'scanalyzer-opt11-strips', 'sokoban-opt11-strips',
'storage', 'tetris-opt14-strips', 'tidybot-opt14-strips', 'tpp',
'transport-opt14-strips', 'trucks-strips', 'visitall-opt14-strips',
'woodworking-opt11-strips', 'zenotravel']
ENVIRONMENT = MaiaEnvironment(
priority=0, email="jendrik.seipp@unibas.ch")
if is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_scatter_plot_step(relative=True, attributes=["memory", "total_time"])
exp()
| 1,814 |
Python
| 33.245282 | 77 | 0.684675 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v4-single-cegar-allow-merging-options.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from average_report import AverageAlgorithmReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v4"]
random_seed=2018
CONFIGS = [
### cpdbs
IssueConfig('cpdbs-singlecegar-wildcardplans-allowmergingall-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,allow_merging=all_flaws,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)]),
IssueConfig('cpdbs-singlecegar-wildcardplans-allowmergingprec-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,allow_merging=precondition_flaws,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)]),
IssueConfig('cpdbs-singlecegar-wildcardplans-forbidmerging-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,allow_merging=never,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)]),
### pho
IssueConfig('pho-singlecegar-wildcardplans-allowmergingall-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(operatorcounting(constraint_generators=[pho_constraints(patterns=single_cegar(max_refinements=infinity,allow_merging=all_flaws,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal))]),verbosity=silent)'.format(random_seed)]),
IssueConfig('pho-singlecegar-wildcardplans-allowmergingprec-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(operatorcounting(constraint_generators=[pho_constraints(patterns=single_cegar(max_refinements=infinity,allow_merging=precondition_flaws,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal))]),verbosity=silent)'.format(random_seed)]),
IssueConfig('pho-singlecegar-wildcardplans-forbidmerging-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(operatorcounting(constraint_generators=[pho_constraints(patterns=single_cegar(max_refinements=infinity,allow_merging=never,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal))]),verbosity=silent)'.format(random_seed)]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="silvan.sievers@unibas.ch",
partition="infai_2",
export=[],
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_parser('v3-parser.py')
attributes=exp.DEFAULT_TABLE_ATTRIBUTES
attributes.extend([
'single_cegar_pdbs_solved_without_search',
'single_cegar_pdbs_computation_time',
'single_cegar_pdbs_timed_out',
'single_cegar_pdbs_num_iterations',
'single_cegar_pdbs_collection_num_patterns',
'single_cegar_pdbs_collection_summed_pdb_size',
])
exp.add_absolute_report_step(attributes=attributes)
exp.add_comparison_table_step(attributes=attributes)
exp.run_steps()
| 4,453 |
Python
| 61.732393 | 425 | 0.777229 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v16-v17-ipdb-sys.py
|
#! /usr/bin/env python3
import itertools
import math
import os
import subprocess
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.reports import Attribute
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-base-v2", "issue1007-v16", "issue1007-v17"]
random_seed=2018
MAX_TIME=900
if common_setup.is_test_run():
MAX_TIME=2
CONFIGS = [
IssueConfig(f'cpdbs-hillclimbing-pdb1m-pdbs10m-t100-s{random_seed}', ['--search', f'astar(cpdbs(hillclimbing(max_time={MAX_TIME},pdb_max_size=1000000,collection_max_size=10000000,random_seed={random_seed})),verbosity=silent)']),
IssueConfig('cpdbs-sys2', ['--search', 'astar(cpdbs(systematic(pattern_max_size=2)),verbosity=silent)']),
IssueConfig('cpdbs-sys3', ['--search', 'astar(cpdbs(systematic(pattern_max_size=3)),verbosity=silent)']),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="silvan.sievers@unibas.ch",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_parser('cpdbs-parser.py')
cpdbs_num_patterns = Attribute('cpdbs_num_patterns', absolute=False, min_wins=True)
cpdbs_total_pdb_size = Attribute('cpdbs_total_pdb_size', absolute=False, min_wins=True)
cpdbs_computation_time = Attribute('cpdbs_computation_time', absolute=False, min_wins=True)
score_cpdbs_computation_time = Attribute('score_cpdbs_computation_time', absolute=True, min_wins=False)
attributes = [
cpdbs_num_patterns,
cpdbs_total_pdb_size,
cpdbs_computation_time,
score_cpdbs_computation_time,
]
attributes.extend(exp.DEFAULT_TABLE_ATTRIBUTES)
attributes.append('initial_h_value')
def add_computation_time_score(run):
"""
Convert cegar/cpdbs computation time into scores in the range [0, 1].
Best possible performance in a task is counted as 1, while failure
to construct the heuristic and worst performance are counted as 0.
"""
def log_score(value, min_bound, max_bound):
assert min_bound < max_bound
if value is None:
return 0
value = max(value, min_bound)
value = min(value, max_bound)
raw_score = math.log(value) - math.log(max_bound)
best_raw_score = math.log(min_bound) - math.log(max_bound)
return raw_score / best_raw_score
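    # For example, log_score(1.0, 1.0, MAX_TIME) == 1.0 (best),
    # log_score(MAX_TIME, 1.0, MAX_TIME) == 0.0 (worst) and
    # log_score(None, 1.0, MAX_TIME) == 0 (construction failed);
    # intermediate times are scored logarithmically between the bounds.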
run['score_cpdbs_computation_time'] = log_score(run.get('cpdbs_computation_time'), min_bound=1.0, max_bound=MAX_TIME)
return run
exp.add_absolute_report_step(attributes=attributes,filter=[add_computation_time_score])
exp.add_fetcher('data/issue1007-v15-ipdb-sys-eval', filter_algorithm=[
    f'issue1007-v15-cpdbs-hillclimbing-pdb1m-pdbs10m-t100-s{random_seed}',
    'issue1007-v15-cpdbs-sys2',
    'issue1007-v15-cpdbs-sys3',
], merge=True)
exp.add_comparison_table_step_for_revision_pairs(
revision_pairs=[
("issue1007-v15", "issue1007-v16"),
("issue1007-v16", "issue1007-v17"),
("issue1007-v15", "issue1007-v17"),
("issue1007-base-v2", "issue1007-v16"),
("issue1007-base-v2", "issue1007-v17"),
],
attributes=attributes,
filter=[add_computation_time_score],
)
exp.run_steps()
| 4,860 |
Python
| 39.173553 | 808 | 0.717695 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v4.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1007-v4"]
random_seed=2018
CONFIGS = [
### single cegar
IssueConfig('cpdbs-singlecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,allow_merging=all_flaws,wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)]),
IssueConfig('cpdbs-singlecegar-wildcardplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', 'astar(cpdbs(single_cegar(max_refinements=infinity,allow_merging=all_flaws,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,random_seed={},verbosity=normal)),verbosity=silent)'.format(random_seed)]),
### multiple cegar
IssueConfig('cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,allow_merging=all_flaws,wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=infinity,blacklist_trigger_time=1,blacklist_on_stagnation=false,verbosity=silent)),verbosity=silent)".format(random_seed)]),
IssueConfig('cpdbs-multiplecegar-regularplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,allow_merging=all_flaws,wildcard_plans=false,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=20,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=silent)),verbosity=silent)".format(random_seed)]),
IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,allow_merging=all_flaws,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=infinity,blacklist_trigger_time=1,blacklist_on_stagnation=false,verbosity=silent)),verbosity=silent)".format(random_seed)]),
IssueConfig('cpdbs-multiplecegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20-s{}'.format(random_seed), ['--search', "astar(cpdbs(multiple_cegar(max_refinements=infinity,allow_merging=all_flaws,wildcard_plans=true,max_time=100,max_pdb_size=1000000,max_collection_size=10000000,initial_random_seed={},total_collection_max_size=10000000,total_time_limit=100,stagnation_limit=20,blacklist_trigger_time=0.75,blacklist_on_stagnation=true,verbosity=silent)),verbosity=silent)".format(random_seed)]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="silvan.sievers@unibas.ch",
partition="infai_2",
export=[],
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step()
exp.run_steps()
| 4,902 |
Python
| 72.179103 | 808 | 0.771114 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/cpdbs-parser.py
|
#! /usr/bin/env python
from lab.parser import Parser
parser = Parser()
parser.add_pattern('cpdbs_num_patterns', r'Canonical PDB heuristic number of patterns: (\d+)', required=False, type=int)
parser.add_pattern('cpdbs_total_pdb_size', r'Canonical PDB heuristic total PDB size: (\d+)', required=False, type=int)
parser.add_pattern('cpdbs_computation_time', 'Canonical PDB heuristic computation time: (.+)s', required=False, type=float)
parser.parse()
| 451 |
Python
| 40.090905 | 123 | 0.742794 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v3-parser.py
|
#! /usr/bin/env python
from lab.parser import Parser
parser = Parser()
parser.add_pattern('single_cegar_pdbs_computation_time', 'CEGAR_PDBs: computation time: (.+)s', required=False, type=float)
parser.add_pattern('single_cegar_pdbs_num_iterations', r'CEGAR_PDBs: number of iterations: (\d+)', required=False, type=int)
parser.add_pattern('single_cegar_pdbs_collection_num_patterns', 'CEGAR_PDBs: final collection number of patterns: (.+)', required=False, type=int)
parser.add_pattern('single_cegar_pdbs_collection_summed_pdb_size', 'CEGAR_PDBs: final collection summed PDB sizes: (.+)', required=False, type=int)
def parse_lines(content, props):
single_cegar_pdbs_timed_out = False
single_cegar_pdbs_solved_without_search = False
for line in content.split('\n'):
if line == 'CEGAR_PDBs: time limit reached':
single_cegar_pdbs_timed_out = True
if line == 'CEGAR_PDBs: task solved during computation of abstract solutions':
single_cegar_pdbs_solved_without_search = True
props['single_cegar_pdbs_timed_out'] = single_cegar_pdbs_timed_out
props['single_cegar_pdbs_solved_without_search'] = single_cegar_pdbs_solved_without_search
parser.add_function(parse_lines)
parser.parse()
| 1,241 |
Python
| 48.679998 | 147 | 0.720387 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/cegar-parser.py
|
#! /usr/bin/env python
from lab.parser import Parser
parser = Parser()
parser.add_pattern('cegar_num_iterations', r'CEGAR number of iterations: (\d+)', required=False, type=int)
parser.add_pattern('cegar_num_patterns', r'CEGAR number of patterns: (\d+)', required=False, type=int)
parser.add_pattern('cegar_total_pdb_size', r'CEGAR total PDB size: (\d+)', required=False, type=int)
parser.add_pattern('cegar_computation_time', 'CEGAR computation time: (.+)s', required=False, type=float)
parser.parse()
| 503 |
Python
| 40.999997 | 105 | 0.725646 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v3-single-cegar-wildcard-average.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from average_report import AverageAlgorithmReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
REVISIONS = ["issue1007-v2", "issue1007-v3"]
CONFIGS = [
IssueConfig('cpdbs-single-cegar-allgoals-wildcardplans-pdb1m-pdbs10m-t100', []),
]
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
)
exp.add_comparison_table_step(
attributes=['coverage', 'single_cegar_pdbs_solved_without_search',
'single_cegar_pdbs_computation_time', 'search_time', 'total_time',
'expansions_until_last_jump']
)
exp.run_steps()
| 858 |
Python
| 25.030302 | 84 | 0.754079 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/v2-best-average.py
|
#! /usr/bin/env python3
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from average_report import AverageAlgorithmReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
REVISIONS = ["issue1007-v1", "issue1007-v2"]
CONFIGS = [
IssueConfig('cpdbs-multiple-cegar-wildcardplans-pdb1m-pdbs10m-t100-blacklist0.75-stag20', []),
]
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
)
exp.add_comparison_table_step(
attributes=['coverage', 'search_time', 'total_time', 'expansions_until_last_jump']
)
exp.run_steps()
| 783 |
Python
| 24.290322 | 98 | 0.759898 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1007/average_report.py
|
# -*- coding: utf-8 -*-
from downward.reports import PlanningReport
from lab import tools
from lab.reports import geometric_mean
import os
DEBUG=False
class AverageAlgorithmReport(PlanningReport):
"""
This currently only works for some hard-coded attributes.
"""
def __init__(self, algo_name_suffixes, **kwargs):
PlanningReport.__init__(self, **kwargs)
self.algo_name_suffixes=algo_name_suffixes
def get_text(self):
if not self.outfile.endswith("properties"):
raise ValueError("outfile must be a path to a properties file")
algo_infixes = set()
for algo in self.algorithms:
for suffix in self.algo_name_suffixes:
if suffix in algo:
algo_infixes.add(algo.replace(suffix, ''))
break
# print(algo_infixes)
# print(self.algo_name_suffixes)
props = tools.Properties(self.outfile)
for domain, problem in self.problem_runs.keys():
if DEBUG:
print(domain, problem)
for algo in algo_infixes:
if DEBUG:
print("Consider ", algo)
properties_key = algo + '-' + domain + '-' + problem
average_algo_dict = {}
average_algo_dict['algorithm'] = algo
average_algo_dict['domain'] = domain
average_algo_dict['problem'] = problem
average_algo_dict['id'] = [algo, domain, problem]
for attribute in self.attributes:
if DEBUG:
print("Consider ", attribute)
values = []
for suffix in self.algo_name_suffixes:
real_algo = algo + suffix
# if DEBUG:
# print("Composed algo ", real_algo)
real_algo_run = self.runs[(domain, problem, real_algo)]
values.append(real_algo_run.get(attribute))
if DEBUG:
print(values)
values_without_none = [value for value in values if value is not None]
                    if attribute in [
                        'coverage', 'initial_h_value',
                        'cegar_num_iterations', 'cegar_num_patterns',
                        'cegar_total_pdb_size',
                        'cpdbs_num_patterns', 'cpdbs_total_pdb_size',
                    ] or 'score' in attribute:
# if 'score' not in attribute:
# assert len(values_without_none) == 10 # does not hold for scores
average_value = sum(values_without_none)/float(len(values))
elif 'time' in attribute or 'expansions' in attribute:
if len(values_without_none) == 10:
average_value = geometric_mean(values_without_none)
else:
average_value = None
else:
print("Don't know how to handle {}".format(attribute))
exit(1)
average_algo_dict[attribute] = average_value
props[properties_key] = average_algo_dict
return str(props)
| 3,535 |
Python
| 43.759493 | 94 | 0.490523 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue629/v4.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import suites
from lab.reports import Attribute, gm
from common_setup import IssueConfig, IssueExperiment
try:
from relativescatter import RelativeScatterPlotReport
matplotlib = True
except ImportError:
print 'matplotlib not available, scatter plots not available'
matplotlib = False
def main(revisions=None):
benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks')
suite=suites.suite_optimal_strips()
configs = {
IssueConfig('astar-blind', ['--search', 'astar(blind())']),
IssueConfig('astar-blind-sss', ['--search', 'astar(blind(), pruning=stubborn_sets_simple())']),
IssueConfig('astar-blind-ssec', ['--search', 'astar(blind(), pruning=stubborn_sets_ec())']),
}
exp = IssueExperiment(
benchmarks_dir=benchmarks_dir,
suite=suite,
revisions=revisions,
configs=configs,
test_suite=['depot:p01.pddl'],
processes=4,
email='florian.pommerening@unibas.ch',
)
exp.add_comparison_table_step()
if matplotlib:
for attribute in ["memory", "total_time"]:
for config in configs:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_config=["{}-{}".format(rev, config.nick) for rev in revisions],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick)
)
exp()
main(revisions=['issue629-v2-base', 'issue629-v4'])
| 1,679 |
Python
| 30.698113 | 103 | 0.597975 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue629/suites.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import textwrap
HELP = "Convert suite name to list of domains or tasks."
def suite_alternative_formulations():
return ['airport-adl', 'no-mprime', 'no-mystery']
def suite_ipc98_to_ipc04_adl():
return [
'assembly', 'miconic-fulladl', 'miconic-simpleadl',
'optical-telegraphs', 'philosophers', 'psr-large',
'psr-middle', 'schedule',
]
def suite_ipc98_to_ipc04_strips():
return [
'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid',
'gripper', 'logistics00', 'logistics98', 'miconic', 'movie',
'mprime', 'mystery', 'pipesworld-notankage', 'psr-small',
'satellite', 'zenotravel',
]
def suite_ipc98_to_ipc04():
# All IPC1-4 domains, including the trivial Movie.
return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips())
def suite_ipc06_adl():
return [
'openstacks',
'pathways',
'trucks',
]
def suite_ipc06_strips_compilations():
return [
'openstacks-strips',
'pathways-noneg',
'trucks-strips',
]
def suite_ipc06_strips():
return [
'pipesworld-tankage',
'rovers',
'storage',
'tpp',
]
def suite_ipc06():
return sorted(suite_ipc06_adl() + suite_ipc06_strips())
def suite_ipc08_common_strips():
return [
'parcprinter-08-strips',
'pegsol-08-strips',
'scanalyzer-08-strips',
]
def suite_ipc08_opt_adl():
return ['openstacks-opt08-adl']
def suite_ipc08_opt_strips():
return sorted(suite_ipc08_common_strips() + [
'elevators-opt08-strips',
'openstacks-opt08-strips',
'sokoban-opt08-strips',
'transport-opt08-strips',
'woodworking-opt08-strips',
])
def suite_ipc08_opt():
return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl())
def suite_ipc08_sat_adl():
return ['openstacks-sat08-adl']
def suite_ipc08_sat_strips():
return sorted(suite_ipc08_common_strips() + [
# Note: cyber-security is missing.
'elevators-sat08-strips',
'openstacks-sat08-strips',
'sokoban-sat08-strips',
'transport-sat08-strips',
'woodworking-sat08-strips',
])
def suite_ipc08_sat():
return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl())
def suite_ipc08():
return sorted(set(suite_ipc08_opt() + suite_ipc08_sat()))
def suite_ipc11_opt():
return [
'barman-opt11-strips',
'elevators-opt11-strips',
'floortile-opt11-strips',
'nomystery-opt11-strips',
'openstacks-opt11-strips',
'parcprinter-opt11-strips',
'parking-opt11-strips',
'pegsol-opt11-strips',
'scanalyzer-opt11-strips',
'sokoban-opt11-strips',
'tidybot-opt11-strips',
'transport-opt11-strips',
'visitall-opt11-strips',
'woodworking-opt11-strips',
]
def suite_ipc11_sat():
return [
'barman-sat11-strips',
'elevators-sat11-strips',
'floortile-sat11-strips',
'nomystery-sat11-strips',
'openstacks-sat11-strips',
'parcprinter-sat11-strips',
'parking-sat11-strips',
'pegsol-sat11-strips',
'scanalyzer-sat11-strips',
'sokoban-sat11-strips',
'tidybot-sat11-strips',
'transport-sat11-strips',
'visitall-sat11-strips',
'woodworking-sat11-strips',
]
def suite_ipc11():
return sorted(suite_ipc11_opt() + suite_ipc11_sat())
def suite_ipc14_agl_adl():
return [
'cavediving-14-adl',
'citycar-sat14-adl',
'maintenance-sat14-adl',
]
def suite_ipc14_agl_strips():
return [
'barman-sat14-strips',
'childsnack-sat14-strips',
'floortile-sat14-strips',
'ged-sat14-strips',
'hiking-agl14-strips',
'openstacks-agl14-strips',
'parking-sat14-strips',
'tetris-sat14-strips',
'thoughtful-sat14-strips',
'transport-sat14-strips',
'visitall-sat14-strips',
]
def suite_ipc14_agl():
return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips())
def suite_ipc14_mco_adl():
return [
'cavediving-14-adl',
'citycar-sat14-adl',
'maintenance-sat14-adl',
]
def suite_ipc14_mco_strips():
return [
'barman-mco14-strips',
'childsnack-sat14-strips',
'floortile-sat14-strips',
'ged-sat14-strips',
'hiking-sat14-strips',
'openstacks-sat14-strips',
'parking-sat14-strips',
'tetris-sat14-strips',
'thoughtful-mco14-strips',
'transport-sat14-strips',
'visitall-sat14-strips',
]
def suite_ipc14_mco():
return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips())
def suite_ipc14_opt_adl():
return [
'cavediving-14-adl',
'citycar-opt14-adl',
'maintenance-opt14-adl',
]
def suite_ipc14_opt_strips():
return [
'barman-opt14-strips',
'childsnack-opt14-strips',
'floortile-opt14-strips',
'ged-opt14-strips',
'hiking-opt14-strips',
'openstacks-opt14-strips',
'parking-opt14-strips',
'tetris-opt14-strips',
'tidybot-opt14-strips',
'transport-opt14-strips',
'visitall-opt14-strips',
]
def suite_ipc14_opt():
return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips())
def suite_ipc14_sat_adl():
return [
'cavediving-14-adl',
'citycar-sat14-adl',
'maintenance-sat14-adl',
]
def suite_ipc14_sat_strips():
return [
'barman-sat14-strips',
'childsnack-sat14-strips',
'floortile-sat14-strips',
'ged-sat14-strips',
'hiking-sat14-strips',
'openstacks-sat14-strips',
'parking-sat14-strips',
'tetris-sat14-strips',
'thoughtful-sat14-strips',
'transport-sat14-strips',
'visitall-sat14-strips',
]
def suite_ipc14_sat():
return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips())
def suite_ipc14():
return sorted(set(
suite_ipc14_agl() + suite_ipc14_mco() +
suite_ipc14_opt() + suite_ipc14_sat()))
def suite_unsolvable():
return sorted(
['mystery:prob%02d.pddl' % index
for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] +
['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl'])
def suite_optimal_adl():
return sorted(
suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() +
suite_ipc08_opt_adl() + suite_ipc14_opt_adl())
def suite_optimal_strips():
return sorted(
suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() +
suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() +
suite_ipc11_opt() + suite_ipc14_opt_strips())
def suite_optimal():
return sorted(suite_optimal_adl() + suite_optimal_strips())
def suite_satisficing_adl():
return sorted(
suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() +
suite_ipc08_sat_adl() + suite_ipc14_sat_adl())
def suite_satisficing_strips():
return sorted(
suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() +
suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() +
suite_ipc11_sat() + suite_ipc14_sat_strips())
def suite_satisficing():
return sorted(suite_satisficing_adl() + suite_satisficing_strips())
def suite_all():
return sorted(
suite_ipc98_to_ipc04() + suite_ipc06() +
suite_ipc06_strips_compilations() + suite_ipc08() +
suite_ipc11() + suite_ipc14() + suite_alternative_formulations())
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("suite", help="suite name")
return parser.parse_args()
def main():
prefix = "suite_"
suite_names = [
name[len(prefix):] for name in sorted(globals().keys())
if name.startswith(prefix)]
parser = argparse.ArgumentParser(description=HELP)
parser.add_argument("suite", choices=suite_names, help="suite name")
parser.add_argument(
"--width", default=72, type=int,
help="output line width (default: %(default)s). Use 1 for single "
"column.")
args = parser.parse_args()
suite_func = globals()[prefix + args.suite]
print(textwrap.fill(
str(suite_func()),
width=args.width,
break_long_words=False,
break_on_hyphens=False))
if __name__ == "__main__":
main()
| 8,551 | Python | 23.364672 | 77 | 0.595954 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue629/experimental-branches.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import suites
from lab.reports import Attribute, gm
from common_setup import IssueConfig, IssueExperiment
def main(revisions=None):
    benchmarks_dir = os.path.expanduser('~/projects/downward/benchmarks')
    suite = [
'airport',
'depot',
'driverlog',
'elevators-opt08-strips',
'elevators-opt11-strips',
'freecell',
'hiking-opt14-strips',
'pipesworld-tankage',
]
configs = {
IssueConfig(
'astar-blind-ssec',
['--search', 'astar(blind(), pruning=stubborn_sets_ec())']
),
}
exp = IssueExperiment(
benchmarks_dir=benchmarks_dir,
suite=suite,
revisions=revisions,
configs=configs,
test_suite=['depot:p01.pddl'],
processes=4,
email='florian.pommerening@unibas.ch',
)
exp.add_comparison_table_step()
exp()
# issue629-experimental-base is based on issue629-v2-base and only removed the ordering of actions after pruning
# issue629-experimental is based on issue629-v4 and only removed the ordering of actions after pruning
# Both branches will not be merged.
main(revisions=['issue629-experimental-base', 'issue629-experimental'])
| 1,276 | Python | 25.604166 | 113 | 0.637147 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue182/suites.py
|
# Benchmark suites from the Fast Downward benchmark collection.
def suite_alternative_formulations():
return ['airport-adl', 'no-mprime', 'no-mystery']
def suite_ipc98_to_ipc04_adl():
return [
'assembly', 'miconic-fulladl', 'miconic-simpleadl',
'optical-telegraphs', 'philosophers', 'psr-large',
'psr-middle', 'schedule',
]
def suite_ipc98_to_ipc04_strips():
return [
'airport', 'blocks', 'depot', 'driverlog', 'freecell', 'grid',
'gripper', 'logistics00', 'logistics98', 'miconic', 'movie',
'mprime', 'mystery', 'pipesworld-notankage', 'psr-small',
'satellite', 'zenotravel',
]
def suite_ipc98_to_ipc04():
# All IPC1-4 domains, including the trivial Movie.
return sorted(suite_ipc98_to_ipc04_adl() + suite_ipc98_to_ipc04_strips())
def suite_ipc06_adl():
return [
'openstacks',
'pathways',
'trucks',
]
def suite_ipc06_strips_compilations():
return [
'openstacks-strips',
'pathways-noneg',
'trucks-strips',
]
def suite_ipc06_strips():
return [
'pipesworld-tankage',
'rovers',
'storage',
'tpp',
]
def suite_ipc06():
return sorted(suite_ipc06_adl() + suite_ipc06_strips())
def suite_ipc08_common_strips():
return [
'parcprinter-08-strips',
'pegsol-08-strips',
'scanalyzer-08-strips',
]
def suite_ipc08_opt_adl():
return ['openstacks-opt08-adl']
def suite_ipc08_opt_strips():
return sorted(suite_ipc08_common_strips() + [
'elevators-opt08-strips',
'openstacks-opt08-strips',
'sokoban-opt08-strips',
'transport-opt08-strips',
'woodworking-opt08-strips',
])
def suite_ipc08_opt():
return sorted(suite_ipc08_opt_strips() + suite_ipc08_opt_adl())
def suite_ipc08_sat_adl():
return ['openstacks-sat08-adl']
def suite_ipc08_sat_strips():
return sorted(suite_ipc08_common_strips() + [
# Note: cyber-security is missing.
'elevators-sat08-strips',
'openstacks-sat08-strips',
'sokoban-sat08-strips',
'transport-sat08-strips',
'woodworking-sat08-strips',
])
def suite_ipc08_sat():
return sorted(suite_ipc08_sat_strips() + suite_ipc08_sat_adl())
def suite_ipc08():
return sorted(set(suite_ipc08_opt() + suite_ipc08_sat()))
def suite_ipc11_opt():
return [
'barman-opt11-strips',
'elevators-opt11-strips',
'floortile-opt11-strips',
'nomystery-opt11-strips',
'openstacks-opt11-strips',
'parcprinter-opt11-strips',
'parking-opt11-strips',
'pegsol-opt11-strips',
'scanalyzer-opt11-strips',
'sokoban-opt11-strips',
'tidybot-opt11-strips',
'transport-opt11-strips',
'visitall-opt11-strips',
'woodworking-opt11-strips',
]
def suite_ipc11_sat():
return [
'barman-sat11-strips',
'elevators-sat11-strips',
'floortile-sat11-strips',
'nomystery-sat11-strips',
'openstacks-sat11-strips',
'parcprinter-sat11-strips',
'parking-sat11-strips',
'pegsol-sat11-strips',
'scanalyzer-sat11-strips',
'sokoban-sat11-strips',
'tidybot-sat11-strips',
'transport-sat11-strips',
'visitall-sat11-strips',
'woodworking-sat11-strips',
]
def suite_ipc11():
return sorted(suite_ipc11_opt() + suite_ipc11_sat())
def suite_ipc14_agl_adl():
return [
'cavediving-agl14-adl',
'citycar-agl14-adl',
'maintenance-agl14-adl',
]
def suite_ipc14_agl_strips():
return [
'barman-agl14-strips',
'childsnack-agl14-strips',
'floortile-agl14-strips',
'ged-agl14-strips',
'hiking-agl14-strips',
'openstacks-agl14-strips',
'parking-agl14-strips',
'tetris-agl14-strips',
'thoughtful-agl14-strips',
'transport-agl14-strips',
'visitall-agl14-strips',
]
def suite_ipc14_agl():
return sorted(suite_ipc14_agl_adl() + suite_ipc14_agl_strips())
def suite_ipc14_mco_adl():
return [
'cavediving-mco14-adl',
'citycar-mco14-adl',
'maintenance-mco14-adl',
]
def suite_ipc14_mco_strips():
return [
'barman-mco14-strips',
'childsnack-mco14-strips',
'floortile-mco14-strips',
'ged-mco14-strips',
'hiking-mco14-strips',
'openstacks-mco14-strips',
'parking-mco14-strips',
'tetris-mco14-strips',
'thoughtful-mco14-strips',
'transport-mco14-strips',
'visitall-mco14-strips',
]
def suite_ipc14_mco():
return sorted(suite_ipc14_mco_adl() + suite_ipc14_mco_strips())
def suite_ipc14_opt_adl():
return [
'cavediving-opt14-adl',
'citycar-opt14-adl',
'maintenance-opt14-adl',
]
def suite_ipc14_opt_strips():
return [
'barman-opt14-strips',
'childsnack-opt14-strips',
'floortile-opt14-strips',
'ged-opt14-strips',
'hiking-opt14-strips',
'openstacks-opt14-strips',
'parking-opt14-strips',
'tetris-opt14-strips',
'tidybot-opt14-strips',
'transport-opt14-strips',
'visitall-opt14-strips',
]
def suite_ipc14_opt():
return sorted(suite_ipc14_opt_adl() + suite_ipc14_opt_strips())
def suite_ipc14_sat_adl():
return [
'cavediving-sat14-adl',
'citycar-sat14-adl',
'maintenance-sat14-adl',
]
def suite_ipc14_sat_strips():
return [
'barman-sat14-strips',
'childsnack-sat14-strips',
'floortile-sat14-strips',
'ged-sat14-strips',
'hiking-sat14-strips',
'openstacks-sat14-strips',
'parking-sat14-strips',
'tetris-sat14-strips',
'thoughtful-sat14-strips',
'transport-sat14-strips',
'visitall-sat14-strips',
]
def suite_ipc14_sat():
return sorted(suite_ipc14_sat_adl() + suite_ipc14_sat_strips())
def suite_ipc14():
return sorted(
suite_ipc14_agl() + suite_ipc14_mco() +
suite_ipc14_opt() + suite_ipc14_sat())
def suite_unsolvable():
# TODO: Add other unsolvable problems (Miconic-FullADL).
# TODO: Add 'fsc-grid-r:prize5x5_R.pddl' and 't0-uts:uts_r-02.pddl'
# if the extra-domains branch is merged.
return sorted(
['mystery:prob%02d.pddl' % index
for index in [4, 5, 7, 8, 12, 16, 18, 21, 22, 23, 24]] +
['miconic-fulladl:f21-3.pddl', 'miconic-fulladl:f30-2.pddl'])
def suite_optimal_adl():
return sorted(
suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() +
suite_ipc08_opt_adl())
def suite_optimal_strips():
return sorted(
suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() +
suite_ipc06_strips_compilations() + suite_ipc08_opt_strips() +
suite_ipc11_opt())
def suite_optimal():
return sorted(suite_optimal_adl() + suite_optimal_strips())
def suite_satisficing_adl():
return sorted(
suite_ipc98_to_ipc04_adl() + suite_ipc06_adl() +
suite_ipc08_sat_adl())
def suite_satisficing_strips():
return sorted(
suite_ipc98_to_ipc04_strips() + suite_ipc06_strips() +
suite_ipc06_strips_compilations() + suite_ipc08_sat_strips() +
suite_ipc11_sat())
def suite_satisficing():
return sorted(suite_satisficing_adl() + suite_satisficing_strips())
def suite_all():
return sorted(
suite_ipc98_to_ipc04() + suite_ipc06() +
suite_ipc06_strips_compilations() + suite_ipc08() +
suite_ipc11() + suite_alternative_formulations())
| 7,695 | Python | 23.35443 | 77 | 0.596231 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue182/v1-no-cache.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import common_setup
from common_setup import IssueConfig, IssueExperiment
import suites
heuristics = [
"{}(cache_estimates=false)".format(h) for h in (
"pdb", "cpdbs", "diverse_potentials", "all_states_potential",
"initial_state_potential", "sample_based_potentials")]
max_eval = "max([{}])".format(",".join(heuristics))
ipc_max = "ipc_max([{}],cache_estimates=false)".format(",".join(heuristics))
configs = [
IssueConfig(
name,
["--search", "astar({})".format(eval_)])
for name, eval_ in [("max", max_eval), ("ipc_max", ipc_max)]
]
revision = "8f1563b36fc7"
exp = IssueExperiment(
revisions=[revision],
configs=configs,
suite=suites.suite_optimal_strips(),
test_suite=["depot:pfile1"],
email="jendrik.seipp@unibas.ch",
)
exp.add_absolute_report_step()
exp.add_report(
common_setup.CompareConfigsReport(
[(revision + "-" + "ipc_max", revision + "-" + "max")],
attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES),
name=common_setup.get_experiment_name() + "-compare")
exp()
| 1,117 | Python | 26.268292 | 76 | 0.636526 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue182/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from common_setup import IssueConfig, IssueExperiment
import suites
configs = [
IssueConfig(
func,
["--search", "astar({}([ipdb(max_time=5),diverse_potentials(),all_states_potential(),initial_state_potential(),sample_based_potentials()]))".format(func)])
for func in ["max", "ipc_max"]
]
revisions = ["8f1563b36fc7"]
exp = IssueExperiment(
revisions=revisions,
configs=configs,
suite=suites.suite_optimal_strips(),
test_suite=["depot:pfile1"],
email="jendrik.seipp@unibas.ch",
)
exp.add_absolute_report_step()
exp()
| 613 | Python | 21.74074 | 163 | 0.662316 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue387/common_setup.py
|
# -*- coding: utf-8 -*-
import os.path
from lab.environments import MaiaEnvironment
from lab.steps import Step
from downward.checkouts import Translator, Preprocessor, Planner
from downward.experiments import DownwardExperiment
from downward.reports.compare import CompareRevisionsReport
from downward.reports.scatter import ScatterPlotReport
def get_script():
"""Get file name of main script."""
import __main__
return __main__.__file__
def get_script_dir():
"""Get directory of main script.
    Usually a relative directory (depends on how it was called by the user)."""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the filename of the main script, e.g.
"/ham/spam/eggs.py" => "eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Found by searching upwards in the directory tree from the main
script until a directory with a subdirectory named ".hg" is found."""
path = os.path.abspath(get_script_dir())
    while True:
        if os.path.exists(os.path.join(path, ".hg")):
            return path
        if os.path.dirname(path) == path:
            # Reached the filesystem root without finding ".hg".
            raise RuntimeError("repo base could not be found")
        path = os.path.dirname(path)
class MyExperiment(DownwardExperiment):
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"total_time",
"search_time",
"memory",
"expansions_until_last_jump",
]
"""Wrapper for DownwardExperiment with a few convenience features."""
def __init__(self, configs=None, grid_priority=None, path=None,
repo=None, revisions=None, search_revisions=None,
suite=None, **kwargs):
"""Create a DownwardExperiment with some convenience features.
If "configs" is specified, it should be a dict of {nick:
cmdline} pairs that sets the planner configurations to test.
If "grid_priority" is specified and no environment is
specifically requested in **kwargs, use the maia environment
with the specified priority.
If "path" is not specified, the experiment data path is
derived automatically from the main script's filename.
If "repo" is not specified, the repository base is derived
automatically from the main script's path.
If "revisions" is specified, it should be a non-empty
list of revisions, which specify which planner versions to use
in the experiment. The same versions are used for translator,
preprocessor and search.
If "search_revisions" is specified, it should be a non-empty
list of revisions, which specify which search component
versions to use in the experiment. All experiments use the
translator and preprocessor component of the first
revision.
If "suite" is specified, it should specify a problem suite.
Options "combinations" (from the base class), "revisions" and
"search_revisions" are mutually exclusive."""
if grid_priority is not None and "environment" not in kwargs:
kwargs["environment"] = MaiaEnvironment(priority=grid_priority)
if path is None:
path = get_data_dir()
if repo is None:
repo = get_repo_base()
num_rev_opts_specified = (
int(revisions is not None) +
int(search_revisions is not None) +
int(kwargs.get("combinations") is not None))
if num_rev_opts_specified > 1:
raise ValueError('must specify exactly one of "revisions", '
'"search_revisions" or "combinations"')
# See add_comparison_table_step for more on this variable.
self._HACK_revisions = revisions
if revisions is not None:
if not revisions:
raise ValueError("revisions cannot be empty")
combinations = [(Translator(repo, rev),
Preprocessor(repo, rev),
Planner(repo, rev))
for rev in revisions]
kwargs["combinations"] = combinations
if search_revisions is not None:
if not search_revisions:
raise ValueError("search_revisions cannot be empty")
base_rev = search_revisions[0]
translator = Translator(repo, base_rev)
preprocessor = Preprocessor(repo, base_rev)
combinations = [(translator, preprocessor, Planner(repo, rev))
for rev in search_revisions]
kwargs["combinations"] = combinations
DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs)
if configs is not None:
for nick, config in configs.items():
self.add_config(nick, config)
if suite is not None:
self.add_suite(suite)
self._report_prefix = get_experiment_name()
def add_comparison_table_step(self, attributes=None):
revisions = self._HACK_revisions
if revisions is None:
# TODO: It's not clear to me what a "revision" in the
# overall context of the code really is, e.g. when keeping
# the translator and preprocessor method fixed and only
# changing the search component. It's also not really
# clear to me how the interface of the Compare... reports
# works and how to use it more generally. Hence the
# present hack.
# Ideally, this method should look at the table columns we
# have (defined by planners and planner configurations),
# pair them up in a suitable way, either controlled by a
# convenience parameter or a more general grouping method,
# and then use this to define which pairs go together.
raise NotImplementedError(
"only supported when specifying revisions in __init__")
if attributes is None:
attributes = self.DEFAULT_TABLE_ATTRIBUTES
report = CompareRevisionsReport(*revisions, attributes=attributes)
self.add_report(report, outfile="%s-compare.html" % self._report_prefix)
def add_scatter_plot_step(self, attributes=None):
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
revisions = self._HACK_revisions
if revisions is None:
# TODO: See add_comparison_table_step.
raise NotImplementedError(
"only supported when specifying revisions in __init__")
if len(revisions) != 2:
# TODO: Should generalize this, too, by offering a general
# grouping function and then comparing any pair of
# settings in the same group.
raise NotImplementedError("need two revisions")
scatter_dir = os.path.join(self.eval_dir, "scatter")
def make_scatter_plots():
configs = [conf[0] for conf in self.configs]
for nick in configs:
config_before = "%s-%s" % (revisions[0], nick)
config_after = "%s-%s" % (revisions[1], nick)
for attribute in attributes:
name = "%s-%s-%s" % (self._report_prefix, attribute, nick)
report = ScatterPlotReport(
filter_config=[config_before, config_after],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(self.eval_dir, os.path.join(scatter_dir, name))
self.add_step(Step("make-scatter-plots", make_scatter_plots))
| 8,551 | Python | 37.008889 | 80 | 0.609987 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue387/issue387.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward.suites import suite_optimal_with_ipc11
from downward.configs import default_configs_optimal
import common_setup
REVS = ["issue387-base", "issue387-v1"]
CONFIGS = default_configs_optimal()
# remove config that is disabled in this branch
del CONFIGS['astar_selmax_lmcut_lmcount']
TEST_RUN = True
if TEST_RUN:
SUITE = "gripper:prob01.pddl"
PRIORITY = None # "None" means local experiment
else:
SUITE = suite_optimal_with_ipc11()
PRIORITY = 0 # number means maia experiment
# TODO: I'd like to specify "search_revisions" (which uses the same
# translator and preprocessor for everything) instead of "revisions"
# here, but I can't seem to make this work with the REVS argument for
# CompareRevisionsReport.
exp = common_setup.MyExperiment(
grid_priority=PRIORITY,
revisions=REVS,
configs=CONFIGS,
suite=SUITE
)
exp.add_comparison_table_step()
exp.add_scatter_plot_step()
exp()
| 988 | Python | 21.999999 | 69 | 0.720648 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue536/common_setup.py
|
# -*- coding: utf-8 -*-
import itertools
import os
import platform
import sys
from lab.environments import LocalEnvironment, MaiaEnvironment
from lab.experiment import ARGPARSER
from lab.steps import Step
from downward.experiments import DownwardExperiment, _get_rev_nick
from downward.checkouts import Translator, Preprocessor, Planner
from downward.reports.absolute import AbsoluteReport
from downward.reports.compare import CompareRevisionsReport
from downward.reports.scatter import ScatterPlotReport
def parse_args():
ARGPARSER.add_argument(
"--test",
choices=["yes", "no", "auto"],
default="auto",
dest="test_run",
help="test experiment locally on a small suite if --test=yes or "
"--test=auto and we are not on a cluster")
return ARGPARSER.parse_args()
ARGS = parse_args()
def get_script():
"""Get file name of main script."""
import __main__
return __main__.__file__
def get_script_dir():
"""Get directory of main script.
    Usually a relative directory (depends on how it was called by the user)."""
return os.path.dirname(get_script())
def get_experiment_name():
"""Get name for experiment.
Derived from the absolute filename of the main script, e.g.
"/ham/spam/eggs.py" => "spam-eggs"."""
script = os.path.abspath(get_script())
script_dir = os.path.basename(os.path.dirname(script))
script_base = os.path.splitext(os.path.basename(script))[0]
return "%s-%s" % (script_dir, script_base)
def get_data_dir():
"""Get data dir for the experiment.
This is the subdirectory "data" of the directory containing
the main script."""
return os.path.join(get_script_dir(), "data", get_experiment_name())
def get_repo_base():
"""Get base directory of the repository, as an absolute path.
Search upwards in the directory tree from the main script until a
directory with a subdirectory named ".hg" is found.
Abort if the repo base cannot be found."""
path = os.path.abspath(get_script_dir())
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, ".hg")):
return path
path = os.path.dirname(path)
sys.exit("repo base could not be found")
def is_running_on_cluster():
node = platform.node()
return ("cluster" in node or
node.startswith("gkigrid") or
node in ["habakuk", "turtur"])
def is_test_run():
return ARGS.test_run == "yes" or (ARGS.test_run == "auto" and
not is_running_on_cluster())
class IssueExperiment(DownwardExperiment):
"""Wrapper for DownwardExperiment with a few convenience features."""
DEFAULT_TEST_SUITE = "gripper:prob01.pddl"
DEFAULT_TABLE_ATTRIBUTES = [
"cost",
"coverage",
"error",
"evaluations",
"expansions",
"expansions_until_last_jump",
"generated",
"memory",
"quality",
"run_dir",
"score_evaluations",
"score_expansions",
"score_generated",
"score_memory",
"score_search_time",
"score_total_time",
"search_time",
"total_time",
]
DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
"evaluations",
"expansions",
"expansions_until_last_jump",
"initial_h_value",
"memory",
"search_time",
"total_time",
]
PORTFOLIO_ATTRIBUTES = [
"cost",
"coverage",
"error",
"plan_length",
"run_dir",
]
def __init__(self, configs, suite, grid_priority=None, path=None,
repo=None, revisions=None, search_revisions=None,
test_suite=None, **kwargs):
"""Create a DownwardExperiment with some convenience features.
*configs* must be a non-empty dict of {nick: cmdline} pairs
that sets the planner configurations to test. ::
IssueExperiment(configs={
"lmcut": ["--search", "astar(lmcut())"],
"ipdb": ["--search", "astar(ipdb())"]})
*suite* sets the benchmarks for the experiment. It must be a
single string or a list of strings specifying domains or
tasks. The downward.suites module has many predefined
suites. ::
IssueExperiment(suite=["grid", "gripper:prob01.pddl"])
from downward import suites
IssueExperiment(suite=suites.suite_all())
IssueExperiment(suite=suites.suite_satisficing_with_ipc11())
IssueExperiment(suite=suites.suite_optimal())
Use *grid_priority* to set the job priority for cluster
experiments. It must be in the range [-1023, 0] where 0 is the
highest priority. By default the priority is 0. ::
IssueExperiment(grid_priority=-500)
If *path* is specified, it must be the path to where the
experiment should be built (e.g.
/home/john/experiments/issue123/exp01/). If omitted, the
experiment path is derived automatically from the main
script's filename. Example::
script = experiments/issue123/exp01.py -->
path = experiments/issue123/data/issue123-exp01/
If *repo* is specified, it must be the path to the root of a
local Fast Downward repository. If omitted, the repository
is derived automatically from the main script's path. Example::
script = /path/to/fd-repo/experiments/issue123/exp01.py -->
repo = /path/to/fd-repo
If *revisions* is specified, it should be a non-empty
list of revisions, which specify which planner versions to use
in the experiment. The same versions are used for translator,
preprocessor and search. ::
IssueExperiment(revisions=["issue123", "4b3d581643"])
If *search_revisions* is specified, it should be a non-empty
list of revisions, which specify which search component
versions to use in the experiment. All runs use the
translator and preprocessor component of the first
revision. ::
IssueExperiment(search_revisions=["default", "issue123"])
If you really need to specify the (translator, preprocessor,
planner) triples manually, use the *combinations* parameter
from the base class (might be deprecated soon). The options
*revisions*, *search_revisions* and *combinations* can be
freely mixed, but at least one of them must be given.
Specify *test_suite* to set the benchmarks for experiment test
runs. By default the first gripper task is used.
IssueExperiment(test_suite=["depot:pfile1", "tpp:p01.pddl"])
"""
if is_test_run():
kwargs["environment"] = LocalEnvironment()
suite = test_suite or self.DEFAULT_TEST_SUITE
elif "environment" not in kwargs:
kwargs["environment"] = MaiaEnvironment(priority=grid_priority)
if path is None:
path = get_data_dir()
if repo is None:
repo = get_repo_base()
kwargs.setdefault("combinations", [])
if not any([revisions, search_revisions, kwargs["combinations"]]):
raise ValueError('At least one of "revisions", "search_revisions" '
'or "combinations" must be given')
if revisions:
kwargs["combinations"].extend([
(Translator(repo, rev),
Preprocessor(repo, rev),
Planner(repo, rev))
for rev in revisions])
if search_revisions:
base_rev = search_revisions[0]
# Use the same nick for all parts to get short revision nick.
kwargs["combinations"].extend([
(Translator(repo, base_rev, nick=rev),
Preprocessor(repo, base_rev, nick=rev),
Planner(repo, rev, nick=rev))
for rev in search_revisions])
DownwardExperiment.__init__(self, path=path, repo=repo, **kwargs)
self._config_nicks = []
for nick, config in configs.items():
self.add_config(nick, config)
self.add_suite(suite)
@property
def revision_nicks(self):
# TODO: Once the add_algorithm() API is available we should get
# rid of the call to _get_rev_nick() and avoid inspecting the
# list of combinations by setting and saving the algorithm nicks.
return [_get_rev_nick(*combo) for combo in self.combinations]
@classmethod
def _is_portfolio(cls, config_nick):
return "fdss" in config_nick
@classmethod
def get_supported_attributes(cls, config_nick, attributes):
if cls._is_portfolio(config_nick):
return [attr for attr in attributes
if attr in cls.PORTFOLIO_ATTRIBUTES]
return attributes
def add_config(self, nick, config, timeout=None):
DownwardExperiment.add_config(self, nick, config, timeout=timeout)
self._config_nicks.append(nick)
def add_absolute_report_step(self, **kwargs):
"""Add step that makes an absolute report.
Absolute reports are useful for experiments that don't
compare revisions.
The report is written to the experiment evaluation directory.
All *kwargs* will be passed to the AbsoluteReport class. If
the keyword argument *attributes* is not specified, a
default list of attributes is used. ::
exp.add_absolute_report_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
report = AbsoluteReport(**kwargs)
outfile = get_experiment_name() + "." + report.output_format
self.add_report(report, outfile=outfile)
def add_comparison_table_step(self, **kwargs):
"""Add a step that makes pairwise revision comparisons.
Create comparative reports for all pairs of Fast Downward
revision triples. Each report pairs up the runs of the same
config and lists the two absolute attribute values and their
difference for all attributes in kwargs["attributes"].
All *kwargs* will be passed to the CompareRevisionsReport
class. If the keyword argument *attributes* is not
specified, a default list of attributes is used. ::
exp.add_comparison_table_step(attributes=["coverage"])
"""
kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
def make_comparison_tables():
for rev1, rev2 in itertools.combinations(self.revision_nicks, 2):
report = CompareRevisionsReport(rev1, rev2, **kwargs)
outfile = os.path.join(self.eval_dir,
"%s-%s-%s-compare.html" %
(self.name, rev1, rev2))
report(self.eval_dir, outfile)
self.add_step(Step("make-comparison-tables", make_comparison_tables))
def add_scatter_plot_step(self, attributes=None):
"""Add a step that creates scatter plots for all revision pairs.
Create a scatter plot for each combination of attribute,
configuration and revision pair. If *attributes* is not
specified, a list of common scatter plot attributes is used.
For portfolios all attributes except "cost", "coverage" and
"plan_length" will be ignored. ::
exp.add_scatter_plot_step(attributes=["expansions"])
"""
if attributes is None:
attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
scatter_dir = os.path.join(self.eval_dir, "scatter")
def make_scatter_plot(config_nick, rev1, rev2, attribute):
name = "-".join([self.name, rev1, rev2, attribute, config_nick])
print "Make scatter plot for", name
algo1 = "%s-%s" % (rev1, config_nick)
algo2 = "%s-%s" % (rev2, config_nick)
report = ScatterPlotReport(
filter_config=[algo1, algo2],
attributes=[attribute],
get_category=lambda run1, run2: run1["domain"],
legend_location=(1.3, 0.5))
report(self.eval_dir,
os.path.join(scatter_dir, rev1 + "-" + rev2, name))
def make_scatter_plots():
for config_nick in self._config_nicks:
for rev1, rev2 in itertools.combinations(
self.revision_nicks, 2):
for attribute in self.get_supported_attributes(
config_nick, attributes):
make_scatter_plot(config_nick, rev1, rev2, attribute)
self.add_step(Step("make-scatter-plots", make_scatter_plots))
| 12,856 | Python | 34.913408 | 79 | 0.612943 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue536/ipdb.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
REVS = ["issue536-base", "issue536-v1", "issue536-v2"]
LIMITS = {"search_time": 1800}
SUITE = suites.suite_optimal_with_ipc11()
CONFIGS = {
"ipdb": ["--search", "astar(ipdb())"],
}
exp = common_setup.IssueExperiment(
search_revisions=REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
)
exp.add_absolute_report_step()
exp()
| 450 | Python | 16.346153 | 54 | 0.646667 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue838/v2-cache-size.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue838-v2"]
CONFIG_NICKS = [
("lazy-greedy-cg-cache-size-{cache_size}".format(**locals()), [
"--heuristic", "h=cg(max_cache_size={cache_size})".format(**locals()),
"--search", "lazy_greedy([h],preferred=[h])"])
for cache_size in ["0", "1K", "1M", "2M", "5M", "10M", "20M", "50M", "100M", "1000M"]
]
CONFIGS = [
IssueConfig(config_nick, config)
for config_nick, config in CONFIG_NICKS
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_1",
email="jendrik.seipp@unibas.ch",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step()
#exp.add_comparison_table_step()
#attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES
#algorithm_pairs = [
# ("issue838-v1-{build}-lazy-greedy-cg-use-cache-False".format(**locals()),
# "issue838-v1-{build}-lazy-greedy-cg-use-cache-True".format(**locals()),
# "Diff ({build})".format(**locals()))
# for build in BUILDS]
#exp.add_report(
# ComparativeReport(algorithm_pairs, attributes=attributes),
# name="{SCRIPT_NAME}".format(**locals()))
exp.run_steps()
| 2,110 | Python | 29.594202 | 89 | 0.704739 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue838/v1-use-cache.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue838-v1"]
BUILDS = ["release32", "release64"]
CONFIG_NICKS = [
("lazy-greedy-cg-use-cache-{use_cache}".format(**locals()), [
"--heuristic", "h=cg(use_cache={use_cache})".format(**locals()),
"--search", "lazy_greedy([h],preferred=[h])"])
for use_cache in [True, False]
]
CONFIGS = [
IssueConfig(
build + "-" + config_nick,
config,
build_options=[build],
driver_options=["--build", build])
for rev in REVISIONS
for build in BUILDS
for config_nick, config in CONFIG_NICKS
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="jendrik.seipp@unibas.ch",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
#exp.add_absolute_report_step()
#exp.add_comparison_table_step()
attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES
algorithm_pairs = [
("issue838-v1-{build}-lazy-greedy-cg-use-cache-False".format(**locals()),
"issue838-v1-{build}-lazy-greedy-cg-use-cache-True".format(**locals()),
"Diff ({build})".format(**locals()))
for build in BUILDS]
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
name="{SCRIPT_NAME}".format(**locals()))
exp.run_steps()
| 2,229 | Python | 28.342105 | 77 | 0.701211 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue621/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
configs = [
IssueConfig(
"cegar-10K-original",
["--search", "astar(cegar(subtasks=[original()],max_states=10000,max_time=infinity))"]),
]
revisions = ["issue621-base", "issue621-v1"]
exp = IssueExperiment(
revisions=revisions,
configs=configs,
suite=suites.suite_optimal_with_ipc11(),
test_suite=["depot:pfile1"],
email="jendrik.seipp@unibas.ch",
)
exp.add_comparison_table_step()
for attribute in ["memory", "total_time"]:
for config in configs:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_config=["{}-{}".format(rev, config.nick) for rev in revisions],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile="{}-{}-{}.png".format(exp.name, attribute, config.nick)
)
exp()
| 1,067 | Python | 25.699999 | 96 | 0.632615 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue752/v1-new.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue752-v1"]
CONFIGS = [
IssueConfig('astar-blind', ["--search", "astar(blind())"],
build_options=["release64"], driver_options=["--build", "release64"]),
IssueConfig('astar-seq-cplex1271', ["--search", "astar(operatorcounting([state_equation_constraints()], lpsolver=cplex))"],
build_options=["release64"], driver_options=["--build", "release64"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(email="florian.pommerening@unibas.ch", export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.run_steps()
| 1,220 | Python | 31.999999 | 127 | 0.729508 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue752/v1-soplex.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue752-v1"]
CONFIGS = [
IssueConfig('astar-seq-cplex', ["--search", "astar(operatorcounting([state_equation_constraints()], lpsolver=cplex))"],
build_options=["release64"], driver_options=["--build", "release64"]),
IssueConfig('astar-seq-soplex', ["--search", "astar(operatorcounting([state_equation_constraints()], lpsolver=soplex))"],
build_options=["release64"], driver_options=["--build", "release64"]),
    IssueConfig('astar-seq-lmcut-cplex', ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()], lpsolver=cplex))"],
                build_options=["release64"], driver_options=["--build", "release64"]),
    IssueConfig('astar-seq-lmcut-soplex', ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()], lpsolver=soplex))"],
                build_options=["release64"], driver_options=["--build", "release64"]),
    IssueConfig('astar-seq-pho-cplex', ["--search", "astar(operatorcounting([state_equation_constraints(), pho_constraints(patterns=systematic(2))], lpsolver=cplex))"],
                build_options=["release64"], driver_options=["--build", "release64"]),
    IssueConfig('astar-seq-pho-soplex', ["--search", "astar(operatorcounting([state_equation_constraints(), pho_constraints(patterns=systematic(2))], lpsolver=soplex))"],
                build_options=["release64"], driver_options=["--build", "release64"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(email="florian.pommerening@unibas.ch", export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
for attribute in ["total_time"]:
for config in ["astar-seq-pho", "astar-seq-lmcut"]:
for rev in REVISIONS:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_algorithm=["{}-{}-{}".format(rev, config, solver) for solver in ["cplex", "soplex"]],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile="{}-{}-{}.png".format(exp.name, attribute, config)
)
exp.run_steps()
| 2,767 | Python | 47.561403 | 172 | 0.674377 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue752/v3.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue752-v3"]
CONFIGS = [
IssueConfig("opcount-seq-lmcut-soplex", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()], lpsolver=soplex))"]),
IssueConfig("diverse-potentials-soplex", ["--search", "astar(diverse_potentials(lpsolver=soplex))"]),
IssueConfig("optimal-lmcount-soplex", ["--search", "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]), admissible=true, optimal=true, lpsolver=soplex))"]),
IssueConfig("opcount-seq-lmcut-cplex", ["--search", "astar(operatorcounting([state_equation_constraints(), lmcut_constraints()], lpsolver=cplex))"]),
IssueConfig("diverse-potentials-cplex", ["--search", "astar(diverse_potentials(lpsolver=cplex))"]),
IssueConfig("optimal-lmcount-cplex", ["--search", "astar(lmcount(lm_merged([lm_rhw(),lm_hm(m=1)]), admissible=true, optimal=true, lpsolver=cplex))"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(partition="infai_2", email="florian.pommerening@unibas.ch")
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step()
for nick in ["opcount-seq-lmcut", "diverse-potentials", "optimal-lmcount"]:
exp.add_report(RelativeScatterPlotReport(
attributes=["total_time"],
filter_algorithm=["issue752-v3-%s-%s" % (nick, solver) for solver in ["cplex", "soplex"]],
get_category=lambda r1, r2: r1["domain"]),
outfile="issue752-v3-scatter-total-time-%s.png" % nick)
exp.run_steps()
| 2,310 | Python | 40.267856 | 156 | 0.723377 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue665/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from lab.reports import Attribute, gm
import os
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
def main(revisions=None):
suite = suites.suite_optimal_with_ipc11()
configs = {
IssueConfig('astar-blind', ['--search', 'astar(blind())'],
driver_options=['--search-time-limit', '5m']),
}
exp = IssueExperiment(
benchmarks_dir=os.path.expanduser('~/projects/downward/benchmarks'),
revisions=revisions,
configs=configs,
suite=suite,
test_suite=['depot:pfile1'],
processes=4,
email='florian.pommerening@unibas.ch',
)
exp.add_comparison_table_step()
attribute = "total_time"
config_nick = 'astar-blind'
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_config=["{}-{}".format(rev, config_nick) for rev in revisions],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile="{}-{}-{}.png".format(exp.name, attribute, config_nick)
)
exp()
main(revisions=['issue665-base', 'issue665-v1'])
| 1,242 | Python | 26.021739 | 82 | 0.621578 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue698/custom-parser.py
|
#! /usr/bin/env python
from lab.parser import Parser
class CustomParser(Parser):
def __init__(self):
Parser.__init__(self)
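        # Capture the successor generator construction time (in seconds)
        # from the planner log.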
self.add_pattern(
"successor_generator_time",
"Building successor generator...done! \[t=(.+)s\]",
required=False,
type=float)
if __name__ == "__main__":
parser = CustomParser()
print "Running custom parser"
parser.parse()
| 430 | Python | 20.549999 | 63 | 0.560465 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue698/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys
from lab.environments import LocalEnvironment, MaiaEnvironment
from common_setup import IssueConfig, IssueExperiment, is_test_run
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue698-base", "issue698-v1"]
CONFIGS = [
IssueConfig(
"blind",
["--search", "astar(blind())"],
driver_options=["--search-time-limit", "60s"]
)
]
sys.path.append(BENCHMARKS_DIR)
import suites
SUITE = suites.suite_optimal_strips()
ENVIRONMENT = MaiaEnvironment(
priority=0, email="florian.pommerening@unibas.ch")
if is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_command("parser", ["custom-parser.py"])
exp.add_comparison_table_step(
attributes=exp.DEFAULT_TABLE_ATTRIBUTES +
["successor_generator_time", "reopened_until_last_jump"])
exp.add_scatter_plot_step(attributes=["successor_generator_time"])
exp()
| 1,138 | Python | 23.760869 | 72 | 0.702109 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue453/v4.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
from downward.reports.absolute import AbsoluteReport
import common_setup
from common_setup import IssueConfig, IssueExperiment, get_repo_base
from derived_variables_instances import DerivedVariableInstances, DERIVED_VARIABLES_SUITE
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
BENCHMARKS_DIR_CUSTOM = "/infai/simsal00/translate-pddls"
SUITE = DERIVED_VARIABLES_SUITE
SUITE_CUSTOM = ["bwnc", "citycar", "cups", "failed-negation", "graph", "layers", "mst"]
CONFIGS = {'blind': (["--search", "astar(blind())"],[],[]),
'ff-eager': (["--evaluator", "heur=ff", "--search",
"eager_greedy([heur], preferred=[heur])"],[],[]),
'ff-lazy': (["--evaluator", "heur=ff", "--search",
"lazy_greedy([heur], preferred=[heur])"],[],[]),
'lama-first': ([],[],["--alias", "lama-first"]) }
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = ["depot:p01.pddl", "gripper:prob01.pddl", "psr-middle:p01-s17-n2-l2-f30.pddl"]
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(environment=ENVIRONMENT, revisions=[], configs=[])
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_suite(BENCHMARKS_DIR_CUSTOM, SUITE_CUSTOM)
#exp.add_suite(BENCHMARKS_DIR, ["psr-middle:p01-s17-n2-l2-f30.pddl"])
for name, config in CONFIGS.items():
    exp.add_algorithm(name + '-base', get_repo_base(), 'issue453-base', config[0], config[1], config[2])
    exp.add_algorithm(name + '-v4', get_repo_base(), 'issue453-v4', config[0], config[1], config[2])
    exp.add_algorithm(name + '-v4-max-layers', get_repo_base(), 'issue453-v4',
                      config[0] + ['--translate-options', '--layer-strategy=max'], config[1], config[2])
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser("parser.py")
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
attributes = (['translator_axioms',
'translator_derived_variables',
'translator_axioms_removed',
'translator_task_size',
'translator_time_done',
'translator_time_processing_axioms',
'cost',
'coverage',
'error',
'evaluations',
'expansions',
'initial_h_value',
'generated',
'memory',
'planner_memory',
'planner_time',
'run_dir',
'search_time',
'total_time',
'score_evaluations',
'score_search_time',
'score_total_time',
])
exp.add_absolute_report_step(attributes=attributes)
exp.run_steps()
| 3,150 | Python | 35.639534 | 153 | 0.623492 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue453/airport-adl-exp.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
from derived_variables_instances import DerivedVariableInstances, DERIVED_VARIABLES_SUITE
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue453-v1"]
CONFIGS = []
# Add heuristics using axioms
HEURISTICS = ['ff', 'cg', 'cea', 'add']
for h in HEURISTICS:
CONFIGS.append(IssueConfig(h+"-normal-axiom-rules", ["--evaluator", "heur=%s" % h,
"--search", "lazy_greedy([heur], preferred=[heur])"]))
CONFIGS.append(IssueConfig(h+"-overapprox-axiom-rules", ["--evaluator", "heur=%s" % h,
"--search", "lazy_greedy([heur], preferred=[heur])",
"--translate-options", "--overapproximate-axioms"]),)
# Add lama-first
CONFIGS.append(IssueConfig("lama-normal-axiom-rules", [], driver_options=["--alias", "lama-first"]))
CONFIGS.append(IssueConfig("lama-overapprox-axiom-rules", ["--translate-options", "--overapproximate-axioms"],
driver_options=["--alias", "lama-first"]),)
# Add A* with blind
CONFIGS.append(IssueConfig("blind-normal-axiom-rules", ["--search", "astar(blind)"]))
CONFIGS.append(IssueConfig("blind-overapprox-axiom-rules", ["--search", "astar(blind)",
"--translate-options", "--overapproximate-axioms"]),)
SUITE = ["airport-adl"]
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_1",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = ["depot:p01.pddl", "gripper:prob01.pddl", "psr-middle:p01-s17-n2-l2-f30.pddl"]
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser("parser.py")
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
attributes = (['translator_axioms',
'translator_derived_variables',
'translator_axioms_removed',
'translator_time_done',
'translator_time_processing_axioms',
'cost',
'coverage',
'error',
'evaluations',
'expansions',
'initial_h_value',
'generated',
'memory',
'planner_memory',
'planner_time',
'run_dir',
'search_time',
'total_time',])
exp.add_absolute_report_step(attributes=attributes)
#exp.add_report(DerivedVariableInstances())
exp.run_steps()
| 3,281 | Python | 37.16279 | 119 | 0.608656 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue453/derived_variables_instances.py
|
from downward.reports import PlanningReport
DERIVED_VARIABLES_SUITE=['airport-adl',
'assembly',
'miconic-fulladl',
'openstacks',
'openstacks-opt08-adl',
'openstacks-sat08-adl',
'optical-telegraphs',
'philosophers',
'psr-large',
'psr-middle',
'trucks']
class DerivedVariableInstances(PlanningReport):
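    """List every (domain, problem) pair whose translated task contains
    derived variables."""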
def get_text(self):
selected_runs = []
for (dom, prob), runs in self.problem_runs.items():
for run in runs:
if run.get("translator_derived_variables") > 0:
selected_runs.append((dom, prob))
return "\n".join(["{}:{},".format(*item) for item in selected_runs])
| 889 | Python | 34.599999 | 76 | 0.460067 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue453/v2-custom-pddls.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
from downward.reports.absolute import AbsoluteReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
#from relativescatter import RelativeScatterPlotReport
from derived_variables_instances import DerivedVariableInstances, DERIVED_VARIABLES_SUITE
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = '/infai/simsal00/translate-pddls'
REVISIONS = ["tip"]
CONFIGS = []
# Add heuristics using axioms
HEURISTICS = ['ff', 'blind']
LAYER_STRATEGY = ['max', 'min']
OVERAPPROXIMATE = ['none', 'cycles', 'all']
KEEP_REDUNDANT_POSITIVE_AXIOMS = [True, False]
NECESSARY_LITERALS = ['exact', 'non-derived', 'positive']
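# Build one configuration for every combination of heuristic, layer
# strategy, negated-axiom overapproximation, redundant-axiom handling,
# and necessary-literal approximation.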
for h in HEURISTICS:
for ls in LAYER_STRATEGY:
for overapprox in OVERAPPROXIMATE:
for rd in KEEP_REDUNDANT_POSITIVE_AXIOMS:
for lit in NECESSARY_LITERALS:
options = ["--evaluator", "heur=%s" % h, "--search", "lazy_greedy([heur], preferred=[heur])", "--translate-options"]
options += ["--layer_strategy", ls]
options += ["--overapproximate_negated_axioms", overapprox]
options += ["--overapproximate_necessary_literals", lit]
name = "%s-%s-%s-%s" % (h,ls,overapprox,lit)
if rd:
options += ["--keep_redundant_positive_axioms"]
name += '-kr'
CONFIGS.append(IssueConfig(name, options))
#for h in HEURISTICS:
# CONFIGS.append(IssueConfig(h+"-min-layers", ["--evaluator", "heur=%s" % h,
# "--search", "lazy_greedy([heur], preferred=[heur])",
# "--translate-options", "--layer_strategy", "min"]),)
# CONFIGS.append(IssueConfig(h+"-max-layers", ["--evaluator", "heur=%s" % h,
# "--search", "lazy_greedy([heur], preferred=[heur])",
# "--translate-options", "--layer_strategy", "max"]),)
# Add A* with blind
#CONFIGS.append(IssueConfig("blind-min-layers", ["--search", "astar(blind)",
# "--translate-options", "--layer_strategy", "min"]),)
#CONFIGS.append(IssueConfig("blind-max-layers", ["--search", "astar(blind)",
# "--translate-options", "--layer_strategy", "max"]),)
#SUITE = ["psr-middle:p01-s17-n2-l2-f30.pddl"]
SUITE = ["bwnc", "citycar", "cups", "failed-negation", "graph", "layers", "mst"]
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = ["depot:p01.pddl", "gripper:prob01.pddl", "psr-middle:p01-s17-n2-l2-f30.pddl"]
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_parser("parser.py")
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
attributes = (['translator_axioms',
'translator_derived_variables',
'translator_axioms_removed',
'translator_task_size',
'translator_time_done',
'translator_time_processing_axioms',
'cost',
'coverage',
'error',
'evaluations',
'expansions',
'initial_h_value',
'generated',
'memory',
'planner_memory',
'planner_time',
'run_dir',
'search_time',
'total_time',])
def get_keep_redundant_pairs():
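    # Pair every configuration with its '-kr' variant (which keeps redundant
    # positive axioms) so the comparative report contrasts the two directly.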
pairs = []
for h in HEURISTICS:
for ls in LAYER_STRATEGY:
for overapprox in OVERAPPROXIMATE:
for lit in NECESSARY_LITERALS:
                    pairs.append(("tip-%s-%s-%s-%s" % (h, ls, overapprox, lit), "tip-%s-%s-%s-%s-kr" % (h, ls, overapprox, lit)))
return pairs
exp.add_absolute_report_step(attributes=attributes)
exp.add_report(ComparativeReport(get_keep_redundant_pairs(), attributes=attributes), outfile="issue453-v2-compare_keep_redundant.html")
exp.run_steps()
| 4,791 | Python | 40.310344 | 136 | 0.570862 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue524/v2.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import os.path
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.reports import Attribute, geometric_mean
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISION_CACHE = os.path.expanduser('~/lab/revision-cache')
REVISIONS = ["issue524-base-v2", "issue524-v2"]
CONFIGS = [
    IssueConfig('lm_hm', [
        '--landmarks', 'l=lm_hm()',
        '--heuristic', 'h=lmcount(l)',
        '--search', 'eager_greedy([h])']),
    IssueConfig('lm_rhw', [
        '--landmarks', 'l=lm_rhw()',
        '--heuristic', 'h=lmcount(l)',
        '--search', 'eager_greedy([h])']),
    IssueConfig('lm_zg', [
        '--landmarks', 'l=lm_zg()',
        '--heuristic', 'h=lmcount(l)',
        '--search', 'eager_greedy([h])']),
    IssueConfig('lm_exhaust', [
        '--landmarks', 'l=lm_exhaust()',
        '--heuristic', 'h=lmcount(l)',
        '--search', 'eager_greedy([h])']),
    IssueConfig('lm_merged', [
        '--landmarks', 'l1=lm_exhaust()',
        '--landmarks', 'l2=lm_rhw()',
        '--landmarks', 'l=lm_merged([l1, l2])',
        '--heuristic', 'h=lmcount(l)',
        '--search', 'eager_greedy([h])']),
    IssueConfig(
        "lama-first", [], driver_options=["--alias", "lama-first"]),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
email="cedric.geissmann@unibas.ch"
)
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
revision_cache=REVISION_CACHE,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_absolute_report_step()
exp.add_comparison_table_step()
exp.add_scatter_plot_step()
exp.run_steps()
| 2,067 | Python | 27.328767 | 68 | 0.622158 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue554/issue554.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
REVS = ["issue554-base", "issue554-v1"]
LIMITS = {"search_time": 1800}
SUITE = suites.suite_optimal_with_ipc11()
CONFIGS = {
"astar_hmax": ["--search", "astar(hmax())"],
"gbfs_gc": ["--search", "eager_greedy(goalcount())"],
}
exp = common_setup.IssueExperiment(
search_revisions=REVS,
configs=CONFIGS,
suite=SUITE,
limits=LIMITS,
)
exp.add_comparison_table_step()
exp()
| 498 | Python | 18.959999 | 57 | 0.640562 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue611/peak-memory-microbenchmark/language.h
|
#ifndef UTILS_LANGUAGE_H
#define UTILS_LANGUAGE_H
// TODO: this should depend on the compiler, not on the OS.
#if defined(_WIN32)
#define NO_RETURN __declspec(noreturn)
#else
#define NO_RETURN __attribute__((noreturn))
#endif
namespace Utils {
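// Calling this "uses" a parameter, silencing unused-parameter warnings
// without resorting to macros or casts.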
template<typename T>
void unused_parameter(const T &) {
}
}
#endif
| 314 | C | 16.499999 | 59 | 0.726115 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue611/peak-memory-microbenchmark/main.cc
|
#include <ctime>
#include <functional>
#include <iostream>
#include <unistd.h>
#include "system.h"
using namespace std;
using namespace Utils;
void benchmark(const string &desc, int num_calls,
const function<void()> &func) {
cout << "Running " << desc << " " << num_calls << " times:" << flush;
clock_t start = clock();
for (int i = 0; i < num_calls; ++i)
func();
clock_t end = clock();
double duration = static_cast<double>(end - start) / CLOCKS_PER_SEC;
cout << " " << duration << " seconds" << endl;
}
int main(int, char **) {
// const int NUM_ITERATIONS = 100000000;
const int NUM_ITERATIONS = 1000000;
benchmark("nothing", NUM_ITERATIONS, [] () {});
benchmark("get_peak_memory_in_kb",
NUM_ITERATIONS,
[&]() {get_peak_memory_in_kb();});
benchmark("sbrk",
NUM_ITERATIONS,
[&]() {sbrk(0);});
cout << endl;
return 0;
}
| 959 | C++ | 23.615384 | 73 | 0.551616 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue611/peak-memory-microbenchmark/system.h
|
#ifndef UTILS_SYSTEM_H
#define UTILS_SYSTEM_H
#define LINUX 0
#define OSX 1
#define WINDOWS 2
#if defined(_WIN32)
#define OPERATING_SYSTEM WINDOWS
#include "system_windows.h"
#elif defined(__APPLE__)
#define OPERATING_SYSTEM OSX
#include "system_unix.h"
#else
#define OPERATING_SYSTEM LINUX
#include "system_unix.h"
#endif
#include "language.h"
#include <cstdlib>
#include <iostream>
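// ABORT is a macro rather than a function so that __FILE__ and __LINE__
// refer to the site of the failed check.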
#define ABORT(msg) \
( \
(std::cerr << "Critical error in file " << __FILE__ \
<< ", line " << __LINE__ << ": " << std::endl \
<< (msg) << std::endl), \
(abort()), \
(void)0 \
)
namespace Utils {
enum class ExitCode {
PLAN_FOUND = 0,
CRITICAL_ERROR = 1,
INPUT_ERROR = 2,
UNSUPPORTED = 3,
// Task is provably unsolvable with current bound. Currently unused (see issue377).
UNSOLVABLE = 4,
// Search ended without finding a solution.
UNSOLVED_INCOMPLETE = 5,
OUT_OF_MEMORY = 6
};
NO_RETURN extern void exit_with(ExitCode returncode);
int get_peak_memory_in_kb();
const char *get_exit_code_message_reentrant(ExitCode exitcode);
bool is_exit_code_error_reentrant(ExitCode exitcode);
void register_event_handlers();
void report_exit_code_reentrant(ExitCode exitcode);
int get_process_id();
}
#endif
| 1,275 | C | 21.385965 | 87 | 0.646275 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue611/peak-memory-microbenchmark/system.cc
|
#include "system.h"
namespace Utils {
const char *get_exit_code_message_reentrant(ExitCode exitcode) {
switch (exitcode) {
case ExitCode::PLAN_FOUND:
return "Solution found.";
case ExitCode::CRITICAL_ERROR:
return "Unexplained error occurred.";
case ExitCode::INPUT_ERROR:
return "Usage error occurred.";
case ExitCode::UNSUPPORTED:
return "Tried to use unsupported feature.";
case ExitCode::UNSOLVABLE:
return "Task is provably unsolvable.";
case ExitCode::UNSOLVED_INCOMPLETE:
return "Search stopped without finding a solution.";
case ExitCode::OUT_OF_MEMORY:
return "Memory limit has been reached.";
default:
return nullptr;
}
}
bool is_exit_code_error_reentrant(ExitCode exitcode) {
switch (exitcode) {
case ExitCode::PLAN_FOUND:
case ExitCode::UNSOLVABLE:
case ExitCode::UNSOLVED_INCOMPLETE:
case ExitCode::OUT_OF_MEMORY:
return false;
case ExitCode::CRITICAL_ERROR:
case ExitCode::INPUT_ERROR:
case ExitCode::UNSUPPORTED:
default:
return true;
}
}
void exit_with(ExitCode exitcode) {
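    // Report the exit code message, then terminate the process with the
    // matching numeric code.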
report_exit_code_reentrant(exitcode);
exit(static_cast<int>(exitcode));
}
}
| 1,238 | C++ | 25.934782 | 64 | 0.667205 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue455/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
REVS = ["issue455"]
SUITE = suites.suite_satisficing_with_ipc11()
CONFIGS = {
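    # Lazy-greedy FF / LAMA-first variants; configs 02 and 04 additionally
    # add type-based exploration to the open-list alternation.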
"01-ff": [
"--heuristic",
"hff=ff(cost_type=one)",
"--search",
"lazy(alt([single(hff),single(hff, pref_only=true)]),"
"preferred=[hff],cost_type=one)"
],
"02-ff-type-const": [
"--heuristic",
"hff=ff(cost_type=one)",
"--search",
"lazy(alt([single(hff),single(hff, pref_only=true), type_based([const(1)])]),"
"preferred=[hff],cost_type=one)"
],
"03-lama-first": [
"--heuristic",
"hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true,lm_cost_type=one,cost_type=one))",
"--search",
"lazy(alt([single(hff),single(hff, pref_only=true), single(hlm), single(hlm, pref_only=true)]),"
"preferred=[hff,hlm],cost_type=one)"
],
"04-lama-first-types-ff-g": [
"--heuristic",
"hlm,hff=lm_ff_syn(lm_rhw(reasonable_orders=true,lm_cost_type=one,cost_type=one))",
"--search",
"lazy(alt([single(hff),single(hff, pref_only=true), single(hlm), single(hlm, pref_only=true), type_based([hff, g()])]),"
"preferred=[hff,hlm],cost_type=one)"
],
}
exp = common_setup.IssueExperiment(
search_revisions=REVS,
configs=CONFIGS,
suite=SUITE,
)
exp.add_absolute_report_step()
exp()
| 1,494 | Python | 28.313725 | 132 | 0.546854 |
makolon/hsr_isaac_tamp/hsr_tamp/downward/experiments/issue1018/v1.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from lab.reports import Attribute, geometric_mean
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue1018-base", "issue1018-v1"]
BUILDS = ["release"]
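# Each nick below pairs a short name with a full search configuration; the
# cross product with BUILDS yields the final CONFIGS list.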
CONFIG_NICKS = [
('pdb-greedy', ['--search', 'astar(pdb(greedy()))']),
('cpdbs-hct900', ['--search', 'astar(cpdbs(hillclimbing(max_time=900)))']),
('zopdbs-ga', ['--search', 'astar(zopdbs(genetic()))']),
]
CONFIGS = [
IssueConfig(
config_nick,
config,
build_options=[build],
driver_options=["--build", build])
for build in BUILDS
for config_nick, config in CONFIG_NICKS
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
partition="infai_2",
email="silvan.sievers@unibas.ch",
# paths obtained via:
# module purge
# module -q load CMake/3.15.3-GCCcore-8.3.0
# module -q load GCC/8.3.0
# echo $PATH
# echo $LD_LIBRARY_PATH
setup='export PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/bin:/scicore/soft/apps/CMake/3.15.3-GCCcore-8.3.0/bin:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/bin:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/bin:/scicore/soft/apps/GCCcore/8.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.32-GCCcore-8.3.0/lib:/scicore/soft/apps/cURL/7.66.0-GCCcore-8.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-8.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-8.3.0/lib:/scicore/soft/apps/ncurses/6.1-GCCcore-8.3.0/lib:/scicore/soft/apps/GCCcore/8.3.0/lib64:/scicore/soft/apps/GCCcore/8.3.0/lib')
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_parser(exp.SINGLE_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
attributes = exp.DEFAULT_TABLE_ATTRIBUTES
exp.add_comparison_table_step(attributes=attributes)
exp.run_steps()
| 2,738 | Python | 37.577464 | 808 | 0.717677 |