python_code | repo_name | file_path |
---|---|---|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import re
import tokenize
import typing as tp
from io import BytesIO
import black # type: ignore
from codegen_sources.preprocessing.obfuscation.bobskater_obfuscator import (
obfuscateString,
)
from codegen_sources.preprocessing.obfuscation.utils_deobfuscation import dico_to_string
from .lang_processor import LangProcessor, NEWLINE_TOK
from .tokenization_utils import process_string
class PythonProcessor(LangProcessor):
def __init__(self) -> None:
self.spetoken2char = {
"STOKEN00": "#",
"STOKEN1": "\\n",
"STOKEN2": '"""',
"STOKEN3": "'''",
}
self.char2spetoken = {
value: " " + key + " " for key, value in self.spetoken2char.items()
}
@property
def language(self) -> str:
return "py" # TODO would "python" (default) be breaking stuff? this is unclear
def tokenize_code(self, code, keep_comments=False, process_strings=True):
assert isinstance(code, str)
code = code.replace(r"\r", "")
code = code.replace("\r", "")
tokens = []
try:
iterator = tokenize.tokenize(BytesIO(code.encode("utf-8")).readline)
except SyntaxError as excep:
raise SyntaxError(excep)
removed_docstr = 0
while True:
try:
toktype, tok, _, _, line = next(iterator)
except (
tokenize.TokenError,
IndentationError,
SyntaxError,
UnicodeDecodeError,
) as e:
raise ValueError(
f'Impossible to parse tokens because of incorrect source code "{e}" ...'
)
except StopIteration:
raise StopIteration(f"End of iterator before ENDMARKER token.")
if toktype == tokenize.ENCODING or toktype == tokenize.NL:
continue
elif toktype == tokenize.NEWLINE:
if removed_docstr == 1:
removed_docstr = 0
continue
tokens.append(NEWLINE_TOK)
elif toktype == tokenize.COMMENT:
if keep_comments:
com = process_string(
tok,
self.char2spetoken,
self.spetoken2char,
True,
do_whole_processing=process_strings,
)
if len(com) > 0:
tokens.append(com)
else:
continue
elif toktype == tokenize.STRING:
if tok == line.strip(): # docstring
if not keep_comments:
removed_docstr = 1
continue
else:
coms = process_string(
tok,
self.char2spetoken,
self.spetoken2char,
True,
do_whole_processing=process_strings,
)
if len(coms) > 0:
tokens.append(coms)
else:
removed_docstr = 1
else:
tokens.append(
process_string(
tok,
self.char2spetoken,
self.spetoken2char,
False,
do_whole_processing=process_strings,
)
)
elif toktype == tokenize.INDENT:
tokens.append("INDENT")
elif toktype == tokenize.DEDENT:
# empty block
if tokens[-1] == "INDENT":
tokens = tokens[:-1]
else:
tokens.append("DEDENT")
elif toktype == tokenize.ENDMARKER:
tokens.append("ENDMARKER")
break
else:
tokens.append(tok)
assert tokens[-1] == "ENDMARKER", "Error, no end marker"
return tokens[:-1]
def detokenize_code(self, code):
# recreate lines with \n and the appropriate indentation,
# removing the INDENT / DEDENT tokens
assert isinstance(code, str) or isinstance(code, list)
if isinstance(code, list):
code = " ".join(code)
code = code.replace("ENDCOM", NEWLINE_TOK)
code = code.replace("▁", "SPACETOKEN")
lines = code.split(NEWLINE_TOK)
tabs = ""
for i, line in enumerate(lines):
line = line.strip()
if line.startswith("INDENT "):
tabs += " "
line = line.replace("INDENT ", tabs)
elif line.startswith("DEDENT"):
number_dedent = line.count("DEDENT")
tabs = tabs[4 * number_dedent :]
line = line.replace("DEDENT", "")
line = line.strip()
line = tabs + line
elif line == "DEDENT":
line = ""
else:
line = tabs + line
lines[i] = line
untok_s = "\n".join(lines)
# find string and comment with parser and detokenize string correctly
try:
for toktype, tok, _, _, line in tokenize.tokenize(
BytesIO(untok_s.encode("utf-8")).readline
):
if toktype == tokenize.STRING or toktype == tokenize.COMMENT:
tok_ = (
tok.replace("STRNEWLINE", "\n")
.replace("TABSYMBOL", "\t")
.replace(" ", "")
.replace("SPACETOKEN", " ")
)
untok_s = untok_s.replace(tok, tok_)
except KeyboardInterrupt:
raise
except:
# TODO raise ValueError(f'Invalid python function \n {code}\n')
pass
# detokenize imports
untok_s = (
untok_s.replace(". ", ".")
.replace(" .", ".")
.replace("import.", "import .")
.replace("from.", "from .")
)
# special strings
string_modifiers = ["r", "u", "f", "rf", "fr", "b", "rb", "br"]
for modifier in string_modifiers + [s.upper() for s in string_modifiers]:
untok_s = untok_s.replace(f" {modifier} '", f" {modifier}'").replace(
f' {modifier} "', f' {modifier}"'
)
untok_s = untok_s.replace("> >", ">>").replace("< <", "<<")
return untok_s
def obfuscate_code(self, code):
res, dico = obfuscateString(code, obfuscateNames=True, removeDocstrings=False)
return res, dico_to_string(dico)
def extract_functions(
self, code: tp.Union[str, tp.List[str]], tokenized: bool = True,
) -> tp.Tuple[tp.List[str], tp.List[str]]:
"""Extract functions from tokenized python code"""
if not tokenized:
raise ValueError(
"Function extraction not available for PythonProcessor and untokenized files. Please use PythonTreeSitterProcessor"
)
if isinstance(code, str):
tokenized_code = code.split()
else:
tokenized_code = code
assert isinstance(tokenized_code, list)
tokens = iter(tokenized_code)
functions_standalone = []
functions_class = []
number_indent = 0
try:
token = next(tokens)
except StopIteration:
return [], []
while True:
try:
if token == "def":
function = ["def"]
while not (token == "DEDENT" and number_indent == 0):
token = next(tokens)
if token == "INDENT":
number_indent += 1
elif token == "DEDENT":
number_indent -= 1
function.append(token)
try:
str_function = " ".join(function)
if is_python_2(str_function):
token = next(tokens)
continue
if function[function.index("(") + 1] == "self":
functions_class.append(str_function)
else:
functions_standalone.append(str_function)
except KeyboardInterrupt:
raise
except:
print(function)
token = next(tokens)
else:
token = next(tokens)
except StopIteration:
break
return functions_standalone, functions_class
def get_function_name(self, function):
assert isinstance(function, str) or isinstance(function, list)
if isinstance(function, str):
function = function.split()
return function[function.index("def") + 1]
@staticmethod
def format(code: str) -> str:
"""normalizes the input code by formatting it"""
return apply_black(code)
def is_python_2(code: str) -> bool:
if (
re.search("print [^(]", code) is None
and re.search("raise \w+ ,", code) is None
and re.search("except \w+ ,", code) is None
and re.search("[^ ]+ = \d+ L", code) is None
and re.search(".[ ]*iterkeys[ ]*\([ ]*\)", code) is None
and re.search(".[ ]*itervalues[ ]*\([ ]*\)", code) is None
and re.search(".[ ]*iteritems[ ]*\([ ]*\)", code) is None
and re.search("xrange[ ]*\(", code) is None
and re.search("imap[ ]*\(", code) is None
):
return False
else:
return True
def apply_black(code: str, line_length: int = 88):
"""Apply black to code"""
try:
mode = black.FileMode(line_length=line_length)
return black.format_str(code, mode=mode)
except KeyboardInterrupt:
raise
except Exception:
return code
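# Usage sketch (illustrative): round-trip a small function through the
# processor defined above and run the Python-2 heuristic. It only relies on
# the definitions in this file; `format`/`apply_black` would additionally
# require black to be installed.
if __name__ == "__main__":
    processor = PythonProcessor()
    source = "def add(a, b):\n    return a + b\n"
    tokens = processor.tokenize_code(source)
    # e.g. ['def', 'add', '(', 'a', ',', 'b', ')', ':', 'NEW_LINE', 'INDENT',
    #       'return', 'a', '+', 'b', 'NEW_LINE', 'DEDENT']
    print(tokens)
    print(processor.detokenize_code(tokens))
    print(is_python_2("print 'hello'"))  # True: Python 2 style print statement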
|
CodeGen-main
|
codegen_sources/preprocessing/lang_processors/python_processor.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import re
from pathlib import Path
from .java_processor import JAVA_CHAR2TOKEN, JAVA_TOKEN2CHAR
from .tokenization_utils import ind_iter
from .tree_sitter_processor import (
COMMENT_TYPES,
NEWLINE_TOK,
TreeSitterLangProcessor,
TREE_SITTER_ROOT,
)
import tree_sitter as ts
import typing as tp
RUST_TOKEN2CHAR = JAVA_TOKEN2CHAR.copy()
RUST_CHAR2TOKEN = JAVA_CHAR2TOKEN.copy()
class RustProcessor(TreeSitterLangProcessor):
def __init__(self, root_folder: Path = TREE_SITTER_ROOT) -> None:
super().__init__(
ast_nodes_type_string=[
"comment",
"line_comment",
"block_comment",
"string_literal",
"raw_string_literal",
"char_literal",
],
stokens_to_chars=RUST_TOKEN2CHAR,
chars_to_stokens=RUST_CHAR2TOKEN,
root_folder=root_folder,
)
def dfs(
self,
code: bytes,
node: ts.Node,
tokens: tp.List[str],
tokens_type: tp.List[str],
scope_info: bool = False,
) -> None:
previous_endpoints = [0]
return self._dfs(code, node, tokens, tokens_type, previous_endpoints)
def _dfs(self, code, node, tokens, tokens_type, previous_endpoints):
if len(node.children) == 0 or node.type in self.ast_nodes_type_string:
snippet = code[node.start_byte : node.end_byte]
if node.start_byte > previous_endpoints[-1]:
previous_snippet = (
code[previous_endpoints[-1] : node.start_byte]
.strip()
.decode("utf8")
)
if len(previous_snippet) > 0:
tokens.append(previous_snippet)
tokens_type.append("was_missing")
previous_endpoints.append(node.start_byte)
if isinstance(snippet, bytes):
snippet = snippet.decode("utf8")
if len(snippet) > 0:
tokens.append(snippet)
tokens_type.append(node.type)
previous_endpoints.append(node.end_byte)
return
for child in node.children:
self._dfs(code, child, tokens, tokens_type, previous_endpoints)
def detokenize_code(self, code):
assert isinstance(code, str) or isinstance(code, list)
if isinstance(code, list):
code = " ".join(code)
code = re.sub(r"' (.) '", r"'\1'", code)
return super().detokenize_code(code)
def get_function_name(self, function):
assert isinstance(function, str) or isinstance(
function, list
), f"function is not the right type, should be str or list : {function}"
if isinstance(function, str):
function = function.split()
assert "fn" in function, "function definition in rust should contain token 'fn'"
return function[function.index("fn") + 1]
def extract_arguments(self, function):
return self.extract_arguments_using_parentheses(function)
def extract_functions(
self, code: tp.Union[str, tp.List[str]], tokenized: bool = True,
) -> tp.Tuple[tp.List[str], tp.List[str]]:
"""Extract functions from tokenized rust code"""
# TODO: make it use the AST to work on untokenized code
if not tokenized:
assert isinstance(code, str)
code = " ".join(self.tokenize_code(code))
if isinstance(code, list):
code_str = " ".join(code)
else:
code_str = code
try:
code_str = (
code_str.replace("ENDCOM", "\n")
.replace("▁", "SPACETOKEN")
.replace(NEWLINE_TOK, "\n")
)
tokens, token_types = self.get_tokens_and_types(code_str)
tokens_types = list(zip(tokens, token_types))
except KeyboardInterrupt:
raise
except:
return [], []
i = ind_iter(len(tokens_types))
functions_standalone = []
functions_class = []
in_class = False
class_indentation = 0
try:
token, token_type = tokens_types[i.i]
except:
return [], []
while True:
try:
if token == "struct" or token == "trait" or token == "impl":
in_class = True
if in_class and token == "{":
class_indentation += 1
if in_class and token == "}":
class_indentation -= 1
if class_indentation < 0:
raise ValueError("Issue parsing the scopes of the file")
if class_indentation == 0:
in_class = False
# detect function
if token == "fn":
# We are at the beginning of the function
token, token_type = tokens_types[i.i]
function = [token]
token_types = [token_type]
definition_only = False
while token != "{":
i.next()
token, token_type = tokens_types[i.i]
if token == ";":
definition_only = True
function = []
break
if token_type in COMMENT_TYPES:
token = token.strip()
token += " ENDCOM"
function.append(token)
token_types.append(token_type)
if definition_only:
continue
if token == "{":
number_indent = 1
while not (token == "}" and number_indent == 0):
try:
i.next()
token, token_type = tokens_types[i.i]
if token == "{":
number_indent += 1
elif token == "}":
number_indent -= 1
if token_type in COMMENT_TYPES:
token = token.strip()
token += " ENDCOM"
function.append(token)
except StopIteration:
break
function_str = " ".join(function)
function_str = function_str.strip()
function_str = function_str.replace("\n", "ENDCOM").replace(
"SPACETOKEN", "▁"
)
if in_class:
functions_class.append(function_str)
else:
functions_standalone.append(function_str)
i.next()
token = tokens_types[i.i][0]
except KeyboardInterrupt:
raise
except:
break
return functions_standalone, functions_class
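# Usage sketch (illustrative): get the name and the standalone functions of an
# already-tokenized Rust snippet. Instantiating RustProcessor assumes the
# tree-sitter grammars have been built under TREE_SITTER_ROOT.
if __name__ == "__main__":
    processor = RustProcessor()
    tokenized = "pub fn add ( a : i32 , b : i32 ) -> i32 { a + b }"
    print(processor.get_function_name(tokenized))  # "add"
    standalone, in_impl = processor.extract_functions(tokenized, tokenized=True)
    print(standalone, in_impl)  # one standalone function, no impl/trait method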
|
CodeGen-main
|
codegen_sources/preprocessing/lang_processors/rust_processor.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import re
from .lang_processor import LangProcessor, NEWLINE_TOK
IR_LANGUAGE_NAME = "ir"
class IRProcessor(LangProcessor):
def tokenize_code(
self, code: str, keep_comments: bool = False, process_strings: bool = True
):
code = code.replace("\n", f" NEW_LINE ").replace("\r", "")
return re.sub(r"\s+", " ", code).split(" ")
def detokenize_code(self, code):
return code.replace(f" NEW_LINE ", "\n").replace(NEWLINE_TOK, "\n")
|
CodeGen-main
|
codegen_sources/preprocessing/lang_processors/ir_processor.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from .lang_processor import LangProcessor as LangProcessor # for explicit reimport
from .tree_sitter_processor import TREE_SITTER_ROOT as TREE_SITTER_ROOT
from .cpp_processor import CppProcessor as CppProcessor
from .go_processor import GoProcessor as GoProcessor
from .java_processor import JavaProcessor as JavaProcessor
from .python_processor import PythonProcessor as PythonProcessor
from .python_tree_sitter_processor import (
PythonTreeSitterProcessor as PythonTreeSitterProcessor,
)
from .rust_processor import RustProcessor as RustProcessor
from .ir_processor import IRProcessor as IRProcessor
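# Usage sketch (illustrative): importing this package registers every concrete
# processor class in LangProcessor.processors, keyed by the lowercased
# class-name prefix (e.g. "java" for JavaProcessor). Instantiating the
# tree-sitter based processors additionally requires the grammars to be built.
if __name__ == "__main__":
    print(sorted(LangProcessor.processors))
    assert LangProcessor.processors["java"] is JavaProcessor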
|
CodeGen-main
|
codegen_sources/preprocessing/lang_processors/__init__.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import re
import uuid
import logging
import itertools
import dataclasses
import typing as tp
from pathlib import Path
from io import BytesIO
import tokenize
import tree_sitter as ts
from codegen_sources.model.src.data.dictionary import (
ENDBLOCK,
ENDCLASS,
ENDFUNC,
)
from codegen_sources.preprocessing.obfuscation import utils_deobfuscation
from codegen_sources.preprocessing.obfuscation.bobskater_obfuscator import (
obfuscateString,
)
from codegen_sources.preprocessing.obfuscation.utils_deobfuscation import dico_to_string
from . import tree_sitter_processor as tsp
from .python_processor import is_python_2
from .utils import obfuscation_tokens
INDENT = "INDENT"
DEDENT = "DEDENT"
logger = logging.getLogger(__name__)
class PythonTreeSitterProcessor(tsp.TreeSitterLangProcessor):
def __init__(self, root_folder: Path = tsp.TREE_SITTER_ROOT) -> None:
spetoken2char = {
"STOKEN00": "#",
"STOKEN1": "\\n",
"STOKEN2": '"""',
"STOKEN3": "'''",
}
char2spetoken = {value: " " + key + " " for key, value in spetoken2char.items()}
super().__init__(
ast_nodes_type_string=[
"string",
"docstring",
"comment",
"string_literal",
"character_literal",
],
stokens_to_chars=spetoken2char,
chars_to_stokens=char2spetoken,
root_folder=root_folder,
new_line_sensitive=True,
)
@property
def language(self) -> str:
return "python" # legacy PythonProcessor uses "py"
def detokenize_code(self, code: tp.Union[str, tp.List[str]]) -> str:
# recreate lines with \n and the appropriate indentation,
# removing the INDENT / DEDENT tokens
# current known issues:
# - a comment inside a type node would be included in the type
assert isinstance(code, (str, list))
if isinstance(code, list):
code = " ".join(code)
code = code.replace("ENDCOM", tsp.NEWLINE_TOK)
code = code.replace(ENDBLOCK, "").replace(ENDFUNC, "").replace(ENDCLASS, "")
code = code.replace("▁", "SPACETOKEN")
lines = code.split(tsp.NEWLINE_TOK)
tabs = ""
for i, line in enumerate(lines):
line = line.strip()
if line.startswith(INDENT + " "):
tabs += " "
line = line.replace(INDENT + " ", tabs)
elif line.startswith(DEDENT):
number_dedent = line.count(DEDENT)
tabs = tabs[4 * number_dedent :]
line = line.replace(DEDENT, "")
line = line.strip()
line = tabs + line
elif line == DEDENT:
line = ""
else:
line = tabs + line
lines[i] = line
untok_s = "\n".join(lines)
# find string and comment with parser and detokenize string correctly
ref_line = ""
wip_line = ""
try:
for toktype, tok, _, _, line in tokenize.tokenize(
BytesIO(untok_s.encode("utf-8")).readline
):
if ref_line != line:
ref_line = line
# we'll be updating "wip_line" little by little
# and replacing this line in the full code
# little by little as well
wip_line = line
if toktype in (tokenize.STRING, tokenize.COMMENT):
tok_ = (
tok.replace("STRNEWLINE", "\n")
.replace("TABSYMBOL", "\t")
.replace(" ", "")
.replace("SPACETOKEN", " ")
)
# replace within the line so that replacing in the whole file
# has limited side effects
new_line = wip_line.replace(tok, tok_)
untok_s = untok_s.replace(wip_line, new_line)
wip_line = new_line
except KeyboardInterrupt as e:
raise e
except Exception as e: # pylint: disable=broad-except
# TODO raise ValueError(f'Invalid python function \n {code}\n') from e
pass
# detokenize imports
untok_s = (
untok_s.replace(". ", ".")
.replace(" .", ".")
.replace("import.", "import .")
.replace("from.", "from .")
)
# special strings
string_modifiers = ["r", "u", "f", "rf", "fr", "b", "rb", "br"]
for modifier in string_modifiers + [s.upper() for s in string_modifiers]:
untok_s = untok_s.replace(f" {modifier} '", f" {modifier}'").replace(
f' {modifier} "', f' {modifier}"'
)
untok_s = untok_s.replace("> >", ">>").replace("< <", "<<")
return untok_s
def obfuscate_code(self, code: str):
res, dico = obfuscateString(code, obfuscateNames=True, removeDocstrings=False)
return res, dico_to_string(dico)
def _get_functions_from_ast(
self,
code: str,
node: ts.Node,
class_funcs: tp.List[str],
standalone_funcs: tp.List[str],
_in_class: bool = False,
) -> None:
if node.type == "function_definition":
if _in_class:
class_funcs.append(code[node.start_byte : node.end_byte])
else:
standalone_funcs.append(code[node.start_byte : node.end_byte])
for child in node.children:
self._get_functions_from_ast(
code,
child,
class_funcs,
standalone_funcs,
_in_class or node.type == "class_definition",
)
def extract_functions(
self,
code: tp.Union[str, tp.List[str]],
tokenized: bool = True,
remove_python_2: bool = True,
) -> tp.Tuple[tp.List[str], tp.List[str]]:
"""
Extract functions from python code
tokenized; whether the code is tokenized or not
"""
standalone_funcs, class_funcs = super(
PythonTreeSitterProcessor, self
).extract_functions(code, tokenized)
if remove_python_2:
standalone_funcs = [x for x in standalone_funcs if not is_python_2(x)]
class_funcs = [x for x in class_funcs if not is_python_2(x)]
return standalone_funcs, class_funcs
def dfs(
self,
code: bytes,
node: ts.Node,
tokens: tp.List[str],
tokens_type: tp.List[str],
scope_info: bool = False, # TODO propagate?
) -> None:
if len(node.children) == 0 or node.type in self.ast_nodes_type_string:
bsnippet = code[node.start_byte : node.end_byte].strip(b" ")
snippet = bsnippet.decode("utf8")
is_docstring = False
# identify docstrings
if node.type == "string":
p = node.parent # parent should be a statement, with only the docstring
if p.type == "expression_statement" and len(p.children) == 1:
is_docstring = True
if len(snippet) > 0:
tokens.append(snippet)
tokens_type.append("docstring" if is_docstring else node.type)
if node.type == "comment":
# end-of-line comments get appended after the NEW_LINE token
# (new lines are added for expression statements), so swap them back
if len(tokens) > 1 and tokens[-2] == tsp.NEWLINE_TOK:
prev = node.prev_sibling
if prev is not None and prev.end_point[0] == node.end_point[0]:
tokens[-1], tokens[-2] = tokens[-2], tokens[-1]
tokens_type[-1], tokens_type[-2] = (
tokens_type[-2],
tokens_type[-1],
)
return
if node.type == "block":
tokens.append(tsp.NEWLINE_TOK)
tokens_type.append(tsp.NEWLINE_TOK)
tokens.append(INDENT)
tokens_type.append(INDENT)
for child in node.children:
self.dfs(code, child, tokens, tokens_type, scope_info)
if (
node.type in ("decorator", "block") or node.type.endswith("_statement")
) and tokens[-1] not in (tsp.NEWLINE_TOK, DEDENT):
# after = node.next_sibling
# if after is None or after.end_point[0] > node.start_point[0]:
tokens.append(tsp.NEWLINE_TOK)
tokens_type.append(tsp.NEWLINE_TOK)
if node.type == "block":
tokens.append(DEDENT)
tokens_type.append(DEDENT)
if scope_info:
tokens.append(ENDBLOCK)
tokens_type.append(ENDBLOCK)
if scope_info and node.type in {"function_definition"}:
tokens.append(ENDFUNC)
tokens_type.append(ENDFUNC)
if scope_info and node.type in {"class_definition"}:
tokens.append(ENDCLASS)
tokens_type.append(ENDCLASS)
def get_function_name(self, function: tp.Union[str, tp.List[str]]) -> str:
assert isinstance(function, (str, list))
if isinstance(function, str):
function = function.split()
return function[function.index("def") + 1]
def obfuscate_types(self, code: str) -> tp.Tuple[str, str]:
"""Obfuscates all the type hints of the code"""
obf_code, hints = self.extract_type_hints(code)
ready = []
toprep = []
# prefill main special methods
specials = {
".__len__": "int",
".__str__": "str",
".__repr__": "str",
".__hash__": "int",
}
for name in ["bool", "contains", "lt", "le", "eq", "ne", "gt", "ge"]:
specials[f".__{name}__"] = "bool"
for name in ["init", "init_subclass", "setattr", "setattribute"]:
specials[f".__{name}__"] = "None"
special_names = tuple(specials)
for hint in hints:
if hint.kind == "return" and hint.name.endswith(special_names):
end = hint.name.split(".")[-1]
ready.append(hint.with_value(specials["." + end]))
elif hint.name.endswith(
(".__getattr__", ".__getattribute__")
): # nearly impossible to type
ready.append(hint)
elif not hint.value: # nothing to predict
ready.append(hint)
else:
toprep.append(hint)
# replace!
tokens = obfuscation_tokens()
cleaner = TypeCleaner()
dico = {}
for hint in toprep:
tok = next(tokens)
value = cleaner.clean(hint.value)
# add optional if actually optional
if hint.default is not None and hint.default == "None":
if not value.startswith("Optional"):
value = f"Optional [ {value} ]"
dico[tok] = " ".join(self.tokenize_code(value)[:-1])
ready.append(hint.with_value(tok))
repl = {h.uid: h.to_string() for h in ready}
# probably slow, but safer than format, which may do weird stuff
for name, val in repl.items():
obf_code = obf_code.replace("{" + name + "}", val)
for string in cleaner.get_imports(obf_code):
obf_code = obf_code.replace(string, "")
return obf_code, utils_deobfuscation.dico_to_string(dico)
def extract_type_hints(self, code: str) -> tp.Tuple[str, tp.List["TypeHint"]]:
"""Extract all type hint emplacements in the code
Parameter
---------
code: str
Python code to process
Returns
-------
str
the code, where type hint emplacements are filled with identifiers
replacing this identifier with "" is equivalent to having no type,
otherwise the replacement must include ":" or "->" depending on the kind
of type hint
list of TypeHint:
the list of TypeHint objects which gathers the identifier and all
type hint related information such as variable name, default value etc
"""
handler = CodeHandler(code)
tree = self.get_ast(handler.code)
traversal = tsp.traverse_tree(tree, final=("parameters", "assignment"))
hints = []
for node in traversal:
# deal with function parameters
if node.type == "parameters":
scope = ".".join(handler.get_scopes(node))
params = 0
for sub in node.children:
parts = []
if sub.type == "identifier":
parts = [sub]
elif "parameter" in sub.type:
parts = sub.children
if not parts:
continue
name = handler.read(parts[0])
if not params and name == "self":
continue
start = parts[0].end_byte
end = start
value = ""
for p in parts:
if p.type == "type":
end = p.end_byte
value = handler.read(p)
# check for default
default = None
if "default_parameter" in sub.type:
if parts[-2].type == "=":
default = handler.read(parts[-1])
h = TypeHint(
f"{scope}.{name}",
value=value,
kind="parameter",
default=default,
)
handler.add_replacement(start, end, "{" + h.uid + "}")
hints.append(h)
params += 1
# safeguard for errors
children = itertools.takewhile(
lambda n: n.type not in ("block", "expression_statement"),
node.parent.children,
)
sequence = ("parameters", "->", "type", ":")
parts = [n for n in children if n.type in sequence][-4:]
# now deal with function definitions
if tuple(n.type for n in parts) == sequence: # type is present
value = handler.read(parts[2])
h = TypeHint(scope, value=value, kind="return")
handler.add_replacement(
parts[0].end_byte, parts[2].end_byte, "{" + h.uid + "}"
)
hints.append(h)
elif parts: # no type
h = TypeHint(scope, value="", kind="return")
start = parts[-1].start_byte
handler.add_replacement(start, start, "{" + h.uid + "}")
hints.append(h)
# deal with typed variables outside functions
elif node.type == "assignment":
parts = node.children[:3]
if tuple(n.type for n in parts[1:]) == (":", "type"):
if parts[0].type not in ["identifier", "attribute"]:
continue
id_nodes = parts[:1]
scopes = handler.get_scopes(node)
if parts[0].type == "attribute":
id_nodes = [
x for x in parts[0].children[1:] if x.type == "identifier"
]
if scopes[-1] == "__init__":
scopes = scopes[:-1]
name = ".".join(scopes + [handler.read(x) for x in id_nodes])
value = handler.read(node.children[2])
h = TypeHint(name, value=value, kind="variable")
handler.add_replacement(
node.children[0].end_byte,
node.children[2].end_byte,
"{" + h.uid + "}",
)
hints.append(h)
return handler.generate(), hints
@dataclasses.dataclass
class Replacement:
start: int
end: int
value: bytes
class CodeHandler:
"""Simpler operations on code with tree sitter nodes
Parameters
----------
code: str or bytes
Code parsed by tree sitter
"""
def __init__(self, code: tp.Union[str, bytes]) -> None:
if not isinstance(code, bytes):
code = code.encode("utf8")
self.code = code
self.replacements: tp.List[Replacement] = []
def read(self, node: tp.Union[Replacement, ts.Node]) -> str:
"""Reads the string referred by the provided node"""
if isinstance(node, Replacement):
return self.read_range(node.start, node.end)
return self.read_range(node.start_byte, node.end_byte)
def read_range(self, start: int, end: int) -> str:
"""Reads the string within the provided range"""
return self.code[start:end].decode("utf8")
def add_replacement(
self, start: int, end: int, string: tp.Union[bytes, str]
) -> None:
"""Adds a range from the base string which needs to be replaced by the given string
Caution: ranges must be added in increasing sequence, and must be non-overlapping
"""
if isinstance(string, str):
string = string.encode("utf8")
repl = Replacement(start, end, string)
if self.replacements:
last = self.replacements[-1]
if start < last.end:
error = ["Overlapping or unsorted replacements"]
for z in (repl, last):
error.append(f"({z}, {self.read(z)} -> {z.value.decode('utf8')})")
raise ValueError("\n".join(error))
self.replacements.append(repl)
def add_replacement_from_node(
self, node: ts.Node, string: tp.Union[bytes, str]
) -> None:
"""Adds a node which needs to be replaced by a given string
Caution: nodes must be added in increasing sequence, and must be non-overlapping
"""
self.add_replacement(node.start_byte, node.end_byte, string)
def generate(self) -> str:
"""Generate the new code with replacements"""
parts = []
start = 0
for repl in self.replacements:
# code up to the node
parts.append(self.code[start : repl.start])
parts.append(repl.value)
start = repl.end
parts.append(self.code[start:])
return b"".join(parts).decode("utf8")
def get_scopes(self, node: ts.Node) -> tp.List[str]:
scopes = []
while node is not None:
if "_definition" in node.type:
ids = [n for n in node.children if n.type == "identifier"]
if ids:
scopes.append(self.read(ids[0]))
node = node.parent
return list(reversed(scopes))
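# Usage sketch (illustrative): CodeHandler records byte-range replacements and
# applies them in order when generate() is called.
if __name__ == "__main__":
    handler = CodeHandler("x: int = 1")
    handler.add_replacement(1, 6, "")  # drop the ": int" annotation
    print(handler.generate())  # "x = 1"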
class TypeCleaner:
"""Aims at:
- identifying imports so as to remove them (avoids biasing the model)
- uniformizing how types are expressed
"""
# TODO
# - order in unions
# - add optional ??
_TAG = "tag2Replace"
def __init__(self) -> None:
self.processor = PythonTreeSitterProcessor()
self.typing_classes = set(x for x in dir(tp) if x[0].isupper())
def get_imports(self, code: tp.Union[str, bytes]) -> tp.List[str]:
"""Get a list of imports involving typing module"""
handler = CodeHandler(code)
tree = self.processor.get_ast(code)
import_node = [
"import_from_statement",
"import_statement",
"future_import_statement",
]
finals = ["assignment", "block", "function_definition", "class_definition"]
traversal = tsp.traverse_tree(tree, final=import_node + finals)
typing_imports_code = []
for node in traversal:
if "import" not in node.type:
continue
data = [(n.type, handler.read(n)) for n in node.children]
if len(data) < 2 or "typing" not in data[1][1].split(" ", maxsplit=1)[0]:
continue # a bit ugly but robust enough for now
typing_imports_code.append(handler.read(node))
return typing_imports_code
def clean(self, typestr: str) -> str:
"""Clean a type string to make it as uniform as possible
Eg: dict -> tp.Dict[str, tp.Any]
"""
# using a tag instead, so we can later replace it with whatever we want
# eg: tag2Replace.Dict -> tp.Dict
tag = self._TAG
handler = CodeHandler(typestr)
tree = self.processor.get_ast(handler.code)
traversal = tsp.traverse_tree(tree)
updater = {x: x[0].upper() + x[1:] for x in ["set", "list", "dict", "tuple"]}
updater["object"] = "Any"
move_after = -1
for node in traversal:
current = node
if current.start_byte < move_after:
continue
if current.type == "comment":
handler.add_replacement_from_node(current, "")
move_after = current.end_byte
continue
val = handler.read(current)
val = updater.get(val, val)
if val in self.typing_classes:
if current.parent is not None and current.parent.type == "attribute":
current = current.parent
handler.add_replacement_from_node(current, f"{tag}.{val}")
move_after = current.end_byte
output = handler.generate()
textio = f"{tag}.TextIO"
textio_ph = "!#TEXTIO_PLACEHOLDER#!" # avoid replacing TextIO by strIO
replacements = {textio: textio_ph, f"{tag}.Text": "str"}
# to be decided: replace np.ndarray?
# replacements.update({x: "NDArray" for x in ["np.ndarray", "ndarray", "np.ndarray"]})
# no line break, and use type, not string of type
replacements.update({x: "" for x in ["\n", "\r", "'", '"']})
for sin, sout in replacements.items():
output = output.replace(sin, sout)
output = output.replace(textio_ph, textio)
# sanitize main containers
list_like = ["Set", "List", "Iterator", "Iterable", "Sequence"]
add_any = {f"{tag}.{x}": f"{tag}.Any" for x in list_like}
add_any.update(
{
f"{tag}.Dict": f"str,{tag}.Any",
f"{tag}.Mapping": f"str,{tag}.Any",
f"{tag}.Callable": f"...,{tag}.Any",
f"{tag}.Generator": f"{tag}.Any,None,None",
f"{tag}.Tuple": f"{tag}.Any,...",
}
)
for cls, content in add_any.items():
# when there is no subtype, add the default one
output = re.sub(cls + r"(?!\[)", f"{cls}[{content}]", output)
output = self._reorder_union(output)
output = output.replace(" ", "").replace(tag + ".", "") # remove tag
return output
def _reorder_union(self, string: str) -> str:
"""Uniformize all unions (Union and | syntax)
by applying Union syntax and ordering the options
"""
union = f"{self._TAG}.Union"
if not any(x in string for x in [union, "|"]):
return string
handler = CodeHandler(string)
tree = self.processor.get_ast(handler.code)
traversal = tsp.traverse_tree(tree)
move_after = -1
for node in traversal:
if node.start_byte < move_after:
continue
children = self._extract_union_children(node, string)
# test
if children:
content = [handler.read(n).strip() for n in children]
content = sorted(set(self._reorder_union(x) for x in content))
is_opt = "None" in content
if is_opt:
content = [c for c in content if c != "None"]
replacement = f"{union}[{','.join(content)}]"
if len(content) == 1:
replacement = content[0]
if is_opt:
replacement = f"{self._TAG}.Optional[{replacement}]"
handler.add_replacement_from_node(node, replacement)
move_after = node.end_byte
return handler.generate()
def _extract_union_children(self, node: ts.Node, string: str) -> tp.List[ts.Node]:
"""Recursively extract all the types in the union, whatever the format
(Union or |)
"""
children = []
union = f"{self._TAG}.Union"
handler = CodeHandler(string)
if len(node.children) == 1:
child = node.children[0]
if node.start_byte == child.start_byte:
if node.end_byte == child.end_byte:
return self._extract_union_children(child, string)
if node.type == "binary_operator" and node.children[1].type == "|":
children = node.children
if (
node.type in ["expression_statement", "subscript"]
and len(node.children) > 1
):
if handler.read(node.children[0]) == union:
children = node.children[2:-1]
children = [c for c in children if c.type not in "|,"]
resplit = [self._extract_union_children(c, string) for c in children]
out = list(
itertools.chain.from_iterable(
r if r else [c] for r, c in zip(resplit, children)
)
)
return out
_counter = itertools.count()
def id_maker() -> str:
"""Safe id for replacement in the string"""
num = _counter.__next__()
return f"cg_{num}_" + uuid.uuid4().hex[:4]
@dataclasses.dataclass
class TypeHint:
"""Keep track of all information about a type hint
including a uid to use as placeholder in the origin string
"""
name: str
value: str
kind: str
default: tp.Optional[str] = None
uid: str = dataclasses.field(default_factory=id_maker, init=False)
def __post_init__(self) -> None:
if self.kind not in ("return", "parameter", "variable"):
raise ValueError(f"Unknown kind {self.kind}")
if self.default is not None and self.kind != "parameter":
raise ValueError("Default can only be specified for parameters")
def with_value(self, value: str) -> "TypeHint":
"""Creates a new TypeHint with all field similar but the value"""
out = dataclasses.replace(self, value=value)
out.uid = self.uid
return out
def to_string(self) -> str:
"""Returns the code for replacing the placeholder in the code.
This includes the : or -> operator when the type hint is specified.
"""
op = " -> " if self.kind == "return" else ": "
return "" if not self.value else op + self.value
|
CodeGen-main
|
codegen_sources/preprocessing/lang_processors/python_tree_sitter_processor.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import random
import re
import subprocess
import uuid
from pathlib import Path
import typing as tp
import codegen_sources
from codegen_sources.preprocessing.obfuscation import javalang_obfuscator
from codegen_sources.preprocessing.obfuscation.utils_deobfuscation import dico_to_string
# BEWARE: two different new line tokens?
from .tokenization_utils import ind_iter
from .tree_sitter_processor import (
TreeSitterLangProcessor,
TREE_SITTER_ROOT,
NEWLINE_TOK,
)
import tree_sitter as ts
from ...code_runners.code_runner import RUN_ROOT_DIR
from ...code_runners.utils import MAX_VIRTUAL_MEMORY, limit_virtual_memory
import typing as tp
GOOGLE_JAVA_FORMAT_PATH = (
Path(codegen_sources.__file__).parents[1]
/ "data"
/ "tools"
/ "google-java-format-1.15.0-all-deps.jar"
)
JAVA_TOKEN2CHAR: tp.Dict[str, str] = {
"STOKEN00": "//",
"STOKEN01": "/*",
"STOKEN02": "*/",
"STOKEN03": "/**",
"STOKEN04": "**/",
"STOKEN05": '"""',
"STOKEN06": "\\n",
"STOKEN07": "\\r",
"STOKEN08": ";",
"STOKEN09": "{",
"STOKEN10": "}",
"STOKEN11": r"\'",
"STOKEN12": r"\"",
"STOKEN13": r"\\",
}
JAVA_CHAR2TOKEN: tp.Dict[str, str] = {
value: " " + key + " " for key, value in JAVA_TOKEN2CHAR.items()
}
class JavaProcessor(TreeSitterLangProcessor):
def __init__(self, root_folder: Path = TREE_SITTER_ROOT) -> None:
super().__init__(
ast_nodes_type_string=[
"comment",
"block_comment",
"line_comment",
"string_literal",
"character_literal",
],
stokens_to_chars=JAVA_TOKEN2CHAR,
chars_to_stokens=JAVA_CHAR2TOKEN,
root_folder=root_folder,
)
def obfuscate_code(self, code):
res, dico = javalang_obfuscator.obfuscate(code)
return res, dico_to_string(dico)
def _get_functions_from_ast(
self,
code: str,
node: ts.Node,
class_funcs: tp.List[str],
standalone_funcs: tp.List[str],
_in_class: bool = False, # ignored
) -> None:
if node.type == "method_declaration":
function = code[node.start_byte : node.end_byte]
# There can be some issues where "{" is not in the function string.
# In that case, it is not a proper function
if "{" in function:
if "static" in function[0 : function.index("{")]:
standalone_funcs.append(function)
else:
class_funcs.append(function)
for child in node.children:
self._get_functions_from_ast(
code, child, class_funcs, standalone_funcs,
)
@staticmethod
def remove_annotation(function):
return re.sub(
r"^@ (Override|Deprecated|SuppressWarnings) (\( .*? \) )", "", function
)
def get_function_name(self, function):
return self.get_first_token_before_first_parenthesis(function)
def extract_arguments(self, function):
return self.extract_arguments_using_parentheses(function)
@staticmethod
def get_class_name(tokenized_java):
if isinstance(tokenized_java, str):
tokenized_java = tokenized_java.split()
assert (
"class" in tokenized_java
), f"No class definition or bad tokenization for {tokenized_java}"
return tokenized_java[tokenized_java.index("class") + 1]
@staticmethod
def format(code: str) -> str:
output_dir = RUN_ROOT_DIR / "formatting" / "java_formatting"
output_dir.mkdir(exist_ok=True, parents=True)
filename = f"{uuid.uuid4()}.java"
filepath = output_dir / filename
try:
with open(filepath, "w") as f:
f.write(code)
cmd = f"clang-format {filepath}"
proc = subprocess.run(
cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True
)
if proc.returncode != 0:
raise ValueError(
f"Failed to format code with error: {proc.stderr.decode()}\nThe code was:\n{code}\nFull command: {cmd}"
)
except Exception:
raise
finally:
filepath.unlink(missing_ok=True)
return proc.stdout.decode()
def get_java_compilation_errors(code, timeout=20):
file = write_java_function(code)
comp_cmd = (
f"{limit_virtual_memory(MAX_VIRTUAL_MEMORY)}; module load java; javac {file}"
)
timed_out = False
try:
proc = subprocess.run(
comp_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
executable="/bin/bash",
timeout=timeout,
)
except subprocess.TimeoutExpired:
return "timeout"
file.unlink()
classfile = file.with_suffix(".class")
assert (
timed_out or proc.returncode != 0 or classfile.is_file()
), "compilation succeeded but .class file does not exist"
assert "tmp_folder" in str(file.parent), file.parent
for compiled_f in file.parent.glob("*"):
compiled_f.unlink()
file.parent.rmdir()
if timed_out:
return "timeout"
return "success" if proc.returncode == 0 else proc.stderr.decode()
def write_java_function(f: str, out_path: Path = Path("/tmp/java_functions/")) -> Path:
rand_folder = str(random.getrandbits(64))
classname = f"JAVA_FUNC"
tmp_folder = out_path.joinpath(f"tmp_folder_{rand_folder}")
out_file = tmp_folder.joinpath(classname + ".java")
tmp_folder.mkdir(parents=True, exist_ok=True)
java_processor = JavaProcessor()
with open(out_file, "w") as writefile:
writefile.write(
"""
import java.util.*;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
"""
)
writefile.write("public class " + classname + "{\n")
code = f.replace("\r", "")
writefile.write(java_processor.detokenize_code(code))
writefile.write("}\n")
return out_file
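# Usage sketch (illustrative): two of the static helpers above, which need
# neither tree-sitter nor a JVM.
if __name__ == "__main__":
    tokenized = "public class Foo { static int add ( int a , int b ) { return a + b ; } }"
    print(JavaProcessor.get_class_name(tokenized))  # "Foo"
    annotated = '@ SuppressWarnings ( "unchecked" ) public int size ( ) { return 0 ; }'
    print(JavaProcessor.remove_annotation(annotated))  # annotation prefix stripped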
|
CodeGen-main
|
codegen_sources/preprocessing/lang_processors/java_processor.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import typing as tp
from codegen_sources.model.src.data.dictionary import (
OBF,
OBFS,
)
def obfuscation_tokens(raise_finished: bool = True) -> tp.Iterator[str]:
"""Iterates on all obfuscation tokens"""
for name in ["VAR", "FUNC", "CLASS"]:
for k in range(OBFS[name]):
yield OBF[name] % k
if raise_finished:
raise RuntimeError("Running out of obfuscation tokens")
|
CodeGen-main
|
codegen_sources/preprocessing/lang_processors/utils.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from abc import ABC
import typing as tp
NEWLINE_TOK = "NEW_LINE" # different name to avoid confusions by the tokenizer
class LangProcessor(ABC):
processors: tp.Dict[str, tp.Type["LangProcessor"]] = {}
@classmethod
def _language(cls) -> str:
# note: properties only work on instances, not on the class
# (unless we reimplement the decorator), so it's simpler to have
# a method on the class for when we need it, and the property on
# the instance for a simpler API
parts = cls.__name__.split("Processor")
if len(parts) != 2 or parts[1]:
raise RuntimeError(
"language processors class name should be that format: "
f"YourlanguageProcessor (got: {cls.__name__})"
)
return parts[0].lower()
@property
def language(self) -> str:
"""Language of the processor"""
return self._language()
@classmethod
def __init_subclass__(cls) -> None:
super().__init_subclass__()
cls.processors[cls._language()] = cls
def tokenize_code(
self, code: str, keep_comments: bool = False, process_strings: bool = True
) -> tp.List[str]:
raise NotImplementedError
def detokenize_code(self, code: tp.Union[tp.List[str], str]) -> str:
raise NotImplementedError
def obfuscate_code(self, code):
raise NotImplementedError
def obfuscate_types(self, code: str) -> tp.Tuple[str, str]:
raise NotImplementedError
def extract_functions(
self, code: tp.Union[str, tp.List[str]], tokenized: bool = True,
) -> tp.Tuple[tp.List[str], tp.List[str]]:
raise NotImplementedError
def get_function_name(self, function: str) -> str:
raise NotImplementedError
def extract_arguments(self, function):
raise NotImplementedError
@staticmethod
def format(code: str) -> str:
raise NotImplementedError
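# Usage sketch (illustrative): subclasses are registered through
# __init_subclass__ under the lowercased class-name prefix, so a hypothetical
# DemoProcessor becomes available as LangProcessor.processors["demo"].
if __name__ == "__main__":
    class DemoProcessor(LangProcessor):
        def tokenize_code(self, code, keep_comments=False, process_strings=True):
            return code.split()
        def detokenize_code(self, code):
            return " ".join(code) if isinstance(code, list) else code
    processor = LangProcessor.processors["demo"]()
    print(processor.language)  # "demo"
    print(processor.tokenize_code("a = 1"))  # ['a', '=', '1']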
|
CodeGen-main
|
codegen_sources/preprocessing/lang_processors/lang_processor.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import re
from pathlib import Path
from .java_processor import JAVA_CHAR2TOKEN, JAVA_TOKEN2CHAR
from .tokenization_utils import ind_iter
from .tree_sitter_processor import (
COMMENT_TYPES,
NEWLINE_TOK,
TreeSitterLangProcessor,
TREE_SITTER_ROOT,
)
import typing as tp
GO_TOKEN2CHAR = JAVA_TOKEN2CHAR.copy()
GO_CHAR2TOKEN = JAVA_CHAR2TOKEN.copy()
class GoProcessor(TreeSitterLangProcessor):
def __init__(self, root_folder: Path = TREE_SITTER_ROOT) -> None:
super().__init__(
ast_nodes_type_string=[
"comment",
"line_comment",
"block_comment",
"string_literal",
"raw_string_literal",
"interpreted_string_literal",
"char_literal",
],
stokens_to_chars=GO_TOKEN2CHAR,
chars_to_stokens=GO_CHAR2TOKEN,
root_folder=root_folder,
new_line_sensitive=True,
)
def detokenize_code(self, code):
assert isinstance(code, str) or isinstance(code, list)
if isinstance(code, list):
code = " ".join(code)
code = re.sub(r"' (.) '", r"'\1'", code)
return super().detokenize_code(code)
def get_function_name(self, function):
assert isinstance(function, str) or isinstance(
function, list
), f"function is not the right type, should be str or list : {function}"
if isinstance(function, str):
function = function.split()
assert (
"func" in function
), "function definition in go should contain token 'func'"
index = function.index("func") + 1
if function[index].startswith("("): # e.g. 'func (l *List) First() *Node {'
while not function[index].endswith(")"):
index += 1
index += 1
return function[index]
def extract_arguments(self, function):
return self.extract_arguments_using_parentheses(function)
def extract_functions(
self, code: tp.Union[str, tp.List[str]], tokenized: bool = True
) -> tp.Tuple[tp.List[str], tp.List[str]]:
"""Extract functions from tokenized go code"""
# TODO: make it use the AST to work on untokenized code
if not tokenized:
assert isinstance(code, str)
code = " ".join(self.tokenize_code(code))
if isinstance(code, list):
code_str = " ".join(code)
else:
code_str = code
assert isinstance(code_str, str)
try:
code_str = (
code_str.replace("ENDCOM", "\n")
.replace("▁", "SPACETOKEN")
.replace(NEWLINE_TOK, "\n")
)
tokens, token_types = self.get_tokens_and_types(code_str)
tokens_types = list(zip(tokens, token_types))
except KeyboardInterrupt:
raise
except:
return [], []
i = ind_iter(len(tokens_types))
functions_standalone = []
functions_class = []
in_class = False
class_indentation = 0
try:
token, token_type = tokens_types[i.i]
except:
return [], []
while True:
try:
# detect function
if token == "func":
# We are at the beginning of the function
token, token_type = tokens_types[i.i]
function = [token]
token_types = [token_type]
while token != "{":
i.next()
token, token_type = tokens_types[i.i]
if token_type in COMMENT_TYPES:
token = token.strip()
token += " ENDCOM"
function.append(token)
token_types.append(token_type)
if token == "{":
number_indent = 1
while not (token == "}" and number_indent == 0):
try:
i.next()
token, token_type = tokens_types[i.i]
if token == "{":
number_indent += 1
elif token == "}":
number_indent -= 1
if token_type in COMMENT_TYPES:
token = token.strip()
token += " ENDCOM"
function.append(token)
except StopIteration:
break
function_str = " ".join(function)
function_str = function_str.strip()
function_str = function_str.replace("\n", NEWLINE_TOK).replace(
"SPACETOKEN", "▁"
)
if in_class:
functions_class.append(function_str)
else:
functions_standalone.append(function_str)
i.next()
token = tokens_types[i.i][0]
except KeyboardInterrupt:
raise
except:
break
return functions_standalone, functions_class
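# Usage sketch (illustrative): get_function_name skips a method receiver such
# as "( l * List )" before returning the name. Instantiating GoProcessor
# assumes the tree-sitter grammars have been built under TREE_SITTER_ROOT.
if __name__ == "__main__":
    processor = GoProcessor()
    tokenized = "func ( l * List ) First ( ) * Node { return l . head }"
    print(processor.get_function_name(tokenized))  # "First"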
|
CodeGen-main
|
codegen_sources/preprocessing/lang_processors/go_processor.py
|
CodeGen-main
|
codegen_sources/test_generation/__init__.py
|
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Translate sentences from the input stream.
# The model will be faster if sentences are sorted by length.
# Input sentences must have the same tokenization and BPE codes as the ones
# used in the model.
import argparse
import typing as tp
from concurrent.futures import ThreadPoolExecutor
from hashlib import sha256
from itertools import repeat
from pathlib import Path
import numpy as np
import submitit
from tqdm import tqdm
from codegen_sources.preprocessing.lang_processors.java_processor import (
get_java_compilation_errors,
)
from codegen_sources.preprocessing.lang_processors import JavaProcessor
from codegen_sources.preprocessing.utils import bool_flag
from codegen_sources.test_generation.utils import chunks
CHUNKSIZE = 2500
primitive_types = {"short", "int", "long", "float", "double", "boolean", "char"}
java_standard_types = {
"Double",
"Float",
"String",
"Integer",
"Boolean",
"Long",
"Short",
}
java_simple_types = primitive_types | java_standard_types
java_supported_types = (
java_simple_types
| {f"{t}[]" for t in java_simple_types}
| {f"ArrayList<{t}>" for t in java_simple_types}
)
def uses_ios(codestring):
return (
"java.io.File(" in codestring.replace(" ", "")
or "io.FileWriter" in codestring.replace(" ", "")
or "zip.ZipFile" in codestring.replace(" ", "")
or "IOException" in codestring
)
def extract_return_type_java(f):
return f.split("(", 1)[0].split()[-2]
def is_simple_standalone_func(func):
global java_processor
try:
args = java_processor.extract_arguments(func)
return_type = extract_return_type_java(func)
if all(
[
arg.replace("final ", "").replace(" ", "")
in java_supported_types | {"None"}
for arg in args[0]
]
) and return_type in java_supported_types | {"void"}:
if (
return_type == "void"
and not any(
[
"[]" in arg.replace(" ", "") or "List" in arg or "Array" in arg
for arg in args[0]
]
)
or java_processor.get_function_name(func).strip() == "main"
):
return False
if (
get_java_compilation_errors(
java_processor.detokenize_code(func), timeout=120
)
== "success"
):
return True
return False
except ValueError:
return False
except IndexError:
return False
def select_functions(funcpath):
executor = ThreadPoolExecutor()
global java_processor
java_processor = JavaProcessor()
jobs = []
with open(funcpath, "r") as f:
functions = list(set([line.split(" | ", 1)[1] for line in f.readlines()]))
for func in functions:
jobs.append(executor.submit(is_simple_standalone_func, func))
mask = [j.result() for j in jobs]
return np.array(functions)[mask]
def select_functions_for_file(f_path, output_path):
selected_functions = select_functions(f_path)
out_filepath = output_path.joinpath(f_path.name)
with open(out_filepath, "w") as out_f:
print(f"Writing {len(selected_functions)} lines to {out_filepath}")
out_f.writelines(selected_functions)
if __name__ == "__main__":
print("#" * 10, "Selecting input functions", "#" * 10)
parser = argparse.ArgumentParser(description="")
parser.add_argument(
"--input_path", help="path to the input files",
)
parser.add_argument(
"--output_path", type=str, help="where the files should be outputed",
)
parser.add_argument(
"--local",
type=bool_flag,
default=True,
help="True if you want to run the processing pipeline locally, false if want to use submitit.",
)
parser.add_argument(
"--rerun",
type=bool_flag,
default=False,
help="True if you want to run the processing pipeline locally, false if want to use submitit.",
)
args = parser.parse_args()
input_path = Path(args.input_path)
assert input_path.is_dir()
output_path = Path(args.output_path)
output_path.mkdir(parents=True, exist_ok=True)
cluster: tp.Optional[submitit.AutoExecutor] = None
if args.local is False:
cluster = submitit.AutoExecutor(output_path / "log")
cluster.update_parameters(cpus_per_task=80, mem_gb=300)
cluster.update_parameters(timeout_min=40)
tok_files_names = "java.[0-9]*.sa.tok"
tok_files = sorted(list(input_path.glob(tok_files_names)))
if not args.rerun:
tok_files = [
f_path
for f_path in tok_files
if not (output_path.joinpath(f_path.name).is_file())
]
if cluster is None:
for f in tqdm(tok_files):
select_functions_for_file(f, output_path)
else:
jobs = cluster.map_array(
select_functions_for_file, tok_files, repeat(output_path)
)
for j in tqdm(jobs):
j.result()
selected_files = sorted(list(output_path.glob(tok_files_names)))
all_funcs = []
for f_path in selected_files:
with open(f_path) as f:
all_funcs.extend(f.readlines())
deduped_funcs = list(set(all_funcs))
deduped_funcs = [f for f in deduped_funcs if not uses_ios(f)]
deduped_funcs = [
f'{sha256(f.encode("utf8")).hexdigest()} | {f}' for f in deduped_funcs
]
deduped_funcs_chunks = list(chunks(deduped_funcs, CHUNKSIZE))
deduped_output_path = output_path.joinpath("deduped")
deduped_output_path.mkdir(exist_ok=True)
for i, chunk in enumerate(deduped_funcs_chunks):
out_file = deduped_output_path.joinpath(f"java.{i:012}.sa.tok")
with open(out_file, "w", encoding="utf-8", errors="ignore") as out:
for line in chunk:
out.write(line.strip())
out.write("\n")
print("\n")
|
CodeGen-main
|
codegen_sources/test_generation/select_java_inputs.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Translate sentences from the input stream.
# The model will be faster if sentences are sorted by length.
# Input sentences must have the same tokenization and BPE codes as the ones used in the model.
#
import sys
from pathlib import Path
ROOT_PATH = Path(__file__).absolute().parents[2]
def chunks_df(df, n):
"""Yield successive n-sized chunks from df"""
for i in range(0, len(df), n):
yield df.iloc[i : i + n]
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i : i + n]
def compute_results_one_test(test, translations, test_runner, truncate_errors=150):
# executor = ThreadPoolExecutor(max_workers=5)
# return list(executor.map(test_runner.get_tests_results, translations, repeat(test)))
return [
test_runner.get_tests_results(code, test=test, truncate_errors=truncate_errors)
for code in translations
]
def get_beam_size(input_df, results_columns="translated_python_functions_beam_"):
beam_size = 0
while f"{results_columns}{beam_size}" in input_df:
beam_size += 1
return beam_size
def add_root_to_path():
print(f"adding {ROOT_PATH} to path")
sys.path.append(str(ROOT_PATH))
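# Usage sketch (illustrative): chunks() yields successive fixed-size slices and
# get_beam_size() counts how many "<prefix><i>" columns exist; any object
# supporting "column in obj" (such as a pandas DataFrame) works.
if __name__ == "__main__":
    print(list(chunks(list(range(7)), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]
    fake_df = {
        "translated_python_functions_beam_0": [],
        "translated_python_functions_beam_1": [],
    }
    print(get_beam_size(fake_df))  # 2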
|
CodeGen-main
|
codegen_sources/test_generation/utils.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
import argparse
import pickle
from concurrent.futures.process import ProcessPoolExecutor
from itertools import repeat
from pathlib import Path
import numpy as np
import pandas as pd
from submitit import AutoExecutor
from tqdm import tqdm
root_path = Path(__file__).absolute().parents[2]
print(f"adding {root_path} to path")
sys.path.append(str(root_path))
from codegen_sources.model.src.logger import create_logger
from codegen_sources.preprocessing.utils import bool_flag
from codegen_sources.test_generation.evosuite_tests_translators.evosuite_to_cpp import (
EvosuiteToCpp,
)
from codegen_sources.test_generation.evosuite_tests_translators.evosuite_to_python import (
EvosuiteToPython,
)
from codegen_sources.code_runners import test_runners
from codegen_sources.test_generation.utils import (
chunks,
compute_results_one_test,
get_beam_size,
)
logger = create_logger(None, 0)
CHUNKSIZE_TEST_RESULTS = 2000
def get_joined_func_tests_df(csv_path, functions_path):
assert Path(csv_path).is_file(), csv_path
tests_dataframe = pd.read_csv(csv_path)
java_functions_path = Path(functions_path)
# reading functions to DF
java_functions = [
func
for f in java_functions_path.glob("java.0000*.sa.tok")
for func in open(f).readlines()
]
java_functions = pd.DataFrame(
{
"func_ids": [f.split(" | ")[0] for f in java_functions],
"java_function": [f.split(" | ")[1] for f in java_functions],
}
)
# getting the IDs of the functions. The class name is created from it
tests_dataframe["func_ids"] = tests_dataframe.TARGET_CLASS.apply(
lambda x: x.replace("CLASS_", "", 1)
)
merged = tests_dataframe.merge(java_functions, how="inner", on="func_ids")
return merged
def compute_all_tests_results(tests, functions, test_runner, output_path=None):
executor = ProcessPoolExecutor()
assert len(tests) == len(
functions
), f"tests of length {len(tests)} while functions are of length {len(functions)}"
jobs = [
executor.submit(compute_results_one_test, t, fs, test_runner, 150)
for t, fs in zip(tests, functions)
]
res = []
for i, job in enumerate(jobs):
res.append(job.result())
if i % 100 == 0:
logger.info(f"computed results for {i} tests over {len(tests)}")
logger.info(f"Successes: {sum([r[0][0] == 'success' for r in res])}")
logger.info(f"timeouts: {sum([r[0][0] == 'timeout' for r in res])}")
# print(res[-1])
if output_path is not None:
with open(output_path, "wb") as f:
pickle.dump(res, f)
return res
def translate_tests(java_tests, translator):
executor = ProcessPoolExecutor()
return list(executor.map(safe_translate_test, repeat(translator), java_tests))
def safe_translate_test(test_translator, code):
try:
return test_translator.translate(code)
except AssertionError as e:
return f"AssertionError : {e}"
except TypeError as e:
return f"TypeError : {e}"
def compute_test_results(
translations_csv_path, target_language, output_folder, local, rerun=False
):
logger.info("#" * 10 + "Computing Test Results" + "#" * 10)
output_folder.mkdir(exist_ok=True, parents=True)
logger.info(
f"Computing test results for language {target_language} in {translations_csv_path}\n Results will be outputed in {output_folder}"
)
input_df = pd.read_csv(translations_csv_path)
if target_language == "python":
test_runner = test_runners.PythonEvosuiteTestRunner()
test_translator = EvosuiteToPython()
else:
assert target_language == "cpp"
test_runner = test_runners.CppEvosuiteTestRunner(compilation_timeout=30)
test_translator = EvosuiteToCpp()
translated_func_col = f"translated_{target_language}_functions_beam_"
beam_size = get_beam_size(input_df, translated_func_col)
translated_functions = np.array(
[input_df[f"{translated_func_col}{i}"].values for i in range(beam_size)]
).transpose()
assert len(translated_functions) == len(
input_df
), f"{translated_functions.shape} / {len(input_df)}"
assert translated_functions.shape[1] == beam_size
logger.info(
f"computing output for {len(translated_functions)} tests and {beam_size} functions per test"
)
logger.info(f"Translating Tests")
translated_tests = translate_tests(input_df.tests_strings.values, test_translator)
assert len(translated_tests) == len(input_df)
input_df[f"{target_language}_translated_tests"] = translated_tests
logger.info(
f"Finished translating {len(translated_tests)} tests to {target_language}"
)
if local is False:
cluster = AutoExecutor(output_folder.joinpath("log"))
cluster.update_parameters(
cpus_per_task=40, mem_gb=300, partition="learnlab",
)
cluster.update_parameters(timeout_min=500)
else:
cluster = None
tests_chunks = list(chunks(translated_tests, CHUNKSIZE_TEST_RESULTS))
    func_chunks = list(chunks(translated_functions, CHUNKSIZE_TEST_RESULTS))
logger.info(f"{len(tests_chunks)} chunks of size {len(tests_chunks[0])}")
    assert len(tests_chunks) == len(func_chunks)
chunk_output_paths = [
output_folder.joinpath(f"{target_language}_chunk_{i}.pkl")
for i in range(len(tests_chunks))
]
missing_output_files = chunk_output_paths
if not rerun:
indices_to_run = [
i for i, p in enumerate(chunk_output_paths) if not (p.is_file())
]
logger.info(
f"Running on the remaining {len(indices_to_run)} among {len(chunk_output_paths)} files"
)
tests_chunks = [tests_chunks[i] for i in indices_to_run]
        func_chunks = [func_chunks[i] for i in indices_to_run]
missing_output_files = [chunk_output_paths[i] for i in indices_to_run]
if cluster is None:
        for tc, fc, output in zip(tests_chunks, func_chunks, missing_output_files):
compute_all_tests_results(tc, fc, test_runner, output)
else:
jobs = cluster.map_array(
compute_all_tests_results,
tests_chunks,
            func_chunks,
repeat(test_runner),
missing_output_files,
)
for j in tqdm(jobs):
_ = j.result()
results = []
for p in chunk_output_paths:
with open(p, "rb") as pickle_file:
results.append(pickle.load(pickle_file))
results = [code for r in results for code in r]
for i in range(beam_size):
input_df[f"test_results_{target_language}_{i}"] = [res[i] for res in results]
outpath = output_folder.joinpath(f"test_results_{target_language}_df.csv")
logger.info(f"Writing results in {outpath}")
input_df.to_csv(outpath, index=False)
logger.info("\n" * 2)
def parse_arguments():
parser = argparse.ArgumentParser(description="")
parser.add_argument(
"--translations_csv_path", help="path to the input files",
)
parser.add_argument(
"--output_path", type=str, help="where the files should be outputed",
)
parser.add_argument(
"--local",
type=bool_flag,
default=True,
help="True if you want to run the processing pipeline locally, false if want to use submitit.",
)
parser.add_argument(
"--target_language", help="target language. python or cpp", default="python",
)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_arguments()
compute_test_results(
args.translations_csv_path,
args.target_language,
Path(args.output_path),
args.local,
)
|
CodeGen-main
|
codegen_sources/test_generation/compute_test_results.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import os
import sys
import subprocess
import typing as tp
from concurrent.futures.thread import ThreadPoolExecutor
from pathlib import Path
import argparse
import submitit
from tqdm import tqdm
import pandas as pd
root_path = Path(__file__).absolute().parents[2]
print(f"adding {root_path} to path")
sys.path.append(str(root_path))
from codegen_sources.model.src.utils import get_java_bin_path
from codegen_sources.preprocessing.lang_processors import (
LangProcessor,
JavaProcessor,
TREE_SITTER_ROOT,
)
from codegen_sources.preprocessing.utils import bool_flag
import numpy as np
EVOSUITE_JAR_PATH = Path(
Path(__file__).absolute().parent.joinpath("evosuite-1.1.0.jar")
)
assert (
EVOSUITE_JAR_PATH.is_file()
), "EvoSuite Jar is missing, run wget https://github.com/EvoSuite/evosuite/releases/download/v1.1.0/evosuite-1.1.0.jar"
MUTATION_SCORE_CUTOFF = 0.9
MAX_JAVA_MEM = 4096
REPORT_FILE = "statistics.csv"
def write_javacode_onefunctionperfile(
codestring: str, line_number: int, folder: Path, with_id: bool = False
):
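    """Wrap one tokenized Java function in its own public class and write it to a .java file; functions touching java.io.File or FileWriter are skipped."""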
if "java.io.File(" in codestring.replace(
" ", ""
) or "io.FileWriter" in codestring.replace(" ", ""):
return
functionname = codestring.split("(")[0].strip().split(" ")[-1]
if with_id:
assert " | " in codestring, f'missing " | " in input: {codestring}'
id_string, codestring = codestring.split(" | ", 1)
classname = f"CLASS_{id_string}"
else:
classname = "CLASS_" + functionname.upper() + f"_{line_number}"
print(classname)
filepath = folder.joinpath(classname + ".java")
writefile = open(filepath, "w")
writefile.write(
"""
import java.util.*;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;\n
"""
)
lang_processor = JavaProcessor(root_folder=TREE_SITTER_ROOT)
writefile.write("public class " + classname + "{\n")
code = codestring.replace("\r", "")
writefile.write(lang_processor.detokenize_code(code))
writefile.write("}\n")
writefile.close()
def run_command_compile_java_file(folderpath):
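    """Compile every generated Java file in the folder, one javac process per file, using a thread pool."""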
print(f"compiling files in {folderpath}")
files = os.listdir(folderpath)
executor = ThreadPoolExecutor()
jobs = []
for file in files:
jobs.append(executor.submit(compile_file, file, folderpath))
[j.result() for j in jobs]
def compile_file(file, folderpath):
try:
proc = subprocess.Popen(
f"ulimit -S -v {2 * 1024 * 1024 * 1024}; cd {folderpath} && {os.path.join(get_java_bin_path(), 'javac')} "
+ file,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
executable="/bin/bash",
)
out, err = proc.communicate(timeout=180)
except subprocess.TimeoutExpired:
return
err = err.decode("utf-8").strip()
if len(err) > 0:
print(err)
def run_command_test_generation(folderpath):
print(f"Generating tests in {folderpath}")
executor = ThreadPoolExecutor()
files = os.listdir(folderpath)
jobs = []
report_dirs = []
for file in [f for f in files if f.endswith(".class")]:
report_name = "es_report_" + file.replace(".class", "")
report_dirs.append(report_name)
jobs.append(executor.submit(create_tests, file, folderpath, report_name))
job_res = [j.result() for j in jobs]
print(
f"Percentage of timeouts: {len([j for j in job_res if j == 'timeout'])/len(job_res):.2%}"
)
consolidated_report_path = get_consolidated_report_path(folderpath)
consolidated_report_path.mkdir(exist_ok=True)
consolidated_report_path = consolidated_report_path.joinpath(REPORT_FILE)
consolidate_reports(consolidated_report_path, report_dirs, folderpath)
def get_consolidated_report_path(folderpath):
return Path(folderpath).joinpath("es-consolidated-report")
def create_tests(file, folderpath, report_name):
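    """Run EvoSuite on one compiled class to generate unit tests, returning "timeout" if the call exceeds 1500 seconds."""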
print(file)
cmd = (
f"{os.path.join(get_java_bin_path(), 'java')} -jar {EVOSUITE_JAR_PATH} -class "
+ file.replace(".class", "")
+ f" -projectCP . "
f'-criterion "LINE:BRANCH:WEAKMUTATION:OUTPUT:METHOD:CBRANCH:STRONGMUTATION" '
f" -Dshow_progress=false "
f"-Dassertion_strategy=MUTATION "
f"-Dminimize=true "
f"-Dsearch_budget=20 "
f"-Ddouble_precision=0.0001 "
f"-Dmax_mutants_per_test 200 "
f'-Danalysis_criteria="LINE,BRANCH,EXCEPTION,WEAKMUTATION,OUTPUT,METHOD,METHODNOEXCEPTION,CBRANCH,STRONGMUTATION" '
f"-Doutput_variables=TARGET_CLASS,Random_Seed,criterion,Size,Length,BranchCoverage,Lines,Coverage,Covered_Lines,LineCoverage,MethodCoverage,Size,Length,Total_Goals,Covered_Goals,MutationScore,OutputCoverage "
f"-Dmax_int {int(math.sqrt(2 ** 31 - 1))} "
f"-mem={MAX_JAVA_MEM} "
f"-Dextra_timeout=180 "
f"-Dreport_dir={report_name}"
)
print(cmd)
try:
return subprocess.call(
cmd, shell=True, timeout=1500, cwd=folderpath, executable="/bin/bash"
)
except subprocess.TimeoutExpired:
return "timeout"
def consolidate_reports(consolidated_report_path, report_dirs, folderpath):
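    """Merge the per-class statistics.csv reports into a single consolidated CSV, keeping a single header, then delete the per-class report directories."""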
with open(consolidated_report_path, "w") as output_report:
header_printed = False
for report_dir in report_dirs:
report_path = Path(folderpath).joinpath(report_dir).joinpath(REPORT_FILE)
if report_path.is_file():
with open(report_path, "r") as f:
report_lines = f.readlines()
output_report.writelines(
report_lines if not header_printed else report_lines[1:]
)
header_printed = True
report_path.unlink()
report_path.parent.rmdir()
def generate_javafiles_withclass(filepath: Path, output_folder: Path):
print(f"creating files from {filepath} in {output_folder}")
lines = open(filepath).readlines()
for i, line in enumerate(lines):
write_javacode_onefunctionperfile(line, i, output_folder, with_id=True)
def generate_tests_pipeline(in_file: Path, out_path: Path):
print(f"Creating tests for {in_file}, outputting them in {out_path}")
out_path.mkdir(exist_ok=True)
generate_javafiles_withclass(in_file, out_path)
run_command_compile_java_file(out_path)
run_command_test_generation(out_path)
def output_selected_tests_summary(tests_path):
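    """Aggregate the consolidated EvoSuite reports, attach the generated test sources, and keep only the tests whose mutation score exceeds MUTATION_SCORE_CUTOFF."""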
subfolders = [p for p in list(tests_path.glob("*")) if not str(p).endswith("/log")]
csv_dfs = []
for folder in subfolders:
csv_file = get_consolidated_report_path(folder).joinpath("statistics.csv")
if csv_file.is_file():
csv = pd.read_csv(csv_file)
csv["folder"] = folder
csv_dfs.append(csv)
concat_df = pd.concat(csv_dfs).reset_index(drop=True)
concat_df = concat_df[concat_df["TARGET_CLASS"].apply(lambda x: not pd.isna(x))]
concat_df["path_to_test"] = concat_df.apply(
lambda row: row["folder"]
.joinpath("evosuite-tests")
.joinpath(row["TARGET_CLASS"] + "_ESTest.java"),
axis="columns",
)
test_exists = concat_df["path_to_test"].apply(lambda x: x.is_file())
print(
f"{(~test_exists).sum() / len(test_exists):.2%} of the tests in the summary could not be found"
)
concat_df = concat_df[test_exists]
concat_df.to_csv(tests_path.joinpath("tests_summary.csv"), index=False)
test_string = []
for p in concat_df.path_to_test:
assert p.is_file(), f"test {p} does not exist"
with open(p, "r", encoding="utf8") as input_file:
test_string.append(input_file.read())
concat_df["tests_strings"] = test_string
selected_df = concat_df[(concat_df.MutationScore > MUTATION_SCORE_CUTOFF)]
selected_df = selected_df[selected_df.path_to_test.apply(lambda x: x.is_file())]
selected_df.to_csv(tests_path.joinpath("selected_tests_summary.csv"), index=False)
selected_df.to_csv(tests_path.joinpath("selected_tests.csv"), index=False)
if __name__ == "__main__":
print("#" * 10, "Creating Tests", "#" * 10)
parser = argparse.ArgumentParser(description="")
parser.add_argument(
"--input_path", help="path to the input files",
)
parser.add_argument(
"--output_path", type=str, help="where the files should be outputed",
)
parser.add_argument(
"--local",
type=bool_flag,
default=True,
help="True if you want to run the processing pipeline locally, false if want to use submitit.",
)
parser.add_argument(
"--rerun",
type=bool_flag,
default=False,
help="True if you want to run the processing pipeline locally, false if want to use submitit.",
)
args = parser.parse_args()
input_path = Path(args.input_path)
assert input_path.exists(), f"{input_path} does not exist"
output_path = Path(args.output_path)
output_path.mkdir(exist_ok=True)
cluster: tp.Optional[submitit.AutoExecutor] = None
if args.local is False:
cluster = submitit.AutoExecutor(output_path / "log")
cluster.update_parameters(cpus_per_task=80, mem_gb=300, partition="learnlab")
input_path = Path(args.input_path)
if input_path.is_file():
infiles = [input_path]
else:
infiles = sorted(list(input_path.glob("java.000*.sa.tok")))
out_folder = Path(args.output_path)
sub_out_folders = [
out_folder.joinpath(func_file.name.replace(".", "_")) for func_file in infiles
]
if not args.rerun:
indices_to_run = [
i
for i, p in enumerate(sub_out_folders)
if not (
get_consolidated_report_path(p).is_dir()
and get_consolidated_report_path(p).joinpath(REPORT_FILE).is_file()
)
]
print(
f"Running on the remaining {len(indices_to_run)} among {len(sub_out_folders)} files"
)
infiles = np.array(infiles)[indices_to_run] # type: ignore
sub_out_folders = np.array(sub_out_folders)[ # type: ignore
indices_to_run
]
if cluster is None:
# Running everything locally in parallel can use too much memory
for file, out_path in tqdm(list(zip(infiles, sub_out_folders))):
generate_tests_pipeline(file, out_path)
else:
jobs = cluster.map_array(generate_tests_pipeline, infiles, sub_out_folders)
for j in tqdm(jobs):
j.result()
output_selected_tests_summary(out_folder)
print("\n" * 2)
|
CodeGen-main
|
codegen_sources/test_generation/create_tests.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import fastBPE
import torch
from pathlib import Path
import pandas as pd
INITIAL_CACHE_FOLDER = "initial_cache"
LANGUAGES = ["cpp", "java", "python"]
from logging import getLogger
import numpy as np
from utils import ROOT_PATH, add_root_to_path # type: ignore
add_root_to_path()
from codegen_sources.model.src.cache import ListCache
from codegen_sources.model.preprocess import XLM_preprocess
from codegen_sources.model.src.data.dataset import MUTATION_SCORE, ASSERTS_COUNT
logger = getLogger()
class Params:
def __init__(self, pad_index=0, eos_index=1) -> None:
self.pad_index = pad_index
self.eos_index = eos_index
self.tokens_per_batch = 1000
self.st_remove_proba = 0.3
def get_tensors(reloaded_data):
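    """Rebuild the individual sentence tensors from the flat token array, surrounding each sentence with index 1 (the EOS index)."""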
pos = reloaded_data["positions"]
sent = reloaded_data["sentences"]
sentences = [sent[p[0] : p[1]] for p in pos]
lengths = [torch.tensor(len(s) + 2) for s in sentences]
out_sentences = []
for s in sentences:
l = len(s) + 2
out_s = torch.LongTensor(l).fill_(1)
out_s[1 : l - 1].copy_(torch.from_numpy(s.astype(np.int64)))
out_sentences.append(out_s)
return out_sentences, lengths
def initialize_cache(dataset_path, output_path: Path):
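    """Build the initial ListCache for every language pair from the parallel train .pth files and save it to output_path."""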
languages = [l + "_sa" for l in LANGUAGES]
for l1 in languages:
for l2 in [l for l in languages if l > l1]:
print(f"computing initial cache for {l1}-{l2}")
reloaded_data = torch.load(
dataset_path.joinpath(f"train.{l1}-{l2}.{l1}.pth")
)
sents1, len1 = get_tensors(reloaded_data)
sents2, len2 = get_tensors(
torch.load(dataset_path.joinpath(f"train.{l1}-{l2}.{l2}.pth"))
)
assert len(sents1) == len(sents2) == len(len1) == len(len2)
elements = list(zip(sents1, len1, sents2, len2))
ListCache(elements, Params()).save(
output_path.joinpath(f"cache_{l1}-{l2}.pkl")
)
def add_self_trained_dataset(data_df, dataset_path, vocab_path):
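    """BPE-encode the Java functions whose translated tests contain more than one assert, shard them into per-GPU training files, and binarize them with XLM_preprocess."""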
logger.info(f"Self labelled dataset to {dataset_path}")
bpe_model = fastBPE.fastBPE(
str(ROOT_PATH.joinpath("data/bpe/cpp-java-python/codes"))
)
print("unfiltered df:", len(data_df))
data_df = data_df[
data_df.python_translated_tests.apply(lambda x: x.count("assert")) > 1
]
print("filtered df:", len(data_df))
java_functions_with_indices = bpe_model.apply(
pd.Series(data_df["TARGET_CLASS"] + " | " + data_df["java_function"])
)
output_folder = dataset_path
output_files = [
open(
output_folder.joinpath(f"self_training.java_sa.{i}.bpe"),
"w",
encoding="utf-8",
errors="ignore",
)
for i in range(args.n_gpus)
]
output_files_all = open(
output_folder.joinpath(f"self_training.java_sa.bpe"),
"w",
encoding="utf-8",
errors="ignore",
)
for i, l in enumerate(sorted(java_functions_with_indices)):
output_files_all.write(l.strip())
output_files_all.write("\n")
        # shard over the n_gpus output files actually opened above
        output_files[i % len(output_files)].write(l.strip())
        output_files[i % len(output_files)].write("\n")
for f in output_files:
f.close()
output_files_all.close()
for file_path in Path(output_folder).glob("*.bpe"):
print(f"Processing {file_path} with vocab {Path(vocab_path).absolute()}")
XLM_preprocess(
str(Path(vocab_path).absolute()),
str(file_path),
str(file_path).replace(".bpe", ".pth"),
)
def output_multilingual_tests_dataset(df_python, df_cpp, output_path):
data_df = df_python[
["TARGET_CLASS", "java_function", "path_to_test", "python_translated_tests"]
]
data_df["cpp_translated_tests"] = df_cpp["cpp_translated_tests"]
data_df[MUTATION_SCORE] = df_python["MutationScore"]
data_df[ASSERTS_COUNT] = data_df.python_translated_tests.apply(
lambda x: x.count("assert")
)
data_df[
[
"TARGET_CLASS",
"path_to_test",
"python_translated_tests",
"cpp_translated_tests",
MUTATION_SCORE,
ASSERTS_COUNT,
]
].to_json(
output_path.joinpath("translated_tests.json"), orient="records", lines=True
)
return data_df
if __name__ == "__main__":
logger.info("#" * 10 + "Creating data for Online Self-Training" + "#" * 10)
parser = argparse.ArgumentParser(description="")
parser.add_argument(
"--dataset_path", help="path to the offline dataset",
)
parser.add_argument(
"--input_dfs_path", help="Path to input dataframes",
)
parser.add_argument(
"--output_path",
type=str,
help="where the files should be outputed",
default=Path(ROOT_PATH).joinpath("data"),
)
parser.add_argument(
"--n_gpus", type=int, help="number of train set splits", default=8
)
parser.add_argument(
"--vocab_path",
type=str,
help="Path to vocab",
default=Path(ROOT_PATH).joinpath("data", "bpe", "cpp-java-python", "vocab"),
)
args = parser.parse_args()
output_path = Path(args.output_path)
dataset_path = Path(args.dataset_path)
initialize_cache(dataset_path, output_path.joinpath(INITIAL_CACHE_FOLDER))
input_dfs_path = Path(args.input_dfs_path)
assert input_dfs_path.is_dir()
input_dfs_paths = {
lang: input_dfs_path.joinpath(f"test_results_{lang}_df.csv")
for lang in ["python", "cpp"]
}
test_results_dfs = {
lang: pd.read_csv(path) for lang, path in input_dfs_paths.items()
}
data_df = output_multilingual_tests_dataset(
test_results_dfs["python"], test_results_dfs["cpp"], output_path
)
add_self_trained_dataset(data_df, output_path, args.vocab_path)
logger.info("\n" * 2)
|
CodeGen-main
|
codegen_sources/test_generation/create_data_for_online_st.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
import argparse
from itertools import repeat
from logging import getLogger
from pathlib import Path
import pandas as pd
import submitit
from tqdm import tqdm
root_path = Path(__file__).absolute().parents[2]
print(f"adding {root_path} to path")
sys.path.append(str(root_path))
from codegen_sources.test_generation.utils import chunks_df, add_root_to_path
from codegen_sources.model.src.utils import set_MKL_env_vars
from codegen_sources.model.translate import Translator
from codegen_sources.preprocessing.utils import bool_flag
from codegen_sources.test_generation.compute_test_results import compute_test_results
CHUNKSIZE = 2500
SUPPORTED_LANGUAGES = ["python", "cpp"]
primitive_types = {"short", "int", "long", "float", "double", "boolean", "char"}
logger = getLogger()
java_standard_types = {
"Double",
"Float",
"String",
"Integer",
"Boolean",
"Long",
"Short",
}
java_simple_types = primitive_types | java_standard_types
java_supported_types = (
java_simple_types
| {f"{t}[]" for t in java_simple_types}
| {f"ArrayList<{t}>" for t in java_simple_types}
)
def get_joined_func_tests_df(csv_path, functions_path):
assert Path(csv_path).is_file(), csv_path
tests_dataframe = pd.read_csv(csv_path)
java_functions_path = Path(functions_path)
# reading functions to DF
java_functions = [
func
for f in java_functions_path.glob("java.0000*.sa.tok")
for func in open(f).readlines()
]
java_functions = pd.DataFrame(
{
"func_ids": [f.split(" | ")[0] for f in java_functions],
"java_function": [f.split(" | ")[1] for f in java_functions],
}
)
# getting the IDs of the functions. The class name is created from it
tests_dataframe["func_ids"] = tests_dataframe.TARGET_CLASS.apply(
lambda x: x.replace("CLASS_", "", 1)
)
merged = tests_dataframe.merge(java_functions, how="inner", on="func_ids")
return merged
def compute_transcoder_translation(
df,
output_file,
model_path,
bpe_path,
target_language,
len_penalty=1.0,
beam_size=20,
):
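    """Translate every Java function with beam search and store each of the beam_size hypotheses as a new dataframe column, then write the dataframe to output_file."""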
transcoder = Translator(model_path, bpe_path)
res = [[] for _ in range(beam_size)]
for i, func in enumerate(df["java_function"]):
if i % 100 == 0:
logger.info(f"computed {i} translations / {len(df)}")
translations = transcoder.translate(
func,
"java",
target_language,
beam_size=beam_size,
tokenized=True,
detokenize=False,
max_tokens=1024,
length_penalty=len_penalty,
)
for i, res_i in enumerate(translations):
res[i].append(res_i)
for i, res_i in enumerate(res):
df[f"translated_{target_language}_functions_beam_{i}"] = res_i
df.to_csv(output_file, index=False)
def main(args):
output_folder = Path(args.output_folder)
output_folder.mkdir(exist_ok=True, parents=True)
transcoder_output_folder = "transcoder_outputs"
output_folder_translations = output_folder.joinpath(transcoder_output_folder)
if args.local is False:
logger.info("Executing on cluster")
cluster = submitit.AutoExecutor(output_folder_translations.joinpath("log"))
cluster.update_parameters(
cpus_per_task=10,
gpus_per_node=1,
mem_gb=300,
timeout_min=4319,
constraint="volta32gb",
partition="learnlab",
)
else:
logger.info("Executing locally")
cluster = None
merged_df = get_joined_func_tests_df(args.csv_path, args.functions_path)
chunks = list(chunks_df(merged_df, CHUNKSIZE))
output_files = [
output_folder_translations.joinpath(f"{args.target_language}_chunk_{i}.csv")
for i in range(len(chunks))
]
assert (
len(chunks) > 0
), f"No chunks created from {args.csv_path } and {args.functions_path}"
logger.info(f"{len(chunks)} chunks of size {CHUNKSIZE}")
missing_output_files = output_files
if not args.rerun:
indices_to_run = [i for i, p in enumerate(output_files) if not (p.is_file())]
# indices_to_run = [8]
logger.info(
f"Running on the remaining {len(indices_to_run)} among {len(output_files)} files"
)
chunks = [chunks[i] for i in indices_to_run]
missing_output_files = [output_files[i] for i in indices_to_run]
assert len(chunks) == len(missing_output_files)
if len(chunks) > 0:
if cluster is None:
for c, output_f in zip(chunks, missing_output_files):
compute_transcoder_translation(
c,
output_f,
args.model_path,
args.bpe_path,
args.target_language,
args.len_penalty,
)
else:
jobs = cluster.map_array(
compute_transcoder_translation,
chunks,
missing_output_files,
repeat(args.model_path),
repeat(args.bpe_path),
repeat(args.target_language),
repeat(args.len_penalty),
)
for j in tqdm(jobs):
j.result()
chunks_files = [
output_folder_translations.joinpath(f"{args.target_language}_chunk_{i}.csv")
for i in range(len(output_files))
]
output_csv_path = output_folder_translations.joinpath(
f"{args.target_language}_transcoder_translation.csv"
)
pd.concat([pd.read_csv(chunk) for chunk in chunks_files], axis=0).to_csv(
output_csv_path, index=False
)
compute_test_results(
output_csv_path,
args.target_language,
output_folder.joinpath("test_results"),
local=args.local,
)
def parse_arguments():
parser = argparse.ArgumentParser(description="")
parser.add_argument(
"--functions_path", help="path to the input files",
)
parser.add_argument(
"--csv_path", help="path to the input test csv",
)
parser.add_argument(
"--output_folder", help="output path",
)
parser.add_argument(
"--target_language", help="target language. python or cpp", default="python",
)
parser.add_argument(
"--model_path", type=str, help="where the files should be outputed",
)
parser.add_argument(
"--bpe_path", type=str, help="where the files should be outputted",
)
parser.add_argument(
"--len_penalty", type=float, help="Length penalty for generations", default=0.5,
)
parser.add_argument(
"--local",
type=bool_flag,
default=True,
help="True if you want to run the processing pipeline locally, false if want to use submitit.",
)
parser.add_argument(
"--rerun",
type=bool_flag,
default=False,
help="True if you want to run the processing pipeline locally, false if want to use submitit.",
)
# parser.add_argument('--filter_several_tests', type=bool_flag, default=True, help='Filter to keep only the examples with at least 2 tests')
args = parser.parse_args()
assert Path(args.bpe_path).is_file(), args.bpe_path
assert Path(args.model_path).is_file()
assert args.target_language in SUPPORTED_LANGUAGES
return args
if __name__ == "__main__":
logger.info("#" * 10 + "Computing Translations" + "#" * 10)
set_MKL_env_vars()
args = parse_arguments()
main(args)
logger.info("\n" * 2)
|
CodeGen-main
|
codegen_sources/test_generation/compute_transcoder_translations.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Translate sentences from the input stream.
# The model will be faster if sentences are sorted by length.
# Input sentences must have the same tokenization and BPE codes as the ones used in the model.
#
import sys
import argparse
from logging import getLogger
from pathlib import Path
import fastBPE
import numpy as np
import pandas as pd
root_path = Path(__file__).absolute().parents[2]
print(f"adding {root_path} to path")
sys.path.append(str(root_path))
from codegen_sources.test_generation.utils import get_beam_size
from codegen_sources.model.preprocess import XLM_preprocess
SOURCE_LANG = "java"
FAILURE = "failure"
logger = getLogger()
def get_arguments():
parser = argparse.ArgumentParser(description="")
parser.add_argument(
"--input_df", help="path to the input files",
)
parser.add_argument(
"--output_folder", type=str, help="where the files should be outputed",
)
parser.add_argument(
"--langs",
type=list,
nargs="+",
help="List of target langs",
default=["python", "cpp"],
)
parser.add_argument(
"--bpe_path", type=str, help="where the files should be outputed",
)
parser.add_argument(
"--bpe_vocab", type=str, help="where the files should be outputed",
)
args = parser.parse_args()
return args
def main(input_path, output_folder, langs, bpe_model, bpe_vocab):
input_path = Path(input_path)
input_dfs_paths = {
lang: input_path.joinpath(f"test_results_{lang}_df.csv") for lang in langs
}
test_results_dfs = {
lang: pd.read_csv(path) for lang, path in input_dfs_paths.items()
}
test_results_dfs = select_tests_several_asserts(test_results_dfs)
for ref_l in langs[1:]:
assert len(test_results_dfs[ref_l]) == len(
test_results_dfs[langs[0]]
), f"length of input {len(test_results_dfs[ref_l])} for {ref_l} while it is {len(test_results_dfs[langs[0]])} for {langs[0]}"
assert (
test_results_dfs[ref_l][f"{SOURCE_LANG}_function"]
== test_results_dfs[langs[0]][f"{SOURCE_LANG}_function"]
).all(), f"Dataframes order for {ref_l} and {langs[0]} do not match"
langs = sorted(langs)
assert len(langs) == len(set(langs)), langs
output_folder = Path(output_folder)
output_folder.mkdir(exist_ok=True, parents=True)
number_examples = len(test_results_dfs[langs[0]])
first_successful_code = {
lang: get_first_success(test_results_dfs[lang], lang) for lang in langs
}
for lang in langs:
successful_translations_df = test_results_dfs[lang][
pd.Series(first_successful_code[lang]).apply(lambda x: x != FAILURE)
]
successful_translations_df["first_successful_translation"] = [
c for c in first_successful_code[lang] if c != FAILURE
]
logger.info(
f"{SOURCE_LANG}-{lang}: {len(successful_translations_df)} among {number_examples} ({len(successful_translations_df) / number_examples:.1%})"
)
print(
f"{SOURCE_LANG}-{lang}: {len(successful_translations_df)} among {number_examples} ({len(successful_translations_df) / number_examples:.1%})"
)
write_bpe_files(
output_folder,
successful_translations_df["java_function"],
successful_translations_df["first_successful_translation"],
SOURCE_LANG,
lang,
bpe_model=bpe_model,
)
for lang1 in langs:
for lang2 in langs[langs.index(lang1) + 1 :]:
# the only parallel data we have is when the tests are successful for both languages
successful_pairs = [
(c1, c2)
for c1, c2 in zip(
first_successful_code[lang1], first_successful_code[lang2]
)
if c1 != FAILURE and c2 != FAILURE
]
print(
f"{lang1}-{lang2}: {len(successful_pairs)} among {number_examples} ({len(successful_pairs) / number_examples:.1%})"
)
write_bpe_files(
output_folder,
[c1 for c1, c2 in successful_pairs],
[c2 for c1, c2 in successful_pairs],
lang1,
lang2,
bpe_model=bpe_model,
)
for file_path in Path(output_folder).glob("*.bpe"):
XLM_preprocess(
str(bpe_vocab), str(file_path), str(file_path).replace(".bpe", ".pth")
)
def select_tests_several_asserts(test_results_dfs):
tests_several_asserts = test_results_dfs["python"].python_translated_tests.apply(
lambda x: x.count("assert ") > 1
)
test_results_dfs = {
lang: df[tests_several_asserts].reset_index(drop=True)
for lang, df in test_results_dfs.items()
}
new_length = len(test_results_dfs["python"])
logger.info(
f"removed {len(tests_several_asserts) - new_length} tests with only one assert ({1 - new_length / len(tests_several_asserts):.2%})"
)
return test_results_dfs
def get_first_success(test_results, language):
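    """For each example, return the shortest beam hypothesis that passes the generated tests, or FAILURE if none succeeds."""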
beam_size = get_beam_size(
test_results, results_columns=f"translated_{language}_functions_beam_"
)
test_results_columns = [f"test_results_{language}_{i}" for i in range(beam_size)]
translations_columns = [
f"translated_{language}_functions_beam_{beam}" for beam in range(beam_size)
]
for col in test_results_columns:
test_results[col] = test_results[col].apply(lambda x: eval(x))
translations = np.array(
[test_results[col] for col in translations_columns]
).transpose()
logger.info("getting the first successful function")
tests_results = np.array(
[test_results[col] for col in test_results_columns]
).transpose((1, 0, 2))
code = []
min_successful_len = float("inf")
for translations_i, result_i in zip(translations, tests_results):
any_successful = False
for translated_code, res in zip(translations_i, result_i):
if res[0] == "success":
if not any_successful:
code.append(translated_code)
min_successful_len = len(translated_code)
any_successful = True
elif len(translated_code) < min_successful_len:
min_successful_len = len(translated_code)
code[-1] = translated_code
if not any_successful:
code.append(FAILURE)
assert len(code) == len(test_results)
first_successful_code = code
return first_successful_code
def write_bpe_files(output_folder, lang1_funcs, lang2_funcs, lang1, lang2, bpe_model):
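    """Apply BPE to both sides of the parallel data and write each language both as a single file and split into 8 shards, with languages ordered alphabetically."""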
if not lang1 < lang2:
lang1, lang2 = lang2, lang1
lang1_funcs, lang2_funcs = lang2_funcs, lang1_funcs
lang1_funcs = bpe_model.apply([f.strip() for f in lang1_funcs])
lang2_funcs = bpe_model.apply([f.strip() for f in lang2_funcs])
output_files = {
lang1: [
open(
output_folder.joinpath(
f"train.{lang1}_sa-{lang2}_sa.{lang1}_sa.{i}.bpe"
),
"w",
)
for i in range(8)
],
lang2: [
open(
output_folder.joinpath(
f"train.{lang1}_sa-{lang2}_sa.{lang2}_sa.{i}.bpe"
),
"w",
)
for i in range(8)
],
}
output_files_all = {
lang1: open(
output_folder.joinpath(f"train.{lang1}_sa-{lang2}_sa.{lang1}_sa.bpe"), "w"
),
lang2: open(
output_folder.joinpath(f"train.{lang1}_sa-{lang2}_sa.{lang2}_sa.bpe"), "w"
),
}
for i, (c1, c2) in enumerate(zip(lang1_funcs, lang2_funcs)):
c1 = c1.strip()
c2 = c2.strip()
output_files_all[lang1].write(c1)
output_files_all[lang1].write("\n")
output_files_all[lang2].write(c2)
output_files_all[lang2].write("\n")
output_files[lang1][i % 8].write(c1)
output_files[lang1][i % 8].write("\n")
output_files[lang2][i % 8].write(c2)
output_files[lang2][i % 8].write("\n")
for o in output_files[lang1] + output_files[lang2]:
o.close()
if __name__ == "__main__":
args = get_arguments()
bpe_model = fastBPE.fastBPE(args.bpe_path) # type: ignore
main(Path(args.input_df), args.output_folder, args.langs, bpe_model, args.bpe_vocab)
|
CodeGen-main
|
codegen_sources/test_generation/select_successful_tests.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Translate sentences from the input stream.
# The model will be faster if sentences are sorted by length.
# Input sentences must have the same tokenization and BPE codes as the ones used in the model.
#
import re
from codegen_sources.preprocessing.utils import split_arguments
class EvosuiteTranslator:
def __init__(self) -> None:
self.primitive_types = {
"short",
"int",
"long",
"float",
"double",
"boolean",
"char",
}
self.java_standard_types = {
"Double",
"Float",
"String",
"Integer",
"Boolean",
"Long",
"Short",
"Character",
}
self.java_simple_types = self.primitive_types | self.java_standard_types
self.java_separator_chars = "[^0-9A-Za-z_]"
self.java_arrays = {f"{t}[]" for t in self.java_simple_types}
self.java_arrays_regexp = {
x.replace("[", "\[").replace("]", "\]") for x in self.java_arrays
}
self.supported_list_objects = ["ArrayList", "LinkedList", "List"]
self.java_list_objects = {
f"{list_object}<{t}>"
for t in self.java_simple_types
for list_object in self.supported_list_objects
}
self.java_supported_types = (
self.java_simple_types | self.java_arrays | self.java_list_objects
)
self.supported_asserts = [
"assertTrue",
"assertFalse",
"assertEquals",
"assertArrayEquals",
"assertNotNull",
"assertNotSame",
"assertSame",
"assertNull",
]
self.assert_argument_extractors = {
assert_name: re.compile(assert_name + r"\((.+?)\);")
for assert_name in self.supported_asserts
}
# translate_type_castings
self.type_casting_regexp = {
t: re.compile(r"\(%s\)[ ]*([^;,\n ?]+)" % t)
for t in self.java_simple_types
| self.java_list_objects
| self.java_arrays_regexp
| {"Object"}
}
# method and class name regexp
self.method_name_regexp = re.compile(
"public void test([0-9]+)\(\) throws Throwable {"
)
# translate_equals
self.equals_regexp = re.compile(r".equals\(([^;\n]*)\)")
# translate_value_initializations
self.double_initialization_regexp = re.compile(
f"({self.java_separator_chars})([0-9]*\.[0-9E]+)[fFdD]({self.java_separator_chars})"
)
self.long_initialization_regexp = re.compile(
f"({self.java_separator_chars})([0-9]+)[lL]({self.java_separator_chars})"
)
self.null_pointers_regexp = re.compile(
f"({self.java_separator_chars})null({self.java_separator_chars})"
)
# translate variable definitions
self.object_variable_definition = {
t: re.compile(r"%s ([^=;]+?) = new %s(\([^;]+?\));" % (t, t))
for t in self.java_standard_types
}
self.primitive_variable_definition = {
t: re.compile(r"%s ([^=;]+?) = ([^;]+?);" % t)
for t in self.primitive_types | self.java_standard_types
}
# array translation
self.regexp_match_array_content_definition = {
t: re.compile(r"new %s\[\] \{(.+)\}" % t) for t in self.java_simple_types
} # \1 matches the content in the array definition
self.regexp_match_array_definition_with_length = {
t: re.compile(r"%s\[\] (.+?) = new %s\[([0-9]+)\];" % (t, t))
for t in self.java_simple_types
} # \1 is the token identifier and \2 the length of the array
self.regexp_match_array_length_getter = {
t: re.compile(r" (%sArray[0-9]+)\.length" % t.lower())
for t in self.java_simple_types
} # \1 is the array identifier name
## list translations
self.list_objects_definitions = {
t: re.compile(r"%s ([^=;]+?) = new %s\(\);" % (t, t))
for t in self.java_list_objects
}
self.regexp_match_list_definition = {
simple_type: {
list_type: re.compile(f"new {list_type}<{simple_type}>\(\)")
for list_type in self.supported_list_objects
}
for simple_type in self.java_simple_types
} # \1 matches the content in the array definition
self.regexp_match_add_to_list = {
list_type: re.compile(f"({self.type_to_varname(list_type)}[0-9]+)\.add\(")
for list_type in self.supported_list_objects
}
self.regexp_match_list_contains = {
list_type: re.compile(
f"({self.type_to_varname(list_type)}[0-9]+)\.contains\(([^\n]*?)\)"
)
for list_type in self.supported_list_objects
}
def type_to_varname(self, t):
return t[0].lower() + t[1:]
def get_asserts_arguments(self, code):
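        """Return, for each supported JUnit assert, the list of argument lists found in the code."""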
return {
assert_name: self.specifics_assert_args(code, assert_name)
for assert_name in self.supported_asserts
}
def specifics_assert_args(self, code, assert_name):
arguments = set(self.assert_argument_extractors[assert_name].findall(code))
return [split_arguments(a) for a in set(arguments)]
@staticmethod
def replace_func_calls(classname, code):
return re.sub(
f"([{classname[0].lower()}, {classname[0]}]{classname[1:]}(0|1|_0)?)"
+ r"\.(.+?)\(",
r"f_filled(",
code,
)
@staticmethod
def args_to_string(args_list):
return ",".join(args_list)
def get_default_value(self, t):
if t not in self.primitive_types:
return "None"
elif t == "char":
return "'\u0000'"
elif t == "boolean":
return "False"
else:
return "0"
|
CodeGen-main
|
codegen_sources/test_generation/evosuite_tests_translators/evosuite_translator.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Translate sentences from the input stream.
# The model will be faster if sentences are sorted by length.
# Input sentences must have the same tokenization and BPE codes as the ones used in the model.
#
import re
from .evosuite_translator import EvosuiteTranslator
class EvosuiteToPython(EvosuiteTranslator):
def __init__(self) -> None:
super().__init__()
self.imports = "import numpy as np \nimport math\nfrom math import *\nimport collections\nfrom collections import *\nimport heapq\nimport itertools\nimport random\nimport sys\nimport unittest\n"
self.remove_casting_null = {
t: re.compile(r"%s\(null\)" % t)
for t in ["int", "str", "float", "bool", "list"]
}
def translate(self, code):
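        """Run the rule-based translation passes in order, turning an EvoSuite Java test class into a Python unittest module."""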
code = self.translate_class_method_name(code)
code = self.replace_asserts(code)
code = self.translate_arrays(code)
code = self.translate_variable_definitions(code)
code = self.translate_type_casting(code)
code = self.translate_equals(code)
code = self.translate_value_initializations(code)
code = self.translation_wrapup(code)
return code
def translate_class_method_name(self, code):
assert "_ESTest" in code, code
classname = code.split("_ESTest")[0].split()[-1].strip()
# print(classname)
code = self.method_name_regexp.sub(r"def test\1(self):", code)
code = code.replace(
f"public class {classname}_ESTest extends {classname}_ESTest_scaffolding "
+ "{",
f"class {classname}(unittest.TestCase):",
)
code = self.replace_func_calls(classname, code)
# code = code.replace(f'{classname}.', "")
return code
def translation_wrapup(self, code):
tests = code.split("@Test(timeout = ")[1:]
for t in tests:
if "f_filled" not in t:
code = code.replace(t, "")
# removing tests that don't call the method
code = code.replace("}", "")
codelines = [
l
for l in code.splitlines()
if not l.startswith("import")
and not l.startswith("@RunWith")
and not l.startswith(" @Test(timeout =")
]
for i, l in enumerate(codelines):
if "*/" in l:
break
code = "\n".join(codelines[i + 1 :])
code = "\n".join(
[self.imports]
+ ["\n\n#TOFILL\n"]
+ [code]
+ ["\nif __name__ == '__main__':\n unittest.main()"]
)
return code
def replace_asserts(self, code):
assert_args = self.get_asserts_arguments(code)
for assert_name, arguments_list in assert_args.items():
for args in arguments_list:
# print(assert_name, args, len(args))
assert_string = f"{assert_name}({self.args_to_string(args)});"
if assert_name == "assertTrue":
assert len(args) == 1, args
code = code.replace(assert_string, f"assert {args[0]}")
elif assert_name == "assertFalse":
assert len(args) == 1, args
code = code.replace(assert_string, f"assert not ({args[0]})")
elif (
assert_name == "assertEquals" or assert_name == "assertArrayEquals"
):
assert len(args) == 2 or len(args) == 3, args
if len(args) == 2:
code = code.replace(
assert_string, f"assert {args[0]} == {args[1]}"
)
if len(args) == 3:
code = code.replace(
assert_string,
f"assert abs({args[0]} - {args[1]}) <= {args[2]}",
)
elif assert_name == "assertSame":
assert len(args) == 2, args
code = code.replace(assert_string, f"assert {args[0]} is {args[1]}")
elif assert_name == "assertNotSame":
assert len(args) == 2, args
code = code.replace(
assert_string, f"assert {args[0]} is not {args[1]}"
)
elif assert_name == "assertNull":
assert len(args) == 1, args
code = code.replace(assert_string, f"assert {args[0]} is None")
elif assert_name == "assertNotNull":
assert len(args) == 1, args
code = code.replace(assert_string, f"assert {args[0]} is not None")
else:
raise NotImplementedError(f"cannot translate {assert_name}")
return code
def translate_arrays(self, code):
for t in self.java_simple_types:
code = self.regexp_match_array_content_definition[t].sub(r"[\1]", code)
code = self.regexp_match_array_definition_with_length[t].sub(
r"\1 = [%s] * \2;" % self.get_default_value(t), code
)
code = self.regexp_match_array_length_getter[t].sub(r" len(\1)", code)
for t, regexp in self.list_objects_definitions.items():
code = regexp.sub(r"\1 = []", code)
for t in self.java_simple_types:
for regexp in self.regexp_match_list_definition[t].values():
code = regexp.sub(r"[]", code)
for t, regexp in self.regexp_match_add_to_list.items():
code = regexp.sub(r"\1.append(", code)
for t, regexp in self.regexp_match_list_contains.items():
code = regexp.sub(r"\2 in \1", code)
return code
def translate_variable_definitions(self, code):
for t, regexp in self.object_variable_definition.items():
code = regexp.sub(r"\1 = \2", code)
for t, regexp in self.primitive_variable_definition.items():
code = regexp.sub(r"\1 = \2", code)
return code
def translate_type_casting(self, code):
for t in ["short", "int", "long", "Integer", "Long", "Short"]:
code = self.type_casting_regexp[t].sub(r"int(\1)", code)
for t in ["boolean", "Boolean"]:
code = self.type_casting_regexp[t].sub(r"bool(\1)", code)
for t in ["float", "Float", "double", "Double"]:
code = self.type_casting_regexp[t].sub(r"float(\1)", code)
for t in ["String", "char", "Character"]:
code = self.type_casting_regexp[t].sub(r"str(\1)", code)
for t in self.java_arrays_regexp | self.java_list_objects:
code = self.type_casting_regexp[t].sub(r"list(\1)", code)
code = self.type_casting_regexp["Object"].sub(r"\1", code)
for t, regexp in self.remove_casting_null.items():
code = regexp.sub(r"None", code)
return code
def translate_equals(self, code):
code = self.equals_regexp.sub(r" == (\1)", code)
return code
def translate_value_initializations(self, code):
code = self.double_initialization_regexp.sub(r"\1\2\3", code)
code = self.long_initialization_regexp.sub(r"\1\2\3", code)
code = self.null_pointers_regexp.sub(r"\1None\2", code)
return code
|
CodeGen-main
|
codegen_sources/test_generation/evosuite_tests_translators/evosuite_to_python.py
|
CodeGen-main
|
codegen_sources/test_generation/evosuite_tests_translators/__init__.py
|
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Translate sentences from the input stream.
# The model will be faster if sentences are sorted by length.
# Input sentences must have the same tokenization and BPE codes as the ones used in the model.
#
import re
from .evosuite_translator import EvosuiteTranslator
class EvosuiteToCpp(EvosuiteTranslator):
def __init__(self) -> None:
super().__init__()
self.imports = """#include <iostream>
#include <cstdlib>
#include <string>
#include <vector>
#include <fstream>
#include <iomanip>
#include <bits/stdc++.h>
#include "gtest/gtest.h"
#include "gmock/gmock.h"
using namespace std;
"""
self.cpp_tests_main = """int main(int argc, char **argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}"""
self.java_standard_types_translations = {
"Double": "double",
"Float": "float",
"String": "string",
"Integer": "int",
"Boolean": "bool",
"boolean": "bool",
"byte": "unsigned char",
"Byte": "unsigned char",
"Long": "long",
"Short": "short",
"Character": "char",
}
self.cpp_supported_types = list(
self.java_standard_types_translations.values()
) + [f"{t}\*" for t in self.java_standard_types_translations.values()]
# + [f"vector\<{t}\>" for t in self.java_standard_types_translations.values()]
self.remove_casting_null = {
t: re.compile(r"\(%s\)[ ]*null" % t) for t in self.cpp_supported_types
}
def translate(self, code):
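        """Run the rule-based translation passes in order, turning an EvoSuite Java test class into a GoogleTest C++ file."""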
code = self.translate_class_method_name(code)
code = code.replace("[]", "*")
code = self.replace_asserts(code)
code = self.translate_arrays(code)
code = self.translate_variable_definitions(code)
code = self.translate_type_casting(code)
code = self.translate_equals(code)
code = self.translate_value_initializations(code)
code = self.translation_wrapup(code)
return code
def translate_class_method_name(self, code):
assert "_ESTest" in code, code
classname = code.split("_ESTest")[0].split()[-1].strip()
# print(classname)
code = self.method_name_regexp.sub(r"TEST(EvoSuiteTest, test\1){", code)
code = code.replace(
f"public class {classname}_ESTest extends {classname}_ESTest_scaffolding "
+ "{",
"",
)
code = self.replace_func_calls(classname, code)
r = f"([{classname[0].lower()}, {classname[0]}]{classname[1:]}(0|1|_0)?)"
code = re.sub(r + r"\s*" + r + r"\s*=\s*new\s*" + r + r"\s*\(.*\);", "", code)
return code
def translation_wrapup(self, code):
tests = code.split("@Test(timeout = ")[1:]
for t in tests:
if "f_filled" not in t:
code = code.replace(t, "")
code = code.replace(" @Test(timeout = 4000)\n", "")
code_lines = code.splitlines()
code_lines = [
l
for l in code_lines
if not l.startswith("import")
and not l.startswith("@RunWith")
and not l.startswith(" @Test(timeout =")
and not l.startswith(" System.setCurrentTimeMillis(")
]
assert len(code_lines) > 0, "input to translation_wrapup is empty"
for i, l in enumerate(code_lines):
if "*/" in l:
break
code_lines = code_lines[i + 1 :]
return "\n".join(
[self.imports]
+ ["\n", "//TOFILL"]
+ code_lines
+ ["\n"]
+ [self.cpp_tests_main]
)
def replace_asserts(self, code):
assert_args = self.get_asserts_arguments(code)
assert_that_num = 0
for assert_name, arguments_list in assert_args.items():
for args in arguments_list:
# print(assert_name, args, len(args))
assert_string = f"{assert_name}({self.args_to_string(args)});"
if assert_name == "assertTrue":
assert len(args) == 1, args
code = code.replace(assert_string, f"ASSERT_TRUE ({args[0]});")
elif assert_name == "assertFalse":
assert len(args) == 1, args
code = code.replace(assert_string, f"ASSERT_FALSE ({args[0]});")
elif assert_name == "assertEquals":
assert len(args) == 2 or len(args) == 3, args
if len(args) == 2:
code = code.replace(
assert_string, f"ASSERT_EQ ({args[0]}, {args[1]});",
)
if len(args) == 3:
code = code.replace(
assert_string,
f"ASSERT_NEAR ({args[0]},{args[1]},{args[2]});",
)
elif assert_name == "assertArrayEquals":
if args[0].startswith("new "):
args[0] = re.sub(
r"new\s*(.+)\*",
f"vector<\g<1>> assert_array{assert_that_num} =",
args[0],
)
code = code.replace(
assert_string,
f"{args[0]};\nASSERT_THAT(assert_array{assert_that_num}, ::testing::ContainerEq({args[1]}));",
)
assert_that_num += 1
else:
code = code.replace(
assert_string,
f"ASSERT_THAT({args[0]}, ::testing::ContainerEq({args[1]}));",
)
elif assert_name == "assertSame":
assert len(args) == 2, args
code = code.replace(
assert_string, f"ASSERT_EQ(*{args[0]}, *{args[1]});",
)
elif assert_name == "assertNotSame":
assert len(args) == 2, args
code = code.replace(
assert_string, f"ASSERT_NE(*{args[0]}, *{args[1]});",
)
elif assert_name == "assertNull":
assert len(args) == 1, args
code = code.replace(
assert_string,
"",
# assert_string, f"ASSERT_EQ ({args[0]}, nullptr);",
)
elif assert_name == "assertNotNull":
assert len(args) == 1, args
code = code.replace(
assert_string,
"",
# assert_string, f"ASSERT_NE ({args[0]}, nullptr);",
)
else:
raise NotImplementedError(f"cannot translate {assert_name}")
code = code.strip()
c = "".join(code.split())
if len(c) >= 2 and c[-2:] == "}}":
code = code[:-1]
return code
def translate_arrays(self, code):
code = code.replace("ArrayList", "vector")
code = re.sub(f" = new vector<" + r".+" + ">\(\);", ";", code)
# code = re.sub(
# r"(\s*)(.+)\* (.+) = new\s*(.+)\[(.*)\];",
# f"\g<1>vector<\g<2>> \g<3>(\g<5>);",
# code,
# )
# for t in self.java_simple_types:
# code = self.regexp_match_array_content_definition[t].sub(
# r"// C++ array with elements [\1]", code
# )
# code = self.regexp_match_array_definition_with_length[t].sub(
# r"// C++ array definition with name \1 and \2 elements of default value %s;"
# % self.get_default_value(t),
# code,
# )
# code = self.regexp_match_array_length_getter[t].sub(
# r"// C++ to get length of array \1", code
# )
for t, regexp in self.list_objects_definitions.items():
pass
for t in self.java_simple_types:
for regexp in self.regexp_match_list_definition[t].values():
# code = regexp.sub(r"[]", code)
pass
for t, regexp in self.regexp_match_add_to_list.items():
# code = regexp.sub(r'\1.append(', code)
pass
for t, regexp in self.regexp_match_list_contains.items():
# code = regexp.sub(r'\2 in \1', code)
pass
return code
def translate_variable_definitions(self, code):
for t, regexp in self.object_variable_definition.items():
translated_type = self.java_standard_types_translations[t]
code = regexp.sub(translated_type + r" \1 = \2;", code)
for t, regexp in self.primitive_variable_definition.items():
# no need to translate
pass
for t, translated_type in self.java_standard_types_translations.items():
code = code.replace(t, translated_type)
return code
def translate_type_casting(self, code):
for t in self.java_standard_types:
translated_type = self.java_standard_types_translations[t]
code = self.type_casting_regexp[t].sub(
f"({translated_type}) " + r"\1", code
)
code = self.type_casting_regexp["Object"].sub(r"\1", code)
# for t in self.java_arrays_regexp | self.java_list_objects:
# code = self.type_casting_regexp[t].sub(r"list(\1)", code)
#
# code = self.type_casting_regexp[t].sub(r"\1", code)
#
for t, regexp in self.remove_casting_null.items():
code = regexp.sub(r"NULL", code)
return code
def translate_equals(self, code):
code = self.equals_regexp.sub(r" == (\1)", code)
return code
def translate_value_initializations(self, code):
# Translate null pointer to C++, it is NULL
code = self.null_pointers_regexp.sub(r"\1NULL\2", code)
return code
|
CodeGen-main
|
codegen_sources/test_generation/evosuite_tests_translators/evosuite_to_cpp.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import difflib
from pathlib import Path
from codegen_sources.preprocessing.lang_processors.python_processor import apply_black
folder_path = Path(__file__).parent
JAVA_PATH = folder_path.joinpath("resources/java_evosuite_tests")
PYTHON_PATH = folder_path.joinpath("resources/expected_python_translations")
CPP_PATH = folder_path.joinpath("resources/expected_cpp_translations")
def translation_testing(examples_list, translator, should_apply_black=False):
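    """Translate each input test and diff it against the expected translation, optionally formatting the output with black first."""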
for input_test, expected_translation in examples_list:
actual = translator.translate(input_test)
if should_apply_black:
actual = apply_black(actual)
diff_tester(expected_translation, actual)
def diff_tester(expected, res, split="\n"):
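    """Assert that two strings are equal up to trailing whitespace on each line, printing a diff when they differ."""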
expected = split.join([x.rstrip() for x in expected.split(split)])
res = split.join([x.rstrip() for x in res.split(split)])
d = difflib.Differ()
if expected != res:
print("Expected:")
print(expected)
print("#" * 50)
print("Got:")
print(res)
print("#" * 50)
diff = d.compare(expected.split(split), res.split(split))
for line in diff:
print(line)
assert expected == res
def read_inputs(filename, target_lang):
java_path = JAVA_PATH.joinpath(filename).with_suffix(".java").absolute()
with open(java_path, "r") as java_file:
input_test = java_file.read()
if target_lang == "python":
with open(
PYTHON_PATH.joinpath(filename).with_suffix(".py"), "r"
) as python_file:
expected_translation = python_file.read()
elif target_lang == "cpp":
with open(CPP_PATH.joinpath(filename).with_suffix(".cpp"), "r") as python_file:
expected_translation = python_file.read()
else:
raise ValueError(f"target_lang {target_lang} not supported")
return input_test, expected_translation
|
CodeGen-main
|
codegen_sources/test_generation/evosuite_tests_translators/tests/test_utils.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from .test_utils import read_inputs, translation_testing
from ..evosuite_to_python import EvosuiteToPython
ARRAYS = ["integer_array_check", "integer_array_casting"]
JAVA_ARRAYS = ["java_list"]
TEST_STRINGS = ["strings", "strings_null_casting"]
TEST_FLOATS = ["floats", "doubles"]
translator = EvosuiteToPython()
def test_array_translation():
translations_list = [read_inputs(filename, "python") for filename in ARRAYS]
translation_testing(translations_list, translator, True)
def test_lists_translation():
translations_list = [read_inputs(filename, "python") for filename in JAVA_ARRAYS]
translation_testing(translations_list, translator, True)
def test_floats():
translations_list = [read_inputs(filename, "python") for filename in TEST_FLOATS]
translation_testing(translations_list, translator, True)
def test_string_translation():
translations_list = [read_inputs(filename, "python") for filename in TEST_STRINGS]
translation_testing(translations_list, translator, True)
|
CodeGen-main
|
codegen_sources/test_generation/evosuite_tests_translators/tests/test_evosuite_to_python_translator.py
|
CodeGen-main
|
codegen_sources/test_generation/evosuite_tests_translators/tests/__init__.py
|
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from .test_utils import read_inputs, translation_testing
from ..evosuite_to_cpp import EvosuiteToCpp
ARRAYS = ["integer_array_casting"]
JAVA_ARRAYS = ["java_list"]
TEST_STRINGS = ["strings", "strings_null_casting"]
TEST_FLOATS = ["floats", "doubles"]
translator = EvosuiteToCpp()
def test_array_translation():
translations_list = [read_inputs(filename, "cpp") for filename in ARRAYS]
translation_testing(translations_list, translator)
def test_lists_translation():
translations_list = [read_inputs(filename, "cpp") for filename in JAVA_ARRAYS]
translation_testing(translations_list, translator)
def test_floats():
translations_list = [read_inputs(filename, "cpp") for filename in TEST_FLOATS]
translation_testing(translations_list, translator)
def test_string_translation():
translations_list = [read_inputs(filename, "cpp") for filename in TEST_STRINGS]
translation_testing(translations_list, translator)
def test_different_object_name():
translations_list = [
read_inputs(filename, "cpp") for filename in ["different_object_name"]
]
translation_testing(translations_list, translator)
|
CodeGen-main
|
codegen_sources/test_generation/evosuite_tests_translators/tests/test_evosuite_to_cpp_translator.py
|
import numpy as np
import math
from math import *
import collections
from collections import *
import heapq
import itertools
import random
import sys
import unittest
# TOFILL
class CLASS_9167f62308cfc555ab31a6e4dcdcc95ca2bdcab48016d16bd5b42146ef1977eb(
unittest.TestCase
):
def test0(self):
float0 = f_filled(31337.701)
assert abs(9.8205152e8 - float0) <= 0.01
def test1(self):
float0 = f_filled(0.0)
assert abs(0.0 - float0) <= 0.01
if __name__ == "__main__":
unittest.main()
|
CodeGen-main
|
codegen_sources/test_generation/evosuite_tests_translators/tests/resources/expected_python_translations/floats.py
|
import numpy as np
import math
from math import *
import collections
from collections import *
import heapq
import itertools
import random
import sys
import unittest
# TOFILL
class CLASS_196a45f8932c033f06f6a086488b268404e77353d16c9bc6407a417f237da6db(
unittest.TestCase
):
def test3(self):
int0 = f_filled(None)
assert 0 == int0
if __name__ == "__main__":
unittest.main()
|
CodeGen-main
|
codegen_sources/test_generation/evosuite_tests_translators/tests/resources/expected_python_translations/integer_array_casting.py
|
import numpy as np
import math
from math import *
import collections
from collections import *
import heapq
import itertools
import random
import sys
import unittest
# TOFILL
class CLASS_db35bde703321c750c7134d5769b704c9ab7f9841c6654abb814683a361f9de1(
unittest.TestCase
):
def test0(self):
arrayList0 = []
double0 = 0.0
arrayList0.append(double0)
double1 = 1.0
f_filled(arrayList0, double1)
assert not (double1 in arrayList0)
def test1(self):
arrayList0 = []
double0 = 0.0
arrayList0.append(double0)
f_filled(arrayList0, double0)
assert 0.0 in arrayList0
def test2(self):
arrayList0 = []
double0 = 9000.554
arrayList0.append(double0)
f_filled(arrayList0, double0)
assert 9001 == arrayList0.size()
def test3(self):
arrayList0 = []
double0 = 0.0
f_filled(arrayList0, double0)
assert not (double0 in arrayList0)
if __name__ == "__main__":
unittest.main()
|
CodeGen-main
|
codegen_sources/test_generation/evosuite_tests_translators/tests/resources/expected_python_translations/java_list.py
|
import numpy as np
import math
from math import *
import collections
from collections import *
import heapq
import itertools
import random
import sys
import unittest
# TOFILL
class CLASS_4819651a89a417bce7b2158d1101004f26892e6022f6d1e6348175e23666ec38(
unittest.TestCase
):
def test0(self):
double0 = 3051.0
double1 = f_filled(double0)
assert abs(3051.0 - double1) <= 1.0e-4
if __name__ == "__main__":
unittest.main()
|
CodeGen-main
|
codegen_sources/test_generation/evosuite_tests_translators/tests/resources/expected_python_translations/doubles.py
|
import numpy as np
import math
from math import *
import collections
from collections import *
import heapq
import itertools
import random
import sys
import unittest
# TOFILL
class CLASS_c2a773c670339b0d7be430a133f7f597ae56ad8ebb7f7209c0fe9edbd248fd04(
unittest.TestCase
):
def test2(self):
boolean0 = f_filled("", None)
assert not (boolean0)
if __name__ == "__main__":
unittest.main()
|
CodeGen-main
|
codegen_sources/test_generation/evosuite_tests_translators/tests/resources/expected_python_translations/strings_null_casting.py
|
import numpy as np
import math
from math import *
import collections
from collections import *
import heapq
import itertools
import random
import sys
import unittest
# TOFILL
class CLASS_27c05c7602b81c3bcf7ac99abf940ebc4c909da67935bb59bc30e51ac3933ace(
unittest.TestCase
):
def test1(self):
string0 = f_filled("", "")
assert string0 is None
if __name__ == "__main__":
unittest.main()
|
CodeGen-main
|
codegen_sources/test_generation/evosuite_tests_translators/tests/resources/expected_python_translations/strings.py
|
import numpy as np
import math
from math import *
import collections
from collections import *
import heapq
import itertools
import random
import sys
import unittest
# TOFILL
class PERMUTE_TWO_ARRAYS_SUM_EVERY_PAIR_GREATER_EQUAL_K(unittest.TestCase):
def test0(self):
integerArray0 = [None] * 2
int0 = -1
integer0 = -1
assert (-1) == int(integer0)
assert integer0 == (int0)
assert integer0 is not None
integerArray0[0] = integer0
integer1 = 1
assert 1 == int(integer1)
assert not (integer1 == (int0))
assert not (integer1 == (integer0))
assert integer1 is not None
integerArray0[1] = integer1
intArray0 = [0] * 3
intArray0[2] = int0
boolean0 = f_filled(integerArray0, intArray0, 1, 0)
assert boolean0
assert [(-1), 0, 0] == intArray0
assert 2 == len(integerArray0)
assert 3 == len(intArray0)
def test1(self):
integerArray0 = [None] * 2
int0 = -1
integer0 = -1
assert (-1) == int(integer0)
assert integer0 == (int0)
assert integer0 is not None
integerArray0[0] = integer0
int1 = 1
integerArray0[1] = integer0
intArray0 = [0] * 3
intArray0[2] = int0
boolean0 = f_filled(integerArray0, intArray0, int1, (-50146))
assert boolean0
assert [(-1), 0, 0] == intArray0
assert not (int1 == int0)
assert 2 == len(integerArray0)
assert 3 == len(intArray0)
def test2(self):
integerArray0 = [None] * 2
integer0 = -1
assert (-1) == int(integer0)
assert integer0 is not None
integerArray0[0] = integer0
integerArray0[1] = integer0
intArray0 = [0] * 3
boolean0 = f_filled(integerArray0, intArray0, (-54229), 1)
assert boolean0
assert [0, 0, 0] == intArray0
assert 2 == len(integerArray0)
assert 3 == len(intArray0)
def test3(self):
integerArray0 = [None] * 2
integer0 = -1
assert (-1) == int(integer0)
assert integer0 is not None
integerArray0[0] = integer0
int0 = 1
integerArray0[1] = integerArray0[0]
intArray0 = [0] * 3
boolean0 = f_filled(integerArray0, intArray0, 1, int0)
assert not (boolean0)
assert [0, 0, 0] == intArray0
assert 2 == len(integerArray0)
assert 3 == len(intArray0)
if __name__ == "__main__":
unittest.main()
|
CodeGen-main
|
codegen_sources/test_generation/evosuite_tests_translators/tests/resources/expected_python_translations/integer_array_check.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="dadaptation",
version="3.1",
author="Aaron Defazio",
author_email="adefazio@meta.com",
description="Learning Rate Free Learning for Adam, SGD and AdaGrad",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/facebookresearch/dadaptation",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
|
dadaptation-main
|
setup.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import TYPE_CHECKING, Any, Callable, Optional
import torch
import torch.optim
import pdb
import logging
import os
import torch.distributed as dist
if TYPE_CHECKING:
from torch.optim.optimizer import _params_t
else:
_params_t = Any
class DAdaptAdam(torch.optim.Optimizer):
r"""
Implements Adam with D-Adaptation automatic step-sizes.
Leave LR set to 1 unless you encounter instability.
Arguments:
params (iterable):
Iterable of parameters to optimize or dicts defining parameter groups.
lr (float):
Learning rate adjustment parameter. Increases or decreases the D-adapted learning rate.
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float):
Term added to the denominator outside of the root operation to improve numerical stability. (default: 1e-8).
weight_decay (float):
Weight decay, i.e. a L2 penalty (default: 0).
log_every (int):
Log using print every k steps, default 0 (no logging).
decouple (boolean):
Use AdamW style decoupled weight decay
use_bias_correction (boolean):
Turn on Adam's bias correction. Off by default.
d0 (float):
Initial D estimate for D-adaptation (default 1e-6). Rarely needs changing.
growth_rate (float):
prevent the D estimate from growing faster than this multiplicative rate.
Default is inf, for unrestricted. Values like 1.02 give a kind of learning
rate warmup effect.
fsdp_in_use (bool):
If you're using sharded parameters, this should be set to True. The optimizer
will attempt to auto-detect this, but if you're using an implementation other
than PyTorch's builtin version, the auto-detection won't work.
"""
def __init__(self, params, lr=1.0,
betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, log_every=0,
decouple=False,
use_bias_correction=False,
d0=1e-6, growth_rate=float('inf'),
fsdp_in_use=False):
if not 0.0 < d0:
raise ValueError("Invalid d0 value: {}".format(d0))
if not 0.0 < lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 < eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if decouple:
print(f"Using decoupled weight decay")
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay,
d = d0,
k=0,
numerator_weighted=0.0,
log_every=log_every,
growth_rate=growth_rate,
use_bias_correction=use_bias_correction,
decouple=decouple,
fsdp_in_use=fsdp_in_use)
self.d0 = d0
super().__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self):
return False
@property
def supports_flat_params(self):
return True
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
sk_l1 = 0.0
group = self.param_groups[0]
use_bias_correction = group['use_bias_correction']
numerator_weighted = group['numerator_weighted']
beta1, beta2 = group['betas']
k = group['k']
d = group['d']
lr = max(group['lr'] for group in self.param_groups)
if use_bias_correction:
bias_correction = ((1-beta2**(k+1))**0.5)/(1-beta1**(k+1))
else:
bias_correction = 1
dlr = d*lr*bias_correction
growth_rate = group['growth_rate']
decouple = group['decouple']
log_every = group['log_every']
fsdp_in_use = group['fsdp_in_use']
sqrt_beta2 = beta2**(0.5)
numerator_acum = 0.0
for group in self.param_groups:
decay = group['weight_decay']
k = group['k']
eps = group['eps']
group_lr = group['lr']
if group_lr not in [lr, 0.0]:
raise RuntimeError(f"Setting different lr values in different parameter groups is only supported for values of 0")
for p in group['params']:
if p.grad is None:
continue
if hasattr(p, "_fsdp_flattened"):
fsdp_in_use = True
grad = p.grad.data
# Apply weight decay (coupled variant)
if decay != 0 and not decouple:
grad.add_(p.data, alpha=decay)
state = self.state[p]
# State initialization
if 'step' not in state:
state['step'] = 0
state['s'] = torch.zeros_like(p.data).detach()
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data).detach()
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data).detach()
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
s = state['s']
if group_lr > 0.0:
denom = exp_avg_sq.sqrt().add_(eps)
numerator_acum += dlr * torch.dot(grad.flatten(), s.div(denom).flatten()).item()
# Adam EMA updates
exp_avg.mul_(beta1).add_(grad, alpha=dlr*(1-beta1))
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1-beta2)
s.mul_(sqrt_beta2).add_(grad, alpha=dlr*(1-sqrt_beta2))
sk_l1 += s.abs().sum().item()
######
numerator_weighted = sqrt_beta2*numerator_weighted + (1-sqrt_beta2)*numerator_acum
d_hat = d
        # if we have not made any progress, return early
        # if any gradients are available, we will have sk_l1 > 0 (unless \|g\|=0)
if sk_l1 == 0:
return loss
if lr > 0.0:
if fsdp_in_use:
dist_tensor = torch.zeros(2).cuda()
dist_tensor[0] = numerator_weighted
dist_tensor[1] = sk_l1
dist.all_reduce(dist_tensor, op=dist.ReduceOp.SUM)
global_numerator_weighted = dist_tensor[0]
global_sk_l1 = dist_tensor[1]
else:
global_numerator_weighted = numerator_weighted
global_sk_l1 = sk_l1
d_hat = global_numerator_weighted/((1-sqrt_beta2)*global_sk_l1)
d = max(d, min(d_hat, d*growth_rate))
if log_every > 0 and k % log_every == 0:
logging.info(f"lr: {lr} dlr: {dlr} d_hat: {d_hat}, d: {d}. sk_l1={global_sk_l1:1.1e} numerator_weighted={global_numerator_weighted:1.1e}")
for group in self.param_groups:
group['numerator_weighted'] = numerator_weighted
group['d'] = d
decay = group['weight_decay']
k = group['k']
eps = group['eps']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
state['step'] += 1
denom = exp_avg_sq.sqrt().add_(eps)
# Apply weight decay (decoupled variant)
if decay != 0 and decouple:
p.data.add_(p.data, alpha=-decay * dlr)
### Take step
p.data.addcdiv_(exp_avg, denom, value=-1)
group['k'] = k + 1
return loss
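# Illustrative usage sketch (hypothetical model, data and hyper-parameters; the
# docstring above says to leave lr at 1.0 and let D-Adaptation choose the step
# size). Runs a few steps of least-squares regression on random data.
if __name__ == "__main__":
    torch.manual_seed(0)
    model = torch.nn.Linear(16, 1)
    # decouple=True gives AdamW-style weight decay; log_every reports the D
    # estimate through the logging module.
    opt = DAdaptAdam(model.parameters(), lr=1.0, weight_decay=1e-4,
                     decouple=True, log_every=10)
    for _ in range(50):
        x, y = torch.randn(32, 16), torch.randn(32, 1)
        opt.zero_grad()
        torch.nn.functional.mse_loss(model(x), y).backward()
        opt.step()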
|
dadaptation-main
|
dadaptation/dadapt_adam.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .dadapt_adagrad import DAdaptAdaGrad
from .dadapt_adam import DAdaptAdam
from .dadapt_sgd import DAdaptSGD
from .dadapt_adan import DAdaptAdan
from .dadapt_lion import DAdaptLion
|
dadaptation-main
|
dadaptation/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.optim
import pdb
import math
import logging
import torch.distributed as dist
class DAdaptSGD(torch.optim.Optimizer):
r"""
Implements SGD with D-Adaptation automatic step-sizes. Leave LR set to 1 unless you encounter instability.
Arguments:
params (iterable):
Iterable of parameters to optimize or dicts defining parameter groups.
lr (float):
Learning rate adjustment parameter. Increases or decreases the D-adapted learning rate.
momentum (float):
Momentum value in the range [0,1) (default: 0).
weight_decay (float):
Weight decay, i.e. a L2 penalty (default: 0).
log_every (int):
Log using print every k steps, default 0 (no logging).
d0 (float):
Initial D estimate for D-adaptation (default 1e-6). Rarely needs changing.
growth_rate (float):
prevent the D estimate from growing faster than this multiplicative rate.
Default is inf, for unrestricted. More conservative values like 1.02 may
help if training is unstable.
fsdp_in_use (bool):
If you're using sharded parameters, this should be set to True. The optimizer
will attempt to auto-detect this, but if you're using an implementation other
than PyTorch's builtin version, the auto-detection won't work.
"""
def __init__(self, params,
lr=1.0,
momentum=0.0,
weight_decay=0,
log_every=0,
d0=1e-6, growth_rate=float('inf'),
fsdp_in_use=False):
if not 0.0 < d0:
raise ValueError("Invalid d0 value: {}".format(d0))
if not 0.0 < lr:
raise ValueError("Invalid learning rate: {}".format(lr))
defaults = dict(lr=lr,
momentum=momentum,
weight_decay=weight_decay, k=0,
log_every=log_every,
numerator_weighted=0.0,
d=d0,
growth_rate=growth_rate,
fsdp_in_use=fsdp_in_use)
self.loggables = {}
try:
self.rank = torch.distributed.get_rank()
except:
self.rank = 0
super().__init__(params, defaults)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
group = self.param_groups[0]
lr = max(group['lr'] for group in self.param_groups)
decay = group['weight_decay']
momentum = group['momentum']
log_every = group['log_every']
ck = 1 - momentum
k = group['k']
numerator_weighted = group['numerator_weighted']
growth_rate = group['growth_rate']
d = group['d']
fsdp_in_use = group['fsdp_in_use']
group = self.param_groups[0]
sk_sq = 0.0
if k == 0:
g_sq = 0.0
for group in self.param_groups:
group_lr = group['lr']
for p in group['params']:
if p.grad is None:
continue
if hasattr(p, "_fsdp_flattened"):
fsdp_in_use = True
grad = p.grad.data
# Apply weight decay
if decay != 0:
grad.add(p.data, alpha=decay)
state = self.state[p]
if group_lr > 0.0:
g_sq += (grad * grad).sum().item()
if fsdp_in_use:
dist_tensor = torch.zeros(1).cuda()
dist_tensor[0] = g_sq
dist.all_reduce(dist_tensor, op=dist.ReduceOp.SUM)
global_gsq = dist_tensor[0]
else:
global_gsq = g_sq
group['g0_norm'] = g0_norm = math.sqrt(global_gsq)
g0_norm = group['g0_norm']
dlr = d*lr/g0_norm
for group in self.param_groups:
group_lr = group['lr']
if group_lr not in [lr, 0.0]:
raise RuntimeError(f"Setting different lr values in different parameter groups is only supported for values of 0")
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
if 'z' not in state:
z = state['z'] = torch.clone(p.data).detach()
s = state['s'] = torch.zeros_like(p.data).detach()
x0 = state['x0'] = torch.clone(p.data).detach()
# Apply weight decay
if decay != 0:
grad.add_(p.data, alpha=decay)
s = state['s']
if group_lr > 0.0:
numerator_weighted += dlr * torch.dot(grad.flatten(), s.flatten()).item()
s.data.add_(grad, alpha=dlr)
sk_sq += (s * s).sum().item()
######
d_hat = d
if lr > 0.0:
if fsdp_in_use:
dist_tensor = torch.zeros(2).cuda()
dist_tensor[0] = sk_sq
dist_tensor[1] = numerator_weighted
dist.all_reduce(dist_tensor, op=dist.ReduceOp.SUM)
global_sk_sq = dist_tensor[0]
global_numerator_weighted = dist_tensor[1]
else:
global_sk_sq = sk_sq
global_numerator_weighted = numerator_weighted
d_hat = 2*global_numerator_weighted/math.sqrt(global_sk_sq)
d = max(d, min(d_hat, d*growth_rate))
            # if we have not made any updates, return early
            # if any gradients are available, we will have sk_sq > 0 (unless \|g\|=0)
if global_sk_sq == 0:
return loss
if log_every > 0 and k % log_every == 0:
logging.info(f"(r={self.rank},k={k}) dlr: {dlr} d_hat: {d_hat}, d: {d}. sk_norm={math.sqrt(global_sk_sq)} numerator_weighted={global_numerator_weighted} g0_norm={g0_norm}")
for group in self.param_groups:
group['numerator_weighted'] = numerator_weighted
group['d'] = d
group['g0_norm'] = g0_norm
######################################
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
s = state['s']
x0 = state['x0']
z = state['z']
# z step
z.data.copy_(x0 - s)
# x step
p.data.mul_(1-ck).add_(z, alpha=ck)
group['k'] = k + 1
return loss
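# Illustrative usage sketch (hypothetical model, data and schedule): since the
# step size is dlr = d * lr / g0_norm, a standard scheduler that rescales
# group['lr'] composes with D-Adaptation; the short linear warmup below is only
# an assumption and may not be needed in practice.
if __name__ == "__main__":
    model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.ReLU(),
                                torch.nn.Linear(8, 1))
    opt = DAdaptSGD(model.parameters(), lr=1.0, momentum=0.9, log_every=20)
    sched = torch.optim.lr_scheduler.LambdaLR(opt, lambda step: min(1.0, (step + 1) / 10))
    for _ in range(40):
        x, y = torch.randn(64, 8), torch.randn(64, 1)
        opt.zero_grad()
        torch.nn.functional.mse_loss(model(x), y).backward()
        opt.step()
        sched.step()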
|
dadaptation-main
|
dadaptation/dadapt_sgd.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import TYPE_CHECKING, Any, Callable, Optional
import torch
import torch.optim
import pdb
import logging
if TYPE_CHECKING:
from torch.optim.optimizer import _params_t
else:
_params_t = Any
class DAdaptAdaGrad(torch.optim.Optimizer):
"""
Adagrad with D-Adaptation. We recommend Adam or SGD be used instead in most situations,
as D-Adaptation on top of AdaGrad does not adapt the learning rate as quickly in
practice as the other variants.
Leave LR set to 1 unless you encounter instability.
Arguments:
params (iterable):
Iterable of parameters to optimize or dicts defining parameter groups.
lr (float):
Learning rate adjustment parameter. Increases or decreases the D-adapted learning rate.
log_every (int):
Log using print every k steps, default 0 (no logging).
weight_decay (float):
Weight decay, i.e. a L2 penalty (default: 0).
eps (float):
Term added to the denominator outside of the root operation to improve numerical stability. (default: 1e-6).
d0 (float):
Initial D estimate for D-adaptation (default 1e-6). Rarely needs changing.
growth_rate (float):
prevent the D estimate from growing faster than this multiplicative rate.
Default is inf, for unrestricted.
"""
def __init__(
self, params: _params_t,
lr: float = 1.0,
momentum: float = 0,
log_every: int = 0,
weight_decay: float = 0.0,
        eps: float = 1e-6,
d0 = 1e-6, growth_rate=float('inf')
):
if d0 <= 0:
raise ValueError("Invalid d0 value: {}".format(d0))
if lr <= 0:
raise ValueError(f"Learning rate {lr} must be positive")
if momentum < 0:
raise ValueError(f"Momentum {momentum} must be non-negative")
if eps <= 0:
raise ValueError("Invalid epsilon value: {}".format(eps))
defaults = dict(lr=lr,
momentum=momentum,
eps=eps,
weight_decay=weight_decay,
gsq_weighted=0.0,
log_every=log_every,
d=d0,
growth_rate=growth_rate,
k = 0,
sksq_weighted=0.0,
skl1=0.0)
self.d0 = d0
super().__init__(params, defaults)
def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
group = self.param_groups[0]
lr = group["lr"]
momentum = group['momentum']
ck = 1 - momentum
log_every = group['log_every']
growth_rate = group['growth_rate']
gsq_weighted = group['gsq_weighted']
sksq_weighted = group['sksq_weighted']
skl1 = group['skl1']
d = group['d']
dlr = d*lr
g_sq = 0.0
sksq_weighted_change = 0.0
skl1_change = 0.0
for group in self.param_groups:
eps = group["eps"]
k = group['k']
decay = group['weight_decay']
######
for p in group['params']:
if p.grad is None:
continue
if hasattr(p, "_fsdp_flattened"):
raise RuntimeError("D-Adapt AdaGrad doesn't currently support fully-sharded data parallel. Use D-Adapt Adam instead")
grad = p.grad.data
state = self.state[p]
if "alphak" not in state:
state["alphak"] = torch.full_like(p.data, fill_value=1e-6).detach()
state['sk'] = torch.zeros_like(p.data).detach()
state["x0"] = torch.clone(p.data).detach()
if grad.is_sparse:
state['weighted_sk'] = torch.zeros_like(p.data).detach()
sk = state['sk']
alphak = state['alphak']
grad_sq = 0.0
if grad.is_sparse:
weighted_sk = state['weighted_sk']
grad = grad.coalesce()
grad_vals = grad._values()
vk_vals = grad_vals*grad_vals
sk_vals = sk.sparse_mask(grad).coalesce()._values()
old_skl1_vals = sk_vals.abs().sum().item()
sk.data.add_(grad, alpha=dlr)
sk_vals = sk.sparse_mask(grad).coalesce()._values()
alphak_vals = alphak.sparse_mask(grad).coalesce()._values()
weighted_sk_vals = weighted_sk.sparse_mask(grad).coalesce()._values()
### Update alpha before step
alphak_vals = alphak.sparse_mask(grad).coalesce()._values()
alphakp1_vals = alphak_vals + vk_vals
alphak_delta_vals = alphakp1_vals - alphak_vals
alphak_delta = torch.sparse_coo_tensor(grad.indices(), alphak_delta_vals, grad.shape)
alphak.add_(alphak_delta)
####
denominator = torch.sqrt(alphakp1_vals + eps)
grad_sq = (grad_vals * grad_vals).div(denominator).sum().item()
g_sq += grad_sq
### Update weighted sk sq tracking
weighted_skp1_vals = (sk_vals * sk_vals).div(denominator)
sksq_weighted_change += weighted_skp1_vals.sum().item() - weighted_sk_vals.sum().item()
weighted_skp1_delta_vals = weighted_skp1_vals - weighted_sk_vals
weighted_skp1_delta = torch.sparse_coo_tensor(grad.indices(), weighted_skp1_delta_vals, grad.shape)
weighted_sk.add_(weighted_skp1_delta)
skl1_vals = sk_vals.abs().sum().item()
skl1_change += skl1_vals - old_skl1_vals
else:
if decay != 0:
grad.add_(p.data, alpha=decay)
old_sksq_weighted_param = (sk * sk).div(torch.sqrt(alphak) + eps).sum().item()
old_skl1_param = sk.abs().sum().item()
alphak.data.add_(grad * grad)
grad_sq = (grad * grad).div(torch.sqrt(alphak) + eps).sum().item()
g_sq += grad_sq
sk.data.add_(grad, alpha=dlr)
sksq_weighted_param = (sk * sk).div(torch.sqrt(alphak) + eps).sum().item()
skl1_param = sk.abs().sum().item()
sksq_weighted_change += sksq_weighted_param - old_sksq_weighted_param
skl1_change += skl1_param - old_skl1_param
######
sksq_weighted = sksq_weighted + sksq_weighted_change
skl1 = skl1 + skl1_change
        # if we have not made any progress, return early
        # if any gradients are available, we will have skl1 > 0 (unless \|g\|=0)
if skl1 == 0:
return loss
gsq_weighted = gsq_weighted + dlr*dlr*g_sq
d_hat = d
if lr > 0.0:
d_hat = (sksq_weighted - gsq_weighted)/skl1
d = group['d'] = max(d, min(d_hat, d*growth_rate))
if log_every > 0 and k % log_every == 0:
logging.info(f"d_hat: {d_hat}, d: {d}. sksq_weighted={sksq_weighted:1.1e} skl1={skl1:1.1e} gsq_weighted={gsq_weighted:1.1e} lr={lr}")
for group in self.param_groups:
group['gsq_weighted'] = gsq_weighted
group['skl1'] = skl1
group['sksq_weighted'] = sksq_weighted
group['d'] = d
decay = group['weight_decay']
k = group['k']
eps = group['eps']
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
alphak = state["alphak"]
sk = state["sk"]
x0 = state["x0"]
if grad.is_sparse:
grad = grad.coalesce()
grad_vals = grad._values()
sk_vals = sk.sparse_mask(grad).coalesce()._values()
alphak_vals = alphak.sparse_mask(grad).coalesce()._values()
x0_vals = x0.sparse_mask(grad).coalesce()._values()
p_vals = p.data.sparse_mask(grad).coalesce()._values()
loc_vals = x0_vals - sk_vals.div(torch.sqrt(alphak_vals + eps))
loc_delta_vals = loc_vals - p_vals
loc_delta = torch.sparse_coo_tensor(grad.indices(), loc_delta_vals, grad.shape)
p.data.add_(loc_delta)
else:
z = x0 - sk.div(torch.sqrt(alphak) + eps)
if momentum != 0:
p.data.mul_(1-ck).add_(z, alpha=ck)
else:
p.data.copy_(z)
group['k'] = k + 1
return loss
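# Illustrative usage sketch (hypothetical embedding table and data): the sparse
# branch of step() above is exercised when a parameter produces sparse
# gradients, e.g. torch.nn.Embedding(..., sparse=True). eps must be positive,
# so it is given explicitly here.
if __name__ == "__main__":
    emb = torch.nn.Embedding(100, 8, sparse=True)
    opt = DAdaptAdaGrad(emb.parameters(), lr=1.0, eps=1e-6)
    for _ in range(20):
        idx = torch.randint(0, 100, (32,))
        opt.zero_grad()
        emb(idx).pow(2).sum().backward()
        opt.step()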
|
dadaptation-main
|
dadaptation/dadapt_adagrad.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Tuple, Optional, Callable
import torch
from torch.optim.optimizer import Optimizer
import torch.distributed as dist
import logging
import pdb
class DAdaptLion(Optimizer):
r"""
Implements Lion with D-Adaptation automatic step-sizes.
Has not been as heavily tested as DAdaptAdam and should be considered experimental.
Leave LR set to 1 unless you encounter instability.
Arguments:
params (iterable):
Iterable of parameters to optimize or dicts defining parameter groups.
lr (float):
Learning rate adjustment parameter. Increases or decreases the D-adapted learning rate.
        betas (Tuple[float, float], optional): coefficients used for the
            exponential moving averages in the update and the D estimate (default: (0.9, 0.99))
weight_decay (float):
Weight decay, i.e. a L2 penalty (default: 0).
log_every (int):
Log using print every k steps, default 0 (no logging).
d0 (float):
Initial D estimate for D-adaptation (default 1e-6). Rarely needs changing.
fsdp_in_use (bool):
If you're using sharded parameters, this should be set to True. The optimizer
will attempt to auto-detect this, but if you're using an implementation other
than PyTorch's builtin version, the auto-detection won't work.
"""
def __init__(
self,
params,
lr: float = 1.0,
betas: Tuple[float, float] = (0.9, 0.99),
weight_decay: float = 0.0, log_every=0,
d0=1e-6, fsdp_in_use=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(
lr=lr,
betas=betas,
weight_decay=weight_decay,
d=d0, k=0,
log_every=log_every,
numerator_weighted=0.0,
fsdp_in_use=fsdp_in_use)
super().__init__(params, defaults)
def step(self, closure: Optional[Callable] = None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
group = self.param_groups[0]
numerator_weighted = group['numerator_weighted']
d = group['d']
lr = max(group['lr'] for group in self.param_groups)
dlr = d*lr
log_every = group['log_every']
fsdp_in_use = group['fsdp_in_use']
beta1, beta2 = group['betas']
sqrt_beta2 = beta2**0.5
numerator_acum = 0.0
sk_l1 = 0.0
for group in self.param_groups:
k = group['k']
group_lr = group['lr']
wd = group['weight_decay']
if group_lr not in [lr, 0.0]:
raise RuntimeError(f"Setting different lr values in different parameter groups is only supported for values of 0")
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
state = self.state[p]
if 'exp_avg' not in state:
state['exp_avg'] = torch.zeros_like(p).detach()
state['s'] = torch.zeros_like(p).detach()
exp_avg = state['exp_avg']
s = state['s']
#AdamW style weight decay
p.data.mul_(1-dlr*wd)
update = exp_avg.clone().mul_(beta1).add_(grad, alpha=(1-beta1)).sign_()
p.data.add_(update, alpha=-dlr)
exp_avg.mul_(beta2).add_(grad, alpha=(1-beta2)*dlr)
numerator_acum += dlr * torch.dot(update.flatten(), s.flatten()).item()
s.mul_(sqrt_beta2).add_(update, alpha=(1-sqrt_beta2)*dlr)
sk_l1 += s.abs().sum().item()
numerator_weighted = sqrt_beta2*numerator_weighted + (1-sqrt_beta2)*numerator_acum
d_hat = d
        # if we have not made any progress, return early
        # if any gradients are available, we will have sk_l1 > 0 (unless \|g\|=0)
if sk_l1 == 0:
return loss
if lr > 0.0:
if fsdp_in_use:
dist_tensor = torch.zeros(2).cuda()
dist_tensor[0] = numerator_weighted
dist_tensor[1] = sk_l1
dist.all_reduce(dist_tensor, op=dist.ReduceOp.SUM)
global_numerator_weighted = dist_tensor[0]
global_sk_l1 = dist_tensor[1]
else:
global_numerator_weighted = numerator_weighted
global_sk_l1 = sk_l1
d_hat = global_numerator_weighted/((1-sqrt_beta2)*global_sk_l1)
d = max(d, d_hat)
if log_every > 0 and k % log_every == 0:
logging.info(f"lr: {lr} dlr: {dlr} d_hat: {d_hat}, d: {d}. sk_l1={global_sk_l1:1.1e} numerator_weighted={global_numerator_weighted:1.1e}")
for group in self.param_groups:
group['numerator_weighted'] = numerator_weighted
group['d'] = d
group['k'] = group['k'] + 1
return loss
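# Illustrative usage sketch (hypothetical model and hyper-parameters): weight
# decay here is applied AdamW-style inside step() (p is scaled by 1 - dlr * wd),
# so it is passed to the constructor rather than added to the loss; the value
# 0.1 is only an example, not a recommendation.
if __name__ == "__main__":
    model = torch.nn.Linear(4, 4)
    opt = DAdaptLion(model.parameters(), lr=1.0, betas=(0.9, 0.99), weight_decay=0.1)
    opt.zero_grad()
    model(torch.randn(2, 4)).sum().backward()
    opt.step()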
|
dadaptation-main
|
dadaptation/dadapt_lion.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import TYPE_CHECKING, Any
import torch
import torch.optim
if TYPE_CHECKING:
from torch.optim.optimizer import _params_t
else:
_params_t = Any
def to_real(x):
if torch.is_complex(x):
return x.real
else:
return x
class DAdaptAdan(torch.optim.Optimizer):
r"""
Implements Adan with D-Adaptation automatic step-sizes.
Has not been as heavily tested as DAdaptAdam and should be considered experimental.
Leave LR set to 1 unless you encounter instability.
Adan was proposed in
Adan: Adaptive Nesterov Momentum Algorithm for Faster Optimizing Deep Models[J]. arXiv preprint arXiv:2208.06677, 2022.
https://arxiv.org/abs/2208.06677
Arguments:
params (iterable):
Iterable of parameters to optimize or dicts defining parameter groups.
lr (float):
Learning rate adjustment parameter. Increases or decreases the D-adapted learning rate.
        betas (Tuple[float, float, float], optional): coefficients used for computing
running averages of gradient and its norm. (default: (0.98, 0.92, 0.99))
eps (float):
Term added to the denominator outside of the root operation to improve numerical stability. (default: 1e-8).
weight_decay (float):
Weight decay, i.e. a L2 penalty (default: 0.02).
        no_prox (boolean):
            If True, apply the decoupled weight decay multiplicatively before the
            update; otherwise apply it in proximal form by dividing after the
            update (default: False).
log_every (int):
Log using print every k steps, default 0 (no logging).
d0 (float):
Initial D estimate for D-adaptation (default 1e-6). Rarely needs changing.
growth_rate (float):
prevent the D estimate from growing faster than this multiplicative rate.
Default is inf, for unrestricted. Values like 1.02 give a kind of learning
rate warmup effect.
"""
def __init__(self, params, lr=1.0,
betas=(0.98, 0.92, 0.99),
eps=1e-8, weight_decay=0.02,
no_prox=False,
log_every=0, d0=1e-6,
growth_rate=float('inf')):
if not 0.0 < d0:
raise ValueError("Invalid d0 value: {}".format(d0))
if not 0.0 < lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 < eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 <= betas[2] < 1.0:
raise ValueError("Invalid beta parameter at index 2: {}".format(betas[2]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay,
no_prox=no_prox,
d = d0,
k=0,
gsq_weighted=0.0,
log_every=log_every,
growth_rate=growth_rate)
super().__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self):
return False
@property
def supports_flat_params(self):
return True
# Experimental implementation of Adan's restart strategy
@torch.no_grad()
def restart_opt(self):
for group in self.param_groups:
group['gsq_weighted'] = 0.0
for p in group['params']:
if p.requires_grad:
state = self.state[p]
# State initialization
state['step'] = 0
state['s'] = torch.zeros_like(p.data, memory_format=torch.preserve_format).detach()
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data, memory_format=torch.preserve_format).detach()
# Exponential moving average of gradient difference
state['exp_avg_diff'] = torch.zeros_like(to_real(p.data), memory_format=torch.preserve_format).detach()
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data, memory_format=torch.preserve_format).detach()
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
g_sq = 0.0
sksq_weighted = 0.0
sk_l1 = 0.0
ngroups = len(self.param_groups)
group = self.param_groups[0]
gsq_weighted = group['gsq_weighted']
d = group['d']
lr = group['lr']
dlr = d*lr
no_prox = group['no_prox']
growth_rate = group['growth_rate']
log_every = group['log_every']
beta1, beta2, beta3 = group['betas']
for group in self.param_groups:
decay = group['weight_decay']
k = group['k']
eps = group['eps']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
# State initialization
if 'step' not in state:
state['step'] = 0
state['s'] = torch.zeros_like(p.data, memory_format=torch.preserve_format).detach()
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data, memory_format=torch.preserve_format).detach()
# Exponential moving average of gradient difference
state['exp_avg_diff'] = torch.zeros_like(p.data, memory_format=torch.preserve_format).detach()
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(to_real(p.data), memory_format=torch.preserve_format).detach()
if state['step'] == 0:
# Previous gradient values
state['pre_grad'] = grad.clone()
exp_avg, exp_avg_diff, exp_avg_sq = state['exp_avg'], state['exp_avg_diff'], state['exp_avg_sq']
grad_diff = grad - state['pre_grad']
grad_grad = to_real(grad * grad.conj())
update = grad + beta2 * grad_diff
update_update = to_real(update * update.conj())
exp_avg.mul_(beta1).add_(grad, alpha=dlr*(1. - beta1))
exp_avg_diff.mul_(beta2).add_(grad_diff, alpha=dlr*(1. - beta2))
exp_avg_sq.mul_(beta3).add_(update_update, alpha=1. - beta3)
denom = exp_avg_sq.sqrt().add_(eps)
g_sq += grad_grad.div_(denom).sum().item()
s = state['s']
s.mul_(beta3).add_(grad, alpha=dlr*(1. - beta3))
sksq_weighted += to_real(s * s.conj()).div_(denom).sum().item()
sk_l1 += s.abs().sum().item()
######
gsq_weighted = beta3*gsq_weighted + g_sq*(dlr**2)*(1-beta3)
d_hat = d
        # if we have not made any progress, return early
        # if any gradients are available, we will have sk_l1 > 0 (unless \|g\|=0)
if sk_l1 == 0:
return loss
if lr > 0.0:
d_hat = (sksq_weighted/(1-beta3) - gsq_weighted)/sk_l1
d = max(d, min(d_hat, d*growth_rate))
if log_every > 0 and k % log_every == 0:
print(f"ng: {ngroups} lr: {lr} dlr: {dlr} d_hat: {d_hat}, d: {d}. sksq_weighted={sksq_weighted:1.1e} sk_l1={sk_l1:1.1e} gsq_weighted={gsq_weighted:1.1e}")
for group in self.param_groups:
group['gsq_weighted'] = gsq_weighted
group['d'] = d
decay = group['weight_decay']
k = group['k']
eps = group['eps']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
exp_avg, exp_avg_diff, exp_avg_sq = state['exp_avg'], state['exp_avg_diff'], state['exp_avg_sq']
state['step'] += 1
denom = exp_avg_sq.sqrt().add_(eps)
denom = denom.type(p.type())
update = (exp_avg + beta2 * exp_avg_diff).div_(denom)
### Take step
if no_prox:
p.data.mul_(1 - dlr * decay)
p.add_(update, alpha=-1)
else:
p.add_(update, alpha=-1)
p.data.div_(1 + dlr * decay)
state['pre_grad'].copy_(grad)
group['k'] = k + 1
return loss
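# Illustrative usage sketch (hypothetical model, data and restart schedule):
# shows the experimental restart_opt() helper, which clears the optimizer
# statistics between rounds while keeping the current D estimate; whether
# restarting helps is problem-dependent.
if __name__ == "__main__":
    model = torch.nn.Linear(8, 2)
    opt = DAdaptAdan(model.parameters(), lr=1.0, weight_decay=0.02)
    for _ in range(3):
        for _ in range(10):
            x, y = torch.randn(16, 8), torch.randn(16, 2)
            opt.zero_grad()
            torch.nn.functional.mse_loss(model(x), y).backward()
            opt.step()
        opt.restart_opt()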
|
dadaptation-main
|
dadaptation/dadapt_adan.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .dadapt_adan_ip import DAdaptAdanIP
from .dadapt_adam_preprint import DAdaptAdamPreprint
|
dadaptation-main
|
dadaptation/experimental/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import TYPE_CHECKING, Any, Callable, Optional
import torch
import torch.optim
import pdb
import logging
import os
if TYPE_CHECKING:
from torch.optim.optimizer import _params_t
else:
_params_t = Any
def to_real(x):
if torch.is_complex(x):
return x.real
else:
return x
class DAdaptAdanIP(torch.optim.Optimizer):
r"""
Implements Adan with D-Adaptation automatic step-sizes. Leave LR set to 1 unless you encounter instability.
Adan was proposed in
Adan: Adaptive Nesterov Momentum Algorithm for Faster Optimizing Deep Models[J]. arXiv preprint arXiv:2208.06677, 2022.
https://arxiv.org/abs/2208.06677
This IP variant uses a tighter bound than the non-IP version,
and so will typically choose larger step sizes. It has not
been as extensively tested.
Arguments:
params (iterable):
Iterable of parameters to optimize or dicts defining parameter groups.
lr (float):
Learning rate adjustment parameter. Increases or decreases the D-adapted learning rate.
        betas (Tuple[float, float, float], optional): coefficients used for computing
running averages of gradient and its norm. (default: (0.98, 0.92, 0.99))
eps (float):
Term added to the denominator outside of the root operation to improve numerical stability. (default: 1e-8).
weight_decay (float):
Weight decay, i.e. a L2 penalty (default: 0.02).
        no_prox (boolean):
            If True, apply the decoupled weight decay multiplicatively before the
            update; otherwise apply it in proximal form by dividing after the
            update (default: False).
log_every (int):
Log using print every k steps, default 0 (no logging).
d0 (float):
Initial D estimate for D-adaptation (default 1e-6). Rarely needs changing.
growth_rate (float):
prevent the D estimate from growing faster than this multiplicative rate.
Default is inf, for unrestricted. Values like 1.02 give a kind of learning
rate warmup effect.
"""
def __init__(self, params, lr=1.0,
betas=(0.98, 0.92, 0.99),
eps=1e-8, weight_decay=0.02,
no_prox=False,
log_every=0, d0=1e-6,
growth_rate=float('inf')):
if not 0.0 < d0:
raise ValueError("Invalid d0 value: {}".format(d0))
if not 0.0 < lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 < eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 <= betas[2] < 1.0:
raise ValueError("Invalid beta parameter at index 2: {}".format(betas[2]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay,
no_prox=no_prox,
d = d0,
k=0,
numerator_weighted=0.0,
log_every=log_every,
growth_rate=growth_rate)
self.d0 = d0
super().__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self):
return False
@property
def supports_flat_params(self):
return True
# Experimental implementation of Adan's restart strategy
@torch.no_grad()
def restart_opt(self):
for group in self.param_groups:
group['numerator_weighted'] = 0.0
for p in group['params']:
if p.requires_grad:
state = self.state[p]
# State initialization
state['step'] = 0
state['s'] = torch.zeros_like(p.data, memory_format=torch.preserve_format).detach()
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data, memory_format=torch.preserve_format).detach()
# Exponential moving average of gradient difference
state['exp_avg_diff'] = torch.zeros_like(to_real(p.data), memory_format=torch.preserve_format).detach()
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data, memory_format=torch.preserve_format).detach()
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
g_sq = 0.0
sksq_weighted = 0.0
sk_l1 = 0.0
ngroups = len(self.param_groups)
group = self.param_groups[0]
numerator_weighted = group['numerator_weighted']
d = group['d']
lr = group['lr']
dlr = d*lr
no_prox = group['no_prox']
growth_rate = group['growth_rate']
log_every = group['log_every']
beta1, beta2, beta3 = group['betas']
numerator_acum = 0.0
for group in self.param_groups:
decay = group['weight_decay']
k = group['k']
eps = group['eps']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
# State initialization
if 'step' not in state:
state['step'] = 0
state['s'] = torch.zeros_like(p.data, memory_format=torch.preserve_format).detach()
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data, memory_format=torch.preserve_format).detach()
# Exponential moving average of gradient difference
state['exp_avg_diff'] = torch.zeros_like(p.data, memory_format=torch.preserve_format).detach()
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(to_real(p.data), memory_format=torch.preserve_format).detach()
if state['step'] == 0:
# Previous gradient values
state['pre_grad'] = grad.clone()
exp_avg, exp_avg_sq, exp_avg_diff = state['exp_avg'], state['exp_avg_diff'], state['exp_avg_sq']
grad_diff = grad - state['pre_grad']
update = grad + beta2 * grad_diff
update_update = to_real(update * update.conj())
s = state['s']
denom = exp_avg_sq.sqrt().add_(eps)
numerator_acum += dlr * torch.dot(grad.flatten(), s.div(denom).flatten())
exp_avg.mul_(beta1).add_(grad, alpha=dlr*(1. - beta1))
exp_avg_diff.mul_(beta2).add_(grad_diff, alpha=dlr*(1. - beta2))
exp_avg_sq.mul_(beta3).add_(update_update, alpha=1. - beta3)
s.mul_(beta3).add_(grad, alpha=dlr*(1. - beta3))
sk_l1 += s.abs().sum().item()
######
numerator_weighted = beta3*numerator_weighted + (1-beta3)*numerator_acum
d_hat = d
        # if we have not made any progress, return early
        # if any gradients are available, we will have sk_l1 > 0 (unless \|g\|=0)
if sk_l1 == 0:
return loss
if lr > 0.0:
d_hat = 2*(beta3/(1-beta3))*numerator_weighted/sk_l1
d = max(d, min(d_hat, d*growth_rate))
if log_every > 0 and k % log_every == 0:
print(f"ng: {ngroups} lr: {lr} dlr: {dlr} d_hat: {d_hat}, d: {d}. sk_l1={sk_l1:1.1e} numerator_weighted={numerator_weighted:1.1e}")
for group in self.param_groups:
group['numerator_weighted'] = numerator_weighted
group['d'] = d
decay = group['weight_decay']
k = group['k']
eps = group['eps']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
exp_avg, exp_avg_sq, exp_avg_diff = state['exp_avg'], state['exp_avg_diff'], state['exp_avg_sq']
state['step'] += 1
denom = exp_avg_sq.sqrt().add_(eps)
denom = denom.type(p.type())
update = (exp_avg + beta2 * exp_avg_diff).div_(denom)
### Take step
if no_prox:
p.data.mul_(1 - dlr * decay)
p.add_(update, alpha=-1)
else:
p.add_(update, alpha=-1)
p.data.div_(1 + dlr * decay)
state['pre_grad'].copy_(grad)
group['k'] = k + 1
return loss
|
dadaptation-main
|
dadaptation/experimental/dadapt_adan_ip.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import TYPE_CHECKING, Any, Callable, Optional
import torch
import torch.optim
import pdb
import logging
import os
import torch.distributed as dist
if TYPE_CHECKING:
from torch.optim.optimizer import _params_t
else:
_params_t = Any
def to_real(x):
if torch.is_complex(x):
return x.real
else:
return x
class DAdaptAdamPreprint(torch.optim.Optimizer):
r"""
This is an earlier variant of D-Adapt Adam used in early preprints of the paper, and source
code releases V1 and V2. Use this if you encounter performance regressions after the latest update.
Arguments:
params (iterable):
Iterable of parameters to optimize or dicts defining parameter groups.
lr (float):
Learning rate adjustment parameter. Increases or decreases the D-adapted learning rate.
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float):
Term added to the denominator outside of the root operation to improve numerical stability. (default: 1e-8).
weight_decay (float):
Weight decay, i.e. a L2 penalty (default: 0).
log_every (int):
Log using print every k steps, default 0 (no logging).
decouple (boolean):
Use AdamW style decoupled weight decay
d0 (float):
Initial D estimate for D-adaptation (default 1e-6). Rarely needs changing.
growth_rate (float):
prevent the D estimate from growing faster than this multiplicative rate.
Default is inf, for unrestricted. Values like 1.02 give a kind of learning
rate warmup effect.
fsdp_in_use (bool):
If you're using sharded parameters, this should be set to True. The optimizer
will attempt to auto-detect this, but if you're using an implementation other
than PyTorch's builtin version, the auto-detection won't work.
"""
def __init__(self, params, lr=1.0,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
log_every=0,
decouple=False,
d0=1e-6,
growth_rate=float('inf'),
fsdp_in_use=False):
if not 0.0 < d0:
raise ValueError("Invalid d0 value: {}".format(d0))
if not 0.0 < lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 < eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if decouple:
print(f"Using decoupled weight decay")
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay,
d = d0,
k=0,
gsq_weighted=0.0,
log_every=log_every,
decouple=decouple,
growth_rate=growth_rate,
fsdp_in_use=fsdp_in_use)
super().__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self):
return False
@property
def supports_flat_params(self):
return True
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
g_sq = 0.0
sksq_weighted = 0.0
sk_l1 = 0.0
lr = max(group['lr'] for group in self.param_groups)
group = self.param_groups[0]
gsq_weighted = group['gsq_weighted']
d = group['d']
dlr = d*lr
growth_rate = group['growth_rate']
decouple = group['decouple']
fsdp_in_use = group['fsdp_in_use']
log_every = group['log_every']
beta1, beta2 = group['betas']
for group in self.param_groups:
group_lr = group['lr']
decay = group['weight_decay']
k = group['k']
eps = group['eps']
if group_lr not in [lr, 0.0]:
raise RuntimeError(f"Setting different lr values in different parameter groups is only supported for values of 0")
for p in group['params']:
if p.grad is None:
continue
if hasattr(p, "_fsdp_flattened"):
fsdp_in_use = True
grad = p.grad.data
# Apply weight decay (coupled variant)
if decay != 0 and not decouple:
grad.add_(p.data, alpha=decay)
state = self.state[p]
# State initialization
if 'step' not in state:
state['step'] = 0
state['s'] = torch.zeros_like(p.data, memory_format=torch.preserve_format).detach()
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data, memory_format=torch.preserve_format).detach()
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(to_real(p.data), memory_format=torch.preserve_format).detach()
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
grad_grad = to_real(grad * grad.conj())
# Adam EMA updates
if group_lr > 0:
exp_avg.mul_(beta1).add_(grad, alpha=dlr*(1-beta1))
exp_avg_sq.mul_(beta2).add_(grad_grad, alpha=1-beta2)
denom = exp_avg_sq.sqrt().add_(eps)
g_sq += grad_grad.div_(denom).sum().item()
s = state['s']
s.mul_(beta2).add_(grad, alpha=dlr*(1-beta2))
sksq_weighted += to_real(s * s.conj()).div_(denom).sum().item()
sk_l1 += s.abs().sum().item()
######
gsq_weighted = beta2*gsq_weighted + g_sq*(dlr**2)*(1-beta2)
d_hat = d
        # if we have not made any progress, return early
        # if any gradients are available, we will have sk_l1 > 0 (unless \|g\|=0)
if sk_l1 == 0:
return loss
if lr > 0.0:
if fsdp_in_use:
dist_tensor = torch.zeros(3).cuda()
dist_tensor[0] = sksq_weighted
dist_tensor[1] = gsq_weighted
dist_tensor[2] = sk_l1
dist.all_reduce(dist_tensor, op=dist.ReduceOp.SUM)
global_sksq_weighted = dist_tensor[0]
global_gsq_weighted = dist_tensor[1]
global_sk_l1 = dist_tensor[2]
else:
global_sksq_weighted = sksq_weighted
global_gsq_weighted = gsq_weighted
global_sk_l1 = sk_l1
d_hat = (global_sksq_weighted/(1-beta2) - global_gsq_weighted)/global_sk_l1
d = max(d, min(d_hat, d*growth_rate))
if log_every > 0 and k % log_every == 0:
logging.info(
f"(k={k}) dlr: {dlr:1.1e} d_hat: {d_hat:1.1e}, d: {d:1.8}. "
f"sksq_weighted={global_sksq_weighted:1.1e} gsq_weighted={global_gsq_weighted:1.1e} "
f"sk_l1={global_sk_l1:1.1e}{' (FSDP)' if fsdp_in_use else ''}")
for group in self.param_groups:
group['gsq_weighted'] = gsq_weighted
group['d'] = d
group_lr = group['lr']
decay = group['weight_decay']
k = group['k']
eps = group['eps']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
state['step'] += 1
denom = exp_avg_sq.sqrt().add_(eps)
denom = denom.type(p.type())
# Apply weight decay (decoupled variant)
if decay != 0 and decouple and group_lr > 0:
p.data.add_(p.data, alpha=-decay * dlr)
### Take step
p.data.addcdiv_(exp_avg, denom, value=-1)
group['k'] = k + 1
return loss
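# Illustrative usage sketch (hypothetical model and data): this class takes the
# same constructor arguments as DAdaptAdam except use_bias_correction, so
# falling back to the preprint behaviour only changes the class name.
if __name__ == "__main__":
    model = torch.nn.Linear(4, 1)
    opt = DAdaptAdamPreprint(model.parameters(), lr=1.0, betas=(0.9, 0.999),
                             weight_decay=0.0, decouple=False)
    opt.zero_grad()
    torch.nn.functional.mse_loss(model(torch.randn(8, 4)), torch.randn(8, 1)).backward()
    opt.step()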
|
dadaptation-main
|
dadaptation/experimental/dadapt_adam_preprint.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
sys.path.insert(0, './pytorch-seq2seq/')
from seq2seq.models import EncoderRNN
from seq2seq.loss import Perplexity, NLLLoss
from seq2seq.optim import Optimizer
from t2s import SamplingDecoderRNN as DecoderRNN
from t2s.util import dump_agent
from t2s import T2S, Seq2seq
from t2s.evaluator import PolyEvaluator
from t2s.trainer import MirrorTrainer
import argparse
import pickle
import random
from torch import nn
import numpy as np
import torch
import torchtext
from torchtext.data import Field
device = torch.cuda.current_device() if torch.cuda.is_available() else None
def set_seed(seed):
random.seed(seed)
torch.manual_seed(seed)
np.random.seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
if __name__ == '__main__':
### Define params ###
parser = argparse.ArgumentParser()
parser.add_argument('--random_seed', default=1234, type=int,
help='random seed to initialize your model')
parser.add_argument('--hidden_size', default=8, type=int,
help='Size of the hidden layer of the encoder and decoder (default 8)')
parser.add_argument('--batch_size', default=32, type=int,
help='batch size (default 32)')
parser.add_argument('--n_layers', default=1, type=int,
help='number of hidden layers of the encoder and decoder (default 1)')
parser.add_argument('--n_epochs', default=10, type=int,
help='number of training epochs (default 10)')
parser.add_argument('--teacher_forcing_ratio', default=1.0, type=float,
                        help='teacher forcing ratio (default 1.0). Only applicable '
'when one of the agents is trained.')
    parser.add_argument('--use_attention', default=1, type=int,
                        help='Integer 0 or 1. If 1, use an attention decoder (default 1)')
    parser.add_argument('--polyglot', default=0, type=int,
                        help='Integer 0 or 1. If 1, train the teacher in polyglot mode '
                             '(only valid together with --pretrain_agent)')
parser.add_argument('--generations', default=0, type=int,
help='Agent generations in the iterated learning')
parser.add_argument('--data_path_prefix', default='./data/Iconic_LessCounting/iconicity_markers',
help='where to find the supervised training set for the student and the teacher')
parser.add_argument('--pretrain_agent', default=0, type=int,
help="Toggle this option to pretrain a model as a S2S model")
parser.add_argument('--tied', default=1, type=int,
help="""Integer 0 or 1. If 1, tie the encoder's input embedding and the decoder's
outputs matrix""")
parser.add_argument('--save_model_path', default='model',
help='where to save the model')
parser.add_argument('--init_A1', type=str,
help='Load a pre-trained model')
parser.add_argument('--init_A1_from_A2', type=str,
help='Load a pre-trained model')
parser.add_argument('--max_len', default=30, type=int,
help="Maximum length of the sequences")
parser.add_argument('--explosion_train', default=20, type=int,
help="During training, how many instructions to sample")
parser.add_argument('--explosion_eval', default=120, type=int,
help="Maximal number of permutations for instructions")
parser.add_argument('--eval', default=None, type=str,
help="Evaluate the A12 agent (use with --init_A1 or --init_A1_from_A2)")
    parser.add_argument('--no_dev_eval', default=0, type=int,
                        help="Disable dev-set evaluation during training")
    parser.add_argument('--no_test_eval', default=1, type=int,
                        help="Disable test-set evaluation during training")
args = parser.parse_args()
print(args, flush=True)
use_attention = args.use_attention == 1
tied = args.tied == 1
polyglot = args.polyglot == 1
if polyglot and not args.pretrain_agent:
assert False, "Shouldn't use polyglot when not pre-training"
set_seed(args.random_seed)
language = args.data_path_prefix.split('/')[-1]
save_model_path = f'./experiment/models/{args.save_model_path}_attention{args.use_attention}_hidden{args.hidden_size}_batch{args.batch_size}_epoch{args.n_epochs}_tied{args.tied}_seed{args.random_seed}.p'
teacher_train_path = f'{args.data_path_prefix}_teacher/train/action_instruction.txt'
teacher_dev_path = f'{args.data_path_prefix}_teacher/dev/action_instruction.txt'
teacher_test_path = f'{args.data_path_prefix}_teacher/test/action_instruction.txt'
all_dataset_vocab = './data/Iconic_LessCounting/vocabulary.txt'
field = Field(preprocessing=lambda x: [
'<sos>'] + x + ['<eos>'], unk_token=None, batch_first=True, include_lengths=True, pad_token='<pad>')
vocab = torchtext.data.TabularDataset(
path=all_dataset_vocab, format='tsv',
fields=[('src', field), ('tgt', field)]
)
field.build_vocab(vocab, max_size=50000)
teacher_train = torchtext.data.TabularDataset(
path=teacher_train_path, format='tsv',
fields=[('src', field), ('tgt', field)]
)
teacher_dev = torchtext.data.TabularDataset(
path=teacher_dev_path, format='tsv',
fields=[('src', field), ('tgt', field)]
)
teacher_test = torchtext.data.TabularDataset(
path=teacher_test_path, format='tsv',
fields=[('src', field), ('tgt', field)]
)
print("Vocab: {}".format(field.vocab.stoi), flush=True)
bidirectional = False
rnn_cell = 'lstm'
def get_seq2seq():
decoder = DecoderRNN(len(field.vocab.stoi), args.max_len,
args.hidden_size * 2 if bidirectional else args.hidden_size,
n_layers=args.n_layers, rnn_cell=rnn_cell,
input_dropout_p=0.0, dropout_p=0.0, use_attention=use_attention,
bidirectional=bidirectional,
eos_id=field.vocab.stoi['<eos>'], sos_id=field.vocab.stoi['<sos>']).to(device)
if tied:
# compatibility with the older code
nn.init.normal_(decoder.out.weight)
encoder = EncoderRNN(len(field.vocab.stoi), args.max_len, args.hidden_size,
input_dropout_p=0.0, dropout_p=0.0,
n_layers=args.n_layers, bidirectional=bidirectional,
rnn_cell=rnn_cell, variable_lengths=True,
embedding=(decoder.out.weight if tied else None)).to(device)
return Seq2seq(encoder, decoder)
if args.init_A1:
with open(args.init_A1, "rb") as fin:
m = pickle.load(fin)
if hasattr(m, "A1"):
A1 = m.A1
print('Loaded A1 as submodel')
else:
A1 = m
A1.to(device)
else:
A1 = get_seq2seq()
if args.init_A1_from_A2:
with open(args.init_A1_from_A2, "rb") as fin:
A1 = pickle.load(fin).A2.to(device)
print('Loaded A1 as an A2 submodel')
A1.flatten_parameters()
weight = torch.ones(len(field.vocab.stoi), device=device)
pad = field.vocab.stoi['<pad>']
loss = NLLLoss(weight, pad)
train_dataset = teacher_train
dev_dataset = teacher_dev
test_dataset = teacher_test
if args.eval is not None:
evaluator = PolyEvaluator(
loss=loss, explosion_rate=args.explosion_eval, batch_size=2048, polyglot=polyglot)
with open(args.eval, "rb") as fin:
model = pickle.load(fin)
eval_results = evaluator.evaluate(model, dev_dataset)
dev_loss, teacher_accuracy, student_accuracy = eval_results
log_msg = "Dev %s: %.4f, Accuracy Teacher: %.4f, Accuracy Student: %.4f" % (
loss.name, dev_loss, teacher_accuracy, student_accuracy)
print(log_msg, flush=True)
def train_model(m, poly, pretraining):
m.train()
optimizer = Optimizer(torch.optim.Adam(
m.parameters(), amsgrad=True), max_grad_norm=5)
t = MirrorTrainer(loss=loss, batch_size=args.batch_size,
checkpoint_every=100,
expt_dir="./experiments", pretraining=pretraining,
polyglot=poly, explosion_train=args.explosion_train, explosion_eval=args.explosion_eval)
m = t.train(m, train_dataset,
n_epochs=args.n_epochs,
dev_data=(None if args.no_dev_eval == 1 else dev_dataset),
test_data=(None if args.no_test_eval ==
1 else test_dataset),
optimizer=optimizer,
teacher_forcing_ratio=args.teacher_forcing_ratio,
resume=False)
return m
def dump(agent, path):
return dump_agent(agent,
torchtext.data.BucketIterator(dataset=dev_dataset, batch_size=1024, sort=True, sort_within_batch=True,
sort_key=lambda x: len(x.src), device=("cuda" if torch.cuda.is_available() else "cpu"), repeat=False),
path,
field)
if args.pretrain_agent:
A1 = train_model(A1, polyglot, pretraining=True)
evaluator = PolyEvaluator(
loss=loss, explosion_rate=args.explosion_eval, batch_size=2048, polyglot=polyglot)
with open(save_model_path, 'wb') as fout:
pickle.dump(A1, fout)
print(f"Saved model to {save_model_path}")
dump_path = f"{save_model_path}.dump"
dump(A1, dump_path)
print(f"Saved model dump to {dump_path}")
if args.generations > 0:
for gen in range(1, args.generations + 1):
set_seed(args.random_seed + gen)
print("*" * 10, f"Starting generation #{gen}", "*" * 10)
A2 = get_seq2seq()
A2.flatten_parameters()
t2s = T2S(A1, A2)
t2s = train_model(t2s, poly=False, pretraining=False)
evaluator = PolyEvaluator(
loss=loss, explosion_rate=args.explosion_eval, batch_size=2048, polyglot=False)
eval_results = evaluator.evaluate(t2s, dev_dataset)
dev_loss, teacher_accuracy, student_accuracy = eval_results
log_msg = "Dev %s: %.4f, Accuracy Teacher: %.4f, Accuracy Student: %.4f" % (
loss.name, dev_loss, teacher_accuracy, student_accuracy)
print(log_msg, flush=True)
evaluator = PolyEvaluator(
loss=loss, explosion_rate=args.explosion_eval, batch_size=2048, polyglot=True)
eval_results = evaluator.evaluate(t2s.A2, dev_dataset)
dev_loss, teacher_accuracy, student_accuracy = eval_results
log_msg = "Dev %s: %.4f, Accuracy Teacher GT: %.4f, Accuracy Student GT: %.4f" % (
loss.name, dev_loss, teacher_accuracy, student_accuracy)
print(log_msg, flush=True)
name = f"{save_model_path}.iteration_{gen}"
with open(name, 'wb') as fout:
pickle.dump(t2s, fout)
print(f"Saved model to {name}")
dump_path = f"{name}.dump"
dump(t2s.A2, dump_path)
print(f"Saved model dump to {dump_path}")
A1 = t2s.A2
|
brica-master
|
train.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import random
from torch.distributions import Categorical
import torch.nn.functional as F
from seq2seq.models import DecoderRNN
class SamplingDecoderRNN(DecoderRNN):
"""
Drop-in replacement for DecoderRNN that _always_ samples sequences (even during the evaluation phase).
"""
def __init__(self, *args, **kwargs):
super(SamplingDecoderRNN, self).__init__(*args, **kwargs)
def forward(self, inputs=None, encoder_hidden=None, encoder_outputs=None,
function=F.log_softmax, teacher_forcing_ratio=0):
ret_dict = dict()
if self.use_attention:
ret_dict[DecoderRNN.KEY_ATTN_SCORE] = list()
inputs, batch_size, max_length = self._validate_args(inputs, encoder_hidden, encoder_outputs,
function, teacher_forcing_ratio)
decoder_hidden = self._init_state(encoder_hidden)
use_teacher_forcing = random.random() < teacher_forcing_ratio
decoder_outputs = []
sequence_symbols = []
lengths = np.array([max_length] * batch_size)
def decode(step, step_output, step_attn):
decoder_outputs.append(step_output)
if self.use_attention:
ret_dict[DecoderRNN.KEY_ATTN_SCORE].append(step_attn)
# NB: here we replaced argmax with sampling
symbols = Categorical(logits=step_output).sample().unsqueeze(1)
#
sequence_symbols.append(symbols)
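# When a sequence emits <eos> for the first time, record the number of
# symbols generated so far as its output length.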
eos_batches = symbols.data.eq(self.eos_id)
if eos_batches.dim() > 0:
eos_batches = eos_batches.cpu().view(-1).numpy()
update_idx = ((lengths > step) & eos_batches) != 0
lengths[update_idx] = len(sequence_symbols)
return symbols
# Manual unrolling is used to support random teacher forcing.
# If teacher_forcing_ratio is True or False instead of a probability, the unrolling can be done in graph
if use_teacher_forcing:
decoder_input = inputs[:, :-1]
decoder_output, decoder_hidden, attn = self.forward_step(decoder_input, decoder_hidden, encoder_outputs,
function=function)
for di in range(decoder_output.size(1)):
step_output = decoder_output[:, di, :]
if attn is not None:
step_attn = attn[:, di, :]
else:
step_attn = None
decode(di, step_output, step_attn)
else:
decoder_input = inputs[:, 0].unsqueeze(1)
for di in range(max_length):
decoder_output, decoder_hidden, step_attn = self.forward_step(decoder_input, decoder_hidden, encoder_outputs,
function=function)
step_output = decoder_output.squeeze(1)
symbols = decode(di, step_output, step_attn)
decoder_input = symbols
ret_dict[DecoderRNN.KEY_SEQUENCE] = sequence_symbols
ret_dict[DecoderRNN.KEY_LENGTH] = lengths.tolist()
return decoder_outputs, decoder_hidden, ret_dict
|
brica-master
|
t2s/sampling_decoder.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
class Seq2seq(nn.Module):
def __init__(self, encoder, decoder):
super(Seq2seq, self).__init__()
self.encoder = encoder
self.decoder = decoder
@staticmethod
def get_batch_permutation(lengths, device):
"""
Returns a permutation and its reverse that turns `lengths` in a sorted
list in descending order.
>>> lengths = [4, 1, 0, 100]
>>> permutation, inverse = Seq2seq.get_batch_permutation(lengths, torch.device("cpu"))
>>> permutation
tensor([3, 0, 1, 2])
>>> rearranged = torch.index_select(torch.tensor(lengths), 0, permutation)
>>> rearranged
tensor([100, 4, 1, 0])
>>> torch.index_select(rearranged, 0, inverse)
tensor([ 4, 1, 0, 100])
"""
lengths = torch.tensor(lengths, device=device)
_, rearrange = torch.sort(lengths, descending=True)
inverse = torch.zeros_like(lengths)
for i, v in enumerate(rearrange):
inverse[v] = i
return rearrange.to(device), inverse.to(device)
def rearrange_output(self, rearrange, decoder_outputs, other):
new_other = {}
new_other[self.decoder.KEY_LENGTH] = [
other[self.decoder.KEY_LENGTH][i] for i in rearrange]
new_other[self.decoder.KEY_SEQUENCE] = [torch.index_select(
s, 0, rearrange) for s in other[self.decoder.KEY_SEQUENCE]]
new_decoder_outputs = [torch.index_select(
t, 0, rearrange) for t in decoder_outputs]
return new_decoder_outputs, new_other
def flatten_parameters(self):
self.encoder.rnn.flatten_parameters()
self.decoder.rnn.flatten_parameters()
def forward(self, input_variable, input_lengths=None, target_variable=None,
teacher_forcing_ratio=0, presorted=False):
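# Sort the batch by decreasing length (as expected by the packed-sequence
# encoder) and keep the inverse permutation to restore the original order below.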
if input_lengths is not None and not presorted:
rearrange, inverse = self.get_batch_permutation(
input_lengths, input_variable.device)
input_variable = torch.index_select(input_variable, 0, rearrange)
input_lengths = [input_lengths[i] for i in rearrange]
if target_variable is not None:
target_variable = torch.index_select(
target_variable, 0, rearrange)
encoder_outputs, encoder_hidden = self.encoder(
input_variable, input_lengths)
result = self.decoder(inputs=target_variable,
encoder_hidden=encoder_hidden,
encoder_outputs=encoder_outputs,
function=F.log_softmax,
teacher_forcing_ratio=teacher_forcing_ratio)
if input_lengths is not None and not presorted:
decoder, other = self.rearrange_output(
inverse, result[0], result[2])
result = decoder, None, other
batch_size = input_variable.size(0)
sos_tensor = torch.tensor(
[self.decoder.sos_id] * batch_size, device=input_variable.device).view(-1, 1)
# add sos at the start
key = self.decoder.KEY_SEQUENCE
result[2][key] = [sos_tensor] + result[2][key]
return result
|
brica-master
|
t2s/s2s.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import itertools
import random
import json
def repeat_explode(input, input_length, n_times):
"""
>>> input, input_length = torch.tensor([[5, 1, 2, 3, 4, 0]]), torch.tensor([5])
>>> exploded_input, exploded_input_length, src_id = repeat_explode(input=input, input_length=input_length, n_times=3)
>>> exploded_input
tensor([[5, 1, 2, 3, 4, 0],
[5, 1, 2, 3, 4, 0],
[5, 1, 2, 3, 4, 0]])
>>> exploded_input_length
tensor([5, 5, 5])
>>> src_id
[0, 0, 0]
"""
new_input, new_input_length = [], []
src_id = []
for i in range(input.size(0)):
new_input.extend([input[i, :]] * n_times)
new_input_length.extend([input_length[i]] * n_times)
src_id.extend([i] * n_times)
device = input.device
new_input = torch.stack(new_input).to(device)
new_input_length = torch.tensor(new_input_length, device=device)
return new_input, new_input_length, src_id
def pretrain_explode(input, input_length, target_variable, polyglot, sos, eos, pad, sample, n_samples):
"""
Batch explosion logic; makes it possible to train an agent on samples from another agent.
Examples:
>>> input, input_length = torch.tensor([[5, 1, 2, 3, 4, 0]]), torch.tensor([5])
>>> target, target_length = torch.tensor([[5, 100, 200, 300, 400, 500, 600, 4, 0, 0]]), torch.tensor([8])
>>> random.seed(7)
>>> exploded = pretrain_explode(input=input, input_length=input_length, target_variable=(target, target_length), polyglot=True, sos=5, eos=4, pad=0, n_samples=3, sample=True)
>>> exploded_input, exploded_input_length, (exploded_target, exploded_target_length), src_ids = exploded
>>> exploded_input
tensor([[5, 1, 2, 3, 4, 0],
[5, 1, 2, 3, 4, 0],
[5, 1, 2, 3, 4, 0]])
>>> exploded_input_length
tensor([5, 5, 5])
>>> exploded_target
tensor([[ 5, 100, 200, 300, 400, 500, 600, 4, 0, 0],
[ 5, 100, 200, 300, 400, 500, 600, 4, 0, 0],
[ 5, 400, 500, 600, 100, 200, 300, 4, 0, 0]])
>>> exploded_target_length
tensor([8, 8, 8])
>>> src_ids
[0, 0, 0]
>>> # now w/o sampling; all possible permutations
>>> exploded = pretrain_explode(input=input, input_length=input_length, target_variable=(target, target_length), polyglot=True, sos=5, eos=4, pad=0, n_samples=6, sample=False)
>>> exploded_input, exploded_input_length, (exploded_target, exploded_target_length), src_ids = exploded
>>> exploded_target
tensor([[ 5, 100, 200, 300, 400, 500, 600, 4, 0, 0],
[ 5, 400, 500, 600, 100, 200, 300, 4, 0, 0]])
>>> src_ids
[0, 0]
"""
new_input, new_length, new_target, new_target_length, src_ids = [], [], [], [], []
batch_size = input.size(0)
target, target_length = target_variable
np_target = target.cpu().numpy()
max_len = target.size(1)
for i in range(batch_size):
l = target_length[i].item()
grouped = np_target[i, 1:l-1].reshape(-1, 3)
n_trigram = grouped.shape[0]
all_permutations = list(itertools.permutations(
range(n_trigram))) if polyglot else [range(n_trigram)]
selected_permutations = random.choices(
all_permutations, k=n_samples) if sample else all_permutations
for permutation in selected_permutations:
permutation = grouped[permutation, :].reshape(-1)
new_input.append(input[i, :])
new_length.append(input_length[i])
permutation = [sos] + permutation.tolist() + [eos] + \
[pad] * (max_len - l)
new_target.append(permutation)
new_target_length.append(l)
src_ids.append(i)
device = input.device
new_input = torch.stack(new_input).to(device)
new_length = torch.tensor(new_length, device=device)
new_target_length = torch.tensor(new_target_length, device=device)
new_target = torch.tensor(new_target, device=device)
return new_input, new_length, (new_target, new_target_length), src_ids
def cut_after_eos(seq):
try:
p = seq.index("<eos>")
out_seq = seq[:p+1]
except ValueError:
out_seq = seq
return out_seq
class LangStats:
def __init__(self):
self.language_stats = {}
def push_stat(self, terms):
length = self.get_length(terms)
name = self.get_language_name(terms)
if length not in self.language_stats:
self.language_stats[length] = {}
self.language_stats[length][name] = self.language_stats[length].get(
name, 0) + 1
def get_length(self, terms):
# minus <sos>, <eos>
return len(terms) - 2
def get_language_name(self, terms):
name = []
for term in terms:
if term in ['first', 'then', 'finally'] + [f'M{i}' for i in range(6)]:
name.append(term)
return '-'.join(name)
def get_json(self):
result = {}
for key, distr in self.language_stats.items():
per_length = list(distr.items())
per_length = sorted(per_length, key=lambda x: x[1], reverse=True)
result[key] = per_length
return json.dumps(result)
def dump_agent(A, iterator, output_file, field, instruction_explosion_rate=10):
def id_to_text(ids): return [field.vocab.itos[x.item()] for x in ids]
stats = LangStats()
with torch.no_grad(), open(output_file, 'w') as log:
batch_generator = iterator.__iter__()
src = []
for batch in batch_generator:
tasks = [(batch.src, instruction_explosion_rate, "a->i"),
(batch.tgt, 1, "i->a")]
for ((src, length), explosion_rate, name) in tasks:
src, length, src_id = repeat_explode(
src, length, explosion_rate)
_1, _2, other = A.forward(src, length, None, 0.0)
out = torch.stack(other['sequence']).squeeze(2).permute(1, 0)
prev_src_id = src_id[0]
for i in range(src.size(0)):
if src_id[i] != prev_src_id:
prev_src_id = src_id[i]
print("*"*20, file=log)
src_seq = cut_after_eos(id_to_text(src[i, :]))
out_seq = cut_after_eos(id_to_text(out[i, :]))
print(src_seq, "->", out_seq, file=log)
if name == "a->i":
stats.push_stat(out_seq)
print("*"*20, file=log)
print("-"*20, file=log)
print(stats.get_json(), file=log)
|
brica-master
|
t2s/util.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
class T2S(nn.Module):
def __init__(self, A1, A2):
super(T2S, self).__init__()
self.A1 = A1
self.A2 = A2
def get_lengths(self, sequence, eos_id):
eos = sequence.eq(eos_id)
# eos contains ones on positions where <eos> occurs in the outputs, and zeros otherwise
# eos.cumsum(dim=1) would contain non-zeros on all positions after <eos> occurred
# eos.cumsum(dim=1) > 0 would contain ones on all positions after <eos> occurred
# (eos.cumsum(dim=1) > 0).sum(dim=1) equals the number of timesteps after <eos> occurred (including it)
# eos.size(1) - (eos.cumsum(dim=1) > 0).sum(dim=1) is the number of steps before eos took place
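# Example: with eos_id=4 and sequence [[1, 2, 4, 0, 0]], eos is [[0, 0, 1, 0, 0]],
# (eos.cumsum(dim=1) > 0).sum(dim=1) is 3, so lengths = 5 - 3 = 2 steps before <eos>.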
lengths = eos.size(1) - (eos.cumsum(dim=1) > 0).sum(dim=1)
return lengths
def forward(self, input_variable, input_lengths, target_variable, teacher_forcing_ratio=0.0, presorted=False):
# turn off sampling in the teacher or in the student
# when needed.
A1 = self.A1
with torch.no_grad():
teacher_decoder_outputs, _, teacher_other = A1(
input_variable, input_lengths, None, 0.0, presorted=presorted)
sequence_tensor = torch.stack(teacher_other['sequence']).squeeze(2).permute(1, 0)
t_out_lengths = self.get_lengths(
sequence_tensor, A1.decoder.eos_id)
# NOTE: we increase len by 1 so that the final <eos> is also
# fed into the student. At the same time, it might be the case that
# the teacher never produced <eos>. In that case, we cap the length.
max_len = len(teacher_other['sequence'])
t_out_lengths.add_(1).clamp_(max=max_len)
student_decoder_outputs, _, student_other = self.A2(sequence_tensor, t_out_lengths, target_variable=target_variable,
teacher_forcing_ratio=teacher_forcing_ratio)
student_other['teacher_decoder'] = teacher_other['sequence']
student_other['teacher_decoder_outputs'] = teacher_decoder_outputs
student_other['teacher_dict'] = teacher_other
student_other['teacher_lengths'] = t_out_lengths
return student_decoder_outputs, None, student_other
|
brica-master
|
t2s/t2s.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from .s2s import Seq2seq
from .t2s import T2S
from .util import dump_agent
from .sampling_decoder import SamplingDecoderRNN
|
brica-master
|
t2s/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from .poly_evaluator import PolyEvaluator
|
brica-master
|
t2s/evaluator/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import print_function, division
import sys
sys.path.insert(0, 'pytorch_seq2seq')
from ..util import repeat_explode, pretrain_explode
from seq2seq.loss import NLLLoss
import seq2seq
import torch
import torchtext
class PolyEvaluator(object):
""" Class to evaluate models with given datasets.
Args:
loss (seq2seq.loss, optional): loss for evaluator (default: seq2seq.loss.NLLLoss)
batch_size (int, optional): batch size for evaluator (default: 64)
"""
def __init__(self, loss=NLLLoss(), explosion_rate=120, batch_size=1024, polyglot=False):
self.loss = loss
self.batch_size = batch_size
self.polyglot = polyglot
self.explosion_rate = explosion_rate
def evaluate(self, model, data):
""" Evaluate a model on given dataset and return performance.
Args:
model (seq2seq.models): model to evaluate
data (seq2seq.dataset.dataset.Dataset): dataset to evaluate against
Returns:
loss (float): loss of the given model on the given dataset
"""
loss = self.loss
loss.reset()
student_match, teacher_match = 0, 0
student_total, teacher_total = 0, 0
device = torch.device("cuda") if torch.cuda.is_available() else None
batch_iterator = torchtext.data.BucketIterator(
dataset=data, batch_size=self.batch_size,
sort=True, sort_key=lambda x: len(x.src),
device=device, train=False)
tgt_vocab = data.fields[seq2seq.tgt_field_name].vocab
pad = tgt_vocab.stoi[data.fields[seq2seq.tgt_field_name].pad_token]
def eval_one_way(m, src, src_len, dst):
decoder_outputs, decoder_hidden, other = m(src, src_len, dst)
for step, step_output in enumerate(decoder_outputs):
target = dst[:, step + 1]
loss.eval_batch(step_output.view(dst.size(0), -1), target)
match, total = 0, 0
seqlist = other['sequence'][1:] # cut <sos>
predictions = torch.stack(seqlist).squeeze(2).permute(1, 0)
for i in range(src.size(0)):
total += 1
target = dst[i, 1:]
non_padding = target.ne(pad)
correct = predictions[i].view(-1).eq(
target).masked_select(non_padding).sum().item()
len_non_padding = non_padding.sum().item()
if correct == len_non_padding:
match += 1
return total, match
def eval_polyglot_T(m, src, src_len, tgt, tgt_exploded, instance_ids):
"A bit tricky, as we want to allow any valid output out of possible permutations"
decoder_outputs, _, other = m(src, src_len, tgt)
seqlist = other['sequence']
seqlist = torch.stack(seqlist)
output_seq = seqlist.squeeze(2).permute(1, 0)
acc = [0.0 for _ in range(src.size(0))]
for example_id in range(src.size(0)):
# get the possible target candidates
candidate_index = [i for i in range(
len(instance_ids)) if instance_ids[i] == example_id]
for index in candidate_index:
if acc[example_id] == 1:
# already matched, no point in comparing
continue
target_candidate = tgt_exploded[index]
non_padding = target_candidate.ne(pad)
correct = output_seq[example_id, :].view(-1).eq(
target_candidate).masked_select(non_padding).sum().item()
len_non_padding = non_padding.sum().item()
if correct == len_non_padding:
acc[example_id] = 1
return len(acc), sum(acc)
with torch.no_grad():
for batch in batch_iterator:
input_variable, input_lengths = getattr(
batch, seq2seq.src_field_name)
target_variable = getattr(batch, seq2seq.tgt_field_name)
if hasattr(model, "A2"):
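# Two-hat (T2S) model: A1 first generates instruction sequences for the exploded
# inputs; A2 is then scored on reconstructing the input from those instructions
# (student accuracy) and on reproducing any of A1's instructions from the raw
# input (teacher accuracy).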
A1 = model.A1
exploded_input_variable, exploded_input_lengths, src_ids = repeat_explode(
input_variable, input_lengths, n_times=self.explosion_rate)
teacher_decoder_outputs, _, teacher_other = A1(
exploded_input_variable, exploded_input_lengths, None, 0.0)
max_len = len(teacher_other['sequence'])
A1_target_variable = torch.stack(teacher_other['sequence']).squeeze(2).permute(1, 0)
t_out_lengths = model.get_lengths(
A1_target_variable, A1.decoder.eos_id).add_(1).clamp_(max=max_len)
for i in range(A1_target_variable.size(0)):
l = t_out_lengths[i]
A1_target_variable[i, l:] = pad
A2 = model.A2
_total, _match = eval_one_way(
A2, A1_target_variable, t_out_lengths, exploded_input_variable)
student_total += _total
student_match += _match
_total, _match = eval_polyglot_T(
A2, input_variable, input_lengths, None, A1_target_variable, src_ids)
teacher_total += _total
teacher_match += _match
else:
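# Single agent: build (optionally permuted) ground-truth targets with
# pretrain_explode and score both directions, target -> input and
# input -> any valid target permutation.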
A1 = model
exploded_input, exploded_input_length, (exploded_target, exploded_target_length), src_ids = \
pretrain_explode(input=input_variable, input_length=input_lengths, target_variable=target_variable,
polyglot=self.polyglot, sos=tgt_vocab.stoi[
'<sos>'], eos=tgt_vocab.stoi['<eos>'],
pad=pad, n_samples=self.explosion_rate, sample=True)
_total, _match = eval_one_way(
A1, exploded_target, exploded_target_length, exploded_input)
student_total += _total
student_match += _match
_total, _match = eval_polyglot_T(
A1, input_variable, input_lengths, target_variable[0], exploded_target, src_ids)
teacher_total += _total
teacher_match += _match
student_accuracy, teacher_accuracy = student_match / \
student_total, teacher_match / teacher_total
return loss.get_loss(), teacher_accuracy, student_accuracy
|
brica-master
|
t2s/evaluator/poly_evaluator.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from .mirror_trainer import MirrorTrainer
|
brica-master
|
t2s/trainer/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import division
import torch
from seq2seq.loss import NLLLoss
from ..evaluator import PolyEvaluator
from ..util import repeat_explode, pretrain_explode
from .supervised_trainer import SupervisedTrainer
class MirrorTrainer(SupervisedTrainer):
def __init__(self, expt_dir='experiment', loss=NLLLoss(), batch_size=64,
random_seed=None,
checkpoint_every=100, pretraining=False, polyglot=False, explosion_train=10, explosion_eval=120):
super(MirrorTrainer, self).__init__(
expt_dir=expt_dir, loss=loss, batch_size=batch_size, random_seed=random_seed,
checkpoint_every=checkpoint_every)
self._trainer = "Mirror Trainer"
self.pretraining = pretraining
self.polyglot = polyglot
self.evaluator = PolyEvaluator(
explosion_rate=explosion_eval, loss=self.loss, batch_size=512, polyglot=self.polyglot)
self.explosion_train = explosion_train
def _one_direction_pass(self, input_variable, input_lengths, target_variable, model, teacher_forcing_ratio, presorted):
batch_size = target_variable.size(0)
outputs, _, other = model(input_variable, input_lengths, target_variable,
teacher_forcing_ratio=teacher_forcing_ratio, presorted=presorted)
for step, step_output in enumerate(outputs):
self.loss.eval_batch(step_output.contiguous().view(
batch_size, -1), target_variable[:, step + 1])
return outputs, other
def _train_batch(self, input_variable, input_lengths, target_variable, model, teacher_forcing_ratio):
self.loss.reset()
sos, eos = None, None
if hasattr(model, 'A1'):
sos, eos = model.A1.decoder.sos_id, model.A1.decoder.eos_id
else:
sos, eos = model.decoder.sos_id, model.decoder.eos_id
if self.pretraining:
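# Pretraining: explode the batch into (possibly permuted) ground-truth pairs and
# train the single agent in both directions, input -> target and target -> input.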
input_variable, input_lengths, target_variable, _ = pretrain_explode(input_variable, input_lengths, target_variable,
polyglot=self.polyglot,
sos=sos, eos=eos, pad=0,
n_samples=self.explosion_train, sample=True)
self._one_direction_pass(
input_variable, input_lengths, target_variable[0], model, teacher_forcing_ratio, True)
self._one_direction_pass(
target_variable[0], target_variable[1], input_variable, model, teacher_forcing_ratio, False)
else:
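# Interaction phase: the full T2S model is trained to reconstruct the input
# through A1's sampled instructions, and A2 is additionally trained to produce
# A1's instructions (padded after their <eos>) directly from the input.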
input_variable, input_lengths, _ = repeat_explode(
input_variable, input_lengths, self.explosion_train)
_, outputs = self._one_direction_pass(
input_variable, input_lengths, input_variable, model, teacher_forcing_ratio, True)
batch_size = input_variable.size(0)
i_A1 = torch.stack(outputs['teacher_decoder']).squeeze(2).permute(1, 0)
i_A1_length = outputs['teacher_lengths']
pad_id = 0
for i in range(batch_size):
l = i_A1_length[i]
i_A1[i, l:] = pad_id
self._one_direction_pass(
input_variable, input_lengths, i_A1, model.A2, teacher_forcing_ratio, False)
model.zero_grad()
self.loss.backward()
self.optimizer.step()
return self.loss.get_loss()
|
brica-master
|
t2s/trainer/mirror_trainer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
class EarlyStopping_NoImprovement(object):
def __init__(self,
min_delta=1e-5,
patience=5):
self.min_delta = min_delta
self.patience = patience
self.wait = 0
self.best_acc = 0
self.stopped_epoch = 0
self._stop_training = False
def on_epoch_end(self, epoch, current_acc):
if current_acc is None:
pass
else:
if (current_acc - self.best_acc) > self.min_delta:
self.best_acc = current_acc
self.wait = 0 # reset
self._stop_training = False
else:
self.wait += 1
if self.wait >= self.patience:
self.stopped_epoch = epoch + 1
self._stop_training = True
def on_train_end(self):
if self.stopped_epoch > 0:
print('\nTerminated Training for Early Stopping at Epoch %04i' %
self.stopped_epoch, flush=True)
class EarlyStopping_GoodAccuracy(object):
def __init__(self, patience=5):
self.patience = patience
self.wait = 0
self.best_accuracy = 0.999
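# Accuracy threshold: training stops once dev accuracy stays at or above this
# value for `patience` consecutive epochs.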
self.stopped_epoch = 0
self._stop_training = False
def on_epoch_end(self, epoch, current_acc):
if current_acc is None:
pass
else:
if current_acc >= self.best_accuracy:
self.wait += 1
else:
self.wait = 0
if self.wait >= self.patience:
self.stopped_epoch = epoch + 1
self._stop_training = True
def on_train_end(self):
if self.stopped_epoch > 0:
print('\nTerminated Training for Early Stopping at Epoch %04i' %
self.stopped_epoch, flush=True)
|
brica-master
|
t2s/trainer/early_stopping.py
|
#
# Copyright 2017- IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Below is a modification of the SupervisedTrainer from pytorch-seq2seq/seq2seq/trainer/supervised_trainer.py
# which is adopted from https://github.com/IBM/pytorch-seq2seq/blob/master/seq2seq/trainer/supervised_trainer.py
# The changes we have introduced relate to: early stopping and progress report logic.
import sys
sys.path.insert(0, './pytorch_seq2seq')
from seq2seq.util.checkpoint import Checkpoint
from seq2seq.optim import Optimizer
from seq2seq.loss import NLLLoss
import seq2seq
import os
import random
import torch
import torchtext
from torch import optim
from .early_stopping import EarlyStopping_NoImprovement
class SupervisedTrainer(object):
def __init__(self, expt_dir='experiment', loss=NLLLoss(), batch_size=64,
random_seed=None,
checkpoint_every=100, patience=5):
self._trainer = "Simple Trainer"
self.random_seed = random_seed
if random_seed is not None:
random.seed(random_seed)
torch.manual_seed(random_seed)
self.loss = loss
# set by a subclass
self.evaluator = None
self.optimizer = None
self.checkpoint_every = checkpoint_every
self.early_stopping_teacher = EarlyStopping_NoImprovement(
patience=patience)
self.early_stopping_student = EarlyStopping_NoImprovement(
patience=patience)
if not os.path.isabs(expt_dir):
expt_dir = os.path.join(os.getcwd(), expt_dir)
self.expt_dir = expt_dir
if not os.path.exists(self.expt_dir):
os.makedirs(self.expt_dir)
self.batch_size = batch_size
def _train_batch(self, input_variable, input_lengths, target_variable, model, teacher_forcing_ratio):
raise NotImplementedError('Not implemented!')
def _train_epoches(self, data, model, n_epochs, start_epoch, start_step,
dev_data=None, test_data=None, teacher_forcing_ratio=0):
epoch_loss_total = 0 # Reset every epoch
device = torch.device("cuda") if torch.cuda.is_available() else -1
batch_iterator = torchtext.data.BucketIterator(
dataset=data, batch_size=self.batch_size,
sort=False, sort_within_batch=True,
sort_key=lambda x: len(x.src),
device=device, repeat=False)
steps_per_epoch = len(batch_iterator)
total_steps = steps_per_epoch * n_epochs
print('total steps is equal to ', total_steps)
step = start_step
step_elapsed = 0
epoch = start_epoch
max_epoch_iteration = n_epochs + 1
while epoch < max_epoch_iteration:
batch_generator = batch_iterator.__iter__()
# consuming seen batches from previous training
for _ in range((epoch - 1) * steps_per_epoch, step):
next(batch_generator)
for batch in batch_generator:
step += 1
step_elapsed += 1
input_variables, input_lengths = getattr(
batch, seq2seq.src_field_name)
target_variables = getattr(batch, seq2seq.tgt_field_name)
loss = self._train_batch(input_variables, input_lengths.tolist(
), target_variables, model, teacher_forcing_ratio)
# Record average loss
epoch_loss_total += loss
if step_elapsed == 0:
continue
epoch_loss_avg = epoch_loss_total / \
min(steps_per_epoch, step - start_step)
epoch_loss_total = 0
log_msg = "Finished epoch %d: Train %s: %.4f" % (
epoch, self.loss.name, epoch_loss_avg)
time_logging = 1
if dev_data is not None and (epoch % time_logging) == 0:
eval_results = self.evaluator.evaluate(model, dev_data)
assert len(eval_results) == 3
# if two-hat agent, we have accuracies for both hats (Student and Teacher)
dev_loss, teacher_accuracy, student_accuracy = eval_results
log_msg += ", Dev %s: %.4f, Accuracy Teacher: %.4f, Accuracy Student: %.4f" % (
self.loss.name, dev_loss, teacher_accuracy, student_accuracy)
self.early_stopping_student.on_epoch_end(
epoch, student_accuracy)
self.early_stopping_teacher.on_epoch_end(
epoch, teacher_accuracy)
if self.early_stopping_student._stop_training and self.early_stopping_teacher._stop_training:
max_epoch_iteration = self.early_stopping_teacher.stopped_epoch
model.train(mode=True)
if test_data is not None and (epoch % time_logging) == 0:
assert len(eval_results) == 3
# if two-hat agent, we have accuracies for both hats (Student and Teacher)
test_loss, teacher_accuracy, student_accuracy = eval_results
log_msg += ", Test %s: %.4f, Accuracy Teacher: %.4f, Accuracy Student: %.4f" % (
self.loss.name, test_loss, teacher_accuracy, student_accuracy)
epoch += 1
print(log_msg, flush=True)
def train(self, model, data, n_epochs=5,
resume=False, dev_data=None, test_data=None,
optimizer=None, teacher_forcing_ratio=0):
""" Run training for a given model.
Args:
model (seq2seq.models): model to run training on; if `resume=True`, it will be
overwritten by the model loaded from the latest checkpoint.
data (seq2seq.dataset.dataset.Dataset): dataset object to train on
n_epochs (int, optional): number of epochs to run (default 5)
resume(bool, optional): resume training with the latest checkpoint, (default False)
dev_data (seq2seq.dataset.dataset.Dataset, optional): dev Dataset (default None)
test_data (seq2seq.dataset.dataset.Dataset, optional): test Dataset (default None)
optimizer (seq2seq.optim.Optimizer, optional): optimizer for training
(default: Optimizer(pytorch.optim.Adam, max_grad_norm=5))
teacher_forcing_ratio (float, optional): teacher forcing ratio (default 0)
Returns:
model (seq2seq.models): trained model.
"""
# If training is set to resume
if resume:
latest_checkpoint_path = Checkpoint.get_latest_checkpoint(
self.expt_dir)
resume_checkpoint = Checkpoint.load(latest_checkpoint_path)
model = resume_checkpoint.model
self.optimizer = resume_checkpoint.optimizer
# A workaround to set optimizer parameters properly
resume_optim = self.optimizer.optimizer
defaults = resume_optim.param_groups[0]
defaults.pop('params', None)
defaults.pop('initial_lr', None)
self.optimizer.optimizer = resume_optim.__class__(
model.parameters(), **defaults)
start_epoch = resume_checkpoint.epoch
step = resume_checkpoint.step
else:
start_epoch = 1
step = 0
if optimizer is None:
optimizer = Optimizer(optim.Adam(
model.parameters()), max_grad_norm=5)
self.optimizer = optimizer
self._train_epoches(data, model, n_epochs,
start_epoch, step, dev_data=dev_data, test_data=test_data,
teacher_forcing_ratio=teacher_forcing_ratio)
if self.early_stopping_student.stopped_epoch > 0 and self.early_stopping_teacher.stopped_epoch > 0:
print('\nTerminated Training for Early Stopping at Epoch %04i' %
(max(self.early_stopping_student.stopped_epoch, self.early_stopping_teacher.stopped_epoch)), flush=True)
return model
|
brica-master
|
t2s/trainer/supervised_trainer.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from setuptools import find_packages, setup
requirements = [
"torch",
"gpytorch",
"botorch>=0.6",
"scipy",
"jupyter",
"matplotlib",
"nevergrad",
"sklearn",
"statsmodels",
"xgboost",
]
dev_requires = [
"black",
"flake8",
"pytest",
"coverage",
]
setup(
name="bo_pr",
version="0.1",
description="Bayesian Optimization over Discrete and Mixed Spaces via Probabilistic Reparameterization",
author="Anonymous Authors",
packages=find_packages(),
install_requires=requirements,
extras_require={"dev": dev_requires},
)
|
bo_pr-main
|
setup.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import List, Optional, Tuple
import torch
from statsmodels.distributions.empirical_distribution import ECDF
from torch import Tensor
from torch.distributions import Normal
def apply_normal_copula_transform(
Y: Tensor, ecdfs: Optional[List[ECDF]] = None
) -> Tuple[Tensor, List[ECDF]]:
r"""Apply a copula transform independently to each output.
Values are first mapped to quantiles through the empirical cdf, then
through an inverse standard normal cdf.
Note: this is not currently differentiable and it does not support
batched `Y`.
TODO: Remove dependency on ECDF or at least write an abstract specification
of what we expect ECDF to do.
Args:
Y: A `n x m`-dim tensor of values
ecdfs: A list of ecdfs to use in the transformation
Returns:
2-element tuple containing
- A `n x m`-dim tensor of transformed values
- A list of `m` ECDF objects.
"""
if Y.ndim > 2:
raise NotImplementedError("Batched Y is not currently supported.")
normal = Normal(0, 1)
Y_i_tfs = []
ecdfs = ecdfs or []
for i in range(Y.shape[-1]):
Y_i = Y[:, i].cpu().numpy()
if len(ecdfs) <= i:
# compute a new ecdf if none was provided
ecdf = ECDF(Y_i)
ecdfs.append(ecdf)
else:
# Otherwise use existing ecdf
ecdf = ecdfs[i]
# clamp quantiles here to avoid (-)infs at the extremes
Y_i_tf = normal.icdf(torch.from_numpy(ecdf(Y_i)).to(Y).clamp(0.0001, 0.9999))
Y_i_tfs.append(Y_i_tf)
return torch.stack(Y_i_tfs, dim=-1), ecdfs
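# Illustrative usage sketch: fit the ECDFs on observed outcomes once, then reuse
# them to transform later observations of the same objectives, e.g.:
#   Y_tf, ecdfs = apply_normal_copula_transform(torch.randn(8, 2))
#   Y_new_tf, _ = apply_normal_copula_transform(torch.randn(4, 2), ecdfs=ecdfs)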
|
bo_pr-main
|
discrete_mixed_bo/model_utils.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Methods for optimizing acquisition functions.
This is a copy of botorch.optim.optimize.py that uses the custom
gen_candidates_scipy method in ./gen.py to use finite differences when
specified.
"""
from __future__ import annotations
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from botorch.acquisition.acquisition import (
AcquisitionFunction,
OneShotAcquisitionFunction,
)
from botorch.acquisition.fixed_feature import FixedFeatureAcquisitionFunction
from botorch.acquisition.knowledge_gradient import qKnowledgeGradient
from botorch.logging import logger
from botorch.optim.stopping import ExpMAStoppingCriterion
from torch import Tensor
from discrete_mixed_bo.gen import gen_candidates_scipy, gen_candidates_torch
from discrete_mixed_bo.initializers import (
gen_batch_initial_conditions,
gen_one_shot_kg_initial_conditions,
)
INIT_OPTION_KEYS = {
# set of options for initialization that we should
# not pass to scipy.optimize.minimize to avoid
# warnings
"alpha",
"batch_limit",
"eta",
"init_batch_limit",
"nonnegative",
"n_burnin",
"sample_around_best",
"sample_around_best_sigma",
"sample_around_best_prob_perturb",
"sample_around_best_prob_perturb",
"seed",
"thinning",
}
def optimize_acqf(
acq_function: AcquisitionFunction,
bounds: Tensor,
q: int,
num_restarts: int,
raw_samples: Optional[int] = None,
options: Optional[Dict[str, Union[bool, float, int, str]]] = None,
inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
nonlinear_inequality_constraints: Optional[List[Callable]] = None,
fixed_features: Optional[Dict[int, float]] = None,
post_processing_func: Optional[Callable[[Tensor], Tensor]] = None,
batch_initial_conditions: Optional[Tensor] = None,
return_best_only: bool = True,
sequential: bool = False,
stochastic: bool = False,
**kwargs: Any,
) -> Tuple[Tensor, Tensor]:
r"""Generate a set of candidates via multi-start optimization.
Args:
acq_function: An AcquisitionFunction.
bounds: A `2 x d` tensor of lower and upper bounds for each column of `X`.
q: The number of candidates.
num_restarts: The number of starting points for multistart acquisition
function optimization.
raw_samples: The number of samples for initialization. This is required
if `batch_initial_conditions` is not specified.
options: Options for candidate generation.
inequality_constraints: A list of tuples (indices, coefficients, rhs),
with each tuple encoding an inequality constraint of the form
`\sum_i (X[indices[i]] * coefficients[i]) >= rhs`
equality_constraints: A list of tuples (indices, coefficients, rhs),
with each tuple encoding an equality constraint of the form
`\sum_i (X[indices[i]] * coefficients[i]) = rhs`
nonlinear_inequality_constraints: A list of callables that represent
non-linear inequality constraints of the form `callable(x) >= 0`. Each
callable is expected to take a `(num_restarts) x q x d`-dim tensor as an
input and return a `(num_restarts) x q`-dim tensor with the constraint
values. The constraints will later be passed to SLSQP. You need to pass in
`batch_initial_conditions` in this case. Using non-linear inequality
constraints also requires that `batch_limit` is set to 1, which will be
done automatically if not specified in `options`.
fixed_features: A map `{feature_index: value}` for features that
should be fixed to a particular value during generation.
post_processing_func: A function that post-processes an optimization
result appropriately (i.e., according to `round-trip`
transformations).
batch_initial_conditions: A tensor to specify the initial conditions. Set
this if you do not want to use default initialization strategy.
return_best_only: If False, outputs the solutions corresponding to all
random restart initializations of the optimization.
sequential: If False, uses joint optimization, otherwise uses sequential
optimization.
kwargs: Additional keyword arguments.
Returns:
A two-element tuple containing
- a `(num_restarts) x q x d`-dim tensor of generated candidates.
- a tensor of associated acquisition values. If `sequential=False`,
this is a `(num_restarts)`-dim tensor of joint acquisition values
(with explicit restart dimension if `return_best_only=False`). If
`sequential=True`, this is a `q`-dim tensor of expected acquisition
values conditional on having observed candidates `0,1,...,i-1`.
Example:
>>> # generate `q=2` candidates jointly using 20 random restarts
>>> # and 512 raw samples
>>> candidates, acq_value = optimize_acqf(qEI, bounds, 2, 20, 512)
>>> # generate `q=3` candidates sequentially using 15 random restarts
>>> # and 256 raw samples
>>> qEI = qExpectedImprovement(model, best_f=0.2)
>>> bounds = torch.tensor([[0.], [1.]])
>>> candidates, acq_value_list = optimize_acqf(
>>> qEI, bounds, 3, 15, 256, sequential=True
>>> )
"""
if not (bounds.ndim == 2 and bounds.shape[0] == 2):
raise ValueError(
f"bounds should be a `2 x d` tensor, current shape: {list(bounds.shape)}."
)
if sequential and q > 1:
if not return_best_only:
raise NotImplementedError(
"`return_best_only=False` only supported for joint optimization."
)
if isinstance(acq_function, OneShotAcquisitionFunction):
raise NotImplementedError(
"sequential optimization currently not supported for one-shot "
"acquisition functions. Must have `sequential=False`."
)
candidate_list, acq_value_list = [], []
base_X_pending = acq_function.X_pending
for i in range(q):
candidate, acq_value = optimize_acqf(
acq_function=acq_function,
bounds=bounds,
q=1,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options or {},
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
nonlinear_inequality_constraints=nonlinear_inequality_constraints,
fixed_features=fixed_features,
post_processing_func=post_processing_func,
batch_initial_conditions=None,
return_best_only=True,
sequential=False,
stochastic=stochastic,
)
candidate_list.append(candidate)
acq_value_list.append(acq_value)
candidates = torch.cat(candidate_list, dim=-2)
acq_function.set_X_pending(
torch.cat([base_X_pending, candidates], dim=-2)
if base_X_pending is not None
else candidates
)
logger.info(f"Generated sequential candidate {i+1} of {q}")
# Reset acq_func to previous X_pending state
acq_function.set_X_pending(base_X_pending)
return candidates, torch.stack(acq_value_list)
options = options or {}
# Handle the trivial case when all features are fixed
if fixed_features is not None and len(fixed_features) == bounds.shape[-1]:
X = torch.tensor(
[fixed_features[i] for i in range(bounds.shape[-1])],
device=bounds.device,
dtype=bounds.dtype,
)
X = X.expand(q, *X.shape)
with torch.no_grad():
acq_value = acq_function(X)
return X, acq_value
if batch_initial_conditions is None:
if nonlinear_inequality_constraints:
raise NotImplementedError(
"`batch_initial_conditions` must be given if there are non-linear "
"inequality constraints."
)
if raw_samples is None:
raise ValueError(
"Must specify `raw_samples` when `batch_initial_conditions` is `None`."
)
ic_gen = (
gen_one_shot_kg_initial_conditions
if isinstance(acq_function, qKnowledgeGradient)
else gen_batch_initial_conditions
)
batch_initial_conditions = ic_gen(
acq_function=acq_function,
bounds=bounds,
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
fixed_features=fixed_features,
options=options,
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
)
batch_limit: int = options.get(
"batch_limit", num_restarts if not nonlinear_inequality_constraints else 1
)
batch_candidates_list: List[Tensor] = []
batch_acq_values_list: List[Tensor] = []
batched_ics = batch_initial_conditions.split(batch_limit)
gen_kwargs = {}
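# Select the candidate generator: the torch-based generator for the stochastic
# setting, otherwise the scipy-based generator from gen.py (which supports
# finite differences when specified).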
if stochastic:
gen_candidates = gen_candidates_torch
else:
gen_candidates = gen_candidates_scipy
gen_kwargs.update(
{
"inequality_constraints": inequality_constraints,
"equality_constraints": equality_constraints,
"nonlinear_inequality_constraints": nonlinear_inequality_constraints,
}
)
for i, batched_ics_ in enumerate(batched_ics):
# optimize using random restart optimization
batch_candidates_curr, batch_acq_values_curr = gen_candidates(
initial_conditions=batched_ics_,
acquisition_function=acq_function,
lower_bounds=bounds[0],
upper_bounds=bounds[1],
options={k: v for k, v in options.items() if k not in INIT_OPTION_KEYS},
fixed_features=fixed_features,
**gen_kwargs,
)
batch_candidates_list.append(batch_candidates_curr)
batch_acq_values_list.append(batch_acq_values_curr)
logger.info(f"Generated candidate batch {i+1} of {len(batched_ics)}.")
batch_candidates = torch.cat(batch_candidates_list)
batch_acq_values = torch.cat(batch_acq_values_list)
if post_processing_func is not None:
batch_candidates = post_processing_func(batch_candidates)
if return_best_only:
best = torch.argmax(batch_acq_values.view(-1), dim=0)
batch_candidates = batch_candidates[best]
batch_acq_values = batch_acq_values[best]
if isinstance(acq_function, OneShotAcquisitionFunction):
if not kwargs.get("return_full_tree", False):
batch_candidates = acq_function.extract_candidates(X_full=batch_candidates)
return batch_candidates, batch_acq_values
def optimize_acqf_cyclic(
acq_function: AcquisitionFunction,
bounds: Tensor,
q: int,
num_restarts: int,
raw_samples: Optional[int] = None,
options: Optional[Dict[str, Union[bool, float, int, str]]] = None,
inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
fixed_features: Optional[Dict[int, float]] = None,
post_processing_func: Optional[Callable[[Tensor], Tensor]] = None,
batch_initial_conditions: Optional[Tensor] = None,
cyclic_options: Optional[Dict[str, Union[bool, float, int, str]]] = None,
) -> Tuple[Tensor, Tensor]:
r"""Generate a set of `q` candidates via cyclic optimization.
Args:
acq_function: An AcquisitionFunction
bounds: A `2 x d` tensor of lower and upper bounds for each column of `X`.
q: The number of candidates.
num_restarts: Number of starting points for multistart acquisition
function optimization.
raw_samples: Number of samples for initialization. This is required
if `batch_initial_conditions` is not specified.
options: Options for candidate generation.
inequality_constraints: A list of tuples (indices, coefficients, rhs),
with each tuple encoding an inequality constraint of the form
`\sum_i (X[indices[i]] * coefficients[i]) >= rhs`
equality_constraints: A list of tuples (indices, coefficients, rhs),
with each tuple encoding an equality constraint of the form
`\sum_i (X[indices[i]] * coefficients[i]) = rhs`
fixed_features: A map `{feature_index: value}` for features that
should be fixed to a particular value during generation.
post_processing_func: A function that post-processes an optimization
result appropriately (i.e., according to `round-trip`
transformations).
batch_initial_conditions: A tensor to specify the initial conditions.
If no initial conditions are provided, the default initialization will
be used.
cyclic_options: Options for stopping criterion for outer cyclic optimization.
Returns:
A two-element tuple containing
- a `q x d`-dim tensor of generated candidates.
- a `q`-dim tensor of expected acquisition values, where the value at
index `i` is the acquisition value conditional on having observed
all candidates except candidate `i`.
Example:
>>> # generate `q=3` candidates cyclically using 15 random restarts
>>> # 256 raw samples, and 4 cycles
>>>
>>> qEI = qExpectedImprovement(model, best_f=0.2)
>>> bounds = torch.tensor([[0.], [1.]])
>>> candidates, acq_value_list = optimize_acqf_cyclic(
>>> qEI, bounds, 3, 15, 256, cyclic_options={"maxiter": 4}
>>> )
"""
# for the first cycle, optimize the q candidates sequentially
candidates, acq_vals = optimize_acqf(
acq_function=acq_function,
bounds=bounds,
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options,
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
fixed_features=fixed_features,
post_processing_func=post_processing_func,
batch_initial_conditions=batch_initial_conditions,
return_best_only=True,
sequential=True,
)
if q > 1:
cyclic_options = cyclic_options or {}
stopping_criterion = ExpMAStoppingCriterion(**cyclic_options)
stop = stopping_criterion.evaluate(fvals=acq_vals)
base_X_pending = acq_function.X_pending
idxr = torch.ones(q, dtype=torch.bool, device=bounds.device)
while not stop:
for i in range(q):
# optimize only candidate i
idxr[i] = 0
acq_function.set_X_pending(
torch.cat([base_X_pending, candidates[idxr]], dim=-2)
if base_X_pending is not None
else candidates[idxr]
)
candidate_i, acq_val_i = optimize_acqf(
acq_function=acq_function,
bounds=bounds,
q=1,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options,
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
fixed_features=fixed_features,
post_processing_func=post_processing_func,
batch_initial_conditions=candidates[i].unsqueeze(0),
return_best_only=True,
sequential=True,
)
candidates[i] = candidate_i
acq_vals[i] = acq_val_i
idxr[i] = 1
stop = stopping_criterion.evaluate(fvals=acq_vals)
acq_function.set_X_pending(base_X_pending)
return candidates, acq_vals
def optimize_acqf_list(
acq_function_list: List[AcquisitionFunction],
bounds: Tensor,
num_restarts: int,
raw_samples: Optional[int] = None,
options: Optional[Dict[str, Union[bool, float, int, str]]] = None,
inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
fixed_features: Optional[Dict[int, float]] = None,
post_processing_func: Optional[Callable[[Tensor], Tensor]] = None,
) -> Tuple[Tensor, Tensor]:
r"""Generate a list of candidates from a list of acquisition functions.
The acquisition functions are optimized in sequence, with previous candidates
set as `X_pending`. This is also known as sequential greedy optimization.
Args:
acq_function_list: A list of acquisition functions.
bounds: A `2 x d` tensor of lower and upper bounds for each column of `X`.
num_restarts: Number of starting points for multistart acquisition
function optimization.
raw_samples: Number of samples for initialization. This is required
if `batch_initial_conditions` is not specified.
options: Options for candidate generation.
inequality_constraints: A list of tuples (indices, coefficients, rhs),
with each tuple encoding an inequality constraint of the form
`\sum_i (X[indices[i]] * coefficients[i]) >= rhs`
equality_constraints: A list of tuples (indices, coefficients, rhs),
with each tuple encoding an equality constraint of the form
`\sum_i (X[indices[i]] * coefficients[i]) = rhs`
fixed_features: A map `{feature_index: value}` for features that
should be fixed to a particular value during generation.
post_processing_func: A function that post-processes an optimization
result appropriately (i.e., according to `round-trip`
transformations).
Returns:
A two-element tuple containing
- a `q x d`-dim tensor of generated candidates.
- a `q`-dim tensor of expected acquisition values, where the value at
index `i` is the acquisition value conditional on having observed
all candidates except candidate `i`.
"""
if not acq_function_list:
raise ValueError("acq_function_list must be non-empty.")
candidate_list, acq_value_list = [], []
candidates = torch.tensor([], device=bounds.device, dtype=bounds.dtype)
base_X_pending = acq_function_list[0].X_pending
for acq_function in acq_function_list:
if candidate_list:
acq_function.set_X_pending(
torch.cat([base_X_pending, candidates], dim=-2)
if base_X_pending is not None
else candidates
)
candidate, acq_value = optimize_acqf(
acq_function=acq_function,
bounds=bounds,
q=1,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options or {},
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
fixed_features=fixed_features,
post_processing_func=post_processing_func,
return_best_only=True,
sequential=False,
)
candidate_list.append(candidate)
acq_value_list.append(acq_value)
candidates = torch.cat(candidate_list, dim=-2)
return candidates, torch.stack(acq_value_list)
def optimize_acqf_mixed(
acq_function: AcquisitionFunction,
bounds: Tensor,
q: int,
num_restarts: int,
fixed_features_list: List[Dict[int, float]],
raw_samples: Optional[int] = None,
options: Optional[Dict[str, Union[bool, float, int, str]]] = None,
inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
post_processing_func: Optional[Callable[[Tensor], Tensor]] = None,
batch_initial_conditions: Optional[Tensor] = None,
**kwargs: Any,
) -> Tuple[Tensor, Tensor]:
r"""Optimize over a list of fixed_features and returns the best solution.
This is useful for optimizing over mixed continuous and discrete domains.
For q > 1 this function always performs sequential greedy optimization (with
proper conditioning on generated candidates).
Args:
acq_function: An AcquisitionFunction
bounds: A `2 x d` tensor of lower and upper bounds for each column of `X`.
q: The number of candidates.
num_restarts: Number of starting points for multistart acquisition
function optimization.
raw_samples: Number of samples for initialization. This is required
if `batch_initial_conditions` is not specified.
fixed_features_list: A list of maps `{feature_index: value}`. The i-th
item represents the fixed_feature for the i-th optimization.
options: Options for candidate generation.
inequality_constraints: A list of tuples (indices, coefficients, rhs),
with each tuple encoding an inequality constraint of the form
`\sum_i (X[indices[i]] * coefficients[i]) >= rhs`
equality_constraints: A list of tuples (indices, coefficients, rhs),
with each tuple encoding an equality constraint of the form
`\sum_i (X[indices[i]] * coefficients[i]) = rhs`
post_processing_func: A function that post-processes an optimization
result appropriately (i.e., according to `round-trip`
transformations).
batch_initial_conditions: A tensor to specify the initial conditions. Set
this if you do not want to use default initialization strategy.
Returns:
A two-element tuple containing
- a `q x d`-dim tensor of generated candidates.
- an associated acquisition value.
"""
if not fixed_features_list:
raise ValueError("fixed_features_list must be non-empty.")
if isinstance(acq_function, OneShotAcquisitionFunction):
if not hasattr(acq_function, "evaluate") and q > 1:
raise ValueError(
"`OneShotAcquisitionFunction`s that do not implement `evaluate` "
"are currently not supported when `q > 1`. This is needed to "
"compute the joint acquisition value."
)
all_columns = set(range(bounds.shape[-1]))
if q == 1:
ff_candidate_list, ff_acq_value_list = [], []
for fixed_features in fixed_features_list:
ff_columns, ff_vals = list(zip(*fixed_features.items()))
ff_acq_function = FixedFeatureAcquisitionFunction(
acq_function=acq_function,
d=bounds.shape[-1],
columns=list(ff_columns),
values=torch.tensor(ff_vals, dtype=bounds.dtype, device=bounds.device),
)
keep_columns = sorted(list(set(all_columns) - set(ff_columns)))
candidate, acq_value = optimize_acqf(
acq_function=ff_acq_function,
bounds=bounds[:, keep_columns],
q=q,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=options or {},
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
post_processing_func=post_processing_func,
batch_initial_conditions=batch_initial_conditions,
return_best_only=True,
)
candidate = ff_acq_function._construct_X_full(X=candidate)
ff_candidate_list.append(candidate)
ff_acq_value_list.append(acq_value)
ff_acq_values = torch.stack(ff_acq_value_list)
best = torch.argmax(ff_acq_values)
return ff_candidate_list[best], ff_acq_values[best]
# For batch optimization with q > 1 we do not want to enumerate all n_combos^n
# possible combinations of discrete choices. Instead, we use sequential greedy
# optimization.
base_X_pending = acq_function.X_pending
candidates = torch.tensor([], device=bounds.device, dtype=bounds.dtype)
for _ in range(q):
candidate, acq_value = optimize_acqf_mixed(
acq_function=acq_function,
bounds=bounds,
q=1,
num_restarts=num_restarts,
raw_samples=raw_samples,
fixed_features_list=fixed_features_list,
options=options or {},
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
post_processing_func=post_processing_func,
batch_initial_conditions=batch_initial_conditions,
)
candidates = torch.cat([candidates, candidate], dim=-2)
acq_function.set_X_pending(
torch.cat([base_X_pending, candidates], dim=-2)
if base_X_pending is not None
else candidates
)
acq_function.set_X_pending(base_X_pending)
# compute joint acquisition value
if isinstance(acq_function, OneShotAcquisitionFunction):
acq_value = acq_function.evaluate(X=candidates, bounds=bounds)
else:
acq_value = acq_function(candidates)
return candidates, acq_value
def optimize_acqf_discrete(
acq_function: AcquisitionFunction,
q: int,
choices: Tensor,
max_batch_size: int = 2048,
unique: bool = True,
**kwargs: Any,
) -> Tuple[Tensor, Tensor]:
r"""Optimize over a discrete set of points using batch evaluation.
For `q > 1` this function generates candidates by means of sequential
conditioning (rather than joint optimization), since for all but the
smallest number of choices the set `choices^q` of discrete points to
evaluate quickly explodes.
Args:
acq_function: An AcquisitionFunction.
q: The number of candidates.
choices: A `num_choices x d` tensor of possible choices.
max_batch_size: The maximum number of choices to evaluate in batch.
A large limit can cause excessive memory usage if the model has
a large training set.
unique: If True, return unique choices; otherwise choices may be repeated
(only relevant if `q > 1`).
Returns:
A two-element tuple containing
- a `q x d`-dim tensor of generated candidates.
- an associated acquisition value.
"""
choices_batched = choices.unsqueeze(-2)
if q > 1:
candidate_list, acq_value_list = [], []
base_X_pending = acq_function.X_pending
for _ in range(q):
with torch.no_grad():
acq_values = _split_batch_eval_acqf(
acq_function=acq_function,
X=choices_batched,
max_batch_size=max_batch_size,
)
best_idx = torch.argmax(acq_values)
candidate_list.append(choices_batched[best_idx])
acq_value_list.append(acq_values[best_idx])
# set pending points
candidates = torch.cat(candidate_list, dim=-2)
acq_function.set_X_pending(
torch.cat([base_X_pending, candidates], dim=-2)
if base_X_pending is not None
else candidates
)
# need to remove choice from choice set if enforcing uniqueness
if unique:
choices_batched = torch.cat(
[choices_batched[:best_idx], choices_batched[best_idx + 1 :]]
)
# Reset acq_func to previous X_pending state
acq_function.set_X_pending(base_X_pending)
return candidates, torch.stack(acq_value_list)
with torch.no_grad():
acq_values = _split_batch_eval_acqf(
acq_function=acq_function, X=choices_batched, max_batch_size=max_batch_size
)
best_idx = torch.argmax(acq_values)
return choices_batched[best_idx], acq_values[best_idx]
def _split_batch_eval_acqf(
acq_function: AcquisitionFunction, X: Tensor, max_batch_size: int
) -> Tensor:
return torch.cat([acq_function(X_) for X_ in X.split(max_batch_size)])
def _generate_neighbors(
x: Tensor,
discrete_choices: List[Tensor],
X_avoid: Tensor,
inequality_constraints: List[Tuple[Tensor, Tensor, float]],
):
# generate all 1D perturbations
npts = sum([len(c) for c in discrete_choices])
X_loc = x.repeat(npts, 1)
j = 0
for i, c in enumerate(discrete_choices):
X_loc[j : j + len(c), i] = c
j += len(c)
# remove invalid and infeasible points (also remove x)
X_loc = _filter_invalid(X=X_loc, X_avoid=torch.cat((X_avoid, x)))
X_loc = _filter_infeasible(X=X_loc, inequality_constraints=inequality_constraints)
return X_loc
def _filter_infeasible(
X: Tensor, inequality_constraints: List[Tuple[Tensor, Tensor, float]]
):
"""Remove all points from `X` that don't satisfy the constraints."""
is_feasible = torch.ones(X.shape[0], dtype=torch.bool, device=X.device)
for (inds, weights, bound) in inequality_constraints:
is_feasible &= (X[..., inds] * weights).sum(dim=-1) >= bound
return X[is_feasible]
def _filter_invalid(X: Tensor, X_avoid: Tensor):
    """Remove all occurrences of `X_avoid` from `X`."""
return X[~(X == X_avoid.unsqueeze(-2)).all(dim=-1).any(dim=-2)]
def _gen_batch_initial_conditions_local_search(
discrete_choices: List[Tensor],
raw_samples: int,
X_avoid: Tensor,
inequality_constraints: List[Tuple[Tensor, Tensor, float]],
min_points: int,
max_tries: int = 100,
):
"""Generate initial conditions for local search."""
tkwargs = {"device": discrete_choices[0].device, "dtype": discrete_choices[0].dtype}
dim = len(discrete_choices)
X = torch.zeros(0, dim, **tkwargs)
for _ in range(max_tries):
X_new = torch.zeros(raw_samples, dim, **tkwargs)
for i, c in enumerate(discrete_choices):
X_new[:, i] = c[
torch.randint(low=0, high=len(c), size=(raw_samples,), device=c.device)
]
X = torch.unique(torch.cat((X, X_new)), dim=0)
X = _filter_invalid(X=X, X_avoid=X_avoid)
X = _filter_infeasible(X=X, inequality_constraints=inequality_constraints)
if len(X) >= min_points:
return X
raise RuntimeError(f"Failed to generate at least {min_points} initial conditions")
def optimize_acqf_discrete_local_search(
acq_function: AcquisitionFunction,
discrete_choices: List[Tensor],
q: int,
num_restarts: int = 20,
raw_samples: int = 4096,
inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
X_avoid: Optional[Tensor] = None,
batch_initial_conditions: Optional[Tensor] = None,
max_batch_size: int = 2048,
unique: bool = True,
**kwargs: Any,
) -> Tuple[Tensor, Tensor]:
r"""Optimize acquisition function over a lattice.
This is useful when d is large and enumeration of the search space
isn't possible. For q > 1 this function always performs sequential
greedy optimization (with proper conditioning on generated candidates).
NOTE: While this method supports arbitrary lattices, it has only been
thoroughly tested for {0, 1}^d. Consider it to be in alpha stage for
the more general case.
Args:
acq_function: An AcquisitionFunction
discrete_choices: A list of possible discrete choices for each dimension.
Each element in the list is expected to be a torch tensor.
q: The number of candidates.
num_restarts: Number of starting points for multistart acquisition
function optimization.
raw_samples: Number of samples for initialization. This is required
if `batch_initial_conditions` is not specified.
inequality_constraints: A list of tuples (indices, coefficients, rhs),
with each tuple encoding an inequality constraint of the form
`\sum_i (X[indices[i]] * coefficients[i]) >= rhs`
X_avoid: An `n x d` tensor of candidates that we aren't allowed to pick.
batch_initial_conditions: A tensor of size `n x 1 x d` to specify the
initial conditions. Set this if you do not want to use default
initialization strategy.
max_batch_size: The maximum number of choices to evaluate in batch.
A large limit can cause excessive memory usage if the model has
a large training set.
        unique: If True return unique choices, otherwise choices may be repeated
            (only relevant if `q > 1`).
Returns:
A two-element tuple containing
- a `q x d`-dim tensor of generated candidates.
- an associated acquisition value.
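    Example:
        >>> # Illustrative sketch only; `acq_func` is assumed to be an acquisition
        >>> # function defined over a {0, 1}^6 lattice.
        >>> discrete_choices = [torch.tensor([0.0, 1.0]) for _ in range(6)]
        >>> candidates, acq_value = optimize_acqf_discrete_local_search(
        >>>     acq_function=acq_func,
        >>>     discrete_choices=discrete_choices,
        >>>     q=2,
        >>> )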
"""
candidate_list = []
base_X_pending = acq_function.X_pending if q > 1 else None
base_X_avoid = X_avoid
tkwargs = {"device": discrete_choices[0].device, "dtype": discrete_choices[0].dtype}
dim = len(discrete_choices)
if X_avoid is None:
X_avoid = torch.zeros(0, dim, **tkwargs)
inequality_constraints = inequality_constraints or []
for i in range(q):
# generate some starting points
if i == 0 and batch_initial_conditions is not None:
X0 = _filter_invalid(X=batch_initial_conditions.squeeze(1), X_avoid=X_avoid)
X0 = _filter_infeasible(
X=X0, inequality_constraints=inequality_constraints
).unsqueeze(1)
else:
X_init = _gen_batch_initial_conditions_local_search(
discrete_choices=discrete_choices,
raw_samples=raw_samples,
X_avoid=X_avoid,
inequality_constraints=inequality_constraints,
min_points=num_restarts,
)
# pick the best starting points
with torch.no_grad():
acqvals_init = _split_batch_eval_acqf(
acq_function=acq_function,
X=X_init.unsqueeze(1),
max_batch_size=max_batch_size,
).unsqueeze(-1)
X0 = X_init[acqvals_init.topk(k=num_restarts, largest=True, dim=0).indices]
# optimize from the best starting points
best_xs = torch.zeros(len(X0), dim, **tkwargs)
best_acqvals = torch.zeros(len(X0), 1, **tkwargs)
for j, x in enumerate(X0):
curr_x, curr_acqval = x.clone(), acq_function(x.unsqueeze(1))
while True:
# this generates all feasible neighbors that are one bit away
X_loc = _generate_neighbors(
x=curr_x,
discrete_choices=discrete_choices,
X_avoid=X_avoid,
inequality_constraints=inequality_constraints,
)
# there may not be any neighbors
if len(X_loc) == 0:
break
with torch.no_grad():
acqval_loc = acq_function(X_loc.unsqueeze(1))
# break if no neighbor is better than the current point (local optimum)
if acqval_loc.max() <= curr_acqval:
break
best_ind = acqval_loc.argmax().item()
curr_x, curr_acqval = X_loc[best_ind].unsqueeze(0), acqval_loc[best_ind]
best_xs[j, :], best_acqvals[j] = curr_x, curr_acqval
# pick the best
best_idx = best_acqvals.argmax()
candidate_list.append(best_xs[best_idx].unsqueeze(0))
# set pending points
candidates = torch.cat(candidate_list, dim=-2)
if q > 1:
acq_function.set_X_pending(
torch.cat([base_X_pending, candidates], dim=-2)
if base_X_pending is not None
else candidates
)
# Update points to avoid if unique is True
if unique:
X_avoid = (
torch.cat([base_X_avoid, candidates], dim=-2)
if base_X_avoid is not None
else candidates
)
# Reset acq_func to original X_pending state
if q > 1:
acq_function.set_X_pending(base_X_pending)
with torch.no_grad():
acq_value = acq_function(candidates) # compute joint acquisition value
return candidates, acq_value
|
bo_pr-main
|
discrete_mixed_bo/optimize.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Utilities for running experiments."""
from collections import OrderedDict
from copy import deepcopy
from itertools import product
from math import log
from typing import Callable, Dict, List, Optional, Tuple, Union
import torch
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.analytic import (
ConstrainedExpectedImprovement,
ExpectedImprovement,
PosteriorMean,
UpperConfidenceBound,
)
from botorch.acquisition.multi_objective.analytic import ExpectedHypervolumeImprovement
from botorch.acquisition.multi_objective.monte_carlo import (
qExpectedHypervolumeImprovement,
)
from botorch.acquisition.objective import ScalarizedPosteriorTransform
from botorch.models import FixedNoiseGP, ModelListGP, SingleTaskGP
from botorch.models.gpytorch import GPyTorchModel
from botorch.models.model import Model
from botorch.models.transforms.input import ChainedInputTransform, Normalize
from botorch.models.transforms.outcome import Standardize
from botorch.test_functions.base import ConstrainedBaseTestProblem
from botorch.test_functions.multi_objective import DTLZ2
from botorch.test_functions.synthetic import Ackley, Hartmann, Rosenbrock
from botorch.utils.multi_objective.box_decompositions.non_dominated import (
FastNondominatedPartitioning,
)
from botorch.utils.sampling import draw_sobol_samples
from botorch.utils.transforms import normalize, unnormalize
from gpytorch.constraints import GreaterThan, Interval
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.mlls import SumMarginalLogLikelihood
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood
from gpytorch.priors import GammaPrior
from scipy.stats.mstats import winsorize as scipy_winsorize
from statsmodels.distributions.empirical_distribution import ECDF
from torch import Tensor
from torch.nn.functional import one_hot
from discrete_mixed_bo.input import (
LatentCategoricalEmbedding,
LatentCategoricalSpec,
OneHotToNumeric,
Round,
)
from discrete_mixed_bo.kernels import get_kernel
from discrete_mixed_bo.model_utils import apply_normal_copula_transform
from discrete_mixed_bo.probabilistic_reparameterization import (
AnalyticProbabilisticReparameterization,
MCProbabilisticReparameterization,
)
from discrete_mixed_bo.problems.base import (
DiscreteTestProblem,
DiscretizedBotorchTestProblem,
)
from discrete_mixed_bo.problems.binary import LABS, Contamination
from discrete_mixed_bo.problems.cco.cco import CCO
from discrete_mixed_bo.problems.chemistry import Chemistry
from discrete_mixed_bo.problems.coco_mixed_integer import Sphere
from discrete_mixed_bo.problems.environmental import Environmental
from discrete_mixed_bo.problems.nashpobench2 import NASHPOBenchII
from discrete_mixed_bo.problems.oil_sorbent import OilSorbent, OilSorbentMixed
from discrete_mixed_bo.problems.pest import PestControl
from discrete_mixed_bo.problems.re_problems import PressureVessel
from discrete_mixed_bo.problems.svm import SVMFeatureSelection
from discrete_mixed_bo.problems.welded_beam import WeldedBeam
from discrete_mixed_bo.problems.xgboost_hp import XGBoostHyperparameter
from discrete_mixed_bo.rffs import get_gp_sample_w_transforms
def eval_problem(X: Tensor, base_function: DiscreteTestProblem) -> Tensor:
is_constrained = isinstance(base_function, ConstrainedBaseTestProblem)
X_numeric = torch.zeros(
*X.shape[:-1],
base_function.bounds.shape[-1],
dtype=X.dtype,
device=X.device,
)
X_numeric[..., base_function.integer_indices] = X[
..., base_function.integer_indices
]
X_numeric[..., base_function.cont_indices] = X[..., base_function.cont_indices]
start_idx = None
# X is one-hot encoded
# transform from one-hot space to numeric space
for i, cardinality in base_function.categorical_features.items():
if start_idx is None:
start_idx = i
end_idx = start_idx + cardinality
X_numeric[..., i] = (
X[..., start_idx:end_idx].argmax(dim=-1).to(dtype=X_numeric.dtype)
)
start_idx = end_idx
# normalize from integers to unit cube
if len(base_function.categorical_features) > 0:
X_numeric[..., base_function.categorical_indices] = normalize(
X_numeric[..., base_function.categorical_indices],
base_function.categorical_bounds,
)
X_numeric = unnormalize(X_numeric, base_function.bounds)
Y = base_function(X_numeric)
if Y.ndim == X_numeric.ndim - 1:
Y = Y.unsqueeze(-1)
if is_constrained:
# here, non-negative Y_con implies feasibility
Y_con = base_function.evaluate_slack(X_numeric)
Y = torch.cat([Y, Y_con], dim=-1)
return Y
def get_exact_rounding_func(
bounds: Tensor,
integer_indices: Optional[List[int]] = None,
categorical_features: Optional[Dict[int, int]] = None,
initialization: bool = False,
return_numeric: bool = False,
use_ste: bool = False,
) -> ChainedInputTransform:
"""Get an exact rounding function.
    The rounding function will take inputs from the unit cube, unnormalize them
    to the raw search space, round the inputs, and normalize them back to the
    unit cube.
Categoricals are assumed to be one-hot encoded.
Args:
bounds: The raw search space bounds.
integer_indices: The indices of the integer parameters
categorical_features: A dictionary mapping indices to cardinalities for the categorical features.
        initialization: A boolean indicating whether this exact rounding
function is for initialization.
        return_numeric: A boolean indicating whether to return numeric or
            one-hot encoded categoricals.
        use_ste: Whether to use straight-through gradient estimation.
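    Example:
        >>> # Illustrative sketch; a 3d problem whose last dimension is an integer
        >>> # parameter in {0, ..., 4} and which has no categoricals.
        >>> raw_bounds = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 4.0]])
        >>> round_tf = get_exact_rounding_func(bounds=raw_bounds, integer_indices=[2])
        >>> X_rounded = round_tf(torch.rand(10, 3))  # inputs and outputs are in the unit cube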
"""
if initialization:
# this gives the extremes the same probability as the
# interior values
init_bounds = bounds.clone()
init_bounds[0, integer_indices] -= 0.4999
init_bounds[1, integer_indices] += 0.4999
else:
init_bounds = bounds
tfs = OrderedDict()
if integer_indices is not None and len(integer_indices) > 0:
# unnormalize to integer space
tfs["unnormalize_tf"] = Normalize(
d=bounds.shape[1],
bounds=init_bounds,
indices=integer_indices,
transform_on_train=False,
transform_on_eval=True,
transform_on_fantasize=False,
reverse=True,
)
# round
tfs["round"] = Round(
approximate=False,
transform_on_train=False,
transform_on_fantasize=False,
# TODO: generalize
integer_indices=integer_indices,
categorical_features=categorical_features,
use_ste=use_ste,
)
if integer_indices is not None and len(integer_indices) > 0:
# renormalize to unit cube
tfs["normalize_tf"] = Normalize(
d=bounds.shape[1],
bounds=bounds,
indices=integer_indices,
transform_on_train=False,
transform_on_eval=True,
transform_on_fantasize=False,
reverse=False,
)
if return_numeric:
tfs["one_hot_to_numeric"] = OneHotToNumeric(
categorical_features=categorical_features,
transform_on_train=False,
transform_on_eval=True,
transform_on_fantasize=False,
use_ste=use_ste,
)
tf = ChainedInputTransform(**tfs)
tf.to(dtype=bounds.dtype, device=bounds.device)
tf.eval()
return tf
def generate_initial_data(
n: int,
base_function: DiscreteTestProblem,
bounds: Tensor,
tkwargs: dict,
init_exact_rounding_func: ChainedInputTransform,
) -> Tuple[Tensor, Tensor]:
r"""
Generates the initial data for the experiments.
Args:
n: Number of training points.
base_function: The base problem.
bounds: The bounds to generate the training points from. `2 x d`-dim tensor.
tkwargs: Arguments for tensors, dtype and device.
init_exact_rounding_func: The exact rounding function for initialization.
Returns:
The train_X and train_Y. `n x d` and `n x m`.
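    Example:
        >>> # Illustrative sketch; `problem` and `init_round_tf` are assumed to come
        >>> # from `get_problem` and `get_exact_rounding_func`, respectively.
        >>> bounds = torch.ones(2, problem.effective_dim)
        >>> bounds[0] = 0
        >>> X, Y = generate_initial_data(
        >>>     n=20,
        >>>     base_function=problem,
        >>>     bounds=bounds,
        >>>     tkwargs={"dtype": torch.double},
        >>>     init_exact_rounding_func=init_round_tf,
        >>> )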
"""
raw_x = draw_sobol_samples(bounds=bounds, n=n, q=1).squeeze(-2).to(**tkwargs)
train_x = init_exact_rounding_func(raw_x)
train_obj = eval_problem(train_x, base_function=base_function)
return train_x, train_obj
def apply_winsorize(
y: Tensor, winsorization_level: float, maximize: bool = True
) -> Tensor:
if maximize:
winsorize_limits = (winsorization_level, None)
else:
winsorize_limits = (None, winsorization_level)
return torch.from_numpy(scipy_winsorize(y.cpu().numpy(), winsorize_limits)).to(y)
def initialize_model(
train_x: Tensor,
train_y: Tensor,
binary_dims: List[int],
categorical_features: Optional[List[int]] = None,
use_model_list: bool = False,
use_fixed_noise: bool = True,
kernel_type: str = "mixed",
copula: bool = False,
winsorize: bool = False,
latent_emb_dim: Optional[int] = None,
use_ard_binary: bool = False,
function_name: Optional[str] = None,
) -> Tuple[
Union[ExactMarginalLogLikelihood, SumMarginalLogLikelihood],
Union[FixedNoiseGP, SingleTaskGP, ModelListGP],
Optional[List[ECDF]],
]:
r"""Constructs the model and its MLL.
TODO: add better kernel selection for binary inputs.
Args:
train_x: An `n x d`-dim tensor of training inputs.
train_y: An `n x m`-dim tensor of training outcomes.
use_model_list: If True, returns a ModelListGP with models for each outcome.
use_fixed_noise: If True, assumes noise-free outcomes and uses FixedNoiseGP.
Returns:
        The MLL, the model, and the list of ECDFs (or None if `copula=False`).
        Note: the model is not trained!
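    Example:
        >>> # Illustrative sketch; `train_x` / `train_y` are assumed to be the
        >>> # (normalized) training data and dimension 0 is assumed to be binary.
        >>> mll, model, ecdfs = initialize_model(train_x, train_y, binary_dims=[0])
        >>> fit_gpytorch_model(mll)  # the model is returned unfitted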
"""
base_model_class = FixedNoiseGP if use_fixed_noise else SingleTaskGP
# define models for objective and constraint
if copula:
train_y, ecdfs = apply_normal_copula_transform(train_y)
else:
ecdfs = None
if winsorize:
train_y = apply_winsorize(train_y, winsorization_level=0.2)
if use_fixed_noise:
train_Yvar = torch.full_like(train_y, 1e-7) * train_y.std(dim=0).pow(2)
if kernel_type in ("mixed_categorical", "mixed_latent"):
# map one-hot categoricals to numeric representation
input_transform = OneHotToNumeric(categorical_features=categorical_features)
input_transform.eval()
train_x = input_transform(train_x)
if categorical_features is None or kernel_type == "mixed_latent":
categorical_dims = []
else:
categorical_dims = list(categorical_features.keys())
categorical_transformed_features = categorical_features
model_kwargs = []
for i in range(train_y.shape[-1]):
transformed_x = train_x
if kernel_type == "mixed_latent":
categorical_transformed_features = {}
specs = []
start = None
for idx, card in categorical_features.items():
if start is None:
start = idx
spec = LatentCategoricalSpec(
idx=idx,
num_categories=card,
latent_dim=latent_emb_dim
if latent_emb_dim is not None
else (1 if card <= 3 else 2),
)
categorical_transformed_features[start] = spec.latent_dim
start = start + spec.latent_dim
specs.append(spec)
input_transform = LatentCategoricalEmbedding(
specs,
dim=train_x.shape[-1],
).to(train_x)
with torch.no_grad():
transformed_x = input_transform(train_x)
cat_start = train_x.shape[-1] - len(categorical_features)
categorical_dims = list(range(cat_start, transformed_x.shape[-1]))
else:
input_transform = None
model_kwargs.append(
{
"train_X": train_x,
"train_Y": train_y[..., i : i + 1],
"covar_module": get_kernel(
kernel_type=kernel_type,
dim=transformed_x.shape[-1],
binary_dims=binary_dims,
categorical_transformed_features=categorical_transformed_features,
train_X=train_x,
train_Y=train_y,
function_name=function_name,
use_ard_binary=use_ard_binary,
),
"input_transform": input_transform,
}
)
if use_fixed_noise:
model_kwargs[i]["train_Yvar"] = train_Yvar[..., i : i + 1]
else:
model_kwargs[i]["likelihood"] = GaussianLikelihood(
noise_prior=GammaPrior(0.9, 10.0),
noise_constraint=Interval(1e-7, 1e-3),
)
models = [base_model_class(**model_kwargs[i]) for i in range(train_y.shape[-1])]
if len(models) > 1:
model = ModelListGP(*models)
mll = SumMarginalLogLikelihood(model.likelihood, model)
else:
model = models[0]
mll = ExactMarginalLogLikelihood(model.likelihood, model)
return mll, model, ecdfs
def get_EI(
model: Model,
train_Y: Tensor,
num_constraints: int,
standardize_tf: Standardize,
posterior_transform: Optional[ScalarizedPosteriorTransform] = None,
) -> ExpectedImprovement:
if posterior_transform is not None:
obj = posterior_transform.evaluate(train_Y)
else:
obj = train_Y[..., 0]
if num_constraints > 0:
constraints = {}
slacks = torch.zeros(
num_constraints, dtype=train_Y.dtype, device=train_Y.device
)
for i in range(1, num_constraints + 1):
slack = (
(0.0 - standardize_tf.means[0, i]) / standardize_tf.stdvs[0, i]
).item()
slacks[i - 1] = slack
constraints[i] = (slack, None)
feas = (train_Y[..., 1:] >= slacks).all(dim=-1)
if feas.any():
best_f = obj[feas].max()
else:
# take worst point
best_f = obj.min()
if posterior_transform is not None:
raise NotImplementedError
return ConstrainedExpectedImprovement(
model=model,
best_f=best_f,
objective_index=0,
constraints=constraints,
)
return ExpectedImprovement(
model, best_f=obj.max(), posterior_transform=posterior_transform
)
def get_EHVI(
model: Model, train_Y: Tensor, ref_point: Tensor
) -> ExpectedHypervolumeImprovement:
bd = FastNondominatedPartitioning(ref_point=ref_point, Y=train_Y)
return ExpectedHypervolumeImprovement(
model=model,
ref_point=ref_point.tolist(),
partitioning=bd,
)
def get_qEHVI(
model: Model, train_Y: Tensor, ref_point: Tensor
) -> qExpectedHypervolumeImprovement:
bd = FastNondominatedPartitioning(ref_point=ref_point, Y=train_Y)
return qExpectedHypervolumeImprovement(
model=model,
ref_point=ref_point.tolist(),
partitioning=bd,
)
def get_acqf(
label: str,
mc_samples: int,
model: GPyTorchModel,
X_baseline: Tensor,
train_Y: Tensor,
iteration: int,
tkwargs: dict,
num_constraints: int,
base_function: DiscreteTestProblem,
exact_rounding_func: ChainedInputTransform,
standardize_tf: Standardize,
batch_size: int = 1,
**kwargs,
) -> Union[AcquisitionFunction, List[AcquisitionFunction]]:
r"""Combines a few of the above utils to construct the acqf."""
if base_function.is_moo:
ref_point = standardize_tf(base_function.ref_point.unsqueeze(0))[0].squeeze(0)
is_constrained = isinstance(base_function, ConstrainedBaseTestProblem)
if is_constrained and label[-2:] != "ei":
raise NotImplementedError("Only EI is currently supported with constraints.")
num_constraints = base_function.num_constraints if is_constrained else 0
if base_function.objective_weights is not None:
posterior_transform = ScalarizedPosteriorTransform(
weights=base_function.objective_weights
)
else:
posterior_transform = None
if label[-3:] == "ucb":
beta = 0.2 * X_baseline.shape[-1] * log(2 * iteration)
if ("exact_round" in label) or ("enumerate" in label):
if isinstance(model, ModelListGP):
models = model.models
for m in model.models:
if hasattr(m, "input_transform"):
m.input_transform = ChainedInputTransform(
round=deepcopy(exact_rounding_func), orig=m.input_transform
)
else:
m.input_transform = deepcopy(exact_rounding_func)
else:
if hasattr(model, "input_transform"):
model.input_transform = ChainedInputTransform(
round=exact_rounding_func, orig=model.input_transform
)
else:
model.input_transform = exact_rounding_func
if batch_size == 1:
if label[-2:] == "ei":
acq_func = get_EI(
model=model,
train_Y=train_Y,
num_constraints=num_constraints,
posterior_transform=posterior_transform,
standardize_tf=standardize_tf,
)
elif label[-3:] == "ucb":
acq_func = UpperConfidenceBound(
model=model,
beta=beta,
posterior_transform=posterior_transform,
)
elif label[-2:] == "ts" or label[-7:] == "nehvi-1":
model = get_gp_sample_w_transforms(
model=model,
num_outputs=model.num_outputs,
n_samples=1,
num_rff_features=kwargs.get("num_rff_features", 512),
)
if label[-2:] == "ts":
acq_func = PosteriorMean(
model=model,
posterior_transform=posterior_transform,
)
if label[-7:] == "nehvi-1":
with torch.no_grad():
preds = model.posterior(X_baseline).mean
acq_func = get_qEHVI(
model=model,
train_Y=preds,
ref_point=ref_point,
)
elif label[-4:] == "ehvi":
acq_func = get_EHVI(
model=model,
train_Y=train_Y,
ref_point=ref_point,
)
else:
raise NotImplementedError
if "pr" in label:
if kwargs.get("pr_use_analytic", False):
acq_func = AnalyticProbabilisticReparameterization(
acq_function=acq_func,
dtype=train_Y.dtype,
device=train_Y.device,
integer_indices=base_function.integer_indices.cpu().tolist(),
integer_bounds=base_function.integer_bounds,
categorical_features=base_function.categorical_features,
dim=X_baseline.shape[-1],
apply_numeric=kwargs.get("apply_numeric", False),
tau=kwargs.get("pr_tau", 0.1),
)
else:
acq_func = MCProbabilisticReparameterization(
acq_function=acq_func,
integer_indices=base_function.integer_indices.cpu().tolist(),
integer_bounds=base_function.integer_bounds,
categorical_features=base_function.categorical_features,
dim=X_baseline.shape[-1],
batch_limit=kwargs.get("pr_batch_limit", 32),
mc_samples=kwargs.get("pr_mc_samples", 1024),
apply_numeric=kwargs.get("apply_numeric", False),
tau=kwargs.get("pr_tau", 0.1),
grad_estimator=kwargs.get("pr_grad_estimator", "reinforce"),
)
return acq_func
def get_problem(name: str, dim: Optional[int] = None, **kwargs) -> DiscreteTestProblem:
r"""Initialize the test function."""
if name == "discrete_hartmann":
dim = 6
integer_bounds = torch.zeros(2, 4)
integer_bounds[1, :2] = 2 # 3 values
integer_bounds[1, 2:4] = 9 # 10 values
hartmann = Hartmann(dim=dim, negate=True)
return DiscretizedBotorchTestProblem(
problem=hartmann,
integer_indices=list(range(4)),
integer_bounds=integer_bounds,
)
elif name == "discrete_hartmann2":
dim = 6
integer_bounds = torch.zeros(2, 4)
        integer_bounds[1, :2] = 3 # 4 values
        integer_bounds[1, 2:4] = 19 # 20 values
hartmann = Hartmann(dim=dim, negate=True)
return DiscretizedBotorchTestProblem(
problem=hartmann,
integer_indices=list(range(2, 6)),
integer_bounds=integer_bounds,
)
elif name == "categorical_hartmann":
dim = 6
categorical_bounds = torch.zeros(2, 2)
categorical_bounds[1, :2] = 2 # 3 values
hartmann = Hartmann(dim=dim, negate=True)
return DiscretizedBotorchTestProblem(
problem=hartmann,
categorical_indices=list(range(4, 6)),
categorical_bounds=categorical_bounds,
)
elif name == "discrete_ackley":
dim = 20
integer_bounds = torch.zeros(2, dim - 5)
integer_bounds[1, :5] = 2 # 3 values
integer_bounds[1, 5:10] = 4 # 5 values
integer_bounds[1, 10:15] = 9 # 10 values
ackley = Ackley(dim=dim, negate=True)
return DiscretizedBotorchTestProblem(
problem=ackley,
integer_indices=list(range(dim - 5)),
integer_bounds=integer_bounds,
)
elif name == "ackley13":
dim = 13
ackley = Ackley(dim=dim, negate=True)
ackley.bounds[0, :-3] = 0
ackley.bounds[1] = 1
ackley.bounds[0, -3:] = -1
integer_bounds = torch.zeros(2, dim - 3)
integer_bounds[1] = 1 # 2 values
return DiscretizedBotorchTestProblem(
problem=ackley,
integer_indices=list(range(dim - 3)),
integer_bounds=integer_bounds,
)
elif name == "integer_ackley13":
dim = 13
ackley = Ackley(dim=dim, negate=True)
ackley.bounds[0, :-3] = 0
ackley.bounds[1] = 1
ackley.bounds[0, -3:] = -1
integer_bounds = torch.zeros(2, dim - 3)
integer_bounds[1, :5] = 2 # 3 values
integer_bounds[1, dim - 8 :] = 4 # 5 values
return DiscretizedBotorchTestProblem(
problem=ackley,
integer_indices=list(range(dim - 3)),
integer_bounds=integer_bounds,
)
elif name == "contamination":
assert dim is not None
return Contamination(dim=dim, negate=True)
elif name == "labs":
assert dim is not None
return LABS(dim=dim, negate=True)
elif name == "svm":
data = kwargs.get("data")
assert data is not None
assert dim is not None
return SVMFeatureSelection(
data=data,
dim=dim,
negate=True,
)
elif name == "discrete_oil":
return OilSorbent(negate=True)
elif name == "mixed_oil":
return OilSorbentMixed(negate=True)
elif name == "cco":
return CCO(
data=kwargs.get("data"),
negate=True,
scalarize=kwargs.get("scalarize", False),
n_int_values=kwargs.get("n_int_values", 6),
)
elif name == "discrete_dtlz2":
dim = 6
integer_bounds = torch.zeros(2, 4)
        integer_bounds[1, :1] = 10 # 11 values
integer_bounds[1, 1:4] = 4 # 5 values
dtlz2 = DTLZ2(dim=dim, negate=True)
return DiscretizedBotorchTestProblem(
problem=dtlz2,
integer_indices=list(range(4)),
integer_bounds=integer_bounds,
)
elif name == "environmental":
return Environmental(negate=True)
elif name == "mixed_int_f1":
integer_bounds = torch.zeros(2, 8)
integer_bounds[1, :2] = 1
integer_bounds[1, 2:4] = 2
integer_bounds[1, 4:6] = 4
integer_bounds[1, 6:] = 6
return DiscretizedBotorchTestProblem(
problem=Sphere(negate=True, dim=16),
integer_indices=list(range(8)),
integer_bounds=integer_bounds,
)
elif name == "mixed_int_f3":
integer_bounds = torch.zeros(2, 16)
integer_bounds[1, :4] = 1
integer_bounds[1, 4:8] = 3
integer_bounds[1, 8:12] = 7
integer_bounds[1, 12:16] = 15
return DiscretizedBotorchTestProblem(
problem=Sphere(negate=True, dim=20),
integer_indices=list(range(16)),
integer_bounds=integer_bounds,
)
elif name == "nashpobench2":
data = kwargs.get("data")
assert data is not None
return NASHPOBenchII(
data=data,
negate=True,
num_objectives=kwargs.get("num_objectives", 1),
use_12_epoch_result=kwargs.get("use_12_epoch_result", False),
use_log=kwargs.get("use_log", False),
)
elif name == "pressure_vessel":
return PressureVessel(negate=True)
elif name == "rosenbrock10":
rosen = Rosenbrock(dim=10, negate=True)
integer_bounds = torch.zeros(2, 6)
integer_bounds[1, :] = 2 # 3 values
return DiscretizedBotorchTestProblem(
problem=rosen,
integer_indices=list(range(4, 10)),
integer_bounds=integer_bounds,
)
elif name == "rosenbrock10_scaled":
rosen = Rosenbrock(dim=10, negate=True)
rosen.bounds[0, :6] = -2.1
rosen.bounds[1, :6] = 2.3
rosen.bounds[0, 6:] = -2
rosen.bounds[1, 6:] = 2
integer_bounds = torch.zeros(2, 6)
integer_bounds[1, :] = 3 # 4 values
return DiscretizedBotorchTestProblem(
problem=rosen,
integer_indices=list(range(6)),
integer_bounds=integer_bounds,
)
elif name == "pest":
return PestControl(
dim=kwargs.get("dim", 25), n_choice=kwargs.get("n_choice", 5), negate=True
)
elif name == "xgb":
return XGBoostHyperparameter(
task=kwargs.get("task", "mnist"),
negate=True,
data=kwargs.get("data"),
)
elif name == "chemistry":
return Chemistry(
negate=True,
data=kwargs.get("data"),
)
elif name == "welded_beam":
return WeldedBeam(
negate=True,
continuous=kwargs.get("continuous", False),
)
else:
raise ValueError(f"Unknown function name: {name}!")
def generate_discrete_options(
base_function: DiscreteTestProblem,
return_tensor: bool = False,
) -> Union[List[Dict[int, float]], Tensor]:
categorical_features = base_function.categorical_features
discrete_indices = torch.cat(
[base_function.integer_indices, base_function.categorical_indices], dim=0
)
cardinalities = (
(
base_function.bounds[1, discrete_indices]
- base_function.bounds[0, discrete_indices]
+ 1
)
.long()
.tolist()
)
discrete_indices_list = discrete_indices.tolist()
discrete_options = torch.tensor(
list(product(*[range(c) for c in cardinalities])),
dtype=torch.long,
)
# normalize to unit cube
discrete_options = normalize(
discrete_options, base_function.bounds[:, discrete_indices].cpu()
)
if len(base_function.categorical_features) > 0:
# unnormalize categoricals
cat_indices = base_function.categorical_indices.cpu() - len(
base_function.cont_indices
) # shift by number of continuous indices
discrete_options[..., cat_indices] = unnormalize(
discrete_options[..., cat_indices],
base_function.categorical_bounds.cpu(),
)
indices = base_function.integer_indices.tolist()
# now one-hot encode the categoricals
if categorical_features is not None:
one_hot_categoricals = [
one_hot(discrete_options[:, i].long(), num_classes=cardinalities[i])
for i in range(
base_function.integer_indices.shape[0], discrete_options.shape[1]
)
]
discrete_options = torch.cat(
[
discrete_options[:, : base_function.integer_indices.shape[0]],
*one_hot_categoricals,
],
dim=-1,
)
# get a list of the starting and ending indices of each categorical feature in one-hot space
start_idx = None
for i in sorted(categorical_features.keys()):
if start_idx is None:
start_idx = i
end_idx = start_idx + categorical_features[i]
categ_indices = list(range(start_idx, end_idx))
indices.extend(categ_indices)
start_idx = end_idx
# create a list of dictionaries of mapping indices to values
# the list has a dictionary for each discrete configuration
if return_tensor:
return discrete_options.to(base_function.bounds)
return [dict(zip(indices, xi)) for xi in discrete_options.tolist()]
|
bo_pr-main
|
discrete_mixed_bo/experiment_utils.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Wrapper classes around AcquisitionFunctions to modify inputs and outputs.
"""
from __future__ import annotations
from typing import Optional
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.models.model import Model
from torch import Tensor
from torch.nn import Module
class AcquisitionFunctionWrapper(AcquisitionFunction):
r"""Abstract acquisition wrapper."""
def __init__(self, acq_function: AcquisitionFunction) -> None:
Module.__init__(self)
self.acq_function = acq_function
@property
def X_baseline(self) -> Optional[Tensor]:
return self.acq_function.X_baseline
@property
def model(self) -> Model:
return self.acq_function.model
class IntegratedAcquisitionFunction(AcquisitionFunctionWrapper):
r"""Integrate acquisition function wrapper.
This can be used for integrating over batch dimensions. For example,
this can be used to integrate over hyperparameter samples from a
    fully Bayesian (MCMC) model.
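    Example:
        >>> # Illustrative sketch; `batched_acq_func` is assumed to be built on a
        >>> # model with a batch dimension (e.g. MCMC hyperparameter samples).
        >>> integrated_acqf = IntegratedAcquisitionFunction(batched_acq_func)
        >>> values = integrated_acqf(X)  # averaged over `marginalize_dim`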
"""
def __init__(
self, acq_function: AcquisitionFunction, marginalize_dim: int = -1
) -> None:
super().__init__(acq_function=acq_function)
self._marginalize_dim = marginalize_dim
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate the acquisition function and integrate over marginalize_dim.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of acquisition function values at the given
design points `X`.
"""
return self.acq_function(X=X).mean(dim=self._marginalize_dim)
|
bo_pr-main
|
discrete_mixed_bo/wrapper.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Candidate generation utilities.
This is a copy of botorch.gen.py that uses finite differences when
specified.
"""
from __future__ import annotations
import warnings
from functools import partial
from typing import Any, Callable, Dict, List, NoReturn, Optional, Tuple, Type, Union
import numpy as np
import torch
from botorch.acquisition import AcquisitionFunction
from botorch.generation.utils import _remove_fixed_features_from_optimization
from botorch.optim.parameter_constraints import (
NLC_TOL,
_arrayify,
make_scipy_bounds,
make_scipy_linear_constraints,
make_scipy_nonlinear_inequality_constraints,
)
from botorch.optim.stopping import ExpMAStoppingCriterion
from botorch.optim.utils import _filter_kwargs, columnwise_clamp, fix_features
from scipy.optimize import minimize
from torch import Tensor
def gen_candidates_scipy(
initial_conditions: Tensor,
acquisition_function: AcquisitionFunction,
lower_bounds: Optional[Union[float, Tensor]] = None,
upper_bounds: Optional[Union[float, Tensor]] = None,
inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
nonlinear_inequality_constraints: Optional[List[Callable]] = None,
options: Optional[Dict[str, Any]] = None,
fixed_features: Optional[Dict[int, Optional[float]]] = None,
) -> Tuple[Tensor, Tensor]:
r"""Generate a set of candidates using `scipy.optimize.minimize`.
Optimizes an acquisition function starting from a set of initial candidates
using `scipy.optimize.minimize` via a numpy converter.
Args:
initial_conditions: Starting points for optimization.
acquisition_function: Acquisition function to be used.
lower_bounds: Minimum values for each column of initial_conditions.
upper_bounds: Maximum values for each column of initial_conditions.
        inequality_constraints: A list of tuples (indices, coefficients, rhs),
            with each tuple encoding an inequality constraint of the form
            `\sum_i (X[indices[i]] * coefficients[i]) >= rhs`.
        equality_constraints: A list of tuples (indices, coefficients, rhs),
            with each tuple encoding an equality constraint of the form
            `\sum_i (X[indices[i]] * coefficients[i]) = rhs`.
        nonlinear_inequality_constraints: A list of callables that represent
non-linear inequality constraints of the form `callable(x) >= 0`. Each
callable is expected to take a `(num_restarts) x q x d`-dim tensor as
an input and return a `(num_restarts) x q`-dim tensor with the
constraint values. The constraints will later be passed to SLSQP.
options: Options used to control the optimization including "method"
and "maxiter". Select method for `scipy.minimize` using the
"method" key. By default uses L-BFGS-B for box-constrained problems
and SLSQP if inequality or equality constraints are present.
fixed_features: This is a dictionary of feature indices to values, where
all generated candidates will have features fixed to these values.
If the dictionary value is None, then that feature will just be
fixed to the clamped value and not optimized. Assumes values to be
compatible with lower_bounds and upper_bounds!
Returns:
2-element tuple containing
- The set of generated candidates.
- The acquisition value for each t-batch.
Example:
>>> qEI = qExpectedImprovement(model, best_f=0.2)
>>> bounds = torch.tensor([[0., 0.], [1., 2.]])
>>> Xinit = gen_batch_initial_conditions(
>>> qEI, bounds, q=3, num_restarts=25, raw_samples=500
>>> )
>>> batch_candidates, batch_acq_values = gen_candidates_scipy(
initial_conditions=Xinit,
acquisition_function=qEI,
lower_bounds=bounds[0],
upper_bounds=bounds[1],
)
"""
options = options or {}
# if there are fixed features we may optimize over a domain of lower dimension
reduced_domain = False
if fixed_features:
# TODO: We can support fixed features, see Max's comment on D33551393. We can
# consider adding this at a later point.
if nonlinear_inequality_constraints:
raise NotImplementedError(
"Fixed features are not supported when non-linear inequality "
"constraints are given."
)
# if there are no constraints things are straightforward
if not (inequality_constraints or equality_constraints):
reduced_domain = True
# if there are we need to make sure features are fixed to specific values
else:
reduced_domain = None not in fixed_features.values()
if reduced_domain:
_no_fixed_features = _remove_fixed_features_from_optimization(
fixed_features=fixed_features,
acquisition_function=acquisition_function,
initial_conditions=initial_conditions,
lower_bounds=lower_bounds,
upper_bounds=upper_bounds,
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
)
# call the routine with no fixed_features
clamped_candidates, batch_acquisition = gen_candidates_scipy(
initial_conditions=_no_fixed_features.initial_conditions,
acquisition_function=_no_fixed_features.acquisition_function,
lower_bounds=_no_fixed_features.lower_bounds,
upper_bounds=_no_fixed_features.upper_bounds,
inequality_constraints=_no_fixed_features.inequality_constraints,
equality_constraints=_no_fixed_features.equality_constraints,
options=options,
fixed_features=None,
)
clamped_candidates = _no_fixed_features.acquisition_function._construct_X_full(
clamped_candidates
)
return clamped_candidates, batch_acquisition
clamped_candidates = columnwise_clamp(
X=initial_conditions, lower=lower_bounds, upper=upper_bounds
)
shapeX = clamped_candidates.shape
x0 = clamped_candidates.view(-1)
bounds = make_scipy_bounds(
X=initial_conditions, lower_bounds=lower_bounds, upper_bounds=upper_bounds
)
constraints = make_scipy_linear_constraints(
shapeX=clamped_candidates.shape,
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
)
with_grad = options.get("with_grad", True)
if with_grad:
def f_np_wrapper(x: np.ndarray, f: Callable):
"""Given a torch callable, compute value + grad given a numpy array."""
if np.isnan(x).any():
raise RuntimeError(
f"{np.isnan(x).sum()} elements of the {x.size} element array "
f"`x` are NaN."
)
X = (
torch.from_numpy(x)
.to(initial_conditions)
.view(shapeX)
.contiguous()
.requires_grad_(True)
)
X_fix = fix_features(X, fixed_features=fixed_features)
loss = f(X_fix).sum()
# compute gradient w.r.t. the inputs (does not accumulate in leaves)
gradf = _arrayify(torch.autograd.grad(loss, X)[0].contiguous().view(-1))
if np.isnan(gradf).any():
msg = (
f"{np.isnan(gradf).sum()} elements of the {x.size} element "
"gradient array `gradf` are NaN. This often indicates numerical issues."
)
if initial_conditions.dtype != torch.double:
msg += " Consider using `dtype=torch.double`."
raise RuntimeError(msg)
fval = loss.item()
return fval, gradf
else:
def f_np_wrapper(x: np.ndarray, f: Callable):
X = torch.from_numpy(x).to(initial_conditions).view(shapeX).contiguous()
with torch.no_grad():
X_fix = fix_features(X=X, fixed_features=fixed_features)
loss = f(X_fix).sum()
fval = loss.item()
return fval
if nonlinear_inequality_constraints:
# Make sure `batch_limit` is 1 for now.
if not (len(shapeX) == 3 and shapeX[:2] == torch.Size([1, 1])):
raise ValueError(
"`batch_limit` must be 1 when non-linear inequality constraints "
"are given."
)
constraints += make_scipy_nonlinear_inequality_constraints(
nonlinear_inequality_constraints=nonlinear_inequality_constraints,
f_np_wrapper=f_np_wrapper,
x0=x0,
)
x0 = _arrayify(x0)
def f(x):
return -acquisition_function(x)
res = minimize(
fun=f_np_wrapper,
args=(f,),
x0=x0,
method=options.get("method", "SLSQP" if constraints else "L-BFGS-B"),
jac=with_grad,
bounds=bounds,
constraints=constraints,
callback=options.get("callback", None),
options={
k: v
for k, v in options.items()
if k
not in [
"method",
"callback",
"with_grad",
"sample_around_best_sigma",
"sample_around_best_subset_sigma",
]
},
)
candidates = fix_features(
X=torch.from_numpy(res.x).to(initial_conditions).reshape(shapeX),
fixed_features=fixed_features,
)
# SLSQP sometimes fails in the line search or may just fail to find a feasible
# candidate in which case we just return the starting point. This happens rarely,
# so it shouldn't be an issue given enough restarts.
if nonlinear_inequality_constraints and any(
nlc(candidates.view(-1)) < NLC_TOL for nlc in nonlinear_inequality_constraints
):
candidates = torch.from_numpy(x0).to(candidates).reshape(shapeX)
        warnings.warn(
            "SLSQP failed to converge to a solution that satisfies the non-linear "
            "constraints. Returning the feasible starting point."
)
clamped_candidates = columnwise_clamp(
X=candidates, lower=lower_bounds, upper=upper_bounds, raise_on_violation=True
)
with torch.no_grad():
batch_acquisition = acquisition_function(clamped_candidates)
return clamped_candidates, batch_acquisition
def gen_candidates_torch(
initial_conditions: Tensor,
acquisition_function: AcquisitionFunction,
lower_bounds: Optional[Union[float, Tensor]] = None,
upper_bounds: Optional[Union[float, Tensor]] = None,
options: Optional[Dict[str, Union[float, str]]] = None,
    callback: Optional[Callable[[int, Tensor, Tensor, Tensor], None]] = None,
fixed_features: Optional[Dict[int, Optional[float]]] = None,
optimizer=torch.optim.Adam,
) -> Tuple[Tensor, Tensor]:
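    r"""Generate a set of candidates using a `torch` optimizer (Adam by default).
    This mirrors `gen_candidates_scipy` above; see that docstring for the meaning
    of the shared arguments.
    """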
options = options or {}
# if there are fixed features we may optimize over a domain of lower dimension
if fixed_features:
subproblem = _remove_fixed_features_from_optimization(
fixed_features=fixed_features,
acquisition_function=acquisition_function,
initial_conditions=initial_conditions,
lower_bounds=lower_bounds,
upper_bounds=upper_bounds,
inequality_constraints=None,
equality_constraints=None,
)
# call the routine with no fixed_features
clamped_candidates, batch_acquisition = gen_candidates_torch(
initial_conditions=subproblem.initial_conditions,
acquisition_function=subproblem.acquisition_function,
lower_bounds=subproblem.lower_bounds,
upper_bounds=subproblem.upper_bounds,
optimizer=optimizer,
options=options,
callback=callback,
fixed_features=None,
)
clamped_candidates = subproblem.acquisition_function._construct_X_full(
clamped_candidates
)
return clamped_candidates, batch_acquisition
_clamp = partial(columnwise_clamp, lower=lower_bounds, upper=upper_bounds)
clamped_candidates = _clamp(initial_conditions).requires_grad_(True)
lr = options.get("lr", 0.025)
_optimizer = optimizer(params=[clamped_candidates], lr=lr)
i = 0
stop = False
if options.get("rel_tol") is None:
# turn off exponential stopping
options["rel_tol"] = float("-inf")
stopping_criterion = ExpMAStoppingCriterion(
**_filter_kwargs(ExpMAStoppingCriterion, **options)
)
decay = options.get("decay", False)
if callback is None:
callback = options.get("callback")
while not stop:
i += 1
with torch.no_grad():
X = _clamp(clamped_candidates).requires_grad_(True)
loss = -acquisition_function(X).sum()
grad = torch.autograd.grad(loss, X)[0]
if callback:
callback(i, loss, grad, X)
def assign_grad():
_optimizer.zero_grad()
clamped_candidates.grad = grad
return loss
_optimizer.step(assign_grad)
if decay:
_optimizer = optimizer(params=[clamped_candidates], lr=lr / i**0.7)
stop = stopping_criterion.evaluate(fvals=loss.detach())
clamped_candidates = clamped_candidates.detach()
clamped_candidates = _clamp(clamped_candidates)
with torch.no_grad():
batch_acquisition = acquisition_function(clamped_candidates)
return clamped_candidates, batch_acquisition
def get_best_candidates(batch_candidates: Tensor, batch_values: Tensor) -> Tensor:
r"""Extract best (q-batch) candidate from batch of candidates
Args:
batch_candidates: A `b x q x d` tensor of `b` q-batch candidates, or a
`b x d` tensor of `b` single-point candidates.
batch_values: A tensor with `b` elements containing the value of the
respective candidate (higher is better).
Returns:
A tensor of size `q x d` (if q-batch mode) or `d` from batch_candidates
with the highest associated value.
Example:
>>> qEI = qExpectedImprovement(model, best_f=0.2)
>>> bounds = torch.tensor([[0., 0.], [1., 2.]])
>>> Xinit = gen_batch_initial_conditions(
>>> qEI, bounds, q=3, num_restarts=25, raw_samples=500
>>> )
>>> batch_candidates, batch_acq_values = gen_candidates_scipy(
initial_conditions=Xinit,
acquisition_function=qEI,
lower_bounds=bounds[0],
upper_bounds=bounds[1],
)
>>> best_candidates = get_best_candidates(batch_candidates, batch_acq_values)
"""
best = torch.argmax(batch_values.view(-1), dim=0)
return batch_candidates[best]
|
bo_pr-main
|
discrete_mixed_bo/gen.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Utilities for random Fourier features.
"""
import torch
from botorch.models import ModelListGP, MultiTaskGP
from botorch.models.converter import batched_to_model_list
from botorch.models.deterministic import DeterministicModel, GenericDeterministicModel
from botorch.models.model import Model
from botorch.utils.gp_sampling import (
RandomFourierFeatures,
get_deterministic_model,
get_deterministic_model_multi_samples,
get_weights_posterior,
)
def get_gp_samples(
model: Model,
num_outputs: int,
n_samples: int,
num_rff_features: int = 512,
) -> GenericDeterministicModel:
r"""Sample functions from GP posterior using RFFs. The returned
`GenericDeterministicModel` effectively wraps `num_outputs` models,
    each of which has a batch shape of `n_samples`. Refer to
    `get_deterministic_model_multi_samples` for more details.
Args:
model: The model.
num_outputs: The number of outputs.
n_samples: The number of functions to be sampled IID.
num_rff_features: The number of random Fourier features.
Returns:
A batched `GenericDeterministicModel` that batch evaluates `n_samples`
sampled functions.
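    Example:
        >>> # Illustrative sketch; `model` is assumed to be a fitted single-output
        >>> # GP and `test_X` an `n x d` tensor of test points.
        >>> gp_samples = get_gp_samples(model=model, num_outputs=1, n_samples=16)
        >>> f_vals = gp_samples.posterior(test_X).mean  # evaluates the 16 sampled paths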
"""
if num_outputs > 1:
if not isinstance(model, ModelListGP):
models = batched_to_model_list(model).models
else:
models = model.models
else:
models = [model]
if isinstance(models[0], MultiTaskGP):
raise NotImplementedError
weights = []
bases = []
for m in range(num_outputs):
train_X = models[m].train_inputs[0]
train_targets = models[m].train_targets
# get random fourier features
# sample_shape controls the number of iid functions.
basis = RandomFourierFeatures(
kernel=models[m].covar_module,
input_dim=train_X.shape[-1],
num_rff_features=num_rff_features,
sample_shape=torch.Size([n_samples] if n_samples > 1 else []),
)
bases.append(basis)
# TODO: when batched kernels are supported in RandomFourierFeatures,
# the following code can be uncommented.
# if train_X.ndim > 2:
# batch_shape_train_X = train_X.shape[:-2]
# dataset_shape = train_X.shape[-2:]
# train_X = train_X.unsqueeze(-3).expand(
# *batch_shape_train_X, n_samples, *dataset_shape
# )
# train_targets = train_targets.unsqueeze(-2).expand(
# *batch_shape_train_X, n_samples, dataset_shape[0]
# )
phi_X = basis(train_X)
# Sample weights from bayesian linear model
# 1. When inputs are not batched, train_X.shape == (n, d)
# weights.sample().shape == (n_samples, num_rff_features)
# 2. When inputs are batched, train_X.shape == (batch_shape_input, n, d)
# This is expanded to (batch_shape_input, n_samples, n, d)
# to maintain compatibility with RFF forward semantics
# weights.sample().shape == (batch_shape_input, n_samples, num_rff_features)
mvn = get_weights_posterior(
X=phi_X,
y=train_targets,
sigma_sq=models[m].likelihood.noise.mean().item(),
)
weights.append(mvn.sample())
# TODO: Ideally support RFFs for multi-outputs instead of having to
# generate a basis for each output serially.
if n_samples > 1:
return get_deterministic_model_multi_samples(
weights=weights,
bases=bases,
)
return get_deterministic_model(
weights=weights,
bases=bases,
)
def get_gp_sample_w_transforms(
model: Model,
num_outputs: int,
n_samples: int,
num_rff_features: int = 512,
) -> DeterministicModel:
intf = None
octf = None
if hasattr(model, "input_transform"):
intf = model.input_transform
if hasattr(model, "outcome_transform"):
octf = model.outcome_transform
model.outcome_transform = None
base_gp_samples = get_gp_samples(
model=model,
num_outputs=num_outputs,
n_samples=n_samples,
num_rff_features=num_rff_features,
)
if intf is not None:
base_gp_samples.input_transform = intf
model.input_transform = intf
if octf is not None:
base_gp_samples.outcome_transform = octf
model.outcome_transform = octf
return base_gp_samples
|
bo_pr-main
|
discrete_mixed_bo/rffs.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Straight Through Estimators.
"""
import torch
from torch import Tensor
from torch.autograd import Function
from torch.nn import Module
from torch.nn.functional import one_hot
class RoundSTE(Function):
r"""Apply a rounding function and use a ST gradient estimator."""
@staticmethod
def forward(
ctx,
input: Tensor,
):
return input.round()
@staticmethod
def backward(ctx, grad_output):
return grad_output
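# Usage sketch (illustrative only): autograd `Function` subclasses such as the
# ones in this module are invoked via `.apply`, e.g. `y = RoundSTE.apply(x)`
# rounds `x` in the forward pass while the backward pass propagates gradients
# through unchanged (straight-through estimation).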
class OneHotArgmaxSTE(Function):
    r"""Apply a discretization (argmax) to a one-hot encoded categorical,
    return a one-hot encoded categorical, and use an STE gradient estimator."""
@staticmethod
def forward(
ctx,
input: Tensor,
num_categories: int,
):
return one_hot(input.argmax(dim=-1), num_classes=num_categories)
@staticmethod
def backward(ctx, grad_output):
return grad_output
class OneHotToNumericSTE(Function):
    r"""Apply an argmax function and use an STE gradient estimator."""
@staticmethod
def forward(
ctx,
input: Tensor,
):
return input.argmax(dim=-1)
@staticmethod
def backward(ctx, grad_output):
return grad_output
|
bo_pr-main
|
discrete_mixed_bo/ste.py
|
#!/usr/bin/env python3
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
bo_pr-main
|
discrete_mixed_bo/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Run one replication.
"""
import gc
from time import time
from typing import Callable, Dict, List, Optional
import nevergrad as ng
import numpy as np
import torch
from botorch.acquisition.utils import is_nonnegative
from botorch.fit import fit_gpytorch_model
from botorch.models.transforms.outcome import Standardize
from botorch.optim.optimize import optimize_acqf_discrete
from botorch.test_functions.base import (
ConstrainedBaseTestProblem,
MultiObjectiveTestProblem,
)
from botorch.utils.multi_objective.box_decompositions.dominated import (
DominatedPartitioning,
)
from botorch.utils.sampling import draw_sobol_samples
from botorch.utils.transforms import normalize, unnormalize
from torch import Tensor
from discrete_mixed_bo.experiment_utils import (
eval_problem,
generate_discrete_options,
generate_initial_data,
get_acqf,
get_exact_rounding_func,
get_problem,
initialize_model,
)
from discrete_mixed_bo.input import OneHotToNumeric
from discrete_mixed_bo.model_utils import apply_normal_copula_transform
from discrete_mixed_bo.optimize import optimize_acqf, optimize_acqf_mixed
from discrete_mixed_bo.probabilistic_reparameterization import (
AbstractProbabilisticReparameterization,
)
from discrete_mixed_bo.trust_region import TurboState, update_state
supported_labels = [
"sobol",
"cont_optim__round_after__ei",
"pr__ei",
"exact_round__fin_diff__ei",
"exact_round__ste__ei",
"enumerate__ei",
"cont_optim__round_after__ts",
"pr__ts",
"exact_round__fin_diff__ts",
"exact_round__ste__ts",
"enumerate__ts",
"cont_optim__round_after__ucb",
"pr__ucb",
"exact_round__fin_diff__ucb",
"exact_round__ste__ucb",
"enumerate__ucb",
"cont_optim__round_after__ehvi",
"pr__ehvi",
"exact_round__fin_diff__ehvi",
"exact_round__ste__ehvi",
"enumerate__ehvi",
"cont_optim__round_after__nehvi-1",
"pr__nehvi-1",
"exact_round__fin_diff__nehvi-1",
"exact_round__ste__nehvi-1",
"enumerate__nehvi-1",
"nevergrad_portfolio",
]
def run_one_replication(
seed: int,
label: str,
iterations: int,
function_name: str,
batch_size: int,
mc_samples: int,
n_initial_points: Optional[int] = None,
optimization_kwargs: Optional[dict] = None,
dim: Optional[int] = None,
acqf_kwargs: Optional[dict] = None,
model_kwargs: Optional[dict] = None,
save_frequency: Optional[int] = None,
dtype: torch.dtype = torch.double,
device: Optional[torch.device] = None,
save_callback: Optional[Callable[[Tensor], None]] = None,
problem_kwargs: Optional[Dict[str, np.ndarray]] = None,
use_trust_region: bool = False,
acqf_optim_seed: Optional[int] = None,
X_init: Optional[Tensor] = None,
Y_init: Optional[Tensor] = None,
) -> None:
    r"""Run the BO loop for a given number of iterations. Supports restarting
    prematurely killed experiments.
Args:
seed: The experiment seed.
label: The label / algorithm to use.
iterations: Number of iterations of the BO loop to perform.
n_initial_points: Number of initial evaluations to use.
function_name: The name of the test function to use.
batch_size: The q-batch size, i.e., number of parallel function evaluations.
mc_samples: Number of MC samples used for MC acquisition functions (e.g., NEI).
        optimization_kwargs: Arguments passed to `optimize_acqf`. Includes
            `num_restarts`, `raw_samples`, and other optional arguments.
model_kwargs: Arguments for `initialize_model`. The default behavior is to use
a ModelListGP consisting of noise-free FixedNoiseGP models.
save_frequency: How often to save the output.
dtype: The tensor dtype to use.
device: The device to use.
        save_callback: Method to save results to file.
        acqf_optim_seed: A seed for AF optimization.
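    Example:
        >>> # Illustrative sketch of a single replication; "pr__ei" and "pest" are
        >>> # taken from `supported_labels` and `get_problem`, respectively.
        >>> run_one_replication(
        >>>     seed=0,
        >>>     label="pr__ei",
        >>>     iterations=10,
        >>>     function_name="pest",
        >>>     batch_size=1,
        >>>     mc_samples=128,
        >>> )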
"""
assert label in supported_labels, "Label not supported!"
torch.manual_seed(seed)
np.random.seed(seed)
if device is None:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tkwargs = {"dtype": dtype, "device": device}
model_kwargs = model_kwargs or {}
acqf_kwargs = acqf_kwargs or {}
problem_kwargs = problem_kwargs or {}
optimization_kwargs = optimization_kwargs or {}
# TODO: use model list when there are constraints
# or multiple objectives
base_function = get_problem(name=function_name, dim=dim, **problem_kwargs)
base_function.to(**tkwargs)
binary_dims = base_function.integer_indices
binary_mask = base_function.integer_bounds[1] - base_function.integer_bounds[0] == 1
if binary_mask.any():
binary_dims = (
base_function.integer_indices.clone()
.detach()
.to(dtype=torch.int32)[binary_mask]
.cpu()
.tolist()
)
else:
binary_dims = []
is_constrained = isinstance(base_function, ConstrainedBaseTestProblem)
num_constraints = base_function.num_constraints if is_constrained else 0
is_moo = base_function.is_moo
model_kwargs.setdefault("use_model_list", is_moo or is_constrained)
kernel_type = model_kwargs.get("kernel_type")
if "cont_optim__round_after" in label:
if kernel_type in (
"mixed_categorical",
"mixed_latent",
):
# cannot use a continuous relaxation + gradient optimization with mixed categorical
model_kwargs["kernel_type"] = "mixed"
elif "__ste__" in label:
acqf_kwargs["use_ste"] = True
if kernel_type in ("mixed_categorical", "mixed_latent"):
acqf_kwargs["apply_numeric"] = True
# set default optimization parameters
optimization_kwargs.setdefault("num_restarts", 20)
optimization_kwargs.setdefault("raw_samples", 1024)
options = optimization_kwargs.get("options")
if options is None:
options = {}
optimization_kwargs["options"] = options
options.setdefault("batch_limit", 5)
options.setdefault("init_batch_limit", 32)
options.setdefault("maxiter", 200)
if "pr" in label:
# set pr defaults
acqf_kwargs.setdefault("pr_mc_samples", 128)
# use moving average baseline in reinforce gradient estimator
acqf_kwargs.setdefault("pr_grad_estimator", "reinforce_ma")
# use stochastic optimization
acqf_kwargs.setdefault("pr_resample", True)
optimization_kwargs.setdefault("stochastic", True)
if "__fin_diff__" in label:
options["with_grad"] = False
if options.get("sample_around_best", False):
sigma = torch.full((base_function.dim,), 1e-3, **tkwargs)
sigma[base_function.integer_indices] = 0.5 / (
base_function.integer_bounds[1] - base_function.integer_bounds[0]
)
options["sample_around_best_sigma"] = sigma
options["sample_around_best_subset_sigma"] = sigma
exact_rounding_func = get_exact_rounding_func(
bounds=base_function.one_hot_bounds,
integer_indices=base_function.integer_indices.tolist(),
categorical_features=base_function.categorical_features,
initialization=False,
)
init_exact_rounding_func = get_exact_rounding_func(
bounds=base_function.one_hot_bounds,
integer_indices=base_function.integer_indices.tolist(),
categorical_features=base_function.categorical_features,
initialization=True,
)
standard_bounds = torch.ones(2, base_function.effective_dim, **tkwargs)
standard_bounds[0] = 0
# Get the initial data.
if n_initial_points is None:
n_initial_points = min(20, 2 * base_function.effective_dim)
if X_init is None:
X, Y = generate_initial_data(
n=n_initial_points,
base_function=base_function,
bounds=standard_bounds,
tkwargs=tkwargs,
init_exact_rounding_func=init_exact_rounding_func,
)
else:
# use provided initial data
assert Y_init is not None
assert X_init.shape[-1] == base_function.effective_dim
X = X_init.to(**tkwargs)
Y = Y_init.to(**tkwargs)
standardize_tf = Standardize(m=Y.shape[-1])
stdized_Y, _ = standardize_tf(Y)
standardize_tf.eval()
max_af_values = []
# Set some counters to keep track of things.
start_time = time()
existing_iterations = 0
wall_time = torch.zeros(iterations, dtype=dtype)
if is_moo:
bd = DominatedPartitioning(ref_point=base_function.ref_point, Y=Y)
# Abusing this variable name. This is HV.
best_objs = bd.compute_hypervolume().view(-1).cpu()
elif is_constrained:
# compute feasibility
feas = (Y[..., 1:] >= 0).all(dim=-1)
if feas.any():
best_objs = Y[feas, 0].max().view(-1).cpu()
else:
best_objs = torch.tensor([float("-inf")], dtype=Y.dtype)
else:
if base_function.objective_weights is not None:
obj = Y @ base_function.objective_weights
else:
obj = Y
best_objs = obj.max().view(-1).cpu()
if use_trust_region:
assert not is_moo
trbo_state = TurboState(
dim=base_function.effective_dim,
batch_size=batch_size,
is_constrained=is_constrained,
)
# setup nevergrad_portfolio
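    # The parametrization below mirrors the problem structure: a Scalar per
    # continuous dimension, a TransitionChoice per integer dimension, and a
    # Choice per categorical dimension. The initial observations are then fed
    # to the optimizer via `tell` so it starts from the same data as the BO
    # methods.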
if label == "nevergrad_portfolio":
params = []
for i in base_function.cont_indices:
params.append(
ng.p.Scalar(
lower=base_function.bounds[0, i].item(),
upper=base_function.bounds[1, i].item(),
)
)
for i in base_function.integer_indices:
params.append(
ng.p.TransitionChoice(
list(
range(
int(base_function.bounds[0, i].item()),
int(base_function.bounds[1, i].item()) + 1,
)
)
)
)
for i in base_function.categorical_indices:
params.append(
ng.p.Choice(
list(
range(
int(base_function.bounds[0, i].item()),
int(base_function.bounds[1, i].item()) + 1,
)
)
)
)
params = ng.p.Instrumentation(*params)
ohe_to_numeric = OneHotToNumeric(
categorical_features=base_function.categorical_features,
transform_on_train=True,
)
X_numeric = ohe_to_numeric(X)
if len(base_function.categorical_features) > 0:
X_numeric[..., base_function.categorical_indices] = normalize(
X_numeric[..., base_function.categorical_indices],
base_function.categorical_bounds,
)
X_numeric = unnormalize(X_numeric, base_function.bounds)
params.value = (tuple(X_numeric[-1].tolist()), {})
optimizer = ng.optimizers.PortfolioDiscreteOnePlusOne(
parametrization=params,
budget=iterations + X.shape[0],
num_workers=1,
)
optimizer.ask() # clear initial value
for xi, yi in zip(X_numeric.cpu().numpy(), Y.cpu().numpy()):
xi = optimizer.parametrization.spawn_child(
new_value=(tuple(xi.tolist()), {})
)
optimizer.tell(xi, -yi.item())
# whether to sample discrete candidates from the resulting distribution or use the MLE
sample_candidates = acqf_kwargs.get("sample_candidates", True)
# BO loop for as many iterations as needed.
all_loss_trajs = []
all_xs_trajs = []
all_true_af_trajs = []
one_hot_to_numeric = None
for i in range(existing_iterations, iterations):
loss_traj = []
xs_traj = []
true_af_traj = []
print(
f"Starting label {label}, seed {seed}, iteration {i}, "
f"time: {time()-start_time}, current best obj: {best_objs[-1]}."
)
# Fit the model.
mll, model, ecdfs = initialize_model(
train_x=X,
train_y=stdized_Y,
binary_dims=binary_dims,
categorical_features=base_function.categorical_features,
function_name=function_name,
**model_kwargs,
)
fit_gpytorch_model(mll)
if label == "sobol":
raw_candidates = (
draw_sobol_samples(
bounds=standard_bounds,
n=1,
q=batch_size,
)
.squeeze(0)
.to(**tkwargs)
)
candidates = init_exact_rounding_func(raw_candidates)
elif label == "nevergrad_portfolio":
X_numeric = ohe_to_numeric(X[-1:])
if len(base_function.categorical_features) > 0:
X_numeric[..., base_function.categorical_indices] = normalize(
X_numeric[..., base_function.categorical_indices],
base_function.categorical_bounds,
)
X_numeric = unnormalize(X_numeric, base_function.bounds)
xi = optimizer.parametrization.spawn_child(
new_value=(tuple(X_numeric.view(-1).tolist()), {})
)
optimizer.tell(xi, -Y[-1].item())
candidates_numeric = torch.tensor(
optimizer.ask().value[0], dtype=X.dtype, device=X.device
).view(1, -1)
candidates = normalize(candidates_numeric, base_function.bounds)
if len(base_function.categorical_features) > 0:
candidates[..., base_function.categorical_indices] = unnormalize(
candidates[..., base_function.categorical_indices],
base_function.categorical_bounds,
)
candidates = ohe_to_numeric.untransform(candidates)
else:
# Construct the acqf.
acqf_exact_rounding_func = get_exact_rounding_func(
bounds=base_function.one_hot_bounds,
integer_indices=base_function.integer_indices.tolist(),
categorical_features=base_function.categorical_features,
initialization=False,
return_numeric=acqf_kwargs.get("apply_numeric", False),
use_ste=acqf_kwargs.get("use_ste", False),
)
acq_func = get_acqf(
label=label,
mc_samples=mc_samples,
model=model,
X_baseline=X,
num_constraints=num_constraints,
iteration=i + 1,
tkwargs=tkwargs,
base_function=base_function,
batch_size=batch_size,
exact_rounding_func=acqf_exact_rounding_func,
train_Y=stdized_Y,
standardize_tf=standardize_tf,
**acqf_kwargs,
)
true_acq_func = acq_func
if isinstance(acq_func, AbstractProbabilisticReparameterization):
# PR itself maps one-hot to numeric
# (not the model)
# so we need to do so here ourselves
if acq_func.one_hot_to_numeric is not None:
one_hot_to_numeric = acq_func.one_hot_to_numeric
if "pr" in label:
options["nonnegative"] = is_nonnegative(acq_func.acq_function)
if use_trust_region:
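                # Build a TuRBO-style trust region: a box of half-width
                # `trbo_state.length` (in the normalized space) centered at the
                # incumbent (the best feasible point, or the minimum-violation
                # point if nothing is feasible). Binary and categorical
                # dimensions are reset below to span the full [0, 1] range.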
scaled_length = trbo_state.length * (
standard_bounds[1] - standard_bounds[0]
)
if is_constrained:
feas = (Y[..., 1:] >= 0).all(dim=-1)
if feas.any():
merit = Y[:, 0].clone()
merit[~feas] = -float("inf")
x_center = X[merit.argmax()]
else:
violation = torch.clamp_max(Y[..., 1:], 0.0).abs().sum(dim=-1)
x_center = X[violation.argmin()]
else:
x_center = X[Y.argmax()].clone()
bounds = torch.stack(
(x_center - scaled_length, x_center + scaled_length)
)
# Clamp bounds
bounds[0] = torch.maximum(bounds[0], standard_bounds[0])
bounds[1] = torch.minimum(bounds[1], standard_bounds[1])
                # Reset binary dimensions
bounds[0, binary_dims] = 0
bounds[1, binary_dims] = 1
# Reset categorical_dims
if len(base_function.categorical_features) > 0:
start = base_function.categorical_indices.min().item()
bounds[0, start:] = 0
bounds[1, start:] = 1
else:
bounds = standard_bounds
# Optimize the acqf.
torch.cuda.empty_cache()
if "enumerate" in label:
# enumerate the discrete options and optimize the continuous
# parameters for each discrete option, if there are any
# construct a list of dictionaries mapping indices in one-hot space
# to parameter values.
discrete_options = generate_discrete_options(
base_function=base_function,
return_tensor=base_function.cont_indices.shape[0] == 0,
)
if base_function.cont_indices.shape[0] > 0:
# optimize mixed
candidates, _ = optimize_acqf_mixed(
acq_function=acq_func,
bounds=bounds,
q=batch_size,
fixed_features_list=discrete_options,
**optimization_kwargs,
)
else:
# optimize discrete
candidates, _ = optimize_acqf_discrete(
acq_function=acq_func,
q=batch_size,
choices=discrete_options,
**optimization_kwargs,
)
else:
if acqf_optim_seed is not None:
torch.manual_seed(acqf_optim_seed)
if isinstance(acq_func, AbstractProbabilisticReparameterization):
true_acq_func = acq_func.acq_function
if (
optimization_kwargs.get("stochastic", False)
and acqf_optim_seed is not None
):
def callback(i, loss, grad, X):
# this is a sum over batches
X = X.detach().clone()
xs_traj.append(X.cpu())
with torch.no_grad():
X_rounded = exact_rounding_func(X)
if one_hot_to_numeric is not None:
X_rounded = acq_func.one_hot_to_numeric(X_rounded)
true_af_traj.append(true_acq_func(X_rounded).cpu())
loss_traj.append(acq_func(X).cpu())
optimization_kwargs["options"]["callback"] = callback
raw_candidates, _ = optimize_acqf(
acq_function=acq_func,
bounds=bounds,
q=batch_size,
**optimization_kwargs,
# return candidates for all random restarts
return_best_only=False,
)
if (
isinstance(acq_func, AbstractProbabilisticReparameterization)
and sample_candidates
):
with torch.no_grad():
candidates = acq_func.sample_candidates(raw_candidates)
else:
# use maximum likelihood candidates for PR
# and round candidates for other methods
candidates = exact_rounding_func(raw_candidates)
# compute acquisition values of rounded candidates
# and select best across restarts
if one_hot_to_numeric is not None:
candidates_numeric = acq_func.one_hot_to_numeric(candidates)
else:
candidates_numeric = candidates
with torch.no_grad():
# TODO: support q-batches here
if batch_size > 1:
raise NotImplementedError
max_af = true_acq_func(candidates_numeric).max(dim=0)
best_idx = max_af.indices.item()
max_af_values.append(max_af.values.item())
if candidates.ndim > 2:
# select best across restarts
candidates = candidates[best_idx]
torch.cuda.empty_cache()
# free memory
del acq_func, mll, model
gc.collect()
# Get the new observations and update the data.
new_y = eval_problem(candidates, base_function=base_function)
if use_trust_region:
old_length = trbo_state.length
trbo_state = update_state(state=trbo_state, Y_next=new_y)
if trbo_state.length != old_length:
print(
f"TR length changed from {old_length:.3f} to {trbo_state.length:3f}"
)
if trbo_state.restart_triggered:
print("Restarting trust region")
trbo_state = TurboState(
dim=base_function.effective_dim,
batch_size=batch_size,
is_constrained=is_constrained,
)
X = torch.cat([X, candidates], dim=0)
Y = torch.cat([Y, new_y], dim=0)
standardize_tf.train()
stdized_Y, _ = standardize_tf(Y)
standardize_tf.eval()
wall_time[i] = time() - start_time
all_xs_trajs.append(xs_traj)
all_loss_trajs.append(loss_traj)
all_true_af_trajs.append(true_af_traj)
# TODO: add support for constraints by applying feasibility
if is_moo:
bd = DominatedPartitioning(ref_point=base_function.ref_point, Y=Y)
# Abusing this variable name. This is HV.
best_obj = bd.compute_hypervolume()
elif is_constrained:
# compute feasibility
feas = (Y[..., 1:] >= 0).all(dim=-1)
if feas.any():
best_obj = Y[feas, 0].max()
else:
best_obj = torch.tensor([float("-inf")], dtype=Y.dtype, device=Y.device)
else:
if base_function.objective_weights is not None:
obj = Y @ base_function.objective_weights
else:
obj = Y
best_obj = obj.max().view(-1).cpu()
best_objs = torch.cat([best_objs, best_obj.view(-1).cpu()], dim=0)
# Periodically save the output.
        if save_frequency is not None and (i + 1) % save_frequency == 0:
output_dict = {
"label": label,
"X": X.cpu(),
"Y": Y.cpu(),
"wall_time": wall_time[: i + 1],
"best_objs": best_objs,
"max_af_values": max_af_values,
}
save_callback(output_dict)
# Save the final output
output_dict = {
"label": label,
"X": X.cpu(),
"Y": Y.cpu(),
"wall_time": wall_time,
"best_objs": best_objs,
"max_af_values": max_af_values,
"all_loss_trajs": all_loss_trajs,
"all_xs_trajs": all_xs_trajs,
"all_true_af_trajs": all_true_af_trajs,
}
save_callback(output_dict)
|
bo_pr-main
|
discrete_mixed_bo/run_one_replication.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
A copy of botorch.optim.initializers that supports a vector-valued sigma
for the sample_around_best heuristic.
References
.. [Regis]
R. G. Regis, C. A. Shoemaker. Combining radial basis function
surrogates and dynamic coordinate search in high-dimensional
expensive black-box optimization, Engineering Optimization, 2013.
"""
from __future__ import annotations
import warnings
from math import ceil
from typing import Dict, List, Optional, Tuple, Union
import torch
from botorch import settings
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.knowledge_gradient import (
_get_value_function,
qKnowledgeGradient,
)
from botorch.acquisition.utils import is_nonnegative
from botorch.exceptions.errors import BotorchTensorDimensionError
from botorch.exceptions.warnings import (
BadInitialCandidatesWarning,
BotorchWarning,
SamplingWarning,
)
from botorch.models.model import Model
from botorch.optim.utils import fix_features, get_X_baseline
from botorch.utils.multi_objective.pareto import is_non_dominated
from botorch.utils.sampling import (
batched_multinomial,
draw_sobol_samples,
get_polytope_samples,
manual_seed,
)
from botorch.utils.transforms import normalize, standardize, unnormalize
from torch import Tensor
from torch.distributions import Normal
from torch.quasirandom import SobolEngine
def gen_batch_initial_conditions(
acq_function: AcquisitionFunction,
bounds: Tensor,
q: int,
num_restarts: int,
raw_samples: int,
fixed_features: Optional[Dict[int, float]] = None,
options: Optional[Dict[str, Union[bool, float, int]]] = None,
inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
) -> Tensor:
r"""Generate a batch of initial conditions for random-restart optimziation.
TODO: Support t-batches of initial conditions.
Args:
acq_function: The acquisition function to be optimized.
bounds: A `2 x d` tensor of lower and upper bounds for each column of `X`.
q: The number of candidates to consider.
num_restarts: The number of starting points for multistart acquisition
function optimization.
raw_samples: The number of raw samples to consider in the initialization
heuristic. Note: if `sample_around_best` is True (the default is False),
then `2 * raw_samples` samples are used.
fixed_features: A map `{feature_index: value}` for features that
should be fixed to a particular value during generation.
options: Options for initial condition generation. For valid options see
`initialize_q_batch` and `initialize_q_batch_nonneg`. If `options`
contains a `nonnegative=True` entry, then `acq_function` is
assumed to be non-negative (useful when using custom acquisition
functions). In addition, an "init_batch_limit" option can be passed
to specify the batch limit for the initialization. This is useful
for avoiding memory limits when computing the batch posterior over
raw samples.
        inequality_constraints: A list of tuples (indices, coefficients, rhs),
            with each tuple encoding an inequality constraint of the form
            `\sum_i (X[indices[i]] * coefficients[i]) >= rhs`.
        equality_constraints: A list of tuples (indices, coefficients, rhs),
            with each tuple encoding an equality constraint of the form
            `\sum_i (X[indices[i]] * coefficients[i]) = rhs`.
Returns:
A `num_restarts x q x d` tensor of initial conditions.
Example:
>>> qEI = qExpectedImprovement(model, best_f=0.2)
>>> bounds = torch.tensor([[0.], [1.]])
>>> Xinit = gen_batch_initial_conditions(
>>> qEI, bounds, q=3, num_restarts=25, raw_samples=500
>>> )
"""
options = options or {}
seed: Optional[int] = options.get("seed")
batch_limit: Optional[int] = options.get(
"init_batch_limit", options.get("batch_limit")
)
batch_initial_arms: Tensor
factor, max_factor = 1, 5
init_kwargs = {}
device = bounds.device
bounds_cpu = bounds.cpu()
if "eta" in options:
init_kwargs["eta"] = options.get("eta")
if options.get("nonnegative") or is_nonnegative(acq_function):
init_func = initialize_q_batch_nonneg
if "alpha" in options:
init_kwargs["alpha"] = options.get("alpha")
else:
init_func = initialize_q_batch
q = 1 if q is None else q
# the dimension the samples are drawn from
effective_dim = bounds.shape[-1] * q
if effective_dim > SobolEngine.MAXDIM and settings.debug.on():
warnings.warn(
f"Sample dimension q*d={effective_dim} exceeding Sobol max dimension "
f"({SobolEngine.MAXDIM}). Using iid samples instead.",
SamplingWarning,
)
while factor < max_factor:
with warnings.catch_warnings(record=True) as ws:
n = raw_samples * factor
if inequality_constraints is None and equality_constraints is None:
if effective_dim <= SobolEngine.MAXDIM:
X_rnd = draw_sobol_samples(bounds=bounds_cpu, n=n, q=q, seed=seed)
else:
with manual_seed(seed):
# load on cpu
X_rnd_nlzd = torch.rand(
n, q, bounds_cpu.shape[-1], dtype=bounds.dtype
)
X_rnd = bounds_cpu[0] + (bounds_cpu[1] - bounds_cpu[0]) * X_rnd_nlzd
else:
X_rnd = (
get_polytope_samples(
n=n * q,
bounds=bounds,
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
seed=seed,
n_burnin=options.get("n_burnin", 10000),
thinning=options.get("thinning", 32),
)
.view(n, q, -1)
.cpu()
)
# sample points around best
if options.get("sample_around_best", False):
X_best_rnd = sample_points_around_best(
acq_function=acq_function,
n_discrete_points=n * q,
sigma=options.get("sample_around_best_sigma", 1e-3),
bounds=bounds,
subset_sigma=options.get("sample_around_best_subset_sigma", 1e-1),
prob_perturb=options.get("sample_around_best_prob_perturb"),
)
if X_best_rnd is not None:
X_rnd = torch.cat(
[
X_rnd,
X_best_rnd.view(n, q, bounds.shape[-1]).cpu(),
],
dim=0,
)
X_rnd = fix_features(X_rnd, fixed_features=fixed_features)
with torch.no_grad():
if batch_limit is None:
batch_limit = X_rnd.shape[0]
Y_rnd_list = []
start_idx = 0
while start_idx < X_rnd.shape[0]:
end_idx = min(start_idx + batch_limit, X_rnd.shape[0])
Y_rnd_curr = acq_function(
X_rnd[start_idx:end_idx].to(device=device)
).cpu()
Y_rnd_list.append(Y_rnd_curr)
start_idx += batch_limit
Y_rnd = torch.cat(Y_rnd_list)
batch_initial_conditions = init_func(
X=X_rnd, Y=Y_rnd, n=num_restarts, **init_kwargs
).to(device=device)
if not any(issubclass(w.category, BadInitialCandidatesWarning) for w in ws):
return batch_initial_conditions
if factor < max_factor:
factor += 1
if seed is not None:
seed += 1 # make sure to sample different X_rnd
warnings.warn(
"Unable to find non-zero acquisition function values - initial conditions "
"are being selected randomly.",
BadInitialCandidatesWarning,
)
return batch_initial_conditions
def gen_one_shot_kg_initial_conditions(
acq_function: qKnowledgeGradient,
bounds: Tensor,
q: int,
num_restarts: int,
raw_samples: int,
fixed_features: Optional[Dict[int, float]] = None,
options: Optional[Dict[str, Union[bool, float, int]]] = None,
inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
) -> Optional[Tensor]:
r"""Generate a batch of smart initializations for qKnowledgeGradient.
This function generates initial conditions for optimizing one-shot KG using
    the maximizer of the posterior objective. Intuitively, the maximizer of the
fantasized posterior will often be close to a maximizer of the current
    posterior. This function uses that fact to generate the initial conditions
for the fantasy points. Specifically, a fraction of `1 - frac_random` (see
options) is generated by sampling from the set of maximizers of the
posterior objective (obtained via random restart optimization) according to
a softmax transformation of their respective values. This means that this
initialization strategy internally solves an acquisition function
maximization problem. The remaining `frac_random` fantasy points as well as
all `q` candidate points are chosen according to the standard initialization
strategy in `gen_batch_initial_conditions`.
Args:
acq_function: The qKnowledgeGradient instance to be optimized.
bounds: A `2 x d` tensor of lower and upper bounds for each column of
task features.
q: The number of candidates to consider.
num_restarts: The number of starting points for multistart acquisition
function optimization.
raw_samples: The number of raw samples to consider in the initialization
heuristic.
fixed_features: A map `{feature_index: value}` for features that
should be fixed to a particular value during generation.
options: Options for initial condition generation. These contain all
settings for the standard heuristic initialization from
`gen_batch_initial_conditions`. In addition, they contain
`frac_random` (the fraction of fully random fantasy points),
`num_inner_restarts` and `raw_inner_samples` (the number of random
restarts and raw samples for solving the posterior objective
maximization problem, respectively) and `eta` (temperature parameter
for sampling heuristic from posterior objective maximizers).
        inequality_constraints: A list of tuples (indices, coefficients, rhs),
            with each tuple encoding an inequality constraint of the form
            `\sum_i (X[indices[i]] * coefficients[i]) >= rhs`.
        equality_constraints: A list of tuples (indices, coefficients, rhs),
            with each tuple encoding an equality constraint of the form
            `\sum_i (X[indices[i]] * coefficients[i]) = rhs`.
Returns:
A `num_restarts x q' x d` tensor that can be used as initial conditions
for `optimize_acqf()`. Here `q' = q + num_fantasies` is the total number
of points (candidate points plus fantasy points).
Example:
>>> qKG = qKnowledgeGradient(model, num_fantasies=64)
>>> bounds = torch.tensor([[0., 0.], [1., 1.]])
>>> Xinit = gen_one_shot_kg_initial_conditions(
>>> qKG, bounds, q=3, num_restarts=10, raw_samples=512,
>>> options={"frac_random": 0.25},
>>> )
"""
options = options or {}
frac_random: float = options.get("frac_random", 0.1)
if not 0 < frac_random < 1:
raise ValueError(
f"frac_random must take on values in (0,1). Value: {frac_random}"
)
q_aug = acq_function.get_augmented_q_batch_size(q=q)
# TODO: Avoid unnecessary computation by not generating all candidates
ics = gen_batch_initial_conditions(
acq_function=acq_function,
bounds=bounds,
q=q_aug,
num_restarts=num_restarts,
raw_samples=raw_samples,
fixed_features=fixed_features,
options=options,
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
)
# compute maximizer of the value function
value_function = _get_value_function(
model=acq_function.model,
objective=acq_function.objective,
sampler=acq_function.inner_sampler,
project=getattr(acq_function, "project", None),
)
from botorch.optim.optimize import optimize_acqf
fantasy_cands, fantasy_vals = optimize_acqf(
acq_function=value_function,
bounds=bounds,
q=1,
num_restarts=options.get("num_inner_restarts", 20),
raw_samples=options.get("raw_inner_samples", 1024),
fixed_features=fixed_features,
return_best_only=False,
inequality_constraints=inequality_constraints,
equality_constraints=equality_constraints,
)
# sampling from the optimizers
n_value = int((1 - frac_random) * (q_aug - q)) # number of non-random ICs
eta = options.get("eta", 2.0)
weights = torch.exp(eta * standardize(fantasy_vals))
idx = torch.multinomial(weights, num_restarts * n_value, replacement=True)
# set the respective initial conditions to the sampled optimizers
ics[..., -n_value:, :] = fantasy_cands[idx, 0].view(num_restarts, n_value, -1)
return ics
def gen_value_function_initial_conditions(
acq_function: AcquisitionFunction,
bounds: Tensor,
num_restarts: int,
raw_samples: int,
current_model: Model,
fixed_features: Optional[Dict[int, float]] = None,
options: Optional[Dict[str, Union[bool, float, int]]] = None,
) -> Tensor:
r"""Generate a batch of smart initializations for optimizing
the value function of qKnowledgeGradient.
This function generates initial conditions for optimizing the inner problem of
KG, i.e. its value function, using the maximizer of the posterior objective.
    Intuitively, the maximizer of the fantasized posterior will often be close to a
maximizer of the current posterior. This function uses that fact to generate the
    initial conditions for the fantasy points. Specifically, a fraction of `1 -
frac_random` (see options) of raw samples is generated by sampling from the set of
maximizers of the posterior objective (obtained via random restart optimization)
according to a softmax transformation of their respective values. This means that
this initialization strategy internally solves an acquisition function
maximization problem. The remaining raw samples are generated using
`draw_sobol_samples`. All raw samples are then evaluated, and the initial
conditions are selected according to the standard initialization strategy in
'initialize_q_batch' individually for each inner problem.
Args:
acq_function: The value function instance to be optimized.
bounds: A `2 x d` tensor of lower and upper bounds for each column of
task features.
num_restarts: The number of starting points for multistart acquisition
function optimization.
raw_samples: The number of raw samples to consider in the initialization
heuristic.
current_model: The model of the KG acquisition function that was used to
generate the fantasy model of the value function.
fixed_features: A map `{feature_index: value}` for features that
should be fixed to a particular value during generation.
options: Options for initial condition generation. These contain all
settings for the standard heuristic initialization from
`gen_batch_initial_conditions`. In addition, they contain
`frac_random` (the fraction of fully random fantasy points),
`num_inner_restarts` and `raw_inner_samples` (the number of random
restarts and raw samples for solving the posterior objective
maximization problem, respectively) and `eta` (temperature parameter
for sampling heuristic from posterior objective maximizers).
Returns:
A `num_restarts x batch_shape x q x d` tensor that can be used as initial
conditions for `optimize_acqf()`. Here `batch_shape` is the batch shape
of value function model.
Example:
>>> fant_X = torch.rand(5, 1, 2)
>>> fantasy_model = model.fantasize(fant_X, SobolQMCNormalSampler(16))
>>> value_function = PosteriorMean(fantasy_model)
>>> bounds = torch.tensor([[0., 0.], [1., 1.]])
>>> Xinit = gen_value_function_initial_conditions(
>>> value_function, bounds, num_restarts=10, raw_samples=512,
>>> options={"frac_random": 0.25},
>>> )
"""
options = options or {}
seed: Optional[int] = options.get("seed")
frac_random: float = options.get("frac_random", 0.6)
if not 0 < frac_random < 1:
raise ValueError(
f"frac_random must take on values in (0,1). Value: {frac_random}"
)
# compute maximizer of the current value function
value_function = _get_value_function(
model=current_model,
objective=getattr(acq_function, "objective", None),
posterior_transform=acq_function.posterior_transform,
sampler=getattr(acq_function, "sampler", None),
project=getattr(acq_function, "project", None),
)
from botorch.optim.optimize import optimize_acqf
fantasy_cands, fantasy_vals = optimize_acqf(
acq_function=value_function,
bounds=bounds,
q=1,
num_restarts=options.get("num_inner_restarts", 20),
raw_samples=options.get("raw_inner_samples", 1024),
fixed_features=fixed_features,
return_best_only=False,
options={
k: v
for k, v in options.items()
if k
not in ("frac_random", "num_inner_restarts", "raw_inner_samples", "eta")
},
)
batch_shape = acq_function.model.batch_shape
# sampling from the optimizers
n_value = int((1 - frac_random) * raw_samples) # number of non-random ICs
if n_value > 0:
eta = options.get("eta", 2.0)
weights = torch.exp(eta * standardize(fantasy_vals))
idx = batched_multinomial(
weights=weights.expand(*batch_shape, -1),
num_samples=n_value,
replacement=True,
).permute(-1, *range(len(batch_shape)))
resampled = fantasy_cands[idx]
else:
resampled = torch.empty(
0,
*batch_shape,
1,
bounds.shape[-1],
dtype=fantasy_cands.dtype,
device=fantasy_cands.device,
)
# add qMC samples
randomized = draw_sobol_samples(
bounds=bounds, n=raw_samples - n_value, q=1, batch_shape=batch_shape, seed=seed
).to(resampled)
# full set of raw samples
X_rnd = torch.cat([resampled, randomized], dim=0)
X_rnd = fix_features(X_rnd, fixed_features=fixed_features)
# evaluate the raw samples
with torch.no_grad():
Y_rnd = acq_function(X_rnd)
# select the restart points using the heuristic
return initialize_q_batch(
X=X_rnd, Y=Y_rnd, n=num_restarts, eta=options.get("eta", 2.0)
)
def initialize_q_batch(X: Tensor, Y: Tensor, n: int, eta: float = 1.0) -> Tensor:
r"""Heuristic for selecting initial conditions for candidate generation.
This heuristic selects points from `X` (without replacement) with probability
proportional to `exp(eta * Z)`, where `Z = (Y - mean(Y)) / std(Y)` and `eta`
is a temperature parameter.
    When using an acquisition function that is non-negative and possibly zero
over large areas of the feature space (e.g. qEI), you should use
`initialize_q_batch_nonneg` instead.
Args:
X: A `b x batch_shape x q x d` tensor of `b` - `batch_shape` samples of
            `q`-batches from a `d`-dim feature space. Typically, these are generated
using qMC sampling.
Y: A tensor of `b x batch_shape` outcomes associated with the samples.
Typically, this is the value of the batch acquisition function to be
maximized.
        n: The number of initial conditions to be generated. Must be less than `b`.
eta: Temperature parameter for weighting samples.
Returns:
A `n x batch_shape x q x d` tensor of `n` - `batch_shape` `q`-batch initial
conditions, where each batch of `n x q x d` samples is selected independently.
Example:
>>> # To get `n=10` starting points of q-batch size `q=3`
>>> # for model with `d=6`:
>>> qUCB = qUpperConfidenceBound(model, beta=0.1)
>>> Xrnd = torch.rand(500, 3, 6)
>>> Xinit = initialize_q_batch(Xrnd, qUCB(Xrnd), 10)
"""
n_samples = X.shape[0]
batch_shape = X.shape[1:-2] or torch.Size()
if n > n_samples:
raise RuntimeError(
f"n ({n}) cannot be larger than the number of "
f"provided samples ({n_samples})"
)
elif n == n_samples:
return X
Ystd = Y.std(dim=0)
if torch.any(Ystd == 0):
warnings.warn(
"All acquisition values for raw samples points are the same for "
"at least one batch. Choosing initial conditions at random.",
BadInitialCandidatesWarning,
)
return X[torch.randperm(n=n_samples, device=X.device)][:n]
max_val, max_idx = torch.max(Y, dim=0)
Z = (Y - Y.mean(dim=0)) / Ystd
etaZ = eta * Z
weights = torch.exp(etaZ)
while torch.isinf(weights).any():
etaZ *= 0.5
weights = torch.exp(etaZ)
if batch_shape == torch.Size():
idcs = torch.multinomial(weights, n)
else:
idcs = batched_multinomial(
weights=weights.permute(*range(1, len(batch_shape) + 1), 0), num_samples=n
).permute(-1, *range(len(batch_shape)))
# make sure we get the maximum
if max_idx not in idcs:
idcs[-1] = max_idx
if batch_shape == torch.Size():
return X[idcs]
else:
return X.gather(
dim=0, index=idcs.view(*idcs.shape, 1, 1).expand(n, *X.shape[1:])
)
def initialize_q_batch_nonneg(
X: Tensor, Y: Tensor, n: int, eta: float = 1.0, alpha: float = 1e-4
) -> Tensor:
r"""Heuristic for selecting initial conditions for non-neg. acquisition functions.
This function is similar to `initialize_q_batch`, but designed specifically
for acquisition functions that are non-negative and possibly zero over
large areas of the feature space (e.g. qEI). All samples for which
`Y < alpha * max(Y)` will be ignored (assuming that `Y` contains at least
one positive value).
Args:
X: A `b x q x d` tensor of `b` samples of `q`-batches from a `d`-dim.
feature space. Typically, these are generated using qMC.
Y: A tensor of `b` outcomes associated with the samples. Typically, this
is the value of the batch acquisition function to be maximized.
        n: The number of initial conditions to be generated. Must be less than `b`.
eta: Temperature parameter for weighting samples.
alpha: The threshold (as a fraction of the maximum observed value) under
which to ignore samples. All input samples for which
`Y < alpha * max(Y)` will be ignored.
Returns:
A `n x q x d` tensor of `n` `q`-batch initial conditions.
Example:
>>> # To get `n=10` starting points of q-batch size `q=3`
>>> # for model with `d=6`:
>>> qEI = qExpectedImprovement(model, best_f=0.2)
>>> Xrnd = torch.rand(500, 3, 6)
        >>> Xinit = initialize_q_batch_nonneg(Xrnd, qEI(Xrnd), 10)
"""
n_samples = X.shape[0]
if n > n_samples:
raise RuntimeError("n cannot be larger than the number of provided samples")
elif n == n_samples:
return X
max_val, max_idx = torch.max(Y, dim=0)
if torch.any(max_val <= 0):
warnings.warn(
"All acquisition values for raw sampled points are nonpositive, so "
"initial conditions are being selected randomly.",
BadInitialCandidatesWarning,
)
return X[torch.randperm(n=n_samples, device=X.device)][:n]
# make sure there are at least `n` points with positive acquisition values
pos = Y > 0
num_pos = pos.sum().item()
if num_pos < n:
# select all positive points and then fill remaining quota with randomly
# selected points
remaining_indices = (~pos).nonzero(as_tuple=False).view(-1)
rand_indices = torch.randperm(remaining_indices.shape[0], device=Y.device)
sampled_remaining_indices = remaining_indices[rand_indices[: n - num_pos]]
pos[sampled_remaining_indices] = 1
return X[pos]
# select points within alpha of max_val, iteratively decreasing alpha by a
# factor of 10 as necessary
alpha_pos = Y >= alpha * max_val
while alpha_pos.sum() < n:
alpha = 0.1 * alpha
alpha_pos = Y >= alpha * max_val
alpha_pos_idcs = torch.arange(len(Y), device=Y.device)[alpha_pos]
weights = torch.exp(eta * (Y[alpha_pos] / max_val - 1))
idcs = alpha_pos_idcs[torch.multinomial(weights, n)]
if max_idx not in idcs:
idcs[-1] = max_idx
return X[idcs]
def sample_points_around_best(
acq_function: AcquisitionFunction,
n_discrete_points: int,
sigma: Tensor,
bounds: Tensor,
best_pct: float = 5.0,
subset_sigma: float = 1e-1,
prob_perturb: Optional[float] = None,
) -> Optional[Tensor]:
r"""Find best points and sample nearby points.
Args:
acq_function: The acquisition function.
n_discrete_points: The number of points to sample.
sigma: A `(d)`-dim tensor containing the standard deviation of
the additive gaussian noise for perturbing the best points.
bounds: A `2 x d`-dim tensor containing the bounds.
best_pct: The percentage of best points to perturb.
        subset_sigma: The standard deviation of the additive Gaussian noise used
            when perturbing only a subset of the dimensions of the best points.
prob_perturb: The probability of perturbing each dimension.
Returns:
An optional `n_discrete_points x d`-dim tensor containing the
sampled points. This is None if no baseline points are found.
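    Example:
        >>> # Illustrative sketch: `acq_function` is assumed to be an acquisition
        >>> # function with a fitted model trained on `d`-dim normalized inputs.
        >>> sigma = torch.full((d,), 1e-3)
        >>> bounds = torch.stack([torch.zeros(d), torch.ones(d)])
        >>> X_perturbed = sample_points_around_best(
        >>>     acq_function=acq_function,
        >>>     n_discrete_points=128,
        >>>     sigma=sigma,
        >>>     bounds=bounds,
        >>> )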
"""
X = get_X_baseline(acq_function=acq_function)
if X is None:
return
with torch.no_grad():
try:
posterior = acq_function.model.posterior(X)
except AttributeError:
warnings.warn(
"Failed to sample around previous best points.",
BotorchWarning,
)
return
mean = posterior.mean
while mean.ndim > 2:
# take average over batch dims
mean = mean.mean(dim=0)
try:
f_pred = acq_function.objective(mean)
# Some acquisition functions do not have an objective
# and for some acquisition functions the objective is None
except (AttributeError, TypeError):
f_pred = mean
if hasattr(acq_function, "maximize"):
        # make sure that the optimization direction is set properly
if not acq_function.maximize:
f_pred = -f_pred
try:
# handle constraints for EHVI-based acquisition functions
constraints = acq_function.constraints
if constraints is not None:
neg_violation = -torch.stack(
[c(mean).clamp_min(0.0) for c in constraints], dim=-1
).sum(dim=-1)
feas = neg_violation == 0
if feas.any():
f_pred[~feas] = float("-inf")
else:
# set objective equal to negative violation
f_pred = neg_violation
except AttributeError:
pass
if f_pred.ndim == mean.ndim and f_pred.shape[-1] > 1:
# multi-objective
# find pareto set
is_pareto = is_non_dominated(f_pred)
best_X = X[is_pareto]
else:
if f_pred.shape[-1] == 1:
f_pred = f_pred.squeeze(-1)
n_best = max(1, round(X.shape[0] * best_pct / 100))
# the view() is to ensure that best_idcs is not a scalar tensor
best_idcs = torch.topk(f_pred, n_best).indices.view(-1)
best_X = X[best_idcs]
n_trunc_normal_points = (
n_discrete_points // 2 if best_X.shape[-1] > 20 else n_discrete_points
)
perturbed_X = sample_truncated_normal_perturbations(
X=best_X,
n_discrete_points=n_trunc_normal_points,
sigma=sigma,
bounds=bounds,
)
if best_X.shape[-1] > 20 or prob_perturb is not None:
perturbed_subset_dims_X = sample_perturbed_subset_dims(
X=best_X,
bounds=bounds,
# ensure that we return n_discrete_points
n_discrete_points=n_discrete_points - n_trunc_normal_points,
sigma=sigma,
prob_perturb=prob_perturb,
)
perturbed_X = torch.cat([perturbed_X, perturbed_subset_dims_X], dim=0)
# shuffle points
perm = torch.randperm(perturbed_X.shape[0], device=X.device)
perturbed_X = perturbed_X[perm]
return perturbed_X
def sample_truncated_normal_perturbations(
X: Tensor,
n_discrete_points: int,
sigma: Tensor,
bounds: Tensor,
qmc: bool = True,
) -> Tensor:
r"""Sample points around `X`.
Sample perturbed points around `X` such that the added perturbations
are sampled from N(0, sigma^2 I) and truncated to be within [0,1]^d.
Args:
        X: A `n x d`-dim tensor of starting points.
n_discrete_points: The number of points to sample.
sigma: A `(d)`-dim tensor containing the standard deviation of
the additive gaussian noise for perturbing the best points.
bounds: A `2 x d`-dim tensor containing the bounds.
qmc: A boolean indicating whether to use qmc.
Returns:
A `n_discrete_points x d`-dim tensor containing the sampled points.
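    Example:
        >>> # Illustrative sketch with hypothetical values: perturb two 3-dim
        >>> # points with per-dimension noise, truncated to stay in bounds.
        >>> X = torch.rand(2, 3)
        >>> bounds = torch.stack([torch.zeros(3), torch.ones(3)])
        >>> sigma = torch.full((3,), 1e-2)
        >>> X_pert = sample_truncated_normal_perturbations(
        >>>     X=X, n_discrete_points=10, sigma=sigma, bounds=bounds
        >>> )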
"""
X = normalize(X, bounds=bounds)
d = X.shape[1]
# sample points from N(X_center, sigma^2 I), truncated to be within
# [0, 1]^d.
if X.shape[0] > 1:
rand_indices = torch.randint(X.shape[0], (n_discrete_points,), device=X.device)
X = X[rand_indices]
if qmc:
std_bounds = torch.zeros(2, d, dtype=X.dtype, device=X.device)
std_bounds[1] = 1
u = draw_sobol_samples(bounds=std_bounds, n=n_discrete_points, q=1).squeeze(1)
else:
u = torch.rand((n_discrete_points, d), dtype=X.dtype, device=X.device)
# compute bounds to sample from
a = -X
b = 1 - X
# compute z-score of bounds
alpha = a / sigma
beta = b / sigma
normal = Normal(0, 1)
cdf_alpha = normal.cdf(alpha)
# use inverse transform
perturbation = normal.icdf(cdf_alpha + u * (normal.cdf(beta) - cdf_alpha)) * sigma
# add perturbation and clip points that are still outside
perturbed_X = (X + perturbation).clamp(0.0, 1.0)
return unnormalize(perturbed_X, bounds=bounds)
def sample_perturbed_subset_dims(
X: Tensor,
bounds: Tensor,
n_discrete_points: int,
sigma: Union[Tensor, float] = 1e-1,
qmc: bool = True,
prob_perturb: Optional[float] = None,
) -> Tensor:
r"""Sample around `X` by perturbing a subset of the dimensions.
By default, dimensions are perturbed with probability equal to
`min(20 / d, 1)`. As shown in [Regis]_, perturbing a small number
    of dimensions can be beneficial. The perturbations are sampled
from N(0, sigma^2 I) and truncated to be within [0,1]^d.
Args:
        X: A `n x d`-dim tensor of starting points. `X`
            must be normalized to be within `[0, 1]^d`.
        bounds: The bounds to sample perturbed values from.
n_discrete_points: The number of points to sample.
sigma: The standard deviation of the additive gaussian noise for
perturbing the points.
qmc: A boolean indicating whether to use qmc.
prob_perturb: The probability of perturbing each dimension. If omitted,
defaults to `min(20 / d, 1)`.
Returns:
A `n_discrete_points x d`-dim tensor containing the sampled points.
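    Example:
        >>> # Illustrative sketch with hypothetical values: perturb a random
        >>> # subset of dimensions of a single normalized 30-dim point.
        >>> X = torch.rand(1, 30)
        >>> bounds = torch.stack([torch.zeros(30), torch.ones(30)])
        >>> X_pert = sample_perturbed_subset_dims(
        >>>     X=X, bounds=bounds, n_discrete_points=16, sigma=1e-1
        >>> )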
"""
if bounds.ndim != 2:
raise BotorchTensorDimensionError("bounds must be a `2 x d`-dim tensor.")
elif X.ndim != 2:
raise BotorchTensorDimensionError("X must be a `n x d`-dim tensor.")
d = bounds.shape[-1]
if prob_perturb is None:
# Only perturb a subset of the features
prob_perturb = min(20.0 / d, 1.0)
if X.shape[0] == 1:
X_cand = X.repeat(n_discrete_points, 1)
else:
rand_indices = torch.randint(X.shape[0], (n_discrete_points,), device=X.device)
X_cand = X[rand_indices]
pert = sample_truncated_normal_perturbations(
X=X_cand,
n_discrete_points=n_discrete_points,
sigma=sigma,
bounds=bounds,
qmc=qmc,
)
# find cases where we are not perturbing any dimensions
mask = (
torch.rand(
n_discrete_points,
d,
dtype=bounds.dtype,
device=bounds.device,
)
<= prob_perturb
)
ind = (~mask).all(dim=-1).nonzero()
# perturb `n_perturb` of the dimensions
n_perturb = ceil(d * prob_perturb)
perturb_mask = torch.zeros(d, dtype=mask.dtype, device=mask.device)
perturb_mask[:n_perturb].fill_(1)
# TODO: use batched `torch.randperm` when available:
# https://github.com/pytorch/pytorch/issues/42502
for idx in ind:
mask[idx] = perturb_mask[torch.randperm(d, device=bounds.device)]
# Create candidate points
X_cand[mask] = pert[mask]
return X_cand
|
bo_pr-main
|
discrete_mixed_bo/initializers.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from abc import abstractmethod
from dataclasses import dataclass
from typing import Callable, Dict, List, Optional
import torch
from botorch.models.transforms.input import InputTransform
from botorch.utils.sampling import (
draw_sobol_normal_samples,
draw_sobol_samples,
sample_simplex,
)
from botorch.utils.transforms import normalize_indices
from gpytorch.constraints import GreaterThan, Interval
from gpytorch.module import Module as GPyTorchModule
from gpytorch.priors import NormalPrior
from torch import Tensor, nn
from torch.distributions import Categorical
from torch.nn import Module
from torch.nn.functional import one_hot
from discrete_mixed_bo.ste import OneHotArgmaxSTE, OneHotToNumericSTE, RoundSTE
class OneHotToNumeric(InputTransform, Module):
r"""Transformation that maps categorical parameters from one-hot representation to numeric representation.
This assumes that the categoricals are the trailing dimensions
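    Example:
        >>> # Illustrative sketch: one categorical with 3 categories occupying
        >>> # the trailing one-hot dimensions 2:5 of a 5-dim input.
        >>> tf = OneHotToNumeric(categorical_features={2: 3})
        >>> X = torch.tensor([[0.3, 0.7, 0.0, 1.0, 0.0]])
        >>> X_numeric = tf.transform(X)  # tensor([[0.3, 0.7, 1.0]])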
"""
def __init__(
self,
categorical_features: Optional[Dict[int, int]] = None,
transform_on_train: bool = False,
transform_on_eval: bool = True,
transform_on_fantasize: bool = False,
use_ste: bool = False,
) -> None:
super().__init__()
self.transform_on_train = transform_on_train
self.transform_on_eval = transform_on_eval
self.transform_on_fantasize = transform_on_fantasize
self.categorical_starts = []
self.categorical_ends = []
self.categorical_features = (
None
if ((categorical_features is None) or (len(categorical_features) == 0))
else categorical_features
)
if self.categorical_features is not None:
start_idx = None
for i in sorted(categorical_features.keys()):
if start_idx is None:
start_idx = i
self.categorical_starts.append(start_idx)
end_idx = start_idx + categorical_features[i]
self.categorical_ends.append(end_idx)
start_idx = end_idx
self.numeric_dim = min(self.categorical_starts) + len(categorical_features)
self.use_ste = use_ste
def transform(self, X: Tensor) -> Tensor:
r"""Round the inputs.
Args:
X: A `batch_shape x n x d`-dim tensor of inputs.
Returns:
A `batch_shape x n x d`-dim tensor of rounded inputs.
"""
if self.categorical_features is not None:
X_numeric = X[..., : self.numeric_dim].clone()
idx = self.categorical_starts[0]
for start, end in zip(self.categorical_starts, self.categorical_ends):
if self.use_ste:
X_numeric[..., idx] = OneHotToNumericSTE.apply(X[..., start:end])
else:
X_numeric[..., idx] = X[..., start:end].argmax(dim=-1)
idx += 1
return X_numeric
return X
def untransform(self, X: Tensor) -> Tensor:
r"""Un-transform the inputs to a model.
Un-transforms of the individual transforms are applied in reverse sequence.
Args:
X: A `batch_shape x n x d`-dim tensor of transformed inputs.
Returns:
A `batch_shape x n x d`-dim tensor of un-transformed inputs.
"""
if X.requires_grad:
raise NotImplementedError
if self.categorical_features is not None:
one_hot_categoricals = [
one_hot(X[..., idx].long(), num_classes=cardinality)
for idx, cardinality in sorted(
self.categorical_features.items(), key=lambda x: x[0]
)
]
X = torch.cat(
[
X[..., : min(self.categorical_features.keys())],
*one_hot_categoricals,
],
dim=-1,
)
return X
class Round(InputTransform, Module):
r"""A rounding transformation for integer inputs.
This will typically be used in conjunction with normalization as
follows:
    In eval() mode (i.e. after training), the inputs passed in would typically
    be normalized to the unit cube (e.g. during candidate optimization).
    1. These are unnormalized back to the raw input space.
    2. The integers are rounded.
    3. All values are normalized to the unit cube.
In train() mode, the inputs can either (a) be normalized to the unit
cube or (b) provided using their raw values. In the case of (a)
transform_on_train should be set to True, so that the normalized inputs
are unnormalized before rounding. In the case of (b) transform_on_train
should be set to False, so that the raw inputs are rounded and then
normalized to the unit cube.
This transformation uses differentiable approximate rounding by default.
The rounding function is approximated with a piece-wise function where
each piece is a hyperbolic tangent function.
Example:
>>> unnormalize_tf = Normalize(
>>> d=d,
>>> bounds=bounds,
>>> transform_on_eval=True,
>>> transform_on_train=True,
>>> reverse=True,
>>> )
>>> round_tf = Round(integer_indices)
>>> normalize_tf = Normalize(d=d, bounds=bounds)
>>> tf = ChainedInputTransform(
>>> tf1=unnormalize_tf, tf2=round_tf, tf3=normalize_tf
>>> )
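        >>> # Illustrative usage sketch (assumes `X` is an `n x d` tensor of
        >>> # normalized candidates): in eval() mode the chained transform
        >>> # unnormalizes, rounds the integer dimensions, and re-normalizes.
        >>> tf.eval()
        >>> X_rounded = tf(X)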
"""
def __init__(
self,
integer_indices: Optional[List[int]] = None,
categorical_features: Optional[Dict[int, int]] = None,
transform_on_train: bool = True,
transform_on_eval: bool = True,
transform_on_fantasize: bool = True,
approximate: bool = True,
tau: float = 1e-3,
use_ste: bool = False,
) -> None:
r"""Initialize transform.
Args:
            integer_indices: The indices of the integer inputs.
            categorical_features: The indices and cardinality of each categorical
                feature. The features are assumed to be one-hot encoded.
transform_on_train: A boolean indicating whether to apply the
transforms in train() mode. Default: True.
transform_on_eval: A boolean indicating whether to apply the
transform in eval() mode. Default: True.
transform_on_fantasize: A boolean indicating whether to apply the
transform when called from within a `fantasize` call. Default: True.
approximate: A boolean indicating whether approximate or exact
rounding should be used. Default: approximate.
tau: The temperature parameter for approximate rounding.
use_ste: use straight-through gradient estimator
"""
super().__init__()
self.transform_on_train = transform_on_train
self.transform_on_eval = transform_on_eval
self.transform_on_fantasize = transform_on_fantasize
integer_indices = integer_indices or []
self.register_buffer(
"integer_indices", torch.tensor(integer_indices, dtype=torch.long)
)
self.categorical_starts = []
self.categorical_ends = []
if categorical_features is not None:
start_idx = None
for i in sorted(categorical_features.keys()):
if start_idx is None:
start_idx = i
self.categorical_starts.append(start_idx)
end_idx = start_idx + categorical_features[i]
self.categorical_ends.append(end_idx)
start_idx = end_idx
self.approximate = approximate
self.tau = tau
self.use_ste = use_ste
def transform(self, X: Tensor) -> Tensor:
r"""Round the inputs.
Args:
X: A `batch_shape x n x d`-dim tensor of inputs.
Returns:
A `batch_shape x n x d`-dim tensor of rounded inputs.
"""
X_rounded = X.clone()
# round integers
X_int = X_rounded[..., self.integer_indices]
if self.approximate:
X_int = approximate_round(X_int, tau=self.tau)
elif self.use_ste:
X_int = RoundSTE.apply(X_int)
else:
X_int = X_int.round()
X_rounded[..., self.integer_indices] = X_int
        # round each one-hot encoded categorical to the category with the largest
        # value in the continuous relaxation of the one-hot encoding
for start, end in zip(self.categorical_starts, self.categorical_ends):
cardinality = end - start
if self.approximate:
raise NotImplementedError
elif self.use_ste:
X_rounded[..., start:end] = OneHotArgmaxSTE.apply(
X[..., start:end],
cardinality,
)
else:
X_rounded[..., start:end] = one_hot(
X[..., start:end].argmax(dim=-1), num_classes=cardinality
)
return X_rounded
class AnalyticProbabilisticReparameterizationInputTransform(InputTransform, Module):
r"""Probabilistic reparameterization input transform.
This will typically be used in conjunction with normalization as
follows:
    In eval() mode (i.e. after training), the inputs passed in would typically
    be normalized to the unit cube (e.g. during candidate optimization).
    1. These are unnormalized back to the raw input space.
    2. The discrete values are created.
    3. All values are normalized to the unit cube.
Example:
>>> unnormalize_tf = Normalize(
>>> d=d,
>>> bounds=bounds,
>>> transform_on_eval=True,
>>> transform_on_train=True,
>>> reverse=True,
>>> )
        >>> pr = AnalyticProbabilisticReparameterizationInputTransform(
        >>>     dim=d, integer_indices=integer_indices
        >>> )
>>> normalize_tf = Normalize(d=d, bounds=bounds)
>>> tf = ChainedInputTransform(
>>> tf1=unnormalize_tf, tf2=pr, tf3=normalize_tf
>>> )
"""
def __init__(
self,
dim: int,
integer_indices: Optional[List[int]] = None,
integer_bounds: Optional[Tensor] = None,
categorical_features: Optional[Dict[int, int]] = None,
transform_on_train: bool = False,
transform_on_eval: bool = True,
transform_on_fantasize: bool = True,
tau: float = 0.1,
) -> None:
r"""Initialize transform.
        Args:
            dim: The total input dimension (in the one-hot representation).
            integer_indices: The indices of the integer inputs.
            integer_bounds: A `2 x n_integer`-dim tensor of lower and upper
                bounds for the integer inputs.
            categorical_features: The indices and cardinality of
                each categorical feature. The features are assumed
                to be one-hot encoded. TODO: generalize to support
                alternative representations.
            transform_on_train: A boolean indicating whether to apply the
                transforms in train() mode. Default: False.
            transform_on_eval: A boolean indicating whether to apply the
                transform in eval() mode. Default: True.
            transform_on_fantasize: A boolean indicating whether to apply the
                transform when called from within a `fantasize` call. Default: True.
            tau: The temperature parameter.
"""
super().__init__()
if integer_indices is None and categorical_features is None:
raise ValueError(
"integer_indices and/or categorical_features must be provided."
)
self.transform_on_train = transform_on_train
self.transform_on_eval = transform_on_eval
self.transform_on_fantasize = transform_on_fantasize
discrete_indices = []
if integer_indices is not None and len(integer_indices) > 0:
assert integer_bounds is not None
self.register_buffer(
"integer_indices", torch.tensor(integer_indices, dtype=torch.long)
)
self.register_buffer("integer_bounds", integer_bounds)
discrete_indices += integer_indices
else:
self.integer_indices = None
self.categorical_features = categorical_features
categorical_starts = []
categorical_ends = []
if self.categorical_features is not None:
start = None
for i, n_categories in categorical_features.items():
if start is None:
start = i
end = start + n_categories
categorical_starts.append(start)
categorical_ends.append(end)
discrete_indices += list(range(start, end))
start = end
self.register_buffer(
"discrete_indices", torch.tensor(discrete_indices, dtype=torch.long)
)
self.register_buffer(
"categorical_starts", torch.tensor(categorical_starts, dtype=torch.long)
)
self.register_buffer(
"categorical_ends", torch.tensor(categorical_ends, dtype=torch.long)
)
self.tau = tau
# create cartesian product of discrete options
discrete_options = []
# add zeros for continuous params to simplify code
for i in range(dim - len(discrete_indices)):
discrete_options.append(
torch.zeros(
1,
dtype=torch.long,
)
)
if integer_bounds is not None:
for i in range(integer_bounds.shape[-1]):
discrete_options.append(
torch.arange(
integer_bounds[0, i], integer_bounds[1, i] + 1, dtype=torch.long
)
)
if categorical_features is not None:
for cardinality in categorical_features.values():
discrete_options.append(torch.arange(cardinality, dtype=torch.long))
# categoricals are in numeric representation
all_discrete_options = torch.cartesian_prod(*discrete_options)
# one-hot encode the categoricals
if categorical_features is not None and len(categorical_features) > 0:
X_categ = torch.empty(
*all_discrete_options.shape[:-1], sum(categorical_features.values())
)
i = 0
for idx, cardinality in categorical_features.items():
X_categ[..., i : i + cardinality] = one_hot(
all_discrete_options[..., idx],
num_classes=cardinality,
).to(X_categ)
i = i + cardinality
all_discrete_options = torch.cat(
[all_discrete_options[..., : -len(categorical_features)], X_categ],
dim=-1,
)
self.register_buffer("all_discrete_options", all_discrete_options)
def get_rounding_prob(self, X: Tensor) -> Tensor:
        # TODO: consolidate this with MCProbabilisticReparameterizationInputTransform
X_prob = X.detach().clone()
if self.integer_indices is not None:
# compute probabilities for integers
X_int = X_prob[..., self.integer_indices]
X_int_abs = X_int.abs()
offset = X_int_abs.floor()
if self.tau is not None:
X_prob[..., self.integer_indices] = torch.sigmoid(
(X_int_abs - offset - 0.5) / self.tau
)
else:
X_prob[..., self.integer_indices] = X_int_abs - offset
# compute probabilities for categoricals
for start, end in zip(self.categorical_starts, self.categorical_ends):
X_categ = X_prob[..., start:end]
if self.tau is not None:
X_prob[..., start:end] = torch.softmax(
(X_categ - 0.5) / self.tau, dim=-1
)
else:
                X_prob[..., start:end] = X_categ / X_categ.sum(dim=-1, keepdim=True)
return X_prob[..., self.discrete_indices]
def get_probs(self, X: Tensor) -> Tensor:
"""
Args:
X: a `batch_shape x n x d`-dim tensor
Returns:
            A `batch_shape x n_discrete x n`-dim tensor of probabilities of each
            discrete configuration under `X`.
"""
# note this method should be differentiable
X_prob = torch.ones(
*X.shape[:-2],
self.all_discrete_options.shape[0],
X.shape[-2],
dtype=X.dtype,
device=X.device,
)
# n_discrete x batch_shape x n x d
all_discrete_options = self.all_discrete_options.view(
*([1] * (X.ndim - 2)), self.all_discrete_options.shape[0], *X.shape[-2:]
).expand(*X.shape[:-2], self.all_discrete_options.shape[0], *X.shape[-2:])
X = X.unsqueeze(-3)
if self.integer_indices is not None:
# compute probabilities for integers
X_int = X[..., self.integer_indices]
X_int_abs = X_int.abs()
offset = X_int_abs.floor()
# note we don't actually need the sigmoid here
X_prob_int = torch.sigmoid((X_int_abs - offset - 0.5) / self.tau)
# X_prob_int = X_int_abs - offset
for int_idx, idx in enumerate(self.integer_indices):
offset_i = offset[..., int_idx]
all_discrete_i = all_discrete_options[..., idx]
diff = (offset_i + 1) - all_discrete_i
round_up_mask = diff == 0
round_down_mask = diff == 1
neither_mask = ~(round_up_mask | round_down_mask)
prob = X_prob_int[..., int_idx].expand(round_up_mask.shape)
# need to be careful with in-place ops here for autograd
X_prob[round_up_mask] = X_prob[round_up_mask] * prob[round_up_mask]
X_prob[round_down_mask] = X_prob[round_down_mask] * (
1 - prob[round_down_mask]
)
X_prob[neither_mask] = X_prob[neither_mask] * 0
# compute probabilities for categoricals
for start, end in zip(self.categorical_starts, self.categorical_ends):
X_categ = X[..., start:end]
X_prob_c = torch.softmax((X_categ - 0.5) / self.tau, dim=-1).expand(
*X_categ.shape[:-3], all_discrete_options.shape[-3], *X_categ.shape[-2:]
)
for i in range(X_prob_c.shape[-1]):
mask = all_discrete_options[..., start + i] == 1
X_prob[mask] = X_prob[mask] * X_prob_c[..., i][mask]
return X_prob
def transform(self, X: Tensor) -> Tensor:
r"""Round the inputs.
This is not sample-path differentiable.
Args:
X: A `batch_shape x 1 x n x d`-dim tensor of inputs.
Returns:
A `batch_shape x n_discrete x n x d`-dim tensor of rounded inputs.
"""
n_discrete = self.discrete_indices.shape[0]
all_discrete_options = self.all_discrete_options.view(
*([1] * (X.ndim - 3)), self.all_discrete_options.shape[0], *X.shape[-2:]
).expand(*X.shape[:-3], self.all_discrete_options.shape[0], *X.shape[-2:])
if X.shape[-1] > n_discrete:
X = X.expand(
*X.shape[:-3], self.all_discrete_options.shape[0], *X.shape[-2:]
)
return torch.cat(
[X[..., :-n_discrete], all_discrete_options[..., -n_discrete:]], dim=-1
)
return self.all_discrete_options
def equals(self, other: InputTransform) -> bool:
r"""Check if another input transform is equivalent.
Args:
other: Another input transform.
Returns:
A boolean indicating if the other transform is equivalent.
"""
# TODO: update this
return super().equals(other=other) and torch.equal(
self.integer_indices, other.integer_indices
)
class MCProbabilisticReparameterizationInputTransform(InputTransform, Module):
r"""Probabilistic reparameterization for ordinal and binary variables.
This will typically be used in conjunction with normalization as
follows:
    In eval() mode (i.e. after training), the inputs passed in would typically
    be normalized to the unit cube (e.g. during candidate optimization).
    1. These are unnormalized back to the raw input space.
    2. The discrete ordinal values are sampled.
    3. All values are normalized back to the unit cube.
Example:
>>> unnormalize_tf = Normalize(
>>> d=d,
>>> bounds=bounds,
>>> transform_on_eval=True,
>>> transform_on_train=True,
>>> reverse=True,
>>> )
    >>> pr = MCProbabilisticReparameterizationInputTransform(integer_indices=integer_indices)
>>> normalize_tf = Normalize(d=d, bounds=bounds)
>>> tf = ChainedInputTransform(
>>> tf1=unnormalize_tf, tf2=pr, tf3=normalize_tf
>>> )
"""
def __init__(
self,
integer_indices: Optional[List[int]] = None,
integer_bounds: Optional[Tensor] = None,
categorical_features: Optional[Dict[int, int]] = None,
transform_on_train: bool = False,
transform_on_eval: bool = True,
transform_on_fantasize: bool = True,
mc_samples: int = 128,
resample: bool = False,
flip: bool = False,
tau: float = 0.1,
) -> None:
r"""Initialize transform.
Args:
integer_indices: The indices of the integer inputs.
categorical_features: The indices and cardinality of
each categorical feature. The features are assumed
to be one-hot encoded. TODO: generalize to support
alternative representations.
            transform_on_train: A boolean indicating whether to apply the
                transform in train() mode. Default: False.
transform_on_eval: A boolean indicating whether to apply the
transform in eval() mode. Default: True.
transform_on_fantasize: A boolean indicating whether to apply the
transform when called from within a `fantasize` call. Default: True.
mc_samples: The number of MC samples.
resample: A boolean indicating whether to resample base samples
at each forward pass.
            flip: A boolean indicating whether to round based on u < p or 1 - p < u.
tau: The temperature parameter.
"""
super().__init__()
if integer_indices is None and categorical_features is None:
raise ValueError(
"integer_indices and/or categorical_features must be provided."
)
self.transform_on_train = transform_on_train
self.transform_on_eval = transform_on_eval
self.transform_on_fantasize = transform_on_fantasize
discrete_indices = []
if integer_indices is not None and len(integer_indices) > 0:
self.register_buffer(
"integer_indices", torch.tensor(integer_indices, dtype=torch.long)
)
discrete_indices += integer_indices
else:
self.integer_indices = None
self.categorical_features = categorical_features
categorical_starts = []
categorical_ends = []
if self.categorical_features is not None:
start = None
for i, n_categories in categorical_features.items():
if start is None:
start = i
end = start + n_categories
categorical_starts.append(start)
categorical_ends.append(end)
discrete_indices += list(range(start, end))
start = end
self.register_buffer(
"discrete_indices", torch.tensor(discrete_indices, dtype=torch.long)
)
self.register_buffer(
"categorical_starts", torch.tensor(categorical_starts, dtype=torch.long)
)
self.register_buffer(
"categorical_ends", torch.tensor(categorical_ends, dtype=torch.long)
)
if integer_indices is None:
self.register_buffer("integer_bounds", torch.tensor([], dtype=torch.long))
else:
self.register_buffer("integer_bounds", integer_bounds)
self.mc_samples = mc_samples
self.resample = resample
self.flip = flip
self.tau = tau
def get_rounding_prob(self, X: Tensor) -> Tensor:
X_prob = X.detach().clone()
if self.integer_indices is not None:
# compute probabilities for integers
X_int = X_prob[..., self.integer_indices]
X_int_abs = X_int.abs()
offset = X_int_abs.floor()
if self.tau is not None:
X_prob[..., self.integer_indices] = torch.sigmoid(
(X_int_abs - offset - 0.5) / self.tau
)
else:
X_prob[..., self.integer_indices] = X_int_abs - offset
# compute probabilities for categoricals
for start, end in zip(self.categorical_starts, self.categorical_ends):
X_categ = X_prob[..., start:end]
if self.tau is not None:
X_prob[..., start:end] = torch.softmax(
(X_categ - 0.5) / self.tau, dim=-1
)
else:
                X_prob[..., start:end] = X_categ / X_categ.sum(dim=-1, keepdim=True)
return X_prob[..., self.discrete_indices]
def transform(self, X: Tensor) -> Tensor:
r"""Round the inputs.
This is not sample-path differentiable.
Args:
            X: A `batch_shape x 1 x n x d`-dim tensor of inputs.
        Returns:
            A `batch_shape x mc_samples x n x d`-dim tensor of rounded inputs.
"""
X_expanded = X.expand(*X.shape[:-3], self.mc_samples, *X.shape[-2:]).clone()
X_prob = self.get_rounding_prob(X=X)
if self.integer_indices is not None:
X_int = X[..., self.integer_indices].detach()
assert X.ndim > 1
if X.ndim == 2:
X.unsqueeze(-1)
if (
not hasattr(self, "base_samples")
or self.base_samples.shape[-2:] != X_int.shape[-2:]
or self.resample
):
# construct sobol base samples
bounds = torch.zeros(
2, X_int.shape[-1], dtype=X_int.dtype, device=X_int.device
)
bounds[1] = 1
self.register_buffer(
"base_samples",
draw_sobol_samples(
bounds=bounds,
n=self.mc_samples,
q=X_int.shape[-2],
seed=torch.randint(0, 100000, (1,)).item(),
),
)
X_int_abs = X_int.abs()
# perform exact rounding
is_negative = X_int < 0
offset = X_int_abs.floor()
prob = X_prob[..., : self.integer_indices.shape[0]]
if self.flip:
rounding_component = (1 - prob < self.base_samples).to(
dtype=X.dtype,
)
else:
rounding_component = (prob >= self.base_samples).to(
dtype=X.dtype,
)
X_abs_rounded = offset + rounding_component
X_int_new = (-1) ** is_negative.to(offset) * X_abs_rounded
# clamp to bounds
X_expanded[..., self.integer_indices] = torch.minimum(
torch.maximum(X_int_new, self.integer_bounds[0]), self.integer_bounds[1]
)
# sample for categoricals
if self.categorical_features is not None and len(self.categorical_features) > 0:
if (
not hasattr(self, "base_samples_categorical")
or self.base_samples_categorical.shape[-2] != X.shape[-2]
or self.resample
):
bounds = torch.zeros(
2, len(self.categorical_features), dtype=X.dtype, device=X.device
)
bounds[1] = 1
self.register_buffer(
"base_samples_categorical",
draw_sobol_samples(
bounds=bounds,
n=self.mc_samples,
q=X.shape[-2],
seed=torch.randint(0, 100000, (1,)).item(),
),
)
# sample from multinomial as argmin_c [sample_c * exp(-x_c)]
sample_d_start_idx = 0
X_categ_prob = X_prob
if self.integer_indices is not None:
n_ints = self.integer_indices.shape[0]
if n_ints > 0:
X_categ_prob = X_prob[..., n_ints:]
for i, (idx, cardinality) in enumerate(self.categorical_features.items()):
sample_d_end_idx = sample_d_start_idx + cardinality
start = self.categorical_starts[i]
end = self.categorical_ends[i]
cum_prob = X_categ_prob[
..., sample_d_start_idx:sample_d_end_idx
].cumsum(dim=-1)
categories = (
(
(cum_prob > self.base_samples_categorical[..., i : i + 1])
.long()
.cumsum(dim=-1)
== 1
)
.long()
.argmax(dim=-1)
)
# one-hot encode
X_expanded[..., start:end] = one_hot(
categories, num_classes=cardinality
).to(X)
sample_d_start_idx = sample_d_end_idx
return X_expanded
def equals(self, other: InputTransform) -> bool:
r"""Check if another input transform is equivalent.
Args:
other: Another input transform.
Returns:
A boolean indicating if the other transform is equivalent.
"""
return (
super().equals(other=other)
and (self.resample == other.resample)
and torch.equal(self.base_samples, other.base_samples)
and (self.flip == other.flip)
and torch.equal(self.integer_indices, other.integer_indices)
)
@dataclass
class CategoricalSpec:
idx: int
num_categories: int
@dataclass
class LatentCategoricalSpec(CategoricalSpec):
latent_dim: int
class EmbeddingTransform(InputTransform):
r"""Abstract base class for Embedding-based transforms"""
_emb_dim: int
_transformed_dim: int
dim: int
categ_idcs: Tensor
non_categ_mask: Tensor
def transform(self, X: Tensor) -> Tensor:
r"""Transform categorical variables using embedding."""
X_emb = torch.empty(
X.shape[:-1] + torch.Size([self._emb_dim]), dtype=X.dtype, device=X.device
)
start_idx = 0
for idx in self.categ_idcs.tolist():
emb_table = self.get_emb_table(idx)
emb_dim = emb_table.shape[-1]
end_idx = start_idx + emb_dim
emb = emb_table.index_select(dim=0, index=X[..., idx].reshape(-1).long())
X_emb[..., start_idx:end_idx] = emb.view(
X_emb.shape[:-1] + torch.Size([emb_dim])
)
start_idx = end_idx
return torch.cat([X[..., self.non_categ_mask], X_emb], dim=-1)
@abstractmethod
def get_emb_table(self, idx: int) -> Tensor:
r"""Get the embedding table for the specified categorical feature.
Args:
idx: The index of the categorical feature
Returns:
A `num_categories x emb_dim`-dim tensor containing the embeddings
for each category.
"""
pass
def transform_bounds(self, bounds: Tensor) -> Tensor:
r"""Update bounds based on embedding transform.
Args:
bounds: A `2 x d`-dim tensor of lower and upper bounds
Returns:
            A `2 x (d_cont + d_emb)`-dim tensor of lower and upper bounds
"""
d_cont = self.dim - self.categ_idcs.shape[0]
tf_bounds = torch.zeros(
2, d_cont + self._emb_dim, dtype=bounds.dtype, device=bounds.device
)
tf_bounds[:, :d_cont] = bounds[:, self.non_categ_mask]
tf_bounds[1, d_cont:] = 1
return tf_bounds
class LatentCategoricalEmbedding(EmbeddingTransform, GPyTorchModule):
r"""Latent embeddings for categorical variables.
    Note: this currently uses the same latent embeddings across batches.
This means that a batched multi-output model will use the same latent
embeddings for all outputs.
"""
def __init__(
self,
categorical_specs: List[LatentCategoricalSpec],
dim: int,
transform_on_train: bool = True,
transform_on_eval: bool = True,
transform_on_preprocess: bool = False,
transform_on_fantasize: bool = False,
eps: float = 1e-7,
) -> None:
r"""Initialize input transform.
Args:
categorical_specs: A list of LatentCategoricalSpec objects.
dim: the total dimension of the inputs.
transform_on_eval: A boolean indicating whether to apply the
                transform in eval() mode. Default: True.
"""
GPyTorchModule.__init__(self)
self._eps = eps
self.dim = dim
self.transform_on_train = transform_on_train
self.transform_on_eval = transform_on_eval
self.transform_on_preprocess = transform_on_preprocess
self.transform_on_fantasize = transform_on_fantasize
self._emb_dim = 0
categ_idcs = []
# TODO: replace with ParameterDict when supported in GPyTorch
for c in categorical_specs:
nlzd_idx = normalize_indices([c.idx], dim)[0]
categ_idcs.append(nlzd_idx)
init_emb_table = draw_sobol_normal_samples(
n=c.num_categories, d=c.latent_dim
).squeeze(0)
self.register_parameter(
f"raw_latent_emb_tables_{nlzd_idx}",
nn.Parameter(init_emb_table),
)
def raw_latent_emb_table_setter(m, v, idx=nlzd_idx):
m.initialize(f"raw_latent_emb_tables_{idx}", value=v)
def raw_latent_emb_table_getter(m, idx=nlzd_idx):
return getattr(m, f"raw_latent_emb_tables_{idx}")
self._emb_dim += c.latent_dim
self.register_buffer("categ_idcs", torch.tensor(categ_idcs, dtype=torch.long))
non_categ_mask = torch.ones(dim, dtype=bool)
non_categ_mask[self.categ_idcs] = 0
self.register_buffer("non_categ_mask", non_categ_mask)
def get_emb_table(self, idx: int) -> Tensor:
r"""Get the embedding table for the specified categorical feature.
Args:
idx: The index of the categorical feature
Returns:
A `num_categories x latent_dim`-dim tensor containing the embeddings
for each category.
"""
# This technique is recommended in https://arxiv.org/abs/2003.03300
raw_emb_table = getattr(self, f"raw_latent_emb_tables_{idx}")
with torch.no_grad():
raw_emb_table[0] = 0 # force one embedding to be the origin
            if raw_emb_table.shape[1] > 1 and raw_emb_table.shape[0] > 1:
                # force a second embedding onto a coordinate axis if the
                # latent space has more than one dimension
                raw_emb_table[1, 0] = 0
return raw_emb_table
def untransform(
self,
X: Tensor,
dist_func: Optional[Callable[[Tensor, Tensor, int], Tensor]] = None,
) -> Tensor:
r"""Untransform X to represent categoricals as integers.
The transformation assigns the category to be the index corresponding to the
closest embedding. Note: this is not differentiable.
Args:
            X: A `batch_shape x n x (d_cont + d_latent)`-dim tensor of transformed values.
            dist_func: A broadcastable distance function mapping two input tensors with
                shapes `batch_shape x n x 1 x d_latent` and `n_categories x d_latent` and
                an integer starting index to a `batch_shape x n x n_categories`-dim
                tensor of distances. The default is L2 distance.
Returns:
The untransformed tensor.
"""
new_X = torch.empty(
X.shape[:-1] + torch.Size([self.dim]), dtype=X.dtype, device=X.device
)
num_non_categ_features = X.shape[-1] - self._emb_dim
new_X[..., self.non_categ_mask] = X[..., :num_non_categ_features]
start_idx = self.dim - self.categ_idcs.shape[0]
for idx in self.categ_idcs.tolist():
emb_table = self.get_emb_table(idx)
emb_dim = emb_table.shape[-1]
end_idx = start_idx + emb_dim
x = X[..., start_idx:end_idx].unsqueeze(-2)
x_emb = emb_table.unsqueeze(-3)
if dist_func is not None:
dist = dist_func(x, x_emb, start_idx)
else:
dist = torch.norm(x - x_emb, dim=-1)
int_categories = dist.argmin(dim=-1).to(dtype=X.dtype)
new_X[..., idx] = int_categories
start_idx = end_idx
return new_X
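# Hedged usage sketch (not part of the original module): exercises the latent
# categorical embedding above on a toy 3-dim input whose last column holds an
# integer category label in {0, ..., 3}. The dimensions, spec, and data below
# are illustrative assumptions, not documented API requirements.
if __name__ == "__main__":
    specs = [LatentCategoricalSpec(idx=2, num_categories=4, latent_dim=2)]
    tf = LatentCategoricalEmbedding(categorical_specs=specs, dim=3)
    X = torch.rand(5, 3)
    X[:, 2] = torch.randint(0, 4, (5,)).to(X)  # integer category labels
    X_emb = tf.transform(X)  # 5 x 4: two continuous dims plus a 2-dim latent embedding
    X_back = tf.untransform(X_emb)  # categories recovered as integer labels
    print(X_emb.shape, X_back.shape)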
|
bo_pr-main
|
discrete_mixed_bo/input.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Optional
import numpy as np
import torch
from botorch.models.kernels import CategoricalKernel
from gpytorch.constraints import GreaterThan, Interval
from gpytorch.kernels import Kernel, MaternKernel, RBFKernel, ScaleKernel
from gpytorch.priors.torch_priors import GammaPrior, LogNormalPrior
from torch import Tensor
def get_kernel(
kernel_type: str,
dim: int,
binary_dims: List[int],
categorical_transformed_features: Dict[int, int],
train_X: Tensor,
train_Y: Tensor,
function_name: Optional[str] = None,
use_ard_binary: bool = False,
) -> Optional[Kernel]:
"""Helper function for kernel construction."""
# ard kernel for continuous features
if kernel_type == "mixed_categorical":
categorical_dims = list(categorical_transformed_features.keys())
else:
if len(categorical_transformed_features) > 0:
start = min(categorical_transformed_features.keys())
categorical_dims = list(range(start, dim))
else:
categorical_dims = []
if "mixed" in kernel_type:
cont_dims = list(set(list(range(dim))) - set(binary_dims))
if ("latent" in kernel_type) or ("categorical" in kernel_type):
cont_dims = list(set(cont_dims) - set(categorical_dims))
kernels = []
# ard kernel for continuous features
if len(cont_dims) > 0:
kernels.append(
MaternKernel(
nu=2.5,
ard_num_dims=len(cont_dims),
active_dims=cont_dims,
lengthscale_constraint=Interval(0.1, 20.0),
)
)
# isotropic kernel for binary features
if len(binary_dims) > 0:
kernels.append(
MaternKernel(
nu=2.5,
ard_num_dims=len(binary_dims) if use_ard_binary else None,
active_dims=binary_dims,
lengthscale_constraint=Interval(0.1, 20.0),
)
)
if kernel_type == "mixed_categorical":
if len(categorical_dims) > 0:
kernels.append(
CategoricalKernel(
ard_num_dims=len(categorical_dims),
active_dims=categorical_dims,
lengthscale_constraint=Interval(1e-3, 20.0),
)
)
elif kernel_type == "mixed_latent":
for start, latent_dim in categorical_transformed_features.items():
kernels.append(
MaternKernel(
# Use a isotropic kernel --
# one kernel for each set of latent embeddings
ard_num_dims=None,
active_dims=list(range(start, start + latent_dim)),
lengthscale_constraint=Interval(1e-3, 20.0),
)
)
prod_kernel = kernels[0]
for k in kernels[1:]:
prod_kernel *= k
if kernel_type != "mixed_categorical":
return ScaleKernel(prod_kernel)
        sum_kernel = kernels[0]
        for k in kernels[1:]:
            sum_kernel += k
return ScaleKernel(prod_kernel) + ScaleKernel(sum_kernel)
elif kernel_type == "botorch_default":
return None
elif kernel_type == "ard_combo":
return CombinatorialCovarModule(ard_num_dims=dim)
elif kernel_type == "iso_combo":
return CombinatorialCovarModule(ard_num_dims=None)
raise ValueError(f"{kernel_type} is not supported.")
class CombinatorialCovarModule(Kernel):
r"""This kernel is suitable for a {0, 1}^d domain and used for combo design."""
def __init__(self, ard_num_dims=None, **kwargs):
super().__init__(**kwargs)
use_ard = ard_num_dims is not None and ard_num_dims > 1
lengthscale_constraint = Interval(0.1, 20.0) if use_ard else None
lengthscale_prior = GammaPrior(2.0, 1.0) if use_ard else None
base_kernel = MaternKernel(
nu=2.5,
ard_num_dims=ard_num_dims,
lengthscale_constraint=lengthscale_constraint,
lengthscale_prior=lengthscale_prior,
active_dims=kwargs.get("active_dims"),
)
covar_module = ScaleKernel(
base_kernel=base_kernel,
outputscale_prior=LogNormalPrior(9.0, 3.0), # Flexible prior
outputscale_constraint=Interval(1e-3, 1e3),
)
self._covar_module = covar_module
def forward(self, x1, x2, diag=False, **params):
return self._covar_module.forward(x1, x2, diag=diag, **params)
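# Hedged usage sketch (not part of the original module): builds the mixed
# categorical kernel above for a toy 5-dim space with three continuous dims,
# one binary dim (index 3), and one numeric-encoded categorical dim (index 4,
# three levels). The layout and the random training data are placeholder
# assumptions; train_X/train_Y are only required by the helper's signature.
if __name__ == "__main__":
    train_X = torch.rand(8, 5)
    train_X[:, 3] = train_X[:, 3].round()  # binary dimension
    train_X[:, 4] = (train_X[:, 4] * 3).floor()  # categorical dimension with 3 levels
    train_Y = torch.randn(8, 1)
    kernel = get_kernel(
        kernel_type="mixed_categorical",
        dim=5,
        binary_dims=[3],
        categorical_transformed_features={4: 3},
        train_X=train_X,
        train_Y=train_Y,
    )
    print(kernel)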
|
bo_pr-main
|
discrete_mixed_bo/kernels.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Trust Region Utilities.
"""
import math
from dataclasses import dataclass
import torch
@dataclass
class TurboState:
dim: int
batch_size: int
is_constrained: bool
length: float = 0.8
length_min: float = 0.5**7
length_max: float = 1.6
failure_counter: int = 0
failure_tolerance: int = float("nan") # Note: Post-initialized
success_counter: int = 0
success_tolerance: int = 10
best_value: float = -float("inf") # Goal is maximization
constraint_violation = float("inf")
restart_triggered: bool = False
def __post_init__(self):
self.failure_tolerance = math.ceil(
max([4.0 / self.batch_size, float(self.dim) / self.batch_size])
)
def update_state(state, Y_next):
if not state.is_constrained:
better_than_current = Y_next.max() > state.best_value + 1e-3 * math.fabs(
state.best_value
)
state.best_value = max(state.best_value, Y_next.max().item())
else:
feas = (Y_next[..., 1:] >= 0).all(dim=-1)
if feas.any() and state.constraint_violation == 0: # (1) Both are feasible
new_best = Y_next[feas, 0].max()
better_than_current = new_best > state.best_value + 1e-3 * math.fabs(
state.best_value
)
state.best_value = max(state.best_value, Y_next[feas, 0].max().item())
elif feas.any() and state.constraint_violation > 0: # (2) New is feasible
better_than_current = True
state.best_value = Y_next[feas, 0].max().item()
state.constraint_violation = 0.0
elif not feas.any() and state.constraint_violation > 0: # (3) None are feasible
violation = torch.clamp_max(Y_next[..., 1:], 0.0).abs().sum(dim=-1)
better_than_current = (
violation.min()
< state.constraint_violation
- 1e-3 * math.fabs(state.constraint_violation)
)
state.constraint_violation = min(
state.constraint_violation, violation.min().item()
)
else: # All of these count as failures
better_than_current = False
if better_than_current:
state.success_counter += 1
state.failure_counter = 0
else:
state.success_counter = 0
state.failure_counter += 1
if state.success_counter == state.success_tolerance: # Expand trust region
state.length = min(2.0 * state.length, state.length_max)
state.success_counter = 0
elif state.failure_counter == state.failure_tolerance: # Shrink trust region
state.length /= 2.0
state.failure_counter = 0
if state.length < state.length_min:
state.restart_triggered = True
return state
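# Hedged usage sketch (not part of the original module): drives the TuRBO state
# above with random placeholder observations to show how the trust region
# length expands on repeated successes and shrinks on repeated failures.
if __name__ == "__main__":
    state = TurboState(dim=10, batch_size=4, is_constrained=False)
    for _ in range(10):
        Y_next = torch.randn(4, 1)  # placeholder objective values (maximization)
        state = update_state(state=state, Y_next=Y_next)
        print(
            f"length={state.length:.3f} "
            f"best={state.best_value:.3f} "
            f"restart={state.restart_triggered}"
        )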
|
bo_pr-main
|
discrete_mixed_bo/trust_region.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Probabilistic Reparameterization (with gradients) using Monte Carlo estimators.
"""
from abc import ABC, abstractmethod
from collections import OrderedDict
from contextlib import ExitStack
from typing import Dict, List, Optional
import torch
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.models.transforms.input import (
ChainedInputTransform,
InputTransform,
Normalize,
)
from torch import Tensor
from torch.autograd import Function
from torch.nn import Module
from torch.nn.functional import one_hot
from discrete_mixed_bo.input import (
AnalyticProbabilisticReparameterizationInputTransform,
MCProbabilisticReparameterizationInputTransform,
OneHotToNumeric,
)
from discrete_mixed_bo.wrapper import AcquisitionFunctionWrapper
def get_probabilistic_reparameterization_input_transform(
dim: int,
integer_indices: List[int],
integer_bounds: Tensor,
categorical_features: Optional[Dict[int, int]] = None,
use_analytic: bool = False,
mc_samples: int = 1024,
resample: bool = False,
flip: bool = False,
tau: float = 0.1,
) -> ChainedInputTransform:
bounds = torch.zeros(
2, dim, dtype=integer_bounds.dtype, device=integer_bounds.device
)
bounds[1] = 1
bounds[:, integer_indices] = integer_bounds
tfs = OrderedDict()
if integer_indices is not None and len(integer_indices) > 0:
# unnormalize to integer space
tfs["unnormalize"] = Normalize(
d=dim,
bounds=bounds,
indices=integer_indices,
transform_on_train=False,
transform_on_eval=True,
transform_on_fantasize=False,
reverse=True,
)
# round
if use_analytic:
tfs["round"] = AnalyticProbabilisticReparameterizationInputTransform(
dim=dim,
integer_indices=integer_indices,
integer_bounds=integer_bounds,
categorical_features=categorical_features,
tau=tau,
)
else:
tfs["round"] = MCProbabilisticReparameterizationInputTransform(
integer_indices=integer_indices,
integer_bounds=integer_bounds,
categorical_features=categorical_features,
resample=resample,
mc_samples=mc_samples,
flip=flip,
tau=tau,
)
if integer_indices is not None and len(integer_indices) > 0:
# normalize to unit cube
tfs["normalize"] = Normalize(
d=dim,
bounds=bounds,
indices=integer_indices,
transform_on_train=False,
transform_on_eval=True,
transform_on_fantasize=False,
reverse=False,
)
tf = ChainedInputTransform(**tfs)
tf.eval()
return tf
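# Hedged illustrative sketch (not part of the original module): one plausible way
# to exercise the factory above for a 3-dim problem whose last dimension is an
# integer in {0, ..., 4}. The bounds and shapes below are assumptions made only
# for illustration.
def _example_pr_transform_usage() -> Tensor:
    tf = get_probabilistic_reparameterization_input_transform(
        dim=3,
        integer_indices=[2],
        integer_bounds=torch.tensor([[0.0], [4.0]]),
        mc_samples=8,
    )
    # a toy normalized input of shape batch_shape x 1 x q x d
    X = torch.rand(1, 1, 1, 3)
    # the MC transform expands the sample dimension: output is 1 x 8 x 1 x 3
    return tf(X)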
class AbstractProbabilisticReparameterization(AcquisitionFunctionWrapper, ABC):
"""Acquisition Function Wrapper that leverages probabilistic reparameterization."""
def __init__(
self,
acq_function: AcquisitionFunction,
dim: int,
integer_indices: Optional[List[int]] = None,
integer_bounds: Optional[Tensor] = None,
categorical_features: Optional[Dict[int, int]] = None,
batch_limit: Optional[int] = None,
apply_numeric: bool = False,
**kwargs
) -> None:
if categorical_features is None and (
integer_indices is None or integer_bounds is None
):
raise NotImplementedError(
"categorical_features or integer indices and integer_bounds must be provided."
)
super().__init__(acq_function=acq_function)
self.batch_limit = batch_limit
if apply_numeric:
self.one_hot_to_numeric = OneHotToNumeric(
categorical_features=categorical_features,
transform_on_train=False,
transform_on_eval=True,
transform_on_fantasize=False,
)
self.one_hot_to_numeric.eval()
else:
self.one_hot_to_numeric = None
discrete_indices = []
if integer_indices is not None:
self.register_buffer(
"integer_indices",
torch.tensor(
integer_indices, dtype=torch.long, device=integer_bounds.device
),
)
self.register_buffer("integer_bounds", integer_bounds)
discrete_indices.extend(integer_indices)
else:
self.register_buffer(
"integer_indices",
torch.tensor([], dtype=torch.long, device=integer_bounds.device),
)
self.register_buffer(
"integer_bounds",
torch.tensor(
[], dtype=integer_bounds.dtype, device=integer_bounds.device
),
)
if categorical_features is not None and len(categorical_features) > 0:
categorical_indices = list(range(min(categorical_features.keys()), dim))
discrete_indices.extend(categorical_indices)
self.register_buffer(
"categorical_indices",
torch.tensor(
categorical_indices,
dtype=torch.long,
device=integer_bounds.device,
),
)
self.categorical_features = categorical_features
else:
self.register_buffer(
"categorical_indices",
torch.tensor(
[],
dtype=torch.long,
device=integer_bounds.device,
),
)
self.register_buffer(
"cont_indices",
torch.tensor(
sorted(list(set(range(dim)) - set(discrete_indices))),
dtype=torch.long,
device=integer_bounds.device,
),
)
self.model = acq_function.model # for sample_around_best heuristic
# moving average baseline
self.register_buffer(
"ma_counter",
torch.zeros(1, dtype=torch.double, device=integer_bounds.device),
)
self.register_buffer(
"ma_hidden",
torch.zeros(1, dtype=torch.double, device=integer_bounds.device),
)
self.register_buffer(
"ma_baseline",
torch.zeros(1, dtype=torch.double, device=integer_bounds.device),
)
def sample_candidates(self, X: Tensor) -> Tensor:
if "unnormalize" in self.input_transform:
unnormalized_X = self.input_transform["unnormalize"](X)
else:
unnormalized_X = X.clone()
prob = self.input_transform["round"].get_rounding_prob(X=unnormalized_X)
discrete_idx = 0
for i in self.integer_indices:
p = prob[..., discrete_idx]
rounding_component = torch.distributions.Bernoulli(probs=p).sample()
unnormalized_X[..., i] = unnormalized_X[..., i].floor() + rounding_component
discrete_idx += 1
unnormalized_X[..., self.integer_indices] = torch.minimum(
torch.maximum(
unnormalized_X[..., self.integer_indices], self.integer_bounds[0]
),
self.integer_bounds[1],
)
# this is the starting index for the categoricals in unnormalized_X
raw_idx = self.cont_indices.shape[0] + discrete_idx
if self.categorical_indices.shape[0] > 0:
for i, cardinality in self.categorical_features.items():
discrete_end = discrete_idx + cardinality
p = prob[..., discrete_idx:discrete_end]
z = one_hot(
torch.distributions.Categorical(probs=p).sample(),
num_classes=cardinality,
)
raw_end = raw_idx + cardinality
unnormalized_X[..., raw_idx:raw_end] = z
discrete_idx = discrete_end
raw_idx = raw_end
# normalize X
if "normalize" in self.input_transform:
return self.input_transform["normalize"](unnormalized_X)
return unnormalized_X
@abstractmethod
def forward(self, X: Tensor) -> Tensor:
"""Compute PR."""
pass
class AnalyticProbabilisticReparameterization(AbstractProbabilisticReparameterization):
"""Acquisition Function Wrapper that leverages analytic probabilistic reparameterization.
    Note: this is only reasonable from a computational perspective for relatively small numbers of discrete options (probably fewer than a few thousand).
"""
def __init__(
self,
acq_function: AcquisitionFunction,
dim: int,
dtype: torch.dtype,
device: torch.device,
integer_indices: Optional[List[int]] = None,
integer_bounds: Optional[Tensor] = None,
categorical_features: Optional[Dict[int, int]] = None,
batch_limit: Optional[int] = None,
apply_numeric: bool = False,
tau: float = 0.1,
) -> None:
super().__init__(
acq_function=acq_function,
dim=dim,
integer_indices=integer_indices,
integer_bounds=integer_bounds,
categorical_features=categorical_features,
batch_limit=batch_limit,
apply_numeric=apply_numeric,
)
# create input transform
# need to compute cross product of discrete options and weights
self.input_transform = get_probabilistic_reparameterization_input_transform(
dim=dim,
use_analytic=True,
integer_indices=integer_indices,
integer_bounds=integer_bounds,
categorical_features=categorical_features,
tau=tau,
)
self.input_transform.to(dtype=dtype, device=device)
if self.batch_limit is None:
self.batch_limit = self.input_transform["round"].all_discrete_options.shape[
0
]
def forward(self, X: Tensor) -> Tensor:
"""Evaluate PR."""
X_discrete_all = self.input_transform(X.unsqueeze(-3))
acq_values_list = []
start_idx = 0
if self.one_hot_to_numeric is not None:
X_discrete_all = self.one_hot_to_numeric(X_discrete_all)
if X.shape[-2] != 1:
raise NotImplementedError
# save the probabilities
if "unnormalize" in self.input_transform:
unnormalized_X = self.input_transform["unnormalize"](X)
else:
unnormalized_X = X
# this is batch_shape x n_discrete (after squeezing)
probs = self.input_transform["round"].get_probs(X=unnormalized_X).squeeze(-1)
# TODO: filter discrete configs with zero probability
# this requires padding because there may be a different number in each batch. Each batch bucket needs at least
# nonzero_prob.sum(dim=-1).max() elements to avoid ragged tensors
# nonzero_prob = probs > 0
# try:
# X_discrete_all = X_discrete_all[nonzero_prob].view(*X_discrete_all.shape[:-3], -1, *X_discrete_all.shape[-2:])
# except RuntimeError:
# import pdb
# pdb.set_trace()
# probs = probs[nonzero_prob].view(*probs.shape[:-1], -1)
while start_idx < X_discrete_all.shape[-3]:
end_idx = min(start_idx + self.batch_limit, X_discrete_all.shape[-3])
acq_values = self.acq_function(X_discrete_all[..., start_idx:end_idx, :, :])
acq_values_list.append(acq_values)
start_idx += self.batch_limit
# this is batch_shape x n_discrete
acq_values = torch.cat(acq_values_list, dim=-1)
# now weight the acquisition values by probabilities
return (acq_values * probs).sum(dim=-1)
class MCProbabilisticReparameterization(AbstractProbabilisticReparameterization):
"""Acquisition Function Wrapper that leverages MC-based probabilistic reparameterization."""
def __init__(
self,
acq_function: AcquisitionFunction,
dim: int,
integer_indices: Optional[List[int]] = None,
integer_bounds: Optional[Tensor] = None,
categorical_features: Optional[Dict[int, int]] = None,
batch_limit: Optional[int] = None,
apply_numeric: bool = False,
mc_samples: int = 1024,
grad_estimator: str = "reinforce",
tau: float = 0.1,
) -> None:
super().__init__(
acq_function=acq_function,
dim=dim,
integer_indices=integer_indices,
integer_bounds=integer_bounds,
categorical_features=categorical_features,
batch_limit=batch_limit,
apply_numeric=apply_numeric,
)
if self.batch_limit is None:
self.batch_limit = mc_samples
self.grad_estimator = grad_estimator
self._pr_acq_function = _MCProbabilisticReparameterization()
# create input transform
self.input_transform = get_probabilistic_reparameterization_input_transform(
dim=dim,
integer_indices=integer_indices,
integer_bounds=integer_bounds,
categorical_features=categorical_features,
mc_samples=mc_samples,
tau=tau,
)
if grad_estimator in ("arm", "u2g"):
self.input_transform_flip = (
                get_probabilistic_reparameterization_input_transform(
dim=dim,
integer_indices=integer_indices,
integer_bounds=integer_bounds,
categorical_features=categorical_features,
mc_samples=mc_samples,
flip=True,
)
)
else:
self.input_transform_flip = None
def forward(self, X: Tensor) -> Tensor:
"""
        Evaluate the expectation of the acquisition function under MC-based
        probabilistic reparameterization by delegating to a custom autograd
        Function that implements the gradient estimator specified by
        `grad_estimator`.
"""
return self._pr_acq_function.apply(
X,
self.acq_function,
self.input_transform,
self.input_transform_flip,
self.batch_limit,
self.integer_indices,
self.cont_indices,
self.categorical_indices,
self.grad_estimator,
self.one_hot_to_numeric,
self.ma_counter,
self.ma_hidden,
)
class _MCProbabilisticReparameterization(Function):
r"""Evaluate the acquisition function use a custom MC gradient estimator."""
@staticmethod
def forward(
ctx,
input: Tensor,
acq_function: AcquisitionFunction,
input_tf: InputTransform,
input_tf_flip: Optional[InputTransform],
batch_limit: Optional[int],
integer_indices: Tensor,
cont_indices: Tensor,
categorical_indices: Tensor,
grad_estimator: str,
one_hot_to_numeric: Optional[OneHotToNumeric],
ma_counter: Optional[Tensor],
ma_hidden: Optional[Tensor],
):
"""Evaluate the expectation of the acquisition function under
probabilistic reparameterization. Compute this in chunks of size
batch_limit to enable scaling to large numbers of samples from the
proposal distribution.
"""
with ExitStack() as es:
if ctx.needs_input_grad[0]:
es.enter_context(torch.enable_grad())
if cont_indices.shape[0] > 0:
# only require gradient for continuous parameters
ctx.cont_input = input[..., cont_indices].detach().requires_grad_(True)
cont_idx = 0
cols = []
for col in range(input.shape[-1]):
# cont_indices is sorted in ascending order
if (
cont_idx < cont_indices.shape[0]
and col == cont_indices[cont_idx]
):
cols.append(ctx.cont_input[..., cont_idx])
cont_idx += 1
else:
cols.append(input[..., col])
input = torch.stack(cols, dim=-1)
else:
ctx.cont_input = None
ctx.input = input
ctx.integer_indices = integer_indices
ctx.discrete_indices = input_tf["round"].discrete_indices
ctx.cont_indices = cont_indices
ctx.categorical_indices = categorical_indices
ctx.ma_counter = ma_counter
ctx.ma_hidden = ma_hidden
tilde_x_samples = input_tf(input.unsqueeze(-3))
# save the rounding component
rounding_component = tilde_x_samples.clone()
if integer_indices.shape[0] > 0:
input_integer_params = input[..., integer_indices].unsqueeze(-3)
rounding_component[..., integer_indices] = (
(tilde_x_samples[..., integer_indices] - input_integer_params > 0)
| (input_integer_params == 1)
).to(tilde_x_samples)
if categorical_indices.shape[0] > 0:
rounding_component[..., categorical_indices] = tilde_x_samples[
..., categorical_indices
]
ctx.rounding_component = rounding_component[..., ctx.discrete_indices]
ctx.tau = input_tf["round"].tau
if hasattr(input_tf["round"], "base_samples"):
ctx.base_samples = input_tf["round"].base_samples.detach()
# save the probabilities
if "unnormalize" in input_tf:
unnormalized_input = input_tf["unnormalize"](input)
else:
unnormalized_input = input
# this is only for the integer parameters
ctx.prob = input_tf["round"].get_rounding_prob(unnormalized_input)
if categorical_indices.shape[0] > 0:
ctx.base_samples_categorical = input_tf[
"round"
].base_samples_categorical.clone()
# compute the acquisition function where inputs are rounded according to base_samples < prob
ctx.tilde_x_samples = tilde_x_samples
ctx.grad_estimator = grad_estimator
acq_values_list = []
start_idx = 0
if one_hot_to_numeric is not None:
tilde_x_samples = one_hot_to_numeric(tilde_x_samples)
while start_idx < tilde_x_samples.shape[-3]:
end_idx = min(start_idx + batch_limit, tilde_x_samples.shape[-3])
acq_values = acq_function(tilde_x_samples[..., start_idx:end_idx, :, :])
acq_values_list.append(acq_values)
start_idx += batch_limit
acq_values = torch.cat(acq_values_list, dim=-1)
ctx.mean_acq_values = acq_values.mean(
dim=-1
) # average over samples from proposal distribution
ctx.acq_values = acq_values
# update moving average baseline
ctx.ma_hidden = ma_hidden.clone()
ctx.ma_counter = ctx.ma_counter.clone()
# update in place
decay = 0.7
ma_counter.add_(1)
ma_hidden.sub_((ma_hidden - acq_values.detach().mean()) * (1 - decay))
if ctx.needs_input_grad[0]:
if grad_estimator in ("arm", "u2g"):
if input_tf["round"].categorical_starts.shape[0] > 0:
raise NotImplementedError
# use the same base samples in input_tf_flip
if (
not hasattr(input_tf_flip, "base_samples")
or input_tf_flip.base_samples.shape[-2] != input.shape[-2]
):
input_tf_flip["round"].base_samples = (
input_tf["round"].base_samples.detach().clone()
)
ctx.base_samples = input_tf_flip["round"].base_samples.detach()
with torch.no_grad():
tilde_x_samples_flip = input_tf_flip(input.unsqueeze(-3))
# save the rounding component
ctx.rounding_component_flip = (
tilde_x_samples_flip[..., integer_indices]
- input[..., integer_indices].unsqueeze(-3)
> 0
).to(tilde_x_samples_flip)
# compute the acquisition function where inputs are rounded according to base_samples > 1-prob
# This is used for the ARM/U2G gradient estimators
if one_hot_to_numeric is not None:
tilde_x_samples_flip = one_hot_to_numeric(
tilde_x_samples_flip
)
acq_values_flip_list = []
start_idx = 0
while start_idx < tilde_x_samples_flip.shape[-3]:
end_idx = min(
start_idx + batch_limit, tilde_x_samples_flip.shape[-3]
)
acq_values_flip = acq_function(
tilde_x_samples_flip[..., start_idx:end_idx, :, :]
)
acq_values_flip_list.append(acq_values_flip)
start_idx += batch_limit
acq_values_flip = torch.cat(acq_values_flip_list, dim=-1)
ctx.mean_acq_values_flip = acq_values_flip.mean(
dim=-1
) # average over samples from proposal distribution
ctx.acq_values_flip = acq_values_flip
return ctx.mean_acq_values.detach()
@staticmethod
def backward(ctx, grad_output):
"""
Compute the gradient of the expectation of the acquisition function
with respect to the parameters of the proposal distribution using
Monte Carlo.
"""
# this is overwriting the entire gradient w.r.t. x'
# x' has shape batch_shape x q x d
if ctx.needs_input_grad[0]:
acq_values = ctx.acq_values
mean_acq_values = ctx.mean_acq_values
tilde_x_samples = ctx.tilde_x_samples
integer_indices = ctx.integer_indices
cont_indices = ctx.cont_indices
discrete_indices = ctx.discrete_indices
rounding_component = ctx.rounding_component
input = ctx.input
# retrieve only the ordinal parameters
expanded_acq_values = acq_values.view(*acq_values.shape, 1, 1).expand(
acq_values.shape + rounding_component.shape[-2:]
)
prob = ctx.prob.unsqueeze(-3)
if ctx.grad_estimator in ("arm", "u2g"):
rounding_component_flip = ctx.rounding_component_flip
acq_values_flip = ctx.acq_values_flip
mean_acq_values_flip = ctx.mean_acq_values_flip
expanded_acq_values_flip = acq_values_flip.view(
*acq_values_flip.shape, 1, 1
).expand(acq_values_flip.shape + rounding_component_flip.shape[-2:])
if ctx.grad_estimator == "arm":
sample_level = (
(expanded_acq_values_flip - expanded_acq_values)
* (ctx.base_samples - 0.5)
* torch.abs(rounding_component_flip - rounding_component)
)
elif ctx.grad_estimator == "u2g":
prob_abs = prob.clone()
prob_abs[prob_abs < 0.5] = 1 - prob_abs[prob_abs < 0.5]
sample_level = (
0.5
* (expanded_acq_values_flip - expanded_acq_values)
* prob_abs
* (rounding_component_flip - rounding_component)
)
elif ctx.grad_estimator == "reinforce":
sample_level = expanded_acq_values * (rounding_component - prob)
elif ctx.grad_estimator == "reinforce_ma":
# use reinforce with the moving average baseline
if ctx.ma_counter == 0:
baseline = 0.0
else:
decay = 0.7
baseline = ctx.ma_hidden / (1.0 - torch.pow(decay, ctx.ma_counter))
sample_level = (expanded_acq_values - baseline) * (
rounding_component - prob
)
grads = (sample_level / ctx.tau).mean(dim=-3)
new_grads = (
grad_output.view(
*grad_output.shape,
*[1 for _ in range(grads.ndim - grad_output.ndim)]
)
.expand(*grad_output.shape, *input.shape[-2:])
.clone()
)
# multiply upstream grad_output by new gradients
new_grads[..., discrete_indices] *= grads
# use autograd for gradients w.r.t. the continuous parameters
if ctx.cont_input is not None:
auto_grad = torch.autograd.grad(
# note: this multiplies the gradient of mean_acq_values w.r.t to input
# by grad_output
mean_acq_values,
ctx.cont_input,
grad_outputs=grad_output,
)[0]
# overwrite grad_output since the previous step already applied the chain rule
new_grads[..., cont_indices] = auto_grad
return (
new_grads,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
)
return None, None, None, None, None, None, None, None, None, None, None, None
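# Hedged usage sketch (not part of the original module): wraps a standard
# analytic acquisition function with the MC probabilistic reparameterization
# above for a toy 3-dim problem whose last dimension is an integer in
# {0, ..., 4} (normalized to the unit cube). The model, data, and shapes are
# illustrative assumptions, not part of this repository's experiment code.
if __name__ == "__main__":
    from botorch.acquisition import ExpectedImprovement
    from botorch.models import SingleTaskGP
    d = 3
    train_X = torch.rand(10, d, dtype=torch.double)
    train_X[:, 2] = torch.randint(0, 5, (10,), dtype=torch.double) / 4.0
    train_Y = train_X.sum(dim=-1, keepdim=True)
    model = SingleTaskGP(train_X, train_Y)  # hyperparameters left at defaults for brevity
    ei = ExpectedImprovement(model=model, best_f=train_Y.max())
    pr_ei = MCProbabilisticReparameterization(
        acq_function=ei,
        dim=d,
        integer_indices=[2],
        integer_bounds=torch.tensor([[0.0], [4.0]], dtype=torch.double),
        mc_samples=64,
    )
    X = torch.rand(2, 1, d, dtype=torch.double, requires_grad=True)
    acq_val = pr_ei(X)  # expectation of EI under the rounding distribution
    acq_val.sum().backward()  # gradients flow to both continuous and integer dims
    print(acq_val, X.grad.shape)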
|
bo_pr-main
|
discrete_mixed_bo/probabilistic_reparameterization.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Problems with only binary variables.
"""
from typing import Optional
import numpy as np
import torch
from torch import Tensor
from discrete_mixed_bo.problems.base import DiscreteTestProblem
# Code for the contamination problem is adapted from:
# https://github.com/QUVA-Lab/COMBO/blob/master/COMBO/experiments/test_functions/binary_categorical.py.
def generate_contamination_dynamics(dim, random_seed=None):
n_stages = dim
n_simulations = 100
init_alpha = 1.0
init_beta = 30.0
contam_alpha = 1.0
contam_beta = 17.0 / 3.0
restore_alpha = 1.0
restore_beta = 3.0 / 7.0
init_Z = np.random.RandomState(random_seed).beta(
init_alpha, init_beta, size=(n_simulations,)
)
lambdas = np.random.RandomState(random_seed).beta(
contam_alpha, contam_beta, size=(n_stages, n_simulations)
)
gammas = np.random.RandomState(random_seed).beta(
restore_alpha, restore_beta, size=(n_stages, n_simulations)
)
return init_Z, lambdas, gammas
def _contamination(x, dim, cost, init_Z, lambdas, gammas, U, epsilon):
assert x.size == dim
rho = 1.0
n_simulations = 100
Z = np.zeros((x.size, n_simulations))
Z[0] = (
lambdas[0] * (1.0 - x[0]) * (1.0 - init_Z) + (1.0 - gammas[0] * x[0]) * init_Z
)
for i in range(
1,
dim,
):
Z[i] = (
lambdas[i] * (1.0 - x[i]) * (1.0 - Z[i - 1])
+ (1.0 - gammas[i] * x[i]) * Z[i - 1]
)
below_threshold = Z < U
constraints = np.mean(below_threshold, axis=1) - (1.0 - epsilon)
return np.sum(x * cost - rho * constraints)
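# Hedged illustrative sketch (not part of the original module): evaluates the raw
# contamination objective for one random prevention plan over 10 stages. The
# cost vector, threshold U, and epsilon below are placeholder assumptions.
def _example_contamination_score():
    dim = 10
    init_Z, lambdas, gammas = generate_contamination_dynamics(dim=dim, random_seed=0)
    x = np.random.RandomState(0).randint(0, 2, dim)  # random binary prevention plan
    return _contamination(
        x=x,
        dim=dim,
        cost=np.ones(dim),
        init_Z=init_Z,
        lambdas=lambdas,
        gammas=gammas,
        U=0.1,
        epsilon=0.05,
    )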
class Contamination(DiscreteTestProblem):
"""
Contamination Control Problem.
The search space consists of only binary variables.
"""
def __init__(
self,
dim: int,
lamda: float = 0.0,
noise_std: Optional[float] = None,
negate: bool = False,
random_seed: int = 0,
) -> None:
self.dim = dim
self._bounds = [(0.0, 1.0) for _ in range(dim)]
super().__init__(
noise_std=noise_std, negate=negate, integer_indices=list(range(dim))
)
self.lamda = lamda
self.init_Z, self.lambdas, self.gammas = generate_contamination_dynamics(
dim=dim, random_seed=random_seed
)
def evaluate_true(self, X: Tensor) -> Tensor:
res = torch.cat([self._evaluate_single(x) for x in X.view(-1, self.dim)], dim=0)
return res.view(X.shape[:-1])
def _evaluate_single(self, x: Tensor) -> Tensor:
assert x.dim() == 1
if x.dim() == 2:
x = x.squeeze(0)
evaluation = _contamination(
x=(x.cpu() if x.is_cuda else x).numpy(),
dim=self.dim,
cost=np.ones(x.numel()),
init_Z=self.init_Z,
lambdas=self.lambdas,
gammas=self.gammas,
U=0.1,
epsilon=0.05,
)
evaluation += self.lamda * float(torch.sum(x))
return evaluation * x.new_ones((1,)).float()
class LABS(DiscreteTestProblem):
"""
    Low auto-correlation binary sequences (LABS) problem. This problem is adapted from:
https://github.com/aryandeshwal/MerCBO/blob/main/MerCBO/experiments/test_functions/labs.py
"""
def __init__(
self,
dim: int,
noise_std: Optional[float] = None,
negate: bool = False,
random_seed: int = 0,
) -> None:
self.dim = dim
self._bounds = [(0.0, 1.0) for _ in range(dim)]
super().__init__(
noise_std=noise_std, negate=negate, integer_indices=list(range(dim))
)
def evaluate_true(self, X: Tensor) -> Tensor:
return torch.tensor(
[self._evaluate_single(x) for x in X.view(-1, self.dim).cpu()],
dtype=X.dtype,
device=X.device,
).view(X.shape[:-1])
def _evaluate_single(self, x: Tensor) -> np.ndarray:
assert x.dim() == 1
if x.dim() == 2:
x = x.squeeze(0)
x = x.numpy()
N = x.shape[0]
E = 0
for k in range(1, N):
C_k = 0
for j in range(0, N - k - 1):
C_k += (-1) ** (1 - x[j] * x[j + k])
E += C_k**2
return -1.0 * N / (2 * E)
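# Hedged usage sketch (not part of the original module): evaluates the LABS
# problem above on a small batch of random binary sequences; the returned values
# are the negated merit factors. The batch size and dimension are placeholder
# assumptions.
if __name__ == "__main__":
    problem = LABS(dim=20)
    X = torch.randint(0, 2, (4, 20)).double()
    print(problem.evaluate_true(X))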
|
bo_pr-main
|
discrete_mixed_bo/problems/binary.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import pickle
import random
import sys
from itertools import product
from logging import Logger
from typing import Optional, Union
import numpy as np
import torch
from botorch.test_functions.base import MultiObjectiveTestProblem
from discrete_mixed_bo.problems.base import DiscreteTestProblem
class NASHPOBench2API:
def __init__(
self,
data: dict,
seed: int = 0,
logger: Optional[Logger] = None,
verbose: bool = False,
):
self.logger = logger
self.verbose = verbose
if logger is None:
self.logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
self.logger.addHandler(handler)
self.logger.propagate = False
self.bench12 = data["bench12"]
self.cellinfo = data["cellinfo"]
self.avgaccs200 = data["avgaccs200"]
        # set search space
self.cellcodes = sorted(list(set(self.cellinfo["hash"].keys())))
self.lrs = sorted(list(set(self.bench12["lr"].values())))
self.batch_sizes = sorted(list(set(self.bench12["batch_size"].values())))
self.seeds = sorted(list(set(self.bench12["seed"].values())))
self.epochs = [12, 200]
# set objects for log
self._init_logdata()
# set seed
if seed:
self.set_seed(seed)
else:
self.seed = self.seeds[0]
def _init_logdata(self):
## acc
self.acc_trans = []
self.best_acc = -1
self.best_acc_trans = []
## cost
self.total_cost = 0.0
self.total_cost_trans = []
self.best_cost = None
## key
self.key_trans = []
self.best_key = None
self.best_key_trans = []
# set verbose
if self.verbose:
self.logger.setLevel(logging.DEBUG)
else:
self.logger.setLevel(logging.INFO)
return
def __len__(self):
return len(self.cellcodes) * len(self.lrs) * len(self.batch_sizes)
def __str__(self):
return "NAS-HPO-Bench-II"
def get_key_from_idx(self, idx: int):
key = {
"cellcode": self.cellcodes[
int(idx / (len(self.lrs) * len(self.batch_sizes)))
],
"lr": self.lrs[int(idx / len(self.batch_sizes)) % len(self.lrs)],
"batch_size": self.batch_sizes[idx % len(self.batch_sizes)],
}
return key
def get_idx_from_key(self, key: dict):
cellidx = self.cellcodes.index(key["cellcode"])
lridx = self.lrs.index(key["lr"])
batchidx = self.batch_sizes.index(key["batch_size"])
return (
cellidx * len(self.lrs) * len(self.batch_sizes)
+ lridx * len(self.batch_sizes)
+ batchidx
)
def __getitem__(
self,
idx: int,
):
key = self.get_key_from_idx(idx)
return self.query_by_key(**key)
def query_by_index(
self,
cell_idx: int,
lr_idx: int,
batch_size_idx: int,
epoch: Union[int, str] = 12,
iepoch: Optional[int] = None,
seed: Optional[int] = None,
):
cellcode = self.cellcodes[cell_idx]
lr = self.lrs[lr_idx]
batch_size = self.batch_sizes[batch_size_idx]
if seed:
self.set_seed(seed)
return self.query_by_key(cellcode, lr, batch_size, epoch=epoch, iepoch=iepoch)
def query_by_key(
self,
cellcode: str,
lr: float,
batch_size: int,
epoch: Union[int, str] = 12,
mode: str = "valid",
iepoch: Optional[int] = None,
seed: Optional[int] = None,
enable_log: bool = True,
):
# check if a key is valid
self._check_key(cellcode, lr, batch_size, epoch)
assert mode in ["train", "valid", "test"], ValueError(
f"mode {mode} should be train, valid, or test"
)
# iepoch
if epoch != 12:
assert iepoch is None, ValueError(
f"iepoch is not available in epoch {epoch}"
)
        if iepoch is None:
            iepoch = epoch
        assert iepoch <= epoch, ValueError(
            f"iepoch {iepoch} is greater than epoch {epoch}"
)
# set seed
if seed:
self.set_seed(seed)
# cellcode to hashvalue
hashv = self._cellcode2hash(cellcode)
# 12 epoch
if epoch == 12:
# acc
bench = self.bench12
acc = bench[f"{mode}_acc_{iepoch-1}"][(hashv, lr, batch_size, self.seed)]
# cost
            ## if iepoch == 12, the cost is pre-calculated
if iepoch == epoch:
if mode == "train":
cost = bench[f"total_train_time"][
(hashv, lr, batch_size, self.seed)
]
elif mode == "valid":
cost = bench[f"total_trainval_time"][
(hashv, lr, batch_size, self.seed)
]
elif mode == "test":
cost = (
bench[f"total_trainval_time"][
(hashv, lr, batch_size, self.seed)
]
+ bench[f"total_test_time"][(hashv, lr, batch_size, self.seed)]
)
## else (less than 12 epoch)
else:
if mode == "train":
time_modes = ["train"]
elif mode == "valid":
time_modes = ["train", "valid"]
elif mode == "test":
time_modes = ["train", "valid", "test"]
tmp = [
bench[f"{m}_time_{i}"][(hashv, lr, batch_size, self.seed)]
for m, i in product(time_modes, range(iepoch))
]
cost = sum(tmp)
key = {"cellcode": cellcode, "lr": lr, "batch_size": batch_size}
if enable_log:
self._write_log(acc, cost, key)
return acc, cost
# 200 epoch
elif epoch == 200:
# the expected value of test accuracy
bench = self.avgaccs200
acc = bench["avg_acc"][(hashv, lr, batch_size)]
return acc, None
def _write_log(
self,
acc: float,
cost: float,
key: dict,
):
if len(self.acc_trans) == 0:
self.logger.debug(
f' {"valid acc":<8} {"cost":<8} {"cellcode"} {"lr":<7} {"batch_size":<3}'
)
self.logger.debug(
f'{acc:>8.2f} % {cost:>8.2f} sec {key["cellcode"]} {key["lr"]:<7.5f} {key["batch_size"]:<3}'
)
# current status
self.acc_trans.append(acc)
self.key_trans.append(key)
self.total_cost += cost
self.total_cost_trans.append(self.total_cost)
# update the best status
if self.best_key is None or self.best_acc < acc:
self.best_acc, self.best_cost, self.best_key = acc, cost, key
# current best status
self.best_acc_trans.append(self.best_acc)
self.best_key_trans.append(self.best_key)
return
def get_total_cost(self):
return self.total_cost
def get_results(self, epoch: Union[int, str] = "both", mode: str = "test"):
# log
self.logger.info("-" * 23 + " finished " + "-" * 23)
self.logger.info("The best setting is")
self.logger.info(
f' {"valid acc":<8} {"cost":<8} {"cellcode"} {"lr":<7} {"batch_size":<3}'
)
self.logger.info(
f'{self.best_acc:>8.2f} % {self.best_cost:>8.2f} sec {self.best_key["cellcode"]} {self.best_key["lr"]:<7.5f} {self.best_key["batch_size"]:<3}'
)
self.logger.info(
f" in {len(self.key_trans)} trials ({self.total_cost:.2f} sec)"
)
# get the test accuracies of the best-valid-acc model (finalaccs)
if epoch == "both":
epochs = [12, 200]
else:
epochs = [epoch]
self.logger.info("-" * 56)
self.final_accs = []
for e in epochs:
final_acc, _ = self.query_by_key(
**self.best_key, epoch=e, mode=mode, enable_log=False
)
self.final_accs.append(final_acc)
self.logger.info(f"{e}-epoch {mode} accuracy is {final_acc:.2f}%")
self.logger.info("-" * 56)
# return results
nlist = [
"acc_trans",
"key_trans",
"best_acc_trans",
"best_key_trans",
"total_cost_trans",
"final_accs",
]
        return {n: getattr(self, n) for n in nlist}
def reset_logdata(self, logger: Optional[Logger] = None, verbose: bool = None):
if logger is not None:
self.logger = logger
if verbose is not None:
self.verbose = verbose
self._init_logdata()
return
def _cellcode2hash(self, cellcode: str):
return self.cellinfo["hash"][cellcode]
def get_random_key(self):
cellcode = random.choice(self.cellcodes)
lr = random.choice(self.lrs)
batch_size = random.choice(self.batch_sizes)
return {"cellcode": cellcode, "lr": lr, "batch_size": batch_size}
def _check_key(self, cellcode: str, lr: float, batch_size: int, epoch: int):
if cellcode not in self.cellcodes:
raise ValueError(f"choose a cellcode {cellcode} from search space.")
if lr not in self.lrs:
raise ValueError(f"choose lr from {self.lrs}.")
if batch_size not in self.batch_sizes:
raise ValueError(f"choose batch size from {self.batch_sizes}.")
if epoch not in self.epochs:
raise ValueError(f"choose epoch from {self.epochs}.")
return
def get_search_space(self):
return self.cellcodes, self.lrs, self.batch_sizes
def set_seed(self, seed: int):
if seed in self.seeds:
self.seed = seed
else:
raise ValueError(f"choose a seed value from {self.seeds}.")
# class NASHPOBenchII(MultiObjectiveTestProblem, DiscreteTestProblem):
class NASHPOBenchII(DiscreteTestProblem):
"""
NAS-HPO-Bench-II dataset that jointly optimizes the architecture and hyperparameters on a simplified NAS-Bench-201
search space.
"""
_bounds = [(0, 3) for _ in range(6)] + [(0, 5), (0, 7)]
    # first 6: NAS dimensions -- specify the operation on each cell edge
    # final 2: hyperparameters -- batch size and learning rate (log-ordinal variables)
_ref_point = [
0.0933,
0.0144,
    ]  # <- found by running this python file as __main__, using the botorch heuristic for finding a reference point
# (see the main code block to see how this is determined)
dim: int = 8
def __init__(
self,
data: dict,
nb201_path: str = None,
num_objectives: int = 2,
noise_std: Optional[float] = None,
negate: bool = True,
use_12_epoch_result: bool = False,
use_log: bool = False,
):
"""
Args:
            data: the loaded NAS-HPO-Bench-II dataset (a dict)
nb201_path: Optional. Compulsory if num_objectives > 1. Path to stored NATS-Bench dataset
num_objectives: 1 or 2.
1: only validation error is returned
2: (validation error, latency) --> both need to be minimized
            noise_std: standard deviation of the observation noise
            negate: if True, negate the objective values (for maximization)
use_12_epoch_result: if True, return the 12-epoch result (otherwise return 200-epoch)
"""
super(NASHPOBenchII, self).__init__(
negate=negate,
noise_std=noise_std,
categorical_indices=list(range(6)),
integer_indices=[6, 7],
)
assert num_objectives in [1, 2]
if num_objectives > 1:
raise NotImplementedError
# assert nb201_path is not None, 'NB201 path is needed in multi-objective mode!'
self.num_objectives = num_objectives
self.use_12_epoch_result = use_12_epoch_result
self.api = NASHPOBench2API(data=data)
# self.nb201_api = nats_bench.create(nb201_path, 'tss', fast_mode=True, verbose=True)
self.nb201_api = None
# note this is specified by the NAS-HPO-Bench-II authors
self.primitives = {
0: "nor_conv_3x3",
1: "avg_pool_3x3",
2: "skip_connect",
3: "none",
}
self.use_log = use_log
def parse_input(self, x_array):
"""Parse an array input into a format understood by the NAS-HPO-Bench-II API"""
assert x_array.shape[0] == self.dim
x_array = x_array.int().detach().cpu().numpy()
nas_code, bs_code, lr_code = x_array[:6], x_array[6], x_array[7]
cellcode = f"{nas_code[0]}|{nas_code[1]}{nas_code[2]}|{nas_code[3]}{nas_code[4]}{nas_code[5]}"
bs = int(2 ** (bs_code + 4)) # so 0 -> 16, 1 -> 32, ..., 5 -> 512
lr = float(
0.003125 * (2**lr_code)
) # so 0 -> 0.003125, 1 -> 0.00625, ..., 7 -> 0.4
return cellcode, bs, lr
def get_nb201_code(self, x_array):
"""Get a NAS-Bench-201/NATS-Bench style code -- used to query the model parameters & FLOPS"""
assert x_array.shape[0] == self.dim
nas_code = x_array[:6]
op_list = [self.primitives[i] for i in nas_code]
arch_str = get_string_from_op_list(op_list)
return arch_str
def nashpo2nb201(self, nas_code):
"""Convert a NAS-HPO-Bench-II code back to NATSBench/NAS-Bench-201 code"""
nas_code_splitted = nas_code.split("|")
op_list = []
for node in nas_code_splitted: # e.g. ['1', '12', '123']
for prim in node:
op_list.append(self.primitives[int(prim)])
arch_str = get_string_from_op_list(op_list)
return arch_str
def _evaluate_single(self, input: torch.Tensor):
cellcode, bs, lr = self.parse_input(input)
accs, costs = [], []
for seed in self.api.seeds:
acc, cost = self.api.query_by_key(
cellcode=cellcode,
batch_size=bs,
lr=lr,
epoch=12 if self.use_12_epoch_result else 200,
seed=seed,
)
accs.append(acc)
costs.append(cost)
print(accs)
acc = sum(accs) / len(accs)
if costs[0] is not None:
cost = sum(costs) / len(costs)
err = (100.0 - acc) / 100.0
if self.num_objectives == 1:
return torch.tensor(err, dtype=torch.float)
else: # also query the model size
nb201_str = self.get_nb201_code(input)
arch_index = self.nb201_api.query_index_by_arch(nb201_str)
cost_result = self.nb201_api.get_cost_info(arch_index, "cifar10")
latency = cost_result["latency"]
return torch.tensor([err, latency], dtype=torch.float).view(-1)
def evaluate_true(self, X: torch.Tensor) -> torch.Tensor:
res = (
torch.stack(
[self._evaluate_single(x) for x in X.cpu().view(-1, self.dim)],
)
.to(X)
.view(*X.shape[:-1], self.num_objectives)
)
if self.use_log:
res = res.log()
if self.num_objectives == 1:
return res.squeeze(-1)
return res
|
bo_pr-main
|
discrete_mixed_bo/problems/nashpobench2.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Problems with mixed integer and continuous search spaces.
References
.. [Hansen2019]
N. Hansen, D. Brockhoff, O. Mersmann, T. Tusar, D. Tusar, O. A. ElHara, Phillipe R. Sampaio, A. Atamna, K. Varelas, U. Batu, D. M. Nguyen, F. Matzner, A. Auger. COmparing Continuous Optimizers: numbbo/COCO on Github. Zenodo, DOI:10.5281/zenodo.2594848, March 2019.
This code leverages the COCO library (see [Hansen2019]_) and is adapted from
https://github.com/aryandeshwal/HyBO/blob/master/experiments/test_functions/mixed_integer.py
"""
# import cocoex
from typing import Optional
# prepare mixed integer suite
# suite_name = "bbob-mixint"
# output_folder = "cocex-optimize-fmin"
# suite = cocoex.Suite(suite_name, "", "")
import numpy as np
import torch
from botorch.test_functions.synthetic import SyntheticTestFunction
from botorch.utils.sampling import manual_seed
from torch import Tensor
from discrete_mixed_bo.problems.base import DiscreteTestProblem
class BBOBTestFunction(SyntheticTestFunction):
r"""Base class for BBOB functions.
d-dimensional function (usually evaluated on the hypercube `[-5, 5]^d`).
    See [Hansen2019]_ for details.
"""
def __init__(
self,
dim: int = 2,
noise_std: Optional[float] = None,
negate: bool = False,
seed: int = 0,
) -> None:
self.dim = dim
self._bounds = [(-5.0, 5.0) for _ in range(self.dim)]
with manual_seed(seed):
# sample x_opt uniformly in [-4, 4]^d
self._optimizers = 8 * torch.rand(dim) - 4
# sample f_opt from a Cauchy distribution with median of 0
# and roughly 50% of the values between -100 and 100. Clamp
# the value to be within -1000 and 1000. Round the value to
            # two decimal places
self._optimal_value = min(
max(
round(10000 * (torch.randn(1) / torch.randn(1)).item()) / 100,
-1000.0,
),
1000.0,
)
super().__init__(noise_std=noise_std, negate=negate)
class Sphere(BBOBTestFunction):
r"""Sphere function.
d-dimensional function (usually evaluated on the hypercube `[-5, 5]^d`):
f(x) = \sum_{i=1}^d (x_i - x_{i,opt})^2 + f_opt
    See [Hansen2019]_ for details.
"""
def evaluate_true(self, X: Tensor) -> Tensor:
return (X - self.optimizers).pow(2).sum(dim=-1) + self._optimal_value
# class MixedIntegerCOCO(DiscreteTestProblem):
# """
# Mixed Integer Black box optimization using cocoex library
# """
# def __init__(self, negate: bool = False,noise_std: Optional[float] = None, problem_id: Optional[str] = None) -> None:
# self.problem_id = problem_id
# self.problem = suite.get_problem(self.problem_id)
# self.integer_indices = list(range(self.problem.number_of_integer_variables))
# self._bounds = list(zip(self.problem.lower_bounds, self.problem.upper_bounds))
# super().__init__(negate=negate, noise_std=noise_std)
# def evaluate_true(self, X: Tensor) -> Tensor:
# X_shape = X.shape
# if X.dim() > 2:
# X = X.view(-1, X.shape[-1])
# return torch.tensor([self.problem(xi) for xi in X.cpu().numpy()], dtype=X.dtype, device=X.device).view(X_shape[:-1])
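# Illustrative usage sketch (assumes BoTorch's SyntheticTestFunction registers
# the sampled optimum as the `optimizers` buffer): evaluate the shifted Sphere
# function at its optimizer and at the origin.
if __name__ == "__main__":
    problem = Sphere(dim=3, seed=0)
    print(problem(problem.optimizers))  # approximately problem._optimal_value
    print(problem(torch.zeros(3)))  # value at the origin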
|
bo_pr-main
|
discrete_mixed_bo/problems/coco_mixed_integer.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Pest control problem with categorical decision variables (pesticide choice per stage).
"""
from collections import OrderedDict
from typing import Optional
import numpy as np
import torch
from torch import Tensor
from discrete_mixed_bo.problems.base import DiscreteTestProblem
def _pest_spread(curr_pest_frac, spread_rate, control_rate, apply_control):
if apply_control:
next_pest_frac = (1.0 - control_rate) * curr_pest_frac
else:
next_pest_frac = spread_rate * (1 - curr_pest_frac) + curr_pest_frac
return next_pest_frac
def _pest_control_score(x, seed=None):
U = 0.1
n_stages = x.size
n_simulations = 100
init_pest_frac_alpha = 1.0
init_pest_frac_beta = 30.0
spread_alpha = 1.0
spread_beta = 17.0 / 3.0
control_alpha = 1.0
control_price_max_discount = {1: 0.2, 2: 0.3, 3: 0.3, 4: 0.0}
tolerance_develop_rate = {1: 1.0 / 7.0, 2: 2.5 / 7.0, 3: 2.0 / 7.0, 4: 0.5 / 7.0}
control_price = {1: 1.0, 2: 0.8, 3: 0.7, 4: 0.5}
# below two changes over stages according to x
control_beta = {1: 2.0 / 7.0, 2: 3.0 / 7.0, 3: 3.0 / 7.0, 4: 5.0 / 7.0}
payed_price_sum = 0
above_threshold = 0
if seed is not None:
init_pest_frac = np.random.RandomState(seed).beta(
init_pest_frac_alpha, init_pest_frac_beta, size=(n_simulations,)
)
else:
init_pest_frac = np.random.beta(
init_pest_frac_alpha, init_pest_frac_beta, size=(n_simulations,)
)
curr_pest_frac = init_pest_frac
for i in range(n_stages):
if seed is not None:
spread_rate = np.random.RandomState(seed).beta(
spread_alpha, spread_beta, size=(n_simulations,)
)
else:
spread_rate = np.random.beta(
spread_alpha, spread_beta, size=(n_simulations,)
)
do_control = x[i] > 0
if do_control:
if seed is not None:
control_rate = np.random.RandomState(seed).beta(
control_alpha, control_beta[x[i]], size=(n_simulations,)
)
else:
control_rate = np.random.beta(
control_alpha, control_beta[x[i]], size=(n_simulations,)
)
next_pest_frac = _pest_spread(
curr_pest_frac, spread_rate, control_rate, True
)
            # tolerance to the applied pesticide type develops over the stages
control_beta[x[i]] += tolerance_develop_rate[x[i]] / float(n_stages)
            # a discount applies based on how often this pesticide type is used
payed_price = control_price[x[i]] * (
1.0
- control_price_max_discount[x[i]]
/ float(n_stages)
* float(np.sum(x == x[i]))
)
else:
next_pest_frac = _pest_spread(curr_pest_frac, spread_rate, 0, False)
payed_price = 0
payed_price_sum += payed_price
above_threshold += np.mean(curr_pest_frac > U)
curr_pest_frac = next_pest_frac
return payed_price_sum + above_threshold
class PestControl(DiscreteTestProblem):
"""
Pest Control Problem.
"""
def __init__(
self,
dim: int = 25,
n_choice: int = 5,
noise_std: Optional[float] = None,
negate: bool = False,
) -> None:
self._bounds = [(0, n_choice - 1) for _ in range(dim)]
self.dim = dim
super().__init__(
negate=negate,
noise_std=noise_std,
categorical_indices=list(range(self.dim)),
)
self.seed = 0
def evaluate_true(self, X: Tensor) -> Tensor:
return self.compute(X.cpu().view(-1, self.dim)).to(X).view(X.shape[:-1])
def compute(self, x):
if not isinstance(x, torch.Tensor):
x = torch.tensor(x)
x = x.int()
if x.dim() == 1:
x = x.reshape(1, -1)
res = torch.tensor([self._compute(x_) for x_ in x])
        # # Add a small amount of noise to prevent training instabilities
# res += 1e-6 * torch.randn_like(res)
return res
def _compute(self, x):
assert x.numel() == self.dim
if x.dim() == 2:
x = x.squeeze(0)
evaluation = _pest_control_score(
(x.cpu() if x.is_cuda else x).numpy(), seed=self.seed
)
# evaluation = _pest_control_score((x.cpu() if x.is_cuda else x).numpy(), seed=None)
res = float(evaluation) * x.new_ones((1,)).float()
return res
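# Illustrative usage sketch: evaluate a small random batch of pesticide
# schedules. Inputs are category indices in [0, n_choice - 1]; lower objective
# values (spraying cost plus over-threshold penalty) are better.
if __name__ == "__main__":
    problem = PestControl(dim=25, n_choice=5)
    X = torch.randint(0, 5, (4, problem.dim)).float()
    print(problem(X))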
|
bo_pr-main
|
discrete_mixed_bo/problems/pest.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Environmental model calibration problem from:
https://github.com/aryandeshwal/HyBO/blob/master/experiments/test_functions/em_func.py
"""
from typing import Optional
import numpy as np
import torch
from torch import Tensor
from discrete_mixed_bo.problems.base import DiscreteTestProblem
def c(s, t, M, D, L, tau) -> float:
    val = (M * np.exp(-(s**2) / (4 * D * t))) / np.sqrt(4 * np.pi * D * t)
if t > tau:
val += (
(t > tau) * M * np.exp(-((s - L) ** 2) / (4 * D * (t - tau)))
) / np.sqrt(4 * np.pi * D * (t - tau))
return val
def objective(x: np.ndarray) -> float:
tau = 30.01 + x[0] / 1000
M = 7 + ((x[1] + 1) * 6) / 2
D = 0.02 + ((x[2] + 1) * 0.10) / 2
L = 0.01 + ((x[3] + 1) * 2.99) / 2
val = 0.0
for s in [0, 1, 2.5]:
for t in [15, 30, 45, 60]:
val += (
c(s=s, t=t, M=10, D=0.07, L=1.505, tau=30.1525)
- c(s=s, t=t, M=M, D=D, L=L, tau=tau)
) ** 2
return val
class Environmental(DiscreteTestProblem):
"""
Environmental model function
"""
_bounds = [
(0, 284),
(-1, 1),
(-1, 1),
(-1, 1),
]
dim = 4
def __init__(self, noise_std: Optional[float] = None, negate: bool = False) -> None:
super().__init__(noise_std=noise_std, negate=negate, integer_indices=[0])
def evaluate_true(self, X: Tensor) -> Tensor:
return torch.tensor(
[objective(x=x) for x in X.view(-1, self.dim).cpu().numpy()],
dtype=X.dtype,
device=X.device,
).view(X.shape[:-1])
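# Illustrative usage sketch: evaluate the squared-error misfit at a few random
# parameter settings. The misfit vanishes when (tau, M, D, L) match the
# reference values (30.1525, 10, 0.07, 1.505) hard-coded in `objective`.
if __name__ == "__main__":
    problem = Environmental()
    lb, ub = problem.bounds
    X = lb + (ub - lb) * torch.rand(3, problem.dim)
    X[..., problem.integer_indices] = X[..., problem.integer_indices].round()
    print(problem(X))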
|
bo_pr-main
|
discrete_mixed_bo/problems/environmental.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
bo_pr-main
|
discrete_mixed_bo/problems/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
3-Objective Electrospun Oil Sorbent optimization problem
References
.. [Wang2020]
B. Wang, J. Cai, C. Liu, J. Yang, X. Ding. Harnessing a Novel Machine-Learning-Assisted Evolutionary Algorithm to Co-optimize Three Characteristics of an Electrospun Oil Sorbent. ACS Applied Materials & Interfaces, 2020.
"""
from typing import List, Optional
import numpy as np
import torch
from botorch.test_functions.base import MultiObjectiveTestProblem
from botorch.utils.torch import BufferDict
from torch import Tensor
from discrete_mixed_bo.problems.base import DiscreteTestProblem
class OilSorbent(DiscreteTestProblem, MultiObjectiveTestProblem):
"""All objectives should be minimized.
The reference point comes from using the infer_reference_point
method on the full discrete search space.
"""
# _max_hv = 1177461.0 # full discrete case
_max_hv = 1279774.75
_discrete_values = {
# "V1": [3 / 7, 4 / 6, 1, 6 / 4, 7 / 3],
"V2": [0.7, 1, 1.4, 1.7, 2],
"V3": [12, 15, 18, 21, 24],
"V4": [0.12, 0.135, 0.15, 0.165, 0.18],
# "V5": [0, 0.04, 0.08, 0.10, 0.20],
"V6": [16, 20, 26, 28],
"V7": [0.41, 0.6, 0.84, 1.32],
}
_bounds = [
(0, 1), # continuous
(0, 4), # 5 ordinal values
(0, 4), # 5 ordinal values
(0, 4), # 5 ordinal values
(0, 1), # continuous
(0, 3), # 4 ordinal values
(0, 3), # 4 ordinal values
]
dim = 7
num_objectives = 3
# _ref_point = [-133.9736, -4.8289, 38.6565] # full discrete case
_ref_point = [-125.3865, -57.8292, 43.2665]
def __init__(
self,
noise_std: Optional[float] = None,
negate: bool = False,
integer_indices: Optional[List[int]] = None,
) -> None:
if integer_indices is None:
integer_indices = [1, 2, 3, 5, 6]
MultiObjectiveTestProblem.__init__(
self,
noise_std=noise_std,
negate=negate,
)
self._setup(integer_indices=integer_indices)
self.discrete_values = BufferDict()
for k, v in self._discrete_values.items():
self.discrete_values[k] = torch.tensor(v, dtype=torch.float)
self.discrete_values[k] /= self.discrete_values[k].max()
def evaluate_true(self, X: Tensor) -> Tensor:
X_split = list(torch.split(X, 1, -1))
# remap from integer space to proper space
for i, V_i in enumerate(X_split):
name = f"V{i+1}"
if name in self.discrete_values:
X_split[i] = self.discrete_values[name][V_i.view(-1).long()].view(
V_i.shape
)
V1, V2, V3, V4, V5, V6, V7 = X_split
wca = (
-197.0928
- 78.3309 * V1
+ 98.6355 * V2
+ 300.0701 * V3
+ 89.8360 * V4
+ 208.2343 * V5
+ 332.9341 * V6
+ 135.6621 * V7
- 11.0715 * V1 * V2
+ 201.8934 * V1 * V3
+ 17.1270 * V1 * V4
+ 2.5198 * V1 * V5
- 109.3922 * V1 * V6
+ 30.1607 * V1 * V7
- 46.1790 * V2 * V3
+ 19.2888 * V2 * V4
- 102.9493 * V2 * V5
- 19.1245 * V2 * V6
+ 53.6297 * V2 * V7
- 73.0649 * V3 * V4
- 37.7181 * V3 * V5
- 219.1268 * V3 * V6
- 55.3704 * V3 * V7
+ 3.8778 * V4 * V5
- 6.9252 * V4 * V6
- 105.1650 * V4 * V7
- 34.3181 * V5 * V6
- 36.3892 * V5 * V7
- 82.3222 * V6 * V7
- 16.7536 * V1.pow(2)
- 45.6507 * V2.pow(2)
- 91.4134 * V3.pow(2)
- 76.8701 * V5.pow(2)
)
q = (
-212.8531
+ 245.7998 * V1
- 127.3395 * V2
+ 305.8461 * V3
+ 638.1605 * V4
+ 301.2118 * V5
- 451.3796 * V6
- 115.5485 * V7
+ 42.8351 * V1 * V2
+ 262.3775 * V1 * V3
- 103.5274 * V1 * V4
- 196.1568 * V1 * V5
- 394.7975 * V1 * V6
- 176.3341 * V1 * V7
+ 74.8291 * V2 * V3
+ 4.1557 * V2 * V4
- 133.8683 * V2 * V5
+ 65.8711 * V2 * V6
- 42.6911 * V2 * V7
- 323.9363 * V3 * V4
- 107.3983 * V3 * V5
- 323.2353 * V3 * V6
+ 46.9172 * V3 * V7
- 144.4199 * V4 * V5
+ 272.3729 * V4 * V6
+ 49.0799 * V4 * V7
+ 318.4706 * V5 * V6
- 236.2498 * V5 * V7
+ 252.4848 * V6 * V7
- 286.0182 * V4.pow(2)
+ 393.5992 * V6.pow(2)
)
sigma = (
7.7696
+ 15.4344 * V1
- 10.6190 * V2
- 17.9367 * V3
+ 17.1385 * V4
+ 2.5026 * V5
- 24.3010 * V6
+ 10.6058 * V7
- 1.2041 * V1 * V2
- 37.2207 * V1 * V3
- 3.2265 * V1 * V4
+ 7.3121 * V1 * V5
+ 52.3994 * V1 * V6
+ 9.7485 * V1 * V7
- 15.9371 * V2 * V3
- 1.1706 * V2 * V4
- 2.6297 * V2 * V5
+ 7.0225 * V2 * V6
- 1.4938 * V2 * V7
+ 30.2786 * V3 * V4
+ 14.5061 * V3 * V5
+ 48.5021 * V3 * V6
- 11.4857 * V3 * V7
- 3.1381 * V4 * V5
- 14.9747 * V4 * V6
+ 4.5204 * V4 * V7
- 17.6907 * V5 * V6
- 19.2489 * V5 * V7
- 9.8219 * V6 * V7
- 18.7356 * V1.pow(2)
+ 12.1928 * V2.pow(2)
- 17.5460 * V4.pow(2)
+ 5.4997 * V5.pow(2)
- 26.2718 * V6.pow(2)
)
return -torch.cat([wca, q, sigma], dim=-1)
class OilSorbentMixed(DiscreteTestProblem, MultiObjectiveTestProblem):
"""All objectives should be minimized.
The reference point comes from using the infer_reference_point
method on the full discrete search space where the continuous parameters are discretized into 100 values.
"""
# _max_hv = 1177461.0 # full discrete case
# _max_hv = 1279774.75 # approximate for continuous
_discrete_values = {
# "V1": [3 / 7, 4 / 6, 1, 6 / 4, 7 / 3],
"V2": [0.7, 1, 1.4, 1.7, 2],
"V3": [12, 15, 18, 21, 24],
"V4": [0.12, 0.135, 0.15, 0.165, 0.18],
# "V5": [0, 0.04, 0.08, 0.10, 0.20],
"V6": [16, 20, 26, 28],
"V7": [0.41, 0.6, 0.84, 1.32],
}
_bounds = [
(0, 1), # continuous
(0, 4), # 5 ordinal values
(0, 4), # 5 ordinal values
(0, 4), # 5 ordinal values
(0, 1), # continuous
(0, 3), # 4 ordinal values
(0, 3), # 4 ordinal values
]
dim = 7
num_objectives = 3
# _ref_point = [-133.9736, -4.8289, 38.6565] # full discrete case
_ref_point = [-125.3865, -57.8292, 43.2665]
def __init__(
self,
noise_std: Optional[float] = None,
negate: bool = False,
integer_indices: Optional[List[int]] = None,
) -> None:
if integer_indices is None:
integer_indices = [1, 2, 3, 5, 6]
# integer_indices = list(range(self.dim))
MultiObjectiveTestProblem.__init__(
self,
noise_std=noise_std,
negate=negate,
# integer_indices=integer_indices
)
self._setup(integer_indices=integer_indices)
self.discrete_values = BufferDict()
for k, v in self._discrete_values.items():
self.discrete_values[k] = torch.tensor(v, dtype=torch.float)
self.discrete_values[k] /= self.discrete_values[k].max()
def evaluate_true(self, X: Tensor) -> Tensor:
X_split = list(torch.split(X, 1, -1))
# remap from integer space to proper space
for i, V_i in enumerate(X_split):
name = f"V{i+1}"
if name in self.discrete_values:
X_split[i] = self.discrete_values[name][V_i.view(-1).long()].view(
V_i.shape
)
V1, V2, V3, V4, V5, V6, V7 = X_split
wca = (
-197.0928
- 78.3309 * V1
+ 98.6355 * V2
+ 300.0701 * V3
+ 89.8360 * V4
+ 208.2343 * V5
+ 332.9341 * V6
+ 135.6621 * V7
- 11.0715 * V1 * V2
+ 201.8934 * V1 * V3
+ 17.1270 * V1 * V4
+ 2.5198 * V1 * V5
- 109.3922 * V1 * V6
+ 30.1607 * V1 * V7
- 46.1790 * V2 * V3
+ 19.2888 * V2 * V4
- 102.9493 * V2 * V5
- 19.1245 * V2 * V6
+ 53.6297 * V2 * V7
- 73.0649 * V3 * V4
- 37.7181 * V3 * V5
- 219.1268 * V3 * V6
- 55.3704 * V3 * V7
+ 3.8778 * V4 * V5
- 6.9252 * V4 * V6
- 105.1650 * V4 * V7
- 34.3181 * V5 * V6
- 36.3892 * V5 * V7
- 82.3222 * V6 * V7
- 16.7536 * V1.pow(2)
- 45.6507 * V2.pow(2)
- 91.4134 * V3.pow(2)
- 76.8701 * V5.pow(2)
)
q = (
-212.8531
+ 245.7998 * V1
- 127.3395 * V2
+ 305.8461 * V3
+ 638.1605 * V4
+ 301.2118 * V5
- 451.3796 * V6
- 115.5485 * V7
+ 42.8351 * V1 * V2
+ 262.3775 * V1 * V3
- 103.5274 * V1 * V4
- 196.1568 * V1 * V5
- 394.7975 * V1 * V6
- 176.3341 * V1 * V7
+ 74.8291 * V2 * V3
+ 4.1557 * V2 * V4
- 133.8683 * V2 * V5
+ 65.8711 * V2 * V6
- 42.6911 * V2 * V7
- 323.9363 * V3 * V4
- 107.3983 * V3 * V5
- 323.2353 * V3 * V6
+ 46.9172 * V3 * V7
- 144.4199 * V4 * V5
+ 272.3729 * V4 * V6
+ 49.0799 * V4 * V7
+ 318.4706 * V5 * V6
- 236.2498 * V5 * V7
+ 252.4848 * V6 * V7
- 286.0182 * V4.pow(2)
+ 393.5992 * V6.pow(2)
)
sigma = (
7.7696
+ 15.4344 * V1
- 10.6190 * V2
- 17.9367 * V3
+ 17.1385 * V4
+ 2.5026 * V5
- 24.3010 * V6
+ 10.6058 * V7
- 1.2041 * V1 * V2
- 37.2207 * V1 * V3
- 3.2265 * V1 * V4
+ 7.3121 * V1 * V5
+ 52.3994 * V1 * V6
+ 9.7485 * V1 * V7
- 15.9371 * V2 * V3
- 1.1706 * V2 * V4
- 2.6297 * V2 * V5
+ 7.0225 * V2 * V6
- 1.4938 * V2 * V7
+ 30.2786 * V3 * V4
+ 14.5061 * V3 * V5
+ 48.5021 * V3 * V6
- 11.4857 * V3 * V7
- 3.1381 * V4 * V5
- 14.9747 * V4 * V6
+ 4.5204 * V4 * V7
- 17.6907 * V5 * V6
- 19.2489 * V5 * V7
- 9.8219 * V6 * V7
- 18.7356 * V1.pow(2)
+ 12.1928 * V2.pow(2)
- 17.5460 * V4.pow(2)
+ 5.4997 * V5.pow(2)
- 26.2718 * V6.pow(2)
)
return -torch.cat([wca, q, sigma], dim=-1)
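# Illustrative usage sketch: draw a few random mixed designs and evaluate the
# three (negated) objectives. Ordinal dimensions are sampled on their index
# grid and mapped to physical levels inside `evaluate_true`.
if __name__ == "__main__":
    problem = OilSorbentMixed()
    lb, ub = problem.bounds
    X = lb + (ub - lb) * torch.rand(3, problem.dim)
    X[..., problem.integer_indices] = X[..., problem.integer_indices].round()
    print(problem(X))  # shape: 3 designs x 3 objectives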
|
bo_pr-main
|
discrete_mixed_bo/problems/oil_sorbent.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Welded Beam problem from https://link.springer.com/content/pdf/10.1007/s00158-018-2182-1.pdf
"""
from copy import deepcopy
from math import sqrt
from typing import Optional, Tuple
import numpy as np
import torch
from botorch.test_functions.base import ConstrainedBaseTestProblem
from torch import Tensor
from discrete_mixed_bo.problems.base import DiscreteTestProblem
class WeldedBeam(DiscreteTestProblem, ConstrainedBaseTestProblem):
dim = 6
_bounds = [
[0, 1], # binary, welding type
[0, 3], # categorical, metal material
[0.0625, 2], # cont/ordinal, thickness of the weld
[0.1, 10], # cont/ordinal, length of the welded joint
[2, 20], # cont/ordinal, width of the beam
[0.0625, 2], # cont/ordinal, thickness of the beam
]
num_constraints = 5
F = 6000
delta_max = 0.25
L = 14
material_params = {
# C1, C2, sigma_d, E, G
0: (0.1047, 0.0481, 3e4, 3e7, 12e6), # steel
1: (0.0489, 0.0224, 8e3, 14e6, 6e6), # cast iron
2: (0.5235, 0.2405, 5e3, 1e7, 4e6), # aluminum
3: (0.5584, 0.2566, 8e3, 16e6, 1e7), # brass
}
# In the discrete setting, the optimal value is 1.9553
# in the continuous setting, the optimal value is
def __init__(
self,
noise_std: Optional[float] = None,
negate: bool = False,
continuous: bool = False,
) -> None:
integer_indices = [0]
if not continuous:
self._orig_cont_bounds_list = deepcopy(self._bounds)
for i in range(2, 6):
n_ordinal = int((self._bounds[i][1] - self._bounds[i][0]) / 0.0625)
self._bounds[i][0] = 0
self._bounds[i][1] = n_ordinal - 1
integer_indices.append(i)
super().__init__(
negate=negate,
noise_std=noise_std,
integer_indices=integer_indices,
categorical_indices=[1],
)
self.continuous = continuous
if not continuous:
self.register_buffer(
"_orig_cont_bounds_tensor",
torch.tensor(self._orig_cont_bounds_list).t(),
)
def _split_X(
self, X: Tensor
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:
X_int = 0.0625 * X[..., 2:].round() + self._orig_cont_bounds_tensor[0, 2:]
(X2, X3, X4, X5) = torch.split(X_int, 1, dim=-1)
return X[..., 0], X[..., 1], X2, X3, X4, X5
def _evaluate_true(self, X):
params = self._get_params(X=X)
m = params["m"]
C1, C2, sigma_d, E, G = self.material_params[int(m.item())]
t = params["t"]
l = params["l"]
return (1 + C1) * (params["w"] * t + l) * params["h"].pow(2) + C2 * t * params[
"b"
] * (self.L + l)
def _get_params(self, X):
if not self.continuous:
w, m, h, l, t, b = self._split_X(X=X)
else:
w, m, h, l, t, b = torch.split(X, 1, dim=-1)
if w == 0:
A = sqrt(2) * h * l
J = A * ((h + t).pow(2) / 4 + l.pow(2) / 12)
R = 0.5 * torch.sqrt(l.pow(2) + (h + t).pow(2))
elif w == 1:
A = sqrt(2) * h * (t + l)
J = sqrt(2) * h * l * ((h + t).pow(2) / 4 + l.pow(2) / 12) + sqrt(
2
) * h * t * ((h + l).pow(2) / 4 + t.pow(2) / 12)
R = 0.5 * torch.max(
torch.sqrt(l.pow(2) + (h + t).pow(2)),
torch.sqrt(t.pow(2) + (h + l).pow(2)),
)
else:
raise ValueError
cos_theta = l / (2 * R)
return {
"w": w,
"m": m,
"h": h,
"l": l,
"t": t,
"b": b,
"A": A,
"J": J,
"R": R,
"cos_theta": cos_theta,
}
def evaluate_true(self, X: Tensor) -> Tensor:
return torch.stack([self._evaluate_true(x) for x in X.view(-1, 6)], dim=0).view(
X.shape[:-1]
)
def evaluate_slack_true(self, X: Tensor) -> Tensor:
return torch.stack(
[self._evaluate_slack_true(x) for x in X.view(-1, 6)], dim=0
).view(*X.shape[:-1], self.num_constraints)
def _evaluate_slack_true(self, X: Tensor) -> Tensor:
params = self._get_params(X=X)
w = params["w"]
m = params["m"]
h = params["h"]
l = params["l"]
t = params["t"]
b = params["b"]
A = params["A"]
J = params["J"]
R = params["R"]
cos_theta = params["cos_theta"]
C1, C2, sigma_d, E, G = self.material_params[int(m.item())]
sigma = 6 * self.F * self.L / (t.pow(2) * b)
delta = 4 * self.F * self.L**3 / (E * t.pow(3) * b)
P_c = (
4.013
* t
* b.pow(3)
* sqrt(E * G)
/ (6 * self.L**2)
* (1 - t / (4 * self.L) * sqrt(E / G))
)
tau_prime = torch.sqrt(self.F / A)
tau_double_prime = self.F * (self.L + 0.5 * l) * R / J
tau = torch.sqrt(
tau_prime.pow(2)
+ tau_double_prime.pow(2)
+ 2 * tau_prime * tau_double_prime * cos_theta
)
g1 = 0.577 * sigma_d - tau
g2 = sigma_d - sigma
g3 = b - h
g4 = P_c - self.F
g5 = self.delta_max - delta
# positive slack implies feasibility
return torch.stack([g1, g2, g3, g4, g5], dim=-1)
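# Illustrative usage sketch: sample random discrete designs and inspect the
# objective and constraint slacks (positive slack means the constraint holds).
if __name__ == "__main__":
    problem = WeldedBeam()
    lb, ub = problem.bounds
    X = (lb + (ub - lb) * torch.rand(2, problem.dim)).round()  # all dims are discrete here
    print(problem(X))  # fabrication cost to minimize
    print(problem.evaluate_slack(X) >= 0)  # feasibility of each constraint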
|
bo_pr-main
|
discrete_mixed_bo/problems/welded_beam.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from math import exp, log
from typing import Dict, Optional
import numpy as np
import pandas as pd
import torch
import xgboost
from sklearn import datasets, metrics, model_selection
from discrete_mixed_bo.problems.base import DiscreteTestProblem
class XGBoostHyperparameter(DiscreteTestProblem):
dim: int = 11
def __init__(
self,
task="mnist",
split=0.3,
seed=None,
negate: bool = False,
noise_std: Optional[float] = None,
data: Optional[Dict[str, np.ndarray]] = None,
):
"""
        XGBoost hyperparameter tuning on a scikit-learn classification or regression task
Args:
            task: one of 'mnist', 'boston', 'diabetes', or 'airfoil'
            split: fraction of the data held out as the test set
            seed: random seed for the train/test split and the model
            negate: if True, negate the objective (for maximization)
            noise_std: standard deviation of the observation noise
We optimize the following hyperparameters, in the order presented below:
Categoricals:
0. booster type -- cat (2 choices) -- gbtree, dart
1. grow policy -- cat (2 choices) -- depthwise, lossguide
            2. training objective -- cat (2 choices for classification: multi:softmax, multi:softprob;
               3 choices for regression: reg:linear, reg:gamma, reg:tweedie)
Integers:
3. max depth -- int -- [1, 10]
4. min_child_weight: -- uniform int -- [1,10]
Floats:
5. log10-learning rate -- uniform float -- [-5, 0]
6. gamma -- uniform float -- [0, 10]
7. subsample -- log float -- [0.1, 1]
8. lambda (L2 regularization weight) -- log float -- [1e-3, 5]
9. alpha (L1 regularization weight) -- log float -- [1e-3, 5]
10. colsample_by_tree -- uniform float -- (0, 1]
"""
self.task = task
self.split = split
self.seed = seed
if task == "airfoil":
if data is not None:
# data comes from https://archive.ics.uci.edu/ml/machine-learning-databases/00291/airfoil_self_noise.dat
self.reg_or_clf = "reg"
self.data = data
if data is None:
self.data, self.reg_or_clf = get_data_and_task_type(self.task)
stratify = self.data["target"] if self.reg_or_clf == "clf" else None
if self.reg_or_clf == "clf":
self._bounds = [
(0, 1),
(0, 1),
(0, 1),
(3, 15),
(100, 500),
(-5, -1),
(0, 10),
(log(0.1), log(1)),
(log(1e-3), log(5)),
(log(1e-3), log(5)),
(0, 1),
]
else:
self._bounds = [
(0, 1),
(0, 1),
(0, 2),
(1, 10),
(1, 10),
(-5, -1),
(0, 10),
(0.1, 1),
(0, 5),
(0, 5),
(0.3, 1),
]
super(XGBoostHyperparameter, self).__init__(
negate=negate,
noise_std=noise_std,
categorical_indices=[2],
integer_indices=[0, 1, 3, 4],
)
(
self.train_x,
self.test_x,
self.train_y,
self.test_y,
) = model_selection.train_test_split(
self.data["data"],
self.data["target"],
test_size=self.split,
stratify=stratify,
random_state=self.seed,
)
def _evaluate_single(self, x: torch.Tensor):
model = self.create_model(x)
model.fit(self.train_x, self.train_y)
y_pred = model.predict(self.test_x)
# 1-acc for minimization
if self.reg_or_clf == "clf":
score = 1 - metrics.accuracy_score(self.test_y, y_pred)
elif self.reg_or_clf == "reg":
score = metrics.mean_squared_error(self.test_y, y_pred)
else:
raise NotImplementedError
return torch.tensor(score, dtype=torch.float)
def _parse_input(self, x) -> dict:
"""Parse the input into a dictionary"""
kwargs = {}
x = x.detach().numpy()
assert len(x) == self.dim
args = [
"booster",
"grow_policy",
"objective",
"max_depth",
"min_child_weight",
"learning_rate",
"gamma",
"subsample",
"reg_lambda",
"reg_alpha",
"colsample_bytree",
]
for i, val in enumerate(x):
if args[i] == "booster":
kwargs[args[i]] = ["gbtree", "dart"][int(val)]
elif args[i] == "grow_policy":
kwargs[args[i]] = ["depthwise", "lossguide"][int(val)]
elif args[i] == "objective":
if self.reg_or_clf == "clf":
kwargs[args[i]] = ["multi:softmax", "multi:softprob"][int(val)]
else:
kwargs[args[i]] = ["reg:linear", "reg:gamma", "reg:tweedie"][
int(val)
]
elif args[i] == "learning_rate":
kwargs[args[i]] = float(10**val)
elif args[i] in ("subsample", "reg_lambda", "reg_alpha"):
kwargs[args[i]] = float(val)
elif args[i] in ["max_depth", "min_child_weight", "n_estimators"]:
kwargs[args[i]] = int(val)
else:
kwargs[args[i]] = float(val)
# print(kwargs)
return kwargs
def create_model(self, x):
xgboost_kwargs = self._parse_input(x)
if self.reg_or_clf == "clf":
model = xgboost.XGBClassifier(
eval_metric="mlogloss", **xgboost_kwargs, seed=self.seed, n_jobs=10
)
else:
model = xgboost.XGBRegressor(**xgboost_kwargs, seed=self.seed, n_jobs=10)
return model
def evaluate_true(self, X: torch.Tensor) -> torch.Tensor:
res = (
torch.stack(
[self._evaluate_single(x) for x in X.cpu().view(-1, self.dim)],
)
.to(X)
.view(*X.shape[:-1])
)
return res
def get_data_and_task_type(task):
if task == "boston":
data = datasets.load_boston()
reg_or_clf = "reg" # regression or classification
elif task == "diabetes":
data = datasets.load_diabetes()
reg_or_clf = "reg" # regression or classification
elif task == "airfoil":
# data comes from https://archive.ics.uci.edu/ml/machine-learning-databases/00291/airfoil_self_noise.dat
current_dir = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(current_dir, "data", "airfoil_self_noise.dat")
df = pd.read_csv(path, header=None, sep="\t")
data = {"data": df.iloc[:, :5].values, "target": df.iloc[:, 5].values}
reg_or_clf = "reg"
elif task == "mnist":
data = datasets.load_digits()
reg_or_clf = "clf"
else:
raise NotImplementedError("Bad choice for task")
return data, reg_or_clf
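# Illustrative usage sketch (assumes xgboost and scikit-learn are installed):
# tune on the diabetes regression task and evaluate one random configuration.
if __name__ == "__main__":
    problem = XGBoostHyperparameter(task="diabetes", seed=0)
    lb, ub = problem.bounds
    x = lb + (ub - lb) * torch.rand(problem.dim)
    x[problem.integer_indices] = x[problem.integer_indices].round()
    x[problem.categorical_indices] = x[problem.categorical_indices].round()
    print(problem(x))  # test-set MSE (lower is better)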
|
bo_pr-main
|
discrete_mixed_bo/problems/xgboost_hp.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from typing import Any, Dict, Optional, Tuple
import gpytorch.settings as gpt_settings
import numpy as np
import torch
from botorch.models.gp_regression_mixed import MixedSingleTaskGP
from botorch.models.transforms.outcome import Standardize
from botorch.utils.transforms import normalize
from gpytorch.likelihoods.gaussian_likelihood import FixedNoiseGaussianLikelihood
from torch import Tensor
from discrete_mixed_bo.problems.base import DiscreteTestProblem
class Chemistry(DiscreteTestProblem):
def __init__(
self,
data: Optional[Dict[str, Any]] = None,
noise_std: Optional[float] = None,
negate: bool = False,
) -> None:
if data is None:
current_dir = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(current_dir, "data", "chem_model_info")
with open(path, "rb") as f:
data = torch.load(f)
self.dim = 5
bounds = data["bounds"]
self._bounds = bounds.t().tolist()
super().__init__(
negate=negate, noise_std=noise_std, categorical_indices=list(range(3))
)
self.register_buffer("_model_bounds", bounds)
# construct surrogate
X_norm = normalize(data["X"], bounds=self._model_bounds)
Y = data["Y"]
train_Yvar = torch.full_like(Y, 1e-10) * Y.std().pow(2)
# override the default min fixed noise level
# this requires https://github.com/cornellius-gp/gpytorch/pull/2132
lb = float("-inf")
with gpt_settings.min_fixed_noise(
float_value=lb, double_value=lb, half_value=lb
):
self.model = MixedSingleTaskGP(
train_X=X_norm,
train_Y=Y,
cat_dims=list(range(3)),
outcome_transform=Standardize(m=1),
likelihood=FixedNoiseGaussianLikelihood(noise=train_Yvar.squeeze(-1)),
)
self.model.load_state_dict(data["state_dict"])
def evaluate_true(self, X: Tensor) -> Tensor:
X_norm = normalize(X, self._model_bounds)
with torch.no_grad():
return -self.model.posterior(X_norm.unsqueeze(-2)).mean.view(
X_norm.shape[:-1]
)
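# Illustrative usage sketch (assumes the bundled "data/chem_model_info" file and
# a gpytorch build that supports the min_fixed_noise setting are available):
if __name__ == "__main__":
    problem = Chemistry()
    lb, ub = problem.bounds
    X = lb + (ub - lb) * torch.rand(2, problem.dim)
    X[..., problem.categorical_indices] = X[..., problem.categorical_indices].round()
    print(problem(X))  # negated surrogate posterior mean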
|
bo_pr-main
|
discrete_mixed_bo/problems/chemistry.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Optional, Tuple
import numpy as np
import torch
from sklearn.svm import SVR
from torch import Tensor
from xgboost import XGBRegressor
from discrete_mixed_bo.problems.base import DiscreteTestProblem
def process_uci_data(
data: np.ndarray, n_features: int
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
# The slice dataset can be downloaded from: https://archive.ics.uci.edu/ml/datasets/Relative+location+of+CT+slices+on+axial+axis
# Get the input data
X = data[:, :-1]
X -= X.min(axis=0)
X = X[:, X.max(axis=0) > 1e-6] # Throw away constant dimensions
X = X / (X.max(axis=0) - X.min(axis=0))
X = 2 * X - 1
assert X.min() == -1 and X.max() == 1
# Standardize targets
y = data[:, -1]
y = (y - y.mean()) / y.std()
# Only keep 10,000 data points and n_features features
shuffled_indices = np.random.RandomState(0).permutation(X.shape[0])[
:10000
] # Use seed 0
X, y = X[shuffled_indices], y[shuffled_indices]
# Use Xgboost to figure out feature importances and keep only the most important features
xgb = XGBRegressor(max_depth=8, random_state=0).fit(X, y)
inds = (-xgb.feature_importances_).argsort()
X = X[:, inds[:n_features]]
# Train/Test split on a subset of the data
train_n = int(math.floor(0.50 * X.shape[0]))
train_x, train_y = X[:train_n], y[:train_n]
test_x, test_y = X[train_n:], y[train_n:]
return train_x, train_y, test_x, test_y
class SVMFeatureSelection(DiscreteTestProblem):
def __init__(
self,
dim: int,
data: np.ndarray,
noise_std: Optional[float] = None,
negate: bool = False,
) -> None:
n_features = dim - 3
self.train_x, self.train_y, self.test_x, self.test_y = process_uci_data(
data=data, n_features=n_features
)
self.n_features = n_features
self.dim = dim
self._bounds = [(0.0, 1.0) for _ in range(self.dim)]
super().__init__(
negate=negate, noise_std=noise_std, integer_indices=list(range(n_features))
)
def evaluate_true(self, X: Tensor) -> Tensor:
return torch.tensor(
[self._evaluate_true(x.numpy()) for x in X.view(-1, self.dim).cpu()],
dtype=X.dtype,
device=X.device,
).view(X.shape[:-1])
def _evaluate_true(self, x: np.ndarray):
assert x.shape == (self.dim,)
assert (x >= self.bounds[0].cpu().numpy()).all() and (
x <= self.bounds[1].cpu().numpy()
).all()
assert (
(x[: self.n_features] == 0) | (x[: self.n_features] == 1)
).all() # Features must be 0 or 1
inds_selected = np.where(x[: self.n_features] == 1)[0]
if inds_selected.shape[0] == 0:
# if no features, use the mean prediction
pred = self.train_y.mean(axis=0)
else:
epsilon = 0.01 * 10 ** (2 * x[-3]) # Default = 0.1
C = 0.01 * 10 ** (4 * x[-2]) # Default = 1.0
gamma = (
(1 / self.n_features) * 0.1 * 10 ** (2 * x[-1])
) # Default = 1.0 / self.n_features
model = SVR(C=C, epsilon=epsilon, gamma=gamma)
model.fit(self.train_x[:, inds_selected], self.train_y)
pred = model.predict(self.test_x[:, inds_selected])
mse = ((pred - self.test_y) ** 2).mean(axis=0)
return 1 * math.sqrt(mse) # Return RMSE
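# Illustrative usage sketch with synthetic stand-in data (the experiments use
# the UCI slice localization dataset); the last column is the regression target.
if __name__ == "__main__":
    fake_data = np.random.RandomState(0).randn(200, 20)
    problem = SVMFeatureSelection(dim=13, data=fake_data)
    x = torch.rand(problem.dim)
    x[problem.integer_indices] = x[problem.integer_indices].round()  # binary feature mask
    print(problem(x))  # RMSE of the SVR fit using the selected features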
|
bo_pr-main
|
discrete_mixed_bo/problems/svm.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Constrained engineering design problems with integer and continuous variables.
"""
from math import pi
from typing import Optional, Tuple
import numpy as np
import torch
from botorch.test_functions.base import ConstrainedBaseTestProblem
from torch import Tensor
from discrete_mixed_bo.problems.base import DiscreteTestProblem
class PressureVessel(DiscreteTestProblem, ConstrainedBaseTestProblem):
dim = 4
_bounds = [
(1, 100), # integer
(1, 100), # integer
(10, 200), # continuous
(10, 240), # continuous
]
num_constraints = 3
def __init__(
self,
noise_std: Optional[float] = None,
negate: bool = False,
) -> None:
super().__init__(
negate=negate,
noise_std=noise_std,
integer_indices=[0, 1],
)
@staticmethod
def _split_X(X: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
X_int = 0.0625 * X[..., :2].round()
x1 = X_int[..., 0]
x2 = X_int[..., 1]
x3 = X[..., 2]
x4 = X[..., 3]
return x1, x2, x3, x4
def evaluate_true(self, X):
x1, x2, x3, x4 = self._split_X(X=X)
return (
(0.6224 * x1 * x3 * x4)
+ (1.7781 * x2 * x3 * x3)
+ (3.1661 * x1 * x1 * x4)
+ (19.84 * x1 * x1 * x3)
)
def evaluate_slack_true(self, X: Tensor) -> Tensor:
# positive slack implies feasibility
x1, x2, x3, x4 = self._split_X(X=X)
g1 = x1 - (0.0193 * x3)
g2 = x2 - (0.00954 * x3)
g3 = pi * x3 * x3 * x4 + 4.0 / 3.0 * pi * x3 * x3 * x3 - 1296000
return torch.stack([g1, g2, g3], dim=-1)
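# Illustrative usage sketch: evaluate the vessel cost and constraint slacks for
# a few random designs (positive slack means the constraint is satisfied).
if __name__ == "__main__":
    problem = PressureVessel()
    lb, ub = problem.bounds
    X = lb + (ub - lb) * torch.rand(4, problem.dim)
    X[..., problem.integer_indices] = X[..., problem.integer_indices].round()
    print(problem(X))
    print((problem.evaluate_slack(X) >= 0).all(dim=-1))  # feasibility per design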
|
bo_pr-main
|
discrete_mixed_bo/problems/re_problems.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
from typing import Dict, List, Optional
import torch
from botorch.test_functions.base import BaseTestProblem, MultiObjectiveTestProblem
from botorch.utils.transforms import normalize, unnormalize
from torch import Tensor
from torch.nn import Module
class DiscreteTestProblem(BaseTestProblem):
def __init__(
self,
noise_std: Optional[float] = None,
negate: bool = False,
integer_indices: Optional[List[int]] = None,
categorical_indices: Optional[List[int]] = None,
) -> None:
super().__init__(negate=negate, noise_std=noise_std)
self._setup(
integer_indices=integer_indices, categorical_indices=categorical_indices
)
def _setup(
self,
integer_indices: Optional[List[int]] = None,
categorical_indices: Optional[List[int]] = None,
) -> None:
dim = self.bounds.shape[-1]
discrete_indices = []
if integer_indices is None:
integer_indices = []
if categorical_indices is None:
categorical_indices = []
self.register_buffer(
"_orig_integer_indices", torch.tensor(integer_indices, dtype=torch.long)
)
discrete_indices.extend(integer_indices)
self.register_buffer(
"_orig_categorical_indices",
torch.tensor(sorted(categorical_indices), dtype=torch.long),
)
discrete_indices.extend(categorical_indices)
if len(discrete_indices) == 0:
raise ValueError("Expected at least one discrete feature.")
cont_indices = sorted(list(set(range(dim)) - set(discrete_indices)))
self.register_buffer(
"_orig_cont_indices",
torch.tensor(
cont_indices,
dtype=torch.long,
device=self.bounds.device,
),
)
self.register_buffer("_orig_bounds", self.bounds.clone())
# remap inputs so that categorical features come after all of
# the ordinal features
remapper = torch.zeros(
self.bounds.shape[-1], dtype=torch.long, device=self.bounds.device
)
reverse_mapper = remapper.clone()
for i, orig_idx in enumerate(
cont_indices + integer_indices + categorical_indices
):
remapper[i] = orig_idx
reverse_mapper[orig_idx] = i
self.register_buffer("_remapper", remapper)
self.register_buffer("_reverse_mapper", reverse_mapper)
self.bounds = self.bounds[:, remapper]
self.register_buffer("cont_indices", reverse_mapper[cont_indices])
self.register_buffer("integer_indices", reverse_mapper[integer_indices])
self.register_buffer("categorical_indices", reverse_mapper[categorical_indices])
self.effective_dim = (
self.cont_indices.shape[0]
+ self.integer_indices.shape[0]
+ int(sum(self.categorical_features.values()))
)
one_hot_bounds = torch.zeros(
2, self.effective_dim, dtype=self.bounds.dtype, device=self.bounds.device
)
one_hot_bounds[1] = 1
one_hot_bounds[:, self.integer_indices] = self.integer_bounds
one_hot_bounds[:, self.cont_indices] = self.cont_bounds
self.register_buffer("one_hot_bounds", one_hot_bounds)
def forward(self, X: Tensor, noise: bool = True) -> Tensor:
r"""Evaluate the function on a set of points.
Args:
X: A `batch_shape x d`-dim tensor of point(s) at which to evaluate the
function.
noise: If `True`, add observation noise as specified by `noise_std`.
Returns:
A `batch_shape`-dim tensor of function evaluations.
"""
batch = X.ndimension() > 1
X = X if batch else X.unsqueeze(0)
# remap to original space
X = X[..., self._reverse_mapper]
f = self.evaluate_true(X=X)
if noise and self.noise_std is not None:
f += self.noise_std * torch.randn_like(f)
if self.negate:
f = -f
return f if batch else f.squeeze(0)
def evaluate_slack(self, X: Tensor, noise: bool = True) -> Tensor:
r"""Evaluate the constraint function on a set of points.
Args:
X: A `batch_shape x d`-dim tensor of point(s) at which to evaluate the
function.
noise: If `True`, add observation noise as specified by `noise_std`.
Returns:
A `batch_shape x n_constraints`-dim tensor of function evaluations.
"""
batch = X.ndimension() > 1
X = X if batch else X.unsqueeze(0)
# remap to original space
X = X[..., self._reverse_mapper]
f = self.evaluate_slack_true(X=X)
if noise and self.noise_std is not None:
f += self.noise_std * torch.randn_like(f)
return f if batch else f.squeeze(0)
@property
def integer_bounds(self) -> Optional[Tensor]:
if self.integer_indices is not None:
return self.bounds[:, self.integer_indices]
return None
@property
def cont_bounds(self) -> Optional[Tensor]:
if self.cont_indices is not None:
return self.bounds[:, self.cont_indices]
return None
@property
def categorical_bounds(self) -> Optional[Tensor]:
if self.categorical_indices is not None:
return self.bounds[:, self.categorical_indices]
return None
@property
def categorical_features(self) -> Optional[Dict[int, int]]:
# Return dictionary mapping indices to cardinalities
if self.categorical_indices is not None:
categ_bounds = self.categorical_bounds
return OrderedDict(
zip(
self.categorical_indices.tolist(),
(categ_bounds[1] - categ_bounds[0] + 1).long().tolist(),
)
)
return None
@property
def objective_weights(self) -> Optional[Tensor]:
return None
@property
def is_moo(self) -> bool:
return isinstance(self, MultiObjectiveTestProblem) and (
self.objective_weights is None
)
class DiscretizedBotorchTestProblem(DiscreteTestProblem):
r"""Class for converting continuous botorch test problems into
discrete or mixed problems.
"""
def __init__(
self,
problem: BaseTestProblem,
integer_indices: Optional[List[int]] = None,
integer_bounds: Optional[Tensor] = None,
categorical_indices: Optional[List[int]] = None,
categorical_bounds: Optional[Tensor] = None,
) -> None:
Module.__init__(self)
self.problem = problem
self.dim = problem.dim
self.register_buffer("bounds", problem.bounds.clone())
if integer_indices is not None:
self.bounds[:, integer_indices] = integer_bounds
if categorical_indices is not None:
if (categorical_bounds[1] - categorical_bounds[0] < 2).any():
raise ValueError(
"Binary categorical parameters should be specified as integer parameters, not categoricals."
)
self.bounds[:, categorical_indices] = categorical_bounds
self._setup(
integer_indices=integer_indices, categorical_indices=categorical_indices
)
def get_orig_X(self, X: Tensor) -> Tensor:
# normalize from integer bounds to unit cube
unit_X = normalize(X, self._orig_bounds)
# unnormalize from unit cube to original problem bounds
return unnormalize(unit_X, self.problem.bounds)
def evaluate_true(self, X: Tensor) -> Tensor:
r"""
This assumes that only discrete X are passed.
"""
orig_X = self.get_orig_X(X=X)
return self.problem.evaluate_true(orig_X)
def evaluate_slack_true(self, X: Tensor) -> Tensor:
r"""
This assumes that only discrete X are passed.
"""
orig_X = self.get_orig_X(X=X)
        return self.problem.evaluate_slack_true(orig_X)
def forward(self, X: Tensor, noise: bool = True) -> Tensor:
# remap to original indices
X = X[..., self._reverse_mapper]
# remap to original bounds
orig_X = self.get_orig_X(X=X)
return self.problem.forward(X=orig_X, noise=noise)
@property
def __class__(self):
return self.problem.__class__
@property
def ref_point(self) -> Tensor:
return self.problem.ref_point
@property
def num_objectives(self) -> int:
return self.problem.num_objectives
@property
def ref_num_constraints(self) -> int:
return self.problem.num_constraints
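# Illustrative usage sketch: wrap a continuous BoTorch problem and discretize
# its first two dimensions into (assumed) integer levels 0..10.
if __name__ == "__main__":
    from botorch.test_functions.synthetic import Ackley

    base = Ackley(dim=4)
    problem = DiscretizedBotorchTestProblem(
        problem=base,
        integer_indices=[0, 1],
        integer_bounds=torch.tensor([[0.0, 0.0], [10.0, 10.0]]),
    )
    lb, ub = problem.bounds  # continuous dims come first after remapping
    X = lb + (ub - lb) * torch.rand(5, base.dim)
    X[..., problem.integer_indices] = X[..., problem.integer_indices].round()
    print(problem(X))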
|
bo_pr-main
|
discrete_mixed_bo/problems/base.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Tuple, Union
import numpy as np
"""
Problem formulation for the RF Coverage and Capacity Optimization (CCO) problem.
"""
class CCORasterBlanketFormulation:
"""Generate combined reward, over all raster locations, from dual objectives
of minimizing under coverage (holes) and over coverage (interference).
RSRP: Reference Signal Receive Power
Coverage holes: Z = h(x) = weak_coverage_threshold - RSRP_From_TargetCell(x),
where x is the location.
Over coverage: Y = g(x) = sum(RSRP_From_NeighborCells(x))
+ over_coverage_threshold
- RSRP_From_TargetCell(x),
where RSRP_From_TargetCell > weak_coverage_threshold
Suggested : weak_coverage_threshold = -90 dBm, over_coverage_threshold = 6 dBm
Multi-criteria objective formulation:
Objective 1: Min(Sum(f(Z))), f is the activation function
Objective 2: Min(Sum(f(Y))), f is the activation function
f may be sigmoid.
Combined objective := lambda_weight * goal1 + (1 - lambda_weight) * goal2
Metrics: percentages of coverage holes and over coverage
"""
def __init__(
self,
lambda_weight: float,
weak_coverage_threshold: float = -80,
over_coverage_threshold: float = 6,
):
self.lambda_weight = lambda_weight
self.weak_coverage_threshold = weak_coverage_threshold
self.over_coverage_threshold = over_coverage_threshold
def get_objective_value(
self, rsrp_map: np.ndarray, interference_map: np.ndarray
) -> float:
"""Get reward from all the locations in the map"""
f_weak_coverage, g_over_coverage = self.get_reward_components(
rsrp_map=rsrp_map, interference_map=interference_map
)
# calculate the combining reward
reward = (
self.lambda_weight * f_weak_coverage
+ (1 - self.lambda_weight) * g_over_coverage
)
return reward
def get_reward_components(
self, rsrp_map: np.ndarray, interference_map: np.ndarray
) -> Tuple[float, float]:
"""Get individual reward components from all the locations in the map"""
weak_coverage_area, over_coverage_area = self.get_weak_over_coverage_area(
rsrp_map,
interference_map,
self.weak_coverage_threshold,
self.over_coverage_threshold,
)
f_weak_coverage = CCORasterBlanketFormulation.activation_function(
self.weak_coverage_threshold - rsrp_map[weak_coverage_area]
).sum()
g_over_coverage = CCORasterBlanketFormulation.activation_function(
interference_map[over_coverage_area]
+ self.over_coverage_threshold
- rsrp_map[over_coverage_area]
).sum()
return f_weak_coverage, g_over_coverage
def get_weak_over_coverage_area_percentages(
self, rsrp_map: np.ndarray, interference_map: np.ndarray
) -> Tuple[float, float]:
"""Calculate the percentages of coverage hole and over coverage area"""
size = rsrp_map.size
weak_coverage_area, over_coverage_area = self.get_weak_over_coverage_area(
rsrp_map,
interference_map,
self.weak_coverage_threshold,
self.over_coverage_threshold,
)
weak_coverage_percentage = weak_coverage_area.sum() / size
over_coverage_percentage = over_coverage_area.sum() / size
return weak_coverage_percentage, over_coverage_percentage
@staticmethod
def get_weak_over_coverage_area(
rsrp_map: np.ndarray,
interference_map: np.ndarray,
weak_coverage_threshold: float,
over_coverage_threshold: float,
) -> Tuple[np.ndarray, np.ndarray]:
"""Compute the weak coverage and over coverage areas
as 2D boolean indicator matrices.
"""
weak_coverage_area = rsrp_map < weak_coverage_threshold
over_coverage_area = (rsrp_map >= weak_coverage_threshold) & (
interference_map + over_coverage_threshold > rsrp_map
)
return weak_coverage_area, over_coverage_area
@staticmethod
def activation_function(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
"""Sigmoid Function"""
return 1 / (1 + np.exp(-x))
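# Illustrative usage sketch with a synthetic 4x4 grid of RSRP and interference
# values in dBm (real maps come from SimulatedRSRP power maps).
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    rsrp_map = rng.uniform(-110, -60, size=(4, 4))
    interference_map = rng.uniform(-120, -90, size=(4, 4))
    formulation = CCORasterBlanketFormulation(lambda_weight=0.5)
    print(formulation.get_objective_value(rsrp_map, interference_map))
    print(formulation.get_weak_over_coverage_area_percentages(rsrp_map, interference_map))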
|
bo_pr-main
|
discrete_mixed_bo/problems/cco/problem_formulation.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
bo_pr-main
|
discrete_mixed_bo/problems/cco/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import bisect
import glob
import logging
import os
import re
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
# System Constants
MIN_INTERFERENCE_POWER_WATT = 1e-24
class SimulatedRSRP:
"""Class that initializes RSRP and interference maps from disk.
The data is provided in a particular format described below.
Build this class from NPZ files. A single NPZ file consists of several lists and matrices:
data['x'] - a list of x coordinates representing the x-axis of the map
data['y'] - a list of y coordinates representing the y-axis of the map
    data['z'] - a fixed z coordinate of this map
data['ptx'] - Transmit power of the sectors.
    data['Txnpwr'] - a 3D matrix, which is the power map of several sectors located at base station n
data['Txnloc'] - The location of base station n
    An NPZ file is named powermapDT{%d}.npz, where {%d} indicates the downtilt of
all the base stations in this map.
The API consists of the following methods:
get_RSRP_and_interference_powermap:
This method returns the RSRP powermap, interference powermap and
connecting sectors map, given the configuration for each sector.
get_RSRP_and_interference_for_location:
This method returns the RSRP, interference power and connecting sector idx
given a single location and the configuration.
get_configuration_shape:
This method returns the configuration shape. Each configuration is
a 2-D Array with the shape [2, num_total_sectors]. Each column
contains the downtilt and transmit power, respectively, for different sectors.
get_configuration_range:
This method will return the valid ranges for downtilts and transmit powers.
Both are specified as a minimum and maximum value.
get_locations_range:
This method will return the range of the map.
Two Coordinate objects will be returned: xy_min, xy_max
        Location(x, y) is given in meters from the base station center (0, 0).
        A valid location satisfies:
        xy_min.x <= location.x <= xy_max.x, xy_min.y <= location.y <= xy_max.y
powermatrix path will be like:
powermaps_path = "/mnt/shared/yuchenq/power_maps/*.npz" # Now we have downtilt from 0 to 10, 11 files
Sample Code:
min_Tx_power_dBm, max_Tx_power_dBm = 30, 50
simulated_rsrp = SimulatedRSRP.construct_from_npz_files(powermaps_path, (min_Tx_power_dBm, max_Tx_power_dBm))
# Get configuration range for downtilts and powers
(
(min_downtilt, max_downtilt),
(min_tx_power_dBm, max_tx_power_dBm),
    ) = simulated_rsrp.get_configuration_range()
xy_min, xy_max = simulated_rsrp.get_locations_range()
# Discretize power choices to integer values in range
tx_power_choices = list(range(int(min_tx_power_dBm), int(max_tx_power_dBm + 1)))
downtilts_choices = list(range(int(min_downtilt), int(max_downtilt + 1)))
# Get the number of total sectors
    _, num_sectors = simulated_rsrp.get_configuration_shape()
# Random configuration
downtilts_for_sectors = np.random.choice(downtilts_choices, num_sectors)
power_for_sectors = np.random.choice(tx_power_choices, num_sectors)
    configurations = (downtilts_for_sectors, power_for_sectors)
# Get rsrp and interference powermap
rsrp_powermap, interference_powermap, _ = simulated_rsrp.get_RSRP_and_interference_powermap(configurations)
# Get rsrp and interference from location
location = Coordinate(0, 0)
rsrp, interference, _ = simulated_rsrp.get_RSRP_and_interference_for_location(location, configurations)
"""
@dataclass
class Coordinate:
__slots__ = "x", "y"
x: float
y: float
@dataclass
class Powermap:
"""Dataclass to store a powermap with the specific downtilt of all base stations.
power_matrix: 3D matrix
base_station_locations: base station locations
xy_min: minimum x and y
xy_max: maximum x and y
fixed_z: z value
num_sectors_per_base_station: number of sectors of a single base station
"""
power_matrix: np.ndarray
base_station_locations: np.ndarray
xy_min: "SimulatedRSRP.Coordinate"
xy_max: "SimulatedRSRP.Coordinate"
fixed_z: float
num_sectors_per_base_station: List[int]
@dataclass
class Metadata:
"""Dataclass to store map metadata."""
xy_min: "SimulatedRSRP.Coordinate"
xy_max: "SimulatedRSRP.Coordinate"
fixed_z: float
resolution: float
num_sectors_per_base_station: List[int]
def __init__(
self,
powermaps: Dict[int, Any],
min_TX_power_dBm: float,
max_TX_power_dBm: float,
):
self.downtilts_maps = {
float(i): SimulatedRSRP.build_single_powermap(powermaps[i])
for i in powermaps.keys()
}
# Get maps size, resolution and base stations distribution
metadata = self.get_metadata(self.downtilts_maps)
self.xy_min = metadata.xy_min
self.xy_max = metadata.xy_max
        self.fixed_z = metadata.fixed_z
self.resolution = metadata.resolution
self.num_sectors_per_base_station = metadata.num_sectors_per_base_station
self.num_basestations = len(self.num_sectors_per_base_station)
self.num_total_sectors = sum(self.num_sectors_per_base_station)
self.min_TX_power_dBm = min_TX_power_dBm
self.max_TX_power_dBm = max_TX_power_dBm
self.downtilts_keys = np.asarray(list(self.downtilts_maps.keys()))
self.downtilts_keys.sort()
self.min_downtilt = self.downtilts_keys[0]
self.max_downtilt = self.downtilts_keys[-1]
def get_configuration_range(
self,
) -> Tuple[Tuple[float, float], Tuple[float, float]]:
"""Return valid ranges of input configurations, later useful for
`get_RSRP_and_interference_for_location`.
This method returns the following 2-tuple:
1. 2-tuple specifying (min downtilt, max downtilt) range
2. 2-tuple specifying (min Tx power, max Tx power) range
"""
return (
(self.min_downtilt, self.max_downtilt),
(self.min_TX_power_dBm, self.max_TX_power_dBm),
)
def get_locations_range(
self,
) -> Tuple["SimulatedRSRP.Coordinate", "SimulatedRSRP.Coordinate"]:
"""Return the range of x,y in the map."""
return self.xy_min, self.xy_max
def get_configuration_shape(self) -> Tuple[int, int]:
"""Return the number of base stations and sectors.
In tandem with `get_configuration_range`, this method is useful for
constructing valid configuration inputs for the main API calls:
1. `get_RSRP_and_interference_for_location`
2. `get_RSRP_and_interference_powermap`
"""
return (self.num_basestations, self.num_total_sectors)
def get_basestation_and_sector_idx(
self, flattened_sector_idx: int
) -> Tuple[int, int]:
"""Given the flattened sector id, return the base station idx and its sector idx"""
if flattened_sector_idx >= self.num_total_sectors or flattened_sector_idx < 0:
raise ValueError("flattened_sector_id is out of range")
base_station_idx = 0
sector_idx = 0
while (
sector_idx + self.num_sectors_per_base_station[base_station_idx]
) <= flattened_sector_idx:
sector_idx += self.num_sectors_per_base_station[base_station_idx]
base_station_idx += 1
return base_station_idx, flattened_sector_idx - sector_idx
@staticmethod
def watt_to_dBm(x: Union[float, np.ndarray]) -> float:
return 10 * np.log10(x) + 30
@staticmethod
def dBm_to_watt(x: Union[float, np.ndarray]) -> float:
return 10 ** (x / 10.0 - 3)
@staticmethod
def get_nearest_discrete_downtilts(
downtilts_keys: np.ndarray, downtilt: float
) -> Tuple[float, float]:
"""Return the nearest discrete downtilts for the given downtilt.
        downtilts_keys is a sorted 1-D numpy array of the available discrete
        downtilts. Given a downtilt, return the interpolation range
        [lower_downtilt, upper_downtilt].
        If downtilts_keys contains only one downtilt, interpolation is not
        possible; that case is checked by the caller before this method
        is invoked.
"""
# Check the length
if len(downtilts_keys) <= 1:
raise ValueError("Can't do interpolation with only one discrete downtilt")
        # Check if the downtilt is within the configuration range
if downtilt > downtilts_keys[-1] or downtilt < downtilts_keys[0]:
raise ValueError("Downtilt is out of the range")
# Using bisect to find the nearest upper downtilt and lower downtilt indices
upper_downtilt_idx = bisect.bisect(
downtilts_keys, downtilt, hi=len(downtilts_keys) - 1
)
lower_downtilt_idx = upper_downtilt_idx - 1
upper_downtilt = downtilts_keys[upper_downtilt_idx]
lower_downtilt = downtilts_keys[lower_downtilt_idx]
return (lower_downtilt, upper_downtilt)
@staticmethod
def get_resolution_from_powermap(
powermap: "SimulatedRSRP.Powermap",
) -> "SimulatedRSRP.Coordinate":
"""Return the reslution of a Powermap Object.
If the length of one axis of the 2-D map is 1, the resolution for this axis
will be set as 1 for easy calculation of x-y index.
"""
x_len, y_len, _ = powermap.power_matrix.shape
resolution_x = (
1 if x_len == 1 else (powermap.xy_max.x - powermap.xy_min.x) / (x_len - 1)
)
resolution_y = (
1 if y_len == 1 else (powermap.xy_max.y - powermap.xy_min.y) / (y_len - 1)
)
return SimulatedRSRP.Coordinate(resolution_x, resolution_y)
@staticmethod
def get_xy_idx(
resolution: "SimulatedRSRP.Coordinate",
location: "SimulatedRSRP.Coordinate",
xy_min: "SimulatedRSRP.Coordinate",
) -> Tuple[int, int]:
"""Return the x and y axis index, given the location and the resolution"""
x_idx, y_idx = (
int((location.x - xy_min.x) // resolution.x),
int((location.y - xy_min.y) // resolution.y),
)
return (x_idx, y_idx)
# Return RSRP of a single point
def get_RSRP_and_interference_for_location(
self,
location: "SimulatedRSRP.Coordinate",
configurations: Tuple[np.ndarray, np.ndarray],
) -> Tuple[float, float, int]:
"""Get RSRP and interference power, given the location and configuration.
Configuration contains downtilts and transmit power for all the sectors.
The first list in the configuration is for downtilts and
the second is for transmit powers.
The return values are RSRP, interference power and serving sector idx.
"""
        # Check that the location is within the map range
if not (
self.xy_min.x <= location.x <= self.xy_max.x
and self.xy_min.y <= location.y <= self.xy_max.y
):
raise ValueError("Current location is outside of the map!")
# Check if the configurations format has the right shape
try:
assert len(configurations) == 2
assert (
len(configurations[0])
== len(configurations[1])
== self.num_total_sectors
)
except AssertionError:
logging.error("Configurations shape doesn't fit")
# Create an array to store the received powers from all the sectors
rx_powers_dBm = np.zeros(self.num_total_sectors, dtype=np.float32)
# Calculate RSRP, Interference power, and serving sector idx
for i in range(self.num_total_sectors):
# Get configurations, which are transmit power and downtilt
configured_downtilt = configurations[0][i]
configured_transmit_power_dBm = configurations[1][i]
# Check if the configurations are in the right range
if not (
self.min_TX_power_dBm
<= configured_transmit_power_dBm
<= self.max_TX_power_dBm
):
raise ValueError("Transmit Power is out of the range")
# Get the resolution of the map, and calculate the idx of locations
x_idx, y_idx = self.get_xy_idx(self.resolution, location, self.xy_min)
# Get the received power from one sector
received_power_dBm = (
self.get_power_for_downtilt_sector(configured_downtilt, i, x_idx, y_idx)
+ configured_transmit_power_dBm
)
rx_powers_dBm[i] = received_power_dBm
# Calculate RSRP and interference power
# The maximum received power from all the sectors is
# defined as the serving/attached cell RSRP.
# The remaining received powers will be regarded as interference powers.
rsrp_dBm, serving_sector_idx = np.max(rx_powers_dBm), np.argmax(rx_powers_dBm)
interference_power_watt = sum(
SimulatedRSRP.dBm_to_watt(
rx_powers_dBm[np.arange(self.num_total_sectors) != serving_sector_idx]
)
)
        # Convert to dBm
interference_power_dBm = SimulatedRSRP.watt_to_dBm(interference_power_watt)
return rsrp_dBm, interference_power_dBm, serving_sector_idx
def get_RSRP_and_interference_powermap(
self, configurations: Tuple[np.ndarray, np.ndarray]
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Returns RSRP and interference power for all the locations in the map.
`configurations` contains downtilts and transmit power for all the sectors.
The first list in the configuration contains downtilts and
the second contains transmit powers.
Return 3 maps for:
rsrp_powermap: RSRP power map
interference_powermap: interference power map
            serving_sector_idx_map: serving sector index map
"""
# Check if the configurations format has the right shape
try:
assert len(configurations) == 2
assert (
len(configurations[0])
== len(configurations[1])
== self.num_total_sectors
)
except AssertionError:
logging.error("Configurations shape doesn't fit")
# Get downtilts and tx powers from configurations
downtilts_for_sectors = configurations[0]
tx_powers_for_sectors = configurations[1]
# Check if the power configuration is right
if (
max(tx_powers_for_sectors) > self.max_TX_power_dBm
or min(tx_powers_for_sectors) < self.min_TX_power_dBm
):
raise ValueError("Transmit Power is out of the range")
        # Construct power matrices from the configuration
power_matrices = np.stack(
[
self.get_power_for_downtilt_sector(downtilt, flattened_sector_idx)
+ tx_powers_for_sectors[flattened_sector_idx]
for flattened_sector_idx, downtilt in enumerate(downtilts_for_sectors)
],
-1,
)
# Get RSRP powermap and serving sector idx map
rsrp_power_map_dBm = np.amax(power_matrices, -1)
serving_sector_idx_map = np.argmax(power_matrices, -1)
# 1. Convert power from dBm to watt
        # 2. Sum the powers at every location and subtract the RSRP to get the interference power
# 3. Set minimum threshold 1e-24 to avoid 0 watt power
interference_power_map_watt = np.maximum(
MIN_INTERFERENCE_POWER_WATT,
np.sum(SimulatedRSRP.dBm_to_watt(power_matrices), -1)
- SimulatedRSRP.dBm_to_watt(rsrp_power_map_dBm),
)
# Get interference power map in dBm
interference_power_map_dbm = SimulatedRSRP.watt_to_dBm(
interference_power_map_watt
)
return (rsrp_power_map_dBm, interference_power_map_dbm, serving_sector_idx_map)
def get_power_for_downtilt_sector(
self,
downtilt: float,
flattened_sector_idx: int,
x_idx: Optional[int] = None,
y_idx: Optional[int] = None,
) -> Union[np.ndarray, float]:
"""Return interpolated power matrix or scalar power in given location
If the x_idx and y_idx are given, the scalar power for this location
will be calculated. Otherwise it will return the power matrix
for the entire map.
"""
# Check given x_idx and y_idx
is_xy_given = x_idx is not None and y_idx is not None
# Check if the interpolation is needed
if downtilt in self.downtilts_maps:
if is_xy_given:
return self.downtilts_maps[downtilt].power_matrix[
x_idx, y_idx, flattened_sector_idx
]
else:
return self.downtilts_maps[downtilt].power_matrix[
:, :, flattened_sector_idx
]
else:
            # Begin interpolation
# 1. Find the nearest lower and upper downtilt
lower_downtilt, upper_downtilt = self.get_nearest_discrete_downtilts(
self.downtilts_keys, downtilt
)
# 2. Get the power matrix or scalar power for lower and upper downtilts
# Check if x_idx and y_idx are given, calculate the scalar power
if is_xy_given:
upper_downtilt_power = self.downtilts_maps[upper_downtilt].power_matrix[
x_idx, y_idx, flattened_sector_idx
]
lower_downtilt_power = self.downtilts_maps[lower_downtilt].power_matrix[
x_idx, y_idx, flattened_sector_idx
]
# Otherwise get the power matrix
else:
upper_downtilt_power = self.downtilts_maps[upper_downtilt].power_matrix[
:, :, flattened_sector_idx
]
lower_downtilt_power = self.downtilts_maps[lower_downtilt].power_matrix[
:, :, flattened_sector_idx
]
# 3. Linear interpolation
downtilt_power = (
(upper_downtilt_power - lower_downtilt_power)
/ (upper_downtilt - lower_downtilt)
) * (downtilt - lower_downtilt) + lower_downtilt_power
return downtilt_power
@staticmethod
def construct_from_npz_files(
power_maps_path: str, power_range: Tuple[float, float]
) -> "SimulatedRSRP":
"""Construct power map data from multiple power maps (npz format).
        power_maps_path is a glob pattern over the npz files (local or mounted), e.g.
        "/mnt/shared/yuchenq/power_maps/*.npz".
Power maps are loaded into the downtilts_maps dictionary,
keyed on downtilts.
npz files are generated from original JSON files. Here is the sample code:
powermaps_dir = Path("/mnt/shared/yuchenq/power_maps")
for fn in powermaps_dir.iterdir():
npfn = fn.name.replace(".json", ".npz")
with open(fn, "r") as f:
pmap = json.load(f)
for k, vals in pmap.items():
pmap[k] = np.array(vals)
np.savez_compressed(powermaps_dir.joinpath(npfn), **pmap)
"""
downtilts_maps = {}
power_maps_path = glob.glob(
os.path.abspath(os.path.expandvars(power_maps_path))
)
for file_path in power_maps_path:
try:
downtilt = float(re.search(r"DT\d+", file_path).group()[2:])
except AttributeError:
logging.error("No downtilt parameter configuration find")
raise
if downtilt in downtilts_maps:
logging.info("Duplicated downtilt %d files", downtilt)
else:
downtilts_maps[downtilt] = SimulatedRSRP.build_single_powermap(
np.load(file_path)
)
# Check if the map object has been successfully built
if not downtilts_maps:
logging.error("No power map files found!")
            raise FileNotFoundError("No power map files found!")
# Construct the simulation object
simulated_rsrp = SimulatedRSRP(
downtilts_maps=downtilts_maps,
min_TX_power_dBm=power_range[0],
max_TX_power_dBm=power_range[1],
)
return simulated_rsrp
@staticmethod
def build_single_powermap(npz_data: Dict[str, Any]) -> "SimulatedRSRP.Powermap":
"""Construct a single power map for the specific downtilt.
The power_matrix will have the following dimensions:
[x, y, num_total_sectors]
The power_matrix contains the received powers
assuming 0 dBm transmit power.
"""
x_coord = npz_data["x"]
y_coord = npz_data["y"]
z_coord = npz_data["z"]
        # Check that the x/y coordinates form a uniform grid
if not np.allclose(
x_coord, np.linspace(x_coord[0], x_coord[-1], len(x_coord))
) or not np.allclose(
y_coord, np.linspace(y_coord[0], y_coord[-1], len(y_coord))
):
raise ValueError("xy 2D map must be uniform grid")
        # Transmit power is stored in watts in the npz file; convert it to dBm
TX_power_dBm = SimulatedRSRP.watt_to_dBm(npz_data["ptx"])
# Try to get the number of base stations from the file
try:
num_base_stations = int(
re.search(r"\d+", list(npz_data.keys())[-1]).group()
)
        except (AttributeError, ValueError):
logging.error("Unable to determine the number of base stations")
raise
        # Store received powers at different locations
rx_powers = []
# Store number of sectors in different base stations
num_sectors_per_base_station = []
# Transmitter locations (transmitter, [x, y, z])
base_station_locations = np.zeros((num_base_stations, 3))
# Get the number of sectors of a single base station
# The power map structure will be [x, y, num_total_sectors]
for i in range(num_base_stations):
label = "Tx{}".format(i + 1)
num_sectors_per_base_station.append(len(npz_data[label + "pwr"][0][0]))
rx_powers.append(npz_data[label + "pwr"] - TX_power_dBm)
base_station_locations[i] = npz_data[label + "loc"]
powermap = SimulatedRSRP.Powermap(
power_matrix=np.concatenate(rx_powers, -1),
base_station_locations=base_station_locations,
xy_min=SimulatedRSRP.Coordinate(min(x_coord), min(y_coord)),
xy_max=SimulatedRSRP.Coordinate(max(x_coord), max(y_coord)),
fixed_z=z_coord,
num_sectors_per_base_station=num_sectors_per_base_station,
)
return powermap
@staticmethod
def get_metadata(
downtilts_maps: Dict[float, "SimulatedRSRP.Powermap"]
) -> "SimulatedRSRP.Metadata":
"""Analyze the maps and get information about sizes, resolution,
and number of total sectors.
        Return xy_min, xy_max, fixed_z, the resolution and
        a list with the number of sectors for each base station.
"""
xy_min = None
xy_max = None
fixed_z = None
num_sectors_per_base_station = []
for powermap in downtilts_maps.values():
            # We need to guarantee that all maps from different files share the same range and resolution
if not xy_min:
xy_min = powermap.xy_min
xy_max = powermap.xy_max
fixed_z = powermap.fixed_z
resolution = SimulatedRSRP.get_resolution_from_powermap(powermap)
else:
try:
assert xy_min == powermap.xy_min
assert xy_max == powermap.xy_max
assert resolution == SimulatedRSRP.get_resolution_from_powermap(
powermap
)
except AssertionError:
logging.error("Powermaps' sizes or resolutions don't match!")
raise
            # Check if the numbers of sectors match
if not num_sectors_per_base_station:
num_sectors_per_base_station = powermap.num_sectors_per_base_station
else:
try:
assert (
num_sectors_per_base_station
== powermap.num_sectors_per_base_station
)
except AssertionError:
logging.error(
"Number of sectors for each base station doesn't match"
)
raise
metadata = SimulatedRSRP.Metadata(
xy_min, xy_max, fixed_z, resolution, num_sectors_per_base_station
)
return metadata
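# Hedged usage sketch (not part of the original module): a minimal, self-contained
# illustration of the per-location RSRP/interference computation above, using the
# standard dBm <-> watt conversions. The received powers are made-up values.
if __name__ == "__main__":
    import numpy as np

    def _dbm_to_watt(p_dbm):
        return 10.0 ** ((p_dbm - 30.0) / 10.0)

    def _watt_to_dbm(p_watt):
        return 10.0 * np.log10(p_watt) + 30.0

    rx_powers_dBm = np.array([-80.0, -95.0, -100.0])  # hypothetical per-sector powers
    serving_idx = int(np.argmax(rx_powers_dBm))
    rsrp_dBm = rx_powers_dBm[serving_idx]
    interference_watt = _dbm_to_watt(
        rx_powers_dBm[np.arange(len(rx_powers_dBm)) != serving_idx]
    ).sum()
    print(rsrp_dBm, _watt_to_dbm(interference_watt), serving_idx)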
|
bo_pr-main
|
discrete_mixed_bo/problems/cco/simulated_rsrp.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Coverage and Capacity optimization for Cell networks.
Code from: https://github.com/Ryandry1st/CCO-in-ORAN/tree/main/cco_standalone_icassp_2021
Paper: R. M. Dreifuerst, et al. Optimizing Coverage and Capacity in Cellular Networks using Machine Learning. IEEE ICASSP special session on Machine Learning in Networks, 2021.
"""
import os
from typing import Any, Dict, Optional
import numpy as np
import torch
from botorch.test_functions.base import MultiObjectiveTestProblem
from torch import Tensor
from discrete_mixed_bo.problems.base import DiscreteTestProblem
from discrete_mixed_bo.problems.cco.problem_formulation import (
CCORasterBlanketFormulation,
)
from discrete_mixed_bo.problems.cco.simulated_rsrp import SimulatedRSRP
class CCO(DiscreteTestProblem, MultiObjectiveTestProblem):
dim: int = 30
_ref_point = [0.35, 0.35]
def __init__(
self,
data: Optional[Dict[int, Any]] = None,
noise_std: Optional[float] = None,
negate: bool = False,
scalarize: bool = False,
n_int_values: int = 6,
) -> None:
"""
This method requires a `data` object that is constructed as follows:
```
data = {}
for i in range(11):
data[i] = dict(np.load(f"powermaps/powermatrixDT{i}.npz"))
```
The npz files can be retrieved from:
https://github.com/Ryandry1st/CCO-in-ORAN/tree/main/cco_standalone_icassp_2021/data/power_maps
"""
if n_int_values not in (6, 11):
raise ValueError("Only 6 and 11 int values are supported")
self._n_int_values = n_int_values
self._bounds = [
(0.0, n_int_values - 1) for _ in range(15)
] + [ # downtilts (integers)
(30.0, 50.0) for _ in range(15) # transmission power (floats)
]
MultiObjectiveTestProblem.__init__(
self,
negate=negate,
noise_std=noise_std,
)
self._setup(integer_indices=list(range(15)))
if data is None:
current_dir = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(current_dir, "..", "data", "powermaps")
with open(path, "rb") as f:
data = torch.load(f)
self.simulated_rsrp = SimulatedRSRP(
powermaps=data,
min_TX_power_dBm=30,
max_TX_power_dBm=50,
)
self.problem_formulation = CCORasterBlanketFormulation(lambda_weight=0.9)
_, num_sectors = self.simulated_rsrp.get_configuration_shape()
downtilts_choices, (
min_Tx_power_dBm,
max_Tx_power_dBm,
) = self.simulated_rsrp.get_configuration_range()
xy_min, xy_max = self.simulated_rsrp.get_locations_range()
self.scalarize = scalarize
self.register_buffer("_objective_weights", torch.tensor([0.5, 0.5]))
def _powermap_evaluation_fn(self, input: Tensor) -> Tensor:
(
rsrp_powermap,
interference_powermap,
_,
) = self.simulated_rsrp.get_RSRP_and_interference_powermap(input.numpy())
# Compute aggregate metrics from the powermap
# compute percentages, we want to minimize both of these
(
f_weak_coverage_pct,
g_over_coverage_pct,
) = self.problem_formulation.get_weak_over_coverage_area_percentages(
rsrp_powermap, interference_powermap
)
return torch.tensor(
[f_weak_coverage_pct, g_over_coverage_pct],
dtype=input.dtype,
device=input.device,
)
def evaluate_true(self, X: Tensor) -> Tensor:
X = X.clone()
if self._n_int_values == 6:
X[..., :15] *= 2
Y = (
torch.stack(
[
self._powermap_evaluation_fn(x)
for x in X.view(-1, 2, self.dim // 2).cpu()
],
)
.view(*X.shape[:-1], 2)
.to(X)
)
if self.scalarize:
return Y @ self._objective_weights
else:
return Y
@property
def objective_weights(self) -> Optional[Tensor]:
# if self.scalarize:
# return self._objective_weights
return None
@property
def is_moo(self) -> bool:
return not self.scalarize
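# Hedged usage sketch (not part of the original module): building the `data` dict as
# described in the CCO docstring and evaluating one random configuration. The
# "powermaps/powermatrixDT{i}.npz" paths are assumptions; the files must be obtained
# separately from the CCO-in-ORAN repository referenced above.
if __name__ == "__main__":
    data = {i: dict(np.load(f"powermaps/powermatrixDT{i}.npz")) for i in range(11)}
    problem = CCO(data=data, n_int_values=11)
    lower, upper = problem.bounds  # shape (2, 30) in the BoTorch convention
    X = lower + (upper - lower) * torch.rand(1, problem.dim)
    X[..., :15] = X[..., :15].round()  # the first 15 dimensions (downtilts) are integers
    print(problem(X))  # [weak-coverage %, over-coverage %]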
|
bo_pr-main
|
discrete_mixed_bo/problems/cco/cco.py
|
#!/usr/bin/env python3
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
bo_pr-main
|
experiments/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
The main script for running a single replication.
"""
import errno
import json
import os
import sys
from typing import Any, Dict
import torch
from discrete_mixed_bo.run_one_replication import run_one_replication
def fetch_data(kwargs: Dict[str, Any]) -> None:
# this modifies kwargs in place
problem_kwargs = kwargs.get("problem_kwargs", {})
key = problem_kwargs.get("datapath")
if key is not None:
data = torch.load(key)
problem_kwargs["data"] = data
kwargs["problem_kwargs"] = problem_kwargs
if __name__ == "__main__":
current_dir = os.path.dirname(os.path.abspath(__file__))
exp_dir = os.path.join(current_dir, sys.argv[1])
config_path = os.path.join(exp_dir, "config.json")
label = sys.argv[2]
seed = int(float(sys.argv[3]))
last_arg = sys.argv[4] if len(sys.argv) > 4 else None
output_path = os.path.join(exp_dir, label, f"{str(seed).zfill(4)}_{label}.pt")
if not os.path.exists(os.path.dirname(output_path)):
try:
os.makedirs(os.path.dirname(output_path))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
with open(config_path, "r") as f:
kwargs = json.load(f)
save_callback = lambda data: torch.save(data, output_path)
save_frequency = 5
fetch_data(kwargs=kwargs)
run_one_replication(
seed=seed,
label=label,
save_callback=save_callback,
save_frequency=save_frequency,
**kwargs,
)
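# Hedged usage note (not part of the original script): based on the argument parsing
# above, a single replication is launched as
#   python experiments/main.py <exp_dir> <label> <seed>
# where <exp_dir> (relative to this file) contains a config.json with the keyword
# arguments for run_one_replication. The helper below is only an illustration of how
# the output path is derived from those CLI arguments.
def _example_output_path(exp_dir: str, label: str, seed: int) -> str:
    return os.path.join(exp_dir, label, f"{str(seed).zfill(4)}_{label}.pt")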
|
bo_pr-main
|
experiments/main.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
from configs import Config
########### running ###########
# torchrun --nproc_per_node=8 main.py <config>
def eval_yfcc15m_in1k_mocob16():
return Config(
output_dir="yfcc15m_in1k_mocob16",
eval=True,
resume="checkpoint-best.pth",
dataset="yfcc15m_tag",
metadata="data/yfcc15m/yfcc15m_w_tag.pkl",
root="data/yfcc15m",
trainable_weight="head-all",
batch_size=1024,
max_bert_length=32,
max_update=5000,
weight_decay=0.2,
head_weight_decay=1.,
eval_steps=500,
curate=100,
min_ratio=0.003,
extra_prompt=True,
aug_tag=True,
nodes=1, ngpus=1,
)
def yfcc15m_in1k_mocob16():
return Config(
val_task="imagenet",
dataset="yfcc15m_tag",
metadata="data/yfcc15m/yfcc15m_w_tag.pkl",
root="data/yfcc15m",
trainable_weight="head-all",
batch_size=1024,
max_bert_length=32,
max_update=5000,
weight_decay=0.2,
head_weight_decay=1.,
eval_steps=500,
curate=100,
min_ratio=0.003,
extra_prompt=True,
aug_tag=True,
nodes=2, ngpus=8,
)
def yfcc100m_in1k_mocob16():
return Config(
val_task="imagenet",
dataset="yfcc100m_tag",
metadata="data/yfcc100m/yfcc100m_image_ids.pkl",
root="/datasets01/yfcc100m/090517",
trainable_weight="head-all",
batch_size=1024,
max_bert_length=32,
max_update=5000,
weight_decay=0.2,
head_weight_decay=1.,
eval_steps=500,
curate=100,
thres=0.7,
sublist=True,
min_ratio=0.01,
extra_prompt=True,
aug_tag=True,
nodes=2, ngpus=8,
)
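# Hedged sketch (not part of the original file): new experiments follow the same
# pattern -- a zero-argument function returning a Config, whose output_dir defaults
# to the function name. The field values below are illustrative assumptions, not a
# configuration used in the paper.
def example_yfcc15m_debug():
    return Config(
        val_task="imagenet",
        dataset="yfcc15m_tag",
        metadata="data/yfcc15m/yfcc15m_w_tag.pkl",
        root="data/yfcc15m",
        trainable_weight="head-all",
        batch_size=256,
        max_bert_length=32,
        max_update=500,
        eval_steps=100,
        nodes=1, ngpus=1,
    )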
|
CiT-main
|
run_configs.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import os
import inspect
from collections import OrderedDict
class Config:
dataset = "yfcc15m_tag"
root = "data/yfcc15m"
metadata = "data/yfcc15m/yfcc15m_w_tag.pkl"
# data adaptation
val_task = "imagenet"
max_sample = None
thres = 0.55
num_workers = 6
# model
# model = "moco-bert"
max_bert_length = 32
trainable_weight = "head-all"
vision_backbone = "moco"
vision_pretrained = "pretrained_models/moco_hf"
text_backbone = "bert"
text_pretrained = "princeton-nlp/unsup-simcse-bert-base-uncased"
output_root = "runs"
# training
fp16 = True
lr = 5e-4
warmup_div = 25
min_lr = 1e-5
weight_decay = 0.2
head_weight_decay = 1.
device = "cuda"
dist_eval = False
accum_iter = 1
eval = False
pin_mem = False
resume = None
clip_grad = None
loss = "CiTCLIPLossGrad"
curate = 0
# evaluate
use_template = True
patience = None
eval_steps = 500
seed = 0
dist_on_itp = False
log_dir = None
def __init__(self, **kwargs):
for key in kwargs:
setattr(self, key, kwargs[key])
if not hasattr(self, "warmup_steps"):
self.warmup_steps = int(self.max_update / self.warmup_div) # TODO move this to main?
if not hasattr(self, "output_dir"):
self.output_dir = inspect.stack()[1][3]
self.output_dir = os.path.join(self.output_root, self.output_dir)
print("config.output_dir =", self.output_dir)
def add_cmd_args(self, cmd_args):
for key, value in vars(cmd_args).items():
if not key.startswith("__") and value is not None:
setattr(self, key, value)
return self
def __str__(self):
return "\n".join([f"{k}={v}" for k, v in vars(self).items()])
def build_from_sweep_config(sweep_config):
sweep_dict = OrderedDict()
key_to_short = OrderedDict()
key_to_card = OrderedDict()
sweep_name = sweep_config.__name__
cards = 1
for key, value in vars(sweep_config).items():
if not key.startswith("__"):
sweep_dict[key] = value[0] if isinstance(value, tuple) else value
cards *= len(sweep_dict[key])
key_to_card[key] = len(sweep_dict[key])
key_to_short[key] = value[1] if isinstance(value, tuple) else ""
all_update_dicts = []
for sweep_idx in range(cards):
key_to_idx = OrderedDict()
for key in key_to_card:
key_to_idx[key] = sweep_idx % key_to_card[key]
sweep_idx = sweep_idx // key_to_card[key]
update_dict = OrderedDict()
for key, idx in key_to_idx.items():
update_dict[key] = sweep_dict[key][idx]
update_dict["output_dir"] = "_".join([value+str(update_dict[key]).replace("/", ".") for key, value in key_to_short.items()])
update_dict["output_dir"] = os.path.join(sweep_name, update_dict["output_dir"])
all_update_dicts.append(update_dict)
assert len(all_update_dicts) == cards
return all_update_dicts
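# Hedged usage sketch (not part of the original file): Config copies keyword arguments
# onto the instance, derives warmup_steps from max_update when it is not given, and
# names output_dir after the calling function. The values below are illustrative.
if __name__ == "__main__":
    def my_debug_run():
        return Config(dataset="yfcc15m_tag", batch_size=256, max_update=1000)

    cfg = my_debug_run()
    print(cfg.warmup_steps)  # 1000 / warmup_div (default 25) -> 40
    print(cfg.output_dir)    # runs/my_debug_run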
|
CiT-main
|
configs.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import torch
from transformers import VisionTextDualEncoderModel
class CiTCLIPVisionTextDualEncoderModel(VisionTextDualEncoderModel):
'''a hf model wrapper to support forward with either or both image/text.
note that HF impl. uses an artificial pooler that most pre-trained models (e.g., ViT) don't have.
# LiT directly uses [CLS] token for both vision and language.
text: https://github.com/google-research/vision_transformer/blob/16fc24d2734f34b0a7b16212a4386c41fe662cb4/vit_jax/models_lit.py#L62
vision: https://github.com/google-research/vision_transformer/blob/16fc24d2734f34b0a7b16212a4386c41fe662cb4/vit_jax/models_vit.py#L283
configs of LiT: https://github.com/google-research/vision_transformer/blob/16fc24d2734f34b0a7b16212a4386c41fe662cb4/vit_jax/configs/models.py#L319
'''
def forward(
self,
input_ids=None,
pixel_values=None,
attention_mask=None,
position_ids=None,
return_loss=None,
token_type_ids=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
skip_text_projection=False,
split=1,
**kwargs,
):
image_embeds, text_embeds = None, None
if pixel_values is not None:
            if split > 1:  # TODO: test whether these two branches can be merged.
vision_outputs = []
for splitted_pixel_values in torch.split(pixel_values, pixel_values.size(0) // split):
vision_outputs.append(
self.vision_model(
pixel_values=splitted_pixel_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)[1]
)
image_embeds = torch.cat(vision_outputs, dim=0)
else:
vision_outputs = self.vision_model(
pixel_values=pixel_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
image_embeds = vision_outputs[1] # pooler_output
image_embeds = self.visual_projection(image_embeds)
if input_ids is not None:
text_outputs = self.text_model(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
            # SimCSE uses a tanh pooler, as in HF.
text_embeds = text_outputs[1] # pooler_output
if not skip_text_projection:
text_embeds = self.text_projection(text_embeds)
# cosine similarity as logits
logit_scale = self.logit_scale.exp()
return {"text_embeds": text_embeds, "image_embeds": image_embeds, "logit_scale": logit_scale}
def build_model(args):
import os
import hfmodels
from transformers import AutoTokenizer
os.environ["TOKENIZERS_PARALLELISM"] = "false"
print(f"creating model: {args.vision_backbone}-{args.text_backbone}")
model = CiTCLIPVisionTextDualEncoderModel.from_vision_text_pretrained( # VisionTextDualEncoderModel
args.vision_pretrained, # we dump simclr/moco into HF format.
args.text_pretrained, # all text models are in HF. # vision_model= ... your own model is not HF format.
projection_dim=args.projection_dim if hasattr(args, "projection_dim") else 512
)
tokenizer = AutoTokenizer.from_pretrained(args.text_pretrained, use_fast=True)
return model, tokenizer
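# Hedged usage sketch (not part of the original module): build_model only needs an
# args object exposing the backbone/pretrained fields used above. The checkpoint
# paths mirror the defaults in configs.Config and are assumptions about locally
# available weights.
if __name__ == "__main__":
    from types import SimpleNamespace

    args = SimpleNamespace(
        vision_backbone="moco",
        text_backbone="bert",
        vision_pretrained="pretrained_models/moco_hf",
        text_pretrained="princeton-nlp/unsup-simcse-bert-base-uncased",
    )
    model, tokenizer = build_model(args)
    batch = tokenizer(["a photo of a dog"], return_tensors="pt", padding=True)
    out = model(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"])
    print(out["text_embeds"].shape, out["logit_scale"].item())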
|
CiT-main
|
models_citclip.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# A script to run multinode training with submitit.
# --------------------------------------------------------
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import argparse
import os
import uuid
from pathlib import Path
import submitit
def parse_args():
parser = argparse.ArgumentParser("Submitit for adaptation")
parser.add_argument("sweep", type=str, help="name of a sweep.")
parser.add_argument("--ngpus", default=1, type=int, help="Number of gpus to request on each node")
parser.add_argument("--nodes", default=1, type=int, help="Number of nodes to request")
parser.add_argument("--resume", default=None, type=str, help="resume a checkpoint.")
parser.add_argument("--timeout", default=4320, type=int, help="Duration of the job")
parser.add_argument("--job_dir", default="", type=str, help="Job dir. Leave empty for automatic.")
parser.add_argument("--partition", default="learnlab", type=str, help="Partition where to submit")
parser.add_argument('--comment', default="", type=str, help="Comment to pass to scheduler")
args = parser.parse_args()
return args
def get_shared_folder() -> Path:
user = os.getenv("USER")
if Path("/checkpoint/").is_dir():
p = Path(f"/checkpoint/{user}/adaclip")
p.mkdir(exist_ok=True)
return p
raise RuntimeError("No shared folder available")
def get_init_file():
    # Init file must not exist, but its parent dir must exist.
os.makedirs(str(get_shared_folder()), exist_ok=True)
init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init"
if init_file.exists():
os.remove(str(init_file))
return init_file
class Trainer(object):
def __init__(self, args):
self.args = args
self.args.config.dist_url = get_init_file().as_uri()
def __call__(self):
self._setup_gpu_args()
import main
main.main(self.args.config)
def checkpoint(self):
import os
import submitit
self.args.config.dist_url = get_init_file().as_uri()
checkpoint_file = os.path.join(self.args.config.output_dir, "checkpoint-last.pth")
if os.path.exists(checkpoint_file):
self.args.config.resume = checkpoint_file
print("Requeuing ", self.args)
empty_trainer = type(self)(self.args)
return submitit.helpers.DelayedSubmission(empty_trainer)
def _setup_gpu_args(self):
import submitit
import os
from pathlib import Path
job_env = submitit.JobEnvironment()
if self.args.ngpus >= 1:
# self.args.config.seed += job_env.global_rank
# assert 'SLURM_PROCID' in os.environ:
self.args.config.local_rank = job_env.local_rank
self.args.config.rank = job_env.global_rank
self.args.config.world_size = job_env.num_tasks
print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
def main(args):
if args.job_dir == "":
args.job_dir = get_shared_folder()
assert args.job_dir != ""
args.job_dir = Path(args.job_dir) / "%j"
# Note that the folder will depend on the job_id, to easily track experiments
executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30)
num_gpus_per_node = args.ngpus
nodes = args.nodes
timeout_min = args.timeout
partition = args.partition
kwargs = {}
kwargs['slurm_constraint'] = 'volta32gb'
if args.comment:
kwargs['slurm_comment'] = args.comment
executor.update_parameters(
        mem_gb=160 * num_gpus_per_node,  # if "yfcccc12m" not in args.config.output_dir else 120 * num_gpus_per_node,
gpus_per_node=num_gpus_per_node,
tasks_per_node=num_gpus_per_node,
cpus_per_task=7,
nodes=nodes,
timeout_min=timeout_min,
# Below are cluster dependent parameters
slurm_partition=partition,
slurm_signal_delay_s=120,
**kwargs
)
executor.update_parameters(name=os.path.basename(args.config.output_dir))
trainer = Trainer(args)
job = executor.submit(trainer)
print("Submitted job_id:", job.job_id, "@", str(args.job_dir).replace("%j", job.job_id))
def submit():
args = parse_args()
import sweeps
import run_configs
import configs
from copy import deepcopy
if hasattr(sweeps, args.sweep):
print(f"sweeping {args.sweep} in `sweeps.py`")
sweep_config = getattr(sweeps, args.sweep)
all_update_dicts = configs.build_from_sweep_config(sweep_config)
for update_dict in all_update_dicts:
_args = deepcopy(args)
config = configs.Config(**update_dict)
if args.resume is not None:
config.resume = args.resume
setattr(_args, "config", config)
if hasattr(config, "ngpus"):
_args.ngpus = config.ngpus
if hasattr(config, "nodes"):
_args.nodes = config.nodes
_args.job_dir = config.output_dir
main(_args)
elif hasattr(run_configs, args.sweep):
print(f"launch {args.sweep} in `run_configs.py`")
config = getattr(run_configs, args.sweep)()
_args = deepcopy(args)
if args.resume is not None:
config.resume = args.resume
setattr(_args, "config", config)
if hasattr(config, "ngpus"):
_args.ngpus = config.ngpus
if hasattr(config, "nodes"):
_args.nodes = config.nodes
_args.job_dir = config.output_dir
main(_args)
if __name__ == "__main__":
submit()
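# Hedged usage note (not part of the original script): based on parse_args/submit
# above, jobs are submitted with either a sweep name from sweeps.py or a run-config
# name from run_configs.py, e.g.
#   python submitit_citclip.py alltask_5k_mr005 --partition learnlab
#   python submitit_citclip.py yfcc15m_in1k_mocob16 --resume checkpoint-last.pth
# When the selected Config defines ngpus/nodes, those values override the CLI flags.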
|
CiT-main
|
submitit_citclip.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# DeiT: https://github.com/facebookresearch/deit
# BEiT: https://github.com/microsoft/unilm/tree/master/beit
# --------------------------------------------------------
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import math
import sys
import json
import torch
import util.misc as misc
import util.lr_sched as lr_sched
from typing import Iterable
from collections import defaultdict
def to_device(samples, device, args):
inputs = {}
for key in samples:
if key not in ["image_ids", "captions", "__key__"]:
inputs[key] = samples[key].to(device, non_blocking=True)
if key == "pixel_values" and inputs[key].dtype == torch.uint8:
from main import get_mean_std
# inmem data. normalize it.
inputs[key] = inputs[key].to(torch.float32).div_(255.) # b, 3, 224, 224
mean, std = get_mean_std(args)
mean = torch.as_tensor(mean, device=inputs[key].device)[None, :, None, None]
std = torch.as_tensor(std, device=inputs[key].device)[None, :, None, None]
inputs[key] = inputs[key].sub_(mean).div_(std)
return inputs
@torch.no_grad()
def evaluate(args, model, val_transform, tokenizer):
from clipeval import datasets, eval_zeroshot
catalog, all_templates, all_labels = eval_zeroshot.load_metadata("clipeval")
if args.val_task is None or args.val_task in ["mt", "imagenet21k", "imagenet1k"]: # infer val_task for multitasking.
val_task = "imagenet"
else:
val_task = args.val_task
metrics = {}
for d in catalog: # assume multitask on CLIP suite by default and early stop if IN only.
if not args.eval and d != val_task: # training only eval on val_task.
continue
if args.eval and args.val_task not in ["mt", "imagenet21k", "imagenet1k"] and d != val_task:
continue
val_dataset = datasets.get_downstream_dataset(
catalog, d, is_train=False, transform=val_transform)
templates = all_templates[d]
labels = all_labels[d]
if args.val_task not in ["mt", "imagenet21k", "imagenet1k"] and (hasattr(args, "extra_prompt") and args.extra_prompt) and d == "imagenet": # not eval MT in LiT setup.
templates.extend(["A photo of a {}", "{}"]) # see LiT page 16.
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size=args.batch_size//2, shuffle=False,
num_workers=args.num_workers, pin_memory=False, drop_last=False)
if not args.use_template:
templates = ["{}"]
metric = eval_zeroshot.evaluate(d, val_loader, templates, labels, model, tokenizer, args.max_bert_length, False)
metrics[d] = metric
if args.eval:
json_str = json.dumps({"task": d, "acc": metric})
misc.print_json(args.output_dir, json_str)
    return metrics if len(metrics) > 1 else metrics[val_task]  # stay compatible with ImageNet-only evaluation.
def append_dataset(dataset, batch, mask_selector, batch_size):
if "pixel_values" in batch:
assert batch["pixel_values"].dtype == torch.uint8
if mask_selector.sum().item() == 0:
return
assert len(dataset[-1]["image_ids"]) <= batch_size
if len(dataset[-1]["image_ids"]) == batch_size:
dataset.append(defaultdict(list))
batch_len = len(batch["image_ids"])
for key in batch:
assert batch_len == len(batch[key])
for ix, selected in enumerate(mask_selector):
if selected:
dataset[-1][key].append(batch[key][ix])
while len(dataset[-1]["image_ids"]) >= batch_size:
last_batch = dataset[-1]
new_batch = {}
for key in last_batch:
value = last_batch[key]
if len(value) >= batch_size:
last_batch[key] = value[:batch_size]
if torch.is_tensor(value[0]):
last_batch[key] = torch.stack(last_batch[key])
if len(value) > batch_size:
new_batch[key] = value[batch_size:]
if new_batch:
dataset.append(new_batch)
else:
return
def train_one_epoch(model: torch.nn.Module, model_without_ddp, criterion: torch.nn.Module, tokenizer,
data_loader: Iterable, data_loader_val: Iterable, val_transform, best_acc, optimizer: torch.optim.Optimizer,
device: torch.device, epoch: int, step, loss_scaler, eff_batch_size, max_norm: float = 0,
# mixup_fn: Optional[Mixup] = None,
log_writer=None,
args=None):
model.train(True)
metric_logger = misc.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 20
accum_iter = args.accum_iter
optimizer.zero_grad()
    # data_loader is either a real DataLoader or an in-memory list of batches
for data_iter_step, samples in enumerate(metric_logger.log_every(data_loader, print_freq, header, args.max_update)):
if step[0] > args.max_update:
break
# we use a per iteration (instead of per epoch) lr scheduler
if data_iter_step % accum_iter == 0:
lr_sched.adjust_step_learning_rate(optimizer, step[0], args.lr, args.min_lr, args.warmup_steps, args.max_update)
inputs = to_device(samples, device, args)
with torch.cuda.amp.autocast(enabled=args.fp16):
outputs = model(**inputs)
loss = criterion(**outputs)
loss_value = loss.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
sys.exit(1)
loss /= accum_iter
update_grad = (data_iter_step + 1) % accum_iter == 0
loss_scaler(loss, optimizer, clip_grad=max_norm,
parameters=model.parameters(), create_graph=False,
update_grad=update_grad)
if update_grad:
step[0] += 1
optimizer.zero_grad()
torch.cuda.synchronize()
metric_logger.update(loss=loss_value)
min_lr = 10.
max_lr = 0.
for group in optimizer.param_groups:
min_lr = min(min_lr, group["lr"])
max_lr = max(max_lr, group["lr"])
metric_logger.update(lr=max_lr)
loss_value_reduce = misc.all_reduce_mean(loss_value)
if log_writer is not None:
log_writer.add_scalar('lr', max_lr, step[0])
log_writer.add_scalar('loss', loss_value_reduce, step[0])
if step[0] and step[0] % args.eval_steps == 0:
metric = evaluate(args, model, val_transform, tokenizer)
json_str = json.dumps({"step": step[0], "acc": metric, "seen": eff_batch_size * step[0]})
misc.print_json(args.output_dir, json_str)
if log_writer is not None:
log_writer.add_scalar('acc', metric, step[0])
if isinstance(data_loader, list) or (hasattr(data_loader, "dataset") and isinstance(data_loader.dataset, torch.utils.data.IterableDataset)):
misc.save_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
loss_scaler=loss_scaler, epoch=0, epoch_name="last", best_acc=best_acc[0], step=step[0])
if metric > best_acc[0]:
best_acc[0] = metric
misc.save_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
loss_scaler=loss_scaler, epoch=step[0], epoch_name="best", best_acc=best_acc[0], step=step[0])
model.train(True)
if step[0] and curate_condition(step[0], args):
break
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
def curate_condition(step, args):
if args.curate and step % args.curate == 0:
return True
else:
return False
def curate_scheduler(step, args):
return args.curate
def max_sim(logits, thres):
logits, idx = logits.max(dim=-1)
return logits > thres, idx
ratio = 1.0
thres = None
def thres_scheduler(step, args):
return args.thres
def while_condition(example_ids, step, args):
if hasattr(args, "inmem") and args.inmem:
return len(example_ids) < curate_scheduler(step, args) or (len(example_ids) == curate_scheduler(step, args) and len(example_ids[-1]["image_ids"]) < args.batch_size)
else:
return len(example_ids) < (curate_scheduler(step, args) * args.batch_size)
@torch.no_grad()
def iterative_classcurate(step, device, producer_iter, model, tokenizer, args):
model.eval()
from clipeval import eval_zeroshot
catalog, all_templates, all_labels = eval_zeroshot.load_metadata("clipeval")
if args.val_task == "mt":
labels = set()
for d in catalog:
for label in all_labels[d]:
if isinstance(label, list):
for _label in label:
labels.add(_label)
else:
labels.add(label)
labels = list(labels)
elif args.val_task == "imagenet21k":
labels = set()
with open("clipeval/imagenet21k_wordnet_lemmas.txt", "r") as fr:
for line in fr:
labels.add(line.strip())
labels = list(labels)
else:
d = args.val_task # infer catalog_subsets
labels = all_labels[d]
templates = ["{}"] if not (hasattr(args, "templatefilter") and args.templatefilter) else all_templates[args.val_task] # no templates for now.
labels_emb = []
with torch.cuda.amp.autocast():
labels_emb, _, _ = eval_zeroshot.build_text_features(
templates, labels, model, tokenizer, args.max_bert_length, skip_text_projection=True)
labels_emb = labels_emb.t().to(torch.float32)
if hasattr(args, "sublist") and args.sublist:
example_ids = []
else:
example_ids = set()
total_example = 0
global thres
thres = thres_scheduler(step[0], args)
while while_condition(example_ids, step[0], args):
samples = next(producer_iter)
image_ids = samples["image_ids"]
total_example += len(image_ids)
if hasattr(args, "skip_step") and step[0] < args.skip_step:
mask_selector = torch.ones((len(image_ids),), dtype=torch.bool)
else:
inputs = to_device(samples, device, args)
with torch.cuda.amp.autocast():
text_embeds = model(**inputs, skip_text_projection=False if hasattr(args, "project_emb") else True)["text_embeds"]
text_embeds = text_embeds.to(torch.float32)
text_embeds = text_embeds / text_embeds.norm(dim=-1, keepdim=True)
logits = torch.matmul(text_embeds, labels_emb).cpu()
mask_selector, class_idx = max_sim(logits, thres)
batch_ratio = float(mask_selector.sum() / len(mask_selector))
if hasattr(args, "min_ratio") and batch_ratio < args.min_ratio:
                # fall back to top-ratio selection when too few examples pass the threshold.
max_logits, class_idx = logits.max(dim=-1)
_, idx = max_logits.topk(dim=-1, k=int(args.min_ratio * logits.size(0)))
mask_selector = torch.zeros_like(max_logits, dtype=torch.bool)
mask_selector[idx] = True
if mask_selector.sum() > 0:
assert len(mask_selector.size()) == 1 and len(image_ids) == mask_selector.size(0)
filtered_image_ids = [image_ids[_idx] for _idx in range(len(image_ids)) if mask_selector[_idx]]
for image_id_field in filtered_image_ids:
if hasattr(args, "sublist") and args.sublist:
example_ids.append(image_id_field)
else:
example_ids.add(image_id_field)
global ratio
ratio = len(example_ids) / total_example
misc.print_json(args.output_dir, json.dumps({"step": step[0], "ratio": ratio, "thres": thres}))
model.train()
return example_ids
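# Hedged sketch (not part of the original module): the core of the curation step above
# is max_sim plus a top-ratio fallback. The logits, threshold and ratio below are
# made-up values that illustrate the behaviour on random data.
if __name__ == "__main__":
    torch.manual_seed(0)
    logits = torch.rand(8, 5)  # (batch, num_labels) text-to-label similarities
    mask_selector, class_idx = max_sim(logits, 0.9)
    if float(mask_selector.sum()) / len(mask_selector) < 0.25:  # mimic args.min_ratio
        max_logits, class_idx = logits.max(dim=-1)
        _, idx = max_logits.topk(dim=-1, k=int(0.25 * logits.size(0)))
        mask_selector = torch.zeros_like(max_logits, dtype=torch.bool)
        mask_selector[idx] = True
    print(mask_selector, class_idx)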
|
CiT-main
|
engine.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
"""
pre-configured sweeps.
"""
import json
class alltask_5k_mr005:
batch_size = [1536], "bsz"
max_update = [5000], "s"
refilter = [100], "refilter"
prefilter = [0.45], ""
min_ratio = [0.05], "r"
sublist = [True], ""
val_task = [d for d in json.load(open("clipeval/dataset_catalog.json")).keys()], ""
aug_tag = [True], ""
nodes = [1], ""
ngpus = [1], ""
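# Hedged sketch (not part of the original file): each class attribute is a list of
# candidate values, optionally paired with a short name used to build output_dir.
# configs.build_from_sweep_config expands the cartesian product, as the tiny made-up
# sweep below illustrates.
if __name__ == "__main__":
    import configs

    class tiny_sweep:
        batch_size = [256, 512], "bsz"
        max_update = [1000], "s"

    for update_dict in configs.build_from_sweep_config(tiny_sweep):
        print(update_dict)  # two dicts with output_dir like "tiny_sweep/bsz256_s1000"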
|
CiT-main
|
sweeps.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
from util import misc
class AllGather(torch.autograd.Function):
@staticmethod
def forward(ctx, tensor):
output = [torch.empty_like(tensor) for _ in range(misc.get_world_size())]
dist.all_gather(output, tensor)
ctx.rank = misc.get_rank()
ctx.batch_size = tensor.shape[0]
return torch.cat(output, 0)
@staticmethod
def backward(ctx, grad_output):
return (
grad_output[
ctx.batch_size * ctx.rank : ctx.batch_size * (ctx.rank + 1)
],
None,
)
class CiTCLIPLossGrad(nn.Module):
def forward(self, image_embeds, text_embeds, logit_scale):
# normalized features
image_embeds = F.normalize(image_embeds, dim=-1, p=2)
text_embeds = F.normalize(text_embeds, dim=-1, p=2)
if misc.get_world_size() > 1:
# gather features from all GPUs
image_embeds = AllGather.apply(image_embeds)
text_embeds = AllGather.apply(text_embeds)
# cosine similarity as logits
logits_per_image = logit_scale * image_embeds @ text_embeds.t()
labels = torch.arange(logits_per_image.size(0), device=image_embeds.device)
loss = F.cross_entropy(logits_per_image, labels)
return loss
class CLIPLossGrad(nn.Module):
def forward(self, image_embeds, text_embeds, logit_scale):
image_embeds = F.normalize(image_embeds, dim=-1, p=2)
text_embeds = F.normalize(text_embeds, dim=-1, p=2)
if misc.get_world_size() > 1:
# gather features from all GPUs
image_embeds = AllGather.apply(image_embeds)
text_embeds = AllGather.apply(text_embeds)
# cosine similarity as logits
logits_per_image = logit_scale * image_embeds @ text_embeds.t()
logits_per_text = logit_scale * text_embeds @ image_embeds.t()
labels = torch.arange(logits_per_image.size(0), device=image_embeds.device)
loss = (F.cross_entropy(logits_per_image, labels) + \
F.cross_entropy(logits_per_text, labels)) / 2.
return loss
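# Hedged sketch (not part of the original module): a single-process sanity check of
# CiTCLIPLossGrad on random embeddings. With a world size of 1 the all-gather is
# skipped, so this runs on CPU without any distributed setup.
if __name__ == "__main__":
    torch.manual_seed(0)
    image_embeds = torch.randn(8, 512, requires_grad=True)
    text_embeds = torch.randn(8, 512, requires_grad=True)
    logit_scale = torch.tensor(100.0)
    loss = CiTCLIPLossGrad()(image_embeds, text_embeds, logit_scale)
    loss.backward()
    print(loss.item(), image_embeds.grad.shape)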
|
CiT-main
|
losses.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# DeiT: https://github.com/facebookresearch/deit
# BEiT: https://github.com/microsoft/unilm/tree/master/beit
# --------------------------------------------------------
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import argparse
import datetime
import numpy as np
import os
import time
import json
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
from collections import defaultdict
import losses
import util.misc as misc
from util.misc import NativeScalerWithGradNormCount as NativeScaler
from models_citclip import build_model
from engine import train_one_epoch, evaluate, iterative_classcurate
from weights import freeze_model
def get_mean_std(args):
if "augreg" in args.vision_backbone or "augreg" in args.vision_pretrained:
mean = [0.5, 0.5, 0.5]
std = [0.5, 0.5, 0.5]
else:
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
return mean, std
def get_val_transform(args):
"""moved from SLIP's eval_zeroshot.py"""
import torchvision.transforms as transforms
mean, std = get_mean_std(args)
print(args.vision_backbone, "val_normalizer", mean, std)
return transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
lambda x: x.convert('RGB'),
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std)
])
def get_train_transform(args):
import torchvision.transforms as transforms
trans = [transforms.RandomResizedCrop(224, scale=(0.5, 1.0))]
if hasattr(args, "inmem") and args.inmem: # use in-mem training / no dataloader for consumer dataset.
from torchvision.transforms.functional import pil_to_tensor
trans.append(pil_to_tensor)
else:
trans.append(transforms.ToTensor())
mean, std = get_mean_std(args)
print(args.vision_backbone, "train_normalizer", mean, std)
trans.append(transforms.Normalize(mean=mean, std=std))
return transforms.Compose(trans)
def build_dataset(args, tokenizer):
from clipeval import datasets
train_transform = get_train_transform(args)
train_task_example_ids = None
if hasattr(args, "pcurate") or (args.val_task is not None and args.curate == 0): # no validation for full yfcc15m training (same as SLIP/CLIP).
thres = args.pcurate if hasattr(args, "pcurate") else args.thres
if args.dataset in ["yfcc15m_tag"]:
task_meta = torch.load(f"data/CLIP/{args.dataset}/{args.val_task}_ub_{args.dataset}_simcse{thres}_{args.max_bert_length}.pt")
if hasattr(args, "sublist") and args.sublist:
train_task_example_ids = task_meta["example_ids"]
else:
train_task_example_ids = set(task_meta["example_ids"])
print("train_task_example_ids_key", len(train_task_example_ids))
else:
task_meta = torch.load(f"data/CLIP/CLIP_eval/{args.val_task}_ub_{args.dataset}_simcse{thres}.pt")
if hasattr(args, "sublist") and args.sublist:
train_task_example_ids = task_meta["example_ids"]
else:
train_task_example_ids = set(task_meta["example_ids"])
print("train_task_example_ids", len(train_task_example_ids))
tar_files = None
train_dataset = datasets.ImageCaptionDatasetCLIP(
args, args.dataset, args.root, args.metadata, train_task_example_ids,
train_transform, tokenizer, args.max_bert_length, max_sample=args.max_sample
)
return train_dataset, None, train_transform, tar_files
def producer_collator(batch_list):
result = defaultdict(list)
for item in batch_list:
for key in item:
if key not in ["__key__"]:
result[key].append(item[key])
for key in result:
if key not in ["image_ids", "__key__", "captions"]:
result[key] = torch.stack(result[key])
return result
def main(args):
misc.init_distributed_mode(args)
print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__))))
print("{}".format(args).replace(', ', ',\n'))
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + misc.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
model, tokenizer = build_model(args)
model = freeze_model(model, args)
model.to(device)
dataset_train, dataset_val, train_transform, tar_files = build_dataset(args, tokenizer)
val_transform = get_val_transform(args)
num_tasks = misc.get_world_size()
global_rank = misc.get_rank()
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
) if not isinstance(dataset_train, torch.utils.data.IterableDataset) else None
if args.dist_eval:
if len(dataset_val) % num_tasks != 0:
print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
'This will slightly alter validation results as extra duplicate entries are added to achieve '
'equal num of samples per-process.')
sampler_val = torch.utils.data.DistributedSampler(
dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=True) # shuffle=True to reduce monitor bias
else:
sampler_val = None if dataset_val is None else torch.utils.data.SequentialSampler(dataset_val)
if global_rank == 0 and args.log_dir is not None and not args.eval:
from torch.utils.tensorboard import SummaryWriter
os.makedirs(args.log_dir, exist_ok=True)
log_writer = SummaryWriter(log_dir=args.log_dir)
else:
log_writer = None
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Model = %s" % str(model_without_ddp))
print('number of params (M): %.2f' % (n_parameters / 1.e6))
eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size()
if args.lr is None: # only base_lr is specified
args.lr = args.blr * eff_batch_size / 256
print("base lr: %.2e" % (args.lr * 256 / eff_batch_size))
print("actual lr: %.2e" % args.lr)
print("accumulate grad iterations: %d" % args.accum_iter)
print("effective batch size: %d" % eff_batch_size)
if not isinstance(dataset_train, torch.utils.data.IterableDataset):
print("len(dataset)", len(dataset_train))
else:
print("cannot estimate len of torch.utils.data.IterableDataset.")
if args.distributed:
find_unused = False
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=find_unused)
model_without_ddp = model.module
# https://github.com/rwightman/pytorch-image-models/blob/fd360ac951a179474917f4b2d21db8669bf87f68/timm/models/vision_transformer.py#L407
    no_weight_decay_list = {'pos_embed', 'cls_token', 'dist_token'}  # this doesn't matter yet since all of these are frozen.
head_weight_decay_list = {"visual_projection", "text_projection"}
p_wd, p_no_wd = [], []
p_head_wd = []
# only apply 1-dim no decay for now.
for n, p in model.named_parameters():
if not p.requires_grad:
continue # frozen weights
if p.ndim == 1 or n in no_weight_decay_list:
p_no_wd.append(p)
elif hasattr(args, "no_wd_emb") and isinstance(p, torch.nn.Embedding):
p_no_wd.append(p)
elif hasattr(args, "no_wd_ln") and isinstance(p, torch.nn.LayerNorm):
p_no_wd.append(p)
elif hasattr(args, "head_weight_decay") and [True for _part in head_weight_decay_list if _part in n]:
p_head_wd.append(p)
else:
p_wd.append(p)
param_groups = [{"params": p_wd, "weight_decay": args.weight_decay},
{"params": p_no_wd, "weight_decay": 0.}]
if p_head_wd:
param_groups.append({"params": p_head_wd, "weight_decay": args.head_weight_decay})
optimizer = torch.optim.AdamW(param_groups, lr=args.lr, eps=1e-8)
loss_scaler = NativeScaler(args.fp16)
start_epoch, best_acc, step = 0, [0.], [0]
if args.resume:
if args.resume.endswith(".pth"): # a pytorch checkpoint for resuming training.
if args.resume.startswith("checkpoint"):
args.resume = os.path.join(args.output_dir, args.resume)
start_epoch, _, best_acc, step = misc.load_model(args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler)
best_acc, step = [best_acc], [step if step is not None else 0]
if isinstance(dataset_train, torch.utils.data.IterableDataset):
                # offset the start shard by the current step to avoid re-training on the same shards.
dataset_train.start_shard_id = step[0] % dataset_train.num_shards
print("resuming", args.resume, "from step", step[0], "with best_acc", best_acc[0])
else:
print("assuming a huggingface transformer pretrained model (no optimizer states).")
from models_citclip import CiTCLIPVisionTextDualEncoderModel
metric = evaluate(args, model, val_transform, tokenizer)
model = CiTCLIPVisionTextDualEncoderModel.from_pretrained(args.resume)
if args.eval:
metric = evaluate(args, model, val_transform, tokenizer)
json_str = json.dumps({"step": step[0], "acc": metric, "seen": eff_batch_size * step[0]})
print(json_str)
exit(0)
criterion = getattr(losses, args.loss)().to(device)
print("criterion = %s" % str(criterion))
if args.curate is not None and args.curate > 1:
curate_batch_size = args.batch_size * 2
dataset_train.with_vision = True if hasattr(args, "inmem") and args.inmem else False
data_loader_producer = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=curate_batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
collate_fn=producer_collator,
persistent_workers=True
)
def producer_fn(epoch):
while True:
data_loader_producer.sampler.set_epoch(epoch)
for batch in data_loader_producer:
yield batch
epoch += 1
producer_iter = iter(producer_fn(start_epoch))
else:
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
data_loader_val = None if dataset_val is None else torch.utils.data.DataLoader(
dataset_val, sampler=sampler_val,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False
)
import math
if not isinstance(dataset_train, torch.utils.data.IterableDataset) and not args.curate:
epochs = math.ceil(args.max_update / (len(dataset_train) // eff_batch_size))
print(f"Start training for {args.max_update} steps / {epochs} epochs")
else:
        epochs = 1000000  # a large number to allow an effectively infinite run on an IterableDataset.
print(f"Start training for {args.max_update} steps on torch.utils.data.IterableDataset or curate dataset, the checkpoint is stateless.")
start_time = time.time()
for epoch in range(start_epoch, epochs):
if step[0] >= args.max_update:
break
if args.curate is not None and (args.curate > 0 and step[0] % args.curate == 0):
curate_cls = iterative_classcurate
all_example_ids = curate_cls(step, device, producer_iter, model, tokenizer, args)
print(len(all_example_ids), "after curate", args.curate * args.batch_size, "expected")
if hasattr(args, "inmem") and args.inmem:
data_loader_train = all_example_ids
else:
if hasattr(args, "sublist") and args.sublist:
assert isinstance(all_example_ids, list)
all_example_ids = all_example_ids[:args.curate * args.batch_size]
else:
all_example_ids = set(list(all_example_ids)[:args.curate * args.batch_size])
assert len(all_example_ids) == args.curate * args.batch_size
from clipeval import datasets
dataset_train = datasets.ImageCaptionDatasetCLIP(args,
args.dataset, args.root, args.metadata, all_example_ids,
train_transform, tokenizer, args.max_bert_length, max_sample=args.max_sample
)
data_loader_train = torch.utils.data.DataLoader(
dataset_train, shuffle=True, # just a local sampler.
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
if hasattr(data_loader_train, "sampler") and isinstance(data_loader_train.sampler, torch.utils.data.DistributedSampler):
data_loader_train.sampler.set_epoch(epoch)
train_stats = train_one_epoch(
model, model_without_ddp, criterion, tokenizer, data_loader_train, data_loader_val, val_transform, best_acc,
optimizer, device, epoch, step, loss_scaler, eff_batch_size,
args.clip_grad,
log_writer=log_writer,
args=args
)
if not isinstance(dataset_train, torch.utils.data.IterableDataset):
misc.save_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
loss_scaler=loss_scaler, epoch=epoch, epoch_name="last", best_acc=best_acc[0], step=step[0])
else:
misc.save_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
loss_scaler=loss_scaler, epoch=0, epoch_name="last", best_acc=best_acc[0], step=step[0])
# if log_writer is not None:
# log_writer.finish()
args.resume = os.path.join(args.output_dir, "checkpoint-best.pth")
if os.path.isfile(args.resume):
misc.load_model(args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler)
metric = evaluate(args, model, val_transform, tokenizer)
json_str = json.dumps({"step": step[0], "acc": metric, "seen": eff_batch_size * step[0]})
print(json_str)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
def parse_args():
    '''see configs.py or sweeps.py (we only allow pre-defined configs).'''
parser = argparse.ArgumentParser(description='CiTCLIP', add_help=False)
parser.add_argument('config_name', type=str, help='see configs.py')
parser.add_argument('--world_size', default=1, type=int)
parser.add_argument('--local_rank', default=-1, type=int)
parser.add_argument('--dist_on_itp', action='store_true')
parser.add_argument('--dist_url', default='env://')
parser.add_argument('--resume', default=None, type=str)
parser.add_argument('--eval', default=None, action='store_true')
cmd_args = parser.parse_args()
import run_configs
config = getattr(run_configs, cmd_args.config_name)().add_cmd_args(cmd_args)
return config
if __name__ == '__main__':
args = parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
|
CiT-main
|
main.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
"""
pre-configured trainable weights.
"""
pre_projection_weights = ['logit_scale', 'visual_projection.weight', 'text_projection.weight']
# TODO: unify layer selection for all models.
pre_vision_trainable_weights = {
"moco": {
"head": ['moco.head'],
"none": [],
"all": ["[ALL]"]
},
"augreg": {
"none": [],
"all": ["[ALL]"],
},
"swag": {
"none": [],
"all": ["[ALL]"],
}
}
pre_text_trainable_weights = {
"bert": {
"pool": ['pooler.dense.weight', 'pooler.dense.bias'],
"all": ["[ALL]"]
},
}
def _freeze_model(model, trainable_weights):
    '''We assume the pretrained model has an unknown freezing status,
    so every model must pass through this function
    (e.g., the MoCo teacher is frozen after pretraining).
    "[ALL]" indicates that a sub-model is fully trainable.
'''
for name, parameter in model.named_parameters():
for param in trainable_weights:
if name.startswith(param) or param == "[ALL]":
parameter.requires_grad = True
break
else:
parameter.requires_grad = False
trainable_parameters = []
for name, parameter in model.named_parameters():
if parameter.requires_grad:
trainable_parameters.append(name)
print(f"{model.__class__.__name__} trainable weights:", trainable_parameters)
def freeze_model(model, args):
assert "-" in args.trainable_weight, "trainable_weight needs format <vision_weight_config>-<text_weight_config>."
vision_config, text_config = args.trainable_weight.split("-")
vision_trainable_weights = pre_vision_trainable_weights[args.vision_backbone][vision_config]
text_trainable_weights = pre_text_trainable_weights[args.text_backbone][text_config]
_freeze_model(model, pre_projection_weights)
_freeze_model(model.vision_model, vision_trainable_weights)
_freeze_model(model.text_model, text_trainable_weights)
return model
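# Hedged sketch (not part of the original module): _freeze_model matches parameter
# names by prefix (or "[ALL]"), so a tiny toy module is enough to see which weights
# stay trainable.
if __name__ == "__main__":
    import torch.nn as nn

    class Toy(nn.Module):
        def __init__(self):
            super().__init__()
            self.pooler = nn.Linear(4, 4)
            self.encoder = nn.Linear(4, 4)

    toy = Toy()
    _freeze_model(toy, ["pooler"])  # only pooler.* keeps requires_grad=True
    print({n: p.requires_grad for n, p in toy.named_parameters()})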
|
CiT-main
|
weights.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# DeiT: https://github.com/facebookresearch/deit
# BEiT: https://github.com/microsoft/unilm/tree/master/beit
# --------------------------------------------------------
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import builtins
import datetime
import os
import time
from collections import defaultdict, deque
from pathlib import Path
import torch
import torch.distributed as dist
from torch._six import inf
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
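# Hedged usage sketch (not part of the original module): SmoothedValue keeps a bounded
# window for the median/average while also tracking a global average over all updates.
# The values below are illustrative.
if __name__ == "__main__":
    sv = SmoothedValue(window_size=3, fmt="{median:.2f} ({global_avg:.2f})")
    for v in [1.0, 2.0, 3.0, 10.0]:
        sv.update(v)
    print(sv)  # window holds [2, 3, 10] -> median 3.00; global average 4.00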
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if v is None:
continue
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None, max_update=None):
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}')
if hasattr(iterable, "dataset") and isinstance(iterable.dataset, torch.utils.data.IterableDataset):
len_iter = max_update
else:
len_iter = len(iterable)
space_fmt = ':' + str(len(str(len_iter))) + 'd'
log_msg = [
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
]
if torch.cuda.is_available():
log_msg.append('max mem: {memory:.0f}')
log_msg = self.delimiter.join(log_msg)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len_iter - 1:
eta_seconds = iter_time.global_avg * (len_iter - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(
i, len_iter, eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len_iter, eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {} ({:.4f} s / it)'.format(
header, total_time_str, total_time / len_iter))
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
builtin_print = builtins.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
force = force or (get_world_size() > 8)
if is_master or force:
now = datetime.datetime.now()
builtin_print('[{}] '.format(now), end='') # print with time stamp
builtin_print(*args, **kwargs)
builtins.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if args.dist_on_itp:
args.rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT'])
os.environ['LOCAL_RANK'] = str(args.gpu)
os.environ['RANK'] = str(args.rank)
os.environ['WORLD_SIZE'] = str(args.world_size)
# ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"]
elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
else:
print('Not using distributed mode')
setup_for_distributed(is_master=True) # hack
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}, gpu {}'.format(
args.rank, args.dist_url, args.gpu), flush=True)
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
torch.distributed.barrier()
setup_for_distributed(args.rank == 0)
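# Usage note (not part of the original file): with `torchrun --nproc_per_node=N train.py`,
# the launcher exports RANK, WORLD_SIZE and LOCAL_RANK, so init_distributed_mode()
# takes its second branch; OpenMPI-style launchers set the OMPI_COMM_WORLD_* variables
# that drive the first branch, and SLURM_PROCID drives the third. The entry-point
# name `train.py` is a hypothetical example.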
class NativeScalerWithGradNormCount:
state_dict_key = "amp_scaler"
def __init__(self, fp16=True):
self._scaler = torch.cuda.amp.GradScaler(enabled=fp16)
def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):
self._scaler.scale(loss).backward(create_graph=create_graph)
if update_grad:
if clip_grad is not None:
assert parameters is not None
self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place
norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)
else:
self._scaler.unscale_(optimizer)
norm = get_grad_norm_(parameters)
self._scaler.step(optimizer)
self._scaler.update()
else:
norm = None
return norm
def state_dict(self):
return self._scaler.state_dict()
def load_state_dict(self, state_dict):
self._scaler.load_state_dict(state_dict)
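# Illustrative sketch (not part of the original file): one mixed-precision update
# using the scaler above, where `loss_scaler` is a NativeScalerWithGradNormCount
# instance. `model`, `samples`, `targets`, `criterion` and the clip value are
# hypothetical placeholders.
def _amp_step_example(model, samples, targets, criterion, optimizer, loss_scaler):
    optimizer.zero_grad()
    with torch.cuda.amp.autocast():
        loss = criterion(model(samples), targets)
    # scales the loss, backpropagates, optionally clips, steps the optimizer,
    # and returns the gradient norm
    grad_norm = loss_scaler(loss, optimizer, clip_grad=1.0,
                            parameters=model.parameters(), update_grad=True)
    return loss.item(), grad_norm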
def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor:
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = [p for p in parameters if p.grad is not None]
norm_type = float(norm_type)
if len(parameters) == 0:
return torch.tensor(0.)
device = parameters[0].grad.device
if norm_type == inf:
total_norm = max(p.grad.detach().abs().max().to(device) for p in parameters)
else:
total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), norm_type)
return total_norm
def save_model(args, epoch, epoch_name, model, model_without_ddp, optimizer, loss_scaler, best_val_loss=None, best_acc=None, step=None):
output_dir = Path(args.output_dir)
if loss_scaler is not None:
checkpoint_paths = [output_dir / ('checkpoint-%s.pth' % epoch_name)]
for checkpoint_path in checkpoint_paths:
to_save = {
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'epoch': epoch,
'scaler': loss_scaler.state_dict(),
'args': args,
'best_val_loss': best_val_loss,
'best_acc': best_acc,
'step': step,
}
save_on_master(to_save, checkpoint_path)
else:
client_state = {'epoch': epoch, 'best_val_loss': best_val_loss, 'best_acc': best_acc, 'step': step}
model.save_checkpoint(save_dir=args.output_dir, tag="checkpoint-%s" % epoch_name, client_state=client_state)
def load_model(args, model_without_ddp, optimizer, loss_scaler):
    # defaults, so the return below is well-defined even when not resuming or
    # when the checkpoint carries no optimizer state
    start_epoch, best_val_loss, best_acc, step = None, None, 0., 0
    if args.resume:
if args.resume.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.resume, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'])
print("Resume checkpoint %s" % args.resume)
if 'optimizer' in checkpoint and 'epoch' in checkpoint and not (hasattr(args, 'eval') and args.eval):
optimizer.load_state_dict(checkpoint['optimizer'])
start_epoch = checkpoint['epoch'] + 1
if 'scaler' in checkpoint:
loss_scaler.load_state_dict(checkpoint['scaler'])
print("With optim & sched!")
best_val_loss = checkpoint['best_val_loss'] if 'best_val_loss' in checkpoint else None
best_acc = checkpoint['best_acc'] if 'best_acc' in checkpoint else 0.
if isinstance(best_acc, list): # TODO: be backward compatible; remove this line before release;
best_acc = best_acc[0]
step = checkpoint['step'] if 'step' in checkpoint else 0
return start_epoch, best_val_loss, best_acc, step
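# Illustrative sketch (not part of the original file): the save/resume cycle the
# two helpers above implement. `args` is assumed to carry the output_dir and
# resume fields used elsewhere in this file; the metric values are made up.
def _checkpoint_cycle_example(args, model, model_without_ddp, optimizer, loss_scaler):
    # writes checkpoint-best.pth under args.output_dir on the main process only
    save_model(args, epoch=3, epoch_name="best", model=model,
               model_without_ddp=model_without_ddp, optimizer=optimizer,
               loss_scaler=loss_scaler, best_val_loss=0.42, best_acc=0.81, step=1000)
    # later, with args.resume pointing at that file, everything is restored
    return load_model(args, model_without_ddp, optimizer, loss_scaler)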
def all_reduce_mean(x):
world_size = get_world_size()
if world_size > 1:
x_reduce = torch.tensor(x).cuda()
dist.all_reduce(x_reduce)
x_reduce /= world_size
return x_reduce.item()
else:
return x
def print_json(output_dir, json_str, mode="a"):
print(json_str)
if output_dir and is_main_process():
with open(os.path.join(output_dir, "log.txt"), mode=mode, encoding="utf-8") as f:
f.write(json_str + "\n")
|
CiT-main
|
util/misc.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import math
def adjust_step_learning_rate(optimizer, step, lr, min_lr, warmup_steps, max_update):
"""huxu: add supports for steps instead of epoch.
Decay the learning rate with half-cycle cosine after warmup"""
if step < warmup_steps:
lr = lr * step / warmup_steps
else:
lr = min_lr + (lr - min_lr) * 0.5 * \
(1. + math.cos(math.pi * (step - warmup_steps) / (max_update - warmup_steps)))
for param_group in optimizer.param_groups:
if "lr_scale" in param_group:
param_group["lr"] = lr * param_group["lr_scale"]
else:
param_group["lr"] = lr
return lr
def adjust_learning_rate(optimizer, epoch, args):
"""Decay the learning rate with half-cycle cosine after warmup"""
if epoch < args.warmup_epochs:
lr = args.lr * epoch / args.warmup_epochs
else:
lr = args.min_lr + (args.lr - args.min_lr) * 0.5 * \
(1. + math.cos(math.pi * (epoch - args.warmup_epochs) / (args.epochs - args.warmup_epochs)))
for param_group in optimizer.param_groups:
if "lr_scale" in param_group:
param_group["lr"] = lr * param_group["lr_scale"]
else:
param_group["lr"] = lr
return lr
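# Illustrative sketch (not part of the original file): evaluating the warmup +
# half-cycle cosine schedule above without a real model, just to see its shape.
# The hyper-parameter values are made up.
def _cosine_schedule_example():
    import torch
    opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.0)
    lrs = [adjust_step_learning_rate(opt, step, lr=1e-3, min_lr=1e-6,
                                     warmup_steps=100, max_update=1000)
           for step in range(1000)]
    return lrs  # linear ramp for 100 steps, then cosine decay towards min_lr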
|
CiT-main
|
util/lr_sched.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
import torch
from transformers import (
PreTrainedModel,
PretrainedConfig,
AutoConfig,
AutoModel,
)
from transformers.modeling_outputs import BaseModelOutputWithPooling
import timm
# NOTE: a plain lexicographic version check; adequate for the 0.4.x+ releases used here,
# but not a strict semantic-version comparison.
assert timm.__version__ >= "0.4.12", "make sure timm uses augreg checkpoints."
class AugRegConfig(PretrainedConfig):
"""
HF or older timm doesn't load augreg weights.
"""
model_type = "augreg"
def __init__(
self,
config_name="vit_base_patch32_224_in21k",
hidden_size=768,
**kwargs
):
super().__init__(**kwargs)
self.config_name = config_name
self.hidden_size = hidden_size
AutoConfig.register("augreg", AugRegConfig)
class AugRegModel(PreTrainedModel):
config_class = AugRegConfig
@classmethod
def from_orig_pretrained(cls, config_name):
augreg = timm.create_model(config_name, pretrained=True)
config = AugRegConfig(config_name=config_name, hidden_size=augreg.embed_dim)
model = AugRegModel(config)
model.augreg = augreg
return model
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
import os
        ckpt_path = os.path.join(pretrained_model_name_or_path, "pytorch_model.bin")
        state_dict = torch.load(ckpt_path, map_location="cpu")
config = AutoConfig.from_pretrained(pretrained_model_name_or_path)
model = AugRegModel(config)
model.load_state_dict(state_dict, strict=True)
return model
def __init__(self, config):
super().__init__(config)
self.config = config
self.augreg = timm.create_model(config.config_name, pretrained=False)
self.post_init()
def _init_weights(self, module):
self.augreg._init_weights(module)
def forward(
self,
pixel_values=None,
# attention_mask=None,
# head_mask=None,
output_attentions=None,
output_hidden_states=None,
# interpolate_pos_encoding=None,
return_dict=None
):
# https://github.com/rwightman/pytorch-image-models/blob/e0c4eec4b66dc14ae96097c7b4a7ef2af45ba309/timm/models/vision_transformer.py#L358
# pre_logits is nn.Identity and token means from CLS [:, 0]
sequence_output = self.augreg.forward_features(pixel_values)
pooled_output = sequence_output
if not return_dict:
return (sequence_output, pooled_output)
return BaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=None, # encoder_outputs.hidden_states,
attentions=None, # encoder_outputs.attentions,
)
AutoModel.register(AugRegConfig, AugRegModel)
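# Illustrative sketch (not part of the original file): once AugRegConfig/AugRegModel
# are registered above, a directory produced by save_pretrained() (see the __main__
# block below) can be reloaded generically, assuming the weights were written as
# pytorch_model.bin as the custom from_pretrained() expects. The path is a
# hypothetical example.
def _load_augreg_example():
    vision_model = AutoModel.from_pretrained(
        "pretrained_models/vit_base_patch32_224_in21k_augreg_hf")
    pixel_values = torch.zeros(1, 3, 224, 224)  # dummy batch: one 224x224 RGB image
    outputs = vision_model(pixel_values=pixel_values, return_dict=True)
    return outputs.pooler_output.shape  # (1, hidden_size)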
if __name__ == '__main__':
# dump this model for AutoModel: `python -m hfmodels.augreg`
models = ["vit_base_patch32_224_in21k", "vit_base_patch16_224_in21k", "vit_large_patch16_224_in21k"]
for model in models:
vision_model = AugRegModel.from_orig_pretrained(model)
vision_model.save_pretrained(f"pretrained_models/{model}_augreg_hf")
|
CiT-main
|
hfmodels/augreg.py
|